-rw-r--r--  .mailmap | 4
-rw-r--r--  Documentation/cpu-freq/user-guide.txt | 16
-rw-r--r--  Documentation/filesystems/sysfs-pci.txt | 13
-rw-r--r--  MAINTAINERS | 11
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/include/asm/bug.h | 4
-rw-r--r--  arch/frv/mm/dma-alloc.c | 2
-rw-r--r--  arch/ia64/sn/kernel/io_acpi_init.c | 2
-rw-r--r--  arch/ia64/sn/kernel/io_init.c | 2
-rw-r--r--  arch/mips/include/asm/spinlock.h | 1
-rw-r--r--  arch/sh/boards/board-ap325rxa.c | 63
-rw-r--r--  arch/sh/configs/ap325rxa_defconfig | 27
-rw-r--r--  arch/sh/configs/migor_defconfig | 42
-rw-r--r--  arch/sh/include/asm/mutex-llsc.h | 21
-rw-r--r--  arch/sh/include/asm/syscall_32.h | 22
-rw-r--r--  arch/sh/include/asm/syscall_64.h | 22
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c | 2
-rw-r--r--  arch/sh/kernel/setup.c | 8
-rw-r--r--  arch/sh/kernel/signal_32.c | 4
-rw-r--r--  arch/sh/kernel/signal_64.c | 4
-rw-r--r--  arch/sh/lib/checksum.S | 69
-rw-r--r--  arch/sparc/kernel/head_64.S | 31
-rw-r--r--  arch/sparc/kernel/nmi.c | 1
-rw-r--r--  arch/sparc/kernel/pcr.c | 7
-rw-r--r--  arch/sparc/lib/GENbzero.S | 6
-rw-r--r--  arch/sparc/lib/GENcopy_from_user.S | 8
-rw-r--r--  arch/sparc/lib/GENcopy_to_user.S | 8
-rw-r--r--  arch/sparc/lib/NG2copy_from_user.S | 9
-rw-r--r--  arch/sparc/lib/NG2copy_to_user.S | 9
-rw-r--r--  arch/sparc/lib/NGbzero.S | 6
-rw-r--r--  arch/sparc/lib/NGcopy_from_user.S | 9
-rw-r--r--  arch/sparc/lib/NGcopy_to_user.S | 9
-rw-r--r--  arch/sparc/lib/U1copy_from_user.S | 8
-rw-r--r--  arch/sparc/lib/U1copy_to_user.S | 8
-rw-r--r--  arch/sparc/lib/U3copy_from_user.S | 6
-rw-r--r--  arch/sparc/lib/U3copy_to_user.S | 8
-rw-r--r--  arch/sparc/lib/bzero.S | 6
-rw-r--r--  arch/sparc/lib/copy_in_user.S | 61
-rw-r--r--  arch/x86/ia32/ia32entry.S | 8
-rw-r--r--  arch/x86/include/asm/paravirt.h | 1
-rw-r--r--  arch/x86/include/asm/spinlock.h | 1
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 4
-rw-r--r--  arch/x86/kernel/apic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig | 11
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 28
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 15
-rw-r--r--  arch/x86/kernel/entry_64.S | 1
-rw-r--r--  arch/x86/kernel/io_apic.c | 5
-rw-r--r--  arch/x86/kernel/irqinit_32.c | 12
-rw-r--r--  arch/x86/mach-default/setup.c | 12
-rw-r--r--  arch/x86/mach-voyager/setup.c | 12
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 25
-rw-r--r--  arch/x86/mm/fault.c | 8
-rw-r--r--  arch/x86/xen/multicalls.h | 4
-rw-r--r--  crypto/algapi.c | 6
-rw-r--r--  crypto/api.c | 20
-rw-r--r--  crypto/scatterwalk.c | 3
-rw-r--r--  crypto/shash.c | 7
-rw-r--r--  drivers/acpi/Kconfig | 3
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 7
-rw-r--r--  drivers/acpi/acpica/uteval.c | 21
-rw-r--r--  drivers/acpi/container.c | 5
-rw-r--r--  drivers/acpi/dock.c | 14
-rw-r--r--  drivers/acpi/ec.c | 2
-rw-r--r--  drivers/acpi/glue.c | 8
-rw-r--r--  drivers/acpi/osl.c | 54
-rw-r--r--  drivers/acpi/pci_link.c | 2
-rw-r--r--  drivers/acpi/processor_idle.c | 667
-rw-r--r--  drivers/acpi/processor_perflib.c | 105
-rw-r--r--  drivers/acpi/sleep.c | 53
-rw-r--r--  drivers/acpi/tables.c | 7
-rw-r--r--  drivers/acpi/video.c | 16
-rw-r--r--  drivers/atm/solos-pci.c | 1
-rw-r--r--  drivers/char/sx.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 47
-rw-r--r--  drivers/firewire/fw-card.c | 9
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 1
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 47
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 83
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 91
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 870
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo_regs.h | 404
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 6
-rw-r--r--  drivers/hwmon/hp_accel.c | 9
-rw-r--r--  drivers/ieee1394/dv1394.c | 8
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcmulti.c | 2
-rw-r--r--  drivers/md/linear.c | 6
-rw-r--r--  drivers/md/md.c | 24
-rw-r--r--  drivers/md/raid1.c | 3
-rw-r--r--  drivers/misc/Kconfig | 1
-rw-r--r--  drivers/misc/atmel-ssc.c | 2
-rw-r--r--  drivers/misc/hpilo.c | 6
-rw-r--r--  drivers/misc/hpilo.h | 2
-rw-r--r--  drivers/misc/sgi-xp/xpc.h | 5
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c | 11
-rw-r--r--  drivers/net/3c509.c | 1
-rw-r--r--  drivers/net/cxgb3/sge.c | 3
-rw-r--r--  drivers/net/gianfar.c | 9
-rw-r--r--  drivers/net/gianfar.h | 2
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 2
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 13
-rw-r--r--  drivers/net/r8169.c | 93
-rw-r--r--  drivers/net/sungem.c | 8
-rw-r--r--  drivers/net/sunhme.c | 12
-rw-r--r--  drivers/net/tulip/de2104x.c | 3
-rw-r--r--  drivers/net/tun.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 2
-rw-r--r--  drivers/pci/pci-driver.c | 164
-rw-r--r--  drivers/pci/pci-sysfs.c | 4
-rw-r--r--  drivers/pci/pci.c | 4
-rw-r--r--  drivers/pci/pcie/aspm.c | 4
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 16
-rw-r--r--  drivers/pci/rom.c | 8
-rw-r--r--  drivers/platform/x86/Kconfig | 1
-rw-r--r--  drivers/platform/x86/asus-laptop.c | 176
-rw-r--r--  drivers/platform/x86/asus_acpi.c | 16
-rw-r--r--  drivers/platform/x86/eeepc-laptop.c | 164
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 6
-rw-r--r--  drivers/platform/x86/panasonic-laptop.c | 2
-rw-r--r--  drivers/rtc/Kconfig | 6
-rw-r--r--  drivers/rtc/Makefile | 1
-rw-r--r--  drivers/rtc/rtc-dm355evm.c | 175
-rw-r--r--  drivers/rtc/rtc-ds1390.c | 1
-rw-r--r--  drivers/staging/android/Kconfig | 1
-rw-r--r--  drivers/staging/android/ram_console.c | 14
-rw-r--r--  drivers/staging/android/timed_gpio.c | 2
-rw-r--r--  drivers/staging/at76_usb/Kconfig | 2
-rw-r--r--  drivers/staging/at76_usb/at76_usb.c | 4620
-rw-r--r--  drivers/staging/at76_usb/at76_usb.h | 227
-rw-r--r--  drivers/staging/panel/panel.c | 10
-rw-r--r--  drivers/usb/Makefile | 1
-rw-r--r--  drivers/usb/class/cdc-acm.c | 3
-rw-r--r--  drivers/usb/gadget/fsl_qe_udc.c | 46
-rw-r--r--  drivers/usb/serial/aircable.c | 4
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 5
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 3
-rw-r--r--  drivers/usb/serial/option.c | 93
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 10
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.h | 2
-rw-r--r--  drivers/usb/storage/scsiglue.c | 2
-rw-r--r--  drivers/usb/storage/transport.c | 36
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 17
-rw-r--r--  drivers/video/Kconfig | 6
-rw-r--r--  drivers/video/aty/aty128fb.c | 33
-rw-r--r--  drivers/video/aty/atyfb_base.c | 42
-rw-r--r--  drivers/video/aty/radeon_base.c | 10
-rw-r--r--  drivers/video/aty/radeon_pm.c | 103
-rw-r--r--  drivers/video/aty/radeonfb.h | 2
-rw-r--r--  drivers/video/backlight/Makefile | 2
-rw-r--r--  drivers/video/backlight/da903x_bl.c (renamed from drivers/video/backlight/da903x.c) | 0
-rw-r--r--  drivers/video/fbcmap.c | 20
-rw-r--r--  drivers/video/fbmem.c | 135
-rw-r--r--  fs/binfmt_elf.c | 14
-rw-r--r--  fs/btrfs/Kconfig | 13
-rw-r--r--  fs/btrfs/async-thread.c | 61
-rw-r--r--  fs/btrfs/compression.c | 1
-rw-r--r--  fs/btrfs/ctree.c | 277
-rw-r--r--  fs/btrfs/ctree.h | 28
-rw-r--r--  fs/btrfs/disk-io.c | 120
-rw-r--r--  fs/btrfs/disk-io.h | 2
-rw-r--r--  fs/btrfs/extent-tree.c | 438
-rw-r--r--  fs/btrfs/extent_io.c | 132
-rw-r--r--  fs/btrfs/extent_io.h | 18
-rw-r--r--  fs/btrfs/extent_map.c | 1
-rw-r--r--  fs/btrfs/file.c | 5
-rw-r--r--  fs/btrfs/inode.c | 84
-rw-r--r--  fs/btrfs/ioctl.c | 1
-rw-r--r--  fs/btrfs/locking.c | 218
-rw-r--r--  fs/btrfs/locking.h | 6
-rw-r--r--  fs/btrfs/ordered-data.c | 4
-rw-r--r--  fs/btrfs/ref-cache.c | 1
-rw-r--r--  fs/btrfs/ref-cache.h | 1
-rw-r--r--  fs/btrfs/super.c | 6
-rw-r--r--  fs/btrfs/transaction.c | 4
-rw-r--r--  fs/btrfs/tree-defrag.c | 1
-rw-r--r--  fs/btrfs/tree-log.c | 354
-rw-r--r--  fs/btrfs/volumes.c | 49
-rw-r--r--  fs/btrfs/xattr.c | 48
-rw-r--r--  fs/btrfs/xattr.h | 2
-rw-r--r--  fs/buffer.c | 2
-rw-r--r--  fs/compat.c | 2
-rw-r--r--  fs/compat_ioctl.c | 2
-rw-r--r--  fs/configfs/dir.c | 59
-rw-r--r--  fs/ecryptfs/crypto.c | 4
-rw-r--r--  fs/exec.c | 28
-rw-r--r--  fs/hugetlbfs/inode.c | 8
-rw-r--r--  fs/internal.h | 2
-rw-r--r--  fs/lockd/svclock.c | 6
-rw-r--r--  fs/seq_file.c | 115
-rw-r--r--  fs/super.c | 4
-rw-r--r--  include/acpi/pdc_intel.h | 2
-rw-r--r--  include/crypto/hash.h | 2
-rw-r--r--  include/drm/i915_drm.h | 2
-rw-r--r--  include/linux/async.h | 8
-rw-r--r--  include/linux/crypto.h | 7
-rw-r--r--  include/linux/dmaengine.h | 12
-rw-r--r--  include/linux/fb.h | 15
-rw-r--r--  include/linux/hugetlb.h | 5
-rw-r--r--  include/linux/kernel.h | 3
-rw-r--r--  include/linux/mm.h | 3
-rw-r--r--  include/linux/module.h | 1
-rw-r--r--  include/linux/pci.h | 2
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/linux/spinlock.h | 5
-rw-r--r--  include/linux/wait.h | 11
-rw-r--r--  include/video/aty128.h | 4
-rw-r--r--  include/video/mach64.h | 24
-rw-r--r--  include/video/radeon.h | 18
-rw-r--r--  ipc/shm.c | 12
-rw-r--r--  kernel/async.c | 94
-rw-r--r--  kernel/fork.c | 1
-rw-r--r--  kernel/irq/numa_migrate.c | 7
-rw-r--r--  kernel/power/main.c | 26
-rw-r--r--  kernel/sched.c | 4
-rw-r--r--  kernel/sys.c | 16
-rw-r--r--  kernel/trace/ftrace.c | 5
-rw-r--r--  kernel/wait.c | 59
-rw-r--r--  mm/fremap.c | 2
-rw-r--r--  mm/hugetlb.c | 39
-rw-r--r--  mm/memory.c | 2
-rw-r--r--  mm/mlock.c | 5
-rw-r--r--  mm/mmap.c | 38
-rw-r--r--  mm/mprotect.c | 5
-rw-r--r--  net/9p/protocol.c | 22
-rw-r--r--  net/bridge/br_forward.c | 7
-rw-r--r--  net/core/dev.c | 4
-rw-r--r--  net/core/neighbour.c | 14
-rw-r--r--  net/ipv4/tcp_output.c | 12
-rw-r--r--  net/ipv4/udp.c | 9
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 8
-rw-r--r--  net/ipv6/ip6_output.c | 67
-rw-r--r--  net/ipv6/ip6_tunnel.c | 2
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 25
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 15
-rw-r--r--  net/netfilter/xt_sctp.c | 2
-rw-r--r--  net/rxrpc/af_rxrpc.c | 5
-rw-r--r--  sound/core/oss/pcm_oss.c | 2
-rw-r--r--  sound/pci/hda/hda_codec.c | 9
-rw-r--r--  sound/pci/hda/hda_codec.h | 1
-rw-r--r--  sound/pci/hda/hda_proc.c | 3
-rw-r--r--  sound/pci/hda/patch_realtek.c | 5
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 2
-rw-r--r--  sound/pci/intel8x0.c | 2
-rw-r--r--  sound/soc/atmel/atmel_ssc_dai.c | 2
-rw-r--r--  sound/soc/atmel/atmel_ssc_dai.h | 2
-rw-r--r--  sound/soc/codecs/wm8350.c | 2
-rw-r--r--  sound/soc/codecs/wm8990.c | 3
-rw-r--r--  sound/soc/omap/omap-pcm.c | 5
-rw-r--r--  sound/usb/usbaudio.c | 1
260 files changed, 8974 insertions, 3627 deletions
diff --git a/.mailmap b/.mailmap
index 4e83e7b52d15..a62e6a84fd1e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -92,6 +92,7 @@ Rudolf Marek <R.Marek@sh.cvut.cz>
92Rui Saraiva <rmps@joel.ist.utl.pt> 92Rui Saraiva <rmps@joel.ist.utl.pt>
93Sachin P Sant <ssant@in.ibm.com> 93Sachin P Sant <ssant@in.ibm.com>
94Sam Ravnborg <sam@mars.ravnborg.org> 94Sam Ravnborg <sam@mars.ravnborg.org>
95Sascha Hauer <s.hauer@pengutronix.de>
95S.Çağlar Onur <caglar@pardus.org.tr> 96S.Çağlar Onur <caglar@pardus.org.tr>
96Simon Kelley <simon@thekelleys.org.uk> 97Simon Kelley <simon@thekelleys.org.uk>
97Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr> 98Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
@@ -100,6 +101,7 @@ Tejun Heo <htejun@gmail.com>
100Thomas Graf <tgraf@suug.ch> 101Thomas Graf <tgraf@suug.ch>
101Tony Luck <tony.luck@intel.com> 102Tony Luck <tony.luck@intel.com>
102Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com> 103Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
103Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
104Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de> 104Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
105Uwe Kleine-König <ukl@pengutronix.de>
106Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
105Valdis Kletnieks <Valdis.Kletnieks@vt.edu> 107Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
diff --git a/Documentation/cpu-freq/user-guide.txt b/Documentation/cpu-freq/user-guide.txt
index e3443ddcfb89..917918f84fc7 100644
--- a/Documentation/cpu-freq/user-guide.txt
+++ b/Documentation/cpu-freq/user-guide.txt
@@ -195,19 +195,3 @@ scaling_setspeed. By "echoing" a new frequency into this
195 you can change the speed of the CPU, 195 you can change the speed of the CPU,
196 but only within the limits of 196 but only within the limits of
197 scaling_min_freq and scaling_max_freq. 197 scaling_min_freq and scaling_max_freq.
198
199
2003.2 Deprecated Interfaces
201-------------------------
202
203Depending on your kernel configuration, you might find the following
204cpufreq-related files:
205/proc/cpufreq
206/proc/sys/cpu/*/speed
207/proc/sys/cpu/*/speed-min
208/proc/sys/cpu/*/speed-max
209
210These are files for deprecated interfaces to cpufreq, which offer far
211less functionality. Because of this, these interfaces aren't described
212here.
213
diff --git a/Documentation/filesystems/sysfs-pci.txt b/Documentation/filesystems/sysfs-pci.txt
index 68ef48839c04..9f8740ca3f3b 100644
--- a/Documentation/filesystems/sysfs-pci.txt
+++ b/Documentation/filesystems/sysfs-pci.txt
@@ -9,6 +9,7 @@ that support it. For example, a given bus might look like this:
9 | |-- class 9 | |-- class
10 | |-- config 10 | |-- config
11 | |-- device 11 | |-- device
12 | |-- enable
12 | |-- irq 13 | |-- irq
13 | |-- local_cpus 14 | |-- local_cpus
14 | |-- resource 15 | |-- resource
@@ -32,6 +33,7 @@ files, each with their own function.
32 class PCI class (ascii, ro) 33 class PCI class (ascii, ro)
33 config PCI config space (binary, rw) 34 config PCI config space (binary, rw)
34 device PCI device (ascii, ro) 35 device PCI device (ascii, ro)
36 enable Whether the device is enabled (ascii, rw)
35 irq IRQ number (ascii, ro) 37 irq IRQ number (ascii, ro)
36 local_cpus nearby CPU mask (cpumask, ro) 38 local_cpus nearby CPU mask (cpumask, ro)
37 resource PCI resource host addresses (ascii, ro) 39 resource PCI resource host addresses (ascii, ro)
@@ -57,10 +59,19 @@ used to do actual device programming from userspace. Note that some platforms
57don't support mmapping of certain resources, so be sure to check the return 59don't support mmapping of certain resources, so be sure to check the return
58value from any attempted mmap. 60value from any attempted mmap.
59 61
62The 'enable' file provides a counter that indicates how many times the device
63has been enabled. If the 'enable' file currently returns '4', and a '1' is
64echoed into it, it will then return '5'. Echoing a '0' into it will decrease
65the count. Even when it returns to 0, though, some of the initialisation
66may not be reversed.
67
60The 'rom' file is special in that it provides read-only access to the device's 68The 'rom' file is special in that it provides read-only access to the device's
61ROM file, if available. It's disabled by default, however, so applications 69ROM file, if available. It's disabled by default, however, so applications
62should write the string "1" to the file to enable it before attempting a read 70should write the string "1" to the file to enable it before attempting a read
63call, and disable it following the access by writing "0" to the file. 71call, and disable it following the access by writing "0" to the file. Note
72that the device must be enabled for a rom read to return data succesfully.
73In the event a driver is not bound to the device, it can be enabled using the
74'enable' file, documented above.
64 75
65Accessing legacy resources through sysfs 76Accessing legacy resources through sysfs
66---------------------------------------- 77----------------------------------------
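For illustration only (not part of the patch): the 'enable' counter documented in the sysfs-pci.txt hunk above can be driven from user space with plain file I/O. A minimal sketch follows; the device address is hypothetical and writing the file normally requires root.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Write a single character ("1" or "0") to a sysfs attribute. */
    static void write_str(const char *path, const char *s)
    {
            int fd = open(path, O_WRONLY);

            if (fd >= 0) {
                    write(fd, s, 1);
                    close(fd);
            }
    }

    int main(void)
    {
            /* Hypothetical device address, purely for illustration. */
            const char *path = "/sys/bus/pci/devices/0000:00:1c.0/enable";
            char buf[16];
            int fd = open(path, O_RDONLY);
            ssize_t n;

            if (fd < 0)
                    return 1;
            n = read(fd, buf, sizeof(buf) - 1);
            if (n > 0) {
                    buf[n] = '\0';
                    printf("enable count before: %s", buf);
            }
            close(fd);

            write_str(path, "1");   /* increments the count */
            write_str(path, "0");   /* decrements it again */
            return 0;
    }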
diff --git a/MAINTAINERS b/MAINTAINERS
index 5bff376d297c..0ea3a6d98714 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1021,6 +1021,14 @@ M: mb@bu3sch.de
1021W: http://bu3sch.de/btgpio.php 1021W: http://bu3sch.de/btgpio.php
1022S: Maintained 1022S: Maintained
1023 1023
1024BTRFS FILE SYSTEM
1025P: Chris Mason
1026M: chris.mason@oracle.com
1027L: linux-btrfs@vger.kernel.org
1028W: http://btrfs.wiki.kernel.org/
1029T: git kernel.org:/pub/scm/linux/kernel/git/mason/btrfs-unstable.git
1030S: Maintained
1031
1024BTTV VIDEO4LINUX DRIVER 1032BTTV VIDEO4LINUX DRIVER
1025P: Mauro Carvalho Chehab 1033P: Mauro Carvalho Chehab
1026M: mchehab@infradead.org 1034M: mchehab@infradead.org
@@ -2212,7 +2220,7 @@ P: Sean Hefty
2212M: sean.hefty@intel.com 2220M: sean.hefty@intel.com
2213P: Hal Rosenstock 2221P: Hal Rosenstock
2214M: hal.rosenstock@gmail.com 2222M: hal.rosenstock@gmail.com
2215L: general@lists.openfabrics.org 2223L: general@lists.openfabrics.org (moderated for non-subscribers)
2216W: http://www.openib.org/ 2224W: http://www.openib.org/
2217T: git kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git 2225T: git kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git
2218S: Supported 2226S: Supported
@@ -4841,6 +4849,7 @@ P: Ingo Molnar
4841M: mingo@redhat.com 4849M: mingo@redhat.com
4842P: H. Peter Anvin 4850P: H. Peter Anvin
4843M: hpa@zytor.com 4851M: hpa@zytor.com
4852M: x86@kernel.org
4844L: linux-kernel@vger.kernel.org 4853L: linux-kernel@vger.kernel.org
4845T: git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git 4854T: git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
4846S: Maintained 4855S: Maintained
diff --git a/Makefile b/Makefile
index 7715b2c14fb4..681c1d23b4d4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 29 3SUBLEVEL = 29
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc4
5NAME = Erotic Pickled Herring 5NAME = Erotic Pickled Herring
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/bug.h b/arch/alpha/include/asm/bug.h
index 7b85b7c93709..1720c8ad86fe 100644
--- a/arch/alpha/include/asm/bug.h
+++ b/arch/alpha/include/asm/bug.h
@@ -8,12 +8,12 @@
8 8
9/* ??? Would be nice to use .gprel32 here, but we can't be sure that the 9/* ??? Would be nice to use .gprel32 here, but we can't be sure that the
10 function loaded the GP, so this could fail in modules. */ 10 function loaded the GP, so this could fail in modules. */
11#define BUG() { \ 11#define BUG() do { \
12 __asm__ __volatile__( \ 12 __asm__ __volatile__( \
13 "call_pal %0 # bugchk\n\t" \ 13 "call_pal %0 # bugchk\n\t" \
14 ".long %1\n\t.8byte %2" \ 14 ".long %1\n\t.8byte %2" \
15 : : "i"(PAL_bugchk), "i"(__LINE__), "i"(__FILE__)); \ 15 : : "i"(PAL_bugchk), "i"(__LINE__), "i"(__FILE__)); \
16 for ( ; ; ); } 16 for ( ; ; ); } while (0)
17 17
18#define HAVE_ARCH_BUG 18#define HAVE_ARCH_BUG
19#endif 19#endif
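The alpha change above rewrites BUG() as do { ... } while (0) instead of a bare brace block. For illustration only (not taken from the patch), the following shows why that idiom matters for any statement-like macro: with bare braces, the semicolon the caller writes after the macro terminates the if body early, so a following else no longer parses.

    void trap(void);

    /* Bare braces: "BAD_TRAP();" expands to "{ ... };" - the stray ';'
     * ends the if statement, so a following "else" fails to compile. */
    #define BAD_TRAP()      { trap(); for (;;); }

    /* do/while (0): the expansion is a single statement that absorbs
     * the caller's ';' cleanly. */
    #define GOOD_TRAP()     do { trap(); for (;;); } while (0)

    void check(int broken)
    {
            if (broken)
                    GOOD_TRAP();    /* swap in BAD_TRAP() and this breaks */
            else
                    trap();
    }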
diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c
index dc6522c464d4..44840e73e907 100644
--- a/arch/frv/mm/dma-alloc.c
+++ b/arch/frv/mm/dma-alloc.c
@@ -36,10 +36,10 @@
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/hardirq.h>
39 40
40#include <asm/pgalloc.h> 41#include <asm/pgalloc.h>
41#include <asm/io.h> 42#include <asm/io.h>
42#include <asm/hardirq.h>
43#include <asm/mmu_context.h> 43#include <asm/mmu_context.h>
44#include <asm/pgtable.h> 44#include <asm/pgtable.h>
45#include <asm/mmu.h> 45#include <asm/mmu.h>
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index c5a214026a77..d0223abbbbd4 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -443,7 +443,7 @@ sn_acpi_slot_fixup(struct pci_dev *dev)
443 size = pci_resource_len(dev, PCI_ROM_RESOURCE); 443 size = pci_resource_len(dev, PCI_ROM_RESOURCE);
444 addr = ioremap(pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE], 444 addr = ioremap(pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE],
445 size); 445 size);
446 image_size = pci_get_rom_size(addr, size); 446 image_size = pci_get_rom_size(dev, addr, size);
447 dev->resource[PCI_ROM_RESOURCE].start = (unsigned long) addr; 447 dev->resource[PCI_ROM_RESOURCE].start = (unsigned long) addr;
448 dev->resource[PCI_ROM_RESOURCE].end = 448 dev->resource[PCI_ROM_RESOURCE].end =
449 (unsigned long) addr + image_size - 1; 449 (unsigned long) addr + image_size - 1;
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 4e1801bad83a..e2eb2da60f96 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -269,7 +269,7 @@ sn_io_slot_fixup(struct pci_dev *dev)
269 269
270 rom = ioremap(pci_resource_start(dev, PCI_ROM_RESOURCE), 270 rom = ioremap(pci_resource_start(dev, PCI_ROM_RESOURCE),
271 size + 1); 271 size + 1);
272 image_size = pci_get_rom_size(rom, size + 1); 272 image_size = pci_get_rom_size(dev, rom, size + 1);
273 dev->resource[PCI_ROM_RESOURCE].end = 273 dev->resource[PCI_ROM_RESOURCE].end =
274 dev->resource[PCI_ROM_RESOURCE].start + 274 dev->resource[PCI_ROM_RESOURCE].start +
275 image_size - 1; 275 image_size - 1;
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 1a1f320c30d8..0884947ebe27 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -51,6 +51,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
51 51
52 return (((counters >> 14) - counters) & 0x1fff) > 1; 52 return (((counters >> 14) - counters) & 0x1fff) > 1;
53} 53}
54#define __raw_spin_is_contended __raw_spin_is_contended
54 55
55static inline void __raw_spin_lock(raw_spinlock_t *lock) 56static inline void __raw_spin_lock(raw_spinlock_t *lock)
56{ 57{
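The MIPS hunk above follows the function with "#define __raw_spin_is_contended __raw_spin_is_contended". That self-referential define is the usual kernel way of letting generic headers detect, via the preprocessor, that the architecture supplies its own implementation. A rough sketch of the pattern (illustrative only, not the actual include/linux/spinlock.h text):

    /* Dummy type so the sketch is self-contained. */
    typedef struct { volatile unsigned int lock; } raw_spinlock_t;

    /* Architecture header: provide the helper, then advertise it. */
    static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
    {
            return lock->lock != 0;         /* placeholder arch-specific test */
    }
    #define __raw_spin_is_contended __raw_spin_is_contended

    /* Generic header: install a fallback only if no arch version exists. */
    #ifndef __raw_spin_is_contended
    #define __raw_spin_is_contended(lock)   (((void)(lock), 0))
    #endif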
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c
index caf4c33f4e84..7c35787d29b4 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/board-ap325rxa.c
@@ -22,6 +22,7 @@
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/spi/spi.h> 23#include <linux/spi/spi.h>
24#include <linux/spi/spi_gpio.h> 24#include <linux/spi/spi_gpio.h>
25#include <media/ov772x.h>
25#include <media/soc_camera_platform.h> 26#include <media/soc_camera_platform.h>
26#include <media/sh_mobile_ceu.h> 27#include <media/sh_mobile_ceu.h>
27#include <video/sh_mobile_lcdc.h> 28#include <video/sh_mobile_lcdc.h>
@@ -216,7 +217,14 @@ static struct platform_device lcdc_device = {
216 }, 217 },
217}; 218};
218 219
220static void camera_power(int val)
221{
222 gpio_set_value(GPIO_PTZ5, val); /* RST_CAM/RSTB */
223 mdelay(10);
224}
225
219#ifdef CONFIG_I2C 226#ifdef CONFIG_I2C
227/* support for the old ncm03j camera */
220static unsigned char camera_ncm03j_magic[] = 228static unsigned char camera_ncm03j_magic[] =
221{ 229{
222 0x87, 0x00, 0x88, 0x08, 0x89, 0x01, 0x8A, 0xE8, 230 0x87, 0x00, 0x88, 0x08, 0x89, 0x01, 0x8A, 0xE8,
@@ -237,6 +245,23 @@ static unsigned char camera_ncm03j_magic[] =
237 0x63, 0xD4, 0x64, 0xEA, 0xD6, 0x0F, 245 0x63, 0xD4, 0x64, 0xEA, 0xD6, 0x0F,
238}; 246};
239 247
248static int camera_probe(void)
249{
250 struct i2c_adapter *a = i2c_get_adapter(0);
251 struct i2c_msg msg;
252 int ret;
253
254 camera_power(1);
255 msg.addr = 0x6e;
256 msg.buf = camera_ncm03j_magic;
257 msg.len = 2;
258 msg.flags = 0;
259 ret = i2c_transfer(a, &msg, 1);
260 camera_power(0);
261
262 return ret;
263}
264
240static int camera_set_capture(struct soc_camera_platform_info *info, 265static int camera_set_capture(struct soc_camera_platform_info *info,
241 int enable) 266 int enable)
242{ 267{
@@ -245,9 +270,11 @@ static int camera_set_capture(struct soc_camera_platform_info *info,
245 int ret = 0; 270 int ret = 0;
246 int i; 271 int i;
247 272
273 camera_power(0);
248 if (!enable) 274 if (!enable)
249 return 0; /* no disable for now */ 275 return 0; /* no disable for now */
250 276
277 camera_power(1);
251 for (i = 0; i < ARRAY_SIZE(camera_ncm03j_magic); i += 2) { 278 for (i = 0; i < ARRAY_SIZE(camera_ncm03j_magic); i += 2) {
252 u_int8_t buf[8]; 279 u_int8_t buf[8];
253 280
@@ -286,8 +313,35 @@ static struct platform_device camera_device = {
286 .platform_data = &camera_info, 313 .platform_data = &camera_info,
287 }, 314 },
288}; 315};
316
317static int __init camera_setup(void)
318{
319 if (camera_probe() > 0)
320 platform_device_register(&camera_device);
321
322 return 0;
323}
324late_initcall(camera_setup);
325
289#endif /* CONFIG_I2C */ 326#endif /* CONFIG_I2C */
290 327
328static int ov7725_power(struct device *dev, int mode)
329{
330 camera_power(0);
331 if (mode)
332 camera_power(1);
333
334 return 0;
335}
336
337static struct ov772x_camera_info ov7725_info = {
338 .buswidth = SOCAM_DATAWIDTH_8,
339 .flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
340 .link = {
341 .power = ov7725_power,
342 },
343};
344
291static struct sh_mobile_ceu_info sh_mobile_ceu_info = { 345static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
292 .flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH | 346 .flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH |
293 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8, 347 SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8,
@@ -338,9 +392,6 @@ static struct platform_device *ap325rxa_devices[] __initdata = {
338 &ap325rxa_nor_flash_device, 392 &ap325rxa_nor_flash_device,
339 &lcdc_device, 393 &lcdc_device,
340 &ceu_device, 394 &ceu_device,
341#ifdef CONFIG_I2C
342 &camera_device,
343#endif
344 &nand_flash_device, 395 &nand_flash_device,
345 &sdcard_cn3_device, 396 &sdcard_cn3_device,
346}; 397};
@@ -349,6 +400,10 @@ static struct i2c_board_info __initdata ap325rxa_i2c_devices[] = {
349 { 400 {
350 I2C_BOARD_INFO("pcf8563", 0x51), 401 I2C_BOARD_INFO("pcf8563", 0x51),
351 }, 402 },
403 {
404 I2C_BOARD_INFO("ov772x", 0x21),
405 .platform_data = &ov7725_info,
406 },
352}; 407};
353 408
354static struct spi_board_info ap325rxa_spi_devices[] = { 409static struct spi_board_info ap325rxa_spi_devices[] = {
@@ -426,7 +481,7 @@ static int __init ap325rxa_devices_setup(void)
426 gpio_request(GPIO_PTZ6, NULL); 481 gpio_request(GPIO_PTZ6, NULL);
427 gpio_direction_output(GPIO_PTZ6, 0); /* STBY_CAM */ 482 gpio_direction_output(GPIO_PTZ6, 0); /* STBY_CAM */
428 gpio_request(GPIO_PTZ5, NULL); 483 gpio_request(GPIO_PTZ5, NULL);
429 gpio_direction_output(GPIO_PTZ5, 1); /* RST_CAM */ 484 gpio_direction_output(GPIO_PTZ5, 0); /* RST_CAM */
430 gpio_request(GPIO_PTZ4, NULL); 485 gpio_request(GPIO_PTZ4, NULL);
431 gpio_direction_output(GPIO_PTZ4, 0); /* SADDR */ 486 gpio_direction_output(GPIO_PTZ4, 0); /* SADDR */
432 487
diff --git a/arch/sh/configs/ap325rxa_defconfig b/arch/sh/configs/ap325rxa_defconfig
index 5c423fa8e6b8..352f87d50fdc 100644
--- a/arch/sh/configs/ap325rxa_defconfig
+++ b/arch/sh/configs/ap325rxa_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28 3# Linux kernel version: 2.6.29-rc2
4# Fri Jan 9 16:54:19 2009 4# Tue Jan 27 11:45:08 2009
5# 5#
6CONFIG_SUPERH=y 6CONFIG_SUPERH=y
7CONFIG_SUPERH32=y 7CONFIG_SUPERH32=y
@@ -45,12 +45,12 @@ CONFIG_BSD_PROCESS_ACCT=y
45# CONFIG_AUDIT is not set 45# CONFIG_AUDIT is not set
46# CONFIG_IKCONFIG is not set 46# CONFIG_IKCONFIG is not set
47CONFIG_LOG_BUF_SHIFT=14 47CONFIG_LOG_BUF_SHIFT=14
48# CONFIG_CGROUPS is not set
49CONFIG_GROUP_SCHED=y 48CONFIG_GROUP_SCHED=y
50CONFIG_FAIR_GROUP_SCHED=y 49CONFIG_FAIR_GROUP_SCHED=y
51# CONFIG_RT_GROUP_SCHED is not set 50# CONFIG_RT_GROUP_SCHED is not set
52CONFIG_USER_SCHED=y 51CONFIG_USER_SCHED=y
53# CONFIG_CGROUP_SCHED is not set 52# CONFIG_CGROUP_SCHED is not set
53# CONFIG_CGROUPS is not set
54CONFIG_SYSFS_DEPRECATED=y 54CONFIG_SYSFS_DEPRECATED=y
55CONFIG_SYSFS_DEPRECATED_V2=y 55CONFIG_SYSFS_DEPRECATED_V2=y
56# CONFIG_RELAY is not set 56# CONFIG_RELAY is not set
@@ -378,6 +378,7 @@ CONFIG_WIRELESS=y
378# CONFIG_WIRELESS_EXT is not set 378# CONFIG_WIRELESS_EXT is not set
379# CONFIG_LIB80211 is not set 379# CONFIG_LIB80211 is not set
380# CONFIG_MAC80211 is not set 380# CONFIG_MAC80211 is not set
381# CONFIG_WIMAX is not set
381# CONFIG_RFKILL is not set 382# CONFIG_RFKILL is not set
382# CONFIG_NET_9P is not set 383# CONFIG_NET_9P is not set
383 384
@@ -400,6 +401,7 @@ CONFIG_MTD=y
400# CONFIG_MTD_DEBUG is not set 401# CONFIG_MTD_DEBUG is not set
401CONFIG_MTD_CONCAT=y 402CONFIG_MTD_CONCAT=y
402CONFIG_MTD_PARTITIONS=y 403CONFIG_MTD_PARTITIONS=y
404# CONFIG_MTD_TESTS is not set
403# CONFIG_MTD_REDBOOT_PARTS is not set 405# CONFIG_MTD_REDBOOT_PARTS is not set
404CONFIG_MTD_CMDLINE_PARTS=y 406CONFIG_MTD_CMDLINE_PARTS=y
405# CONFIG_MTD_AR7_PARTS is not set 407# CONFIG_MTD_AR7_PARTS is not set
@@ -447,9 +449,7 @@ CONFIG_MTD_CFI_UTIL=y
447# 449#
448# CONFIG_MTD_COMPLEX_MAPPINGS is not set 450# CONFIG_MTD_COMPLEX_MAPPINGS is not set
449CONFIG_MTD_PHYSMAP=y 451CONFIG_MTD_PHYSMAP=y
450CONFIG_MTD_PHYSMAP_START=0xffffffff 452# CONFIG_MTD_PHYSMAP_COMPAT is not set
451CONFIG_MTD_PHYSMAP_LEN=0
452CONFIG_MTD_PHYSMAP_BANKWIDTH=0
453# CONFIG_MTD_PLATRAM is not set 453# CONFIG_MTD_PLATRAM is not set
454 454
455# 455#
@@ -480,6 +480,12 @@ CONFIG_MTD_NAND_SH_FLCTL=y
480# CONFIG_MTD_ONENAND is not set 480# CONFIG_MTD_ONENAND is not set
481 481
482# 482#
483# LPDDR flash memory drivers
484#
485# CONFIG_MTD_LPDDR is not set
486# CONFIG_MTD_QINFO_PROBE is not set
487
488#
483# UBI - Unsorted block images 489# UBI - Unsorted block images
484# 490#
485CONFIG_MTD_UBI=y 491CONFIG_MTD_UBI=y
@@ -607,6 +613,10 @@ CONFIG_SMSC911X=y
607# CONFIG_WLAN_PRE80211 is not set 613# CONFIG_WLAN_PRE80211 is not set
608# CONFIG_WLAN_80211 is not set 614# CONFIG_WLAN_80211 is not set
609# CONFIG_IWLWIFI_LEDS is not set 615# CONFIG_IWLWIFI_LEDS is not set
616
617#
618# Enable WiMAX (Networking options) to see the WiMAX drivers
619#
610# CONFIG_WAN is not set 620# CONFIG_WAN is not set
611# CONFIG_PPP is not set 621# CONFIG_PPP is not set
612# CONFIG_SLIP is not set 622# CONFIG_SLIP is not set
@@ -790,6 +800,7 @@ CONFIG_SSB_POSSIBLE=y
790# CONFIG_PMIC_DA903X is not set 800# CONFIG_PMIC_DA903X is not set
791# CONFIG_MFD_WM8400 is not set 801# CONFIG_MFD_WM8400 is not set
792# CONFIG_MFD_WM8350_I2C is not set 802# CONFIG_MFD_WM8350_I2C is not set
803# CONFIG_MFD_PCF50633 is not set
793# CONFIG_REGULATOR is not set 804# CONFIG_REGULATOR is not set
794 805
795# 806#
@@ -837,7 +848,7 @@ CONFIG_SOC_CAMERA=y
837# CONFIG_SOC_CAMERA_MT9V022 is not set 848# CONFIG_SOC_CAMERA_MT9V022 is not set
838# CONFIG_SOC_CAMERA_TW9910 is not set 849# CONFIG_SOC_CAMERA_TW9910 is not set
839CONFIG_SOC_CAMERA_PLATFORM=y 850CONFIG_SOC_CAMERA_PLATFORM=y
840# CONFIG_SOC_CAMERA_OV772X is not set 851CONFIG_SOC_CAMERA_OV772X=y
841CONFIG_VIDEO_SH_MOBILE_CEU=y 852CONFIG_VIDEO_SH_MOBILE_CEU=y
842# CONFIG_RADIO_ADAPTERS is not set 853# CONFIG_RADIO_ADAPTERS is not set
843# CONFIG_DAB is not set 854# CONFIG_DAB is not set
@@ -1012,6 +1023,7 @@ CONFIG_FS_POSIX_ACL=y
1012CONFIG_FILE_LOCKING=y 1023CONFIG_FILE_LOCKING=y
1013# CONFIG_XFS_FS is not set 1024# CONFIG_XFS_FS is not set
1014# CONFIG_OCFS2_FS is not set 1025# CONFIG_OCFS2_FS is not set
1026# CONFIG_BTRFS_FS is not set
1015CONFIG_DNOTIFY=y 1027CONFIG_DNOTIFY=y
1016CONFIG_INOTIFY=y 1028CONFIG_INOTIFY=y
1017CONFIG_INOTIFY_USER=y 1029CONFIG_INOTIFY_USER=y
@@ -1060,6 +1072,7 @@ CONFIG_MISC_FILESYSTEMS=y
1060# CONFIG_JFFS2_FS is not set 1072# CONFIG_JFFS2_FS is not set
1061# CONFIG_UBIFS_FS is not set 1073# CONFIG_UBIFS_FS is not set
1062# CONFIG_CRAMFS is not set 1074# CONFIG_CRAMFS is not set
1075# CONFIG_SQUASHFS is not set
1063# CONFIG_VXFS_FS is not set 1076# CONFIG_VXFS_FS is not set
1064# CONFIG_MINIX_FS is not set 1077# CONFIG_MINIX_FS is not set
1065# CONFIG_OMFS_FS is not set 1078# CONFIG_OMFS_FS is not set
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index 7758263514bc..678576796bdf 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28 3# Linux kernel version: 2.6.29-rc1
4# Fri Jan 9 17:09:35 2009 4# Thu Jan 22 09:16:16 2009
5# 5#
6CONFIG_SUPERH=y 6CONFIG_SUPERH=y
7CONFIG_SUPERH32=y 7CONFIG_SUPERH32=y
@@ -45,8 +45,12 @@ CONFIG_SYSVIPC_SYSCTL=y
45CONFIG_IKCONFIG=y 45CONFIG_IKCONFIG=y
46CONFIG_IKCONFIG_PROC=y 46CONFIG_IKCONFIG_PROC=y
47CONFIG_LOG_BUF_SHIFT=14 47CONFIG_LOG_BUF_SHIFT=14
48# CONFIG_CGROUPS is not set
49# CONFIG_GROUP_SCHED is not set 48# CONFIG_GROUP_SCHED is not set
49
50#
51# Control Group support
52#
53# CONFIG_CGROUPS is not set
50CONFIG_SYSFS_DEPRECATED=y 54CONFIG_SYSFS_DEPRECATED=y
51CONFIG_SYSFS_DEPRECATED_V2=y 55CONFIG_SYSFS_DEPRECATED_V2=y
52# CONFIG_RELAY is not set 56# CONFIG_RELAY is not set
@@ -389,6 +393,7 @@ CONFIG_WIRELESS_EXT=y
389CONFIG_WIRELESS_EXT_SYSFS=y 393CONFIG_WIRELESS_EXT_SYSFS=y
390# CONFIG_LIB80211 is not set 394# CONFIG_LIB80211 is not set
391# CONFIG_MAC80211 is not set 395# CONFIG_MAC80211 is not set
396# CONFIG_WIMAX is not set
392# CONFIG_RFKILL is not set 397# CONFIG_RFKILL is not set
393# CONFIG_NET_9P is not set 398# CONFIG_NET_9P is not set
394 399
@@ -411,6 +416,7 @@ CONFIG_MTD=y
411# CONFIG_MTD_DEBUG is not set 416# CONFIG_MTD_DEBUG is not set
412CONFIG_MTD_CONCAT=y 417CONFIG_MTD_CONCAT=y
413CONFIG_MTD_PARTITIONS=y 418CONFIG_MTD_PARTITIONS=y
419# CONFIG_MTD_TESTS is not set
414# CONFIG_MTD_REDBOOT_PARTS is not set 420# CONFIG_MTD_REDBOOT_PARTS is not set
415CONFIG_MTD_CMDLINE_PARTS=y 421CONFIG_MTD_CMDLINE_PARTS=y
416# CONFIG_MTD_AR7_PARTS is not set 422# CONFIG_MTD_AR7_PARTS is not set
@@ -458,9 +464,7 @@ CONFIG_MTD_CFI_UTIL=y
458# 464#
459# CONFIG_MTD_COMPLEX_MAPPINGS is not set 465# CONFIG_MTD_COMPLEX_MAPPINGS is not set
460CONFIG_MTD_PHYSMAP=y 466CONFIG_MTD_PHYSMAP=y
461CONFIG_MTD_PHYSMAP_START=0xffffffff 467# CONFIG_MTD_PHYSMAP_COMPAT is not set
462CONFIG_MTD_PHYSMAP_LEN=0
463CONFIG_MTD_PHYSMAP_BANKWIDTH=0
464# CONFIG_MTD_PLATRAM is not set 468# CONFIG_MTD_PLATRAM is not set
465 469
466# 470#
@@ -488,6 +492,12 @@ CONFIG_MTD_NAND_PLATFORM=y
488# CONFIG_MTD_ONENAND is not set 492# CONFIG_MTD_ONENAND is not set
489 493
490# 494#
495# LPDDR flash memory drivers
496#
497# CONFIG_MTD_LPDDR is not set
498# CONFIG_MTD_QINFO_PROBE is not set
499
500#
491# UBI - Unsorted block images 501# UBI - Unsorted block images
492# 502#
493# CONFIG_MTD_UBI is not set 503# CONFIG_MTD_UBI is not set
@@ -587,6 +597,10 @@ CONFIG_SMC91X=y
587# CONFIG_WLAN_PRE80211 is not set 597# CONFIG_WLAN_PRE80211 is not set
588# CONFIG_WLAN_80211 is not set 598# CONFIG_WLAN_80211 is not set
589# CONFIG_IWLWIFI_LEDS is not set 599# CONFIG_IWLWIFI_LEDS is not set
600
601#
602# Enable WiMAX (Networking options) to see the WiMAX drivers
603#
590# CONFIG_WAN is not set 604# CONFIG_WAN is not set
591# CONFIG_PPP is not set 605# CONFIG_PPP is not set
592# CONFIG_SLIP is not set 606# CONFIG_SLIP is not set
@@ -761,6 +775,7 @@ CONFIG_SSB_POSSIBLE=y
761# CONFIG_PMIC_DA903X is not set 775# CONFIG_PMIC_DA903X is not set
762# CONFIG_MFD_WM8400 is not set 776# CONFIG_MFD_WM8400 is not set
763# CONFIG_MFD_WM8350_I2C is not set 777# CONFIG_MFD_WM8350_I2C is not set
778# CONFIG_MFD_PCF50633 is not set
764# CONFIG_REGULATOR is not set 779# CONFIG_REGULATOR is not set
765 780
766# 781#
@@ -806,9 +821,9 @@ CONFIG_SOC_CAMERA=y
806# CONFIG_SOC_CAMERA_MT9M111 is not set 821# CONFIG_SOC_CAMERA_MT9M111 is not set
807# CONFIG_SOC_CAMERA_MT9T031 is not set 822# CONFIG_SOC_CAMERA_MT9T031 is not set
808# CONFIG_SOC_CAMERA_MT9V022 is not set 823# CONFIG_SOC_CAMERA_MT9V022 is not set
809# CONFIG_SOC_CAMERA_TW9910 is not set 824CONFIG_SOC_CAMERA_TW9910=y
810CONFIG_SOC_CAMERA_PLATFORM=y 825# CONFIG_SOC_CAMERA_PLATFORM is not set
811# CONFIG_SOC_CAMERA_OV772X is not set 826CONFIG_SOC_CAMERA_OV772X=y
812CONFIG_VIDEO_SH_MOBILE_CEU=y 827CONFIG_VIDEO_SH_MOBILE_CEU=y
813# CONFIG_RADIO_ADAPTERS is not set 828# CONFIG_RADIO_ADAPTERS is not set
814# CONFIG_DAB is not set 829# CONFIG_DAB is not set
@@ -866,11 +881,13 @@ CONFIG_USB_GADGET_SELECTED=y
866# CONFIG_USB_GADGET_PXA25X is not set 881# CONFIG_USB_GADGET_PXA25X is not set
867# CONFIG_USB_GADGET_PXA27X is not set 882# CONFIG_USB_GADGET_PXA27X is not set
868# CONFIG_USB_GADGET_S3C2410 is not set 883# CONFIG_USB_GADGET_S3C2410 is not set
884# CONFIG_USB_GADGET_IMX is not set
869CONFIG_USB_GADGET_M66592=y 885CONFIG_USB_GADGET_M66592=y
870CONFIG_USB_M66592=y 886CONFIG_USB_M66592=y
871CONFIG_SUPERH_BUILT_IN_M66592=y 887CONFIG_SUPERH_BUILT_IN_M66592=y
872# CONFIG_USB_GADGET_AMD5536UDC is not set 888# CONFIG_USB_GADGET_AMD5536UDC is not set
873# CONFIG_USB_GADGET_FSL_QE is not set 889# CONFIG_USB_GADGET_FSL_QE is not set
890# CONFIG_USB_GADGET_CI13XXX is not set
874# CONFIG_USB_GADGET_NET2280 is not set 891# CONFIG_USB_GADGET_NET2280 is not set
875# CONFIG_USB_GADGET_GOKU is not set 892# CONFIG_USB_GADGET_GOKU is not set
876# CONFIG_USB_GADGET_DUMMY_HCD is not set 893# CONFIG_USB_GADGET_DUMMY_HCD is not set
@@ -883,6 +900,11 @@ CONFIG_USB_G_SERIAL=y
883# CONFIG_USB_MIDI_GADGET is not set 900# CONFIG_USB_MIDI_GADGET is not set
884# CONFIG_USB_G_PRINTER is not set 901# CONFIG_USB_G_PRINTER is not set
885# CONFIG_USB_CDC_COMPOSITE is not set 902# CONFIG_USB_CDC_COMPOSITE is not set
903
904#
905# OTG and related infrastructure
906#
907# CONFIG_USB_GPIO_VBUS is not set
886# CONFIG_MMC is not set 908# CONFIG_MMC is not set
887# CONFIG_MEMSTICK is not set 909# CONFIG_MEMSTICK is not set
888# CONFIG_NEW_LEDS is not set 910# CONFIG_NEW_LEDS is not set
@@ -961,6 +983,7 @@ CONFIG_UIO_PDRV_GENIRQ=y
961CONFIG_FILE_LOCKING=y 983CONFIG_FILE_LOCKING=y
962# CONFIG_XFS_FS is not set 984# CONFIG_XFS_FS is not set
963# CONFIG_OCFS2_FS is not set 985# CONFIG_OCFS2_FS is not set
986# CONFIG_BTRFS_FS is not set
964# CONFIG_DNOTIFY is not set 987# CONFIG_DNOTIFY is not set
965# CONFIG_INOTIFY is not set 988# CONFIG_INOTIFY is not set
966# CONFIG_QUOTA is not set 989# CONFIG_QUOTA is not set
@@ -1004,6 +1027,7 @@ CONFIG_MISC_FILESYSTEMS=y
1004# CONFIG_EFS_FS is not set 1027# CONFIG_EFS_FS is not set
1005# CONFIG_JFFS2_FS is not set 1028# CONFIG_JFFS2_FS is not set
1006# CONFIG_CRAMFS is not set 1029# CONFIG_CRAMFS is not set
1030# CONFIG_SQUASHFS is not set
1007# CONFIG_VXFS_FS is not set 1031# CONFIG_VXFS_FS is not set
1008# CONFIG_MINIX_FS is not set 1032# CONFIG_MINIX_FS is not set
1009# CONFIG_OMFS_FS is not set 1033# CONFIG_OMFS_FS is not set
diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h
index ee839ee58ac8..090358a7e1bb 100644
--- a/arch/sh/include/asm/mutex-llsc.h
+++ b/arch/sh/include/asm/mutex-llsc.h
@@ -21,38 +21,36 @@
21static inline void 21static inline void
22__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) 22__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
23{ 23{
24 int __ex_flag, __res; 24 int __done, __res;
25 25
26 __asm__ __volatile__ ( 26 __asm__ __volatile__ (
27 "movli.l @%2, %0 \n" 27 "movli.l @%2, %0 \n"
28 "add #-1, %0 \n" 28 "add #-1, %0 \n"
29 "movco.l %0, @%2 \n" 29 "movco.l %0, @%2 \n"
30 "movt %1 \n" 30 "movt %1 \n"
31 : "=&z" (__res), "=&r" (__ex_flag) 31 : "=&z" (__res), "=&r" (__done)
32 : "r" (&(count)->counter) 32 : "r" (&(count)->counter)
33 : "t"); 33 : "t");
34 34
35 __res |= !__ex_flag; 35 if (unlikely(!__done || __res != 0))
36 if (unlikely(__res != 0))
37 fail_fn(count); 36 fail_fn(count);
38} 37}
39 38
40static inline int 39static inline int
41__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) 40__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
42{ 41{
43 int __ex_flag, __res; 42 int __done, __res;
44 43
45 __asm__ __volatile__ ( 44 __asm__ __volatile__ (
46 "movli.l @%2, %0 \n" 45 "movli.l @%2, %0 \n"
47 "add #-1, %0 \n" 46 "add #-1, %0 \n"
48 "movco.l %0, @%2 \n" 47 "movco.l %0, @%2 \n"
49 "movt %1 \n" 48 "movt %1 \n"
50 : "=&z" (__res), "=&r" (__ex_flag) 49 : "=&z" (__res), "=&r" (__done)
51 : "r" (&(count)->counter) 50 : "r" (&(count)->counter)
52 : "t"); 51 : "t");
53 52
54 __res |= !__ex_flag; 53 if (unlikely(!__done || __res != 0))
55 if (unlikely(__res != 0))
56 __res = fail_fn(count); 54 __res = fail_fn(count);
57 55
58 return __res; 56 return __res;
@@ -61,19 +59,18 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
61static inline void 59static inline void
62__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) 60__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
63{ 61{
64 int __ex_flag, __res; 62 int __done, __res;
65 63
66 __asm__ __volatile__ ( 64 __asm__ __volatile__ (
67 "movli.l @%2, %0 \n\t" 65 "movli.l @%2, %0 \n\t"
68 "add #1, %0 \n\t" 66 "add #1, %0 \n\t"
69 "movco.l %0, @%2 \n\t" 67 "movco.l %0, @%2 \n\t"
70 "movt %1 \n\t" 68 "movt %1 \n\t"
71 : "=&z" (__res), "=&r" (__ex_flag) 69 : "=&z" (__res), "=&r" (__done)
72 : "r" (&(count)->counter) 70 : "r" (&(count)->counter)
73 : "t"); 71 : "t");
74 72
75 __res |= !__ex_flag; 73 if (unlikely(!__done || __res <= 0))
76 if (unlikely(__res <= 0))
77 fail_fn(count); 74 fail_fn(count);
78} 75}
79 76
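A note on the SH assembly above, for illustration only: movli.l/movco.l form a load-linked/store-conditional pair, and movt copies the T flag (movco.l's success bit) into a register. The old code folded that success bit into the arithmetic result with "__res |= !__ex_flag"; the fixed code tests the two conditions separately. A plain-C sketch of the fixed lock fastpath, with invented helpers standing in for the LL/SC instructions:

    /* Hypothetical stand-ins for movli.l / movco.l; not real kernel APIs. */
    int load_linked(int *addr);
    int store_conditional(int *addr, int val);      /* 1 on success */

    static inline void
    fastpath_lock_sketch(int *counter, void (*fail_fn)(int *))
    {
            int res, done;

            res  = load_linked(counter) - 1;        /* movli.l ; add #-1 */
            done = store_conditional(counter, res); /* movco.l ; movt    */

            if (!done || res != 0)                  /* lost the reservation,
                                                       or the mutex was held */
                    fail_fn(counter);
    }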
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h
index 05a868a71ef5..5bc34681d994 100644
--- a/arch/sh/include/asm/syscall_32.h
+++ b/arch/sh/include/asm/syscall_32.h
@@ -21,23 +21,10 @@ static inline void syscall_rollback(struct task_struct *task,
21 */ 21 */
22} 22}
23 23
24static inline bool syscall_has_error(struct pt_regs *regs)
25{
26 return (regs->sr & 0x1) ? true : false;
27}
28static inline void syscall_set_error(struct pt_regs *regs)
29{
30 regs->sr |= 0x1;
31}
32static inline void syscall_clear_error(struct pt_regs *regs)
33{
34 regs->sr &= ~0x1;
35}
36
37static inline long syscall_get_error(struct task_struct *task, 24static inline long syscall_get_error(struct task_struct *task,
38 struct pt_regs *regs) 25 struct pt_regs *regs)
39{ 26{
40 return syscall_has_error(regs) ? regs->regs[0] : 0; 27 return IS_ERR_VALUE(regs->regs[0]) ? regs->regs[0] : 0;
41} 28}
42 29
43static inline long syscall_get_return_value(struct task_struct *task, 30static inline long syscall_get_return_value(struct task_struct *task,
@@ -50,13 +37,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
50 struct pt_regs *regs, 37 struct pt_regs *regs,
51 int error, long val) 38 int error, long val)
52{ 39{
53 if (error) { 40 if (error)
54 syscall_set_error(regs);
55 regs->regs[0] = -error; 41 regs->regs[0] = -error;
56 } else { 42 else
57 syscall_clear_error(regs);
58 regs->regs[0] = val; 43 regs->regs[0] = val;
59 }
60} 44}
61 45
62static inline void syscall_get_arguments(struct task_struct *task, 46static inline void syscall_get_arguments(struct task_struct *task,
diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h
index e1143b9784d6..c3561ca72bee 100644
--- a/arch/sh/include/asm/syscall_64.h
+++ b/arch/sh/include/asm/syscall_64.h
@@ -21,23 +21,10 @@ static inline void syscall_rollback(struct task_struct *task,
21 */ 21 */
22} 22}
23 23
24static inline bool syscall_has_error(struct pt_regs *regs)
25{
26 return (regs->sr & 0x1) ? true : false;
27}
28static inline void syscall_set_error(struct pt_regs *regs)
29{
30 regs->sr |= 0x1;
31}
32static inline void syscall_clear_error(struct pt_regs *regs)
33{
34 regs->sr &= ~0x1;
35}
36
37static inline long syscall_get_error(struct task_struct *task, 24static inline long syscall_get_error(struct task_struct *task,
38 struct pt_regs *regs) 25 struct pt_regs *regs)
39{ 26{
40 return syscall_has_error(regs) ? regs->regs[9] : 0; 27 return IS_ERR_VALUE(regs->regs[9]) ? regs->regs[9] : 0;
41} 28}
42 29
43static inline long syscall_get_return_value(struct task_struct *task, 30static inline long syscall_get_return_value(struct task_struct *task,
@@ -50,13 +37,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
50 struct pt_regs *regs, 37 struct pt_regs *regs,
51 int error, long val) 38 int error, long val)
52{ 39{
53 if (error) { 40 if (error)
54 syscall_set_error(regs);
55 regs->regs[9] = -error; 41 regs->regs[9] = -error;
56 } else { 42 else
57 syscall_clear_error(regs);
58 regs->regs[9] = val; 43 regs->regs[9] = val;
59 }
60} 44}
61 45
62static inline void syscall_get_arguments(struct task_struct *task, 46static inline void syscall_get_arguments(struct task_struct *task,
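Both sh syscall headers above stop signalling errors through the SR flag bit and instead classify the raw return value with IS_ERR_VALUE(). As a reminder of the idea only (see include/linux/err.h for the real definition), values in the top MAX_ERRNO range of an unsigned long are treated as error codes:

    /* Sketch of the IS_ERR_VALUE() test; MAX_ERRNO is 4095 in the kernel. */
    #define MAX_ERRNO_SKETCH        4095
    #define IS_ERR_VALUE_SKETCH(x) \
            ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO_SKETCH)

    /* -ENOSYS (-38) tests true and is reported as an error; an ordinary
     * positive return value such as a byte count tests false. */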
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index 2780917c0088..e3ea5411da6d 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -423,7 +423,7 @@ static int ieee_fpe_handler(struct pt_regs *regs)
423 int m; 423 int m;
424 unsigned int hx; 424 unsigned int hx;
425 425
426 m = (finsn >> 9) & 0x7; 426 m = (finsn >> 8) & 0x7;
427 hx = tsk->thread.fpu.hard.fp_regs[m]; 427 hx = tsk->thread.fpu.hard.fp_regs[m];
428 428
429 if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR) 429 if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 534247508572..370d2cfa34eb 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -262,11 +262,11 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
262 BOOTMEM_DEFAULT); 262 BOOTMEM_DEFAULT);
263 263
264 /* 264 /*
265 * reserve physical page 0 - it's a special BIOS page on many boxes, 265 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
266 * enabling clean reboots, SMP operation, laptop functions.
267 */ 266 */
268 reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET, 267 if (CONFIG_ZERO_PAGE_OFFSET != 0)
269 BOOTMEM_DEFAULT); 268 reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET,
269 BOOTMEM_DEFAULT);
270 270
271 sparse_memory_present_with_active_regions(0); 271 sparse_memory_present_with_active_regions(0);
272 272
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 77c21bde376a..17784e19ae34 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -510,7 +510,6 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
510 case -ERESTARTNOHAND: 510 case -ERESTARTNOHAND:
511 no_system_call_restart: 511 no_system_call_restart:
512 regs->regs[0] = -EINTR; 512 regs->regs[0] = -EINTR;
513 regs->sr |= 1;
514 break; 513 break;
515 514
516 case -ERESTARTSYS: 515 case -ERESTARTSYS:
@@ -589,8 +588,7 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
589 588
590 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 589 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
591 if (signr > 0) { 590 if (signr > 0) {
592 if (regs->sr & 1) 591 handle_syscall_restart(save_r0, regs, &ka.sa);
593 handle_syscall_restart(save_r0, regs, &ka.sa);
594 592
595 /* Whee! Actually deliver the signal. */ 593 /* Whee! Actually deliver the signal. */
596 if (handle_signal(signr, &ka, &info, oldset, 594 if (handle_signal(signr, &ka, &info, oldset,
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index b22fdfaaa191..0663a0ee6021 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -60,7 +60,6 @@ handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
60 case -ERESTARTNOHAND: 60 case -ERESTARTNOHAND:
61 no_system_call_restart: 61 no_system_call_restart:
62 regs->regs[REG_RET] = -EINTR; 62 regs->regs[REG_RET] = -EINTR;
63 regs->sr |= 1;
64 break; 63 break;
65 64
66 case -ERESTARTSYS: 65 case -ERESTARTSYS:
@@ -109,8 +108,7 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
109 108
110 signr = get_signal_to_deliver(&info, &ka, regs, 0); 109 signr = get_signal_to_deliver(&info, &ka, regs, 0);
111 if (signr > 0) { 110 if (signr > 0) {
112 if (regs->sr & 1) 111 handle_syscall_restart(regs, &ka.sa);
113 handle_syscall_restart(regs, &ka.sa);
114 112
115 /* Whee! Actually deliver the signal. */ 113 /* Whee! Actually deliver the signal. */
116 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { 114 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
index cbdd0d40e545..356c8ec92893 100644
--- a/arch/sh/lib/checksum.S
+++ b/arch/sh/lib/checksum.S
@@ -36,8 +36,7 @@
36 */ 36 */
37 37
38/* 38/*
39 * unsigned int csum_partial(const unsigned char *buf, int len, 39 * asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum);
40 * unsigned int sum);
41 */ 40 */
42 41
43.text 42.text
@@ -49,11 +48,31 @@ ENTRY(csum_partial)
49 * Fortunately, it is easy to convert 2-byte alignment to 4-byte 48 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
50 * alignment for the unrolled loop. 49 * alignment for the unrolled loop.
51 */ 50 */
52 mov r5, r1
53 mov r4, r0 51 mov r4, r0
54 tst #2, r0 ! Check alignment. 52 tst #3, r0 ! Check alignment.
55 bt 2f ! Jump if alignment is ok. 53 bt/s 2f ! Jump if alignment is ok.
54 mov r4, r7 ! Keep a copy to check for alignment
56 ! 55 !
56 tst #1, r0 ! Check alignment.
57 bt 21f ! Jump if alignment is boundary of 2bytes.
58
59 ! buf is odd
60 tst r5, r5
61 add #-1, r5
62 bt 9f
63 mov.b @r4+, r0
64 extu.b r0, r0
65 addc r0, r6 ! t=0 from previous tst
66 mov r6, r0
67 shll8 r6
68 shlr16 r0
69 shlr8 r0
70 or r0, r6
71 mov r4, r0
72 tst #2, r0
73 bt 2f
7421:
75 ! buf is 2 byte aligned (len could be 0)
57 add #-2, r5 ! Alignment uses up two bytes. 76 add #-2, r5 ! Alignment uses up two bytes.
58 cmp/pz r5 ! 77 cmp/pz r5 !
59 bt/s 1f ! Jump if we had at least two bytes. 78 bt/s 1f ! Jump if we had at least two bytes.
@@ -61,16 +80,17 @@ ENTRY(csum_partial)
61 bra 6f 80 bra 6f
62 add #2, r5 ! r5 was < 2. Deal with it. 81 add #2, r5 ! r5 was < 2. Deal with it.
631: 821:
64 mov r5, r1 ! Save new len for later use.
65 mov.w @r4+, r0 83 mov.w @r4+, r0
66 extu.w r0, r0 84 extu.w r0, r0
67 addc r0, r6 85 addc r0, r6
68 bf 2f 86 bf 2f
69 add #1, r6 87 add #1, r6
702: 882:
89 ! buf is 4 byte aligned (len could be 0)
90 mov r5, r1
71 mov #-5, r0 91 mov #-5, r0
72 shld r0, r5 92 shld r0, r1
73 tst r5, r5 93 tst r1, r1
74 bt/s 4f ! if it's =0, go to 4f 94 bt/s 4f ! if it's =0, go to 4f
75 clrt 95 clrt
76 .align 2 96 .align 2
@@ -92,30 +112,31 @@ ENTRY(csum_partial)
92 addc r0, r6 112 addc r0, r6
93 addc r2, r6 113 addc r2, r6
94 movt r0 114 movt r0
95 dt r5 115 dt r1
96 bf/s 3b 116 bf/s 3b
97 cmp/eq #1, r0 117 cmp/eq #1, r0
98 ! here, we know r5==0 118 ! here, we know r1==0
99 addc r5, r6 ! add carry to r6 119 addc r1, r6 ! add carry to r6
1004: 1204:
101 mov r1, r0 121 mov r5, r0
102 and #0x1c, r0 122 and #0x1c, r0
103 tst r0, r0 123 tst r0, r0
104 bt/s 6f 124 bt 6f
105 mov r0, r5 125 ! 4 bytes or more remaining
106 shlr2 r5 126 mov r0, r1
127 shlr2 r1
107 mov #0, r2 128 mov #0, r2
1085: 1295:
109 addc r2, r6 130 addc r2, r6
110 mov.l @r4+, r2 131 mov.l @r4+, r2
111 movt r0 132 movt r0
112 dt r5 133 dt r1
113 bf/s 5b 134 bf/s 5b
114 cmp/eq #1, r0 135 cmp/eq #1, r0
115 addc r2, r6 136 addc r2, r6
116 addc r5, r6 ! r5==0 here, so it means add carry-bit 137 addc r1, r6 ! r1==0 here, so it means add carry-bit
1176: 1386:
118 mov r1, r5 139 ! 3 bytes or less remaining
119 mov #3, r0 140 mov #3, r0
120 and r0, r5 141 and r0, r5
121 tst r5, r5 142 tst r5, r5
@@ -139,8 +160,18 @@ ENTRY(csum_partial)
1398: 1608:
140 addc r0, r6 161 addc r0, r6
141 mov #0, r0 162 mov #0, r0
142 addc r0, r6 163 addc r0, r6
1439: 1649:
165 ! Check if the buffer was misaligned, if so realign sum
166 mov r7, r0
167 tst #1, r0
168 bt 10f
169 mov r6, r0
170 shll8 r6
171 shlr16 r0
172 shlr8 r0
173 or r0, r6
17410:
144 rts 175 rts
145 mov r6, r0 176 mov r6, r0
146 177
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 8ffee714f932..a46c3a21e26d 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -891,10 +891,35 @@ prom_tba: .xword 0
891tlb_type: .word 0 /* Must NOT end up in BSS */ 891tlb_type: .word 0 /* Must NOT end up in BSS */
892 .section ".fixup",#alloc,#execinstr 892 .section ".fixup",#alloc,#execinstr
893 893
894 .globl __ret_efault, __retl_efault 894 .globl __ret_efault, __retl_efault, __ret_one, __retl_one
895__ret_efault: 895ENTRY(__ret_efault)
896 ret 896 ret
897 restore %g0, -EFAULT, %o0 897 restore %g0, -EFAULT, %o0
898__retl_efault: 898ENDPROC(__ret_efault)
899
900ENTRY(__retl_efault)
899 retl 901 retl
900 mov -EFAULT, %o0 902 mov -EFAULT, %o0
903ENDPROC(__retl_efault)
904
905ENTRY(__retl_one)
906 retl
907 mov 1, %o0
908ENDPROC(__retl_one)
909
910ENTRY(__ret_one_asi)
911 wr %g0, ASI_AIUS, %asi
912 ret
913 restore %g0, 1, %o0
914ENDPROC(__ret_one_asi)
915
916ENTRY(__retl_one_asi)
917 wr %g0, ASI_AIUS, %asi
918 retl
919 mov 1, %o0
920ENDPROC(__retl_one_asi)
921
922ENTRY(__retl_o1)
923 retl
924 mov %o1, %o0
925ENDPROC(__retl_o1)
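The new __retl_one, __ret_one_asi, __retl_one_asi and __retl_o1 stubs let the sparc copy routines (see the EX_LD/EX_ST macro changes below) point their __ex_table entries straight at shared fixup code instead of emitting a private .fixup fragment at every faulting load or store. Roughly, and only as a sketch with simplified field types, an exception-table entry pairs a possibly-faulting instruction address with the address where execution should resume:

    /* Simplified sketch of an exception-table entry; the real sparc64
     * layout and lookup code differ in detail. */
    struct ex_entry_sketch {
            unsigned long insn;     /* address of the load/store that may fault */
            unsigned long fixup;    /* e.g. __retl_one: return 1 to the caller  */
    };

    /* On a kernel-mode fault, the trap handler searches this table for the
     * faulting PC and, on a match, restarts execution at ->fixup. */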
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 09f088ed4a64..f3577223c863 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -70,6 +70,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
70 printk(" on CPU%d, ip %08lx, registers:\n", 70 printk(" on CPU%d, ip %08lx, registers:\n",
71 smp_processor_id(), regs->tpc); 71 smp_processor_id(), regs->tpc);
72 show_regs(regs); 72 show_regs(regs);
73 dump_stack();
73 74
74 bust_spinlocks(0); 75 bust_spinlocks(0);
75 76
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 92e0dda141a4..1ae8cdd7e703 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -133,11 +133,16 @@ int __init pcr_arch_init(void)
133 133
134 case cheetah: 134 case cheetah:
135 case cheetah_plus: 135 case cheetah_plus:
136 case spitfire:
137 pcr_ops = &direct_pcr_ops; 136 pcr_ops = &direct_pcr_ops;
138 pcr_enable = PCR_SUN4U_ENABLE; 137 pcr_enable = PCR_SUN4U_ENABLE;
139 break; 138 break;
140 139
140 case spitfire:
141 /* UltraSPARC-I/II and derivatives lack a profile
142 * counter overflow interrupt so we can't make use of
143 * their hardware currently.
144 */
145 /* fallthrough */
141 default: 146 default:
142 err = -ENODEV; 147 err = -ENODEV;
143 goto out_unregister; 148 goto out_unregister;
diff --git a/arch/sparc/lib/GENbzero.S b/arch/sparc/lib/GENbzero.S
index 6a4f956a2f7a..8e7a843ddd88 100644
--- a/arch/sparc/lib/GENbzero.S
+++ b/arch/sparc/lib/GENbzero.S
@@ -6,13 +6,9 @@
6 6
7#define EX_ST(x,y) \ 7#define EX_ST(x,y) \
898: x,y; \ 898: x,y; \
9 .section .fixup; \
10 .align 4; \
1199: retl; \
12 mov %o1, %o0; \
13 .section __ex_table,"a";\ 9 .section __ex_table,"a";\
14 .align 4; \ 10 .align 4; \
15 .word 98b, 99b; \ 11 .word 98b, __retl_o1; \
16 .text; \ 12 .text; \
17 .align 4; 13 .align 4;
18 14
diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S
index 2b9df99e87f9..b7d0bd6b1406 100644
--- a/arch/sparc/lib/GENcopy_from_user.S
+++ b/arch/sparc/lib/GENcopy_from_user.S
@@ -5,13 +5,9 @@
5 5
6#define EX_LD(x) \ 6#define EX_LD(x) \
798: x; \ 798: x; \
8 .section .fixup; \
9 .align 4; \
1099: retl; \
11 mov 1, %o0; \
12 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
13 .align 4; \ 9 .align 4; \
14 .word 98b, 99b; \ 10 .word 98b, __retl_one; \
15 .text; \ 11 .text; \
16 .align 4; 12 .align 4;
17 13
@@ -27,7 +23,7 @@
27#define PREAMBLE \ 23#define PREAMBLE \
28 rd %asi, %g1; \ 24 rd %asi, %g1; \
29 cmp %g1, ASI_AIUS; \ 25 cmp %g1, ASI_AIUS; \
30 bne,pn %icc, memcpy_user_stub; \ 26 bne,pn %icc, ___copy_in_user; \
31 nop 27 nop
32#endif 28#endif
33 29
diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S
index bb3f7084daf9..780550e1afc7 100644
--- a/arch/sparc/lib/GENcopy_to_user.S
+++ b/arch/sparc/lib/GENcopy_to_user.S
@@ -5,13 +5,9 @@
5 5
6#define EX_ST(x) \ 6#define EX_ST(x) \
798: x; \ 798: x; \
8 .section .fixup; \
9 .align 4; \
1099: retl; \
11 mov 1, %o0; \
12 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
13 .align 4; \ 9 .align 4; \
14 .word 98b, 99b; \ 10 .word 98b, __retl_one; \
15 .text; \ 11 .text; \
16 .align 4; 12 .align 4;
17 13
@@ -31,7 +27,7 @@
31#define PREAMBLE \ 27#define PREAMBLE \
32 rd %asi, %g1; \ 28 rd %asi, %g1; \
33 cmp %g1, ASI_AIUS; \ 29 cmp %g1, ASI_AIUS; \
34 bne,pn %icc, memcpy_user_stub; \ 30 bne,pn %icc, ___copy_in_user; \
35 nop 31 nop
36#endif 32#endif
37 33
diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S
index c77ef5f22102..119ccb9a54f4 100644
--- a/arch/sparc/lib/NG2copy_from_user.S
+++ b/arch/sparc/lib/NG2copy_from_user.S
@@ -5,14 +5,9 @@
5 5
6#define EX_LD(x) \ 6#define EX_LD(x) \
798: x; \ 798: x; \
8 .section .fixup; \
9 .align 4; \
1099: wr %g0, ASI_AIUS, %asi;\
11 retl; \
12 mov 1, %o0; \
13 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
14 .align 4; \ 9 .align 4; \
15 .word 98b, 99b; \ 10 .word 98b, __retl_one_asi;\
16 .text; \ 11 .text; \
17 .align 4; 12 .align 4;
18 13
@@ -33,7 +28,7 @@
33#define PREAMBLE \ 28#define PREAMBLE \
34 rd %asi, %g1; \ 29 rd %asi, %g1; \
35 cmp %g1, ASI_AIUS; \ 30 cmp %g1, ASI_AIUS; \
36 bne,pn %icc, memcpy_user_stub; \ 31 bne,pn %icc, ___copy_in_user; \
37 nop 32 nop
38#endif 33#endif
39 34
diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S
index 4bd4093acbbd..7fe1ccefd9d0 100644
--- a/arch/sparc/lib/NG2copy_to_user.S
+++ b/arch/sparc/lib/NG2copy_to_user.S
@@ -5,14 +5,9 @@
5 5
6#define EX_ST(x) \ 6#define EX_ST(x) \
798: x; \ 798: x; \
8 .section .fixup; \
9 .align 4; \
1099: wr %g0, ASI_AIUS, %asi;\
11 retl; \
12 mov 1, %o0; \
13 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
14 .align 4; \ 9 .align 4; \
15 .word 98b, 99b; \ 10 .word 98b, __retl_one_asi;\
16 .text; \ 11 .text; \
17 .align 4; 12 .align 4;
18 13
@@ -42,7 +37,7 @@
42#define PREAMBLE \ 37#define PREAMBLE \
43 rd %asi, %g1; \ 38 rd %asi, %g1; \
44 cmp %g1, ASI_AIUS; \ 39 cmp %g1, ASI_AIUS; \
45 bne,pn %icc, memcpy_user_stub; \ 40 bne,pn %icc, ___copy_in_user; \
46 nop 41 nop
47#endif 42#endif
48 43
diff --git a/arch/sparc/lib/NGbzero.S b/arch/sparc/lib/NGbzero.S
index 814d5f7a45e1..beab29bf419b 100644
--- a/arch/sparc/lib/NGbzero.S
+++ b/arch/sparc/lib/NGbzero.S
@@ -6,13 +6,9 @@
6 6
7#define EX_ST(x,y) \ 7#define EX_ST(x,y) \
898: x,y; \ 898: x,y; \
9 .section .fixup; \
10 .align 4; \
1199: retl; \
12 mov %o1, %o0; \
13 .section __ex_table,"a";\ 9 .section __ex_table,"a";\
14 .align 4; \ 10 .align 4; \
15 .word 98b, 99b; \ 11 .word 98b, __retl_o1; \
16 .text; \ 12 .text; \
17 .align 4; 13 .align 4;
18 14
diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S
index e7f433f71b42..5d1e4d1ac21e 100644
--- a/arch/sparc/lib/NGcopy_from_user.S
+++ b/arch/sparc/lib/NGcopy_from_user.S
@@ -5,14 +5,9 @@
5 5
6#define EX_LD(x) \ 6#define EX_LD(x) \
798: x; \ 798: x; \
8 .section .fixup; \
9 .align 4; \
1099: wr %g0, ASI_AIUS, %asi;\
11 ret; \
12 restore %g0, 1, %o0; \
13 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
14 .align 4; \ 9 .align 4; \
15 .word 98b, 99b; \ 10 .word 98b, __ret_one_asi;\
16 .text; \ 11 .text; \
17 .align 4; 12 .align 4;
18 13
@@ -30,7 +25,7 @@
30#define PREAMBLE \ 25#define PREAMBLE \
31 rd %asi, %g1; \ 26 rd %asi, %g1; \
32 cmp %g1, ASI_AIUS; \ 27 cmp %g1, ASI_AIUS; \
33 bne,pn %icc, memcpy_user_stub; \ 28 bne,pn %icc, ___copy_in_user; \
34 nop 29 nop
35#endif 30#endif
36 31
diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S
index 6ea01c5532a0..ff630dcb273c 100644
--- a/arch/sparc/lib/NGcopy_to_user.S
+++ b/arch/sparc/lib/NGcopy_to_user.S
@@ -5,14 +5,9 @@
5 5
6#define EX_ST(x) \ 6#define EX_ST(x) \
798: x; \ 798: x; \
8 .section .fixup; \
9 .align 4; \
1099: wr %g0, ASI_AIUS, %asi;\
11 ret; \
12 restore %g0, 1, %o0; \
13 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
14 .align 4; \ 9 .align 4; \
15 .word 98b, 99b; \ 10 .word 98b, __ret_one_asi;\
16 .text; \ 11 .text; \
17 .align 4; 12 .align 4;
18 13
@@ -33,7 +28,7 @@
33#define PREAMBLE \ 28#define PREAMBLE \
34 rd %asi, %g1; \ 29 rd %asi, %g1; \
35 cmp %g1, ASI_AIUS; \ 30 cmp %g1, ASI_AIUS; \
36 bne,pn %icc, memcpy_user_stub; \ 31 bne,pn %icc, ___copy_in_user; \
37 nop 32 nop
38#endif 33#endif
39 34
diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S
index 3192b0bf4fab..a6ae2ea04bf5 100644
--- a/arch/sparc/lib/U1copy_from_user.S
+++ b/arch/sparc/lib/U1copy_from_user.S
@@ -5,13 +5,9 @@
5 5
6#define EX_LD(x) \ 6#define EX_LD(x) \
798: x; \ 798: x; \
8 .section .fixup; \
9 .align 4; \
1099: retl; \
11 mov 1, %o0; \
12 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
13 .align 4; \ 9 .align 4; \
14 .word 98b, 99b; \ 10 .word 98b, __retl_one; \
15 .text; \ 11 .text; \
16 .align 4; 12 .align 4;
17 13
@@ -27,7 +23,7 @@
27#define PREAMBLE \ 23#define PREAMBLE \
28 rd %asi, %g1; \ 24 rd %asi, %g1; \
29 cmp %g1, ASI_AIUS; \ 25 cmp %g1, ASI_AIUS; \
30 bne,pn %icc, memcpy_user_stub; \ 26 bne,pn %icc, ___copy_in_user; \
31 nop; \ 27 nop; \
32 28
33#include "U1memcpy.S" 29#include "U1memcpy.S"
diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S
index d1210ffb0b82..f4b970eeb485 100644
--- a/arch/sparc/lib/U1copy_to_user.S
+++ b/arch/sparc/lib/U1copy_to_user.S
@@ -5,13 +5,9 @@
5 5
6#define EX_ST(x) \ 6#define EX_ST(x) \
798: x; \ 798: x; \
8 .section .fixup; \
9 .align 4; \
1099: retl; \
11 mov 1, %o0; \
12 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
13 .align 4; \ 9 .align 4; \
14 .word 98b, 99b; \ 10 .word 98b, __retl_one; \
15 .text; \ 11 .text; \
16 .align 4; 12 .align 4;
17 13
@@ -27,7 +23,7 @@
27#define PREAMBLE \ 23#define PREAMBLE \
28 rd %asi, %g1; \ 24 rd %asi, %g1; \
29 cmp %g1, ASI_AIUS; \ 25 cmp %g1, ASI_AIUS; \
30 bne,pn %icc, memcpy_user_stub; \ 26 bne,pn %icc, ___copy_in_user; \
31 nop; \ 27 nop; \
32 28
33#include "U1memcpy.S" 29#include "U1memcpy.S"
diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S
index f5bfc8d9d216..b1acd1331c33 100644
--- a/arch/sparc/lib/U3copy_from_user.S
+++ b/arch/sparc/lib/U3copy_from_user.S
@@ -5,13 +5,9 @@
5 5
6#define EX_LD(x) \ 6#define EX_LD(x) \
798: x; \ 798: x; \
8 .section .fixup; \
9 .align 4; \
1099: retl; \
11 mov 1, %o0; \
12 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
13 .align 4; \ 9 .align 4; \
14 .word 98b, 99b; \ 10 .word 98b, __retl_one; \
15 .text; \ 11 .text; \
16 .align 4; 12 .align 4;
17 13
diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S
index 2334f111bb0c..ef1e493afdfa 100644
--- a/arch/sparc/lib/U3copy_to_user.S
+++ b/arch/sparc/lib/U3copy_to_user.S
@@ -5,13 +5,9 @@
5 5
6#define EX_ST(x) \ 6#define EX_ST(x) \
798: x; \ 798: x; \
8 .section .fixup; \
9 .align 4; \
1099: retl; \
11 mov 1, %o0; \
12 .section __ex_table,"a";\ 8 .section __ex_table,"a";\
13 .align 4; \ 9 .align 4; \
14 .word 98b, 99b; \ 10 .word 98b, __retl_one; \
15 .text; \ 11 .text; \
16 .align 4; 12 .align 4;
17 13
@@ -27,7 +23,7 @@
27#define PREAMBLE \ 23#define PREAMBLE \
28 rd %asi, %g1; \ 24 rd %asi, %g1; \
29 cmp %g1, ASI_AIUS; \ 25 cmp %g1, ASI_AIUS; \
30 bne,pn %icc, memcpy_user_stub; \ 26 bne,pn %icc, ___copy_in_user; \
31 nop; \ 27 nop; \
32 28
33#include "U3memcpy.S" 29#include "U3memcpy.S"
diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S
index c7bbae8c590f..b6557297440f 100644
--- a/arch/sparc/lib/bzero.S
+++ b/arch/sparc/lib/bzero.S
@@ -88,13 +88,9 @@ __bzero_done:
88 88
89#define EX_ST(x,y) \ 89#define EX_ST(x,y) \
9098: x,y; \ 9098: x,y; \
91 .section .fixup; \
92 .align 4; \
9399: retl; \
94 mov %o1, %o0; \
95 .section __ex_table,"a";\ 91 .section __ex_table,"a";\
96 .align 4; \ 92 .align 4; \
97 .word 98b, 99b; \ 93 .word 98b, __retl_o1; \
98 .text; \ 94 .text; \
99 .align 4; 95 .align 4;
100 96
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 650af3f21f78..302c0e60dc2c 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -3,19 +3,16 @@
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) 3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
4 */ 4 */
5 5
6#include <linux/linkage.h>
6#include <asm/asi.h> 7#include <asm/asi.h>
7 8
8#define XCC xcc 9#define XCC xcc
9 10
10#define EX(x,y) \ 11#define EX(x,y) \
1198: x,y; \ 1298: x,y; \
12 .section .fixup; \
13 .align 4; \
1499: retl; \
15 mov 1, %o0; \
16 .section __ex_table,"a";\ 13 .section __ex_table,"a";\
17 .align 4; \ 14 .align 4; \
18 .word 98b, 99b; \ 15 .word 98b, __retl_one; \
19 .text; \ 16 .text; \
20 .align 4; 17 .align 4;
21 18
@@ -31,18 +28,7 @@
31 * to copy register windows around during thread cloning. 28 * to copy register windows around during thread cloning.
32 */ 29 */
33 30
34 .globl ___copy_in_user 31ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
35 .type ___copy_in_user,#function
36___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
37 /* Writing to %asi is _expensive_ so we hardcode it.
38 * Reading %asi to check for KERNEL_DS is comparatively
39 * cheap.
40 */
41 rd %asi, %g1
42 cmp %g1, ASI_AIUS
43 bne,pn %icc, memcpy_user_stub
44 nop
45
46 cmp %o2, 0 32 cmp %o2, 0
47 be,pn %XCC, 85f 33 be,pn %XCC, 85f
48 or %o0, %o1, %o3 34 or %o0, %o1, %o3
@@ -53,22 +39,24 @@ ___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
53 /* 16 < len <= 64 */ 39 /* 16 < len <= 64 */
54 andcc %o3, 0x7, %g0 40 andcc %o3, 0x7, %g0
55 bne,pn %XCC, 90f 41 bne,pn %XCC, 90f
56 sub %o0, %o1, %o3 42 nop
57 43
58 andn %o2, 0x7, %o4 44 andn %o2, 0x7, %o4
59 and %o2, 0x7, %o2 45 and %o2, 0x7, %o2
601: subcc %o4, 0x8, %o4 461: subcc %o4, 0x8, %o4
61 EX(ldxa [%o1] %asi, %o5) 47 EX(ldxa [%o1] %asi, %o5)
62 EX(stxa %o5, [%o1 + %o3] ASI_AIUS) 48 EX(stxa %o5, [%o0] %asi)
49 add %o1, 0x8, %o1
63 bgu,pt %XCC, 1b 50 bgu,pt %XCC, 1b
64 add %o1, 0x8, %o1 51 add %o0, 0x8, %o0
65 andcc %o2, 0x4, %g0 52 andcc %o2, 0x4, %g0
66 be,pt %XCC, 1f 53 be,pt %XCC, 1f
67 nop 54 nop
68 sub %o2, 0x4, %o2 55 sub %o2, 0x4, %o2
69 EX(lduwa [%o1] %asi, %o5) 56 EX(lduwa [%o1] %asi, %o5)
70 EX(stwa %o5, [%o1 + %o3] ASI_AIUS) 57 EX(stwa %o5, [%o0] %asi)
71 add %o1, 0x4, %o1 58 add %o1, 0x4, %o1
59 add %o0, 0x4, %o0
721: cmp %o2, 0 601: cmp %o2, 0
73 be,pt %XCC, 85f 61 be,pt %XCC, 85f
74 nop 62 nop
@@ -78,14 +66,15 @@ ___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
7880: /* 0 < len <= 16 */ 6680: /* 0 < len <= 16 */
79 andcc %o3, 0x3, %g0 67 andcc %o3, 0x3, %g0
80 bne,pn %XCC, 90f 68 bne,pn %XCC, 90f
81 sub %o0, %o1, %o3 69 nop
82 70
8382: 7182:
84 subcc %o2, 4, %o2 72 subcc %o2, 4, %o2
85 EX(lduwa [%o1] %asi, %g1) 73 EX(lduwa [%o1] %asi, %g1)
86 EX(stwa %g1, [%o1 + %o3] ASI_AIUS) 74 EX(stwa %g1, [%o0] %asi)
75 add %o1, 4, %o1
87 bgu,pt %XCC, 82b 76 bgu,pt %XCC, 82b
88 add %o1, 4, %o1 77 add %o0, 4, %o0
89 78
9085: retl 7985: retl
91 clr %o0 80 clr %o0
@@ -94,26 +83,10 @@ ___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
9490: 8390:
95 subcc %o2, 1, %o2 84 subcc %o2, 1, %o2
96 EX(lduba [%o1] %asi, %g1) 85 EX(lduba [%o1] %asi, %g1)
97 EX(stba %g1, [%o1 + %o3] ASI_AIUS) 86 EX(stba %g1, [%o0] %asi)
87 add %o1, 1, %o1
98 bgu,pt %XCC, 90b 88 bgu,pt %XCC, 90b
99 add %o1, 1, %o1 89 add %o0, 1, %o0
100 retl 90 retl
101 clr %o0 91 clr %o0
102 92ENDPROC(___copy_in_user)
103 .size ___copy_in_user, .-___copy_in_user
104
105 /* Act like copy_{to,in}_user(), ie. return zero instead
106 * of original destination pointer. This is invoked when
107 * copy_{to,in}_user() finds that %asi is kernel space.
108 */
109 .globl memcpy_user_stub
110 .type memcpy_user_stub,#function
111memcpy_user_stub:
112 save %sp, -192, %sp
113 mov %i0, %o0
114 mov %i1, %o1
115 call memcpy
116 mov %i2, %o2
117 ret
118 restore %g0, %g0, %o0
119 .size memcpy_user_stub, .-memcpy_user_stub
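
Besides sending kernel-%asi callers to ___copy_in_user instead of the removed memcpy_user_stub, the rewrite stops deriving the destination from a cached (dst - src) offset in %o3 and advances %o0 and %o1 separately. A rough, self-contained C model of that loop structure (illustrative only; the real routine works through %asi-tagged loads/stores and returns 1 via the __retl_one fixup when an access faults):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	static unsigned long copy_in_user_model(void *dst, const void *src, size_t len)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		/* 8-byte chunks, source and destination advanced independently */
		while (len >= 8) {
			uint64_t v;
			memcpy(&v, s, 8);	/* stands in for EX(ldxa ...) */
			memcpy(d, &v, 8);	/* stands in for EX(stxa ...) */
			s += 8;
			d += 8;
			len -= 8;
		}
		/* byte tail, mirroring the label-90 loop */
		while (len--)
			*d++ = *s++;
		return 0;	/* success; a fault would make the fixup return 1 */
	}
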
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 256b00b61892..5a0d76dc56a4 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -418,9 +418,9 @@ ENTRY(ia32_syscall)
418 orl $TS_COMPAT,TI_status(%r10) 418 orl $TS_COMPAT,TI_status(%r10)
419 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) 419 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
420 jnz ia32_tracesys 420 jnz ia32_tracesys
421ia32_do_syscall:
422 cmpl $(IA32_NR_syscalls-1),%eax 421 cmpl $(IA32_NR_syscalls-1),%eax
423 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ 422 ja ia32_badsys
423ia32_do_call:
424 IA32_ARG_FIXUP 424 IA32_ARG_FIXUP
425 call *ia32_sys_call_table(,%rax,8) # xxx: rip relative 425 call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
426ia32_sysret: 426ia32_sysret:
@@ -435,7 +435,9 @@ ia32_tracesys:
435 call syscall_trace_enter 435 call syscall_trace_enter
436 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ 436 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
437 RESTORE_REST 437 RESTORE_REST
438 jmp ia32_do_syscall 438 cmpl $(IA32_NR_syscalls-1),%eax
439 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
440 jmp ia32_do_call
439END(ia32_syscall) 441END(ia32_syscall)
440 442
441ia32_badsys: 443ia32_badsys:
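
The effect of the ia32entry.S hunks is that the untraced fast path now rejects an out-of-range syscall number through ia32_badsys, while the ptrace path re-checks the (possibly rewritten) number after syscall_trace_enter and, if it is out of range, leaves through int_ret_from_sys_call since RAX(%rsp) already holds the return value. A C-level model of the fast-path ordering, with made-up names and a placeholder table size:

	#include <errno.h>

	#define NR_SYSCALLS_MODEL 337	/* placeholder, not the real IA32_NR_syscalls */

	typedef long (*syscall_fn)(long, long, long, long, long, long);
	extern syscall_fn call_table_model[NR_SYSCALLS_MODEL];

	static long dispatch_model(unsigned int nr, long a0, long a1, long a2,
				   long a3, long a4, long a5)
	{
		if (nr > NR_SYSCALLS_MODEL - 1)
			return -ENOSYS;				/* ia32_badsys */
		return call_table_model[nr](a0, a1, a2, a3, a4, a5);	/* ia32_do_call */
	}
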
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ba3e2ff6aedc..c09a14127584 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1402,6 +1402,7 @@ static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
1402{ 1402{
1403 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); 1403 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
1404} 1404}
1405#define __raw_spin_is_contended __raw_spin_is_contended
1405 1406
1406static __always_inline void __raw_spin_lock(struct raw_spinlock *lock) 1407static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
1407{ 1408{
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index d17c91981da2..8247e94ac6b1 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -245,6 +245,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
245{ 245{
246 return __ticket_spin_is_contended(lock); 246 return __ticket_spin_is_contended(lock);
247} 247}
248#define __raw_spin_is_contended __raw_spin_is_contended
248 249
249static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) 250static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
250{ 251{
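
Both hunks add the self-referential define so that generic spinlock code can test with #ifdef whether the architecture supplies __raw_spin_is_contended and only install a fallback when it does not. A standalone sketch of that probe pattern (illustrative names, not a quote of include/linux/spinlock.h):

	/* arch header: real implementation plus a macro marking its presence */
	static inline int arch_is_contended(void *lock)
	{
		(void)lock;
		return 0;	/* stand-in for the real ticket/paravirt query */
	}
	#define arch_is_contended arch_is_contended

	/* generic header: supply a fallback only when no arch version exists */
	#ifndef arch_is_contended
	static inline int arch_is_contended(void *lock)
	{
		(void)lock;
		return 0;	/* no contention information available */
	}
	#endif
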
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 707c1f6f95fa..a60c1f3bcb87 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -156,11 +156,11 @@ static int __init acpi_sleep_setup(char *str)
156#ifdef CONFIG_HIBERNATION 156#ifdef CONFIG_HIBERNATION
157 if (strncmp(str, "s4_nohwsig", 10) == 0) 157 if (strncmp(str, "s4_nohwsig", 10) == 0)
158 acpi_no_s4_hw_signature(); 158 acpi_no_s4_hw_signature();
159 if (strncmp(str, "s4_nonvs", 8) == 0)
160 acpi_s4_no_nvs();
159#endif 161#endif
160 if (strncmp(str, "old_ordering", 12) == 0) 162 if (strncmp(str, "old_ordering", 12) == 0)
161 acpi_old_suspend_ordering(); 163 acpi_old_suspend_ordering();
162 if (strncmp(str, "s4_nonvs", 8) == 0)
163 acpi_s4_no_nvs();
164 str = strchr(str, ','); 164 str = strchr(str, ',');
165 if (str != NULL) 165 if (str != NULL)
166 str += strspn(str, ", \t"); 166 str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 4b6df2469fe3..115449f869ee 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1436,7 +1436,7 @@ static int __init detect_init_APIC(void)
1436 switch (boot_cpu_data.x86_vendor) { 1436 switch (boot_cpu_data.x86_vendor) {
1437 case X86_VENDOR_AMD: 1437 case X86_VENDOR_AMD:
1438 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) || 1438 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
1439 (boot_cpu_data.x86 == 15)) 1439 (boot_cpu_data.x86 >= 15))
1440 break; 1440 break;
1441 goto no_apic; 1441 goto no_apic;
1442 case X86_VENDOR_INTEL: 1442 case X86_VENDOR_INTEL:
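
AMD reports family 15 (0xF, the K8) in boot_cpu_data.x86 and larger values for later parts (16 for Fam10h, 17 for Fam11h), so the exact "== 15" test sent detect_init_APIC() down the no_apic path on newer CPUs. A small standalone restatement of the fixed predicate:

	static int amd_has_integrated_apic(unsigned int family, unsigned int model)
	{
		/* mirrors the fixed condition: late family-6 models, or family >= 15 */
		return (family == 6 && model > 1) || family >= 15;
	}
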
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig
index efae3b22a0ff..65792c2cc462 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig
@@ -245,17 +245,6 @@ config X86_E_POWERSAVER
245 245
246comment "shared options" 246comment "shared options"
247 247
248config X86_ACPI_CPUFREQ_PROC_INTF
249 bool "/proc/acpi/processor/../performance interface (deprecated)"
250 depends on PROC_FS
251 depends on X86_ACPI_CPUFREQ || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
252 help
253 This enables the deprecated /proc/acpi/processor/../performance
254 interface. While it is helpful for debugging, the generic,
255 cross-architecture cpufreq interfaces should be used.
256
257 If in doubt, say N.
258
259config X86_SPEEDSTEP_LIB 248config X86_SPEEDSTEP_LIB
260 tristate 249 tristate
261 default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD) 250 default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 5c28b37dea11..fb039cd345d8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -939,10 +939,25 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
939 free_cpumask_var(data->acpi_data.shared_cpu_map); 939 free_cpumask_var(data->acpi_data.shared_cpu_map);
940} 940}
941 941
942static int get_transition_latency(struct powernow_k8_data *data)
943{
944 int max_latency = 0;
945 int i;
946 for (i = 0; i < data->acpi_data.state_count; i++) {
947 int cur_latency = data->acpi_data.states[i].transition_latency
948 + data->acpi_data.states[i].bus_master_latency;
949 if (cur_latency > max_latency)
950 max_latency = cur_latency;
951 }
952 /* value in usecs, needs to be in nanoseconds */
953 return 1000 * max_latency;
954}
955
942#else 956#else
943static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } 957static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
944static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } 958static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
945static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } 959static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
960static int get_transition_latency(struct powernow_k8_data *data) { return 0; }
946#endif /* CONFIG_X86_POWERNOW_K8_ACPI */ 961#endif /* CONFIG_X86_POWERNOW_K8_ACPI */
947 962
948/* Take a frequency, and issue the fid/vid transition command */ 963/* Take a frequency, and issue the fid/vid transition command */
@@ -1173,7 +1188,13 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1173 if (rc) { 1188 if (rc) {
1174 goto err_out; 1189 goto err_out;
1175 } 1190 }
1176 } 1191 /* Take a crude guess here.
1192 * That guess was in microseconds, so multiply with 1000 */
1193 pol->cpuinfo.transition_latency = (
1194 ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
1195 ((1 << data->irt) * 30)) * 1000;
1196 } else /* ACPI _PSS objects available */
1197 pol->cpuinfo.transition_latency = get_transition_latency(data);
1177 1198
1178 /* only run on specific CPU from here on */ 1199 /* only run on specific CPU from here on */
1179 oldmask = current->cpus_allowed; 1200 oldmask = current->cpus_allowed;
@@ -1204,11 +1225,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1204 cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu)); 1225 cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
1205 data->available_cores = pol->cpus; 1226 data->available_cores = pol->cpus;
1206 1227
1207 /* Take a crude guess here.
1208 * That guess was in microseconds, so multiply with 1000 */
1209 pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
1210 + (3 * (1 << data->irt) * 10)) * 1000;
1211
1212 if (cpu_family == CPU_HW_PSTATE) 1228 if (cpu_family == CPU_HW_PSTATE)
1213 pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); 1229 pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
1214 else 1230 else
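
When ACPI _PSS data is available, cpuinfo.transition_latency now comes from get_transition_latency() -- the worst per-state sum of transition and bus-master latency, converted from microseconds to nanoseconds -- while the old rvo/irt guess is kept only for the non-ACPI path. A toy, self-contained run of the same arithmetic with made-up values:

	#include <stdio.h>

	struct pss_state_model { int transition_latency; int bus_master_latency; };

	int main(void)
	{
		struct pss_state_model states[] = { { 100, 20 }, { 80, 10 } };
		int max = 0;

		for (unsigned int i = 0; i < sizeof(states) / sizeof(states[0]); i++) {
			int cur = states[i].transition_latency +
				  states[i].bus_master_latency;
			if (cur > max)
				max = cur;
		}
		/* worst case is 120 us, reported to cpufreq as 120000 ns */
		printf("transition_latency = %d ns\n", max * 1000);
		return 0;
	}
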
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 48533d77be78..da299eb85fc0 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -36,8 +36,11 @@ static struct _cache_table cache_table[] __cpuinitdata =
36{ 36{
37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ 38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
39 { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
39 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ 40 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
40 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ 41 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
42 { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
43 { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
41 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ 44 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
42 { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */ 45 { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
43 { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */ 46 { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */
@@ -85,6 +88,18 @@ static struct _cache_table cache_table[] __cpuinitdata =
85 { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */ 88 { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */
86 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ 89 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
87 { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */ 90 { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */
91 { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
92 { 0xd1, LVL_3, 1024 }, /* 4-way set assoc, 64 byte line size */
93 { 0xd2, LVL_3, 2048 }, /* 4-way set assoc, 64 byte line size */
94 { 0xd6, LVL_3, 1024 }, /* 8-way set assoc, 64 byte line size */
 95 { 0xd7, LVL_3, 2048 }, /* 8-way set assoc, 64 byte line size */
96 { 0xd8, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
97 { 0xdc, LVL_3, 2048 }, /* 12-way set assoc, 64 byte line size */
98 { 0xdd, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
99 { 0xde, LVL_3, 8192 }, /* 12-way set assoc, 64 byte line size */
100 { 0xe2, LVL_3, 2048 }, /* 16-way set assoc, 64 byte line size */
101 { 0xe3, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
102 { 0xe4, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
88 { 0x00, 0, 0} 103 { 0x00, 0, 0}
89}; 104};
90 105
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e28c7a987793..a1346217e43c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -346,6 +346,7 @@ ENTRY(save_args)
346 popq_cfi %rax /* move return address... */ 346 popq_cfi %rax /* move return address... */
347 mov %gs:pda_irqstackptr,%rsp 347 mov %gs:pda_irqstackptr,%rsp
348 EMPTY_FRAME 0 348 EMPTY_FRAME 0
349 pushq_cfi %rbp /* backlink for unwinder */
349 pushq_cfi %rax /* ... to the new stack */ 350 pushq_cfi %rax /* ... to the new stack */
350 /* 351 /*
351 * We entered an interrupt context - irqs are off: 352 * We entered an interrupt context - irqs are off:
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 1c4a1302536c..9b0c480c383b 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -2528,14 +2528,15 @@ static void irq_complete_move(struct irq_desc **descp)
2528 2528
2529 vector = ~get_irq_regs()->orig_ax; 2529 vector = ~get_irq_regs()->orig_ax;
2530 me = smp_processor_id(); 2530 me = smp_processor_id();
2531
2532 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
2531#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC 2533#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
2532 *descp = desc = move_irq_desc(desc, me); 2534 *descp = desc = move_irq_desc(desc, me);
2533 /* get the new one */ 2535 /* get the new one */
2534 cfg = desc->chip_data; 2536 cfg = desc->chip_data;
2535#endif 2537#endif
2536
2537 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2538 send_cleanup_vector(cfg); 2538 send_cleanup_vector(cfg);
2539 }
2539} 2540}
2540#else 2541#else
2541static inline void irq_complete_move(struct irq_desc **descp) {} 2542static inline void irq_complete_move(struct irq_desc **descp) {}
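
The io_apic.c change moves both the optional irq_desc migration and the cleanup IPI inside the check that the interrupt really arrived on its post-migration vector and on a CPU in the new domain, instead of migrating the descriptor unconditionally. A compact model of the reordered logic (toy types, not the kernel's irq_cfg):

	struct irq_cfg_model { unsigned int vector; unsigned long domain_mask; };

	static void irq_complete_move_model(struct irq_cfg_model *cfg,
					    unsigned int arrived_vector,
					    unsigned int this_cpu)
	{
		if (arrived_vector == cfg->vector &&
		    (cfg->domain_mask & (1UL << this_cpu))) {
			/* move_irq_desc() would run here when NUMA migration is on */
			/* send_cleanup_vector(cfg) then retires the old vector */
		}
	}
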
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 1507ad4e674d..10a09c2f1828 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void)
78 } 78 }
79} 79}
80 80
81/*
82 * IRQ2 is cascade interrupt to second interrupt controller
83 */
84static struct irqaction irq2 = {
85 .handler = no_action,
86 .mask = CPU_MASK_NONE,
87 .name = "cascade",
88};
89
90DEFINE_PER_CPU(vector_irq_t, vector_irq) = { 81DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
91 [0 ... IRQ0_VECTOR - 1] = -1, 82 [0 ... IRQ0_VECTOR - 1] = -1,
92 [IRQ0_VECTOR] = 0, 83 [IRQ0_VECTOR] = 0,
@@ -178,9 +169,6 @@ void __init native_init_IRQ(void)
178 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); 169 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
179#endif 170#endif
180 171
181 if (!acpi_ioapic)
182 setup_irq(2, &irq2);
183
184 /* setup after call gates are initialised (usually add in 172 /* setup after call gates are initialised (usually add in
185 * the architecture specific gates) 173 * the architecture specific gates)
186 */ 174 */
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index df167f265622..a265a7c63190 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -38,6 +38,15 @@ void __init pre_intr_init_hook(void)
38 init_ISA_irqs(); 38 init_ISA_irqs();
39} 39}
40 40
41/*
42 * IRQ2 is cascade interrupt to second interrupt controller
43 */
44static struct irqaction irq2 = {
45 .handler = no_action,
46 .mask = CPU_MASK_NONE,
47 .name = "cascade",
48};
49
41/** 50/**
42 * intr_init_hook - post gate setup interrupt initialisation 51 * intr_init_hook - post gate setup interrupt initialisation
43 * 52 *
@@ -53,6 +62,9 @@ void __init intr_init_hook(void)
53 if (x86_quirks->arch_intr_init()) 62 if (x86_quirks->arch_intr_init())
54 return; 63 return;
55 } 64 }
65 if (!acpi_ioapic)
66 setup_irq(2, &irq2);
67
56} 68}
57 69
58/** 70/**
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index a580b9562e76..d914a7996a66 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -33,13 +33,23 @@ void __init intr_init_hook(void)
33 setup_irq(2, &irq2); 33 setup_irq(2, &irq2);
34} 34}
35 35
36void __init pre_setup_arch_hook(void) 36static void voyager_disable_tsc(void)
37{ 37{
38 /* Voyagers run their CPUs from independent clocks, so disable 38 /* Voyagers run their CPUs from independent clocks, so disable
39 * the TSC code because we can't sync them */ 39 * the TSC code because we can't sync them */
40 setup_clear_cpu_cap(X86_FEATURE_TSC); 40 setup_clear_cpu_cap(X86_FEATURE_TSC);
41} 41}
42 42
43void __init pre_setup_arch_hook(void)
44{
45 voyager_disable_tsc();
46}
47
48void __init pre_time_init_hook(void)
49{
50 voyager_disable_tsc();
51}
52
43void __init trap_init_hook(void) 53void __init trap_init_hook(void)
44{ 54{
45} 55}
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 9840b7ec749a..7ffcdeec4631 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
81static void disable_local_vic_irq(unsigned int irq); 81static void disable_local_vic_irq(unsigned int irq);
82static void before_handle_vic_irq(unsigned int irq); 82static void before_handle_vic_irq(unsigned int irq);
83static void after_handle_vic_irq(unsigned int irq); 83static void after_handle_vic_irq(unsigned int irq);
84static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask); 84static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
85static void ack_vic_irq(unsigned int irq); 85static void ack_vic_irq(unsigned int irq);
86static void vic_enable_cpi(void); 86static void vic_enable_cpi(void);
87static void do_boot_cpu(__u8 cpuid); 87static void do_boot_cpu(__u8 cpuid);
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
211static cpumask_t smp_commenced_mask = CPU_MASK_NONE; 211static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
212 212
213/* This is for the new dynamic CPU boot code */ 213/* This is for the new dynamic CPU boot code */
214cpumask_t cpu_callin_map = CPU_MASK_NONE;
215cpumask_t cpu_callout_map = CPU_MASK_NONE;
216 214
217/* The per processor IRQ masks (these are usually kept in sync) */ 215/* The per processor IRQ masks (these are usually kept in sync) */
218static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; 216static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -378,7 +376,7 @@ void __init find_smp_config(void)
378 cpus_addr(phys_cpu_present_map)[0] |= 376 cpus_addr(phys_cpu_present_map)[0] |=
379 voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 377 voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
380 3) << 24; 378 3) << 24;
381 cpu_possible_map = phys_cpu_present_map; 379 init_cpu_possible(&phys_cpu_present_map);
382 printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", 380 printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
383 cpus_addr(phys_cpu_present_map)[0]); 381 cpus_addr(phys_cpu_present_map)[0]);
384 /* Here we set up the VIC to enable SMP */ 382 /* Here we set up the VIC to enable SMP */
@@ -1599,16 +1597,16 @@ static void after_handle_vic_irq(unsigned int irq)
1599 * change the mask and then do an interrupt enable CPI to re-enable on 1597 * change the mask and then do an interrupt enable CPI to re-enable on
1600 * the selected processors */ 1598 * the selected processors */
1601 1599
1602void set_vic_irq_affinity(unsigned int irq, cpumask_t mask) 1600void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
1603{ 1601{
1604 /* Only extended processors handle interrupts */ 1602 /* Only extended processors handle interrupts */
1605 unsigned long real_mask; 1603 unsigned long real_mask;
1606 unsigned long irq_mask = 1 << irq; 1604 unsigned long irq_mask = 1 << irq;
1607 int cpu; 1605 int cpu;
1608 1606
1609 real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors; 1607 real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
1610 1608
1611 if (cpus_addr(mask)[0] == 0) 1609 if (cpus_addr(*mask)[0] == 0)
1612 /* can't have no CPUs to accept the interrupt -- extremely 1610 /* can't have no CPUs to accept the interrupt -- extremely
1613 * bad things will happen */ 1611 * bad things will happen */
1614 return; 1612 return;
@@ -1750,10 +1748,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
1750 init_gdt(smp_processor_id()); 1748 init_gdt(smp_processor_id());
1751 switch_to_new_gdt(); 1749 switch_to_new_gdt();
1752 1750
1753 cpu_set(smp_processor_id(), cpu_online_map); 1751 cpu_online_map = cpumask_of_cpu(smp_processor_id());
1754 cpu_set(smp_processor_id(), cpu_callout_map); 1752 cpu_callout_map = cpumask_of_cpu(smp_processor_id());
1755 cpu_set(smp_processor_id(), cpu_possible_map); 1753 cpu_callin_map = CPU_MASK_NONE;
1756 cpu_set(smp_processor_id(), cpu_present_map); 1754 cpu_present_map = cpumask_of_cpu(smp_processor_id());
1755
1757} 1756}
1758 1757
1759static int __cpuinit voyager_cpu_up(unsigned int cpu) 1758static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1783,9 +1782,9 @@ void __init smp_setup_processor_id(void)
1783 x86_write_percpu(cpu_number, hard_smp_processor_id()); 1782 x86_write_percpu(cpu_number, hard_smp_processor_id());
1784} 1783}
1785 1784
1786static void voyager_send_call_func(cpumask_t callmask) 1785static void voyager_send_call_func(const struct cpumask *callmask)
1787{ 1786{
1788 __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id()); 1787 __u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
1789 send_CPI(mask, VIC_CALL_FUNCTION_CPI); 1788 send_CPI(mask, VIC_CALL_FUNCTION_CPI);
1790} 1789}
1791 1790
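
The voyager signature changes follow the tree-wide move from passing cpumask_t by value to passing const struct cpumask *, with cpus_addr(*mask) dereferencing the pointer wherever the raw word of bits is needed. A standalone sketch of that calling convention with a toy single-word mask:

	#include <stdio.h>

	struct cpumask_model { unsigned long bits; };

	static void send_cpi_model(const struct cpumask_model *mask, unsigned int self)
	{
		unsigned long targets = mask->bits & ~(1UL << self);
		printf("CPI target mask: %#lx\n", targets);
	}

	int main(void)
	{
		struct cpumask_model callmask = { .bits = 0x7 };	/* CPUs 0-2 */
		send_cpi_model(&callmask, 0);				/* prints 0x6 */
		return 0;
	}
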
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 90dfae511a41..c76ef1d701c9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -603,8 +603,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
603 603
604 si_code = SEGV_MAPERR; 604 si_code = SEGV_MAPERR;
605 605
606 if (notify_page_fault(regs))
607 return;
608 if (unlikely(kmmio_fault(regs, address))) 606 if (unlikely(kmmio_fault(regs, address)))
609 return; 607 return;
610 608
@@ -634,6 +632,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
634 if (spurious_fault(address, error_code)) 632 if (spurious_fault(address, error_code))
635 return; 633 return;
636 634
635 /* kprobes don't want to hook the spurious faults. */
636 if (notify_page_fault(regs))
637 return;
637 /* 638 /*
638 * Don't take the mm semaphore here. If we fixup a prefetch 639 * Don't take the mm semaphore here. If we fixup a prefetch
639 * fault we could otherwise deadlock. 640 * fault we could otherwise deadlock.
@@ -641,6 +642,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
641 goto bad_area_nosemaphore; 642 goto bad_area_nosemaphore;
642 } 643 }
643 644
645 /* kprobes don't want to hook the spurious faults. */
646 if (notify_page_fault(regs))
647 return;
644 648
645 /* 649 /*
646 * It's safe to allow irq's after cr2 has been saved and the 650 * It's safe to allow irq's after cr2 has been saved and the
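
In fault.c the kprobes hook is no longer the first thing consulted: spurious faults from lazy kernel mapping updates are filtered out first, and only then is notify_page_fault() given a chance to claim the fault, on both the kernel-address fast path and the normal path per the two added hunks. A stubbed-out model of the resulting ordering:

	static int kmmio_fault_stub(void)       { return 0; }
	static int spurious_fault_stub(void)    { return 0; }
	static int notify_page_fault_stub(void) { return 0; }

	static int do_page_fault_model(void)
	{
		if (kmmio_fault_stub())
			return 0;
		if (spurious_fault_stub())
			return 0;	/* lazy-mapping artifact: kprobes never sees it */
		if (notify_page_fault_stub())
			return 0;	/* kprobes claimed the fault */
		/* ... vmalloc handling, VMA lookup, signal delivery ... */
		return -1;
	}
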
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index 858938241616..fa3e10725d98 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -19,8 +19,10 @@ DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
19 paired with xen_mc_issue() */ 19 paired with xen_mc_issue() */
20static inline void xen_mc_batch(void) 20static inline void xen_mc_batch(void)
21{ 21{
22 unsigned long flags;
22 /* need to disable interrupts until this entry is complete */ 23 /* need to disable interrupts until this entry is complete */
23 local_irq_save(__get_cpu_var(xen_mc_irq_flags)); 24 local_irq_save(flags);
25 __get_cpu_var(xen_mc_irq_flags) = flags;
24} 26}
25 27
26static inline struct multicall_space xen_mc_entry(size_t args) 28static inline struct multicall_space xen_mc_entry(size_t args)
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 7c41e7405c41..56c62e2858d5 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -149,6 +149,9 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
149 if (q == alg) 149 if (q == alg)
150 goto err; 150 goto err;
151 151
152 if (crypto_is_moribund(q))
153 continue;
154
152 if (crypto_is_larval(q)) { 155 if (crypto_is_larval(q)) {
153 if (!strcmp(alg->cra_driver_name, q->cra_driver_name)) 156 if (!strcmp(alg->cra_driver_name, q->cra_driver_name))
154 goto err; 157 goto err;
@@ -197,7 +200,7 @@ void crypto_alg_tested(const char *name, int err)
197 200
198 down_write(&crypto_alg_sem); 201 down_write(&crypto_alg_sem);
199 list_for_each_entry(q, &crypto_alg_list, cra_list) { 202 list_for_each_entry(q, &crypto_alg_list, cra_list) {
200 if (!crypto_is_larval(q)) 203 if (crypto_is_moribund(q) || !crypto_is_larval(q))
201 continue; 204 continue;
202 205
203 test = (struct crypto_larval *)q; 206 test = (struct crypto_larval *)q;
@@ -210,6 +213,7 @@ void crypto_alg_tested(const char *name, int err)
210 goto unlock; 213 goto unlock;
211 214
212found: 215found:
216 q->cra_flags |= CRYPTO_ALG_DEAD;
213 alg = test->adult; 217 alg = test->adult;
214 if (err || list_empty(&alg->cra_list)) 218 if (err || list_empty(&alg->cra_list))
215 goto complete; 219 goto complete;
diff --git a/crypto/api.c b/crypto/api.c
index 9975a7bd246c..efe77df6863f 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -557,34 +557,34 @@ err:
557 return ERR_PTR(err); 557 return ERR_PTR(err);
558} 558}
559EXPORT_SYMBOL_GPL(crypto_alloc_tfm); 559EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
560 560
561/* 561/*
562 * crypto_free_tfm - Free crypto transform 562 * crypto_destroy_tfm - Free crypto transform
563 * @mem: Start of tfm slab
563 * @tfm: Transform to free 564 * @tfm: Transform to free
564 * 565 *
565 * crypto_free_tfm() frees up the transform and any associated resources, 566 * This function frees up the transform and any associated resources,
566 * then drops the refcount on the associated algorithm. 567 * then drops the refcount on the associated algorithm.
567 */ 568 */
568void crypto_free_tfm(struct crypto_tfm *tfm) 569void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
569{ 570{
570 struct crypto_alg *alg; 571 struct crypto_alg *alg;
571 int size; 572 int size;
572 573
573 if (unlikely(!tfm)) 574 if (unlikely(!mem))
574 return; 575 return;
575 576
576 alg = tfm->__crt_alg; 577 alg = tfm->__crt_alg;
577 size = sizeof(*tfm) + alg->cra_ctxsize; 578 size = ksize(mem);
578 579
579 if (!tfm->exit && alg->cra_exit) 580 if (!tfm->exit && alg->cra_exit)
580 alg->cra_exit(tfm); 581 alg->cra_exit(tfm);
581 crypto_exit_ops(tfm); 582 crypto_exit_ops(tfm);
582 crypto_mod_put(alg); 583 crypto_mod_put(alg);
583 memset(tfm, 0, size); 584 memset(mem, 0, size);
584 kfree(tfm); 585 kfree(mem);
585} 586}
586 587EXPORT_SYMBOL_GPL(crypto_destroy_tfm);
587EXPORT_SYMBOL_GPL(crypto_free_tfm);
588 588
589int crypto_has_alg(const char *name, u32 type, u32 mask) 589int crypto_has_alg(const char *name, u32 type, u32 mask)
590{ 590{
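
crypto_destroy_tfm() now takes the start of the allocation separately from the tfm pointer and sizes the wipe with ksize(mem), so callers whose tfm sits at the start of the slab can keep a one-line free helper. A sketch of what such a wrapper could look like for the plain-tfm case (the wrapper name and its placement are assumptions, not shown in this hunk):

	struct crypto_tfm;	/* opaque here */
	void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

	static inline void crypto_free_tfm_sketch(struct crypto_tfm *tfm)
	{
		crypto_destroy_tfm(tfm, tfm);	/* mem and tfm coincide in this case */
	}
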
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 9aeeb52004a5..3de89a424401 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -54,7 +54,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
54 struct page *page; 54 struct page *page;
55 55
56 page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); 56 page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
57 flush_dcache_page(page); 57 if (!PageSlab(page))
58 flush_dcache_page(page);
58 } 59 }
59 60
60 if (more) { 61 if (more) {
diff --git a/crypto/shash.c b/crypto/shash.c
index c9df367332ff..d5a2b619c55f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -388,10 +388,15 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
388 struct shash_desc *desc = crypto_tfm_ctx(tfm); 388 struct shash_desc *desc = crypto_tfm_ctx(tfm);
389 struct crypto_shash *shash; 389 struct crypto_shash *shash;
390 390
391 if (!crypto_mod_get(calg))
392 return -EAGAIN;
393
391 shash = __crypto_shash_cast(crypto_create_tfm( 394 shash = __crypto_shash_cast(crypto_create_tfm(
392 calg, &crypto_shash_type)); 395 calg, &crypto_shash_type));
393 if (IS_ERR(shash)) 396 if (IS_ERR(shash)) {
397 crypto_mod_put(calg);
394 return PTR_ERR(shash); 398 return PTR_ERR(shash);
399 }
395 400
396 desc->tfm = shash; 401 desc->tfm = shash;
397 tfm->exit = crypto_exit_shash_ops_compat; 402 tfm->exit = crypto_exit_shash_ops_compat;
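
The shash compat-init fix is the usual take-a-reference, drop-it-on-error shape: grab a module reference with crypto_mod_get() before crypto_create_tfm(), and put it back if tfm creation fails. A generic, self-contained sketch of that pattern with invented names:

	#include <errno.h>
	#include <stdbool.h>

	struct thing { int refcnt; };

	static bool thing_get(struct thing *t) { t->refcnt++; return true; }
	static void thing_put(struct thing *t) { t->refcnt--; }
	static int  thing_use(struct thing *t) { (void)t; return 0; /* may fail */ }

	static int init_with_rollback(struct thing *t)
	{
		int err;

		if (!thing_get(t))
			return -EAGAIN;		/* mirrors the crypto_mod_get() check */

		err = thing_use(t);
		if (err) {
			thing_put(t);		/* undo the reference on failure */
			return err;
		}
		return 0;
	}
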
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index d7f9839ba264..a7799a99f2d9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -9,6 +9,7 @@ menuconfig ACPI
9 depends on PCI 9 depends on PCI
10 depends on PM 10 depends on PM
11 select PNP 11 select PNP
12 select CPU_IDLE
12 default y 13 default y
13 ---help--- 14 ---help---
14 Advanced Configuration and Power Interface (ACPI) support for 15 Advanced Configuration and Power Interface (ACPI) support for
@@ -287,7 +288,7 @@ config ACPI_CONTAINER
287 support physical cpu/memory hot-plug. 288 support physical cpu/memory hot-plug.
288 289
289 If one selects "m", this driver can be loaded with 290 If one selects "m", this driver can be loaded with
290 "modprobe acpi_container". 291 "modprobe container".
291 292
292config ACPI_HOTPLUG_MEMORY 293config ACPI_HOTPLUG_MEMORY
293 tristate "Memory Hotplug" 294 tristate "Memory Hotplug"
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 9684cc827930..22ce48985720 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -538,10 +538,9 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
538 if (ACPI_FAILURE(status)) { 538 if (ACPI_FAILURE(status)) {
539 ACPI_WARNING((AE_INFO, 539 ACPI_WARNING((AE_INFO,
540 "Truncating %u table entries!", 540 "Truncating %u table entries!",
541 (unsigned) 541 (unsigned) (table_count -
542 (acpi_gbl_root_table_list.size - 542 (acpi_gbl_root_table_list.
543 acpi_gbl_root_table_list. 543 count - 2))));
544 count)));
545 break; 544 break;
546 } 545 }
547 } 546 }
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index da9450bc60f7..9c9897dbe907 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -116,9 +116,9 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
116 return_ACPI_STATUS(AE_NO_MEMORY); 116 return_ACPI_STATUS(AE_NO_MEMORY);
117 } 117 }
118 118
119 /* Default return value is SUPPORTED */ 119 /* Default return value is 0, NOT-SUPPORTED */
120 120
121 return_desc->integer.value = ACPI_UINT32_MAX; 121 return_desc->integer.value = 0;
122 walk_state->return_desc = return_desc; 122 walk_state->return_desc = return_desc;
123 123
124 /* Compare input string to static table of supported interfaces */ 124 /* Compare input string to static table of supported interfaces */
@@ -127,10 +127,8 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
127 if (!ACPI_STRCMP 127 if (!ACPI_STRCMP
128 (string_desc->string.pointer, 128 (string_desc->string.pointer,
129 acpi_interfaces_supported[i])) { 129 acpi_interfaces_supported[i])) {
130 130 return_desc->integer.value = ACPI_UINT32_MAX;
131 /* The interface is supported */ 131 goto done;
132
133 return_ACPI_STATUS(AE_OK);
134 } 132 }
135 } 133 }
136 134
@@ -141,15 +139,14 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
141 */ 139 */
142 status = acpi_os_validate_interface(string_desc->string.pointer); 140 status = acpi_os_validate_interface(string_desc->string.pointer);
143 if (ACPI_SUCCESS(status)) { 141 if (ACPI_SUCCESS(status)) {
144 142 return_desc->integer.value = ACPI_UINT32_MAX;
145 /* The interface is supported */
146
147 return_ACPI_STATUS(AE_OK);
148 } 143 }
149 144
150 /* The interface is not supported */ 145done:
146 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO, "ACPI: BIOS _OSI(%s) %ssupported\n",
147 string_desc->string.pointer,
148 return_desc->integer.value == 0 ? "not-" : ""));
151 149
152 return_desc->integer.value = 0;
153 return_ACPI_STATUS(AE_OK); 150 return_ACPI_STATUS(AE_OK);
154} 151}
155 152
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 17020c12623c..fe0cdf83641a 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -163,7 +163,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
163 case ACPI_NOTIFY_BUS_CHECK: 163 case ACPI_NOTIFY_BUS_CHECK:
164 /* Fall through */ 164 /* Fall through */
165 case ACPI_NOTIFY_DEVICE_CHECK: 165 case ACPI_NOTIFY_DEVICE_CHECK:
166 printk("Container driver received %s event\n", 166 printk(KERN_WARNING "Container driver received %s event\n",
167 (type == ACPI_NOTIFY_BUS_CHECK) ? 167 (type == ACPI_NOTIFY_BUS_CHECK) ?
168 "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"); 168 "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");
169 status = acpi_bus_get_device(handle, &device); 169 status = acpi_bus_get_device(handle, &device);
@@ -174,7 +174,8 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
174 kobject_uevent(&device->dev.kobj, 174 kobject_uevent(&device->dev.kobj,
175 KOBJ_ONLINE); 175 KOBJ_ONLINE);
176 else 176 else
177 printk("Failed to add container\n"); 177 printk(KERN_WARNING
178 "Failed to add container\n");
178 } 179 }
179 } else { 180 } else {
180 if (ACPI_SUCCESS(status)) { 181 if (ACPI_SUCCESS(status)) {
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 5b30b8d91d71..35094f230b1e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -855,10 +855,14 @@ fdd_out:
855static ssize_t show_docked(struct device *dev, 855static ssize_t show_docked(struct device *dev,
856 struct device_attribute *attr, char *buf) 856 struct device_attribute *attr, char *buf)
857{ 857{
858 struct acpi_device *tmp;
859
858 struct dock_station *dock_station = *((struct dock_station **) 860 struct dock_station *dock_station = *((struct dock_station **)
859 dev->platform_data); 861 dev->platform_data);
860 return snprintf(buf, PAGE_SIZE, "%d\n", dock_present(dock_station));
861 862
863 if (ACPI_SUCCESS(acpi_bus_get_device(dock_station->handle, &tmp)))
864 return snprintf(buf, PAGE_SIZE, "1\n");
865 return snprintf(buf, PAGE_SIZE, "0\n");
862} 866}
863static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL); 867static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
864 868
@@ -984,7 +988,7 @@ static int dock_add(acpi_handle handle)
984 988
985 ret = device_create_file(&dock_device->dev, &dev_attr_docked); 989 ret = device_create_file(&dock_device->dev, &dev_attr_docked);
986 if (ret) { 990 if (ret) {
987 printk("Error %d adding sysfs file\n", ret); 991 printk(KERN_ERR "Error %d adding sysfs file\n", ret);
988 platform_device_unregister(dock_device); 992 platform_device_unregister(dock_device);
989 kfree(dock_station); 993 kfree(dock_station);
990 dock_station = NULL; 994 dock_station = NULL;
@@ -992,7 +996,7 @@ static int dock_add(acpi_handle handle)
992 } 996 }
993 ret = device_create_file(&dock_device->dev, &dev_attr_undock); 997 ret = device_create_file(&dock_device->dev, &dev_attr_undock);
994 if (ret) { 998 if (ret) {
995 printk("Error %d adding sysfs file\n", ret); 999 printk(KERN_ERR "Error %d adding sysfs file\n", ret);
996 device_remove_file(&dock_device->dev, &dev_attr_docked); 1000 device_remove_file(&dock_device->dev, &dev_attr_docked);
997 platform_device_unregister(dock_device); 1001 platform_device_unregister(dock_device);
998 kfree(dock_station); 1002 kfree(dock_station);
@@ -1001,7 +1005,7 @@ static int dock_add(acpi_handle handle)
1001 } 1005 }
1002 ret = device_create_file(&dock_device->dev, &dev_attr_uid); 1006 ret = device_create_file(&dock_device->dev, &dev_attr_uid);
1003 if (ret) { 1007 if (ret) {
1004 printk("Error %d adding sysfs file\n", ret); 1008 printk(KERN_ERR "Error %d adding sysfs file\n", ret);
1005 device_remove_file(&dock_device->dev, &dev_attr_docked); 1009 device_remove_file(&dock_device->dev, &dev_attr_docked);
1006 device_remove_file(&dock_device->dev, &dev_attr_undock); 1010 device_remove_file(&dock_device->dev, &dev_attr_undock);
1007 platform_device_unregister(dock_device); 1011 platform_device_unregister(dock_device);
@@ -1011,7 +1015,7 @@ static int dock_add(acpi_handle handle)
1011 } 1015 }
1012 ret = device_create_file(&dock_device->dev, &dev_attr_flags); 1016 ret = device_create_file(&dock_device->dev, &dev_attr_flags);
1013 if (ret) { 1017 if (ret) {
1014 printk("Error %d adding sysfs file\n", ret); 1018 printk(KERN_ERR "Error %d adding sysfs file\n", ret);
1015 device_remove_file(&dock_device->dev, &dev_attr_docked); 1019 device_remove_file(&dock_device->dev, &dev_attr_docked);
1016 device_remove_file(&dock_device->dev, &dev_attr_undock); 1020 device_remove_file(&dock_device->dev, &dev_attr_undock);
1017 device_remove_file(&dock_device->dev, &dev_attr_uid); 1021 device_remove_file(&dock_device->dev, &dev_attr_uid);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a2b82c90a683..5c2f5d343be6 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -982,7 +982,7 @@ int __init acpi_ec_ecdt_probe(void)
982 saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 982 saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
983 if (!saved_ec) 983 if (!saved_ec)
984 return -ENOMEM; 984 return -ENOMEM;
985 memcpy(&saved_ec, boot_ec, sizeof(saved_ec)); 985 memcpy(saved_ec, boot_ec, sizeof(*saved_ec));
986 /* fall through */ 986 /* fall through */
987 } 987 }
988 /* This workaround is needed only on some broken machines, 988 /* This workaround is needed only on some broken machines,
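
The ec.c change fixes a classic sizeof-of-a-pointer slip: the old line copied sizeof(saved_ec) bytes over the pointer variable itself instead of filling the freshly allocated structure. A reduced, standalone illustration with toy types:

	#include <stdlib.h>
	#include <string.h>

	struct ec_model { int gpe; int command_addr; int data_addr; };

	static struct ec_model *clone_ec(const struct ec_model *boot_ec)
	{
		struct ec_model *saved_ec = malloc(sizeof(*saved_ec));

		if (!saved_ec)
			return NULL;
		/* wrong (old shape): memcpy(&saved_ec, boot_ec, sizeof(saved_ec)); */
		memcpy(saved_ec, boot_ec, sizeof(*saved_ec));	/* correct */
		return saved_ec;
	}
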
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index adec3d15810a..5479b9f42513 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -255,12 +255,12 @@ static int acpi_platform_notify(struct device *dev)
255 } 255 }
256 type = acpi_get_bus_type(dev->bus); 256 type = acpi_get_bus_type(dev->bus);
257 if (!type) { 257 if (!type) {
258 DBG("No ACPI bus support for %s\n", dev->bus_id); 258 DBG("No ACPI bus support for %s\n", dev_name(dev));
259 ret = -EINVAL; 259 ret = -EINVAL;
260 goto end; 260 goto end;
261 } 261 }
262 if ((ret = type->find_device(dev, &handle)) != 0) 262 if ((ret = type->find_device(dev, &handle)) != 0)
263 DBG("Can't get handler for %s\n", dev->bus_id); 263 DBG("Can't get handler for %s\n", dev_name(dev));
264 end: 264 end:
265 if (!ret) 265 if (!ret)
266 acpi_bind_one(dev, handle); 266 acpi_bind_one(dev, handle);
@@ -271,10 +271,10 @@ static int acpi_platform_notify(struct device *dev)
271 271
272 acpi_get_name(dev->archdata.acpi_handle, 272 acpi_get_name(dev->archdata.acpi_handle,
273 ACPI_FULL_PATHNAME, &buffer); 273 ACPI_FULL_PATHNAME, &buffer);
274 DBG("Device %s -> %s\n", dev->bus_id, (char *)buffer.pointer); 274 DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
275 kfree(buffer.pointer); 275 kfree(buffer.pointer);
276 } else 276 } else
277 DBG("Device %s -> No ACPI support\n", dev->bus_id); 277 DBG("Device %s -> No ACPI support\n", dev_name(dev));
278#endif 278#endif
279 279
280 return ret; 280 return ret;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 6729a4992f2b..b3193ec0a2ef 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -228,10 +228,10 @@ void acpi_os_vprintf(const char *fmt, va_list args)
228 if (acpi_in_debugger) { 228 if (acpi_in_debugger) {
229 kdb_printf("%s", buffer); 229 kdb_printf("%s", buffer);
230 } else { 230 } else {
231 printk("%s", buffer); 231 printk(KERN_CONT "%s", buffer);
232 } 232 }
233#else 233#else
234 printk("%s", buffer); 234 printk(KERN_CONT "%s", buffer);
235#endif 235#endif
236} 236}
237 237
@@ -1317,6 +1317,54 @@ acpi_os_validate_interface (char *interface)
1317 return AE_SUPPORT; 1317 return AE_SUPPORT;
1318} 1318}
1319 1319
1320#ifdef CONFIG_X86
1321
1322struct aml_port_desc {
1323 uint start;
1324 uint end;
1325 char* name;
1326 char warned;
1327};
1328
1329static struct aml_port_desc aml_invalid_port_list[] = {
1330 {0x20, 0x21, "PIC0", 0},
1331 {0xA0, 0xA1, "PIC1", 0},
1332 {0x4D0, 0x4D1, "ELCR", 0}
1333};
1334
1335/*
1336 * valid_aml_io_address()
1337 *
1338 * if valid, return true
1339 * else invalid, warn once, return false
1340 */
1341static bool valid_aml_io_address(uint address, uint length)
1342{
1343 int i;
1344 int entries = sizeof(aml_invalid_port_list) / sizeof(struct aml_port_desc);
1345
1346 for (i = 0; i < entries; ++i) {
1347 if ((address >= aml_invalid_port_list[i].start &&
1348 address <= aml_invalid_port_list[i].end) ||
1349 (address + length >= aml_invalid_port_list[i].start &&
1350 address + length <= aml_invalid_port_list[i].end))
1351 {
1352 if (!aml_invalid_port_list[i].warned)
1353 {
1354 printk(KERN_ERR "ACPI: Denied BIOS AML access"
1355 " to invalid port 0x%x+0x%x (%s)\n",
1356 address, length,
1357 aml_invalid_port_list[i].name);
1358 aml_invalid_port_list[i].warned = 1;
1359 }
1360 return false; /* invalid */
1361 }
1362 }
1363 return true; /* valid */
1364}
1365#else
1366static inline bool valid_aml_io_address(uint address, uint length) { return true; }
1367#endif
1320/****************************************************************************** 1368/******************************************************************************
1321 * 1369 *
1322 * FUNCTION: acpi_os_validate_address 1370 * FUNCTION: acpi_os_validate_address
@@ -1346,6 +1394,8 @@ acpi_os_validate_address (
1346 1394
1347 switch (space_id) { 1395 switch (space_id) {
1348 case ACPI_ADR_SPACE_SYSTEM_IO: 1396 case ACPI_ADR_SPACE_SYSTEM_IO:
1397 if (!valid_aml_io_address(address, length))
1398 return AE_AML_ILLEGAL_ADDRESS;
1349 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 1399 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1350 /* Only interference checks against SystemIO and SytemMemory 1400 /* Only interference checks against SystemIO and SytemMemory
1351 are needed */ 1401 are needed */
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 1c6e73c7865e..6c772ca76bd1 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -593,7 +593,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
593 return -ENODEV; 593 return -ENODEV;
594 } else { 594 } else {
595 acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING; 595 acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
596 printk(PREFIX "%s [%s] enabled at IRQ %d\n", 596 printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
597 acpi_device_name(link->device), 597 acpi_device_name(link->device),
598 acpi_device_bid(link->device), link->irq.active); 598 acpi_device_bid(link->device), link->irq.active);
599 } 599 }
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 66a9d8145562..7bc22a471fe3 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -66,43 +66,17 @@ ACPI_MODULE_NAME("processor_idle");
66#define ACPI_PROCESSOR_FILE_POWER "power" 66#define ACPI_PROCESSOR_FILE_POWER "power"
67#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000) 67#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
68#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY) 68#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
69#ifndef CONFIG_CPU_IDLE
70#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */
71#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */
72static void (*pm_idle_save) (void) __read_mostly;
73#else
74#define C2_OVERHEAD 1 /* 1us */ 69#define C2_OVERHEAD 1 /* 1us */
75#define C3_OVERHEAD 1 /* 1us */ 70#define C3_OVERHEAD 1 /* 1us */
76#endif
77#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000)) 71#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
78 72
79static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; 73static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
80#ifdef CONFIG_CPU_IDLE
81module_param(max_cstate, uint, 0000); 74module_param(max_cstate, uint, 0000);
82#else
83module_param(max_cstate, uint, 0644);
84#endif
85static unsigned int nocst __read_mostly; 75static unsigned int nocst __read_mostly;
86module_param(nocst, uint, 0000); 76module_param(nocst, uint, 0000);
87 77
88#ifndef CONFIG_CPU_IDLE
89/*
90 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
91 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
92 * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
93 * 100 HZ: 0x0000000F: 4 jiffies = 40ms
94 * reduce history for more aggressive entry into C3
95 */
96static unsigned int bm_history __read_mostly =
97 (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
98module_param(bm_history, uint, 0644);
99
100static int acpi_processor_set_power_policy(struct acpi_processor *pr);
101
102#else /* CONFIG_CPU_IDLE */
103static unsigned int latency_factor __read_mostly = 2; 78static unsigned int latency_factor __read_mostly = 2;
104module_param(latency_factor, uint, 0644); 79module_param(latency_factor, uint, 0644);
105#endif
106 80
107/* 81/*
108 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 82 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
@@ -224,71 +198,6 @@ static void acpi_safe_halt(void)
224 current_thread_info()->status |= TS_POLLING; 198 current_thread_info()->status |= TS_POLLING;
225} 199}
226 200
227#ifndef CONFIG_CPU_IDLE
228
229static void
230acpi_processor_power_activate(struct acpi_processor *pr,
231 struct acpi_processor_cx *new)
232{
233 struct acpi_processor_cx *old;
234
235 if (!pr || !new)
236 return;
237
238 old = pr->power.state;
239
240 if (old)
241 old->promotion.count = 0;
242 new->demotion.count = 0;
243
244 /* Cleanup from old state. */
245 if (old) {
246 switch (old->type) {
247 case ACPI_STATE_C3:
248 /* Disable bus master reload */
249 if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
250 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
251 break;
252 }
253 }
254
255 /* Prepare to use new state. */
256 switch (new->type) {
257 case ACPI_STATE_C3:
258 /* Enable bus master reload */
259 if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
260 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
261 break;
262 }
263
264 pr->power.state = new;
265
266 return;
267}
268
269static atomic_t c3_cpu_count;
270
271/* Common C-state entry for C2, C3, .. */
272static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
273{
274 /* Don't trace irqs off for idle */
275 stop_critical_timings();
276 if (cstate->entry_method == ACPI_CSTATE_FFH) {
277 /* Call into architectural FFH based C-state */
278 acpi_processor_ffh_cstate_enter(cstate);
279 } else {
280 int unused;
281 /* IO port based C-state */
282 inb(cstate->address);
283 /* Dummy wait op - must do something useless after P_LVL2 read
284 because chipsets cannot guarantee that STPCLK# signal
285 gets asserted in time to freeze execution properly. */
286 unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
287 }
288 start_critical_timings();
289}
290#endif /* !CONFIG_CPU_IDLE */
291
292#ifdef ARCH_APICTIMER_STOPS_ON_C3 201#ifdef ARCH_APICTIMER_STOPS_ON_C3
293 202
294/* 203/*
@@ -390,421 +299,6 @@ static int tsc_halts_in_c(int state)
390} 299}
391#endif 300#endif
392 301
393#ifndef CONFIG_CPU_IDLE
394static void acpi_processor_idle(void)
395{
396 struct acpi_processor *pr = NULL;
397 struct acpi_processor_cx *cx = NULL;
398 struct acpi_processor_cx *next_state = NULL;
399 int sleep_ticks = 0;
400 u32 t1, t2 = 0;
401
402 /*
403 * Interrupts must be disabled during bus mastering calculations and
404 * for C2/C3 transitions.
405 */
406 local_irq_disable();
407
408 pr = __get_cpu_var(processors);
409 if (!pr) {
410 local_irq_enable();
411 return;
412 }
413
414 /*
415 * Check whether we truly need to go idle, or should
416 * reschedule:
417 */
418 if (unlikely(need_resched())) {
419 local_irq_enable();
420 return;
421 }
422
423 cx = pr->power.state;
424 if (!cx || acpi_idle_suspend) {
425 if (pm_idle_save) {
426 pm_idle_save(); /* enables IRQs */
427 } else {
428 acpi_safe_halt();
429 local_irq_enable();
430 }
431
432 return;
433 }
434
435 /*
436 * Check BM Activity
437 * -----------------
438 * Check for bus mastering activity (if required), record, and check
439 * for demotion.
440 */
441 if (pr->flags.bm_check) {
442 u32 bm_status = 0;
443 unsigned long diff = jiffies - pr->power.bm_check_timestamp;
444
445 if (diff > 31)
446 diff = 31;
447
448 pr->power.bm_activity <<= diff;
449
450 acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
451 if (bm_status) {
452 pr->power.bm_activity |= 0x1;
453 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
454 }
455 /*
456 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
457 * the true state of bus mastering activity; forcing us to
458 * manually check the BMIDEA bit of each IDE channel.
459 */
460 else if (errata.piix4.bmisx) {
461 if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
462 || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
463 pr->power.bm_activity |= 0x1;
464 }
465
466 pr->power.bm_check_timestamp = jiffies;
467
468 /*
469 * If bus mastering is or was active this jiffy, demote
470 * to avoid a faulty transition. Note that the processor
471 * won't enter a low-power state during this call (to this
472 * function) but should upon the next.
473 *
474 * TBD: A better policy might be to fallback to the demotion
475 * state (use it for this quantum only) istead of
476 * demoting -- and rely on duration as our sole demotion
477 * qualification. This may, however, introduce DMA
478 * issues (e.g. floppy DMA transfer overrun/underrun).
479 */
480 if ((pr->power.bm_activity & 0x1) &&
481 cx->demotion.threshold.bm) {
482 local_irq_enable();
483 next_state = cx->demotion.state;
484 goto end;
485 }
486 }
487
488#ifdef CONFIG_HOTPLUG_CPU
489 /*
490 * Check for P_LVL2_UP flag before entering C2 and above on
491 * an SMP system. We do it here instead of doing it at _CST/P_LVL
492 * detection phase, to work cleanly with logical CPU hotplug.
493 */
494 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
495 !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
496 cx = &pr->power.states[ACPI_STATE_C1];
497#endif
498
499 /*
500 * Sleep:
501 * ------
502 * Invoke the current Cx state to put the processor to sleep.
503 */
504 if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
505 current_thread_info()->status &= ~TS_POLLING;
506 /*
507 * TS_POLLING-cleared state must be visible before we
508 * test NEED_RESCHED:
509 */
510 smp_mb();
511 if (need_resched()) {
512 current_thread_info()->status |= TS_POLLING;
513 local_irq_enable();
514 return;
515 }
516 }
517
518 switch (cx->type) {
519
520 case ACPI_STATE_C1:
521 /*
522 * Invoke C1.
523 * Use the appropriate idle routine, the one that would
524 * be used without acpi C-states.
525 */
526 if (pm_idle_save) {
527 pm_idle_save(); /* enables IRQs */
528 } else {
529 acpi_safe_halt();
530 local_irq_enable();
531 }
532
533 /*
534 * TBD: Can't get time duration while in C1, as resumes
535 * go to an ISR rather than here. Need to instrument
536 * base interrupt handler.
537 *
538 * Note: the TSC better not stop in C1, sched_clock() will
539 * skew otherwise.
540 */
541 sleep_ticks = 0xFFFFFFFF;
542
543 break;
544
545 case ACPI_STATE_C2:
546 /* Get start time (ticks) */
547 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
548 /* Tell the scheduler that we are going deep-idle: */
549 sched_clock_idle_sleep_event();
550 /* Invoke C2 */
551 acpi_state_timer_broadcast(pr, cx, 1);
552 acpi_cstate_enter(cx);
553 /* Get end time (ticks) */
554 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
555
556#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
557 /* TSC halts in C2, so notify users */
558 if (tsc_halts_in_c(ACPI_STATE_C2))
559 mark_tsc_unstable("possible TSC halt in C2");
560#endif
561 /* Compute time (ticks) that we were actually asleep */
562 sleep_ticks = ticks_elapsed(t1, t2);
563
564 /* Tell the scheduler how much we idled: */
565 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
566
567 /* Re-enable interrupts */
568 local_irq_enable();
569 /* Do not account our idle-switching overhead: */
570 sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;
571
572 current_thread_info()->status |= TS_POLLING;
573 acpi_state_timer_broadcast(pr, cx, 0);
574 break;
575
576 case ACPI_STATE_C3:
577 acpi_unlazy_tlb(smp_processor_id());
578 /*
579 * Must be done before busmaster disable as we might
580 * need to access HPET !
581 */
582 acpi_state_timer_broadcast(pr, cx, 1);
583 /*
584 * disable bus master
585 * bm_check implies we need ARB_DIS
586 * !bm_check implies we need cache flush
587 * bm_control implies whether we can do ARB_DIS
588 *
589 * That leaves a case where bm_check is set and bm_control is
590 * not set. In that case we cannot do much, we enter C3
591 * without doing anything.
592 */
593 if (pr->flags.bm_check && pr->flags.bm_control) {
594 if (atomic_inc_return(&c3_cpu_count) ==
595 num_online_cpus()) {
596 /*
597 * All CPUs are trying to go to C3
598 * Disable bus master arbitration
599 */
600 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
601 }
602 } else if (!pr->flags.bm_check) {
603 /* SMP with no shared cache... Invalidate cache */
604 ACPI_FLUSH_CPU_CACHE();
605 }
606
607 /* Get start time (ticks) */
608 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
609 /* Invoke C3 */
610 /* Tell the scheduler that we are going deep-idle: */
611 sched_clock_idle_sleep_event();
612 acpi_cstate_enter(cx);
613 /* Get end time (ticks) */
614 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
615 if (pr->flags.bm_check && pr->flags.bm_control) {
616 /* Enable bus master arbitration */
617 atomic_dec(&c3_cpu_count);
618 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
619 }
620
621#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
622 /* TSC halts in C3, so notify users */
623 if (tsc_halts_in_c(ACPI_STATE_C3))
624 mark_tsc_unstable("TSC halts in C3");
625#endif
626 /* Compute time (ticks) that we were actually asleep */
627 sleep_ticks = ticks_elapsed(t1, t2);
628 /* Tell the scheduler how much we idled: */
629 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
630
631 /* Re-enable interrupts */
632 local_irq_enable();
633 /* Do not account our idle-switching overhead: */
634 sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;
635
636 current_thread_info()->status |= TS_POLLING;
637 acpi_state_timer_broadcast(pr, cx, 0);
638 break;
639
640 default:
641 local_irq_enable();
642 return;
643 }
644 cx->usage++;
645 if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
646 cx->time += sleep_ticks;
647
648 next_state = pr->power.state;
649
650#ifdef CONFIG_HOTPLUG_CPU
651 /* Don't do promotion/demotion */
652 if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
653 !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
654 next_state = cx;
655 goto end;
656 }
657#endif
658
659 /*
660 * Promotion?
661 * ----------
662 * Track the number of longs (time asleep is greater than threshold)
663 * and promote when the count threshold is reached. Note that bus
664 * mastering activity may prevent promotions.
665 * Do not promote above max_cstate.
666 */
667 if (cx->promotion.state &&
668 ((cx->promotion.state - pr->power.states) <= max_cstate)) {
669 if (sleep_ticks > cx->promotion.threshold.ticks &&
670 cx->promotion.state->latency <=
671 pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
672 cx->promotion.count++;
673 cx->demotion.count = 0;
674 if (cx->promotion.count >=
675 cx->promotion.threshold.count) {
676 if (pr->flags.bm_check) {
677 if (!
678 (pr->power.bm_activity & cx->
679 promotion.threshold.bm)) {
680 next_state =
681 cx->promotion.state;
682 goto end;
683 }
684 } else {
685 next_state = cx->promotion.state;
686 goto end;
687 }
688 }
689 }
690 }
691
692 /*
693 * Demotion?
694 * ---------
695 * Track the number of shorts (time asleep is less than time threshold)
696 * and demote when the usage threshold is reached.
697 */
698 if (cx->demotion.state) {
699 if (sleep_ticks < cx->demotion.threshold.ticks) {
700 cx->demotion.count++;
701 cx->promotion.count = 0;
702 if (cx->demotion.count >= cx->demotion.threshold.count) {
703 next_state = cx->demotion.state;
704 goto end;
705 }
706 }
707 }
708
709 end:
710 /*
711 * Demote if current state exceeds max_cstate
712 * or if the latency of the current state is unacceptable
713 */
714 if ((pr->power.state - pr->power.states) > max_cstate ||
715 pr->power.state->latency >
716 pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
717 if (cx->demotion.state)
718 next_state = cx->demotion.state;
719 }
720
721 /*
722 * New Cx State?
723 * -------------
724 * If we're going to start using a new Cx state we must clean up
725 * from the previous and prepare to use the new.
726 */
727 if (next_state != pr->power.state)
728 acpi_processor_power_activate(pr, next_state);
729}
730
731static int acpi_processor_set_power_policy(struct acpi_processor *pr)
732{
733 unsigned int i;
734 unsigned int state_is_set = 0;
735 struct acpi_processor_cx *lower = NULL;
736 struct acpi_processor_cx *higher = NULL;
737 struct acpi_processor_cx *cx;
738
739
740 if (!pr)
741 return -EINVAL;
742
743 /*
744 * This function sets the default Cx state policy (OS idle handler).
745 * Our scheme is to promote quickly to C2 but more conservatively
746 * to C3. We're favoring C2 for its characteristics of low latency
747 * (quick response), good power savings, and ability to allow bus
748 * mastering activity. Note that the Cx state policy is completely
749 * customizable and can be altered dynamically.
750 */
751
752 /* startup state */
753 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
754 cx = &pr->power.states[i];
755 if (!cx->valid)
756 continue;
757
758 if (!state_is_set)
759 pr->power.state = cx;
760 state_is_set++;
761 break;
762 }
763
764 if (!state_is_set)
765 return -ENODEV;
766
767 /* demotion */
768 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
769 cx = &pr->power.states[i];
770 if (!cx->valid)
771 continue;
772
773 if (lower) {
774 cx->demotion.state = lower;
775 cx->demotion.threshold.ticks = cx->latency_ticks;
776 cx->demotion.threshold.count = 1;
777 if (cx->type == ACPI_STATE_C3)
778 cx->demotion.threshold.bm = bm_history;
779 }
780
781 lower = cx;
782 }
783
784 /* promotion */
785 for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
786 cx = &pr->power.states[i];
787 if (!cx->valid)
788 continue;
789
790 if (higher) {
791 cx->promotion.state = higher;
792 cx->promotion.threshold.ticks = cx->latency_ticks;
793 if (cx->type >= ACPI_STATE_C2)
794 cx->promotion.threshold.count = 4;
795 else
796 cx->promotion.threshold.count = 10;
797 if (higher->type == ACPI_STATE_C3)
798 cx->promotion.threshold.bm = bm_history;
799 }
800
801 higher = cx;
802 }
803
804 return 0;
805}
806#endif /* !CONFIG_CPU_IDLE */
807
808 302 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
809 303 {
810 304
@@ -1047,11 +541,7 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
1047 541  */
1048 542 cx->valid = 1;
1049 543
1050#ifndef CONFIG_CPU_IDLE
1051 cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
1052#else
1053 544 cx->latency_ticks = cx->latency;
1054#endif
1055 545
1056 546 return;
1057 547 }
@@ -1121,7 +611,6 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
1121 " for C3 to be enabled on SMP systems\n")); 611 " for C3 to be enabled on SMP systems\n"));
1122 return; 612 return;
1123 } 613 }
1124 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
1125 } 614 }
1126 615
1127 /* 616 /*
@@ -1132,11 +621,16 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
1132 621  */
1133 622 cx->valid = 1;
1134 623
1135#ifndef CONFIG_CPU_IDLE
1136 cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
1137#else
1138 624 cx->latency_ticks = cx->latency;
1139#endif 625 /*
626 * On older chipsets, BM_RLD needs to be set
627 * in order for Bus Master activity to wake the
628 * system from C3. Newer chipsets handle DMA
629 * during C3 automatically and BM_RLD is a NOP.
630 * In either case, the proper way to
631 * handle BM_RLD is to set it and leave it set.
632 */
633 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
1140 634
1141 635 return;
1142 636 }
@@ -1201,20 +695,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
1201 695
1202 696 pr->power.count = acpi_processor_power_verify(pr);
1203 697
1204#ifndef CONFIG_CPU_IDLE
1205 /*
1206 * Set Default Policy
1207 * ------------------
1208 * Now that we know which states are supported, set the default
1209 * policy. Note that this policy can be changed dynamically
1210 * (e.g. encourage deeper sleeps to conserve battery life when
1211 * not on AC).
1212 */
1213 result = acpi_processor_set_power_policy(pr);
1214 if (result)
1215 return result;
1216#endif
1217
1218 698 /*
1219 699  * if one state of type C2 or C3 is available, mark this
1220 700  * CPU as being "idle manageable"
@@ -1312,69 +792,6 @@ static const struct file_operations acpi_processor_power_fops = {
1312 792 .release = single_release,
1313 793 };
1314 794
1315#ifndef CONFIG_CPU_IDLE
1316
1317int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1318{
1319 int result = 0;
1320
1321 if (boot_option_idle_override)
1322 return 0;
1323
1324 if (!pr)
1325 return -EINVAL;
1326
1327 if (nocst) {
1328 return -ENODEV;
1329 }
1330
1331 if (!pr->flags.power_setup_done)
1332 return -ENODEV;
1333
1334 /*
1335 * Fall back to the default idle loop, when pm_idle_save had
1336 * been initialized.
1337 */
1338 if (pm_idle_save) {
1339 pm_idle = pm_idle_save;
1340 /* Relies on interrupts forcing exit from idle. */
1341 synchronize_sched();
1342 }
1343
1344 pr->flags.power = 0;
1345 result = acpi_processor_get_power_info(pr);
1346 if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
1347 pm_idle = acpi_processor_idle;
1348
1349 return result;
1350}
1351
1352#ifdef CONFIG_SMP
1353static void smp_callback(void *v)
1354{
1355 /* we already woke the CPU up, nothing more to do */
1356}
1357
1358/*
1359 * This function gets called when a part of the kernel has a new latency
1360 * requirement. This means we need to get all processors out of their C-state,
1361 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
1362 * wakes them all right up.
1363 */
1364static int acpi_processor_latency_notify(struct notifier_block *b,
1365 unsigned long l, void *v)
1366{
1367 smp_call_function(smp_callback, NULL, 1);
1368 return NOTIFY_OK;
1369}
1370
1371static struct notifier_block acpi_processor_latency_notifier = {
1372 .notifier_call = acpi_processor_latency_notify,
1373};
1374
1375#endif
1376
1377#else /* CONFIG_CPU_IDLE */
1378 795
1379 796 /**
1380 797  * acpi_idle_bm_check - checks if bus master activity was detected
@@ -1383,7 +800,7 @@ static int acpi_idle_bm_check(void)
1383 800 {
1384 801 u32 bm_status = 0;
1385 802
1386 acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); 803 acpi_get_register_unlocked(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
1387 804 if (bm_status)
1388 805 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
1389 806 /*
@@ -1400,25 +817,6 @@ static int acpi_idle_bm_check(void)
1400 817 }
1401 818
1402 819 /**
1403 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
1404 * @pr: the processor
1405 * @target: the new target state
1406 */
1407static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
1408 struct acpi_processor_cx *target)
1409{
1410 if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
1411 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
1412 pr->flags.bm_rld_set = 0;
1413 }
1414
1415 if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
1416 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
1417 pr->flags.bm_rld_set = 1;
1418 }
1419}
1420
1421/**
1422 820  * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
1423 821  * @cx: cstate data
1424 822  *
@@ -1473,9 +871,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
1473 871 return 0;
1474 872 }
1475 873
1476 if (pr->flags.bm_check)
1477 acpi_idle_update_bm_rld(pr, cx);
1478
1479 874 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1480 875 acpi_idle_do_entry(cx);
1481 876 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
@@ -1527,9 +922,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
1527 922  */
1528 923 acpi_state_timer_broadcast(pr, cx, 1);
1529 924
1530 if (pr->flags.bm_check)
1531 acpi_idle_update_bm_rld(pr, cx);
1532
1533 925 if (cx->type == ACPI_STATE_C3)
1534 926 ACPI_FLUSH_CPU_CACHE();
1535 927
@@ -1621,8 +1013,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1621 1013  */
1622 1014 acpi_state_timer_broadcast(pr, cx, 1);
1623 1015
1624 acpi_idle_update_bm_rld(pr, cx);
1625
1626 1016 /*
1627 1017  * disable bus master
1628 1018  * bm_check implies we need ARB_DIS
@@ -1795,8 +1185,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1795 1185 return ret;
1796 1186 }
1797 1187
1798#endif /* CONFIG_CPU_IDLE */
1799
1800 1188 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1801 1189 struct acpi_device *device)
1802 1190 {
@@ -1825,10 +1213,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1825 "ACPI: processor limited to max C-state %d\n", 1213 "ACPI: processor limited to max C-state %d\n",
1826 max_cstate); 1214 max_cstate);
1827 first_run++; 1215 first_run++;
1828#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
1829 pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
1830 &acpi_processor_latency_notifier);
1831#endif
1832 1216 }
1833 1217
1834 1218 if (!pr)
@@ -1852,11 +1236,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1852 1236  * platforms that only support C1.
1853 1237  */
1854 1238 if (pr->flags.power) {
1855#ifdef CONFIG_CPU_IDLE
1856 1239 acpi_processor_setup_cpuidle(pr);
1857 1240 if (cpuidle_register_device(&pr->power.dev))
1858 1241 return -EIO;
1859#endif
1860 1242
1861 1243 printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
1862 1244 for (i = 1; i <= pr->power.count; i++)
@@ -1864,13 +1246,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1864 printk(" C%d[C%d]", i, 1246 printk(" C%d[C%d]", i,
1865 pr->power.states[i].type); 1247 pr->power.states[i].type);
1866 printk(")\n"); 1248 printk(")\n");
1867
1868#ifndef CONFIG_CPU_IDLE
1869 if (pr->id == 0) {
1870 pm_idle_save = pm_idle;
1871 pm_idle = acpi_processor_idle;
1872 }
1873#endif
1874 1249 }
1875 1250
1876 1251 /* 'power' [R] */
@@ -1889,34 +1264,12 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
1889 1264 if (boot_option_idle_override)
1890 1265 return 0;
1891 1266
1892#ifdef CONFIG_CPU_IDLE
1893 1267 cpuidle_unregister_device(&pr->power.dev);
1894#endif
1895 1268 pr->flags.power_setup_done = 0;
1896 1269
1897 1270 if (acpi_device_dir(device))
1898 1271 remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
1899 1272 acpi_device_dir(device));
1900 1273
1901#ifndef CONFIG_CPU_IDLE
1902
1903 /* Unregister the idle handler when processor #0 is removed. */
1904 if (pr->id == 0) {
1905 if (pm_idle_save)
1906 pm_idle = pm_idle_save;
1907
1908 /*
1909 * We are about to unload the current idle thread pm callback
1910 * (pm_idle), Wait for all processors to update cached/local
1911 * copies of pm_idle before proceeding.
1912 */
1913 cpu_idle_wait();
1914#ifdef CONFIG_SMP
1915 pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
1916 &acpi_processor_latency_notifier);
1917#endif
1918 }
1919#endif
1920
1921 1274 return 0;
1922 1275 }
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 846e227592d4..9cc769b587ff 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -31,14 +31,6 @@
31 31 #include <linux/init.h>
32 32 #include <linux/cpufreq.h>
33 33
34#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
37#include <linux/mutex.h>
38
39#include <asm/uaccess.h>
40#endif
41
42 34 #ifdef CONFIG_X86
43 35 #include <asm/cpufeature.h>
44 36 #endif
@@ -434,96 +426,6 @@ int acpi_processor_notify_smm(struct module *calling_module)
434 426
435 427 EXPORT_SYMBOL(acpi_processor_notify_smm);
436 428
437#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
438/* /proc/acpi/processor/../performance interface (DEPRECATED) */
439
440static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
441static struct file_operations acpi_processor_perf_fops = {
442 .owner = THIS_MODULE,
443 .open = acpi_processor_perf_open_fs,
444 .read = seq_read,
445 .llseek = seq_lseek,
446 .release = single_release,
447};
448
449static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
450{
451 struct acpi_processor *pr = seq->private;
452 int i;
453
454
455 if (!pr)
456 goto end;
457
458 if (!pr->performance) {
459 seq_puts(seq, "<not supported>\n");
460 goto end;
461 }
462
463 seq_printf(seq, "state count: %d\n"
464 "active state: P%d\n",
465 pr->performance->state_count, pr->performance->state);
466
467 seq_puts(seq, "states:\n");
468 for (i = 0; i < pr->performance->state_count; i++)
469 seq_printf(seq,
470 " %cP%d: %d MHz, %d mW, %d uS\n",
471 (i == pr->performance->state ? '*' : ' '), i,
472 (u32) pr->performance->states[i].core_frequency,
473 (u32) pr->performance->states[i].power,
474 (u32) pr->performance->states[i].transition_latency);
475
476 end:
477 return 0;
478}
479
480static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
481{
482 return single_open(file, acpi_processor_perf_seq_show,
483 PDE(inode)->data);
484}
485
486static void acpi_cpufreq_add_file(struct acpi_processor *pr)
487{
488 struct acpi_device *device = NULL;
489
490
491 if (acpi_bus_get_device(pr->handle, &device))
492 return;
493
494 /* add file 'performance' [R/W] */
495 proc_create_data(ACPI_PROCESSOR_FILE_PERFORMANCE, S_IFREG | S_IRUGO,
496 acpi_device_dir(device),
497 &acpi_processor_perf_fops, acpi_driver_data(device));
498 return;
499}
500
501static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
502{
503 struct acpi_device *device = NULL;
504
505
506 if (acpi_bus_get_device(pr->handle, &device))
507 return;
508
509 /* remove file 'performance' */
510 remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
511 acpi_device_dir(device));
512
513 return;
514}
515
516#else
517static void acpi_cpufreq_add_file(struct acpi_processor *pr)
518{
519 return;
520}
521static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
522{
523 return;
524}
525#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
526
527 429 static int acpi_processor_get_psd(struct acpi_processor *pr)
528 430 {
529 431 int result = 0;
@@ -747,14 +649,12 @@ err_ret:
747 649 }
748 650 EXPORT_SYMBOL(acpi_processor_preregister_performance);
749 651
750
751 652 int
752 653 acpi_processor_register_performance(struct acpi_processor_performance
753 654 *performance, unsigned int cpu)
754 655 {
755 656 struct acpi_processor *pr;
756 657
757
758 658 if (!(acpi_processor_ppc_status & PPC_REGISTERED))
759 659 return -EINVAL;
760 660
@@ -781,8 +681,6 @@ acpi_processor_register_performance(struct acpi_processor_performance
781 681 return -EIO;
782 682 }
783 683
784 acpi_cpufreq_add_file(pr);
785
786 684 mutex_unlock(&performance_mutex);
787 685 return 0;
788 686 }
@@ -795,7 +693,6 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
795 693 {
796 694 struct acpi_processor *pr;
797 695
798
799 696 mutex_lock(&performance_mutex);
800 697
801 698 pr = per_cpu(processors, cpu);
@@ -808,8 +705,6 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
808 705 kfree(pr->performance->states);
809 706 pr->performance = NULL;
810 707
811 acpi_cpufreq_remove_file(pr);
812
813 708 mutex_unlock(&performance_mutex);
814 709
815 710 return;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 7e3c609cbef2..519266654f06 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -90,31 +90,6 @@ void __init acpi_old_suspend_ordering(void)
90 90 old_suspend_ordering = true;
91 91 }
92 92
93/*
94 * According to the ACPI specification the BIOS should make sure that ACPI is
95 * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
96 * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
97 * on such systems during resume. Unfortunately that doesn't help in
98 * particularly pathological cases in which SCI_EN has to be set directly on
99 * resume, although the specification states very clearly that this flag is
100 * owned by the hardware. The set_sci_en_on_resume variable will be set in such
101 * cases.
102 */
103static bool set_sci_en_on_resume;
104/*
105 * The ACPI specification wants us to save NVS memory regions during hibernation
106 * and to restore them during the subsequent resume. However, it is not certain
107 * if this mechanism is going to work on all machines, so we allow the user to
108 * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line
109 * option.
110 */
111static bool s4_no_nvs;
112
113void __init acpi_s4_no_nvs(void)
114{
115 s4_no_nvs = true;
116}
117
118 93 /**
119 94  * acpi_pm_disable_gpes - Disable the GPEs.
120 95  */
@@ -193,6 +168,18 @@ static void acpi_pm_end(void)
193 168 #endif /* CONFIG_ACPI_SLEEP */
194 169
195 170 #ifdef CONFIG_SUSPEND
171/*
172 * According to the ACPI specification the BIOS should make sure that ACPI is
173 * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
174 * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
175 * on such systems during resume. Unfortunately that doesn't help in
176 * particularly pathological cases in which SCI_EN has to be set directly on
177 * resume, although the specification states very clearly that this flag is
178 * owned by the hardware. The set_sci_en_on_resume variable will be set in such
179 * cases.
180 */
181static bool set_sci_en_on_resume;
182
196 183 extern void do_suspend_lowlevel(void);
197 184
198 185 static u32 acpi_suspend_states[] = {
@@ -396,6 +383,20 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
396 383 #endif /* CONFIG_SUSPEND */
397 384
398 385 #ifdef CONFIG_HIBERNATION
386/*
387 * The ACPI specification wants us to save NVS memory regions during hibernation
388 * and to restore them during the subsequent resume. However, it is not certain
389 * if this mechanism is going to work on all machines, so we allow the user to
390 * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line
391 * option.
392 */
393static bool s4_no_nvs;
394
395void __init acpi_s4_no_nvs(void)
396{
397 s4_no_nvs = true;
398}
399
399 400 static unsigned long s4_hardware_signature;
400 401 static struct acpi_table_facs *facs;
401 402 static bool nosigcheck;
@@ -679,7 +680,7 @@ static void acpi_power_off_prepare(void)
679 680 static void acpi_power_off(void)
680 681 {
681 682 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
682 printk("%s called\n", __func__); 683 printk(KERN_DEBUG "%s called\n", __func__);
683 684 local_irq_disable();
684 685 acpi_enable_wakeup_device(ACPI_STATE_S5);
685 686 acpi_enter_sleep_state(ACPI_STATE_S5);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 775c97a282bd..a8852952fac4 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -293,7 +293,12 @@ static void __init check_multiple_madt(void)
293 293
294 294 int __init acpi_table_init(void)
295 295 {
296 acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); 296 acpi_status status;
297
298 status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
299 if (ACPI_FAILURE(status))
300 return 1;
301
297 302 check_multiple_madt();
298 303 return 0;
299 304 }
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index f261737636da..bb5ed059114a 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1020,7 +1020,7 @@ acpi_video_device_brightness_seq_show(struct seq_file *seq, void *offset)
1020 1020 }
1021 1021
1022 1022 seq_printf(seq, "levels: ");
1023 for (i = 0; i < dev->brightness->count; i++) 1023 for (i = 2; i < dev->brightness->count; i++)
1024 1024 seq_printf(seq, " %d", dev->brightness->levels[i]);
1025 1025 seq_printf(seq, "\ncurrent: %d\n", dev->brightness->curr);
1026 1026
@@ -1059,7 +1059,7 @@ acpi_video_device_write_brightness(struct file *file,
1059 1059 return -EFAULT;
1060 1060
1061 1061 /* validate through the list of available levels */
1062 for (i = 0; i < dev->brightness->count; i++) 1062 for (i = 2; i < dev->brightness->count; i++)
1063 1063 if (level == dev->brightness->levels[i]) {
1064 1064 if (ACPI_SUCCESS
1065 1065 (acpi_video_device_lcd_set_level(dev, level)))
@@ -1260,7 +1260,7 @@ static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset)
1260 1260 printk(KERN_WARNING PREFIX
1261 1261 "This indicates a BIOS bug. Please contact the manufacturer.\n");
1262 1262 }
1263 printk("%llx\n", options); 1263 printk(KERN_WARNING "%llx\n", options);
1264 seq_printf(seq, "can POST: <integrated video>"); 1264 seq_printf(seq, "can POST: <integrated video>");
1265 if (options & 2) 1265 if (options & 2)
1266 seq_printf(seq, " <PCI video>"); 1266 seq_printf(seq, " <PCI video>");
@@ -1712,7 +1712,7 @@ acpi_video_get_next_level(struct acpi_video_device *device,
1712 1712 max = max_below = 0;
1713 1713 min = min_above = 255;
1714 1714 /* Find closest level to level_current */
1715 for (i = 0; i < device->brightness->count; i++) { 1715 for (i = 2; i < device->brightness->count; i++) {
1716 1716 l = device->brightness->levels[i];
1717 1717 if (abs(l - level_current) < abs(delta)) {
1718 1718 delta = l - level_current;
@@ -1722,7 +1722,7 @@ acpi_video_get_next_level(struct acpi_video_device *device,
1722 1722 }
1723 1723 /* Ajust level_current to closest available level */
1724 1724 level_current += delta;
1725 for (i = 0; i < device->brightness->count; i++) { 1725 for (i = 2; i < device->brightness->count; i++) {
1726 1726 l = device->brightness->levels[i];
1727 1727 if (l < min)
1728 1728 min = l;
@@ -2006,6 +2006,12 @@ static int acpi_video_bus_add(struct acpi_device *device)
2006 2006 device->pnp.bus_id[3] = '0' + instance;
2007 2007 instance ++;
2008 2008 }
2009 /* a hack to fix the duplicate name "VGA" problem on Pa 3553 */
2010 if (!strcmp(device->pnp.bus_id, "VGA")) {
2011 if (instance)
2012 device->pnp.bus_id[3] = '0' + instance;
2013 instance++;
2014 }
2009 2015
2010 2016 video->device = device;
2011 2017 strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 72fc0f799a64..89d7a6e94c9c 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -685,6 +685,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
685 out_release_regions: 685 out_release_regions:
686 pci_release_regions(dev); 686 pci_release_regions(dev);
687 out: 687 out:
688 kfree(card);
688 return err; 689 return err;
689} 690}
690 691
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index b60be7b0decf..f146e90404fa 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -1713,8 +1713,8 @@ static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
1713 1713 for (i = 0; i < SX_NBOARDS; i++)
1714 1714 sx_dprintk(SX_DEBUG_FIRMWARE, "<%x> ", boards[i].flags);
1715 1715 sx_dprintk(SX_DEBUG_FIRMWARE, "\n");
1716 unlock_kernel(); 1716 rc = -EIO;
1717 return -EIO; 1717 goto out;
1718 } 1718 }
1719 1719
1720 switch (cmd) { 1720 switch (cmd) {
@@ -1747,7 +1747,8 @@ static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
1747 1747 break;
1748 1748 case SXIO_DO_RAMTEST:
1749 1749 if (sx_initialized) /* Already initialized: better not ramtest the board. */
1750 return -EPERM; 1750 rc = -EPERM;
1751 break;
1751 if (IS_SX_BOARD(board)) { 1752 if (IS_SX_BOARD(board)) {
1752 rc = do_memtest(board, 0, 0x7000); 1753 rc = do_memtest(board, 0, 0x7000);
1753 if (!rc) 1754 if (!rc)
@@ -1844,6 +1845,7 @@ static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
1844 rc = -ENOTTY; 1845 rc = -ENOTTY;
1845 break; 1846 break;
1846 } 1847 }
1848out:
1847 unlock_kernel(); 1849 unlock_kernel();
1848 func_exit(); 1850 func_exit();
1849 return rc; 1851 return rc;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6a2b036c9389..6f45b1658a67 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -117,11 +117,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
117 117 busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
118 118 busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
119 119 busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
120 120 busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
121 if (!dbs_tuners_ins.ignore_nice) {
122 busy_time = cputime64_add(busy_time,
123 kstat_cpu(cpu).cpustat.nice);
124 }
125 121
126 idle_time = cputime64_sub(cur_wall_time, busy_time); 122 idle_time = cputime64_sub(cur_wall_time, busy_time);
127 if (wall) 123 if (wall)
@@ -137,23 +133,6 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
137 if (idle_time == -1ULL) 133 if (idle_time == -1ULL)
138 return get_cpu_idle_time_jiffy(cpu, wall); 134 return get_cpu_idle_time_jiffy(cpu, wall);
139 135
140 if (dbs_tuners_ins.ignore_nice) {
141 cputime64_t cur_nice;
142 unsigned long cur_nice_jiffies;
143 struct cpu_dbs_info_s *dbs_info;
144
145 dbs_info = &per_cpu(cpu_dbs_info, cpu);
146 cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice,
147 dbs_info->prev_cpu_nice);
148 /*
149 * Assumption: nice time between sampling periods will be
150 * less than 2^32 jiffies for 32 bit sys
151 */
152 cur_nice_jiffies = (unsigned long)
153 cputime64_to_jiffies64(cur_nice);
154 dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice;
155 return idle_time + jiffies_to_usecs(cur_nice_jiffies);
156 }
157 return idle_time; 136 return idle_time;
158} 137}
159 138
@@ -319,6 +298,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
319 dbs_info = &per_cpu(cpu_dbs_info, j); 298 dbs_info = &per_cpu(cpu_dbs_info, j);
320 dbs_info->prev_cpu_idle = get_cpu_idle_time(j, 299 dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
321 &dbs_info->prev_cpu_wall); 300 &dbs_info->prev_cpu_wall);
301 if (dbs_tuners_ins.ignore_nice)
302 dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
303
322 } 304 }
323 mutex_unlock(&dbs_mutex); 305 mutex_unlock(&dbs_mutex);
324 306
@@ -419,6 +401,23 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
419 j_dbs_info->prev_cpu_idle); 401 j_dbs_info->prev_cpu_idle);
420 j_dbs_info->prev_cpu_idle = cur_idle_time; 402 j_dbs_info->prev_cpu_idle = cur_idle_time;
421 403
404 if (dbs_tuners_ins.ignore_nice) {
405 cputime64_t cur_nice;
406 unsigned long cur_nice_jiffies;
407
408 cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
409 j_dbs_info->prev_cpu_nice);
410 /*
411 * Assumption: nice time between sampling periods will
412 * be less than 2^32 jiffies for 32 bit sys
413 */
414 cur_nice_jiffies = (unsigned long)
415 cputime64_to_jiffies64(cur_nice);
416
417 j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
418 idle_time += jiffies_to_usecs(cur_nice_jiffies);
419 }
420
422 if (unlikely(!wall_time || wall_time < idle_time)) 421 if (unlikely(!wall_time || wall_time < idle_time))
423 continue; 422 continue;
424 423
@@ -575,6 +574,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
575 574
576 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, 575 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
577 &j_dbs_info->prev_cpu_wall); 576 &j_dbs_info->prev_cpu_wall);
577 if (dbs_tuners_ins.ignore_nice) {
578 j_dbs_info->prev_cpu_nice =
579 kstat_cpu(j).cpustat.nice;
580 }
578 } 581 }
579 this_dbs_info->cpu = cpu; 582 this_dbs_info->cpu = cpu;
580 /* 583 /*
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index 7be2cf3514e7..a5dd7a665aa8 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -412,6 +412,7 @@ fw_card_add(struct fw_card *card,
412 412 {
413 413 u32 *config_rom;
414 414 size_t length;
415 int err;
415 416
416 card->max_receive = max_receive; 417 card->max_receive = max_receive;
417 card->link_speed = link_speed; 418 card->link_speed = link_speed;
@@ -422,7 +423,13 @@ fw_card_add(struct fw_card *card,
422 list_add_tail(&card->link, &card_list); 423 list_add_tail(&card->link, &card_list);
423 mutex_unlock(&card_mutex); 424 mutex_unlock(&card_mutex);
424 425
425 return card->driver->enable(card, config_rom, length); 426 err = card->driver->enable(card, config_rom, length);
427 if (err < 0) {
428 mutex_lock(&card_mutex);
429 list_del(&card->link);
430 mutex_unlock(&card_mutex);
431 }
432 return err;
426} 433}
427EXPORT_SYMBOL(fw_card_add); 434EXPORT_SYMBOL(fw_card_add);
428 435
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 5130b72d593c..4be3acbaaf9a 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -70,7 +70,7 @@ config DRM_I915
70 70 select FB_CFB_FILLRECT
71 71 select FB_CFB_COPYAREA
72 72 select FB_CFB_IMAGEBLIT
73 depends on FB 73 select FB
74 74 tristate "i915 driver"
75 75 help
76 76 Choose this option if you have a system that has Intel 830M, 845G,
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 69aa0ab28403..3795dbc0f50c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -276,6 +276,7 @@ int drm_irq_uninstall(struct drm_device * dev)
276 276 for (i = 0; i < dev->num_crtcs; i++) {
277 277 DRM_WAKEUP(&dev->vbl_queue[i]);
278 278 dev->vblank_enabled[i] = 0;
279 dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
279 } 280 }
280 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 281 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
281 282
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 803bc9e7ce3c..bcc869bc4092 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -171,9 +171,14 @@ EXPORT_SYMBOL(drm_core_ioremap);
171 171
172void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev) 172void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev)
173{ 173{
174 map->handle = ioremap_wc(map->offset, map->size); 174 if (drm_core_has_AGP(dev) &&
175 dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
176 map->handle = agp_remap(map->offset, map->size, dev);
177 else
178 map->handle = ioremap_wc(map->offset, map->size);
175} 179}
176EXPORT_SYMBOL(drm_core_ioremap_wc); 180EXPORT_SYMBOL(drm_core_ioremap_wc);
181
177void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev) 182void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
178{ 183{
179 if (!map->handle || !map->size) 184 if (!map->handle || !map->size)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ee64b7301f67..81f1cff56fd5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -731,8 +731,11 @@ static int i915_getparam(struct drm_device *dev, void *data,
731 731 case I915_PARAM_HAS_GEM:
732 732 value = dev_priv->has_gem;
733 733 break;
734 case I915_PARAM_NUM_FENCES_AVAIL:
735 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
736 break;
734 default: 737 default:
735 DRM_ERROR("Unknown parameter %d\n", param->param); 738 DRM_DEBUG("Unknown parameter %d\n", param->param);
736 return -EINVAL; 739 return -EINVAL;
737 } 740 }
738 741
@@ -764,8 +767,15 @@ static int i915_setparam(struct drm_device *dev, void *data,
764 767 case I915_SETPARAM_ALLOW_BATCHBUFFER:
765 768 dev_priv->allow_batchbuffer = param->value;
766 769 break;
770 case I915_SETPARAM_NUM_USED_FENCES:
771 if (param->value > dev_priv->num_fence_regs ||
772 param->value < 0)
773 return -EINVAL;
774 /* Userspace can use first N regs */
775 dev_priv->fence_reg_start = param->value;
776 break;
767 default: 777 default:
768 DRM_ERROR("unknown parameter %d\n", param->param); 778 DRM_DEBUG("unknown parameter %d\n", param->param);
769 return -EINVAL; 779 return -EINVAL;
770 } 780 }
771 781
@@ -966,10 +976,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
966 if (ret) 976 if (ret)
967 goto kfree_devname; 977 goto kfree_devname;
968 978
969 dev_priv->mm.gtt_mapping =
970 io_mapping_create_wc(dev->agp->base,
971 dev->agp->agp_info.aper_size * 1024*1024);
972
973 979 /* Allow hardware batchbuffers unless told otherwise.
974 980  */
975 981 dev_priv->allow_batchbuffer = 1;
@@ -1081,6 +1087,23 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1081 goto free_priv; 1087 goto free_priv;
1082 } 1088 }
1083 1089
1090 dev_priv->mm.gtt_mapping =
1091 io_mapping_create_wc(dev->agp->base,
1092 dev->agp->agp_info.aper_size * 1024*1024);
1093 /* Set up a WC MTRR for non-PAT systems. This is more common than
1094 * one would think, because the kernel disables PAT on first
1095 * generation Core chips because WC PAT gets overridden by a UC
1096 * MTRR if present. Even if a UC MTRR isn't present.
1097 */
1098 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
1099 dev->agp->agp_info.aper_size *
1100 1024 * 1024,
1101 MTRR_TYPE_WRCOMB, 1);
1102 if (dev_priv->mm.gtt_mtrr < 0) {
1103 DRM_INFO("MTRR allocation failed\n. Graphics "
1104 "performance may suffer.\n");
1105 }
1106
1084 1107 #ifdef CONFIG_HIGHMEM64G
1085 1108 /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
1086 1109 dev_priv->has_gem = 0;
@@ -1089,6 +1112,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1089 dev_priv->has_gem = 1; 1112 dev_priv->has_gem = 1;
1090#endif 1113#endif
1091 1114
1115 dev->driver->get_vblank_counter = i915_get_vblank_counter;
1116 if (IS_GM45(dev))
1117 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
1118
1092 i915_gem_load(dev); 1119 i915_gem_load(dev);
1093 1120
1094 /* Init HWS */ 1121 /* Init HWS */
@@ -1145,8 +1172,14 @@ int i915_driver_unload(struct drm_device *dev)
1145{ 1172{
1146 struct drm_i915_private *dev_priv = dev->dev_private; 1173 struct drm_i915_private *dev_priv = dev->dev_private;
1147 1174
1175 io_mapping_free(dev_priv->mm.gtt_mapping);
1176 if (dev_priv->mm.gtt_mtrr >= 0) {
1177 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
1178 dev->agp->agp_info.aper_size * 1024 * 1024);
1179 dev_priv->mm.gtt_mtrr = -1;
1180 }
1181
1148 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1182 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1149 io_mapping_free(dev_priv->mm.gtt_mapping);
1150 drm_irq_uninstall(dev); 1183 drm_irq_uninstall(dev);
1151 } 1184 }
1152 1185
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f8b3df0926c0..aac12ee31a46 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -112,7 +112,6 @@ static struct drm_driver driver = {
112 112 .suspend = i915_suspend,
113 113 .resume = i915_resume,
114 114 .device_is_agp = i915_driver_device_is_agp,
115 .get_vblank_counter = i915_get_vblank_counter,
116 115 .enable_vblank = i915_enable_vblank,
117 116 .disable_vblank = i915_disable_vblank,
118 117 .irq_preinstall = i915_driver_irq_preinstall,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e13518252007..7325363164f8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -284,6 +284,7 @@ typedef struct drm_i915_private {
284 struct drm_mm gtt_space; 284 struct drm_mm gtt_space;
285 285
286 struct io_mapping *gtt_mapping; 286 struct io_mapping *gtt_mapping;
287 int gtt_mtrr;
287 288
288 /** 289 /**
289 * List of objects currently involved in rendering from the 290 * List of objects currently involved in rendering from the
@@ -534,6 +535,7 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
534 535 extern int i915_enable_vblank(struct drm_device *dev, int crtc);
535 536 extern void i915_disable_vblank(struct drm_device *dev, int crtc);
536 537 extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
538extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
537 539 extern int i915_vblank_swap(struct drm_device *dev, void *data,
538 540 struct drm_file *file_priv);
539 541 extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
@@ -601,6 +603,7 @@ int i915_gem_init_object(struct drm_gem_object *obj);
601 603 void i915_gem_free_object(struct drm_gem_object *obj);
602 604 int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
603 605 void i915_gem_object_unpin(struct drm_gem_object *obj);
606int i915_gem_object_unbind(struct drm_gem_object *obj);
604 607 void i915_gem_lastclose(struct drm_device *dev);
605 608 uint32_t i915_get_gem_seqno(struct drm_device *dev);
606 609 void i915_gem_retire_requests(struct drm_device *dev);
@@ -784,6 +787,11 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
784 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) 787 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
785 788
786#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) 789#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
790/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
791 * rows, which changed the alignment requirements and fence programming.
792 */
793#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
794 IS_I915GM(dev)))
787#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev)) 795#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev))
788 796
789#define PRIMARY_RINGBUFFER_SIZE (128*1024) 797#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index debad5c04cc0..818576654092 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -52,7 +52,7 @@ static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
52 52 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
53 53 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
54 54 unsigned alignment);
55static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj); 55static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
56 56 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
57 57 static int i915_gem_evict_something(struct drm_device *dev);
58 58 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
@@ -567,6 +567,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
567 567 pgoff_t page_offset;
568 568 unsigned long pfn;
569 569 int ret = 0;
570 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
570 571
571 /* We don't use vmf->pgoff since that has the fake offset */ 572 /* We don't use vmf->pgoff since that has the fake offset */
572 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> 573 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
@@ -585,8 +586,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
585 586
586 /* Need a new fence register? */ 587 /* Need a new fence register? */
587 if (obj_priv->fence_reg == I915_FENCE_REG_NONE && 588 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
588 obj_priv->tiling_mode != I915_TILING_NONE) 589 obj_priv->tiling_mode != I915_TILING_NONE) {
589 i915_gem_object_get_fence_reg(obj); 590 ret = i915_gem_object_get_fence_reg(obj, write);
591 if (ret) {
592 mutex_unlock(&dev->struct_mutex);
593 return VM_FAULT_SIGBUS;
594 }
595 }
590 596
591 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + 597 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
592 page_offset; 598 page_offset;
@@ -1211,7 +1217,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1211 1217 /**
1212 1218  * Unbinds an object from the GTT aperture.
1213 1219  */
1214static int 1220int
1215i915_gem_object_unbind(struct drm_gem_object *obj) 1221i915_gem_object_unbind(struct drm_gem_object *obj)
1216{ 1222{
1217 struct drm_device *dev = obj->dev; 1223 struct drm_device *dev = obj->dev;
@@ -1445,21 +1451,26 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
1445 1451 drm_i915_private_t *dev_priv = dev->dev_private;
1446 1452 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1447 1453 int regnum = obj_priv->fence_reg;
1454 int tile_width;
1448 uint32_t val; 1455 uint32_t val;
1449 uint32_t pitch_val; 1456 uint32_t pitch_val;
1450 1457
1451 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || 1458 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
1452 (obj_priv->gtt_offset & (obj->size - 1))) { 1459 (obj_priv->gtt_offset & (obj->size - 1))) {
1453 WARN(1, "%s: object not 1M or size aligned\n", __func__); 1460 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
1461 __func__, obj_priv->gtt_offset, obj->size);
1454 return; 1462 return;
1455 } 1463 }
1456 1464
1457 if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) || 1465 if (obj_priv->tiling_mode == I915_TILING_Y &&
1458 IS_I945GM(dev) || 1466 HAS_128_BYTE_Y_TILING(dev))
1459 IS_G33(dev))) 1467 tile_width = 128;
1460 pitch_val = (obj_priv->stride / 128) - 1;
1461 else 1468 else
1462 pitch_val = (obj_priv->stride / 512) - 1; 1469 tile_width = 512;
1470
1471 /* Note: pitch better be a power of two tile widths */
1472 pitch_val = obj_priv->stride / tile_width;
1473 pitch_val = ffs(pitch_val) - 1;
1463 1474
1464 val = obj_priv->gtt_offset; 1475 val = obj_priv->gtt_offset;
1465 if (obj_priv->tiling_mode == I915_TILING_Y) 1476 if (obj_priv->tiling_mode == I915_TILING_Y)
@@ -1483,7 +1494,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
1483 1494
1484 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || 1495 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
1485 (obj_priv->gtt_offset & (obj->size - 1))) { 1496 (obj_priv->gtt_offset & (obj->size - 1))) {
1486 WARN(1, "%s: object not 1M or size aligned\n", __func__); 1497 WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
1498 __func__, obj_priv->gtt_offset);
1487 return; 1499 return;
1488 } 1500 }
1489 1501
@@ -1503,6 +1515,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
1503 1515 /**
1504 1516  * i915_gem_object_get_fence_reg - set up a fence reg for an object
1505 1517  * @obj: object to map through a fence reg
1518 * @write: object is about to be written
1506 1519  *
1507 1520  * When mapping objects through the GTT, userspace wants to be able to write
1508 1521  * to them without having to worry about swizzling if the object is tiled.
@@ -1513,8 +1526,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
1513 * It then sets up the reg based on the object's properties: address, pitch 1526 * It then sets up the reg based on the object's properties: address, pitch
1514 * and tiling format. 1527 * and tiling format.
1515 */ 1528 */
1516static void 1529static int
1517i915_gem_object_get_fence_reg(struct drm_gem_object *obj) 1530i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
1518{ 1531{
1519 struct drm_device *dev = obj->dev; 1532 struct drm_device *dev = obj->dev;
1520 struct drm_i915_private *dev_priv = dev->dev_private; 1533 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1527,12 +1540,18 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
1527 WARN(1, "allocating a fence for non-tiled object?\n"); 1540 WARN(1, "allocating a fence for non-tiled object?\n");
1528 break; 1541 break;
1529 case I915_TILING_X: 1542 case I915_TILING_X:
1530 WARN(obj_priv->stride & (512 - 1), 1543 if (!obj_priv->stride)
1531 "object is X tiled but has non-512B pitch\n"); 1544 return -EINVAL;
1545 WARN((obj_priv->stride & (512 - 1)),
1546 "object 0x%08x is X tiled but has non-512B pitch\n",
1547 obj_priv->gtt_offset);
1532 break; 1548 break;
1533 case I915_TILING_Y: 1549 case I915_TILING_Y:
1534 WARN(obj_priv->stride & (128 - 1), 1550 if (!obj_priv->stride)
1535 "object is Y tiled but has non-128B pitch\n"); 1551 return -EINVAL;
1552 WARN((obj_priv->stride & (128 - 1)),
1553 "object 0x%08x is Y tiled but has non-128B pitch\n",
1554 obj_priv->gtt_offset);
1536 break; 1555 break;
1537 } 1556 }
1538 1557
@@ -1563,10 +1582,11 @@ try_again:
1563 * objects to finish before trying again. 1582 * objects to finish before trying again.
1564 */ 1583 */
1565 if (i == dev_priv->num_fence_regs) { 1584 if (i == dev_priv->num_fence_regs) {
1566 ret = i915_gem_object_wait_rendering(reg->obj); 1585 ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0);
1567 if (ret) { 1586 if (ret) {
1568 WARN(ret, "wait_rendering failed: %d\n", ret); 1587 WARN(ret != -ERESTARTSYS,
1569 return; 1588 "switch to GTT domain failed: %d\n", ret);
1589 return ret;
1570 } 1590 }
1571 goto try_again; 1591 goto try_again;
1572 } 1592 }
@@ -1591,6 +1611,8 @@ try_again:
1591 i915_write_fence_reg(reg); 1611 i915_write_fence_reg(reg);
1592 else 1612 else
1593 i830_write_fence_reg(reg); 1613 i830_write_fence_reg(reg);
1614
1615 return 0;
1594} 1616}
1595 1617
1596/** 1618/**
@@ -1631,7 +1653,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1631 if (dev_priv->mm.suspended) 1653 if (dev_priv->mm.suspended)
1632 return -EBUSY; 1654 return -EBUSY;
1633 if (alignment == 0) 1655 if (alignment == 0)
1634 alignment = PAGE_SIZE; 1656 alignment = i915_gem_get_gtt_alignment(obj);
1635 if (alignment & (PAGE_SIZE - 1)) { 1657 if (alignment & (PAGE_SIZE - 1)) {
1636 DRM_ERROR("Invalid object alignment requested %u\n", alignment); 1658 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1637 return -EINVAL; 1659 return -EINVAL;
@@ -2652,6 +2674,14 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
2652 DRM_ERROR("Failure to bind: %d", ret); 2674 DRM_ERROR("Failure to bind: %d", ret);
2653 return ret; 2675 return ret;
2654 } 2676 }
2677 /*
2678 * Pre-965 chips need a fence register set up in order to
2679 * properly handle tiled surfaces.
2680 */
2681 if (!IS_I965G(dev) &&
2682 obj_priv->fence_reg == I915_FENCE_REG_NONE &&
2683 obj_priv->tiling_mode != I915_TILING_NONE)
2684 i915_gem_object_get_fence_reg(obj, true);
2655 } 2685 }
2656 obj_priv->pin_count++; 2686 obj_priv->pin_count++;
2657 2687
@@ -3229,10 +3259,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3229 dev_priv->mm.wedged = 0; 3259 dev_priv->mm.wedged = 0;
3230 } 3260 }
3231 3261
3232 dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
3233 dev->agp->agp_info.aper_size
3234 * 1024 * 1024);
3235
3236 mutex_lock(&dev->struct_mutex); 3262 mutex_lock(&dev->struct_mutex);
3237 dev_priv->mm.suspended = 0; 3263 dev_priv->mm.suspended = 0;
3238 3264
@@ -3255,7 +3281,6 @@ int
3255i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 3281i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3256 struct drm_file *file_priv) 3282 struct drm_file *file_priv)
3257{ 3283{
3258 drm_i915_private_t *dev_priv = dev->dev_private;
3259 int ret; 3284 int ret;
3260 3285
3261 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3286 if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -3264,7 +3289,6 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3264 ret = i915_gem_idle(dev); 3289 ret = i915_gem_idle(dev);
3265 drm_irq_uninstall(dev); 3290 drm_irq_uninstall(dev);
3266 3291
3267 io_mapping_free(dev_priv->mm.gtt_mapping);
3268 return ret; 3292 return ret;
3269} 3293}
3270 3294
@@ -3273,6 +3297,9 @@ i915_gem_lastclose(struct drm_device *dev)
3273{ 3297{
3274 int ret; 3298 int ret;
3275 3299
3300 if (drm_core_check_feature(dev, DRIVER_MODESET))
3301 return;
3302
3276 ret = i915_gem_idle(dev); 3303 ret = i915_gem_idle(dev);
3277 if (ret) 3304 if (ret)
3278 DRM_ERROR("failed to idle hardware: %d\n", ret); 3305 DRM_ERROR("failed to idle hardware: %d\n", ret);
@@ -3294,7 +3321,7 @@ i915_gem_load(struct drm_device *dev)
3294 /* Old X drivers will take 0-2 for front, back, depth buffers */ 3321 /* Old X drivers will take 0-2 for front, back, depth buffers */
3295 dev_priv->fence_reg_start = 3; 3322 dev_priv->fence_reg_start = 3;
3296 3323
3297 if (IS_I965G(dev)) 3324 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3298 dev_priv->num_fence_regs = 16; 3325 dev_priv->num_fence_regs = 16;
3299 else 3326 else
3300 dev_priv->num_fence_regs = 8; 3327 dev_priv->num_fence_regs = 8;
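
The pitch_val computation introduced in i915_write_fence_reg() above stores log2 of the stride measured in tile widths: 128 bytes for Y tiling on parts with 128-byte Y tiles, 512 bytes otherwise. A minimal userspace sketch of that encoding, assuming the stride is a power-of-two multiple of the tile width as the in-hunk comment requires (the helper name and printed values are illustrative, not part of the patch):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Illustrative mirror of the pitch_val computation in i915_write_fence_reg(). */
static int fence_pitch_val(int stride, int y_tiled, int has_128_byte_y_tiling)
{
	int tile_width = (y_tiled && has_128_byte_y_tiling) ? 128 : 512;
	int pitch_in_tiles = stride / tile_width;

	/* stride is assumed to be a power-of-two number of tile widths */
	return ffs(pitch_in_tiles) - 1;		/* log2(pitch_in_tiles) */
}

int main(void)
{
	printf("%d\n", fence_pitch_val(2048, 0, 0));	/* 4 tile widths -> 2 */
	printf("%d\n", fence_pitch_val(1024, 1, 1));	/* 8 tile widths -> 3 */
	return 0;
}
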
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 241f39b7f460..fa1685cba840 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -173,6 +173,73 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
173 dev_priv->mm.bit_6_swizzle_y = swizzle_y; 173 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
174} 174}
175 175
176
177/**
178 * Returns the size of the fence for a tiled object of the given size.
179 */
180static int
181i915_get_fence_size(struct drm_device *dev, int size)
182{
183 int i;
184 int start;
185
186 if (IS_I965G(dev)) {
187 /* The 965 can have fences at any page boundary. */
188 return ALIGN(size, 4096);
189 } else {
190 /* Align the size to a power of two greater than the smallest
191 * fence size.
192 */
193 if (IS_I9XX(dev))
194 start = 1024 * 1024;
195 else
196 start = 512 * 1024;
197
198 for (i = start; i < size; i <<= 1)
199 ;
200
201 return i;
202 }
203}
204
205/* Check pitch constraints for all chips & tiling formats */
206static bool
207i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
208{
209 int tile_width;
210
211 /* Linear is always fine */
212 if (tiling_mode == I915_TILING_NONE)
213 return true;
214
215 if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
216 tile_width = 128;
217 else
218 tile_width = 512;
219
220 /* 965+ just needs multiples of tile width */
221 if (IS_I965G(dev)) {
222 if (stride & (tile_width - 1))
223 return false;
224 return true;
225 }
226
227 /* Pre-965 needs power of two tile widths */
228 if (stride < tile_width)
229 return false;
230
231 if (stride & (stride - 1))
232 return false;
233
234 /* We don't handle the aperture area covered by the fence being bigger
235 * than the object size.
236 */
237 if (i915_get_fence_size(dev, size) != size)
238 return false;
239
240 return true;
241}
242
176/** 243/**
177 * Sets the tiling mode of an object, returning the required swizzling of 244 * Sets the tiling mode of an object, returning the required swizzling of
178 * bit 6 of addresses in the object. 245 * bit 6 of addresses in the object.
@@ -191,6 +258,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
191 return -EINVAL; 258 return -EINVAL;
192 obj_priv = obj->driver_private; 259 obj_priv = obj->driver_private;
193 260
261 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
262 drm_gem_object_unreference(obj);
263 return -EINVAL;
264 }
265
194 mutex_lock(&dev->struct_mutex); 266 mutex_lock(&dev->struct_mutex);
195 267
196 if (args->tiling_mode == I915_TILING_NONE) { 268 if (args->tiling_mode == I915_TILING_NONE) {
@@ -207,7 +279,24 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
207 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 279 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
208 } 280 }
209 } 281 }
210 obj_priv->tiling_mode = args->tiling_mode; 282 if (args->tiling_mode != obj_priv->tiling_mode) {
283 int ret;
284
285 /* Unbind the object, as switching tiling means we're
286 * switching the cache organization due to fencing, probably.
287 */
288 ret = i915_gem_object_unbind(obj);
289 if (ret != 0) {
290 WARN(ret != -ERESTARTSYS,
291 "failed to unbind object for tiling switch");
292 args->tiling_mode = obj_priv->tiling_mode;
293 mutex_unlock(&dev->struct_mutex);
294 drm_gem_object_unreference(obj);
295
296 return ret;
297 }
298 obj_priv->tiling_mode = args->tiling_mode;
299 }
211 obj_priv->stride = args->stride; 300 obj_priv->stride = args->stride;
212 301
213 mutex_unlock(&dev->struct_mutex); 302 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6290219de6c8..548ff2c66431 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -174,6 +174,19 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
174 return count; 174 return count;
175} 175}
176 176
177u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
178{
179 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
180 int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
181
182 if (!i915_pipe_enabled(dev, pipe)) {
183 DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
184 return 0;
185 }
186
187 return I915_READ(reg);
188}
189
177irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 190irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
178{ 191{
179 struct drm_device *dev = (struct drm_device *) arg; 192 struct drm_device *dev = (struct drm_device *) arg;
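
gm45_get_vblank_counter() above reads the dedicated frame counter register instead of reconstructing a count from the frame/pixel registers. The hook-up into the DRM core belongs to the i915_dma.c changes, which this excerpt does not show; the sketch below only illustrates the usual pattern, and the IS_GM45() predicate and helper name are assumptions rather than lines from the patch:

/* Sketch only: per-chipset selection of the vblank counter callback.
 * The predicate and the helper name are assumed; the real wiring lives
 * in i915_dma.c and is not part of the hunks shown here.
 */
static void i915_select_vblank_counter(struct drm_device *dev)
{
	if (IS_GM45(dev))
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	else
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
}
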
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 273162579e1b..9d6539a868b3 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -186,12 +186,12 @@
186#define FENCE_REG_830_0 0x2000 186#define FENCE_REG_830_0 0x2000
187#define I830_FENCE_START_MASK 0x07f80000 187#define I830_FENCE_START_MASK 0x07f80000
188#define I830_FENCE_TILING_Y_SHIFT 12 188#define I830_FENCE_TILING_Y_SHIFT 12
189#define I830_FENCE_SIZE_BITS(size) ((get_order(size >> 19) - 1) << 8) 189#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
190#define I830_FENCE_PITCH_SHIFT 4 190#define I830_FENCE_PITCH_SHIFT 4
191#define I830_FENCE_REG_VALID (1<<0) 191#define I830_FENCE_REG_VALID (1<<0)
192 192
193#define I915_FENCE_START_MASK 0x0ff00000 193#define I915_FENCE_START_MASK 0x0ff00000
194#define I915_FENCE_SIZE_BITS(size) ((get_order(size >> 20) - 1) << 8) 194#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
195 195
196#define FENCE_REG_965_0 0x03000 196#define FENCE_REG_965_0 0x03000
197#define I965_FENCE_PITCH_SHIFT 2 197#define I965_FENCE_PITCH_SHIFT 2
@@ -1371,6 +1371,9 @@
1371#define PIPE_FRAME_LOW_SHIFT 24 1371#define PIPE_FRAME_LOW_SHIFT 24
1372#define PIPE_PIXEL_MASK 0x00ffffff 1372#define PIPE_PIXEL_MASK 0x00ffffff
1373#define PIPE_PIXEL_SHIFT 0 1373#define PIPE_PIXEL_SHIFT 0
1374/* GM45+ just has to be different */
1375#define PIPEA_FRMCOUNT_GM45 0x70040
1376#define PIPEA_FLIPCOUNT_GM45 0x70044
1374 1377
1375/* Cursor A & B regs */ 1378/* Cursor A & B regs */
1376#define CURACNTR 0x70080 1379#define CURACNTR 0x70080
@@ -1439,6 +1442,9 @@
1439#define PIPEBSTAT 0x71024 1442#define PIPEBSTAT 0x71024
1440#define PIPEBFRAMEHIGH 0x71040 1443#define PIPEBFRAMEHIGH 0x71040
1441#define PIPEBFRAMEPIXEL 0x71044 1444#define PIPEBFRAMEPIXEL 0x71044
1445#define PIPEB_FRMCOUNT_GM45 0x71040
1446#define PIPEB_FLIPCOUNT_GM45 0x71044
1447
1442 1448
1443/* Display B control */ 1449/* Display B control */
1444#define DSPBCNTR 0x71180 1450#define DSPBCNTR 0x71180
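
The switch from get_order() to ffs() in the fence size macros matters because get_order() takes a byte count and returns a page order: get_order(size >> 20) is 0 for any object up to 4GB, so the old macro always evaluated to (0 - 1) << 8. With ffs() the field becomes log2 of the size in 1MB (or 512KB) units, which is what the fence registers expect. A quick userspace check of the new I915_FENCE_SIZE_BITS() form (the printed values are illustrative):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)

int main(void)
{
	printf("0x%x\n", I915_FENCE_SIZE_BITS(1 << 20));	/* 1MB  -> 0x0   */
	printf("0x%x\n", I915_FENCE_SIZE_BITS(16 << 20));	/* 16MB -> 0x400 */
	return 0;
}
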
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 31c3732b7a69..bbdd72909a11 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -755,6 +755,8 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
755 case INTEL_OUTPUT_SDVO: 755 case INTEL_OUTPUT_SDVO:
756 case INTEL_OUTPUT_HDMI: 756 case INTEL_OUTPUT_HDMI:
757 is_sdvo = true; 757 is_sdvo = true;
758 if (intel_output->needs_tv_clock)
759 is_tv = true;
758 break; 760 break;
759 case INTEL_OUTPUT_DVO: 761 case INTEL_OUTPUT_DVO:
760 is_dvo = true; 762 is_dvo = true;
@@ -1452,6 +1454,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
1452 1454
1453static void intel_setup_outputs(struct drm_device *dev) 1455static void intel_setup_outputs(struct drm_device *dev)
1454{ 1456{
1457 struct drm_i915_private *dev_priv = dev->dev_private;
1455 struct drm_connector *connector; 1458 struct drm_connector *connector;
1456 1459
1457 intel_crt_init(dev); 1460 intel_crt_init(dev);
@@ -1463,13 +1466,16 @@ static void intel_setup_outputs(struct drm_device *dev)
1463 if (IS_I9XX(dev)) { 1466 if (IS_I9XX(dev)) {
1464 int found; 1467 int found;
1465 1468
1466 found = intel_sdvo_init(dev, SDVOB); 1469 if (I915_READ(SDVOB) & SDVO_DETECTED) {
1467 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 1470 found = intel_sdvo_init(dev, SDVOB);
1468 intel_hdmi_init(dev, SDVOB); 1471 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
1469 1472 intel_hdmi_init(dev, SDVOB);
1470 found = intel_sdvo_init(dev, SDVOC); 1473 }
1471 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 1474 if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) {
1472 intel_hdmi_init(dev, SDVOC); 1475 found = intel_sdvo_init(dev, SDVOC);
1476 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
1477 intel_hdmi_init(dev, SDVOC);
1478 }
1473 } else 1479 } else
1474 intel_dvo_init(dev); 1480 intel_dvo_init(dev);
1475 1481
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8a4cc50c5b4e..957daef8edff 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -82,6 +82,7 @@ struct intel_output {
82 struct intel_i2c_chan *i2c_bus; /* for control functions */ 82 struct intel_i2c_chan *i2c_bus; /* for control functions */
83 struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */ 83 struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
84 bool load_detect_temp; 84 bool load_detect_temp;
85 bool needs_tv_clock;
85 void *dev_priv; 86 void *dev_priv;
86}; 87};
87 88
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b36a5214d8df..6d4f91265354 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -27,6 +27,7 @@
27 * Jesse Barnes <jesse.barnes@intel.com> 27 * Jesse Barnes <jesse.barnes@intel.com>
28 */ 28 */
29 29
30#include <linux/dmi.h>
30#include <linux/i2c.h> 31#include <linux/i2c.h>
31#include "drmP.h" 32#include "drmP.h"
32#include "drm.h" 33#include "drm.h"
@@ -311,10 +312,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
311 if (dev_priv->panel_fixed_mode != NULL) { 312 if (dev_priv->panel_fixed_mode != NULL) {
312 struct drm_display_mode *mode; 313 struct drm_display_mode *mode;
313 314
314 mutex_lock(&dev->mode_config.mutex);
315 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); 315 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
316 drm_mode_probed_add(connector, mode); 316 drm_mode_probed_add(connector, mode);
317 mutex_unlock(&dev->mode_config.mutex);
318 317
319 return 1; 318 return 1;
320 } 319 }
@@ -405,6 +404,16 @@ void intel_lvds_init(struct drm_device *dev)
405 u32 lvds; 404 u32 lvds;
406 int pipe; 405 int pipe;
407 406
407 /* Blacklist machines that we know falsely report LVDS. */
408 /* FIXME: add a check for the Aopen Mini PC */
409
410 /* Apple Mac Mini Core Duo and Mac Mini Core 2 Duo */
411 if (dmi_match(DMI_PRODUCT_NAME, "Macmini1,1") ||
412 dmi_match(DMI_PRODUCT_NAME, "Macmini2,1")) {
413 DRM_DEBUG("Skipping LVDS initialization for Apple Mac Mini\n");
414 return;
415 }
416
408 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 417 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
409 if (!intel_output) { 418 if (!intel_output) {
410 return; 419 return;
@@ -458,7 +467,7 @@ void intel_lvds_init(struct drm_device *dev)
458 dev_priv->panel_fixed_mode = 467 dev_priv->panel_fixed_mode =
459 drm_mode_duplicate(dev, scan); 468 drm_mode_duplicate(dev, scan);
460 mutex_unlock(&dev->mode_config.mutex); 469 mutex_unlock(&dev->mode_config.mutex);
461 goto out; /* FIXME: check for quirks */ 470 goto out;
462 } 471 }
463 mutex_unlock(&dev->mode_config.mutex); 472 mutex_unlock(&dev->mode_config.mutex);
464 } 473 }
@@ -492,7 +501,7 @@ void intel_lvds_init(struct drm_device *dev)
492 if (dev_priv->panel_fixed_mode) { 501 if (dev_priv->panel_fixed_mode) {
493 dev_priv->panel_fixed_mode->type |= 502 dev_priv->panel_fixed_mode->type |=
494 DRM_MODE_TYPE_PREFERRED; 503 DRM_MODE_TYPE_PREFERRED;
495 goto out; /* FIXME: check for quirks */ 504 goto out;
496 } 505 }
497 } 506 }
498 507
@@ -500,38 +509,6 @@ void intel_lvds_init(struct drm_device *dev)
500 if (!dev_priv->panel_fixed_mode) 509 if (!dev_priv->panel_fixed_mode)
501 goto failed; 510 goto failed;
502 511
503 /* FIXME: detect aopen & mac mini type stuff automatically? */
504 /*
505 * Blacklist machines with BIOSes that list an LVDS panel without
506 * actually having one.
507 */
508 if (IS_I945GM(dev)) {
509 /* aopen mini pc */
510 if (dev->pdev->subsystem_vendor == 0xa0a0)
511 goto failed;
512
513 if ((dev->pdev->subsystem_vendor == 0x8086) &&
514 (dev->pdev->subsystem_device == 0x7270)) {
515 /* It's a Mac Mini or Macbook Pro.
516 *
517 * Apple hardware is out to get us. The macbook pro
518 * has a real LVDS panel, but the mac mini does not,
519 * and they have the same device IDs. We'll
520 * distinguish by panel size, on the assumption
521 * that Apple isn't about to make any machines with an
522 * 800x600 display.
523 */
524
525 if (dev_priv->panel_fixed_mode != NULL &&
526 dev_priv->panel_fixed_mode->hdisplay == 800 &&
527 dev_priv->panel_fixed_mode->vdisplay == 600) {
528 DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n");
529 goto failed;
530 }
531 }
532 }
533
534
535out: 512out:
536 drm_sysfs_connector_add(connector); 513 drm_sysfs_connector_add(connector);
537 return; 514 return;
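
The two dmi_match() calls replacing the old subsystem-ID heuristic could also be written as a table, which scales better once the FIXME about the AOpen Mini PC is addressed. A hedged sketch using the standard dmi_system_id / dmi_check_system() interface (the table name, ident strings and callback are illustrative, not part of this patch):

/* Sketch only: table-driven form of the same LVDS blacklist,
 * relying on <linux/dmi.h>, which this patch already includes.
 */
static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
	DRM_DEBUG("Skipping LVDS initialization for %s\n", id->ident);
	return 1;
}

static const struct dmi_system_id intel_no_lvds[] = {
	{
		.callback = intel_no_lvds_dmi_callback,
		.ident = "Apple Mac Mini (Core series)",
		.matches = { DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1") },
	},
	{
		.callback = intel_no_lvds_dmi_callback,
		.ident = "Apple Mac Mini (Core 2 series)",
		.matches = { DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1") },
	},
	{ }	/* terminating entry */
};

/* In intel_lvds_init(), the blacklist check then becomes: */
	if (dmi_check_system(intel_no_lvds))
		return;
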
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 407215469102..a30508b639ba 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -40,13 +40,59 @@
40struct intel_sdvo_priv { 40struct intel_sdvo_priv {
41 struct intel_i2c_chan *i2c_bus; 41 struct intel_i2c_chan *i2c_bus;
42 int slaveaddr; 42 int slaveaddr;
43
44 /* Register for the SDVO device: SDVOB or SDVOC */
43 int output_device; 45 int output_device;
44 46
45 u16 active_outputs; 47 /* Active outputs controlled by this SDVO output */
48 uint16_t controlled_output;
46 49
50 /*
51 * Capabilities of the SDVO device returned by
52 * i830_sdvo_get_capabilities()
53 */
47 struct intel_sdvo_caps caps; 54 struct intel_sdvo_caps caps;
55
56 /* Pixel clock limitations reported by the SDVO device, in kHz */
48 int pixel_clock_min, pixel_clock_max; 57 int pixel_clock_min, pixel_clock_max;
49 58
59 /**
60 * This is set if we're going to treat the device as TV-out.
61 *
62 * While we have these nice friendly flags for output types that ought
63 * to decide this for us, the S-Video output on our HDMI+S-Video card
64 * shows up as RGB1 (VGA).
65 */
66 bool is_tv;
67
68 /**
69 * This is set if we treat the device as HDMI, instead of DVI.
70 */
71 bool is_hdmi;
72
73 /**
74 * Returned SDTV resolutions allowed for the current format, if the
75 * device reported it.
76 */
77 struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
78
79 /**
80 * Current selected TV format.
81 *
82 * This is stored in the same structure that's passed to the device, for
83 * convenience.
84 */
85 struct intel_sdvo_tv_format tv_format;
86
87 /*
88 * supported encoding mode, used to determine whether HDMI is
89 * supported
90 */
91 struct intel_sdvo_encode encode;
92
93 /* DDC bus used by this SDVO output */
94 uint8_t ddc_bus;
95
50 int save_sdvo_mult; 96 int save_sdvo_mult;
51 u16 save_active_outputs; 97 u16 save_active_outputs;
52 struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; 98 struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
@@ -148,8 +194,8 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
148#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} 194#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
149/** Mapping of command numbers to names, for debug output */ 195/** Mapping of command numbers to names, for debug output */
150const static struct _sdvo_cmd_name { 196const static struct _sdvo_cmd_name {
151 u8 cmd; 197 u8 cmd;
152 char *name; 198 char *name;
153} sdvo_cmd_names[] = { 199} sdvo_cmd_names[] = {
154 SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), 200 SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
155 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), 201 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
@@ -186,8 +232,35 @@ const static struct _sdvo_cmd_name {
186 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), 232 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
187 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), 233 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
188 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), 234 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
189 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT), 235 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
236 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
237 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
238 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
190 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), 239 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
240 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
241 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
242 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
243 /* HDMI op code */
244 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
245 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
246 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
247 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
248 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
249 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
250 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
251 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
252 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
253 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
254 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
255 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
256 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
257 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
258 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
259 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
260 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
261 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
262 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
263 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
191}; 264};
192 265
193#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") 266#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
@@ -506,6 +579,50 @@ static bool intel_sdvo_set_output_timing(struct intel_output *intel_output,
506 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); 579 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
507} 580}
508 581
582static bool
583intel_sdvo_create_preferred_input_timing(struct intel_output *output,
584 uint16_t clock,
585 uint16_t width,
586 uint16_t height)
587{
588 struct intel_sdvo_preferred_input_timing_args args;
589 uint8_t status;
590
591 args.clock = clock;
592 args.width = width;
593 args.height = height;
594 intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
595 &args, sizeof(args));
596 status = intel_sdvo_read_response(output, NULL, 0);
597 if (status != SDVO_CMD_STATUS_SUCCESS)
598 return false;
599
600 return true;
601}
602
603static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
604 struct intel_sdvo_dtd *dtd)
605{
606 uint8_t status;
607
608 intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
609 NULL, 0);
610
611 status = intel_sdvo_read_response(output, &dtd->part1,
612 sizeof(dtd->part1));
613 if (status != SDVO_CMD_STATUS_SUCCESS)
614 return false;
615
616 intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
617 NULL, 0);
618
619 status = intel_sdvo_read_response(output, &dtd->part2,
620 sizeof(dtd->part2));
621 if (status != SDVO_CMD_STATUS_SUCCESS)
622 return false;
623
624 return true;
625}
509 626
510static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) 627static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
511{ 628{
@@ -536,36 +653,12 @@ static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8
536 return true; 653 return true;
537} 654}
538 655
539static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, 656static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
540 struct drm_display_mode *mode, 657 struct drm_display_mode *mode)
541 struct drm_display_mode *adjusted_mode)
542{
543 /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
544 * device will be told of the multiplier during mode_set.
545 */
546 adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
547 return true;
548}
549
550static void intel_sdvo_mode_set(struct drm_encoder *encoder,
551 struct drm_display_mode *mode,
552 struct drm_display_mode *adjusted_mode)
553{ 658{
554 struct drm_device *dev = encoder->dev; 659 uint16_t width, height;
555 struct drm_i915_private *dev_priv = dev->dev_private; 660 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
556 struct drm_crtc *crtc = encoder->crtc; 661 uint16_t h_sync_offset, v_sync_offset;
557 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
558 struct intel_output *intel_output = enc_to_intel_output(encoder);
559 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
560 u16 width, height;
561 u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
562 u16 h_sync_offset, v_sync_offset;
563 u32 sdvox;
564 struct intel_sdvo_dtd output_dtd;
565 int sdvo_pixel_multiply;
566
567 if (!mode)
568 return;
569 662
570 width = mode->crtc_hdisplay; 663 width = mode->crtc_hdisplay;
571 height = mode->crtc_vdisplay; 664 height = mode->crtc_vdisplay;
@@ -580,93 +673,423 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
580 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; 673 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
581 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; 674 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
582 675
583 output_dtd.part1.clock = mode->clock / 10; 676 dtd->part1.clock = mode->clock / 10;
584 output_dtd.part1.h_active = width & 0xff; 677 dtd->part1.h_active = width & 0xff;
585 output_dtd.part1.h_blank = h_blank_len & 0xff; 678 dtd->part1.h_blank = h_blank_len & 0xff;
586 output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) | 679 dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
587 ((h_blank_len >> 8) & 0xf); 680 ((h_blank_len >> 8) & 0xf);
588 output_dtd.part1.v_active = height & 0xff; 681 dtd->part1.v_active = height & 0xff;
589 output_dtd.part1.v_blank = v_blank_len & 0xff; 682 dtd->part1.v_blank = v_blank_len & 0xff;
590 output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) | 683 dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
591 ((v_blank_len >> 8) & 0xf); 684 ((v_blank_len >> 8) & 0xf);
592 685
593 output_dtd.part2.h_sync_off = h_sync_offset; 686 dtd->part2.h_sync_off = h_sync_offset;
594 output_dtd.part2.h_sync_width = h_sync_len & 0xff; 687 dtd->part2.h_sync_width = h_sync_len & 0xff;
595 output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | 688 dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
596 (v_sync_len & 0xf); 689 (v_sync_len & 0xf);
597 output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) | 690 dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
598 ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) | 691 ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
599 ((v_sync_len & 0x30) >> 4); 692 ((v_sync_len & 0x30) >> 4);
600 693
601 output_dtd.part2.dtd_flags = 0x18; 694 dtd->part2.dtd_flags = 0x18;
602 if (mode->flags & DRM_MODE_FLAG_PHSYNC) 695 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
603 output_dtd.part2.dtd_flags |= 0x2; 696 dtd->part2.dtd_flags |= 0x2;
604 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 697 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
605 output_dtd.part2.dtd_flags |= 0x4; 698 dtd->part2.dtd_flags |= 0x4;
699
700 dtd->part2.sdvo_flags = 0;
701 dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
702 dtd->part2.reserved = 0;
703}
704
705static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
706 struct intel_sdvo_dtd *dtd)
707{
708 uint16_t width, height;
709 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
710 uint16_t h_sync_offset, v_sync_offset;
711
712 width = mode->crtc_hdisplay;
713 height = mode->crtc_vdisplay;
714
715 /* do some mode translations */
716 h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
717 h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
718
719 v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
720 v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
721
722 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
723 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
724
725 mode->hdisplay = dtd->part1.h_active;
726 mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
727 mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
728 mode->hsync_start += (dtd->part2.sync_off_width_high & 0xa0) << 2;
729 mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
730 mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
731 mode->htotal = mode->hdisplay + dtd->part1.h_blank;
732 mode->htotal += (dtd->part1.h_high & 0xf) << 8;
733
734 mode->vdisplay = dtd->part1.v_active;
735 mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
736 mode->vsync_start = mode->vdisplay;
737 mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
738 mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0a) << 2;
739 mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
740 mode->vsync_end = mode->vsync_start +
741 (dtd->part2.v_sync_off_width & 0xf);
742 mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
743 mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
744 mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
745
746 mode->clock = dtd->part1.clock * 10;
747
748 mode->flags &= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
749 if (dtd->part2.dtd_flags & 0x2)
750 mode->flags |= DRM_MODE_FLAG_PHSYNC;
751 if (dtd->part2.dtd_flags & 0x4)
752 mode->flags |= DRM_MODE_FLAG_PVSYNC;
753}
754
755static bool intel_sdvo_get_supp_encode(struct intel_output *output,
756 struct intel_sdvo_encode *encode)
757{
758 uint8_t status;
759
760 intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
761 status = intel_sdvo_read_response(output, encode, sizeof(*encode));
762 if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
763 memset(encode, 0, sizeof(*encode));
764 return false;
765 }
766
767 return true;
768}
769
770static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode)
771{
772 uint8_t status;
773
774 intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1);
775 status = intel_sdvo_read_response(output, NULL, 0);
776
777 return (status == SDVO_CMD_STATUS_SUCCESS);
778}
779
780static bool intel_sdvo_set_colorimetry(struct intel_output *output,
781 uint8_t mode)
782{
783 uint8_t status;
784
785 intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
786 status = intel_sdvo_read_response(output, NULL, 0);
787
788 return (status == SDVO_CMD_STATUS_SUCCESS);
789}
790
791#if 0
792static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
793{
794 int i, j;
795 uint8_t set_buf_index[2];
796 uint8_t av_split;
797 uint8_t buf_size;
798 uint8_t buf[48];
799 uint8_t *pos;
800
801 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
802 intel_sdvo_read_response(output, &av_split, 1);
803
804 for (i = 0; i <= av_split; i++) {
805 set_buf_index[0] = i; set_buf_index[1] = 0;
806 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX,
807 set_buf_index, 2);
808 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
809 intel_sdvo_read_response(output, &buf_size, 1);
810
811 pos = buf;
812 for (j = 0; j <= buf_size; j += 8) {
813 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA,
814 NULL, 0);
815 intel_sdvo_read_response(output, pos, 8);
816 pos += 8;
817 }
818 }
819}
820#endif
821
822static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index,
823 uint8_t *data, int8_t size, uint8_t tx_rate)
824{
825 uint8_t set_buf_index[2];
826
827 set_buf_index[0] = index;
828 set_buf_index[1] = 0;
829
830 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2);
831
832 for (; size > 0; size -= 8) {
833 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8);
834 data += 8;
835 }
836
837 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
838}
839
840static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
841{
842 uint8_t csum = 0;
843 int i;
844
845 for (i = 0; i < size; i++)
846 csum += data[i];
847
848 return 0x100 - csum;
849}
850
851#define DIP_TYPE_AVI 0x82
852#define DIP_VERSION_AVI 0x2
853#define DIP_LEN_AVI 13
854
855struct dip_infoframe {
856 uint8_t type;
857 uint8_t version;
858 uint8_t len;
859 uint8_t checksum;
860 union {
861 struct {
862 /* Packet Byte #1 */
863 uint8_t S:2;
864 uint8_t B:2;
865 uint8_t A:1;
866 uint8_t Y:2;
867 uint8_t rsvd1:1;
868 /* Packet Byte #2 */
869 uint8_t R:4;
870 uint8_t M:2;
871 uint8_t C:2;
872 /* Packet Byte #3 */
873 uint8_t SC:2;
874 uint8_t Q:2;
875 uint8_t EC:3;
876 uint8_t ITC:1;
877 /* Packet Byte #4 */
878 uint8_t VIC:7;
879 uint8_t rsvd2:1;
880 /* Packet Byte #5 */
881 uint8_t PR:4;
882 uint8_t rsvd3:4;
883 /* Packet Byte #6~13 */
884 uint16_t top_bar_end;
885 uint16_t bottom_bar_start;
886 uint16_t left_bar_end;
887 uint16_t right_bar_start;
888 } avi;
889 struct {
890 /* Packet Byte #1 */
891 uint8_t channel_count:3;
892 uint8_t rsvd1:1;
893 uint8_t coding_type:4;
894 /* Packet Byte #2 */
895 uint8_t sample_size:2; /* SS0, SS1 */
896 uint8_t sample_frequency:3;
897 uint8_t rsvd2:3;
898 /* Packet Byte #3 */
899 uint8_t coding_type_private:5;
900 uint8_t rsvd3:3;
901 /* Packet Byte #4 */
902 uint8_t channel_allocation;
903 /* Packet Byte #5 */
904 uint8_t rsvd4:3;
905 uint8_t level_shift:4;
906 uint8_t downmix_inhibit:1;
907 } audio;
908 uint8_t payload[28];
909 } __attribute__ ((packed)) u;
910} __attribute__((packed));
911
912static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
913 struct drm_display_mode * mode)
914{
915 struct dip_infoframe avi_if = {
916 .type = DIP_TYPE_AVI,
917 .version = DIP_VERSION_AVI,
918 .len = DIP_LEN_AVI,
919 };
920
921 avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
922 4 + avi_if.len);
923 intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len,
924 SDVO_HBUF_TX_VSYNC);
925}
926
927static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
928 struct drm_display_mode *mode,
929 struct drm_display_mode *adjusted_mode)
930{
931 struct intel_output *output = enc_to_intel_output(encoder);
932 struct intel_sdvo_priv *dev_priv = output->dev_priv;
606 933
607 output_dtd.part2.sdvo_flags = 0; 934 if (!dev_priv->is_tv) {
608 output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0; 935 /* Make the CRTC code factor in the SDVO pixel multiplier. The
609 output_dtd.part2.reserved = 0; 936 * SDVO device will be told of the multiplier during mode_set.
937 */
938 adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
939 } else {
940 struct intel_sdvo_dtd output_dtd;
941 bool success;
942
943 /* We need to construct preferred input timings based on our
944 * output timings. To do that, we have to set the output
945 * timings, even though this isn't really the right place in
946 * the sequence to do it. Oh well.
947 */
948
949
950 /* Set output timings */
951 intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
952 intel_sdvo_set_target_output(output,
953 dev_priv->controlled_output);
954 intel_sdvo_set_output_timing(output, &output_dtd);
955
956 /* Set the input timing to the screen. Assume always input 0. */
957 intel_sdvo_set_target_input(output, true, false);
958
959
960 success = intel_sdvo_create_preferred_input_timing(output,
961 mode->clock / 10,
962 mode->hdisplay,
963 mode->vdisplay);
964 if (success) {
965 struct intel_sdvo_dtd input_dtd;
610 966
611 /* Set the output timing to the screen */ 967 intel_sdvo_get_preferred_input_timing(output,
612 intel_sdvo_set_target_output(intel_output, sdvo_priv->active_outputs); 968 &input_dtd);
613 intel_sdvo_set_output_timing(intel_output, &output_dtd); 969 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
970
971 } else {
972 return false;
973 }
974 }
975 return true;
976}
977
978static void intel_sdvo_mode_set(struct drm_encoder *encoder,
979 struct drm_display_mode *mode,
980 struct drm_display_mode *adjusted_mode)
981{
982 struct drm_device *dev = encoder->dev;
983 struct drm_i915_private *dev_priv = dev->dev_private;
984 struct drm_crtc *crtc = encoder->crtc;
985 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
986 struct intel_output *output = enc_to_intel_output(encoder);
987 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
988 u32 sdvox = 0;
989 int sdvo_pixel_multiply;
990 struct intel_sdvo_in_out_map in_out;
991 struct intel_sdvo_dtd input_dtd;
992 u8 status;
993
994 if (!mode)
995 return;
996
997 /* First, set the input mapping for the first input to our controlled
998 * output. This is only correct if we're a single-input device, in
999 * which case the first input is the output from the appropriate SDVO
1000 * channel on the motherboard. In a two-input device, the first input
1001 * will be SDVOB and the second SDVOC.
1002 */
1003 in_out.in0 = sdvo_priv->controlled_output;
1004 in_out.in1 = 0;
1005
1006 intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP,
1007 &in_out, sizeof(in_out));
1008 status = intel_sdvo_read_response(output, NULL, 0);
1009
1010 if (sdvo_priv->is_hdmi) {
1011 intel_sdvo_set_avi_infoframe(output, mode);
1012 sdvox |= SDVO_AUDIO_ENABLE;
1013 }
1014
1015 intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
1016
1017 /* If it's a TV, we already set the output timing in mode_fixup.
1018 * Otherwise, the output timing is equal to the input timing.
1019 */
1020 if (!sdvo_priv->is_tv) {
1021 /* Set the output timing to the screen */
1022 intel_sdvo_set_target_output(output,
1023 sdvo_priv->controlled_output);
1024 intel_sdvo_set_output_timing(output, &input_dtd);
1025 }
614 1026
615 /* Set the input timing to the screen. Assume always input 0. */ 1027 /* Set the input timing to the screen. Assume always input 0. */
616 intel_sdvo_set_target_input(intel_output, true, false); 1028 intel_sdvo_set_target_input(output, true, false);
617 1029
618 /* We would like to use i830_sdvo_create_preferred_input_timing() to 1030 /* We would like to use intel_sdvo_create_preferred_input_timing() to
619 * provide the device with a timing it can support, if it supports that 1031 * provide the device with a timing it can support, if it supports that
620 * feature. However, presumably we would need to adjust the CRTC to 1032 * feature. However, presumably we would need to adjust the CRTC to
621 * output the preferred timing, and we don't support that currently. 1033 * output the preferred timing, and we don't support that currently.
622 */ 1034 */
623 intel_sdvo_set_input_timing(intel_output, &output_dtd); 1035#if 0
1036 success = intel_sdvo_create_preferred_input_timing(output, clock,
1037 width, height);
1038 if (success) {
1039 struct intel_sdvo_dtd *input_dtd;
1040
1041 intel_sdvo_get_preferred_input_timing(output, &input_dtd);
1042 intel_sdvo_set_input_timing(output, &input_dtd);
1043 }
1044#else
1045 intel_sdvo_set_input_timing(output, &input_dtd);
1046#endif
624 1047
625 switch (intel_sdvo_get_pixel_multiplier(mode)) { 1048 switch (intel_sdvo_get_pixel_multiplier(mode)) {
626 case 1: 1049 case 1:
627 intel_sdvo_set_clock_rate_mult(intel_output, 1050 intel_sdvo_set_clock_rate_mult(output,
628 SDVO_CLOCK_RATE_MULT_1X); 1051 SDVO_CLOCK_RATE_MULT_1X);
629 break; 1052 break;
630 case 2: 1053 case 2:
631 intel_sdvo_set_clock_rate_mult(intel_output, 1054 intel_sdvo_set_clock_rate_mult(output,
632 SDVO_CLOCK_RATE_MULT_2X); 1055 SDVO_CLOCK_RATE_MULT_2X);
633 break; 1056 break;
634 case 4: 1057 case 4:
635 intel_sdvo_set_clock_rate_mult(intel_output, 1058 intel_sdvo_set_clock_rate_mult(output,
636 SDVO_CLOCK_RATE_MULT_4X); 1059 SDVO_CLOCK_RATE_MULT_4X);
637 break; 1060 break;
638 } 1061 }
639 1062
640 /* Set the SDVO control regs. */ 1063 /* Set the SDVO control regs. */
641 if (0/*IS_I965GM(dev)*/) { 1064 if (IS_I965G(dev)) {
642 sdvox = SDVO_BORDER_ENABLE; 1065 sdvox |= SDVO_BORDER_ENABLE |
643 } else { 1066 SDVO_VSYNC_ACTIVE_HIGH |
644 sdvox = I915_READ(sdvo_priv->output_device); 1067 SDVO_HSYNC_ACTIVE_HIGH;
645 switch (sdvo_priv->output_device) { 1068 } else {
646 case SDVOB: 1069 sdvox |= I915_READ(sdvo_priv->output_device);
647 sdvox &= SDVOB_PRESERVE_MASK; 1070 switch (sdvo_priv->output_device) {
648 break; 1071 case SDVOB:
649 case SDVOC: 1072 sdvox &= SDVOB_PRESERVE_MASK;
650 sdvox &= SDVOC_PRESERVE_MASK; 1073 break;
651 break; 1074 case SDVOC:
652 } 1075 sdvox &= SDVOC_PRESERVE_MASK;
653 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; 1076 break;
654 } 1077 }
1078 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
1079 }
655 if (intel_crtc->pipe == 1) 1080 if (intel_crtc->pipe == 1)
656 sdvox |= SDVO_PIPE_B_SELECT; 1081 sdvox |= SDVO_PIPE_B_SELECT;
657 1082
658 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); 1083 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
659 if (IS_I965G(dev)) { 1084 if (IS_I965G(dev)) {
660 /* done in crtc_mode_set as the dpll_md reg must be written 1085 /* done in crtc_mode_set as the dpll_md reg must be written early */
661 early */ 1086 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
662 } else if (IS_I945G(dev) || IS_I945GM(dev)) { 1087 /* done in crtc_mode_set as it lives inside the dpll register */
663 /* done in crtc_mode_set as it lives inside the
664 dpll register */
665 } else { 1088 } else {
666 sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; 1089 sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
667 } 1090 }
668 1091
669 intel_sdvo_write_sdvox(intel_output, sdvox); 1092 intel_sdvo_write_sdvox(output, sdvox);
670} 1093}
671 1094
672static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) 1095static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
@@ -714,7 +1137,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
714 1137
715 if (0) 1138 if (0)
716 intel_sdvo_set_encoder_power_state(intel_output, mode); 1139 intel_sdvo_set_encoder_power_state(intel_output, mode);
717 intel_sdvo_set_active_outputs(intel_output, sdvo_priv->active_outputs); 1140 intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output);
718 } 1141 }
719 return; 1142 return;
720} 1143}
@@ -752,6 +1175,9 @@ static void intel_sdvo_save(struct drm_connector *connector)
752 &sdvo_priv->save_output_dtd[o]); 1175 &sdvo_priv->save_output_dtd[o]);
753 } 1176 }
754 } 1177 }
1178 if (sdvo_priv->is_tv) {
1179 /* XXX: Save TV format/enhancements. */
1180 }
755 1181
756 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); 1182 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
757} 1183}
@@ -759,7 +1185,6 @@ static void intel_sdvo_save(struct drm_connector *connector)
759static void intel_sdvo_restore(struct drm_connector *connector) 1185static void intel_sdvo_restore(struct drm_connector *connector)
760{ 1186{
761 struct drm_device *dev = connector->dev; 1187 struct drm_device *dev = connector->dev;
762 struct drm_i915_private *dev_priv = dev->dev_private;
763 struct intel_output *intel_output = to_intel_output(connector); 1188 struct intel_output *intel_output = to_intel_output(connector);
764 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1189 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
765 int o; 1190 int o;
@@ -790,7 +1215,11 @@ static void intel_sdvo_restore(struct drm_connector *connector)
790 1215
791 intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); 1216 intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult);
792 1217
793 I915_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX); 1218 if (sdvo_priv->is_tv) {
1219 /* XXX: Restore TV format/enhancements. */
1220 }
1221
1222 intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX);
794 1223
795 if (sdvo_priv->save_SDVOX & SDVO_ENABLE) 1224 if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
796 { 1225 {
@@ -916,20 +1345,173 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
916 status = intel_sdvo_read_response(intel_output, &response, 2); 1345 status = intel_sdvo_read_response(intel_output, &response, 2);
917 1346
918 DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); 1347 DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
1348
1349 if (status != SDVO_CMD_STATUS_SUCCESS)
1350 return connector_status_unknown;
1351
919 if ((response[0] != 0) || (response[1] != 0)) 1352 if ((response[0] != 0) || (response[1] != 0))
920 return connector_status_connected; 1353 return connector_status_connected;
921 else 1354 else
922 return connector_status_disconnected; 1355 return connector_status_disconnected;
923} 1356}
924 1357
925static int intel_sdvo_get_modes(struct drm_connector *connector) 1358static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
926{ 1359{
927 struct intel_output *intel_output = to_intel_output(connector); 1360 struct intel_output *intel_output = to_intel_output(connector);
1361 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
928 1362
929 /* set the bus switch and get the modes */ 1363 /* set the bus switch and get the modes */
930 intel_sdvo_set_control_bus_switch(intel_output, SDVO_CONTROL_BUS_DDC2); 1364 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
931 intel_ddc_get_modes(intel_output); 1365 intel_ddc_get_modes(intel_output);
932 1366
1367#if 0
1368 struct drm_device *dev = encoder->dev;
1369 struct drm_i915_private *dev_priv = dev->dev_private;
1370 /* Mac mini hack. On this device, I get DDC through the analog, which
1371 * load-detects as disconnected. I fail to DDC through the SDVO DDC,
1372 * but it does load-detect as connected. So, just steal the DDC bits
1373 * from analog when we fail at finding it the right way.
1374 */
1375 crt = xf86_config->output[0];
1376 intel_output = crt->driver_private;
1377 if (intel_output->type == I830_OUTPUT_ANALOG &&
1378 crt->funcs->detect(crt) == XF86OutputStatusDisconnected) {
1379 I830I2CInit(pScrn, &intel_output->pDDCBus, GPIOA, "CRTDDC_A");
1380 edid_mon = xf86OutputGetEDID(crt, intel_output->pDDCBus);
1381 xf86DestroyI2CBusRec(intel_output->pDDCBus, true, true);
1382 }
1383 if (edid_mon) {
1384 xf86OutputSetEDID(output, edid_mon);
1385 modes = xf86OutputGetEDIDModes(output);
1386 }
1387#endif
1388}
1389
1390/**
1391 * This function checks the current TV format, and chooses a default if
1392 * it hasn't been set.
1393 */
1394static void
1395intel_sdvo_check_tv_format(struct intel_output *output)
1396{
1397 struct intel_sdvo_priv *dev_priv = output->dev_priv;
1398 struct intel_sdvo_tv_format format, unset;
1399 uint8_t status;
1400
1401 intel_sdvo_write_cmd(output, SDVO_CMD_GET_TV_FORMAT, NULL, 0);
1402 status = intel_sdvo_read_response(output, &format, sizeof(format));
1403 if (status != SDVO_CMD_STATUS_SUCCESS)
1404 return;
1405
1406 memset(&unset, 0, sizeof(unset));
1407 if (!memcmp(&format, &unset, sizeof(format))) {
1408 DRM_DEBUG("%s: Choosing default TV format of NTSC-M\n",
1409 SDVO_NAME(dev_priv));
1410
1411 format.ntsc_m = true;
1412 intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format, sizeof(format));
1413 status = intel_sdvo_read_response(output, NULL, 0);
1414 }
1415}
1416
1417/*
1418 * Set of SDVO TV modes.
1419 * Note! This is in reply order (see loop in get_tv_modes).
1420 * XXX: all 60Hz refresh?
1421 */
1422struct drm_display_mode sdvo_tv_modes[] = {
1423 { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815680, 321, 384, 416,
1424 200, 0, 232, 201, 233, 4196112, 0,
1425 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1426 { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814080, 321, 384, 416,
1427 240, 0, 272, 241, 273, 4196112, 0,
1428 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1429 { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910080, 401, 464, 496,
1430 300, 0, 332, 301, 333, 4196112, 0,
1431 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1432 { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913280, 641, 704, 736,
1433 350, 0, 382, 351, 383, 4196112, 0,
1434 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1435 { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121280, 641, 704, 736,
1436 400, 0, 432, 401, 433, 4196112, 0,
1437 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1438 { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121280, 641, 704, 736,
1439 400, 0, 432, 401, 433, 4196112, 0,
1440 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1441 { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624000, 705, 768, 800,
1442 480, 0, 512, 481, 513, 4196112, 0,
1443 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1444 { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232000, 705, 768, 800,
1445 576, 0, 608, 577, 609, 4196112, 0,
1446 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1447 { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751680, 721, 784, 816,
1448 350, 0, 382, 351, 383, 4196112, 0,
1449 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1450 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199680, 721, 784, 816,
1451 400, 0, 432, 401, 433, 4196112, 0,
1452 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1453 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116480, 721, 784, 816,
1454 480, 0, 512, 481, 513, 4196112, 0,
1455 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1456 { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054080, 721, 784, 816,
1457 540, 0, 572, 541, 573, 4196112, 0,
1458 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1459 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816640, 721, 784, 816,
1460 576, 0, 608, 577, 609, 4196112, 0,
1461 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1462 { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570560, 769, 832, 864,
1463 576, 0, 608, 577, 609, 4196112, 0,
1464 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1465 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030080, 801, 864, 896,
1466 600, 0, 632, 601, 633, 4196112, 0,
1467 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1468 { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581760, 833, 896, 928,
1469 624, 0, 656, 625, 657, 4196112, 0,
1470 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1471 { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707040, 921, 984, 1016,
1472 766, 0, 798, 767, 799, 4196112, 0,
1473 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1474 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827200, 1025, 1088, 1120,
1475 768, 0, 800, 769, 801, 4196112, 0,
1476 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1477 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265920, 1281, 1344, 1376,
1478 1024, 0, 1056, 1025, 1057, 4196112, 0,
1479 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1480};
1481
1482static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1483{
1484 struct intel_output *output = to_intel_output(connector);
1485 uint32_t reply = 0;
1486 uint8_t status;
1487 int i = 0;
1488
1489 intel_sdvo_check_tv_format(output);
1490
1491 /* Read the list of supported input resolutions for the selected TV
1492 * format.
1493 */
1494 intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
1495 NULL, 0);
1496 status = intel_sdvo_read_response(output, &reply, 3);
1497 if (status != SDVO_CMD_STATUS_SUCCESS)
1498 return;
1499
1500 for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
1501 if (reply & (1 << i))
1502 drm_mode_probed_add(connector, &sdvo_tv_modes[i]);
1503}
1504
1505static int intel_sdvo_get_modes(struct drm_connector *connector)
1506{
1507 struct intel_output *output = to_intel_output(connector);
1508 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
1509
1510 if (sdvo_priv->is_tv)
1511 intel_sdvo_get_tv_modes(connector);
1512 else
1513 intel_sdvo_get_ddc_modes(connector);
1514
933 if (list_empty(&connector->probed_modes)) 1515 if (list_empty(&connector->probed_modes))
934 return 0; 1516 return 0;
935 return 1; 1517 return 1;
@@ -978,6 +1560,65 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
978}; 1560};
979 1561
980 1562
1563/**
1564 * Choose the appropriate DDC bus for control bus switch command for this
1565 * SDVO output based on the controlled output.
1566 *
1567 * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
1568 * outputs, then LVDS outputs.
1569 */
1570static void
1571intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
1572{
1573 uint16_t mask = 0;
1574 unsigned int num_bits;
1575
1576 /* Make a mask of outputs less than or equal to our own priority in the
1577 * list.
1578 */
1579 switch (dev_priv->controlled_output) {
1580 case SDVO_OUTPUT_LVDS1:
1581 mask |= SDVO_OUTPUT_LVDS1;
1582 case SDVO_OUTPUT_LVDS0:
1583 mask |= SDVO_OUTPUT_LVDS0;
1584 case SDVO_OUTPUT_TMDS1:
1585 mask |= SDVO_OUTPUT_TMDS1;
1586 case SDVO_OUTPUT_TMDS0:
1587 mask |= SDVO_OUTPUT_TMDS0;
1588 case SDVO_OUTPUT_RGB1:
1589 mask |= SDVO_OUTPUT_RGB1;
1590 case SDVO_OUTPUT_RGB0:
1591 mask |= SDVO_OUTPUT_RGB0;
1592 break;
1593 }
1594
1595 /* Count bits to find what number we are in the priority list. */
1596 mask &= dev_priv->caps.output_flags;
1597 num_bits = hweight16(mask);
1598 if (num_bits > 3) {
1599 /* if more than 3 outputs, default to DDC bus 3 for now */
1600 num_bits = 3;
1601 }
1602
1603 /* Corresponds to SDVO_CONTROL_BUS_DDCx */
1604 dev_priv->ddc_bus = 1 << num_bits;
1605}
1606
1607static bool
1608intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
1609{
1610 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
1611 uint8_t status;
1612
1613 intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
1614
1615 intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
1616 status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
1617 if (status != SDVO_CMD_STATUS_SUCCESS)
1618 return false;
1619 return true;
1620}
1621
981bool intel_sdvo_init(struct drm_device *dev, int output_device) 1622bool intel_sdvo_init(struct drm_device *dev, int output_device)
982{ 1623{
983 struct drm_connector *connector; 1624 struct drm_connector *connector;
@@ -1040,45 +1681,76 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1040 1681
1041 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); 1682 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
1042 1683
1043 memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs)); 1684 if (sdvo_priv->caps.output_flags &
1685 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
1686 if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
1687 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
1688 else
1689 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
1690
1691 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1692 encoder_type = DRM_MODE_ENCODER_TMDS;
1693 connector_type = DRM_MODE_CONNECTOR_DVID;
1044 1694
1045 /* TODO, CVBS, SVID, YPRPB & SCART outputs. */ 1695 if (intel_sdvo_get_supp_encode(intel_output,
1046 if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) 1696 &sdvo_priv->encode) &&
1697 intel_sdvo_get_digital_encoding_mode(intel_output) &&
1698 sdvo_priv->is_hdmi) {
1699 /* enable hdmi encoding mode if supported */
1700 intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI);
1701 intel_sdvo_set_colorimetry(intel_output,
1702 SDVO_COLORIMETRY_RGB256);
1703 connector_type = DRM_MODE_CONNECTOR_HDMIA;
1704 }
1705 }
1706 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0)
1047 { 1707 {
1048 sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0; 1708 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
1709 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1710 encoder_type = DRM_MODE_ENCODER_TVDAC;
1711 connector_type = DRM_MODE_CONNECTOR_SVIDEO;
1712 sdvo_priv->is_tv = true;
1713 intel_output->needs_tv_clock = true;
1714 }
1715 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0)
1716 {
1717 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
1049 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 1718 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1050 encoder_type = DRM_MODE_ENCODER_DAC; 1719 encoder_type = DRM_MODE_ENCODER_DAC;
1051 connector_type = DRM_MODE_CONNECTOR_VGA; 1720 connector_type = DRM_MODE_CONNECTOR_VGA;
1052 } 1721 }
1053 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) 1722 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1)
1054 { 1723 {
1055 sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1; 1724 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
1056 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 1725 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1057 encoder_type = DRM_MODE_ENCODER_DAC; 1726 encoder_type = DRM_MODE_ENCODER_DAC;
1058 connector_type = DRM_MODE_CONNECTOR_VGA; 1727 connector_type = DRM_MODE_CONNECTOR_VGA;
1059 } 1728 }
1060 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) 1729 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0)
1061 { 1730 {
1062 sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0; 1731 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
1063 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 1732 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1064 encoder_type = DRM_MODE_ENCODER_TMDS; 1733 encoder_type = DRM_MODE_ENCODER_LVDS;
1065 connector_type = DRM_MODE_CONNECTOR_DVID; 1734 connector_type = DRM_MODE_CONNECTOR_LVDS;
1066 } 1735 }
1067 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) 1736 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1)
1068 { 1737 {
1069 sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1; 1738 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
1070 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 1739 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1071 encoder_type = DRM_MODE_ENCODER_TMDS; 1740 encoder_type = DRM_MODE_ENCODER_LVDS;
1072 connector_type = DRM_MODE_CONNECTOR_DVID; 1741 connector_type = DRM_MODE_CONNECTOR_LVDS;
1073 } 1742 }
1074 else 1743 else
1075 { 1744 {
1076 unsigned char bytes[2]; 1745 unsigned char bytes[2];
1077 1746
1747 sdvo_priv->controlled_output = 0;
1078 memcpy (bytes, &sdvo_priv->caps.output_flags, 2); 1748 memcpy (bytes, &sdvo_priv->caps.output_flags, 2);
1079 DRM_DEBUG("%s: No active RGB or TMDS outputs (0x%02x%02x)\n", 1749 DRM_DEBUG("%s: Unknown SDVO output type (0x%02x%02x)\n",
1080 SDVO_NAME(sdvo_priv), 1750 SDVO_NAME(sdvo_priv),
1081 bytes[0], bytes[1]); 1751 bytes[0], bytes[1]);
1752 encoder_type = DRM_MODE_ENCODER_NONE;
1753 connector_type = DRM_MODE_CONNECTOR_Unknown;
1082 goto err_i2c; 1754 goto err_i2c;
1083 } 1755 }
1084 1756
@@ -1089,6 +1761,8 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1089 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1761 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
1090 drm_sysfs_connector_add(connector); 1762 drm_sysfs_connector_add(connector);
1091 1763
1764 intel_sdvo_select_ddc_bus(sdvo_priv);
1765
1092 /* Set the input timing to the screen. Assume always input 0. */ 1766 /* Set the input timing to the screen. Assume always input 0. */
1093 intel_sdvo_set_target_input(intel_output, true, false); 1767 intel_sdvo_set_target_input(intel_output, true, false);
1094 1768
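
The intel_sdvo_select_ddc_bus() hunk above boils down to masking the capability flags against a priority mask and counting bits. A standalone sketch, compilable in userspace, is below; the DEMO_OUTPUT_* values are placeholders rather than the real SDVO output encoding, and hweight16_demo() only mimics the kernel's hweight16():

/* sketch only: placeholder output-flag values, real logic from the hunk above */
#include <stdint.h>
#include <stdio.h>

#define DEMO_OUTPUT_RGB0  (1 << 0)
#define DEMO_OUTPUT_RGB1  (1 << 1)
#define DEMO_OUTPUT_TMDS0 (1 << 2)

/* count set bits in a 16-bit word, like the kernel's hweight16() */
static unsigned int hweight16_demo(uint16_t w)
{
	unsigned int n = 0;

	for (; w; w >>= 1)
		n += w & 1;
	return n;
}

int main(void)
{
	/* encoder reports RGB0 + TMDS0; the controlled output is TMDS0 */
	uint16_t caps = DEMO_OUTPUT_RGB0 | DEMO_OUTPUT_TMDS0;
	/* fall-through switch: our output plus everything of higher priority */
	uint16_t mask = DEMO_OUTPUT_TMDS0 | DEMO_OUTPUT_RGB1 | DEMO_OUTPUT_RGB0;
	unsigned int num_bits = hweight16_demo(mask & caps);

	if (num_bits > 3)
		num_bits = 3;	/* more than 3 outputs: default to DDC bus 3 */

	/* 1 << num_bits corresponds to SDVO_CONTROL_BUS_DDCx */
	printf("ddc_bus = 0x%x\n", 1u << num_bits);
	return 0;
}
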
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 861a43f8693c..1117b9c151a6 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -173,6 +173,9 @@ struct intel_sdvo_get_trained_inputs_response {
173 * Returns two struct intel_sdvo_output_flags structures. 173 * Returns two struct intel_sdvo_output_flags structures.
174 */ 174 */
175#define SDVO_CMD_GET_IN_OUT_MAP 0x06 175#define SDVO_CMD_GET_IN_OUT_MAP 0x06
176struct intel_sdvo_in_out_map {
177 u16 in0, in1;
178};
176 179
177/** 180/**
178 * Sets the current mapping of SDVO inputs to outputs on the device. 181 * Sets the current mapping of SDVO inputs to outputs on the device.
@@ -206,7 +209,8 @@ struct intel_sdvo_get_trained_inputs_response {
206struct intel_sdvo_get_interrupt_event_source_response { 209struct intel_sdvo_get_interrupt_event_source_response {
207 u16 interrupt_status; 210 u16 interrupt_status;
208 unsigned int ambient_light_interrupt:1; 211 unsigned int ambient_light_interrupt:1;
209 unsigned int pad:7; 212 unsigned int hdmi_audio_encrypt_change:1;
213 unsigned int pad:6;
210} __attribute__((packed)); 214} __attribute__((packed));
211 215
212/** 216/**
@@ -305,23 +309,411 @@ struct intel_sdvo_set_target_input_args {
305# define SDVO_CLOCK_RATE_MULT_4X (1 << 3) 309# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
306 310
307#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 311#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
312/** 5 bytes of bit flags for TV formats shared by all TV format functions */
313struct intel_sdvo_tv_format {
314 unsigned int ntsc_m:1;
315 unsigned int ntsc_j:1;
316 unsigned int ntsc_443:1;
317 unsigned int pal_b:1;
318 unsigned int pal_d:1;
319 unsigned int pal_g:1;
320 unsigned int pal_h:1;
321 unsigned int pal_i:1;
322
323 unsigned int pal_m:1;
324 unsigned int pal_n:1;
325 unsigned int pal_nc:1;
326 unsigned int pal_60:1;
327 unsigned int secam_b:1;
328 unsigned int secam_d:1;
329 unsigned int secam_g:1;
330 unsigned int secam_k:1;
331
332 unsigned int secam_k1:1;
333 unsigned int secam_l:1;
334 unsigned int secam_60:1;
335 unsigned int hdtv_std_smpte_240m_1080i_59:1;
336 unsigned int hdtv_std_smpte_240m_1080i_60:1;
337 unsigned int hdtv_std_smpte_260m_1080i_59:1;
338 unsigned int hdtv_std_smpte_260m_1080i_60:1;
339 unsigned int hdtv_std_smpte_274m_1080i_50:1;
340
341 unsigned int hdtv_std_smpte_274m_1080i_59:1;
342 unsigned int hdtv_std_smpte_274m_1080i_60:1;
343 unsigned int hdtv_std_smpte_274m_1080p_23:1;
344 unsigned int hdtv_std_smpte_274m_1080p_24:1;
345 unsigned int hdtv_std_smpte_274m_1080p_25:1;
346 unsigned int hdtv_std_smpte_274m_1080p_29:1;
347 unsigned int hdtv_std_smpte_274m_1080p_30:1;
348 unsigned int hdtv_std_smpte_274m_1080p_50:1;
349
350 unsigned int hdtv_std_smpte_274m_1080p_59:1;
351 unsigned int hdtv_std_smpte_274m_1080p_60:1;
352 unsigned int hdtv_std_smpte_295m_1080i_50:1;
353 unsigned int hdtv_std_smpte_295m_1080p_50:1;
354 unsigned int hdtv_std_smpte_296m_720p_59:1;
355 unsigned int hdtv_std_smpte_296m_720p_60:1;
356 unsigned int hdtv_std_smpte_296m_720p_50:1;
357 unsigned int hdtv_std_smpte_293m_480p_59:1;
358
359 unsigned int hdtv_std_smpte_170m_480i_59:1;
360 unsigned int hdtv_std_iturbt601_576i_50:1;
361 unsigned int hdtv_std_iturbt601_576p_50:1;
362 unsigned int hdtv_std_eia_7702a_480i_60:1;
363 unsigned int hdtv_std_eia_7702a_480p_60:1;
364 unsigned int pad:3;
365} __attribute__((packed));
308 366
309#define SDVO_CMD_GET_TV_FORMAT 0x28 367#define SDVO_CMD_GET_TV_FORMAT 0x28
310 368
311#define SDVO_CMD_SET_TV_FORMAT 0x29 369#define SDVO_CMD_SET_TV_FORMAT 0x29
312 370
 371/** Returns the resolutions that can be used with the given TV format */
372#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
373struct intel_sdvo_sdtv_resolution_request {
374 unsigned int ntsc_m:1;
375 unsigned int ntsc_j:1;
376 unsigned int ntsc_443:1;
377 unsigned int pal_b:1;
378 unsigned int pal_d:1;
379 unsigned int pal_g:1;
380 unsigned int pal_h:1;
381 unsigned int pal_i:1;
382
383 unsigned int pal_m:1;
384 unsigned int pal_n:1;
385 unsigned int pal_nc:1;
386 unsigned int pal_60:1;
387 unsigned int secam_b:1;
388 unsigned int secam_d:1;
389 unsigned int secam_g:1;
390 unsigned int secam_k:1;
391
392 unsigned int secam_k1:1;
393 unsigned int secam_l:1;
394 unsigned int secam_60:1;
395 unsigned int pad:5;
396} __attribute__((packed));
397
398struct intel_sdvo_sdtv_resolution_reply {
399 unsigned int res_320x200:1;
400 unsigned int res_320x240:1;
401 unsigned int res_400x300:1;
402 unsigned int res_640x350:1;
403 unsigned int res_640x400:1;
404 unsigned int res_640x480:1;
405 unsigned int res_704x480:1;
406 unsigned int res_704x576:1;
407
408 unsigned int res_720x350:1;
409 unsigned int res_720x400:1;
410 unsigned int res_720x480:1;
411 unsigned int res_720x540:1;
412 unsigned int res_720x576:1;
413 unsigned int res_768x576:1;
414 unsigned int res_800x600:1;
415 unsigned int res_832x624:1;
416
417 unsigned int res_920x766:1;
418 unsigned int res_1024x768:1;
419 unsigned int res_1280x1024:1;
420 unsigned int pad:5;
421} __attribute__((packed));
422
 423/* Get the supported resolutions with square pixel aspect ratio that can be
 424 scaled for the requested HDTV format */
425#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
426
427struct intel_sdvo_hdtv_resolution_request {
428 unsigned int hdtv_std_smpte_240m_1080i_59:1;
429 unsigned int hdtv_std_smpte_240m_1080i_60:1;
430 unsigned int hdtv_std_smpte_260m_1080i_59:1;
431 unsigned int hdtv_std_smpte_260m_1080i_60:1;
432 unsigned int hdtv_std_smpte_274m_1080i_50:1;
433 unsigned int hdtv_std_smpte_274m_1080i_59:1;
434 unsigned int hdtv_std_smpte_274m_1080i_60:1;
435 unsigned int hdtv_std_smpte_274m_1080p_23:1;
436
437 unsigned int hdtv_std_smpte_274m_1080p_24:1;
438 unsigned int hdtv_std_smpte_274m_1080p_25:1;
439 unsigned int hdtv_std_smpte_274m_1080p_29:1;
440 unsigned int hdtv_std_smpte_274m_1080p_30:1;
441 unsigned int hdtv_std_smpte_274m_1080p_50:1;
442 unsigned int hdtv_std_smpte_274m_1080p_59:1;
443 unsigned int hdtv_std_smpte_274m_1080p_60:1;
444 unsigned int hdtv_std_smpte_295m_1080i_50:1;
445
446 unsigned int hdtv_std_smpte_295m_1080p_50:1;
447 unsigned int hdtv_std_smpte_296m_720p_59:1;
448 unsigned int hdtv_std_smpte_296m_720p_60:1;
449 unsigned int hdtv_std_smpte_296m_720p_50:1;
450 unsigned int hdtv_std_smpte_293m_480p_59:1;
451 unsigned int hdtv_std_smpte_170m_480i_59:1;
452 unsigned int hdtv_std_iturbt601_576i_50:1;
453 unsigned int hdtv_std_iturbt601_576p_50:1;
454
455 unsigned int hdtv_std_eia_7702a_480i_60:1;
456 unsigned int hdtv_std_eia_7702a_480p_60:1;
457 unsigned int pad:6;
458} __attribute__((packed));
459
460struct intel_sdvo_hdtv_resolution_reply {
461 unsigned int res_640x480:1;
462 unsigned int res_800x600:1;
463 unsigned int res_1024x768:1;
464 unsigned int res_1280x960:1;
465 unsigned int res_1400x1050:1;
466 unsigned int res_1600x1200:1;
467 unsigned int res_1920x1440:1;
468 unsigned int res_2048x1536:1;
469
470 unsigned int res_2560x1920:1;
471 unsigned int res_3200x2400:1;
472 unsigned int res_3840x2880:1;
473 unsigned int pad1:5;
474
475 unsigned int res_848x480:1;
476 unsigned int res_1064x600:1;
477 unsigned int res_1280x720:1;
478 unsigned int res_1360x768:1;
479 unsigned int res_1704x960:1;
480 unsigned int res_1864x1050:1;
481 unsigned int res_1920x1080:1;
482 unsigned int res_2128x1200:1;
483
484 unsigned int res_2560x1400:1;
485 unsigned int res_2728x1536:1;
486 unsigned int res_3408x1920:1;
487 unsigned int res_4264x2400:1;
488 unsigned int res_5120x2880:1;
489 unsigned int pad2:3;
490
491 unsigned int res_768x480:1;
492 unsigned int res_960x600:1;
493 unsigned int res_1152x720:1;
494 unsigned int res_1124x768:1;
495 unsigned int res_1536x960:1;
496 unsigned int res_1680x1050:1;
497 unsigned int res_1728x1080:1;
498 unsigned int res_1920x1200:1;
499
500 unsigned int res_2304x1440:1;
501 unsigned int res_2456x1536:1;
502 unsigned int res_3072x1920:1;
503 unsigned int res_3840x2400:1;
504 unsigned int res_4608x2880:1;
505 unsigned int pad3:3;
506
507 unsigned int res_1280x1024:1;
508 unsigned int pad4:7;
509
510 unsigned int res_1280x768:1;
511 unsigned int pad5:7;
512} __attribute__((packed));
513
 514/* Getting the supported power states returns info for the encoder and monitor; it
 515 relies on the last SetTargetInput and SetTargetOutput calls */
313#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a 516#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
 517/* Getting the power state returns info for the encoder and monitor; it relies on the
 518 last SetTargetInput and SetTargetOutput calls */
519#define SDVO_CMD_GET_POWER_STATE 0x2b
314#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b 520#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
315#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c 521#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
316# define SDVO_ENCODER_STATE_ON (1 << 0) 522# define SDVO_ENCODER_STATE_ON (1 << 0)
317# define SDVO_ENCODER_STATE_STANDBY (1 << 1) 523# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
318# define SDVO_ENCODER_STATE_SUSPEND (1 << 2) 524# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
319# define SDVO_ENCODER_STATE_OFF (1 << 3) 525# define SDVO_ENCODER_STATE_OFF (1 << 3)
526# define SDVO_MONITOR_STATE_ON (1 << 4)
527# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
528# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
529# define SDVO_MONITOR_STATE_OFF (1 << 7)
530
531#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
532#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
533#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
534/**
535 * The panel power sequencing parameters are in units of milliseconds.
536 * The high fields are bits 8:9 of the 10-bit values.
537 */
538struct sdvo_panel_power_sequencing {
539 u8 t0;
540 u8 t1;
541 u8 t2;
542 u8 t3;
543 u8 t4;
544
545 unsigned int t0_high:2;
546 unsigned int t1_high:2;
547 unsigned int t2_high:2;
548 unsigned int t3_high:2;
549
550 unsigned int t4_high:2;
551 unsigned int pad:6;
552} __attribute__((packed));
553
554#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
555struct sdvo_max_backlight_reply {
556 u8 max_value;
557 u8 default_value;
558} __attribute__((packed));
559
560#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
561#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
562
563#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
564struct sdvo_get_ambient_light_reply {
565 u16 trip_low;
566 u16 trip_high;
567 u16 value;
568} __attribute__((packed));
569#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
570struct sdvo_set_ambient_light_reply {
571 u16 trip_low;
572 u16 trip_high;
573 unsigned int enable:1;
574 unsigned int pad:7;
575} __attribute__((packed));
576
577/* Set display power state */
578#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
579# define SDVO_DISPLAY_STATE_ON (1 << 0)
580# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
581# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
582# define SDVO_DISPLAY_STATE_OFF (1 << 3)
583
584#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
585struct intel_sdvo_enhancements_reply {
586 unsigned int flicker_filter:1;
587 unsigned int flicker_filter_adaptive:1;
588 unsigned int flicker_filter_2d:1;
589 unsigned int saturation:1;
590 unsigned int hue:1;
591 unsigned int brightness:1;
592 unsigned int contrast:1;
593 unsigned int overscan_h:1;
594
595 unsigned int overscan_v:1;
596 unsigned int position_h:1;
597 unsigned int position_v:1;
598 unsigned int sharpness:1;
599 unsigned int dot_crawl:1;
600 unsigned int dither:1;
601 unsigned int max_tv_chroma_filter:1;
602 unsigned int max_tv_luma_filter:1;
603} __attribute__((packed));
604
605/* Picture enhancement limits below are dependent on the current TV format,
606 * and thus need to be queried and set after it.
607 */
608#define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d
609#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b
610#define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52
611#define SDVO_CMD_GET_MAX_SATURATION 0x55
612#define SDVO_CMD_GET_MAX_HUE 0x58
613#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
614#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
615#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
616#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
617#define SDVO_CMD_GET_MAX_POSITION_H 0x67
618#define SDVO_CMD_GET_MAX_POSITION_V 0x6a
619#define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d
620#define SDVO_CMD_GET_MAX_TV_CHROMA 0x74
621#define SDVO_CMD_GET_MAX_TV_LUMA 0x77
622struct intel_sdvo_enhancement_limits_reply {
623 u16 max_value;
624 u16 default_value;
625} __attribute__((packed));
320 626
321#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93 627#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
628#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
629# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
630# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
631# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
632# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
633# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
634# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
635
636#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
637#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
638#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50
639#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51
640#define SDVO_CMD_GET_2D_FLICKER_FITER 0x53
641#define SDVO_CMD_SET_2D_FLICKER_FITER 0x54
642#define SDVO_CMD_GET_SATURATION 0x56
643#define SDVO_CMD_SET_SATURATION 0x57
644#define SDVO_CMD_GET_HUE 0x59
645#define SDVO_CMD_SET_HUE 0x5a
646#define SDVO_CMD_GET_BRIGHTNESS 0x5c
647#define SDVO_CMD_SET_BRIGHTNESS 0x5d
648#define SDVO_CMD_GET_CONTRAST 0x5f
649#define SDVO_CMD_SET_CONTRAST 0x60
650#define SDVO_CMD_GET_OVERSCAN_H 0x62
651#define SDVO_CMD_SET_OVERSCAN_H 0x63
652#define SDVO_CMD_GET_OVERSCAN_V 0x65
653#define SDVO_CMD_SET_OVERSCAN_V 0x66
654#define SDVO_CMD_GET_POSITION_H 0x68
655#define SDVO_CMD_SET_POSITION_H 0x69
656#define SDVO_CMD_GET_POSITION_V 0x6b
657#define SDVO_CMD_SET_POSITION_V 0x6c
658#define SDVO_CMD_GET_SHARPNESS 0x6e
659#define SDVO_CMD_SET_SHARPNESS 0x6f
660#define SDVO_CMD_GET_TV_CHROMA 0x75
661#define SDVO_CMD_SET_TV_CHROMA 0x76
662#define SDVO_CMD_GET_TV_LUMA 0x78
663#define SDVO_CMD_SET_TV_LUMA 0x79
664struct intel_sdvo_enhancements_arg {
665 u16 value;
666}__attribute__((packed));
667
668#define SDVO_CMD_GET_DOT_CRAWL 0x70
669#define SDVO_CMD_SET_DOT_CRAWL 0x71
670# define SDVO_DOT_CRAWL_ON (1 << 0)
671# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
672
673#define SDVO_CMD_GET_DITHER 0x72
674#define SDVO_CMD_SET_DITHER 0x73
675# define SDVO_DITHER_ON (1 << 0)
676# define SDVO_DITHER_DEFAULT_ON (1 << 1)
322 677
323#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a 678#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
324# define SDVO_CONTROL_BUS_PROM 0x0 679# define SDVO_CONTROL_BUS_PROM (1 << 0)
325# define SDVO_CONTROL_BUS_DDC1 0x1 680# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
326# define SDVO_CONTROL_BUS_DDC2 0x2 681# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
327# define SDVO_CONTROL_BUS_DDC3 0x3 682# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
683
684/* HDMI op codes */
685#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
686#define SDVO_CMD_GET_ENCODE 0x9e
687#define SDVO_CMD_SET_ENCODE 0x9f
688 #define SDVO_ENCODE_DVI 0x0
689 #define SDVO_ENCODE_HDMI 0x1
690#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
691#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
692#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
693#define SDVO_CMD_SET_COLORIMETRY 0x8e
694 #define SDVO_COLORIMETRY_RGB256 0x0
695 #define SDVO_COLORIMETRY_RGB220 0x1
696 #define SDVO_COLORIMETRY_YCrCb422 0x3
697 #define SDVO_COLORIMETRY_YCrCb444 0x4
698#define SDVO_CMD_GET_COLORIMETRY 0x8f
699#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
700#define SDVO_CMD_SET_AUDIO_STAT 0x91
701#define SDVO_CMD_GET_AUDIO_STAT 0x92
702#define SDVO_CMD_SET_HBUF_INDEX 0x93
703#define SDVO_CMD_GET_HBUF_INDEX 0x94
704#define SDVO_CMD_GET_HBUF_INFO 0x95
705#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
706#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
707#define SDVO_CMD_SET_HBUF_DATA 0x98
708#define SDVO_CMD_GET_HBUF_DATA 0x99
709#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
710#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
711 #define SDVO_HBUF_TX_DISABLED (0 << 6)
712 #define SDVO_HBUF_TX_ONCE (2 << 6)
713 #define SDVO_HBUF_TX_VSYNC (3 << 6)
714#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
715
 716struct intel_sdvo_encode {
717 u8 dvi_rev;
718 u8 hdmi_rev;
719} __attribute__ ((packed));
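
The sdvo_panel_power_sequencing layout above splits each millisecond delay into an 8-bit low byte plus a 2-bit high field. A minimal sketch of reassembling one value, assuming the high bits really are bits 8:9 as the comment states:

/* sketch only: reassemble a 10-bit panel power-sequencing delay */
#include <stdint.h>
#include <stdio.h>

static unsigned int sdvo_pps_ms(uint8_t low, unsigned int high2)
{
	return ((high2 & 0x3u) << 8) | low;	/* 10-bit value: 0..1023 ms */
}

int main(void)
{
	/* e.g. t0 = 0x2c with t0_high = 1  ->  0x12c = 300 ms */
	printf("t0 = %u ms\n", sdvo_pps_ms(0x2c, 1));
	return 0;
}
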
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 63212d7bbc28..df4cf97e5d97 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1039,9 +1039,9 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
1039 1039
1040#if __OS_HAS_AGP 1040#if __OS_HAS_AGP
1041 if (dev_priv->flags & RADEON_IS_AGP) { 1041 if (dev_priv->flags & RADEON_IS_AGP) {
1042 drm_core_ioremap(dev_priv->cp_ring, dev); 1042 drm_core_ioremap_wc(dev_priv->cp_ring, dev);
1043 drm_core_ioremap(dev_priv->ring_rptr, dev); 1043 drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
1044 drm_core_ioremap(dev->agp_buffer_map, dev); 1044 drm_core_ioremap_wc(dev->agp_buffer_map, dev);
1045 if (!dev_priv->cp_ring->handle || 1045 if (!dev_priv->cp_ring->handle ||
1046 !dev_priv->ring_rptr->handle || 1046 !dev_priv->ring_rptr->handle ||
1047 !dev->agp_buffer_map->handle) { 1047 !dev->agp_buffer_map->handle) {
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index 03705240000f..abf4dfc8ec22 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -153,7 +153,10 @@ static struct axis_conversion lis3lv02d_axis_y_inverted = {1, -2, 3};
153static struct axis_conversion lis3lv02d_axis_x_inverted = {-1, 2, 3}; 153static struct axis_conversion lis3lv02d_axis_x_inverted = {-1, 2, 3};
154static struct axis_conversion lis3lv02d_axis_z_inverted = {1, 2, -3}; 154static struct axis_conversion lis3lv02d_axis_z_inverted = {1, 2, -3};
155static struct axis_conversion lis3lv02d_axis_xy_rotated_left = {-2, 1, 3}; 155static struct axis_conversion lis3lv02d_axis_xy_rotated_left = {-2, 1, 3};
156static struct axis_conversion lis3lv02d_axis_xy_rotated_left_usd = {-2, 1, -3};
156static struct axis_conversion lis3lv02d_axis_xy_swap_inverted = {-2, -1, 3}; 157static struct axis_conversion lis3lv02d_axis_xy_swap_inverted = {-2, -1, 3};
158static struct axis_conversion lis3lv02d_axis_xy_rotated_right = {2, -1, 3};
159static struct axis_conversion lis3lv02d_axis_xy_swap_yz_inverted = {2, -1, -3};
157 160
158#define AXIS_DMI_MATCH(_ident, _name, _axis) { \ 161#define AXIS_DMI_MATCH(_ident, _name, _axis) { \
159 .ident = _ident, \ 162 .ident = _ident, \
@@ -172,10 +175,12 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
172 AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted), 175 AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted),
173 AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted), 176 AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted),
174 AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left), 177 AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
178 AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd),
179 AXIS_DMI_MATCH("NC673x", "HP Compaq 673", xy_rotated_left_usd),
180 AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right),
181 AXIS_DMI_MATCH("NC671xx", "HP Compaq 671", xy_swap_yz_inverted),
175 { NULL, } 182 { NULL, }
176/* Laptop models without axis info (yet): 183/* Laptop models without axis info (yet):
177 * "NC651xx" "HP Compaq 651"
178 * "NC671xx" "HP Compaq 671"
179 * "NC6910" "HP Compaq 6910" 184 * "NC6910" "HP Compaq 6910"
180 * HP Compaq 8710x Notebook PC / Mobile Workstation 185 * HP Compaq 8710x Notebook PC / Mobile Workstation
181 * "NC2400" "HP Compaq nc2400" 186 * "NC2400" "HP Compaq nc2400"
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index a329e6bd5d2d..3838bc4acaba 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1823,6 +1823,10 @@ static int dv1394_open(struct inode *inode, struct file *file)
1823 1823
1824#endif 1824#endif
1825 1825
1826 printk(KERN_INFO "%s: NOTE, the dv1394 interface is unsupported "
1827 "and will not be available in the new firewire driver stack. "
1828 "Try libraw1394 based programs instead.\n", current->comm);
1829
1826 return 0; 1830 return 0;
1827} 1831}
1828 1832
@@ -2567,10 +2571,6 @@ static int __init dv1394_init_module(void)
2567{ 2571{
2568 int ret; 2572 int ret;
2569 2573
2570 printk(KERN_WARNING
2571 "NOTE: The dv1394 driver is unsupported and may be removed in a "
2572 "future Linux release. Use raw1394 instead.\n");
2573
2574 cdev_init(&dv1394_cdev, &dv1394_fops); 2574 cdev_init(&dv1394_cdev, &dv1394_fops);
2575 dv1394_cdev.owner = THIS_MODULE; 2575 dv1394_cdev.owner = THIS_MODULE;
2576 ret = cdev_add(&dv1394_cdev, IEEE1394_DV1394_DEV, 16); 2576 ret = cdev_add(&dv1394_cdev, IEEE1394_DV1394_DEV, 16);
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 595ba8eb4a07..0b28141e43bf 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -4599,6 +4599,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4599 printk(KERN_ERR "%s: no memory for coeffs\n", 4599 printk(KERN_ERR "%s: no memory for coeffs\n",
4600 __func__); 4600 __func__);
4601 ret = -ENOMEM; 4601 ret = -ENOMEM;
4602 kfree(bch);
4602 goto free_chan; 4603 goto free_chan;
4603 } 4604 }
4604 bch->nr = ch; 4605 bch->nr = ch;
@@ -4767,6 +4768,7 @@ init_multi_port(struct hfc_multi *hc, int pt)
4767 printk(KERN_ERR "%s: no memory for coeffs\n", 4768 printk(KERN_ERR "%s: no memory for coeffs\n",
4768 __func__); 4769 __func__);
4769 ret = -ENOMEM; 4770 ret = -ENOMEM;
4771 kfree(bch);
4770 goto free_chan; 4772 goto free_chan;
4771 } 4773 }
4772 bch->nr = ch + 1; 4774 bch->nr = ch + 1;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 1e3aea9eecf1..09658b218474 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -25,13 +25,13 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
25{ 25{
26 dev_info_t *hash; 26 dev_info_t *hash;
27 linear_conf_t *conf = mddev_to_conf(mddev); 27 linear_conf_t *conf = mddev_to_conf(mddev);
28 sector_t idx = sector >> conf->sector_shift;
28 29
29 /* 30 /*
 30 * sector_div(a,b) returns the remainder and sets a to a/b 31 * sector_div(a,b) returns the remainder and sets a to a/b
31 */ 32 */
32 sector >>= conf->sector_shift; 33 (void)sector_div(idx, conf->spacing);
33 (void)sector_div(sector, conf->spacing); 34 hash = conf->hash_table[idx];
34 hash = conf->hash_table[sector];
35 35
36 while (sector >= hash->num_sectors + hash->start_sector) 36 while (sector >= hash->num_sectors + hash->start_sector)
37 hash++; 37 hash++;
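
The which_dev() change above depends on the sector_div() contract spelled out in the comment: the first argument is divided in place and the remainder is returned, so the division now runs on the idx copy and sector survives intact for the hash-table walk. A userspace sketch with a stand-in for the kernel macro:

/* sketch only: sector_div_demo() mimics the sector_div() contract */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static uint32_t sector_div_demo(sector_t *a, uint32_t b)
{
	uint32_t rem = (uint32_t)(*a % b);

	*a /= b;
	return rem;
}

int main(void)
{
	sector_t sector = 1000000;
	unsigned int sector_shift = 3;
	uint32_t spacing = 17;
	sector_t idx = sector >> sector_shift;	/* work on a copy, not sector */

	(void)sector_div_demo(&idx, spacing);
	printf("sector=%llu idx=%llu\n",
	       (unsigned long long)sector, (unsigned long long)idx);
	return 0;
}
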
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 41e2509bf896..4495104f6c9f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1481,6 +1481,11 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1481 if (find_rdev_nr(mddev, rdev->desc_nr)) 1481 if (find_rdev_nr(mddev, rdev->desc_nr))
1482 return -EBUSY; 1482 return -EBUSY;
1483 } 1483 }
1484 if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
1485 printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
1486 mdname(mddev), mddev->max_disks);
1487 return -EBUSY;
1488 }
1484 bdevname(rdev->bdev,b); 1489 bdevname(rdev->bdev,b);
1485 while ( (s=strchr(b, '/')) != NULL) 1490 while ( (s=strchr(b, '/')) != NULL)
1486 *s = '!'; 1491 *s = '!';
@@ -2441,6 +2446,15 @@ static void analyze_sbs(mddev_t * mddev)
2441 2446
2442 i = 0; 2447 i = 0;
2443 rdev_for_each(rdev, tmp, mddev) { 2448 rdev_for_each(rdev, tmp, mddev) {
2449 if (rdev->desc_nr >= mddev->max_disks ||
2450 i > mddev->max_disks) {
2451 printk(KERN_WARNING
2452 "md: %s: %s: only %d devices permitted\n",
2453 mdname(mddev), bdevname(rdev->bdev, b),
2454 mddev->max_disks);
2455 kick_rdev_from_array(rdev);
2456 continue;
2457 }
2444 if (rdev != freshest) 2458 if (rdev != freshest)
2445 if (super_types[mddev->major_version]. 2459 if (super_types[mddev->major_version].
2446 validate_super(mddev, rdev)) { 2460 validate_super(mddev, rdev)) {
@@ -4614,13 +4628,6 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
4614 * noticed in interrupt contexts ... 4628 * noticed in interrupt contexts ...
4615 */ 4629 */
4616 4630
4617 if (rdev->desc_nr == mddev->max_disks) {
4618 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
4619 mdname(mddev));
4620 err = -EBUSY;
4621 goto abort_unbind_export;
4622 }
4623
4624 rdev->raid_disk = -1; 4631 rdev->raid_disk = -1;
4625 4632
4626 md_update_sb(mddev, 1); 4633 md_update_sb(mddev, 1);
@@ -4634,9 +4641,6 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
4634 md_new_event(mddev); 4641 md_new_event(mddev);
4635 return 0; 4642 return 0;
4636 4643
4637abort_unbind_export:
4638 unbind_rdev_from_array(rdev);
4639
4640abort_export: 4644abort_export:
4641 export_rdev(rdev); 4645 export_rdev(rdev);
4642 return err; 4646 return err;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7b4f5f7155d8..01e3cffd03b8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1640,7 +1640,8 @@ static void raid1d(mddev_t *mddev)
1640 } 1640 }
1641 1641
1642 bio = r1_bio->bios[r1_bio->read_disk]; 1642 bio = r1_bio->bios[r1_bio->read_disk];
1643 if ((disk=read_balance(conf, r1_bio)) == -1) { 1643 if ((disk=read_balance(conf, r1_bio)) == -1 ||
1644 disk == r1_bio->read_disk) {
1644 printk(KERN_ALERT "raid1: %s: unrecoverable I/O" 1645 printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
1645 " read error for block %llu\n", 1646 " read error for block %llu\n",
1646 bdevname(bio->bi_bdev,b), 1647 bdevname(bio->bi_bdev,b),
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 56073199ceba..c64e6798878a 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -217,6 +217,7 @@ config DELL_LAPTOP
217 depends on EXPERIMENTAL 217 depends on EXPERIMENTAL
218 depends on BACKLIGHT_CLASS_DEVICE 218 depends on BACKLIGHT_CLASS_DEVICE
219 depends on RFKILL 219 depends on RFKILL
220 depends on POWER_SUPPLY
220 default n 221 default n
221 ---help--- 222 ---help---
222 This driver adds support for rfkill and backlight control to Dell 223 This driver adds support for rfkill and backlight control to Dell
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index bf5e4d065436..558bf3f2c276 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -35,7 +35,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
35 35
36 if (!ssc_valid) { 36 if (!ssc_valid) {
37 spin_unlock(&user_lock); 37 spin_unlock(&user_lock);
38 dev_dbg(&ssc->pdev->dev, "could not find requested device\n"); 38 pr_err("ssc: ssc%d platform device is missing\n", ssc_num);
39 return ERR_PTR(-ENODEV); 39 return ERR_PTR(-ENODEV);
40 } 40 }
41 41
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 10c421b73eaf..f26667a7abf7 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -207,7 +207,7 @@ static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
207 &device_ccb->recv_ctrl); 207 &device_ccb->recv_ctrl);
208 208
209 /* give iLO some time to process stop request */ 209 /* give iLO some time to process stop request */
210 for (retries = 1000; retries > 0; retries--) { 210 for (retries = MAX_WAIT; retries > 0; retries--) {
211 doorbell_set(driver_ccb); 211 doorbell_set(driver_ccb);
212 udelay(1); 212 udelay(1);
213 if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A)) 213 if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A))
@@ -309,7 +309,7 @@ static int ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
309 doorbell_clr(driver_ccb); 309 doorbell_clr(driver_ccb);
310 310
311 /* make sure iLO is really handling requests */ 311 /* make sure iLO is really handling requests */
312 for (i = 1000; i > 0; i--) { 312 for (i = MAX_WAIT; i > 0; i--) {
313 if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL)) 313 if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
314 break; 314 break;
315 udelay(1); 315 udelay(1);
@@ -326,7 +326,7 @@ static int ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
326 326
327 return 0; 327 return 0;
328free: 328free:
329 pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa); 329 ilo_ccb_close(pdev, data);
330out: 330out:
331 return error; 331 return error;
332} 332}
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index a281207696c1..b64a20ef07e3 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -19,6 +19,8 @@
19#define MAX_ILO_DEV 1 19#define MAX_ILO_DEV 1
20/* max number of files */ 20/* max number of files */
21#define MAX_OPEN (MAX_CCB * MAX_ILO_DEV) 21#define MAX_OPEN (MAX_CCB * MAX_ILO_DEV)
22/* spin counter for open/close delay */
23#define MAX_WAIT 10000
22 24
23/* 25/*
24 * Per device, used to track global memory allocations. 26 * Per device, used to track global memory allocations.
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index a5bd658c2e83..275b78896a73 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9/* 9/*
@@ -514,7 +514,8 @@ struct xpc_channel_uv {
514 /* partition's notify mq */ 514 /* partition's notify mq */
515 515
516 struct xpc_send_msg_slot_uv *send_msg_slots; 516 struct xpc_send_msg_slot_uv *send_msg_slots;
 517 struct xpc_notify_mq_msg_uv *recv_msg_slots; 517 void *recv_msg_slots; /* each slot will hold an xpc_notify_mq_msg_uv */
518 /* structure plus the user's payload */
518 519
519 struct xpc_fifo_head_uv msg_slot_free_list; 520 struct xpc_fifo_head_uv msg_slot_free_list;
520 struct xpc_fifo_head_uv recv_msg_list; /* deliverable payloads */ 521 struct xpc_fifo_head_uv recv_msg_list; /* deliverable payloads */
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index f17f7d40ea2c..29c0502a96b2 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9/* 9/*
@@ -1010,8 +1010,8 @@ xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
1010 continue; 1010 continue;
1011 1011
1012 for (entry = 0; entry < nentries; entry++) { 1012 for (entry = 0; entry < nentries; entry++) {
1013 msg_slot = ch_uv->recv_msg_slots + entry * 1013 msg_slot = ch_uv->recv_msg_slots +
1014 ch->entry_size; 1014 entry * ch->entry_size;
1015 1015
1016 msg_slot->hdr.msg_slot_number = entry; 1016 msg_slot->hdr.msg_slot_number = entry;
1017 } 1017 }
@@ -1308,9 +1308,8 @@ xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
1308 /* we're dealing with a normal message sent via the notify_mq */ 1308 /* we're dealing with a normal message sent via the notify_mq */
1309 ch_uv = &ch->sn.uv; 1309 ch_uv = &ch->sn.uv;
1310 1310
1311 msg_slot = (struct xpc_notify_mq_msg_uv *)((u64)ch_uv->recv_msg_slots + 1311 msg_slot = ch_uv->recv_msg_slots +
1312 (msg->hdr.msg_slot_number % ch->remote_nentries) * 1312 (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
1313 ch->entry_size);
1314 1313
1315 BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number); 1314 BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
1316 BUG_ON(msg_slot->hdr.size != 0); 1315 BUG_ON(msg_slot->hdr.size != 0);
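
The xpc_uv changes above switch recv_msg_slots to a void * because each slot is an xpc_notify_mq_msg_uv header followed by the channel's payload, so slots must be addressed as base + entry * entry_size rather than by header-sized pointer arithmetic. A compilable sketch of that addressing; names are illustrative, and the void-pointer arithmetic is the GCC extension the kernel itself relies on:

/* sketch only: variable-size slot addressing with a void * base */
#include <stdio.h>
#include <stdlib.h>

struct msg_hdr {
	int msg_slot_number;
	size_t size;
};

int main(void)
{
	size_t payload_size = 48;
	size_t entry_size = sizeof(struct msg_hdr) + payload_size;
	int nentries = 4, entry;
	void *recv_msg_slots = calloc(nentries, entry_size);

	if (!recv_msg_slots)
		return 1;

	for (entry = 0; entry < nentries; entry++) {
		struct msg_hdr *slot = recv_msg_slots + entry * entry_size;

		slot->msg_slot_number = entry;
	}

	printf("slot 3 is numbered %d\n",
	       ((struct msg_hdr *)(recv_msg_slots + 3 * entry_size))->msg_slot_number);
	free(recv_msg_slots);
	return 0;
}
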
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 535c234286ea..8c694213035b 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1475,6 +1475,7 @@ el3_resume(struct device *pdev)
1475 spin_lock_irqsave(&lp->lock, flags); 1475 spin_lock_irqsave(&lp->lock, flags);
1476 1476
1477 outw(PowerUp, ioaddr + EL3_CMD); 1477 outw(PowerUp, ioaddr + EL3_CMD);
1478 EL3WINDOW(0);
1478 el3_up(dev); 1479 el3_up(dev);
1479 1480
1480 if (netif_running(dev)) 1481 if (netif_running(dev))
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 379a1324db4e..d31791f60292 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2276,8 +2276,7 @@ no_mem:
2276 } else if ((len = ntohl(r->len_cq)) != 0) { 2276 } else if ((len = ntohl(r->len_cq)) != 0) {
2277 struct sge_fl *fl; 2277 struct sge_fl *fl;
2278 2278
2279 if (eth) 2279 lro &= eth && is_eth_tcp(rss_hi);
2280 lro = qs->lro_enabled && is_eth_tcp(rss_hi);
2281 2280
2282 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; 2281 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2283 if (fl->use_pages) { 2282 if (fl->use_pages) {
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 3f7eab42aef1..9b12a13a640f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -351,6 +351,9 @@ static int gfar_probe(struct of_device *ofdev,
351 /* Reset MAC layer */ 351 /* Reset MAC layer */
352 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); 352 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
353 353
354 /* We need to delay at least 3 TX clocks */
355 udelay(2);
356
354 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 357 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
355 gfar_write(&priv->regs->maccfg1, tempval); 358 gfar_write(&priv->regs->maccfg1, tempval);
356 359
@@ -1626,6 +1629,12 @@ static void gfar_schedule_cleanup(struct net_device *dev)
1626 if (netif_rx_schedule_prep(&priv->napi)) { 1629 if (netif_rx_schedule_prep(&priv->napi)) {
1627 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED); 1630 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
1628 __netif_rx_schedule(&priv->napi); 1631 __netif_rx_schedule(&priv->napi);
1632 } else {
1633 /*
1634 * Clear IEVENT, so interrupts aren't called again
1635 * because of the packets that have already arrived.
1636 */
1637 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
1629 } 1638 }
1630 1639
1631 spin_unlock(&priv->rxlock); 1640 spin_unlock(&priv->rxlock);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index b1a83344acc7..eaa86897f5c3 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -312,7 +312,7 @@ extern const char gfar_driver_version[];
312#define ATTRELI_EI(x) (x) 312#define ATTRELI_EI(x) (x)
313 313
314#define BD_LFLAG(flags) ((flags) << 16) 314#define BD_LFLAG(flags) ((flags) << 16)
315#define BD_LENGTH_MASK 0x00ff 315#define BD_LENGTH_MASK 0x0000ffff
316 316
317/* TxBD status field bits */ 317/* TxBD status field bits */
318#define TXBD_READY 0x8000 318#define TXBD_READY 0x8000
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 9c78c963b721..f4dd9acb6877 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1203,7 +1203,7 @@ typedef struct {
1203#define NETXEN_IS_MSI_FAMILY(adapter) \ 1203#define NETXEN_IS_MSI_FAMILY(adapter) \
1204 ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) 1204 ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
1205 1205
1206#define MSIX_ENTRIES_PER_ADAPTER 8 1206#define MSIX_ENTRIES_PER_ADAPTER 1
1207#define NETXEN_MSIX_TBL_SPACE 8192 1207#define NETXEN_MSIX_TBL_SPACE 8192
1208#define NETXEN_PCI_REG_MSIX_TBL 0x44 1208#define NETXEN_PCI_REG_MSIX_TBL 0x44
1209 1209
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 645d384fe87e..3b17a7936147 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -76,6 +76,7 @@ static void netxen_nic_poll_controller(struct net_device *netdev);
76#endif 76#endif
77static irqreturn_t netxen_intr(int irq, void *data); 77static irqreturn_t netxen_intr(int irq, void *data);
78static irqreturn_t netxen_msi_intr(int irq, void *data); 78static irqreturn_t netxen_msi_intr(int irq, void *data);
79static irqreturn_t netxen_msix_intr(int irq, void *data);
79 80
80/* PCI Device ID Table */ 81/* PCI Device ID Table */
81#define ENTRY(device) \ 82#define ENTRY(device) \
@@ -1084,7 +1085,9 @@ static int netxen_nic_open(struct net_device *netdev)
1084 for (ring = 0; ring < adapter->max_rds_rings; ring++) 1085 for (ring = 0; ring < adapter->max_rds_rings; ring++)
1085 netxen_post_rx_buffers(adapter, ctx, ring); 1086 netxen_post_rx_buffers(adapter, ctx, ring);
1086 } 1087 }
1087 if (NETXEN_IS_MSI_FAMILY(adapter)) 1088 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
1089 handler = netxen_msix_intr;
1090 else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
1088 handler = netxen_msi_intr; 1091 handler = netxen_msi_intr;
1089 else { 1092 else {
1090 flags |= IRQF_SHARED; 1093 flags |= IRQF_SHARED;
@@ -1612,6 +1615,14 @@ static irqreturn_t netxen_msi_intr(int irq, void *data)
1612 return IRQ_HANDLED; 1615 return IRQ_HANDLED;
1613} 1616}
1614 1617
1618static irqreturn_t netxen_msix_intr(int irq, void *data)
1619{
1620 struct netxen_adapter *adapter = data;
1621
1622 napi_schedule(&adapter->napi);
1623 return IRQ_HANDLED;
1624}
1625
1615static int netxen_nic_poll(struct napi_struct *napi, int budget) 1626static int netxen_nic_poll(struct napi_struct *napi, int budget)
1616{ 1627{
1617 struct netxen_adapter *adapter = container_of(napi, struct netxen_adapter, napi); 1628 struct netxen_adapter *adapter = container_of(napi, struct netxen_adapter, napi);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 2c73ca606b35..0771eb6fc6eb 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -437,6 +437,22 @@ enum features {
437 RTL_FEATURE_GMII = (1 << 2), 437 RTL_FEATURE_GMII = (1 << 2),
438}; 438};
439 439
440struct rtl8169_counters {
441 __le64 tx_packets;
442 __le64 rx_packets;
443 __le64 tx_errors;
444 __le32 rx_errors;
445 __le16 rx_missed;
446 __le16 align_errors;
447 __le32 tx_one_collision;
448 __le32 tx_multi_collision;
449 __le64 rx_unicast;
450 __le64 rx_broadcast;
451 __le32 rx_multicast;
452 __le16 tx_aborted;
453 __le16 tx_underun;
454};
455
440struct rtl8169_private { 456struct rtl8169_private {
441 void __iomem *mmio_addr; /* memory map physical address */ 457 void __iomem *mmio_addr; /* memory map physical address */
442 struct pci_dev *pci_dev; /* Index of PCI device */ 458 struct pci_dev *pci_dev; /* Index of PCI device */
@@ -480,6 +496,7 @@ struct rtl8169_private {
480 unsigned features; 496 unsigned features;
481 497
482 struct mii_if_info mii; 498 struct mii_if_info mii;
499 struct rtl8169_counters counters;
483}; 500};
484 501
485MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 502MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -1100,22 +1117,6 @@ static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1100 "tx_underrun", 1117 "tx_underrun",
1101}; 1118};
1102 1119
1103struct rtl8169_counters {
1104 __le64 tx_packets;
1105 __le64 rx_packets;
1106 __le64 tx_errors;
1107 __le32 rx_errors;
1108 __le16 rx_missed;
1109 __le16 align_errors;
1110 __le32 tx_one_collision;
1111 __le32 tx_multi_collision;
1112 __le64 rx_unicast;
1113 __le64 rx_broadcast;
1114 __le32 rx_multicast;
1115 __le16 tx_aborted;
1116 __le16 tx_underun;
1117};
1118
1119static int rtl8169_get_sset_count(struct net_device *dev, int sset) 1120static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1120{ 1121{
1121 switch (sset) { 1122 switch (sset) {
@@ -1126,16 +1127,21 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1126 } 1127 }
1127} 1128}
1128 1129
1129static void rtl8169_get_ethtool_stats(struct net_device *dev, 1130static void rtl8169_update_counters(struct net_device *dev)
1130 struct ethtool_stats *stats, u64 *data)
1131{ 1131{
1132 struct rtl8169_private *tp = netdev_priv(dev); 1132 struct rtl8169_private *tp = netdev_priv(dev);
1133 void __iomem *ioaddr = tp->mmio_addr; 1133 void __iomem *ioaddr = tp->mmio_addr;
1134 struct rtl8169_counters *counters; 1134 struct rtl8169_counters *counters;
1135 dma_addr_t paddr; 1135 dma_addr_t paddr;
1136 u32 cmd; 1136 u32 cmd;
1137 int wait = 1000;
1137 1138
1138 ASSERT_RTNL(); 1139 /*
1140 * Some chips are unable to dump tally counters when the receiver
1141 * is disabled.
1142 */
1143 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1144 return;
1139 1145
1140 counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr); 1146 counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
1141 if (!counters) 1147 if (!counters)
@@ -1146,31 +1152,45 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
1146 RTL_W32(CounterAddrLow, cmd); 1152 RTL_W32(CounterAddrLow, cmd);
1147 RTL_W32(CounterAddrLow, cmd | CounterDump); 1153 RTL_W32(CounterAddrLow, cmd | CounterDump);
1148 1154
1149 while (RTL_R32(CounterAddrLow) & CounterDump) { 1155 while (wait--) {
1150 if (msleep_interruptible(1)) 1156 if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
1157 /* copy updated counters */
1158 memcpy(&tp->counters, counters, sizeof(*counters));
1151 break; 1159 break;
1160 }
1161 udelay(10);
1152 } 1162 }
1153 1163
1154 RTL_W32(CounterAddrLow, 0); 1164 RTL_W32(CounterAddrLow, 0);
1155 RTL_W32(CounterAddrHigh, 0); 1165 RTL_W32(CounterAddrHigh, 0);
1156 1166
1157 data[0] = le64_to_cpu(counters->tx_packets);
1158 data[1] = le64_to_cpu(counters->rx_packets);
1159 data[2] = le64_to_cpu(counters->tx_errors);
1160 data[3] = le32_to_cpu(counters->rx_errors);
1161 data[4] = le16_to_cpu(counters->rx_missed);
1162 data[5] = le16_to_cpu(counters->align_errors);
1163 data[6] = le32_to_cpu(counters->tx_one_collision);
1164 data[7] = le32_to_cpu(counters->tx_multi_collision);
1165 data[8] = le64_to_cpu(counters->rx_unicast);
1166 data[9] = le64_to_cpu(counters->rx_broadcast);
1167 data[10] = le32_to_cpu(counters->rx_multicast);
1168 data[11] = le16_to_cpu(counters->tx_aborted);
1169 data[12] = le16_to_cpu(counters->tx_underun);
1170
1171 pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr); 1167 pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
1172} 1168}
1173 1169
1170static void rtl8169_get_ethtool_stats(struct net_device *dev,
1171 struct ethtool_stats *stats, u64 *data)
1172{
1173 struct rtl8169_private *tp = netdev_priv(dev);
1174
1175 ASSERT_RTNL();
1176
1177 rtl8169_update_counters(dev);
1178
1179 data[0] = le64_to_cpu(tp->counters.tx_packets);
1180 data[1] = le64_to_cpu(tp->counters.rx_packets);
1181 data[2] = le64_to_cpu(tp->counters.tx_errors);
1182 data[3] = le32_to_cpu(tp->counters.rx_errors);
1183 data[4] = le16_to_cpu(tp->counters.rx_missed);
1184 data[5] = le16_to_cpu(tp->counters.align_errors);
1185 data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1186 data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1187 data[8] = le64_to_cpu(tp->counters.rx_unicast);
1188 data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1189 data[10] = le32_to_cpu(tp->counters.rx_multicast);
1190 data[11] = le16_to_cpu(tp->counters.tx_aborted);
1191 data[12] = le16_to_cpu(tp->counters.tx_underun);
1192}
1193
1174static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) 1194static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1175{ 1195{
1176 switch(stringset) { 1196 switch(stringset) {
@@ -3682,6 +3702,9 @@ static int rtl8169_close(struct net_device *dev)
3682 struct rtl8169_private *tp = netdev_priv(dev); 3702 struct rtl8169_private *tp = netdev_priv(dev);
3683 struct pci_dev *pdev = tp->pci_dev; 3703 struct pci_dev *pdev = tp->pci_dev;
3684 3704
3705 /* update counters before going down */
3706 rtl8169_update_counters(dev);
3707
3685 rtl8169_down(dev); 3708 rtl8169_down(dev);
3686 3709
3687 free_irq(dev->irq, dev); 3710 free_irq(dev->irq, dev);
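
rtl8169_update_counters() above replaces the interruptible sleep with a bounded udelay() poll on the CounterDump bit and caches the result in tp->counters, so rtl8169_close() can still report totals after the receiver is stopped. A userspace sketch of the bounded-poll pattern, with placeholder register accessors standing in for RTL_R32()/RTL_W32():

/* sketch only: the fake register "completes" the dump immediately */
#include <stdint.h>
#include <stdio.h>

#define COUNTER_DUMP 0x8u

static uint32_t fake_reg;

static void reg_write(uint32_t v)
{
	fake_reg = v & ~COUNTER_DUMP;	/* pretend the hardware finishes at once */
}

static uint32_t reg_read(void)
{
	return fake_reg;
}

int main(void)
{
	int wait = 1000;
	int done = 0;

	reg_write(0x1000u | COUNTER_DUMP);	/* kick off the counter dump */
	while (wait--) {
		if ((reg_read() & COUNTER_DUMP) == 0) {
			done = 1;		/* copy the counters here */
			break;
		}
		/* udelay(10) in the driver */
	}
	printf("%s\n", done ? "counters updated" : "dump timed out");
	return 0;
}
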
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b17efa9cc530..491876341068 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2221,6 +2221,8 @@ static int gem_do_start(struct net_device *dev)
2221 2221
2222 gp->running = 1; 2222 gp->running = 1;
2223 2223
2224 napi_enable(&gp->napi);
2225
2224 if (gp->lstate == link_up) { 2226 if (gp->lstate == link_up) {
2225 netif_carrier_on(gp->dev); 2227 netif_carrier_on(gp->dev);
2226 gem_set_link_modes(gp); 2228 gem_set_link_modes(gp);
@@ -2238,6 +2240,8 @@ static int gem_do_start(struct net_device *dev)
2238 spin_lock_irqsave(&gp->lock, flags); 2240 spin_lock_irqsave(&gp->lock, flags);
2239 spin_lock(&gp->tx_lock); 2241 spin_lock(&gp->tx_lock);
2240 2242
2243 napi_disable(&gp->napi);
2244
2241 gp->running = 0; 2245 gp->running = 0;
2242 gem_reset(gp); 2246 gem_reset(gp);
2243 gem_clean_rings(gp); 2247 gem_clean_rings(gp);
@@ -2338,8 +2342,6 @@ static int gem_open(struct net_device *dev)
2338 if (!gp->asleep) 2342 if (!gp->asleep)
2339 rc = gem_do_start(dev); 2343 rc = gem_do_start(dev);
2340 gp->opened = (rc == 0); 2344 gp->opened = (rc == 0);
2341 if (gp->opened)
2342 napi_enable(&gp->napi);
2343 2345
2344 mutex_unlock(&gp->pm_mutex); 2346 mutex_unlock(&gp->pm_mutex);
2345 2347
@@ -2476,8 +2478,6 @@ static int gem_resume(struct pci_dev *pdev)
2476 2478
2477 /* Re-attach net device */ 2479 /* Re-attach net device */
2478 netif_device_attach(dev); 2480 netif_device_attach(dev);
2479
2480 napi_enable(&gp->napi);
2481 } 2481 }
2482 2482
2483 spin_lock_irqsave(&gp->lock, flags); 2483 spin_lock_irqsave(&gp->lock, flags);
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 7a72a3112f0a..cc4013be5e18 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2629,6 +2629,14 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
2629 int i, qfe_slot = -1; 2629 int i, qfe_slot = -1;
2630 int err = -ENODEV; 2630 int err = -ENODEV;
2631 2631
2632 sbus_dp = to_of_device(op->dev.parent)->node;
2633 if (is_qfe)
2634 sbus_dp = to_of_device(op->dev.parent->parent)->node;
2635
2636 /* We can match PCI devices too, do not accept those here. */
2637 if (strcmp(sbus_dp->name, "sbus"))
2638 return err;
2639
2632 if (is_qfe) { 2640 if (is_qfe) {
2633 qp = quattro_sbus_find(op); 2641 qp = quattro_sbus_find(op);
2634 if (qp == NULL) 2642 if (qp == NULL)
@@ -2734,10 +2742,6 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
2734 if (qp != NULL) 2742 if (qp != NULL)
2735 hp->happy_flags |= HFLAG_QUATTRO; 2743 hp->happy_flags |= HFLAG_QUATTRO;
2736 2744
2737 sbus_dp = to_of_device(op->dev.parent)->node;
2738 if (is_qfe)
2739 sbus_dp = to_of_device(op->dev.parent->parent)->node;
2740
2741 /* Get the supported DVMA burst sizes from our Happy SBUS. */ 2745 /* Get the supported DVMA burst sizes from our Happy SBUS. */
2742 hp->happy_bursts = of_getintprop_default(sbus_dp, 2746 hp->happy_bursts = of_getintprop_default(sbus_dp,
2743 "burst-sizes", 0x00); 2747 "burst-sizes", 0x00);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 0bf2114738be..d4c5ecc51f77 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -464,13 +464,14 @@ static void de_rx (struct de_private *de)
464 drop = 1; 464 drop = 1;
465 465
466rx_next: 466rx_next:
467 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
468 if (rx_tail == (DE_RX_RING_SIZE - 1)) 467 if (rx_tail == (DE_RX_RING_SIZE - 1))
469 de->rx_ring[rx_tail].opts2 = 468 de->rx_ring[rx_tail].opts2 =
470 cpu_to_le32(RingEnd | de->rx_buf_sz); 469 cpu_to_le32(RingEnd | de->rx_buf_sz);
471 else 470 else
472 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz); 471 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
473 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping); 472 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
473 wmb();
474 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
474 rx_tail = NEXT_RX(rx_tail); 475 rx_tail = NEXT_RX(rx_tail);
475 } 476 }
476 477
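
The de2104x fix above is a descriptor-ownership ordering change: write opts2 and addr1 first, issue wmb(), and only then set DescOwn, so the NIC never sees a half-written descriptor. A userspace sketch of the same pattern, with __sync_synchronize() standing in for wmb() and an illustrative descriptor layout:

/* sketch only: publish descriptor fields before flipping ownership */
#include <stdint.h>
#include <stdio.h>

#define DESC_OWN 0x80000000u

struct rx_desc {
	volatile uint32_t opts1;	/* ownership + status */
	uint32_t opts2;			/* buffer length / ring end */
	uint32_t addr1;			/* DMA address of the buffer */
};

static void refill_desc(struct rx_desc *d, uint32_t len, uint32_t dma)
{
	d->opts2 = len;
	d->addr1 = dma;
	__sync_synchronize();		/* wmb(): make the fields visible ... */
	d->opts1 = DESC_OWN;		/* ... before handing the descriptor back */
}

int main(void)
{
	struct rx_desc d = { 0, 0, 0 };

	refill_desc(&d, 1536, 0x12340000u);
	printf("opts1=%08x opts2=%u addr1=%08x\n",
	       (unsigned)d.opts1, (unsigned)d.opts2, (unsigned)d.addr1);
	return 0;
}
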
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d7b81e4fdd56..09fea31d3e36 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -157,10 +157,16 @@ static int update_filter(struct tap_filter *filter, void __user *arg)
157 157
158 nexact = n; 158 nexact = n;
159 159
160 /* The rest is hashed */ 160 /* Remaining multicast addresses are hashed,
 161 * a unicast address will leave the filter disabled. */
161 memset(filter->mask, 0, sizeof(filter->mask)); 162 memset(filter->mask, 0, sizeof(filter->mask));
162 for (; n < uf.count; n++) 163 for (; n < uf.count; n++) {
164 if (!is_multicast_ether_addr(addr[n].u)) {
165 err = 0; /* no filter */
166 goto done;
167 }
163 addr_hash_set(filter->mask, addr[n].u); 168 addr_hash_set(filter->mask, addr[n].u);
169 }
164 170
165 /* For ALLMULTI just set the mask to all ones. 171 /* For ALLMULTI just set the mask to all ones.
166 * This overrides the mask populated above. */ 172 * This overrides the mask populated above. */
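
In the tun filter update above, exact addresses go into a small table, the remaining multicast addresses are hashed into a 64-bit mask, and hashing is now refused for non-multicast addresses (the filter is simply left disabled). A sketch of that hashed path, using a stand-in hash instead of the driver's CRC-based one:

/* sketch only: 6-bit hash selecting one bit of a 64-bit filter mask */
#include <stdint.h>
#include <stdio.h>

static unsigned int demo_hash(const uint8_t *addr)
{
	unsigned int h = 0, i;

	for (i = 0; i < 6; i++)
		h = h * 31 + addr[i];
	return h & 0x3f;			/* 6 bits -> index 0..63 */
}

static void addr_hash_set(uint32_t mask[2], const uint8_t *addr)
{
	unsigned int n = demo_hash(addr);

	mask[n >> 5] |= 1u << (n & 31);
}

static int is_multicast(const uint8_t *addr)
{
	return addr[0] & 0x01;
}

int main(void)
{
	uint32_t mask[2] = { 0, 0 };
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
	const uint8_t ucast[6] = { 0x52, 0x54, 0x00, 0xaa, 0xbb, 0xcc };

	if (is_multicast(mcast))
		addr_hash_set(mask, mcast);	/* multicast: hashed as before */
	if (!is_multicast(ucast))
		printf("unicast address: leave the hash filter disabled\n");
	printf("mask = %08x %08x\n", (unsigned)mask[1], (unsigned)mask[0]);
	return 0;
}
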
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index b35c8813bef4..c01ea48da5fe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -4042,6 +4042,7 @@ static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4042 priv->is_open = 1; 4042 priv->is_open = 1;
4043 } 4043 }
4044 4044
4045 pci_save_state(pdev);
4045 pci_set_power_state(pdev, PCI_D3hot); 4046 pci_set_power_state(pdev, PCI_D3hot);
4046 4047
4047 return 0; 4048 return 0;
@@ -4052,6 +4053,7 @@ static int iwl_pci_resume(struct pci_dev *pdev)
4052 struct iwl_priv *priv = pci_get_drvdata(pdev); 4053 struct iwl_priv *priv = pci_get_drvdata(pdev);
4053 4054
4054 pci_set_power_state(pdev, PCI_D0); 4055 pci_set_power_state(pdev, PCI_D0);
4056 pci_restore_state(pdev);
4055 4057
4056 if (priv->is_open) 4058 if (priv->is_open)
4057 iwl_mac_start(priv->hw); 4059 iwl_mac_start(priv->hw);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 412f66bac1af..70a8b21ca39b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -480,6 +480,9 @@ void iwl_clear_stations_table(struct iwl_priv *priv)
480 priv->num_stations = 0; 480 priv->num_stations = 0;
481 memset(priv->stations, 0, sizeof(priv->stations)); 481 memset(priv->stations, 0, sizeof(priv->stations));
482 482
483 /* clean ucode key table bit map */
484 priv->ucode_key_table = 0;
485
483 spin_unlock_irqrestore(&priv->sta_lock, flags); 486 spin_unlock_irqrestore(&priv->sta_lock, flags);
484} 487}
485EXPORT_SYMBOL(iwl_clear_stations_table); 488EXPORT_SYMBOL(iwl_clear_stations_table);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 95d01984c80e..5b44d322b99f 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -8143,6 +8143,7 @@ static int iwl3945_pci_suspend(struct pci_dev *pdev, pm_message_t state)
8143 priv->is_open = 1; 8143 priv->is_open = 1;
8144 } 8144 }
8145 8145
8146 pci_save_state(pdev);
8146 pci_set_power_state(pdev, PCI_D3hot); 8147 pci_set_power_state(pdev, PCI_D3hot);
8147 8148
8148 return 0; 8149 return 0;
@@ -8153,6 +8154,7 @@ static int iwl3945_pci_resume(struct pci_dev *pdev)
8153 struct iwl3945_priv *priv = pci_get_drvdata(pdev); 8154 struct iwl3945_priv *priv = pci_get_drvdata(pdev);
8154 8155
8155 pci_set_power_state(pdev, PCI_D0); 8156 pci_set_power_state(pdev, PCI_D0);
8157 pci_restore_state(pdev);
8156 8158
8157 if (priv->is_open) 8159 if (priv->is_open)
8158 iwl3945_mac_start(priv->hw); 8160 iwl3945_mac_start(priv->hw);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index ab1d615425a8..93eac1423585 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -355,6 +355,8 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
355 int i = 0; 355 int i = 0;
356 356
357 if (drv && drv->suspend) { 357 if (drv && drv->suspend) {
358 pci_power_t prev = pci_dev->current_state;
359
358 pci_dev->state_saved = false; 360 pci_dev->state_saved = false;
359 361
360 i = drv->suspend(pci_dev, state); 362 i = drv->suspend(pci_dev, state);
@@ -365,12 +367,16 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
365 if (pci_dev->state_saved) 367 if (pci_dev->state_saved)
366 goto Fixup; 368 goto Fixup;
367 369
368 if (WARN_ON_ONCE(pci_dev->current_state != PCI_D0)) 370 if (pci_dev->current_state != PCI_D0
371 && pci_dev->current_state != PCI_UNKNOWN) {
372 WARN_ONCE(pci_dev->current_state != prev,
373 "PCI PM: Device state not saved by %pF\n",
374 drv->suspend);
369 goto Fixup; 375 goto Fixup;
376 }
370 } 377 }
371 378
372 pci_save_state(pci_dev); 379 pci_save_state(pci_dev);
373 pci_dev->state_saved = true;
374 /* 380 /*
375 * This is for compatibility with existing code with legacy PM support. 381 * This is for compatibility with existing code with legacy PM support.
376 */ 382 */
@@ -424,35 +430,20 @@ static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
424 pci_fixup_device(pci_fixup_resume_early, pci_dev); 430 pci_fixup_device(pci_fixup_resume_early, pci_dev);
425} 431}
426 432
427static int pci_pm_default_resume(struct pci_dev *pci_dev) 433static void pci_pm_default_resume(struct pci_dev *pci_dev)
428{ 434{
429 pci_fixup_device(pci_fixup_resume, pci_dev); 435 pci_fixup_device(pci_fixup_resume, pci_dev);
430 436
431 if (!pci_is_bridge(pci_dev)) 437 if (!pci_is_bridge(pci_dev))
432 pci_enable_wake(pci_dev, PCI_D0, false); 438 pci_enable_wake(pci_dev, PCI_D0, false);
433
434 return pci_pm_reenable_device(pci_dev);
435}
436
437static void pci_pm_default_suspend_generic(struct pci_dev *pci_dev)
438{
439 /* If device is enabled at this point, disable it */
440 pci_disable_enabled_device(pci_dev);
441 /*
442 * Save state with interrupts enabled, because in principle the bus the
443 * device is on may be put into a low power state after this code runs.
444 */
445 pci_save_state(pci_dev);
446} 439}
447 440
448static void pci_pm_default_suspend(struct pci_dev *pci_dev) 441static void pci_pm_default_suspend(struct pci_dev *pci_dev)
449{ 442{
450 pci_pm_default_suspend_generic(pci_dev); 443 /* Disable non-bridge devices without PM support */
451
452 if (!pci_is_bridge(pci_dev)) 444 if (!pci_is_bridge(pci_dev))
453 pci_prepare_to_sleep(pci_dev); 445 pci_disable_enabled_device(pci_dev);
454 446 pci_save_state(pci_dev);
455 pci_fixup_device(pci_fixup_suspend, pci_dev);
456} 447}
457 448
458static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev) 449static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
@@ -497,21 +488,49 @@ static void pci_pm_complete(struct device *dev)
497static int pci_pm_suspend(struct device *dev) 488static int pci_pm_suspend(struct device *dev)
498{ 489{
499 struct pci_dev *pci_dev = to_pci_dev(dev); 490 struct pci_dev *pci_dev = to_pci_dev(dev);
500 struct device_driver *drv = dev->driver; 491 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
501 int error = 0;
502 492
503 if (pci_has_legacy_pm_support(pci_dev)) 493 if (pci_has_legacy_pm_support(pci_dev))
504 return pci_legacy_suspend(dev, PMSG_SUSPEND); 494 return pci_legacy_suspend(dev, PMSG_SUSPEND);
505 495
506 if (drv && drv->pm && drv->pm->suspend) { 496 if (!pm) {
507 error = drv->pm->suspend(dev); 497 pci_pm_default_suspend(pci_dev);
508 suspend_report_result(drv->pm->suspend, error); 498 goto Fixup;
509 } 499 }
510 500
511 if (!error) 501 pci_dev->state_saved = false;
512 pci_pm_default_suspend(pci_dev);
513 502
514 return error; 503 if (pm->suspend) {
504 pci_power_t prev = pci_dev->current_state;
505 int error;
506
507 error = pm->suspend(dev);
508 suspend_report_result(pm->suspend, error);
509 if (error)
510 return error;
511
512 if (pci_dev->state_saved)
513 goto Fixup;
514
515 if (pci_dev->current_state != PCI_D0
516 && pci_dev->current_state != PCI_UNKNOWN) {
517 WARN_ONCE(pci_dev->current_state != prev,
518 "PCI PM: State of device not saved by %pF\n",
519 pm->suspend);
520 goto Fixup;
521 }
522 }
523
524 if (!pci_dev->state_saved) {
525 pci_save_state(pci_dev);
526 if (!pci_is_bridge(pci_dev))
527 pci_prepare_to_sleep(pci_dev);
528 }
529
530 Fixup:
531 pci_fixup_device(pci_fixup_suspend, pci_dev);
532
533 return 0;
515} 534}
516 535
517static int pci_pm_suspend_noirq(struct device *dev) 536static int pci_pm_suspend_noirq(struct device *dev)
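With the rewrite above, pci_pm_suspend() looks up struct dev_pm_ops through dev->driver->pm, runs the driver's ->suspend() callback, and only saves state and calls pci_prepare_to_sleep() itself when the driver did not already do so (tracked via state_saved). A sketch of a driver sitting on that path, with hypothetical names; a real driver would also fill in probe/remove and an id table:

    #include <linux/pci.h>
    #include <linux/pm.h>

    static int example_suspend(struct device *dev)
    {
            /* quiesce the hardware; leave state saving to the PCI core */
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            /* re-initialise the hardware */
            return 0;
    }

    static struct dev_pm_ops example_pm_ops = {
            .suspend = example_suspend,
            .resume  = example_resume,
    };

    static struct pci_driver example_pci_driver = {
            .name   = "example",
            .driver = { .pm = &example_pm_ops },    /* found via dev->driver->pm */
    };
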
@@ -554,7 +573,7 @@ static int pci_pm_resume_noirq(struct device *dev)
554static int pci_pm_resume(struct device *dev) 573static int pci_pm_resume(struct device *dev)
555{ 574{
556 struct pci_dev *pci_dev = to_pci_dev(dev); 575 struct pci_dev *pci_dev = to_pci_dev(dev);
557 struct device_driver *drv = dev->driver; 576 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
558 int error = 0; 577 int error = 0;
559 578
560 /* 579 /*
@@ -567,12 +586,16 @@ static int pci_pm_resume(struct device *dev)
567 if (pci_has_legacy_pm_support(pci_dev)) 586 if (pci_has_legacy_pm_support(pci_dev))
568 return pci_legacy_resume(dev); 587 return pci_legacy_resume(dev);
569 588
570 error = pci_pm_default_resume(pci_dev); 589 pci_pm_default_resume(pci_dev);
571 590
572 if (!error && drv && drv->pm && drv->pm->resume) 591 if (pm) {
573 error = drv->pm->resume(dev); 592 if (pm->resume)
593 error = pm->resume(dev);
594 } else {
595 pci_pm_reenable_device(pci_dev);
596 }
574 597
575 return error; 598 return 0;
576} 599}
577 600
578#else /* !CONFIG_SUSPEND */ 601#else /* !CONFIG_SUSPEND */
@@ -589,21 +612,31 @@ static int pci_pm_resume(struct device *dev)
589static int pci_pm_freeze(struct device *dev) 612static int pci_pm_freeze(struct device *dev)
590{ 613{
591 struct pci_dev *pci_dev = to_pci_dev(dev); 614 struct pci_dev *pci_dev = to_pci_dev(dev);
592 struct device_driver *drv = dev->driver; 615 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
593 int error = 0;
594 616
595 if (pci_has_legacy_pm_support(pci_dev)) 617 if (pci_has_legacy_pm_support(pci_dev))
596 return pci_legacy_suspend(dev, PMSG_FREEZE); 618 return pci_legacy_suspend(dev, PMSG_FREEZE);
597 619
598 if (drv && drv->pm && drv->pm->freeze) { 620 if (!pm) {
599 error = drv->pm->freeze(dev); 621 pci_pm_default_suspend(pci_dev);
600 suspend_report_result(drv->pm->freeze, error); 622 return 0;
601 } 623 }
602 624
603 if (!error) 625 pci_dev->state_saved = false;
604 pci_pm_default_suspend_generic(pci_dev);
605 626
606 return error; 627 if (pm->freeze) {
628 int error;
629
630 error = pm->freeze(dev);
631 suspend_report_result(pm->freeze, error);
632 if (error)
633 return error;
634 }
635
636 if (!pci_dev->state_saved)
637 pci_save_state(pci_dev);
638
639 return 0;
607} 640}
608 641
609static int pci_pm_freeze_noirq(struct device *dev) 642static int pci_pm_freeze_noirq(struct device *dev)
@@ -646,16 +679,18 @@ static int pci_pm_thaw_noirq(struct device *dev)
646static int pci_pm_thaw(struct device *dev) 679static int pci_pm_thaw(struct device *dev)
647{ 680{
648 struct pci_dev *pci_dev = to_pci_dev(dev); 681 struct pci_dev *pci_dev = to_pci_dev(dev);
649 struct device_driver *drv = dev->driver; 682 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
650 int error = 0; 683 int error = 0;
651 684
652 if (pci_has_legacy_pm_support(pci_dev)) 685 if (pci_has_legacy_pm_support(pci_dev))
653 return pci_legacy_resume(dev); 686 return pci_legacy_resume(dev);
654 687
655 pci_pm_reenable_device(pci_dev); 688 if (pm) {
656 689 if (pm->thaw)
657 if (drv && drv->pm && drv->pm->thaw) 690 error = pm->thaw(dev);
658 error = drv->pm->thaw(dev); 691 } else {
692 pci_pm_reenable_device(pci_dev);
693 }
659 694
660 return error; 695 return error;
661} 696}
@@ -663,22 +698,29 @@ static int pci_pm_thaw(struct device *dev)
663static int pci_pm_poweroff(struct device *dev) 698static int pci_pm_poweroff(struct device *dev)
664{ 699{
665 struct pci_dev *pci_dev = to_pci_dev(dev); 700 struct pci_dev *pci_dev = to_pci_dev(dev);
666 struct device_driver *drv = dev->driver; 701 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
667 int error = 0; 702 int error = 0;
668 703
669 if (pci_has_legacy_pm_support(pci_dev)) 704 if (pci_has_legacy_pm_support(pci_dev))
670 return pci_legacy_suspend(dev, PMSG_HIBERNATE); 705 return pci_legacy_suspend(dev, PMSG_HIBERNATE);
671 706
672 if (!drv || !drv->pm) 707 if (!pm) {
673 return 0; 708 pci_pm_default_suspend(pci_dev);
709 goto Fixup;
710 }
711
712 pci_dev->state_saved = false;
674 713
675 if (drv->pm->poweroff) { 714 if (pm->poweroff) {
676 error = drv->pm->poweroff(dev); 715 error = pm->poweroff(dev);
677 suspend_report_result(drv->pm->poweroff, error); 716 suspend_report_result(pm->poweroff, error);
678 } 717 }
679 718
680 if (!error) 719 if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
681 pci_pm_default_suspend(pci_dev); 720 pci_prepare_to_sleep(pci_dev);
721
722 Fixup:
723 pci_fixup_device(pci_fixup_suspend, pci_dev);
682 724
683 return error; 725 return error;
684} 726}
@@ -719,7 +761,7 @@ static int pci_pm_restore_noirq(struct device *dev)
719static int pci_pm_restore(struct device *dev) 761static int pci_pm_restore(struct device *dev)
720{ 762{
721 struct pci_dev *pci_dev = to_pci_dev(dev); 763 struct pci_dev *pci_dev = to_pci_dev(dev);
722 struct device_driver *drv = dev->driver; 764 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
723 int error = 0; 765 int error = 0;
724 766
725 /* 767 /*
@@ -732,10 +774,14 @@ static int pci_pm_restore(struct device *dev)
732 if (pci_has_legacy_pm_support(pci_dev)) 774 if (pci_has_legacy_pm_support(pci_dev))
733 return pci_legacy_resume(dev); 775 return pci_legacy_resume(dev);
734 776
735 error = pci_pm_default_resume(pci_dev); 777 pci_pm_default_resume(pci_dev);
736 778
737 if (!error && drv && drv->pm && drv->pm->restore) 779 if (pm) {
738 error = drv->pm->restore(dev); 780 if (pm->restore)
781 error = pm->restore(dev);
782 } else {
783 pci_pm_reenable_device(pci_dev);
784 }
739 785
740 return error; 786 return error;
741} 787}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index db7ec14fa719..dfc4e0ddf241 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -768,8 +768,8 @@ pci_read_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
768 return -EINVAL; 768 return -EINVAL;
769 769
770 rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */ 770 rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
771 if (!rom) 771 if (!rom || !size)
772 return 0; 772 return -EIO;
773 773
774 if (off >= size) 774 if (off >= size)
775 count = 0; 775 count = 0;
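The pci-sysfs change above makes a failed or zero-sized ROM mapping surface as -EIO to userspace instead of a silent zero-byte read. A hedged userspace sketch of the access pattern (the device path is an example only, and the ROM attribute has to be enabled by writing "1" before reading):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/bus/pci/devices/0000:01:00.0/rom";
            char buf[4096];
            ssize_t n;
            int fd = open(path, O_RDWR);

            if (fd < 0)
                    return 1;
            if (write(fd, "1", 1) < 0)              /* enable the ROM image */
                    perror("enable rom");
            n = read(fd, buf, sizeof(buf));
            if (n < 0)
                    fprintf(stderr, "rom read: %s\n", strerror(errno));
            close(fd);
            return n < 0;
    }
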
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 48807556b47a..e3efe6b19ee7 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1418,10 +1418,10 @@ int pci_restore_standard_config(struct pci_dev *dev)
1418 break; 1418 break;
1419 } 1419 }
1420 1420
1421 dev->current_state = PCI_D0; 1421 pci_update_current_state(dev, PCI_D0);
1422 1422
1423 Restore: 1423 Restore:
1424 return pci_restore_state(dev); 1424 return dev->state_saved ? pci_restore_state(dev) : 0;
1425} 1425}
1426 1426
1427/** 1427/**
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 586b6f75910d..b0367f168af4 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -718,9 +718,9 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
718 718
719 /* 719 /*
720 * All PCIe functions are in one slot, remove one function will remove 720 * All PCIe functions are in one slot, remove one function will remove
721 * the the whole slot, so just wait 721 * the whole slot, so just wait until we are the last function left.
722 */ 722 */
723 if (!list_empty(&parent->subordinate->devices)) 723 if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
724 goto out; 724 goto out;
725 725
726 /* All functions are removed, so just disable ASPM for the link */ 726 /* All functions are removed, so just disable ASPM for the link */
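The ASPM hunk above replaces list_empty() with list_is_last(): the function being removed is still linked on parent->subordinate->devices at this point, so the list can never be empty; the real question is whether pdev is the last function left on the slot. A minimal sketch of that test, assuming only the standard list helpers:

    #include <linux/list.h>
    #include <linux/pci.h>

    /* Non-zero when pdev is the only device remaining on the bus list. */
    static int example_last_function(struct pci_dev *pdev, struct pci_bus *bus)
    {
            return list_is_last(&pdev->bus_list, &bus->devices);
    }
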
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 99a914a027f8..f9b874eaeb9f 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -55,25 +55,13 @@ static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state)
55 55
56} 56}
57 57
58static int pcie_portdrv_suspend_late(struct pci_dev *dev, pm_message_t state)
59{
60 return pci_save_state(dev);
61}
62
63static int pcie_portdrv_resume_early(struct pci_dev *dev)
64{
65 return pci_restore_state(dev);
66}
67
68static int pcie_portdrv_resume(struct pci_dev *dev) 58static int pcie_portdrv_resume(struct pci_dev *dev)
69{ 59{
70 pcie_portdrv_restore_config(dev); 60 pci_set_master(dev);
71 return pcie_port_device_resume(dev); 61 return pcie_port_device_resume(dev);
72} 62}
73#else 63#else
74#define pcie_portdrv_suspend NULL 64#define pcie_portdrv_suspend NULL
75#define pcie_portdrv_suspend_late NULL
76#define pcie_portdrv_resume_early NULL
77#define pcie_portdrv_resume NULL 65#define pcie_portdrv_resume NULL
78#endif 66#endif
79 67
@@ -292,8 +280,6 @@ static struct pci_driver pcie_portdriver = {
292 .remove = pcie_portdrv_remove, 280 .remove = pcie_portdrv_remove,
293 281
294 .suspend = pcie_portdrv_suspend, 282 .suspend = pcie_portdrv_suspend,
295 .suspend_late = pcie_portdrv_suspend_late,
296 .resume_early = pcie_portdrv_resume_early,
297 .resume = pcie_portdrv_resume, 283 .resume = pcie_portdrv_resume,
298 284
299 .err_handler = &pcie_portdrv_err_handler, 285 .err_handler = &pcie_portdrv_err_handler,
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 132a78159b60..29cbe47f219f 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -63,7 +63,7 @@ void pci_disable_rom(struct pci_dev *pdev)
63 * The PCI window size could be much larger than the 63 * The PCI window size could be much larger than the
64 * actual image size. 64 * actual image size.
65 */ 65 */
66size_t pci_get_rom_size(void __iomem *rom, size_t size) 66size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
67{ 67{
68 void __iomem *image; 68 void __iomem *image;
69 int last_image; 69 int last_image;
@@ -72,8 +72,10 @@ size_t pci_get_rom_size(void __iomem *rom, size_t size)
72 do { 72 do {
73 void __iomem *pds; 73 void __iomem *pds;
74 /* Standard PCI ROMs start out with these bytes 55 AA */ 74 /* Standard PCI ROMs start out with these bytes 55 AA */
75 if (readb(image) != 0x55) 75 if (readb(image) != 0x55) {
76 dev_err(&pdev->dev, "Invalid ROM contents\n");
76 break; 77 break;
78 }
77 if (readb(image + 1) != 0xAA) 79 if (readb(image + 1) != 0xAA)
78 break; 80 break;
79 /* get the PCI data structure and check its signature */ 81 /* get the PCI data structure and check its signature */
@@ -159,7 +161,7 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
159 * size is much larger than the actual size of the ROM. 161 * size is much larger than the actual size of the ROM.
160 * True size is important if the ROM is going to be copied. 162 * True size is important if the ROM is going to be copied.
161 */ 163 */
162 *size = pci_get_rom_size(rom, *size); 164 *size = pci_get_rom_size(pdev, rom, *size);
163 return rom; 165 return rom;
164} 166}
165 167
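pci_get_rom_size() now takes the pci_dev so it can report a bogus ROM header through dev_err(). From a driver's point of view the entry points are unchanged: pci_map_rom() still returns the mapping and shrinks *size to the true image size. A sketch of typical usage, with a hypothetical helper name:

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/pci.h>
    #include <linux/io.h>

    static int example_copy_rom(struct pci_dev *pdev, void *dst, size_t len)
    {
            size_t size;
            void __iomem *rom = pci_map_rom(pdev, &size);   /* size = true image size */

            if (!rom)
                    return -ENODEV;
            memcpy_fromio(dst, rom, min(len, size));
            pci_unmap_rom(pdev, rom);
            return 0;
    }
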
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 1a266d4ab5f1..94363115a42a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -42,6 +42,7 @@ config ASUS_LAPTOP
42 depends on LEDS_CLASS 42 depends on LEDS_CLASS
43 depends on NEW_LEDS 43 depends on NEW_LEDS
44 depends on BACKLIGHT_CLASS_DEVICE 44 depends on BACKLIGHT_CLASS_DEVICE
45 depends on INPUT
45 ---help--- 46 ---help---
46 This is the new Linux driver for Asus laptops. It may also support some 47 This is the new Linux driver for Asus laptops. It may also support some
47 MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate 48 MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 8fb8b3591048..56af6cf385b0 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -46,6 +46,7 @@
46#include <acpi/acpi_drivers.h> 46#include <acpi/acpi_drivers.h>
47#include <acpi/acpi_bus.h> 47#include <acpi/acpi_bus.h>
48#include <asm/uaccess.h> 48#include <asm/uaccess.h>
49#include <linux/input.h>
49 50
50#define ASUS_LAPTOP_VERSION "0.42" 51#define ASUS_LAPTOP_VERSION "0.42"
51 52
@@ -181,6 +182,8 @@ struct asus_hotk {
181 u8 light_level; //light sensor level 182 u8 light_level; //light sensor level
182 u8 light_switch; //light sensor switch value 183 u8 light_switch; //light sensor switch value
183 u16 event_count[128]; //count for each event TODO make this better 184 u16 event_count[128]; //count for each event TODO make this better
185 struct input_dev *inputdev;
186 u16 *keycode_map;
184}; 187};
185 188
186/* 189/*
@@ -250,6 +253,37 @@ ASUS_LED(rled, "record");
250ASUS_LED(pled, "phone"); 253ASUS_LED(pled, "phone");
251ASUS_LED(gled, "gaming"); 254ASUS_LED(gled, "gaming");
252 255
256struct key_entry {
257 char type;
258 u8 code;
259 u16 keycode;
260};
261
262enum { KE_KEY, KE_END };
263
264static struct key_entry asus_keymap[] = {
265 {KE_KEY, 0x30, KEY_VOLUMEUP},
266 {KE_KEY, 0x31, KEY_VOLUMEDOWN},
267 {KE_KEY, 0x32, KEY_MUTE},
268 {KE_KEY, 0x33, KEY_SWITCHVIDEOMODE},
269 {KE_KEY, 0x34, KEY_SWITCHVIDEOMODE},
270 {KE_KEY, 0x40, KEY_PREVIOUSSONG},
271 {KE_KEY, 0x41, KEY_NEXTSONG},
272 {KE_KEY, 0x43, KEY_STOP},
273 {KE_KEY, 0x45, KEY_PLAYPAUSE},
274 {KE_KEY, 0x50, KEY_EMAIL},
275 {KE_KEY, 0x51, KEY_WWW},
276 {KE_KEY, 0x5C, BTN_EXTRA}, /* Performance */
277 {KE_KEY, 0x5D, KEY_WLAN},
278 {KE_KEY, 0x61, KEY_SWITCHVIDEOMODE},
279 {KE_KEY, 0x6B, BTN_TOUCH}, /* Lock Mouse */
280 {KE_KEY, 0x82, KEY_CAMERA},
281 {KE_KEY, 0x8A, KEY_TV},
282 {KE_KEY, 0x95, KEY_MEDIA},
283 {KE_KEY, 0x99, KEY_PHONE},
284 {KE_END, 0},
285};
286
253/* 287/*
254 * This function evaluates an ACPI method, given an int as parameter, the 288 * This function evaluates an ACPI method, given an int as parameter, the
255 * method is searched within the scope of the handle, can be NULL. The output 289 * method is searched within the scope of the handle, can be NULL. The output
@@ -720,8 +754,68 @@ static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
720 return store_status(buf, count, NULL, GPS_ON); 754 return store_status(buf, count, NULL, GPS_ON);
721} 755}
722 756
757/*
758 * Hotkey functions
759 */
760static struct key_entry *asus_get_entry_by_scancode(int code)
761{
762 struct key_entry *key;
763
764 for (key = asus_keymap; key->type != KE_END; key++)
765 if (code == key->code)
766 return key;
767
768 return NULL;
769}
770
771static struct key_entry *asus_get_entry_by_keycode(int code)
772{
773 struct key_entry *key;
774
775 for (key = asus_keymap; key->type != KE_END; key++)
776 if (code == key->keycode && key->type == KE_KEY)
777 return key;
778
779 return NULL;
780}
781
782static int asus_getkeycode(struct input_dev *dev, int scancode, int *keycode)
783{
784 struct key_entry *key = asus_get_entry_by_scancode(scancode);
785
786 if (key && key->type == KE_KEY) {
787 *keycode = key->keycode;
788 return 0;
789 }
790
791 return -EINVAL;
792}
793
794static int asus_setkeycode(struct input_dev *dev, int scancode, int keycode)
795{
796 struct key_entry *key;
797 int old_keycode;
798
799 if (keycode < 0 || keycode > KEY_MAX)
800 return -EINVAL;
801
802 key = asus_get_entry_by_scancode(scancode);
803 if (key && key->type == KE_KEY) {
804 old_keycode = key->keycode;
805 key->keycode = keycode;
806 set_bit(keycode, dev->keybit);
807 if (!asus_get_entry_by_keycode(old_keycode))
808 clear_bit(old_keycode, dev->keybit);
809 return 0;
810 }
811
812 return -EINVAL;
813}
814
723static void asus_hotk_notify(acpi_handle handle, u32 event, void *data) 815static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
724{ 816{
817 static struct key_entry *key;
818
725 /* TODO Find a better way to handle events count. */ 819 /* TODO Find a better way to handle events count. */
726 if (!hotk) 820 if (!hotk)
727 return; 821 return;
@@ -738,10 +832,24 @@ static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
738 lcd_blank(FB_BLANK_POWERDOWN); 832 lcd_blank(FB_BLANK_POWERDOWN);
739 } 833 }
740 834
741 acpi_bus_generate_proc_event(hotk->device, event, 835 acpi_bus_generate_netlink_event(hotk->device->pnp.device_class,
742 hotk->event_count[event % 128]++); 836 dev_name(&hotk->device->dev), event,
743 837 hotk->event_count[event % 128]++);
744 return; 838
839 if (hotk->inputdev) {
840 key = asus_get_entry_by_scancode(event);
841 if (!key)
842 return ;
843
844 switch (key->type) {
845 case KE_KEY:
846 input_report_key(hotk->inputdev, key->keycode, 1);
847 input_sync(hotk->inputdev);
848 input_report_key(hotk->inputdev, key->keycode, 0);
849 input_sync(hotk->inputdev);
850 break;
851 }
852 }
745} 853}
746 854
747#define ASUS_CREATE_DEVICE_ATTR(_name) \ 855#define ASUS_CREATE_DEVICE_ATTR(_name) \
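Because the input device registered below wires up getkeycode/setkeycode, the scancode-to-keycode table added above can be remapped from userspace with the EVIOCSKEYCODE ioctl. A hedged sketch (the event node and the chosen mapping are examples only):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/input.h>

    int main(void)
    {
            int map[2] = { 0x5C, KEY_PROG1 };       /* scancode 0x5C -> KEY_PROG1 */
            int fd = open("/dev/input/event5", O_RDWR);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, EVIOCSKEYCODE, map))
                    perror("EVIOCSKEYCODE");
            close(fd);
            return 0;
    }
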
@@ -959,6 +1067,38 @@ static int asus_hotk_get_info(void)
959 return AE_OK; 1067 return AE_OK;
960} 1068}
961 1069
1070static int asus_input_init(void)
1071{
1072 const struct key_entry *key;
1073 int result;
1074
1075 hotk->inputdev = input_allocate_device();
1076 if (!hotk->inputdev) {
1077 printk(ASUS_INFO "Unable to allocate input device\n");
1078 return 0;
1079 }
1080 hotk->inputdev->name = "Asus Laptop extra buttons";
1081 hotk->inputdev->phys = ASUS_HOTK_FILE "/input0";
1082 hotk->inputdev->id.bustype = BUS_HOST;
1083 hotk->inputdev->getkeycode = asus_getkeycode;
1084 hotk->inputdev->setkeycode = asus_setkeycode;
1085
1086 for (key = asus_keymap; key->type != KE_END; key++) {
1087 switch (key->type) {
1088 case KE_KEY:
1089 set_bit(EV_KEY, hotk->inputdev->evbit);
1090 set_bit(key->keycode, hotk->inputdev->keybit);
1091 break;
1092 }
1093 }
1094 result = input_register_device(hotk->inputdev);
1095 if (result) {
1096 printk(ASUS_INFO "Unable to register input device\n");
1097 input_free_device(hotk->inputdev);
1098 }
1099 return result;
1100}
1101
962static int asus_hotk_check(void) 1102static int asus_hotk_check(void)
963{ 1103{
964 int result = 0; 1104 int result = 0;
@@ -1044,7 +1184,7 @@ static int asus_hotk_add(struct acpi_device *device)
1044 /* GPS is on by default */ 1184 /* GPS is on by default */
1045 write_status(NULL, 1, GPS_ON); 1185 write_status(NULL, 1, GPS_ON);
1046 1186
1047 end: 1187end:
1048 if (result) { 1188 if (result) {
1049 kfree(hotk->name); 1189 kfree(hotk->name);
1050 kfree(hotk); 1190 kfree(hotk);
@@ -1091,10 +1231,17 @@ static void asus_led_exit(void)
1091 ASUS_LED_UNREGISTER(gled); 1231 ASUS_LED_UNREGISTER(gled);
1092} 1232}
1093 1233
1234static void asus_input_exit(void)
1235{
1236 if (hotk->inputdev)
1237 input_unregister_device(hotk->inputdev);
1238}
1239
1094static void __exit asus_laptop_exit(void) 1240static void __exit asus_laptop_exit(void)
1095{ 1241{
1096 asus_backlight_exit(); 1242 asus_backlight_exit();
1097 asus_led_exit(); 1243 asus_led_exit();
1244 asus_input_exit();
1098 1245
1099 acpi_bus_unregister_driver(&asus_hotk_driver); 1246 acpi_bus_unregister_driver(&asus_hotk_driver);
1100 sysfs_remove_group(&asuspf_device->dev.kobj, &asuspf_attribute_group); 1247 sysfs_remove_group(&asuspf_device->dev.kobj, &asuspf_attribute_group);
@@ -1216,6 +1363,10 @@ static int __init asus_laptop_init(void)
1216 printk(ASUS_INFO "Brightness ignored, must be controlled by " 1363 printk(ASUS_INFO "Brightness ignored, must be controlled by "
1217 "ACPI video driver\n"); 1364 "ACPI video driver\n");
1218 1365
1366 result = asus_input_init();
1367 if (result)
1368 goto fail_input;
1369
1219 result = asus_led_init(dev); 1370 result = asus_led_init(dev);
1220 if (result) 1371 if (result)
1221 goto fail_led; 1372 goto fail_led;
@@ -1242,22 +1393,25 @@ static int __init asus_laptop_init(void)
1242 1393
1243 return 0; 1394 return 0;
1244 1395
1245 fail_sysfs: 1396fail_sysfs:
1246 platform_device_del(asuspf_device); 1397 platform_device_del(asuspf_device);
1247 1398
1248 fail_platform_device2: 1399fail_platform_device2:
1249 platform_device_put(asuspf_device); 1400 platform_device_put(asuspf_device);
1250 1401
1251 fail_platform_device1: 1402fail_platform_device1:
1252 platform_driver_unregister(&asuspf_driver); 1403 platform_driver_unregister(&asuspf_driver);
1253 1404
1254 fail_platform_driver: 1405fail_platform_driver:
1255 asus_led_exit(); 1406 asus_led_exit();
1256 1407
1257 fail_led: 1408fail_led:
1409 asus_input_exit();
1410
1411fail_input:
1258 asus_backlight_exit(); 1412 asus_backlight_exit();
1259 1413
1260 fail_backlight: 1414fail_backlight:
1261 1415
1262 return result; 1416 return result;
1263} 1417}
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index 1e74988c7b2d..d63f26e666a4 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -143,6 +143,7 @@ struct asus_hotk {
143 S1300N, S5200N*/ 143 S1300N, S5200N*/
144 A4S, /* Z81sp */ 144 A4S, /* Z81sp */
145 F3Sa, /* (Centrino) */ 145 F3Sa, /* (Centrino) */
146 R1F,
146 END_MODEL 147 END_MODEL
147 } model; /* Models currently supported */ 148 } model; /* Models currently supported */
148 u16 event_count[128]; /* Count for each event TODO make this better */ 149 u16 event_count[128]; /* Count for each event TODO make this better */
@@ -420,7 +421,18 @@ static struct model_data model_conf[END_MODEL] = {
420 .display_get = "\\ADVG", 421 .display_get = "\\ADVG",
421 .display_set = "SDSP", 422 .display_set = "SDSP",
422 }, 423 },
423 424 {
425 .name = "R1F",
426 .mt_bt_switch = "BLED",
427 .mt_mled = "MLED",
428 .mt_wled = "WLED",
429 .mt_lcd_switch = "\\Q10",
430 .lcd_status = "\\GP06",
431 .brightness_set = "SPLV",
432 .brightness_get = "GPLV",
433 .display_set = "SDSP",
434 .display_get = "\\INFB"
435 }
424}; 436};
425 437
426/* procdir we use */ 438/* procdir we use */
@@ -1165,6 +1177,8 @@ static int asus_model_match(char *model)
1165 return W3V; 1177 return W3V;
1166 else if (strncmp(model, "W5A", 3) == 0) 1178 else if (strncmp(model, "W5A", 3) == 0)
1167 return W5A; 1179 return W5A;
1180 else if (strncmp(model, "R1F", 3) == 0)
1181 return R1F;
1168 else if (strncmp(model, "A4S", 3) == 0) 1182 else if (strncmp(model, "A4S", 3) == 0)
1169 return A4S; 1183 return A4S;
1170 else if (strncmp(model, "F3Sa", 4) == 0) 1184 else if (strncmp(model, "F3Sa", 4) == 0)
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 9d93cb971e59..786ed8661cb0 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -30,6 +30,7 @@
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31#include <linux/input.h> 31#include <linux/input.h>
32#include <linux/rfkill.h> 32#include <linux/rfkill.h>
33#include <linux/pci.h>
33 34
34#define EEEPC_LAPTOP_VERSION "0.1" 35#define EEEPC_LAPTOP_VERSION "0.1"
35 36
@@ -161,6 +162,10 @@ static struct key_entry eeepc_keymap[] = {
161 {KE_KEY, 0x13, KEY_MUTE }, 162 {KE_KEY, 0x13, KEY_MUTE },
162 {KE_KEY, 0x14, KEY_VOLUMEDOWN }, 163 {KE_KEY, 0x14, KEY_VOLUMEDOWN },
163 {KE_KEY, 0x15, KEY_VOLUMEUP }, 164 {KE_KEY, 0x15, KEY_VOLUMEUP },
165 {KE_KEY, 0x1a, KEY_COFFEE },
166 {KE_KEY, 0x1b, KEY_ZOOM },
167 {KE_KEY, 0x1c, KEY_PROG2 },
168 {KE_KEY, 0x1d, KEY_PROG3 },
164 {KE_KEY, 0x30, KEY_SWITCHVIDEOMODE }, 169 {KE_KEY, 0x30, KEY_SWITCHVIDEOMODE },
165 {KE_KEY, 0x31, KEY_SWITCHVIDEOMODE }, 170 {KE_KEY, 0x31, KEY_SWITCHVIDEOMODE },
166 {KE_KEY, 0x32, KEY_SWITCHVIDEOMODE }, 171 {KE_KEY, 0x32, KEY_SWITCHVIDEOMODE },
@@ -510,7 +515,43 @@ static int eeepc_hotk_check(void)
510static void notify_brn(void) 515static void notify_brn(void)
511{ 516{
512 struct backlight_device *bd = eeepc_backlight_device; 517 struct backlight_device *bd = eeepc_backlight_device;
513 bd->props.brightness = read_brightness(bd); 518 if (bd)
519 bd->props.brightness = read_brightness(bd);
520}
521
522static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
523{
524 struct pci_dev *dev;
525 struct pci_bus *bus = pci_find_bus(0, 1);
526
527 if (event != ACPI_NOTIFY_BUS_CHECK)
528 return;
529
530 if (!bus) {
531 printk(EEEPC_WARNING "Unable to find PCI bus 1?\n");
532 return;
533 }
534
535 if (get_acpi(CM_ASL_WLAN) == 1) {
536 dev = pci_get_slot(bus, 0);
537 if (dev) {
538 /* Device already present */
539 pci_dev_put(dev);
540 return;
541 }
542 dev = pci_scan_single_device(bus, 0);
543 if (dev) {
544 pci_bus_assign_resources(bus);
545 if (pci_bus_add_device(dev))
546 printk(EEEPC_ERR "Unable to hotplug wifi\n");
547 }
548 } else {
549 dev = pci_get_slot(bus, 0);
550 if (dev) {
551 pci_remove_bus_device(dev);
552 pci_dev_put(dev);
553 }
554 }
514} 555}
515 556
516static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data) 557static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
@@ -520,8 +561,9 @@ static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
520 return; 561 return;
521 if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) 562 if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
522 notify_brn(); 563 notify_brn();
523 acpi_bus_generate_proc_event(ehotk->device, event, 564 acpi_bus_generate_netlink_event(ehotk->device->pnp.device_class,
524 ehotk->event_count[event % 128]++); 565 dev_name(&ehotk->device->dev), event,
566 ehotk->event_count[event % 128]++);
525 if (ehotk->inputdev) { 567 if (ehotk->inputdev) {
526 key = eepc_get_entry_by_scancode(event); 568 key = eepc_get_entry_by_scancode(event);
527 if (key) { 569 if (key) {
@@ -539,6 +581,45 @@ static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
539 } 581 }
540} 582}
541 583
584static int eeepc_register_rfkill_notifier(char *node)
585{
586 acpi_status status = AE_OK;
587 acpi_handle handle;
588
589 status = acpi_get_handle(NULL, node, &handle);
590
591 if (ACPI_SUCCESS(status)) {
592 status = acpi_install_notify_handler(handle,
593 ACPI_SYSTEM_NOTIFY,
594 eeepc_rfkill_notify,
595 NULL);
596 if (ACPI_FAILURE(status))
597 printk(EEEPC_WARNING
598 "Failed to register notify on %s\n", node);
599 } else
600 return -ENODEV;
601
602 return 0;
603}
604
605static void eeepc_unregister_rfkill_notifier(char *node)
606{
607 acpi_status status = AE_OK;
608 acpi_handle handle;
609
610 status = acpi_get_handle(NULL, node, &handle);
611
612 if (ACPI_SUCCESS(status)) {
613 status = acpi_remove_notify_handler(handle,
614 ACPI_SYSTEM_NOTIFY,
615 eeepc_rfkill_notify);
616 if (ACPI_FAILURE(status))
617 printk(EEEPC_ERR
618 "Error removing rfkill notify handler %s\n",
619 node);
620 }
621}
622
542static int eeepc_hotk_add(struct acpi_device *device) 623static int eeepc_hotk_add(struct acpi_device *device)
543{ 624{
544 acpi_status status = AE_OK; 625 acpi_status status = AE_OK;
@@ -558,7 +639,7 @@ static int eeepc_hotk_add(struct acpi_device *device)
558 ehotk->device = device; 639 ehotk->device = device;
559 result = eeepc_hotk_check(); 640 result = eeepc_hotk_check();
560 if (result) 641 if (result)
561 goto end; 642 goto ehotk_fail;
562 status = acpi_install_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY, 643 status = acpi_install_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
563 eeepc_hotk_notify, ehotk); 644 eeepc_hotk_notify, ehotk);
564 if (ACPI_FAILURE(status)) 645 if (ACPI_FAILURE(status))
@@ -569,18 +650,25 @@ static int eeepc_hotk_add(struct acpi_device *device)
569 RFKILL_TYPE_WLAN); 650 RFKILL_TYPE_WLAN);
570 651
571 if (!ehotk->eeepc_wlan_rfkill) 652 if (!ehotk->eeepc_wlan_rfkill)
572 goto end; 653 goto wlan_fail;
573 654
574 ehotk->eeepc_wlan_rfkill->name = "eeepc-wlan"; 655 ehotk->eeepc_wlan_rfkill->name = "eeepc-wlan";
575 ehotk->eeepc_wlan_rfkill->toggle_radio = eeepc_wlan_rfkill_set; 656 ehotk->eeepc_wlan_rfkill->toggle_radio = eeepc_wlan_rfkill_set;
576 ehotk->eeepc_wlan_rfkill->get_state = eeepc_wlan_rfkill_state; 657 ehotk->eeepc_wlan_rfkill->get_state = eeepc_wlan_rfkill_state;
577 if (get_acpi(CM_ASL_WLAN) == 1) 658 if (get_acpi(CM_ASL_WLAN) == 1) {
578 ehotk->eeepc_wlan_rfkill->state = 659 ehotk->eeepc_wlan_rfkill->state =
579 RFKILL_STATE_UNBLOCKED; 660 RFKILL_STATE_UNBLOCKED;
580 else 661 rfkill_set_default(RFKILL_TYPE_WLAN,
662 RFKILL_STATE_UNBLOCKED);
663 } else {
581 ehotk->eeepc_wlan_rfkill->state = 664 ehotk->eeepc_wlan_rfkill->state =
582 RFKILL_STATE_SOFT_BLOCKED; 665 RFKILL_STATE_SOFT_BLOCKED;
583 rfkill_register(ehotk->eeepc_wlan_rfkill); 666 rfkill_set_default(RFKILL_TYPE_WLAN,
667 RFKILL_STATE_SOFT_BLOCKED);
668 }
669 result = rfkill_register(ehotk->eeepc_wlan_rfkill);
670 if (result)
671 goto wlan_fail;
584 } 672 }
585 673
586 if (get_acpi(CM_ASL_BLUETOOTH) != -1) { 674 if (get_acpi(CM_ASL_BLUETOOTH) != -1) {
@@ -588,27 +676,47 @@ static int eeepc_hotk_add(struct acpi_device *device)
588 rfkill_allocate(&device->dev, RFKILL_TYPE_BLUETOOTH); 676 rfkill_allocate(&device->dev, RFKILL_TYPE_BLUETOOTH);
589 677
590 if (!ehotk->eeepc_bluetooth_rfkill) 678 if (!ehotk->eeepc_bluetooth_rfkill)
591 goto end; 679 goto bluetooth_fail;
592 680
593 ehotk->eeepc_bluetooth_rfkill->name = "eeepc-bluetooth"; 681 ehotk->eeepc_bluetooth_rfkill->name = "eeepc-bluetooth";
594 ehotk->eeepc_bluetooth_rfkill->toggle_radio = 682 ehotk->eeepc_bluetooth_rfkill->toggle_radio =
595 eeepc_bluetooth_rfkill_set; 683 eeepc_bluetooth_rfkill_set;
596 ehotk->eeepc_bluetooth_rfkill->get_state = 684 ehotk->eeepc_bluetooth_rfkill->get_state =
597 eeepc_bluetooth_rfkill_state; 685 eeepc_bluetooth_rfkill_state;
598 if (get_acpi(CM_ASL_BLUETOOTH) == 1) 686 if (get_acpi(CM_ASL_BLUETOOTH) == 1) {
599 ehotk->eeepc_bluetooth_rfkill->state = 687 ehotk->eeepc_bluetooth_rfkill->state =
600 RFKILL_STATE_UNBLOCKED; 688 RFKILL_STATE_UNBLOCKED;
601 else 689 rfkill_set_default(RFKILL_TYPE_BLUETOOTH,
690 RFKILL_STATE_UNBLOCKED);
691 } else {
602 ehotk->eeepc_bluetooth_rfkill->state = 692 ehotk->eeepc_bluetooth_rfkill->state =
603 RFKILL_STATE_SOFT_BLOCKED; 693 RFKILL_STATE_SOFT_BLOCKED;
604 rfkill_register(ehotk->eeepc_bluetooth_rfkill); 694 rfkill_set_default(RFKILL_TYPE_BLUETOOTH,
605 } 695 RFKILL_STATE_SOFT_BLOCKED);
696 }
606 697
607 end: 698 result = rfkill_register(ehotk->eeepc_bluetooth_rfkill);
608 if (result) { 699 if (result)
609 kfree(ehotk); 700 goto bluetooth_fail;
610 ehotk = NULL;
611 } 701 }
702
703 eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
704 eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
705
706 return 0;
707
708 bluetooth_fail:
709 if (ehotk->eeepc_bluetooth_rfkill)
710 rfkill_free(ehotk->eeepc_bluetooth_rfkill);
711 rfkill_unregister(ehotk->eeepc_wlan_rfkill);
712 ehotk->eeepc_wlan_rfkill = NULL;
713 wlan_fail:
714 if (ehotk->eeepc_wlan_rfkill)
715 rfkill_free(ehotk->eeepc_wlan_rfkill);
716 ehotk_fail:
717 kfree(ehotk);
718 ehotk = NULL;
719
612 return result; 720 return result;
613} 721}
614 722
@@ -622,6 +730,10 @@ static int eeepc_hotk_remove(struct acpi_device *device, int type)
622 eeepc_hotk_notify); 730 eeepc_hotk_notify);
623 if (ACPI_FAILURE(status)) 731 if (ACPI_FAILURE(status))
624 printk(EEEPC_ERR "Error removing notify handler\n"); 732 printk(EEEPC_ERR "Error removing notify handler\n");
733
734 eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
735 eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
736
625 kfree(ehotk); 737 kfree(ehotk);
626 return 0; 738 return 0;
627} 739}
@@ -737,13 +849,21 @@ static void eeepc_backlight_exit(void)
737{ 849{
738 if (eeepc_backlight_device) 850 if (eeepc_backlight_device)
739 backlight_device_unregister(eeepc_backlight_device); 851 backlight_device_unregister(eeepc_backlight_device);
740 if (ehotk->inputdev) 852 eeepc_backlight_device = NULL;
741 input_unregister_device(ehotk->inputdev); 853}
854
855static void eeepc_rfkill_exit(void)
856{
742 if (ehotk->eeepc_wlan_rfkill) 857 if (ehotk->eeepc_wlan_rfkill)
743 rfkill_unregister(ehotk->eeepc_wlan_rfkill); 858 rfkill_unregister(ehotk->eeepc_wlan_rfkill);
744 if (ehotk->eeepc_bluetooth_rfkill) 859 if (ehotk->eeepc_bluetooth_rfkill)
745 rfkill_unregister(ehotk->eeepc_bluetooth_rfkill); 860 rfkill_unregister(ehotk->eeepc_bluetooth_rfkill);
746 eeepc_backlight_device = NULL; 861}
862
863static void eeepc_input_exit(void)
864{
865 if (ehotk->inputdev)
866 input_unregister_device(ehotk->inputdev);
747} 867}
748 868
749static void eeepc_hwmon_exit(void) 869static void eeepc_hwmon_exit(void)
@@ -762,6 +882,8 @@ static void eeepc_hwmon_exit(void)
762static void __exit eeepc_laptop_exit(void) 882static void __exit eeepc_laptop_exit(void)
763{ 883{
764 eeepc_backlight_exit(); 884 eeepc_backlight_exit();
885 eeepc_rfkill_exit();
886 eeepc_input_exit();
765 eeepc_hwmon_exit(); 887 eeepc_hwmon_exit();
766 acpi_bus_unregister_driver(&eeepc_hotk_driver); 888 acpi_bus_unregister_driver(&eeepc_hotk_driver);
767 sysfs_remove_group(&platform_device->dev.kobj, 889 sysfs_remove_group(&platform_device->dev.kobj,
@@ -865,6 +987,8 @@ fail_platform_driver:
865fail_hwmon: 987fail_hwmon:
866 eeepc_backlight_exit(); 988 eeepc_backlight_exit();
867fail_backlight: 989fail_backlight:
990 eeepc_input_exit();
991 eeepc_rfkill_exit();
868 return result; 992 return result;
869} 993}
870 994
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index de91ddab0a86..f41135f2fb29 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -463,9 +463,11 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
463 463
464 return 0; 464 return 0;
465register_wwan_err: 465register_wwan_err:
466 rfkill_unregister(bluetooth_rfkill); 466 if (bluetooth_rfkill)
467 rfkill_unregister(bluetooth_rfkill);
467register_bluetooth_error: 468register_bluetooth_error:
468 rfkill_unregister(wifi_rfkill); 469 if (wifi_rfkill)
470 rfkill_unregister(wifi_rfkill);
469add_sysfs_error: 471add_sysfs_error:
470 cleanup_sysfs(device); 472 cleanup_sysfs(device);
471 return err; 473 return err;
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index f30db367c82e..c47a44dcb702 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -507,7 +507,7 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
507 507
508 hkey_num = result & 0xf; 508 hkey_num = result & 0xf;
509 509
510 if (hkey_num < 0 || hkey_num > ARRAY_SIZE(pcc->keymap)) { 510 if (hkey_num < 0 || hkey_num >= ARRAY_SIZE(pcc->keymap)) {
511 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 511 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
512 "hotkey number out of range: %d\n", 512 "hotkey number out of range: %d\n",
513 hkey_num)); 513 hkey_num));
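The panasonic-laptop fix above is a plain off-by-one: for a keymap with ARRAY_SIZE(pcc->keymap) entries the valid indices are 0 .. ARRAY_SIZE-1, so the bounds test must reject hkey_num >= ARRAY_SIZE, not only > ARRAY_SIZE. The same shape as a tiny sketch with hypothetical names:

    #include <linux/errno.h>
    #include <linux/types.h>

    static int example_lookup(const u16 *map, size_t n, unsigned int idx)
    {
            if (idx >= n)           /* idx == n is already one past the end */
                    return -EINVAL;
            return map[idx];
    }
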
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index cced4d108319..81450fbd8b12 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -241,6 +241,12 @@ config RTC_DRV_M41T80_WDT
241 If you say Y here you will get support for the 241 If you say Y here you will get support for the
242 watchdog timer in the ST M41T60 and M41T80 RTC chips series. 242 watchdog timer in the ST M41T60 and M41T80 RTC chips series.
243 243
244config RTC_DRV_DM355EVM
245 tristate "TI DaVinci DM355 EVM RTC"
246 depends on MFD_DM355EVM_MSP
247 help
248 Supports the RTC firmware in the MSP430 on the DM355 EVM.
249
244config RTC_DRV_TWL92330 250config RTC_DRV_TWL92330
245 boolean "TI TWL92330/Menelaus" 251 boolean "TI TWL92330/Menelaus"
246 depends on MENELAUS 252 depends on MENELAUS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 6e28021abb9d..0e697aa51caa 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
23obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o 23obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o
24obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o 24obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
25obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o 25obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
26obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
26obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o 27obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
27obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o 28obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o
28obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o 29obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o
diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c
new file mode 100644
index 000000000000..58d4e18530da
--- /dev/null
+++ b/drivers/rtc/rtc-dm355evm.c
@@ -0,0 +1,175 @@
1/*
2 * rtc-dm355evm.c - access battery-backed counter in MSP430 firmware
3 *
4 * Copyright (c) 2008 by David Brownell
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/rtc.h>
14#include <linux/platform_device.h>
15
16#include <linux/i2c/dm355evm_msp.h>
17
18
19/*
20 * The MSP430 firmware on the DM355 EVM uses a watch crystal to feed
21 * a 1 Hz counter. When a backup battery is supplied, that makes a
22 * reasonable RTC for applications where alarms and non-NTP drift
23 * compensation aren't important.
24 *
25 * The only real glitch is the inability to read or write all four
26 * counter bytes atomically: the count may increment in the middle
27 * of an operation, causing trouble when the LSB rolls over.
28 *
29 * This driver was tested with firmware revision A4.
30 */
31union evm_time {
32 u8 bytes[4];
33 u32 value;
34};
35
36static int dm355evm_rtc_read_time(struct device *dev, struct rtc_time *tm)
37{
38 union evm_time time;
39 int status;
40 int tries = 0;
41
42 do {
43 /*
44 * Read LSB(0) to MSB(3) bytes. Defend against the counter
45 * rolling over by re-reading until the value is stable,
46 * and assuming the four reads take at most a few seconds.
47 */
48 status = dm355evm_msp_read(DM355EVM_MSP_RTC_0);
49 if (status < 0)
50 return status;
51 if (tries && time.bytes[0] == status)
52 break;
53 time.bytes[0] = status;
54
55 status = dm355evm_msp_read(DM355EVM_MSP_RTC_1);
56 if (status < 0)
57 return status;
58 if (tries && time.bytes[1] == status)
59 break;
60 time.bytes[1] = status;
61
62 status = dm355evm_msp_read(DM355EVM_MSP_RTC_2);
63 if (status < 0)
64 return status;
65 if (tries && time.bytes[2] == status)
66 break;
67 time.bytes[2] = status;
68
69 status = dm355evm_msp_read(DM355EVM_MSP_RTC_3);
70 if (status < 0)
71 return status;
72 if (tries && time.bytes[3] == status)
73 break;
74 time.bytes[3] = status;
75
76 } while (++tries < 5);
77
78 dev_dbg(dev, "read timestamp %08x\n", time.value);
79
80 rtc_time_to_tm(le32_to_cpu(time.value), tm);
81 return 0;
82}
83
84static int dm355evm_rtc_set_time(struct device *dev, struct rtc_time *tm)
85{
86 union evm_time time;
87 unsigned long value;
88 int status;
89
90 rtc_tm_to_time(tm, &value);
91 time.value = cpu_to_le32(value);
92
93 dev_dbg(dev, "write timestamp %08x\n", time.value);
94
95 /*
96 * REVISIT handle non-atomic writes ... maybe just retry until
97 * byte[1] sticks (no rollover)?
98 */
99 status = dm355evm_msp_write(time.bytes[0], DM355EVM_MSP_RTC_0);
100 if (status < 0)
101 return status;
102
103 status = dm355evm_msp_write(time.bytes[1], DM355EVM_MSP_RTC_1);
104 if (status < 0)
105 return status;
106
107 status = dm355evm_msp_write(time.bytes[2], DM355EVM_MSP_RTC_2);
108 if (status < 0)
109 return status;
110
111 status = dm355evm_msp_write(time.bytes[3], DM355EVM_MSP_RTC_3);
112 if (status < 0)
113 return status;
114
115 return 0;
116}
117
118static struct rtc_class_ops dm355evm_rtc_ops = {
119 .read_time = dm355evm_rtc_read_time,
120 .set_time = dm355evm_rtc_set_time,
121};
122
123/*----------------------------------------------------------------------*/
124
125static int __devinit dm355evm_rtc_probe(struct platform_device *pdev)
126{
127 struct rtc_device *rtc;
128
129 rtc = rtc_device_register(pdev->name,
130 &pdev->dev, &dm355evm_rtc_ops, THIS_MODULE);
131 if (IS_ERR(rtc)) {
132 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
133 PTR_ERR(rtc));
134 return PTR_ERR(rtc);
135 }
136 platform_set_drvdata(pdev, rtc);
137
138 return 0;
139}
140
141static int __devexit dm355evm_rtc_remove(struct platform_device *pdev)
142{
143 struct rtc_device *rtc = platform_get_drvdata(pdev);
144
145 rtc_device_unregister(rtc);
146 platform_set_drvdata(pdev, NULL);
147 return 0;
148}
149
150/*
151 * I2C is used to talk to the MSP430, but this platform device is
152 * exposed by an MFD driver that manages I2C communications.
153 */
154static struct platform_driver rtc_dm355evm_driver = {
155 .probe = dm355evm_rtc_probe,
156 .remove = __devexit_p(dm355evm_rtc_remove),
157 .driver = {
158 .owner = THIS_MODULE,
159 .name = "rtc-dm355evm",
160 },
161};
162
163static int __init dm355evm_rtc_init(void)
164{
165 return platform_driver_register(&rtc_dm355evm_driver);
166}
167module_init(dm355evm_rtc_init);
168
169static void __exit dm355evm_rtc_exit(void)
170{
171 platform_driver_unregister(&rtc_dm355evm_driver);
172}
173module_exit(dm355evm_rtc_exit);
174
175MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c
index e54b5c619bdf..e01b955db077 100644
--- a/drivers/rtc/rtc-ds1390.c
+++ b/drivers/rtc/rtc-ds1390.c
@@ -122,7 +122,6 @@ static const struct rtc_class_ops ds1390_rtc_ops = {
122 122
123static int __devinit ds1390_probe(struct spi_device *spi) 123static int __devinit ds1390_probe(struct spi_device *spi)
124{ 124{
125 struct rtc_device *rtc;
126 unsigned char tmp; 125 unsigned char tmp;
127 struct ds1390 *chip; 126 struct ds1390 *chip;
128 int res; 127 int res;
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 6b996db0dd6a..604bd1e0d546 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -27,6 +27,7 @@ menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
27 bool "Android RAM Console Enable error correction" 27 bool "Android RAM Console Enable error correction"
28 default n 28 default n
29 depends on ANDROID_RAM_CONSOLE 29 depends on ANDROID_RAM_CONSOLE
30 depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
30 select REED_SOLOMON 31 select REED_SOLOMON
31 select REED_SOLOMON_ENC8 32 select REED_SOLOMON_ENC8
32 select REED_SOLOMON_DEC8 33 select REED_SOLOMON_DEC8
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
index bf006857a87a..643ac5ce381d 100644
--- a/drivers/staging/android/ram_console.c
+++ b/drivers/staging/android/ram_console.c
@@ -224,9 +224,23 @@ static int __init ram_console_init(struct ram_console_buffer *buffer,
224 ram_console_buffer_size = 224 ram_console_buffer_size =
225 buffer_size - sizeof(struct ram_console_buffer); 225 buffer_size - sizeof(struct ram_console_buffer);
226 226
227 if (ram_console_buffer_size > buffer_size) {
228 pr_err("ram_console: buffer %p, invalid size %d, datasize %d\n",
229 buffer, buffer_size, ram_console_buffer_size);
230 return 0;
231 }
232
227#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION 233#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
228 ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size, 234 ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
229 ECC_BLOCK_SIZE) + 1) * ECC_SIZE; 235 ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
236
237 if (ram_console_buffer_size > buffer_size) {
238 pr_err("ram_console: buffer %p, invalid size %d, "
239 "non-ecc datasize %d\n",
240 buffer, buffer_size, ram_console_buffer_size);
241 return 0;
242 }
243
230 ram_console_par_buffer = buffer->data + ram_console_buffer_size; 244 ram_console_par_buffer = buffer->data + ram_console_buffer_size;
231 245
232 246
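Both checks added to ram_console_init() above rely on the sizes being unsigned: when buffer_size is smaller than the header (or than the ECC overhead), the subtraction does not go negative, it wraps to a huge value, which is why the sanity test is phrased as "bigger than the original buffer". A standalone sketch of that idiom, with hypothetical names:

    #include <stddef.h>

    /* Returns 0 and stores the payload size, or -1 if header_size > buffer_size
     * (detected because the unsigned subtraction wrapped around). */
    static int example_payload_size(size_t buffer_size, size_t header_size, size_t *out)
    {
            size_t data = buffer_size - header_size;

            if (data > buffer_size)
                    return -1;
            *out = data;
            return 0;
    }
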
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index 903270cbbe02..33daff0481d2 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -50,7 +50,7 @@ static ssize_t gpio_enable_show(struct device *dev, struct device_attribute *att
50 if (hrtimer_active(&gpio_data->timer)) { 50 if (hrtimer_active(&gpio_data->timer)) {
51 ktime_t r = hrtimer_get_remaining(&gpio_data->timer); 51 ktime_t r = hrtimer_get_remaining(&gpio_data->timer);
52 struct timeval t = ktime_to_timeval(r); 52 struct timeval t = ktime_to_timeval(r);
53 remaining = t.tv_sec * 1000 + t.tv_usec; 53 remaining = t.tv_sec * 1000 + t.tv_usec / 1000;
54 } else 54 } else
55 remaining = 0; 55 remaining = 0;
56 56
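The timed_gpio fix above is a unit conversion: struct timeval carries whole seconds plus microseconds, so expressing the remaining time in milliseconds needs tv_usec / 1000 rather than the raw microsecond count. As a one-liner sketch:

    #include <linux/time.h>

    static unsigned long example_timeval_to_ms(struct timeval t)
    {
            return t.tv_sec * 1000 + t.tv_usec / 1000;
    }
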
diff --git a/drivers/staging/at76_usb/Kconfig b/drivers/staging/at76_usb/Kconfig
index 4c0e55e15448..8606f9621624 100644
--- a/drivers/staging/at76_usb/Kconfig
+++ b/drivers/staging/at76_usb/Kconfig
@@ -1,6 +1,6 @@
1config USB_ATMEL 1config USB_ATMEL
2 tristate "Atmel at76c503/at76c505/at76c505a USB cards" 2 tristate "Atmel at76c503/at76c505/at76c505a USB cards"
3 depends on MAC80211 && WLAN_80211 && USB 3 depends on WLAN_80211 && USB
4 default N 4 default N
5 select FW_LOADER 5 select FW_LOADER
6 ---help--- 6 ---help---
diff --git a/drivers/staging/at76_usb/at76_usb.c b/drivers/staging/at76_usb/at76_usb.c
index 185533e54769..c8e4d31c7df2 100644
--- a/drivers/staging/at76_usb/at76_usb.c
+++ b/drivers/staging/at76_usb/at76_usb.c
@@ -6,7 +6,6 @@
6 * Copyright (c) 2004 Nick Jones 6 * Copyright (c) 2004 Nick Jones
7 * Copyright (c) 2004 Balint Seeber <n0_5p4m_p13453@hotmail.com> 7 * Copyright (c) 2004 Balint Seeber <n0_5p4m_p13453@hotmail.com>
8 * Copyright (c) 2007 Guido Guenther <agx@sigxcpu.org> 8 * Copyright (c) 2007 Guido Guenther <agx@sigxcpu.org>
9 * Copyright (c) 2007 Kalle Valo <kalle.valo@iki.fi>
10 * 9 *
11 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as 11 * modify it under the terms of the GNU General Public License as
@@ -17,13 +16,6 @@
17 * Atmel AT76C503A/505/505A. 16 * Atmel AT76C503A/505/505A.
18 * 17 *
19 * Some iw_handler code was taken from airo.c, (C) 1999 Benjamin Reed 18 * Some iw_handler code was taken from airo.c, (C) 1999 Benjamin Reed
20 *
21 * TODO for the mac80211 port:
22 * o adhoc support
23 * o RTS/CTS support
24 * o Power Save Mode support
25 * o support for short/long preambles
26 * o export variables through debugfs/sysfs
27 */ 19 */
28 20
29#include <linux/init.h> 21#include <linux/init.h>
@@ -44,7 +36,7 @@
44#include <net/ieee80211_radiotap.h> 36#include <net/ieee80211_radiotap.h>
45#include <linux/firmware.h> 37#include <linux/firmware.h>
46#include <linux/leds.h> 38#include <linux/leds.h>
47#include <net/mac80211.h> 39#include <net/ieee80211.h>
48 40
49#include "at76_usb.h" 41#include "at76_usb.h"
50 42
@@ -84,43 +76,31 @@
84#define DBG_WE_EVENTS 0x08000000 /* dump wireless events */ 76#define DBG_WE_EVENTS 0x08000000 /* dump wireless events */
85#define DBG_FW 0x10000000 /* firmware download */ 77#define DBG_FW 0x10000000 /* firmware download */
86#define DBG_DFU 0x20000000 /* device firmware upgrade */ 78#define DBG_DFU 0x20000000 /* device firmware upgrade */
87#define DBG_CMD 0x40000000
88#define DBG_MAC80211 0x80000000
89 79
90#define DBG_DEFAULTS 0 80#define DBG_DEFAULTS 0
91 81
92/* Use our own dbg macro */ 82/* Use our own dbg macro */
93#define at76_dbg(bits, format, arg...) \ 83#define at76_dbg(bits, format, arg...) \
94do { \ 84 do { \
95 if (at76_debug & (bits)) \ 85 if (at76_debug & (bits)) \
96 printk(KERN_DEBUG DRIVER_NAME ": " format "\n" , ## arg); \
97} while (0)
98
99#define at76_dbg_dump(bits, buf, len, format, arg...) \
100do { \
101 if (at76_debug & (bits)) { \
102 printk(KERN_DEBUG DRIVER_NAME ": " format "\n" , ## arg); \ 86 printk(KERN_DEBUG DRIVER_NAME ": " format "\n" , ## arg); \
103 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); \ 87 } while (0)
104 } \
105} while (0)
106 88
107static int at76_debug = DBG_DEFAULTS; 89static int at76_debug = DBG_DEFAULTS;
108 90
109#define FIRMWARE_IS_WPA(ver) ((ver.major == 1) && (ver.minor == 103))
110
111/* Protect against concurrent firmware loading and parsing */ 91/* Protect against concurrent firmware loading and parsing */
112static struct mutex fw_mutex; 92static struct mutex fw_mutex;
113 93
114static struct fwentry firmwares[] = { 94static struct fwentry firmwares[] = {
115 [0] = { "" }, 95 [0] = {""},
116 [BOARD_503_ISL3861] = { "atmel_at76c503-i3861.bin" }, 96 [BOARD_503_ISL3861] = {"atmel_at76c503-i3861.bin"},
117 [BOARD_503_ISL3863] = { "atmel_at76c503-i3863.bin" }, 97 [BOARD_503_ISL3863] = {"atmel_at76c503-i3863.bin"},
118 [BOARD_503] = { "atmel_at76c503-rfmd.bin" }, 98 [BOARD_503] = {"atmel_at76c503-rfmd.bin"},
119 [BOARD_503_ACC] = { "atmel_at76c503-rfmd-acc.bin" }, 99 [BOARD_503_ACC] = {"atmel_at76c503-rfmd-acc.bin"},
120 [BOARD_505] = { "atmel_at76c505-rfmd.bin" }, 100 [BOARD_505] = {"atmel_at76c505-rfmd.bin"},
121 [BOARD_505_2958] = { "atmel_at76c505-rfmd2958.bin" }, 101 [BOARD_505_2958] = {"atmel_at76c505-rfmd2958.bin"},
122 [BOARD_505A] = { "atmel_at76c505a-rfmd2958.bin" }, 102 [BOARD_505A] = {"atmel_at76c505a-rfmd2958.bin"},
123 [BOARD_505AMX] = { "atmel_at76c505amx-rfmd.bin" }, 103 [BOARD_505AMX] = {"atmel_at76c505amx-rfmd.bin"},
124}; 104};
125 105
126#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops) 106#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops)
@@ -130,133 +110,135 @@ static struct usb_device_id dev_table[] = {
130 * at76c503-i3861 110 * at76c503-i3861
131 */ 111 */
132 /* Generic AT76C503/3861 device */ 112 /* Generic AT76C503/3861 device */
133 { USB_DEVICE(0x03eb, 0x7603), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 113 {USB_DEVICE(0x03eb, 0x7603), USB_DEVICE_DATA(BOARD_503_ISL3861)},
134 /* Linksys WUSB11 v2.1/v2.6 */ 114 /* Linksys WUSB11 v2.1/v2.6 */
135 { USB_DEVICE(0x066b, 0x2211), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 115 {USB_DEVICE(0x066b, 0x2211), USB_DEVICE_DATA(BOARD_503_ISL3861)},
136 /* Netgear MA101 rev. A */ 116 /* Netgear MA101 rev. A */
137 { USB_DEVICE(0x0864, 0x4100), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 117 {USB_DEVICE(0x0864, 0x4100), USB_DEVICE_DATA(BOARD_503_ISL3861)},
138 /* Tekram U300C / Allnet ALL0193 */ 118 /* Tekram U300C / Allnet ALL0193 */
139 { USB_DEVICE(0x0b3b, 0x1612), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 119 {USB_DEVICE(0x0b3b, 0x1612), USB_DEVICE_DATA(BOARD_503_ISL3861)},
140 /* HP HN210W J7801A */ 120 /* HP HN210W J7801A */
141 { USB_DEVICE(0x03f0, 0x011c), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 121 {USB_DEVICE(0x03f0, 0x011c), USB_DEVICE_DATA(BOARD_503_ISL3861)},
142 /* Sitecom/Z-Com/Zyxel M4Y-750 */ 122 /* Sitecom/Z-Com/Zyxel M4Y-750 */
143 { USB_DEVICE(0x0cde, 0x0001), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 123 {USB_DEVICE(0x0cde, 0x0001), USB_DEVICE_DATA(BOARD_503_ISL3861)},
144 /* Dynalink/Askey WLL013 (intersil) */ 124 /* Dynalink/Askey WLL013 (intersil) */
145 { USB_DEVICE(0x069a, 0x0320), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 125 {USB_DEVICE(0x069a, 0x0320), USB_DEVICE_DATA(BOARD_503_ISL3861)},
146 /* EZ connect 11Mpbs Wireless USB Adapter SMC2662W v1 */ 126 /* EZ connect 11Mpbs Wireless USB Adapter SMC2662W v1 */
147 { USB_DEVICE(0x0d5c, 0xa001), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 127 {USB_DEVICE(0x0d5c, 0xa001), USB_DEVICE_DATA(BOARD_503_ISL3861)},
148 /* BenQ AWL300 */ 128 /* BenQ AWL300 */
149 { USB_DEVICE(0x04a5, 0x9000), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 129 {USB_DEVICE(0x04a5, 0x9000), USB_DEVICE_DATA(BOARD_503_ISL3861)},
150 /* Addtron AWU-120, Compex WLU11 */ 130 /* Addtron AWU-120, Compex WLU11 */
151 { USB_DEVICE(0x05dd, 0xff31), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 131 {USB_DEVICE(0x05dd, 0xff31), USB_DEVICE_DATA(BOARD_503_ISL3861)},
152 /* Intel AP310 AnyPoint II USB */ 132 /* Intel AP310 AnyPoint II USB */
153 { USB_DEVICE(0x8086, 0x0200), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 133 {USB_DEVICE(0x8086, 0x0200), USB_DEVICE_DATA(BOARD_503_ISL3861)},
154 /* Dynalink L11U */ 134 /* Dynalink L11U */
155 { USB_DEVICE(0x0d8e, 0x7100), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 135 {USB_DEVICE(0x0d8e, 0x7100), USB_DEVICE_DATA(BOARD_503_ISL3861)},
156 /* Arescom WL-210, FCC id 07J-GL2411USB */ 136 /* Arescom WL-210, FCC id 07J-GL2411USB */
157 { USB_DEVICE(0x0d8e, 0x7110), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 137 {USB_DEVICE(0x0d8e, 0x7110), USB_DEVICE_DATA(BOARD_503_ISL3861)},
158 /* I-O DATA WN-B11/USB */ 138 /* I-O DATA WN-B11/USB */
159 { USB_DEVICE(0x04bb, 0x0919), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 139 {USB_DEVICE(0x04bb, 0x0919), USB_DEVICE_DATA(BOARD_503_ISL3861)},
160 /* BT Voyager 1010 */ 140 /* BT Voyager 1010 */
161 { USB_DEVICE(0x069a, 0x0821), USB_DEVICE_DATA(BOARD_503_ISL3861) }, 141 {USB_DEVICE(0x069a, 0x0821), USB_DEVICE_DATA(BOARD_503_ISL3861)},
162 /* 142 /*
163 * at76c503-i3863 143 * at76c503-i3863
164 */ 144 */
165 /* Generic AT76C503/3863 device */ 145 /* Generic AT76C503/3863 device */
166 { USB_DEVICE(0x03eb, 0x7604), USB_DEVICE_DATA(BOARD_503_ISL3863) }, 146 {USB_DEVICE(0x03eb, 0x7604), USB_DEVICE_DATA(BOARD_503_ISL3863)},
167 /* Samsung SWL-2100U */ 147 /* Samsung SWL-2100U */
168 { USB_DEVICE(0x055d, 0xa000), USB_DEVICE_DATA(BOARD_503_ISL3863) }, 148 {USB_DEVICE(0x055d, 0xa000), USB_DEVICE_DATA(BOARD_503_ISL3863)},
169 /* 149 /*
170 * at76c503-rfmd 150 * at76c503-rfmd
171 */ 151 */
172 /* Generic AT76C503/RFMD device */ 152 /* Generic AT76C503/RFMD device */
173 { USB_DEVICE(0x03eb, 0x7605), USB_DEVICE_DATA(BOARD_503) }, 153 {USB_DEVICE(0x03eb, 0x7605), USB_DEVICE_DATA(BOARD_503)},
174 /* Dynalink/Askey WLL013 (rfmd) */ 154 /* Dynalink/Askey WLL013 (rfmd) */
175 { USB_DEVICE(0x069a, 0x0321), USB_DEVICE_DATA(BOARD_503) }, 155 {USB_DEVICE(0x069a, 0x0321), USB_DEVICE_DATA(BOARD_503)},
176 /* Linksys WUSB11 v2.6 */ 156 /* Linksys WUSB11 v2.6 */
177 { USB_DEVICE(0x077b, 0x2219), USB_DEVICE_DATA(BOARD_503) }, 157 {USB_DEVICE(0x077b, 0x2219), USB_DEVICE_DATA(BOARD_503)},
178 /* Network Everywhere NWU11B */ 158 /* Network Everywhere NWU11B */
179 { USB_DEVICE(0x077b, 0x2227), USB_DEVICE_DATA(BOARD_503) }, 159 {USB_DEVICE(0x077b, 0x2227), USB_DEVICE_DATA(BOARD_503)},
180 /* Netgear MA101 rev. B */ 160 /* Netgear MA101 rev. B */
181 { USB_DEVICE(0x0864, 0x4102), USB_DEVICE_DATA(BOARD_503) }, 161 {USB_DEVICE(0x0864, 0x4102), USB_DEVICE_DATA(BOARD_503)},
182 /* D-Link DWL-120 rev. E */ 162 /* D-Link DWL-120 rev. E */
183 { USB_DEVICE(0x2001, 0x3200), USB_DEVICE_DATA(BOARD_503) }, 163 {USB_DEVICE(0x2001, 0x3200), USB_DEVICE_DATA(BOARD_503)},
184 /* Actiontec 802UAT1, HWU01150-01UK */ 164 /* Actiontec 802UAT1, HWU01150-01UK */
185 { USB_DEVICE(0x1668, 0x7605), USB_DEVICE_DATA(BOARD_503) }, 165 {USB_DEVICE(0x1668, 0x7605), USB_DEVICE_DATA(BOARD_503)},
186 /* AirVast W-Buddie WN210 */ 166 /* AirVast W-Buddie WN210 */
187 { USB_DEVICE(0x03eb, 0x4102), USB_DEVICE_DATA(BOARD_503) }, 167 {USB_DEVICE(0x03eb, 0x4102), USB_DEVICE_DATA(BOARD_503)},
188 /* Dick Smith Electronics XH1153 802.11b USB adapter */ 168 /* Dick Smith Electronics XH1153 802.11b USB adapter */
189 { USB_DEVICE(0x1371, 0x5743), USB_DEVICE_DATA(BOARD_503) }, 169 {USB_DEVICE(0x1371, 0x5743), USB_DEVICE_DATA(BOARD_503)},
190 /* CNet CNUSB611 */ 170 /* CNet CNUSB611 */
191 { USB_DEVICE(0x1371, 0x0001), USB_DEVICE_DATA(BOARD_503) }, 171 {USB_DEVICE(0x1371, 0x0001), USB_DEVICE_DATA(BOARD_503)},
192 /* FiberLine FL-WL200U */ 172 /* FiberLine FL-WL200U */
193 { USB_DEVICE(0x1371, 0x0002), USB_DEVICE_DATA(BOARD_503) }, 173 {USB_DEVICE(0x1371, 0x0002), USB_DEVICE_DATA(BOARD_503)},
194 /* BenQ AWL400 USB stick */ 174 /* BenQ AWL400 USB stick */
195 { USB_DEVICE(0x04a5, 0x9001), USB_DEVICE_DATA(BOARD_503) }, 175 {USB_DEVICE(0x04a5, 0x9001), USB_DEVICE_DATA(BOARD_503)},
196 /* 3Com 3CRSHEW696 */ 176 /* 3Com 3CRSHEW696 */
197 { USB_DEVICE(0x0506, 0x0a01), USB_DEVICE_DATA(BOARD_503) }, 177 {USB_DEVICE(0x0506, 0x0a01), USB_DEVICE_DATA(BOARD_503)},
198 /* Siemens Santis ADSL WLAN USB adapter WLL 013 */ 178 /* Siemens Santis ADSL WLAN USB adapter WLL 013 */
199 { USB_DEVICE(0x0681, 0x001b), USB_DEVICE_DATA(BOARD_503) }, 179 {USB_DEVICE(0x0681, 0x001b), USB_DEVICE_DATA(BOARD_503)},
200 /* Belkin F5D6050, version 2 */ 180 /* Belkin F5D6050, version 2 */
201 { USB_DEVICE(0x050d, 0x0050), USB_DEVICE_DATA(BOARD_503) }, 181 {USB_DEVICE(0x050d, 0x0050), USB_DEVICE_DATA(BOARD_503)},
202 /* iBlitzz, BWU613 (not *B or *SB) */ 182 /* iBlitzz, BWU613 (not *B or *SB) */
203 { USB_DEVICE(0x07b8, 0xb000), USB_DEVICE_DATA(BOARD_503) }, 183 {USB_DEVICE(0x07b8, 0xb000), USB_DEVICE_DATA(BOARD_503)},
204 /* Gigabyte GN-WLBM101 */ 184 /* Gigabyte GN-WLBM101 */
205 { USB_DEVICE(0x1044, 0x8003), USB_DEVICE_DATA(BOARD_503) }, 185 {USB_DEVICE(0x1044, 0x8003), USB_DEVICE_DATA(BOARD_503)},
206 /* Planex GW-US11S */ 186 /* Planex GW-US11S */
207 { USB_DEVICE(0x2019, 0x3220), USB_DEVICE_DATA(BOARD_503) }, 187 {USB_DEVICE(0x2019, 0x3220), USB_DEVICE_DATA(BOARD_503)},
208 /* Internal WLAN adapter in h5[4,5]xx series iPAQs */ 188 /* Internal WLAN adapter in h5[4,5]xx series iPAQs */
209 { USB_DEVICE(0x049f, 0x0032), USB_DEVICE_DATA(BOARD_503) }, 189 {USB_DEVICE(0x049f, 0x0032), USB_DEVICE_DATA(BOARD_503)},
210 /* Corega Wireless LAN USB-11 mini */ 190 /* Corega Wireless LAN USB-11 mini */
211 { USB_DEVICE(0x07aa, 0x0011), USB_DEVICE_DATA(BOARD_503) }, 191 {USB_DEVICE(0x07aa, 0x0011), USB_DEVICE_DATA(BOARD_503)},
212 /* Corega Wireless LAN USB-11 mini2 */ 192 /* Corega Wireless LAN USB-11 mini2 */
213 { USB_DEVICE(0x07aa, 0x0018), USB_DEVICE_DATA(BOARD_503) }, 193 {USB_DEVICE(0x07aa, 0x0018), USB_DEVICE_DATA(BOARD_503)},
214 /* Uniden PCW100 */ 194 /* Uniden PCW100 */
215 { USB_DEVICE(0x05dd, 0xff35), USB_DEVICE_DATA(BOARD_503) }, 195 {USB_DEVICE(0x05dd, 0xff35), USB_DEVICE_DATA(BOARD_503)},
216 /* 196 /*
217 * at76c503-rfmd-acc 197 * at76c503-rfmd-acc
218 */ 198 */
219 /* SMC2664W */ 199 /* SMC2664W */
220 { USB_DEVICE(0x083a, 0x3501), USB_DEVICE_DATA(BOARD_503_ACC) }, 200 {USB_DEVICE(0x083a, 0x3501), USB_DEVICE_DATA(BOARD_503_ACC)},
221 /* Belkin F5D6050, SMC2662W v2, SMC2662W-AR */ 201 /* Belkin F5D6050, SMC2662W v2, SMC2662W-AR */
222 { USB_DEVICE(0x0d5c, 0xa002), USB_DEVICE_DATA(BOARD_503_ACC) }, 202 {USB_DEVICE(0x0d5c, 0xa002), USB_DEVICE_DATA(BOARD_503_ACC)},
223 /* 203 /*
224 * at76c505-rfmd 204 * at76c505-rfmd
225 */ 205 */
226 /* Generic AT76C505/RFMD */ 206 /* Generic AT76C505/RFMD */
227 { USB_DEVICE(0x03eb, 0x7606), USB_DEVICE_DATA(BOARD_505) }, 207 {USB_DEVICE(0x03eb, 0x7606), USB_DEVICE_DATA(BOARD_505)},
228 /* 208 /*
229 * at76c505-rfmd2958 209 * at76c505-rfmd2958
230 */ 210 */
231 /* Generic AT76C505/RFMD, OvisLink WL-1130USB */ 211 /* Generic AT76C505/RFMD, OvisLink WL-1130USB */
232 { USB_DEVICE(0x03eb, 0x7613), USB_DEVICE_DATA(BOARD_505_2958) }, 212 {USB_DEVICE(0x03eb, 0x7613), USB_DEVICE_DATA(BOARD_505_2958)},
233 /* Fiberline FL-WL240U */ 213 /* Fiberline FL-WL240U */
234 { USB_DEVICE(0x1371, 0x0014), USB_DEVICE_DATA(BOARD_505_2958) }, 214 {USB_DEVICE(0x1371, 0x0014), USB_DEVICE_DATA(BOARD_505_2958)},
235 /* CNet CNUSB-611G */ 215 /* CNet CNUSB-611G */
236 { USB_DEVICE(0x1371, 0x0013), USB_DEVICE_DATA(BOARD_505_2958) }, 216 {USB_DEVICE(0x1371, 0x0013), USB_DEVICE_DATA(BOARD_505_2958)},
237 /* Linksys WUSB11 v2.8 */ 217 /* Linksys WUSB11 v2.8 */
238 { USB_DEVICE(0x1915, 0x2233), USB_DEVICE_DATA(BOARD_505_2958) }, 218 {USB_DEVICE(0x1915, 0x2233), USB_DEVICE_DATA(BOARD_505_2958)},
239 /* Xterasys XN-2122B, IBlitzz BWU613B/BWU613SB */ 219 /* Xterasys XN-2122B, IBlitzz BWU613B/BWU613SB */
240 { USB_DEVICE(0x12fd, 0x1001), USB_DEVICE_DATA(BOARD_505_2958) }, 220 {USB_DEVICE(0x12fd, 0x1001), USB_DEVICE_DATA(BOARD_505_2958)},
241 /* Corega WLAN USB Stick 11 */ 221 /* Corega WLAN USB Stick 11 */
242 { USB_DEVICE(0x07aa, 0x7613), USB_DEVICE_DATA(BOARD_505_2958) }, 222 {USB_DEVICE(0x07aa, 0x7613), USB_DEVICE_DATA(BOARD_505_2958)},
243 /* Microstar MSI Box MS6978 */ 223 /* Microstar MSI Box MS6978 */
244 { USB_DEVICE(0x0db0, 0x1020), USB_DEVICE_DATA(BOARD_505_2958) }, 224 {USB_DEVICE(0x0db0, 0x1020), USB_DEVICE_DATA(BOARD_505_2958)},
245 /* 225 /*
246 * at76c505a-rfmd2958 226 * at76c505a-rfmd2958
247 */ 227 */
248 /* Generic AT76C505A device */ 228 /* Generic AT76C505A device */
249 { USB_DEVICE(0x03eb, 0x7614), USB_DEVICE_DATA(BOARD_505A) }, 229 {USB_DEVICE(0x03eb, 0x7614), USB_DEVICE_DATA(BOARD_505A)},
250 /* Generic AT76C505AS device */ 230 /* Generic AT76C505AS device */
251 { USB_DEVICE(0x03eb, 0x7617), USB_DEVICE_DATA(BOARD_505A) }, 231 {USB_DEVICE(0x03eb, 0x7617), USB_DEVICE_DATA(BOARD_505A)},
252 /* Siemens Gigaset USB WLAN Adapter 11 */ 232 /* Siemens Gigaset USB WLAN Adapter 11 */
253 { USB_DEVICE(0x1690, 0x0701), USB_DEVICE_DATA(BOARD_505A) }, 233 {USB_DEVICE(0x1690, 0x0701), USB_DEVICE_DATA(BOARD_505A)},
234 /* OQO Model 01+ Internal Wi-Fi */
235 {USB_DEVICE(0x1557, 0x0002), USB_DEVICE_DATA(BOARD_505A)},
254 /* 236 /*
255 * at76c505amx-rfmd 237 * at76c505amx-rfmd
256 */ 238 */
257 /* Generic AT76C505AMX device */ 239 /* Generic AT76C505AMX device */
258 { USB_DEVICE(0x03eb, 0x7615), USB_DEVICE_DATA(BOARD_505AMX) }, 240 {USB_DEVICE(0x03eb, 0x7615), USB_DEVICE_DATA(BOARD_505AMX)},
259 { } 241 {}
260}; 242};
261 243
262MODULE_DEVICE_TABLE(usb, dev_table); 244MODULE_DEVICE_TABLE(usb, dev_table);
@@ -264,8 +246,26 @@ MODULE_DEVICE_TABLE(usb, dev_table);
264/* Supported rates of this hardware, bit 7 marks basic rates */ 246/* Supported rates of this hardware, bit 7 marks basic rates */
265static const u8 hw_rates[] = { 0x82, 0x84, 0x0b, 0x16 }; 247static const u8 hw_rates[] = { 0x82, 0x84, 0x0b, 0x16 };
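/*
 * Note (illustration, not part of the driver): hw_rates uses the standard
 * 802.11 Supported Rates encoding - the low 7 bits give the rate in units
 * of 500 kbit/s and bit 7 marks a basic rate.  So 0x82 = 1 Mbit/s (basic),
 * 0x84 = 2 Mbit/s (basic), 0x0b = 5.5 Mbit/s and 0x16 = 11 Mbit/s.
 * Hypothetical helpers showing the decoding:
 */
static inline int at76_rate_is_basic(u8 rate)	/* e.g. 0x82 -> true */
{
	return rate & 0x80;
}

static inline int at76_rate_in_100kbps(u8 rate)	/* e.g. 0x16 -> 110 */
{
	return (rate & 0x7f) * 5;
}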
266 248
249/* Frequency of each channel in MHz */
250static const long channel_frequency[] = {
251 2412, 2417, 2422, 2427, 2432, 2437, 2442,
252 2447, 2452, 2457, 2462, 2467, 2472, 2484
253};
254
255#define NUM_CHANNELS ARRAY_SIZE(channel_frequency)
256
267static const char *const preambles[] = { "long", "short", "auto" }; 257static const char *const preambles[] = { "long", "short", "auto" };
268 258
259static const char *const mac_states[] = {
260 [MAC_INIT] = "INIT",
261 [MAC_SCANNING] = "SCANNING",
262 [MAC_AUTH] = "AUTH",
263 [MAC_ASSOC] = "ASSOC",
264 [MAC_JOINING] = "JOINING",
265 [MAC_CONNECTED] = "CONNECTED",
266 [MAC_OWN_IBSS] = "OWN_IBSS"
267};
268
269/* Firmware download */ 269/* Firmware download */
270/* DFU states */ 270/* DFU states */
271#define STATE_IDLE 0x00 271#define STATE_IDLE 0x00
@@ -300,30 +300,17 @@ struct dfu_status {
300 300
301static inline int at76_is_intersil(enum board_type board) 301static inline int at76_is_intersil(enum board_type board)
302{ 302{
303 if (board == BOARD_503_ISL3861 || board == BOARD_503_ISL3863) 303 return (board == BOARD_503_ISL3861 || board == BOARD_503_ISL3863);
304 return 1;
305 return 0;
306} 304}
307 305
308static inline int at76_is_503rfmd(enum board_type board) 306static inline int at76_is_503rfmd(enum board_type board)
309{ 307{
310 if (board == BOARD_503 || board == BOARD_503_ACC) 308 return (board == BOARD_503 || board == BOARD_503_ACC);
311 return 1;
312 return 0;
313}
314
315static inline int at76_is_505(enum board_type board)
316{
317 if (board == BOARD_505 || board == BOARD_505_2958)
318 return 1;
319 return 0;
320} 309}
321 310
322static inline int at76_is_505a(enum board_type board) 311static inline int at76_is_505a(enum board_type board)
323{ 312{
324 if (board == BOARD_505A || board == BOARD_505AMX) 313 return (board == BOARD_505A || board == BOARD_505AMX);
325 return 1;
326 return 0;
327} 314}
328 315
329/* Load a block of the first (internal) part of the firmware */ 316/* Load a block of the first (internal) part of the firmware */
@@ -504,6 +491,41 @@ exit:
504 return ret; 491 return ret;
505} 492}
506 493
494/* Report that the scan results are ready */
495static inline void at76_iwevent_scan_complete(struct net_device *netdev)
496{
497 union iwreq_data wrqu;
498 wrqu.data.length = 0;
499 wrqu.data.flags = 0;
500 wireless_send_event(netdev, SIOCGIWSCAN, &wrqu, NULL);
501 at76_dbg(DBG_WE_EVENTS, "%s: SIOCGIWSCAN sent", netdev->name);
502}
503
504static inline void at76_iwevent_bss_connect(struct net_device *netdev,
505 u8 *bssid)
506{
507 union iwreq_data wrqu;
508 wrqu.data.length = 0;
509 wrqu.data.flags = 0;
510 memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
511 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
512 wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
513 at76_dbg(DBG_WE_EVENTS, "%s: %s: SIOCGIWAP sent", netdev->name,
514 __func__);
515}
516
517static inline void at76_iwevent_bss_disconnect(struct net_device *netdev)
518{
519 union iwreq_data wrqu;
520 wrqu.data.length = 0;
521 wrqu.data.flags = 0;
522 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
523 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
524 wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
525 at76_dbg(DBG_WE_EVENTS, "%s: %s: SIOCGIWAP sent", netdev->name,
526 __func__);
527}
528
507#define HEX2STR_BUFFERS 4 529#define HEX2STR_BUFFERS 4
508#define HEX2STR_MAX_LEN 64 530#define HEX2STR_MAX_LEN 64
509#define BIN2HEX(x) ((x) < 10 ? '0' + (x) : (x) + 'A' - 10) 531#define BIN2HEX(x) ((x) < 10 ? '0' + (x) : (x) + 'A' - 10)
@@ -575,6 +597,37 @@ static void at76_ledtrig_tx_activity(void)
575 mod_timer(&ledtrig_tx_timer, jiffies + HZ / 4); 597 mod_timer(&ledtrig_tx_timer, jiffies + HZ / 4);
576} 598}
577 599
600/* Check if the given ssid is hidden */
601static inline int at76_is_hidden_ssid(u8 *ssid, int length)
602{
603 static const u8 zeros[32];
604
605 if (length == 0)
606 return 1;
607
608 if (length == 1 && ssid[0] == ' ')
609 return 1;
610
611 return (memcmp(ssid, zeros, length) == 0);
612}
613
614static inline void at76_free_bss_list(struct at76_priv *priv)
615{
616 struct list_head *next, *ptr;
617 unsigned long flags;
618
619 spin_lock_irqsave(&priv->bss_list_spinlock, flags);
620
621 priv->curr_bss = NULL;
622
623 list_for_each_safe(ptr, next, &priv->bss_list) {
624 list_del(ptr);
625 kfree(list_entry(ptr, struct bss_info, list));
626 }
627
628 spin_unlock_irqrestore(&priv->bss_list_spinlock, flags);
629}
630
578static int at76_remap(struct usb_device *udev) 631static int at76_remap(struct usb_device *udev)
579{ 632{
580 int ret; 633 int ret;
@@ -598,7 +651,7 @@ static int at76_get_op_mode(struct usb_device *udev)
598 return -ENOMEM; 651 return -ENOMEM;
599 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x33, 652 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x33,
600 USB_TYPE_VENDOR | USB_DIR_IN | 653 USB_TYPE_VENDOR | USB_DIR_IN |
601 USB_RECIP_INTERFACE, 0x01, 0, &op_mode, 1, 654 USB_RECIP_INTERFACE, 0x01, 0, op_mode, 1,
602 USB_CTRL_GET_TIMEOUT); 655 USB_CTRL_GET_TIMEOUT);
603 saved = *op_mode; 656 saved = *op_mode;
604 kfree(op_mode); 657 kfree(op_mode);
@@ -676,7 +729,7 @@ exit:
676 kfree(hwcfg); 729 kfree(hwcfg);
677 if (ret < 0) 730 if (ret < 0)
678 printk(KERN_ERR "%s: cannot get HW Config (error %d)\n", 731 printk(KERN_ERR "%s: cannot get HW Config (error %d)\n",
679 wiphy_name(priv->hw->wiphy), ret); 732 priv->netdev->name, ret);
680 733
681 return ret; 734 return ret;
682} 735}
@@ -685,15 +738,15 @@ static struct reg_domain const *at76_get_reg_domain(u16 code)
685{ 738{
686 int i; 739 int i;
687 static struct reg_domain const fd_tab[] = { 740 static struct reg_domain const fd_tab[] = {
688 { 0x10, "FCC (USA)", 0x7ff }, /* ch 1-11 */ 741 {0x10, "FCC (USA)", 0x7ff}, /* ch 1-11 */
689 { 0x20, "IC (Canada)", 0x7ff }, /* ch 1-11 */ 742 {0x20, "IC (Canada)", 0x7ff}, /* ch 1-11 */
690 { 0x30, "ETSI (most of Europe)", 0x1fff }, /* ch 1-13 */ 743 {0x30, "ETSI (most of Europe)", 0x1fff}, /* ch 1-13 */
691 { 0x31, "Spain", 0x600 }, /* ch 10-11 */ 744 {0x31, "Spain", 0x600}, /* ch 10-11 */
692 { 0x32, "France", 0x1e00 }, /* ch 10-13 */ 745 {0x32, "France", 0x1e00}, /* ch 10-13 */
693 { 0x40, "MKK (Japan)", 0x2000 }, /* ch 14 */ 746 {0x40, "MKK (Japan)", 0x2000}, /* ch 14 */
694 { 0x41, "MKK1 (Japan)", 0x3fff }, /* ch 1-14 */ 747 {0x41, "MKK1 (Japan)", 0x3fff}, /* ch 1-14 */
695 { 0x50, "Israel", 0x3fc }, /* ch 3-9 */ 748 {0x50, "Israel", 0x3fc}, /* ch 3-9 */
696 { 0x00, "<unknown>", 0xffffffff } /* ch 1-32 */ 749 {0x00, "<unknown>", 0xffffffff} /* ch 1-32 */
697 }; 750 };
698 751
699 /* Last entry is fallback for unknown domain code */ 752 /* Last entry is fallback for unknown domain code */
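/*
 * Note (illustration, not part of the driver): channel_map appears to be a
 * bitmask in which bit (n - 1) set means channel n is allowed, matching the
 * comments above: 0x7ff covers channels 1-11 (FCC) and 0x1e00 covers
 * channels 10-13 (France).  A hypothetical helper mirroring the check used
 * by the SIOCSIWFREQ handler further below:
 */
static inline int at76_channel_allowed(const struct reg_domain *domain,
				       int chan)
{
	return chan >= 1 && (domain->channel_map & (1 << (chan - 1)));
}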
@@ -731,7 +784,7 @@ static inline int at76_get_cmd_status(struct usb_device *udev, u8 cmd)
731 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x22, 784 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x22,
732 USB_TYPE_VENDOR | USB_DIR_IN | 785 USB_TYPE_VENDOR | USB_DIR_IN |
733 USB_RECIP_INTERFACE, cmd, 0, stat_buf, 786 USB_RECIP_INTERFACE, cmd, 0, stat_buf,
734 sizeof(stat_buf), USB_CTRL_GET_TIMEOUT); 787 40, USB_CTRL_GET_TIMEOUT);
735 if (ret >= 0) 788 if (ret >= 0)
736 ret = stat_buf[5]; 789 ret = stat_buf[5];
737 kfree(stat_buf); 790 kfree(stat_buf);
@@ -739,24 +792,6 @@ static inline int at76_get_cmd_status(struct usb_device *udev, u8 cmd)
739 return ret; 792 return ret;
740} 793}
741 794
742#define MAKE_CMD_CASE(c) case (c): return #c
743
744static const char *at76_get_cmd_string(u8 cmd_status)
745{
746 switch (cmd_status) {
747 MAKE_CMD_CASE(CMD_SET_MIB);
748 MAKE_CMD_CASE(CMD_GET_MIB);
749 MAKE_CMD_CASE(CMD_SCAN);
750 MAKE_CMD_CASE(CMD_JOIN);
751 MAKE_CMD_CASE(CMD_START_IBSS);
752 MAKE_CMD_CASE(CMD_RADIO_ON);
753 MAKE_CMD_CASE(CMD_RADIO_OFF);
754 MAKE_CMD_CASE(CMD_STARTUP);
755 }
756
757 return "UNKNOWN";
758}
759
760static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf, 795static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf,
761 int buf_size) 796 int buf_size)
762{ 797{
@@ -772,10 +807,6 @@ static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf,
772 cmd_buf->size = cpu_to_le16(buf_size); 807 cmd_buf->size = cpu_to_le16(buf_size);
773 memcpy(cmd_buf->data, buf, buf_size); 808 memcpy(cmd_buf->data, buf, buf_size);
774 809
775 at76_dbg_dump(DBG_CMD, cmd_buf, sizeof(struct at76_command) + buf_size,
776 "issuing command %s (0x%02x)",
777 at76_get_cmd_string(cmd), cmd);
778
779 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0e, 810 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0e,
780 USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, 811 USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
781 0, 0, cmd_buf, 812 0, 0, cmd_buf,
@@ -813,13 +844,13 @@ static int at76_wait_completion(struct at76_priv *priv, int cmd)
813 status = at76_get_cmd_status(priv->udev, cmd); 844 status = at76_get_cmd_status(priv->udev, cmd);
814 if (status < 0) { 845 if (status < 0) {
815 printk(KERN_ERR "%s: at76_get_cmd_status failed: %d\n", 846 printk(KERN_ERR "%s: at76_get_cmd_status failed: %d\n",
816 wiphy_name(priv->hw->wiphy), status); 847 priv->netdev->name, status);
817 break; 848 break;
818 } 849 }
819 850
820 at76_dbg(DBG_WAIT_COMPLETE, 851 at76_dbg(DBG_WAIT_COMPLETE,
821 "%s: Waiting on cmd %d, status = %d (%s)", 852 "%s: Waiting on cmd %d, status = %d (%s)",
822 wiphy_name(priv->hw->wiphy), cmd, status, 853 priv->netdev->name, cmd, status,
823 at76_get_cmd_status_string(status)); 854 at76_get_cmd_status_string(status));
824 855
825 if (status != CMD_STATUS_IN_PROGRESS 856 if (status != CMD_STATUS_IN_PROGRESS
@@ -830,7 +861,7 @@ static int at76_wait_completion(struct at76_priv *priv, int cmd)
830 if (time_after(jiffies, timeout)) { 861 if (time_after(jiffies, timeout)) {
831 printk(KERN_ERR 862 printk(KERN_ERR
832 "%s: completion timeout for command %d\n", 863 "%s: completion timeout for command %d\n",
833 wiphy_name(priv->hw->wiphy), cmd); 864 priv->netdev->name, cmd);
834 status = -ETIMEDOUT; 865 status = -ETIMEDOUT;
835 break; 866 break;
836 } 867 }
@@ -853,7 +884,7 @@ static int at76_set_mib(struct at76_priv *priv, struct set_mib_buffer *buf)
853 if (ret != CMD_STATUS_COMPLETE) { 884 if (ret != CMD_STATUS_COMPLETE) {
854 printk(KERN_INFO 885 printk(KERN_INFO
855 "%s: set_mib: at76_wait_completion failed " 886 "%s: set_mib: at76_wait_completion failed "
856 "with %d\n", wiphy_name(priv->hw->wiphy), ret); 887 "with %d\n", priv->netdev->name, ret);
857 ret = -EIO; 888 ret = -EIO;
858 } 889 }
859 890
@@ -874,7 +905,7 @@ static int at76_set_radio(struct at76_priv *priv, int enable)
874 ret = at76_set_card_command(priv->udev, cmd, NULL, 0); 905 ret = at76_set_card_command(priv->udev, cmd, NULL, 0);
875 if (ret < 0) 906 if (ret < 0)
876 printk(KERN_ERR "%s: at76_set_card_command(%d) failed: %d\n", 907 printk(KERN_ERR "%s: at76_set_card_command(%d) failed: %d\n",
877 wiphy_name(priv->hw->wiphy), cmd, ret); 908 priv->netdev->name, cmd, ret);
878 else 909 else
879 ret = 1; 910 ret = 1;
880 911
@@ -895,7 +926,44 @@ static int at76_set_pm_mode(struct at76_priv *priv)
895 ret = at76_set_mib(priv, &priv->mib_buf); 926 ret = at76_set_mib(priv, &priv->mib_buf);
896 if (ret < 0) 927 if (ret < 0)
897 printk(KERN_ERR "%s: set_mib (pm_mode) failed: %d\n", 928 printk(KERN_ERR "%s: set_mib (pm_mode) failed: %d\n",
898 wiphy_name(priv->hw->wiphy), ret); 929 priv->netdev->name, ret);
930
931 return ret;
932}
933
934/* Set the association id for power save mode */
935static int at76_set_associd(struct at76_priv *priv, u16 id)
936{
937 int ret = 0;
938
939 priv->mib_buf.type = MIB_MAC_MGMT;
940 priv->mib_buf.size = 2;
941 priv->mib_buf.index = offsetof(struct mib_mac_mgmt, station_id);
942 priv->mib_buf.data.word = cpu_to_le16(id);
943
944 ret = at76_set_mib(priv, &priv->mib_buf);
945 if (ret < 0)
946 printk(KERN_ERR "%s: set_mib (associd) failed: %d\n",
947 priv->netdev->name, ret);
948
949 return ret;
950}
951
952/* Set the listen interval for power save mode */
953static int at76_set_listen_interval(struct at76_priv *priv, u16 interval)
954{
955 int ret = 0;
956
957 priv->mib_buf.type = MIB_MAC;
958 priv->mib_buf.size = 2;
959 priv->mib_buf.index = offsetof(struct mib_mac, listen_interval);
960 priv->mib_buf.data.word = cpu_to_le16(interval);
961
962 ret = at76_set_mib(priv, &priv->mib_buf);
963 if (ret < 0)
964 printk(KERN_ERR
965 "%s: set_mib (listen_interval) failed: %d\n",
966 priv->netdev->name, ret);
899 967
900 return ret; 968 return ret;
901} 969}
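/*
 * Note on the pattern above: each of these setters fills priv->mib_buf with
 * the MIB type, the field size, the field's byte offset (via offsetof()) and
 * the little-endian value, then hands the buffer to at76_set_mib(), which
 * sends it to the firmware and waits for the command to complete.
 */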
@@ -912,7 +980,7 @@ static int at76_set_preamble(struct at76_priv *priv, u8 type)
912 ret = at76_set_mib(priv, &priv->mib_buf); 980 ret = at76_set_mib(priv, &priv->mib_buf);
913 if (ret < 0) 981 if (ret < 0)
914 printk(KERN_ERR "%s: set_mib (preamble) failed: %d\n", 982 printk(KERN_ERR "%s: set_mib (preamble) failed: %d\n",
915 wiphy_name(priv->hw->wiphy), ret); 983 priv->netdev->name, ret);
916 984
917 return ret; 985 return ret;
918} 986}
@@ -929,7 +997,7 @@ static int at76_set_frag(struct at76_priv *priv, u16 size)
929 ret = at76_set_mib(priv, &priv->mib_buf); 997 ret = at76_set_mib(priv, &priv->mib_buf);
930 if (ret < 0) 998 if (ret < 0)
931 printk(KERN_ERR "%s: set_mib (frag threshold) failed: %d\n", 999 printk(KERN_ERR "%s: set_mib (frag threshold) failed: %d\n",
932 wiphy_name(priv->hw->wiphy), ret); 1000 priv->netdev->name, ret);
933 1001
934 return ret; 1002 return ret;
935} 1003}
@@ -946,7 +1014,7 @@ static int at76_set_rts(struct at76_priv *priv, u16 size)
946 ret = at76_set_mib(priv, &priv->mib_buf); 1014 ret = at76_set_mib(priv, &priv->mib_buf);
947 if (ret < 0) 1015 if (ret < 0)
948 printk(KERN_ERR "%s: set_mib (rts) failed: %d\n", 1016 printk(KERN_ERR "%s: set_mib (rts) failed: %d\n",
949 wiphy_name(priv->hw->wiphy), ret); 1017 priv->netdev->name, ret);
950 1018
951 return ret; 1019 return ret;
952} 1020}
@@ -963,41 +1031,24 @@ static int at76_set_autorate_fallback(struct at76_priv *priv, int onoff)
963 ret = at76_set_mib(priv, &priv->mib_buf); 1031 ret = at76_set_mib(priv, &priv->mib_buf);
964 if (ret < 0) 1032 if (ret < 0)
965 printk(KERN_ERR "%s: set_mib (autorate fallback) failed: %d\n", 1033 printk(KERN_ERR "%s: set_mib (autorate fallback) failed: %d\n",
966 wiphy_name(priv->hw->wiphy), ret); 1034 priv->netdev->name, ret);
967 1035
968 return ret; 1036 return ret;
969} 1037}
970 1038
971static int at76_set_tkip_bssid(struct at76_priv *priv, const void *addr) 1039static int at76_add_mac_address(struct at76_priv *priv, void *addr)
972{ 1040{
973 int ret = 0; 1041 int ret = 0;
974 1042
975 priv->mib_buf.type = MIB_MAC_ENCRYPTION; 1043 priv->mib_buf.type = MIB_MAC_ADDR;
976 priv->mib_buf.size = ETH_ALEN; 1044 priv->mib_buf.size = ETH_ALEN;
977 priv->mib_buf.index = offsetof(struct mib_mac_encryption, tkip_bssid); 1045 priv->mib_buf.index = offsetof(struct mib_mac_addr, mac_addr);
978 memcpy(priv->mib_buf.data.addr, addr, ETH_ALEN); 1046 memcpy(priv->mib_buf.data.addr, addr, ETH_ALEN);
979 1047
980 ret = at76_set_mib(priv, &priv->mib_buf); 1048 ret = at76_set_mib(priv, &priv->mib_buf);
981 if (ret < 0) 1049 if (ret < 0)
982 printk(KERN_ERR "%s: set_mib (MAC_ENCRYPTION, tkip_bssid) failed: %d\n", 1050 printk(KERN_ERR "%s: set_mib (MAC_ADDR, mac_addr) failed: %d\n",
983 wiphy_name(priv->hw->wiphy), ret); 1051 priv->netdev->name, ret);
984
985 return ret;
986}
987
988static int at76_reset_rsc(struct at76_priv *priv)
989{
990 int ret = 0;
991
992 priv->mib_buf.type = MIB_MAC_ENCRYPTION;
993 priv->mib_buf.size = 4 * 8;
994 priv->mib_buf.index = offsetof(struct mib_mac_encryption, key_rsc);
995 memset(priv->mib_buf.data.data, 0 , priv->mib_buf.size);
996
997 ret = at76_set_mib(priv, &priv->mib_buf);
998 if (ret < 0)
999 printk(KERN_ERR "%s: set_mib (MAC_ENCRYPTION, key_rsc) failed: %d\n",
1000 wiphy_name(priv->hw->wiphy), ret);
1001 1052
1002 return ret; 1053 return ret;
1003} 1054}
@@ -1016,16 +1067,16 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv)
1016 sizeof(struct mib_mac_addr)); 1067 sizeof(struct mib_mac_addr));
1017 if (ret < 0) { 1068 if (ret < 0) {
1018 printk(KERN_ERR "%s: at76_get_mib (MAC_ADDR) failed: %d\n", 1069 printk(KERN_ERR "%s: at76_get_mib (MAC_ADDR) failed: %d\n",
1019 wiphy_name(priv->hw->wiphy), ret); 1070 priv->netdev->name, ret);
1020 goto exit; 1071 goto exit;
1021 } 1072 }
1022 1073
1023 at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: mac_addr %s res 0x%x 0x%x", 1074 at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: mac_addr %s res 0x%x 0x%x",
1024 wiphy_name(priv->hw->wiphy), 1075 priv->netdev->name,
1025 mac2str(m->mac_addr), m->res[0], m->res[1]); 1076 mac2str(m->mac_addr), m->res[0], m->res[1]);
1026 for (i = 0; i < ARRAY_SIZE(m->group_addr); i++) 1077 for (i = 0; i < ARRAY_SIZE(m->group_addr); i++)
1027 at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: group addr %d: %s, " 1078 at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: group addr %d: %s, "
1028 "status %d", wiphy_name(priv->hw->wiphy), i, 1079 "status %d", priv->netdev->name, i,
1029 mac2str(m->group_addr[i]), m->group_addr_status[i]); 1080 mac2str(m->group_addr[i]), m->group_addr_status[i]);
1030exit: 1081exit:
1031 kfree(m); 1082 kfree(m);
@@ -1045,13 +1096,13 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
1045 sizeof(struct mib_mac_wep)); 1096 sizeof(struct mib_mac_wep));
1046 if (ret < 0) { 1097 if (ret < 0) {
1047 printk(KERN_ERR "%s: at76_get_mib (MAC_WEP) failed: %d\n", 1098 printk(KERN_ERR "%s: at76_get_mib (MAC_WEP) failed: %d\n",
1048 wiphy_name(priv->hw->wiphy), ret); 1099 priv->netdev->name, ret);
1049 goto exit; 1100 goto exit;
1050 } 1101 }
1051 1102
1052 at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: priv_invoked %u def_key_id %u " 1103 at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: priv_invoked %u def_key_id %u "
1053 "key_len %u excl_unencr %u wep_icv_err %u wep_excluded %u " 1104 "key_len %u excl_unencr %u wep_icv_err %u wep_excluded %u "
1054 "encr_level %u key %d", wiphy_name(priv->hw->wiphy), 1105 "encr_level %u key %d", priv->netdev->name,
1055 m->privacy_invoked, m->wep_default_key_id, 1106 m->privacy_invoked, m->wep_default_key_id,
1056 m->wep_key_mapping_len, m->exclude_unencrypted, 1107 m->wep_key_mapping_len, m->exclude_unencrypted,
1057 le32_to_cpu(m->wep_icv_error_count), 1108 le32_to_cpu(m->wep_icv_error_count),
@@ -1063,55 +1114,12 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
1063 1114
1064 for (i = 0; i < WEP_KEYS; i++) 1115 for (i = 0; i < WEP_KEYS; i++)
1065 at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %s", 1116 at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %s",
1066 wiphy_name(priv->hw->wiphy), i, 1117 priv->netdev->name, i,
1067 hex2str(m->wep_default_keyvalue[i], key_len)); 1118 hex2str(m->wep_default_keyvalue[i], key_len));
1068exit: 1119exit:
1069 kfree(m); 1120 kfree(m);
1070} 1121}
1071 1122
1072static void at76_dump_mib_mac_encryption(struct at76_priv *priv)
1073{
1074 int i;
1075 int ret;
1076 /*int key_len;*/
1077 struct mib_mac_encryption *m;
1078
1079 m = kmalloc(sizeof(struct mib_mac_encryption), GFP_KERNEL);
1080 if (!m)
1081 return;
1082
1083 ret = at76_get_mib(priv->udev, MIB_MAC_ENCRYPTION, m,
1084 sizeof(struct mib_mac_encryption));
1085 if (ret < 0) {
1086 dev_err(&priv->udev->dev,
1087 "%s: at76_get_mib (MAC_ENCRYPTION) failed: %d\n",
1088 wiphy_name(priv->hw->wiphy), ret);
1089 goto exit;
1090 }
1091
1092 at76_dbg(DBG_MIB,
1093 "%s: MIB MAC_ENCRYPTION: tkip_bssid %s priv_invoked %u "
1094 "ciph_key_id %u grp_key_id %u excl_unencr %u "
1095 "ckip_key_perm %u wep_icv_err %u wep_excluded %u",
1096 wiphy_name(priv->hw->wiphy), mac2str(m->tkip_bssid),
1097 m->privacy_invoked, m->cipher_default_key_id,
1098 m->cipher_default_group_key_id, m->exclude_unencrypted,
1099 m->ckip_key_permutation,
1100 le32_to_cpu(m->wep_icv_error_count),
1101 le32_to_cpu(m->wep_excluded_count));
1102
1103 /*key_len = (m->encryption_level == 1) ?
1104 WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN;*/
1105
1106 for (i = 0; i < CIPHER_KEYS; i++)
1107 at76_dbg(DBG_MIB, "%s: MIB MAC_ENCRYPTION: key %d: %s",
1108 wiphy_name(priv->hw->wiphy), i,
1109 hex2str(m->cipher_default_keyvalue[i],
1110 CIPHER_KEY_LEN));
1111exit:
1112 kfree(m);
1113}
1114
1115static void at76_dump_mib_mac_mgmt(struct at76_priv *priv) 1123static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
1116{ 1124{
1117 int ret; 1125 int ret;
@@ -1125,7 +1133,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
1125 sizeof(struct mib_mac_mgmt)); 1133 sizeof(struct mib_mac_mgmt));
1126 if (ret < 0) { 1134 if (ret < 0) {
1127 printk(KERN_ERR "%s: at76_get_mib (MAC_MGMT) failed: %d\n", 1135 printk(KERN_ERR "%s: at76_get_mib (MAC_MGMT) failed: %d\n",
1128 wiphy_name(priv->hw->wiphy), ret); 1136 priv->netdev->name, ret);
1129 goto exit; 1137 goto exit;
1130 } 1138 }
1131 1139
@@ -1136,7 +1144,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
1136 "pm_mode %d ibss_change %d res %d " 1144 "pm_mode %d ibss_change %d res %d "
1137 "multi_domain_capability_implemented %d " 1145 "multi_domain_capability_implemented %d "
1138 "international_roaming %d country_string %.3s", 1146 "international_roaming %d country_string %.3s",
1139 wiphy_name(priv->hw->wiphy), le16_to_cpu(m->beacon_period), 1147 priv->netdev->name, le16_to_cpu(m->beacon_period),
1140 le16_to_cpu(m->CFP_max_duration), 1148 le16_to_cpu(m->CFP_max_duration),
1141 le16_to_cpu(m->medium_occupancy_limit), 1149 le16_to_cpu(m->medium_occupancy_limit),
1142 le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window), 1150 le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window),
@@ -1161,7 +1169,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
1161 ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac)); 1169 ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac));
1162 if (ret < 0) { 1170 if (ret < 0) {
1163 printk(KERN_ERR "%s: at76_get_mib (MAC) failed: %d\n", 1171 printk(KERN_ERR "%s: at76_get_mib (MAC) failed: %d\n",
1164 wiphy_name(priv->hw->wiphy), ret); 1172 priv->netdev->name, ret);
1165 goto exit; 1173 goto exit;
1166 } 1174 }
1167 1175
@@ -1171,8 +1179,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
1171 "scan_type %d scan_channel %d probe_delay %u " 1179 "scan_type %d scan_channel %d probe_delay %u "
1172 "min_channel_time %d max_channel_time %d listen_int %d " 1180 "min_channel_time %d max_channel_time %d listen_int %d "
1173 "desired_ssid %s desired_bssid %s desired_bsstype %d", 1181 "desired_ssid %s desired_bssid %s desired_bsstype %d",
1174 wiphy_name(priv->hw->wiphy), 1182 priv->netdev->name, le32_to_cpu(m->max_tx_msdu_lifetime),
1175 le32_to_cpu(m->max_tx_msdu_lifetime),
1176 le32_to_cpu(m->max_rx_lifetime), 1183 le32_to_cpu(m->max_rx_lifetime),
1177 le16_to_cpu(m->frag_threshold), le16_to_cpu(m->rts_threshold), 1184 le16_to_cpu(m->frag_threshold), le16_to_cpu(m->rts_threshold),
1178 le16_to_cpu(m->cwmin), le16_to_cpu(m->cwmax), 1185 le16_to_cpu(m->cwmin), le16_to_cpu(m->cwmax),
@@ -1198,7 +1205,7 @@ static void at76_dump_mib_phy(struct at76_priv *priv)
1198 ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy)); 1205 ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy));
1199 if (ret < 0) { 1206 if (ret < 0) {
1200 printk(KERN_ERR "%s: at76_get_mib (PHY) failed: %d\n", 1207 printk(KERN_ERR "%s: at76_get_mib (PHY) failed: %d\n",
1201 wiphy_name(priv->hw->wiphy), ret); 1208 priv->netdev->name, ret);
1202 goto exit; 1209 goto exit;
1203 } 1210 }
1204 1211
@@ -1207,7 +1214,7 @@ static void at76_dump_mib_phy(struct at76_priv *priv)
1207 "mpdu_max_length %d cca_mode_supported %d operation_rate_set " 1214 "mpdu_max_length %d cca_mode_supported %d operation_rate_set "
1208 "0x%x 0x%x 0x%x 0x%x channel_id %d current_cca_mode %d " 1215 "0x%x 0x%x 0x%x 0x%x channel_id %d current_cca_mode %d "
1209 "phy_type %d current_reg_domain %d", 1216 "phy_type %d current_reg_domain %d",
1210 wiphy_name(priv->hw->wiphy), le32_to_cpu(m->ed_threshold), 1217 priv->netdev->name, le32_to_cpu(m->ed_threshold),
1211 le16_to_cpu(m->slot_time), le16_to_cpu(m->sifs_time), 1218 le16_to_cpu(m->slot_time), le16_to_cpu(m->sifs_time),
1212 le16_to_cpu(m->preamble_length), 1219 le16_to_cpu(m->preamble_length),
1213 le16_to_cpu(m->plcp_header_length), 1220 le16_to_cpu(m->plcp_header_length),
@@ -1231,14 +1238,13 @@ static void at76_dump_mib_local(struct at76_priv *priv)
1231 ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local)); 1238 ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local));
1232 if (ret < 0) { 1239 if (ret < 0) {
1233 printk(KERN_ERR "%s: at76_get_mib (LOCAL) failed: %d\n", 1240 printk(KERN_ERR "%s: at76_get_mib (LOCAL) failed: %d\n",
1234 wiphy_name(priv->hw->wiphy), ret); 1241 priv->netdev->name, ret);
1235 goto exit; 1242 goto exit;
1236 } 1243 }
1237 1244
1238 at76_dbg(DBG_MIB, "%s: MIB LOCAL: beacon_enable %d " 1245 at76_dbg(DBG_MIB, "%s: MIB LOCAL: beacon_enable %d "
1239 "txautorate_fallback %d ssid_size %d promiscuous_mode %d " 1246 "txautorate_fallback %d ssid_size %d promiscuous_mode %d "
1240 "preamble_type %d", wiphy_name(priv->hw->wiphy), 1247 "preamble_type %d", priv->netdev->name, m->beacon_enable,
1241 m->beacon_enable,
1242 m->txautorate_fallback, m->ssid_size, m->promiscuous_mode, 1248 m->txautorate_fallback, m->ssid_size, m->promiscuous_mode,
1243 m->preamble_type); 1249 m->preamble_type);
1244exit: 1250exit:
@@ -1257,21 +1263,118 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv)
1257 sizeof(struct mib_mdomain)); 1263 sizeof(struct mib_mdomain));
1258 if (ret < 0) { 1264 if (ret < 0) {
1259 printk(KERN_ERR "%s: at76_get_mib (MDOMAIN) failed: %d\n", 1265 printk(KERN_ERR "%s: at76_get_mib (MDOMAIN) failed: %d\n",
1260 wiphy_name(priv->hw->wiphy), ret); 1266 priv->netdev->name, ret);
1261 goto exit; 1267 goto exit;
1262 } 1268 }
1263 1269
1264 at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %s", 1270 at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %s",
1265 wiphy_name(priv->hw->wiphy), 1271 priv->netdev->name,
1266 hex2str(m->channel_list, sizeof(m->channel_list))); 1272 hex2str(m->channel_list, sizeof(m->channel_list)));
1267 1273
1268 at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %s", 1274 at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %s",
1269 wiphy_name(priv->hw->wiphy), 1275 priv->netdev->name,
1270 hex2str(m->tx_powerlevel, sizeof(m->tx_powerlevel))); 1276 hex2str(m->tx_powerlevel, sizeof(m->tx_powerlevel)));
1271exit: 1277exit:
1272 kfree(m); 1278 kfree(m);
1273} 1279}
1274 1280
1281static int at76_get_current_bssid(struct at76_priv *priv)
1282{
1283 int ret = 0;
1284 struct mib_mac_mgmt *mac_mgmt =
1285 kmalloc(sizeof(struct mib_mac_mgmt), GFP_KERNEL);
1286
1287 if (!mac_mgmt) {
1288 ret = -ENOMEM;
1289 goto exit;
1290 }
1291
1292 ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, mac_mgmt,
1293 sizeof(struct mib_mac_mgmt));
1294 if (ret < 0) {
1295 printk(KERN_ERR "%s: at76_get_mib failed: %d\n",
1296 priv->netdev->name, ret);
1297 goto error;
1298 }
1299 memcpy(priv->bssid, mac_mgmt->current_bssid, ETH_ALEN);
1300 printk(KERN_INFO "%s: using BSSID %s\n", priv->netdev->name,
1301 mac2str(priv->bssid));
1302error:
1303 kfree(mac_mgmt);
1304exit:
1305 return ret;
1306}
1307
1308static int at76_get_current_channel(struct at76_priv *priv)
1309{
1310 int ret = 0;
1311 struct mib_phy *phy = kmalloc(sizeof(struct mib_phy), GFP_KERNEL);
1312
1313 if (!phy) {
1314 ret = -ENOMEM;
1315 goto exit;
1316 }
1317 ret = at76_get_mib(priv->udev, MIB_PHY, phy, sizeof(struct mib_phy));
1318 if (ret < 0) {
1319 printk(KERN_ERR "%s: at76_get_mib(MIB_PHY) failed: %d\n",
1320 priv->netdev->name, ret);
1321 goto error;
1322 }
1323 priv->channel = phy->channel_id;
1324error:
1325 kfree(phy);
1326exit:
1327 return ret;
1328}
1329
1330/**
1331 * at76_start_scan - start a scan
1332 *
1333 * @use_essid - use the configured ESSID in non passive mode
1334 */
1335static int at76_start_scan(struct at76_priv *priv, int use_essid)
1336{
1337 struct at76_req_scan scan;
1338
1339 memset(&scan, 0, sizeof(struct at76_req_scan));
1340 memset(scan.bssid, 0xff, ETH_ALEN);
1341
1342 if (use_essid) {
1343 memcpy(scan.essid, priv->essid, IW_ESSID_MAX_SIZE);
1344 scan.essid_size = priv->essid_size;
1345 } else
1346 scan.essid_size = 0;
1347
1348 /* jal: why should we start at a certain channel? we do scan the whole
1349 range allowed by reg domain. */
1350 scan.channel = priv->channel;
1351
 1352	/* atmelwlandriver distinguishes between scan type 0 and 1 (active/passive).
1353 For ad-hoc mode, it uses type 0 only. */
1354 scan.scan_type = priv->scan_mode;
1355
1356 /* INFO: For probe_delay, not multiplying by 1024 as this will be
1357 slightly less than min_channel_time
1358 (per spec: probe delay < min. channel time) */
1359 scan.min_channel_time = cpu_to_le16(priv->scan_min_time);
1360 scan.max_channel_time = cpu_to_le16(priv->scan_max_time);
1361 scan.probe_delay = cpu_to_le16(priv->scan_min_time * 1000);
1362 scan.international_scan = 0;
1363
1364 /* other values are set to 0 for type 0 */
1365
1366 at76_dbg(DBG_PROGRESS, "%s: start_scan (use_essid = %d, intl = %d, "
1367 "channel = %d, probe_delay = %d, scan_min_time = %d, "
1368 "scan_max_time = %d)",
1369 priv->netdev->name, use_essid,
1370 scan.international_scan, scan.channel,
1371 le16_to_cpu(scan.probe_delay),
1372 le16_to_cpu(scan.min_channel_time),
1373 le16_to_cpu(scan.max_channel_time));
1374
1375 return at76_set_card_command(priv->udev, CMD_SCAN, &scan, sizeof(scan));
1376}
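/*
 * Worked example for the probe_delay comment above, assuming the firmware
 * takes min/max_channel_time in TU (1024 us) and probe_delay in microseconds:
 * with scan_min_time = 10, min_channel_time is 10 TU = 10240 us while
 * probe_delay = 10 * 1000 = 10000 us, so multiplying by 1000 instead of 1024
 * keeps probe_delay just below min_channel_time, as the spec requires.
 */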
1377
1275/* Enable monitor mode */ 1378/* Enable monitor mode */
1276static int at76_start_monitor(struct at76_priv *priv) 1379static int at76_start_monitor(struct at76_priv *priv)
1277{ 1380{
@@ -1292,6 +1395,86 @@ static int at76_start_monitor(struct at76_priv *priv)
1292 return ret; 1395 return ret;
1293} 1396}
1294 1397
1398static int at76_start_ibss(struct at76_priv *priv)
1399{
1400 struct at76_req_ibss bss;
1401 int ret;
1402
1403 WARN_ON(priv->mac_state != MAC_OWN_IBSS);
1404 if (priv->mac_state != MAC_OWN_IBSS)
1405 return -EBUSY;
1406
1407 memset(&bss, 0, sizeof(struct at76_req_ibss));
1408 memset(bss.bssid, 0xff, ETH_ALEN);
1409 memcpy(bss.essid, priv->essid, IW_ESSID_MAX_SIZE);
1410 bss.essid_size = priv->essid_size;
1411 bss.bss_type = ADHOC_MODE;
1412 bss.channel = priv->channel;
1413
1414 ret = at76_set_card_command(priv->udev, CMD_START_IBSS, &bss,
1415 sizeof(struct at76_req_ibss));
1416 if (ret < 0) {
1417 printk(KERN_ERR "%s: start_ibss failed: %d\n",
1418 priv->netdev->name, ret);
1419 return ret;
1420 }
1421
1422 ret = at76_wait_completion(priv, CMD_START_IBSS);
1423 if (ret != CMD_STATUS_COMPLETE) {
1424 printk(KERN_ERR "%s: start_ibss failed to complete, %d\n",
1425 priv->netdev->name, ret);
1426 return ret;
1427 }
1428
1429 ret = at76_get_current_bssid(priv);
1430 if (ret < 0)
1431 return ret;
1432
1433 ret = at76_get_current_channel(priv);
1434 if (ret < 0)
1435 return ret;
1436
1437 /* not sure what this is good for ??? */
1438 priv->mib_buf.type = MIB_MAC_MGMT;
1439 priv->mib_buf.size = 1;
1440 priv->mib_buf.index = offsetof(struct mib_mac_mgmt, ibss_change);
1441 priv->mib_buf.data.byte = 0;
1442
1443 ret = at76_set_mib(priv, &priv->mib_buf);
1444 if (ret < 0) {
1445 printk(KERN_ERR "%s: set_mib (ibss change ok) failed: %d\n",
1446 priv->netdev->name, ret);
1447 return ret;
1448 }
1449
1450 netif_carrier_on(priv->netdev);
1451 netif_start_queue(priv->netdev);
1452 return 0;
1453}
1454
1455/* Request card to join BSS in managed or ad-hoc mode */
1456static int at76_join_bss(struct at76_priv *priv, struct bss_info *ptr)
1457{
1458 struct at76_req_join join;
1459
1460 BUG_ON(!ptr);
1461
1462 memset(&join, 0, sizeof(struct at76_req_join));
1463 memcpy(join.bssid, ptr->bssid, ETH_ALEN);
1464 memcpy(join.essid, ptr->ssid, ptr->ssid_len);
1465 join.essid_size = ptr->ssid_len;
1466 join.bss_type = (priv->iw_mode == IW_MODE_ADHOC ? 1 : 2);
1467 join.channel = ptr->channel;
1468 join.timeout = cpu_to_le16(2000);
1469
1470 at76_dbg(DBG_PROGRESS,
1471 "%s join addr %s ssid %s type %d ch %d timeout %d",
1472 priv->netdev->name, mac2str(join.bssid), join.essid,
1473 join.bss_type, join.channel, le16_to_cpu(join.timeout));
1474 return at76_set_card_command(priv->udev, CMD_JOIN, &join,
1475 sizeof(struct at76_req_join));
1476}
1477
1295/* Calculate padding from txbuf->wlength (which excludes the USB TX header), 1478/* Calculate padding from txbuf->wlength (which excludes the USB TX header),
1296 likely to compensate a flaw in the AT76C503A USB part ... */ 1479 likely to compensate a flaw in the AT76C503A USB part ... */
1297static inline int at76_calc_padding(int wlen) 1480static inline int at76_calc_padding(int wlen)
@@ -1310,6 +1493,14 @@ static inline int at76_calc_padding(int wlen)
1310 return 0; 1493 return 0;
1311} 1494}
1312 1495
1496/* We are doing a lot of things here in an interrupt. Need
1497 a bh handler (Watching TV with a TV card is probably
1498 a good test: if you see flickers, we are doing too much.
1499 Currently I do see flickers... even with our tasklet :-( )
1500 Maybe because the bttv driver and usb-uhci use the same interrupt
1501*/
1502/* Or maybe because our BH handler is preempting bttv's BH handler.. BHs don't
1503 * solve everything.. (alex) */
1313static void at76_rx_callback(struct urb *urb) 1504static void at76_rx_callback(struct urb *urb)
1314{ 1505{
1315 struct at76_priv *priv = urb->context; 1506 struct at76_priv *priv = urb->context;
@@ -1319,6 +1510,1758 @@ static void at76_rx_callback(struct urb *urb)
1319 return; 1510 return;
1320} 1511}
1321 1512
1513static void at76_tx_callback(struct urb *urb)
1514{
1515 struct at76_priv *priv = urb->context;
1516 struct net_device_stats *stats = &priv->stats;
1517 unsigned long flags;
1518 struct at76_tx_buffer *mgmt_buf;
1519 int ret;
1520
1521 switch (urb->status) {
1522 case 0:
1523 stats->tx_packets++;
1524 break;
1525 case -ENOENT:
1526 case -ECONNRESET:
1527 /* urb has been unlinked */
1528 return;
1529 default:
1530 at76_dbg(DBG_URB, "%s - nonzero tx status received: %d",
1531 __func__, urb->status);
1532 stats->tx_errors++;
1533 break;
1534 }
1535
1536 spin_lock_irqsave(&priv->mgmt_spinlock, flags);
1537 mgmt_buf = priv->next_mgmt_bulk;
1538 priv->next_mgmt_bulk = NULL;
1539 spin_unlock_irqrestore(&priv->mgmt_spinlock, flags);
1540
1541 if (!mgmt_buf) {
1542 netif_wake_queue(priv->netdev);
1543 return;
1544 }
1545
1546 /* we don't copy the padding bytes, but add them
1547 to the length */
1548 memcpy(priv->bulk_out_buffer, mgmt_buf,
1549 le16_to_cpu(mgmt_buf->wlength) + AT76_TX_HDRLEN);
1550 usb_fill_bulk_urb(priv->tx_urb, priv->udev, priv->tx_pipe,
1551 priv->bulk_out_buffer,
1552 le16_to_cpu(mgmt_buf->wlength) + mgmt_buf->padding +
1553 AT76_TX_HDRLEN, at76_tx_callback, priv);
1554 ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC);
1555 if (ret)
1556 printk(KERN_ERR "%s: error in tx submit urb: %d\n",
1557 priv->netdev->name, ret);
1558
1559 kfree(mgmt_buf);
1560}
1561
1562/* Send a management frame on bulk-out. txbuf->wlength must be set */
1563static int at76_tx_mgmt(struct at76_priv *priv, struct at76_tx_buffer *txbuf)
1564{
1565 unsigned long flags;
1566 int ret;
1567 int urb_status;
1568 void *oldbuf = NULL;
1569
1570 netif_carrier_off(priv->netdev); /* stop netdev watchdog */
1571 netif_stop_queue(priv->netdev); /* stop tx data packets */
1572
1573 spin_lock_irqsave(&priv->mgmt_spinlock, flags);
1574
1575 urb_status = priv->tx_urb->status;
1576 if (urb_status == -EINPROGRESS) {
1577 /* cannot transmit now, put in the queue */
1578 oldbuf = priv->next_mgmt_bulk;
1579 priv->next_mgmt_bulk = txbuf;
1580 }
1581 spin_unlock_irqrestore(&priv->mgmt_spinlock, flags);
1582
1583 if (oldbuf) {
1584 /* a data/mgmt tx is already pending in the URB -
 1585	 if this is not an error; in some situations we must
1586 implement a queue or silently modify the old msg */
1587 printk(KERN_ERR "%s: removed pending mgmt buffer %s\n",
1588 priv->netdev->name, hex2str(oldbuf, 64));
1589 kfree(oldbuf);
1590 return 0;
1591 }
1592
1593 txbuf->tx_rate = TX_RATE_1MBIT;
1594 txbuf->padding = at76_calc_padding(le16_to_cpu(txbuf->wlength));
1595 memset(txbuf->reserved, 0, sizeof(txbuf->reserved));
1596
1597 if (priv->next_mgmt_bulk)
1598 printk(KERN_ERR "%s: URB status %d, but mgmt is pending\n",
1599 priv->netdev->name, urb_status);
1600
1601 at76_dbg(DBG_TX_MGMT,
1602 "%s: tx mgmt: wlen %d tx_rate %d pad %d %s",
1603 priv->netdev->name, le16_to_cpu(txbuf->wlength),
1604 txbuf->tx_rate, txbuf->padding,
1605 hex2str(txbuf->packet, le16_to_cpu(txbuf->wlength)));
1606
1607 /* txbuf was not consumed above -> send mgmt msg immediately */
1608 memcpy(priv->bulk_out_buffer, txbuf,
1609 le16_to_cpu(txbuf->wlength) + AT76_TX_HDRLEN);
1610 usb_fill_bulk_urb(priv->tx_urb, priv->udev, priv->tx_pipe,
1611 priv->bulk_out_buffer,
1612 le16_to_cpu(txbuf->wlength) + txbuf->padding +
1613 AT76_TX_HDRLEN, at76_tx_callback, priv);
1614 ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC);
1615 if (ret)
1616 printk(KERN_ERR "%s: error in tx submit urb: %d\n",
1617 priv->netdev->name, ret);
1618
1619 kfree(txbuf);
1620
1621 return ret;
1622}
1623
1624/* Go to the next information element */
1625static inline void next_ie(struct ieee80211_info_element **ie)
1626{
1627 *ie = (struct ieee80211_info_element *)(&(*ie)->data[(*ie)->len]);
1628}
1629
1630/* Challenge is the challenge string (in TLV format)
 1631   we got with seq_nr 2 (shared secret authentication only) and
 1632   send back in seq_nr 3, WEP encrypted, to prove we have the correct WEP key;
1633 otherwise it is NULL */
1634static int at76_auth_req(struct at76_priv *priv, struct bss_info *bss,
1635 int seq_nr, struct ieee80211_info_element *challenge)
1636{
1637 struct at76_tx_buffer *tx_buffer;
1638 struct ieee80211_hdr_3addr *mgmt;
1639 struct ieee80211_auth *req;
1640 int buf_len = (seq_nr != 3 ? AUTH_FRAME_SIZE :
1641 AUTH_FRAME_SIZE + 1 + 1 + challenge->len);
1642
1643 BUG_ON(!bss);
1644 BUG_ON(seq_nr == 3 && !challenge);
1645 tx_buffer = kmalloc(buf_len + MAX_PADDING_SIZE, GFP_ATOMIC);
1646 if (!tx_buffer)
1647 return -ENOMEM;
1648
1649 req = (struct ieee80211_auth *)tx_buffer->packet;
1650 mgmt = &req->header;
1651
1652 /* make wireless header */
1653 /* first auth msg is not encrypted, only the second (seq_nr == 3) */
1654 mgmt->frame_ctl =
1655 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH |
1656 (seq_nr == 3 ? IEEE80211_FCTL_PROTECTED : 0));
1657
1658 mgmt->duration_id = cpu_to_le16(0x8000);
1659 memcpy(mgmt->addr1, bss->bssid, ETH_ALEN);
1660 memcpy(mgmt->addr2, priv->netdev->dev_addr, ETH_ALEN);
1661 memcpy(mgmt->addr3, bss->bssid, ETH_ALEN);
1662 mgmt->seq_ctl = cpu_to_le16(0);
1663
1664 req->algorithm = cpu_to_le16(priv->auth_mode);
1665 req->transaction = cpu_to_le16(seq_nr);
1666 req->status = cpu_to_le16(0);
1667
1668 if (seq_nr == 3)
1669 memcpy(req->info_element, challenge, 1 + 1 + challenge->len);
1670
1671 /* init. at76_priv tx header */
1672 tx_buffer->wlength = cpu_to_le16(buf_len - AT76_TX_HDRLEN);
1673 at76_dbg(DBG_TX_MGMT, "%s: AuthReq bssid %s alg %d seq_nr %d",
1674 priv->netdev->name, mac2str(mgmt->addr3),
1675 le16_to_cpu(req->algorithm), le16_to_cpu(req->transaction));
1676 if (seq_nr == 3)
1677 at76_dbg(DBG_TX_MGMT, "%s: AuthReq challenge: %s ...",
1678 priv->netdev->name, hex2str(req->info_element, 18));
1679
 1680	/* either send immediately (if no data tx is pending)
 1681	   or put it in the pending list */
1682 return at76_tx_mgmt(priv, tx_buffer);
1683}
1684
1685static int at76_assoc_req(struct at76_priv *priv, struct bss_info *bss)
1686{
1687 struct at76_tx_buffer *tx_buffer;
1688 struct ieee80211_hdr_3addr *mgmt;
1689 struct ieee80211_assoc_request *req;
1690 struct ieee80211_info_element *ie;
1691 char *essid;
1692 int essid_len;
1693 u16 capa;
1694
1695 BUG_ON(!bss);
1696
1697 tx_buffer = kmalloc(ASSOCREQ_MAX_SIZE + MAX_PADDING_SIZE, GFP_ATOMIC);
1698 if (!tx_buffer)
1699 return -ENOMEM;
1700
1701 req = (struct ieee80211_assoc_request *)tx_buffer->packet;
1702 mgmt = &req->header;
1703 ie = req->info_element;
1704
1705 /* make wireless header */
1706 mgmt->frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1707 IEEE80211_STYPE_ASSOC_REQ);
1708
1709 mgmt->duration_id = cpu_to_le16(0x8000);
1710 memcpy(mgmt->addr1, bss->bssid, ETH_ALEN);
1711 memcpy(mgmt->addr2, priv->netdev->dev_addr, ETH_ALEN);
1712 memcpy(mgmt->addr3, bss->bssid, ETH_ALEN);
1713 mgmt->seq_ctl = cpu_to_le16(0);
1714
 1715	/* we must set the Privacy bit in the capabilities to ensure an
 1716	   Agere-based AP with optional WEP transmits encrypted frames
 1717	   to us.  APs only set the Privacy bit in their capabilities
 1718	   if WEP is mandatory in the BSS! */
1719 capa = bss->capa;
1720 if (priv->wep_enabled)
1721 capa |= WLAN_CAPABILITY_PRIVACY;
1722 if (priv->preamble_type != PREAMBLE_TYPE_LONG)
1723 capa |= WLAN_CAPABILITY_SHORT_PREAMBLE;
1724 req->capability = cpu_to_le16(capa);
1725
1726 req->listen_interval = cpu_to_le16(2 * bss->beacon_interval);
1727
1728 /* write TLV data elements */
1729
1730 ie->id = MFIE_TYPE_SSID;
1731 ie->len = bss->ssid_len;
1732 memcpy(ie->data, bss->ssid, bss->ssid_len);
1733 next_ie(&ie);
1734
1735 ie->id = MFIE_TYPE_RATES;
1736 ie->len = sizeof(hw_rates);
1737 memcpy(ie->data, hw_rates, sizeof(hw_rates));
1738 next_ie(&ie); /* ie points behind the supp_rates field */
1739
1740 /* init. at76_priv tx header */
1741 tx_buffer->wlength = cpu_to_le16((u8 *)ie - (u8 *)mgmt);
1742
1743 ie = req->info_element;
1744 essid = ie->data;
1745 essid_len = min_t(int, IW_ESSID_MAX_SIZE, ie->len);
1746
1747 next_ie(&ie); /* points to IE of rates now */
1748 at76_dbg(DBG_TX_MGMT,
1749 "%s: AssocReq bssid %s capa 0x%04x ssid %.*s rates %s",
1750 priv->netdev->name, mac2str(mgmt->addr3),
1751 le16_to_cpu(req->capability), essid_len, essid,
1752 hex2str(ie->data, ie->len));
1753
 1754	/* either send immediately (if no data tx is pending)
 1755	   or put it in the pending list */
1756 return at76_tx_mgmt(priv, tx_buffer);
1757}
1758
1759/* We got to check the bss_list for old entries */
1760static void at76_bss_list_timeout(unsigned long par)
1761{
1762 struct at76_priv *priv = (struct at76_priv *)par;
1763 unsigned long flags;
1764 struct list_head *lptr, *nptr;
1765 struct bss_info *ptr;
1766
1767 spin_lock_irqsave(&priv->bss_list_spinlock, flags);
1768
1769 list_for_each_safe(lptr, nptr, &priv->bss_list) {
1770
1771 ptr = list_entry(lptr, struct bss_info, list);
1772
1773 if (ptr != priv->curr_bss
1774 && time_after(jiffies, ptr->last_rx + BSS_LIST_TIMEOUT)) {
1775 at76_dbg(DBG_BSS_TABLE_RM,
1776 "%s: bss_list: removing old BSS %s ch %d",
1777 priv->netdev->name, mac2str(ptr->bssid),
1778 ptr->channel);
1779 list_del(&ptr->list);
1780 kfree(ptr);
1781 }
1782 }
1783 spin_unlock_irqrestore(&priv->bss_list_spinlock, flags);
1784 /* restart the timer */
1785 mod_timer(&priv->bss_list_timer, jiffies + BSS_LIST_TIMEOUT);
1786}
1787
1788static inline void at76_set_mac_state(struct at76_priv *priv,
1789 enum mac_state mac_state)
1790{
1791 at76_dbg(DBG_MAC_STATE, "%s state: %s", priv->netdev->name,
1792 mac_states[mac_state]);
1793 priv->mac_state = mac_state;
1794}
1795
1796static void at76_dump_bss_table(struct at76_priv *priv)
1797{
1798 struct bss_info *ptr;
1799 unsigned long flags;
1800 struct list_head *lptr;
1801
1802 spin_lock_irqsave(&priv->bss_list_spinlock, flags);
1803
1804 at76_dbg(DBG_BSS_TABLE, "%s BSS table (curr=%p):", priv->netdev->name,
1805 priv->curr_bss);
1806
1807 list_for_each(lptr, &priv->bss_list) {
1808 ptr = list_entry(lptr, struct bss_info, list);
1809 at76_dbg(DBG_BSS_TABLE, "0x%p: bssid %s channel %d ssid %.*s "
1810 "(%s) capa 0x%04x rates %s rssi %d link %d noise %d",
1811 ptr, mac2str(ptr->bssid), ptr->channel, ptr->ssid_len,
1812 ptr->ssid, hex2str(ptr->ssid, ptr->ssid_len),
1813 ptr->capa, hex2str(ptr->rates, ptr->rates_len),
1814 ptr->rssi, ptr->link_qual, ptr->noise_level);
1815 }
1816 spin_unlock_irqrestore(&priv->bss_list_spinlock, flags);
1817}
1818
1819/* Called upon successful association to mark interface as connected */
1820static void at76_work_assoc_done(struct work_struct *work)
1821{
1822 struct at76_priv *priv = container_of(work, struct at76_priv,
1823 work_assoc_done);
1824
1825 mutex_lock(&priv->mtx);
1826
1827 WARN_ON(priv->mac_state != MAC_ASSOC);
1828 WARN_ON(!priv->curr_bss);
1829 if (priv->mac_state != MAC_ASSOC || !priv->curr_bss)
1830 goto exit;
1831
1832 if (priv->iw_mode == IW_MODE_INFRA) {
1833 if (priv->pm_mode != AT76_PM_OFF) {
1834 /* calculate the listen interval in units of
1835 beacon intervals of the curr_bss */
1836 u32 pm_period_beacon = (priv->pm_period >> 10) /
1837 priv->curr_bss->beacon_interval;
1838
1839 pm_period_beacon = max(pm_period_beacon, 2u);
1840 pm_period_beacon = min(pm_period_beacon, 0xffffu);
1841
1842 at76_dbg(DBG_PM,
1843 "%s: pm_mode %d assoc id 0x%x listen int %d",
1844 priv->netdev->name, priv->pm_mode,
1845 priv->assoc_id, pm_period_beacon);
1846
1847 at76_set_associd(priv, priv->assoc_id);
1848 at76_set_listen_interval(priv, (u16)pm_period_beacon);
1849 }
1850 schedule_delayed_work(&priv->dwork_beacon, BEACON_TIMEOUT);
1851 }
1852 at76_set_pm_mode(priv);
1853
1854 netif_carrier_on(priv->netdev);
1855 netif_wake_queue(priv->netdev);
1856 at76_set_mac_state(priv, MAC_CONNECTED);
1857 at76_iwevent_bss_connect(priv->netdev, priv->curr_bss->bssid);
1858 at76_dbg(DBG_PROGRESS, "%s: connected to BSSID %s",
1859 priv->netdev->name, mac2str(priv->curr_bss->bssid));
1860
1861exit:
1862 mutex_unlock(&priv->mtx);
1863}
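/*
 * Worked example for the listen-interval calculation above, assuming
 * pm_period is in microseconds and beacon_interval in TU: pm_period =
 * 1024000 us gives pm_period >> 10 = 1000, and with beacon_interval = 100
 * this yields pm_period_beacon = 10, i.e. wake up for every 10th beacon,
 * clamped to the range [2, 0xffff] before being written with
 * at76_set_listen_interval().
 */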
1864
1865/* We only store the new mac address in netdev struct,
1866 it gets set when the netdev is opened. */
1867static int at76_set_mac_address(struct net_device *netdev, void *addr)
1868{
1869 struct sockaddr *mac = addr;
1870 memcpy(netdev->dev_addr, mac->sa_data, ETH_ALEN);
1871 return 1;
1872}
1873
1874static struct net_device_stats *at76_get_stats(struct net_device *netdev)
1875{
1876 struct at76_priv *priv = netdev_priv(netdev);
1877 return &priv->stats;
1878}
1879
1880static struct iw_statistics *at76_get_wireless_stats(struct net_device *netdev)
1881{
1882 struct at76_priv *priv = netdev_priv(netdev);
1883
1884 at76_dbg(DBG_IOCTL, "RETURN qual %d level %d noise %d updated %d",
1885 priv->wstats.qual.qual, priv->wstats.qual.level,
1886 priv->wstats.qual.noise, priv->wstats.qual.updated);
1887
1888 return &priv->wstats;
1889}
1890
1891static void at76_set_multicast(struct net_device *netdev)
1892{
1893 struct at76_priv *priv = netdev_priv(netdev);
1894 int promisc;
1895
1896 promisc = ((netdev->flags & IFF_PROMISC) != 0);
1897 if (promisc != priv->promisc) {
1898 /* This gets called in interrupt, must reschedule */
1899 priv->promisc = promisc;
1900 schedule_work(&priv->work_set_promisc);
1901 }
1902}
1903
1904/* Stop all network activity, flush all pending tasks */
1905static void at76_quiesce(struct at76_priv *priv)
1906{
1907 unsigned long flags;
1908
1909 netif_stop_queue(priv->netdev);
1910 netif_carrier_off(priv->netdev);
1911
1912 at76_set_mac_state(priv, MAC_INIT);
1913
1914 cancel_delayed_work(&priv->dwork_get_scan);
1915 cancel_delayed_work(&priv->dwork_beacon);
1916 cancel_delayed_work(&priv->dwork_auth);
1917 cancel_delayed_work(&priv->dwork_assoc);
1918 cancel_delayed_work(&priv->dwork_restart);
1919
1920 spin_lock_irqsave(&priv->mgmt_spinlock, flags);
1921 kfree(priv->next_mgmt_bulk);
1922 priv->next_mgmt_bulk = NULL;
1923 spin_unlock_irqrestore(&priv->mgmt_spinlock, flags);
1924}
1925
1926/*******************************************************************************
1927 * at76_priv implementations of iw_handler functions:
1928 */
1929static int at76_iw_handler_commit(struct net_device *netdev,
1930 struct iw_request_info *info,
1931 void *null, char *extra)
1932{
1933 struct at76_priv *priv = netdev_priv(netdev);
1934
1935 at76_dbg(DBG_IOCTL, "%s %s: restarting the device", netdev->name,
1936 __func__);
1937
1938 if (priv->mac_state != MAC_INIT)
1939 at76_quiesce(priv);
1940
 1941	/* Wait half a second before the restart to process subsequent
1942 * requests from the same iwconfig in a single restart */
1943 schedule_delayed_work(&priv->dwork_restart, HZ / 2);
1944
1945 return 0;
1946}
1947
1948static int at76_iw_handler_get_name(struct net_device *netdev,
1949 struct iw_request_info *info,
1950 char *name, char *extra)
1951{
1952 strcpy(name, "IEEE 802.11b");
1953 at76_dbg(DBG_IOCTL, "%s: SIOCGIWNAME - name %s", netdev->name, name);
1954 return 0;
1955}
1956
1957static int at76_iw_handler_set_freq(struct net_device *netdev,
1958 struct iw_request_info *info,
1959 struct iw_freq *freq, char *extra)
1960{
1961 struct at76_priv *priv = netdev_priv(netdev);
1962 int chan = -1;
1963 int ret = -EIWCOMMIT;
1964 at76_dbg(DBG_IOCTL, "%s: SIOCSIWFREQ - freq.m %d freq.e %d",
1965 netdev->name, freq->m, freq->e);
1966
1967 if ((freq->e == 0) && (freq->m <= 1000))
1968 /* Setting by channel number */
1969 chan = freq->m;
1970 else {
1971 /* Setting by frequency - search the table */
1972 int mult = 1;
1973 int i;
1974
1975 for (i = 0; i < (6 - freq->e); i++)
1976 mult *= 10;
1977
1978 for (i = 0; i < NUM_CHANNELS; i++) {
1979 if (freq->m == (channel_frequency[i] * mult))
1980 chan = i + 1;
1981 }
1982 }
1983
1984 if (chan < 1 || !priv->domain)
1985 /* non-positive channels are invalid
1986 * we need a domain info to set the channel
1987 * either that or an invalid frequency was
1988 * provided by the user */
1989 ret = -EINVAL;
1990 else if (!(priv->domain->channel_map & (1 << (chan - 1)))) {
1991 printk(KERN_INFO "%s: channel %d not allowed for domain %s\n",
1992 priv->netdev->name, chan, priv->domain->name);
1993 ret = -EINVAL;
1994 }
1995
1996 if (ret == -EIWCOMMIT) {
1997 priv->channel = chan;
1998 at76_dbg(DBG_IOCTL, "%s: SIOCSIWFREQ - ch %d", netdev->name,
1999 chan);
2000 }
2001
2002 return ret;
2003}
2004
2005static int at76_iw_handler_get_freq(struct net_device *netdev,
2006 struct iw_request_info *info,
2007 struct iw_freq *freq, char *extra)
2008{
2009 struct at76_priv *priv = netdev_priv(netdev);
2010
2011 freq->m = priv->channel;
2012 freq->e = 0;
2013
2014 if (priv->channel)
2015 at76_dbg(DBG_IOCTL, "%s: SIOCGIWFREQ - freq %ld x 10e%d",
2016 netdev->name, channel_frequency[priv->channel - 1], 6);
2017
2018 at76_dbg(DBG_IOCTL, "%s: SIOCGIWFREQ - ch %d", netdev->name,
2019 priv->channel);
2020
2021 return 0;
2022}
2023
2024static int at76_iw_handler_set_mode(struct net_device *netdev,
2025 struct iw_request_info *info,
2026 __u32 *mode, char *extra)
2027{
2028 struct at76_priv *priv = netdev_priv(netdev);
2029
2030 at76_dbg(DBG_IOCTL, "%s: SIOCSIWMODE - %d", netdev->name, *mode);
2031
2032 if ((*mode != IW_MODE_ADHOC) && (*mode != IW_MODE_INFRA) &&
2033 (*mode != IW_MODE_MONITOR))
2034 return -EINVAL;
2035
2036 priv->iw_mode = *mode;
2037 if (priv->iw_mode != IW_MODE_INFRA)
2038 priv->pm_mode = AT76_PM_OFF;
2039
2040 return -EIWCOMMIT;
2041}
2042
2043static int at76_iw_handler_get_mode(struct net_device *netdev,
2044 struct iw_request_info *info,
2045 __u32 *mode, char *extra)
2046{
2047 struct at76_priv *priv = netdev_priv(netdev);
2048
2049 *mode = priv->iw_mode;
2050
2051 at76_dbg(DBG_IOCTL, "%s: SIOCGIWMODE - %d", netdev->name, *mode);
2052
2053 return 0;
2054}
2055
2056static int at76_iw_handler_get_range(struct net_device *netdev,
2057 struct iw_request_info *info,
2058 struct iw_point *data, char *extra)
2059{
2060 /* inspired by atmel.c */
2061 struct at76_priv *priv = netdev_priv(netdev);
2062 struct iw_range *range = (struct iw_range *)extra;
2063 int i;
2064
2065 data->length = sizeof(struct iw_range);
2066 memset(range, 0, sizeof(struct iw_range));
2067
2068 /* TODO: range->throughput = xxxxxx; */
2069
2070 range->min_nwid = 0x0000;
2071 range->max_nwid = 0x0000;
2072
2073 /* this driver doesn't maintain sensitivity information */
2074 range->sensitivity = 0;
2075
2076 range->max_qual.qual = 100;
2077 range->max_qual.level = 100;
2078 range->max_qual.noise = 0;
2079 range->max_qual.updated = IW_QUAL_NOISE_INVALID;
2080
2081 range->avg_qual.qual = 50;
2082 range->avg_qual.level = 50;
2083 range->avg_qual.noise = 0;
2084 range->avg_qual.updated = IW_QUAL_NOISE_INVALID;
2085
2086 range->bitrate[0] = 1000000;
2087 range->bitrate[1] = 2000000;
2088 range->bitrate[2] = 5500000;
2089 range->bitrate[3] = 11000000;
2090 range->num_bitrates = 4;
2091
2092 range->min_rts = 0;
2093 range->max_rts = MAX_RTS_THRESHOLD;
2094
2095 range->min_frag = MIN_FRAG_THRESHOLD;
2096 range->max_frag = MAX_FRAG_THRESHOLD;
2097
2098 range->pmp_flags = IW_POWER_PERIOD;
2099 range->pmt_flags = IW_POWER_ON;
2100 range->pm_capa = IW_POWER_PERIOD | IW_POWER_ALL_R;
2101
2102 range->encoding_size[0] = WEP_SMALL_KEY_LEN;
2103 range->encoding_size[1] = WEP_LARGE_KEY_LEN;
2104 range->num_encoding_sizes = 2;
2105 range->max_encoding_tokens = WEP_KEYS;
2106
2107 /* both WL-240U and Linksys WUSB11 v2.6 specify 15 dBm as output power
2108 - take this for all (ignore antenna gains) */
2109 range->txpower[0] = 15;
2110 range->num_txpower = 1;
2111 range->txpower_capa = IW_TXPOW_DBM;
2112
2113 range->we_version_source = WIRELESS_EXT;
2114 range->we_version_compiled = WIRELESS_EXT;
2115
2116 /* same as the values used in atmel.c */
2117 range->retry_capa = IW_RETRY_LIMIT;
2118 range->retry_flags = IW_RETRY_LIMIT;
2119 range->r_time_flags = 0;
2120 range->min_retry = 1;
2121 range->max_retry = 255;
2122
2123 range->num_channels = NUM_CHANNELS;
2124 range->num_frequency = 0;
2125
2126 for (i = 0; i < NUM_CHANNELS; i++) {
2127 /* test if channel map bit is raised */
2128 if (priv->domain->channel_map & (0x1 << i)) {
2129 range->num_frequency += 1;
2130
2131 range->freq[i].i = i + 1;
2132 range->freq[i].m = channel_frequency[i] * 100000;
2133 range->freq[i].e = 1; /* freq * 10^1 */
2134 }
2135 }
2136
2137 at76_dbg(DBG_IOCTL, "%s: SIOCGIWRANGE", netdev->name);
2138
2139 return 0;
2140}
2141
2142static int at76_iw_handler_set_spy(struct net_device *netdev,
2143 struct iw_request_info *info,
2144 struct iw_point *data, char *extra)
2145{
2146 struct at76_priv *priv = netdev_priv(netdev);
2147 int ret = 0;
2148
2149 at76_dbg(DBG_IOCTL, "%s: SIOCSIWSPY - number of addresses %d",
2150 netdev->name, data->length);
2151
2152 spin_lock_bh(&priv->spy_spinlock);
2153 ret = iw_handler_set_spy(priv->netdev, info, (union iwreq_data *)data,
2154 extra);
2155 spin_unlock_bh(&priv->spy_spinlock);
2156
2157 return ret;
2158}
2159
2160static int at76_iw_handler_get_spy(struct net_device *netdev,
2161 struct iw_request_info *info,
2162 struct iw_point *data, char *extra)
2163{
2164
2165 struct at76_priv *priv = netdev_priv(netdev);
2166 int ret = 0;
2167
2168 spin_lock_bh(&priv->spy_spinlock);
2169 ret = iw_handler_get_spy(priv->netdev, info,
2170 (union iwreq_data *)data, extra);
2171 spin_unlock_bh(&priv->spy_spinlock);
2172
2173 at76_dbg(DBG_IOCTL, "%s: SIOCGIWSPY - number of addresses %d",
2174 netdev->name, data->length);
2175
2176 return ret;
2177}
2178
2179static int at76_iw_handler_set_thrspy(struct net_device *netdev,
2180 struct iw_request_info *info,
2181 struct iw_point *data, char *extra)
2182{
2183 struct at76_priv *priv = netdev_priv(netdev);
2184 int ret;
2185
2186 at76_dbg(DBG_IOCTL, "%s: SIOCSIWTHRSPY - number of addresses %d)",
2187 netdev->name, data->length);
2188
2189 spin_lock_bh(&priv->spy_spinlock);
2190 ret = iw_handler_set_thrspy(netdev, info, (union iwreq_data *)data,
2191 extra);
2192 spin_unlock_bh(&priv->spy_spinlock);
2193
2194 return ret;
2195}
2196
2197static int at76_iw_handler_get_thrspy(struct net_device *netdev,
2198 struct iw_request_info *info,
2199 struct iw_point *data, char *extra)
2200{
2201 struct at76_priv *priv = netdev_priv(netdev);
2202 int ret;
2203
2204 spin_lock_bh(&priv->spy_spinlock);
2205 ret = iw_handler_get_thrspy(netdev, info, (union iwreq_data *)data,
2206 extra);
2207 spin_unlock_bh(&priv->spy_spinlock);
2208
2209 at76_dbg(DBG_IOCTL, "%s: SIOCGIWTHRSPY - number of addresses %d)",
2210 netdev->name, data->length);
2211
2212 return ret;
2213}
2214
2215static int at76_iw_handler_set_wap(struct net_device *netdev,
2216 struct iw_request_info *info,
2217 struct sockaddr *ap_addr, char *extra)
2218{
2219 struct at76_priv *priv = netdev_priv(netdev);
2220
2221 at76_dbg(DBG_IOCTL, "%s: SIOCSIWAP - wap/bssid %s", netdev->name,
2222 mac2str(ap_addr->sa_data));
2223
2224 /* if the incoming address == ff:ff:ff:ff:ff:ff, the user has
2225 chosen any or auto AP preference */
2226 if (is_broadcast_ether_addr(ap_addr->sa_data)
2227 || is_zero_ether_addr(ap_addr->sa_data))
2228 priv->wanted_bssid_valid = 0;
2229 else {
2230 /* user wants to set a preferred AP address */
2231 priv->wanted_bssid_valid = 1;
2232 memcpy(priv->wanted_bssid, ap_addr->sa_data, ETH_ALEN);
2233 }
2234
2235 return -EIWCOMMIT;
2236}
2237
2238static int at76_iw_handler_get_wap(struct net_device *netdev,
2239 struct iw_request_info *info,
2240 struct sockaddr *ap_addr, char *extra)
2241{
2242 struct at76_priv *priv = netdev_priv(netdev);
2243
2244 ap_addr->sa_family = ARPHRD_ETHER;
2245 memcpy(ap_addr->sa_data, priv->bssid, ETH_ALEN);
2246
2247 at76_dbg(DBG_IOCTL, "%s: SIOCGIWAP - wap/bssid %s", netdev->name,
2248 mac2str(ap_addr->sa_data));
2249
2250 return 0;
2251}
2252
2253static int at76_iw_handler_set_scan(struct net_device *netdev,
2254 struct iw_request_info *info,
2255 union iwreq_data *wrqu, char *extra)
2256{
2257 struct at76_priv *priv = netdev_priv(netdev);
2258 int ret = 0;
2259
2260 at76_dbg(DBG_IOCTL, "%s: SIOCSIWSCAN", netdev->name);
2261
2262 if (mutex_lock_interruptible(&priv->mtx))
2263 return -EINTR;
2264
2265 if (!netif_running(netdev)) {
2266 ret = -ENETDOWN;
2267 goto exit;
2268 }
2269
2270 /* jal: we don't allow "iwlist ethX scan" while we are
2271 in monitor mode */
2272 if (priv->iw_mode == IW_MODE_MONITOR) {
2273 ret = -EBUSY;
2274 goto exit;
2275 }
2276
2277 /* Discard old scan results */
2278 if ((jiffies - priv->last_scan) > (20 * HZ))
2279 priv->scan_state = SCAN_IDLE;
2280 priv->last_scan = jiffies;
2281
2282 /* Initiate a scan command */
2283 if (priv->scan_state == SCAN_IN_PROGRESS) {
2284 ret = -EBUSY;
2285 goto exit;
2286 }
2287
2288 priv->scan_state = SCAN_IN_PROGRESS;
2289
2290 at76_quiesce(priv);
2291
2292 /* Try to do a passive or active scan if WE asks us to. */
2293 if (wrqu->data.length
2294 && wrqu->data.length == sizeof(struct iw_scan_req)) {
2295 struct iw_scan_req *req = (struct iw_scan_req *)extra;
2296
2297 if (req->scan_type == IW_SCAN_TYPE_PASSIVE)
2298 priv->scan_mode = SCAN_TYPE_PASSIVE;
2299 else if (req->scan_type == IW_SCAN_TYPE_ACTIVE)
2300 priv->scan_mode = SCAN_TYPE_ACTIVE;
2301
2302 /* Sanity check values? */
2303 if (req->min_channel_time > 0)
2304 priv->scan_min_time = req->min_channel_time;
2305
2306 if (req->max_channel_time > 0)
2307 priv->scan_max_time = req->max_channel_time;
2308 }
2309
2310 /* change to scanning state */
2311 at76_set_mac_state(priv, MAC_SCANNING);
2312 schedule_work(&priv->work_start_scan);
2313
2314exit:
2315 mutex_unlock(&priv->mtx);
2316 return ret;
2317}
2318
2319static int at76_iw_handler_get_scan(struct net_device *netdev,
2320 struct iw_request_info *info,
2321 struct iw_point *data, char *extra)
2322{
2323 struct at76_priv *priv = netdev_priv(netdev);
2324 unsigned long flags;
2325 struct list_head *lptr, *nptr;
2326 struct bss_info *curr_bss;
2327 struct iw_event *iwe = kmalloc(sizeof(struct iw_event), GFP_KERNEL);
2328 char *curr_val, *curr_pos = extra;
2329 int i;
2330
2331 at76_dbg(DBG_IOCTL, "%s: SIOCGIWSCAN", netdev->name);
2332
2333 if (!iwe)
2334 return -ENOMEM;
2335
2336 if (priv->scan_state != SCAN_COMPLETED) {
2337 /* scan not yet finished */
2338 kfree(iwe);
2339 return -EAGAIN;
2340 }
2341
2342 spin_lock_irqsave(&priv->bss_list_spinlock, flags);
2343
2344 list_for_each_safe(lptr, nptr, &priv->bss_list) {
2345 curr_bss = list_entry(lptr, struct bss_info, list);
2346
2347 iwe->cmd = SIOCGIWAP;
2348 iwe->u.ap_addr.sa_family = ARPHRD_ETHER;
2349 memcpy(iwe->u.ap_addr.sa_data, curr_bss->bssid, 6);
2350 curr_pos = iwe_stream_add_event(info, curr_pos,
2351 extra + IW_SCAN_MAX_DATA, iwe,
2352 IW_EV_ADDR_LEN);
2353
2354 iwe->u.data.length = curr_bss->ssid_len;
2355 iwe->cmd = SIOCGIWESSID;
2356 iwe->u.data.flags = 1;
2357
2358 curr_pos = iwe_stream_add_point(info, curr_pos,
2359 extra + IW_SCAN_MAX_DATA, iwe,
2360 curr_bss->ssid);
2361
2362 iwe->cmd = SIOCGIWMODE;
2363 iwe->u.mode = (curr_bss->capa & WLAN_CAPABILITY_IBSS) ?
2364 IW_MODE_ADHOC :
2365 (curr_bss->capa & WLAN_CAPABILITY_ESS) ?
2366 IW_MODE_MASTER : IW_MODE_AUTO;
2367 /* IW_MODE_AUTO = 0, which seems to be
2368 * the most logical value to return in this case */
2369 curr_pos = iwe_stream_add_event(info, curr_pos,
2370 extra + IW_SCAN_MAX_DATA, iwe,
2371 IW_EV_UINT_LEN);
2372
2373 iwe->cmd = SIOCGIWFREQ;
2374 iwe->u.freq.m = curr_bss->channel;
2375 iwe->u.freq.e = 0;
2376 curr_pos = iwe_stream_add_event(info, curr_pos,
2377 extra + IW_SCAN_MAX_DATA, iwe,
2378 IW_EV_FREQ_LEN);
2379
2380 iwe->cmd = SIOCGIWENCODE;
2381 if (curr_bss->capa & WLAN_CAPABILITY_PRIVACY)
2382 iwe->u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
2383 else
2384 iwe->u.data.flags = IW_ENCODE_DISABLED;
2385
2386 iwe->u.data.length = 0;
2387 curr_pos = iwe_stream_add_point(info, curr_pos,
2388 extra + IW_SCAN_MAX_DATA, iwe,
2389 NULL);
2390
2391 /* Add quality statistics */
2392 iwe->cmd = IWEVQUAL;
2393 iwe->u.qual.noise = 0;
2394 iwe->u.qual.updated =
2395 IW_QUAL_NOISE_INVALID | IW_QUAL_LEVEL_UPDATED;
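/* scale the firmware's raw RSSI (which appears to max out around 42) to a
 * 0..100 percentage; anything larger is clamped just below */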
2396 iwe->u.qual.level = (curr_bss->rssi * 100 / 42);
2397 if (iwe->u.qual.level > 100)
2398 iwe->u.qual.level = 100;
2399 if (at76_is_intersil(priv->board_type))
2400 iwe->u.qual.qual = curr_bss->link_qual;
2401 else {
2402 iwe->u.qual.qual = 0;
2403 iwe->u.qual.updated |= IW_QUAL_QUAL_INVALID;
2404 }
2405 /* Add new value to event */
2406 curr_pos = iwe_stream_add_event(info, curr_pos,
2407 extra + IW_SCAN_MAX_DATA, iwe,
2408 IW_EV_QUAL_LEN);
2409
2410 /* Rate: stuffing multiple values in a single event requires
2411 * a bit more of magic - Jean II */
2412 curr_val = curr_pos + IW_EV_LCP_LEN;
2413
2414 iwe->cmd = SIOCGIWRATE;
2415 /* Those two flags are ignored... */
2416 iwe->u.bitrate.fixed = 0;
2417 iwe->u.bitrate.disabled = 0;
2418 /* Max 8 values */
2419 for (i = 0; i < curr_bss->rates_len; i++) {
2420 /* Bit rate given in 500 kb/s units (+ 0x80) */
2421 iwe->u.bitrate.value =
2422 ((curr_bss->rates[i] & 0x7f) * 500000);
2423 /* Add new value to event */
2424 curr_val = iwe_stream_add_value(info, curr_pos,
2425 curr_val,
2426 extra +
2427 IW_SCAN_MAX_DATA, iwe,
2428 IW_EV_PARAM_LEN);
2429 }
2430
2431 /* Check if we added any event */
2432 if ((curr_val - curr_pos) > IW_EV_LCP_LEN)
2433 curr_pos = curr_val;
2434
2435 /* more information may be sent back using IWEVCUSTOM */
2436
2437 }
2438
2439 spin_unlock_irqrestore(&priv->bss_list_spinlock, flags);
2440
2441 data->length = (curr_pos - extra);
2442 data->flags = 0;
2443
2444 kfree(iwe);
2445 return 0;
2446}
2447
2448static int at76_iw_handler_set_essid(struct net_device *netdev,
2449 struct iw_request_info *info,
2450 struct iw_point *data, char *extra)
2451{
2452 struct at76_priv *priv = netdev_priv(netdev);
2453
2454 at76_dbg(DBG_IOCTL, "%s: SIOCSIWESSID - %s", netdev->name, extra);
2455
2456 if (data->flags) {
2457 memcpy(priv->essid, extra, data->length);
2458 priv->essid_size = data->length;
2459 } else
2460 priv->essid_size = 0; /* Use any SSID */
2461
2462 return -EIWCOMMIT;
2463}
2464
2465static int at76_iw_handler_get_essid(struct net_device *netdev,
2466 struct iw_request_info *info,
2467 struct iw_point *data, char *extra)
2468{
2469 struct at76_priv *priv = netdev_priv(netdev);
2470
2471 if (priv->essid_size) {
2472 /* not the ANY ssid in priv->essid */
2473 data->flags = 1;
2474 data->length = priv->essid_size;
2475 memcpy(extra, priv->essid, data->length);
2476 } else {
2477 /* the ANY ssid was specified */
2478 if (priv->mac_state == MAC_CONNECTED && priv->curr_bss) {
2479 /* report the SSID we have found */
2480 data->flags = 1;
2481 data->length = priv->curr_bss->ssid_len;
2482 memcpy(extra, priv->curr_bss->ssid, data->length);
2483 } else {
2484 /* report ANY back */
2485 data->flags = 0;
2486 data->length = 0;
2487 }
2488 }
2489
2490 at76_dbg(DBG_IOCTL, "%s: SIOCGIWESSID - %.*s", netdev->name,
2491 data->length, extra);
2492
2493 return 0;
2494}
2495
2496static int at76_iw_handler_set_rate(struct net_device *netdev,
2497 struct iw_request_info *info,
2498 struct iw_param *bitrate, char *extra)
2499{
2500 struct at76_priv *priv = netdev_priv(netdev);
2501 int ret = -EIWCOMMIT;
2502
2503 at76_dbg(DBG_IOCTL, "%s: SIOCSIWRATE - %d", netdev->name,
2504 bitrate->value);
2505
2506 switch (bitrate->value) {
2507 case -1:
2508 priv->txrate = TX_RATE_AUTO;
2509 break; /* auto rate */
2510 case 1000000:
2511 priv->txrate = TX_RATE_1MBIT;
2512 break;
2513 case 2000000:
2514 priv->txrate = TX_RATE_2MBIT;
2515 break;
2516 case 5500000:
2517 priv->txrate = TX_RATE_5_5MBIT;
2518 break;
2519 case 11000000:
2520 priv->txrate = TX_RATE_11MBIT;
2521 break;
2522 default:
2523 ret = -EINVAL;
2524 }
2525
2526 return ret;
2527}
2528
2529static int at76_iw_handler_get_rate(struct net_device *netdev,
2530 struct iw_request_info *info,
2531 struct iw_param *bitrate, char *extra)
2532{
2533 struct at76_priv *priv = netdev_priv(netdev);
2534 int ret = 0;
2535
2536 switch (priv->txrate) {
2537 /* return max rate if RATE_AUTO */
2538 case TX_RATE_AUTO:
2539 bitrate->value = 11000000;
2540 break;
2541 case TX_RATE_1MBIT:
2542 bitrate->value = 1000000;
2543 break;
2544 case TX_RATE_2MBIT:
2545 bitrate->value = 2000000;
2546 break;
2547 case TX_RATE_5_5MBIT:
2548 bitrate->value = 5500000;
2549 break;
2550 case TX_RATE_11MBIT:
2551 bitrate->value = 11000000;
2552 break;
2553 default:
2554 ret = -EINVAL;
2555 }
2556
2557 bitrate->fixed = (priv->txrate != TX_RATE_AUTO);
2558 bitrate->disabled = 0;
2559
2560 at76_dbg(DBG_IOCTL, "%s: SIOCGIWRATE - %d", netdev->name,
2561 bitrate->value);
2562
2563 return ret;
2564}
2565
2566static int at76_iw_handler_set_rts(struct net_device *netdev,
2567 struct iw_request_info *info,
2568 struct iw_param *rts, char *extra)
2569{
2570 struct at76_priv *priv = netdev_priv(netdev);
2571 int ret = -EIWCOMMIT;
2572 int rthr = rts->value;
2573
2574 at76_dbg(DBG_IOCTL, "%s: SIOCSIWRTS - value %d disabled %s",
2575 netdev->name, rts->value, (rts->disabled) ? "true" : "false");
2576
2577 if (rts->disabled)
2578 rthr = MAX_RTS_THRESHOLD;
2579
2580 if ((rthr < 0) || (rthr > MAX_RTS_THRESHOLD))
2581 ret = -EINVAL;
2582 else
2583 priv->rts_threshold = rthr;
2584
2585 return ret;
2586}
2587
2588static int at76_iw_handler_get_rts(struct net_device *netdev,
2589 struct iw_request_info *info,
2590 struct iw_param *rts, char *extra)
2591{
2592 struct at76_priv *priv = netdev_priv(netdev);
2593
2594 rts->value = priv->rts_threshold;
2595 rts->disabled = (rts->value >= MAX_RTS_THRESHOLD);
2596 rts->fixed = 1;
2597
2598 at76_dbg(DBG_IOCTL, "%s: SIOCGIWRTS - value %d disabled %s",
2599 netdev->name, rts->value, (rts->disabled) ? "true" : "false");
2600
2601 return 0;
2602}
2603
2604static int at76_iw_handler_set_frag(struct net_device *netdev,
2605 struct iw_request_info *info,
2606 struct iw_param *frag, char *extra)
2607{
2608 struct at76_priv *priv = netdev_priv(netdev);
2609 int ret = -EIWCOMMIT;
2610 int fthr = frag->value;
2611
2612 at76_dbg(DBG_IOCTL, "%s: SIOCSIWFRAG - value %d, disabled %s",
2613 netdev->name, frag->value,
2614 (frag->disabled) ? "true" : "false");
2615
2616 if (frag->disabled)
2617 fthr = MAX_FRAG_THRESHOLD;
2618
2619 if ((fthr < MIN_FRAG_THRESHOLD) || (fthr > MAX_FRAG_THRESHOLD))
2620 ret = -EINVAL;
2621 else
2622 priv->frag_threshold = fthr & ~0x1; /* get an even value */
2623
2624 return ret;
2625}
2626
2627static int at76_iw_handler_get_frag(struct net_device *netdev,
2628 struct iw_request_info *info,
2629 struct iw_param *frag, char *extra)
2630{
2631 struct at76_priv *priv = netdev_priv(netdev);
2632
2633 frag->value = priv->frag_threshold;
2634 frag->disabled = (frag->value >= MAX_FRAG_THRESHOLD);
2635 frag->fixed = 1;
2636
2637 at76_dbg(DBG_IOCTL, "%s: SIOCGIWFRAG - value %d, disabled %s",
2638 netdev->name, frag->value,
2639 (frag->disabled) ? "true" : "false");
2640
2641 return 0;
2642}
2643
2644static int at76_iw_handler_get_txpow(struct net_device *netdev,
2645 struct iw_request_info *info,
2646 struct iw_param *power, char *extra)
2647{
2648 power->value = 15;
2649 power->fixed = 1; /* No power control */
2650 power->disabled = 0;
2651 power->flags = IW_TXPOW_DBM;
2652
2653 at76_dbg(DBG_IOCTL, "%s: SIOCGIWTXPOW - txpow %d dBm", netdev->name,
2654 power->value);
2655
2656 return 0;
2657}
2658
2659/* jal: short retry is handled by the firmware (at least 0.90.x),
2660 while long retry is not (?) */
2661static int at76_iw_handler_set_retry(struct net_device *netdev,
2662 struct iw_request_info *info,
2663 struct iw_param *retry, char *extra)
2664{
2665 struct at76_priv *priv = netdev_priv(netdev);
2666 int ret = -EIWCOMMIT;
2667
2668 at76_dbg(DBG_IOCTL, "%s: SIOCSIWRETRY disabled %d flags 0x%x val %d",
2669 netdev->name, retry->disabled, retry->flags, retry->value);
2670
2671 if (!retry->disabled && (retry->flags & IW_RETRY_LIMIT)) {
2672 if ((retry->flags & IW_RETRY_MIN) ||
2673 !(retry->flags & IW_RETRY_MAX))
2674 priv->short_retry_limit = retry->value;
2675 else
2676 ret = -EINVAL;
2677 } else
2678 ret = -EINVAL;
2679
2680 return ret;
2681}
2682
2683/* Adapted (ripped) from atmel.c */
2684static int at76_iw_handler_get_retry(struct net_device *netdev,
2685 struct iw_request_info *info,
2686 struct iw_param *retry, char *extra)
2687{
2688 struct at76_priv *priv = netdev_priv(netdev);
2689
2690 at76_dbg(DBG_IOCTL, "%s: SIOCGIWRETRY", netdev->name);
2691
2692 retry->disabled = 0; /* Can't be disabled */
2693 retry->flags = IW_RETRY_LIMIT;
2694 retry->value = priv->short_retry_limit;
2695
2696 return 0;
2697}
2698
2699static int at76_iw_handler_set_encode(struct net_device *netdev,
2700 struct iw_request_info *info,
2701 struct iw_point *encoding, char *extra)
2702{
2703 struct at76_priv *priv = netdev_priv(netdev);
2704 int index = (encoding->flags & IW_ENCODE_INDEX) - 1;
2705 int len = encoding->length;
2706
2707 at76_dbg(DBG_IOCTL, "%s: SIOCSIWENCODE - enc.flags %08x "
2708 "pointer %p len %d", netdev->name, encoding->flags,
2709 encoding->pointer, encoding->length);
2710 at76_dbg(DBG_IOCTL,
2711 "%s: SIOCSIWENCODE - old wepstate: enabled %s key_id %d "
2712 "auth_mode %s", netdev->name,
2713 (priv->wep_enabled) ? "true" : "false", priv->wep_key_id,
2714 (priv->auth_mode ==
2715 WLAN_AUTH_SHARED_KEY) ? "restricted" : "open");
2716
2717 /* take the old default key if index is invalid */
2718 if ((index < 0) || (index >= WEP_KEYS))
2719 index = priv->wep_key_id;
2720
2721 if (len > 0) {
2722 if (len > WEP_LARGE_KEY_LEN)
2723 len = WEP_LARGE_KEY_LEN;
2724
2725 memset(priv->wep_keys[index], 0, WEP_KEY_LEN);
2726 memcpy(priv->wep_keys[index], extra, len);
2727 priv->wep_keys_len[index] = (len <= WEP_SMALL_KEY_LEN) ?
2728 WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN;
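/* i.e. anything up to WEP_SMALL_KEY_LEN bytes is stored as a short
 * (40-bit style) key and longer keys as long (104-bit style) keys; the
 * unused tail of wep_keys[index] stays zeroed from the memset above */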
2729 priv->wep_enabled = 1;
2730 }
2731
2732 priv->wep_key_id = index;
2733 priv->wep_enabled = ((encoding->flags & IW_ENCODE_DISABLED) == 0);
2734
2735 if (encoding->flags & IW_ENCODE_RESTRICTED)
2736 priv->auth_mode = WLAN_AUTH_SHARED_KEY;
2737 if (encoding->flags & IW_ENCODE_OPEN)
2738 priv->auth_mode = WLAN_AUTH_OPEN;
2739
2740 at76_dbg(DBG_IOCTL,
2741 "%s: SIOCSIWENCODE - new wepstate: enabled %s key_id %d "
2742 "key_len %d auth_mode %s", netdev->name,
2743 (priv->wep_enabled) ? "true" : "false", priv->wep_key_id + 1,
2744 priv->wep_keys_len[priv->wep_key_id],
2745 (priv->auth_mode ==
2746 WLAN_AUTH_SHARED_KEY) ? "restricted" : "open");
2747
2748 return -EIWCOMMIT;
2749}
2750
2751static int at76_iw_handler_get_encode(struct net_device *netdev,
2752 struct iw_request_info *info,
2753 struct iw_point *encoding, char *extra)
2754{
2755 struct at76_priv *priv = netdev_priv(netdev);
2756 int index = (encoding->flags & IW_ENCODE_INDEX) - 1;
2757
2758 if ((index < 0) || (index >= WEP_KEYS))
2759 index = priv->wep_key_id;
2760
2761 encoding->flags =
2762 (priv->auth_mode == WLAN_AUTH_SHARED_KEY) ?
2763 IW_ENCODE_RESTRICTED : IW_ENCODE_OPEN;
2764
2765 if (!priv->wep_enabled)
2766 encoding->flags |= IW_ENCODE_DISABLED;
2767
2768 if (encoding->pointer) {
2769 encoding->length = priv->wep_keys_len[index];
2770
2771 memcpy(extra, priv->wep_keys[index], priv->wep_keys_len[index]);
2772
2773 encoding->flags |= (index + 1);
2774 }
2775
2776 at76_dbg(DBG_IOCTL, "%s: SIOCGIWENCODE - enc.flags %08x "
2777 "pointer %p len %d", netdev->name, encoding->flags,
2778 encoding->pointer, encoding->length);
2779 at76_dbg(DBG_IOCTL,
2780 "%s: SIOCGIWENCODE - wepstate: enabled %s key_id %d "
2781 "key_len %d auth_mode %s", netdev->name,
2782 (priv->wep_enabled) ? "true" : "false", priv->wep_key_id + 1,
2783 priv->wep_keys_len[priv->wep_key_id],
2784 (priv->auth_mode ==
2785 WLAN_AUTH_SHARED_KEY) ? "restricted" : "open");
2786
2787 return 0;
2788}
2789
2790static int at76_iw_handler_set_power(struct net_device *netdev,
2791 struct iw_request_info *info,
2792 struct iw_param *prq, char *extra)
2793{
2794 int err = -EIWCOMMIT;
2795 struct at76_priv *priv = netdev_priv(netdev);
2796
2797 at76_dbg(DBG_IOCTL,
2798 "%s: SIOCSIWPOWER - disabled %s flags 0x%x value 0x%x",
2799 netdev->name, (prq->disabled) ? "true" : "false", prq->flags,
2800 prq->value);
2801
2802 if (prq->disabled)
2803 priv->pm_mode = AT76_PM_OFF;
2804 else {
2805 switch (prq->flags & IW_POWER_MODE) {
2806 case IW_POWER_ALL_R:
2807 case IW_POWER_ON:
2808 break;
2809 default:
2810 err = -EINVAL;
2811 goto exit;
2812 }
2813 if (prq->flags & IW_POWER_PERIOD)
2814 priv->pm_period = prq->value;
2815
2816 if (prq->flags & IW_POWER_TIMEOUT) {
2817 err = -EINVAL;
2818 goto exit;
2819 }
2820 priv->pm_mode = AT76_PM_ON;
2821 }
2822exit:
2823 return err;
2824}
2825
2826static int at76_iw_handler_get_power(struct net_device *netdev,
2827 struct iw_request_info *info,
2828 struct iw_param *power, char *extra)
2829{
2830 struct at76_priv *priv = netdev_priv(netdev);
2831
2832 power->disabled = (priv->pm_mode == AT76_PM_OFF);
2833 if (!power->disabled) {
2834 power->flags = IW_POWER_PERIOD | IW_POWER_ALL_R;
2835 power->value = priv->pm_period;
2836 }
2837
2838 at76_dbg(DBG_IOCTL, "%s: SIOCGIWPOWER - %s flags 0x%x value 0x%x",
2839 netdev->name, power->disabled ? "disabled" : "enabled",
2840 power->flags, power->value);
2841
2842 return 0;
2843}
2844
2845/*******************************************************************************
2846 * Private IOCTLS
2847 */
2848static int at76_iw_set_short_preamble(struct net_device *netdev,
2849 struct iw_request_info *info, char *name,
2850 char *extra)
2851{
2852 struct at76_priv *priv = netdev_priv(netdev);
2853 int val = *((int *)name);
2854 int ret = -EIWCOMMIT;
2855
2856 at76_dbg(DBG_IOCTL, "%s: AT76_SET_SHORT_PREAMBLE, %d",
2857 netdev->name, val);
2858
2859 if (val < PREAMBLE_TYPE_LONG || val > PREAMBLE_TYPE_AUTO)
2860 ret = -EINVAL;
2861 else
2862 priv->preamble_type = val;
2863
2864 return ret;
2865}
2866
2867static int at76_iw_get_short_preamble(struct net_device *netdev,
2868 struct iw_request_info *info,
2869 union iwreq_data *wrqu, char *extra)
2870{
2871 struct at76_priv *priv = netdev_priv(netdev);
2872
2873 snprintf(wrqu->name, sizeof(wrqu->name), "%s (%d)",
2874 preambles[priv->preamble_type], priv->preamble_type);
2875 return 0;
2876}
2877
2878static int at76_iw_set_debug(struct net_device *netdev,
2879 struct iw_request_info *info,
2880 struct iw_point *data, char *extra)
2881{
2882 char *ptr;
2883 u32 val;
2884
2885 if (data->length > 0) {
2886 val = simple_strtol(extra, &ptr, 0);
2887
2888 if (ptr == extra)
2889 val = DBG_DEFAULTS;
2890
2891 at76_dbg(DBG_IOCTL, "%s: AT76_SET_DEBUG input %d: %s -> 0x%x",
2892 netdev->name, data->length, extra, val);
2893 } else
2894 val = DBG_DEFAULTS;
2895
2896 at76_dbg(DBG_IOCTL, "%s: AT76_SET_DEBUG, old 0x%x, new 0x%x",
2897 netdev->name, at76_debug, val);
2898
2899 /* jal: some more output to pin down lockups */
2900 at76_dbg(DBG_IOCTL, "%s: netif running %d queue_stopped %d "
2901 "carrier_ok %d", netdev->name, netif_running(netdev),
2902 netif_queue_stopped(netdev), netif_carrier_ok(netdev));
2903
2904 at76_debug = val;
2905
2906 return 0;
2907}
2908
2909static int at76_iw_get_debug(struct net_device *netdev,
2910 struct iw_request_info *info,
2911 union iwreq_data *wrqu, char *extra)
2912{
2913 snprintf(wrqu->name, sizeof(wrqu->name), "0x%08x", at76_debug);
2914 return 0;
2915}
2916
2917static int at76_iw_set_powersave_mode(struct net_device *netdev,
2918 struct iw_request_info *info, char *name,
2919 char *extra)
2920{
2921 struct at76_priv *priv = netdev_priv(netdev);
2922 int val = *((int *)name);
2923 int ret = -EIWCOMMIT;
2924
2925 at76_dbg(DBG_IOCTL, "%s: AT76_SET_POWERSAVE_MODE, %d (%s)",
2926 netdev->name, val,
2927 val == AT76_PM_OFF ? "active" : val == AT76_PM_ON ? "save" :
2928 val == AT76_PM_SMART ? "smart save" : "<invalid>");
2929 if (val < AT76_PM_OFF || val > AT76_PM_SMART)
2930 ret = -EINVAL;
2931 else
2932 priv->pm_mode = val;
2933
2934 return ret;
2935}
2936
2937static int at76_iw_get_powersave_mode(struct net_device *netdev,
2938 struct iw_request_info *info,
2939 union iwreq_data *wrqu, char *extra)
2940{
2941 struct at76_priv *priv = netdev_priv(netdev);
2942 int *param = (int *)extra;
2943
2944 param[0] = priv->pm_mode;
2945 return 0;
2946}
2947
2948static int at76_iw_set_scan_times(struct net_device *netdev,
2949 struct iw_request_info *info, char *name,
2950 char *extra)
2951{
2952 struct at76_priv *priv = netdev_priv(netdev);
2953 int mint = *((int *)name);
2954 int maxt = *((int *)name + 1);
2955 int ret = -EIWCOMMIT;
2956
2957 at76_dbg(DBG_IOCTL, "%s: AT76_SET_SCAN_TIMES - min %d max %d",
2958 netdev->name, mint, maxt);
2959 if (mint <= 0 || maxt <= 0 || mint > maxt)
2960 ret = -EINVAL;
2961 else {
2962 priv->scan_min_time = mint;
2963 priv->scan_max_time = maxt;
2964 }
2965
2966 return ret;
2967}
2968
2969static int at76_iw_get_scan_times(struct net_device *netdev,
2970 struct iw_request_info *info,
2971 union iwreq_data *wrqu, char *extra)
2972{
2973 struct at76_priv *priv = netdev_priv(netdev);
2974 int *param = (int *)extra;
2975
2976 param[0] = priv->scan_min_time;
2977 param[1] = priv->scan_max_time;
2978 return 0;
2979}
2980
2981static int at76_iw_set_scan_mode(struct net_device *netdev,
2982 struct iw_request_info *info, char *name,
2983 char *extra)
2984{
2985 struct at76_priv *priv = netdev_priv(netdev);
2986 int val = *((int *)name);
2987 int ret = -EIWCOMMIT;
2988
2989 at76_dbg(DBG_IOCTL, "%s: AT76_SET_SCAN_MODE - mode %s",
2990 netdev->name, (val == SCAN_TYPE_ACTIVE) ? "active" :
2991 (val == SCAN_TYPE_PASSIVE) ? "passive" : "<invalid>");
2992
2993 if (val != SCAN_TYPE_ACTIVE && val != SCAN_TYPE_PASSIVE)
2994 ret = -EINVAL;
2995 else
2996 priv->scan_mode = val;
2997
2998 return ret;
2999}
3000
3001static int at76_iw_get_scan_mode(struct net_device *netdev,
3002 struct iw_request_info *info,
3003 union iwreq_data *wrqu, char *extra)
3004{
3005 struct at76_priv *priv = netdev_priv(netdev);
3006 int *param = (int *)extra;
3007
3008 param[0] = priv->scan_mode;
3009 return 0;
3010}
3011
3012#define AT76_SET_HANDLER(h, f) [h - SIOCIWFIRST] = (iw_handler) f
3013
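/* Illustrative expansion (with SIOCIWFIRST == SIOCSIWCOMMIT as usual):
 * AT76_SET_HANDLER(SIOCSIWFREQ, at76_iw_handler_set_freq) becomes
 * [SIOCSIWFREQ - SIOCIWFIRST] = (iw_handler) at76_iw_handler_set_freq,
 * so the wireless core can index at76_handlers[] directly by ioctl number. */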
3014/* Standard wireless handlers */
3015static const iw_handler at76_handlers[] = {
3016 AT76_SET_HANDLER(SIOCSIWCOMMIT, at76_iw_handler_commit),
3017 AT76_SET_HANDLER(SIOCGIWNAME, at76_iw_handler_get_name),
3018 AT76_SET_HANDLER(SIOCSIWFREQ, at76_iw_handler_set_freq),
3019 AT76_SET_HANDLER(SIOCGIWFREQ, at76_iw_handler_get_freq),
3020 AT76_SET_HANDLER(SIOCSIWMODE, at76_iw_handler_set_mode),
3021 AT76_SET_HANDLER(SIOCGIWMODE, at76_iw_handler_get_mode),
3022 AT76_SET_HANDLER(SIOCGIWRANGE, at76_iw_handler_get_range),
3023 AT76_SET_HANDLER(SIOCSIWSPY, at76_iw_handler_set_spy),
3024 AT76_SET_HANDLER(SIOCGIWSPY, at76_iw_handler_get_spy),
3025 AT76_SET_HANDLER(SIOCSIWTHRSPY, at76_iw_handler_set_thrspy),
3026 AT76_SET_HANDLER(SIOCGIWTHRSPY, at76_iw_handler_get_thrspy),
3027 AT76_SET_HANDLER(SIOCSIWAP, at76_iw_handler_set_wap),
3028 AT76_SET_HANDLER(SIOCGIWAP, at76_iw_handler_get_wap),
3029 AT76_SET_HANDLER(SIOCSIWSCAN, at76_iw_handler_set_scan),
3030 AT76_SET_HANDLER(SIOCGIWSCAN, at76_iw_handler_get_scan),
3031 AT76_SET_HANDLER(SIOCSIWESSID, at76_iw_handler_set_essid),
3032 AT76_SET_HANDLER(SIOCGIWESSID, at76_iw_handler_get_essid),
3033 AT76_SET_HANDLER(SIOCSIWRATE, at76_iw_handler_set_rate),
3034 AT76_SET_HANDLER(SIOCGIWRATE, at76_iw_handler_get_rate),
3035 AT76_SET_HANDLER(SIOCSIWRTS, at76_iw_handler_set_rts),
3036 AT76_SET_HANDLER(SIOCGIWRTS, at76_iw_handler_get_rts),
3037 AT76_SET_HANDLER(SIOCSIWFRAG, at76_iw_handler_set_frag),
3038 AT76_SET_HANDLER(SIOCGIWFRAG, at76_iw_handler_get_frag),
3039 AT76_SET_HANDLER(SIOCGIWTXPOW, at76_iw_handler_get_txpow),
3040 AT76_SET_HANDLER(SIOCSIWRETRY, at76_iw_handler_set_retry),
3041 AT76_SET_HANDLER(SIOCGIWRETRY, at76_iw_handler_get_retry),
3042 AT76_SET_HANDLER(SIOCSIWENCODE, at76_iw_handler_set_encode),
3043 AT76_SET_HANDLER(SIOCGIWENCODE, at76_iw_handler_get_encode),
3044 AT76_SET_HANDLER(SIOCSIWPOWER, at76_iw_handler_set_power),
3045 AT76_SET_HANDLER(SIOCGIWPOWER, at76_iw_handler_get_power)
3046};
3047
3048#define AT76_SET_PRIV(h, f) [h - SIOCIWFIRSTPRIV] = (iw_handler) f
3049
3050/* Private wireless handlers */
3051static const iw_handler at76_priv_handlers[] = {
3052 AT76_SET_PRIV(AT76_SET_SHORT_PREAMBLE, at76_iw_set_short_preamble),
3053 AT76_SET_PRIV(AT76_GET_SHORT_PREAMBLE, at76_iw_get_short_preamble),
3054 AT76_SET_PRIV(AT76_SET_DEBUG, at76_iw_set_debug),
3055 AT76_SET_PRIV(AT76_GET_DEBUG, at76_iw_get_debug),
3056 AT76_SET_PRIV(AT76_SET_POWERSAVE_MODE, at76_iw_set_powersave_mode),
3057 AT76_SET_PRIV(AT76_GET_POWERSAVE_MODE, at76_iw_get_powersave_mode),
3058 AT76_SET_PRIV(AT76_SET_SCAN_TIMES, at76_iw_set_scan_times),
3059 AT76_SET_PRIV(AT76_GET_SCAN_TIMES, at76_iw_get_scan_times),
3060 AT76_SET_PRIV(AT76_SET_SCAN_MODE, at76_iw_set_scan_mode),
3061 AT76_SET_PRIV(AT76_GET_SCAN_MODE, at76_iw_get_scan_mode),
3062};
3063
3064/* Names and arguments of private wireless handlers */
3065static const struct iw_priv_args at76_priv_args[] = {
3066 /* 0 - long, 1 - short */
3067 {AT76_SET_SHORT_PREAMBLE,
3068 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_preamble"},
3069
3070 {AT76_GET_SHORT_PREAMBLE,
3071 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 10, "get_preamble"},
3072
3073 /* we must pass the new debug mask as a string, because iwpriv cannot
3074 * parse hex numbers starting with 0x :-( */
3075 {AT76_SET_DEBUG,
3076 IW_PRIV_TYPE_CHAR | 10, 0, "set_debug"},
3077
3078 {AT76_GET_DEBUG,
3079 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 10, "get_debug"},
3080
3081 /* 1 - active, 2 - power save, 3 - smart power save */
3082 {AT76_SET_POWERSAVE_MODE,
3083 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_powersave"},
3084
3085 {AT76_GET_POWERSAVE_MODE,
3086 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_powersave"},
3087
3088 /* min_channel_time, max_channel_time */
3089 {AT76_SET_SCAN_TIMES,
3090 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "set_scan_times"},
3091
3092 {AT76_GET_SCAN_TIMES,
3093 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, "get_scan_times"},
3094
3095 /* 0 - active, 1 - passive scan */
3096 {AT76_SET_SCAN_MODE,
3097 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_scan_mode"},
3098
3099 {AT76_GET_SCAN_MODE,
3100 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_scan_mode"},
3101};
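/* In the entries above, the second and third fields describe the set and get
 * arguments respectively: e.g. IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1
 * means "exactly one integer" and IW_PRIV_TYPE_CHAR | 10 means "a string of
 * up to 10 characters"; iwpriv uses this to marshal the ioctl arguments. */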
3102
3103static const struct iw_handler_def at76_handler_def = {
3104 .num_standard = ARRAY_SIZE(at76_handlers),
3105 .num_private = ARRAY_SIZE(at76_priv_handlers),
3106 .num_private_args = ARRAY_SIZE(at76_priv_args),
3107 .standard = at76_handlers,
3108 .private = at76_priv_handlers,
3109 .private_args = at76_priv_args,
3110 .get_wireless_stats = at76_get_wireless_stats,
3111};
3112
3113static const u8 snapsig[] = { 0xaa, 0xaa, 0x03 };
3114
3115/* RFC 1042 encapsulates Ethernet frames in 802.2 SNAP (0xaa, 0xaa, 0x03) with
3116 * a SNAP OID of 0 (0x00, 0x00, 0x00) */
3117static const u8 rfc1042sig[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
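/* For example, an IPv4 frame (EtherType 0x0800) goes out with the LLC/SNAP
 * header aa aa 03 00 00 00 08 00 in front of the IP payload once at76_tx()
 * has prepended rfc1042sig and the original h_proto field. */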
3118
3119static int at76_tx(struct sk_buff *skb, struct net_device *netdev)
3120{
3121 struct at76_priv *priv = netdev_priv(netdev);
3122 struct net_device_stats *stats = &priv->stats;
3123 int ret = 0;
3124 int wlen;
3125 int submit_len;
3126 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
3127 struct ieee80211_hdr_3addr *i802_11_hdr =
3128 (struct ieee80211_hdr_3addr *)tx_buffer->packet;
3129 u8 *payload = i802_11_hdr->payload;
3130 struct ethhdr *eh = (struct ethhdr *)skb->data;
3131
3132 if (netif_queue_stopped(netdev)) {
3133 printk(KERN_ERR "%s: %s called while netdev is stopped\n",
3134 netdev->name, __func__);
3135 /* skip this packet */
3136 dev_kfree_skb(skb);
3137 return 0;
3138 }
3139
3140 if (priv->tx_urb->status == -EINPROGRESS) {
3141 printk(KERN_ERR "%s: %s called while tx urb is pending\n",
3142 netdev->name, __func__);
3143 /* skip this packet */
3144 dev_kfree_skb(skb);
3145 return 0;
3146 }
3147
3148 if (skb->len < ETH_HLEN) {
3149 printk(KERN_ERR "%s: %s: skb too short (%d)\n",
3150 netdev->name, __func__, skb->len);
3151 dev_kfree_skb(skb);
3152 return 0;
3153 }
3154
3155 at76_ledtrig_tx_activity(); /* tell the LED trigger that we are sending a packet */
3156
3157 /* we can get rid of memcpy if we set netdev->hard_header_len to
3158 reserve enough space, but we would need to keep the skb around */
3159
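/* an h_proto value of at most ETH_DATA_LEN (1500) is an 802.3 length field
 * rather than an EtherType, so the payload is expected to carry its own
 * LLC/SNAP header; larger values are Ethernet II EtherTypes and get the
 * RFC 1042 SNAP header prepended below */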
3160 if (ntohs(eh->h_proto) <= ETH_DATA_LEN) {
3161 /* this is a 802.3 packet */
3162 if (skb->len >= ETH_HLEN + sizeof(rfc1042sig)
3163 && skb->data[ETH_HLEN] == rfc1042sig[0]
3164 && skb->data[ETH_HLEN + 1] == rfc1042sig[1]) {
3165 /* higher layer delivered SNAP header - keep it */
3166 memcpy(payload, skb->data + ETH_HLEN,
3167 skb->len - ETH_HLEN);
3168 wlen = IEEE80211_3ADDR_LEN + skb->len - ETH_HLEN;
3169 } else {
3170 printk(KERN_ERR "%s: dropping non-SNAP 802.2 packet "
3171 "(DSAP 0x%02x SSAP 0x%02x cntrl 0x%02x)\n",
3172 priv->netdev->name, skb->data[ETH_HLEN],
3173 skb->data[ETH_HLEN + 1],
3174 skb->data[ETH_HLEN + 2]);
3175 dev_kfree_skb(skb);
3176 return 0;
3177 }
3178 } else {
3179 /* add RFC 1042 header in front */
3180 memcpy(payload, rfc1042sig, sizeof(rfc1042sig));
3181 memcpy(payload + sizeof(rfc1042sig), &eh->h_proto,
3182 skb->len - offsetof(struct ethhdr, h_proto));
3183 wlen = IEEE80211_3ADDR_LEN + sizeof(rfc1042sig) + skb->len -
3184 offsetof(struct ethhdr, h_proto);
3185 }
3186
3187 /* make wireless header */
3188 i802_11_hdr->frame_ctl =
3189 cpu_to_le16(IEEE80211_FTYPE_DATA |
3190 (priv->wep_enabled ? IEEE80211_FCTL_PROTECTED : 0) |
3191 (priv->iw_mode ==
3192 IW_MODE_INFRA ? IEEE80211_FCTL_TODS : 0));
3193
3194 if (priv->iw_mode == IW_MODE_ADHOC) {
3195 memcpy(i802_11_hdr->addr1, eh->h_dest, ETH_ALEN);
3196 memcpy(i802_11_hdr->addr2, eh->h_source, ETH_ALEN);
3197 memcpy(i802_11_hdr->addr3, priv->bssid, ETH_ALEN);
3198 } else if (priv->iw_mode == IW_MODE_INFRA) {
3199 memcpy(i802_11_hdr->addr1, priv->bssid, ETH_ALEN);
3200 memcpy(i802_11_hdr->addr2, eh->h_source, ETH_ALEN);
3201 memcpy(i802_11_hdr->addr3, eh->h_dest, ETH_ALEN);
3202 }
3203
3204 i802_11_hdr->duration_id = cpu_to_le16(0);
3205 i802_11_hdr->seq_ctl = cpu_to_le16(0);
3206
3207 /* setup 'Atmel' header */
3208 tx_buffer->wlength = cpu_to_le16(wlen);
3209 tx_buffer->tx_rate = priv->txrate;
3210 /* for broadcast destination addresses, firmware 0.100.x
3211 seems to ignore this value and instead use the highest
3212 rate configured in basic_rate_set via CMD_STARTUP */
3213
3214 memset(tx_buffer->reserved, 0, sizeof(tx_buffer->reserved));
3215
3216 tx_buffer->padding = at76_calc_padding(wlen);
3217 submit_len = wlen + AT76_TX_HDRLEN + tx_buffer->padding;
3218
3219 at76_dbg(DBG_TX_DATA_CONTENT, "%s skb->data %s", priv->netdev->name,
3220 hex2str(skb->data, 32));
3221 at76_dbg(DBG_TX_DATA, "%s tx: wlen 0x%x pad 0x%x rate %d hdr %s",
3222 priv->netdev->name,
3223 le16_to_cpu(tx_buffer->wlength),
3224 tx_buffer->padding, tx_buffer->tx_rate,
3225 hex2str(i802_11_hdr, sizeof(*i802_11_hdr)));
3226 at76_dbg(DBG_TX_DATA_CONTENT, "%s payload %s", priv->netdev->name,
3227 hex2str(payload, 48));
3228
3229 /* send stuff */
3230 netif_stop_queue(netdev);
3231 netdev->trans_start = jiffies;
3232
3233 usb_fill_bulk_urb(priv->tx_urb, priv->udev, priv->tx_pipe, tx_buffer,
3234 submit_len, at76_tx_callback, priv);
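/* GFP_ATOMIC because the start_xmit path may run in softirq context */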
3235 ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC);
3236 if (ret) {
3237 stats->tx_errors++;
3238 printk(KERN_ERR "%s: error in tx submit urb: %d\n",
3239 netdev->name, ret);
3240 if (ret == -EINVAL)
3241 printk(KERN_ERR
3242 "%s: -EINVAL: tx urb %p hcpriv %p complete %p\n",
3243 priv->netdev->name, priv->tx_urb,
3244 priv->tx_urb->hcpriv, priv->tx_urb->complete);
3245 } else {
3246 stats->tx_bytes += skb->len;
3247 dev_kfree_skb(skb);
3248 }
3249
3250 return ret;
3251}
3252
3253static void at76_tx_timeout(struct net_device *netdev)
3254{
3255 struct at76_priv *priv = netdev_priv(netdev);
3256
3257 if (!priv)
3258 return;
3259 dev_warn(&netdev->dev, "tx timeout.");
3260
3261 usb_unlink_urb(priv->tx_urb);
3262 priv->stats.tx_errors++;
3263}
3264
 static int at76_submit_rx_urb(struct at76_priv *priv)
 {
 	int ret;
@@ -1327,7 +3270,7 @@ static int at76_submit_rx_urb(struct at76_priv *priv)
 
 	if (!priv->rx_urb) {
 		printk(KERN_ERR "%s: %s: priv->rx_urb is NULL\n",
-		       wiphy_name(priv->hw->wiphy), __func__);
+		       priv->netdev->name, __func__);
 		return -EFAULT;
 	}
 
@@ -1335,7 +3278,7 @@ static int at76_submit_rx_urb(struct at76_priv *priv)
 	skb = dev_alloc_skb(sizeof(struct at76_rx_buffer));
 	if (!skb) {
 		printk(KERN_ERR "%s: cannot allocate rx skbuff\n",
-		       wiphy_name(priv->hw->wiphy));
+		       priv->netdev->name);
 		ret = -ENOMEM;
 		goto exit;
 	}
@@ -1355,18 +3298,110 @@ static int at76_submit_rx_urb(struct at76_priv *priv)
 			 "usb_submit_urb returned -ENODEV");
 		else
 			printk(KERN_ERR "%s: rx, usb_submit_urb failed: %d\n",
-			       wiphy_name(priv->hw->wiphy), ret);
+			       priv->netdev->name, ret);
 	}
 
 exit:
 	if (ret < 0 && ret != -ENODEV)
 		printk(KERN_ERR "%s: cannot submit rx urb - please unload the "
 		       "driver and/or power cycle the device\n",
-		       wiphy_name(priv->hw->wiphy));
+		       priv->netdev->name);
 
 	return ret;
 }
 
3313static int at76_open(struct net_device *netdev)
3314{
3315 struct at76_priv *priv = netdev_priv(netdev);
3316 int ret = 0;
3317
3318 at76_dbg(DBG_PROC_ENTRY, "%s(): entry", __func__);
3319
3320 if (mutex_lock_interruptible(&priv->mtx))
3321 return -EINTR;
3322
3323 /* if netdev->dev_addr != priv->mac_addr we must
3324 set the mac address in the device ! */
3325 if (compare_ether_addr(netdev->dev_addr, priv->mac_addr)) {
3326 if (at76_add_mac_address(priv, netdev->dev_addr) >= 0)
3327 at76_dbg(DBG_PROGRESS, "%s: set new MAC addr %s",
3328 netdev->name, mac2str(netdev->dev_addr));
3329 }
3330
3331 priv->scan_state = SCAN_IDLE;
3332 priv->last_scan = jiffies;
3333
3334 ret = at76_submit_rx_urb(priv);
3335 if (ret < 0) {
3336 printk(KERN_ERR "%s: open: submit_rx_urb failed: %d\n",
3337 netdev->name, ret);
3338 goto error;
3339 }
3340
3341 schedule_delayed_work(&priv->dwork_restart, 0);
3342
3343 at76_dbg(DBG_PROC_ENTRY, "%s(): end", __func__);
3344error:
3345 mutex_unlock(&priv->mtx);
3346 return ret < 0 ? ret : 0;
3347}
3348
3349static int at76_stop(struct net_device *netdev)
3350{
3351 struct at76_priv *priv = netdev_priv(netdev);
3352
3353 at76_dbg(DBG_DEVSTART, "%s: ENTER", __func__);
3354
3355 if (mutex_lock_interruptible(&priv->mtx))
3356 return -EINTR;
3357
3358 at76_quiesce(priv);
3359
3360 if (!priv->device_unplugged) {
3361 /* We are called by "ifconfig ethX down", not because the
3362 * device is not available anymore. */
3363 at76_set_radio(priv, 0);
3364
3365 /* We unlink rx_urb because at76_open() re-submits it.
3366 * If unplugged, at76_delete_device() takes care of it. */
3367 usb_kill_urb(priv->rx_urb);
3368 }
3369
3370 /* free the bss_list */
3371 at76_free_bss_list(priv);
3372
3373 mutex_unlock(&priv->mtx);
3374 at76_dbg(DBG_DEVSTART, "%s: EXIT", __func__);
3375
3376 return 0;
3377}
3378
3379static void at76_ethtool_get_drvinfo(struct net_device *netdev,
3380 struct ethtool_drvinfo *info)
3381{
3382 struct at76_priv *priv = netdev_priv(netdev);
3383
3384 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
3385 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
3386
3387 usb_make_path(priv->udev, info->bus_info, sizeof(info->bus_info));
3388
3389 snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d.%d-%d",
3390 priv->fw_version.major, priv->fw_version.minor,
3391 priv->fw_version.patch, priv->fw_version.build);
3392}
3393
3394static u32 at76_ethtool_get_link(struct net_device *netdev)
3395{
3396 struct at76_priv *priv = netdev_priv(netdev);
3397 return priv->mac_state == MAC_CONNECTED;
3398}
3399
3400static struct ethtool_ops at76_ethtool_ops = {
3401 .get_drvinfo = at76_ethtool_get_drvinfo,
3402 .get_link = at76_ethtool_get_link,
3403};
3404
 /* Download external firmware */
 static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
 {
@@ -1463,6 +3498,406 @@ exit:
 	return ret;
 }
 
3501static int at76_match_essid(struct at76_priv *priv, struct bss_info *ptr)
3502{
3503 /* common criteria for both modes (infra and ad-hoc) */
3504
3505 int ret = (priv->essid_size == 0 /* ANY ssid */ ||
3506 (priv->essid_size == ptr->ssid_len &&
3507 !memcmp(priv->essid, ptr->ssid, ptr->ssid_len)));
3508 if (!ret)
3509 at76_dbg(DBG_BSS_MATCH,
3510 "%s bss table entry %p: essid didn't match",
3511 priv->netdev->name, ptr);
3512 return ret;
3513}
3514
3515static inline int at76_match_mode(struct at76_priv *priv, struct bss_info *ptr)
3516{
3517 int ret;
3518
3519 if (priv->iw_mode == IW_MODE_ADHOC)
3520 ret = ptr->capa & WLAN_CAPABILITY_IBSS;
3521 else
3522 ret = ptr->capa & WLAN_CAPABILITY_ESS;
3523 if (!ret)
3524 at76_dbg(DBG_BSS_MATCH,
3525 "%s bss table entry %p: mode didn't match",
3526 priv->netdev->name, ptr);
3527 return ret;
3528}
3529
3530static int at76_match_rates(struct at76_priv *priv, struct bss_info *ptr)
3531{
3532 int i;
3533
3534 for (i = 0; i < ptr->rates_len; i++) {
3535 u8 rate = ptr->rates[i];
3536
3537 if (!(rate & 0x80))
3538 continue;
3539
3540 /* this is a basic rate we have to support
3541 (see IEEE802.11, ch. 7.3.2.2) */
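/* rates in the supported-rates IE are given in 500 kbit/s units with the
 * top bit marking a basic (mandatory) rate, so e.g. 0x96 == 0x80 | 22
 * advertises 11 Mbit/s as required by the BSS */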
3542 if (rate != (0x80 | hw_rates[0])
3543 && rate != (0x80 | hw_rates[1])
3544 && rate != (0x80 | hw_rates[2])
3545 && rate != (0x80 | hw_rates[3])) {
3546 at76_dbg(DBG_BSS_MATCH,
3547 "%s: bss table entry %p: basic rate %02x not "
3548 "supported", priv->netdev->name, ptr, rate);
3549 return 0;
3550 }
3551 }
3552
3553 /* if we use short preamble, the bss must support it */
3554 if (priv->preamble_type == PREAMBLE_TYPE_SHORT &&
3555 !(ptr->capa & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
3556 at76_dbg(DBG_BSS_MATCH,
3557 "%s: %p does not support short preamble",
3558 priv->netdev->name, ptr);
3559 return 0;
3560 } else
3561 return 1;
3562}
3563
3564static inline int at76_match_wep(struct at76_priv *priv, struct bss_info *ptr)
3565{
3566 if (!priv->wep_enabled && ptr->capa & WLAN_CAPABILITY_PRIVACY) {
3567 /* we have disabled WEP, but the BSS signals privacy */
3568 at76_dbg(DBG_BSS_MATCH,
3569 "%s: bss table entry %p: requires encryption",
3570 priv->netdev->name, ptr);
3571 return 0;
3572 }
3573 /* otherwise if the BSS does not signal privacy it may well
3574 accept encrypted packets from us ... */
3575 return 1;
3576}
3577
3578static inline int at76_match_bssid(struct at76_priv *priv, struct bss_info *ptr)
3579{
3580 if (!priv->wanted_bssid_valid ||
3581 !compare_ether_addr(ptr->bssid, priv->wanted_bssid))
3582 return 1;
3583
3584 at76_dbg(DBG_BSS_MATCH,
3585 "%s: requested bssid - %s does not match",
3586 priv->netdev->name, mac2str(priv->wanted_bssid));
3587 at76_dbg(DBG_BSS_MATCH,
3588 " AP bssid - %s of bss table entry %p",
3589 mac2str(ptr->bssid), ptr);
3590 return 0;
3591}
3592
3593/**
3594 * at76_match_bss - try to find a matching bss in priv->bss
3595 *
3596 * last - last bss tried
3597 *
3598 * last == NULL signals a new round starting with priv->bss_list.next
3599 * this function must be called inside an acquired priv->bss_list_spinlock
3600 * otherwise the timeout on bss may remove the newly chosen entry
3601 */
3602static struct bss_info *at76_match_bss(struct at76_priv *priv,
3603 struct bss_info *last)
3604{
3605 struct bss_info *ptr = NULL;
3606 struct list_head *curr;
3607
3608 curr = last ? last->list.next : priv->bss_list.next;
3609 while (curr != &priv->bss_list) {
3610 ptr = list_entry(curr, struct bss_info, list);
3611 if (at76_match_essid(priv, ptr) && at76_match_mode(priv, ptr)
3612 && at76_match_wep(priv, ptr) && at76_match_rates(priv, ptr)
3613 && at76_match_bssid(priv, ptr))
3614 break;
3615 curr = curr->next;
3616 }
3617
3618 if (curr == &priv->bss_list)
3619 ptr = NULL;
3620 /* otherwise ptr points to the struct bss_info we have chosen */
3621
3622 at76_dbg(DBG_BSS_TABLE, "%s %s: returned %p", priv->netdev->name,
3623 __func__, ptr);
3624 return ptr;
3625}
3626
3627/* Start joining a matching BSS, or create own IBSS */
3628static void at76_work_join(struct work_struct *work)
3629{
3630 struct at76_priv *priv = container_of(work, struct at76_priv,
3631 work_join);
3632 int ret;
3633 unsigned long flags;
3634
3635 mutex_lock(&priv->mtx);
3636
3637 WARN_ON(priv->mac_state != MAC_JOINING);
3638 if (priv->mac_state != MAC_JOINING)
3639 goto exit;
3640
3641 /* secure the access to priv->curr_bss ! */
3642 spin_lock_irqsave(&priv->bss_list_spinlock, flags);
3643 priv->curr_bss = at76_match_bss(priv, priv->curr_bss);
3644 spin_unlock_irqrestore(&priv->bss_list_spinlock, flags);
3645
3646 if (!priv->curr_bss) {
3647 /* here we haven't found a matching (i)bss ... */
3648 if (priv->iw_mode == IW_MODE_ADHOC) {
3649 at76_set_mac_state(priv, MAC_OWN_IBSS);
3650 at76_start_ibss(priv);
3651 goto exit;
3652 }
3653 /* haven't found a matching BSS in infra mode - try again */
3654 at76_set_mac_state(priv, MAC_SCANNING);
3655 schedule_work(&priv->work_start_scan);
3656 goto exit;
3657 }
3658
3659 ret = at76_join_bss(priv, priv->curr_bss);
3660 if (ret < 0) {
3661 printk(KERN_ERR "%s: join_bss failed with %d\n",
3662 priv->netdev->name, ret);
3663 goto exit;
3664 }
3665
3666 ret = at76_wait_completion(priv, CMD_JOIN);
3667 if (ret != CMD_STATUS_COMPLETE) {
3668 if (ret != CMD_STATUS_TIME_OUT)
3669 printk(KERN_ERR "%s: join_bss completed with %d\n",
3670 priv->netdev->name, ret);
3671 else
3672 printk(KERN_INFO "%s: join_bss ssid %s timed out\n",
3673 priv->netdev->name,
3674 mac2str(priv->curr_bss->bssid));
3675
3676 /* retry next BSS immediately */
3677 schedule_work(&priv->work_join);
3678 goto exit;
3679 }
3680
3681 /* here we have joined the (I)BSS */
3682 if (priv->iw_mode == IW_MODE_ADHOC) {
3683 struct bss_info *bptr = priv->curr_bss;
3684 at76_set_mac_state(priv, MAC_CONNECTED);
3685 /* get ESSID, BSSID and channel for priv->curr_bss */
3686 priv->essid_size = bptr->ssid_len;
3687 memcpy(priv->essid, bptr->ssid, bptr->ssid_len);
3688 memcpy(priv->bssid, bptr->bssid, ETH_ALEN);
3689 priv->channel = bptr->channel;
3690 at76_iwevent_bss_connect(priv->netdev, bptr->bssid);
3691 netif_carrier_on(priv->netdev);
3692 netif_start_queue(priv->netdev);
3693 /* just to be sure */
3694 cancel_delayed_work(&priv->dwork_get_scan);
3695 cancel_delayed_work(&priv->dwork_auth);
3696 cancel_delayed_work(&priv->dwork_assoc);
3697 } else {
3698 /* send auth req */
3699 priv->retries = AUTH_RETRIES;
3700 at76_set_mac_state(priv, MAC_AUTH);
3701 at76_auth_req(priv, priv->curr_bss, 1, NULL);
3702 at76_dbg(DBG_MGMT_TIMER,
3703 "%s:%d: starting mgmt_timer + HZ", __func__, __LINE__);
3704 schedule_delayed_work(&priv->dwork_auth, AUTH_TIMEOUT);
3705 }
3706
3707exit:
3708 mutex_unlock(&priv->mtx);
3709}
3710
3711/* Reap scan results */
3712static void at76_dwork_get_scan(struct work_struct *work)
3713{
3714 int status;
3715 int ret;
3716 struct at76_priv *priv = container_of(work, struct at76_priv,
3717 dwork_get_scan.work);
3718
3719 mutex_lock(&priv->mtx);
3720 WARN_ON(priv->mac_state != MAC_SCANNING);
3721 if (priv->mac_state != MAC_SCANNING)
3722 goto exit;
3723
3724 status = at76_get_cmd_status(priv->udev, CMD_SCAN);
3725 if (status < 0) {
3726 printk(KERN_ERR "%s: %s: at76_get_cmd_status failed with %d\n",
3727 priv->netdev->name, __func__, status);
3728 status = CMD_STATUS_IN_PROGRESS;
3729 /* INFO: hope it was a one-off error - if not, scanning
3730 will fail further down the line and stop this cycle */
3731 }
3732 at76_dbg(DBG_PROGRESS,
3733 "%s %s: got cmd_status %d (state %s, need_any %d)",
3734 priv->netdev->name, __func__, status,
3735 mac_states[priv->mac_state], priv->scan_need_any);
3736
3737 if (status != CMD_STATUS_COMPLETE) {
3738 if ((status != CMD_STATUS_IN_PROGRESS) &&
3739 (status != CMD_STATUS_IDLE))
3740 printk(KERN_ERR "%s: %s: Bad scan status: %s\n",
3741 priv->netdev->name, __func__,
3742 at76_get_cmd_status_string(status));
3743
3744 /* the first cmd status after scan start is always a IDLE ->
3745 start the timer to poll again until COMPLETED */
3746 at76_dbg(DBG_MGMT_TIMER,
3747 "%s:%d: starting mgmt_timer for %d ticks",
3748 __func__, __LINE__, SCAN_POLL_INTERVAL);
3749 schedule_delayed_work(&priv->dwork_get_scan,
3750 SCAN_POLL_INTERVAL);
3751 goto exit;
3752 }
3753
3754 if (at76_debug & DBG_BSS_TABLE)
3755 at76_dump_bss_table(priv);
3756
3757 if (priv->scan_need_any) {
3758 ret = at76_start_scan(priv, 0);
3759 if (ret < 0)
3760 printk(KERN_ERR
3761 "%s: %s: start_scan (ANY) failed with %d\n",
3762 priv->netdev->name, __func__, ret);
3763 at76_dbg(DBG_MGMT_TIMER,
3764 "%s:%d: starting mgmt_timer for %d ticks", __func__,
3765 __LINE__, SCAN_POLL_INTERVAL);
3766 schedule_delayed_work(&priv->dwork_get_scan,
3767 SCAN_POLL_INTERVAL);
3768 priv->scan_need_any = 0;
3769 } else {
3770 priv->scan_state = SCAN_COMPLETED;
3771 /* report the end of scan to user space */
3772 at76_iwevent_scan_complete(priv->netdev);
3773 at76_set_mac_state(priv, MAC_JOINING);
3774 schedule_work(&priv->work_join);
3775 }
3776
3777exit:
3778 mutex_unlock(&priv->mtx);
3779}
3780
3781/* Handle loss of beacons from the AP */
3782static void at76_dwork_beacon(struct work_struct *work)
3783{
3784 struct at76_priv *priv = container_of(work, struct at76_priv,
3785 dwork_beacon.work);
3786
3787 mutex_lock(&priv->mtx);
3788 if (priv->mac_state != MAC_CONNECTED || priv->iw_mode != IW_MODE_INFRA)
3789 goto exit;
3790
3791 /* We haven't received any beacons from our AP for BEACON_TIMEOUT */
3792 printk(KERN_INFO "%s: lost beacon bssid %s\n",
3793 priv->netdev->name, mac2str(priv->curr_bss->bssid));
3794
3795 netif_carrier_off(priv->netdev);
3796 netif_stop_queue(priv->netdev);
3797 at76_iwevent_bss_disconnect(priv->netdev);
3798 at76_set_mac_state(priv, MAC_SCANNING);
3799 schedule_work(&priv->work_start_scan);
3800
3801exit:
3802 mutex_unlock(&priv->mtx);
3803}
3804
3805/* Handle authentication response timeout */
3806static void at76_dwork_auth(struct work_struct *work)
3807{
3808 struct at76_priv *priv = container_of(work, struct at76_priv,
3809 dwork_auth.work);
3810
3811 mutex_lock(&priv->mtx);
3812 WARN_ON(priv->mac_state != MAC_AUTH);
3813 if (priv->mac_state != MAC_AUTH)
3814 goto exit;
3815
3816 at76_dbg(DBG_PROGRESS, "%s: authentication response timeout",
3817 priv->netdev->name);
3818
3819 if (priv->retries-- >= 0) {
3820 at76_auth_req(priv, priv->curr_bss, 1, NULL);
3821 at76_dbg(DBG_MGMT_TIMER, "%s:%d: starting mgmt_timer + HZ",
3822 __func__, __LINE__);
3823 schedule_delayed_work(&priv->dwork_auth, AUTH_TIMEOUT);
3824 } else {
3825 /* try to get next matching BSS */
3826 at76_set_mac_state(priv, MAC_JOINING);
3827 schedule_work(&priv->work_join);
3828 }
3829
3830exit:
3831 mutex_unlock(&priv->mtx);
3832}
3833
3834/* Handle association response timeout */
3835static void at76_dwork_assoc(struct work_struct *work)
3836{
3837 struct at76_priv *priv = container_of(work, struct at76_priv,
3838 dwork_assoc.work);
3839
3840 mutex_lock(&priv->mtx);
3841 WARN_ON(priv->mac_state != MAC_ASSOC);
3842 if (priv->mac_state != MAC_ASSOC)
3843 goto exit;
3844
3845 at76_dbg(DBG_PROGRESS, "%s: association response timeout",
3846 priv->netdev->name);
3847
3848 if (priv->retries-- >= 0) {
3849 at76_assoc_req(priv, priv->curr_bss);
3850 at76_dbg(DBG_MGMT_TIMER, "%s:%d: starting mgmt_timer + HZ",
3851 __func__, __LINE__);
3852 schedule_delayed_work(&priv->dwork_assoc, ASSOC_TIMEOUT);
3853 } else {
3854 /* try to get next matching BSS */
3855 at76_set_mac_state(priv, MAC_JOINING);
3856 schedule_work(&priv->work_join);
3857 }
3858
3859exit:
3860 mutex_unlock(&priv->mtx);
3861}
3862
3863/* Read new bssid in ad-hoc mode */
3864static void at76_work_new_bss(struct work_struct *work)
3865{
3866 struct at76_priv *priv = container_of(work, struct at76_priv,
3867 work_new_bss);
3868 int ret;
3869 struct mib_mac_mgmt mac_mgmt;
3870
3871 mutex_lock(&priv->mtx);
3872
3873 ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, &mac_mgmt,
3874 sizeof(struct mib_mac_mgmt));
3875 if (ret < 0) {
3876 printk(KERN_ERR "%s: at76_get_mib failed: %d\n",
3877 priv->netdev->name, ret);
3878 goto exit;
3879 }
3880
3881 at76_dbg(DBG_PROGRESS, "ibss_change = 0x%2x", mac_mgmt.ibss_change);
3882 memcpy(priv->bssid, mac_mgmt.current_bssid, ETH_ALEN);
3883 at76_dbg(DBG_PROGRESS, "using BSSID %s", mac2str(priv->bssid));
3884
3885 at76_iwevent_bss_connect(priv->netdev, priv->bssid);
3886
3887 priv->mib_buf.type = MIB_MAC_MGMT;
3888 priv->mib_buf.size = 1;
3889 priv->mib_buf.index = offsetof(struct mib_mac_mgmt, ibss_change);
3890 priv->mib_buf.data.byte = 0;
3891
3892 ret = at76_set_mib(priv, &priv->mib_buf);
3893 if (ret < 0)
3894 printk(KERN_ERR "%s: set_mib (ibss change ok) failed: %d\n",
3895 priv->netdev->name, ret);
3896
3897exit:
3898 mutex_unlock(&priv->mtx);
3899}
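
The MIB write above updates a single byte inside the device's MAC management MIB; the index it sends is simply the byte offset of ibss_change within struct mib_mac_mgmt, obtained with offsetof(). A self-contained illustration of that indexing idea follows; the struct here is a stand-in, not the driver's real MIB layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in MIB block; the real struct mib_mac_mgmt looks different. */
struct example_mib {
        uint8_t beacon_period;
        uint8_t ibss_change;
        uint8_t current_bssid[6];
};

int main(void)
{
        /* The "index" handed to the firmware is just the byte offset
         * of the field inside the MIB block. */
        printf("ibss_change at byte offset %zu\n",
               offsetof(struct example_mib, ibss_change));
        return 0;
}
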
3900
1466static int at76_startup_device(struct at76_priv *priv) 3901static int at76_startup_device(struct at76_priv *priv)
1467{ 3902{
1468 struct at76_card_config *ccfg = &priv->card_config; 3903 struct at76_card_config *ccfg = &priv->card_config;
@@ -1470,14 +3905,14 @@ static int at76_startup_device(struct at76_priv *priv)
1470 3905
1471 at76_dbg(DBG_PARAMS, 3906 at76_dbg(DBG_PARAMS,
1472 "%s param: ssid %.*s (%s) mode %s ch %d wep %s key %d " 3907 "%s param: ssid %.*s (%s) mode %s ch %d wep %s key %d "
1473 "keylen %d", wiphy_name(priv->hw->wiphy), priv->essid_size, 3908 "keylen %d", priv->netdev->name, priv->essid_size, priv->essid,
1474 priv->essid, hex2str(priv->essid, IW_ESSID_MAX_SIZE), 3909 hex2str(priv->essid, IW_ESSID_MAX_SIZE),
1475 priv->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra", 3910 priv->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra",
1476 priv->channel, priv->wep_enabled ? "enabled" : "disabled", 3911 priv->channel, priv->wep_enabled ? "enabled" : "disabled",
1477 priv->wep_key_id, priv->wep_keys_len[priv->wep_key_id]); 3912 priv->wep_key_id, priv->wep_keys_len[priv->wep_key_id]);
1478 at76_dbg(DBG_PARAMS, 3913 at76_dbg(DBG_PARAMS,
1479 "%s param: preamble %s rts %d retry %d frag %d " 3914 "%s param: preamble %s rts %d retry %d frag %d "
1480 "txrate %s auth_mode %d", wiphy_name(priv->hw->wiphy), 3915 "txrate %s auth_mode %d", priv->netdev->name,
1481 preambles[priv->preamble_type], priv->rts_threshold, 3916 preambles[priv->preamble_type], priv->rts_threshold,
1482 priv->short_retry_limit, priv->frag_threshold, 3917 priv->short_retry_limit, priv->frag_threshold,
1483 priv->txrate == TX_RATE_1MBIT ? "1MBit" : priv->txrate == 3918 priv->txrate == TX_RATE_1MBIT ? "1MBit" : priv->txrate ==
@@ -1488,7 +3923,7 @@ static int at76_startup_device(struct at76_priv *priv)
1488 at76_dbg(DBG_PARAMS, 3923 at76_dbg(DBG_PARAMS,
1489 "%s param: pm_mode %d pm_period %d auth_mode %s " 3924 "%s param: pm_mode %d pm_period %d auth_mode %s "
1490 "scan_times %d %d scan_mode %s", 3925 "scan_times %d %d scan_mode %s",
1491 wiphy_name(priv->hw->wiphy), priv->pm_mode, priv->pm_period, 3926 priv->netdev->name, priv->pm_mode, priv->pm_period,
1492 priv->auth_mode == WLAN_AUTH_OPEN ? "open" : "shared_secret", 3927 priv->auth_mode == WLAN_AUTH_OPEN ? "open" : "shared_secret",
1493 priv->scan_min_time, priv->scan_max_time, 3928 priv->scan_min_time, priv->scan_max_time,
1494 priv->scan_mode == SCAN_TYPE_ACTIVE ? "active" : "passive"); 3929 priv->scan_mode == SCAN_TYPE_ACTIVE ? "active" : "passive");
@@ -1522,8 +3957,7 @@ static int at76_startup_device(struct at76_priv *priv)
1522 ccfg->ssid_len = priv->essid_size; 3957 ccfg->ssid_len = priv->essid_size;
1523 3958
1524 ccfg->wep_default_key_id = priv->wep_key_id; 3959 ccfg->wep_default_key_id = priv->wep_key_id;
1525 memcpy(ccfg->wep_default_key_value, priv->wep_keys, 3960 memcpy(ccfg->wep_default_key_value, priv->wep_keys, 4 * WEP_KEY_LEN);
1526 sizeof(priv->wep_keys));
1527 3961
1528 ccfg->short_preamble = priv->preamble_type; 3962 ccfg->short_preamble = priv->preamble_type;
1529 ccfg->beacon_period = cpu_to_le16(priv->beacon_period); 3963 ccfg->beacon_period = cpu_to_le16(priv->beacon_period);
@@ -1532,7 +3966,7 @@ static int at76_startup_device(struct at76_priv *priv)
1532 sizeof(struct at76_card_config)); 3966 sizeof(struct at76_card_config));
1533 if (ret < 0) { 3967 if (ret < 0) {
1534 printk(KERN_ERR "%s: at76_set_card_command failed: %d\n", 3968 printk(KERN_ERR "%s: at76_set_card_command failed: %d\n",
1535 wiphy_name(priv->hw->wiphy), ret); 3969 priv->netdev->name, ret);
1536 return ret; 3970 return ret;
1537 } 3971 }
1538 3972
@@ -1578,6 +4012,69 @@ static int at76_startup_device(struct at76_priv *priv)
1578 return 0; 4012 return 0;
1579} 4013}
1580 4014
4015/* Restart the interface */
4016static void at76_dwork_restart(struct work_struct *work)
4017{
4018 struct at76_priv *priv = container_of(work, struct at76_priv,
4019 dwork_restart.work);
4020
4021 mutex_lock(&priv->mtx);
4022
4023 netif_carrier_off(priv->netdev); /* stop netdev watchdog */
4024 netif_stop_queue(priv->netdev); /* stop tx data packets */
4025
4026 at76_startup_device(priv);
4027
4028 if (priv->iw_mode != IW_MODE_MONITOR) {
4029 priv->netdev->type = ARPHRD_ETHER;
4030 at76_set_mac_state(priv, MAC_SCANNING);
4031 schedule_work(&priv->work_start_scan);
4032 } else {
4033 priv->netdev->type = ARPHRD_IEEE80211_RADIOTAP;
4034 at76_start_monitor(priv);
4035 }
4036
4037 mutex_unlock(&priv->mtx);
4038}
4039
4040/* Initiate scanning */
4041static void at76_work_start_scan(struct work_struct *work)
4042{
4043 struct at76_priv *priv = container_of(work, struct at76_priv,
4044 work_start_scan);
4045 int ret;
4046
4047 mutex_lock(&priv->mtx);
4048
4049 WARN_ON(priv->mac_state != MAC_SCANNING);
4050 if (priv->mac_state != MAC_SCANNING)
4051 goto exit;
4052
4053 /* only clear the bss list when a scan is actively initiated,
4054 * otherwise simply rely on at76_bss_list_timeout */
4055 if (priv->scan_state == SCAN_IN_PROGRESS) {
4056 at76_free_bss_list(priv);
4057 priv->scan_need_any = 1;
4058 } else
4059 priv->scan_need_any = 0;
4060
4061 ret = at76_start_scan(priv, 1);
4062
4063 if (ret < 0)
4064 printk(KERN_ERR "%s: %s: start_scan failed with %d\n",
4065 priv->netdev->name, __func__, ret);
4066 else {
4067 at76_dbg(DBG_MGMT_TIMER,
4068 "%s:%d: starting mgmt_timer for %d ticks",
4069 __func__, __LINE__, SCAN_POLL_INTERVAL);
4070 schedule_delayed_work(&priv->dwork_get_scan,
4071 SCAN_POLL_INTERVAL);
4072 }
4073
4074exit:
4075 mutex_unlock(&priv->mtx);
4076}
4077
1581/* Enable or disable promiscuous mode */ 4078/* Enable or disable promiscuous mode */
1582static void at76_work_set_promisc(struct work_struct *work) 4079static void at76_work_set_promisc(struct work_struct *work)
1583{ 4080{
@@ -1595,7 +4092,7 @@ static void at76_work_set_promisc(struct work_struct *work)
1595 ret = at76_set_mib(priv, &priv->mib_buf); 4092 ret = at76_set_mib(priv, &priv->mib_buf);
1596 if (ret < 0) 4093 if (ret < 0)
1597 printk(KERN_ERR "%s: set_mib (promiscuous_mode) failed: %d\n", 4094 printk(KERN_ERR "%s: set_mib (promiscuous_mode) failed: %d\n",
1598 wiphy_name(priv->hw->wiphy), ret); 4095 priv->netdev->name, ret);
1599 4096
1600 mutex_unlock(&priv->mtx); 4097 mutex_unlock(&priv->mtx);
1601} 4098}
@@ -1611,759 +4108,1088 @@ static void at76_work_submit_rx(struct work_struct *work)
1611 mutex_unlock(&priv->mtx); 4108 mutex_unlock(&priv->mtx);
1612} 4109}
1613 4110
1614static void at76_rx_tasklet(unsigned long param) 4111/* We got an association response */
4112static void at76_rx_mgmt_assoc(struct at76_priv *priv,
4113 struct at76_rx_buffer *buf)
1615{ 4114{
1616 struct urb *urb = (struct urb *)param; 4115 struct ieee80211_assoc_response *resp =
1617 struct at76_priv *priv = urb->context; 4116 (struct ieee80211_assoc_response *)buf->packet;
1618 struct at76_rx_buffer *buf; 4117 u16 assoc_id = le16_to_cpu(resp->aid);
1619 struct ieee80211_rx_status rx_status = { 0 }; 4118 u16 status = le16_to_cpu(resp->status);
1620 4119
1621 if (priv->device_unplugged) { 4120 at76_dbg(DBG_RX_MGMT, "%s: rx AssocResp bssid %s capa 0x%04x status "
1622 at76_dbg(DBG_DEVSTART, "device unplugged"); 4121 "0x%04x assoc_id 0x%04x rates %s", priv->netdev->name,
1623 if (urb) 4122 mac2str(resp->header.addr3), le16_to_cpu(resp->capability),
1624 at76_dbg(DBG_DEVSTART, "urb status %d", urb->status); 4123 status, assoc_id, hex2str(resp->info_element->data,
4124 resp->info_element->len));
4125
4126 if (priv->mac_state != MAC_ASSOC) {
4127 printk(KERN_INFO "%s: AssocResp in state %s ignored\n",
4128 priv->netdev->name, mac_states[priv->mac_state]);
1625 return; 4129 return;
1626 } 4130 }
1627 4131
1628 if (!priv->rx_skb || !priv->rx_skb->data) 4132 BUG_ON(!priv->curr_bss);
1629 return; 4133
1630 4134 cancel_delayed_work(&priv->dwork_assoc);
1631 buf = (struct at76_rx_buffer *)priv->rx_skb->data; 4135 if (status == WLAN_STATUS_SUCCESS) {
1632 4136 struct bss_info *ptr = priv->curr_bss;
1633 if (urb->status != 0) { 4137 priv->assoc_id = assoc_id & 0x3fff;
1634 if (urb->status != -ENOENT && urb->status != -ECONNRESET) 4138 /* update iwconfig params */
1635 at76_dbg(DBG_URB, 4139 memcpy(priv->bssid, ptr->bssid, ETH_ALEN);
1636 "%s %s: - nonzero Rx bulk status received: %d", 4140 memcpy(priv->essid, ptr->ssid, ptr->ssid_len);
1637 __func__, wiphy_name(priv->hw->wiphy), 4141 priv->essid_size = ptr->ssid_len;
1638 urb->status); 4142 priv->channel = ptr->channel;
1639 return; 4143 schedule_work(&priv->work_assoc_done);
4144 } else {
4145 at76_set_mac_state(priv, MAC_JOINING);
4146 schedule_work(&priv->work_join);
1640 } 4147 }
4148}
1641 4149
1642 at76_dbg(DBG_RX_ATMEL_HDR, 4150/* Process disassociation request from the AP */
1643 "%s: rx frame: rate %d rssi %d noise %d link %d", 4151static void at76_rx_mgmt_disassoc(struct at76_priv *priv,
1644 wiphy_name(priv->hw->wiphy), buf->rx_rate, buf->rssi, 4152 struct at76_rx_buffer *buf)
1645 buf->noise_level, buf->link_quality); 4153{
1646 4154 struct ieee80211_disassoc *resp =
1647 skb_trim(priv->rx_skb, le16_to_cpu(buf->wlength) + AT76_RX_HDRLEN); 4155 (struct ieee80211_disassoc *)buf->packet;
1648 at76_dbg_dump(DBG_RX_DATA, &priv->rx_skb->data[AT76_RX_HDRLEN], 4156 struct ieee80211_hdr_3addr *mgmt = &resp->header;
1649 priv->rx_skb->len, "RX: len=%d", 4157
1650 (int)(priv->rx_skb->len - AT76_RX_HDRLEN)); 4158 at76_dbg(DBG_RX_MGMT,
4159 "%s: rx DisAssoc bssid %s reason 0x%04x destination %s",
4160 priv->netdev->name, mac2str(mgmt->addr3),
4161 le16_to_cpu(resp->reason), mac2str(mgmt->addr1));
4162
4163 /* We are not connected, ignore */
4164 if (priv->mac_state == MAC_SCANNING || priv->mac_state == MAC_INIT
4165 || !priv->curr_bss)
4166 return;
1651 4167
1652 rx_status.signal = buf->rssi; 4168 /* Not our BSSID, ignore */
1653 /* FIXME: is rate_idx still present in structure? */ 4169 if (compare_ether_addr(mgmt->addr3, priv->curr_bss->bssid))
1654 rx_status.rate_idx = buf->rx_rate; 4170 return;
1655 rx_status.flag |= RX_FLAG_DECRYPTED;
1656 rx_status.flag |= RX_FLAG_IV_STRIPPED;
1657 4171
1658 skb_pull(priv->rx_skb, AT76_RX_HDRLEN); 4172 /* Not for our STA and not broadcast, ignore */
1659 at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d", 4173 if (compare_ether_addr(priv->netdev->dev_addr, mgmt->addr1)
1660 priv->rx_skb->len, priv->rx_skb->data_len); 4174 && !is_broadcast_ether_addr(mgmt->addr1))
1661 ieee80211_rx_irqsafe(priv->hw, priv->rx_skb, &rx_status); 4175 return;
1662 4176
1663 /* Use a new skb for the next receive */ 4177 if (priv->mac_state != MAC_ASSOC && priv->mac_state != MAC_CONNECTED
1664 priv->rx_skb = NULL; 4178 && priv->mac_state != MAC_JOINING) {
4179 printk(KERN_INFO "%s: DisAssoc in state %s ignored\n",
4180 priv->netdev->name, mac_states[priv->mac_state]);
4181 return;
4182 }
1665 4183
1666 at76_submit_rx_urb(priv); 4184 if (priv->mac_state == MAC_CONNECTED) {
4185 netif_carrier_off(priv->netdev);
4186 netif_stop_queue(priv->netdev);
4187 at76_iwevent_bss_disconnect(priv->netdev);
4188 }
4189 cancel_delayed_work(&priv->dwork_get_scan);
4190 cancel_delayed_work(&priv->dwork_beacon);
4191 cancel_delayed_work(&priv->dwork_auth);
4192 cancel_delayed_work(&priv->dwork_assoc);
4193 at76_set_mac_state(priv, MAC_JOINING);
4194 schedule_work(&priv->work_join);
1667} 4195}
1668 4196
1669/* Load firmware into kernel memory and parse it */ 4197static void at76_rx_mgmt_auth(struct at76_priv *priv,
1670static struct fwentry *at76_load_firmware(struct usb_device *udev, 4198 struct at76_rx_buffer *buf)
1671 enum board_type board_type)
1672{ 4199{
1673 int ret; 4200 struct ieee80211_auth *resp = (struct ieee80211_auth *)buf->packet;
1674 char *str; 4201 struct ieee80211_hdr_3addr *mgmt = &resp->header;
1675 struct at76_fw_header *fwh; 4202 int seq_nr = le16_to_cpu(resp->transaction);
1676 struct fwentry *fwe = &firmwares[board_type]; 4203 int alg = le16_to_cpu(resp->algorithm);
1677 4204 int status = le16_to_cpu(resp->status);
1678 mutex_lock(&fw_mutex); 4205
1679 4206 at76_dbg(DBG_RX_MGMT,
1680 if (fwe->loaded) { 4207 "%s: rx AuthFrame bssid %s alg %d seq_nr %d status %d "
1681 at76_dbg(DBG_FW, "re-using previously loaded fw"); 4208 "destination %s", priv->netdev->name, mac2str(mgmt->addr3),
1682 goto exit; 4209 alg, seq_nr, status, mac2str(mgmt->addr1));
4210
4211 if (alg == WLAN_AUTH_SHARED_KEY && seq_nr == 2)
4212 at76_dbg(DBG_RX_MGMT, "%s: AuthFrame challenge %s ...",
4213 priv->netdev->name, hex2str(resp->info_element, 18));
4214
4215 if (priv->mac_state != MAC_AUTH) {
4216 printk(KERN_INFO "%s: ignored AuthFrame in state %s\n",
4217 priv->netdev->name, mac_states[priv->mac_state]);
4218 return;
1683 } 4219 }
1684 4220 if (priv->auth_mode != alg) {
1685 at76_dbg(DBG_FW, "downloading firmware %s", fwe->fwname); 4221 printk(KERN_INFO "%s: ignored AuthFrame for alg %d\n",
1686 ret = request_firmware(&fwe->fw, fwe->fwname, &udev->dev); 4222 priv->netdev->name, alg);
1687 if (ret < 0) { 4223 return;
1688 dev_printk(KERN_ERR, &udev->dev, "firmware %s not found!\n",
1689 fwe->fwname);
1690 dev_printk(KERN_ERR, &udev->dev,
1691 "you may need to download the firmware from "
1692 "http://developer.berlios.de/projects/at76c503a/\n");
1693 goto exit;
1694 } 4224 }
1695 4225
1696 at76_dbg(DBG_FW, "got it."); 4226 BUG_ON(!priv->curr_bss);
1697 fwh = (struct at76_fw_header *)(fwe->fw->data);
1698 4227
1699 if (fwe->fw->size <= sizeof(*fwh)) { 4228 /* Not our BSSID or not for our STA, ignore */
1700 dev_printk(KERN_ERR, &udev->dev, 4229 if (compare_ether_addr(mgmt->addr3, priv->curr_bss->bssid)
1701 "firmware is too short (0x%zx)\n", fwe->fw->size); 4230 || compare_ether_addr(priv->netdev->dev_addr, mgmt->addr1))
1702 goto exit; 4231 return;
4232
4233 cancel_delayed_work(&priv->dwork_auth);
4234 if (status != WLAN_STATUS_SUCCESS) {
4235 /* try to join next bss */
4236 at76_set_mac_state(priv, MAC_JOINING);
4237 schedule_work(&priv->work_join);
4238 return;
1703 } 4239 }
1704 4240
1705 /* CRC currently not checked */ 4241 if (priv->auth_mode == WLAN_AUTH_OPEN || seq_nr == 4) {
1706 fwe->board_type = le32_to_cpu(fwh->board_type); 4242 priv->retries = ASSOC_RETRIES;
1707 if (fwe->board_type != board_type) { 4243 at76_set_mac_state(priv, MAC_ASSOC);
1708 dev_printk(KERN_ERR, &udev->dev, 4244 at76_assoc_req(priv, priv->curr_bss);
1709 "board type mismatch, requested %u, got %u\n", 4245 at76_dbg(DBG_MGMT_TIMER,
1710 board_type, fwe->board_type); 4246 "%s:%d: starting mgmt_timer + HZ", __func__, __LINE__);
1711 goto exit; 4247 schedule_delayed_work(&priv->dwork_assoc, ASSOC_TIMEOUT);
4248 return;
1712 } 4249 }
1713 4250
1714 fwe->fw_version.major = fwh->major; 4251 WARN_ON(seq_nr != 2);
1715 fwe->fw_version.minor = fwh->minor; 4252 at76_auth_req(priv, priv->curr_bss, seq_nr + 1, resp->info_element);
1716 fwe->fw_version.patch = fwh->patch; 4253 at76_dbg(DBG_MGMT_TIMER, "%s:%d: starting mgmt_timer + HZ", __func__,
1717 fwe->fw_version.build = fwh->build; 4254 __LINE__);
4255 schedule_delayed_work(&priv->dwork_auth, AUTH_TIMEOUT);
4256}
1718 4257
1719 str = (char *)fwh + le32_to_cpu(fwh->str_offset); 4258static void at76_rx_mgmt_deauth(struct at76_priv *priv,
1720 fwe->intfw = (u8 *)fwh + le32_to_cpu(fwh->int_fw_offset); 4259 struct at76_rx_buffer *buf)
1721 fwe->intfw_size = le32_to_cpu(fwh->int_fw_len); 4260{
1722 fwe->extfw = (u8 *)fwh + le32_to_cpu(fwh->ext_fw_offset); 4261 struct ieee80211_disassoc *resp =
1723 fwe->extfw_size = le32_to_cpu(fwh->ext_fw_len); 4262 (struct ieee80211_disassoc *)buf->packet;
4263 struct ieee80211_hdr_3addr *mgmt = &resp->header;
4264
4265 at76_dbg(DBG_RX_MGMT | DBG_PROGRESS,
4266 "%s: rx DeAuth bssid %s reason 0x%04x destination %s",
4267 priv->netdev->name, mac2str(mgmt->addr3),
4268 le16_to_cpu(resp->reason), mac2str(mgmt->addr1));
4269
4270 if (priv->mac_state != MAC_AUTH && priv->mac_state != MAC_ASSOC
4271 && priv->mac_state != MAC_CONNECTED) {
4272 printk(KERN_INFO "%s: DeAuth in state %s ignored\n",
4273 priv->netdev->name, mac_states[priv->mac_state]);
4274 return;
4275 }
1724 4276
1725 fwe->loaded = 1; 4277 BUG_ON(!priv->curr_bss);
1726 4278
1727 dev_printk(KERN_DEBUG, &udev->dev, 4279 /* Not our BSSID, ignore */
1728 "using firmware %s (version %d.%d.%d-%d)\n", 4280 if (compare_ether_addr(mgmt->addr3, priv->curr_bss->bssid))
1729 fwe->fwname, fwh->major, fwh->minor, fwh->patch, fwh->build); 4281 return;
1730 4282
1731 at76_dbg(DBG_DEVSTART, "board %u, int %d:%d, ext %d:%d", board_type, 4283 /* Not for our STA and not broadcast, ignore */
1732 le32_to_cpu(fwh->int_fw_offset), le32_to_cpu(fwh->int_fw_len), 4284 if (compare_ether_addr(priv->netdev->dev_addr, mgmt->addr1)
1733 le32_to_cpu(fwh->ext_fw_offset), le32_to_cpu(fwh->ext_fw_len)); 4285 && !is_broadcast_ether_addr(mgmt->addr1))
1734 at76_dbg(DBG_DEVSTART, "firmware id %s", str); 4286 return;
1735 4287
1736exit: 4288 if (priv->mac_state == MAC_CONNECTED)
1737 mutex_unlock(&fw_mutex); 4289 at76_iwevent_bss_disconnect(priv->netdev);
1738 4290
1739 if (fwe->loaded) 4291 at76_set_mac_state(priv, MAC_JOINING);
1740 return fwe; 4292 schedule_work(&priv->work_join);
1741 else 4293 cancel_delayed_work(&priv->dwork_get_scan);
1742 return NULL; 4294 cancel_delayed_work(&priv->dwork_beacon);
4295 cancel_delayed_work(&priv->dwork_auth);
4296 cancel_delayed_work(&priv->dwork_assoc);
1743} 4297}
1744 4298
1745static void at76_mac80211_tx_callback(struct urb *urb) 4299static void at76_rx_mgmt_beacon(struct at76_priv *priv,
4300 struct at76_rx_buffer *buf)
1746{ 4301{
1747 struct at76_priv *priv = urb->context; 4302 int varpar_len;
1748 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(priv->tx_skb); 4303 /* beacon content */
4304 struct ieee80211_beacon *bdata = (struct ieee80211_beacon *)buf->packet;
4305 struct ieee80211_hdr_3addr *mgmt = &bdata->header;
4306
4307 struct list_head *lptr;
4308 struct bss_info *match; /* entry matching addr3 with its bssid */
4309 int new_entry = 0;
4310 int len;
4311 struct ieee80211_info_element *ie;
4312 int have_ssid = 0;
4313 int have_rates = 0;
4314 int have_channel = 0;
4315 int keep_going = 1;
4316 unsigned long flags;
4317
4318 spin_lock_irqsave(&priv->bss_list_spinlock, flags);
4319 if (priv->mac_state == MAC_CONNECTED) {
4320 /* in state MAC_CONNECTED we use the mgmt_timer to control
4321 the beacon of the BSS */
4322 BUG_ON(!priv->curr_bss);
4323
4324 if (!compare_ether_addr(priv->curr_bss->bssid, mgmt->addr3)) {
4325 /* We got our AP's beacon, defer the timeout handler.
4326 Kill pending work first, as schedule_delayed_work()
4327 won't do it. */
4328 cancel_delayed_work(&priv->dwork_beacon);
4329 schedule_delayed_work(&priv->dwork_beacon,
4330 BEACON_TIMEOUT);
4331 priv->curr_bss->rssi = buf->rssi;
4332 priv->beacons_received++;
4333 goto exit;
4334 }
4335 }
1749 4336
1750 at76_dbg(DBG_MAC80211, "%s()", __func__); 4337 /* look if we have this BSS already in the list */
4338 match = NULL;
1751 4339
1752 switch (urb->status) { 4340 if (!list_empty(&priv->bss_list)) {
1753 case 0: 4341 list_for_each(lptr, &priv->bss_list) {
1754 /* success */ 4342 struct bss_info *bss_ptr =
1755 /* FIXME: 4343 list_entry(lptr, struct bss_info, list);
1756 * is the frame really ACKed when tx_callback is called ? */ 4344 if (!compare_ether_addr(bss_ptr->bssid, mgmt->addr3)) {
1757 info->flags |= IEEE80211_TX_STAT_ACK; 4345 match = bss_ptr;
1758 break; 4346 break;
1759 case -ENOENT: 4347 }
1760 case -ECONNRESET: 4348 }
1761 /* fail, urb has been unlinked */
1762 /* FIXME: add error message */
1763 break;
1764 default:
1765 at76_dbg(DBG_URB, "%s - nonzero tx status received: %d",
1766 __func__, urb->status);
1767 break;
1768 } 4349 }
1769 4350
1770 memset(&info->status, 0, sizeof(info->status)); 4351 if (!match) {
4352 /* BSS not in the list - append it */
4353 match = kzalloc(sizeof(struct bss_info), GFP_ATOMIC);
4354 if (!match) {
4355 at76_dbg(DBG_BSS_TABLE,
4356 "%s: cannot kmalloc new bss info (%zd byte)",
4357 priv->netdev->name, sizeof(struct bss_info));
4358 goto exit;
4359 }
4360 new_entry = 1;
4361 list_add_tail(&match->list, &priv->bss_list);
4362 }
1771 4363
1772 ieee80211_tx_status_irqsafe(priv->hw, priv->tx_skb); 4364 match->capa = le16_to_cpu(bdata->capability);
4365 match->beacon_interval = le16_to_cpu(bdata->beacon_interval);
4366 match->rssi = buf->rssi;
4367 match->link_qual = buf->link_quality;
4368 match->noise_level = buf->noise_level;
4369 memcpy(match->bssid, mgmt->addr3, ETH_ALEN);
4370 at76_dbg(DBG_RX_BEACON, "%s: bssid %s", priv->netdev->name,
4371 mac2str(match->bssid));
4372
4373 ie = bdata->info_element;
4374
4375 /* length of var length beacon parameters */
4376 varpar_len = min_t(int, le16_to_cpu(buf->wlength) -
4377 sizeof(struct ieee80211_beacon),
4378 BEACON_MAX_DATA_LENGTH);
4379
4380 /* This routine steps through the bdata->data array to get
4381 * some useful information about the access point.
4382 * Currently, this implementation supports receipt of: SSID,
4383 * supported transfer rates and channel, in any order, with some
4384 * tolerance for intermittent unknown codes (although this
4385 * functionality may not be necessary as the useful information will
4386 * usually arrive in consecutively, but there have been some
4387 * reports of some of the useful information fields arriving in a
4388 * different order).
4389 * It does not support any more IE types although MFIE_TYPE_TIM may
4390 * be supported (on my AP at least).
4391 * The bdata->data array is about 1500 bytes long but only ~36 of those
4392 * bytes are useful, hence the have_ssid etc optimizations. */
4393
4394 while (keep_going &&
4395 ((&ie->data[ie->len] - (u8 *)bdata->info_element) <=
4396 varpar_len)) {
4397
4398 switch (ie->id) {
4399
4400 case MFIE_TYPE_SSID:
4401 if (have_ssid)
4402 break;
1773 4403
1774 priv->tx_skb = NULL; 4404 len = min_t(int, IW_ESSID_MAX_SIZE, ie->len);
1775 4405
1776 ieee80211_wake_queues(priv->hw); 4406 /* we copy only if this is a new entry,
1777} 4407 or the incoming SSID is not a hidden SSID. This
4408 will protect us from overwriting a real SSID read
4409 in a ProbeResponse with a hidden one from a
4410 following beacon. */
4411 if (!new_entry && at76_is_hidden_ssid(ie->data, len)) {
4412 have_ssid = 1;
4413 break;
4414 }
1778 4415
1779static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 4416 match->ssid_len = len;
1780{ 4417 memcpy(match->ssid, ie->data, len);
1781 struct at76_priv *priv = hw->priv; 4418 at76_dbg(DBG_RX_BEACON, "%s: SSID - %.*s",
1782 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer; 4419 priv->netdev->name, len, match->ssid);
1783 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 4420 have_ssid = 1;
1784 int padding, submit_len, ret; 4421 break;
1785 4422
1786 at76_dbg(DBG_MAC80211, "%s()", __func__); 4423 case MFIE_TYPE_RATES:
4424 if (have_rates)
4425 break;
1787 4426
1788 if (priv->tx_urb->status == -EINPROGRESS) { 4427 match->rates_len =
1789 printk(KERN_ERR "%s: %s called while tx urb is pending\n", 4428 min_t(int, sizeof(match->rates), ie->len);
1790 wiphy_name(priv->hw->wiphy), __func__); 4429 memcpy(match->rates, ie->data, match->rates_len);
1791 return NETDEV_TX_BUSY; 4430 have_rates = 1;
1792 } 4431 at76_dbg(DBG_RX_BEACON, "%s: SUPPORTED RATES %s",
4432 priv->netdev->name,
4433 hex2str(ie->data, ie->len));
4434 break;
1793 4435
1794 ieee80211_stop_queues(hw); 4436 case MFIE_TYPE_DS_SET:
4437 if (have_channel)
4438 break;
1795 4439
1796 at76_ledtrig_tx_activity(); /* tell ledtrigger we send a packet */ 4440 match->channel = ie->data[0];
4441 have_channel = 1;
4442 at76_dbg(DBG_RX_BEACON, "%s: CHANNEL - %d",
4443 priv->netdev->name, match->channel);
4444 break;
1797 4445
1798 WARN_ON(priv->tx_skb != NULL); 4446 case MFIE_TYPE_CF_SET:
4447 case MFIE_TYPE_TIM:
4448 case MFIE_TYPE_IBSS_SET:
4449 default:
4450 at76_dbg(DBG_RX_BEACON, "%s: beacon IE id %d len %d %s",
4451 priv->netdev->name, ie->id, ie->len,
4452 hex2str(ie->data, ie->len));
4453 break;
4454 }
1799 4455
1800 priv->tx_skb = skb; 4456 /* advance to the next informational element */
1801 padding = at76_calc_padding(skb->len); 4457 next_ie(&ie);
1802 submit_len = AT76_TX_HDRLEN + skb->len + padding;
1803 4458
1804 /* setup 'Atmel' header */ 4459 /* Optimization: after all, the bdata->data array is
1805 memset(tx_buffer, 0, sizeof(*tx_buffer)); 4460 * varpar_len bytes long, whereas we get all of the useful
1806 tx_buffer->padding = padding; 4461 * information after only ~36 bytes, this saves us a lot of
1807 tx_buffer->wlength = cpu_to_le16(skb->len); 4462 * time (and trouble as the remaining portion of the array
1808 tx_buffer->tx_rate = ieee80211_get_tx_rate(hw, info)->hw_value; 4463 * could be full of junk)
1809 if (FIRMWARE_IS_WPA(priv->fw_version) && info->control.hw_key) { 4464 * Comment this out if you want to see what other information
1810 tx_buffer->key_id = (info->control.hw_key->keyidx); 4465 * comes from the AP - although little of it may be useful */
1811 tx_buffer->cipher_type = 4466 }
1812 priv->keys[info->control.hw_key->keyidx].cipher;
1813 tx_buffer->cipher_length =
1814 priv->keys[info->control.hw_key->keyidx].keylen;
1815 tx_buffer->reserved = 0;
1816 } else {
1817 tx_buffer->key_id = 0;
1818 tx_buffer->cipher_type = 0;
1819 tx_buffer->cipher_length = 0;
1820 tx_buffer->reserved = 0;
1821 };
1822 /* memset(tx_buffer->reserved, 0, sizeof(tx_buffer->reserved)); */
1823 memcpy(tx_buffer->packet, skb->data, skb->len);
1824 4467
1825 at76_dbg(DBG_TX_DATA, "%s tx: wlen 0x%x pad 0x%x rate %d hdr", 4468 at76_dbg(DBG_RX_BEACON, "%s: Finished processing beacon data",
1826 wiphy_name(priv->hw->wiphy), le16_to_cpu(tx_buffer->wlength), 4469 priv->netdev->name);
1827 tx_buffer->padding, tx_buffer->tx_rate);
1828 4470
1829 /* send stuff */ 4471 match->last_rx = jiffies; /* record last rx of beacon */
1830 at76_dbg_dump(DBG_TX_DATA_CONTENT, tx_buffer, submit_len,
1831 "%s(): tx_buffer %d bytes:", __func__, submit_len);
1832 usb_fill_bulk_urb(priv->tx_urb, priv->udev, priv->tx_pipe, tx_buffer,
1833 submit_len, at76_mac80211_tx_callback, priv);
1834 ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC);
1835 if (ret) {
1836 printk(KERN_ERR "%s: error in tx submit urb: %d\n",
1837 wiphy_name(priv->hw->wiphy), ret);
1838 if (ret == -EINVAL)
1839 printk(KERN_ERR
1840 "%s: -EINVAL: tx urb %p hcpriv %p complete %p\n",
1841 wiphy_name(priv->hw->wiphy), priv->tx_urb,
1842 priv->tx_urb->hcpriv, priv->tx_urb->complete);
1843 }
1844 4472
1845 return 0; 4473exit:
4474 spin_unlock_irqrestore(&priv->bss_list_spinlock, flags);
1846} 4475}
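
The beacon handler above walks the variable part of the frame as a chain of information elements, each a one-byte id, a one-byte length and len bytes of data, and stops early once SSID, rates and channel have been seen. A minimal standalone walk over such an id/len/data buffer (simplified; the element ids shown are the standard SSID and DS-parameter-set values, the rest is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Walk id/len/data information elements stored in buf[0..buflen). */
static void walk_ies(const uint8_t *buf, size_t buflen)
{
        size_t pos = 0;

        while (pos + 2 <= buflen) {
                uint8_t id  = buf[pos];
                uint8_t len = buf[pos + 1];

                if (pos + 2 + len > buflen)
                        break;          /* truncated element, stop */

                if (id == 0)            /* element id 0: SSID */
                        printf("SSID: %.*s\n", len,
                               (const char *)&buf[pos + 2]);
                else if (id == 3 && len >= 1)   /* id 3: DS parameter set */
                        printf("channel: %u\n", buf[pos + 2]);

                pos += 2 + len;         /* advance to the next element */
        }
}
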
1847 4476
1848static int at76_mac80211_start(struct ieee80211_hw *hw) 4477/* Calculate the link level from a given rx_buffer */
4478static void at76_calc_level(struct at76_priv *priv, struct at76_rx_buffer *buf,
4479 struct iw_quality *qual)
1849{ 4480{
1850 struct at76_priv *priv = hw->priv; 4481 /* just a guess for now, might be different for other chips */
1851 int ret; 4482 int max_rssi = 42;
1852
1853 at76_dbg(DBG_MAC80211, "%s()", __func__);
1854 4483
1855 mutex_lock(&priv->mtx); 4484 qual->level = (buf->rssi * 100 / max_rssi);
1856 4485 if (qual->level > 100)
1857 ret = at76_submit_rx_urb(priv); 4486 qual->level = 100;
1858 if (ret < 0) { 4487 qual->updated |= IW_QUAL_LEVEL_UPDATED;
1859 printk(KERN_ERR "%s: open: submit_rx_urb failed: %d\n", 4488}
1860 wiphy_name(priv->hw->wiphy), ret);
1861 goto error;
1862 }
1863 4489
1864 at76_startup_device(priv); 4490/* Calculate the link quality from a given rx_buffer */
4491static void at76_calc_qual(struct at76_priv *priv, struct at76_rx_buffer *buf,
4492 struct iw_quality *qual)
4493{
4494 if (at76_is_intersil(priv->board_type))
4495 qual->qual = buf->link_quality;
4496 else {
4497 unsigned long elapsed;
1865 4498
1866 at76_start_monitor(priv); 4499 /* Update qual at most once a second */
4500 elapsed = jiffies - priv->beacons_last_qual;
4501 if (elapsed < 1 * HZ)
4502 return;
1867 4503
1868error: 4504 qual->qual = qual->level * priv->beacons_received *
1869 mutex_unlock(&priv->mtx); 4505 msecs_to_jiffies(priv->beacon_period) / elapsed;
1870 4506
1871 return 0; 4507 priv->beacons_last_qual = jiffies;
4508 priv->beacons_received = 0;
4509 }
4510 qual->qual = (qual->qual > 100) ? 100 : qual->qual;
4511 qual->updated |= IW_QUAL_QUAL_UPDATED;
1872} 4512}
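
The two helpers above are straightforward scaling: the signal level is the RSSI mapped onto 0-100 against an assumed maximum of 42, and (for non-Intersil boards) quality is that level weighted by how many of the expected beacons actually arrived since the last update. The arithmetic, stripped of the driver context:

#include <stdio.h>

static int calc_level(int rssi)
{
        int max_rssi = 42;      /* guess used by the driver above */
        int level = rssi * 100 / max_rssi;

        return level > 100 ? 100 : level;
}

/* quality ~ level scaled by the fraction of expected beacons seen */
static int calc_qual(int level, int beacons_received, int beacons_expected)
{
        int qual = beacons_expected ?
                   level * beacons_received / beacons_expected : 0;

        return qual > 100 ? 100 : qual;
}

int main(void)
{
        int level = calc_level(30);

        printf("level %d qual %d\n", level, calc_qual(level, 9, 10));
        return 0;
}
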
1873 4513
1874static void at76_mac80211_stop(struct ieee80211_hw *hw) 4514/* Calculate the noise quality from a given rx_buffer */
4515static void at76_calc_noise(struct at76_priv *priv, struct at76_rx_buffer *buf,
4516 struct iw_quality *qual)
1875{ 4517{
1876 struct at76_priv *priv = hw->priv; 4518 qual->noise = 0;
1877 4519 qual->updated |= IW_QUAL_NOISE_INVALID;
1878 at76_dbg(DBG_MAC80211, "%s()", __func__); 4520}
1879
1880 mutex_lock(&priv->mtx);
1881 4521
1882 if (!priv->device_unplugged) { 4522static void at76_update_wstats(struct at76_priv *priv,
1883 /* We are called by "ifconfig ethX down", not because the 4523 struct at76_rx_buffer *buf)
1884 * device is not available anymore. */ 4524{
1885 if (at76_set_radio(priv, 0) == 1) 4525 struct iw_quality *qual = &priv->wstats.qual;
1886 at76_wait_completion(priv, CMD_RADIO_ON);
1887 4526
1888 /* We unlink rx_urb because at76_open() re-submits it. 4527 if (buf->rssi && priv->mac_state == MAC_CONNECTED) {
1889 * If unplugged, at76_delete_device() takes care of it. */ 4528 qual->updated = 0;
1890 usb_kill_urb(priv->rx_urb); 4529 at76_calc_level(priv, buf, qual);
4530 at76_calc_qual(priv, buf, qual);
4531 at76_calc_noise(priv, buf, qual);
4532 } else {
4533 qual->qual = 0;
4534 qual->level = 0;
4535 qual->noise = 0;
4536 qual->updated = IW_QUAL_ALL_INVALID;
1891 } 4537 }
1892
1893 mutex_unlock(&priv->mtx);
1894} 4538}
1895 4539
1896static int at76_add_interface(struct ieee80211_hw *hw, 4540static void at76_rx_mgmt(struct at76_priv *priv, struct at76_rx_buffer *buf)
1897 struct ieee80211_if_init_conf *conf)
1898{ 4541{
1899 struct at76_priv *priv = hw->priv; 4542 struct ieee80211_hdr_3addr *mgmt =
1900 int ret = 0; 4543 (struct ieee80211_hdr_3addr *)buf->packet;
4544 u16 framectl = le16_to_cpu(mgmt->frame_ctl);
4545
4546 /* update wstats */
4547 if (priv->mac_state != MAC_INIT && priv->mac_state != MAC_SCANNING) {
4548 /* jal: this is a dirty hack needed by Tim in ad-hoc mode */
4549 /* Data packets always seem to have a 0 link level, so we
4550 only read link quality info from management packets.
4551 Atmel driver actually averages the present, and previous
4552 values, we just present the raw value at the moment - TJS */
4553 if (priv->iw_mode == IW_MODE_ADHOC
4554 || (priv->curr_bss
4555 && !compare_ether_addr(mgmt->addr3,
4556 priv->curr_bss->bssid)))
4557 at76_update_wstats(priv, buf);
4558 }
1901 4559
1902 at76_dbg(DBG_MAC80211, "%s()", __func__); 4560 at76_dbg(DBG_RX_MGMT_CONTENT, "%s rx mgmt framectl 0x%x %s",
4561 priv->netdev->name, framectl,
4562 hex2str(mgmt, le16_to_cpu(buf->wlength)));
1903 4563
1904 mutex_lock(&priv->mtx); 4564 switch (framectl & IEEE80211_FCTL_STYPE) {
4565 case IEEE80211_STYPE_BEACON:
4566 case IEEE80211_STYPE_PROBE_RESP:
4567 at76_rx_mgmt_beacon(priv, buf);
4568 break;
4569
4570 case IEEE80211_STYPE_ASSOC_RESP:
4571 at76_rx_mgmt_assoc(priv, buf);
4572 break;
1905 4573
1906 switch (conf->type) { 4574 case IEEE80211_STYPE_DISASSOC:
1907 case NL80211_IFTYPE_STATION: 4575 at76_rx_mgmt_disassoc(priv, buf);
1908 priv->iw_mode = IW_MODE_INFRA;
1909 break; 4576 break;
4577
4578 case IEEE80211_STYPE_AUTH:
4579 at76_rx_mgmt_auth(priv, buf);
4580 break;
4581
4582 case IEEE80211_STYPE_DEAUTH:
4583 at76_rx_mgmt_deauth(priv, buf);
4584 break;
4585
1910 default: 4586 default:
1911 ret = -EOPNOTSUPP; 4587 printk(KERN_DEBUG "%s: ignoring frame with framectl 0x%04x\n",
1912 goto exit; 4588 priv->netdev->name, framectl);
1913 } 4589 }
1914 4590
1915exit: 4591 return;
1916 mutex_unlock(&priv->mtx);
1917
1918 return ret;
1919} 4592}
1920 4593
1921static void at76_remove_interface(struct ieee80211_hw *hw, 4594/* Convert the 802.11 header into an ethernet-style header, make skb
1922 struct ieee80211_if_init_conf *conf) 4595 * ready for consumption by netif_rx() */
4596static void at76_ieee80211_to_eth(struct sk_buff *skb, int iw_mode)
1923{ 4597{
1924 at76_dbg(DBG_MAC80211, "%s()", __func__); 4598 struct ieee80211_hdr_3addr *i802_11_hdr;
1925} 4599 struct ethhdr *eth_hdr_p;
4600 u8 *src_addr;
4601 u8 *dest_addr;
1926 4602
1927static int at76_join(struct at76_priv *priv) 4603 i802_11_hdr = (struct ieee80211_hdr_3addr *)skb->data;
1928{
1929 struct at76_req_join join;
1930 int ret;
1931 4604
1932 memset(&join, 0, sizeof(struct at76_req_join)); 4605 /* That would be the ethernet header if the hardware converted
1933 memcpy(join.essid, priv->essid, priv->essid_size); 4606 * the frame for us. Make sure the source and the destination
1934 join.essid_size = priv->essid_size; 4607 * match the 802.11 header. Which hardware does it? */
1935 memcpy(join.bssid, priv->bssid, ETH_ALEN); 4608 eth_hdr_p = (struct ethhdr *)skb_pull(skb, IEEE80211_3ADDR_LEN);
1936 join.bss_type = INFRASTRUCTURE_MODE;
1937 join.channel = priv->channel;
1938 join.timeout = cpu_to_le16(2000);
1939 4609
1940 at76_dbg(DBG_MAC80211, "%s: sending CMD_JOIN", __func__); 4610 dest_addr = i802_11_hdr->addr1;
1941 ret = at76_set_card_command(priv->udev, CMD_JOIN, &join, 4611 if (iw_mode == IW_MODE_ADHOC)
1942 sizeof(struct at76_req_join)); 4612 src_addr = i802_11_hdr->addr2;
4613 else
4614 src_addr = i802_11_hdr->addr3;
1943 4615
1944 if (ret < 0) { 4616 if (!compare_ether_addr(eth_hdr_p->h_source, src_addr) &&
1945 printk(KERN_ERR "%s: at76_set_card_command failed: %d\n", 4617 !compare_ether_addr(eth_hdr_p->h_dest, dest_addr))
1946 wiphy_name(priv->hw->wiphy), ret); 4618 /* Yes, we already have an ethernet header */
1947 return 0; 4619 skb_reset_mac_header(skb);
1948 } 4620 else {
4621 u16 len;
4622
4623 /* Need to build an ethernet header */
4624 if (!memcmp(skb->data, snapsig, sizeof(snapsig))) {
4625 /* SNAP frame - decapsulate, keep proto */
4626 skb_push(skb, offsetof(struct ethhdr, h_proto) -
4627 sizeof(rfc1042sig));
4628 len = 0;
4629 } else {
4630 /* 802.3 frame, proto is length */
4631 len = skb->len;
4632 skb_push(skb, ETH_HLEN);
4633 }
1949 4634
1950 ret = at76_wait_completion(priv, CMD_JOIN); 4635 skb_reset_mac_header(skb);
1951 at76_dbg(DBG_MAC80211, "%s: CMD_JOIN returned: 0x%02x", __func__, ret); 4636 eth_hdr_p = eth_hdr(skb);
1952 if (ret != CMD_STATUS_COMPLETE) { 4637 /* This needs to be done in this order (eth_hdr_p->h_dest may
1953 printk(KERN_ERR "%s: at76_wait_completion failed: %d\n", 4638 * overlap src_addr) */
1954 wiphy_name(priv->hw->wiphy), ret); 4639 memcpy(eth_hdr_p->h_source, src_addr, ETH_ALEN);
1955 return 0; 4640 memcpy(eth_hdr_p->h_dest, dest_addr, ETH_ALEN);
4641 if (len)
4642 eth_hdr_p->h_proto = htons(len);
1956 } 4643 }
1957 4644
1958 at76_set_tkip_bssid(priv, priv->bssid); 4645 skb->protocol = eth_type_trans(skb, skb->dev);
1959 at76_set_pm_mode(priv);
1960
1961 return 0;
1962} 4646}
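
at76_ieee80211_to_eth() above either finds an Ethernet header already in place, strips an RFC 1042/SNAP encapsulation while keeping the EtherType, or treats the payload as raw 802.3 and stores the length in the protocol field. A small self-contained check for the SNAP case (the six-byte signature is the usual rfc1042 LLC/SNAP header; the names here are illustrative, not the driver's tables):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* LLC/SNAP header used by RFC 1042 encapsulation: AA AA 03 00 00 00 */
static const uint8_t rfc1042sig[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

/* True if the payload starts with a SNAP header; *proto then receives
 * the EtherType that follows the signature (big-endian on the wire). */
static bool is_snap_frame(const uint8_t *payload, size_t len, uint16_t *proto)
{
        if (len < sizeof(rfc1042sig) + 2)
                return false;
        if (memcmp(payload, rfc1042sig, sizeof(rfc1042sig)))
                return false;

        *proto = (uint16_t)(payload[6] << 8 | payload[7]);
        return true;
}
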
1963 4647
1964static void at76_dwork_hw_scan(struct work_struct *work) 4648/* Check for fragmented data in priv->rx_skb. If the packet was no fragment
4649 or it was the last of a fragment set a skb containing the whole packet
4650 is returned for further processing. Otherwise we get NULL and are
4651 done and the packet is either stored inside the fragment buffer
4652 or thrown away. Every returned skb starts with the ieee802_11 header
4653 and contains _no_ FCS at the end */
4654static struct sk_buff *at76_check_for_rx_frags(struct at76_priv *priv)
1965{ 4655{
1966 struct at76_priv *priv = container_of(work, struct at76_priv, 4656 struct sk_buff *skb = priv->rx_skb;
1967 dwork_hw_scan.work); 4657 struct at76_rx_buffer *buf = (struct at76_rx_buffer *)skb->data;
1968 int ret; 4658 struct ieee80211_hdr_3addr *i802_11_hdr =
4659 (struct ieee80211_hdr_3addr *)buf->packet;
4660 /* seq_ctrl, fragment_number, sequence number of new packet */
4661 u16 sctl = le16_to_cpu(i802_11_hdr->seq_ctl);
4662 u16 fragnr = sctl & 0xf;
4663 u16 seqnr = sctl >> 4;
4664 u16 frame_ctl = le16_to_cpu(i802_11_hdr->frame_ctl);
1969 4665
1970 ret = at76_get_cmd_status(priv->udev, CMD_SCAN); 4666 /* Length including the IEEE802.11 header, but without the trailing
1971 at76_dbg(DBG_MAC80211, "%s: CMD_SCAN status 0x%02x", __func__, ret); 4667 * FCS and without the Atmel Rx header */
4668 int length = le16_to_cpu(buf->wlength) - IEEE80211_FCS_LEN;
1972 4669
1973 /* FIXME: add maximum time for scan to complete */ 4670 /* where does the data payload start in skb->data ? */
4671 u8 *data = i802_11_hdr->payload;
1974 4672
1975 if (ret != CMD_STATUS_COMPLETE) { 4673 /* length of payload, excl. the trailing FCS */
1976 queue_delayed_work(priv->hw->workqueue, &priv->dwork_hw_scan, 4674 int data_len = length - IEEE80211_3ADDR_LEN;
1977 SCAN_POLL_INTERVAL); 4675
1978 goto exit; 4676 int i;
4677 struct rx_data_buf *bptr, *optr;
4678 unsigned long oldest = ~0UL;
4679
4680 at76_dbg(DBG_RX_FRAGS,
4681 "%s: rx data frame_ctl %04x addr2 %s seq/frag %d/%d "
4682 "length %d data %d: %s ...", priv->netdev->name, frame_ctl,
4683 mac2str(i802_11_hdr->addr2), seqnr, fragnr, length, data_len,
4684 hex2str(data, 32));
4685
4686 at76_dbg(DBG_RX_FRAGS_SKB, "%s: incoming skb: head %p data %p "
4687 "tail %p end %p len %d", priv->netdev->name, skb->head,
4688 skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
4689 skb->len);
4690
4691 if (data_len < 0) {
4692 /* make sure data starts in the buffer */
4693 printk(KERN_INFO "%s: data frame too short\n",
4694 priv->netdev->name);
4695 return NULL;
1979 } 4696 }
1980 4697
1981 ieee80211_scan_completed(priv->hw); 4698 WARN_ON(length <= AT76_RX_HDRLEN);
4699 if (length <= AT76_RX_HDRLEN)
4700 return NULL;
1982 4701
1983 if (is_valid_ether_addr(priv->bssid)) { 4702 /* remove the at76_rx_buffer header - we don't need it anymore */
1984 ieee80211_wake_queues(priv->hw); 4703 /* we need the IEEE802.11 header (for the addresses) if this packet
1985 at76_join(priv); 4704 is the first of a chain */
4705 skb_pull(skb, AT76_RX_HDRLEN);
4706
4707 /* remove FCS at end */
4708 skb_trim(skb, length);
4709
4710 at76_dbg(DBG_RX_FRAGS_SKB, "%s: trimmed skb: head %p data %p tail %p "
4711 "end %p len %d data %p data_len %d", priv->netdev->name,
4712 skb->head, skb->data, skb_tail_pointer(skb),
4713 skb_end_pointer(skb), skb->len, data, data_len);
4714
4715 if (fragnr == 0 && !(frame_ctl & IEEE80211_FCTL_MOREFRAGS)) {
4716 /* unfragmented packet received */
4717 /* Use a new skb for the next receive */
4718 priv->rx_skb = NULL;
4719 at76_dbg(DBG_RX_FRAGS, "%s: unfragmented", priv->netdev->name);
4720 return skb;
1986 } 4721 }
1987 4722
1988 ieee80211_wake_queues(priv->hw); 4723 /* look if we've got a chain for the sender address.
4724 afterwards optr points to first free or the oldest entry,
4725 or, if i < NR_RX_DATA_BUF, bptr points to the entry for the
4726 sender address */
4727 /* determining the oldest entry doesn't cope with jiffies wrapping
4728 but I don't care to delete a young entry at these rare moments ... */
4729
4730 bptr = priv->rx_data;
4731 optr = NULL;
4732 for (i = 0; i < NR_RX_DATA_BUF; i++, bptr++) {
4733 if (!bptr->skb) {
4734 optr = bptr;
4735 oldest = 0UL;
4736 continue;
4737 }
1989 4738
1990exit: 4739 if (!compare_ether_addr(i802_11_hdr->addr2, bptr->sender))
1991 return; 4740 break;
1992}
1993 4741
1994static int at76_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) 4742 if (!optr) {
1995{ 4743 optr = bptr;
1996 struct at76_priv *priv = hw->priv; 4744 oldest = bptr->last_rx;
1997 struct at76_req_scan scan; 4745 } else if (bptr->last_rx < oldest)
1998 int ret; 4746 optr = bptr;
4747 }
1999 4748
2000 at76_dbg(DBG_MAC80211, "%s():", __func__); 4749 if (i < NR_RX_DATA_BUF) {
2001 at76_dbg_dump(DBG_MAC80211, ssid, len, "ssid %zd bytes:", len); 4750
4751 at76_dbg(DBG_RX_FRAGS, "%s: %d. cacheentry (seq/frag = %d/%d) "
4752 "matched sender addr",
4753 priv->netdev->name, i, bptr->seqnr, bptr->fragnr);
4754
4755 /* bptr points to an entry for the sender address */
4756 if (bptr->seqnr == seqnr) {
4757 int left;
4758 /* the fragment has the current sequence number */
4759 if (((bptr->fragnr + 1) & 0xf) != fragnr) {
4760 /* wrong fragment number -> ignore it */
4761 /* is & 0xf necessary above ??? */
4762 at76_dbg(DBG_RX_FRAGS,
4763 "%s: frag nr mismatch: %d + 1 != %d",
4764 priv->netdev->name, bptr->fragnr,
4765 fragnr);
4766 return NULL;
4767 }
4768 bptr->last_rx = jiffies;
4769 /* the next following fragment number ->
4770 add the data at the end */
4771
4772 /* for test only ??? */
4773 left = skb_tailroom(bptr->skb);
4774 if (left < data_len)
4775 printk(KERN_INFO
4776 "%s: only %d byte free (need %d)\n",
4777 priv->netdev->name, left, data_len);
4778 else
4779 memcpy(skb_put(bptr->skb, data_len), data,
4780 data_len);
4781
4782 bptr->fragnr = fragnr;
4783 if (frame_ctl & IEEE80211_FCTL_MOREFRAGS)
4784 return NULL;
4785
4786 /* this was the last fragment - send it */
4787 skb = bptr->skb;
4788 bptr->skb = NULL; /* free the entry */
4789 at76_dbg(DBG_RX_FRAGS, "%s: last frag of seq %d",
4790 priv->netdev->name, seqnr);
4791 return skb;
4792 }
2002 4793
2003 mutex_lock(&priv->mtx); 4794 /* got another sequence number */
4795 if (fragnr == 0) {
4796 /* it's the start of a new chain - replace the
4797 old one by this */
4798 /* bptr->sender has the correct value already */
4799 at76_dbg(DBG_RX_FRAGS,
4800 "%s: start of new seq %d, removing old seq %d",
4801 priv->netdev->name, seqnr, bptr->seqnr);
4802 bptr->seqnr = seqnr;
4803 bptr->fragnr = 0;
4804 bptr->last_rx = jiffies;
4805 /* swap bptr->skb and priv->rx_skb */
4806 skb = bptr->skb;
4807 bptr->skb = priv->rx_skb;
4808 priv->rx_skb = skb;
4809 } else {
 4810 			/* it's from the middle of a new chain ->
4811 delete the old entry and skip the new one */
4812 at76_dbg(DBG_RX_FRAGS,
4813 "%s: middle of new seq %d (%d) "
4814 "removing old seq %d",
4815 priv->netdev->name, seqnr, fragnr,
4816 bptr->seqnr);
4817 dev_kfree_skb(bptr->skb);
4818 bptr->skb = NULL;
4819 }
4820 return NULL;
4821 }
2004 4822
2005 ieee80211_stop_queues(hw); 4823 /* if we didn't find a chain for the sender address, optr
4824 points either to the first free or the oldest entry */
2006 4825
2007 memset(&scan, 0, sizeof(struct at76_req_scan)); 4826 if (fragnr != 0) {
2008 	memset(scan.bssid, 0xFF, ETH_ALEN);		 4827 		/* this is not the beginning of a fragment chain ... */
2009 scan.scan_type = SCAN_TYPE_ACTIVE; 4828 at76_dbg(DBG_RX_FRAGS,
2010 if (priv->essid_size > 0) { 4829 "%s: no chain for non-first fragment (%d)",
2011 memcpy(scan.essid, ssid, len); 4830 priv->netdev->name, fragnr);
2012 scan.essid_size = len; 4831 return NULL;
2013 } 4832 }
2014 scan.min_channel_time = cpu_to_le16(priv->scan_min_time);
2015 scan.max_channel_time = cpu_to_le16(priv->scan_max_time);
2016 scan.probe_delay = cpu_to_le16(priv->scan_min_time * 1000);
2017 scan.international_scan = 0;
2018 4833
2019 at76_dbg(DBG_MAC80211, "%s: sending CMD_SCAN", __func__); 4834 BUG_ON(!optr);
2020 ret = at76_set_card_command(priv->udev, CMD_SCAN, &scan, sizeof(scan)); 4835 if (optr->skb) {
4836 /* swap the skb's */
4837 skb = optr->skb;
4838 optr->skb = priv->rx_skb;
4839 priv->rx_skb = skb;
2021 4840
2022 if (ret < 0) { 4841 at76_dbg(DBG_RX_FRAGS,
2023 err("CMD_SCAN failed: %d", ret); 4842 "%s: free old contents: sender %s seq/frag %d/%d",
2024 goto exit; 4843 priv->netdev->name, mac2str(optr->sender),
2025 } 4844 optr->seqnr, optr->fragnr);
2026 4845
2027 queue_delayed_work(priv->hw->workqueue, &priv->dwork_hw_scan, 4846 } else {
2028 SCAN_POLL_INTERVAL); 4847 /* take the skb from priv->rx_skb */
4848 optr->skb = priv->rx_skb;
4849 /* let at76_submit_rx_urb() allocate a new skb */
4850 priv->rx_skb = NULL;
2029 4851
2030exit: 4852 at76_dbg(DBG_RX_FRAGS, "%s: use a free entry",
2031 mutex_unlock(&priv->mtx); 4853 priv->netdev->name);
4854 }
4855 memcpy(optr->sender, i802_11_hdr->addr2, ETH_ALEN);
4856 optr->seqnr = seqnr;
4857 optr->fragnr = 0;
4858 optr->last_rx = jiffies;
2032 4859
2033 return 0; 4860 return NULL;
2034} 4861}
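
The fragment cache above is keyed on the sender address plus the 802.11 sequence-control field: the low four bits carry the fragment number, the upper twelve bits the sequence number, and a fragment is appended only if it directly follows the last one cached. Unpacking that field, in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t seq_ctl = 0x1234;              /* example host-order value */
        uint16_t fragnr  = seq_ctl & 0xf;       /* low 4 bits: fragment number */
        uint16_t seqnr   = seq_ctl >> 4;        /* upper 12 bits: sequence number */

        printf("seq %u frag %u\n", seqnr, fragnr);
        return 0;
}
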
2035 4862
2036static int at76_config(struct ieee80211_hw *hw, u32 changed) 4863/* Rx interrupt: we expect the complete data buffer in priv->rx_skb */
4864static void at76_rx_data(struct at76_priv *priv)
2037{ 4865{
2038 struct at76_priv *priv = hw->priv; 4866 struct net_device *netdev = priv->netdev;
2039 struct ieee80211_conf *conf = &hw->conf; 4867 struct net_device_stats *stats = &priv->stats;
4868 struct sk_buff *skb = priv->rx_skb;
4869 struct at76_rx_buffer *buf = (struct at76_rx_buffer *)skb->data;
4870 struct ieee80211_hdr_3addr *i802_11_hdr;
4871 int length = le16_to_cpu(buf->wlength);
2040 4872
2041 at76_dbg(DBG_MAC80211, "%s(): channel %d radio %d", 4873 at76_dbg(DBG_RX_DATA, "%s received data packet: %s", netdev->name,
2042 __func__, conf->channel->hw_value, conf->radio_enabled); 4874 hex2str(skb->data, AT76_RX_HDRLEN));
2043 at76_dbg_dump(DBG_MAC80211, priv->essid, priv->essid_size, "ssid:");
2044 at76_dbg_dump(DBG_MAC80211, priv->bssid, ETH_ALEN, "bssid:");
2045 4875
2046 mutex_lock(&priv->mtx); 4876 at76_dbg(DBG_RX_DATA_CONTENT, "rx packet: %s",
4877 hex2str(skb->data + AT76_RX_HDRLEN, length));
2047 4878
2048 priv->channel = conf->channel->hw_value; 4879 skb = at76_check_for_rx_frags(priv);
4880 if (!skb)
4881 return;
2049 4882
2050 if (is_valid_ether_addr(priv->bssid)) { 4883 /* Atmel header and the FCS are already removed */
2051 at76_join(priv); 4884 i802_11_hdr = (struct ieee80211_hdr_3addr *)skb->data;
2052 ieee80211_wake_queues(priv->hw);
2053 } else {
2054 ieee80211_stop_queues(priv->hw);
2055 at76_start_monitor(priv);
2056 };
2057 4885
2058 mutex_unlock(&priv->mtx); 4886 skb->dev = netdev;
4887 skb->ip_summed = CHECKSUM_NONE; /* TODO: should check CRC */
2059 4888
2060 return 0; 4889 if (is_broadcast_ether_addr(i802_11_hdr->addr1)) {
2061} 4890 if (!compare_ether_addr(i802_11_hdr->addr1, netdev->broadcast))
4891 skb->pkt_type = PACKET_BROADCAST;
4892 else
4893 skb->pkt_type = PACKET_MULTICAST;
4894 } else if (compare_ether_addr(i802_11_hdr->addr1, netdev->dev_addr))
4895 skb->pkt_type = PACKET_OTHERHOST;
2062 4896
2063static int at76_config_interface(struct ieee80211_hw *hw, 4897 at76_ieee80211_to_eth(skb, priv->iw_mode);
2064 struct ieee80211_vif *vif,
2065 struct ieee80211_if_conf *conf)
2066{
2067 struct at76_priv *priv = hw->priv;
2068 4898
2069 at76_dbg_dump(DBG_MAC80211, conf->bssid, ETH_ALEN, "bssid:"); 4899 netdev->last_rx = jiffies;
4900 netif_rx(skb);
4901 stats->rx_packets++;
4902 stats->rx_bytes += length;
2070 4903
2071 mutex_lock(&priv->mtx); 4904 return;
4905}
2072 4906
2073 memcpy(priv->bssid, conf->bssid, ETH_ALEN); 4907static void at76_rx_monitor_mode(struct at76_priv *priv)
2074// memcpy(priv->essid, conf->ssid, conf->ssid_len); 4908{
2075// priv->essid_size = conf->ssid_len; 4909 struct at76_rx_radiotap *rt;
4910 u8 *payload;
4911 int skblen;
4912 struct net_device *netdev = priv->netdev;
4913 struct at76_rx_buffer *buf =
4914 (struct at76_rx_buffer *)priv->rx_skb->data;
4915 /* length including the IEEE802.11 header and the trailing FCS,
4916 but not at76_rx_buffer */
4917 int length = le16_to_cpu(buf->wlength);
4918 struct sk_buff *skb = priv->rx_skb;
4919 struct net_device_stats *stats = &priv->stats;
2076 4920
2077 if (is_valid_ether_addr(priv->bssid)) { 4921 if (length < IEEE80211_FCS_LEN) {
2078 /* mac80211 is joining a bss */ 4922 /* buffer contains no data */
2079 ieee80211_wake_queues(priv->hw); 4923 at76_dbg(DBG_MONITOR_MODE,
2080 at76_join(priv); 4924 "%s: MONITOR MODE: rx skb without data",
2081 } else 4925 priv->netdev->name);
2082 ieee80211_stop_queues(priv->hw); 4926 return;
4927 }
2083 4928
2084 mutex_unlock(&priv->mtx); 4929 skblen = sizeof(struct at76_rx_radiotap) + length;
2085 4930
2086 return 0; 4931 skb = dev_alloc_skb(skblen);
4932 if (!skb) {
4933 printk(KERN_ERR "%s: MONITOR MODE: dev_alloc_skb for radiotap "
4934 "header returned NULL\n", priv->netdev->name);
4935 return;
4936 }
4937
4938 skb_put(skb, skblen);
4939
4940 rt = (struct at76_rx_radiotap *)skb->data;
4941 payload = skb->data + sizeof(struct at76_rx_radiotap);
4942
4943 rt->rt_hdr.it_version = 0;
4944 rt->rt_hdr.it_pad = 0;
4945 rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct at76_rx_radiotap));
4946 rt->rt_hdr.it_present = cpu_to_le32(AT76_RX_RADIOTAP_PRESENT);
4947
4948 rt->rt_tsft = cpu_to_le64(le32_to_cpu(buf->rx_time));
4949 rt->rt_rate = hw_rates[buf->rx_rate] & (~0x80);
4950 rt->rt_signal = buf->rssi;
4951 rt->rt_noise = buf->noise_level;
4952 rt->rt_flags = IEEE80211_RADIOTAP_F_FCS;
4953 if (buf->fragmentation)
4954 rt->rt_flags |= IEEE80211_RADIOTAP_F_FRAG;
4955
4956 memcpy(payload, buf->packet, length);
4957 skb->dev = netdev;
4958 skb->ip_summed = CHECKSUM_NONE;
4959 skb_reset_mac_header(skb);
4960 skb->pkt_type = PACKET_OTHERHOST;
4961 skb->protocol = htons(ETH_P_802_2);
4962
4963 netdev->last_rx = jiffies;
4964 netif_rx(skb);
4965 stats->rx_packets++;
4966 stats->rx_bytes += length;
2087} 4967}
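
Monitor-mode frames are handed to the stack with a radiotap header prepended to the raw 802.11 frame. The fixed part of that header is just a version byte, a pad byte, a little-endian total length and a little-endian bitmap of the optional fields that follow. A minimal sketch of filling only that fixed part (layout per the radiotap specification; the struct and values are illustrative, not the driver's at76_rx_radiotap):

#include <stdint.h>
#include <string.h>

/* Fixed part of a radiotap header, per the radiotap specification. */
struct radiotap_header {
        uint8_t  it_version;    /* always 0 */
        uint8_t  it_pad;
        uint16_t it_len;        /* little-endian: total radiotap header length */
        uint32_t it_present;    /* little-endian: bitmap of present fields */
} __attribute__((packed));

/* Write a minimal header that claims no optional fields are present.
 * Assumes a little-endian host, so no byte swapping is done here. */
static size_t write_radiotap(uint8_t *buf, size_t buflen)
{
        struct radiotap_header hdr = {
                .it_version = 0,
                .it_pad     = 0,
                .it_len     = sizeof(hdr),
                .it_present = 0,
        };

        if (buflen < sizeof(hdr))
                return 0;
        memcpy(buf, &hdr, sizeof(hdr));
        return sizeof(hdr);
}
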
2088 4968
2089/* must be atomic */ 4969/* Check if we spy on the sender address in buf and update stats */
2090static void at76_configure_filter(struct ieee80211_hw *hw, 4970static void at76_iwspy_update(struct at76_priv *priv,
2091 unsigned int changed_flags, 4971 struct at76_rx_buffer *buf)
2092 unsigned int *total_flags, int mc_count,
2093 struct dev_addr_list *mc_list)
2094{ 4972{
2095 struct at76_priv *priv = hw->priv; 4973 struct ieee80211_hdr_3addr *hdr =
2096 int flags; 4974 (struct ieee80211_hdr_3addr *)buf->packet;
2097 4975 struct iw_quality qual;
2098 at76_dbg(DBG_MAC80211, "%s(): changed_flags=0x%08x "
2099 "total_flags=0x%08x mc_count=%d",
2100 __func__, changed_flags, *total_flags, mc_count);
2101 4976
2102 flags = changed_flags & AT76_SUPPORTED_FILTERS; 4977 /* We can only set the level here */
2103 *total_flags = AT76_SUPPORTED_FILTERS; 4978 qual.updated = IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID;
4979 qual.level = 0;
4980 qual.noise = 0;
4981 at76_calc_level(priv, buf, &qual);
2104 4982
2105 /* FIXME: access to priv->promisc should be protected with 4983 spin_lock_bh(&priv->spy_spinlock);
2106 * priv->mtx, but it's impossible because this function needs to be
2107 * atomic */
2108 4984
2109 if (flags && !priv->promisc) { 4985 if (priv->spy_data.spy_number > 0)
2110 /* mac80211 wants us to enable promiscuous mode */ 4986 wireless_spy_update(priv->netdev, hdr->addr2, &qual);
2111 priv->promisc = 1;
2112 } else if (!flags && priv->promisc) {
2113 /* we need to disable promiscuous mode */
2114 priv->promisc = 0;
2115 } else
2116 return;
2117 4987
2118 queue_work(hw->workqueue, &priv->work_set_promisc); 4988 spin_unlock_bh(&priv->spy_spinlock);
2119} 4989}
2120 4990
2121static int at76_set_key_oldfw(struct ieee80211_hw *hw, enum set_key_cmd cmd, 4991static void at76_rx_tasklet(unsigned long param)
2122 const u8 *local_address, const u8 *address,
2123 struct ieee80211_key_conf *key)
2124{ 4992{
2125 struct at76_priv *priv = hw->priv; 4993 struct urb *urb = (struct urb *)param;
2126 4994 struct at76_priv *priv = urb->context;
2127 int i; 4995 struct net_device *netdev = priv->netdev;
2128 4996 struct at76_rx_buffer *buf;
2129 at76_dbg(DBG_MAC80211, "%s(): cmd %d key->alg %d key->keyidx %d " 4997 struct ieee80211_hdr_3addr *i802_11_hdr;
2130 "key->keylen %d", 4998 u16 frame_ctl;
2131 __func__, cmd, key->alg, key->keyidx, key->keylen);
2132 4999
2133 if (key->alg != ALG_WEP) 5000 if (priv->device_unplugged) {
2134 return -EOPNOTSUPP; 5001 at76_dbg(DBG_DEVSTART, "device unplugged");
5002 if (urb)
5003 at76_dbg(DBG_DEVSTART, "urb status %d", urb->status);
5004 return;
5005 }
2135 5006
2136 key->hw_key_idx = key->keyidx; 5007 if (!priv->rx_skb || !netdev || !priv->rx_skb->data)
5008 return;
2137 5009
2138 mutex_lock(&priv->mtx); 5010 buf = (struct at76_rx_buffer *)priv->rx_skb->data;
2139 5011
2140 switch (cmd) { 5012 i802_11_hdr = (struct ieee80211_hdr_3addr *)buf->packet;
2141 case SET_KEY:
2142 memcpy(priv->wep_keys[key->keyidx], key->key, key->keylen);
2143 priv->wep_keys_len[key->keyidx] = key->keylen;
2144 5013
2145 /* FIXME: find out how to do this properly */ 5014 frame_ctl = le16_to_cpu(i802_11_hdr->frame_ctl);
2146 priv->wep_key_id = key->keyidx;
2147 5015
2148 break; 5016 if (urb->status != 0) {
2149 case DISABLE_KEY: 5017 if (urb->status != -ENOENT && urb->status != -ECONNRESET)
2150 default: 5018 at76_dbg(DBG_URB,
2151 priv->wep_keys_len[key->keyidx] = 0; 5019 "%s %s: - nonzero Rx bulk status received: %d",
2152 break; 5020 __func__, netdev->name, urb->status);
5021 return;
2153 } 5022 }
2154 5023
2155 priv->wep_enabled = 0; 5024 at76_dbg(DBG_RX_ATMEL_HDR,
5025 "%s: rx frame: rate %d rssi %d noise %d link %d %s",
5026 priv->netdev->name, buf->rx_rate, buf->rssi, buf->noise_level,
5027 buf->link_quality, hex2str(i802_11_hdr, 48));
5028 if (priv->iw_mode == IW_MODE_MONITOR) {
5029 at76_rx_monitor_mode(priv);
5030 goto exit;
5031 }
2156 5032
2157 for (i = 0; i < WEP_KEYS; i++) { 5033 /* there is a new bssid around, accept it: */
2158 if (priv->wep_keys_len[i] != 0) 5034 if (buf->newbss && priv->iw_mode == IW_MODE_ADHOC) {
2159 priv->wep_enabled = 1; 5035 at76_dbg(DBG_PROGRESS, "%s: rx newbss", netdev->name);
5036 schedule_work(&priv->work_new_bss);
2160 } 5037 }
2161 5038
2162 at76_startup_device(priv); 5039 switch (frame_ctl & IEEE80211_FCTL_FTYPE) {
5040 case IEEE80211_FTYPE_DATA:
5041 at76_rx_data(priv);
5042 break;
2163 5043
2164 mutex_unlock(&priv->mtx); 5044 case IEEE80211_FTYPE_MGMT:
5045 /* jal: TODO: find out if we can update iwspy also on
5046 other frames than management (might depend on the
5047 radio chip / firmware version !) */
2165 5048
2166 return 0; 5049 at76_iwspy_update(priv, buf);
2167}
2168 5050
2169static int at76_set_key_newfw(struct ieee80211_hw *hw, enum set_key_cmd cmd, 5051 at76_rx_mgmt(priv, buf);
2170 const u8 *local_address, const u8 *address, 5052 break;
2171 struct ieee80211_key_conf *key)
2172{
2173 struct at76_priv *priv = hw->priv;
2174 int ret = -EOPNOTSUPP;
2175
2176 at76_dbg(DBG_MAC80211, "%s(): cmd %d key->alg %d key->keyidx %d "
2177 "key->keylen %d",
2178 __func__, cmd, key->alg, key->keyidx, key->keylen);
2179 5053
2180 mutex_lock(&priv->mtx); 5054 case IEEE80211_FTYPE_CTL:
5055 at76_dbg(DBG_RX_CTRL, "%s: ignored ctrl frame: %04x",
5056 priv->netdev->name, frame_ctl);
5057 break;
2181 5058
2182 priv->mib_buf.type = MIB_MAC_ENCRYPTION; 5059 default:
5060 printk(KERN_DEBUG "%s: ignoring frame with framectl 0x%04x\n",
5061 priv->netdev->name, frame_ctl);
5062 }
5063exit:
5064 at76_submit_rx_urb(priv);
5065}
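
The receive tasklet above dispatches on the frame-control word: the type bits pick data, management or control handling, and at76_rx_mgmt() then switches on the management subtype. Extracting those bits from a host-order frame-control value is a pair of masks (the mask values below are the standard ones, 0x000c for the type and 0x00f0 for the subtype):

#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE 0x000c       /* frame type: management/control/data */
#define FCTL_STYPE 0x00f0       /* frame subtype, e.g. beacon, auth, ... */

int main(void)
{
        uint16_t frame_ctl = 0x0080;    /* example: a beacon frame */
        uint16_t ftype = frame_ctl & FCTL_FTYPE;
        uint16_t stype = frame_ctl & FCTL_STYPE;

        printf("type 0x%04x subtype 0x%04x\n", ftype, stype);
        return 0;
}
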
2183 5066
2184 if (cmd == DISABLE_KEY) { 5067/* Load firmware into kernel memory and parse it */
2185 priv->mib_buf.size = CIPHER_KEY_LEN; 5068static struct fwentry *at76_load_firmware(struct usb_device *udev,
2186 priv->mib_buf.index = offsetof(struct mib_mac_encryption, 5069 enum board_type board_type)
2187 cipher_default_keyvalue[key->keyidx]); 5070{
2188 memset(priv->mib_buf.data.data, 0, CIPHER_KEY_LEN); 5071 int ret;
2189 if (at76_set_mib(priv, &priv->mib_buf) != CMD_STATUS_COMPLETE) 5072 char *str;
2190 ret = -EOPNOTSUPP; /* -EIO would be probably better */ 5073 struct at76_fw_header *fwh;
2191 else { 5074 struct fwentry *fwe = &firmwares[board_type];
2192 5075
2193 priv->keys[key->keyidx].cipher = CIPHER_NONE; 5076 mutex_lock(&fw_mutex);
2194 priv->keys[key->keyidx].keylen = 0;
2195 };
2196 if (priv->default_group_key == key->keyidx)
2197 priv->default_group_key = 0xff;
2198 5077
2199 if (priv->default_pairwise_key == key->keyidx) 5078 if (fwe->loaded) {
2200 priv->default_pairwise_key = 0xff; 5079 at76_dbg(DBG_FW, "re-using previously loaded fw");
2201 /* If default pairwise key is removed, fall back to
2202 * group key? */
2203 ret = 0;
2204 goto exit; 5080 goto exit;
2205 }; 5081 }
2206
2207 if (cmd == SET_KEY) {
2208 /* store key into MIB */
2209 priv->mib_buf.size = CIPHER_KEY_LEN;
2210 priv->mib_buf.index = offsetof(struct mib_mac_encryption,
2211 cipher_default_keyvalue[key->keyidx]);
2212 memset(priv->mib_buf.data.data, 0, CIPHER_KEY_LEN);
2213 memcpy(priv->mib_buf.data.data, key->key, key->keylen);
2214
2215 switch (key->alg) {
2216 case ALG_WEP:
2217 if (key->keylen == 5) {
2218 priv->keys[key->keyidx].cipher =
2219 CIPHER_WEP64;
2220 priv->keys[key->keyidx].keylen = 8;
2221 } else if (key->keylen == 13) {
2222 priv->keys[key->keyidx].cipher =
2223 CIPHER_WEP128;
2224 /* Firmware needs this */
2225 priv->keys[key->keyidx].keylen = 8;
2226 } else {
2227 ret = -EOPNOTSUPP;
2228 goto exit;
2229 };
2230 break;
2231 case ALG_TKIP:
2232 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2233 priv->keys[key->keyidx].cipher = CIPHER_TKIP;
2234 priv->keys[key->keyidx].keylen = 12;
2235 break;
2236 5082
2237 case ALG_CCMP: 5083 at76_dbg(DBG_FW, "downloading firmware %s", fwe->fwname);
2238 if (!at76_is_505a(priv->board_type)) { 5084 ret = request_firmware(&fwe->fw, fwe->fwname, &udev->dev);
2239 ret = -EOPNOTSUPP; 5085 if (ret < 0) {
2240 goto exit; 5086 dev_printk(KERN_ERR, &udev->dev, "firmware %s not found!\n",
2241 }; 5087 fwe->fwname);
2242 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 5088 dev_printk(KERN_ERR, &udev->dev,
2243 priv->keys[key->keyidx].cipher = CIPHER_CCMP; 5089 "you may need to download the firmware from "
2244 priv->keys[key->keyidx].keylen = 16; 5090 "http://developer.berlios.de/projects/at76c503a/");
2245 break; 5091 goto exit;
5092 }
2246 5093
2247 default: 5094 at76_dbg(DBG_FW, "got it.");
2248 ret = -EOPNOTSUPP; 5095 fwh = (struct at76_fw_header *)(fwe->fw->data);
2249 goto exit;
2250 };
2251
2252 priv->mib_buf.data.data[38] = priv->keys[key->keyidx].cipher;
2253 priv->mib_buf.data.data[39] = 1; /* Taken from atmelwlandriver,
2254 not documented */
2255
2256 if (is_valid_ether_addr(address))
2257 /* Pairwise key */
2258 priv->mib_buf.data.data[39] |= (KEY_PAIRWISE | KEY_TX);
2259 else if (is_broadcast_ether_addr(address))
2260 /* Group key */
2261 priv->mib_buf.data.data[39] |= (KEY_TX);
2262 else /* Key used only for transmission ??? */
2263 priv->mib_buf.data.data[39] |= (KEY_TX);
2264
2265 if (at76_set_mib(priv, &priv->mib_buf) !=
2266 CMD_STATUS_COMPLETE) {
2267 ret = -EOPNOTSUPP; /* -EIO would probably be better */
2268 goto exit;
2269 };
2270 5096
2271 if ((key->alg == ALG_TKIP) || (key->alg == ALG_CCMP)) 5097 if (fwe->fw->size <= sizeof(*fwh)) {
2272 at76_reset_rsc(priv); 5098 dev_printk(KERN_ERR, &udev->dev,
5099 "firmware is too short (0x%zx)\n", fwe->fw->size);
5100 goto exit;
5101 }
2273 5102
2274 key->hw_key_idx = key->keyidx; 5103 /* CRC currently not checked */
5104 fwe->board_type = le32_to_cpu(fwh->board_type);
5105 if (fwe->board_type != board_type) {
5106 dev_printk(KERN_ERR, &udev->dev,
5107 "board type mismatch, requested %u, got %u\n",
5108 board_type, fwe->board_type);
5109 goto exit;
5110 }
2275 5111
2276 /* Set up default keys */ 5112 fwe->fw_version.major = fwh->major;
2277 if (is_broadcast_ether_addr(address)) 5113 fwe->fw_version.minor = fwh->minor;
2278 priv->default_group_key = key->keyidx; 5114 fwe->fw_version.patch = fwh->patch;
2279 if (is_valid_ether_addr(address)) 5115 fwe->fw_version.build = fwh->build;
2280 priv->default_pairwise_key = key->keyidx;
2281 5116
2282 /* Set up encryption MIBs */ 5117 str = (char *)fwh + le32_to_cpu(fwh->str_offset);
5118 fwe->intfw = (u8 *)fwh + le32_to_cpu(fwh->int_fw_offset);
5119 fwe->intfw_size = le32_to_cpu(fwh->int_fw_len);
5120 fwe->extfw = (u8 *)fwh + le32_to_cpu(fwh->ext_fw_offset);
5121 fwe->extfw_size = le32_to_cpu(fwh->ext_fw_len);
2283 5122
2284 /* first block of settings */ 5123 fwe->loaded = 1;
2285 priv->mib_buf.size = 3;
2286 priv->mib_buf.index = offsetof(struct mib_mac_encryption,
2287 privacy_invoked);
2288 priv->mib_buf.data.data[0] = 1; /* privacy_invoked */
2289 priv->mib_buf.data.data[1] = priv->default_pairwise_key;
2290 priv->mib_buf.data.data[2] = priv->default_group_key;
2291 5124
2292 ret = at76_set_mib(priv, &priv->mib_buf); 5125 dev_printk(KERN_DEBUG, &udev->dev,
2293 if (ret != CMD_STATUS_COMPLETE) 5126 "using firmware %s (version %d.%d.%d-%d)\n",
2294 goto exit; 5127 fwe->fwname, fwh->major, fwh->minor, fwh->patch, fwh->build);
2295 5128
2296 /* second block of settings */ 5129 at76_dbg(DBG_DEVSTART, "board %u, int %d:%d, ext %d:%d", board_type,
2297 priv->mib_buf.size = 3; 5130 le32_to_cpu(fwh->int_fw_offset), le32_to_cpu(fwh->int_fw_len),
2298 priv->mib_buf.index = offsetof(struct mib_mac_encryption, 5131 le32_to_cpu(fwh->ext_fw_offset), le32_to_cpu(fwh->ext_fw_len));
2299 exclude_unencrypted); 5132 at76_dbg(DBG_DEVSTART, "firmware id %s", str);
2300 priv->mib_buf.data.data[0] = 1; /* exclude_unencrypted */
2301 priv->mib_buf.data.data[1] = 0; /* wep_encryption_type */
2302 priv->mib_buf.data.data[2] = 0; /* ckip_key_permutation */
2303 5133
2304 ret = at76_set_mib(priv, &priv->mib_buf);
2305 if (ret != CMD_STATUS_COMPLETE)
2306 goto exit;
2307 ret = 0;
2308 };
2309exit: 5134exit:
2310 at76_dump_mib_mac_encryption(priv); 5135 mutex_unlock(&fw_mutex);
2311 mutex_unlock(&priv->mtx);
2312 return ret;
2313}
2314
2315static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2316 const u8 *local_address, const u8 *address,
2317 struct ieee80211_key_conf *key)
2318{
2319 struct at76_priv *priv = hw->priv;
2320
2321 at76_dbg(DBG_MAC80211, "%s(): cmd %d key->alg %d key->keyidx %d "
2322 "key->keylen %d",
2323 __func__, cmd, key->alg, key->keyidx, key->keylen);
2324 5136
2325 if (FIRMWARE_IS_WPA(priv->fw_version)) 5137 if (fwe->loaded)
2326 return at76_set_key_newfw(hw, cmd, local_address, address, key); 5138 return fwe;
2327 else 5139 else
2328 return at76_set_key_oldfw(hw, cmd, local_address, address, key); 5140 return NULL;
2329
2330} 5141}
2331 5142
2332static const struct ieee80211_ops at76_ops = {
2333 .tx = at76_mac80211_tx,
2334 .add_interface = at76_add_interface,
2335 .remove_interface = at76_remove_interface,
2336 .config = at76_config,
2337 .config_interface = at76_config_interface,
2338 .configure_filter = at76_configure_filter,
2339 .start = at76_mac80211_start,
2340 .stop = at76_mac80211_stop,
2341 .hw_scan = at76_hw_scan,
2342 .set_key = at76_set_key,
2343};
2344
2345/* Allocate network device and initialize private data */ 5143/* Allocate network device and initialize private data */
2346static struct at76_priv *at76_alloc_new_device(struct usb_device *udev) 5144static struct at76_priv *at76_alloc_new_device(struct usb_device *udev)
2347{ 5145{
2348 struct ieee80211_hw *hw; 5146 struct net_device *netdev;
2349 struct at76_priv *priv; 5147 struct at76_priv *priv;
5148 int i;
2350 5149
2351 hw = ieee80211_alloc_hw(sizeof(struct at76_priv), &at76_ops); 5150 /* allocate memory for our device state and initialize it */
2352 if (!hw) { 5151 netdev = alloc_etherdev(sizeof(struct at76_priv));
2353 printk(KERN_ERR DRIVER_NAME ": could not register" 5152 if (!netdev) {
2354 " ieee80211_hw\n"); 5153 dev_printk(KERN_ERR, &udev->dev, "out of memory\n");
2355 return NULL; 5154 return NULL;
2356 } 5155 }
2357 5156
2358 priv = hw->priv; 5157 priv = netdev_priv(netdev);
2359 priv->hw = hw;
2360 5158
2361 priv->udev = udev; 5159 priv->udev = udev;
5160 priv->netdev = netdev;
2362 5161
2363 mutex_init(&priv->mtx); 5162 mutex_init(&priv->mtx);
5163 INIT_WORK(&priv->work_assoc_done, at76_work_assoc_done);
5164 INIT_WORK(&priv->work_join, at76_work_join);
5165 INIT_WORK(&priv->work_new_bss, at76_work_new_bss);
5166 INIT_WORK(&priv->work_start_scan, at76_work_start_scan);
2364 INIT_WORK(&priv->work_set_promisc, at76_work_set_promisc); 5167 INIT_WORK(&priv->work_set_promisc, at76_work_set_promisc);
2365 INIT_WORK(&priv->work_submit_rx, at76_work_submit_rx); 5168 INIT_WORK(&priv->work_submit_rx, at76_work_submit_rx);
2366 INIT_DELAYED_WORK(&priv->dwork_hw_scan, at76_dwork_hw_scan); 5169 INIT_DELAYED_WORK(&priv->dwork_restart, at76_dwork_restart);
5170 INIT_DELAYED_WORK(&priv->dwork_get_scan, at76_dwork_get_scan);
5171 INIT_DELAYED_WORK(&priv->dwork_beacon, at76_dwork_beacon);
5172 INIT_DELAYED_WORK(&priv->dwork_auth, at76_dwork_auth);
5173 INIT_DELAYED_WORK(&priv->dwork_assoc, at76_dwork_assoc);
5174
5175 spin_lock_init(&priv->mgmt_spinlock);
5176 priv->next_mgmt_bulk = NULL;
5177 priv->mac_state = MAC_INIT;
5178
5179 /* initialize empty BSS list */
5180 priv->curr_bss = NULL;
5181 INIT_LIST_HEAD(&priv->bss_list);
5182 spin_lock_init(&priv->bss_list_spinlock);
5183
5184 init_timer(&priv->bss_list_timer);
5185 priv->bss_list_timer.data = (unsigned long)priv;
5186 priv->bss_list_timer.function = at76_bss_list_timeout;
5187
5188 spin_lock_init(&priv->spy_spinlock);
5189
5190 /* mark all rx data entries as unused */
5191 for (i = 0; i < NR_RX_DATA_BUF; i++)
5192 priv->rx_data[i].skb = NULL;
2367 5193
2368 priv->rx_tasklet.func = at76_rx_tasklet; 5194 priv->rx_tasklet.func = at76_rx_tasklet;
2369 priv->rx_tasklet.data = 0; 5195 priv->rx_tasklet.data = 0;
@@ -2371,9 +5197,6 @@ static struct at76_priv *at76_alloc_new_device(struct usb_device *udev)
2371 priv->pm_mode = AT76_PM_OFF; 5197 priv->pm_mode = AT76_PM_OFF;
2372 priv->pm_period = 0; 5198 priv->pm_period = 0;
2373 5199
2374 /* unit: microseconds */
2375 priv->hw->channel_change_time = 100000;
2376
2377 return priv; 5200 return priv;
2378} 5201}
2379 5202
@@ -2436,42 +5259,11 @@ static int at76_alloc_urbs(struct at76_priv *priv,
2436 return 0; 5259 return 0;
2437} 5260}
2438 5261
2439static struct ieee80211_rate at76_rates[] = {
2440 { .bitrate = 10, .hw_value = TX_RATE_1MBIT, },
2441 { .bitrate = 20, .hw_value = TX_RATE_2MBIT, },
2442 { .bitrate = 55, .hw_value = TX_RATE_5_5MBIT, },
2443 { .bitrate = 110, .hw_value = TX_RATE_11MBIT, },
2444};
2445
2446static struct ieee80211_channel at76_channels[] = {
2447 { .center_freq = 2412, .hw_value = 1 },
2448 { .center_freq = 2417, .hw_value = 2 },
2449 { .center_freq = 2422, .hw_value = 3 },
2450 { .center_freq = 2427, .hw_value = 4 },
2451 { .center_freq = 2432, .hw_value = 5 },
2452 { .center_freq = 2437, .hw_value = 6 },
2453 { .center_freq = 2442, .hw_value = 7 },
2454 { .center_freq = 2447, .hw_value = 8 },
2455 { .center_freq = 2452, .hw_value = 9 },
2456 { .center_freq = 2457, .hw_value = 10 },
2457 { .center_freq = 2462, .hw_value = 11 },
2458 { .center_freq = 2467, .hw_value = 12 },
2459 { .center_freq = 2472, .hw_value = 13 },
2460 { .center_freq = 2484, .hw_value = 14 }
2461};
2462
2463static struct ieee80211_supported_band at76_supported_band = {
2464 .channels = at76_channels,
2465 .n_channels = ARRAY_SIZE(at76_channels),
2466 .bitrates = at76_rates,
2467 .n_bitrates = ARRAY_SIZE(at76_rates),
2468};
2469
2470/* Register network device and initialize the hardware */ 5262/* Register network device and initialize the hardware */
2471static int at76_init_new_device(struct at76_priv *priv, 5263static int at76_init_new_device(struct at76_priv *priv,
2472 struct usb_interface *interface) 5264 struct usb_interface *interface)
2473{ 5265{
2474 struct device *dev = &interface->dev; 5266 struct net_device *netdev = priv->netdev;
2475 int ret; 5267 int ret;
2476 5268
2477 /* set up the endpoint information */ 5269 /* set up the endpoint information */
@@ -2487,11 +5279,14 @@ static int at76_init_new_device(struct at76_priv *priv,
2487 /* MAC address */ 5279 /* MAC address */
2488 ret = at76_get_hw_config(priv); 5280 ret = at76_get_hw_config(priv);
2489 if (ret < 0) { 5281 if (ret < 0) {
2490 dev_err(dev, "cannot get MAC address\n"); 5282 dev_printk(KERN_ERR, &interface->dev,
5283 "cannot get MAC address\n");
2491 goto exit; 5284 goto exit;
2492 } 5285 }
2493 5286
2494 priv->domain = at76_get_reg_domain(priv->regulatory_domain); 5287 priv->domain = at76_get_reg_domain(priv->regulatory_domain);
5288 /* init. netdev->dev_addr */
5289 memcpy(netdev->dev_addr, priv->mac_addr, ETH_ALEN);
2495 5290
2496 priv->channel = DEF_CHANNEL; 5291 priv->channel = DEF_CHANNEL;
2497 priv->iw_mode = IW_MODE_INFRA; 5292 priv->iw_mode = IW_MODE_INFRA;
@@ -2501,54 +5296,47 @@ static int at76_init_new_device(struct at76_priv *priv,
2501 priv->txrate = TX_RATE_AUTO; 5296 priv->txrate = TX_RATE_AUTO;
2502 priv->preamble_type = PREAMBLE_TYPE_LONG; 5297 priv->preamble_type = PREAMBLE_TYPE_LONG;
2503 priv->beacon_period = 100; 5298 priv->beacon_period = 100;
5299 priv->beacons_last_qual = jiffies;
2504 priv->auth_mode = WLAN_AUTH_OPEN; 5300 priv->auth_mode = WLAN_AUTH_OPEN;
2505 priv->scan_min_time = DEF_SCAN_MIN_TIME; 5301 priv->scan_min_time = DEF_SCAN_MIN_TIME;
2506 priv->scan_max_time = DEF_SCAN_MAX_TIME; 5302 priv->scan_max_time = DEF_SCAN_MAX_TIME;
2507 priv->scan_mode = SCAN_TYPE_ACTIVE; 5303 priv->scan_mode = SCAN_TYPE_ACTIVE;
2508 priv->default_pairwise_key = 0xff;
2509 priv->default_group_key = 0xff;
2510
2511 /* mac80211 initialisation */
2512 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band;
2513 5304
2514 if (FIRMWARE_IS_WPA(priv->fw_version) && 5305 netdev->flags &= ~IFF_MULTICAST; /* not yet or never */
2515 (at76_is_503rfmd(priv->board_type) || 5306 netdev->open = at76_open;
2516 at76_is_505(priv->board_type))) 5307 netdev->stop = at76_stop;
2517 priv->hw->flags = IEEE80211_HW_SIGNAL_UNSPEC; 5308 netdev->get_stats = at76_get_stats;
2518 else 5309 netdev->ethtool_ops = &at76_ethtool_ops;
2519 priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 5310
2520 IEEE80211_HW_SIGNAL_UNSPEC; 5311 /* Add pointers to enable iwspy support. */
2521 5312 priv->wireless_data.spy_data = &priv->spy_data;
2522 priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 5313 netdev->wireless_data = &priv->wireless_data;
2523 5314
2524 SET_IEEE80211_DEV(priv->hw, &interface->dev); 5315 netdev->hard_start_xmit = at76_tx;
2525 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); 5316 netdev->tx_timeout = at76_tx_timeout;
2526 5317 netdev->watchdog_timeo = 2 * HZ;
2527 ret = ieee80211_register_hw(priv->hw); 5318 netdev->wireless_handlers = &at76_handler_def;
5319 netdev->set_multicast_list = at76_set_multicast;
5320 netdev->set_mac_address = at76_set_mac_address;
5321 dev_alloc_name(netdev, "wlan%d");
5322
5323 ret = register_netdev(priv->netdev);
2528 if (ret) { 5324 if (ret) {
2529 dev_err(dev, "cannot register mac80211 hw (status %d)!\n", ret); 5325 dev_printk(KERN_ERR, &interface->dev,
5326 "cannot register netdevice (status %d)!\n", ret);
2530 goto exit; 5327 goto exit;
2531 } 5328 }
5329 priv->netdev_registered = 1;
2532 5330
2533 priv->mac80211_registered = 1; 5331 printk(KERN_INFO "%s: USB %s, MAC %s, firmware %d.%d.%d-%d\n",
5332 netdev->name, dev_name(&interface->dev), mac2str(priv->mac_addr),
5333 priv->fw_version.major, priv->fw_version.minor,
5334 priv->fw_version.patch, priv->fw_version.build);
5335 printk(KERN_INFO "%s: regulatory domain 0x%02x: %s\n", netdev->name,
5336 priv->regulatory_domain, priv->domain->name);
2534 5337
2535 dev_info(dev, "%s: USB %s, MAC %s, firmware %d.%d.%d-%d\n", 5338 /* we let this timer run the whole time this driver instance lives */
2536 wiphy_name(priv->hw->wiphy), 5339 mod_timer(&priv->bss_list_timer, jiffies + BSS_LIST_TIMEOUT);
2537 dev_name(&interface->dev), mac2str(priv->mac_addr),
2538 priv->fw_version.major, priv->fw_version.minor,
2539 priv->fw_version.patch, priv->fw_version.build);
2540 dev_info(dev, "%s: regulatory domain 0x%02x: %s\n",
2541 wiphy_name(priv->hw->wiphy),
2542 priv->regulatory_domain, priv->domain->name);
2543 dev_info(dev, "%s: WPA support: ", wiphy_name(priv->hw->wiphy));
2544 if (!FIRMWARE_IS_WPA(priv->fw_version))
2545 printk("none\n");
2546 else {
2547 if (!at76_is_505a(priv->board_type))
2548 printk("TKIP\n");
2549 else
2550 printk("TKIP, AES/CCMP\n");
2551 };
2552 5340
2553exit: 5341exit:
2554 return ret; 5342 return ret;
@@ -2556,13 +5344,15 @@ exit:
2556 5344
2557static void at76_delete_device(struct at76_priv *priv) 5345static void at76_delete_device(struct at76_priv *priv)
2558{ 5346{
5347 int i;
5348
2559 at76_dbg(DBG_PROC_ENTRY, "%s: ENTER", __func__); 5349 at76_dbg(DBG_PROC_ENTRY, "%s: ENTER", __func__);
2560 5350
2561 /* The device is gone, don't bother turning it off */ 5351 /* The device is gone, don't bother turning it off */
2562 priv->device_unplugged = 1; 5352 priv->device_unplugged = 1;
2563 5353
2564 if (priv->mac80211_registered) 5354 if (priv->netdev_registered)
2565 ieee80211_unregister_hw(priv->hw); 5355 unregister_netdev(priv->netdev);
2566 5356
2567 /* assuming we used keventd, it must quiesce too */ 5357 /* assuming we used keventd, it must quiesce too */
2568 flush_scheduled_work(); 5358 flush_scheduled_work();
@@ -2583,11 +5373,25 @@ static void at76_delete_device(struct at76_priv *priv)
2583 if (priv->rx_skb) 5373 if (priv->rx_skb)
2584 kfree_skb(priv->rx_skb); 5374 kfree_skb(priv->rx_skb);
2585 5375
5376 at76_free_bss_list(priv);
5377 del_timer_sync(&priv->bss_list_timer);
5378 cancel_delayed_work(&priv->dwork_get_scan);
5379 cancel_delayed_work(&priv->dwork_beacon);
5380 cancel_delayed_work(&priv->dwork_auth);
5381 cancel_delayed_work(&priv->dwork_assoc);
5382
5383 if (priv->mac_state == MAC_CONNECTED)
5384 at76_iwevent_bss_disconnect(priv->netdev);
5385
5386 for (i = 0; i < NR_RX_DATA_BUF; i++)
5387 if (priv->rx_data[i].skb) {
5388 dev_kfree_skb(priv->rx_data[i].skb);
5389 priv->rx_data[i].skb = NULL;
5390 }
2586 usb_put_dev(priv->udev); 5391 usb_put_dev(priv->udev);
2587 5392
2588 at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw", 5393 at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/netdev", __func__);
2589 __func__); 5394 free_netdev(priv->netdev); /* priv is in netdev */
2590 ieee80211_free_hw(priv->hw);
2591 5395
2592 at76_dbg(DBG_PROC_ENTRY, "%s: EXIT", __func__); 5396 at76_dbg(DBG_PROC_ENTRY, "%s: EXIT", __func__);
2593} 5397}
@@ -2621,8 +5425,8 @@ static int at76_probe(struct usb_interface *interface,
2621 we get 204 with 2.4.23, Fiberline FL-WL240u (505A+RFMD2958) ??? */ 5425 we get 204 with 2.4.23, Fiberline FL-WL240u (505A+RFMD2958) ??? */
2622 5426
2623 if (op_mode == OPMODE_HW_CONFIG_MODE) { 5427 if (op_mode == OPMODE_HW_CONFIG_MODE) {
2624 dev_err(&interface->dev, 5428 dev_printk(KERN_ERR, &interface->dev,
2625 "cannot handle a device in HW_CONFIG_MODE\n"); 5429 "cannot handle a device in HW_CONFIG_MODE\n");
2626 ret = -EBUSY; 5430 ret = -EBUSY;
2627 goto error; 5431 goto error;
2628 } 5432 }
@@ -2630,12 +5434,13 @@ static int at76_probe(struct usb_interface *interface,
2630 if (op_mode != OPMODE_NORMAL_NIC_WITH_FLASH 5434 if (op_mode != OPMODE_NORMAL_NIC_WITH_FLASH
2631 && op_mode != OPMODE_NORMAL_NIC_WITHOUT_FLASH) { 5435 && op_mode != OPMODE_NORMAL_NIC_WITHOUT_FLASH) {
2632 /* download internal firmware part */ 5436 /* download internal firmware part */
2633 dev_dbg(&interface->dev, "downloading internal firmware\n"); 5437 dev_printk(KERN_DEBUG, &interface->dev,
5438 "downloading internal firmware\n");
2634 ret = at76_load_internal_fw(udev, fwe); 5439 ret = at76_load_internal_fw(udev, fwe);
2635 if (ret < 0) { 5440 if (ret < 0) {
2636 dev_err(&interface->dev, 5441 dev_printk(KERN_ERR, &interface->dev,
2637 "error %d downloading internal firmware\n", 5442 "error %d downloading internal firmware\n",
2638 ret); 5443 ret);
2639 goto error; 5444 goto error;
2640 } 5445 }
2641 usb_put_dev(udev); 5446 usb_put_dev(udev);
@@ -2660,7 +5465,8 @@ static int at76_probe(struct usb_interface *interface,
2660 need_ext_fw = 1; 5465 need_ext_fw = 1;
2661 5466
2662 if (need_ext_fw) { 5467 if (need_ext_fw) {
2663 dev_dbg(&interface->dev, "downloading external firmware\n"); 5468 dev_printk(KERN_DEBUG, &interface->dev,
5469 "downloading external firmware\n");
2664 5470
2665 ret = at76_load_external_fw(udev, fwe); 5471 ret = at76_load_external_fw(udev, fwe);
2666 if (ret) 5472 if (ret)
@@ -2669,8 +5475,8 @@ static int at76_probe(struct usb_interface *interface,
2669 /* Re-check firmware version */ 5475 /* Re-check firmware version */
2670 ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv)); 5476 ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv));
2671 if (ret < 0) { 5477 if (ret < 0) {
2672 dev_err(&interface->dev, 5478 dev_printk(KERN_ERR, &interface->dev,
2673 "error %d getting firmware version\n", ret); 5479 "error %d getting firmware version\n", ret);
2674 goto error; 5480 goto error;
2675 } 5481 }
2676 } 5482 }
@@ -2681,6 +5487,7 @@ static int at76_probe(struct usb_interface *interface,
2681 goto error; 5487 goto error;
2682 } 5488 }
2683 5489
5490 SET_NETDEV_DEV(priv->netdev, &interface->dev);
2684 usb_set_intfdata(interface, priv); 5491 usb_set_intfdata(interface, priv);
2685 5492
2686 memcpy(&priv->fw_version, &fwv, sizeof(struct mib_fw_version)); 5493 memcpy(&priv->fw_version, &fwv, sizeof(struct mib_fw_version));
@@ -2708,7 +5515,7 @@ static void at76_disconnect(struct usb_interface *interface)
2708 if (!priv) 5515 if (!priv)
2709 return; 5516 return;
2710 5517
2711 printk(KERN_INFO "%s: disconnecting\n", wiphy_name(priv->hw->wiphy)); 5518 printk(KERN_INFO "%s: disconnecting\n", priv->netdev->name);
2712 at76_delete_device(priv); 5519 at76_delete_device(priv);
2713 dev_printk(KERN_INFO, &interface->dev, "disconnected\n"); 5520 dev_printk(KERN_INFO, &interface->dev, "disconnected\n");
2714} 5521}
@@ -2764,8 +5571,5 @@ MODULE_AUTHOR("Alex <alex@foogod.com>");
2764MODULE_AUTHOR("Nick Jones"); 5571MODULE_AUTHOR("Nick Jones");
2765MODULE_AUTHOR("Balint Seeber <n0_5p4m_p13453@hotmail.com>"); 5572MODULE_AUTHOR("Balint Seeber <n0_5p4m_p13453@hotmail.com>");
2766MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>"); 5573MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>");
2767MODULE_AUTHOR("Guido Guenther <agx@sigxcpu.org>");
2768MODULE_AUTHOR("Kalle Valo <kalle.valo@iki.fi>");
2769MODULE_AUTHOR("Milan Plzik <milan.plzik@gmail.com>");
2770MODULE_DESCRIPTION(DRIVER_DESC); 5574MODULE_DESCRIPTION(DRIVER_DESC);
2771MODULE_LICENSE("GPL"); 5575MODULE_LICENSE("GPL");
diff --git a/drivers/staging/at76_usb/at76_usb.h b/drivers/staging/at76_usb/at76_usb.h
index 8bb352f16d45..b20be9da1fa1 100644
--- a/drivers/staging/at76_usb/at76_usb.h
+++ b/drivers/staging/at76_usb/at76_usb.h
@@ -34,6 +34,23 @@ enum board_type {
34 BOARD_505AMX = 8 34 BOARD_505AMX = 8
35}; 35};
36 36
37/* our private ioctls */
38/* preamble length (0 - long, 1 - short, 2 - auto) */
39#define AT76_SET_SHORT_PREAMBLE (SIOCIWFIRSTPRIV + 0)
40#define AT76_GET_SHORT_PREAMBLE (SIOCIWFIRSTPRIV + 1)
41/* which debug channels are enabled */
42#define AT76_SET_DEBUG (SIOCIWFIRSTPRIV + 2)
43#define AT76_GET_DEBUG (SIOCIWFIRSTPRIV + 3)
44/* power save mode (incl. the Atmel proprietary smart save mode) */
45#define AT76_SET_POWERSAVE_MODE (SIOCIWFIRSTPRIV + 4)
46#define AT76_GET_POWERSAVE_MODE (SIOCIWFIRSTPRIV + 5)
47/* min and max channel times for scan */
48#define AT76_SET_SCAN_TIMES (SIOCIWFIRSTPRIV + 6)
49#define AT76_GET_SCAN_TIMES (SIOCIWFIRSTPRIV + 7)
50/* scan mode (0 - active, 1 - passive) */
51#define AT76_SET_SCAN_MODE (SIOCIWFIRSTPRIV + 8)
52#define AT76_GET_SCAN_MODE (SIOCIWFIRSTPRIV + 9)
53
37#define CMD_STATUS_IDLE 0x00 54#define CMD_STATUS_IDLE 0x00
38#define CMD_STATUS_COMPLETE 0x01 55#define CMD_STATUS_COMPLETE 0x01
39#define CMD_STATUS_UNKNOWN 0x02 56#define CMD_STATUS_UNKNOWN 0x02
@@ -65,7 +82,6 @@ enum board_type {
65#define MIB_MAC 0x03 82#define MIB_MAC 0x03
66#define MIB_MAC_MGMT 0x05 83#define MIB_MAC_MGMT 0x05
67#define MIB_MAC_WEP 0x06 84#define MIB_MAC_WEP 0x06
68#define MIB_MAC_ENCRYPTION 0x06
69#define MIB_PHY 0x07 85#define MIB_PHY 0x07
70#define MIB_FW_VERSION 0x08 86#define MIB_FW_VERSION 0x08
71#define MIB_MDOMAIN 0x09 87#define MIB_MDOMAIN 0x09
@@ -90,26 +106,6 @@ enum board_type {
90#define AT76_PM_ON 2 106#define AT76_PM_ON 2
91#define AT76_PM_SMART 3 107#define AT76_PM_SMART 3
92 108
93/* cipher values for encryption keys */
94#define CIPHER_NONE 0 /* this value is only guessed */
95#define CIPHER_WEP64 1
96#define CIPHER_TKIP 2
97#define CIPHER_CCMP 3
98#define CIPHER_CCX 4 /* for consistency sake only */
99#define CIPHER_WEP128 5
100
101/* bit flags key types for encryption keys */
102#define KEY_PAIRWISE 2
103#define KEY_TX 4
104
105#define CIPHER_KEYS (4)
106#define CIPHER_KEY_LEN (40)
107
108struct key_config {
109 u8 cipher;
110 u8 keylen;
111};
112
113struct hwcfg_r505 { 109struct hwcfg_r505 {
114 u8 cr39_values[14]; 110 u8 cr39_values[14];
115 u8 reserved1[14]; 111 u8 reserved1[14];
@@ -151,9 +147,6 @@ union at76_hwcfg {
151 147
152#define WEP_SMALL_KEY_LEN (40 / 8) 148#define WEP_SMALL_KEY_LEN (40 / 8)
153#define WEP_LARGE_KEY_LEN (104 / 8) 149#define WEP_LARGE_KEY_LEN (104 / 8)
154#define WEP_KEYS (4)
155
156
157 150
158struct at76_card_config { 151struct at76_card_config {
159 u8 exclude_unencrypted; 152 u8 exclude_unencrypted;
@@ -168,7 +161,7 @@ struct at76_card_config {
168 u8 privacy_invoked; 161 u8 privacy_invoked;
169 u8 wep_default_key_id; /* 0..3 */ 162 u8 wep_default_key_id; /* 0..3 */
170 u8 current_ssid[32]; 163 u8 current_ssid[32];
171 u8 wep_default_key_value[4][WEP_LARGE_KEY_LEN]; 164 u8 wep_default_key_value[4][WEP_KEY_LEN];
172 u8 ssid_len; 165 u8 ssid_len;
173 u8 short_preamble; 166 u8 short_preamble;
174 __le16 beacon_period; 167 __le16 beacon_period;
@@ -193,7 +186,7 @@ struct at76_rx_buffer {
193 u8 link_quality; 186 u8 link_quality;
194 u8 noise_level; 187 u8 noise_level;
195 __le32 rx_time; 188 __le32 rx_time;
196 u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; 189 u8 packet[IEEE80211_FRAME_LEN + IEEE80211_FCS_LEN];
197} __attribute__((packed)); 190} __attribute__((packed));
198 191
199/* Length of Atmel-specific Tx header before 802.11 frame */ 192/* Length of Atmel-specific Tx header before 802.11 frame */
@@ -203,11 +196,8 @@ struct at76_tx_buffer {
203 __le16 wlength; 196 __le16 wlength;
204 u8 tx_rate; 197 u8 tx_rate;
205 u8 padding; 198 u8 padding;
206 u8 key_id; 199 u8 reserved[4];
207 u8 cipher_type; 200 u8 packet[IEEE80211_FRAME_LEN + IEEE80211_FCS_LEN];
208 u8 cipher_length;
209 u8 reserved;
210 u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
211} __attribute__((packed)); 201} __attribute__((packed));
212 202
213/* defines for scan_type below */ 203/* defines for scan_type below */
@@ -254,7 +244,6 @@ struct set_mib_buffer {
254 u8 byte; 244 u8 byte;
255 __le16 word; 245 __le16 word;
256 u8 addr[ETH_ALEN]; 246 u8 addr[ETH_ALEN];
257 u8 data[256]; /* we need more space for mib_mac_encryption */
258 } data; 247 } data;
259} __attribute__((packed)); 248} __attribute__((packed));
260 249
@@ -328,24 +317,10 @@ struct mib_mac_wep {
328 u8 exclude_unencrypted; 317 u8 exclude_unencrypted;
329 __le32 wep_icv_error_count; 318 __le32 wep_icv_error_count;
330 __le32 wep_excluded_count; 319 __le32 wep_excluded_count;
331 u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN]; 320 u8 wep_default_keyvalue[WEP_KEYS][WEP_KEY_LEN];
332 u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */ 321 u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */
333} __attribute__((packed)); 322} __attribute__((packed));
334 323
335struct mib_mac_encryption {
336 u8 cipher_default_keyvalue[CIPHER_KEYS][CIPHER_KEY_LEN];
337 u8 tkip_bssid[6];
338 u8 privacy_invoked;
339 u8 cipher_default_key_id;
340 u8 cipher_default_group_key_id;
341 u8 exclude_unencrypted;
342 u8 wep_encryption_type;
343 u8 ckip_key_permutation; /* bool */
344 __le32 wep_icv_error_count;
345 __le32 wep_excluded_count;
346 u8 key_rsc[CIPHER_KEYS][8];
347} __attribute__((packed));
348
349struct mib_phy { 324struct mib_phy {
350 __le32 ed_threshold; 325 __le32 ed_threshold;
351 326
@@ -389,6 +364,16 @@ struct at76_fw_header {
389 __le32 ext_fw_len; /* external firmware image length */ 364 __le32 ext_fw_len; /* external firmware image length */
390} __attribute__((packed)); 365} __attribute__((packed));
391 366
367enum mac_state {
368 MAC_INIT,
369 MAC_SCANNING,
370 MAC_AUTH,
371 MAC_ASSOC,
372 MAC_JOINING,
373 MAC_CONNECTED,
374 MAC_OWN_IBSS
375};
376
392/* a description of a regulatory domain and the allowed channels */ 377/* a description of a regulatory domain and the allowed channels */
393struct reg_domain { 378struct reg_domain {
394 u16 code; 379 u16 code;
@@ -396,6 +381,47 @@ struct reg_domain {
396 u32 channel_map; /* if bit N is set, channel (N+1) is allowed */ 381 u32 channel_map; /* if bit N is set, channel (N+1) is allowed */
397}; 382};
398 383
384/* how long we keep an (I)BSS in the bss_list, in jiffies;
385 this should be long enough for the user to retrieve the table
386 (e.g. by iwlist) after the device has started, because all entries
387 from channels other than the one the device locks on get removed, too */
388#define BSS_LIST_TIMEOUT (120 * HZ)
389/* struct to store BSS info found during scan */
390#define BSS_LIST_MAX_RATE_LEN 32 /* 32 rates should be enough ... */
391
392struct bss_info {
393 struct list_head list;
394
395 u8 bssid[ETH_ALEN]; /* bssid */
396 u8 ssid[IW_ESSID_MAX_SIZE]; /* essid */
397 u8 ssid_len; /* length of ssid above */
398 u8 channel;
399 u16 capa; /* BSS capabilities */
400 u16 beacon_interval; /* beacon interval, Kus (1024 microseconds) */
401 u8 rates[BSS_LIST_MAX_RATE_LEN]; /* supported rates in units of
402 500 kbps, ORed with 0x80 for
403 basic rates */
404 u8 rates_len;
405
406 /* quality of received beacon */
407 u8 rssi;
408 u8 link_qual;
409 u8 noise_level;
410
411 unsigned long last_rx; /* time (jiffies) of last beacon received */
412};
413
414/* a rx data buffer to collect rx fragments */
415struct rx_data_buf {
416 u8 sender[ETH_ALEN]; /* sender address */
417 u16 seqnr; /* sequence number */
418 u16 fragnr; /* last fragment received */
419 unsigned long last_rx; /* jiffies of last rx */
420 struct sk_buff *skb; /* == NULL if entry is free */
421};
422
423#define NR_RX_DATA_BUF 8
424
399/* Data for one loaded firmware file */ 425/* Data for one loaded firmware file */
400struct fwentry { 426struct fwentry {
401 const char *const fwname; 427 const char *const fwname;
@@ -412,9 +438,11 @@ struct fwentry {
412 438
413struct at76_priv { 439struct at76_priv {
414 struct usb_device *udev; /* USB device pointer */ 440 struct usb_device *udev; /* USB device pointer */
441 struct net_device *netdev; /* net device pointer */
442 struct net_device_stats stats; /* net device stats */
443 struct iw_statistics wstats; /* wireless stats */
415 444
416 struct sk_buff *rx_skb; /* skbuff for receiving data */ 445 struct sk_buff *rx_skb; /* skbuff for receiving data */
417 struct sk_buff *tx_skb; /* skbuff for transmitting data */
418 void *bulk_out_buffer; /* buffer for sending data */ 446 void *bulk_out_buffer; /* buffer for sending data */
419 447
420 struct urb *tx_urb; /* URB for sending data */ 448 struct urb *tx_urb; /* URB for sending data */
@@ -426,17 +454,26 @@ struct at76_priv {
426 struct mutex mtx; /* locks this structure */ 454 struct mutex mtx; /* locks this structure */
427 455
428 /* work queues */ 456 /* work queues */
457 struct work_struct work_assoc_done;
458 struct work_struct work_join;
459 struct work_struct work_new_bss;
460 struct work_struct work_start_scan;
429 struct work_struct work_set_promisc; 461 struct work_struct work_set_promisc;
430 struct work_struct work_submit_rx; 462 struct work_struct work_submit_rx;
431 struct delayed_work dwork_hw_scan; 463 struct delayed_work dwork_restart;
464 struct delayed_work dwork_get_scan;
465 struct delayed_work dwork_beacon;
466 struct delayed_work dwork_auth;
467 struct delayed_work dwork_assoc;
432 468
433 struct tasklet_struct rx_tasklet; 469 struct tasklet_struct rx_tasklet;
434 470
435 /* the WEP stuff */ 471 /* the WEP stuff */
436 int wep_enabled; /* 1 if WEP is enabled */ 472 int wep_enabled; /* 1 if WEP is enabled */
437 int wep_key_id; /* key id to be used */ 473 int wep_key_id; /* key id to be used */
438 u8 wep_keys[WEP_KEYS][WEP_LARGE_KEY_LEN]; /* WEP keys */ 474 u8 wep_keys[WEP_KEYS][WEP_KEY_LEN]; /* the four WEP keys,
439 u8 wep_keys_len[WEP_KEYS]; /* length of WEP keys */ 475 5 or 13 bytes are used */
476 u8 wep_keys_len[WEP_KEYS]; /* the length of the above keys */
440 477
441 int channel; 478 int channel;
442 int iw_mode; 479 int iw_mode;
@@ -458,13 +495,44 @@ struct at76_priv {
458 int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */ 495 int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */
459 int scan_need_any; /* if set, need to scan for any ESSID */ 496 int scan_need_any; /* if set, need to scan for any ESSID */
460 497
498 /* the list we got from scanning */
499 spinlock_t bss_list_spinlock; /* protects bss_list operations */
500 struct list_head bss_list; /* list of BSS we got beacons from */
501 struct timer_list bss_list_timer; /* timer to purge old entries
502 from bss_list */
503 struct bss_info *curr_bss; /* current BSS */
461 u16 assoc_id; /* current association ID, if associated */ 504 u16 assoc_id; /* current association ID, if associated */
462 505
506 u8 wanted_bssid[ETH_ALEN];
507 int wanted_bssid_valid; /* != 0 if wanted_bssid is to be used */
508
509 /* some data for infrastructure mode only */
510 spinlock_t mgmt_spinlock; /* this spinlock protects access to
511 next_mgmt_bulk */
512
513 struct at76_tx_buffer *next_mgmt_bulk; /* pending management msg to
514 send via bulk out */
515 enum mac_state mac_state;
516 enum {
517 SCAN_IDLE,
518 SCAN_IN_PROGRESS,
519 SCAN_COMPLETED
520 } scan_state;
521 time_t last_scan;
522
523 int retries; /* remaining retries in case of timeout when
524 * sending AuthReq or AssocReq */
463 u8 pm_mode; /* power management mode */ 525 u8 pm_mode; /* power management mode */
464 u32 pm_period; /* power management period in microseconds */ 526 u32 pm_period; /* power management period in microseconds */
465 527
466 struct reg_domain const *domain; /* reg domain description */ 528 struct reg_domain const *domain; /* reg domain description */
467 529
530 /* iwspy support */
531 spinlock_t spy_spinlock;
532 struct iw_spy_data spy_data;
533
534 struct iw_public_data wireless_data;
535
468 /* These fields contain HW config provided by the device (not all of 536 /* These fields contain HW config provided by the device (not all of
469 * these fields are used by all board types) */ 537 * these fields are used by all board types) */
470 u8 mac_addr[ETH_ALEN]; 538 u8 mac_addr[ETH_ALEN];
@@ -472,6 +540,9 @@ struct at76_priv {
472 540
473 struct at76_card_config card_config; 541 struct at76_card_config card_config;
474 542
543 /* store rx fragments until complete */
544 struct rx_data_buf rx_data[NR_RX_DATA_BUF];
545
475 enum board_type board_type; 546 enum board_type board_type;
476 struct mib_fw_version fw_version; 547 struct mib_fw_version fw_version;
477 548
@@ -479,20 +550,58 @@ struct at76_priv {
479 unsigned int netdev_registered:1; 550 unsigned int netdev_registered:1;
480 struct set_mib_buffer mib_buf; /* global buffer for set_mib calls */ 551 struct set_mib_buffer mib_buf; /* global buffer for set_mib calls */
481 552
553 /* beacon counting */
482 int beacon_period; /* period of mgmt beacons, Kus */ 554 int beacon_period; /* period of mgmt beacons, Kus */
555 int beacons_received;
556 unsigned long beacons_last_qual; /* time we restarted counting
557 beacons */
558};
483 559
484 struct ieee80211_hw *hw; 560struct at76_rx_radiotap {
485 int mac80211_registered; 561 struct ieee80211_radiotap_header rt_hdr;
486 562 __le64 rt_tsft;
487 struct key_config keys[4]; /* installed key types */ 563 u8 rt_flags;
488 u8 default_pairwise_key; 564 u8 rt_rate;
489 u8 default_group_key; 565 s8 rt_signal;
566 s8 rt_noise;
490}; 567};
491 568
492#define AT76_SUPPORTED_FILTERS FIF_PROMISC_IN_BSS 569#define AT76_RX_RADIOTAP_PRESENT \
570 ((1 << IEEE80211_RADIOTAP_TSFT) | \
571 (1 << IEEE80211_RADIOTAP_FLAGS) | \
572 (1 << IEEE80211_RADIOTAP_RATE) | \
573 (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) | \
574 (1 << IEEE80211_RADIOTAP_DB_ANTNOISE))
575
576#define BEACON_MAX_DATA_LENGTH 1500
577
578/* the maximum size of an AssocReq packet */
579#define ASSOCREQ_MAX_SIZE \
580 (AT76_TX_HDRLEN + sizeof(struct ieee80211_assoc_request) + \
581 1 + 1 + IW_ESSID_MAX_SIZE + 1 + 1 + 4)
582
583/* for shared secret auth, add the challenge text size */
584#define AUTH_FRAME_SIZE (AT76_TX_HDRLEN + sizeof(struct ieee80211_auth))
585
586/* Maximal number of AuthReq retries */
587#define AUTH_RETRIES 3
493 588
589/* Maximal number of AssocReq retries */
590#define ASSOC_RETRIES 3
591
592/* Beacon timeout in managed mode when we are connected */
593#define BEACON_TIMEOUT (10 * HZ)
594
595/* Timeout for authentication response */
596#define AUTH_TIMEOUT (1 * HZ)
597
598/* Timeout for association response */
599#define ASSOC_TIMEOUT (1 * HZ)
600
601/* Polling interval when scan is running */
494#define SCAN_POLL_INTERVAL (HZ / 4) 602#define SCAN_POLL_INTERVAL (HZ / 4)
495 603
604/* Command completion timeout */
496#define CMD_COMPLETION_TIMEOUT (5 * HZ) 605#define CMD_COMPLETION_TIMEOUT (5 * HZ)
497 606
498#define DEF_RTS_THRESHOLD 1536 607#define DEF_RTS_THRESHOLD 1536
@@ -502,6 +611,8 @@ struct at76_priv {
502#define DEF_SCAN_MIN_TIME 10 611#define DEF_SCAN_MIN_TIME 10
503#define DEF_SCAN_MAX_TIME 120 612#define DEF_SCAN_MAX_TIME 120
504 613
614#define MAX_RTS_THRESHOLD (MAX_FRAG_THRESHOLD + 1)
615
505/* the max padding size for tx in bytes (see calc_padding) */ 616/* the max padding size for tx in bytes (see calc_padding) */
506#define MAX_PADDING_SIZE 53 617#define MAX_PADDING_SIZE 53
507 618
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 5ffe269c2382..ab69c1bf36a8 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -622,7 +622,7 @@ static int set_ctrl_bits(void)
622} 622}
623 623
624/* sets ctrl & data port bits according to current signals values */ 624/* sets ctrl & data port bits according to current signals values */
625static void set_bits(void) 625static void panel_set_bits(void)
626{ 626{
627 set_data_bits(); 627 set_data_bits();
628 set_ctrl_bits(); 628 set_ctrl_bits();
@@ -707,12 +707,12 @@ static void lcd_send_serial(int byte)
707 */ 707 */
708 for (bit = 0; bit < 8; bit++) { 708 for (bit = 0; bit < 8; bit++) {
709 bits.cl = BIT_CLR; /* CLK low */ 709 bits.cl = BIT_CLR; /* CLK low */
710 set_bits(); 710 panel_set_bits();
711 bits.da = byte & 1; 711 bits.da = byte & 1;
712 set_bits(); 712 panel_set_bits();
713 udelay(2); /* maintain the data during 2 us before CLK up */ 713 udelay(2); /* maintain the data during 2 us before CLK up */
714 bits.cl = BIT_SET; /* CLK high */ 714 bits.cl = BIT_SET; /* CLK high */
715 set_bits(); 715 panel_set_bits();
716 udelay(1); /* maintain the strobe during 1 us */ 716 udelay(1); /* maintain the strobe during 1 us */
717 byte >>= 1; 717 byte >>= 1;
718 } 718 }
@@ -727,7 +727,7 @@ static void lcd_backlight(int on)
727 /* The backlight is activated by setting the AUTOFEED line to +5V */ 727 /* The backlight is activated by setting the AUTOFEED line to +5V */
728 spin_lock(&pprt_lock); 728 spin_lock(&pprt_lock);
729 bits.bl = on; 729 bits.bl = on;
730 set_bits(); 730 panel_set_bits();
731 spin_unlock(&pprt_lock); 731 spin_unlock(&pprt_lock);
732} 732}
733 733
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 8bcde8cde554..b2ceb4aff233 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_USB_MON) += mon/
11obj-$(CONFIG_PCI) += host/ 11obj-$(CONFIG_PCI) += host/
12obj-$(CONFIG_USB_EHCI_HCD) += host/ 12obj-$(CONFIG_USB_EHCI_HCD) += host/
13obj-$(CONFIG_USB_ISP116X_HCD) += host/ 13obj-$(CONFIG_USB_ISP116X_HCD) += host/
14obj-$(CONFIG_USB_ISP1760_HCD) += host/
14obj-$(CONFIG_USB_OHCI_HCD) += host/ 15obj-$(CONFIG_USB_OHCI_HCD) += host/
15obj-$(CONFIG_USB_UHCI_HCD) += host/ 16obj-$(CONFIG_USB_UHCI_HCD) += host/
16obj-$(CONFIG_USB_FHCI_HCD) += host/ 17obj-$(CONFIG_USB_FHCI_HCD) += host/
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 97ba4a985edc..326dd7f65ee9 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1349,9 +1349,6 @@ static struct usb_device_id acm_ids[] = {
1349 { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */ 1349 { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
1350 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1350 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1351 }, 1351 },
1352 { USB_DEVICE(0x0e8d, 0x3329), /* i-blue 747, Qstarz BT-Q1000, Holux M-241 */
1353 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1354 },
1355 { USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */ 1352 { USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
1356 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1353 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1357 }, 1354 },
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index d6c5bcd40064..d701bf4698d2 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1622,6 +1622,8 @@ static int qe_ep_disable(struct usb_ep *_ep)
1622 nuke(ep, -ESHUTDOWN); 1622 nuke(ep, -ESHUTDOWN);
1623 ep->desc = NULL; 1623 ep->desc = NULL;
1624 ep->stopped = 1; 1624 ep->stopped = 1;
1625 ep->tx_req = NULL;
1626 qe_ep_reset(udc, ep->epnum);
1625 spin_unlock_irqrestore(&udc->lock, flags); 1627 spin_unlock_irqrestore(&udc->lock, flags);
1626 1628
1627 cpm_muram_free(cpm_muram_offset(ep->rxbase)); 1629 cpm_muram_free(cpm_muram_offset(ep->rxbase));
@@ -1681,14 +1683,11 @@ static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
1681 kfree(req); 1683 kfree(req);
1682} 1684}
1683 1685
1684/* queues (submits) an I/O request to an endpoint */ 1686static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
1685static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1686 gfp_t gfp_flags)
1687{ 1687{
1688 struct qe_ep *ep = container_of(_ep, struct qe_ep, ep); 1688 struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1689 struct qe_req *req = container_of(_req, struct qe_req, req); 1689 struct qe_req *req = container_of(_req, struct qe_req, req);
1690 struct qe_udc *udc; 1690 struct qe_udc *udc;
1691 unsigned long flags;
1692 int reval; 1691 int reval;
1693 1692
1694 udc = ep->udc; 1693 udc = ep->udc;
@@ -1732,7 +1731,7 @@ static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1732 list_add_tail(&req->queue, &ep->queue); 1731 list_add_tail(&req->queue, &ep->queue);
1733 dev_vdbg(udc->dev, "gadget have request in %s! %d\n", 1732 dev_vdbg(udc->dev, "gadget have request in %s! %d\n",
1734 ep->name, req->req.length); 1733 ep->name, req->req.length);
1735 spin_lock_irqsave(&udc->lock, flags); 1734
1736 /* push the request to device */ 1735 /* push the request to device */
1737 if (ep_is_in(ep)) 1736 if (ep_is_in(ep))
1738 reval = ep_req_send(ep, req); 1737 reval = ep_req_send(ep, req);
@@ -1748,11 +1747,24 @@ static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1748 if (ep->dir == USB_DIR_OUT) 1747 if (ep->dir == USB_DIR_OUT)
1749 reval = ep_req_receive(ep, req); 1748 reval = ep_req_receive(ep, req);
1750 1749
1751 spin_unlock_irqrestore(&udc->lock, flags);
1752
1753 return 0; 1750 return 0;
1754} 1751}
1755 1752
1753/* queues (submits) an I/O request to an endpoint */
1754static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1755 gfp_t gfp_flags)
1756{
1757 struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1758 struct qe_udc *udc = ep->udc;
1759 unsigned long flags;
1760 int ret;
1761
1762 spin_lock_irqsave(&udc->lock, flags);
1763 ret = __qe_ep_queue(_ep, _req);
1764 spin_unlock_irqrestore(&udc->lock, flags);
1765 return ret;
1766}
1767
1756/* dequeues (cancels, unlinks) an I/O request from an endpoint */ 1768/* dequeues (cancels, unlinks) an I/O request from an endpoint */
1757static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) 1769static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1758{ 1770{
@@ -2008,7 +2020,7 @@ static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
2008 udc->ep0_dir = USB_DIR_IN; 2020 udc->ep0_dir = USB_DIR_IN;
2009 2021
2010 /* data phase */ 2022 /* data phase */
2011 status = qe_ep_queue(&ep->ep, &req->req, GFP_ATOMIC); 2023 status = __qe_ep_queue(&ep->ep, &req->req);
2012 2024
2013 if (status == 0) 2025 if (status == 0)
2014 return; 2026 return;
@@ -2151,6 +2163,9 @@ static int reset_irq(struct qe_udc *udc)
2151{ 2163{
2152 unsigned char i; 2164 unsigned char i;
2153 2165
2166 if (udc->usb_state == USB_STATE_DEFAULT)
2167 return 0;
2168
2154 qe_usb_disable(); 2169 qe_usb_disable();
2155 out_8(&udc->usb_regs->usb_usadr, 0); 2170 out_8(&udc->usb_regs->usb_usadr, 0);
2156 2171
@@ -2442,8 +2457,12 @@ static int __devinit qe_udc_reg_init(struct qe_udc *udc)
2442 struct usb_ctlr __iomem *qe_usbregs; 2457 struct usb_ctlr __iomem *qe_usbregs;
2443 qe_usbregs = udc->usb_regs; 2458 qe_usbregs = udc->usb_regs;
2444 2459
2445 /* Init the usb register */ 2460 /* Spec says that we must enable the USB controller to change mode. */
2446 out_8(&qe_usbregs->usb_usmod, 0x01); 2461 out_8(&qe_usbregs->usb_usmod, 0x01);
2462 /* Mode changed, now disable it, since muram isn't initialized yet. */
2463 out_8(&qe_usbregs->usb_usmod, 0x00);
2464
2465 /* Initialize the rest. */
2447 out_be16(&qe_usbregs->usb_usbmr, 0); 2466 out_be16(&qe_usbregs->usb_usbmr, 0);
2448 out_8(&qe_usbregs->usb_uscom, 0); 2467 out_8(&qe_usbregs->usb_uscom, 0);
2449 out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR); 2468 out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
@@ -2604,6 +2623,10 @@ static int __devinit qe_udc_probe(struct of_device *ofdev,
2604 (unsigned long)udc_controller); 2623 (unsigned long)udc_controller);
2605 /* request irq and disable DR */ 2624 /* request irq and disable DR */
2606 udc_controller->usb_irq = irq_of_parse_and_map(np, 0); 2625 udc_controller->usb_irq = irq_of_parse_and_map(np, 0);
2626 if (!udc_controller->usb_irq) {
2627 ret = -EINVAL;
2628 goto err_noirq;
2629 }
2607 2630
2608 ret = request_irq(udc_controller->usb_irq, qe_udc_irq, 0, 2631 ret = request_irq(udc_controller->usb_irq, qe_udc_irq, 0,
2609 driver_name, udc_controller); 2632 driver_name, udc_controller);
@@ -2625,6 +2648,8 @@ static int __devinit qe_udc_probe(struct of_device *ofdev,
2625err6: 2648err6:
2626 free_irq(udc_controller->usb_irq, udc_controller); 2649 free_irq(udc_controller->usb_irq, udc_controller);
2627err5: 2650err5:
2651 irq_dispose_mapping(udc_controller->usb_irq);
2652err_noirq:
2628 if (udc_controller->nullmap) { 2653 if (udc_controller->nullmap) {
2629 dma_unmap_single(udc_controller->gadget.dev.parent, 2654 dma_unmap_single(udc_controller->gadget.dev.parent,
2630 udc_controller->nullp, 256, 2655 udc_controller->nullp, 256,
@@ -2648,7 +2673,7 @@ err2:
2648 iounmap(udc_controller->usb_regs); 2673 iounmap(udc_controller->usb_regs);
2649err1: 2674err1:
2650 kfree(udc_controller); 2675 kfree(udc_controller);
2651 2676 udc_controller = NULL;
2652 return ret; 2677 return ret;
2653} 2678}
2654 2679
@@ -2710,6 +2735,7 @@ static int __devexit qe_udc_remove(struct of_device *ofdev)
2710 kfree(ep->txframe); 2735 kfree(ep->txframe);
2711 2736
2712 free_irq(udc_controller->usb_irq, udc_controller); 2737 free_irq(udc_controller->usb_irq, udc_controller);
2738 irq_dispose_mapping(udc_controller->usb_irq);
2713 2739
2714 tasklet_kill(&udc_controller->rx_tasklet); 2740 tasklet_kill(&udc_controller->rx_tasklet);
2715 2741
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 537f953bd7f8..6d106e74265e 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -621,9 +621,9 @@ static int __init aircable_init(void)
621 goto failed_usb_register; 621 goto failed_usb_register;
622 return 0; 622 return 0;
623 623
624failed_serial_register:
625 usb_serial_deregister(&aircable_device);
626failed_usb_register: 624failed_usb_register:
625 usb_serial_deregister(&aircable_device);
626failed_serial_register:
627 return retval; 627 return retval;
628} 628}
629 629
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 75597337583e..f92f4d773374 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -662,6 +662,7 @@ static struct usb_device_id id_table_combined [] = {
662 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, 662 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
663 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, 663 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
664 { USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) }, 664 { USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) },
665 { USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID) },
665 { }, /* Optional parameter entry */ 666 { }, /* Optional parameter entry */
666 { } /* Terminating entry */ 667 { } /* Terminating entry */
667}; 668};
@@ -1064,8 +1065,10 @@ static int set_serial_info(struct tty_struct *tty,
1064 1065
1065 if (!capable(CAP_SYS_ADMIN)) { 1066 if (!capable(CAP_SYS_ADMIN)) {
1066 if (((new_serial.flags & ~ASYNC_USR_MASK) != 1067 if (((new_serial.flags & ~ASYNC_USR_MASK) !=
1067 (priv->flags & ~ASYNC_USR_MASK))) 1068 (priv->flags & ~ASYNC_USR_MASK))) {
1069 unlock_kernel();
1068 return -EPERM; 1070 return -EPERM;
1071 }
1069 priv->flags = ((priv->flags & ~ASYNC_USR_MASK) | 1072 priv->flags = ((priv->flags & ~ASYNC_USR_MASK) |
1070 (new_serial.flags & ASYNC_USR_MASK)); 1073 (new_serial.flags & ASYNC_USR_MASK));
1071 priv->custom_divisor = new_serial.custom_divisor; 1074 priv->custom_divisor = new_serial.custom_divisor;
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 1b62eff475d2..e300c840f8ca 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -844,6 +844,9 @@
844#define TML_VID 0x1B91 /* Vendor ID */ 844#define TML_VID 0x1B91 /* Vendor ID */
845#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */ 845#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */
846 846
847/* NDI Polaris System */
848#define FTDI_NDI_HUC_PID 0xDA70
849
847/* Propox devices */ 850/* Propox devices */
848#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 851#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
849 852
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6c89da9c6fea..bfd0b68ceccd 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -199,14 +199,15 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
199#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 199#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
200 200
201/* FUTURE NOVATEL PRODUCTS */ 201/* FUTURE NOVATEL PRODUCTS */
202#define NOVATELWIRELESS_PRODUCT_EVDO_1 0x6000 202#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0X6000
203#define NOVATELWIRELESS_PRODUCT_HSPA_1 0x7000 203#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001
204#define NOVATELWIRELESS_PRODUCT_EMBEDDED_1 0x8000 204#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000
205#define NOVATELWIRELESS_PRODUCT_GLOBAL_1 0x9000 205#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001
206#define NOVATELWIRELESS_PRODUCT_EVDO_2 0x6001 206#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000
207#define NOVATELWIRELESS_PRODUCT_HSPA_2 0x7001 207#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001
208#define NOVATELWIRELESS_PRODUCT_EMBEDDED_2 0x8001 208#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000
209#define NOVATELWIRELESS_PRODUCT_GLOBAL_2 0x9001 209#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001
210#define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001
210 211
211/* AMOI PRODUCTS */ 212/* AMOI PRODUCTS */
212#define AMOI_VENDOR_ID 0x1614 213#define AMOI_VENDOR_ID 0x1614
@@ -216,6 +217,27 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
216 217
217#define DELL_VENDOR_ID 0x413C 218#define DELL_VENDOR_ID 0x413C
218 219
220/* Dell modems */
221#define DELL_PRODUCT_5700_MINICARD 0x8114
222#define DELL_PRODUCT_5500_MINICARD 0x8115
223#define DELL_PRODUCT_5505_MINICARD 0x8116
224#define DELL_PRODUCT_5700_EXPRESSCARD 0x8117
225#define DELL_PRODUCT_5510_EXPRESSCARD 0x8118
226
227#define DELL_PRODUCT_5700_MINICARD_SPRINT 0x8128
228#define DELL_PRODUCT_5700_MINICARD_TELUS 0x8129
229
230#define DELL_PRODUCT_5720_MINICARD_VZW 0x8133
231#define DELL_PRODUCT_5720_MINICARD_SPRINT 0x8134
232#define DELL_PRODUCT_5720_MINICARD_TELUS 0x8135
233#define DELL_PRODUCT_5520_MINICARD_CINGULAR 0x8136
234#define DELL_PRODUCT_5520_MINICARD_GENERIC_L 0x8137
235#define DELL_PRODUCT_5520_MINICARD_GENERIC_I 0x8138
236
237#define DELL_PRODUCT_5730_MINICARD_SPRINT 0x8180
238#define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181
239#define DELL_PRODUCT_5730_MINICARD_VZW 0x8182
240
219#define KYOCERA_VENDOR_ID 0x0c88 241#define KYOCERA_VENDOR_ID 0x0c88
220#define KYOCERA_PRODUCT_KPC650 0x17da 242#define KYOCERA_PRODUCT_KPC650 0x17da
221#define KYOCERA_PRODUCT_KPC680 0x180a 243#define KYOCERA_PRODUCT_KPC680 0x180a
@@ -274,12 +296,6 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
274#define ERICSSON_VENDOR_ID 0x0bdb 296#define ERICSSON_VENDOR_ID 0x0bdb
275#define ERICSSON_PRODUCT_F3507G 0x1900 297#define ERICSSON_PRODUCT_F3507G 0x1900
276 298
277/* Pantech products */
278#define PANTECH_VENDOR_ID 0x106c
279#define PANTECH_PRODUCT_PC5740 0x3701
280#define PANTECH_PRODUCT_PC5750 0x3702 /* PX-500 */
281#define PANTECH_PRODUCT_UM150 0x3711
282
283static struct usb_device_id option_ids[] = { 299static struct usb_device_id option_ids[] = {
284 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 300 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
285 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 301 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -395,31 +411,37 @@ static struct usb_device_id option_ids[] = {
395 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ 411 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
396 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ 412 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
397 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ 413 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
398 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_1) }, /* Novatel EVDO product */ 414 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, /* Novatel EVDO product */
399 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_1) }, /* Novatel HSPA product */ 415 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
400 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_1) }, /* Novatel Embedded product */ 416 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
401 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_1) }, /* Novatel Global product */ 417 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
402 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_2) }, /* Novatel EVDO product */ 418 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */
403 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_2) }, /* Novatel HSPA product */ 419 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */
404 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_2) }, /* Novatel Embedded product */ 420 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */
405 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_2) }, /* Novatel Global product */ 421 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */
422 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */
406 423
407 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, 424 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
408 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, 425 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
409 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) }, 426 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) },
410 427
411 { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ 428 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
412 { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ 429 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
413 { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ 430 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5505_MINICARD) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
414 { USB_DEVICE(DELL_VENDOR_ID, 0x8117) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */ 431 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_EXPRESSCARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */
415 { USB_DEVICE(DELL_VENDOR_ID, 0x8118) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */ 432 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5510_EXPRESSCARD) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */
416 { USB_DEVICE(DELL_VENDOR_ID, 0x8128) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */ 433 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_SPRINT) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */
417 { USB_DEVICE(DELL_VENDOR_ID, 0x8129) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */ 434 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_TELUS) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */
418 { USB_DEVICE(DELL_VENDOR_ID, 0x8133) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */ 435 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_VZW) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
419 { USB_DEVICE(DELL_VENDOR_ID, 0x8136) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */ 436 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_SPRINT) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
420 { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ 437 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_TELUS) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
421 { USB_DEVICE(DELL_VENDOR_ID, 0x8138) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */ 438 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_CINGULAR) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
422 { USB_DEVICE(DELL_VENDOR_ID, 0x8147) }, /* Dell Wireless 5530 Mobile Broadband (3G HSPA) Mini-Card */ 439 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_L) }, /* Dell Wireless HSDPA 5520 */
440 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_I) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
441 { USB_DEVICE(DELL_VENDOR_ID, 0x8147) }, /* Dell Wireless 5530 Mobile Broadband (3G HSPA) Mini-Card */
442 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
443 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
444 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
423 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ 445 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
424 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, 446 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
425 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, 447 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -488,9 +510,6 @@ static struct usb_device_id option_ids[] = {
488 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, 510 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
489 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, 511 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
490 { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) }, 512 { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) },
491 { USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_PC5740) },
492 { USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_PC5750) },
493 { USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_UM150) },
494 { } /* Terminating entry */ 513 { } /* Terminating entry */
495}; 514};
496MODULE_DEVICE_TABLE(usb, option_ids); 515MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index baf591137b80..2620bf6fe5e1 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -176,7 +176,7 @@ static unsigned int product_5052_count;
176/* the array dimension is the number of default entries plus */ 176/* the array dimension is the number of default entries plus */
177/* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ 177/* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
178/* null entry */ 178/* null entry */
179static struct usb_device_id ti_id_table_3410[7+TI_EXTRA_VID_PID_COUNT+1] = { 179static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = {
180 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, 180 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
181 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, 181 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
182 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, 182 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -185,9 +185,11 @@ static struct usb_device_id ti_id_table_3410[7+TI_EXTRA_VID_PID_COUNT+1] = {
185 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, 185 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
186 { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, 186 { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
187 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, 187 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
188 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
189 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
188}; 190};
189 191
190static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = { 192static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
191 { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, 193 { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
192 { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, 194 { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
193 { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, 195 { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
@@ -195,7 +197,7 @@ static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = {
195 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, 197 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
196}; 198};
197 199
198static struct usb_device_id ti_id_table_combined[6+2*TI_EXTRA_VID_PID_COUNT+1] = { 200static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = {
199 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, 201 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
200 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, 202 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
201 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, 203 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -208,6 +210,8 @@ static struct usb_device_id ti_id_table_combined[6+2*TI_EXTRA_VID_PID_COUNT+1] =
208 { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, 210 { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
209 { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, 211 { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
210 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, 212 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
213 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
214 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
211 { } 215 { }
212}; 216};
213 217
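For illustration, a minimal sketch of how these id tables are dimensioned, per the comment above: a fixed set of default entries, TI_EXTRA_VID_PID_COUNT zero-initialised slots that user-supplied ids can be copied into at load time (hence product_5052_count above), and one all-zero terminating entry. All EXAMPLE_* names and values below are made up.

#include <linux/usb.h>

#define EXAMPLE_VID		0x1234	/* placeholder vendor id */
#define EXAMPLE_PID		0x5678	/* placeholder product id */
#define EXAMPLE_DEFAULTS	1	/* number of static entries below */
#define EXAMPLE_EXTRA		5	/* stand-in for TI_EXTRA_VID_PID_COUNT */

/* Slots [1..EXAMPLE_EXTRA] stay zero until user ids are copied in at
 * module load; the final all-zero entry terminates the table. */
static struct usb_device_id example_id_table[EXAMPLE_DEFAULTS + EXAMPLE_EXTRA + 1] = {
	{ USB_DEVICE(EXAMPLE_VID, EXAMPLE_PID) },
};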
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index b7ea5dbadee5..f323c6025858 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -30,6 +30,8 @@
30#define IBM_VENDOR_ID 0x04b3 30#define IBM_VENDOR_ID 0x04b3
31#define TI_3410_PRODUCT_ID 0x3410 31#define TI_3410_PRODUCT_ID 0x3410
32#define IBM_4543_PRODUCT_ID 0x4543 32#define IBM_4543_PRODUCT_ID 0x4543
33#define IBM_454B_PRODUCT_ID 0x454b
34#define IBM_454C_PRODUCT_ID 0x454c
33#define TI_3410_EZ430_ID 0xF430 /* TI ez430 development tool */ 35#define TI_3410_EZ430_ID 0xF430 /* TI ez430 development tool */
34#define TI_5052_BOOT_PRODUCT_ID 0x5052 /* no EEPROM, no firmware */ 36#define TI_5052_BOOT_PRODUCT_ID 0x5052 /* no EEPROM, no firmware */
35#define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */ 37#define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 2a42b862aa9f..727c506417cc 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -64,6 +64,7 @@
64 */ 64 */
65#define VENDOR_ID_NOKIA 0x0421 65#define VENDOR_ID_NOKIA 0x0421
66#define VENDOR_ID_NIKON 0x04b0 66#define VENDOR_ID_NIKON 0x04b0
67#define VENDOR_ID_PENTAX 0x0a17
67#define VENDOR_ID_MOTOROLA 0x22b8 68#define VENDOR_ID_MOTOROLA 0x22b8
68 69
69/*********************************************************************** 70/***********************************************************************
@@ -158,6 +159,7 @@ static int slave_configure(struct scsi_device *sdev)
158 switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) { 159 switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) {
159 case VENDOR_ID_NOKIA: 160 case VENDOR_ID_NOKIA:
160 case VENDOR_ID_NIKON: 161 case VENDOR_ID_NIKON:
162 case VENDOR_ID_PENTAX:
161 case VENDOR_ID_MOTOROLA: 163 case VENDOR_ID_MOTOROLA:
162 if (!(us->fflags & (US_FL_FIX_CAPACITY | 164 if (!(us->fflags & (US_FL_FIX_CAPACITY |
163 US_FL_CAPACITY_OK))) 165 US_FL_CAPACITY_OK)))
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 1d5438e6363b..fb65d221cedf 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -558,32 +558,10 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
558 558
559 if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) { 559 if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) {
560 560
561 /* The command succeeded. If the capacity is odd 561 /* The command succeeded. We know this device doesn't
562 * (i.e., if the sector number is even) then the 562 * have the last-sector bug, so stop checking it.
563 * "always-even" heuristic would be wrong for this
564 * device. Issue a WARN() so that the kerneloops.org
565 * project will be notified and we will then know to
566 * mark the device with a CAPACITY_OK flag. Hopefully
567 * this will occur for only a few devices.
568 *
569 * Use the sign of us->last_sector_hacks to tell whether
570 * the warning has already been issued; we don't need
571 * more than one warning per device.
572 */ 563 */
573 if (!(sector & 1) && us->use_last_sector_hacks > 0) { 564 us->use_last_sector_hacks = 0;
574 unsigned vid = le16_to_cpu(
575 us->pusb_dev->descriptor.idVendor);
576 unsigned pid = le16_to_cpu(
577 us->pusb_dev->descriptor.idProduct);
578 unsigned rev = le16_to_cpu(
579 us->pusb_dev->descriptor.bcdDevice);
580
581 WARN(1, "%s: Successful last sector success at %u, "
582 "device %04x:%04x:%04x\n",
583 sdkp->disk->disk_name, sector,
584 vid, pid, rev);
585 us->use_last_sector_hacks = -1;
586 }
587 565
588 } else { 566 } else {
589 /* The command failed. Allow up to 3 retries in case this 567 /* The command failed. Allow up to 3 retries in case this
@@ -599,14 +577,6 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
599 srb->result = SAM_STAT_CHECK_CONDITION; 577 srb->result = SAM_STAT_CHECK_CONDITION;
600 memcpy(srb->sense_buffer, record_not_found, 578 memcpy(srb->sense_buffer, record_not_found,
601 sizeof(record_not_found)); 579 sizeof(record_not_found));
602
603 /* In theory we might want to issue a WARN() here if the
604 * capacity is even, since it could indicate the device
605 * has the READ CAPACITY bug _and_ the real capacity is
606 * odd. But it could also indicate that the device
607 * simply can't access its last sector, a failure mode
608 * which is surprisingly common. So no warning.
609 */
610 } 580 }
611 581
612 done: 582 done:
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 69269f739563..50dc33a6065b 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1214,7 +1214,7 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
1214 "Datafab", 1214 "Datafab",
1215 "KECF-USB", 1215 "KECF-USB",
1216 US_SC_DEVICE, US_PR_DEVICE, NULL, 1216 US_SC_DEVICE, US_PR_DEVICE, NULL,
1217 US_FL_FIX_INQUIRY ), 1217 US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ),
1218 1218
1219/* Reported by Rauch Wolke <rauchwolke@gmx.net> */ 1219/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
1220UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff, 1220UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
@@ -1354,21 +1354,6 @@ UNUSUAL_DEV( 0x0a17, 0x0004, 0x1000, 0x1000,
1354 US_SC_DEVICE, US_PR_DEVICE, NULL, 1354 US_SC_DEVICE, US_PR_DEVICE, NULL,
1355 US_FL_FIX_INQUIRY ), 1355 US_FL_FIX_INQUIRY ),
1356 1356
1357
1358/* Submitted by Per Winkvist <per.winkvist@uk.com> */
1359UNUSUAL_DEV( 0x0a17, 0x006, 0x0000, 0xffff,
1360 "Pentax",
1361 "Optio S/S4",
1362 US_SC_DEVICE, US_PR_DEVICE, NULL,
1363 US_FL_FIX_INQUIRY ),
1364
1365/* Reported by Jaak Ristioja <Ristioja@gmail.com> */
1366UNUSUAL_DEV( 0x0a17, 0x006e, 0x0100, 0x0100,
1367 "Pentax",
1368 "K10D",
1369 US_SC_DEVICE, US_PR_DEVICE, NULL,
1370 US_FL_FIX_CAPACITY ),
1371
1372/* These are virtual windows driver CDs, which the zd1211rw driver 1357/* These are virtual windows driver CDs, which the zd1211rw driver
1373 * automatically converts into WLAN devices. */ 1358 * automatically converts into WLAN devices. */
1374UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101, 1359UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101,
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index f0267706cb45..bf0af660df8a 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1054,9 +1054,10 @@ config FB_RIVA_BACKLIGHT
1054 1054
1055config FB_I810 1055config FB_I810
1056 tristate "Intel 810/815 support (EXPERIMENTAL)" 1056 tristate "Intel 810/815 support (EXPERIMENTAL)"
1057 depends on FB && EXPERIMENTAL && PCI && X86_32 1057 depends on EXPERIMENTAL && PCI && X86_32
1058 select AGP 1058 select AGP
1059 select AGP_INTEL 1059 select AGP_INTEL
1060 select FB
1060 select FB_MODE_HELPERS 1061 select FB_MODE_HELPERS
1061 select FB_CFB_FILLRECT 1062 select FB_CFB_FILLRECT
1062 select FB_CFB_COPYAREA 1063 select FB_CFB_COPYAREA
@@ -1119,7 +1120,8 @@ config FB_CARILLO_RANCH
1119 1120
1120config FB_INTEL 1121config FB_INTEL
1121 tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)" 1122 tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)"
1122 depends on FB && EXPERIMENTAL && PCI && X86 1123 depends on EXPERIMENTAL && PCI && X86
1124 select FB
1123 select AGP 1125 select AGP
1124 select AGP_INTEL 1126 select AGP_INTEL
1125 select FB_MODE_HELPERS 1127 select FB_MODE_HELPERS
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index db16112cf197..e6e299feb51b 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1475,7 +1475,7 @@ static int aty128fb_set_par(struct fb_info *info)
1475 aty128_set_pll(&par->pll, par); 1475 aty128_set_pll(&par->pll, par);
1476 aty128_set_fifo(&par->fifo_reg, par); 1476 aty128_set_fifo(&par->fifo_reg, par);
1477 1477
1478 config = aty_ld_le32(CONFIG_CNTL) & ~3; 1478 config = aty_ld_le32(CNFG_CNTL) & ~3;
1479 1479
1480#if defined(__BIG_ENDIAN) 1480#if defined(__BIG_ENDIAN)
1481 if (par->crtc.bpp == 32) 1481 if (par->crtc.bpp == 32)
@@ -1484,7 +1484,7 @@ static int aty128fb_set_par(struct fb_info *info)
1484 config |= 1; /* make aperture do 16 bit swapping */ 1484 config |= 1; /* make aperture do 16 bit swapping */
1485#endif 1485#endif
1486 1486
1487 aty_st_le32(CONFIG_CNTL, config); 1487 aty_st_le32(CNFG_CNTL, config);
1488 aty_st_8(CRTC_EXT_CNTL + 1, 0); /* turn the video back on */ 1488 aty_st_8(CRTC_EXT_CNTL + 1, 0); /* turn the video back on */
1489 1489
1490 info->fix.line_length = (par->crtc.vxres * par->crtc.bpp) >> 3; 1490 info->fix.line_length = (par->crtc.vxres * par->crtc.bpp) >> 3;
@@ -1875,7 +1875,7 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
1875 u32 dac; 1875 u32 dac;
1876 1876
1877 /* Get the chip revision */ 1877 /* Get the chip revision */
1878 chip_rev = (aty_ld_le32(CONFIG_CNTL) >> 16) & 0x1F; 1878 chip_rev = (aty_ld_le32(CNFG_CNTL) >> 16) & 0x1F;
1879 1879
1880 strcpy(video_card, "Rage128 XX "); 1880 strcpy(video_card, "Rage128 XX ");
1881 video_card[8] = ent->device >> 8; 1881 video_card[8] = ent->device >> 8;
@@ -2057,7 +2057,7 @@ static int __devinit aty128_probe(struct pci_dev *pdev, const struct pci_device_
2057 2057
2058 /* Grab memory size from the card */ 2058 /* Grab memory size from the card */
2059 // How does this relate to the resource length from the PCI hardware? 2059 // How does this relate to the resource length from the PCI hardware?
2060 par->vram_size = aty_ld_le32(CONFIG_MEMSIZE) & 0x03FFFFFF; 2060 par->vram_size = aty_ld_le32(CNFG_MEMSIZE) & 0x03FFFFFF;
2061 2061
2062 /* Virtualize the framebuffer */ 2062 /* Virtualize the framebuffer */
2063 info->screen_base = ioremap(fb_addr, par->vram_size); 2063 info->screen_base = ioremap(fb_addr, par->vram_size);
@@ -2374,6 +2374,8 @@ static void aty128_set_suspend(struct aty128fb_par *par, int suspend)
2374 /* Set the chip into the appropriate suspend mode (we use D2, 2374 /* Set the chip into the appropriate suspend mode (we use D2,
2375 * D3 would require a complete re-initialisation of the chip, 2375 * D3 would require a complete re-initialisation of the chip,
2376 * including PCI config registers, clocks, AGP configuration, ...) 2376 * including PCI config registers, clocks, AGP configuration, ...)
2377 *
2378 * For resume, the core will have already brought us back to D0
2377 */ 2379 */
2378 if (suspend) { 2380 if (suspend) {
2379 /* Make sure CRTC2 is reset. Remove that the day we decide to 2381 /* Make sure CRTC2 is reset. Remove that the day we decide to
@@ -2391,17 +2393,9 @@ static void aty128_set_suspend(struct aty128fb_par *par, int suspend)
2391 aty_st_le32(BUS_CNTL1, 0x00000010); 2393 aty_st_le32(BUS_CNTL1, 0x00000010);
2392 aty_st_le32(MEM_POWER_MISC, 0x0c830000); 2394 aty_st_le32(MEM_POWER_MISC, 0x0c830000);
2393 mdelay(100); 2395 mdelay(100);
2394 pci_read_config_word(pdev, par->pm_reg+PCI_PM_CTRL, &pwr_command); 2396
2395 /* Switch PCI power management to D2 */ 2397 /* Switch PCI power management to D2 */
2396 pci_write_config_word(pdev, par->pm_reg+PCI_PM_CTRL, 2398 pci_set_power_state(pdev, PCI_D2);
2397 (pwr_command & ~PCI_PM_CTRL_STATE_MASK) | 2);
2398 pci_read_config_word(pdev, par->pm_reg+PCI_PM_CTRL, &pwr_command);
2399 } else {
2400 /* Switch back PCI power management to D0 */
2401 mdelay(100);
2402 pci_write_config_word(pdev, par->pm_reg+PCI_PM_CTRL, 0);
2403 pci_read_config_word(pdev, par->pm_reg+PCI_PM_CTRL, &pwr_command);
2404 mdelay(100);
2405 } 2399 }
2406} 2400}
2407 2401
@@ -2410,6 +2404,12 @@ static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2410 struct fb_info *info = pci_get_drvdata(pdev); 2404 struct fb_info *info = pci_get_drvdata(pdev);
2411 struct aty128fb_par *par = info->par; 2405 struct aty128fb_par *par = info->par;
2412 2406
2407 /* Because we may change PCI D state ourselves, we need to
2408 * first save the config space content so the core can
2409 * restore it properly on resume.
2410 */
2411 pci_save_state(pdev);
2412
2413 /* We don't do anything but D2, for now we return 0, but 2413 /* We don't do anything but D2, for now we return 0, but
2414 * we may want to change that. How do we know if the BIOS 2414 * we may want to change that. How do we know if the BIOS
2415 * can properly take care of D3 ? Also, with swsusp, we 2415 * can properly take care of D3 ? Also, with swsusp, we
@@ -2476,6 +2476,11 @@ static int aty128_do_resume(struct pci_dev *pdev)
2476 if (pdev->dev.power.power_state.event == PM_EVENT_ON) 2476 if (pdev->dev.power.power_state.event == PM_EVENT_ON)
2477 return 0; 2477 return 0;
2478 2478
2479 /* PCI state will have been restored by the core, so
2480 * we should be in D0 now with our config space fully
2481 * restored
2482 */
2483
2479 /* Wakeup chip */ 2484 /* Wakeup chip */
2480 aty128_set_suspend(par, 0); 2485 aty128_set_suspend(par, 0);
2481 par->asleep = 0; 2486 par->asleep = 0;
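The hunks above replace hand-rolled PCI_PM_CTRL config writes with the generic PCI helpers: save the config space before changing the D state, then rely on the core to restore it before resume. A minimal sketch of that pattern follows; the examplefb_* names are made up, only the pci_* calls are real kernel APIs.

#include <linux/pci.h>

static int examplefb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	/* We change the PCI D state ourselves, so save config space first;
	 * the PCI core restores it before our resume handler runs. */
	pci_save_state(pdev);

	/* ... program the chip into its low-power state here ... */

	pci_set_power_state(pdev, PCI_D2);
	return 0;
}

static int examplefb_pci_resume(struct pci_dev *pdev)
{
	/* The core has already put us back in D0 with config space restored,
	 * so only chip re-initialisation is left to do. */

	/* ... wake the chip and restore the video mode here ... */

	return 0;
}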
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index cc6b470073da..1207c208a30b 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -135,7 +135,7 @@
135#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \ 135#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
136defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT) 136defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT)
137static const u32 lt_lcd_regs[] = { 137static const u32 lt_lcd_regs[] = {
138 CONFIG_PANEL_LG, 138 CNFG_PANEL_LG,
139 LCD_GEN_CNTL_LG, 139 LCD_GEN_CNTL_LG,
140 DSTN_CONTROL_LG, 140 DSTN_CONTROL_LG,
141 HFB_PITCH_ADDR_LG, 141 HFB_PITCH_ADDR_LG,
@@ -446,7 +446,7 @@ static int __devinit correct_chipset(struct atyfb_par *par)
446 par->pll_limits.ecp_max = aty_chips[i].ecp_max; 446 par->pll_limits.ecp_max = aty_chips[i].ecp_max;
447 par->features = aty_chips[i].features; 447 par->features = aty_chips[i].features;
448 448
449 chip_id = aty_ld_le32(CONFIG_CHIP_ID, par); 449 chip_id = aty_ld_le32(CNFG_CHIP_ID, par);
450 type = chip_id & CFG_CHIP_TYPE; 450 type = chip_id & CFG_CHIP_TYPE;
451 rev = (chip_id & CFG_CHIP_REV) >> 24; 451 rev = (chip_id & CFG_CHIP_REV) >> 24;
452 452
@@ -629,7 +629,7 @@ static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc)
629 crtc->lcd_index = aty_ld_le32(LCD_INDEX, par); 629 crtc->lcd_index = aty_ld_le32(LCD_INDEX, par);
630 aty_st_le32(LCD_INDEX, crtc->lcd_index, par); 630 aty_st_le32(LCD_INDEX, crtc->lcd_index, par);
631 } 631 }
632 crtc->lcd_config_panel = aty_ld_lcd(CONFIG_PANEL, par); 632 crtc->lcd_config_panel = aty_ld_lcd(CNFG_PANEL, par);
633 crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par); 633 crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par);
634 634
635 635
@@ -676,7 +676,7 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
676 aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl & ~(CRTC_EXT_DISP_EN | CRTC_EN), par); 676 aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl & ~(CRTC_EXT_DISP_EN | CRTC_EN), par);
677 677
678 /* update non-shadow registers first */ 678 /* update non-shadow registers first */
679 aty_st_lcd(CONFIG_PANEL, crtc->lcd_config_panel, par); 679 aty_st_lcd(CNFG_PANEL, crtc->lcd_config_panel, par);
680 aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl & 680 aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl &
681 ~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par); 681 ~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par);
682 682
@@ -858,7 +858,7 @@ static int aty_var_to_crtc(const struct fb_info *info,
858 if (!M64_HAS(MOBIL_BUS)) 858 if (!M64_HAS(MOBIL_BUS))
859 crtc->lcd_index |= CRTC2_DISPLAY_DIS; 859 crtc->lcd_index |= CRTC2_DISPLAY_DIS;
860 860
861 crtc->lcd_config_panel = aty_ld_lcd(CONFIG_PANEL, par) | 0x4000; 861 crtc->lcd_config_panel = aty_ld_lcd(CNFG_PANEL, par) | 0x4000;
862 crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par) & ~CRTC_RW_SELECT; 862 crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par) & ~CRTC_RW_SELECT;
863 863
864 crtc->lcd_gen_cntl &= 864 crtc->lcd_gen_cntl &=
@@ -1978,7 +1978,7 @@ static int aty_power_mgmt(int sleep, struct atyfb_par *par)
1978 1978
1979 return timeout ? 0 : -EIO; 1979 return timeout ? 0 : -EIO;
1980} 1980}
1981#endif 1981#endif /* CONFIG_PPC_PMAC */
1982 1982
1983static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1983static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1984{ 1984{
@@ -2002,9 +2002,15 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2002 par->asleep = 1; 2002 par->asleep = 1;
2003 par->lock_blank = 1; 2003 par->lock_blank = 1;
2004 2004
2005 /* Because we may change PCI D state ourselves, we need to
2006 * first save the config space content so the core can
2007 * restore it properly on resume.
2008 */
2009 pci_save_state(pdev);
2010
2005#ifdef CONFIG_PPC_PMAC 2011#ifdef CONFIG_PPC_PMAC
2006 /* Set chip to "suspend" mode */ 2012 /* Set chip to "suspend" mode */
2007 if (aty_power_mgmt(1, par)) { 2013 if (machine_is(powermac) && aty_power_mgmt(1, par)) {
2008 par->asleep = 0; 2014 par->asleep = 0;
2009 par->lock_blank = 0; 2015 par->lock_blank = 0;
2010 atyfb_blank(FB_BLANK_UNBLANK, info); 2016 atyfb_blank(FB_BLANK_UNBLANK, info);
@@ -2047,11 +2053,15 @@ static int atyfb_pci_resume(struct pci_dev *pdev)
2047 2053
2048 acquire_console_sem(); 2054 acquire_console_sem();
2049 2055
2056 /* PCI state will have been restored by the core, so
2057 * we should be in D0 now with our config space fully
2058 * restored
2059 */
2060
2050#ifdef CONFIG_PPC_PMAC 2061#ifdef CONFIG_PPC_PMAC
2051 if (pdev->dev.power.power_state.event == 2) 2062 if (machine_is(powermac) &&
2063 pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
2052 aty_power_mgmt(0, par); 2064 aty_power_mgmt(0, par);
2053#else
2054 pci_set_power_state(pdev, PCI_D0);
2055#endif 2065#endif
2056 2066
2057 aty_resume_chip(info); 2067 aty_resume_chip(info);
@@ -2254,7 +2264,7 @@ static int __devinit aty_init(struct fb_info *info)
2254 if (!M64_HAS(INTEGRATED)) { 2264 if (!M64_HAS(INTEGRATED)) {
2255 u32 stat0; 2265 u32 stat0;
2256 u8 dac_type, dac_subtype, clk_type; 2266 u8 dac_type, dac_subtype, clk_type;
2257 stat0 = aty_ld_le32(CONFIG_STAT0, par); 2267 stat0 = aty_ld_le32(CNFG_STAT0, par);
2258 par->bus_type = (stat0 >> 0) & 0x07; 2268 par->bus_type = (stat0 >> 0) & 0x07;
2259 par->ram_type = (stat0 >> 3) & 0x07; 2269 par->ram_type = (stat0 >> 3) & 0x07;
2260 ramname = aty_gx_ram[par->ram_type]; 2270 ramname = aty_gx_ram[par->ram_type];
@@ -2324,7 +2334,7 @@ static int __devinit aty_init(struct fb_info *info)
2324 par->dac_ops = &aty_dac_ct; 2334 par->dac_ops = &aty_dac_ct;
2325 par->pll_ops = &aty_pll_ct; 2335 par->pll_ops = &aty_pll_ct;
2326 par->bus_type = PCI; 2336 par->bus_type = PCI;
2327 par->ram_type = (aty_ld_le32(CONFIG_STAT0, par) & 0x07); 2337 par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07);
2328 ramname = aty_ct_ram[par->ram_type]; 2338 ramname = aty_ct_ram[par->ram_type];
2329 /* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */ 2339 /* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */
2330 if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM) 2340 if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM)
@@ -2433,7 +2443,7 @@ static int __devinit aty_init(struct fb_info *info)
2433 } 2443 }
2434 2444
2435 if (M64_HAS(MAGIC_VRAM_SIZE)) { 2445 if (M64_HAS(MAGIC_VRAM_SIZE)) {
2436 if (aty_ld_le32(CONFIG_STAT1, par) & 0x40000000) 2446 if (aty_ld_le32(CNFG_STAT1, par) & 0x40000000)
2437 info->fix.smem_len += 0x400000; 2447 info->fix.smem_len += 0x400000;
2438 } 2448 }
2439 2449
@@ -2946,7 +2956,7 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
2946 * Fix PROMs idea of MEM_CNTL settings... 2956 * Fix PROMs idea of MEM_CNTL settings...
2947 */ 2957 */
2948 mem = aty_ld_le32(MEM_CNTL, par); 2958 mem = aty_ld_le32(MEM_CNTL, par);
2949 chip_id = aty_ld_le32(CONFIG_CHIP_ID, par); 2959 chip_id = aty_ld_le32(CNFG_CHIP_ID, par);
2950 if (((chip_id & CFG_CHIP_TYPE) == VT_CHIP_ID) && !((chip_id >> 24) & 1)) { 2960 if (((chip_id & CFG_CHIP_TYPE) == VT_CHIP_ID) && !((chip_id >> 24) & 1)) {
2951 switch (mem & 0x0f) { 2961 switch (mem & 0x0f) {
2952 case 3: 2962 case 3:
@@ -2964,7 +2974,7 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
2964 default: 2974 default:
2965 break; 2975 break;
2966 } 2976 }
2967 if ((aty_ld_le32(CONFIG_STAT0, par) & 7) >= SDRAM) 2977 if ((aty_ld_le32(CNFG_STAT0, par) & 7) >= SDRAM)
2968 mem &= ~(0x00700000); 2978 mem &= ~(0x00700000);
2969 } 2979 }
2970 mem &= ~(0xcf80e000); /* Turn off all undocumented bits. */ 2980 mem &= ~(0xcf80e000); /* Turn off all undocumented bits. */
@@ -3572,7 +3582,7 @@ static int __init atyfb_atari_probe(void)
3572 } 3582 }
3573 3583
3574 /* Fake pci_id for correct_chipset() */ 3584 /* Fake pci_id for correct_chipset() */
3575 switch (aty_ld_le32(CONFIG_CHIP_ID, par) & CFG_CHIP_TYPE) { 3585 switch (aty_ld_le32(CNFG_CHIP_ID, par) & CFG_CHIP_TYPE) {
3576 case 0x00d7: 3586 case 0x00d7:
3577 par->pci_id = PCI_CHIP_MACH64GX; 3587 par->pci_id = PCI_CHIP_MACH64GX;
3578 break; 3588 break;
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index d0f1a7fc2c9d..16bb7e3c0310 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -1936,8 +1936,8 @@ static void fixup_memory_mappings(struct radeonfb_info *rinfo)
1936 OUTREG(CRTC_GEN_CNTL, save_crtc_gen_cntl | CRTC_DISP_REQ_EN_B); 1936 OUTREG(CRTC_GEN_CNTL, save_crtc_gen_cntl | CRTC_DISP_REQ_EN_B);
1937 mdelay(100); 1937 mdelay(100);
1938 1938
1939 aper_base = INREG(CONFIG_APER_0_BASE); 1939 aper_base = INREG(CNFG_APER_0_BASE);
1940 aper_size = INREG(CONFIG_APER_SIZE); 1940 aper_size = INREG(CNFG_APER_SIZE);
1941 1941
1942#ifdef SET_MC_FB_FROM_APERTURE 1942#ifdef SET_MC_FB_FROM_APERTURE
1943 /* Set framebuffer to be at the same address as set in PCI BAR */ 1943 /* Set framebuffer to be at the same address as set in PCI BAR */
@@ -2024,11 +2024,11 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
2024 ~CRTC_H_CUTOFF_ACTIVE_EN); 2024 ~CRTC_H_CUTOFF_ACTIVE_EN);
2025 } 2025 }
2026 } else { 2026 } else {
2027 tmp = INREG(CONFIG_MEMSIZE); 2027 tmp = INREG(CNFG_MEMSIZE);
2028 } 2028 }
2029 2029
2030 /* mem size is bits [28:0], mask off the rest */ 2030 /* mem size is bits [28:0], mask off the rest */
2031 rinfo->video_ram = tmp & CONFIG_MEMSIZE_MASK; 2031 rinfo->video_ram = tmp & CNFG_MEMSIZE_MASK;
2032 2032
2033 /* 2033 /*
2034 * Hack to get around some busted production M6's 2034 * Hack to get around some busted production M6's
@@ -2228,7 +2228,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
2228 */ 2228 */
2229 rinfo->errata = 0; 2229 rinfo->errata = 0;
2230 if (rinfo->family == CHIP_FAMILY_R300 && 2230 if (rinfo->family == CHIP_FAMILY_R300 &&
2231 (INREG(CONFIG_CNTL) & CFG_ATI_REV_ID_MASK) 2231 (INREG(CNFG_CNTL) & CFG_ATI_REV_ID_MASK)
2232 == CFG_ATI_REV_A11) 2232 == CFG_ATI_REV_A11)
2233 rinfo->errata |= CHIP_ERRATA_R300_CG; 2233 rinfo->errata |= CHIP_ERRATA_R300_CG;
2234 2234
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 675abdafc2d8..ca5f0dc28546 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -333,7 +333,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
333 if (!rinfo->has_CRTC2) { 333 if (!rinfo->has_CRTC2) {
334 tmp = INPLL(pllSCLK_CNTL); 334 tmp = INPLL(pllSCLK_CNTL);
335 335
336 if ((INREG(CONFIG_CNTL) & CFG_ATI_REV_ID_MASK) > CFG_ATI_REV_A13) 336 if ((INREG(CNFG_CNTL) & CFG_ATI_REV_ID_MASK) > CFG_ATI_REV_A13)
337 tmp &= ~(SCLK_CNTL__FORCE_CP | SCLK_CNTL__FORCE_RB); 337 tmp &= ~(SCLK_CNTL__FORCE_CP | SCLK_CNTL__FORCE_RB);
338 tmp &= ~(SCLK_CNTL__FORCE_HDP | SCLK_CNTL__FORCE_DISP1 | 338 tmp &= ~(SCLK_CNTL__FORCE_HDP | SCLK_CNTL__FORCE_DISP1 |
339 SCLK_CNTL__FORCE_TOP | SCLK_CNTL__FORCE_SE | 339 SCLK_CNTL__FORCE_TOP | SCLK_CNTL__FORCE_SE |
@@ -468,9 +468,9 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
468 468
469 /*RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300*/ 469 /*RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300*/
470 if ((rinfo->family == CHIP_FAMILY_RV250 && 470 if ((rinfo->family == CHIP_FAMILY_RV250 &&
471 ((INREG(CONFIG_CNTL) & CFG_ATI_REV_ID_MASK) < CFG_ATI_REV_A13)) || 471 ((INREG(CNFG_CNTL) & CFG_ATI_REV_ID_MASK) < CFG_ATI_REV_A13)) ||
472 ((rinfo->family == CHIP_FAMILY_RV100) && 472 ((rinfo->family == CHIP_FAMILY_RV100) &&
473 ((INREG(CONFIG_CNTL) & CFG_ATI_REV_ID_MASK) <= CFG_ATI_REV_A13))) { 473 ((INREG(CNFG_CNTL) & CFG_ATI_REV_ID_MASK) <= CFG_ATI_REV_A13))) {
474 tmp |= SCLK_CNTL__FORCE_CP; 474 tmp |= SCLK_CNTL__FORCE_CP;
475 tmp |= SCLK_CNTL__FORCE_VIP; 475 tmp |= SCLK_CNTL__FORCE_VIP;
476 } 476 }
@@ -486,7 +486,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
486 /* RV200::A11 A12 RV250::A11 A12 */ 486 /* RV200::A11 A12 RV250::A11 A12 */
487 if (((rinfo->family == CHIP_FAMILY_RV200) || 487 if (((rinfo->family == CHIP_FAMILY_RV200) ||
488 (rinfo->family == CHIP_FAMILY_RV250)) && 488 (rinfo->family == CHIP_FAMILY_RV250)) &&
489 ((INREG(CONFIG_CNTL) & CFG_ATI_REV_ID_MASK) < CFG_ATI_REV_A13)) 489 ((INREG(CNFG_CNTL) & CFG_ATI_REV_ID_MASK) < CFG_ATI_REV_A13))
490 tmp |= SCLK_MORE_CNTL__FORCEON; 490 tmp |= SCLK_MORE_CNTL__FORCEON;
491 491
492 OUTPLL(pllSCLK_MORE_CNTL, tmp); 492 OUTPLL(pllSCLK_MORE_CNTL, tmp);
@@ -497,7 +497,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
497 /* RV200::A11 A12, RV250::A11 A12 */ 497 /* RV200::A11 A12, RV250::A11 A12 */
498 if (((rinfo->family == CHIP_FAMILY_RV200) || 498 if (((rinfo->family == CHIP_FAMILY_RV200) ||
499 (rinfo->family == CHIP_FAMILY_RV250)) && 499 (rinfo->family == CHIP_FAMILY_RV250)) &&
500 ((INREG(CONFIG_CNTL) & CFG_ATI_REV_ID_MASK) < CFG_ATI_REV_A13)) { 500 ((INREG(CNFG_CNTL) & CFG_ATI_REV_ID_MASK) < CFG_ATI_REV_A13)) {
501 tmp = INPLL(pllPLL_PWRMGT_CNTL); 501 tmp = INPLL(pllPLL_PWRMGT_CNTL);
502 tmp |= PLL_PWRMGT_CNTL__TCL_BYPASS_DISABLE; 502 tmp |= PLL_PWRMGT_CNTL__TCL_BYPASS_DISABLE;
503 OUTPLL(pllPLL_PWRMGT_CNTL, tmp); 503 OUTPLL(pllPLL_PWRMGT_CNTL, tmp);
@@ -702,7 +702,7 @@ static void radeon_pm_restore_regs(struct radeonfb_info *rinfo)
702 OUTREG(DISPLAY_BASE_ADDR, rinfo->save_regs[31]); 702 OUTREG(DISPLAY_BASE_ADDR, rinfo->save_regs[31]);
703 OUTREG(MC_AGP_LOCATION, rinfo->save_regs[32]); 703 OUTREG(MC_AGP_LOCATION, rinfo->save_regs[32]);
704 OUTREG(CRTC2_DISPLAY_BASE_ADDR, rinfo->save_regs[33]); 704 OUTREG(CRTC2_DISPLAY_BASE_ADDR, rinfo->save_regs[33]);
705 OUTREG(CONFIG_MEMSIZE, rinfo->video_ram); 705 OUTREG(CNFG_MEMSIZE, rinfo->video_ram);
706 706
707 OUTREG(DISP_MISC_CNTL, rinfo->save_regs[9]); 707 OUTREG(DISP_MISC_CNTL, rinfo->save_regs[9]);
708 OUTREG(DISP_PWR_MAN, rinfo->save_regs[10]); 708 OUTREG(DISP_PWR_MAN, rinfo->save_regs[10]);
@@ -1723,7 +1723,7 @@ static void radeon_reinitialize_M10(struct radeonfb_info *rinfo)
1723 OUTREG(CRTC2_DISPLAY_BASE_ADDR, rinfo->save_regs[33]); 1723 OUTREG(CRTC2_DISPLAY_BASE_ADDR, rinfo->save_regs[33]);
1724 OUTREG(MC_FB_LOCATION, rinfo->save_regs[30]); 1724 OUTREG(MC_FB_LOCATION, rinfo->save_regs[30]);
1725 OUTREG(OV0_BASE_ADDR, rinfo->save_regs[80]); 1725 OUTREG(OV0_BASE_ADDR, rinfo->save_regs[80]);
1726 OUTREG(CONFIG_MEMSIZE, rinfo->video_ram); 1726 OUTREG(CNFG_MEMSIZE, rinfo->video_ram);
1727 OUTREG(BUS_CNTL, rinfo->save_regs[36]); 1727 OUTREG(BUS_CNTL, rinfo->save_regs[36]);
1728 OUTREG(BUS_CNTL1, rinfo->save_regs[14]); 1728 OUTREG(BUS_CNTL1, rinfo->save_regs[14]);
1729 OUTREG(MPP_TB_CONFIG, rinfo->save_regs[37]); 1729 OUTREG(MPP_TB_CONFIG, rinfo->save_regs[37]);
@@ -1961,7 +1961,7 @@ static void radeon_pm_m9p_reconfigure_mc(struct radeonfb_info *rinfo)
1961 OUTMC(rinfo, ixMC_CHP_IO_CNTL_B1, rinfo->save_regs[68] /*0x141555ff*/); 1961 OUTMC(rinfo, ixMC_CHP_IO_CNTL_B1, rinfo->save_regs[68] /*0x141555ff*/);
1962 OUTMC(rinfo, ixMC_IMP_CNTL_0, rinfo->save_regs[71] /*0x00009249*/); 1962 OUTMC(rinfo, ixMC_IMP_CNTL_0, rinfo->save_regs[71] /*0x00009249*/);
1963 OUTREG(MC_IND_INDEX, 0); 1963 OUTREG(MC_IND_INDEX, 0);
1964 OUTREG(CONFIG_MEMSIZE, rinfo->video_ram); 1964 OUTREG(CNFG_MEMSIZE, rinfo->video_ram);
1965 1965
1966 mdelay(20); 1966 mdelay(20);
1967} 1967}
@@ -2361,7 +2361,7 @@ static void radeon_reinitialize_QW(struct radeonfb_info *rinfo)
2361 OUTMC(rinfo, ixMC_IMP_CNTL_0, 0x00009249); 2361 OUTMC(rinfo, ixMC_IMP_CNTL_0, 0x00009249);
2362 OUTREG(MC_IND_INDEX, 0); 2362 OUTREG(MC_IND_INDEX, 0);
2363 2363
2364 OUTREG(CONFIG_MEMSIZE, rinfo->video_ram); 2364 OUTREG(CNFG_MEMSIZE, rinfo->video_ram);
2365 2365
2366 radeon_pm_full_reset_sdram(rinfo); 2366 radeon_pm_full_reset_sdram(rinfo);
2367 2367
@@ -2509,9 +2509,7 @@ static void radeon_reinitialize_QW(struct radeonfb_info *rinfo)
2509 2509
2510static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend) 2510static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
2511{ 2511{
2512 u16 pwr_cmd;
2513 u32 tmp; 2512 u32 tmp;
2514 int i;
2515 2513
2516 if (!rinfo->pm_reg) 2514 if (!rinfo->pm_reg)
2517 return; 2515 return;
@@ -2557,32 +2555,14 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
2557 } 2555 }
2558 } 2556 }
2559 2557
2560 for (i = 0; i < 64; ++i)
2561 pci_read_config_dword(rinfo->pdev, i * 4,
2562 &rinfo->cfg_save[i]);
2563
2564 /* Switch PCI power management to D2. */ 2558 /* Switch PCI power management to D2. */
2565 pci_disable_device(rinfo->pdev); 2559 pci_disable_device(rinfo->pdev);
2566 for (;;) { 2560 pci_save_state(rinfo->pdev);
2567 pci_read_config_word( 2561 pci_set_power_state(rinfo->pdev, PCI_D2);
2568 rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL,
2569 &pwr_cmd);
2570 if (pwr_cmd & 2)
2571 break;
2572 pci_write_config_word(
2573 rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL,
2574 (pwr_cmd & ~PCI_PM_CTRL_STATE_MASK) | 2);
2575 mdelay(500);
2576 }
2577 } else { 2562 } else {
2578 printk(KERN_DEBUG "radeonfb (%s): switching to D0 state...\n", 2563 printk(KERN_DEBUG "radeonfb (%s): switching to D0 state...\n",
2579 pci_name(rinfo->pdev)); 2564 pci_name(rinfo->pdev));
2580 2565
2581 /* Switch back PCI powermanagment to D0 */
2582 mdelay(200);
2583 pci_write_config_word(rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL, 0);
2584 mdelay(500);
2585
2586 if (rinfo->family <= CHIP_FAMILY_RV250) { 2566 if (rinfo->family <= CHIP_FAMILY_RV250) {
2587 /* Reset the SDRAM controller */ 2567 /* Reset the SDRAM controller */
2588 radeon_pm_full_reset_sdram(rinfo); 2568 radeon_pm_full_reset_sdram(rinfo);
@@ -2598,37 +2578,10 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
2598 } 2578 }
2599} 2579}
2600 2580
2601static int radeon_restore_pci_cfg(struct radeonfb_info *rinfo)
2602{
2603 int i;
2604 static u32 radeon_cfg_after_resume[64];
2605
2606 for (i = 0; i < 64; ++i)
2607 pci_read_config_dword(rinfo->pdev, i * 4,
2608 &radeon_cfg_after_resume[i]);
2609
2610 if (radeon_cfg_after_resume[PCI_BASE_ADDRESS_0/4]
2611 == rinfo->cfg_save[PCI_BASE_ADDRESS_0/4])
2612 return 0; /* assume everything is ok */
2613
2614 for (i = PCI_BASE_ADDRESS_0/4; i < 64; ++i) {
2615 if (radeon_cfg_after_resume[i] != rinfo->cfg_save[i])
2616 pci_write_config_dword(rinfo->pdev, i * 4,
2617 rinfo->cfg_save[i]);
2618 }
2619 pci_write_config_word(rinfo->pdev, PCI_CACHE_LINE_SIZE,
2620 rinfo->cfg_save[PCI_CACHE_LINE_SIZE/4]);
2621 pci_write_config_word(rinfo->pdev, PCI_COMMAND,
2622 rinfo->cfg_save[PCI_COMMAND/4]);
2623 return 1;
2624}
2625
2626
2627int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) 2581int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
2628{ 2582{
2629 struct fb_info *info = pci_get_drvdata(pdev); 2583 struct fb_info *info = pci_get_drvdata(pdev);
2630 struct radeonfb_info *rinfo = info->par; 2584 struct radeonfb_info *rinfo = info->par;
2631 int i;
2632 2585
2633 if (mesg.event == pdev->dev.power.power_state.event) 2586 if (mesg.event == pdev->dev.power.power_state.event)
2634 return 0; 2587 return 0;
@@ -2674,6 +2627,11 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
2674 pmac_suspend_agp_for_card(pdev); 2627 pmac_suspend_agp_for_card(pdev);
2675#endif /* CONFIG_PPC_PMAC */ 2628#endif /* CONFIG_PPC_PMAC */
2676 2629
2630 /* It's unclear whether or when the generic code will do that, so let's
2631 * do it ourselves. We save state before we do any power management
2632 */
2633 pci_save_state(pdev);
2634
2677 /* If we support wakeup from poweroff, we save all regs we can including cfg 2635 /* If we support wakeup from poweroff, we save all regs we can including cfg
2678 * space 2636 * space
2679 */ 2637 */
@@ -2698,9 +2656,6 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
2698 mdelay(20); 2656 mdelay(20);
2699 OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_DIGON)); 2657 OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_DIGON));
2700 } 2658 }
2701 // FIXME: Use PCI layer
2702 for (i = 0; i < 64; ++i)
2703 pci_read_config_dword(pdev, i * 4, &rinfo->cfg_save[i]);
2704 pci_disable_device(pdev); 2659 pci_disable_device(pdev);
2705 } 2660 }
2706 /* If we support D2, we go to it (should be fixed later with a flag forcing 2661 /* If we support D2, we go to it (should be fixed later with a flag forcing
@@ -2717,6 +2672,13 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
2717 return 0; 2672 return 0;
2718} 2673}
2719 2674
2675static int radeon_check_power_loss(struct radeonfb_info *rinfo)
2676{
2677 return rinfo->save_regs[4] != INPLL(CLK_PIN_CNTL) ||
2678 rinfo->save_regs[2] != INPLL(MCLK_CNTL) ||
2679 rinfo->save_regs[3] != INPLL(SCLK_CNTL);
2680}
2681
2720int radeonfb_pci_resume(struct pci_dev *pdev) 2682int radeonfb_pci_resume(struct pci_dev *pdev)
2721{ 2683{
2722 struct fb_info *info = pci_get_drvdata(pdev); 2684 struct fb_info *info = pci_get_drvdata(pdev);
@@ -2735,20 +2697,13 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
2735 printk(KERN_DEBUG "radeonfb (%s): resuming from state: %d...\n", 2697 printk(KERN_DEBUG "radeonfb (%s): resuming from state: %d...\n",
2736 pci_name(pdev), pdev->dev.power.power_state.event); 2698 pci_name(pdev), pdev->dev.power.power_state.event);
2737 2699
2738 2700 /* PCI state will have been restored by the core, so
2739 if (pci_enable_device(pdev)) { 2701 * we should be in D0 now with our config space fully
2740 rc = -ENODEV; 2702 * restored
2741 printk(KERN_ERR "radeonfb (%s): can't enable PCI device !\n", 2703 */
2742 pci_name(pdev));
2743 goto bail;
2744 }
2745 pci_set_master(pdev);
2746
2747 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { 2704 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2748 /* Wakeup chip. Check from config space if we were powered off 2705 /* Wakeup chip */
2749 * (todo: additionally, check CLK_PIN_CNTL too) 2706 if ((rinfo->pm_mode & radeon_pm_off) && radeon_check_power_loss(rinfo)) {
2750 */
2751 if ((rinfo->pm_mode & radeon_pm_off) && radeon_restore_pci_cfg(rinfo)) {
2752 if (rinfo->reinit_func != NULL) 2707 if (rinfo->reinit_func != NULL)
2753 rinfo->reinit_func(rinfo); 2708 rinfo->reinit_func(rinfo);
2754 else { 2709 else {
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 3ea1b00fdd22..7351e66c7f54 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -361,8 +361,6 @@ struct radeonfb_info {
361#ifdef CONFIG_FB_RADEON_I2C 361#ifdef CONFIG_FB_RADEON_I2C
362 struct radeon_i2c_chan i2c[4]; 362 struct radeon_i2c_chan i2c[4];
363#endif 363#endif
364
365 u32 cfg_save[64];
366}; 364};
367 365
368 366
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 363b3cb2f01b..63d759498165 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
18obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o 18obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o
19obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o 19obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
20obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o 20obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
21obj-$(CONFIG_BACKLIGHT_DA903X) += da903x.o 21obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
22obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o 22obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o
23obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o 23obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
24obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o 24obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
diff --git a/drivers/video/backlight/da903x.c b/drivers/video/backlight/da903x_bl.c
index 93bb4340cc64..93bb4340cc64 100644
--- a/drivers/video/backlight/da903x.c
+++ b/drivers/video/backlight/da903x_bl.c
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
index 91b78e691505..f53b9f1d6aba 100644
--- a/drivers/video/fbcmap.c
+++ b/drivers/video/fbcmap.c
@@ -250,10 +250,6 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
250 int rc, size = cmap->len * sizeof(u16); 250 int rc, size = cmap->len * sizeof(u16);
251 struct fb_cmap umap; 251 struct fb_cmap umap;
252 252
253 if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
254 !info->fbops->fb_setcmap))
255 return -EINVAL;
256
257 memset(&umap, 0, sizeof(struct fb_cmap)); 253 memset(&umap, 0, sizeof(struct fb_cmap));
258 rc = fb_alloc_cmap(&umap, cmap->len, cmap->transp != NULL); 254 rc = fb_alloc_cmap(&umap, cmap->len, cmap->transp != NULL);
259 if (rc) 255 if (rc)
@@ -262,11 +258,23 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
262 copy_from_user(umap.green, cmap->green, size) || 258 copy_from_user(umap.green, cmap->green, size) ||
263 copy_from_user(umap.blue, cmap->blue, size) || 259 copy_from_user(umap.blue, cmap->blue, size) ||
264 (cmap->transp && copy_from_user(umap.transp, cmap->transp, size))) { 260 (cmap->transp && copy_from_user(umap.transp, cmap->transp, size))) {
265 fb_dealloc_cmap(&umap); 261 rc = -EFAULT;
266 return -EFAULT; 262 goto out;
267 } 263 }
268 umap.start = cmap->start; 264 umap.start = cmap->start;
265 if (!lock_fb_info(info)) {
266 rc = -ENODEV;
267 goto out;
268 }
269 if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
270 !info->fbops->fb_setcmap)) {
271 rc = -EINVAL;
272 goto out1;
273 }
269 rc = fb_set_cmap(&umap, info); 274 rc = fb_set_cmap(&umap, info);
275out1:
276 unlock_fb_info(info);
277out:
270 fb_dealloc_cmap(&umap); 278 fb_dealloc_cmap(&umap);
271 return rc; 279 return rc;
272} 280}
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 756efeb91abc..cfd9dce1ce0b 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1013,132 +1013,139 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1013 struct fb_var_screeninfo var; 1013 struct fb_var_screeninfo var;
1014 struct fb_fix_screeninfo fix; 1014 struct fb_fix_screeninfo fix;
1015 struct fb_con2fbmap con2fb; 1015 struct fb_con2fbmap con2fb;
1016 struct fb_cmap cmap_from;
1016 struct fb_cmap_user cmap; 1017 struct fb_cmap_user cmap;
1017 struct fb_event event; 1018 struct fb_event event;
1018 void __user *argp = (void __user *)arg; 1019 void __user *argp = (void __user *)arg;
1019 long ret = 0; 1020 long ret = 0;
1020 1021
1021 fb = info->fbops;
1022 if (!fb)
1023 return -ENODEV;
1024
1025 switch (cmd) { 1022 switch (cmd) {
1026 case FBIOGET_VSCREENINFO: 1023 case FBIOGET_VSCREENINFO:
1027 ret = copy_to_user(argp, &info->var, 1024 if (!lock_fb_info(info))
1028 sizeof(var)) ? -EFAULT : 0; 1025 return -ENODEV;
1026 var = info->var;
1027 unlock_fb_info(info);
1028
1029 ret = copy_to_user(argp, &var, sizeof(var)) ? -EFAULT : 0;
1029 break; 1030 break;
1030 case FBIOPUT_VSCREENINFO: 1031 case FBIOPUT_VSCREENINFO:
1031 if (copy_from_user(&var, argp, sizeof(var))) { 1032 if (copy_from_user(&var, argp, sizeof(var)))
1032 ret = -EFAULT; 1033 return -EFAULT;
1033 break; 1034 if (!lock_fb_info(info))
1034 } 1035 return -ENODEV;
1035 acquire_console_sem(); 1036 acquire_console_sem();
1036 info->flags |= FBINFO_MISC_USEREVENT; 1037 info->flags |= FBINFO_MISC_USEREVENT;
1037 ret = fb_set_var(info, &var); 1038 ret = fb_set_var(info, &var);
1038 info->flags &= ~FBINFO_MISC_USEREVENT; 1039 info->flags &= ~FBINFO_MISC_USEREVENT;
1039 release_console_sem(); 1040 release_console_sem();
1040 if (ret == 0 && copy_to_user(argp, &var, sizeof(var))) 1041 unlock_fb_info(info);
1042 if (!ret && copy_to_user(argp, &var, sizeof(var)))
1041 ret = -EFAULT; 1043 ret = -EFAULT;
1042 break; 1044 break;
1043 case FBIOGET_FSCREENINFO: 1045 case FBIOGET_FSCREENINFO:
1044 ret = copy_to_user(argp, &info->fix, 1046 if (!lock_fb_info(info))
1045 sizeof(fix)) ? -EFAULT : 0; 1047 return -ENODEV;
1048 fix = info->fix;
1049 unlock_fb_info(info);
1050
1051 ret = copy_to_user(argp, &fix, sizeof(fix)) ? -EFAULT : 0;
1046 break; 1052 break;
1047 case FBIOPUTCMAP: 1053 case FBIOPUTCMAP:
1048 if (copy_from_user(&cmap, argp, sizeof(cmap))) 1054 if (copy_from_user(&cmap, argp, sizeof(cmap)))
1049 ret = -EFAULT; 1055 return -EFAULT;
1050 else 1056 ret = fb_set_user_cmap(&cmap, info);
1051 ret = fb_set_user_cmap(&cmap, info);
1052 break; 1057 break;
1053 case FBIOGETCMAP: 1058 case FBIOGETCMAP:
1054 if (copy_from_user(&cmap, argp, sizeof(cmap))) 1059 if (copy_from_user(&cmap, argp, sizeof(cmap)))
1055 ret = -EFAULT; 1060 return -EFAULT;
1056 else 1061 if (!lock_fb_info(info))
1057 ret = fb_cmap_to_user(&info->cmap, &cmap); 1062 return -ENODEV;
1063 cmap_from = info->cmap;
1064 unlock_fb_info(info);
1065 ret = fb_cmap_to_user(&cmap_from, &cmap);
1058 break; 1066 break;
1059 case FBIOPAN_DISPLAY: 1067 case FBIOPAN_DISPLAY:
1060 if (copy_from_user(&var, argp, sizeof(var))) { 1068 if (copy_from_user(&var, argp, sizeof(var)))
1061 ret = -EFAULT; 1069 return -EFAULT;
1062 break; 1070 if (!lock_fb_info(info))
1063 } 1071 return -ENODEV;
1064 acquire_console_sem(); 1072 acquire_console_sem();
1065 ret = fb_pan_display(info, &var); 1073 ret = fb_pan_display(info, &var);
1066 release_console_sem(); 1074 release_console_sem();
1075 unlock_fb_info(info);
1067 if (ret == 0 && copy_to_user(argp, &var, sizeof(var))) 1076 if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
1068 ret = -EFAULT; 1077 return -EFAULT;
1069 break; 1078 break;
1070 case FBIO_CURSOR: 1079 case FBIO_CURSOR:
1071 ret = -EINVAL; 1080 ret = -EINVAL;
1072 break; 1081 break;
1073 case FBIOGET_CON2FBMAP: 1082 case FBIOGET_CON2FBMAP:
1074 if (copy_from_user(&con2fb, argp, sizeof(con2fb))) 1083 if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
1075 ret = -EFAULT; 1084 return -EFAULT;
1076 else if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES) 1085 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
1077 ret = -EINVAL; 1086 return -EINVAL;
1078 else { 1087 con2fb.framebuffer = -1;
1079 con2fb.framebuffer = -1; 1088 event.data = &con2fb;
1080 event.info = info; 1089
1081 event.data = &con2fb; 1090 if (!lock_fb_info(info))
1082 fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, 1091 return -ENODEV;
1083 &event); 1092 event.info = info;
1084 ret = copy_to_user(argp, &con2fb, 1093 fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event);
1085 sizeof(con2fb)) ? -EFAULT : 0; 1094 unlock_fb_info(info);
1086 } 1095
1096 ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0;
1087 break; 1097 break;
1088 case FBIOPUT_CON2FBMAP: 1098 case FBIOPUT_CON2FBMAP:
1089 if (copy_from_user(&con2fb, argp, sizeof(con2fb))) { 1099 if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
1090 ret = -EFAULT; 1100 return -EFAULT;
1091 break; 1101 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
1092 } 1102 return -EINVAL;
1093 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES) { 1103 if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
1094 ret = -EINVAL; 1104 return -EINVAL;
1095 break;
1096 }
1097 if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX) {
1098 ret = -EINVAL;
1099 break;
1100 }
1101 if (!registered_fb[con2fb.framebuffer]) 1105 if (!registered_fb[con2fb.framebuffer])
1102 request_module("fb%d", con2fb.framebuffer); 1106 request_module("fb%d", con2fb.framebuffer);
1103 if (!registered_fb[con2fb.framebuffer]) { 1107 if (!registered_fb[con2fb.framebuffer]) {
1104 ret = -EINVAL; 1108 ret = -EINVAL;
1105 break; 1109 break;
1106 } 1110 }
1107 event.info = info;
1108 event.data = &con2fb; 1111 event.data = &con2fb;
1112 if (!lock_fb_info(info))
1113 return -ENODEV;
1114 event.info = info;
1109 ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, 1115 ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP,
1110 &event); 1116 &event);
1117 unlock_fb_info(info);
1111 break; 1118 break;
1112 case FBIOBLANK: 1119 case FBIOBLANK:
1120 if (!lock_fb_info(info))
1121 return -ENODEV;
1113 acquire_console_sem(); 1122 acquire_console_sem();
1114 info->flags |= FBINFO_MISC_USEREVENT; 1123 info->flags |= FBINFO_MISC_USEREVENT;
1115 ret = fb_blank(info, arg); 1124 ret = fb_blank(info, arg);
1116 info->flags &= ~FBINFO_MISC_USEREVENT; 1125 info->flags &= ~FBINFO_MISC_USEREVENT;
1117 release_console_sem(); 1126 release_console_sem();
1118 break;; 1127 unlock_fb_info(info);
1128 break;
1119 default: 1129 default:
1120 if (fb->fb_ioctl == NULL) 1130 if (!lock_fb_info(info))
1121 ret = -ENOTTY; 1131 return -ENODEV;
1122 else 1132 fb = info->fbops;
1133 if (fb->fb_ioctl)
1123 ret = fb->fb_ioctl(info, cmd, arg); 1134 ret = fb->fb_ioctl(info, cmd, arg);
1135 else
1136 ret = -ENOTTY;
1137 unlock_fb_info(info);
1124 } 1138 }
1125 return ret; 1139 return ret;
1126} 1140}
1127 1141
1128static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1142static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1129__acquires(&info->lock)
1130__releases(&info->lock)
1131{ 1143{
1132 struct inode *inode = file->f_path.dentry->d_inode; 1144 struct inode *inode = file->f_path.dentry->d_inode;
1133 int fbidx = iminor(inode); 1145 int fbidx = iminor(inode);
1134 struct fb_info *info; 1146 struct fb_info *info = registered_fb[fbidx];
1135 long ret;
1136 1147
1137 info = registered_fb[fbidx]; 1148 return do_fb_ioctl(info, cmd, arg);
1138 mutex_lock(&info->lock);
1139 ret = do_fb_ioctl(info, cmd, arg);
1140 mutex_unlock(&info->lock);
1141 return ret;
1142} 1149}
1143 1150
1144#ifdef CONFIG_COMPAT 1151#ifdef CONFIG_COMPAT
@@ -1257,8 +1264,6 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
1257 1264
1258static long fb_compat_ioctl(struct file *file, unsigned int cmd, 1265static long fb_compat_ioctl(struct file *file, unsigned int cmd,
1259 unsigned long arg) 1266 unsigned long arg)
1260__acquires(&info->lock)
1261__releases(&info->lock)
1262{ 1267{
1263 struct inode *inode = file->f_path.dentry->d_inode; 1268 struct inode *inode = file->f_path.dentry->d_inode;
1264 int fbidx = iminor(inode); 1269 int fbidx = iminor(inode);
@@ -1266,7 +1271,6 @@ __releases(&info->lock)
1266 struct fb_ops *fb = info->fbops; 1271 struct fb_ops *fb = info->fbops;
1267 long ret = -ENOIOCTLCMD; 1272 long ret = -ENOIOCTLCMD;
1268 1273
1269 mutex_lock(&info->lock);
1270 switch(cmd) { 1274 switch(cmd) {
1271 case FBIOGET_VSCREENINFO: 1275 case FBIOGET_VSCREENINFO:
1272 case FBIOPUT_VSCREENINFO: 1276 case FBIOPUT_VSCREENINFO:
@@ -1292,7 +1296,6 @@ __releases(&info->lock)
1292 ret = fb->fb_compat_ioctl(info, cmd, arg); 1296 ret = fb->fb_compat_ioctl(info, cmd, arg);
1293 break; 1297 break;
1294 } 1298 }
1295 mutex_unlock(&info->lock);
1296 return ret; 1299 return ret;
1297} 1300}
1298#endif 1301#endif
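A minimal sketch of the per-ioctl locking pattern the fbmem.c changes above adopt, assuming the lock_fb_info()/unlock_fb_info() helpers this series relies on; the example_* name is made up and the body mirrors the FBIOBLANK case.

#include <linux/console.h>
#include <linux/errno.h>
#include <linux/fb.h>

static long example_blank_ioctl(struct fb_info *info, unsigned long arg)
{
	long ret;

	if (!lock_fb_info(info))	/* framebuffer may already be gone */
		return -ENODEV;
	acquire_console_sem();
	info->flags |= FBINFO_MISC_USEREVENT;
	ret = fb_blank(info, arg);
	info->flags &= ~FBINFO_MISC_USEREVENT;
	release_console_sem();
	unlock_fb_info(info);
	return ret;
}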
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index e3ff2b9e602f..33b7235f853b 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1208,9 +1208,11 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
1208 * check for an ELF header. If we find one, dump the first page to 1208 * check for an ELF header. If we find one, dump the first page to
1209 * aid in determining what was mapped here. 1209 * aid in determining what was mapped here.
1210 */ 1210 */
1211 if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) { 1211 if (FILTER(ELF_HEADERS) &&
1212 vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
1212 u32 __user *header = (u32 __user *) vma->vm_start; 1213 u32 __user *header = (u32 __user *) vma->vm_start;
1213 u32 word; 1214 u32 word;
1215 mm_segment_t fs = get_fs();
1214 /* 1216 /*
1215 * Doing it this way gets the constant folded by GCC. 1217 * Doing it this way gets the constant folded by GCC.
1216 */ 1218 */
@@ -1223,7 +1225,15 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
1223 magic.elfmag[EI_MAG1] = ELFMAG1; 1225 magic.elfmag[EI_MAG1] = ELFMAG1;
1224 magic.elfmag[EI_MAG2] = ELFMAG2; 1226 magic.elfmag[EI_MAG2] = ELFMAG2;
1225 magic.elfmag[EI_MAG3] = ELFMAG3; 1227 magic.elfmag[EI_MAG3] = ELFMAG3;
1226 if (get_user(word, header) == 0 && word == magic.cmp) 1228 /*
1229 * Switch to the user "segment" for get_user(),
1230 * then put back what elf_core_dump() had in place.
1231 */
1232 set_fs(USER_DS);
1233 if (unlikely(get_user(word, header)))
1234 word = 0;
1235 set_fs(fs);
1236 if (word == magic.cmp)
1227 return PAGE_SIZE; 1237 return PAGE_SIZE;
1228 } 1238 }
1229 1239
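The set_fs() sequence added above is a common pattern; here is a self-contained sketch under the same assumptions (the function name is hypothetical, the uaccess calls are the real ones).

#include <linux/types.h>
#include <linux/uaccess.h>

/* Read one 32-bit word from a user-space address in a context where the
 * address limit may have been widened (as in elf_core_dump()).  Returns 0
 * when the word cannot be read, which is fine for an ELF-magic probe. */
static u32 example_read_user_word(u32 __user *header)
{
	mm_segment_t old_fs = get_fs();
	u32 word;

	set_fs(USER_DS);	/* make get_user() check against user space */
	if (get_user(word, header))
		word = 0;
	set_fs(old_fs);		/* put back what the caller had in place */
	return word;
}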
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index f8fcf999ea1b..7bb3c020e570 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -16,3 +16,16 @@ config BTRFS_FS
16 module will be called btrfs. 16 module will be called btrfs.
17 17
18 If unsure, say N. 18 If unsure, say N.
19
20config BTRFS_FS_POSIX_ACL
21 bool "Btrfs POSIX Access Control Lists"
22 depends on BTRFS_FS
23 select FS_POSIX_ACL
24 help
25 POSIX Access Control Lists (ACLs) support permissions for users and
26 groups beyond the owner/group/world scheme.
27
28 To learn more about Access Control Lists, visit the POSIX ACLs for
29 Linux website <http://acl.bestbits.at/>.
30
31 If you don't know what Access Control Lists are, say N.
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 8e2fec05dbe0..c84ca1f5259a 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -16,11 +16,11 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/version.h>
20#include <linux/kthread.h> 19#include <linux/kthread.h>
21#include <linux/list.h> 20#include <linux/list.h>
22#include <linux/spinlock.h> 21#include <linux/spinlock.h>
23# include <linux/freezer.h> 22#include <linux/freezer.h>
23#include <linux/ftrace.h>
24#include "async-thread.h" 24#include "async-thread.h"
25 25
26#define WORK_QUEUED_BIT 0 26#define WORK_QUEUED_BIT 0
@@ -143,6 +143,7 @@ static int worker_loop(void *arg)
143 struct btrfs_work *work; 143 struct btrfs_work *work;
144 do { 144 do {
145 spin_lock_irq(&worker->lock); 145 spin_lock_irq(&worker->lock);
146again_locked:
146 while (!list_empty(&worker->pending)) { 147 while (!list_empty(&worker->pending)) {
147 cur = worker->pending.next; 148 cur = worker->pending.next;
148 work = list_entry(cur, struct btrfs_work, list); 149 work = list_entry(cur, struct btrfs_work, list);
@@ -165,14 +166,50 @@ static int worker_loop(void *arg)
165 check_idle_worker(worker); 166 check_idle_worker(worker);
166 167
167 } 168 }
168 worker->working = 0;
169 if (freezing(current)) { 169 if (freezing(current)) {
170 worker->working = 0;
171 spin_unlock_irq(&worker->lock);
170 refrigerator(); 172 refrigerator();
171 } else { 173 } else {
172 set_current_state(TASK_INTERRUPTIBLE);
173 spin_unlock_irq(&worker->lock); 174 spin_unlock_irq(&worker->lock);
174 if (!kthread_should_stop()) 175 if (!kthread_should_stop()) {
176 cpu_relax();
177 /*
178 * we've dropped the lock, did someone else
179 * jump in?
180 */
181 smp_mb();
182 if (!list_empty(&worker->pending))
183 continue;
184
185 /*
186 * this short schedule allows more work to
187 * come in without the queue functions
188 * needing to go through wake_up_process()
189 *
190 * worker->working is still 1, so nobody
191 * is going to try and wake us up
192 */
193 schedule_timeout(1);
194 smp_mb();
195 if (!list_empty(&worker->pending))
196 continue;
197
198 /* still no more work? sleep for real */
199 spin_lock_irq(&worker->lock);
200 set_current_state(TASK_INTERRUPTIBLE);
201 if (!list_empty(&worker->pending))
202 goto again_locked;
203
204 /*
205 * this makes sure we get a wakeup when someone
206 * adds something new to the queue
207 */
208 worker->working = 0;
209 spin_unlock_irq(&worker->lock);
210
175 schedule(); 211 schedule();
212 }
176 __set_current_state(TASK_RUNNING); 213 __set_current_state(TASK_RUNNING);
177 } 214 }
178 } while (!kthread_should_stop()); 215 } while (!kthread_should_stop());
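
The worker_loop() change above avoids lost wakeups while keeping the common case cheap: it leaves worker->working set while it polls briefly (so producers skip wake_up_process()), and only clears it after re-checking the pending list under the lock with the task state already set. A condensed sketch of that idle sequence, using a hypothetical demo_worker type with the same fields:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* hypothetical worker with the fields worker_loop() relies on */
struct demo_worker {
	spinlock_t lock;
	struct list_head pending;
	int working;
};

static void demo_wait_for_work(struct demo_worker *w)
{
	/* cheap window for new work; producers still see working == 1 */
	schedule_timeout_uninterruptible(1);
	smp_mb();
	if (!list_empty(&w->pending))
		return;

	spin_lock_irq(&w->lock);
	set_current_state(TASK_INTERRUPTIBLE);
	if (!list_empty(&w->pending)) {
		__set_current_state(TASK_RUNNING);
		spin_unlock_irq(&w->lock);
		return;
	}
	w->working = 0;		/* from here on, producers will wake us */
	spin_unlock_irq(&w->lock);
	schedule();
	__set_current_state(TASK_RUNNING);
}
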
@@ -350,13 +387,14 @@ int btrfs_requeue_work(struct btrfs_work *work)
350{ 387{
351 struct btrfs_worker_thread *worker = work->worker; 388 struct btrfs_worker_thread *worker = work->worker;
352 unsigned long flags; 389 unsigned long flags;
390 int wake = 0;
353 391
354 if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags)) 392 if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
355 goto out; 393 goto out;
356 394
357 spin_lock_irqsave(&worker->lock, flags); 395 spin_lock_irqsave(&worker->lock, flags);
358 atomic_inc(&worker->num_pending);
359 list_add_tail(&work->list, &worker->pending); 396 list_add_tail(&work->list, &worker->pending);
397 atomic_inc(&worker->num_pending);
360 398
361 /* by definition we're busy, take ourselves off the idle 399 /* by definition we're busy, take ourselves off the idle
362 * list 400 * list
@@ -368,10 +406,16 @@ int btrfs_requeue_work(struct btrfs_work *work)
368 &worker->workers->worker_list); 406 &worker->workers->worker_list);
369 spin_unlock_irqrestore(&worker->workers->lock, flags); 407 spin_unlock_irqrestore(&worker->workers->lock, flags);
370 } 408 }
409 if (!worker->working) {
410 wake = 1;
411 worker->working = 1;
412 }
371 413
372 spin_unlock_irqrestore(&worker->lock, flags); 414 spin_unlock_irqrestore(&worker->lock, flags);
373 415 if (wake)
416 wake_up_process(worker->task);
374out: 417out:
418
375 return 0; 419 return 0;
376} 420}
377 421
@@ -398,9 +442,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
398 } 442 }
399 443
400 spin_lock_irqsave(&worker->lock, flags); 444 spin_lock_irqsave(&worker->lock, flags);
445
446 list_add_tail(&work->list, &worker->pending);
401 atomic_inc(&worker->num_pending); 447 atomic_inc(&worker->num_pending);
402 check_busy_worker(worker); 448 check_busy_worker(worker);
403 list_add_tail(&work->list, &worker->pending);
404 449
405 /* 450 /*
406 * avoid calling into wake_up_process if this thread has already 451 * avoid calling into wake_up_process if this thread has already
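
Both queueing paths above now decide whether a wakeup is needed while holding worker->lock (by flipping worker->working), but defer the actual wake_up_process() until after the lock is dropped, so the woken thread never contends on the lock immediately. A sketch of that pattern, reusing the demo_worker fields from the previous sketch; "task" stands for the worker's kthread:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>

static void demo_queue_work(struct demo_worker *w, struct list_head *item,
			    struct task_struct *task)
{
	unsigned long flags;
	int wake = 0;

	spin_lock_irqsave(&w->lock, flags);
	list_add_tail(item, &w->pending);
	if (!w->working) {
		w->working = 1;
		wake = 1;
	}
	spin_unlock_irqrestore(&w->lock, flags);

	if (wake)
		wake_up_process(task);
}
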
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ee848d8585d9..ab07627084f1 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -32,7 +32,6 @@
32#include <linux/swap.h> 32#include <linux/swap.h>
33#include <linux/writeback.h> 33#include <linux/writeback.h>
34#include <linux/bit_spinlock.h> 34#include <linux/bit_spinlock.h>
35#include <linux/version.h>
36#include <linux/pagevec.h> 35#include <linux/pagevec.h>
37#include "compat.h" 36#include "compat.h"
38#include "ctree.h" 37#include "ctree.h"
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 9e46c0776816..35443cc4b9a9 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -54,6 +54,31 @@ struct btrfs_path *btrfs_alloc_path(void)
54 return path; 54 return path;
55} 55}
56 56
57/*
58 * set all locked nodes in the path to blocking locks. This should
59 * be done before scheduling
60 */
61noinline void btrfs_set_path_blocking(struct btrfs_path *p)
62{
63 int i;
64 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
65 if (p->nodes[i] && p->locks[i])
66 btrfs_set_lock_blocking(p->nodes[i]);
67 }
68}
69
70/*
 71 * reset all the locked nodes in the path to spinning locks.
72 */
73noinline void btrfs_clear_path_blocking(struct btrfs_path *p)
74{
75 int i;
76 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
77 if (p->nodes[i] && p->locks[i])
78 btrfs_clear_lock_blocking(p->nodes[i]);
79 }
80}
81
57/* this also releases the path */ 82/* this also releases the path */
58void btrfs_free_path(struct btrfs_path *p) 83void btrfs_free_path(struct btrfs_path *p)
59{ 84{
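
btrfs_set_path_blocking() and btrfs_clear_path_blocking() are the new bracketing helpers: every spinning tree lock held in the path is switched to its blocking form before code that can schedule, and back afterwards. A hypothetical caller might look like the sketch below; demo_cow_with_blocking() is not in the patch, and btrfs_cow_block()'s argument list is taken from the hunks that follow:

#include "ctree.h"
#include "locking.h"

static int demo_cow_with_blocking(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct extent_buffer *b, int level)
{
	int ret;

	btrfs_set_path_blocking(path);		/* about to possibly sleep */
	ret = btrfs_cow_block(trans, root, b, path->nodes[level + 1],
			      path->slots[level + 1], &b, 0);
	btrfs_clear_path_blocking(path);	/* back to spinning locks */
	return ret;
}
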
@@ -272,6 +297,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
272 if (IS_ERR(cow)) 297 if (IS_ERR(cow))
273 return PTR_ERR(cow); 298 return PTR_ERR(cow);
274 299
300 /* cow is set to blocking by btrfs_init_new_buffer */
301
275 copy_extent_buffer(cow, buf, 0, 0, cow->len); 302 copy_extent_buffer(cow, buf, 0, 0, cow->len);
276 btrfs_set_header_bytenr(cow, cow->start); 303 btrfs_set_header_bytenr(cow, cow->start);
277 btrfs_set_header_generation(cow, trans->transid); 304 btrfs_set_header_generation(cow, trans->transid);
@@ -388,17 +415,20 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
388 WARN_ON(1); 415 WARN_ON(1);
389 } 416 }
390 417
391 spin_lock(&root->fs_info->hash_lock);
392 if (btrfs_header_generation(buf) == trans->transid && 418 if (btrfs_header_generation(buf) == trans->transid &&
393 btrfs_header_owner(buf) == root->root_key.objectid && 419 btrfs_header_owner(buf) == root->root_key.objectid &&
394 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { 420 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
395 *cow_ret = buf; 421 *cow_ret = buf;
396 spin_unlock(&root->fs_info->hash_lock);
397 WARN_ON(prealloc_dest); 422 WARN_ON(prealloc_dest);
398 return 0; 423 return 0;
399 } 424 }
400 spin_unlock(&root->fs_info->hash_lock); 425
401 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1); 426 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
427
428 if (parent)
429 btrfs_set_lock_blocking(parent);
430 btrfs_set_lock_blocking(buf);
431
402 ret = __btrfs_cow_block(trans, root, buf, parent, 432 ret = __btrfs_cow_block(trans, root, buf, parent,
403 parent_slot, cow_ret, search_start, 0, 433 parent_slot, cow_ret, search_start, 0,
404 prealloc_dest); 434 prealloc_dest);
@@ -504,6 +534,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
504 if (parent_nritems == 1) 534 if (parent_nritems == 1)
505 return 0; 535 return 0;
506 536
537 btrfs_set_lock_blocking(parent);
538
507 for (i = start_slot; i < end_slot; i++) { 539 for (i = start_slot; i < end_slot; i++) {
508 int close = 1; 540 int close = 1;
509 541
@@ -564,6 +596,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
564 search_start = last_block; 596 search_start = last_block;
565 597
566 btrfs_tree_lock(cur); 598 btrfs_tree_lock(cur);
599 btrfs_set_lock_blocking(cur);
567 err = __btrfs_cow_block(trans, root, cur, parent, i, 600 err = __btrfs_cow_block(trans, root, cur, parent, i,
568 &cur, search_start, 601 &cur, search_start,
569 min(16 * blocksize, 602 min(16 * blocksize,
@@ -862,6 +895,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
862 return 0; 895 return 0;
863 896
864 mid = path->nodes[level]; 897 mid = path->nodes[level];
898
865 WARN_ON(!path->locks[level]); 899 WARN_ON(!path->locks[level]);
866 WARN_ON(btrfs_header_generation(mid) != trans->transid); 900 WARN_ON(btrfs_header_generation(mid) != trans->transid);
867 901
@@ -884,6 +918,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
884 /* promote the child to a root */ 918 /* promote the child to a root */
885 child = read_node_slot(root, mid, 0); 919 child = read_node_slot(root, mid, 0);
886 btrfs_tree_lock(child); 920 btrfs_tree_lock(child);
921 btrfs_set_lock_blocking(child);
887 BUG_ON(!child); 922 BUG_ON(!child);
888 ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 0); 923 ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 0);
889 BUG_ON(ret); 924 BUG_ON(ret);
@@ -900,6 +935,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
900 935
901 add_root_to_dirty_list(root); 936 add_root_to_dirty_list(root);
902 btrfs_tree_unlock(child); 937 btrfs_tree_unlock(child);
938
903 path->locks[level] = 0; 939 path->locks[level] = 0;
904 path->nodes[level] = NULL; 940 path->nodes[level] = NULL;
905 clean_tree_block(trans, root, mid); 941 clean_tree_block(trans, root, mid);
@@ -924,6 +960,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
924 left = read_node_slot(root, parent, pslot - 1); 960 left = read_node_slot(root, parent, pslot - 1);
925 if (left) { 961 if (left) {
926 btrfs_tree_lock(left); 962 btrfs_tree_lock(left);
963 btrfs_set_lock_blocking(left);
927 wret = btrfs_cow_block(trans, root, left, 964 wret = btrfs_cow_block(trans, root, left,
928 parent, pslot - 1, &left, 0); 965 parent, pslot - 1, &left, 0);
929 if (wret) { 966 if (wret) {
@@ -934,6 +971,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
934 right = read_node_slot(root, parent, pslot + 1); 971 right = read_node_slot(root, parent, pslot + 1);
935 if (right) { 972 if (right) {
936 btrfs_tree_lock(right); 973 btrfs_tree_lock(right);
974 btrfs_set_lock_blocking(right);
937 wret = btrfs_cow_block(trans, root, right, 975 wret = btrfs_cow_block(trans, root, right,
938 parent, pslot + 1, &right, 0); 976 parent, pslot + 1, &right, 0);
939 if (wret) { 977 if (wret) {
@@ -1109,6 +1147,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1109 u32 left_nr; 1147 u32 left_nr;
1110 1148
1111 btrfs_tree_lock(left); 1149 btrfs_tree_lock(left);
1150 btrfs_set_lock_blocking(left);
1151
1112 left_nr = btrfs_header_nritems(left); 1152 left_nr = btrfs_header_nritems(left);
1113 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { 1153 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1114 wret = 1; 1154 wret = 1;
@@ -1155,7 +1195,10 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1155 */ 1195 */
1156 if (right) { 1196 if (right) {
1157 u32 right_nr; 1197 u32 right_nr;
1198
1158 btrfs_tree_lock(right); 1199 btrfs_tree_lock(right);
1200 btrfs_set_lock_blocking(right);
1201
1159 right_nr = btrfs_header_nritems(right); 1202 right_nr = btrfs_header_nritems(right);
1160 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { 1203 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1161 wret = 1; 1204 wret = 1;
@@ -1210,8 +1253,7 @@ static noinline void reada_for_search(struct btrfs_root *root,
1210 struct btrfs_disk_key disk_key; 1253 struct btrfs_disk_key disk_key;
1211 u32 nritems; 1254 u32 nritems;
1212 u64 search; 1255 u64 search;
1213 u64 lowest_read; 1256 u64 target;
1214 u64 highest_read;
1215 u64 nread = 0; 1257 u64 nread = 0;
1216 int direction = path->reada; 1258 int direction = path->reada;
1217 struct extent_buffer *eb; 1259 struct extent_buffer *eb;
@@ -1235,8 +1277,7 @@ static noinline void reada_for_search(struct btrfs_root *root,
1235 return; 1277 return;
1236 } 1278 }
1237 1279
1238 highest_read = search; 1280 target = search;
1239 lowest_read = search;
1240 1281
1241 nritems = btrfs_header_nritems(node); 1282 nritems = btrfs_header_nritems(node);
1242 nr = slot; 1283 nr = slot;
@@ -1256,27 +1297,80 @@ static noinline void reada_for_search(struct btrfs_root *root,
1256 break; 1297 break;
1257 } 1298 }
1258 search = btrfs_node_blockptr(node, nr); 1299 search = btrfs_node_blockptr(node, nr);
1259 if ((search >= lowest_read && search <= highest_read) || 1300 if ((search <= target && target - search <= 65536) ||
1260 (search < lowest_read && lowest_read - search <= 16384) || 1301 (search > target && search - target <= 65536)) {
1261 (search > highest_read && search - highest_read <= 16384)) {
1262 readahead_tree_block(root, search, blocksize, 1302 readahead_tree_block(root, search, blocksize,
1263 btrfs_node_ptr_generation(node, nr)); 1303 btrfs_node_ptr_generation(node, nr));
1264 nread += blocksize; 1304 nread += blocksize;
1265 } 1305 }
1266 nscan++; 1306 nscan++;
1267 if (path->reada < 2 && (nread > (64 * 1024) || nscan > 32)) 1307 if ((nread > 65536 || nscan > 32))
1268 break; 1308 break;
1309 }
1310}
1269 1311
1270 if (nread > (256 * 1024) || nscan > 128) 1312/*
1271 break; 1313 * returns -EAGAIN if it had to drop the path, or zero if everything was in
1314 * cache
1315 */
1316static noinline int reada_for_balance(struct btrfs_root *root,
1317 struct btrfs_path *path, int level)
1318{
1319 int slot;
1320 int nritems;
1321 struct extent_buffer *parent;
1322 struct extent_buffer *eb;
1323 u64 gen;
1324 u64 block1 = 0;
1325 u64 block2 = 0;
1326 int ret = 0;
1327 int blocksize;
1272 1328
1273 if (search < lowest_read) 1329 parent = path->nodes[level - 1];
1274 lowest_read = search; 1330 if (!parent)
1275 if (search > highest_read) 1331 return 0;
1276 highest_read = search; 1332
1333 nritems = btrfs_header_nritems(parent);
1334 slot = path->slots[level];
1335 blocksize = btrfs_level_size(root, level);
1336
1337 if (slot > 0) {
1338 block1 = btrfs_node_blockptr(parent, slot - 1);
1339 gen = btrfs_node_ptr_generation(parent, slot - 1);
1340 eb = btrfs_find_tree_block(root, block1, blocksize);
1341 if (eb && btrfs_buffer_uptodate(eb, gen))
1342 block1 = 0;
1343 free_extent_buffer(eb);
1344 }
1345 if (slot < nritems) {
1346 block2 = btrfs_node_blockptr(parent, slot + 1);
1347 gen = btrfs_node_ptr_generation(parent, slot + 1);
1348 eb = btrfs_find_tree_block(root, block2, blocksize);
1349 if (eb && btrfs_buffer_uptodate(eb, gen))
1350 block2 = 0;
1351 free_extent_buffer(eb);
1352 }
1353 if (block1 || block2) {
1354 ret = -EAGAIN;
1355 btrfs_release_path(root, path);
1356 if (block1)
1357 readahead_tree_block(root, block1, blocksize, 0);
1358 if (block2)
1359 readahead_tree_block(root, block2, blocksize, 0);
1360
1361 if (block1) {
1362 eb = read_tree_block(root, block1, blocksize, 0);
1363 free_extent_buffer(eb);
1364 }
 1365 if (block2) {
1366 eb = read_tree_block(root, block2, blocksize, 0);
1367 free_extent_buffer(eb);
1368 }
1277 } 1369 }
1370 return ret;
1278} 1371}
1279 1372
1373
1280/* 1374/*
1281 * when we walk down the tree, it is usually safe to unlock the higher layers 1375 * when we walk down the tree, it is usually safe to unlock the higher layers
1282 * in the tree. The exceptions are when our path goes through slot 0, because 1376 * in the tree. The exceptions are when our path goes through slot 0, because
@@ -1328,6 +1422,32 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
1328} 1422}
1329 1423
1330/* 1424/*
1425 * This releases any locks held in the path starting at level and
1426 * going all the way up to the root.
1427 *
1428 * btrfs_search_slot will keep the lock held on higher nodes in a few
1429 * corner cases, such as COW of the block at slot zero in the node. This
1430 * ignores those rules, and it should only be called when there are no
1431 * more updates to be done higher up in the tree.
1432 */
1433noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
1434{
1435 int i;
1436
1437 if (path->keep_locks || path->lowest_level)
1438 return;
1439
1440 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1441 if (!path->nodes[i])
1442 continue;
1443 if (!path->locks[i])
1444 continue;
1445 btrfs_tree_unlock(path->nodes[i]);
1446 path->locks[i] = 0;
1447 }
1448}
1449
1450/*
1331 * look for key in the tree. path is filled in with nodes along the way 1451 * look for key in the tree. path is filled in with nodes along the way
1332 * if key is found, we return zero and you can find the item in the leaf 1452 * if key is found, we return zero and you can find the item in the leaf
1333 * level of the path (level 0) 1453 * level of the path (level 0)
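
btrfs_unlock_up_safe() lets a caller that is done with the upper levels drop every lock above a given level in one call, as btrfs_insert_empty_items() and btrfs_del_leaf() do later in this diff. A hypothetical caller, assuming only the leaf still needs its lock (demo_touch_leaf() is illustration only):

#include "ctree.h"
#include "disk-io.h"

static int demo_touch_leaf(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path,
			   struct btrfs_key *key)
{
	int ret;

	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
	if (ret < 0)
		return ret;

	/* nothing above the leaf changes from here on */
	btrfs_unlock_up_safe(path, 1);

	/* ... modify the item in path->nodes[0] ... */
	btrfs_mark_buffer_dirty(path->nodes[0]);
	return 0;
}
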
@@ -1387,32 +1507,30 @@ again:
1387 int wret; 1507 int wret;
1388 1508
1389 /* is a cow on this block not required */ 1509 /* is a cow on this block not required */
1390 spin_lock(&root->fs_info->hash_lock);
1391 if (btrfs_header_generation(b) == trans->transid && 1510 if (btrfs_header_generation(b) == trans->transid &&
1392 btrfs_header_owner(b) == root->root_key.objectid && 1511 btrfs_header_owner(b) == root->root_key.objectid &&
1393 !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) { 1512 !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) {
1394 spin_unlock(&root->fs_info->hash_lock);
1395 goto cow_done; 1513 goto cow_done;
1396 } 1514 }
1397 spin_unlock(&root->fs_info->hash_lock);
1398 1515
1399 /* ok, we have to cow, is our old prealloc the right 1516 /* ok, we have to cow, is our old prealloc the right
1400 * size? 1517 * size?
1401 */ 1518 */
1402 if (prealloc_block.objectid && 1519 if (prealloc_block.objectid &&
1403 prealloc_block.offset != b->len) { 1520 prealloc_block.offset != b->len) {
1521 btrfs_release_path(root, p);
1404 btrfs_free_reserved_extent(root, 1522 btrfs_free_reserved_extent(root,
1405 prealloc_block.objectid, 1523 prealloc_block.objectid,
1406 prealloc_block.offset); 1524 prealloc_block.offset);
1407 prealloc_block.objectid = 0; 1525 prealloc_block.objectid = 0;
1526 goto again;
1408 } 1527 }
1409 1528
1410 /* 1529 /*
1411 * for higher level blocks, try not to allocate blocks 1530 * for higher level blocks, try not to allocate blocks
1412 * with the block and the parent locks held. 1531 * with the block and the parent locks held.
1413 */ 1532 */
1414 if (level > 1 && !prealloc_block.objectid && 1533 if (level > 0 && !prealloc_block.objectid) {
1415 btrfs_path_lock_waiting(p, level)) {
1416 u32 size = b->len; 1534 u32 size = b->len;
1417 u64 hint = b->start; 1535 u64 hint = b->start;
1418 1536
@@ -1425,6 +1543,8 @@ again:
1425 goto again; 1543 goto again;
1426 } 1544 }
1427 1545
1546 btrfs_set_path_blocking(p);
1547
1428 wret = btrfs_cow_block(trans, root, b, 1548 wret = btrfs_cow_block(trans, root, b,
1429 p->nodes[level + 1], 1549 p->nodes[level + 1],
1430 p->slots[level + 1], 1550 p->slots[level + 1],
@@ -1446,6 +1566,22 @@ cow_done:
1446 if (!p->skip_locking) 1566 if (!p->skip_locking)
1447 p->locks[level] = 1; 1567 p->locks[level] = 1;
1448 1568
1569 btrfs_clear_path_blocking(p);
1570
1571 /*
1572 * we have a lock on b and as long as we aren't changing
1573 * the tree, there is no way to for the items in b to change.
1574 * It is safe to drop the lock on our parent before we
1575 * go through the expensive btree search on b.
1576 *
1577 * If cow is true, then we might be changing slot zero,
1578 * which may require changing the parent. So, we can't
1579 * drop the lock until after we know which slot we're
1580 * operating on.
1581 */
1582 if (!cow)
1583 btrfs_unlock_up_safe(p, level + 1);
1584
1449 ret = check_block(root, p, level); 1585 ret = check_block(root, p, level);
1450 if (ret) { 1586 if (ret) {
1451 ret = -1; 1587 ret = -1;
@@ -1453,6 +1589,7 @@ cow_done:
1453 } 1589 }
1454 1590
1455 ret = bin_search(b, key, level, &slot); 1591 ret = bin_search(b, key, level, &slot);
1592
1456 if (level != 0) { 1593 if (level != 0) {
1457 if (ret && slot > 0) 1594 if (ret && slot > 0)
1458 slot -= 1; 1595 slot -= 1;
@@ -1460,7 +1597,16 @@ cow_done:
1460 if ((p->search_for_split || ins_len > 0) && 1597 if ((p->search_for_split || ins_len > 0) &&
1461 btrfs_header_nritems(b) >= 1598 btrfs_header_nritems(b) >=
1462 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) { 1599 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1463 int sret = split_node(trans, root, p, level); 1600 int sret;
1601
1602 sret = reada_for_balance(root, p, level);
1603 if (sret)
1604 goto again;
1605
1606 btrfs_set_path_blocking(p);
1607 sret = split_node(trans, root, p, level);
1608 btrfs_clear_path_blocking(p);
1609
1464 BUG_ON(sret > 0); 1610 BUG_ON(sret > 0);
1465 if (sret) { 1611 if (sret) {
1466 ret = sret; 1612 ret = sret;
@@ -1468,9 +1614,19 @@ cow_done:
1468 } 1614 }
1469 b = p->nodes[level]; 1615 b = p->nodes[level];
1470 slot = p->slots[level]; 1616 slot = p->slots[level];
1471 } else if (ins_len < 0) { 1617 } else if (ins_len < 0 &&
1472 int sret = balance_level(trans, root, p, 1618 btrfs_header_nritems(b) <
1473 level); 1619 BTRFS_NODEPTRS_PER_BLOCK(root) / 4) {
1620 int sret;
1621
1622 sret = reada_for_balance(root, p, level);
1623 if (sret)
1624 goto again;
1625
1626 btrfs_set_path_blocking(p);
1627 sret = balance_level(trans, root, p, level);
1628 btrfs_clear_path_blocking(p);
1629
1474 if (sret) { 1630 if (sret) {
1475 ret = sret; 1631 ret = sret;
1476 goto done; 1632 goto done;
@@ -1504,7 +1660,7 @@ cow_done:
1504 * of the btree by dropping locks before 1660 * of the btree by dropping locks before
1505 * we read. 1661 * we read.
1506 */ 1662 */
1507 if (level > 1) { 1663 if (level > 0) {
1508 btrfs_release_path(NULL, p); 1664 btrfs_release_path(NULL, p);
1509 if (tmp) 1665 if (tmp)
1510 free_extent_buffer(tmp); 1666 free_extent_buffer(tmp);
@@ -1519,6 +1675,7 @@ cow_done:
1519 free_extent_buffer(tmp); 1675 free_extent_buffer(tmp);
1520 goto again; 1676 goto again;
1521 } else { 1677 } else {
1678 btrfs_set_path_blocking(p);
1522 if (tmp) 1679 if (tmp)
1523 free_extent_buffer(tmp); 1680 free_extent_buffer(tmp);
1524 if (should_reada) 1681 if (should_reada)
@@ -1528,14 +1685,29 @@ cow_done:
1528 b = read_node_slot(root, b, slot); 1685 b = read_node_slot(root, b, slot);
1529 } 1686 }
1530 } 1687 }
1531 if (!p->skip_locking) 1688 if (!p->skip_locking) {
1532 btrfs_tree_lock(b); 1689 int lret;
1690
1691 btrfs_clear_path_blocking(p);
1692 lret = btrfs_try_spin_lock(b);
1693
1694 if (!lret) {
1695 btrfs_set_path_blocking(p);
1696 btrfs_tree_lock(b);
1697 btrfs_clear_path_blocking(p);
1698 }
1699 }
1533 } else { 1700 } else {
1534 p->slots[level] = slot; 1701 p->slots[level] = slot;
1535 if (ins_len > 0 && 1702 if (ins_len > 0 &&
1536 btrfs_leaf_free_space(root, b) < ins_len) { 1703 btrfs_leaf_free_space(root, b) < ins_len) {
1537 int sret = split_leaf(trans, root, key, 1704 int sret;
1705
1706 btrfs_set_path_blocking(p);
1707 sret = split_leaf(trans, root, key,
1538 p, ins_len, ret == 0); 1708 p, ins_len, ret == 0);
1709 btrfs_clear_path_blocking(p);
1710
1539 BUG_ON(sret > 0); 1711 BUG_ON(sret > 0);
1540 if (sret) { 1712 if (sret) {
1541 ret = sret; 1713 ret = sret;
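
The search path above now tries btrfs_try_spin_lock() first and only falls back to the blocking btrfs_tree_lock(), with the path marked blocking around it, when the spin attempt fails. Condensed into a stand-alone sketch (demo_lock_child() is an invented name):

#include "ctree.h"
#include "locking.h"

static void demo_lock_child(struct btrfs_path *p, struct extent_buffer *b)
{
	if (btrfs_try_spin_lock(b))
		return;			/* got it without blocking */

	btrfs_set_path_blocking(p);
	btrfs_tree_lock(b);		/* may schedule */
	btrfs_clear_path_blocking(p);
}
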
@@ -1549,12 +1721,16 @@ cow_done:
1549 } 1721 }
1550 ret = 1; 1722 ret = 1;
1551done: 1723done:
1724 /*
1725 * we don't really know what they plan on doing with the path
1726 * from here on, so for now just mark it as blocking
1727 */
1728 btrfs_set_path_blocking(p);
1552 if (prealloc_block.objectid) { 1729 if (prealloc_block.objectid) {
1553 btrfs_free_reserved_extent(root, 1730 btrfs_free_reserved_extent(root,
1554 prealloc_block.objectid, 1731 prealloc_block.objectid,
1555 prealloc_block.offset); 1732 prealloc_block.offset);
1556 } 1733 }
1557
1558 return ret; 1734 return ret;
1559} 1735}
1560 1736
@@ -1578,6 +1754,8 @@ int btrfs_merge_path(struct btrfs_trans_handle *trans,
1578 ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb, 0); 1754 ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb, 0);
1579 BUG_ON(ret); 1755 BUG_ON(ret);
1580 1756
1757 btrfs_set_lock_blocking(eb);
1758
1581 parent = eb; 1759 parent = eb;
1582 while (1) { 1760 while (1) {
1583 level = btrfs_header_level(parent); 1761 level = btrfs_header_level(parent);
@@ -1602,6 +1780,7 @@ int btrfs_merge_path(struct btrfs_trans_handle *trans,
1602 eb = read_tree_block(root, bytenr, blocksize, 1780 eb = read_tree_block(root, bytenr, blocksize,
1603 generation); 1781 generation);
1604 btrfs_tree_lock(eb); 1782 btrfs_tree_lock(eb);
1783 btrfs_set_lock_blocking(eb);
1605 } 1784 }
1606 1785
1607 /* 1786 /*
@@ -1626,6 +1805,7 @@ int btrfs_merge_path(struct btrfs_trans_handle *trans,
1626 eb = read_tree_block(root, bytenr, blocksize, 1805 eb = read_tree_block(root, bytenr, blocksize,
1627 generation); 1806 generation);
1628 btrfs_tree_lock(eb); 1807 btrfs_tree_lock(eb);
1808 btrfs_set_lock_blocking(eb);
1629 } 1809 }
1630 1810
1631 ret = btrfs_cow_block(trans, root, eb, parent, slot, 1811 ret = btrfs_cow_block(trans, root, eb, parent, slot,
@@ -2172,6 +2352,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2172 2352
2173 right = read_node_slot(root, upper, slot + 1); 2353 right = read_node_slot(root, upper, slot + 1);
2174 btrfs_tree_lock(right); 2354 btrfs_tree_lock(right);
2355 btrfs_set_lock_blocking(right);
2356
2175 free_space = btrfs_leaf_free_space(root, right); 2357 free_space = btrfs_leaf_free_space(root, right);
2176 if (free_space < data_size) 2358 if (free_space < data_size)
2177 goto out_unlock; 2359 goto out_unlock;
@@ -2367,6 +2549,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2367 2549
2368 left = read_node_slot(root, path->nodes[1], slot - 1); 2550 left = read_node_slot(root, path->nodes[1], slot - 1);
2369 btrfs_tree_lock(left); 2551 btrfs_tree_lock(left);
2552 btrfs_set_lock_blocking(left);
2553
2370 free_space = btrfs_leaf_free_space(root, left); 2554 free_space = btrfs_leaf_free_space(root, left);
2371 if (free_space < data_size) { 2555 if (free_space < data_size) {
2372 ret = 1; 2556 ret = 1;
@@ -2825,6 +3009,12 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
2825 path->keep_locks = 0; 3009 path->keep_locks = 0;
2826 BUG_ON(ret); 3010 BUG_ON(ret);
2827 3011
3012 /*
3013 * make sure any changes to the path from split_leaf leave it
3014 * in a blocking state
3015 */
3016 btrfs_set_path_blocking(path);
3017
2828 leaf = path->nodes[0]; 3018 leaf = path->nodes[0];
2829 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item)); 3019 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
2830 3020
@@ -3354,6 +3544,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3354 BUG(); 3544 BUG();
3355 } 3545 }
3356out: 3546out:
3547 btrfs_unlock_up_safe(path, 1);
3357 return ret; 3548 return ret;
3358} 3549}
3359 3550
@@ -3441,15 +3632,22 @@ noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3441{ 3632{
3442 int ret; 3633 int ret;
3443 u64 root_gen = btrfs_header_generation(path->nodes[1]); 3634 u64 root_gen = btrfs_header_generation(path->nodes[1]);
3635 u64 parent_start = path->nodes[1]->start;
3636 u64 parent_owner = btrfs_header_owner(path->nodes[1]);
3444 3637
3445 ret = del_ptr(trans, root, path, 1, path->slots[1]); 3638 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3446 if (ret) 3639 if (ret)
3447 return ret; 3640 return ret;
3448 3641
3642 /*
3643 * btrfs_free_extent is expensive, we want to make sure we
3644 * aren't holding any locks when we call it
3645 */
3646 btrfs_unlock_up_safe(path, 0);
3647
3449 ret = btrfs_free_extent(trans, root, bytenr, 3648 ret = btrfs_free_extent(trans, root, bytenr,
3450 btrfs_level_size(root, 0), 3649 btrfs_level_size(root, 0),
3451 path->nodes[1]->start, 3650 parent_start, parent_owner,
3452 btrfs_header_owner(path->nodes[1]),
3453 root_gen, 0, 1); 3651 root_gen, 0, 1);
3454 return ret; 3652 return ret;
3455} 3653}
@@ -3721,12 +3919,14 @@ find_next_key:
3721 */ 3919 */
3722 if (slot >= nritems) { 3920 if (slot >= nritems) {
3723 path->slots[level] = slot; 3921 path->slots[level] = slot;
3922 btrfs_set_path_blocking(path);
3724 sret = btrfs_find_next_key(root, path, min_key, level, 3923 sret = btrfs_find_next_key(root, path, min_key, level,
3725 cache_only, min_trans); 3924 cache_only, min_trans);
3726 if (sret == 0) { 3925 if (sret == 0) {
3727 btrfs_release_path(root, path); 3926 btrfs_release_path(root, path);
3728 goto again; 3927 goto again;
3729 } else { 3928 } else {
3929 btrfs_clear_path_blocking(path);
3730 goto out; 3930 goto out;
3731 } 3931 }
3732 } 3932 }
@@ -3738,16 +3938,20 @@ find_next_key:
3738 unlock_up(path, level, 1); 3938 unlock_up(path, level, 1);
3739 goto out; 3939 goto out;
3740 } 3940 }
3941 btrfs_set_path_blocking(path);
3741 cur = read_node_slot(root, cur, slot); 3942 cur = read_node_slot(root, cur, slot);
3742 3943
3743 btrfs_tree_lock(cur); 3944 btrfs_tree_lock(cur);
3945
3744 path->locks[level - 1] = 1; 3946 path->locks[level - 1] = 1;
3745 path->nodes[level - 1] = cur; 3947 path->nodes[level - 1] = cur;
3746 unlock_up(path, level, 1); 3948 unlock_up(path, level, 1);
3949 btrfs_clear_path_blocking(path);
3747 } 3950 }
3748out: 3951out:
3749 if (ret == 0) 3952 if (ret == 0)
3750 memcpy(min_key, &found_key, sizeof(found_key)); 3953 memcpy(min_key, &found_key, sizeof(found_key));
3954 btrfs_set_path_blocking(path);
3751 return ret; 3955 return ret;
3752} 3956}
3753 3957
@@ -3843,6 +4047,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
3843 if (ret < 0) 4047 if (ret < 0)
3844 return ret; 4048 return ret;
3845 4049
4050 btrfs_set_path_blocking(path);
3846 nritems = btrfs_header_nritems(path->nodes[0]); 4051 nritems = btrfs_header_nritems(path->nodes[0]);
3847 /* 4052 /*
3848 * by releasing the path above we dropped all our locks. A balance 4053 * by releasing the path above we dropped all our locks. A balance
@@ -3873,6 +4078,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
3873 free_extent_buffer(next); 4078 free_extent_buffer(next);
3874 } 4079 }
3875 4080
4081 /* the path was set to blocking above */
3876 if (level == 1 && (path->locks[1] || path->skip_locking) && 4082 if (level == 1 && (path->locks[1] || path->skip_locking) &&
3877 path->reada) 4083 path->reada)
3878 reada_for_search(root, path, level, slot, 0); 4084 reada_for_search(root, path, level, slot, 0);
@@ -3881,6 +4087,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
3881 if (!path->skip_locking) { 4087 if (!path->skip_locking) {
3882 WARN_ON(!btrfs_tree_locked(c)); 4088 WARN_ON(!btrfs_tree_locked(c));
3883 btrfs_tree_lock(next); 4089 btrfs_tree_lock(next);
4090 btrfs_set_lock_blocking(next);
3884 } 4091 }
3885 break; 4092 break;
3886 } 4093 }
@@ -3897,12 +4104,15 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
3897 path->locks[level] = 1; 4104 path->locks[level] = 1;
3898 if (!level) 4105 if (!level)
3899 break; 4106 break;
4107
4108 btrfs_set_path_blocking(path);
3900 if (level == 1 && path->locks[1] && path->reada) 4109 if (level == 1 && path->locks[1] && path->reada)
3901 reada_for_search(root, path, level, slot, 0); 4110 reada_for_search(root, path, level, slot, 0);
3902 next = read_node_slot(root, next, 0); 4111 next = read_node_slot(root, next, 0);
3903 if (!path->skip_locking) { 4112 if (!path->skip_locking) {
3904 WARN_ON(!btrfs_tree_locked(path->nodes[level])); 4113 WARN_ON(!btrfs_tree_locked(path->nodes[level]));
3905 btrfs_tree_lock(next); 4114 btrfs_tree_lock(next);
4115 btrfs_set_lock_blocking(next);
3906 } 4116 }
3907 } 4117 }
3908done: 4118done:
@@ -3927,6 +4137,7 @@ int btrfs_previous_item(struct btrfs_root *root,
3927 4137
3928 while (1) { 4138 while (1) {
3929 if (path->slots[0] == 0) { 4139 if (path->slots[0] == 0) {
4140 btrfs_set_path_blocking(path);
3930 ret = btrfs_prev_leaf(root, path); 4141 ret = btrfs_prev_leaf(root, path);
3931 if (ret != 0) 4142 if (ret != 0)
3932 return ret; 4143 return ret;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index eee060f88113..531db112c8bd 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -454,17 +454,11 @@ struct btrfs_timespec {
454 __le32 nsec; 454 __le32 nsec;
455} __attribute__ ((__packed__)); 455} __attribute__ ((__packed__));
456 456
457typedef enum { 457enum btrfs_compression_type {
458 BTRFS_COMPRESS_NONE = 0, 458 BTRFS_COMPRESS_NONE = 0,
459 BTRFS_COMPRESS_ZLIB = 1, 459 BTRFS_COMPRESS_ZLIB = 1,
460 BTRFS_COMPRESS_LAST = 2, 460 BTRFS_COMPRESS_LAST = 2,
461} btrfs_compression_type; 461};
462
463/* we don't understand any encryption methods right now */
464typedef enum {
465 BTRFS_ENCRYPTION_NONE = 0,
466 BTRFS_ENCRYPTION_LAST = 1,
467} btrfs_encryption_type;
468 462
469struct btrfs_inode_item { 463struct btrfs_inode_item {
470 /* nfs style generation number */ 464 /* nfs style generation number */
@@ -701,9 +695,7 @@ struct btrfs_fs_info {
701 struct btrfs_transaction *running_transaction; 695 struct btrfs_transaction *running_transaction;
702 wait_queue_head_t transaction_throttle; 696 wait_queue_head_t transaction_throttle;
703 wait_queue_head_t transaction_wait; 697 wait_queue_head_t transaction_wait;
704
705 wait_queue_head_t async_submit_wait; 698 wait_queue_head_t async_submit_wait;
706 wait_queue_head_t tree_log_wait;
707 699
708 struct btrfs_super_block super_copy; 700 struct btrfs_super_block super_copy;
709 struct btrfs_super_block super_for_commit; 701 struct btrfs_super_block super_for_commit;
@@ -711,7 +703,6 @@ struct btrfs_fs_info {
711 struct super_block *sb; 703 struct super_block *sb;
712 struct inode *btree_inode; 704 struct inode *btree_inode;
713 struct backing_dev_info bdi; 705 struct backing_dev_info bdi;
714 spinlock_t hash_lock;
715 struct mutex trans_mutex; 706 struct mutex trans_mutex;
716 struct mutex tree_log_mutex; 707 struct mutex tree_log_mutex;
717 struct mutex transaction_kthread_mutex; 708 struct mutex transaction_kthread_mutex;
@@ -730,10 +721,6 @@ struct btrfs_fs_info {
730 atomic_t async_submit_draining; 721 atomic_t async_submit_draining;
731 atomic_t nr_async_bios; 722 atomic_t nr_async_bios;
732 atomic_t async_delalloc_pages; 723 atomic_t async_delalloc_pages;
733 atomic_t tree_log_writers;
734 atomic_t tree_log_commit;
735 unsigned long tree_log_batch;
736 u64 tree_log_transid;
737 724
738 /* 725 /*
739 * this is used by the balancing code to wait for all the pending 726 * this is used by the balancing code to wait for all the pending
@@ -833,7 +820,14 @@ struct btrfs_root {
833 struct kobject root_kobj; 820 struct kobject root_kobj;
834 struct completion kobj_unregister; 821 struct completion kobj_unregister;
835 struct mutex objectid_mutex; 822 struct mutex objectid_mutex;
823
836 struct mutex log_mutex; 824 struct mutex log_mutex;
825 wait_queue_head_t log_writer_wait;
826 wait_queue_head_t log_commit_wait[2];
827 atomic_t log_writers;
828 atomic_t log_commit[2];
829 unsigned long log_transid;
830 unsigned long log_batch;
837 831
838 u64 objectid; 832 u64 objectid;
839 u64 last_trans; 833 u64 last_trans;
@@ -1841,6 +1835,10 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
1841struct btrfs_path *btrfs_alloc_path(void); 1835struct btrfs_path *btrfs_alloc_path(void);
1842void btrfs_free_path(struct btrfs_path *p); 1836void btrfs_free_path(struct btrfs_path *p);
1843void btrfs_init_path(struct btrfs_path *p); 1837void btrfs_init_path(struct btrfs_path *p);
1838void btrfs_set_path_blocking(struct btrfs_path *p);
1839void btrfs_clear_path_blocking(struct btrfs_path *p);
1840void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
1841
1844int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1842int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1845 struct btrfs_path *path, int slot, int nr); 1843 struct btrfs_path *path, int slot, int nr);
1846int btrfs_del_leaf(struct btrfs_trans_handle *trans, 1844int btrfs_del_leaf(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 81a313874ae5..5aebddd71193 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -16,7 +16,6 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/version.h>
20#include <linux/fs.h> 19#include <linux/fs.h>
21#include <linux/blkdev.h> 20#include <linux/blkdev.h>
22#include <linux/scatterlist.h> 21#include <linux/scatterlist.h>
@@ -800,7 +799,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
800 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); 799 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
801 800
802 if (ret == 0) 801 if (ret == 0)
803 buf->flags |= EXTENT_UPTODATE; 802 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
804 else 803 else
805 WARN_ON(1); 804 WARN_ON(1);
806 return buf; 805 return buf;
@@ -814,6 +813,10 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
814 if (btrfs_header_generation(buf) == 813 if (btrfs_header_generation(buf) ==
815 root->fs_info->running_transaction->transid) { 814 root->fs_info->running_transaction->transid) {
816 WARN_ON(!btrfs_tree_locked(buf)); 815 WARN_ON(!btrfs_tree_locked(buf));
816
817 /* ugh, clear_extent_buffer_dirty can be expensive */
818 btrfs_set_lock_blocking(buf);
819
817 clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, 820 clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
818 buf); 821 buf);
819 } 822 }
@@ -850,6 +853,14 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
850 spin_lock_init(&root->list_lock); 853 spin_lock_init(&root->list_lock);
851 mutex_init(&root->objectid_mutex); 854 mutex_init(&root->objectid_mutex);
852 mutex_init(&root->log_mutex); 855 mutex_init(&root->log_mutex);
856 init_waitqueue_head(&root->log_writer_wait);
857 init_waitqueue_head(&root->log_commit_wait[0]);
858 init_waitqueue_head(&root->log_commit_wait[1]);
859 atomic_set(&root->log_commit[0], 0);
860 atomic_set(&root->log_commit[1], 0);
861 atomic_set(&root->log_writers, 0);
862 root->log_batch = 0;
863 root->log_transid = 0;
853 extent_io_tree_init(&root->dirty_log_pages, 864 extent_io_tree_init(&root->dirty_log_pages,
854 fs_info->btree_inode->i_mapping, GFP_NOFS); 865 fs_info->btree_inode->i_mapping, GFP_NOFS);
855 866
@@ -934,15 +945,16 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
934 return 0; 945 return 0;
935} 946}
936 947
937int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, 948static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
938 struct btrfs_fs_info *fs_info) 949 struct btrfs_fs_info *fs_info)
939{ 950{
940 struct btrfs_root *root; 951 struct btrfs_root *root;
941 struct btrfs_root *tree_root = fs_info->tree_root; 952 struct btrfs_root *tree_root = fs_info->tree_root;
953 struct extent_buffer *leaf;
942 954
943 root = kzalloc(sizeof(*root), GFP_NOFS); 955 root = kzalloc(sizeof(*root), GFP_NOFS);
944 if (!root) 956 if (!root)
945 return -ENOMEM; 957 return ERR_PTR(-ENOMEM);
946 958
947 __setup_root(tree_root->nodesize, tree_root->leafsize, 959 __setup_root(tree_root->nodesize, tree_root->leafsize,
948 tree_root->sectorsize, tree_root->stripesize, 960 tree_root->sectorsize, tree_root->stripesize,
@@ -951,12 +963,23 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
951 root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID; 963 root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
952 root->root_key.type = BTRFS_ROOT_ITEM_KEY; 964 root->root_key.type = BTRFS_ROOT_ITEM_KEY;
953 root->root_key.offset = BTRFS_TREE_LOG_OBJECTID; 965 root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
966 /*
967 * log trees do not get reference counted because they go away
968 * before a real commit is actually done. They do store pointers
969 * to file data extents, and those reference counts still get
970 * updated (along with back refs to the log tree).
971 */
954 root->ref_cows = 0; 972 root->ref_cows = 0;
955 973
956 root->node = btrfs_alloc_free_block(trans, root, root->leafsize, 974 leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
957 0, BTRFS_TREE_LOG_OBJECTID, 975 0, BTRFS_TREE_LOG_OBJECTID,
958 trans->transid, 0, 0, 0); 976 trans->transid, 0, 0, 0);
977 if (IS_ERR(leaf)) {
978 kfree(root);
979 return ERR_CAST(leaf);
980 }
959 981
982 root->node = leaf;
960 btrfs_set_header_nritems(root->node, 0); 983 btrfs_set_header_nritems(root->node, 0);
961 btrfs_set_header_level(root->node, 0); 984 btrfs_set_header_level(root->node, 0);
962 btrfs_set_header_bytenr(root->node, root->node->start); 985 btrfs_set_header_bytenr(root->node, root->node->start);
@@ -968,7 +991,48 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
968 BTRFS_FSID_SIZE); 991 BTRFS_FSID_SIZE);
969 btrfs_mark_buffer_dirty(root->node); 992 btrfs_mark_buffer_dirty(root->node);
970 btrfs_tree_unlock(root->node); 993 btrfs_tree_unlock(root->node);
971 fs_info->log_root_tree = root; 994 return root;
995}
996
997int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
998 struct btrfs_fs_info *fs_info)
999{
1000 struct btrfs_root *log_root;
1001
1002 log_root = alloc_log_tree(trans, fs_info);
1003 if (IS_ERR(log_root))
1004 return PTR_ERR(log_root);
1005 WARN_ON(fs_info->log_root_tree);
1006 fs_info->log_root_tree = log_root;
1007 return 0;
1008}
1009
1010int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1011 struct btrfs_root *root)
1012{
1013 struct btrfs_root *log_root;
1014 struct btrfs_inode_item *inode_item;
1015
1016 log_root = alloc_log_tree(trans, root->fs_info);
1017 if (IS_ERR(log_root))
1018 return PTR_ERR(log_root);
1019
1020 log_root->last_trans = trans->transid;
1021 log_root->root_key.offset = root->root_key.objectid;
1022
1023 inode_item = &log_root->root_item.inode;
1024 inode_item->generation = cpu_to_le64(1);
1025 inode_item->size = cpu_to_le64(3);
1026 inode_item->nlink = cpu_to_le32(1);
1027 inode_item->nbytes = cpu_to_le64(root->leafsize);
1028 inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1029
1030 btrfs_set_root_bytenr(&log_root->root_item, log_root->node->start);
1031 btrfs_set_root_generation(&log_root->root_item, trans->transid);
1032
1033 WARN_ON(root->log_root);
1034 root->log_root = log_root;
1035 root->log_transid = 0;
972 return 0; 1036 return 0;
973} 1037}
974 1038
@@ -1136,7 +1200,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1136{ 1200{
1137 struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; 1201 struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1138 int ret = 0; 1202 int ret = 0;
1139 struct list_head *cur;
1140 struct btrfs_device *device; 1203 struct btrfs_device *device;
1141 struct backing_dev_info *bdi; 1204 struct backing_dev_info *bdi;
1142#if 0 1205#if 0
@@ -1144,8 +1207,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1144 btrfs_congested_async(info, 0)) 1207 btrfs_congested_async(info, 0))
1145 return 1; 1208 return 1;
1146#endif 1209#endif
1147 list_for_each(cur, &info->fs_devices->devices) { 1210 list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
1148 device = list_entry(cur, struct btrfs_device, dev_list);
1149 if (!device->bdev) 1211 if (!device->bdev)
1150 continue; 1212 continue;
1151 bdi = blk_get_backing_dev_info(device->bdev); 1213 bdi = blk_get_backing_dev_info(device->bdev);
@@ -1163,13 +1225,11 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1163 */ 1225 */
1164static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page) 1226static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1165{ 1227{
1166 struct list_head *cur;
1167 struct btrfs_device *device; 1228 struct btrfs_device *device;
1168 struct btrfs_fs_info *info; 1229 struct btrfs_fs_info *info;
1169 1230
1170 info = (struct btrfs_fs_info *)bdi->unplug_io_data; 1231 info = (struct btrfs_fs_info *)bdi->unplug_io_data;
1171 list_for_each(cur, &info->fs_devices->devices) { 1232 list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
1172 device = list_entry(cur, struct btrfs_device, dev_list);
1173 if (!device->bdev) 1233 if (!device->bdev)
1174 continue; 1234 continue;
1175 1235
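
Several hunks in disk-io.c replace open-coded list_for_each() + list_entry() pairs with list_for_each_entry(), which hands back the containing structure directly. A minimal stand-alone illustration with a made-up demo_device type:

#include <linux/list.h>

struct demo_device {
	struct list_head dev_list;
	struct block_device *bdev;
};

static int demo_any_device_missing_bdev(struct list_head *head)
{
	struct demo_device *device;

	list_for_each_entry(device, head, dev_list) {
		if (!device->bdev)
			return 1;
	}
	return 0;
}
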
@@ -1447,7 +1507,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1447 INIT_LIST_HEAD(&fs_info->dead_roots); 1507 INIT_LIST_HEAD(&fs_info->dead_roots);
1448 INIT_LIST_HEAD(&fs_info->hashers); 1508 INIT_LIST_HEAD(&fs_info->hashers);
1449 INIT_LIST_HEAD(&fs_info->delalloc_inodes); 1509 INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1450 spin_lock_init(&fs_info->hash_lock);
1451 spin_lock_init(&fs_info->delalloc_lock); 1510 spin_lock_init(&fs_info->delalloc_lock);
1452 spin_lock_init(&fs_info->new_trans_lock); 1511 spin_lock_init(&fs_info->new_trans_lock);
1453 spin_lock_init(&fs_info->ref_cache_lock); 1512 spin_lock_init(&fs_info->ref_cache_lock);
@@ -1535,10 +1594,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1535 init_waitqueue_head(&fs_info->transaction_throttle); 1594 init_waitqueue_head(&fs_info->transaction_throttle);
1536 init_waitqueue_head(&fs_info->transaction_wait); 1595 init_waitqueue_head(&fs_info->transaction_wait);
1537 init_waitqueue_head(&fs_info->async_submit_wait); 1596 init_waitqueue_head(&fs_info->async_submit_wait);
1538 init_waitqueue_head(&fs_info->tree_log_wait);
1539 atomic_set(&fs_info->tree_log_commit, 0);
1540 atomic_set(&fs_info->tree_log_writers, 0);
1541 fs_info->tree_log_transid = 0;
1542 1597
1543 __setup_root(4096, 4096, 4096, 4096, tree_root, 1598 __setup_root(4096, 4096, 4096, 4096, tree_root,
1544 fs_info, BTRFS_ROOT_TREE_OBJECTID); 1599 fs_info, BTRFS_ROOT_TREE_OBJECTID);
@@ -1627,6 +1682,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1627 * low idle thresh 1682 * low idle thresh
1628 */ 1683 */
1629 fs_info->endio_workers.idle_thresh = 4; 1684 fs_info->endio_workers.idle_thresh = 4;
1685 fs_info->endio_meta_workers.idle_thresh = 4;
1686
1630 fs_info->endio_write_workers.idle_thresh = 64; 1687 fs_info->endio_write_workers.idle_thresh = 64;
1631 fs_info->endio_meta_write_workers.idle_thresh = 64; 1688 fs_info->endio_meta_write_workers.idle_thresh = 64;
1632 1689
@@ -1740,13 +1797,13 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1740 fs_info->system_alloc_profile = fs_info->metadata_alloc_profile; 1797 fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1741 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, 1798 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1742 "btrfs-cleaner"); 1799 "btrfs-cleaner");
1743 if (!fs_info->cleaner_kthread) 1800 if (IS_ERR(fs_info->cleaner_kthread))
1744 goto fail_csum_root; 1801 goto fail_csum_root;
1745 1802
1746 fs_info->transaction_kthread = kthread_run(transaction_kthread, 1803 fs_info->transaction_kthread = kthread_run(transaction_kthread,
1747 tree_root, 1804 tree_root,
1748 "btrfs-transaction"); 1805 "btrfs-transaction");
1749 if (!fs_info->transaction_kthread) 1806 if (IS_ERR(fs_info->transaction_kthread))
1750 goto fail_cleaner; 1807 goto fail_cleaner;
1751 1808
1752 if (btrfs_super_log_root(disk_super) != 0) { 1809 if (btrfs_super_log_root(disk_super) != 0) {
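
The kthread_run() checks above change from !ptr to IS_ERR(ptr), since kthread_run() reports failure with an ERR_PTR(), never NULL. A small sketch of the corrected pattern (demo_start_cleaner() is illustration only):

#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *demo_start_cleaner(int (*fn)(void *), void *data)
{
	struct task_struct *task;

	task = kthread_run(fn, data, "demo-cleaner");
	if (IS_ERR(task))
		return NULL;	/* caller treats NULL as "not started" */
	return task;
}
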
@@ -1828,13 +1885,14 @@ fail_sb_buffer:
1828fail_iput: 1885fail_iput:
1829 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 1886 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
1830 iput(fs_info->btree_inode); 1887 iput(fs_info->btree_inode);
1831fail: 1888
1832 btrfs_close_devices(fs_info->fs_devices); 1889 btrfs_close_devices(fs_info->fs_devices);
1833 btrfs_mapping_tree_free(&fs_info->mapping_tree); 1890 btrfs_mapping_tree_free(&fs_info->mapping_tree);
1891 bdi_destroy(&fs_info->bdi);
1834 1892
1893fail:
1835 kfree(extent_root); 1894 kfree(extent_root);
1836 kfree(tree_root); 1895 kfree(tree_root);
1837 bdi_destroy(&fs_info->bdi);
1838 kfree(fs_info); 1896 kfree(fs_info);
1839 kfree(chunk_root); 1897 kfree(chunk_root);
1840 kfree(dev_root); 1898 kfree(dev_root);
@@ -1995,7 +2053,6 @@ static int write_dev_supers(struct btrfs_device *device,
1995 2053
1996int write_all_supers(struct btrfs_root *root, int max_mirrors) 2054int write_all_supers(struct btrfs_root *root, int max_mirrors)
1997{ 2055{
1998 struct list_head *cur;
1999 struct list_head *head = &root->fs_info->fs_devices->devices; 2056 struct list_head *head = &root->fs_info->fs_devices->devices;
2000 struct btrfs_device *dev; 2057 struct btrfs_device *dev;
2001 struct btrfs_super_block *sb; 2058 struct btrfs_super_block *sb;
@@ -2011,8 +2068,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2011 2068
2012 sb = &root->fs_info->super_for_commit; 2069 sb = &root->fs_info->super_for_commit;
2013 dev_item = &sb->dev_item; 2070 dev_item = &sb->dev_item;
2014 list_for_each(cur, head) { 2071 list_for_each_entry(dev, head, dev_list) {
2015 dev = list_entry(cur, struct btrfs_device, dev_list);
2016 if (!dev->bdev) { 2072 if (!dev->bdev) {
2017 total_errors++; 2073 total_errors++;
2018 continue; 2074 continue;
@@ -2045,8 +2101,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2045 } 2101 }
2046 2102
2047 total_errors = 0; 2103 total_errors = 0;
2048 list_for_each(cur, head) { 2104 list_for_each_entry(dev, head, dev_list) {
2049 dev = list_entry(cur, struct btrfs_device, dev_list);
2050 if (!dev->bdev) 2105 if (!dev->bdev)
2051 continue; 2106 continue;
2052 if (!dev->in_fs_metadata || !dev->writeable) 2107 if (!dev->in_fs_metadata || !dev->writeable)
@@ -2260,6 +2315,8 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2260 u64 transid = btrfs_header_generation(buf); 2315 u64 transid = btrfs_header_generation(buf);
2261 struct inode *btree_inode = root->fs_info->btree_inode; 2316 struct inode *btree_inode = root->fs_info->btree_inode;
2262 2317
2318 btrfs_set_lock_blocking(buf);
2319
2263 WARN_ON(!btrfs_tree_locked(buf)); 2320 WARN_ON(!btrfs_tree_locked(buf));
2264 if (transid != root->fs_info->generation) { 2321 if (transid != root->fs_info->generation) {
2265 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, " 2322 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
@@ -2302,14 +2359,13 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2302 int ret; 2359 int ret;
2303 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); 2360 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2304 if (ret == 0) 2361 if (ret == 0)
2305 buf->flags |= EXTENT_UPTODATE; 2362 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
2306 return ret; 2363 return ret;
2307} 2364}
2308 2365
2309int btree_lock_page_hook(struct page *page) 2366int btree_lock_page_hook(struct page *page)
2310{ 2367{
2311 struct inode *inode = page->mapping->host; 2368 struct inode *inode = page->mapping->host;
2312 struct btrfs_root *root = BTRFS_I(inode)->root;
2313 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 2369 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2314 struct extent_buffer *eb; 2370 struct extent_buffer *eb;
2315 unsigned long len; 2371 unsigned long len;
@@ -2324,9 +2380,7 @@ int btree_lock_page_hook(struct page *page)
2324 goto out; 2380 goto out;
2325 2381
2326 btrfs_tree_lock(eb); 2382 btrfs_tree_lock(eb);
2327 spin_lock(&root->fs_info->hash_lock);
2328 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); 2383 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2329 spin_unlock(&root->fs_info->hash_lock);
2330 btrfs_tree_unlock(eb); 2384 btrfs_tree_unlock(eb);
2331 free_extent_buffer(eb); 2385 free_extent_buffer(eb);
2332out: 2386out:
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index c0ff404c31b7..494a56eb2986 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -98,5 +98,7 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
98 struct btrfs_fs_info *fs_info); 98 struct btrfs_fs_info *fs_info);
99int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, 99int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
100 struct btrfs_fs_info *fs_info); 100 struct btrfs_fs_info *fs_info);
101int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
102 struct btrfs_root *root);
101int btree_lock_page_hook(struct page *page); 103int btree_lock_page_hook(struct page *page);
102#endif 104#endif
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 293da650873f..7527523c2d2d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -19,7 +19,7 @@
19#include <linux/pagemap.h> 19#include <linux/pagemap.h>
20#include <linux/writeback.h> 20#include <linux/writeback.h>
21#include <linux/blkdev.h> 21#include <linux/blkdev.h>
22#include <linux/version.h> 22#include <linux/sort.h>
23#include "compat.h" 23#include "compat.h"
24#include "hash.h" 24#include "hash.h"
25#include "crc32c.h" 25#include "crc32c.h"
@@ -30,7 +30,6 @@
30#include "volumes.h" 30#include "volumes.h"
31#include "locking.h" 31#include "locking.h"
32#include "ref-cache.h" 32#include "ref-cache.h"
33#include "compat.h"
34 33
35#define PENDING_EXTENT_INSERT 0 34#define PENDING_EXTENT_INSERT 0
36#define PENDING_EXTENT_DELETE 1 35#define PENDING_EXTENT_DELETE 1
@@ -326,10 +325,8 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
326 u64 flags) 325 u64 flags)
327{ 326{
328 struct list_head *head = &info->space_info; 327 struct list_head *head = &info->space_info;
329 struct list_head *cur;
330 struct btrfs_space_info *found; 328 struct btrfs_space_info *found;
331 list_for_each(cur, head) { 329 list_for_each_entry(found, head, list) {
332 found = list_entry(cur, struct btrfs_space_info, list);
333 if (found->flags == flags) 330 if (found->flags == flags)
334 return found; 331 return found;
335 } 332 }
@@ -1525,15 +1522,55 @@ out:
1525 return ret; 1522 return ret;
1526} 1523}
1527 1524
1528int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1525/* when a block goes through cow, we update the reference counts of
1529 struct extent_buffer *orig_buf, struct extent_buffer *buf, 1526 * everything that block points to. The internal pointers of the block
1530 u32 *nr_extents) 1527 * can be in just about any order, and it is likely to have clusters of
1528 * things that are close together and clusters of things that are not.
1529 *
1530 * To help reduce the seeks that come with updating all of these reference
1531 * counts, sort them by byte number before actual updates are done.
1532 *
1533 * struct refsort is used to match byte number to slot in the btree block.
1534 * we sort based on the byte number and then use the slot to actually
1535 * find the item.
1536 *
 1537 * struct refsort is smaller than struct btrfs_item and smaller than
1538 * struct btrfs_key_ptr. Since we're currently limited to the page size
1539 * for a btree block, there's no way for a kmalloc of refsorts for a
1540 * single node to be bigger than a page.
1541 */
1542struct refsort {
1543 u64 bytenr;
1544 u32 slot;
1545};
1546
1547/*
1548 * for passing into sort()
1549 */
1550static int refsort_cmp(const void *a_void, const void *b_void)
1551{
1552 const struct refsort *a = a_void;
1553 const struct refsort *b = b_void;
1554
1555 if (a->bytenr < b->bytenr)
1556 return -1;
1557 if (a->bytenr > b->bytenr)
1558 return 1;
1559 return 0;
1560}
1561
1562
1563noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
1564 struct btrfs_root *root,
1565 struct extent_buffer *orig_buf,
1566 struct extent_buffer *buf, u32 *nr_extents)
1531{ 1567{
1532 u64 bytenr; 1568 u64 bytenr;
1533 u64 ref_root; 1569 u64 ref_root;
1534 u64 orig_root; 1570 u64 orig_root;
1535 u64 ref_generation; 1571 u64 ref_generation;
1536 u64 orig_generation; 1572 u64 orig_generation;
1573 struct refsort *sorted;
1537 u32 nritems; 1574 u32 nritems;
1538 u32 nr_file_extents = 0; 1575 u32 nr_file_extents = 0;
1539 struct btrfs_key key; 1576 struct btrfs_key key;
@@ -1542,6 +1579,8 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1542 int level; 1579 int level;
1543 int ret = 0; 1580 int ret = 0;
1544 int faili = 0; 1581 int faili = 0;
1582 int refi = 0;
1583 int slot;
1545 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *, 1584 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
1546 u64, u64, u64, u64, u64, u64, u64, u64); 1585 u64, u64, u64, u64, u64, u64, u64, u64);
1547 1586
@@ -1553,6 +1592,9 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1553 nritems = btrfs_header_nritems(buf); 1592 nritems = btrfs_header_nritems(buf);
1554 level = btrfs_header_level(buf); 1593 level = btrfs_header_level(buf);
1555 1594
1595 sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
1596 BUG_ON(!sorted);
1597
1556 if (root->ref_cows) { 1598 if (root->ref_cows) {
1557 process_func = __btrfs_inc_extent_ref; 1599 process_func = __btrfs_inc_extent_ref;
1558 } else { 1600 } else {
@@ -1565,6 +1607,11 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1565 process_func = __btrfs_update_extent_ref; 1607 process_func = __btrfs_update_extent_ref;
1566 } 1608 }
1567 1609
1610 /*
1611 * we make two passes through the items. In the first pass we
1612 * only record the byte number and slot. Then we sort based on
1613 * byte number and do the actual work based on the sorted results
1614 */
1568 for (i = 0; i < nritems; i++) { 1615 for (i = 0; i < nritems; i++) {
1569 cond_resched(); 1616 cond_resched();
1570 if (level == 0) { 1617 if (level == 0) {
@@ -1581,6 +1628,32 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1581 continue; 1628 continue;
1582 1629
1583 nr_file_extents++; 1630 nr_file_extents++;
1631 sorted[refi].bytenr = bytenr;
1632 sorted[refi].slot = i;
1633 refi++;
1634 } else {
1635 bytenr = btrfs_node_blockptr(buf, i);
1636 sorted[refi].bytenr = bytenr;
1637 sorted[refi].slot = i;
1638 refi++;
1639 }
1640 }
1641 /*
1642 * if refi == 0, we didn't actually put anything into the sorted
1643 * array and we're done
1644 */
1645 if (refi == 0)
1646 goto out;
1647
1648 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
1649
1650 for (i = 0; i < refi; i++) {
1651 cond_resched();
1652 slot = sorted[i].slot;
1653 bytenr = sorted[i].bytenr;
1654
1655 if (level == 0) {
1656 btrfs_item_key_to_cpu(buf, &key, slot);
1584 1657
1585 ret = process_func(trans, root, bytenr, 1658 ret = process_func(trans, root, bytenr,
1586 orig_buf->start, buf->start, 1659 orig_buf->start, buf->start,
@@ -1589,25 +1662,25 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1589 key.objectid); 1662 key.objectid);
1590 1663
1591 if (ret) { 1664 if (ret) {
1592 faili = i; 1665 faili = slot;
1593 WARN_ON(1); 1666 WARN_ON(1);
1594 goto fail; 1667 goto fail;
1595 } 1668 }
1596 } else { 1669 } else {
1597 bytenr = btrfs_node_blockptr(buf, i);
1598 ret = process_func(trans, root, bytenr, 1670 ret = process_func(trans, root, bytenr,
1599 orig_buf->start, buf->start, 1671 orig_buf->start, buf->start,
1600 orig_root, ref_root, 1672 orig_root, ref_root,
1601 orig_generation, ref_generation, 1673 orig_generation, ref_generation,
1602 level - 1); 1674 level - 1);
1603 if (ret) { 1675 if (ret) {
1604 faili = i; 1676 faili = slot;
1605 WARN_ON(1); 1677 WARN_ON(1);
1606 goto fail; 1678 goto fail;
1607 } 1679 }
1608 } 1680 }
1609 } 1681 }
1610out: 1682out:
1683 kfree(sorted);
1611 if (nr_extents) { 1684 if (nr_extents) {
1612 if (level == 0) 1685 if (level == 0)
1613 *nr_extents = nr_file_extents; 1686 *nr_extents = nr_file_extents;
@@ -1616,6 +1689,7 @@ out:
1616 } 1689 }
1617 return 0; 1690 return 0;
1618fail: 1691fail:
1692 kfree(sorted);
1619 WARN_ON(1); 1693 WARN_ON(1);
1620 return ret; 1694 return ret;
1621} 1695}
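
The btrfs_inc_ref() change above is the core of the refsort idea: record (bytenr, slot) pairs in one pass, sort them, then do the reference updates in byte order so the extent allocation tree is walked roughly in disk order. A minimal userspace sketch of that two-pass pattern follows, using qsort() in place of the kernel's sort(); the comparator deliberately uses comparisons rather than subtraction so a large u64 difference cannot overflow the int return value. Nothing below is btrfs code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* mirrors struct refsort from the patch: byte number plus original slot */
struct refsort {
    uint64_t bytenr;
    uint32_t slot;
};

static int refsort_cmp(const void *a_void, const void *b_void)
{
    const struct refsort *a = a_void;
    const struct refsort *b = b_void;

    if (a->bytenr < b->bytenr)
        return -1;
    if (a->bytenr > b->bytenr)
        return 1;
    return 0;
}

int main(void)
{
    /* pretend these are the disk byte numbers found in tree order */
    uint64_t bytenrs[] = { 4096 * 9, 4096 * 2, 4096 * 7, 4096 * 3 };
    size_t n = sizeof(bytenrs) / sizeof(bytenrs[0]);
    struct refsort *sorted = malloc(n * sizeof(*sorted));
    size_t i;

    if (!sorted)
        return 1;

    /* pass one: record byte number and original slot */
    for (i = 0; i < n; i++) {
        sorted[i].bytenr = bytenrs[i];
        sorted[i].slot = (uint32_t)i;
    }

    /* sort by byte number so the reference updates touch the
     * extent allocation tree in roughly disk order */
    qsort(sorted, n, sizeof(*sorted), refsort_cmp);

    /* pass two: do the work in sorted order, using slot to find the item */
    for (i = 0; i < n; i++)
        printf("update ref for bytenr %llu (slot %u)\n",
               (unsigned long long)sorted[i].bytenr,
               (unsigned)sorted[i].slot);

    free(sorted);
    return 0;
}
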
@@ -2159,7 +2233,8 @@ again:
2159 ret = find_first_extent_bit(&info->extent_ins, search, &start, 2233 ret = find_first_extent_bit(&info->extent_ins, search, &start,
2160 &end, EXTENT_WRITEBACK); 2234 &end, EXTENT_WRITEBACK);
2161 if (ret) { 2235 if (ret) {
2162 if (skipped && all && !num_inserts) { 2236 if (skipped && all && !num_inserts &&
2237 list_empty(&update_list)) {
2163 skipped = 0; 2238 skipped = 0;
2164 search = 0; 2239 search = 0;
2165 continue; 2240 continue;
@@ -2547,6 +2622,7 @@ again:
2547 if (ret) { 2622 if (ret) {
2548 if (all && skipped && !nr) { 2623 if (all && skipped && !nr) {
2549 search = 0; 2624 search = 0;
2625 skipped = 0;
2550 continue; 2626 continue;
2551 } 2627 }
2552 mutex_unlock(&info->extent_ins_mutex); 2628 mutex_unlock(&info->extent_ins_mutex);
@@ -2700,13 +2776,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2700 /* if metadata always pin */ 2776 /* if metadata always pin */
2701 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) { 2777 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
2702 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 2778 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2703 struct btrfs_block_group_cache *cache; 2779 mutex_lock(&root->fs_info->pinned_mutex);
2704 2780 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2705 /* btrfs_free_reserved_extent */ 2781 mutex_unlock(&root->fs_info->pinned_mutex);
2706 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
2707 BUG_ON(!cache);
2708 btrfs_add_free_space(cache, bytenr, num_bytes);
2709 put_block_group(cache);
2710 update_reserved_extents(root, bytenr, num_bytes, 0); 2782 update_reserved_extents(root, bytenr, num_bytes, 0);
2711 return 0; 2783 return 0;
2712 } 2784 }
@@ -3014,7 +3086,6 @@ loop_check:
3014static void dump_space_info(struct btrfs_space_info *info, u64 bytes) 3086static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3015{ 3087{
3016 struct btrfs_block_group_cache *cache; 3088 struct btrfs_block_group_cache *cache;
3017 struct list_head *l;
3018 3089
3019 printk(KERN_INFO "space_info has %llu free, is %sfull\n", 3090 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
3020 (unsigned long long)(info->total_bytes - info->bytes_used - 3091 (unsigned long long)(info->total_bytes - info->bytes_used -
@@ -3022,8 +3093,7 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3022 (info->full) ? "" : "not "); 3093 (info->full) ? "" : "not ");
3023 3094
3024 down_read(&info->groups_sem); 3095 down_read(&info->groups_sem);
3025 list_for_each(l, &info->block_groups) { 3096 list_for_each_entry(cache, &info->block_groups, list) {
3026 cache = list_entry(l, struct btrfs_block_group_cache, list);
3027 spin_lock(&cache->lock); 3097 spin_lock(&cache->lock);
3028 printk(KERN_INFO "block group %llu has %llu bytes, %llu used " 3098 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
3029 "%llu pinned %llu reserved\n", 3099 "%llu pinned %llu reserved\n",
@@ -3342,7 +3412,10 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
3342 btrfs_set_header_generation(buf, trans->transid); 3412 btrfs_set_header_generation(buf, trans->transid);
3343 btrfs_tree_lock(buf); 3413 btrfs_tree_lock(buf);
3344 clean_tree_block(trans, root, buf); 3414 clean_tree_block(trans, root, buf);
3415
3416 btrfs_set_lock_blocking(buf);
3345 btrfs_set_buffer_uptodate(buf); 3417 btrfs_set_buffer_uptodate(buf);
3418
3346 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 3419 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
3347 set_extent_dirty(&root->dirty_log_pages, buf->start, 3420 set_extent_dirty(&root->dirty_log_pages, buf->start,
3348 buf->start + buf->len - 1, GFP_NOFS); 3421 buf->start + buf->len - 1, GFP_NOFS);
@@ -3351,6 +3424,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
3351 buf->start + buf->len - 1, GFP_NOFS); 3424 buf->start + buf->len - 1, GFP_NOFS);
3352 } 3425 }
3353 trans->blocks_used++; 3426 trans->blocks_used++;
3427 /* this returns a buffer locked for blocking */
3354 return buf; 3428 return buf;
3355} 3429}
3356 3430
@@ -3388,36 +3462,73 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3388{ 3462{
3389 u64 leaf_owner; 3463 u64 leaf_owner;
3390 u64 leaf_generation; 3464 u64 leaf_generation;
3465 struct refsort *sorted;
3391 struct btrfs_key key; 3466 struct btrfs_key key;
3392 struct btrfs_file_extent_item *fi; 3467 struct btrfs_file_extent_item *fi;
3393 int i; 3468 int i;
3394 int nritems; 3469 int nritems;
3395 int ret; 3470 int ret;
3471 int refi = 0;
3472 int slot;
3396 3473
3397 BUG_ON(!btrfs_is_leaf(leaf)); 3474 BUG_ON(!btrfs_is_leaf(leaf));
3398 nritems = btrfs_header_nritems(leaf); 3475 nritems = btrfs_header_nritems(leaf);
3399 leaf_owner = btrfs_header_owner(leaf); 3476 leaf_owner = btrfs_header_owner(leaf);
3400 leaf_generation = btrfs_header_generation(leaf); 3477 leaf_generation = btrfs_header_generation(leaf);
3401 3478
3479 sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
3480 /* we do this loop twice. The first time we build a list
3481 * of the extents we have a reference on, then we sort the list
3482 * by bytenr. The second time around we actually do the
3483 * extent freeing.
3484 */
3402 for (i = 0; i < nritems; i++) { 3485 for (i = 0; i < nritems; i++) {
3403 u64 disk_bytenr; 3486 u64 disk_bytenr;
3404 cond_resched(); 3487 cond_resched();
3405 3488
3406 btrfs_item_key_to_cpu(leaf, &key, i); 3489 btrfs_item_key_to_cpu(leaf, &key, i);
3490
3491 /* only extents have references, skip everything else */
3407 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) 3492 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3408 continue; 3493 continue;
3494
3409 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); 3495 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3496
3497 /* inline extents live in the btree, they don't have refs */
3410 if (btrfs_file_extent_type(leaf, fi) == 3498 if (btrfs_file_extent_type(leaf, fi) ==
3411 BTRFS_FILE_EXTENT_INLINE) 3499 BTRFS_FILE_EXTENT_INLINE)
3412 continue; 3500 continue;
3413 /* 3501
3414 * FIXME make sure to insert a trans record that
3415 * repeats the snapshot del on crash
3416 */
3417 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 3502 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
3503
3504 /* holes don't have refs */
3418 if (disk_bytenr == 0) 3505 if (disk_bytenr == 0)
3419 continue; 3506 continue;
3420 3507
3508 sorted[refi].bytenr = disk_bytenr;
3509 sorted[refi].slot = i;
3510 refi++;
3511 }
3512
3513 if (refi == 0)
3514 goto out;
3515
3516 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3517
3518 for (i = 0; i < refi; i++) {
3519 u64 disk_bytenr;
3520
3521 disk_bytenr = sorted[i].bytenr;
3522 slot = sorted[i].slot;
3523
3524 cond_resched();
3525
3526 btrfs_item_key_to_cpu(leaf, &key, slot);
3527 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3528 continue;
3529
3530 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
3531
3421 ret = __btrfs_free_extent(trans, root, disk_bytenr, 3532 ret = __btrfs_free_extent(trans, root, disk_bytenr,
3422 btrfs_file_extent_disk_num_bytes(leaf, fi), 3533 btrfs_file_extent_disk_num_bytes(leaf, fi),
3423 leaf->start, leaf_owner, leaf_generation, 3534 leaf->start, leaf_owner, leaf_generation,
@@ -3428,6 +3539,8 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3428 wake_up(&root->fs_info->transaction_throttle); 3539 wake_up(&root->fs_info->transaction_throttle);
3429 cond_resched(); 3540 cond_resched();
3430 } 3541 }
3542out:
3543 kfree(sorted);
3431 return 0; 3544 return 0;
3432} 3545}
3433 3546
@@ -3437,9 +3550,25 @@ static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3437{ 3550{
3438 int i; 3551 int i;
3439 int ret; 3552 int ret;
3440 struct btrfs_extent_info *info = ref->extents; 3553 struct btrfs_extent_info *info;
3554 struct refsort *sorted;
3555
3556 if (ref->nritems == 0)
3557 return 0;
3441 3558
3559 sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
3442 for (i = 0; i < ref->nritems; i++) { 3560 for (i = 0; i < ref->nritems; i++) {
3561 sorted[i].bytenr = ref->extents[i].bytenr;
3562 sorted[i].slot = i;
3563 }
3564 sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
3565
3566 /*
3567 * the items in the ref were sorted when the ref was inserted
3568 * into the ref cache, so this is already in order
3569 */
3570 for (i = 0; i < ref->nritems; i++) {
3571 info = ref->extents + sorted[i].slot;
3443 ret = __btrfs_free_extent(trans, root, info->bytenr, 3572 ret = __btrfs_free_extent(trans, root, info->bytenr,
3444 info->num_bytes, ref->bytenr, 3573 info->num_bytes, ref->bytenr,
3445 ref->owner, ref->generation, 3574 ref->owner, ref->generation,
@@ -3453,6 +3582,7 @@ static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3453 info++; 3582 info++;
3454 } 3583 }
3455 3584
3585 kfree(sorted);
3456 return 0; 3586 return 0;
3457} 3587}
3458 3588
@@ -3497,6 +3627,152 @@ static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
3497} 3627}
3498 3628
3499/* 3629/*
3630 * this is used while deleting old snapshots, and it drops the refs
3631 * on a whole subtree starting from a level 1 node.
3632 *
3633 * The idea is to sort all the leaf pointers, and then drop the
3634 * ref on all the leaves in order. Most of the time the leaves
3635 * will have ref cache entries, so no leaf IOs will be required to
3636 * find the extents they have references on.
3637 *
3638 * For each leaf, any references it has are also dropped in order
3639 *
3640 * This ends up dropping the references in something close to optimal
3641 * order for reading and modifying the extent allocation tree.
3642 */
3643static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
3644 struct btrfs_root *root,
3645 struct btrfs_path *path)
3646{
3647 u64 bytenr;
3648 u64 root_owner;
3649 u64 root_gen;
3650 struct extent_buffer *eb = path->nodes[1];
3651 struct extent_buffer *leaf;
3652 struct btrfs_leaf_ref *ref;
3653 struct refsort *sorted = NULL;
3654 int nritems = btrfs_header_nritems(eb);
3655 int ret;
3656 int i;
3657 int refi = 0;
3658 int slot = path->slots[1];
3659 u32 blocksize = btrfs_level_size(root, 0);
3660 u32 refs;
3661
3662 if (nritems == 0)
3663 goto out;
3664
3665 root_owner = btrfs_header_owner(eb);
3666 root_gen = btrfs_header_generation(eb);
3667 sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
3668
3669 /*
3670 * step one, sort all the leaf pointers so we don't scribble
3671 * randomly into the extent allocation tree
3672 */
3673 for (i = slot; i < nritems; i++) {
3674 sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
3675 sorted[refi].slot = i;
3676 refi++;
3677 }
3678
3679 /*
3680 * nritems won't be zero, but if we're picking up drop_snapshot
3681 * after a crash, slot might be > 0, so double check things
3682 * just in case.
3683 */
3684 if (refi == 0)
3685 goto out;
3686
3687 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3688
3689 /*
3690 * the first loop frees everything the leaves point to
3691 */
3692 for (i = 0; i < refi; i++) {
3693 u64 ptr_gen;
3694
3695 bytenr = sorted[i].bytenr;
3696
3697 /*
3698 * check the reference count on this leaf. If it is > 1
3699 * we just decrement it below and don't update any
3700 * of the refs the leaf points to.
3701 */
3702 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
3703 BUG_ON(ret);
3704 if (refs != 1)
3705 continue;
3706
3707 ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
3708
3709 /*
3710 * the leaf only had one reference, which means the
3711 * only thing pointing to this leaf is the snapshot
3712 * we're deleting. It isn't possible for the reference
3713 * count to increase again later
3714 *
3715 * The reference cache is checked for the leaf,
3716 * and if found we'll be able to drop any refs held by
3717 * the leaf without needing to read it in.
3718 */
3719 ref = btrfs_lookup_leaf_ref(root, bytenr);
3720 if (ref && ref->generation != ptr_gen) {
3721 btrfs_free_leaf_ref(root, ref);
3722 ref = NULL;
3723 }
3724 if (ref) {
3725 ret = cache_drop_leaf_ref(trans, root, ref);
3726 BUG_ON(ret);
3727 btrfs_remove_leaf_ref(root, ref);
3728 btrfs_free_leaf_ref(root, ref);
3729 } else {
3730 /*
3731 * the leaf wasn't in the reference cache, so
3732 * we have to read it.
3733 */
3734 leaf = read_tree_block(root, bytenr, blocksize,
3735 ptr_gen);
3736 ret = btrfs_drop_leaf_ref(trans, root, leaf);
3737 BUG_ON(ret);
3738 free_extent_buffer(leaf);
3739 }
3740 atomic_inc(&root->fs_info->throttle_gen);
3741 wake_up(&root->fs_info->transaction_throttle);
3742 cond_resched();
3743 }
3744
3745 /*
3746 * run through the loop again to free the refs on the leaves.
3747 * This is faster than doing it in the loop above because
3748 * the leaves are likely to be clustered together. We end up
3749 * working in nice chunks on the extent allocation tree.
3750 */
3751 for (i = 0; i < refi; i++) {
3752 bytenr = sorted[i].bytenr;
3753 ret = __btrfs_free_extent(trans, root, bytenr,
3754 blocksize, eb->start,
3755 root_owner, root_gen, 0, 1);
3756 BUG_ON(ret);
3757
3758 atomic_inc(&root->fs_info->throttle_gen);
3759 wake_up(&root->fs_info->transaction_throttle);
3760 cond_resched();
3761 }
3762out:
3763 kfree(sorted);
3764
3765 /*
3766 * update the path to show we've processed the entire level 1
3767 * node. This will get saved into the root's drop_snapshot_progress
3768 * field so these drops are not repeated again if this transaction
3769 * commits.
3770 */
3771 path->slots[1] = nritems;
3772 return 0;
3773}
3774
3775/*
3500 * helper function for drop_snapshot, this walks down the tree dropping ref 3776 * helper function for drop_snapshot, this walks down the tree dropping ref
3501 * counts as it goes. 3777 * counts as it goes.
3502 */ 3778 */
@@ -3511,7 +3787,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3511 struct extent_buffer *next; 3787 struct extent_buffer *next;
3512 struct extent_buffer *cur; 3788 struct extent_buffer *cur;
3513 struct extent_buffer *parent; 3789 struct extent_buffer *parent;
3514 struct btrfs_leaf_ref *ref;
3515 u32 blocksize; 3790 u32 blocksize;
3516 int ret; 3791 int ret;
3517 u32 refs; 3792 u32 refs;
@@ -3538,17 +3813,46 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3538 if (path->slots[*level] >= 3813 if (path->slots[*level] >=
3539 btrfs_header_nritems(cur)) 3814 btrfs_header_nritems(cur))
3540 break; 3815 break;
3816
3817 /* the new code goes down to level 1 and does all the
3818 * leaves pointed to that node in bulk. So, this check
3819 * for level 0 will always be false.
3820 *
3821 * But, the disk format allows the drop_snapshot_progress
3822 * field in the root to leave things in a state where
3823 * a leaf will need cleaning up here. If someone crashes
3824 * with the old code and then boots with the new code,
3825 * we might find a leaf here.
3826 */
3541 if (*level == 0) { 3827 if (*level == 0) {
3542 ret = btrfs_drop_leaf_ref(trans, root, cur); 3828 ret = btrfs_drop_leaf_ref(trans, root, cur);
3543 BUG_ON(ret); 3829 BUG_ON(ret);
3544 break; 3830 break;
3545 } 3831 }
3832
3833 /*
3834 * once we get to level one, process the whole node
3835 * at once, including everything below it.
3836 */
3837 if (*level == 1) {
3838 ret = drop_level_one_refs(trans, root, path);
3839 BUG_ON(ret);
3840 break;
3841 }
3842
3546 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 3843 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3547 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 3844 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3548 blocksize = btrfs_level_size(root, *level - 1); 3845 blocksize = btrfs_level_size(root, *level - 1);
3549 3846
3550 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs); 3847 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
3551 BUG_ON(ret); 3848 BUG_ON(ret);
3849
3850 /*
3851 * if there is more than one reference, we don't need
3852 * to read that node to drop any references it has. We
3853 * just drop the ref we hold on that node and move on to the
3854 * next slot in this level.
3855 */
3552 if (refs != 1) { 3856 if (refs != 1) {
3553 parent = path->nodes[*level]; 3857 parent = path->nodes[*level];
3554 root_owner = btrfs_header_owner(parent); 3858 root_owner = btrfs_header_owner(parent);
@@ -3567,46 +3871,12 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3567 3871
3568 continue; 3872 continue;
3569 } 3873 }
3874
3570 /* 3875 /*
3571 * at this point, we have a single ref, and since the 3876 * we need to keep freeing things in the next level down.
3572 * only place referencing this extent is a dead root 3877 * read the block and loop around to process it
3573 * the reference count should never go higher.
3574 * So, we don't need to check it again
3575 */ 3878 */
3576 if (*level == 1) { 3879 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3577 ref = btrfs_lookup_leaf_ref(root, bytenr);
3578 if (ref && ref->generation != ptr_gen) {
3579 btrfs_free_leaf_ref(root, ref);
3580 ref = NULL;
3581 }
3582 if (ref) {
3583 ret = cache_drop_leaf_ref(trans, root, ref);
3584 BUG_ON(ret);
3585 btrfs_remove_leaf_ref(root, ref);
3586 btrfs_free_leaf_ref(root, ref);
3587 *level = 0;
3588 break;
3589 }
3590 }
3591 next = btrfs_find_tree_block(root, bytenr, blocksize);
3592 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
3593 free_extent_buffer(next);
3594
3595 next = read_tree_block(root, bytenr, blocksize,
3596 ptr_gen);
3597 cond_resched();
3598#if 0
3599 /*
3600 * this is a debugging check and can go away
3601 * the ref should never go all the way down to 1
3602 * at this point
3603 */
3604 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
3605 &refs);
3606 BUG_ON(ret);
3607 WARN_ON(refs != 1);
3608#endif
3609 }
3610 WARN_ON(*level <= 0); 3880 WARN_ON(*level <= 0);
3611 if (path->nodes[*level-1]) 3881 if (path->nodes[*level-1])
3612 free_extent_buffer(path->nodes[*level-1]); 3882 free_extent_buffer(path->nodes[*level-1]);
@@ -3631,11 +3901,16 @@ out:
3631 root_owner = btrfs_header_owner(parent); 3901 root_owner = btrfs_header_owner(parent);
3632 root_gen = btrfs_header_generation(parent); 3902 root_gen = btrfs_header_generation(parent);
3633 3903
3904 /*
3905 * cleanup and free the reference on the last node
3906 * we processed
3907 */
3634 ret = __btrfs_free_extent(trans, root, bytenr, blocksize, 3908 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
3635 parent->start, root_owner, root_gen, 3909 parent->start, root_owner, root_gen,
3636 *level, 1); 3910 *level, 1);
3637 free_extent_buffer(path->nodes[*level]); 3911 free_extent_buffer(path->nodes[*level]);
3638 path->nodes[*level] = NULL; 3912 path->nodes[*level] = NULL;
3913
3639 *level += 1; 3914 *level += 1;
3640 BUG_ON(ret); 3915 BUG_ON(ret);
3641 3916
@@ -3687,6 +3962,7 @@ static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
3687 3962
3688 next = read_tree_block(root, bytenr, blocksize, ptr_gen); 3963 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3689 btrfs_tree_lock(next); 3964 btrfs_tree_lock(next);
3965 btrfs_set_lock_blocking(next);
3690 3966
3691 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize, 3967 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
3692 &refs); 3968 &refs);
@@ -3754,6 +4030,13 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
3754 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) { 4030 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
3755 struct extent_buffer *node; 4031 struct extent_buffer *node;
3756 struct btrfs_disk_key disk_key; 4032 struct btrfs_disk_key disk_key;
4033
4034 /*
4035 * there is more work to do in this level.
4036 * Update the drop_progress marker to reflect
4037 * the work we've done so far, and then bump
4038 * the slot number
4039 */
3757 node = path->nodes[i]; 4040 node = path->nodes[i];
3758 path->slots[i]++; 4041 path->slots[i]++;
3759 *level = i; 4042 *level = i;
@@ -3765,6 +4048,11 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
3765 return 0; 4048 return 0;
3766 } else { 4049 } else {
3767 struct extent_buffer *parent; 4050 struct extent_buffer *parent;
4051
4052 /*
4053 * this whole node is done, free our reference
4054 * on it and go up one level
4055 */
3768 if (path->nodes[*level] == root->node) 4056 if (path->nodes[*level] == root->node)
3769 parent = path->nodes[*level]; 4057 parent = path->nodes[*level];
3770 else 4058 else
@@ -4444,7 +4732,7 @@ static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
4444 u64 lock_end = 0; 4732 u64 lock_end = 0;
4445 u64 num_bytes; 4733 u64 num_bytes;
4446 u64 ext_offset; 4734 u64 ext_offset;
4447 u64 first_pos; 4735 u64 search_end = (u64)-1;
4448 u32 nritems; 4736 u32 nritems;
4449 int nr_scaned = 0; 4737 int nr_scaned = 0;
4450 int extent_locked = 0; 4738 int extent_locked = 0;
@@ -4452,7 +4740,6 @@ static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
4452 int ret; 4740 int ret;
4453 4741
4454 memcpy(&key, leaf_key, sizeof(key)); 4742 memcpy(&key, leaf_key, sizeof(key));
4455 first_pos = INT_LIMIT(loff_t) - extent_key->offset;
4456 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { 4743 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4457 if (key.objectid < ref_path->owner_objectid || 4744 if (key.objectid < ref_path->owner_objectid ||
4458 (key.objectid == ref_path->owner_objectid && 4745 (key.objectid == ref_path->owner_objectid &&
@@ -4501,7 +4788,7 @@ next:
4501 if ((key.objectid > ref_path->owner_objectid) || 4788 if ((key.objectid > ref_path->owner_objectid) ||
4502 (key.objectid == ref_path->owner_objectid && 4789 (key.objectid == ref_path->owner_objectid &&
4503 key.type > BTRFS_EXTENT_DATA_KEY) || 4790 key.type > BTRFS_EXTENT_DATA_KEY) ||
4504 (key.offset >= first_pos + extent_key->offset)) 4791 key.offset >= search_end)
4505 break; 4792 break;
4506 } 4793 }
4507 4794
@@ -4534,8 +4821,10 @@ next:
4534 num_bytes = btrfs_file_extent_num_bytes(leaf, fi); 4821 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4535 ext_offset = btrfs_file_extent_offset(leaf, fi); 4822 ext_offset = btrfs_file_extent_offset(leaf, fi);
4536 4823
4537 if (first_pos > key.offset - ext_offset) 4824 if (search_end == (u64)-1) {
4538 first_pos = key.offset - ext_offset; 4825 search_end = key.offset - ext_offset +
4826 btrfs_file_extent_ram_bytes(leaf, fi);
4827 }
4539 4828
4540 if (!extent_locked) { 4829 if (!extent_locked) {
4541 lock_start = key.offset; 4830 lock_start = key.offset;
@@ -4724,7 +5013,7 @@ next:
4724 } 5013 }
4725skip: 5014skip:
4726 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS && 5015 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
4727 key.offset >= first_pos + extent_key->offset) 5016 key.offset >= search_end)
4728 break; 5017 break;
4729 5018
4730 cond_resched(); 5019 cond_resched();
@@ -4778,6 +5067,7 @@ int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
4778 ref->bytenr = buf->start; 5067 ref->bytenr = buf->start;
4779 ref->owner = btrfs_header_owner(buf); 5068 ref->owner = btrfs_header_owner(buf);
4780 ref->generation = btrfs_header_generation(buf); 5069 ref->generation = btrfs_header_generation(buf);
5070
4781 ret = btrfs_add_leaf_ref(root, ref, 0); 5071 ret = btrfs_add_leaf_ref(root, ref, 0);
4782 WARN_ON(ret); 5072 WARN_ON(ret);
4783 btrfs_free_leaf_ref(root, ref); 5073 btrfs_free_leaf_ref(root, ref);
@@ -5957,9 +6247,11 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5957 path = btrfs_alloc_path(); 6247 path = btrfs_alloc_path();
5958 BUG_ON(!path); 6248 BUG_ON(!path);
5959 6249
5960 btrfs_remove_free_space_cache(block_group); 6250 spin_lock(&root->fs_info->block_group_cache_lock);
5961 rb_erase(&block_group->cache_node, 6251 rb_erase(&block_group->cache_node,
5962 &root->fs_info->block_group_cache_tree); 6252 &root->fs_info->block_group_cache_tree);
6253 spin_unlock(&root->fs_info->block_group_cache_lock);
6254 btrfs_remove_free_space_cache(block_group);
5963 down_write(&block_group->space_info->groups_sem); 6255 down_write(&block_group->space_info->groups_sem);
5964 list_del(&block_group->list); 6256 list_del(&block_group->list);
5965 up_write(&block_group->space_info->groups_sem); 6257 up_write(&block_group->space_info->groups_sem);
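
The last hunk changes btrfs_remove_block_group() to erase the group from the block group cache tree first, under block_group_cache_lock, and only then tear down its free space cache. A hedged userspace sketch of that ordering is below: unlink the object from the shared index while holding the lock, then free its private data once no lookup can reach it. All names here (struct group, fs_index, remove_group) are illustrative, not btrfs types.

#include <pthread.h>
#include <stdlib.h>

struct group {
    struct group *next;
    void *free_space_cache;
};

struct fs_index {
    pthread_mutex_t lock;   /* plays the role of block_group_cache_lock */
    struct group *head;
};

static void remove_group(struct fs_index *idx, struct group *grp)
{
    struct group **p;

    /* unlink from the shared index while holding the lock */
    pthread_mutex_lock(&idx->lock);
    for (p = &idx->head; *p; p = &(*p)->next) {
        if (*p == grp) {
            *p = grp->next;
            break;
        }
    }
    pthread_mutex_unlock(&idx->lock);

    /* no lookup can find grp any more, so tearing down its private
     * cache cannot race with a reader of the index */
    free(grp->free_space_cache);
    grp->free_space_cache = NULL;
}

int main(void)
{
    struct fs_index idx = { PTHREAD_MUTEX_INITIALIZER, NULL };
    struct group *g = calloc(1, sizeof(*g));

    if (!g)
        return 1;
    g->free_space_cache = malloc(16);   /* pretend cache data */
    g->next = idx.head;
    idx.head = g;

    remove_group(&idx, g);
    free(g);
    return idx.head != NULL;
}
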
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e086d407f1fa..37d43b516b79 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -9,7 +9,6 @@
9#include <linux/spinlock.h> 9#include <linux/spinlock.h>
10#include <linux/blkdev.h> 10#include <linux/blkdev.h>
11#include <linux/swap.h> 11#include <linux/swap.h>
12#include <linux/version.h>
13#include <linux/writeback.h> 12#include <linux/writeback.h>
14#include <linux/pagevec.h> 13#include <linux/pagevec.h>
15#include "extent_io.h" 14#include "extent_io.h"
@@ -31,7 +30,7 @@ static LIST_HEAD(buffers);
31static LIST_HEAD(states); 30static LIST_HEAD(states);
32 31
33#define LEAK_DEBUG 0 32#define LEAK_DEBUG 0
34#ifdef LEAK_DEBUG 33#if LEAK_DEBUG
35static DEFINE_SPINLOCK(leak_lock); 34static DEFINE_SPINLOCK(leak_lock);
36#endif 35#endif
37 36
@@ -120,7 +119,7 @@ void extent_io_tree_init(struct extent_io_tree *tree,
120static struct extent_state *alloc_extent_state(gfp_t mask) 119static struct extent_state *alloc_extent_state(gfp_t mask)
121{ 120{
122 struct extent_state *state; 121 struct extent_state *state;
123#ifdef LEAK_DEBUG 122#if LEAK_DEBUG
124 unsigned long flags; 123 unsigned long flags;
125#endif 124#endif
126 125
@@ -130,7 +129,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
130 state->state = 0; 129 state->state = 0;
131 state->private = 0; 130 state->private = 0;
132 state->tree = NULL; 131 state->tree = NULL;
133#ifdef LEAK_DEBUG 132#if LEAK_DEBUG
134 spin_lock_irqsave(&leak_lock, flags); 133 spin_lock_irqsave(&leak_lock, flags);
135 list_add(&state->leak_list, &states); 134 list_add(&state->leak_list, &states);
136 spin_unlock_irqrestore(&leak_lock, flags); 135 spin_unlock_irqrestore(&leak_lock, flags);
@@ -145,11 +144,11 @@ static void free_extent_state(struct extent_state *state)
145 if (!state) 144 if (!state)
146 return; 145 return;
147 if (atomic_dec_and_test(&state->refs)) { 146 if (atomic_dec_and_test(&state->refs)) {
148#ifdef LEAK_DEBUG 147#if LEAK_DEBUG
149 unsigned long flags; 148 unsigned long flags;
150#endif 149#endif
151 WARN_ON(state->tree); 150 WARN_ON(state->tree);
152#ifdef LEAK_DEBUG 151#if LEAK_DEBUG
153 spin_lock_irqsave(&leak_lock, flags); 152 spin_lock_irqsave(&leak_lock, flags);
154 list_del(&state->leak_list); 153 list_del(&state->leak_list);
155 spin_unlock_irqrestore(&leak_lock, flags); 154 spin_unlock_irqrestore(&leak_lock, flags);
@@ -2378,11 +2377,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
2378 int scanned = 0; 2377 int scanned = 0;
2379 int range_whole = 0; 2378 int range_whole = 0;
2380 2379
2381 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2382 wbc->encountered_congestion = 1;
2383 return 0;
2384 }
2385
2386 pagevec_init(&pvec, 0); 2380 pagevec_init(&pvec, 0);
2387 if (wbc->range_cyclic) { 2381 if (wbc->range_cyclic) {
2388 index = mapping->writeback_index; /* Start from prev offset */ 2382 index = mapping->writeback_index; /* Start from prev offset */
@@ -2855,6 +2849,98 @@ out:
2855 return sector; 2849 return sector;
2856} 2850}
2857 2851
2852int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2853 __u64 start, __u64 len, get_extent_t *get_extent)
2854{
2855 int ret;
2856 u64 off = start;
2857 u64 max = start + len;
2858 u32 flags = 0;
2859 u64 disko = 0;
2860 struct extent_map *em = NULL;
2861 int end = 0;
2862 u64 em_start = 0, em_len = 0;
2863 unsigned long emflags;
2864 ret = 0;
2865
2866 if (len == 0)
2867 return -EINVAL;
2868
2869 lock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
2870 GFP_NOFS);
2871 em = get_extent(inode, NULL, 0, off, max - off, 0);
2872 if (!em)
2873 goto out;
2874 if (IS_ERR(em)) {
2875 ret = PTR_ERR(em);
2876 goto out;
2877 }
2878 while (!end) {
2879 off = em->start + em->len;
2880 if (off >= max)
2881 end = 1;
2882
2883 em_start = em->start;
2884 em_len = em->len;
2885
2886 disko = 0;
2887 flags = 0;
2888
2889 switch (em->block_start) {
2890 case EXTENT_MAP_LAST_BYTE:
2891 end = 1;
2892 flags |= FIEMAP_EXTENT_LAST;
2893 break;
2894 case EXTENT_MAP_HOLE:
2895 flags |= FIEMAP_EXTENT_UNWRITTEN;
2896 break;
2897 case EXTENT_MAP_INLINE:
2898 flags |= (FIEMAP_EXTENT_DATA_INLINE |
2899 FIEMAP_EXTENT_NOT_ALIGNED);
2900 break;
2901 case EXTENT_MAP_DELALLOC:
2902 flags |= (FIEMAP_EXTENT_DELALLOC |
2903 FIEMAP_EXTENT_UNKNOWN);
2904 break;
2905 default:
2906 disko = em->block_start;
2907 break;
2908 }
2909 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2910 flags |= FIEMAP_EXTENT_ENCODED;
2911
2912 emflags = em->flags;
2913 free_extent_map(em);
2914 em = NULL;
2915
2916 if (!end) {
2917 em = get_extent(inode, NULL, 0, off, max - off, 0);
2918 if (!em)
2919 goto out;
2920 if (IS_ERR(em)) {
2921 ret = PTR_ERR(em);
2922 goto out;
2923 }
2924 emflags = em->flags;
2925 }
2926 if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
2927 flags |= FIEMAP_EXTENT_LAST;
2928 end = 1;
2929 }
2930
2931 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
2932 em_len, flags);
2933 if (ret)
2934 goto out_free;
2935 }
2936out_free:
2937 free_extent_map(em);
2938out:
2939 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
2940 GFP_NOFS);
2941 return ret;
2942}
2943
2858static inline struct page *extent_buffer_page(struct extent_buffer *eb, 2944static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2859 unsigned long i) 2945 unsigned long i)
2860{ 2946{
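
The new extent_fiemap()/btrfs_fiemap() pair is what ends up servicing the FS_IOC_FIEMAP ioctl for btrfs files. For reference, a minimal userspace consumer of that interface might look like the sketch below; it uses only the standard fiemap structures from <linux/fiemap.h>, requests up to 32 extent records, and trims most error handling.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fiemap.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
    int fd;
    unsigned int i;
    /* room for the header plus 32 extent records */
    size_t sz = sizeof(struct fiemap) + 32 * sizeof(struct fiemap_extent);
    struct fiemap *fm = calloc(1, sz);

    if (argc < 2 || !fm)
        return 1;

    fd = open(argv[1], O_RDONLY);
    if (fd < 0)
        return 1;

    fm->fm_start = 0;
    fm->fm_length = FIEMAP_MAX_OFFSET;   /* map the whole file */
    fm->fm_extent_count = 32;

    if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
        perror("FS_IOC_FIEMAP");
        return 1;
    }

    for (i = 0; i < fm->fm_mapped_extents; i++) {
        struct fiemap_extent *fe = &fm->fm_extents[i];

        printf("logical %llu physical %llu len %llu flags 0x%x\n",
               (unsigned long long)fe->fe_logical,
               (unsigned long long)fe->fe_physical,
               (unsigned long long)fe->fe_length,
               fe->fe_flags);
    }

    close(fd);
    free(fm);
    return 0;
}
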
@@ -2892,15 +2978,17 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2892 gfp_t mask) 2978 gfp_t mask)
2893{ 2979{
2894 struct extent_buffer *eb = NULL; 2980 struct extent_buffer *eb = NULL;
2895#ifdef LEAK_DEBUG 2981#if LEAK_DEBUG
2896 unsigned long flags; 2982 unsigned long flags;
2897#endif 2983#endif
2898 2984
2899 eb = kmem_cache_zalloc(extent_buffer_cache, mask); 2985 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2900 eb->start = start; 2986 eb->start = start;
2901 eb->len = len; 2987 eb->len = len;
2902 mutex_init(&eb->mutex); 2988 spin_lock_init(&eb->lock);
2903#ifdef LEAK_DEBUG 2989 init_waitqueue_head(&eb->lock_wq);
2990
2991#if LEAK_DEBUG
2904 spin_lock_irqsave(&leak_lock, flags); 2992 spin_lock_irqsave(&leak_lock, flags);
2905 list_add(&eb->leak_list, &buffers); 2993 list_add(&eb->leak_list, &buffers);
2906 spin_unlock_irqrestore(&leak_lock, flags); 2994 spin_unlock_irqrestore(&leak_lock, flags);
@@ -2912,7 +3000,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2912 3000
2913static void __free_extent_buffer(struct extent_buffer *eb) 3001static void __free_extent_buffer(struct extent_buffer *eb)
2914{ 3002{
2915#ifdef LEAK_DEBUG 3003#if LEAK_DEBUG
2916 unsigned long flags; 3004 unsigned long flags;
2917 spin_lock_irqsave(&leak_lock, flags); 3005 spin_lock_irqsave(&leak_lock, flags);
2918 list_del(&eb->leak_list); 3006 list_del(&eb->leak_list);
@@ -2980,8 +3068,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2980 unlock_page(p); 3068 unlock_page(p);
2981 } 3069 }
2982 if (uptodate) 3070 if (uptodate)
2983 eb->flags |= EXTENT_UPTODATE; 3071 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
2984 eb->flags |= EXTENT_BUFFER_FILLED;
2985 3072
2986 spin_lock(&tree->buffer_lock); 3073 spin_lock(&tree->buffer_lock);
2987 exists = buffer_tree_insert(tree, start, &eb->rb_node); 3074 exists = buffer_tree_insert(tree, start, &eb->rb_node);
@@ -3135,7 +3222,7 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3135 unsigned long num_pages; 3222 unsigned long num_pages;
3136 3223
3137 num_pages = num_extent_pages(eb->start, eb->len); 3224 num_pages = num_extent_pages(eb->start, eb->len);
3138 eb->flags &= ~EXTENT_UPTODATE; 3225 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3139 3226
3140 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, 3227 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3141 GFP_NOFS); 3228 GFP_NOFS);
@@ -3206,7 +3293,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
3206 struct page *page; 3293 struct page *page;
3207 int pg_uptodate = 1; 3294 int pg_uptodate = 1;
3208 3295
3209 if (eb->flags & EXTENT_UPTODATE) 3296 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3210 return 1; 3297 return 1;
3211 3298
3212 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1, 3299 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
@@ -3242,7 +3329,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
3242 struct bio *bio = NULL; 3329 struct bio *bio = NULL;
3243 unsigned long bio_flags = 0; 3330 unsigned long bio_flags = 0;
3244 3331
3245 if (eb->flags & EXTENT_UPTODATE) 3332 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3246 return 0; 3333 return 0;
3247 3334
3248 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1, 3335 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
@@ -3273,7 +3360,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
3273 } 3360 }
3274 if (all_uptodate) { 3361 if (all_uptodate) {
3275 if (start_i == 0) 3362 if (start_i == 0)
3276 eb->flags |= EXTENT_UPTODATE; 3363 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3277 goto unlock_exit; 3364 goto unlock_exit;
3278 } 3365 }
3279 3366
@@ -3309,7 +3396,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
3309 } 3396 }
3310 3397
3311 if (!ret) 3398 if (!ret)
3312 eb->flags |= EXTENT_UPTODATE; 3399 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3313 return ret; 3400 return ret;
3314 3401
3315unlock_exit: 3402unlock_exit:
@@ -3406,7 +3493,6 @@ int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3406 unmap_extent_buffer(eb, eb->map_token, km); 3493 unmap_extent_buffer(eb, eb->map_token, km);
3407 eb->map_token = NULL; 3494 eb->map_token = NULL;
3408 save = 1; 3495 save = 1;
3409 WARN_ON(!mutex_is_locked(&eb->mutex));
3410 } 3496 }
3411 err = map_private_extent_buffer(eb, start, min_len, token, map, 3497 err = map_private_extent_buffer(eb, start, min_len, token, map,
3412 map_start, map_len, km); 3498 map_start, map_len, km);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index c5b483a79137..1f9df88afbf6 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -22,6 +22,10 @@
22/* flags for bio submission */ 22/* flags for bio submission */
23#define EXTENT_BIO_COMPRESSED 1 23#define EXTENT_BIO_COMPRESSED 1
24 24
25/* these are bit numbers for test/set bit */
26#define EXTENT_BUFFER_UPTODATE 0
27#define EXTENT_BUFFER_BLOCKING 1
28
25/* 29/*
26 * page->private values. Every page that is controlled by the extent 30 * page->private values. Every page that is controlled by the extent
27 * map has page->private set to one. 31 * map has page->private set to one.
@@ -95,11 +99,19 @@ struct extent_buffer {
95 unsigned long map_start; 99 unsigned long map_start;
96 unsigned long map_len; 100 unsigned long map_len;
97 struct page *first_page; 101 struct page *first_page;
102 unsigned long bflags;
98 atomic_t refs; 103 atomic_t refs;
99 int flags;
100 struct list_head leak_list; 104 struct list_head leak_list;
101 struct rb_node rb_node; 105 struct rb_node rb_node;
102 struct mutex mutex; 106
107 /* the spinlock is used to protect most operations */
108 spinlock_t lock;
109
110 /*
111 * when we keep the lock held while blocking, waiters go onto
112 * the wq
113 */
114 wait_queue_head_t lock_wq;
103}; 115};
104 116
105struct extent_map_tree; 117struct extent_map_tree;
@@ -193,6 +205,8 @@ int extent_commit_write(struct extent_io_tree *tree,
193 unsigned from, unsigned to); 205 unsigned from, unsigned to);
194sector_t extent_bmap(struct address_space *mapping, sector_t iblock, 206sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
195 get_extent_t *get_extent); 207 get_extent_t *get_extent);
208int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
209 __u64 start, __u64 len, get_extent_t *get_extent);
196int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end); 210int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
197int set_state_private(struct extent_io_tree *tree, u64 start, u64 private); 211int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
198int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private); 212int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
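
Note the header change above: EXTENT_BUFFER_UPTODATE and EXTENT_BUFFER_BLOCKING are bit numbers for test_bit()/set_bit()/clear_bit() on eb->bflags, not masks to OR into the old int flags field. A small userspace analogue of that style, using C11 atomics in place of the kernel bitops, is sketched below.

#include <stdatomic.h>
#include <stdio.h>

/* bit numbers, as in the header change (values, not masks) */
#define EXTENT_BUFFER_UPTODATE 0
#define EXTENT_BUFFER_BLOCKING 1

/* stand-in for the kernel's atomic bitops on eb->bflags */
static atomic_ulong bflags;

static void set_flag(int nr)   { atomic_fetch_or(&bflags, 1UL << nr); }
static void clear_flag(int nr) { atomic_fetch_and(&bflags, ~(1UL << nr)); }
static int  test_flag(int nr)  { return (atomic_load(&bflags) >> nr) & 1; }

int main(void)
{
    set_flag(EXTENT_BUFFER_UPTODATE);
    printf("uptodate=%d blocking=%d\n",
           test_flag(EXTENT_BUFFER_UPTODATE),
           test_flag(EXTENT_BUFFER_BLOCKING));
    clear_flag(EXTENT_BUFFER_UPTODATE);
    printf("uptodate=%d\n", test_flag(EXTENT_BUFFER_UPTODATE));
    return 0;
}
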
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 4a83e33ada32..50da69da20ce 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -3,7 +3,6 @@
3#include <linux/slab.h> 3#include <linux/slab.h>
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/spinlock.h> 5#include <linux/spinlock.h>
6#include <linux/version.h>
7#include <linux/hardirq.h> 6#include <linux/hardirq.h>
8#include "extent_map.h" 7#include "extent_map.h"
9 8
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 90268334145e..3e8023efaff7 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -29,7 +29,6 @@
29#include <linux/writeback.h> 29#include <linux/writeback.h>
30#include <linux/statfs.h> 30#include <linux/statfs.h>
31#include <linux/compat.h> 31#include <linux/compat.h>
32#include <linux/version.h>
33#include "ctree.h" 32#include "ctree.h"
34#include "disk-io.h" 33#include "disk-io.h"
35#include "transaction.h" 34#include "transaction.h"
@@ -1215,10 +1214,10 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1215 } 1214 }
1216 mutex_unlock(&root->fs_info->trans_mutex); 1215 mutex_unlock(&root->fs_info->trans_mutex);
1217 1216
1218 root->fs_info->tree_log_batch++; 1217 root->log_batch++;
1219 filemap_fdatawrite(inode->i_mapping); 1218 filemap_fdatawrite(inode->i_mapping);
1220 btrfs_wait_ordered_range(inode, 0, (u64)-1); 1219 btrfs_wait_ordered_range(inode, 0, (u64)-1);
1221 root->fs_info->tree_log_batch++; 1220 root->log_batch++;
1222 1221
1223 /* 1222 /*
1224 * ok we haven't committed the transaction yet, lets do a commit 1223 * ok we haven't committed the transaction yet, lets do a commit
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8adfe059ab41..8f0706210a47 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -34,7 +34,6 @@
34#include <linux/statfs.h> 34#include <linux/statfs.h>
35#include <linux/compat.h> 35#include <linux/compat.h>
36#include <linux/bit_spinlock.h> 36#include <linux/bit_spinlock.h>
37#include <linux/version.h>
38#include <linux/xattr.h> 37#include <linux/xattr.h>
39#include <linux/posix_acl.h> 38#include <linux/posix_acl.h>
40#include <linux/falloc.h> 39#include <linux/falloc.h>
@@ -51,6 +50,7 @@
51#include "tree-log.h" 50#include "tree-log.h"
52#include "ref-cache.h" 51#include "ref-cache.h"
53#include "compression.h" 52#include "compression.h"
53#include "locking.h"
54 54
55struct btrfs_iget_args { 55struct btrfs_iget_args {
56 u64 ino; 56 u64 ino;
@@ -91,6 +91,16 @@ static noinline int cow_file_range(struct inode *inode,
91 u64 start, u64 end, int *page_started, 91 u64 start, u64 end, int *page_started,
92 unsigned long *nr_written, int unlock); 92 unsigned long *nr_written, int unlock);
93 93
94static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
95{
96 int err;
97
98 err = btrfs_init_acl(inode, dir);
99 if (!err)
100 err = btrfs_xattr_security_init(inode, dir);
101 return err;
102}
103
94/* 104/*
95 * a very lame attempt at stopping writes when the FS is 85% full. There 105 * a very lame attempt at stopping writes when the FS is 85% full. There
96 * are countless ways this is incorrect, but it is better than nothing. 106 * are countless ways this is incorrect, but it is better than nothing.
@@ -350,6 +360,19 @@ again:
350 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; 360 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
351 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE); 361 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
352 362
363 /*
364 * we don't want to send crud past the end of i_size through
365 * compression, that's just a waste of CPU time. So, if the
366 * end of the file is before the start of our current
367 * requested range of bytes, we bail out to the uncompressed
368 * cleanup code that can deal with all of this.
369 *
370 * It isn't really the fastest way to fix things, but this is a
371 * very uncommon corner.
372 */
373 if (actual_end <= start)
374 goto cleanup_and_bail_uncompressed;
375
353 total_compressed = actual_end - start; 376 total_compressed = actual_end - start;
354 377
355 /* we want to make sure that amount of ram required to uncompress 378 /* we want to make sure that amount of ram required to uncompress
@@ -494,6 +517,7 @@ again:
494 goto again; 517 goto again;
495 } 518 }
496 } else { 519 } else {
520cleanup_and_bail_uncompressed:
497 /* 521 /*
498 * No compression, but we still need to write the pages in 522 * No compression, but we still need to write the pages in
499 * the file we've been given so far. redirty the locked 523 * the file we've been given so far. redirty the locked
@@ -1324,12 +1348,11 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1324 struct inode *inode, u64 file_offset, 1348 struct inode *inode, u64 file_offset,
1325 struct list_head *list) 1349 struct list_head *list)
1326{ 1350{
1327 struct list_head *cur;
1328 struct btrfs_ordered_sum *sum; 1351 struct btrfs_ordered_sum *sum;
1329 1352
1330 btrfs_set_trans_block_group(trans, inode); 1353 btrfs_set_trans_block_group(trans, inode);
1331 list_for_each(cur, list) { 1354
1332 sum = list_entry(cur, struct btrfs_ordered_sum, list); 1355 list_for_each_entry(sum, list, list) {
1333 btrfs_csum_file_blocks(trans, 1356 btrfs_csum_file_blocks(trans,
1334 BTRFS_I(inode)->root->fs_info->csum_root, sum); 1357 BTRFS_I(inode)->root->fs_info->csum_root, sum);
1335 } 1358 }
@@ -2013,6 +2036,7 @@ void btrfs_read_locked_inode(struct inode *inode)
2013 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 2036 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2014 2037
2015 alloc_group_block = btrfs_inode_block_group(leaf, inode_item); 2038 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2039
2016 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0, 2040 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2017 alloc_group_block, 0); 2041 alloc_group_block, 0);
2018 btrfs_free_path(path); 2042 btrfs_free_path(path);
@@ -2039,6 +2063,7 @@ void btrfs_read_locked_inode(struct inode *inode)
2039 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 2063 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2040 break; 2064 break;
2041 default: 2065 default:
2066 inode->i_op = &btrfs_special_inode_operations;
2042 init_special_inode(inode, inode->i_mode, rdev); 2067 init_special_inode(inode, inode->i_mode, rdev);
2043 break; 2068 break;
2044 } 2069 }
@@ -2108,6 +2133,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2108 goto failed; 2133 goto failed;
2109 } 2134 }
2110 2135
2136 btrfs_unlock_up_safe(path, 1);
2111 leaf = path->nodes[0]; 2137 leaf = path->nodes[0];
2112 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2138 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2113 struct btrfs_inode_item); 2139 struct btrfs_inode_item);
@@ -2429,6 +2455,8 @@ next_node:
2429 ref->generation = leaf_gen; 2455 ref->generation = leaf_gen;
2430 ref->nritems = 0; 2456 ref->nritems = 0;
2431 2457
2458 btrfs_sort_leaf_ref(ref);
2459
2432 ret = btrfs_add_leaf_ref(root, ref, 0); 2460 ret = btrfs_add_leaf_ref(root, ref, 0);
2433 WARN_ON(ret); 2461 WARN_ON(ret);
2434 btrfs_free_leaf_ref(root, ref); 2462 btrfs_free_leaf_ref(root, ref);
@@ -2476,7 +2504,7 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2476 struct btrfs_path *path; 2504 struct btrfs_path *path;
2477 struct btrfs_key key; 2505 struct btrfs_key key;
2478 struct btrfs_key found_key; 2506 struct btrfs_key found_key;
2479 u32 found_type; 2507 u32 found_type = (u8)-1;
2480 struct extent_buffer *leaf; 2508 struct extent_buffer *leaf;
2481 struct btrfs_file_extent_item *fi; 2509 struct btrfs_file_extent_item *fi;
2482 u64 extent_start = 0; 2510 u64 extent_start = 0;
@@ -2663,6 +2691,8 @@ next:
2663 if (pending_del_nr) 2691 if (pending_del_nr)
2664 goto del_pending; 2692 goto del_pending;
2665 btrfs_release_path(root, path); 2693 btrfs_release_path(root, path);
2694 if (found_type == BTRFS_INODE_ITEM_KEY)
2695 break;
2666 goto search_again; 2696 goto search_again;
2667 } 2697 }
2668 2698
@@ -2679,6 +2709,8 @@ del_pending:
2679 BUG_ON(ret); 2709 BUG_ON(ret);
2680 pending_del_nr = 0; 2710 pending_del_nr = 0;
2681 btrfs_release_path(root, path); 2711 btrfs_release_path(root, path);
2712 if (found_type == BTRFS_INODE_ITEM_KEY)
2713 break;
2682 goto search_again; 2714 goto search_again;
2683 } 2715 }
2684 } 2716 }
@@ -3265,7 +3297,7 @@ skip:
3265 3297
3266 /* Reached end of directory/root. Bump pos past the last item. */ 3298 /* Reached end of directory/root. Bump pos past the last item. */
3267 if (key_type == BTRFS_DIR_INDEX_KEY) 3299 if (key_type == BTRFS_DIR_INDEX_KEY)
3268 filp->f_pos = INT_LIMIT(typeof(filp->f_pos)); 3300 filp->f_pos = INT_LIMIT(off_t);
3269 else 3301 else
3270 filp->f_pos++; 3302 filp->f_pos++;
3271nopos: 3303nopos:
@@ -3458,7 +3490,14 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3458 root->highest_inode = objectid; 3490 root->highest_inode = objectid;
3459 3491
3460 inode->i_uid = current_fsuid(); 3492 inode->i_uid = current_fsuid();
3461 inode->i_gid = current_fsgid(); 3493
3494 if (dir && (dir->i_mode & S_ISGID)) {
3495 inode->i_gid = dir->i_gid;
3496 if (S_ISDIR(mode))
3497 mode |= S_ISGID;
3498 } else
3499 inode->i_gid = current_fsgid();
3500
3462 inode->i_mode = mode; 3501 inode->i_mode = mode;
3463 inode->i_ino = objectid; 3502 inode->i_ino = objectid;
3464 inode_set_bytes(inode, 0); 3503 inode_set_bytes(inode, 0);
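
The btrfs_new_inode() hunk above adds conventional setgid-directory inheritance: if the parent directory has S_ISGID, the new inode takes the directory's gid, and a new directory also inherits the S_ISGID bit. A compact sketch of just that rule follows; dir_mode, dir_gid and process_gid are illustrative parameters, not VFS fields.

#include <sys/stat.h>
#include <sys/types.h>

/* returns the gid a new inode should get and, for directories created
 * under a setgid directory, also propagates S_ISGID into *new_mode */
static gid_t pick_gid(mode_t dir_mode, gid_t dir_gid, gid_t process_gid,
                      mode_t *new_mode)
{
    if (dir_mode & S_ISGID) {
        if (S_ISDIR(*new_mode))
            *new_mode |= S_ISGID;
        return dir_gid;
    }
    return process_gid;
}

int main(void)
{
    mode_t mode = S_IFDIR | 0755;
    gid_t gid = pick_gid(S_IFDIR | S_ISGID | 0775, 100, 1000, &mode);

    /* expect the directory's gid and the inherited setgid bit */
    return !(gid == 100 && (mode & S_ISGID));
}
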
@@ -3586,7 +3625,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3586 if (IS_ERR(inode)) 3625 if (IS_ERR(inode))
3587 goto out_unlock; 3626 goto out_unlock;
3588 3627
3589 err = btrfs_init_acl(inode, dir); 3628 err = btrfs_init_inode_security(inode, dir);
3590 if (err) { 3629 if (err) {
3591 drop_inode = 1; 3630 drop_inode = 1;
3592 goto out_unlock; 3631 goto out_unlock;
@@ -3649,7 +3688,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
3649 if (IS_ERR(inode)) 3688 if (IS_ERR(inode))
3650 goto out_unlock; 3689 goto out_unlock;
3651 3690
3652 err = btrfs_init_acl(inode, dir); 3691 err = btrfs_init_inode_security(inode, dir);
3653 if (err) { 3692 if (err) {
3654 drop_inode = 1; 3693 drop_inode = 1;
3655 goto out_unlock; 3694 goto out_unlock;
@@ -3772,7 +3811,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3772 3811
3773 drop_on_err = 1; 3812 drop_on_err = 1;
3774 3813
3775 err = btrfs_init_acl(inode, dir); 3814 err = btrfs_init_inode_security(inode, dir);
3776 if (err) 3815 if (err)
3777 goto out_fail; 3816 goto out_fail;
3778 3817
@@ -4158,9 +4197,10 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4158 return -EINVAL; 4197 return -EINVAL;
4159} 4198}
4160 4199
4161static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock) 4200static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4201 __u64 start, __u64 len)
4162{ 4202{
4163 return extent_bmap(mapping, iblock, btrfs_get_extent); 4203 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4164} 4204}
4165 4205
4166int btrfs_readpage(struct file *file, struct page *page) 4206int btrfs_readpage(struct file *file, struct page *page)
@@ -4733,7 +4773,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4733 if (IS_ERR(inode)) 4773 if (IS_ERR(inode))
4734 goto out_unlock; 4774 goto out_unlock;
4735 4775
4736 err = btrfs_init_acl(inode, dir); 4776 err = btrfs_init_inode_security(inode, dir);
4737 if (err) { 4777 if (err) {
4738 drop_inode = 1; 4778 drop_inode = 1;
4739 goto out_unlock; 4779 goto out_unlock;
@@ -4987,13 +5027,24 @@ static struct extent_io_ops btrfs_extent_io_ops = {
4987 .clear_bit_hook = btrfs_clear_bit_hook, 5027 .clear_bit_hook = btrfs_clear_bit_hook,
4988}; 5028};
4989 5029
5030/*
5031 * btrfs doesn't support the bmap operation because swapfiles
5032 * use bmap to make a mapping of extents in the file. They assume
5033 * these extents won't change over the life of the file and they
5034 * use the bmap result to do IO directly to the drive.
5035 *
5036 * the btrfs bmap call would return logical addresses that aren't
5037 * suitable for IO and they also will change frequently as COW
5038 * operations happen. So, swapfile + btrfs == corruption.
5039 *
5040 * For now we're avoiding this by dropping bmap.
5041 */
4990static struct address_space_operations btrfs_aops = { 5042static struct address_space_operations btrfs_aops = {
4991 .readpage = btrfs_readpage, 5043 .readpage = btrfs_readpage,
4992 .writepage = btrfs_writepage, 5044 .writepage = btrfs_writepage,
4993 .writepages = btrfs_writepages, 5045 .writepages = btrfs_writepages,
4994 .readpages = btrfs_readpages, 5046 .readpages = btrfs_readpages,
4995 .sync_page = block_sync_page, 5047 .sync_page = block_sync_page,
4996 .bmap = btrfs_bmap,
4997 .direct_IO = btrfs_direct_IO, 5048 .direct_IO = btrfs_direct_IO,
4998 .invalidatepage = btrfs_invalidatepage, 5049 .invalidatepage = btrfs_invalidatepage,
4999 .releasepage = btrfs_releasepage, 5050 .releasepage = btrfs_releasepage,
@@ -5017,6 +5068,7 @@ static struct inode_operations btrfs_file_inode_operations = {
5017 .removexattr = btrfs_removexattr, 5068 .removexattr = btrfs_removexattr,
5018 .permission = btrfs_permission, 5069 .permission = btrfs_permission,
5019 .fallocate = btrfs_fallocate, 5070 .fallocate = btrfs_fallocate,
5071 .fiemap = btrfs_fiemap,
5020}; 5072};
5021static struct inode_operations btrfs_special_inode_operations = { 5073static struct inode_operations btrfs_special_inode_operations = {
5022 .getattr = btrfs_getattr, 5074 .getattr = btrfs_getattr,
@@ -5032,4 +5084,8 @@ static struct inode_operations btrfs_symlink_inode_operations = {
5032 .follow_link = page_follow_link_light, 5084 .follow_link = page_follow_link_light,
5033 .put_link = page_put_link, 5085 .put_link = page_put_link,
5034 .permission = btrfs_permission, 5086 .permission = btrfs_permission,
5087 .setxattr = btrfs_setxattr,
5088 .getxattr = btrfs_getxattr,
5089 .listxattr = btrfs_listxattr,
5090 .removexattr = btrfs_removexattr,
5035}; 5091};
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c2aa33e3feb5..988fdc8b49eb 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -38,7 +38,6 @@
38#include <linux/compat.h> 38#include <linux/compat.h>
39#include <linux/bit_spinlock.h> 39#include <linux/bit_spinlock.h>
40#include <linux/security.h> 40#include <linux/security.h>
41#include <linux/version.h>
42#include <linux/xattr.h> 41#include <linux/xattr.h>
43#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
44#include "compat.h" 43#include "compat.h"
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 39bae7761db6..9ebe9385129b 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -26,63 +26,213 @@
26#include "locking.h" 26#include "locking.h"
27 27
28/* 28/*
29 * locks the per buffer mutex in an extent buffer. This uses adaptive locks 29 * btrfs_header_level() isn't free, so don't call it when lockdep isn't
30 * and the spin is not tuned very extensively. The spinning does make a big 30 * on
31 * difference in almost every workload, but spinning for the right amount of
32 * time needs some help.
33 *
34 * In general, we want to spin as long as the lock holder is doing btree
35 * searches, and we should give up if they are in more expensive code.
36 */ 31 */
32#ifdef CONFIG_DEBUG_LOCK_ALLOC
33static inline void spin_nested(struct extent_buffer *eb)
34{
35 spin_lock_nested(&eb->lock, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
36}
37#else
38static inline void spin_nested(struct extent_buffer *eb)
39{
40 spin_lock(&eb->lock);
41}
42#endif
37 43
38int btrfs_tree_lock(struct extent_buffer *eb) 44/*
45 * Setting a lock to blocking will drop the spinlock and set the
46 * flag that forces other procs who want the lock to wait. After
47 * this you can safely schedule with the lock held.
48 */
49void btrfs_set_lock_blocking(struct extent_buffer *eb)
39{ 50{
40 int i; 51 if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
52 set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
53 spin_unlock(&eb->lock);
54 }
55 /* exit with the spin lock released and the bit set */
56}
41 57
42 if (mutex_trylock(&eb->mutex)) 58/*
43 return 0; 59 * clearing the blocking flag will take the spinlock again.
60 * After this you can't safely schedule
61 */
62void btrfs_clear_lock_blocking(struct extent_buffer *eb)
63{
64 if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
65 spin_nested(eb);
66 clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
67 smp_mb__after_clear_bit();
68 }
69 /* exit with the spin lock held */
70}
71
72/*
73 * unfortunately, many of the places that currently set a lock to blocking
 74 * don't end up blocking for very long, and often they don't block
 75 * at all. For a dbench 50 run, if we don't spin on the blocking bit
76 * at all, the context switch rate can jump up to 400,000/sec or more.
77 *
78 * So, we're still stuck with this crummy spin on the blocking bit,
79 * at least until the most common causes of the short blocks
80 * can be dealt with.
81 */
82static int btrfs_spin_on_block(struct extent_buffer *eb)
83{
84 int i;
44 for (i = 0; i < 512; i++) { 85 for (i = 0; i < 512; i++) {
45 cpu_relax(); 86 cpu_relax();
46 if (mutex_trylock(&eb->mutex)) 87 if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
47 return 0; 88 return 1;
89 if (need_resched())
90 break;
48 } 91 }
49 cpu_relax();
50 mutex_lock_nested(&eb->mutex, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
51 return 0; 92 return 0;
52} 93}
53 94
54int btrfs_try_tree_lock(struct extent_buffer *eb) 95/*
96 * This is somewhat different from trylock. It will take the
97 * spinlock but if it finds the lock is set to blocking, it will
98 * return without the lock held.
99 *
100 * returns 1 if it was able to take the lock and zero otherwise
101 *
102 * After this call, scheduling is not safe without first calling
103 * btrfs_set_lock_blocking()
104 */
105int btrfs_try_spin_lock(struct extent_buffer *eb)
55{ 106{
56 return mutex_trylock(&eb->mutex); 107 int i;
108
109 spin_nested(eb);
110 if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
111 return 1;
112 spin_unlock(&eb->lock);
113
114 /* spin for a bit on the BLOCKING flag */
115 for (i = 0; i < 2; i++) {
116 if (!btrfs_spin_on_block(eb))
117 break;
118
119 spin_nested(eb);
120 if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
121 return 1;
122 spin_unlock(&eb->lock);
123 }
124 return 0;
57} 125}
58 126
59int btrfs_tree_unlock(struct extent_buffer *eb) 127/*
128 * the autoremove wake function will return 0 if it tried to wake up
129 * a process that was already awake, which means that process won't
130 * count as an exclusive wakeup. The waitq code will continue waking
131 * procs until it finds one that was actually sleeping.
132 *
133 * For btrfs, this isn't quite what we want. We want a single proc
134 * to be notified that the lock is ready for taking. If that proc
 135 * already happens to be awake, great, it will loop around and try for
136 * the lock.
137 *
138 * So, btrfs_wake_function always returns 1, even when the proc that we
139 * tried to wake up was already awake.
140 */
141static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
142 int sync, void *key)
60{ 143{
61 mutex_unlock(&eb->mutex); 144 autoremove_wake_function(wait, mode, sync, key);
62 return 0; 145 return 1;
63} 146}
64 147
65int btrfs_tree_locked(struct extent_buffer *eb) 148/*
149 * returns with the extent buffer spinlocked.
150 *
151 * This will spin and/or wait as required to take the lock, and then
152 * return with the spinlock held.
153 *
154 * After this call, scheduling is not safe without first calling
155 * btrfs_set_lock_blocking()
156 */
157int btrfs_tree_lock(struct extent_buffer *eb)
66{ 158{
67 return mutex_is_locked(&eb->mutex); 159 DEFINE_WAIT(wait);
160 wait.func = btrfs_wake_function;
161
 162 while (1) {
163 spin_nested(eb);
164
165 /* nobody is blocking, exit with the spinlock held */
166 if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
167 return 0;
168
169 /*
170 * we have the spinlock, but the real owner is blocking.
171 * wait for them
172 */
173 spin_unlock(&eb->lock);
174
175 /*
176 * spin for a bit, and if the blocking flag goes away,
177 * loop around
178 */
179 if (btrfs_spin_on_block(eb))
180 continue;
181
182 prepare_to_wait_exclusive(&eb->lock_wq, &wait,
183 TASK_UNINTERRUPTIBLE);
184
185 if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
186 schedule();
187
188 finish_wait(&eb->lock_wq, &wait);
189 }
190 return 0;
68} 191}
69 192
70/* 193/*
71 * btrfs_search_slot uses this to decide if it should drop its locks 194 * Very quick trylock, this does not spin or schedule. It returns
72 * before doing something expensive like allocating free blocks for cow. 195 * 1 with the spinlock held if it was able to take the lock, or it
196 * returns zero if it was unable to take the lock.
197 *
198 * After this call, scheduling is not safe without first calling
199 * btrfs_set_lock_blocking()
73 */ 200 */
74int btrfs_path_lock_waiting(struct btrfs_path *path, int level) 201int btrfs_try_tree_lock(struct extent_buffer *eb)
75{ 202{
76 int i; 203 if (spin_trylock(&eb->lock)) {
77 struct extent_buffer *eb; 204 if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
78 for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) { 205 /*
79 eb = path->nodes[i]; 206 * we've got the spinlock, but the real owner is
80 if (!eb) 207 * blocking. Drop the spinlock and return failure
81 break; 208 */
82 smp_mb(); 209 spin_unlock(&eb->lock);
83 if (!list_empty(&eb->mutex.wait_list)) 210 return 0;
84 return 1; 211 }
212 return 1;
85 } 213 }
 214 /* someone else has the spinlock, give up */
86 return 0; 215 return 0;
87} 216}
88 217
218int btrfs_tree_unlock(struct extent_buffer *eb)
219{
220 /*
 221 * if we were a blocking owner, we don't have the spinlock held;
 222 * just clear the bit and look for waiters
223 */
224 if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
225 smp_mb__after_clear_bit();
226 else
227 spin_unlock(&eb->lock);
228
229 if (waitqueue_active(&eb->lock_wq))
230 wake_up(&eb->lock_wq);
231 return 0;
232}
233
234int btrfs_tree_locked(struct extent_buffer *eb)
235{
236 return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
237 spin_is_locked(&eb->lock);
238}
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index bc1faef12519..6bb0afbff928 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -22,6 +22,10 @@
22int btrfs_tree_lock(struct extent_buffer *eb); 22int btrfs_tree_lock(struct extent_buffer *eb);
23int btrfs_tree_unlock(struct extent_buffer *eb); 23int btrfs_tree_unlock(struct extent_buffer *eb);
24int btrfs_tree_locked(struct extent_buffer *eb); 24int btrfs_tree_locked(struct extent_buffer *eb);
25
25int btrfs_try_tree_lock(struct extent_buffer *eb); 26int btrfs_try_tree_lock(struct extent_buffer *eb);
26int btrfs_path_lock_waiting(struct btrfs_path *path, int level); 27int btrfs_try_spin_lock(struct extent_buffer *eb);
28
29void btrfs_set_lock_blocking(struct extent_buffer *eb);
30void btrfs_clear_lock_blocking(struct extent_buffer *eb);
27#endif 31#endif
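
A rough usage sketch of the new spinning/blocking lock API declared above (walk_one_level(), do_quick_search() and do_expensive_cow() are hypothetical helpers, not part of this series):

static void walk_one_level(struct extent_buffer *eb)
{
	/* take the spinning lock; sleeping is not allowed yet */
	btrfs_tree_lock(eb);

	/* short, non-sleeping work can run under the spinlock */
	do_quick_search(eb);			/* hypothetical */

	/* switch to blocking mode before anything that may schedule */
	btrfs_set_lock_blocking(eb);
	do_expensive_cow(eb);			/* hypothetical, may sleep */

	/* take the spinlock back before touching the buffer again */
	btrfs_clear_lock_blocking(eb);

	btrfs_tree_unlock(eb);
}
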
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index a20940170274..77c2411a5f0f 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -613,7 +613,6 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
613 struct btrfs_sector_sum *sector_sums; 613 struct btrfs_sector_sum *sector_sums;
614 struct btrfs_ordered_extent *ordered; 614 struct btrfs_ordered_extent *ordered;
615 struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; 615 struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
616 struct list_head *cur;
617 unsigned long num_sectors; 616 unsigned long num_sectors;
618 unsigned long i; 617 unsigned long i;
619 u32 sectorsize = BTRFS_I(inode)->root->sectorsize; 618 u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
@@ -624,8 +623,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
624 return 1; 623 return 1;
625 624
626 mutex_lock(&tree->mutex); 625 mutex_lock(&tree->mutex);
627 list_for_each_prev(cur, &ordered->list) { 626 list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
628 ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list);
629 if (disk_bytenr >= ordered_sum->bytenr) { 627 if (disk_bytenr >= ordered_sum->bytenr) {
630 num_sectors = ordered_sum->len / sectorsize; 628 num_sectors = ordered_sum->len / sectorsize;
631 sector_sums = ordered_sum->sums; 629 sector_sums = ordered_sum->sums;
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
index 6f0acc4c9eab..d0cc62bccb94 100644
--- a/fs/btrfs/ref-cache.c
+++ b/fs/btrfs/ref-cache.c
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/sort.h>
20#include "ctree.h" 21#include "ctree.h"
21#include "ref-cache.h" 22#include "ref-cache.h"
22#include "transaction.h" 23#include "transaction.h"
diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h
index 16f3183d7c59..bc283ad2db73 100644
--- a/fs/btrfs/ref-cache.h
+++ b/fs/btrfs/ref-cache.h
@@ -73,5 +73,4 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
73int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, 73int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
74 int shared); 74 int shared);
75int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref); 75int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
76
77#endif 76#endif
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index db9fb3bc1e33..f3fd7e2cbc38 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -37,7 +37,6 @@
37#include <linux/ctype.h> 37#include <linux/ctype.h>
38#include <linux/namei.h> 38#include <linux/namei.h>
39#include <linux/miscdevice.h> 39#include <linux/miscdevice.h>
40#include <linux/version.h>
41#include <linux/magic.h> 40#include <linux/magic.h>
42#include "compat.h" 41#include "compat.h"
43#include "ctree.h" 42#include "ctree.h"
@@ -583,17 +582,18 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
583 struct btrfs_ioctl_vol_args *vol; 582 struct btrfs_ioctl_vol_args *vol;
584 struct btrfs_fs_devices *fs_devices; 583 struct btrfs_fs_devices *fs_devices;
585 int ret = -ENOTTY; 584 int ret = -ENOTTY;
586 int len;
587 585
588 if (!capable(CAP_SYS_ADMIN)) 586 if (!capable(CAP_SYS_ADMIN))
589 return -EPERM; 587 return -EPERM;
590 588
591 vol = kmalloc(sizeof(*vol), GFP_KERNEL); 589 vol = kmalloc(sizeof(*vol), GFP_KERNEL);
590 if (!vol)
591 return -ENOMEM;
592
592 if (copy_from_user(vol, (void __user *)arg, sizeof(*vol))) { 593 if (copy_from_user(vol, (void __user *)arg, sizeof(*vol))) {
593 ret = -EFAULT; 594 ret = -EFAULT;
594 goto out; 595 goto out;
595 } 596 }
596 len = strnlen(vol->name, BTRFS_PATH_NAME_MAX);
597 597
598 switch (cmd) { 598 switch (cmd) {
599 case BTRFS_IOC_SCAN_DEV: 599 case BTRFS_IOC_SCAN_DEV:
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8a08f9443340..919172de5c9a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -852,11 +852,9 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
852{ 852{
853 struct btrfs_pending_snapshot *pending; 853 struct btrfs_pending_snapshot *pending;
854 struct list_head *head = &trans->transaction->pending_snapshots; 854 struct list_head *head = &trans->transaction->pending_snapshots;
855 struct list_head *cur;
856 int ret; 855 int ret;
857 856
858 list_for_each(cur, head) { 857 list_for_each_entry(pending, head, list) {
859 pending = list_entry(cur, struct btrfs_pending_snapshot, list);
860 ret = create_pending_snapshot(trans, fs_info, pending); 858 ret = create_pending_snapshot(trans, fs_info, pending);
861 BUG_ON(ret); 859 BUG_ON(ret);
862 } 860 }
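
This hunk is one of several in the series that replace list_for_each() plus list_entry() with list_for_each_entry(), dropping the separate struct list_head cursor. A standalone userspace re-creation of the idiom for illustration (this is not the kernel's <linux/list.h>, just a minimal equivalent):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* iterate over the containing structures directly, no separate cursor */
#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct demo_device { int devid; struct list_head dev_list; };

int main(void)
{
	struct demo_device dev = { .devid = 42 };
	struct demo_device *d;
	struct list_head head;

	/* hand-splice the single element in for the demo */
	head.next = &dev.dev_list;
	head.prev = &dev.dev_list;
	dev.dev_list.next = &head;
	dev.dev_list.prev = &head;

	list_for_each_entry(d, &head, dev_list)
		printf("devid %d\n", d->devid);
	return 0;
}
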
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index 3e8358c36165..98d25fa4570e 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -74,6 +74,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
74 u32 nritems; 74 u32 nritems;
75 75
76 root_node = btrfs_lock_root_node(root); 76 root_node = btrfs_lock_root_node(root);
77 btrfs_set_lock_blocking(root_node);
77 nritems = btrfs_header_nritems(root_node); 78 nritems = btrfs_header_nritems(root_node);
78 root->defrag_max.objectid = 0; 79 root->defrag_max.objectid = 0;
79 /* from above we know this is not a leaf */ 80 /* from above we know this is not a leaf */
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d81cda2e077c..20794290256b 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -78,104 +78,6 @@ static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
78 */ 78 */
79 79
80/* 80/*
81 * btrfs_add_log_tree adds a new per-subvolume log tree into the
82 * tree of log tree roots. This must be called with a tree log transaction
83 * running (see start_log_trans).
84 */
85static int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
86 struct btrfs_root *root)
87{
88 struct btrfs_key key;
89 struct btrfs_root_item root_item;
90 struct btrfs_inode_item *inode_item;
91 struct extent_buffer *leaf;
92 struct btrfs_root *new_root = root;
93 int ret;
94 u64 objectid = root->root_key.objectid;
95
96 leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
97 BTRFS_TREE_LOG_OBJECTID,
98 trans->transid, 0, 0, 0);
99 if (IS_ERR(leaf)) {
100 ret = PTR_ERR(leaf);
101 return ret;
102 }
103
104 btrfs_set_header_nritems(leaf, 0);
105 btrfs_set_header_level(leaf, 0);
106 btrfs_set_header_bytenr(leaf, leaf->start);
107 btrfs_set_header_generation(leaf, trans->transid);
108 btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
109
110 write_extent_buffer(leaf, root->fs_info->fsid,
111 (unsigned long)btrfs_header_fsid(leaf),
112 BTRFS_FSID_SIZE);
113 btrfs_mark_buffer_dirty(leaf);
114
115 inode_item = &root_item.inode;
116 memset(inode_item, 0, sizeof(*inode_item));
117 inode_item->generation = cpu_to_le64(1);
118 inode_item->size = cpu_to_le64(3);
119 inode_item->nlink = cpu_to_le32(1);
120 inode_item->nbytes = cpu_to_le64(root->leafsize);
121 inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
122
123 btrfs_set_root_bytenr(&root_item, leaf->start);
124 btrfs_set_root_generation(&root_item, trans->transid);
125 btrfs_set_root_level(&root_item, 0);
126 btrfs_set_root_refs(&root_item, 0);
127 btrfs_set_root_used(&root_item, 0);
128
129 memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
130 root_item.drop_level = 0;
131
132 btrfs_tree_unlock(leaf);
133 free_extent_buffer(leaf);
134 leaf = NULL;
135
136 btrfs_set_root_dirid(&root_item, 0);
137
138 key.objectid = BTRFS_TREE_LOG_OBJECTID;
139 key.offset = objectid;
140 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
141 ret = btrfs_insert_root(trans, root->fs_info->log_root_tree, &key,
142 &root_item);
143 if (ret)
144 goto fail;
145
146 new_root = btrfs_read_fs_root_no_radix(root->fs_info->log_root_tree,
147 &key);
148 BUG_ON(!new_root);
149
150 WARN_ON(root->log_root);
151 root->log_root = new_root;
152
153 /*
154 * log trees do not get reference counted because they go away
155 * before a real commit is actually done. They do store pointers
156 * to file data extents, and those reference counts still get
157 * updated (along with back refs to the log tree).
158 */
159 new_root->ref_cows = 0;
160 new_root->last_trans = trans->transid;
161
162 /*
163 * we need to make sure the root block for this new tree
164 * is marked as dirty in the dirty_log_pages tree. This
165 * is how it gets flushed down to disk at tree log commit time.
166 *
167 * the tree logging mutex keeps others from coming in and changing
168 * the new_root->node, so we can safely access it here
169 */
170 set_extent_dirty(&new_root->dirty_log_pages, new_root->node->start,
171 new_root->node->start + new_root->node->len - 1,
172 GFP_NOFS);
173
174fail:
175 return ret;
176}
177
178/*
179 * start a sub transaction and setup the log tree 81 * start a sub transaction and setup the log tree
180 * this increments the log tree writer count to make the people 82 * this increments the log tree writer count to make the people
181 * syncing the tree wait for us to finish 83 * syncing the tree wait for us to finish
@@ -184,6 +86,14 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
184 struct btrfs_root *root) 86 struct btrfs_root *root)
185{ 87{
186 int ret; 88 int ret;
89
90 mutex_lock(&root->log_mutex);
91 if (root->log_root) {
92 root->log_batch++;
93 atomic_inc(&root->log_writers);
94 mutex_unlock(&root->log_mutex);
95 return 0;
96 }
187 mutex_lock(&root->fs_info->tree_log_mutex); 97 mutex_lock(&root->fs_info->tree_log_mutex);
188 if (!root->fs_info->log_root_tree) { 98 if (!root->fs_info->log_root_tree) {
189 ret = btrfs_init_log_root_tree(trans, root->fs_info); 99 ret = btrfs_init_log_root_tree(trans, root->fs_info);
@@ -193,9 +103,10 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
193 ret = btrfs_add_log_tree(trans, root); 103 ret = btrfs_add_log_tree(trans, root);
194 BUG_ON(ret); 104 BUG_ON(ret);
195 } 105 }
196 atomic_inc(&root->fs_info->tree_log_writers);
197 root->fs_info->tree_log_batch++;
198 mutex_unlock(&root->fs_info->tree_log_mutex); 106 mutex_unlock(&root->fs_info->tree_log_mutex);
107 root->log_batch++;
108 atomic_inc(&root->log_writers);
109 mutex_unlock(&root->log_mutex);
199 return 0; 110 return 0;
200} 111}
201 112
@@ -212,13 +123,12 @@ static int join_running_log_trans(struct btrfs_root *root)
212 if (!root->log_root) 123 if (!root->log_root)
213 return -ENOENT; 124 return -ENOENT;
214 125
215 mutex_lock(&root->fs_info->tree_log_mutex); 126 mutex_lock(&root->log_mutex);
216 if (root->log_root) { 127 if (root->log_root) {
217 ret = 0; 128 ret = 0;
218 atomic_inc(&root->fs_info->tree_log_writers); 129 atomic_inc(&root->log_writers);
219 root->fs_info->tree_log_batch++;
220 } 130 }
221 mutex_unlock(&root->fs_info->tree_log_mutex); 131 mutex_unlock(&root->log_mutex);
222 return ret; 132 return ret;
223} 133}
224 134
@@ -228,10 +138,11 @@ static int join_running_log_trans(struct btrfs_root *root)
228 */ 138 */
229static int end_log_trans(struct btrfs_root *root) 139static int end_log_trans(struct btrfs_root *root)
230{ 140{
231 atomic_dec(&root->fs_info->tree_log_writers); 141 if (atomic_dec_and_test(&root->log_writers)) {
232 smp_mb(); 142 smp_mb();
233 if (waitqueue_active(&root->fs_info->tree_log_wait)) 143 if (waitqueue_active(&root->log_writer_wait))
234 wake_up(&root->fs_info->tree_log_wait); 144 wake_up(&root->log_writer_wait);
145 }
235 return 0; 146 return 0;
236} 147}
237 148
@@ -1704,6 +1615,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1704 1615
1705 btrfs_tree_lock(next); 1616 btrfs_tree_lock(next);
1706 clean_tree_block(trans, root, next); 1617 clean_tree_block(trans, root, next);
1618 btrfs_set_lock_blocking(next);
1707 btrfs_wait_tree_block_writeback(next); 1619 btrfs_wait_tree_block_writeback(next);
1708 btrfs_tree_unlock(next); 1620 btrfs_tree_unlock(next);
1709 1621
@@ -1750,6 +1662,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1750 next = path->nodes[*level]; 1662 next = path->nodes[*level];
1751 btrfs_tree_lock(next); 1663 btrfs_tree_lock(next);
1752 clean_tree_block(trans, root, next); 1664 clean_tree_block(trans, root, next);
1665 btrfs_set_lock_blocking(next);
1753 btrfs_wait_tree_block_writeback(next); 1666 btrfs_wait_tree_block_writeback(next);
1754 btrfs_tree_unlock(next); 1667 btrfs_tree_unlock(next);
1755 1668
@@ -1807,6 +1720,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
1807 1720
1808 btrfs_tree_lock(next); 1721 btrfs_tree_lock(next);
1809 clean_tree_block(trans, root, next); 1722 clean_tree_block(trans, root, next);
1723 btrfs_set_lock_blocking(next);
1810 btrfs_wait_tree_block_writeback(next); 1724 btrfs_wait_tree_block_writeback(next);
1811 btrfs_tree_unlock(next); 1725 btrfs_tree_unlock(next);
1812 1726
@@ -1879,6 +1793,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
1879 1793
1880 btrfs_tree_lock(next); 1794 btrfs_tree_lock(next);
1881 clean_tree_block(trans, log, next); 1795 clean_tree_block(trans, log, next);
1796 btrfs_set_lock_blocking(next);
1882 btrfs_wait_tree_block_writeback(next); 1797 btrfs_wait_tree_block_writeback(next);
1883 btrfs_tree_unlock(next); 1798 btrfs_tree_unlock(next);
1884 1799
@@ -1902,26 +1817,65 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
1902 } 1817 }
1903 } 1818 }
1904 btrfs_free_path(path); 1819 btrfs_free_path(path);
1905 if (wc->free)
1906 free_extent_buffer(log->node);
1907 return ret; 1820 return ret;
1908} 1821}
1909 1822
1910static int wait_log_commit(struct btrfs_root *log) 1823/*
 1824 * helper function to update the item for a given subvolume's log root
1825 * in the tree of log roots
1826 */
1827static int update_log_root(struct btrfs_trans_handle *trans,
1828 struct btrfs_root *log)
1829{
1830 int ret;
1831
1832 if (log->log_transid == 1) {
1833 /* insert root item on the first sync */
1834 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
1835 &log->root_key, &log->root_item);
1836 } else {
1837 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
1838 &log->root_key, &log->root_item);
1839 }
1840 return ret;
1841}
1842
1843static int wait_log_commit(struct btrfs_root *root, unsigned long transid)
1911{ 1844{
1912 DEFINE_WAIT(wait); 1845 DEFINE_WAIT(wait);
1913 u64 transid = log->fs_info->tree_log_transid; 1846 int index = transid % 2;
1914 1847
1848 /*
1849 * we only allow two pending log transactions at a time,
1850 * so we know that if ours is more than 2 older than the
1851 * current transaction, we're done
1852 */
1915 do { 1853 do {
1916 prepare_to_wait(&log->fs_info->tree_log_wait, &wait, 1854 prepare_to_wait(&root->log_commit_wait[index],
1917 TASK_UNINTERRUPTIBLE); 1855 &wait, TASK_UNINTERRUPTIBLE);
1918 mutex_unlock(&log->fs_info->tree_log_mutex); 1856 mutex_unlock(&root->log_mutex);
1919 if (atomic_read(&log->fs_info->tree_log_commit)) 1857 if (root->log_transid < transid + 2 &&
1858 atomic_read(&root->log_commit[index]))
1920 schedule(); 1859 schedule();
1921 finish_wait(&log->fs_info->tree_log_wait, &wait); 1860 finish_wait(&root->log_commit_wait[index], &wait);
1922 mutex_lock(&log->fs_info->tree_log_mutex); 1861 mutex_lock(&root->log_mutex);
1923 } while (transid == log->fs_info->tree_log_transid && 1862 } while (root->log_transid < transid + 2 &&
1924 atomic_read(&log->fs_info->tree_log_commit)); 1863 atomic_read(&root->log_commit[index]));
1864 return 0;
1865}
1866
1867static int wait_for_writer(struct btrfs_root *root)
1868{
1869 DEFINE_WAIT(wait);
1870 while (atomic_read(&root->log_writers)) {
1871 prepare_to_wait(&root->log_writer_wait,
1872 &wait, TASK_UNINTERRUPTIBLE);
1873 mutex_unlock(&root->log_mutex);
1874 if (atomic_read(&root->log_writers))
1875 schedule();
1876 mutex_lock(&root->log_mutex);
1877 finish_wait(&root->log_writer_wait, &wait);
1878 }
1925 return 0; 1879 return 0;
1926} 1880}
1927 1881
@@ -1933,57 +1887,114 @@ static int wait_log_commit(struct btrfs_root *log)
1933int btrfs_sync_log(struct btrfs_trans_handle *trans, 1887int btrfs_sync_log(struct btrfs_trans_handle *trans,
1934 struct btrfs_root *root) 1888 struct btrfs_root *root)
1935{ 1889{
1890 int index1;
1891 int index2;
1936 int ret; 1892 int ret;
1937 unsigned long batch;
1938 struct btrfs_root *log = root->log_root; 1893 struct btrfs_root *log = root->log_root;
1894 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
1939 1895
1940 mutex_lock(&log->fs_info->tree_log_mutex); 1896 mutex_lock(&root->log_mutex);
1941 if (atomic_read(&log->fs_info->tree_log_commit)) { 1897 index1 = root->log_transid % 2;
1942 wait_log_commit(log); 1898 if (atomic_read(&root->log_commit[index1])) {
1943 goto out; 1899 wait_log_commit(root, root->log_transid);
1900 mutex_unlock(&root->log_mutex);
1901 return 0;
1944 } 1902 }
1945 atomic_set(&log->fs_info->tree_log_commit, 1); 1903 atomic_set(&root->log_commit[index1], 1);
1904
1905 /* wait for previous tree log sync to complete */
1906 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
1907 wait_log_commit(root, root->log_transid - 1);
1946 1908
1947 while (1) { 1909 while (1) {
1948 batch = log->fs_info->tree_log_batch; 1910 unsigned long batch = root->log_batch;
1949 mutex_unlock(&log->fs_info->tree_log_mutex); 1911 mutex_unlock(&root->log_mutex);
1950 schedule_timeout_uninterruptible(1); 1912 schedule_timeout_uninterruptible(1);
1951 mutex_lock(&log->fs_info->tree_log_mutex); 1913 mutex_lock(&root->log_mutex);
1952 1914 wait_for_writer(root);
1953 while (atomic_read(&log->fs_info->tree_log_writers)) { 1915 if (batch == root->log_batch)
1954 DEFINE_WAIT(wait);
1955 prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
1956 TASK_UNINTERRUPTIBLE);
1957 mutex_unlock(&log->fs_info->tree_log_mutex);
1958 if (atomic_read(&log->fs_info->tree_log_writers))
1959 schedule();
1960 mutex_lock(&log->fs_info->tree_log_mutex);
1961 finish_wait(&log->fs_info->tree_log_wait, &wait);
1962 }
1963 if (batch == log->fs_info->tree_log_batch)
1964 break; 1916 break;
1965 } 1917 }
1966 1918
1967 ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages); 1919 ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages);
1968 BUG_ON(ret); 1920 BUG_ON(ret);
1969 ret = btrfs_write_and_wait_marked_extents(root->fs_info->log_root_tree, 1921
1970 &root->fs_info->log_root_tree->dirty_log_pages); 1922 btrfs_set_root_bytenr(&log->root_item, log->node->start);
1923 btrfs_set_root_generation(&log->root_item, trans->transid);
1924 btrfs_set_root_level(&log->root_item, btrfs_header_level(log->node));
1925
1926 root->log_batch = 0;
1927 root->log_transid++;
1928 log->log_transid = root->log_transid;
1929 smp_mb();
1930 /*
1931 * log tree has been flushed to disk, new modifications of
 1932 * the log will be written to new positions, so it's safe to
1933 * allow log writers to go in.
1934 */
1935 mutex_unlock(&root->log_mutex);
1936
1937 mutex_lock(&log_root_tree->log_mutex);
1938 log_root_tree->log_batch++;
1939 atomic_inc(&log_root_tree->log_writers);
1940 mutex_unlock(&log_root_tree->log_mutex);
1941
1942 ret = update_log_root(trans, log);
1943 BUG_ON(ret);
1944
1945 mutex_lock(&log_root_tree->log_mutex);
1946 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
1947 smp_mb();
1948 if (waitqueue_active(&log_root_tree->log_writer_wait))
1949 wake_up(&log_root_tree->log_writer_wait);
1950 }
1951
1952 index2 = log_root_tree->log_transid % 2;
1953 if (atomic_read(&log_root_tree->log_commit[index2])) {
1954 wait_log_commit(log_root_tree, log_root_tree->log_transid);
1955 mutex_unlock(&log_root_tree->log_mutex);
1956 goto out;
1957 }
1958 atomic_set(&log_root_tree->log_commit[index2], 1);
1959
1960 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2]))
1961 wait_log_commit(log_root_tree, log_root_tree->log_transid - 1);
1962
1963 wait_for_writer(log_root_tree);
1964
1965 ret = btrfs_write_and_wait_marked_extents(log_root_tree,
1966 &log_root_tree->dirty_log_pages);
1971 BUG_ON(ret); 1967 BUG_ON(ret);
1972 1968
1973 btrfs_set_super_log_root(&root->fs_info->super_for_commit, 1969 btrfs_set_super_log_root(&root->fs_info->super_for_commit,
1974 log->fs_info->log_root_tree->node->start); 1970 log_root_tree->node->start);
1975 btrfs_set_super_log_root_level(&root->fs_info->super_for_commit, 1971 btrfs_set_super_log_root_level(&root->fs_info->super_for_commit,
1976 btrfs_header_level(log->fs_info->log_root_tree->node)); 1972 btrfs_header_level(log_root_tree->node));
1973
1974 log_root_tree->log_batch = 0;
1975 log_root_tree->log_transid++;
1976 smp_mb();
1977
1978 mutex_unlock(&log_root_tree->log_mutex);
1979
1980 /*
 1981 * nobody else is going to jump in and write the ctree
1982 * super here because the log_commit atomic below is protecting
1983 * us. We must be called with a transaction handle pinning
1984 * the running transaction open, so a full commit can't hop
1985 * in and cause problems either.
1986 */
1987 write_ctree_super(trans, root->fs_info->tree_root, 2);
1977 1988
1978 write_ctree_super(trans, log->fs_info->tree_root, 2); 1989 atomic_set(&log_root_tree->log_commit[index2], 0);
1979 log->fs_info->tree_log_transid++;
1980 log->fs_info->tree_log_batch = 0;
1981 atomic_set(&log->fs_info->tree_log_commit, 0);
1982 smp_mb(); 1990 smp_mb();
1983 if (waitqueue_active(&log->fs_info->tree_log_wait)) 1991 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
1984 wake_up(&log->fs_info->tree_log_wait); 1992 wake_up(&log_root_tree->log_commit_wait[index2]);
1985out: 1993out:
1986 mutex_unlock(&log->fs_info->tree_log_mutex); 1994 atomic_set(&root->log_commit[index1], 0);
1995 smp_mb();
1996 if (waitqueue_active(&root->log_commit_wait[index1]))
1997 wake_up(&root->log_commit_wait[index1]);
1987 return 0; 1998 return 0;
1988} 1999}
1989 2000
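
The rewritten btrfs_sync_log() above indexes the new per-root log_commit[] and log_commit_wait[] arrays with transid % 2. Because a root can have at most two log transactions in flight, the modulo hands each of them a private commit slot; a small standalone illustration (userspace, not kernel code):

#include <stdio.h>

/* with at most two log transactions outstanding, consecutive transids
 * always land in different slots, so a committer never shares its
 * commit flag and wait queue with the transaction right behind it */
static int log_commit_slot(unsigned long transid)
{
	return transid % 2;
}

int main(void)
{
	unsigned long transid;

	for (transid = 7; transid < 11; transid++)
		printf("log transid %lu -> commit slot %d\n",
		       transid, log_commit_slot(transid));
	return 0;
}
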
@@ -2019,38 +2030,18 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2019 start, end, GFP_NOFS); 2030 start, end, GFP_NOFS);
2020 } 2031 }
2021 2032
2022 log = root->log_root; 2033 if (log->log_transid > 0) {
2023 ret = btrfs_del_root(trans, root->fs_info->log_root_tree, 2034 ret = btrfs_del_root(trans, root->fs_info->log_root_tree,
2024 &log->root_key); 2035 &log->root_key);
2025 BUG_ON(ret); 2036 BUG_ON(ret);
2037 }
2026 root->log_root = NULL; 2038 root->log_root = NULL;
2027 kfree(root->log_root); 2039 free_extent_buffer(log->node);
2040 kfree(log);
2028 return 0; 2041 return 0;
2029} 2042}
2030 2043
2031/* 2044/*
2032 * helper function to update the item for a given subvolumes log root
2033 * in the tree of log roots
2034 */
2035static int update_log_root(struct btrfs_trans_handle *trans,
2036 struct btrfs_root *log)
2037{
2038 u64 bytenr = btrfs_root_bytenr(&log->root_item);
2039 int ret;
2040
2041 if (log->node->start == bytenr)
2042 return 0;
2043
2044 btrfs_set_root_bytenr(&log->root_item, log->node->start);
2045 btrfs_set_root_generation(&log->root_item, trans->transid);
2046 btrfs_set_root_level(&log->root_item, btrfs_header_level(log->node));
2047 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2048 &log->root_key, &log->root_item);
2049 BUG_ON(ret);
2050 return ret;
2051}
2052
2053/*
2054 * If both a file and directory are logged, and unlinks or renames are 2045 * If both a file and directory are logged, and unlinks or renames are
2055 * mixed in, we have a few interesting corners: 2046 * mixed in, we have a few interesting corners:
2056 * 2047 *
@@ -2711,11 +2702,6 @@ next_slot:
2711 2702
2712 btrfs_free_path(path); 2703 btrfs_free_path(path);
2713 btrfs_free_path(dst_path); 2704 btrfs_free_path(dst_path);
2714
2715 mutex_lock(&root->fs_info->tree_log_mutex);
2716 ret = update_log_root(trans, log);
2717 BUG_ON(ret);
2718 mutex_unlock(&root->fs_info->tree_log_mutex);
2719out: 2705out:
2720 return 0; 2706 return 0;
2721} 2707}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 3451e1cca2b5..bcd14ebccae1 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -20,7 +20,6 @@
20#include <linux/buffer_head.h> 20#include <linux/buffer_head.h>
21#include <linux/blkdev.h> 21#include <linux/blkdev.h>
22#include <linux/random.h> 22#include <linux/random.h>
23#include <linux/version.h>
24#include <asm/div64.h> 23#include <asm/div64.h>
25#include "compat.h" 24#include "compat.h"
26#include "ctree.h" 25#include "ctree.h"
@@ -104,10 +103,8 @@ static noinline struct btrfs_device *__find_device(struct list_head *head,
104 u64 devid, u8 *uuid) 103 u64 devid, u8 *uuid)
105{ 104{
106 struct btrfs_device *dev; 105 struct btrfs_device *dev;
107 struct list_head *cur;
108 106
109 list_for_each(cur, head) { 107 list_for_each_entry(dev, head, dev_list) {
110 dev = list_entry(cur, struct btrfs_device, dev_list);
111 if (dev->devid == devid && 108 if (dev->devid == devid &&
112 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) { 109 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
113 return dev; 110 return dev;
@@ -118,11 +115,9 @@ static noinline struct btrfs_device *__find_device(struct list_head *head,
118 115
119static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid) 116static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
120{ 117{
121 struct list_head *cur;
122 struct btrfs_fs_devices *fs_devices; 118 struct btrfs_fs_devices *fs_devices;
123 119
124 list_for_each(cur, &fs_uuids) { 120 list_for_each_entry(fs_devices, &fs_uuids, list) {
125 fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
126 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0) 121 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
127 return fs_devices; 122 return fs_devices;
128 } 123 }
@@ -159,6 +154,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
159loop: 154loop:
160 spin_lock(&device->io_lock); 155 spin_lock(&device->io_lock);
161 156
157loop_lock:
162 /* take all the bios off the list at once and process them 158 /* take all the bios off the list at once and process them
163 * later on (without the lock held). But, remember the 159 * later on (without the lock held). But, remember the
164 * tail and other pointers so the bios can be properly reinserted 160 * tail and other pointers so the bios can be properly reinserted
@@ -208,7 +204,7 @@ loop:
208 * is now congested. Back off and let other work structs 204 * is now congested. Back off and let other work structs
209 * run instead 205 * run instead
210 */ 206 */
211 if (pending && bdi_write_congested(bdi) && 207 if (pending && bdi_write_congested(bdi) && num_run > 16 &&
212 fs_info->fs_devices->open_devices > 1) { 208 fs_info->fs_devices->open_devices > 1) {
213 struct bio *old_head; 209 struct bio *old_head;
214 210
@@ -220,7 +216,8 @@ loop:
220 tail->bi_next = old_head; 216 tail->bi_next = old_head;
221 else 217 else
222 device->pending_bio_tail = tail; 218 device->pending_bio_tail = tail;
223 device->running_pending = 0; 219
220 device->running_pending = 1;
224 221
225 spin_unlock(&device->io_lock); 222 spin_unlock(&device->io_lock);
226 btrfs_requeue_work(&device->work); 223 btrfs_requeue_work(&device->work);
@@ -229,6 +226,11 @@ loop:
229 } 226 }
230 if (again) 227 if (again)
231 goto loop; 228 goto loop;
229
230 spin_lock(&device->io_lock);
231 if (device->pending_bios)
232 goto loop_lock;
233 spin_unlock(&device->io_lock);
232done: 234done:
233 return 0; 235 return 0;
234} 236}
@@ -345,14 +347,11 @@ error:
345 347
346int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) 348int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
347{ 349{
348 struct list_head *tmp; 350 struct btrfs_device *device, *next;
349 struct list_head *cur;
350 struct btrfs_device *device;
351 351
352 mutex_lock(&uuid_mutex); 352 mutex_lock(&uuid_mutex);
353again: 353again:
354 list_for_each_safe(cur, tmp, &fs_devices->devices) { 354 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
355 device = list_entry(cur, struct btrfs_device, dev_list);
356 if (device->in_fs_metadata) 355 if (device->in_fs_metadata)
357 continue; 356 continue;
358 357
@@ -383,14 +382,12 @@ again:
383 382
384static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) 383static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
385{ 384{
386 struct list_head *cur;
387 struct btrfs_device *device; 385 struct btrfs_device *device;
388 386
389 if (--fs_devices->opened > 0) 387 if (--fs_devices->opened > 0)
390 return 0; 388 return 0;
391 389
392 list_for_each(cur, &fs_devices->devices) { 390 list_for_each_entry(device, &fs_devices->devices, dev_list) {
393 device = list_entry(cur, struct btrfs_device, dev_list);
394 if (device->bdev) { 391 if (device->bdev) {
395 close_bdev_exclusive(device->bdev, device->mode); 392 close_bdev_exclusive(device->bdev, device->mode);
396 fs_devices->open_devices--; 393 fs_devices->open_devices--;
@@ -439,7 +436,6 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
439{ 436{
440 struct block_device *bdev; 437 struct block_device *bdev;
441 struct list_head *head = &fs_devices->devices; 438 struct list_head *head = &fs_devices->devices;
442 struct list_head *cur;
443 struct btrfs_device *device; 439 struct btrfs_device *device;
444 struct block_device *latest_bdev = NULL; 440 struct block_device *latest_bdev = NULL;
445 struct buffer_head *bh; 441 struct buffer_head *bh;
@@ -450,8 +446,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
450 int seeding = 1; 446 int seeding = 1;
451 int ret = 0; 447 int ret = 0;
452 448
453 list_for_each(cur, head) { 449 list_for_each_entry(device, head, dev_list) {
454 device = list_entry(cur, struct btrfs_device, dev_list);
455 if (device->bdev) 450 if (device->bdev)
456 continue; 451 continue;
457 if (!device->name) 452 if (!device->name)
@@ -578,7 +573,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
578 *(unsigned long long *)disk_super->fsid, 573 *(unsigned long long *)disk_super->fsid,
579 *(unsigned long long *)(disk_super->fsid + 8)); 574 *(unsigned long long *)(disk_super->fsid + 8));
580 } 575 }
581 printk(KERN_INFO "devid %llu transid %llu %s\n", 576 printk(KERN_CONT "devid %llu transid %llu %s\n",
582 (unsigned long long)devid, (unsigned long long)transid, path); 577 (unsigned long long)devid, (unsigned long long)transid, path);
583 ret = device_list_add(path, disk_super, devid, fs_devices_ret); 578 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
584 579
@@ -1017,14 +1012,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1017 } 1012 }
1018 1013
1019 if (strcmp(device_path, "missing") == 0) { 1014 if (strcmp(device_path, "missing") == 0) {
1020 struct list_head *cur;
1021 struct list_head *devices; 1015 struct list_head *devices;
1022 struct btrfs_device *tmp; 1016 struct btrfs_device *tmp;
1023 1017
1024 device = NULL; 1018 device = NULL;
1025 devices = &root->fs_info->fs_devices->devices; 1019 devices = &root->fs_info->fs_devices->devices;
1026 list_for_each(cur, devices) { 1020 list_for_each_entry(tmp, devices, dev_list) {
1027 tmp = list_entry(cur, struct btrfs_device, dev_list);
1028 if (tmp->in_fs_metadata && !tmp->bdev) { 1021 if (tmp->in_fs_metadata && !tmp->bdev) {
1029 device = tmp; 1022 device = tmp;
1030 break; 1023 break;
@@ -1280,7 +1273,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1280 struct btrfs_trans_handle *trans; 1273 struct btrfs_trans_handle *trans;
1281 struct btrfs_device *device; 1274 struct btrfs_device *device;
1282 struct block_device *bdev; 1275 struct block_device *bdev;
1283 struct list_head *cur;
1284 struct list_head *devices; 1276 struct list_head *devices;
1285 struct super_block *sb = root->fs_info->sb; 1277 struct super_block *sb = root->fs_info->sb;
1286 u64 total_bytes; 1278 u64 total_bytes;
@@ -1304,8 +1296,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1304 mutex_lock(&root->fs_info->volume_mutex); 1296 mutex_lock(&root->fs_info->volume_mutex);
1305 1297
1306 devices = &root->fs_info->fs_devices->devices; 1298 devices = &root->fs_info->fs_devices->devices;
1307 list_for_each(cur, devices) { 1299 list_for_each_entry(device, devices, dev_list) {
1308 device = list_entry(cur, struct btrfs_device, dev_list);
1309 if (device->bdev == bdev) { 1300 if (device->bdev == bdev) {
1310 ret = -EEXIST; 1301 ret = -EEXIST;
1311 goto error; 1302 goto error;
@@ -1704,7 +1695,6 @@ static u64 div_factor(u64 num, int factor)
1704int btrfs_balance(struct btrfs_root *dev_root) 1695int btrfs_balance(struct btrfs_root *dev_root)
1705{ 1696{
1706 int ret; 1697 int ret;
1707 struct list_head *cur;
1708 struct list_head *devices = &dev_root->fs_info->fs_devices->devices; 1698 struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
1709 struct btrfs_device *device; 1699 struct btrfs_device *device;
1710 u64 old_size; 1700 u64 old_size;
@@ -1723,8 +1713,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
1723 dev_root = dev_root->fs_info->dev_root; 1713 dev_root = dev_root->fs_info->dev_root;
1724 1714
1725 /* step one make some room on all the devices */ 1715 /* step one make some room on all the devices */
1726 list_for_each(cur, devices) { 1716 list_for_each_entry(device, devices, dev_list) {
1727 device = list_entry(cur, struct btrfs_device, dev_list);
1728 old_size = device->total_bytes; 1717 old_size = device->total_bytes;
1729 size_to_free = div_factor(old_size, 1); 1718 size_to_free = div_factor(old_size, 1);
1730 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024); 1719 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 7f332e270894..a9d3bf4d2689 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -21,6 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/rwsem.h> 22#include <linux/rwsem.h>
23#include <linux/xattr.h> 23#include <linux/xattr.h>
24#include <linux/security.h>
24#include "ctree.h" 25#include "ctree.h"
25#include "btrfs_inode.h" 26#include "btrfs_inode.h"
26#include "transaction.h" 27#include "transaction.h"
@@ -45,9 +46,12 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
45 /* lookup the xattr by name */ 46 /* lookup the xattr by name */
46 di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name, 47 di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name,
47 strlen(name), 0); 48 strlen(name), 0);
48 if (!di || IS_ERR(di)) { 49 if (!di) {
49 ret = -ENODATA; 50 ret = -ENODATA;
50 goto out; 51 goto out;
52 } else if (IS_ERR(di)) {
53 ret = PTR_ERR(di);
54 goto out;
51 } 55 }
52 56
53 leaf = path->nodes[0]; 57 leaf = path->nodes[0];
@@ -62,6 +66,14 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
62 ret = -ERANGE; 66 ret = -ERANGE;
63 goto out; 67 goto out;
64 } 68 }
69
70 /*
71 * The way things are packed into the leaf is like this
72 * |struct btrfs_dir_item|name|data|
 73 * where name is the xattr name, e.g. security.foo, and data is the
 74 * content of the xattr. data_ptr points to the location in memory
 75 * where the data starts in the in-memory leaf
76 */
65 data_ptr = (unsigned long)((char *)(di + 1) + 77 data_ptr = (unsigned long)((char *)(di + 1) +
66 btrfs_dir_name_len(leaf, di)); 78 btrfs_dir_name_len(leaf, di));
67 read_extent_buffer(leaf, buffer, data_ptr, 79 read_extent_buffer(leaf, buffer, data_ptr,
@@ -86,7 +98,7 @@ int __btrfs_setxattr(struct inode *inode, const char *name,
86 if (!path) 98 if (!path)
87 return -ENOMEM; 99 return -ENOMEM;
88 100
89 trans = btrfs_start_transaction(root, 1); 101 trans = btrfs_join_transaction(root, 1);
90 btrfs_set_trans_block_group(trans, inode); 102 btrfs_set_trans_block_group(trans, inode);
91 103
92 /* first lets see if we already have this xattr */ 104 /* first lets see if we already have this xattr */
@@ -176,7 +188,6 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
176 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 188 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
177 if (ret < 0) 189 if (ret < 0)
178 goto err; 190 goto err;
179 ret = 0;
180 advance = 0; 191 advance = 0;
181 while (1) { 192 while (1) {
182 leaf = path->nodes[0]; 193 leaf = path->nodes[0];
@@ -320,3 +331,34 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
320 return -EOPNOTSUPP; 331 return -EOPNOTSUPP;
321 return __btrfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE); 332 return __btrfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
322} 333}
334
335int btrfs_xattr_security_init(struct inode *inode, struct inode *dir)
336{
337 int err;
338 size_t len;
339 void *value;
340 char *suffix;
341 char *name;
342
343 err = security_inode_init_security(inode, dir, &suffix, &value, &len);
344 if (err) {
345 if (err == -EOPNOTSUPP)
346 return 0;
347 return err;
348 }
349
350 name = kmalloc(XATTR_SECURITY_PREFIX_LEN + strlen(suffix) + 1,
351 GFP_NOFS);
352 if (!name) {
353 err = -ENOMEM;
354 } else {
355 strcpy(name, XATTR_SECURITY_PREFIX);
356 strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
357 err = __btrfs_setxattr(inode, name, value, len, 0);
358 kfree(name);
359 }
360
361 kfree(suffix);
362 kfree(value);
363 return err;
364}
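
btrfs_xattr_security_init() above concatenates the security namespace prefix with the suffix returned by the LSM before handing the result to __btrfs_setxattr(). A standalone sketch of just the name assembly (userspace; the prefix macros mirror <linux/xattr.h>, and "selinux" is only an example suffix):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define XATTR_SECURITY_PREFIX		"security."
#define XATTR_SECURITY_PREFIX_LEN	(sizeof(XATTR_SECURITY_PREFIX) - 1)

int main(void)
{
	const char *suffix = "selinux";		/* handed back by the LSM */
	char *name;

	name = malloc(XATTR_SECURITY_PREFIX_LEN + strlen(suffix) + 1);
	if (!name)
		return 1;

	strcpy(name, XATTR_SECURITY_PREFIX);
	strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);

	printf("%s\n", name);			/* prints "security.selinux" */
	free(name);
	return 0;
}
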
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
index 5b1d08f8e68d..c71e9c3cf3f7 100644
--- a/fs/btrfs/xattr.h
+++ b/fs/btrfs/xattr.h
@@ -36,4 +36,6 @@ extern int btrfs_setxattr(struct dentry *dentry, const char *name,
36 const void *value, size_t size, int flags); 36 const void *value, size_t size, int flags);
37extern int btrfs_removexattr(struct dentry *dentry, const char *name); 37extern int btrfs_removexattr(struct dentry *dentry, const char *name);
38 38
39extern int btrfs_xattr_security_init(struct inode *inode, struct inode *dir);
40
39#endif /* __XATTR__ */ 41#endif /* __XATTR__ */
diff --git a/fs/buffer.c b/fs/buffer.c
index b58208f1640a..665d446b25bc 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2688,7 +2688,7 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
2688 struct buffer_head *bh; 2688 struct buffer_head *bh;
2689 BUG_ON(fsdata != NULL && page_has_buffers(page)); 2689 BUG_ON(fsdata != NULL && page_has_buffers(page));
2690 2690
2691 if (unlikely(copied < len) && !page_has_buffers(page)) 2691 if (unlikely(copied < len) && head)
2692 attach_nobh_buffers(page, head); 2692 attach_nobh_buffers(page, head);
2693 if (page_has_buffers(page)) 2693 if (page_has_buffers(page))
2694 return generic_write_end(file, mapping, pos, len, 2694 return generic_write_end(file, mapping, pos, len,
diff --git a/fs/compat.c b/fs/compat.c
index 65a070e705ab..d0145ca27572 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1407,7 +1407,7 @@ int compat_do_execve(char * filename,
1407 bprm->cred = prepare_exec_creds(); 1407 bprm->cred = prepare_exec_creds();
1408 if (!bprm->cred) 1408 if (!bprm->cred)
1409 goto out_unlock; 1409 goto out_unlock;
1410 check_unsafe_exec(bprm); 1410 check_unsafe_exec(bprm, current->files);
1411 1411
1412 file = open_exec(filename); 1412 file = open_exec(filename);
1413 retval = PTR_ERR(file); 1413 retval = PTR_ERR(file);
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c8f8d5904f5e..9c6d815dd191 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -785,7 +785,7 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
785 785
786 if (copy_in_user(&sgio->status, &sgio32->status, 786 if (copy_in_user(&sgio->status, &sgio32->status,
787 (4 * sizeof(unsigned char)) + 787 (4 * sizeof(unsigned char)) +
788 (2 * sizeof(unsigned (short))) + 788 (2 * sizeof(unsigned short)) +
789 (3 * sizeof(int)))) 789 (3 * sizeof(int))))
790 return -EFAULT; 790 return -EFAULT;
791 791
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 9c2358391147..8e93341f3e82 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -553,24 +553,12 @@ static void detach_groups(struct config_group *group)
553 553
554 child = sd->s_dentry; 554 child = sd->s_dentry;
555 555
556 /*
557 * Note: we hide this from lockdep since we have no way
558 * to teach lockdep about recursive
559 * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path
560 * in an inode tree, which are valid as soon as
561 * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a
562 * parent inode to one of its children.
563 */
564 lockdep_off();
565 mutex_lock(&child->d_inode->i_mutex); 556 mutex_lock(&child->d_inode->i_mutex);
566 lockdep_on();
567 557
568 configfs_detach_group(sd->s_element); 558 configfs_detach_group(sd->s_element);
569 child->d_inode->i_flags |= S_DEAD; 559 child->d_inode->i_flags |= S_DEAD;
570 560
571 lockdep_off();
572 mutex_unlock(&child->d_inode->i_mutex); 561 mutex_unlock(&child->d_inode->i_mutex);
573 lockdep_on();
574 562
575 d_delete(child); 563 d_delete(child);
576 dput(child); 564 dput(child);
@@ -760,22 +748,11 @@ static int configfs_attach_item(struct config_item *parent_item,
760 * We are going to remove an inode and its dentry but 748 * We are going to remove an inode and its dentry but
761 * the VFS may already have hit and used them. Thus, 749 * the VFS may already have hit and used them. Thus,
762 * we must lock them as rmdir() would. 750 * we must lock them as rmdir() would.
763 *
764 * Note: we hide this from lockdep since we have no way
765 * to teach lockdep about recursive
766 * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path
767 * in an inode tree, which are valid as soon as
768 * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a
769 * parent inode to one of its children.
770 */ 751 */
771 lockdep_off();
772 mutex_lock(&dentry->d_inode->i_mutex); 752 mutex_lock(&dentry->d_inode->i_mutex);
773 lockdep_on();
774 configfs_remove_dir(item); 753 configfs_remove_dir(item);
775 dentry->d_inode->i_flags |= S_DEAD; 754 dentry->d_inode->i_flags |= S_DEAD;
776 lockdep_off();
777 mutex_unlock(&dentry->d_inode->i_mutex); 755 mutex_unlock(&dentry->d_inode->i_mutex);
778 lockdep_on();
779 d_delete(dentry); 756 d_delete(dentry);
780 } 757 }
781 } 758 }
@@ -810,25 +787,14 @@ static int configfs_attach_group(struct config_item *parent_item,
810 * 787 *
811 * We must also lock the inode to remove it safely in case of 788 * We must also lock the inode to remove it safely in case of
812 * error, as rmdir() would. 789 * error, as rmdir() would.
813 *
814 * Note: we hide this from lockdep since we have no way
815 * to teach lockdep about recursive
816 * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path
817 * in an inode tree, which are valid as soon as
818 * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a
819 * parent inode to one of its children.
820 */ 790 */
821 lockdep_off();
822 mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); 791 mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
823 lockdep_on();
824 ret = populate_groups(to_config_group(item)); 792 ret = populate_groups(to_config_group(item));
825 if (ret) { 793 if (ret) {
826 configfs_detach_item(item); 794 configfs_detach_item(item);
827 dentry->d_inode->i_flags |= S_DEAD; 795 dentry->d_inode->i_flags |= S_DEAD;
828 } 796 }
829 lockdep_off();
830 mutex_unlock(&dentry->d_inode->i_mutex); 797 mutex_unlock(&dentry->d_inode->i_mutex);
831 lockdep_on();
832 if (ret) 798 if (ret)
833 d_delete(dentry); 799 d_delete(dentry);
834 } 800 }
@@ -990,17 +956,7 @@ static int configfs_depend_prep(struct dentry *origin,
990 BUG_ON(!origin || !sd); 956 BUG_ON(!origin || !sd);
991 957
992 /* Lock this guy on the way down */ 958 /* Lock this guy on the way down */
993 /*
994 * Note: we hide this from lockdep since we have no way
995 * to teach lockdep about recursive
996 * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path
997 * in an inode tree, which are valid as soon as
998 * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a
999 * parent inode to one of its children.
1000 */
1001 lockdep_off();
1002 mutex_lock(&sd->s_dentry->d_inode->i_mutex); 959 mutex_lock(&sd->s_dentry->d_inode->i_mutex);
1003 lockdep_on();
1004 if (sd->s_element == target) /* Boo-yah */ 960 if (sd->s_element == target) /* Boo-yah */
1005 goto out; 961 goto out;
1006 962
@@ -1014,9 +970,7 @@ static int configfs_depend_prep(struct dentry *origin,
1014 } 970 }
1015 971
1016 /* We looped all our children and didn't find target */ 972 /* We looped all our children and didn't find target */
1017 lockdep_off();
1018 mutex_unlock(&sd->s_dentry->d_inode->i_mutex); 973 mutex_unlock(&sd->s_dentry->d_inode->i_mutex);
1019 lockdep_on();
1020 ret = -ENOENT; 974 ret = -ENOENT;
1021 975
1022out: 976out:
@@ -1036,16 +990,11 @@ static void configfs_depend_rollback(struct dentry *origin,
1036 struct dentry *dentry = item->ci_dentry; 990 struct dentry *dentry = item->ci_dentry;
1037 991
1038 while (dentry != origin) { 992 while (dentry != origin) {
1039 /* See comments in configfs_depend_prep() */
1040 lockdep_off();
1041 mutex_unlock(&dentry->d_inode->i_mutex); 993 mutex_unlock(&dentry->d_inode->i_mutex);
1042 lockdep_on();
1043 dentry = dentry->d_parent; 994 dentry = dentry->d_parent;
1044 } 995 }
1045 996
1046 lockdep_off();
1047 mutex_unlock(&origin->d_inode->i_mutex); 997 mutex_unlock(&origin->d_inode->i_mutex);
1048 lockdep_on();
1049} 998}
1050 999
1051int configfs_depend_item(struct configfs_subsystem *subsys, 1000int configfs_depend_item(struct configfs_subsystem *subsys,
@@ -1380,16 +1329,8 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
1380 } 1329 }
1381 1330
1382 /* Wait until the racing operation terminates */ 1331 /* Wait until the racing operation terminates */
1383 /*
1384 * Note: we hide this from lockdep since we are locked
1385 * with subclass I_MUTEX_NORMAL from vfs_rmdir() (why
1386 * not I_MUTEX_CHILD?), and I_MUTEX_XATTR or
1387 * I_MUTEX_QUOTA are not relevant for the locked inode.
1388 */
1389 lockdep_off();
1390 mutex_lock(wait_mutex); 1332 mutex_lock(wait_mutex);
1391 mutex_unlock(wait_mutex); 1333 mutex_unlock(wait_mutex);
1392 lockdep_on();
1393 } 1334 }
1394 } while (ret == -EAGAIN); 1335 } while (ret == -EAGAIN);
1395 1336
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index c01e043670e2..f6caeb1d1106 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1716,7 +1716,7 @@ static int ecryptfs_copy_filename(char **copied_name, size_t *copied_name_size,
1716{ 1716{
1717 int rc = 0; 1717 int rc = 0;
1718 1718
1719 (*copied_name) = kmalloc((name_size + 2), GFP_KERNEL); 1719 (*copied_name) = kmalloc((name_size + 1), GFP_KERNEL);
1720 if (!(*copied_name)) { 1720 if (!(*copied_name)) {
1721 rc = -ENOMEM; 1721 rc = -ENOMEM;
1722 goto out; 1722 goto out;
@@ -1726,7 +1726,7 @@ static int ecryptfs_copy_filename(char **copied_name, size_t *copied_name_size,
1726 * in printing out the 1726 * in printing out the
1727 * string in debug 1727 * string in debug
1728 * messages */ 1728 * messages */
1729 (*copied_name_size) = (name_size + 1); 1729 (*copied_name_size) = name_size;
1730out: 1730out:
1731 return rc; 1731 return rc;
1732} 1732}
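
The ecryptfs hunk above fixes an off-by-one in the filename copy: only one extra byte is needed for the NUL terminator used in debug printing, and the reported size should be the bare name length. A standalone restatement of the corrected sizing (copy_filename() here is a hypothetical userspace helper, not the ecryptfs function):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* copy a filename, NUL-terminating it for printing, while reporting
 * only the name length itself as the copied size */
static char *copy_filename(const char *name, size_t name_size,
			   size_t *copied_name_size)
{
	char *copy = malloc(name_size + 1);	/* +1 for '\0' only */

	if (!copy)
		return NULL;
	memcpy(copy, name, name_size);
	copy[name_size] = '\0';
	*copied_name_size = name_size;		/* excludes the terminator */
	return copy;
}

int main(void)
{
	size_t len;
	char *copy = copy_filename("example.txt", 11, &len);

	if (copy) {
		printf("%s (%zu bytes)\n", copy, len);
		free(copy);
	}
	return 0;
}
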
diff --git a/fs/exec.c b/fs/exec.c
index 0dd60a01f1b4..929b58004b7e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1049,16 +1049,32 @@ EXPORT_SYMBOL(install_exec_creds);
1049 * - the caller must hold current->cred_exec_mutex to protect against 1049 * - the caller must hold current->cred_exec_mutex to protect against
1050 * PTRACE_ATTACH 1050 * PTRACE_ATTACH
1051 */ 1051 */
1052void check_unsafe_exec(struct linux_binprm *bprm) 1052void check_unsafe_exec(struct linux_binprm *bprm, struct files_struct *files)
1053{ 1053{
1054 struct task_struct *p = current; 1054 struct task_struct *p = current, *t;
1055 unsigned long flags;
1056 unsigned n_fs, n_files, n_sighand;
1055 1057
1056 bprm->unsafe = tracehook_unsafe_exec(p); 1058 bprm->unsafe = tracehook_unsafe_exec(p);
1057 1059
1058 if (atomic_read(&p->fs->count) > 1 || 1060 n_fs = 1;
1059 atomic_read(&p->files->count) > 1 || 1061 n_files = 1;
1060 atomic_read(&p->sighand->count) > 1) 1062 n_sighand = 1;
1063 lock_task_sighand(p, &flags);
1064 for (t = next_thread(p); t != p; t = next_thread(t)) {
1065 if (t->fs == p->fs)
1066 n_fs++;
1067 if (t->files == files)
1068 n_files++;
1069 n_sighand++;
1070 }
1071
1072 if (atomic_read(&p->fs->count) > n_fs ||
1073 atomic_read(&p->files->count) > n_files ||
1074 atomic_read(&p->sighand->count) > n_sighand)
1061 bprm->unsafe |= LSM_UNSAFE_SHARE; 1075 bprm->unsafe |= LSM_UNSAFE_SHARE;
1076
1077 unlock_task_sighand(p, &flags);
1062} 1078}
1063 1079
1064/* 1080/*
@@ -1273,7 +1289,7 @@ int do_execve(char * filename,
1273 bprm->cred = prepare_exec_creds(); 1289 bprm->cred = prepare_exec_creds();
1274 if (!bprm->cred) 1290 if (!bprm->cred)
1275 goto out_unlock; 1291 goto out_unlock;
1276 check_unsafe_exec(bprm); 1292 check_unsafe_exec(bprm, displaced);
1277 1293
1278 file = open_exec(filename); 1294 file = open_exec(filename);
1279 retval = PTR_ERR(file); 1295 retval = PTR_ERR(file);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 6903d37af037..9b800d97a687 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -108,7 +108,8 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
108 108
109 if (hugetlb_reserve_pages(inode, 109 if (hugetlb_reserve_pages(inode,
110 vma->vm_pgoff >> huge_page_order(h), 110 vma->vm_pgoff >> huge_page_order(h),
111 len >> huge_page_shift(h), vma)) 111 len >> huge_page_shift(h), vma,
112 vma->vm_flags))
112 goto out; 113 goto out;
113 114
114 ret = 0; 115 ret = 0;
@@ -947,7 +948,7 @@ static int can_do_hugetlb_shm(void)
947 can_do_mlock()); 948 can_do_mlock());
948} 949}
949 950
950struct file *hugetlb_file_setup(const char *name, size_t size) 951struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag)
951{ 952{
952 int error = -ENOMEM; 953 int error = -ENOMEM;
953 struct file *file; 954 struct file *file;
@@ -981,7 +982,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size)
981 982
982 error = -ENOMEM; 983 error = -ENOMEM;
983 if (hugetlb_reserve_pages(inode, 0, 984 if (hugetlb_reserve_pages(inode, 0,
984 size >> huge_page_shift(hstate_inode(inode)), NULL)) 985 size >> huge_page_shift(hstate_inode(inode)), NULL,
986 acctflag))
985 goto out_inode; 987 goto out_inode;
986 988
987 d_instantiate(dentry, inode); 989 d_instantiate(dentry, inode);
diff --git a/fs/internal.h b/fs/internal.h
index 53af885f1732..0d8ac497b3d5 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -43,7 +43,7 @@ extern void __init chrdev_init(void);
43/* 43/*
44 * exec.c 44 * exec.c
45 */ 45 */
46extern void check_unsafe_exec(struct linux_binprm *); 46extern void check_unsafe_exec(struct linux_binprm *, struct files_struct *);
47 47
48/* 48/*
49 * namespace.c 49 * namespace.c
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 6063a8e4b9f3..763b78a6e9de 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -427,7 +427,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
427 goto out; 427 goto out;
428 case -EAGAIN: 428 case -EAGAIN:
429 ret = nlm_lck_denied; 429 ret = nlm_lck_denied;
430 goto out; 430 break;
431 case FILE_LOCK_DEFERRED: 431 case FILE_LOCK_DEFERRED:
432 if (wait) 432 if (wait)
433 break; 433 break;
@@ -443,6 +443,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
443 goto out; 443 goto out;
444 } 444 }
445 445
446 ret = nlm_lck_denied;
447 if (!wait)
448 goto out;
449
446 ret = nlm_lck_blocked; 450 ret = nlm_lck_blocked;
447 451
448 /* Append to list of blocked */ 452 /* Append to list of blocked */
diff --git a/fs/seq_file.c b/fs/seq_file.c
index b569ff1c4dc8..5267098532bf 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -54,6 +54,64 @@ int seq_open(struct file *file, const struct seq_operations *op)
54} 54}
55EXPORT_SYMBOL(seq_open); 55EXPORT_SYMBOL(seq_open);
56 56
57static int traverse(struct seq_file *m, loff_t offset)
58{
59 loff_t pos = 0, index;
60 int error = 0;
61 void *p;
62
63 m->version = 0;
64 index = 0;
65 m->count = m->from = 0;
66 if (!offset) {
67 m->index = index;
68 return 0;
69 }
70 if (!m->buf) {
71 m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
72 if (!m->buf)
73 return -ENOMEM;
74 }
75 p = m->op->start(m, &index);
76 while (p) {
77 error = PTR_ERR(p);
78 if (IS_ERR(p))
79 break;
80 error = m->op->show(m, p);
81 if (error < 0)
82 break;
83 if (unlikely(error)) {
84 error = 0;
85 m->count = 0;
86 }
87 if (m->count == m->size)
88 goto Eoverflow;
89 if (pos + m->count > offset) {
90 m->from = offset - pos;
91 m->count -= m->from;
92 m->index = index;
93 break;
94 }
95 pos += m->count;
96 m->count = 0;
97 if (pos == offset) {
98 index++;
99 m->index = index;
100 break;
101 }
102 p = m->op->next(m, p, &index);
103 }
104 m->op->stop(m, p);
105 m->index = index;
106 return error;
107
108Eoverflow:
109 m->op->stop(m, p);
110 kfree(m->buf);
111 m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
112 return !m->buf ? -ENOMEM : -EAGAIN;
113}
114
57/** 115/**
58 * seq_read - ->read() method for sequential files. 116 * seq_read - ->read() method for sequential files.
59 * @file: the file to read from 117 * @file: the file to read from
@@ -186,63 +244,6 @@ Efault:
186} 244}
187EXPORT_SYMBOL(seq_read); 245EXPORT_SYMBOL(seq_read);
188 246
189static int traverse(struct seq_file *m, loff_t offset)
190{
191 loff_t pos = 0, index;
192 int error = 0;
193 void *p;
194
195 m->version = 0;
196 index = 0;
197 m->count = m->from = 0;
198 if (!offset) {
199 m->index = index;
200 return 0;
201 }
202 if (!m->buf) {
203 m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
204 if (!m->buf)
205 return -ENOMEM;
206 }
207 p = m->op->start(m, &index);
208 while (p) {
209 error = PTR_ERR(p);
210 if (IS_ERR(p))
211 break;
212 error = m->op->show(m, p);
213 if (error < 0)
214 break;
215 if (unlikely(error)) {
216 error = 0;
217 m->count = 0;
218 }
219 if (m->count == m->size)
220 goto Eoverflow;
221 if (pos + m->count > offset) {
222 m->from = offset - pos;
223 m->count -= m->from;
224 m->index = index;
225 break;
226 }
227 pos += m->count;
228 m->count = 0;
229 if (pos == offset) {
230 index++;
231 m->index = index;
232 break;
233 }
234 p = m->op->next(m, p, &index);
235 }
236 m->op->stop(m, p);
237 return error;
238
239Eoverflow:
240 m->op->stop(m, p);
241 kfree(m->buf);
242 m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
243 return !m->buf ? -ENOMEM : -EAGAIN;
244}
245
246/** 247/**
247 * seq_lseek - ->llseek() method for sequential files. 248 * seq_lseek - ->llseek() method for sequential files.
248 * @file: the file in question 249 * @file: the file in question
diff --git a/fs/super.c b/fs/super.c
index 645e5403f2a0..61dce001dd57 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -301,7 +301,7 @@ void generic_shutdown_super(struct super_block *sb)
301 /* 301 /*
302 * wait for asynchronous fs operations to finish before going further 302 * wait for asynchronous fs operations to finish before going further
303 */ 303 */
304 async_synchronize_full_special(&sb->s_async_list); 304 async_synchronize_full_domain(&sb->s_async_list);
305 305
306 /* bad name - it should be evict_inodes() */ 306 /* bad name - it should be evict_inodes() */
307 invalidate_inodes(sb); 307 invalidate_inodes(sb);
@@ -470,7 +470,7 @@ restart:
470 sb->s_count++; 470 sb->s_count++;
471 spin_unlock(&sb_lock); 471 spin_unlock(&sb_lock);
472 down_read(&sb->s_umount); 472 down_read(&sb->s_umount);
473 async_synchronize_full_special(&sb->s_async_list); 473 async_synchronize_full_domain(&sb->s_async_list);
474 if (sb->s_root && (wait || sb->s_dirt)) 474 if (sb->s_root && (wait || sb->s_dirt))
475 sb->s_op->sync_fs(sb, wait); 475 sb->s_op->sync_fs(sb, wait);
476 up_read(&sb->s_umount); 476 up_read(&sb->s_umount);
diff --git a/include/acpi/pdc_intel.h b/include/acpi/pdc_intel.h
index e72bfdd887f9..552637b0d051 100644
--- a/include/acpi/pdc_intel.h
+++ b/include/acpi/pdc_intel.h
@@ -14,6 +14,7 @@
14#define ACPI_PDC_SMP_T_SWCOORD (0x0080) 14#define ACPI_PDC_SMP_T_SWCOORD (0x0080)
15#define ACPI_PDC_C_C1_FFH (0x0100) 15#define ACPI_PDC_C_C1_FFH (0x0100)
16#define ACPI_PDC_C_C2C3_FFH (0x0200) 16#define ACPI_PDC_C_C2C3_FFH (0x0200)
17#define ACPI_PDC_SMP_P_HWCOORD (0x0800)
17 18
18#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \ 19#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \
19 ACPI_PDC_C_C1_HALT | \ 20 ACPI_PDC_C_C1_HALT | \
@@ -22,6 +23,7 @@
22#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \ 23#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \
23 ACPI_PDC_C_C1_HALT | \ 24 ACPI_PDC_C_C1_HALT | \
24 ACPI_PDC_SMP_P_SWCOORD | \ 25 ACPI_PDC_SMP_P_SWCOORD | \
26 ACPI_PDC_SMP_P_HWCOORD | \
25 ACPI_PDC_P_FFH) 27 ACPI_PDC_P_FFH)
26 28
27#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \ 29#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index cd16d6e668ce..d797e119e3d5 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -222,7 +222,7 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
222 222
223static inline void crypto_free_shash(struct crypto_shash *tfm) 223static inline void crypto_free_shash(struct crypto_shash *tfm)
224{ 224{
225 crypto_free_tfm(crypto_shash_tfm(tfm)); 225 crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
226} 226}
227 227
228static inline unsigned int crypto_shash_alignmask( 228static inline unsigned int crypto_shash_alignmask(
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b3bcf72dc656..912cd52db965 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -261,6 +261,7 @@ typedef struct drm_i915_irq_wait {
261#define I915_PARAM_LAST_DISPATCH 3 261#define I915_PARAM_LAST_DISPATCH 3
262#define I915_PARAM_CHIPSET_ID 4 262#define I915_PARAM_CHIPSET_ID 4
263#define I915_PARAM_HAS_GEM 5 263#define I915_PARAM_HAS_GEM 5
264#define I915_PARAM_NUM_FENCES_AVAIL 6
264 265
265typedef struct drm_i915_getparam { 266typedef struct drm_i915_getparam {
266 int param; 267 int param;
@@ -272,6 +273,7 @@ typedef struct drm_i915_getparam {
272#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 273#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
273#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 274#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
274#define I915_SETPARAM_ALLOW_BATCHBUFFER 3 275#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
276#define I915_SETPARAM_NUM_USED_FENCES 4
275 277
276typedef struct drm_i915_setparam { 278typedef struct drm_i915_setparam {
277 int param; 279 int param;
diff --git a/include/linux/async.h b/include/linux/async.h
index c4ecacd0b327..68a9530196f2 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -17,9 +17,11 @@ typedef u64 async_cookie_t;
17typedef void (async_func_ptr) (void *data, async_cookie_t cookie); 17typedef void (async_func_ptr) (void *data, async_cookie_t cookie);
18 18
19extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data); 19extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
20extern async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *list); 20extern async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
21 struct list_head *list);
21extern void async_synchronize_full(void); 22extern void async_synchronize_full(void);
22extern void async_synchronize_full_special(struct list_head *list); 23extern void async_synchronize_full_domain(struct list_head *list);
23extern void async_synchronize_cookie(async_cookie_t cookie); 24extern void async_synchronize_cookie(async_cookie_t cookie);
24extern void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *list); 25extern void async_synchronize_cookie_domain(async_cookie_t cookie,
26 struct list_head *list);
25 27
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 3bacd71509fb..1f2e9020acc6 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -552,7 +552,12 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
552 const struct crypto_type *frontend, 552 const struct crypto_type *frontend,
553 u32 type, u32 mask); 553 u32 type, u32 mask);
554struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); 554struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
555void crypto_free_tfm(struct crypto_tfm *tfm); 555void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
556
557static inline void crypto_free_tfm(struct crypto_tfm *tfm)
558{
559 return crypto_destroy_tfm(tfm, tfm);
560}
556 561
557int alg_test(const char *driver, const char *alg, u32 type, u32 mask); 562int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
558 563
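Editorial note on the API change above (a sketch, not part of the patch): crypto_destroy_tfm() takes the memory block to free as its first argument and the embedded base tfm as its second, so a frontend that wraps crypto_tfm in a larger object can release the wrapper in one call. The crypto_free_shash() hunk earlier does exactly this; a hypothetical frontend helper would look the same:

    #include <crypto/hash.h>

    static inline void example_free_wrapped_tfm(struct crypto_shash *tfm)
    {
            /* first argument: allocation to free; second: tfm whose algorithm is put */
            crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
    }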
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 3e0f64c335c8..3e68469c1885 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -282,6 +282,18 @@ static inline void dmaengine_put(void)
282} 282}
283#endif 283#endif
284 284
285#ifdef CONFIG_NET_DMA
286#define net_dmaengine_get() dmaengine_get()
287#define net_dmaengine_put() dmaengine_put()
288#else
289static inline void net_dmaengine_get(void)
290{
291}
292static inline void net_dmaengine_put(void)
293{
294}
295#endif
296
285dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, 297dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
286 void *dest, void *src, size_t len); 298 void *dest, void *src, size_t len);
287dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, 299dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 818fe21257e8..31527e17076b 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -960,6 +960,21 @@ extern struct fb_info *registered_fb[FB_MAX];
960extern int num_registered_fb; 960extern int num_registered_fb;
961extern struct class *fb_class; 961extern struct class *fb_class;
962 962
963static inline int lock_fb_info(struct fb_info *info)
964{
965 mutex_lock(&info->lock);
966 if (!info->fbops) {
967 mutex_unlock(&info->lock);
968 return 0;
969 }
970 return 1;
971}
972
973static inline void unlock_fb_info(struct fb_info *info)
974{
975 mutex_unlock(&info->lock);
976}
977
963static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, 978static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
964 u8 *src, u32 s_pitch, u32 height) 979 u8 *src, u32 s_pitch, u32 height)
965{ 980{
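The new lock_fb_info()/unlock_fb_info() helpers above guard against a framebuffer whose driver has already been unregistered (info->fbops cleared) while a caller still holds a reference. A minimal usage sketch, not taken from this patch ('info' stands for any struct fb_info the caller already holds):

    #include <linux/fb.h>

    static int example_fb_op(struct fb_info *info)
    {
            if (!lock_fb_info(info))
                    return -ENODEV;         /* device vanished under us */
            /* safe to dereference info->fbops and friends here */
            unlock_fb_info(info);
            return 0;
    }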
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f1d2fba19ea0..af09660001c7 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -33,7 +33,8 @@ unsigned long hugetlb_total_pages(void);
33int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 33int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
34 unsigned long address, int write_access); 34 unsigned long address, int write_access);
35int hugetlb_reserve_pages(struct inode *inode, long from, long to, 35int hugetlb_reserve_pages(struct inode *inode, long from, long to,
36 struct vm_area_struct *vma); 36 struct vm_area_struct *vma,
37 int acctflags);
37void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); 38void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
38 39
39extern unsigned long hugepages_treat_as_movable; 40extern unsigned long hugepages_treat_as_movable;
@@ -138,7 +139,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
138 139
139extern const struct file_operations hugetlbfs_file_operations; 140extern const struct file_operations hugetlbfs_file_operations;
140extern struct vm_operations_struct hugetlb_vm_ops; 141extern struct vm_operations_struct hugetlb_vm_ops;
141struct file *hugetlb_file_setup(const char *name, size_t); 142struct file *hugetlb_file_setup(const char *name, size_t, int);
142int hugetlb_get_quota(struct address_space *mapping, long delta); 143int hugetlb_get_quota(struct address_space *mapping, long delta);
143void hugetlb_put_quota(struct address_space *mapping, long delta); 144void hugetlb_put_quota(struct address_space *mapping, long delta);
144 145
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 343df9ef2412..7fa371898e3e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -480,7 +480,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
480/* 480/*
481 * swap - swap value of @a and @b 481 * swap - swap value of @a and @b
482 */ 482 */
483#define swap(a, b) ({ typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; }) 483#define swap(a, b) \
484 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
484 485
485/** 486/**
486 * container_of - cast a member of a structure out to the containing structure 487 * container_of - cast a member of a structure out to the containing structure
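The swap() rewrite above replaces a GNU statement expression with the usual do { } while (0) idiom for multi-statement macros, so an invocation expands to exactly one ordinary statement. A trivial illustration with hypothetical locals (not from the patch):

    int a = 1, b = 2;

    if (a > b)
            swap(a, b);     /* expands to a single statement, so no braces needed */
    else
            b = a;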
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e8ddc98b8405..323561582c10 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1129,8 +1129,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1129 unsigned long flag, unsigned long pgoff); 1129 unsigned long flag, unsigned long pgoff);
1130extern unsigned long mmap_region(struct file *file, unsigned long addr, 1130extern unsigned long mmap_region(struct file *file, unsigned long addr,
1131 unsigned long len, unsigned long flags, 1131 unsigned long len, unsigned long flags,
1132 unsigned int vm_flags, unsigned long pgoff, 1132 unsigned int vm_flags, unsigned long pgoff);
1133 int accountable);
1134 1133
1135static inline unsigned long do_mmap(struct file *file, unsigned long addr, 1134static inline unsigned long do_mmap(struct file *file, unsigned long addr,
1136 unsigned long len, unsigned long prot, 1135 unsigned long len, unsigned long prot,
diff --git a/include/linux/module.h b/include/linux/module.h
index f3b8329eb5b8..145a75528cc1 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -407,7 +407,6 @@ static inline local_t *__module_ref_addr(struct module *mod, int cpu)
407static inline void __module_get(struct module *module) 407static inline void __module_get(struct module *module)
408{ 408{
409 if (module) { 409 if (module) {
410 BUG_ON(module_refcount(module) == 0);
411 local_inc(__module_ref_addr(module, get_cpu())); 410 local_inc(__module_ref_addr(module, get_cpu()));
412 put_cpu(); 411 put_cpu();
413 } 412 }
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 48890cf3f96e..7bd624bfdcfd 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -684,7 +684,7 @@ int pci_enable_rom(struct pci_dev *pdev);
684void pci_disable_rom(struct pci_dev *pdev); 684void pci_disable_rom(struct pci_dev *pdev);
685void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); 685void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
686void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); 686void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
687size_t pci_get_rom_size(void __iomem *rom, size_t size); 687size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
688 688
689/* Power management related routines */ 689/* Power management related routines */
690int pci_save_state(struct pci_dev *dev); 690int pci_save_state(struct pci_dev *dev);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5a7c76388731..2127e959e0f4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -443,6 +443,7 @@ struct pacct_struct {
443 * @utime: time spent in user mode, in &cputime_t units 443 * @utime: time spent in user mode, in &cputime_t units
444 * @stime: time spent in kernel mode, in &cputime_t units 444 * @stime: time spent in kernel mode, in &cputime_t units
445 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds 445 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
446 * @lock: lock for fields in this struct
446 * 447 *
447 * This structure groups together three kinds of CPU time that are 448 * This structure groups together three kinds of CPU time that are
448 * tracked for threads and thread groups. Most things considering 449 * tracked for threads and thread groups. Most things considering
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e0c0fccced46..a0c66a2e00ad 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -124,7 +124,12 @@ do { \
124#ifdef CONFIG_GENERIC_LOCKBREAK 124#ifdef CONFIG_GENERIC_LOCKBREAK
125#define spin_is_contended(lock) ((lock)->break_lock) 125#define spin_is_contended(lock) ((lock)->break_lock)
126#else 126#else
127
128#ifdef __raw_spin_is_contended
127#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) 129#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
130#else
131#define spin_is_contended(lock) (((void)(lock), 0))
132#endif /*__raw_spin_is_contended*/
128#endif 133#endif
129 134
130/** 135/**
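With the fallback added above, spin_is_contended() is always available: it maps to __raw_spin_is_contended() where the architecture provides one and otherwise evaluates to a constant 0, so the branch below simply disappears. A hedged caller sketch (my_lock, more_work and do_one_item are placeholders, not from the patch):

    spin_lock(&my_lock);
    while (more_work()) {
            do_one_item();
            if (spin_is_contended(&my_lock)) {      /* constant 0 without arch support */
                    spin_unlock(&my_lock);
                    cpu_relax();
                    spin_lock(&my_lock);
            }
    }
    spin_unlock(&my_lock);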
diff --git a/include/linux/wait.h b/include/linux/wait.h
index ef609f842fac..a210ede73b56 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -132,6 +132,8 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
132 list_del(&old->task_list); 132 list_del(&old->task_list);
133} 133}
134 134
135void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
136 int nr_exclusive, int sync, void *key);
135void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 137void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
136extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode); 138extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
137extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); 139extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -333,16 +335,19 @@ do { \
333 for (;;) { \ 335 for (;;) { \
334 prepare_to_wait_exclusive(&wq, &__wait, \ 336 prepare_to_wait_exclusive(&wq, &__wait, \
335 TASK_INTERRUPTIBLE); \ 337 TASK_INTERRUPTIBLE); \
336 if (condition) \ 338 if (condition) { \
339 finish_wait(&wq, &__wait); \
337 break; \ 340 break; \
341 } \
338 if (!signal_pending(current)) { \ 342 if (!signal_pending(current)) { \
339 schedule(); \ 343 schedule(); \
340 continue; \ 344 continue; \
341 } \ 345 } \
342 ret = -ERESTARTSYS; \ 346 ret = -ERESTARTSYS; \
347 abort_exclusive_wait(&wq, &__wait, \
348 TASK_INTERRUPTIBLE, NULL); \
343 break; \ 349 break; \
344 } \ 350 } \
345 finish_wait(&wq, &__wait); \
346} while (0) 351} while (0)
347 352
348#define wait_event_interruptible_exclusive(wq, condition) \ 353#define wait_event_interruptible_exclusive(wq, condition) \
@@ -431,6 +436,8 @@ extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
431void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); 436void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
432void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); 437void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
433void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); 438void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
439void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
440 unsigned int mode, void *key);
434int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 441int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
435int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 442int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
436 443
diff --git a/include/video/aty128.h b/include/video/aty128.h
index 7079beb005e8..51ac69f05bdc 100644
--- a/include/video/aty128.h
+++ b/include/video/aty128.h
@@ -21,9 +21,9 @@
21#define I2C_CNTL_1 0x0094 21#define I2C_CNTL_1 0x0094
22#define PALETTE_INDEX 0x00b0 22#define PALETTE_INDEX 0x00b0
23#define PALETTE_DATA 0x00b4 23#define PALETTE_DATA 0x00b4
24#define CONFIG_CNTL 0x00e0 24#define CNFG_CNTL 0x00e0
25#define GEN_RESET_CNTL 0x00f0 25#define GEN_RESET_CNTL 0x00f0
26#define CONFIG_MEMSIZE 0x00f8 26#define CNFG_MEMSIZE 0x00f8
27#define MEM_CNTL 0x0140 27#define MEM_CNTL 0x0140
28#define MEM_POWER_MISC 0x015c 28#define MEM_POWER_MISC 0x015c
29#define AGP_BASE 0x0170 29#define AGP_BASE 0x0170
diff --git a/include/video/mach64.h b/include/video/mach64.h
index a8332e528ec1..89e91c0cb737 100644
--- a/include/video/mach64.h
+++ b/include/video/mach64.h
@@ -103,7 +103,7 @@
103#define CUR_HORZ_VERT_OFF 0x0070 /* Dword offset 0_1C */ 103#define CUR_HORZ_VERT_OFF 0x0070 /* Dword offset 0_1C */
104#define CUR2_HORZ_VERT_OFF 0x0070 /* Dword offset 0_1C */ 104#define CUR2_HORZ_VERT_OFF 0x0070 /* Dword offset 0_1C */
105 105
106#define CONFIG_PANEL_LG 0x0074 /* Dword offset 0_1D (LG) */ 106#define CNFG_PANEL_LG 0x0074 /* Dword offset 0_1D (LG) */
107 107
108/* General I/O Control */ 108/* General I/O Control */
109#define GP_IO 0x0078 /* Dword offset 0_1E */ 109#define GP_IO 0x0078 /* Dword offset 0_1E */
@@ -146,8 +146,8 @@
146#define CLOCK_SEL_CNTL 0x0090 /* Dword offset 0_24 */ 146#define CLOCK_SEL_CNTL 0x0090 /* Dword offset 0_24 */
147 147
148/* Configuration */ 148/* Configuration */
149#define CONFIG_STAT1 0x0094 /* Dword offset 0_25 */ 149#define CNFG_STAT1 0x0094 /* Dword offset 0_25 */
150#define CONFIG_STAT2 0x0098 /* Dword offset 0_26 */ 150#define CNFG_STAT2 0x0098 /* Dword offset 0_26 */
151 151
152/* Bus Control */ 152/* Bus Control */
153#define BUS_CNTL 0x00A0 /* Dword offset 0_28 */ 153#define BUS_CNTL 0x00A0 /* Dword offset 0_28 */
@@ -190,9 +190,9 @@
190#define POWER_MANAGEMENT_LG 0x00D8 /* Dword offset 0_36 (LG) */ 190#define POWER_MANAGEMENT_LG 0x00D8 /* Dword offset 0_36 (LG) */
191 191
192/* Configuration */ 192/* Configuration */
193#define CONFIG_CNTL 0x00DC /* Dword offset 0_37 (CT, ET, VT) */ 193#define CNFG_CNTL 0x00DC /* Dword offset 0_37 (CT, ET, VT) */
194#define CONFIG_CHIP_ID 0x00E0 /* Dword offset 0_38 */ 194#define CNFG_CHIP_ID 0x00E0 /* Dword offset 0_38 */
195#define CONFIG_STAT0 0x00E4 /* Dword offset 0_39 */ 195#define CNFG_STAT0 0x00E4 /* Dword offset 0_39 */
196 196
197/* Test and Debug */ 197/* Test and Debug */
198#define CRC_SIG 0x00E8 /* Dword offset 0_3A */ 198#define CRC_SIG 0x00E8 /* Dword offset 0_3A */
@@ -851,17 +851,17 @@
851#define PLL_YCLK_CNTL 0x29 851#define PLL_YCLK_CNTL 0x29
852#define PM_DYN_CLK_CNTL 0x2A 852#define PM_DYN_CLK_CNTL 0x2A
853 853
854/* CONFIG_CNTL register constants */ 854/* CNFG_CNTL register constants */
855#define APERTURE_4M_ENABLE 1 855#define APERTURE_4M_ENABLE 1
856#define APERTURE_8M_ENABLE 2 856#define APERTURE_8M_ENABLE 2
857#define VGA_APERTURE_ENABLE 4 857#define VGA_APERTURE_ENABLE 4
858 858
859/* CONFIG_STAT0 register constants (GX, CX) */ 859/* CNFG_STAT0 register constants (GX, CX) */
860#define CFG_BUS_TYPE 0x00000007 860#define CFG_BUS_TYPE 0x00000007
861#define CFG_MEM_TYPE 0x00000038 861#define CFG_MEM_TYPE 0x00000038
862#define CFG_INIT_DAC_TYPE 0x00000e00 862#define CFG_INIT_DAC_TYPE 0x00000e00
863 863
864/* CONFIG_STAT0 register constants (CT, ET, VT) */ 864/* CNFG_STAT0 register constants (CT, ET, VT) */
865#define CFG_MEM_TYPE_xT 0x00000007 865#define CFG_MEM_TYPE_xT 0x00000007
866 866
867#define ISA 0 867#define ISA 0
@@ -942,7 +942,7 @@
942#define PCI_ATI_VENDOR_ID 0x1002 942#define PCI_ATI_VENDOR_ID 0x1002
943 943
944 944
945/* CONFIG_CHIP_ID register constants */ 945/* CNFG_CHIP_ID register constants */
946#define CFG_CHIP_TYPE 0x0000FFFF 946#define CFG_CHIP_TYPE 0x0000FFFF
947#define CFG_CHIP_CLASS 0x00FF0000 947#define CFG_CHIP_CLASS 0x00FF0000
948#define CFG_CHIP_REV 0xFF000000 948#define CFG_CHIP_REV 0xFF000000
@@ -951,7 +951,7 @@
951#define CFG_CHIP_MINOR 0xC0000000 951#define CFG_CHIP_MINOR 0xC0000000
952 952
953 953
954/* Chip IDs read from CONFIG_CHIP_ID */ 954/* Chip IDs read from CNFG_CHIP_ID */
955 955
956/* mach64GX family */ 956/* mach64GX family */
957#define GX_CHIP_ID 0xD7 /* mach64GX (ATI888GX00) */ 957#define GX_CHIP_ID 0xD7 /* mach64GX (ATI888GX00) */
@@ -1254,7 +1254,7 @@
1254#define CRTC2_DISPLAY_DIS 0x00000400 1254#define CRTC2_DISPLAY_DIS 0x00000400
1255 1255
1256/* LCD register indices */ 1256/* LCD register indices */
1257#define CONFIG_PANEL 0x00 1257#define CNFG_PANEL 0x00
1258#define LCD_GEN_CNTL 0x01 1258#define LCD_GEN_CNTL 0x01
1259#define DSTN_CONTROL 0x02 1259#define DSTN_CONTROL 0x02
1260#define HFB_PITCH_ADDR 0x03 1260#define HFB_PITCH_ADDR 0x03
diff --git a/include/video/radeon.h b/include/video/radeon.h
index 1cd09cc5b169..e072b16b39ab 100644
--- a/include/video/radeon.h
+++ b/include/video/radeon.h
@@ -11,13 +11,13 @@
11#define HI_STAT 0x004C 11#define HI_STAT 0x004C
12#define BUS_CNTL1 0x0034 12#define BUS_CNTL1 0x0034
13#define I2C_CNTL_1 0x0094 13#define I2C_CNTL_1 0x0094
14#define CONFIG_CNTL 0x00E0 14#define CNFG_CNTL 0x00E0
15#define CONFIG_MEMSIZE 0x00F8 15#define CNFG_MEMSIZE 0x00F8
16#define CONFIG_APER_0_BASE 0x0100 16#define CNFG_APER_0_BASE 0x0100
17#define CONFIG_APER_1_BASE 0x0104 17#define CNFG_APER_1_BASE 0x0104
18#define CONFIG_APER_SIZE 0x0108 18#define CNFG_APER_SIZE 0x0108
19#define CONFIG_REG_1_BASE 0x010C 19#define CNFG_REG_1_BASE 0x010C
20#define CONFIG_REG_APER_SIZE 0x0110 20#define CNFG_REG_APER_SIZE 0x0110
21#define PAD_AGPINPUT_DELAY 0x0164 21#define PAD_AGPINPUT_DELAY 0x0164
22#define PAD_CTLR_STRENGTH 0x0168 22#define PAD_CTLR_STRENGTH 0x0168
23#define PAD_CTLR_UPDATE 0x016C 23#define PAD_CTLR_UPDATE 0x016C
@@ -509,7 +509,7 @@
509/* CLOCK_CNTL_INDEX bit constants */ 509/* CLOCK_CNTL_INDEX bit constants */
510#define PLL_WR_EN 0x00000080 510#define PLL_WR_EN 0x00000080
511 511
512/* CONFIG_CNTL bit constants */ 512/* CNFG_CNTL bit constants */
513#define CFG_VGA_RAM_EN 0x00000100 513#define CFG_VGA_RAM_EN 0x00000100
514#define CFG_ATI_REV_ID_MASK (0xf << 16) 514#define CFG_ATI_REV_ID_MASK (0xf << 16)
515#define CFG_ATI_REV_A11 (0 << 16) 515#define CFG_ATI_REV_A11 (0 << 16)
@@ -980,7 +980,7 @@
980 980
981/* masks */ 981/* masks */
982 982
983#define CONFIG_MEMSIZE_MASK 0x1f000000 983#define CNFG_MEMSIZE_MASK 0x1f000000
984#define MEM_CFG_TYPE 0x40000000 984#define MEM_CFG_TYPE 0x40000000
985#define DST_OFFSET_MASK 0x003fffff 985#define DST_OFFSET_MASK 0x003fffff
986#define DST_PITCH_MASK 0x3fc00000 986#define DST_PITCH_MASK 0x3fc00000
diff --git a/ipc/shm.c b/ipc/shm.c
index c0a021f7f41a..05d51d2a792c 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -340,6 +340,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
340 struct file * file; 340 struct file * file;
341 char name[13]; 341 char name[13];
342 int id; 342 int id;
343 int acctflag = 0;
343 344
344 if (size < SHMMIN || size > ns->shm_ctlmax) 345 if (size < SHMMIN || size > ns->shm_ctlmax)
345 return -EINVAL; 346 return -EINVAL;
@@ -364,11 +365,12 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
364 365
365 sprintf (name, "SYSV%08x", key); 366 sprintf (name, "SYSV%08x", key);
366 if (shmflg & SHM_HUGETLB) { 367 if (shmflg & SHM_HUGETLB) {
367 /* hugetlb_file_setup takes care of mlock user accounting */ 368 /* hugetlb_file_setup applies strict accounting */
368 file = hugetlb_file_setup(name, size); 369 if (shmflg & SHM_NORESERVE)
370 acctflag = VM_NORESERVE;
371 file = hugetlb_file_setup(name, size, acctflag);
369 shp->mlock_user = current_user(); 372 shp->mlock_user = current_user();
370 } else { 373 } else {
371 int acctflag = 0;
372 /* 374 /*
373 * Do not allow no accounting for OVERCOMMIT_NEVER, even 375 * Do not allow no accounting for OVERCOMMIT_NEVER, even
374 * if it's asked for. 376 * if it's asked for.
@@ -565,11 +567,15 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
565 struct hstate *h = hstate_file(shp->shm_file); 567 struct hstate *h = hstate_file(shp->shm_file);
566 *rss += pages_per_huge_page(h) * mapping->nrpages; 568 *rss += pages_per_huge_page(h) * mapping->nrpages;
567 } else { 569 } else {
570#ifdef CONFIG_SHMEM
568 struct shmem_inode_info *info = SHMEM_I(inode); 571 struct shmem_inode_info *info = SHMEM_I(inode);
569 spin_lock(&info->lock); 572 spin_lock(&info->lock);
570 *rss += inode->i_mapping->nrpages; 573 *rss += inode->i_mapping->nrpages;
571 *swp += info->swapped; 574 *swp += info->swapped;
572 spin_unlock(&info->lock); 575 spin_unlock(&info->lock);
576#else
577 *rss += inode->i_mapping->nrpages;
578#endif
573 } 579 }
574 580
575 total++; 581 total++;
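The newseg() change above forwards SHM_NORESERVE to hugetlb_file_setup() as VM_NORESERVE, so a huge-page segment can opt out of the up-front reservation. A user-space sketch of the flag combination being wired up (illustrative only; the fallback defines carry the values from <linux/shm.h> in case the libc headers lack them):

    #include <stddef.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    #ifndef SHM_HUGETLB
    #define SHM_HUGETLB   04000
    #endif
    #ifndef SHM_NORESERVE
    #define SHM_NORESERVE 010000
    #endif

    static int create_huge_segment(size_t len)
    {
            /* len must be a multiple of the huge page size */
            return shmget(IPC_PRIVATE, len,
                          IPC_CREAT | SHM_HUGETLB | SHM_NORESERVE | 0600);
    }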
diff --git a/kernel/async.c b/kernel/async.c
index 608b32b42812..f565891f2c9b 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -54,6 +54,7 @@ asynchronous and synchronous parts of the kernel.
54#include <linux/sched.h> 54#include <linux/sched.h>
55#include <linux/init.h> 55#include <linux/init.h>
56#include <linux/kthread.h> 56#include <linux/kthread.h>
57#include <linux/delay.h>
57#include <asm/atomic.h> 58#include <asm/atomic.h>
58 59
59static async_cookie_t next_cookie = 1; 60static async_cookie_t next_cookie = 1;
@@ -132,21 +133,23 @@ static void run_one_entry(void)
132 entry = list_first_entry(&async_pending, struct async_entry, list); 133 entry = list_first_entry(&async_pending, struct async_entry, list);
133 134
134 /* 2) move it to the running queue */ 135 /* 2) move it to the running queue */
135 list_del(&entry->list); 136 list_move_tail(&entry->list, entry->running);
136 list_add_tail(&entry->list, &async_running);
137 spin_unlock_irqrestore(&async_lock, flags); 137 spin_unlock_irqrestore(&async_lock, flags);
138 138
139 /* 3) run it (and print duration)*/ 139 /* 3) run it (and print duration)*/
140 if (initcall_debug && system_state == SYSTEM_BOOTING) { 140 if (initcall_debug && system_state == SYSTEM_BOOTING) {
141 printk("calling %lli_%pF @ %i\n", entry->cookie, entry->func, task_pid_nr(current)); 141 printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
142 entry->func, task_pid_nr(current));
142 calltime = ktime_get(); 143 calltime = ktime_get();
143 } 144 }
144 entry->func(entry->data, entry->cookie); 145 entry->func(entry->data, entry->cookie);
145 if (initcall_debug && system_state == SYSTEM_BOOTING) { 146 if (initcall_debug && system_state == SYSTEM_BOOTING) {
146 rettime = ktime_get(); 147 rettime = ktime_get();
147 delta = ktime_sub(rettime, calltime); 148 delta = ktime_sub(rettime, calltime);
148 printk("initcall %lli_%pF returned 0 after %lld usecs\n", entry->cookie, 149 printk("initcall %lli_%pF returned 0 after %lld usecs\n",
149 entry->func, ktime_to_ns(delta) >> 10); 150 (long long)entry->cookie,
151 entry->func,
152 (long long)ktime_to_ns(delta) >> 10);
150 } 153 }
151 154
152 /* 4) remove it from the running queue */ 155 /* 4) remove it from the running queue */
@@ -205,18 +208,44 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
205 return newcookie; 208 return newcookie;
206} 209}
207 210
211/**
212 * async_schedule - schedule a function for asynchronous execution
213 * @ptr: function to execute asynchronously
214 * @data: data pointer to pass to the function
215 *
216 * Returns an async_cookie_t that may be used for checkpointing later.
217 * Note: This function may be called from atomic or non-atomic contexts.
218 */
208async_cookie_t async_schedule(async_func_ptr *ptr, void *data) 219async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
209{ 220{
210 return __async_schedule(ptr, data, &async_pending); 221 return __async_schedule(ptr, data, &async_running);
211} 222}
212EXPORT_SYMBOL_GPL(async_schedule); 223EXPORT_SYMBOL_GPL(async_schedule);
213 224
214async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running) 225/**
226 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
227 * @ptr: function to execute asynchronously
228 * @data: data pointer to pass to the function
229 * @running: running list for the domain
230 *
231 * Returns an async_cookie_t that may be used for checkpointing later.
232 * @running may be used in the async_synchronize_*_domain() functions
233 * to wait within a certain synchronization domain rather than globally.
 234 * A synchronization domain is specified via the running queue @running.
235 * Note: This function may be called from atomic or non-atomic contexts.
236 */
237async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
238 struct list_head *running)
215{ 239{
216 return __async_schedule(ptr, data, running); 240 return __async_schedule(ptr, data, running);
217} 241}
218EXPORT_SYMBOL_GPL(async_schedule_special); 242EXPORT_SYMBOL_GPL(async_schedule_domain);
219 243
244/**
245 * async_synchronize_full - synchronize all asynchronous function calls
246 *
247 * This function waits until all asynchronous function calls have been done.
248 */
220void async_synchronize_full(void) 249void async_synchronize_full(void)
221{ 250{
222 do { 251 do {
@@ -225,13 +254,30 @@ void async_synchronize_full(void)
225} 254}
226EXPORT_SYMBOL_GPL(async_synchronize_full); 255EXPORT_SYMBOL_GPL(async_synchronize_full);
227 256
228void async_synchronize_full_special(struct list_head *list) 257/**
 258 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
259 * @list: running list to synchronize on
260 *
261 * This function waits until all asynchronous function calls for the
262 * synchronization domain specified by the running list @list have been done.
263 */
264void async_synchronize_full_domain(struct list_head *list)
229{ 265{
230 async_synchronize_cookie_special(next_cookie, list); 266 async_synchronize_cookie_domain(next_cookie, list);
231} 267}
232EXPORT_SYMBOL_GPL(async_synchronize_full_special); 268EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
233 269
234void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running) 270/**
271 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
272 * @cookie: async_cookie_t to use as checkpoint
273 * @running: running list to synchronize on
274 *
275 * This function waits until all asynchronous function calls for the
 276 * synchronization domain specified by the running list @running submitted
277 * prior to @cookie have been done.
278 */
279void async_synchronize_cookie_domain(async_cookie_t cookie,
280 struct list_head *running)
235{ 281{
236 ktime_t starttime, delta, endtime; 282 ktime_t starttime, delta, endtime;
237 283
@@ -247,14 +293,22 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r
247 delta = ktime_sub(endtime, starttime); 293 delta = ktime_sub(endtime, starttime);
248 294
249 printk("async_continuing @ %i after %lli usec\n", 295 printk("async_continuing @ %i after %lli usec\n",
250 task_pid_nr(current), ktime_to_ns(delta) >> 10); 296 task_pid_nr(current),
297 (long long)ktime_to_ns(delta) >> 10);
251 } 298 }
252} 299}
253EXPORT_SYMBOL_GPL(async_synchronize_cookie_special); 300EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
254 301
302/**
303 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
304 * @cookie: async_cookie_t to use as checkpoint
305 *
306 * This function waits until all asynchronous function calls prior to @cookie
307 * have been done.
308 */
255void async_synchronize_cookie(async_cookie_t cookie) 309void async_synchronize_cookie(async_cookie_t cookie)
256{ 310{
257 async_synchronize_cookie_special(cookie, &async_running); 311 async_synchronize_cookie_domain(cookie, &async_running);
258} 312}
259EXPORT_SYMBOL_GPL(async_synchronize_cookie); 313EXPORT_SYMBOL_GPL(async_synchronize_cookie);
260 314
@@ -315,7 +369,11 @@ static int async_manager_thread(void *unused)
315 ec = atomic_read(&entry_count); 369 ec = atomic_read(&entry_count);
316 370
317 while (tc < ec && tc < MAX_THREADS) { 371 while (tc < ec && tc < MAX_THREADS) {
318 kthread_run(async_thread, NULL, "async/%i", tc); 372 if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
373 tc))) {
374 msleep(100);
375 continue;
376 }
319 atomic_inc(&thread_count); 377 atomic_inc(&thread_count);
320 tc++; 378 tc++;
321 } 379 }
@@ -330,7 +388,9 @@ static int async_manager_thread(void *unused)
330static int __init async_init(void) 388static int __init async_init(void)
331{ 389{
332 if (async_enabled) 390 if (async_enabled)
333 kthread_run(async_manager_thread, NULL, "async/mgr"); 391 if (IS_ERR(kthread_run(async_manager_thread, NULL,
392 "async/mgr")))
393 async_enabled = 0;
334 return 0; 394 return 0;
335} 395}
336 396
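The kerneldoc added above describes the renamed *_domain interface. A minimal usage sketch, not part of the patch, of a driver keeping its own synchronization domain so it only waits for its own callbacks (my_domain and my_probe are made-up names):

    #include <linux/async.h>

    static LIST_HEAD(my_domain);

    static void my_probe(void *data, async_cookie_t cookie)
    {
            /* slow, independent initialization work runs here */
    }

    static void my_init(void)
    {
            async_schedule_domain(my_probe, NULL, &my_domain);
            /* ... other setup ... */
            async_synchronize_full_domain(&my_domain);   /* waits only for this domain */
    }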
diff --git a/kernel/fork.c b/kernel/fork.c
index 242a706e7721..6d5dbb7a13e2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1005,6 +1005,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1005 * triggers too late. This doesn't hurt, the check is only there 1005 * triggers too late. This doesn't hurt, the check is only there
1006 * to stop root fork bombs. 1006 * to stop root fork bombs.
1007 */ 1007 */
1008 retval = -EAGAIN;
1008 if (nr_threads >= max_threads) 1009 if (nr_threads >= max_threads)
1009 goto bad_fork_cleanup_count; 1010 goto bad_fork_cleanup_count;
1010 1011
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index ecf765c6a77a..acd88356ac76 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -71,7 +71,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
71 desc = irq_desc_ptrs[irq]; 71 desc = irq_desc_ptrs[irq];
72 72
73 if (desc && old_desc != desc) 73 if (desc && old_desc != desc)
74 goto out_unlock; 74 goto out_unlock;
75 75
76 node = cpu_to_node(cpu); 76 node = cpu_to_node(cpu);
77 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); 77 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
@@ -84,10 +84,15 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
84 init_copy_one_irq_desc(irq, old_desc, desc, cpu); 84 init_copy_one_irq_desc(irq, old_desc, desc, cpu);
85 85
86 irq_desc_ptrs[irq] = desc; 86 irq_desc_ptrs[irq] = desc;
87 spin_unlock_irqrestore(&sparse_irq_lock, flags);
87 88
88 /* free the old one */ 89 /* free the old one */
89 free_one_irq_desc(old_desc, desc); 90 free_one_irq_desc(old_desc, desc);
91 spin_unlock(&old_desc->lock);
90 kfree(old_desc); 92 kfree(old_desc);
93 spin_lock(&desc->lock);
94
95 return desc;
91 96
92out_unlock: 97out_unlock:
93 spin_unlock_irqrestore(&sparse_irq_lock, flags); 98 spin_unlock_irqrestore(&sparse_irq_lock, flags);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 239988873971..b4d219016b6c 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -57,16 +57,6 @@ int pm_notifier_call_chain(unsigned long val)
57#ifdef CONFIG_PM_DEBUG 57#ifdef CONFIG_PM_DEBUG
58int pm_test_level = TEST_NONE; 58int pm_test_level = TEST_NONE;
59 59
60static int suspend_test(int level)
61{
62 if (pm_test_level == level) {
63 printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
64 mdelay(5000);
65 return 1;
66 }
67 return 0;
68}
69
70static const char * const pm_tests[__TEST_AFTER_LAST] = { 60static const char * const pm_tests[__TEST_AFTER_LAST] = {
71 [TEST_NONE] = "none", 61 [TEST_NONE] = "none",
72 [TEST_CORE] = "core", 62 [TEST_CORE] = "core",
@@ -125,14 +115,24 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
125} 115}
126 116
127power_attr(pm_test); 117power_attr(pm_test);
128#else /* !CONFIG_PM_DEBUG */ 118#endif /* CONFIG_PM_DEBUG */
129static inline int suspend_test(int level) { return 0; }
130#endif /* !CONFIG_PM_DEBUG */
131 119
132#endif /* CONFIG_PM_SLEEP */ 120#endif /* CONFIG_PM_SLEEP */
133 121
134#ifdef CONFIG_SUSPEND 122#ifdef CONFIG_SUSPEND
135 123
124static int suspend_test(int level)
125{
126#ifdef CONFIG_PM_DEBUG
127 if (pm_test_level == level) {
128 printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
129 mdelay(5000);
130 return 1;
131 }
132#endif /* !CONFIG_PM_DEBUG */
133 return 0;
134}
135
136#ifdef CONFIG_PM_TEST_SUSPEND 136#ifdef CONFIG_PM_TEST_SUSPEND
137 137
138/* 138/*
diff --git a/kernel/sched.c b/kernel/sched.c
index 242d0d47a70d..8ee437a5ec1d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4697,8 +4697,8 @@ EXPORT_SYMBOL(default_wake_function);
4697 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns 4697 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
4698 * zero in this (rare) case, and we handle it by continuing to scan the queue. 4698 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4699 */ 4699 */
4700static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, 4700void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
4701 int nr_exclusive, int sync, void *key) 4701 int nr_exclusive, int sync, void *key)
4702{ 4702{
4703 wait_queue_t *curr, *next; 4703 wait_queue_t *curr, *next;
4704 4704
diff --git a/kernel/sys.c b/kernel/sys.c
index e7dc0e10a485..f145c415bc16 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1525,22 +1525,14 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1525 return -EINVAL; 1525 return -EINVAL;
1526 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) 1526 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1527 return -EFAULT; 1527 return -EFAULT;
1528 if (new_rlim.rlim_cur > new_rlim.rlim_max)
1529 return -EINVAL;
1528 old_rlim = current->signal->rlim + resource; 1530 old_rlim = current->signal->rlim + resource;
1529 if ((new_rlim.rlim_max > old_rlim->rlim_max) && 1531 if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
1530 !capable(CAP_SYS_RESOURCE)) 1532 !capable(CAP_SYS_RESOURCE))
1531 return -EPERM; 1533 return -EPERM;
1532 1534 if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
1533 if (resource == RLIMIT_NOFILE) { 1535 return -EPERM;
1534 if (new_rlim.rlim_max == RLIM_INFINITY)
1535 new_rlim.rlim_max = sysctl_nr_open;
1536 if (new_rlim.rlim_cur == RLIM_INFINITY)
1537 new_rlim.rlim_cur = sysctl_nr_open;
1538 if (new_rlim.rlim_max > sysctl_nr_open)
1539 return -EPERM;
1540 }
1541
1542 if (new_rlim.rlim_cur > new_rlim.rlim_max)
1543 return -EINVAL;
1544 1536
1545 retval = security_task_setrlimit(resource, &new_rlim); 1537 retval = security_task_setrlimit(resource, &new_rlim);
1546 if (retval) 1538 if (retval)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7dcf6e9f2b04..9a236ffe2aa4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1737,9 +1737,12 @@ static void clear_ftrace_pid(struct pid *pid)
1737{ 1737{
1738 struct task_struct *p; 1738 struct task_struct *p;
1739 1739
1740 rcu_read_lock();
1740 do_each_pid_task(pid, PIDTYPE_PID, p) { 1741 do_each_pid_task(pid, PIDTYPE_PID, p) {
1741 clear_tsk_trace_trace(p); 1742 clear_tsk_trace_trace(p);
1742 } while_each_pid_task(pid, PIDTYPE_PID, p); 1743 } while_each_pid_task(pid, PIDTYPE_PID, p);
1744 rcu_read_unlock();
1745
1743 put_pid(pid); 1746 put_pid(pid);
1744} 1747}
1745 1748
@@ -1747,9 +1750,11 @@ static void set_ftrace_pid(struct pid *pid)
1747{ 1750{
1748 struct task_struct *p; 1751 struct task_struct *p;
1749 1752
1753 rcu_read_lock();
1750 do_each_pid_task(pid, PIDTYPE_PID, p) { 1754 do_each_pid_task(pid, PIDTYPE_PID, p) {
1751 set_tsk_trace_trace(p); 1755 set_tsk_trace_trace(p);
1752 } while_each_pid_task(pid, PIDTYPE_PID, p); 1756 } while_each_pid_task(pid, PIDTYPE_PID, p);
1757 rcu_read_unlock();
1753} 1758}
1754 1759
1755static void clear_ftrace_pid_task(struct pid **pid) 1760static void clear_ftrace_pid_task(struct pid **pid)
diff --git a/kernel/wait.c b/kernel/wait.c
index cd87131f2fc2..42a2dbc181c8 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -91,6 +91,15 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
91} 91}
92EXPORT_SYMBOL(prepare_to_wait_exclusive); 92EXPORT_SYMBOL(prepare_to_wait_exclusive);
93 93
94/*
95 * finish_wait - clean up after waiting in a queue
96 * @q: waitqueue waited on
97 * @wait: wait descriptor
98 *
99 * Sets current thread back to running state and removes
100 * the wait descriptor from the given waitqueue if still
101 * queued.
102 */
94void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) 103void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
95{ 104{
96 unsigned long flags; 105 unsigned long flags;
@@ -117,6 +126,39 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
117} 126}
118EXPORT_SYMBOL(finish_wait); 127EXPORT_SYMBOL(finish_wait);
119 128
129/*
130 * abort_exclusive_wait - abort exclusive waiting in a queue
131 * @q: waitqueue waited on
132 * @wait: wait descriptor
133 * @state: runstate of the waiter to be woken
134 * @key: key to identify a wait bit queue or %NULL
135 *
136 * Sets current thread back to running state and removes
137 * the wait descriptor from the given waitqueue if still
138 * queued.
139 *
140 * Wakes up the next waiter if the caller is concurrently
141 * woken up through the queue.
142 *
143 * This prevents waiter starvation where an exclusive waiter
 144 * aborts and is woken up concurrently and no one wakes up
145 * the next waiter.
146 */
147void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
148 unsigned int mode, void *key)
149{
150 unsigned long flags;
151
152 __set_current_state(TASK_RUNNING);
153 spin_lock_irqsave(&q->lock, flags);
154 if (!list_empty(&wait->task_list))
155 list_del_init(&wait->task_list);
156 else if (waitqueue_active(q))
157 __wake_up_common(q, mode, 1, 0, key);
158 spin_unlock_irqrestore(&q->lock, flags);
159}
160EXPORT_SYMBOL(abort_exclusive_wait);
161
120int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) 162int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
121{ 163{
122 int ret = default_wake_function(wait, mode, sync, key); 164 int ret = default_wake_function(wait, mode, sync, key);
@@ -177,17 +219,20 @@ int __sched
177__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, 219__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
178 int (*action)(void *), unsigned mode) 220 int (*action)(void *), unsigned mode)
179{ 221{
180 int ret = 0;
181
182 do { 222 do {
223 int ret;
224
183 prepare_to_wait_exclusive(wq, &q->wait, mode); 225 prepare_to_wait_exclusive(wq, &q->wait, mode);
184 if (test_bit(q->key.bit_nr, q->key.flags)) { 226 if (!test_bit(q->key.bit_nr, q->key.flags))
185 if ((ret = (*action)(q->key.flags))) 227 continue;
186 break; 228 ret = action(q->key.flags);
187 } 229 if (!ret)
230 continue;
231 abort_exclusive_wait(wq, &q->wait, mode, &q->key);
232 return ret;
188 } while (test_and_set_bit(q->key.bit_nr, q->key.flags)); 233 } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
189 finish_wait(wq, &q->wait); 234 finish_wait(wq, &q->wait);
190 return ret; 235 return 0;
191} 236}
192EXPORT_SYMBOL(__wait_on_bit_lock); 237EXPORT_SYMBOL(__wait_on_bit_lock);
193 238
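The comment block added above explains the starvation case abort_exclusive_wait() closes. For reference, this is the exclusive-wait shape it pairs with; it mirrors the wait_event_interruptible_exclusive() change earlier in this patch, with wq, wait, condition and ret standing in for the macro's locals:

    for (;;) {
            prepare_to_wait_exclusive(&wq, &wait, TASK_INTERRUPTIBLE);
            if (condition) {
                    finish_wait(&wq, &wait);        /* we consume the wakeup ourselves */
                    break;
            }
            if (!signal_pending(current)) {
                    schedule();
                    continue;
            }
            ret = -ERESTARTSYS;
            /* we may already have been woken: pass the wakeup on to the next waiter */
            abort_exclusive_wait(&wq, &wait, TASK_INTERRUPTIBLE, NULL);
            break;
    }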
diff --git a/mm/fremap.c b/mm/fremap.c
index 736ba7f3306a..b6ec85abbb39 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -198,7 +198,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
198 flags &= MAP_NONBLOCK; 198 flags &= MAP_NONBLOCK;
199 get_file(file); 199 get_file(file);
200 addr = mmap_region(file, start, size, 200 addr = mmap_region(file, start, size,
201 flags, vma->vm_flags, pgoff, 1); 201 flags, vma->vm_flags, pgoff);
202 fput(file); 202 fput(file);
203 if (IS_ERR_VALUE(addr)) { 203 if (IS_ERR_VALUE(addr)) {
204 err = addr; 204 err = addr;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 618e98304080..207464209546 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2269,14 +2269,12 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
2269 2269
2270int hugetlb_reserve_pages(struct inode *inode, 2270int hugetlb_reserve_pages(struct inode *inode,
2271 long from, long to, 2271 long from, long to,
2272 struct vm_area_struct *vma) 2272 struct vm_area_struct *vma,
2273 int acctflag)
2273{ 2274{
2274 long ret, chg; 2275 long ret = 0, chg;
2275 struct hstate *h = hstate_inode(inode); 2276 struct hstate *h = hstate_inode(inode);
2276 2277
2277 if (vma && vma->vm_flags & VM_NORESERVE)
2278 return 0;
2279
2280 /* 2278 /*
2281 * Shared mappings base their reservation on the number of pages that 2279 * Shared mappings base their reservation on the number of pages that
2282 * are already allocated on behalf of the file. Private mappings need 2280 * are already allocated on behalf of the file. Private mappings need
@@ -2285,22 +2283,25 @@ int hugetlb_reserve_pages(struct inode *inode,
2285 */ 2283 */
2286 if (!vma || vma->vm_flags & VM_SHARED) 2284 if (!vma || vma->vm_flags & VM_SHARED)
2287 chg = region_chg(&inode->i_mapping->private_list, from, to); 2285 chg = region_chg(&inode->i_mapping->private_list, from, to);
2288 else { 2286 else
2289 struct resv_map *resv_map = resv_map_alloc();
2290 if (!resv_map)
2291 return -ENOMEM;
2292
2293 chg = to - from; 2287 chg = to - from;
2294 2288
2295 set_vma_resv_map(vma, resv_map);
2296 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2297 }
2298
2299 if (chg < 0) 2289 if (chg < 0)
2300 return chg; 2290 return chg;
2301 2291
2302 if (hugetlb_get_quota(inode->i_mapping, chg)) 2292 if (hugetlb_get_quota(inode->i_mapping, chg))
2303 return -ENOSPC; 2293 return -ENOSPC;
2294
2295 /*
2296 * Only apply hugepage reservation if asked. We still have to
2297 * take the filesystem quota because it is an upper limit
2298 * defined for the mount and not necessarily memory as a whole
2299 */
2300 if (acctflag & VM_NORESERVE) {
2301 reset_vma_resv_huge_pages(vma);
2302 return 0;
2303 }
2304
2304 ret = hugetlb_acct_memory(h, chg); 2305 ret = hugetlb_acct_memory(h, chg);
2305 if (ret < 0) { 2306 if (ret < 0) {
2306 hugetlb_put_quota(inode->i_mapping, chg); 2307 hugetlb_put_quota(inode->i_mapping, chg);
@@ -2308,6 +2309,16 @@ int hugetlb_reserve_pages(struct inode *inode,
2308 } 2309 }
2309 if (!vma || vma->vm_flags & VM_SHARED) 2310 if (!vma || vma->vm_flags & VM_SHARED)
2310 region_add(&inode->i_mapping->private_list, from, to); 2311 region_add(&inode->i_mapping->private_list, from, to);
2312 else {
2313 struct resv_map *resv_map = resv_map_alloc();
2314
2315 if (!resv_map)
2316 return -ENOMEM;
2317
2318 set_vma_resv_map(vma, resv_map);
2319 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2320 }
2321
2311 return 0; 2322 return 0;
2312} 2323}
2313 2324
diff --git a/mm/memory.c b/mm/memory.c
index 22bfa7a47a0b..baa999e87cd2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1999,7 +1999,7 @@ gotten:
1999 * Don't let another task, with possibly unlocked vma, 1999 * Don't let another task, with possibly unlocked vma,
2000 * keep the mlocked page. 2000 * keep the mlocked page.
2001 */ 2001 */
2002 if (vma->vm_flags & VM_LOCKED) { 2002 if ((vma->vm_flags & VM_LOCKED) && old_page) {
2003 lock_page(old_page); /* for LRU manipulation */ 2003 lock_page(old_page); /* for LRU manipulation */
2004 clear_page_mlock(old_page); 2004 clear_page_mlock(old_page);
2005 unlock_page(old_page); 2005 unlock_page(old_page);
diff --git a/mm/mlock.c b/mm/mlock.c
index 028ec482fdd4..037161d61b4e 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -311,7 +311,10 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
311 is_vm_hugetlb_page(vma) || 311 is_vm_hugetlb_page(vma) ||
312 vma == get_gate_vma(current))) { 312 vma == get_gate_vma(current))) {
313 313
314 return __mlock_vma_pages_range(vma, start, end, 1); 314 __mlock_vma_pages_range(vma, start, end, 1);
315
316 /* Hide errors from mmap() and other callers */
317 return 0;
315 } 318 }
316 319
317 /* 320 /*
diff --git a/mm/mmap.c b/mm/mmap.c
index 214b6a258eeb..eb1270bebe67 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -918,7 +918,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
918 struct inode *inode; 918 struct inode *inode;
919 unsigned int vm_flags; 919 unsigned int vm_flags;
920 int error; 920 int error;
921 int accountable = 1;
922 unsigned long reqprot = prot; 921 unsigned long reqprot = prot;
923 922
924 /* 923 /*
@@ -1019,8 +1018,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1019 return -EPERM; 1018 return -EPERM;
1020 vm_flags &= ~VM_MAYEXEC; 1019 vm_flags &= ~VM_MAYEXEC;
1021 } 1020 }
1022 if (is_file_hugepages(file))
1023 accountable = 0;
1024 1021
1025 if (!file->f_op || !file->f_op->mmap) 1022 if (!file->f_op || !file->f_op->mmap)
1026 return -ENODEV; 1023 return -ENODEV;
@@ -1053,8 +1050,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1053 if (error) 1050 if (error)
1054 return error; 1051 return error;
1055 1052
1056 return mmap_region(file, addr, len, flags, vm_flags, pgoff, 1053 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
1057 accountable);
1058} 1054}
1059EXPORT_SYMBOL(do_mmap_pgoff); 1055EXPORT_SYMBOL(do_mmap_pgoff);
1060 1056
@@ -1092,17 +1088,23 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
1092 1088
1093/* 1089/*
1094 * We account for memory if it's a private writeable mapping, 1090 * We account for memory if it's a private writeable mapping,
1095 * and VM_NORESERVE wasn't set. 1091 * not hugepages and VM_NORESERVE wasn't set.
1096 */ 1092 */
1097static inline int accountable_mapping(unsigned int vm_flags) 1093static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
1098{ 1094{
1095 /*
1096 * hugetlb has its own accounting separate from the core VM
1097 * VM_HUGETLB may not be set yet so we cannot check for that flag.
1098 */
1099 if (file && is_file_hugepages(file))
1100 return 0;
1101
1099 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; 1102 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1100} 1103}
1101 1104
1102unsigned long mmap_region(struct file *file, unsigned long addr, 1105unsigned long mmap_region(struct file *file, unsigned long addr,
1103 unsigned long len, unsigned long flags, 1106 unsigned long len, unsigned long flags,
1104 unsigned int vm_flags, unsigned long pgoff, 1107 unsigned int vm_flags, unsigned long pgoff)
1105 int accountable)
1106{ 1108{
1107 struct mm_struct *mm = current->mm; 1109 struct mm_struct *mm = current->mm;
1108 struct vm_area_struct *vma, *prev; 1110 struct vm_area_struct *vma, *prev;
@@ -1128,18 +1130,22 @@ munmap_back:
1128 1130
1129 /* 1131 /*
1130 * Set 'VM_NORESERVE' if we should not account for the 1132 * Set 'VM_NORESERVE' if we should not account for the
1131 * memory use of this mapping. We only honor MAP_NORESERVE 1133 * memory use of this mapping.
1132 * if we're allowed to overcommit memory.
1133 */ 1134 */
1134 if ((flags & MAP_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) 1135 if ((flags & MAP_NORESERVE)) {
1135 vm_flags |= VM_NORESERVE; 1136 /* We honor MAP_NORESERVE if allowed to overcommit */
1136 if (!accountable) 1137 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1137 vm_flags |= VM_NORESERVE; 1138 vm_flags |= VM_NORESERVE;
1139
1140 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
1141 if (file && is_file_hugepages(file))
1142 vm_flags |= VM_NORESERVE;
1143 }
1138 1144
1139 /* 1145 /*
1140 * Private writable mapping: check memory availability 1146 * Private writable mapping: check memory availability
1141 */ 1147 */
1142 if (accountable_mapping(vm_flags)) { 1148 if (accountable_mapping(file, vm_flags)) {
1143 charged = len >> PAGE_SHIFT; 1149 charged = len >> PAGE_SHIFT;
1144 if (security_vm_enough_memory(charged)) 1150 if (security_vm_enough_memory(charged))
1145 return -ENOMEM; 1151 return -ENOMEM;
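
The mmap.c hunks above fold the old accountable parameter into accountable_mapping(), which now takes the file so hugetlbfs mappings can opt out of core-VM accounting, and they gather the VM_NORESERVE decisions in one place. The predicate itself charges only private writable mappings that did not request VM_NORESERVE. Below is a minimal userspace sketch of that flag test; the bit values are illustrative stand-ins, not the kernel's real VM_* constants.

#include <stdio.h>

/* Illustrative bit values; the real VM_* constants live in <linux/mm.h>. */
#define VM_WRITE      0x1u
#define VM_SHARED     0x2u
#define VM_NORESERVE  0x4u

/* Charge only private (not shared) writable mappings without VM_NORESERVE. */
static int accountable(unsigned int vm_flags)
{
        return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

int main(void)
{
        printf("private writable:            %d\n", accountable(VM_WRITE));
        printf("shared writable:             %d\n", accountable(VM_WRITE | VM_SHARED));
        printf("private writable, noreserve: %d\n", accountable(VM_WRITE | VM_NORESERVE));
        printf("private read-only:           %d\n", accountable(0));
        return 0;
}
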
diff --git a/mm/mprotect.c b/mm/mprotect.c
index abe2694e13f4..258197b76fb4 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -151,10 +151,11 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
151 /* 151 /*
152 * If we make a private mapping writable we increase our commit; 152 * If we make a private mapping writable we increase our commit;
153 * but (without finer accounting) cannot reduce our commit if we 153 * but (without finer accounting) cannot reduce our commit if we
 154 * make it unwritable again. 154 * make it unwritable again. hugetlb mappings were accounted for
 155 * even if read-only, so there is no need to account for them here
155 */ 156 */
156 if (newflags & VM_WRITE) { 157 if (newflags & VM_WRITE) {
157 if (!(oldflags & (VM_ACCOUNT|VM_WRITE| 158 if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
158 VM_SHARED|VM_NORESERVE))) { 159 VM_SHARED|VM_NORESERVE))) {
159 charged = nrpages; 160 charged = nrpages;
160 if (security_vm_enough_memory(charged)) 161 if (security_vm_enough_memory(charged))
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index dcd7666824ba..fc70147c771e 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -29,6 +29,7 @@
29#include <linux/errno.h> 29#include <linux/errno.h>
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31#include <linux/sched.h> 31#include <linux/sched.h>
32#include <linux/types.h>
32#include <net/9p/9p.h> 33#include <net/9p/9p.h>
33#include <net/9p/client.h> 34#include <net/9p/client.h>
34#include "protocol.h" 35#include "protocol.h"
@@ -160,29 +161,32 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
160 break; 161 break;
161 case 'w':{ 162 case 'w':{
162 int16_t *val = va_arg(ap, int16_t *); 163 int16_t *val = va_arg(ap, int16_t *);
163 if (pdu_read(pdu, val, sizeof(*val))) { 164 __le16 le_val;
165 if (pdu_read(pdu, &le_val, sizeof(le_val))) {
164 errcode = -EFAULT; 166 errcode = -EFAULT;
165 break; 167 break;
166 } 168 }
167 *val = cpu_to_le16(*val); 169 *val = le16_to_cpu(le_val);
168 } 170 }
169 break; 171 break;
170 case 'd':{ 172 case 'd':{
171 int32_t *val = va_arg(ap, int32_t *); 173 int32_t *val = va_arg(ap, int32_t *);
172 if (pdu_read(pdu, val, sizeof(*val))) { 174 __le32 le_val;
175 if (pdu_read(pdu, &le_val, sizeof(le_val))) {
173 errcode = -EFAULT; 176 errcode = -EFAULT;
174 break; 177 break;
175 } 178 }
176 *val = cpu_to_le32(*val); 179 *val = le32_to_cpu(le_val);
177 } 180 }
178 break; 181 break;
179 case 'q':{ 182 case 'q':{
180 int64_t *val = va_arg(ap, int64_t *); 183 int64_t *val = va_arg(ap, int64_t *);
181 if (pdu_read(pdu, val, sizeof(*val))) { 184 __le64 le_val;
185 if (pdu_read(pdu, &le_val, sizeof(le_val))) {
182 errcode = -EFAULT; 186 errcode = -EFAULT;
183 break; 187 break;
184 } 188 }
185 *val = cpu_to_le64(*val); 189 *val = le64_to_cpu(le_val);
186 } 190 }
187 break; 191 break;
188 case 's':{ 192 case 's':{
@@ -362,19 +366,19 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
362 } 366 }
363 break; 367 break;
364 case 'w':{ 368 case 'w':{
365 int16_t val = va_arg(ap, int); 369 __le16 val = cpu_to_le16(va_arg(ap, int));
366 if (pdu_write(pdu, &val, sizeof(val))) 370 if (pdu_write(pdu, &val, sizeof(val)))
367 errcode = -EFAULT; 371 errcode = -EFAULT;
368 } 372 }
369 break; 373 break;
370 case 'd':{ 374 case 'd':{
371 int32_t val = va_arg(ap, int32_t); 375 __le32 val = cpu_to_le32(va_arg(ap, int32_t));
372 if (pdu_write(pdu, &val, sizeof(val))) 376 if (pdu_write(pdu, &val, sizeof(val)))
373 errcode = -EFAULT; 377 errcode = -EFAULT;
374 } 378 }
375 break; 379 break;
376 case 'q':{ 380 case 'q':{
377 int64_t val = va_arg(ap, int64_t); 381 __le64 val = cpu_to_le64(va_arg(ap, int64_t));
378 if (pdu_write(pdu, &val, sizeof(val))) 382 if (pdu_write(pdu, &val, sizeof(val)))
379 errcode = -EFAULT; 383 errcode = -EFAULT;
380 } 384 }
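
The protocol.c hunks read each wire integer into a __le16/__le32/__le64 temporary and convert with le*_to_cpu(), and write with cpu_to_le*(); the old code applied cpu_to_le*() on the read path, which only happens to work on little-endian hosts. A small host-independent sketch of the same idea, packing and unpacking a 16-bit value explicitly in little-endian wire order (the helper names here are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Store a 16-bit value into a buffer in little-endian wire order. */
static void put_le16(uint8_t *buf, uint16_t val)
{
        buf[0] = val & 0xff;
        buf[1] = val >> 8;
}

/* Load a little-endian 16-bit value, regardless of host endianness. */
static uint16_t get_le16(const uint8_t *buf)
{
        return (uint16_t)buf[0] | ((uint16_t)buf[1] << 8);
}

int main(void)
{
        uint8_t wire[2];

        put_le16(wire, 0x1234);              /* wire[] = { 0x34, 0x12 } */
        printf("round trip: 0x%04x\n", get_le16(wire));
        return 0;
}
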
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index bdd9ccea17ce..d2c27c808d3b 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -67,6 +67,11 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
67{ 67{
68 struct net_device *indev; 68 struct net_device *indev;
69 69
70 if (skb_warn_if_lro(skb)) {
71 kfree_skb(skb);
72 return;
73 }
74
70 indev = skb->dev; 75 indev = skb->dev;
71 skb->dev = to->dev; 76 skb->dev = to->dev;
72 skb_forward_csum(skb); 77 skb_forward_csum(skb);
@@ -89,7 +94,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
89/* called with rcu_read_lock */ 94/* called with rcu_read_lock */
90void br_forward(const struct net_bridge_port *to, struct sk_buff *skb) 95void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
91{ 96{
92 if (!skb_warn_if_lro(skb) && should_deliver(to, skb)) { 97 if (should_deliver(to, skb)) {
93 __br_forward(to, skb); 98 __br_forward(to, skb);
94 return; 99 return;
95 } 100 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 5379b0c1190a..a17e00662363 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1090,7 +1090,7 @@ int dev_open(struct net_device *dev)
1090 /* 1090 /*
1091 * Enable NET_DMA 1091 * Enable NET_DMA
1092 */ 1092 */
1093 dmaengine_get(); 1093 net_dmaengine_get();
1094 1094
1095 /* 1095 /*
1096 * Initialize multicasting status 1096 * Initialize multicasting status
@@ -1172,7 +1172,7 @@ int dev_close(struct net_device *dev)
1172 /* 1172 /*
1173 * Shutdown NET_DMA 1173 * Shutdown NET_DMA
1174 */ 1174 */
1175 dmaengine_put(); 1175 net_dmaengine_put();
1176 1176
1177 return 0; 1177 return 0;
1178} 1178}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f66c58df8953..278a142d1047 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1994,8 +1994,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1994 if (!net_eq(neigh_parms_net(p), net)) 1994 if (!net_eq(neigh_parms_net(p), net))
1995 continue; 1995 continue;
1996 1996
1997 if (nidx++ < neigh_skip) 1997 if (nidx < neigh_skip)
1998 continue; 1998 goto next;
1999 1999
2000 if (neightbl_fill_param_info(skb, tbl, p, 2000 if (neightbl_fill_param_info(skb, tbl, p,
2001 NETLINK_CB(cb->skb).pid, 2001 NETLINK_CB(cb->skb).pid,
@@ -2003,6 +2003,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2003 RTM_NEWNEIGHTBL, 2003 RTM_NEWNEIGHTBL,
2004 NLM_F_MULTI) <= 0) 2004 NLM_F_MULTI) <= 0)
2005 goto out; 2005 goto out;
2006 next:
2007 nidx++;
2006 } 2008 }
2007 2009
2008 neigh_skip = 0; 2010 neigh_skip = 0;
@@ -2082,12 +2084,10 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2082 if (h > s_h) 2084 if (h > s_h)
2083 s_idx = 0; 2085 s_idx = 0;
2084 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) { 2086 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2085 int lidx;
2086 if (dev_net(n->dev) != net) 2087 if (dev_net(n->dev) != net)
2087 continue; 2088 continue;
2088 lidx = idx++; 2089 if (idx < s_idx)
2089 if (lidx < s_idx) 2090 goto next;
2090 continue;
2091 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid, 2091 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2092 cb->nlh->nlmsg_seq, 2092 cb->nlh->nlmsg_seq,
2093 RTM_NEWNEIGH, 2093 RTM_NEWNEIGH,
@@ -2096,6 +2096,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2096 rc = -1; 2096 rc = -1;
2097 goto out; 2097 goto out;
2098 } 2098 }
2099 next:
2100 idx++;
2099 } 2101 }
2100 } 2102 }
2101 read_unlock_bh(&tbl->lock); 2103 read_unlock_bh(&tbl->lock);
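
Both neighbour.c hunks switch from post-incrementing the index in the skip test to a "goto next; ... next: idx++;" shape, so the cursor advances exactly once per visited entry whether it is skipped or dumped; with the old "lidx = idx++" form an entry that failed to fit in the skb had already consumed an index, and the resumed dump would skip it. A toy userspace sketch of the same resumable-iteration rule, advancing the cursor only after an item is fully emitted (the item list and per-pass capacity are invented):

#include <stdio.h>

#define NITEMS 6

/* Emit up to 'room' items starting at *cursor; advance the cursor only
 * past items that were fully emitted, so a later call resumes correctly. */
static int dump(int *cursor, int room)
{
        int emitted = 0;

        for (int idx = 0; idx < NITEMS; idx++) {
                if (idx < *cursor)
                        continue;              /* already sent in an earlier pass */
                if (emitted == room)
                        return 1;              /* out of space: resume at *cursor */
                printf("item %d\n", idx);
                emitted++;
                *cursor = idx + 1;             /* advance only after a full emit */
        }
        return 0;                              /* done */
}

int main(void)
{
        int cursor = 0;

        while (dump(&cursor, 2))               /* pretend each pass holds 2 items */
                printf("-- resume at %d --\n", cursor);
        return 0;
}
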
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 557fe16cbfb0..dda42f0bd7a3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -663,14 +663,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
663 th->urg_ptr = 0; 663 th->urg_ptr = 0;
664 664
665 /* The urg_mode check is necessary during a below snd_una win probe */ 665 /* The urg_mode check is necessary during a below snd_una win probe */
666 if (unlikely(tcp_urg_mode(tp))) { 666 if (unlikely(tcp_urg_mode(tp) &&
667 if (between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF)) { 667 between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) {
668 th->urg_ptr = htons(tp->snd_up - tcb->seq); 668 th->urg_ptr = htons(tp->snd_up - tcb->seq);
669 th->urg = 1; 669 th->urg = 1;
670 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
671 th->urg_ptr = 0xFFFF;
672 th->urg = 1;
673 }
674 } 670 }
675 671
676 tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location); 672 tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
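
The tcp_output.c hunk now sets the urgent pointer only when snd_up lies within 0xFFFF of the segment sequence, dropping the branch that advertised a fake 0xFFFF urgent offset. The between(seq1, seq2, seq3) helper it relies on is, roughly, a wraparound-safe "seq2 <= seq1 <= seq3" test done in modular 32-bit arithmetic. A standalone sketch of that comparison:

#include <stdint.h>
#include <stdio.h>

/* True if seq2 <= seq1 <= seq3 in modular 32-bit sequence space.
 * Unsigned subtraction makes the test immune to wraparound. */
static int between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
        return seq3 - seq2 >= seq1 - seq2;
}

int main(void)
{
        /* Plain case: 150 lies in [100, 200]. */
        printf("%d\n", between(150, 100, 200));                        /* 1 */
        /* Wrapped case: the window [0xfffffff0, 0x10] crosses zero. */
        printf("%d\n", between(0x00000005, 0xfffffff0, 0x00000010));   /* 1 */
        /* Outside the wrapped window. */
        printf("%d\n", between(0x00000020, 0xfffffff0, 0x00000010));   /* 0 */
        return 0;
}
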
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1ab180bad72a..c47c989cb1fb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1231,11 +1231,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1231 int proto) 1231 int proto)
1232{ 1232{
1233 struct sock *sk; 1233 struct sock *sk;
1234 struct udphdr *uh = udp_hdr(skb); 1234 struct udphdr *uh;
1235 unsigned short ulen; 1235 unsigned short ulen;
1236 struct rtable *rt = (struct rtable*)skb->dst; 1236 struct rtable *rt = (struct rtable*)skb->dst;
1237 __be32 saddr = ip_hdr(skb)->saddr; 1237 __be32 saddr, daddr;
1238 __be32 daddr = ip_hdr(skb)->daddr;
1239 struct net *net = dev_net(skb->dev); 1238 struct net *net = dev_net(skb->dev);
1240 1239
1241 /* 1240 /*
@@ -1244,6 +1243,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1244 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 1243 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
1245 goto drop; /* No space for header. */ 1244 goto drop; /* No space for header. */
1246 1245
1246 uh = udp_hdr(skb);
1247 ulen = ntohs(uh->len); 1247 ulen = ntohs(uh->len);
1248 if (ulen > skb->len) 1248 if (ulen > skb->len)
1249 goto short_packet; 1249 goto short_packet;
@@ -1258,6 +1258,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1258 if (udp4_csum_init(skb, uh, proto)) 1258 if (udp4_csum_init(skb, uh, proto))
1259 goto csum_error; 1259 goto csum_error;
1260 1260
1261 saddr = ip_hdr(skb)->saddr;
1262 daddr = ip_hdr(skb)->daddr;
1263
1261 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) 1264 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
1262 return __udp4_lib_mcast_deliver(net, skb, uh, 1265 return __udp4_lib_mcast_deliver(net, skb, uh,
1263 saddr, daddr, udptable); 1266 saddr, daddr, udptable);
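
The udp.c hunk stops dereferencing udp_hdr(skb) and the IP addresses before pskb_may_pull(); pulling may relocate the skb's linear data, so header pointers taken earlier can be stale, and the fix recomputes them afterwards. The same hazard exists in plain C whenever a pointer into a buffer survives a realloc; a tiny sketch (buffer contents invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *buf = malloc(8);
        if (!buf)
                return 1;
        strcpy(buf, "header");

        char *hdr = buf;                 /* pointer into the old allocation   */

        char *bigger = realloc(buf, 4096);
        if (!bigger) {
                free(buf);
                return 1;
        }
        buf = bigger;                    /* the buffer may have moved...      */
        hdr = buf;                       /* ...so recompute derived pointers  */

        printf("%s\n", hdr);
        free(buf);
        return 0;
}
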
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index c62dd247774f..7712578bdc66 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -323,17 +323,21 @@ static struct ip6_flowlabel *
323fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, 323fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
324 int optlen, int *err_p) 324 int optlen, int *err_p)
325{ 325{
326 struct ip6_flowlabel *fl; 326 struct ip6_flowlabel *fl = NULL;
327 int olen; 327 int olen;
328 int addr_type; 328 int addr_type;
329 int err; 329 int err;
330 330
331 olen = optlen - CMSG_ALIGN(sizeof(*freq));
332 err = -EINVAL;
333 if (olen > 64 * 1024)
334 goto done;
335
331 err = -ENOMEM; 336 err = -ENOMEM;
332 fl = kzalloc(sizeof(*fl), GFP_KERNEL); 337 fl = kzalloc(sizeof(*fl), GFP_KERNEL);
333 if (fl == NULL) 338 if (fl == NULL)
334 goto done; 339 goto done;
335 340
336 olen = optlen - CMSG_ALIGN(sizeof(*freq));
337 if (olen > 0) { 341 if (olen > 0) {
338 struct msghdr msg; 342 struct msghdr msg;
339 struct flowi flowi; 343 struct flowi flowi;
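
The ip6_flowlabel.c hunk computes olen and rejects anything over 64 KiB before the kzalloc, instead of allocating first and only then looking at the user-supplied option length. Validating an untrusted size before it reaches the allocator is the general pattern; a minimal userspace sketch with an invented helper and cap:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define OPT_MAX (64 * 1024)      /* illustrative cap on user-supplied options */

/* Copy 'len' bytes of caller-supplied option data, refusing absurd sizes
 * before any allocation happens. Returns NULL and sets *err on failure. */
static void *dup_options(const void *src, size_t len, int *err)
{
        void *buf;

        if (len > OPT_MAX) {
                *err = -1;               /* would be -EINVAL in the kernel */
                return NULL;
        }
        buf = malloc(len ? len : 1);
        if (!buf) {
                *err = -2;               /* would be -ENOBUFS/-ENOMEM */
                return NULL;
        }
        memcpy(buf, src, len);
        *err = 0;
        return buf;
}

int main(void)
{
        char small[16] = "flow label opts";
        int err;

        void *ok = dup_options(small, sizeof(small), &err);
        printf("small: err=%d\n", err);
        free(ok);

        void *bad = dup_options(small, (size_t)OPT_MAX + 1, &err);
        printf("huge:  err=%d ptr=%p\n", err, bad);
        return 0;
}
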
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 4b15938bef4d..9fb49c3b518a 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1105,6 +1105,18 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1105 return err; 1105 return err;
1106} 1106}
1107 1107
1108static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1109 gfp_t gfp)
1110{
1111 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1112}
1113
1114static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1115 gfp_t gfp)
1116{
1117 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1118}
1119
1108int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, 1120int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1109 int offset, int len, int odd, struct sk_buff *skb), 1121 int offset, int len, int odd, struct sk_buff *skb),
1110 void *from, int length, int transhdrlen, 1122 void *from, int length, int transhdrlen,
@@ -1130,17 +1142,37 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1130 * setup for corking 1142 * setup for corking
1131 */ 1143 */
1132 if (opt) { 1144 if (opt) {
1133 if (np->cork.opt == NULL) { 1145 if (WARN_ON(np->cork.opt))
1134 np->cork.opt = kmalloc(opt->tot_len,
1135 sk->sk_allocation);
1136 if (unlikely(np->cork.opt == NULL))
1137 return -ENOBUFS;
1138 } else if (np->cork.opt->tot_len < opt->tot_len) {
1139 printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
1140 return -EINVAL; 1146 return -EINVAL;
1141 } 1147
1142 memcpy(np->cork.opt, opt, opt->tot_len); 1148 np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
1143 inet->cork.flags |= IPCORK_OPT; 1149 if (unlikely(np->cork.opt == NULL))
1150 return -ENOBUFS;
1151
1152 np->cork.opt->tot_len = opt->tot_len;
1153 np->cork.opt->opt_flen = opt->opt_flen;
1154 np->cork.opt->opt_nflen = opt->opt_nflen;
1155
1156 np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1157 sk->sk_allocation);
1158 if (opt->dst0opt && !np->cork.opt->dst0opt)
1159 return -ENOBUFS;
1160
1161 np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1162 sk->sk_allocation);
1163 if (opt->dst1opt && !np->cork.opt->dst1opt)
1164 return -ENOBUFS;
1165
1166 np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
1167 sk->sk_allocation);
1168 if (opt->hopopt && !np->cork.opt->hopopt)
1169 return -ENOBUFS;
1170
1171 np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1172 sk->sk_allocation);
1173 if (opt->srcrt && !np->cork.opt->srcrt)
1174 return -ENOBUFS;
1175
1144 /* need source address above miyazawa*/ 1176 /* need source address above miyazawa*/
1145 } 1177 }
1146 dst_hold(&rt->u.dst); 1178 dst_hold(&rt->u.dst);
@@ -1167,8 +1199,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1167 } else { 1199 } else {
1168 rt = (struct rt6_info *)inet->cork.dst; 1200 rt = (struct rt6_info *)inet->cork.dst;
1169 fl = &inet->cork.fl; 1201 fl = &inet->cork.fl;
1170 if (inet->cork.flags & IPCORK_OPT) 1202 opt = np->cork.opt;
1171 opt = np->cork.opt;
1172 transhdrlen = 0; 1203 transhdrlen = 0;
1173 exthdrlen = 0; 1204 exthdrlen = 0;
1174 mtu = inet->cork.fragsize; 1205 mtu = inet->cork.fragsize;
@@ -1407,9 +1438,15 @@ error:
1407 1438
1408static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np) 1439static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1409{ 1440{
1410 inet->cork.flags &= ~IPCORK_OPT; 1441 if (np->cork.opt) {
1411 kfree(np->cork.opt); 1442 kfree(np->cork.opt->dst0opt);
1412 np->cork.opt = NULL; 1443 kfree(np->cork.opt->dst1opt);
1444 kfree(np->cork.opt->hopopt);
1445 kfree(np->cork.opt->srcrt);
1446 kfree(np->cork.opt);
1447 np->cork.opt = NULL;
1448 }
1449
1413 if (inet->cork.dst) { 1450 if (inet->cork.dst) {
1414 dst_release(inet->cork.dst); 1451 dst_release(inet->cork.dst);
1415 inet->cork.dst = NULL; 1452 inet->cork.dst = NULL;
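
The ip6_output.c hunks replace the flat memcpy() of opt into np->cork.opt with per-extension-header duplication (ip6_opt_dup()/ip6_rthdr_dup() built on kmemdup()), and ip6_cork_release() now frees each duplicated header before freeing the container. A userspace sketch of that deep-copy/deep-free shape, with invented structure and helper names:

#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins for the per-socket option headers. */
struct opt_hdr { size_t len; char data[32]; };
struct opts    { struct opt_hdr *hop, *dst; };

static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;
}

/* Deep-copy: duplicate each referenced header, not just the container. */
static struct opts *opts_dup(const struct opts *src)
{
        struct opts *copy = calloc(1, sizeof(*copy));

        if (!copy)
                return NULL;
        if (src->hop && !(copy->hop = memdup(src->hop, sizeof(*src->hop))))
                goto fail;
        if (src->dst && !(copy->dst = memdup(src->dst, sizeof(*src->dst))))
                goto fail;
        return copy;
fail:
        free(copy->hop);
        free(copy->dst);
        free(copy);
        return NULL;
}

/* Deep-free: release the pieces before the container, mirroring
 * the reworked ip6_cork_release(). */
static void opts_free(struct opts *o)
{
        if (!o)
                return;
        free(o->hop);
        free(o->dst);
        free(o);
}

int main(void)
{
        struct opt_hdr hop = { .len = 8, .data = "hopopts" };
        struct opts src = { .hop = &hop, .dst = NULL };
        struct opts *copy = opts_dup(&src);

        opts_free(copy);
        return 0;
}
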
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 58e2b0d93758..d994c55a5b16 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -249,8 +249,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
249 } 249 }
250 250
251 t = netdev_priv(dev); 251 t = netdev_priv(dev);
252 ip6_tnl_dev_init(dev);
253 t->parms = *p; 252 t->parms = *p;
253 ip6_tnl_dev_init(dev);
254 254
255 if ((err = register_netdevice(dev)) < 0) 255 if ((err = register_netdevice(dev)) < 0)
256 goto failed_free; 256 goto failed_free;
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index c455cf4ee756..c323643ffcf9 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -49,8 +49,19 @@ static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
49static const u_int8_t invmap[] = { 49static const u_int8_t invmap[] = {
50 [ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1, 50 [ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1,
51 [ICMPV6_ECHO_REPLY - 128] = ICMPV6_ECHO_REQUEST + 1, 51 [ICMPV6_ECHO_REPLY - 128] = ICMPV6_ECHO_REQUEST + 1,
52 [ICMPV6_NI_QUERY - 128] = ICMPV6_NI_QUERY + 1, 52 [ICMPV6_NI_QUERY - 128] = ICMPV6_NI_REPLY + 1,
53 [ICMPV6_NI_REPLY - 128] = ICMPV6_NI_REPLY +1 53 [ICMPV6_NI_REPLY - 128] = ICMPV6_NI_QUERY +1
54};
55
56static const u_int8_t noct_valid_new[] = {
57 [ICMPV6_MGM_QUERY - 130] = 1,
58 [ICMPV6_MGM_REPORT -130] = 1,
59 [ICMPV6_MGM_REDUCTION - 130] = 1,
60 [NDISC_ROUTER_SOLICITATION - 130] = 1,
61 [NDISC_ROUTER_ADVERTISEMENT - 130] = 1,
62 [NDISC_NEIGHBOUR_SOLICITATION - 130] = 1,
63 [NDISC_NEIGHBOUR_ADVERTISEMENT - 130] = 1,
64 [ICMPV6_MLD2_REPORT - 130] = 1
54}; 65};
55 66
56static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple, 67static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
@@ -178,6 +189,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
178{ 189{
179 const struct icmp6hdr *icmp6h; 190 const struct icmp6hdr *icmp6h;
180 struct icmp6hdr _ih; 191 struct icmp6hdr _ih;
192 int type;
181 193
182 icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih); 194 icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
183 if (icmp6h == NULL) { 195 if (icmp6h == NULL) {
@@ -194,6 +206,15 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
194 return -NF_ACCEPT; 206 return -NF_ACCEPT;
195 } 207 }
196 208
209 type = icmp6h->icmp6_type - 130;
210 if (type >= 0 && type < sizeof(noct_valid_new) &&
211 noct_valid_new[type]) {
212 skb->nfct = &nf_conntrack_untracked.ct_general;
213 skb->nfctinfo = IP_CT_NEW;
214 nf_conntrack_get(skb->nfct);
215 return NF_ACCEPT;
216 }
217
197 /* is not error message ? */ 218 /* is not error message ? */
198 if (icmp6h->icmp6_type >= 128) 219 if (icmp6h->icmp6_type >= 128)
199 return NF_ACCEPT; 220 return NF_ACCEPT;
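
The conntrack hunks fix the inverse-type table so the NI query and reply map to each other rather than to themselves (entries are stored as type + 1 so that 0 can mean "no inverse"), and add a table of ICMPv6 types that should stay untracked. A tiny sketch of that offset-indexed inverse lookup; the type values here are the commonly used ones, but the authoritative definitions live in include/linux/icmpv6.h:

#include <stdio.h>

#define ICMPV6_ECHO_REQUEST 128
#define ICMPV6_ECHO_REPLY   129
#define ICMPV6_NI_QUERY     139
#define ICMPV6_NI_REPLY     140

/* invmap[type - 128] = inverse type + 1; zero means "no inverse". */
static const unsigned char invmap[] = {
        [ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1,
        [ICMPV6_ECHO_REPLY   - 128] = ICMPV6_ECHO_REQUEST + 1,
        [ICMPV6_NI_QUERY     - 128] = ICMPV6_NI_REPLY + 1,
        [ICMPV6_NI_REPLY     - 128] = ICMPV6_NI_QUERY + 1,
};

static int invert_type(int type)
{
        int idx = type - 128;

        if (idx < 0 || idx >= (int)sizeof(invmap) || !invmap[idx])
                return -1;                       /* no known inverse */
        return invmap[idx] - 1;
}

int main(void)
{
        printf("echo request -> %d\n", invert_type(ICMPV6_ECHO_REQUEST)); /* 129 */
        printf("NI query     -> %d\n", invert_type(ICMPV6_NI_QUERY));     /* 140 */
        return 0;
}
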
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c32a7e8e3a1b..cb78aa00399e 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -434,7 +434,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
434 } else 434 } else
435 return NOTIFY_DONE; 435 return NOTIFY_DONE;
436 436
437 if (!nfnetlink_has_listeners(group)) 437 if (!item->report && !nfnetlink_has_listeners(group))
438 return NOTIFY_DONE; 438 return NOTIFY_DONE;
439 439
440 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); 440 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -1215,6 +1215,16 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1215 } 1215 }
1216 } 1216 }
1217 1217
1218#ifdef CONFIG_NF_NAT_NEEDED
1219 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1220 err = ctnetlink_change_nat_seq_adj(ct, cda);
1221 if (err < 0) {
1222 rcu_read_unlock();
1223 goto err;
1224 }
1225 }
1226#endif
1227
1218 if (cda[CTA_PROTOINFO]) { 1228 if (cda[CTA_PROTOINFO]) {
1219 err = ctnetlink_change_protoinfo(ct, cda); 1229 err = ctnetlink_change_protoinfo(ct, cda);
1220 if (err < 0) { 1230 if (err < 0) {
@@ -1492,7 +1502,8 @@ static int ctnetlink_expect_event(struct notifier_block *this,
1492 } else 1502 } else
1493 return NOTIFY_DONE; 1503 return NOTIFY_DONE;
1494 1504
1495 if (!nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW)) 1505 if (!item->report &&
1506 !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
1496 return NOTIFY_DONE; 1507 return NOTIFY_DONE;
1497 1508
1498 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); 1509 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index e223cb43ae8e..a189ada9128f 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -105,7 +105,7 @@ match_packet(const struct sk_buff *skb,
105 105
106 switch (chunk_match_type) { 106 switch (chunk_match_type) {
107 case SCTP_CHUNK_MATCH_ALL: 107 case SCTP_CHUNK_MATCH_ALL:
108 return SCTP_CHUNKMAP_IS_CLEAR(info->chunkmap); 108 return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy);
109 case SCTP_CHUNK_MATCH_ANY: 109 case SCTP_CHUNK_MATCH_ANY:
110 return false; 110 return false;
111 case SCTP_CHUNK_MATCH_ONLY: 111 case SCTP_CHUNK_MATCH_ONLY:
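
The xt_sctp.c hunk makes SCTP_CHUNK_MATCH_ALL test the working copy of the chunk map, from which each matched chunk type has been cleared while walking the packet, rather than the caller's original map; "every requested chunk was seen" is exactly "the copy is now empty". A small sketch of that clear-as-you-match idiom on a plain bitmask (chunk type values invented):

#include <stdint.h>
#include <stdio.h>

/* Return 1 if every bit requested in 'wanted' appears in 'seen[]'. */
static int match_all(uint32_t wanted, const uint8_t *seen, size_t n)
{
        uint32_t remaining = wanted;                     /* work on a copy */

        for (size_t i = 0; i < n; i++)
                remaining &= ~(UINT32_C(1) << seen[i]);  /* clear each seen type */

        return remaining == 0;                           /* empty copy == all matched */
}

int main(void)
{
        uint8_t pkt_chunks[] = { 1, 3, 5 };

        printf("%d\n", match_all((1u << 1) | (1u << 5), pkt_chunks, 3)); /* 1 */
        printf("%d\n", match_all((1u << 2) | (1u << 5), pkt_chunks, 3)); /* 0 */
        return 0;
}
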
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d7d2bed7a699..eac5e7bb7365 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -284,13 +284,13 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
284 if (IS_ERR(trans)) { 284 if (IS_ERR(trans)) {
285 call = ERR_CAST(trans); 285 call = ERR_CAST(trans);
286 trans = NULL; 286 trans = NULL;
287 goto out; 287 goto out_notrans;
288 } 288 }
289 } else { 289 } else {
290 trans = rx->trans; 290 trans = rx->trans;
291 if (!trans) { 291 if (!trans) {
292 call = ERR_PTR(-ENOTCONN); 292 call = ERR_PTR(-ENOTCONN);
293 goto out; 293 goto out_notrans;
294 } 294 }
295 atomic_inc(&trans->usage); 295 atomic_inc(&trans->usage);
296 } 296 }
@@ -315,6 +315,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
315 rxrpc_put_bundle(trans, bundle); 315 rxrpc_put_bundle(trans, bundle);
316out: 316out:
317 rxrpc_put_transport(trans); 317 rxrpc_put_transport(trans);
318out_notrans:
318 release_sock(&rx->sk); 319 release_sock(&rx->sk);
319 _leave(" = %p", call); 320 _leave(" = %p", call);
320 return call; 321 return call;
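
The af_rxrpc.c hunk adds an out_notrans label so the two paths that fail before a transport reference is taken no longer fall through rxrpc_put_transport() with a transport that was never obtained; in a goto cleanup ladder, the jump target should match how far setup actually got. A compact userspace sketch of that convention (the resources and names are invented):

#include <stdio.h>
#include <stdlib.h>

static int do_work(int fail_early)
{
        char *conn = NULL, *trans = NULL;
        int err = -1;

        if (fail_early)
                goto out_notrans;        /* nothing acquired yet: skip the puts */

        trans = malloc(16);
        if (!trans)
                goto out_notrans;

        conn = malloc(16);
        if (!conn)
                goto out;                /* trans held: release it below */

        err = 0;                         /* success path */
        free(conn);
out:
        free(trans);
out_notrans:
        printf("returning %d\n", err);
        return err;
}

int main(void)
{
        do_work(1);
        do_work(0);
        return 0;
}
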
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index e17836680f49..0a1798eafb0b 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1767,7 +1767,7 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
1767 AFMT_S8 | AFMT_U16_LE | 1767 AFMT_S8 | AFMT_U16_LE |
1768 AFMT_U16_BE | 1768 AFMT_U16_BE |
1769 AFMT_S32_LE | AFMT_S32_BE | 1769 AFMT_S32_LE | AFMT_S32_BE |
1770 AFMT_S24_LE | AFMT_S24_LE | 1770 AFMT_S24_LE | AFMT_S24_BE |
1771 AFMT_S24_PACKED; 1771 AFMT_S24_PACKED;
1772 params = kmalloc(sizeof(*params), GFP_KERNEL); 1772 params = kmalloc(sizeof(*params), GFP_KERNEL);
1773 if (!params) 1773 if (!params)
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index b7bba7dc7cf1..0b708134d12f 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -487,7 +487,6 @@ int /*__devinit*/ snd_hda_bus_new(struct snd_card *card,
487{ 487{
488 struct hda_bus *bus; 488 struct hda_bus *bus;
489 int err; 489 int err;
490 char qname[8];
491 static struct snd_device_ops dev_ops = { 490 static struct snd_device_ops dev_ops = {
492 .dev_register = snd_hda_bus_dev_register, 491 .dev_register = snd_hda_bus_dev_register,
493 .dev_free = snd_hda_bus_dev_free, 492 .dev_free = snd_hda_bus_dev_free,
@@ -517,10 +516,12 @@ int /*__devinit*/ snd_hda_bus_new(struct snd_card *card,
517 mutex_init(&bus->cmd_mutex); 516 mutex_init(&bus->cmd_mutex);
518 INIT_LIST_HEAD(&bus->codec_list); 517 INIT_LIST_HEAD(&bus->codec_list);
519 518
520 snprintf(qname, sizeof(qname), "hda%d", card->number); 519 snprintf(bus->workq_name, sizeof(bus->workq_name),
521 bus->workq = create_workqueue(qname); 520 "hd-audio%d", card->number);
521 bus->workq = create_singlethread_workqueue(bus->workq_name);
522 if (!bus->workq) { 522 if (!bus->workq) {
523 snd_printk(KERN_ERR "cannot create workqueue %s\n", qname); 523 snd_printk(KERN_ERR "cannot create workqueue %s\n",
524 bus->workq_name);
524 kfree(bus); 525 kfree(bus);
525 return -ENOMEM; 526 return -ENOMEM;
526 } 527 }
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 5810ef588402..09a332ada0c6 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -614,6 +614,7 @@ struct hda_bus {
614 614
615 /* unsolicited event queue */ 615 /* unsolicited event queue */
616 struct hda_bus_unsolicited *unsol; 616 struct hda_bus_unsolicited *unsol;
617 char workq_name[16];
617 struct workqueue_struct *workq; /* common workqueue for codecs */ 618 struct workqueue_struct *workq; /* common workqueue for codecs */
618 619
619 /* assigned PCMs */ 620 /* assigned PCMs */
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 7ca66d654148..144b85276d5a 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -399,7 +399,8 @@ static void print_conn_list(struct snd_info_buffer *buffer,
399{ 399{
400 int c, curr = -1; 400 int c, curr = -1;
401 401
402 if (conn_len > 1 && wid_type != AC_WID_AUD_MIX) 402 if (conn_len > 1 && wid_type != AC_WID_AUD_MIX &&
403 wid_type != AC_WID_VOL_KNB)
403 curr = snd_hda_codec_read(codec, nid, 0, 404 curr = snd_hda_codec_read(codec, nid, 0,
404 AC_VERB_GET_CONNECT_SEL, 0); 405 AC_VERB_GET_CONNECT_SEL, 0);
405 snd_iprintf(buffer, " Connection: %d\n", conn_len); 406 snd_iprintf(buffer, " Connection: %d\n", conn_len);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7884a4e07061..ae5c8a0d1479 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1037,6 +1037,7 @@ do_sku:
1037 case 0x10ec0267: 1037 case 0x10ec0267:
1038 case 0x10ec0268: 1038 case 0x10ec0268:
1039 case 0x10ec0269: 1039 case 0x10ec0269:
1040 case 0x10ec0272:
1040 case 0x10ec0660: 1041 case 0x10ec0660:
1041 case 0x10ec0662: 1042 case 0x10ec0662:
1042 case 0x10ec0663: 1043 case 0x10ec0663:
@@ -1065,6 +1066,7 @@ do_sku:
1065 case 0x10ec0882: 1066 case 0x10ec0882:
1066 case 0x10ec0883: 1067 case 0x10ec0883:
1067 case 0x10ec0885: 1068 case 0x10ec0885:
1069 case 0x10ec0887:
1068 case 0x10ec0889: 1070 case 0x10ec0889:
1069 snd_hda_codec_write(codec, 0x20, 0, 1071 snd_hda_codec_write(codec, 0x20, 0,
1070 AC_VERB_SET_COEF_INDEX, 7); 1072 AC_VERB_SET_COEF_INDEX, 7);
@@ -7012,6 +7014,7 @@ static int patch_alc882(struct hda_codec *codec)
7012 break; 7014 break;
7013 case 0x106b1000: /* iMac 24 */ 7015 case 0x106b1000: /* iMac 24 */
7014 case 0x106b2800: /* AppleTV */ 7016 case 0x106b2800: /* AppleTV */
7017 case 0x106b3e00: /* iMac 24 Aluminium */
7015 board_config = ALC885_IMAC24; 7018 board_config = ALC885_IMAC24;
7016 break; 7019 break;
7017 case 0x106b00a1: /* Macbook (might be wrong - PCI SSID?) */ 7020 case 0x106b00a1: /* Macbook (might be wrong - PCI SSID?) */
@@ -8514,6 +8517,8 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
8514 SND_PCI_QUIRK(0x1558, 0, "Clevo laptop", ALC883_LAPTOP_EAPD), 8517 SND_PCI_QUIRK(0x1558, 0, "Clevo laptop", ALC883_LAPTOP_EAPD),
8515 SND_PCI_QUIRK(0x15d9, 0x8780, "Supermicro PDSBA", ALC883_3ST_6ch), 8518 SND_PCI_QUIRK(0x15d9, 0x8780, "Supermicro PDSBA", ALC883_3ST_6ch),
8516 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_MEDION), 8519 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_MEDION),
8520 SND_PCI_QUIRK(0x1734, 0x1107, "FSC AMILO Xi2550",
8521 ALC883_FUJITSU_PI2515),
8517 SND_PCI_QUIRK(0x1734, 0x1108, "Fujitsu AMILO Pi2515", ALC883_FUJITSU_PI2515), 8522 SND_PCI_QUIRK(0x1734, 0x1108, "Fujitsu AMILO Pi2515", ALC883_FUJITSU_PI2515),
8518 SND_PCI_QUIRK(0x1734, 0x113d, "Fujitsu AMILO Xa3530", 8523 SND_PCI_QUIRK(0x1734, 0x113d, "Fujitsu AMILO Xa3530",
8519 ALC888_FUJITSU_XA3530), 8524 ALC888_FUJITSU_XA3530),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index b787b3cc096f..38428e22428f 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1804,6 +1804,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
1804 "HP dv4", STAC_HP_DV5), 1804 "HP dv4", STAC_HP_DV5),
1805 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30fc, 1805 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30fc,
1806 "HP dv7", STAC_HP_M4), 1806 "HP dv7", STAC_HP_M4),
1807 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3600,
1808 "HP dv5", STAC_HP_DV5),
1807 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3603, 1809 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3603,
1808 "HP dv5", STAC_HP_DV5), 1810 "HP dv5", STAC_HP_DV5),
1809 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x361a, 1811 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x361a,
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 19d3391e229f..e900cdc84849 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -617,7 +617,7 @@ static int snd_intel8x0_ali_codec_semaphore(struct intel8x0 *chip)
617 int time = 100; 617 int time = 100;
618 if (chip->buggy_semaphore) 618 if (chip->buggy_semaphore)
619 return 0; /* just ignore ... */ 619 return 0; /* just ignore ... */
620 while (time-- && (igetdword(chip, ICHREG(ALI_CAS)) & ALI_CAS_SEM_BUSY)) 620 while (--time && (igetdword(chip, ICHREG(ALI_CAS)) & ALI_CAS_SEM_BUSY))
621 udelay(1); 621 udelay(1);
622 if (! time && ! chip->in_ac97_init) 622 if (! time && ! chip->in_ac97_init)
623 snd_printk(KERN_WARNING "ali_codec_semaphore timeout\n"); 623 snd_printk(KERN_WARNING "ali_codec_semaphore timeout\n");
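
The intel8x0.c hunk changes "while (time-- && ...)" to "while (--time && ...)": with post-decrement, a full timeout leaves the counter at -1, so the "if (! time ...)" warning below the loop can never fire, whereas pre-decrement leaves it at exactly 0. A short demonstration of the difference, with the busy condition simulated:

#include <stdio.h>

int main(void)
{
        int busy = 1;    /* pretend the device never becomes ready */
        int t;

        t = 100;
        while (t-- && busy)
                ;        /* would poll the hardware here */
        printf("post-decrement leaves t = %d (so \"!t\" never fires)\n", t);

        t = 100;
        while (--t && busy)
                ;
        printf("pre-decrement leaves t = %d (timeout detected by \"!t\")\n", t);

        return 0;
}
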
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index c5d67900d666..ff0054b76502 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -10,7 +10,7 @@
10 * Based on at91-ssc.c by 10 * Based on at91-ssc.c by
11 * Frank Mandarino <fmandarino@endrelia.com> 11 * Frank Mandarino <fmandarino@endrelia.com>
12 * Based on pxa2xx Platform drivers by 12 * Based on pxa2xx Platform drivers by
13 * Liam Girdwood <liam.girdwood@wolfsonmicro.com> 13 * Liam Girdwood <lrg@slimlogic.co.uk>
14 * 14 *
15 * This program is free software; you can redistribute it and/or modify 15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by 16 * it under the terms of the GNU General Public License as published by
diff --git a/sound/soc/atmel/atmel_ssc_dai.h b/sound/soc/atmel/atmel_ssc_dai.h
index a828746e8a2f..391135f9c6c1 100644
--- a/sound/soc/atmel/atmel_ssc_dai.h
+++ b/sound/soc/atmel/atmel_ssc_dai.h
@@ -10,7 +10,7 @@
10 * Based on at91-ssc.c by 10 * Based on at91-ssc.c by
11 * Frank Mandarino <fmandarino@endrelia.com> 11 * Frank Mandarino <fmandarino@endrelia.com>
12 * Based on pxa2xx Platform drivers by 12 * Based on pxa2xx Platform drivers by
13 * Liam Girdwood <liam.girdwood@wolfsonmicro.com> 13 * Liam Girdwood <lrg@slimlogic.co.uk>
14 * 14 *
15 * This program is free software; you can redistribute it and/or modify 15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by 16 * it under the terms of the GNU General Public License as published by
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index e3989d406f54..35d99750c383 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. 4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
5 * 5 *
6 * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> 6 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 5b5afc144478..1cbb7b9b51ce 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -2,8 +2,7 @@
2 * wm8990.c -- WM8990 ALSA Soc Audio driver 2 * wm8990.c -- WM8990 ALSA Soc Audio driver
3 * 3 *
4 * Copyright 2008 Wolfson Microelectronics PLC. 4 * Copyright 2008 Wolfson Microelectronics PLC.
5 * Author: Liam Girdwood 5 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
6 * lg@opensource.wolfsonmicro.com or linux@wolfsonmicro.com
7 * 6 *
8 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 8 * under the terms of the GNU General Public License as published by the
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index b0362dfd5b71..dd3bb2933762 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -175,9 +175,10 @@ static int omap_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
175{ 175{
176 struct snd_pcm_runtime *runtime = substream->runtime; 176 struct snd_pcm_runtime *runtime = substream->runtime;
177 struct omap_runtime_data *prtd = runtime->private_data; 177 struct omap_runtime_data *prtd = runtime->private_data;
178 unsigned long flags;
178 int ret = 0; 179 int ret = 0;
179 180
180 spin_lock_irq(&prtd->lock); 181 spin_lock_irqsave(&prtd->lock, flags);
181 switch (cmd) { 182 switch (cmd) {
182 case SNDRV_PCM_TRIGGER_START: 183 case SNDRV_PCM_TRIGGER_START:
183 case SNDRV_PCM_TRIGGER_RESUME: 184 case SNDRV_PCM_TRIGGER_RESUME:
@@ -195,7 +196,7 @@ static int omap_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
195 default: 196 default:
196 ret = -EINVAL; 197 ret = -EINVAL;
197 } 198 }
198 spin_unlock_irq(&prtd->lock); 199 spin_unlock_irqrestore(&prtd->lock, flags);
199 200
200 return ret; 201 return ret;
201} 202}
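
The omap-pcm.c hunk switches the trigger callback to spin_lock_irqsave()/spin_unlock_irqrestore(): it can be entered with interrupts already disabled, and the _irq variants would unconditionally re-enable them on unlock. The underlying discipline is to restore whatever state the caller had rather than assume it. A rough userspace analogy, not kernel code, using signal masks to show the same save-and-restore shape:

#include <signal.h>
#include <stdio.h>

/* Critical section that must not be interrupted by SIGINT. Instead of
 * unconditionally unblocking on exit, restore the caller's previous mask
 * - the analogue of spin_unlock_irqrestore(). */
static void critical_section(void)
{
        sigset_t block, saved;

        sigemptyset(&block);
        sigaddset(&block, SIGINT);

        sigprocmask(SIG_BLOCK, &block, &saved);   /* save the previous state */
        puts("in critical section");
        sigprocmask(SIG_SETMASK, &saved, NULL);   /* restore, don't force-unblock */
}

int main(void)
{
        critical_section();
        return 0;
}
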
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
index c709b9563226..2ab83129d9b0 100644
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -2966,6 +2966,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
2966 return -EINVAL; 2966 return -EINVAL;
2967 } 2967 }
2968 alts = &iface->altsetting[fp->altset_idx]; 2968 alts = &iface->altsetting[fp->altset_idx];
2969 fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
2969 usb_set_interface(chip->dev, fp->iface, 0); 2970 usb_set_interface(chip->dev, fp->iface, 0);
2970 init_usb_pitch(chip->dev, fp->iface, alts, fp); 2971 init_usb_pitch(chip->dev, fp->iface, alts, fp);
2971 init_usb_sample_rate(chip->dev, fp->iface, alts, fp, fp->rate_max); 2972 init_usb_sample_rate(chip->dev, fp->iface, alts, fp, fp->rate_max);