-rw-r--r--  Documentation/device-mapper/dm-raid.txt | 44
-rw-r--r--  Documentation/hwmon/adm1275 | 2
-rw-r--r--  Documentation/hwmon/adt7410 | 11
-rw-r--r--  Documentation/hwmon/jc42 | 2
-rw-r--r--  Documentation/hwmon/lineage-pem | 2
-rw-r--r--  Documentation/hwmon/lm25066 | 2
-rw-r--r--  Documentation/hwmon/ltc2978 | 6
-rw-r--r--  Documentation/hwmon/ltc4261 | 2
-rw-r--r--  Documentation/hwmon/max16064 | 2
-rw-r--r--  Documentation/hwmon/max16065 | 2
-rw-r--r--  Documentation/hwmon/max34440 | 2
-rw-r--r--  Documentation/hwmon/max8688 | 2
-rw-r--r--  Documentation/hwmon/pmbus | 2
-rw-r--r--  Documentation/hwmon/smm665 | 2
-rw-r--r--  Documentation/hwmon/ucd9000 | 2
-rw-r--r--  Documentation/hwmon/ucd9200 | 2
-rw-r--r--  Documentation/hwmon/zl6100 | 2
-rw-r--r--  Documentation/power/opp.txt | 25
-rw-r--r--  MAINTAINERS | 18
-rw-r--r--  arch/arm/boot/compressed/Makefile | 2
-rw-r--r--  arch/arm/include/asm/mmu.h | 8
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 2
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 34
-rw-r--r--  arch/arm/include/uapi/asm/unistd.h | 2
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 2
-rw-r--r--  arch/arm/kernel/calls.S | 2
-rw-r--r--  arch/arm/kernel/head.S | 26
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 2
-rw-r--r--  arch/arm/kernel/perf_event.c | 4
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 2
-rw-r--r--  arch/arm/kernel/smp.c | 1
-rw-r--r--  arch/arm/kernel/smp_tlb.c | 12
-rw-r--r--  arch/arm/kernel/smp_twd.c | 4
-rw-r--r--  arch/arm/kernel/suspend.c | 1
-rw-r--r--  arch/arm/lib/memset.S | 85
-rw-r--r--  arch/arm/mach-netx/generic.c | 2
-rw-r--r--  arch/arm/mach-netx/include/mach/irqs.h | 64
-rw-r--r--  arch/arm/mm/context.c | 29
-rw-r--r--  arch/arm/mm/idmap.c | 1
-rw-r--r--  arch/arm/mm/proc-v7-3level.S | 2
-rw-r--r--  arch/metag/include/asm/elf.h | 3
-rw-r--r--  arch/metag/mm/Kconfig | 1
-rw-r--r--  arch/powerpc/crypto/sha1-powerpc-asm.S | 4
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 2
-rw-r--r--  arch/powerpc/include/asm/reg.h | 3
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 1
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power.S | 5
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 4
-rw-r--r--  arch/powerpc/platforms/pseries/hvcserver.c | 5
-rw-r--r--  arch/x86/include/asm/bootparam_utils.h | 20
-rw-r--r--  arch/x86/kernel/setup.c | 10
-rw-r--r--  arch/x86/kernel/smpboot.c | 3
-rw-r--r--  arch/x86/mm/init.c | 5
-rw-r--r--  arch/x86/mm/pat.c | 7
-rw-r--r--  drivers/acpi/glue.c | 55
-rw-r--r--  drivers/acpi/processor_core.c | 3
-rw-r--r--  drivers/acpi/processor_driver.c | 2
-rw-r--r--  drivers/acpi/sleep.c | 16
-rw-r--r--  drivers/ata/libata-acpi.c | 7
-rw-r--r--  drivers/base/power/main.c | 2
-rw-r--r--  drivers/base/power/power.h | 8
-rw-r--r--  drivers/base/power/qos.c | 217
-rw-r--r--  drivers/base/power/sysfs.c | 1
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 1
-rw-r--r--  drivers/bcma/driver_pci_host.c | 2
-rw-r--r--  drivers/char/hw_random/core.c | 19
-rw-r--r--  drivers/char/random.c | 12
-rw-r--r--  drivers/connector/cn_proc.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 2
-rw-r--r--  drivers/cpufreq/highbank-cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 42
-rw-r--r--  drivers/firmware/efivars.c | 130
-rw-r--r--  drivers/gpio/gpio-ich.c | 4
-rw-r--r--  drivers/gpio/gpiolib.c | 143
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nve0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_agp.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 173
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 6
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig | 1
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 22
-rw-r--r--  drivers/hwmon/pmbus/ltc2978.c | 30
-rw-r--r--  drivers/hwmon/sht15.c | 8
-rw-r--r--  drivers/iommu/dmar.c | 1
-rw-r--r--  drivers/isdn/hisax/st5481_usb.c | 12
-rw-r--r--  drivers/mailbox/pl320-ipc.c | 3
-rw-r--r--  drivers/md/Kconfig | 11
-rw-r--r--  drivers/md/dm-raid.c | 123
-rw-r--r--  drivers/md/md.c | 19
-rw-r--r--  drivers/md/raid0.c | 13
-rw-r--r--  drivers/md/raid1.c | 8
-rw-r--r--  drivers/md/raid10.c | 97
-rw-r--r--  drivers/md/raid10.h | 5
-rw-r--r--  drivers/md/raid5.c | 38
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 64
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec.c | 85
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 18
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 27
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 16
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 25
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 3
-rw-r--r--  drivers/net/phy/phy_device.c | 10
-rw-r--r--  drivers/net/usb/Kconfig | 18
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/asix_devices.c | 31
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 1448
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 18
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.h | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-phy-db.c | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 104
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 75
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 6
-rw-r--r--  drivers/net/wireless/mwifiex/pcie.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 8
-rw-r--r--  drivers/pci/pci-acpi.c | 8
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 8
-rw-r--r--  drivers/regulator/core.c | 12
-rw-r--r--  drivers/regulator/db8500-prcmu.c | 4
-rw-r--r--  drivers/regulator/palmas-regulator.c | 3
-rw-r--r--  drivers/regulator/twl-regulator.c | 9
-rw-r--r--  drivers/scsi/scsi_lib.c | 7
-rw-r--r--  drivers/tty/hvc/hvcs.c | 9
-rw-r--r--  drivers/usb/core/usb-acpi.c | 9
-rw-r--r--  fs/ecryptfs/Kconfig | 8
-rw-r--r--  fs/ecryptfs/Makefile | 7
-rw-r--r--  fs/ecryptfs/crypto.c | 9
-rw-r--r--  fs/ecryptfs/dentry.c | 2
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 40
-rw-r--r--  fs/ecryptfs/file.c | 2
-rw-r--r--  fs/ecryptfs/inode.c | 8
-rw-r--r--  fs/ecryptfs/keystore.c | 9
-rw-r--r--  fs/ecryptfs/messaging.c | 5
-rw-r--r--  fs/namei.c | 2
-rw-r--r--  include/acpi/acpi_bus.h | 6
-rw-r--r--  include/drm/drm_crtc.h | 6
-rw-r--r--  include/linux/ecryptfs.h | 12
-rw-r--r--  include/linux/hardirq.h | 2
-rw-r--r--  include/linux/netfilter/ipset/ip_set_ahash.h | 4
-rw-r--r--  include/linux/regulator/driver.h | 2
-rw-r--r--  include/linux/smpboot.h | 4
-rw-r--r--  include/net/tcp.h | 4
-rw-r--r--  kernel/smpboot.c | 2
-rw-r--r--  kernel/softirq.c | 21
-rw-r--r--  kernel/stop_machine.c | 2
-rw-r--r--  net/caif/caif_dev.c | 2
-rw-r--r--  net/caif/caif_usb.c | 4
-rw-r--r--  net/ipv4/ip_input.c | 6
-rw-r--r--  net/ipv4/tcp_input.c | 6
-rw-r--r--  net/ipv6/ip6_input.c | 6
-rw-r--r--  net/ipv6/route.c | 3
-rw-r--r--  net/irda/iriap.c | 7
-rw-r--r--  net/l2tp/l2tp_ppp.c | 1
-rw-r--r--  net/mac80211/cfg.c | 12
-rw-r--r--  net/mac80211/iface.c | 2
-rw-r--r--  net/mac80211/tx.c | 77
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 3
-rw-r--r--  net/rds/message.c | 8
-rw-r--r--  net/sctp/endpointola.c | 2
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  net/sctp/ssnmap.c | 8
-rw-r--r--  net/sctp/tsnmap.c | 13
-rw-r--r--  net/sctp/ulpqueue.c | 87
-rw-r--r--  net/wireless/nl80211.c | 61
-rw-r--r--  sound/core/seq/oss/seq_oss_event.c | 14
-rw-r--r--  sound/core/vmaster.c | 5
-rw-r--r--  sound/pci/hda/hda_codec.c | 11
-rw-r--r--  sound/pci/hda/patch_ca0132.c | 8
-rw-r--r--  sound/pci/hda/patch_realtek.c | 2
-rw-r--r--  sound/pci/ice1712/ice1712.c | 2
-rw-r--r--  sound/soc/codecs/wm5102.c | 15
-rw-r--r--  sound/soc/codecs/wm5110.c | 16
-rw-r--r--  sound/soc/codecs/wm8350.c | 4
-rw-r--r--  sound/soc/codecs/wm8960.c | 8
-rw-r--r--  sound/soc/tegra/tegra20_i2s.h | 2
-rw-r--r--  sound/soc/tegra/tegra30_i2s.h | 2
-rw-r--r--  tools/testing/selftests/efivarfs/efivarfs.sh | 59
211 files changed, 3554 insertions(+), 1127 deletions(-)
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 56fb62b09fc5..b428556197c9 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -30,6 +30,7 @@ The target is named "raid" and it accepts the following parameters:
  raid10  Various RAID10 inspired algorithms chosen by additional params
          - RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
          - RAID1E: Integrated Adjacent Stripe Mirroring
+         - RAID1E: Integrated Offset Stripe Mirroring
          - and other similar RAID10 variants
 
 Reference: Chapter 4 of
@@ -64,15 +65,15 @@ The target is named "raid" and it accepts the following parameters:
   synchronisation state for each region.
 
   [raid10_copies <# copies>]
-  [raid10_format near]
+  [raid10_format <near|far|offset>]
   These two options are used to alter the default layout of
   a RAID10 configuration. The number of copies is can be
-  specified, but the default is 2. There are other variations
-  to how the copies are laid down - the default and only current
-  option is "near". Near copies are what most people think of
-  with respect to mirroring. If these options are left
-  unspecified, or 'raid10_copies 2' and/or 'raid10_format near'
-  are given, then the layouts for 2, 3 and 4 devices are:
+  specified, but the default is 2. There are also three
+  variations to how the copies are laid down - the default
+  is "near". Near copies are what most people think of with
+  respect to mirroring. If these options are left unspecified,
+  or 'raid10_copies 2' and/or 'raid10_format near' are given,
+  then the layouts for 2, 3 and 4 devices are:
   2 drives         3 drives         4 drives
   --------         ----------       --------------
   A1  A1           A1  A1  A2       A1  A1  A2  A2
@@ -85,6 +86,33 @@ The target is named "raid" and it accepts the following parameters:
   3-device layout is what might be called a 'RAID1E - Integrated
   Adjacent Stripe Mirroring'.
 
+  If 'raid10_copies 2' and 'raid10_format far', then the layouts
+  for 2, 3 and 4 devices are:
+  2 drives           3 drives           4 drives
+  --------           --------------     --------------------
+  A1  A2             A1   A2   A3       A1   A2   A3   A4
+  A3  A4             A4   A5   A6       A5   A6   A7   A8
+  A5  A6             A7   A8   A9       A9   A10  A11  A12
+  ..  ..             ..   ..   ..       ..   ..   ..   ..
+  A2  A1             A3   A1   A2       A2   A1   A4   A3
+  A4  A3             A6   A4   A5       A6   A5   A8   A7
+  A6  A5             A9   A7   A8       A10  A9   A12  A11
+  ..  ..             ..   ..   ..       ..   ..   ..   ..
+
+  If 'raid10_copies 2' and 'raid10_format offset', then the
+  layouts for 2, 3 and 4 devices are:
+  2 drives           3 drives           4 drives
+  --------           ------------       -----------------
+  A1  A2             A1   A2   A3       A1   A2   A3   A4
+  A2  A1             A3   A1   A2       A2   A1   A4   A3
+  A3  A4             A4   A5   A6       A5   A6   A7   A8
+  A4  A3             A6   A4   A5       A6   A5   A8   A7
+  A5  A6             A7   A8   A9       A9   A10  A11  A12
+  A6  A5             A9   A7   A8       A10  A9   A12  A11
+  ..  ..             ..   ..   ..       ..   ..   ..   ..
+  Here we see layouts closely akin to 'RAID1E - Integrated
+  Offset Stripe Mirroring'.
+
 <#raid_devs>: The number of devices composing the array.
   Each device consists of two entries. The first is the device
   containing the metadata (if any); the second is the one containing the
@@ -142,3 +170,5 @@ Version History
 1.3.0   Added support for RAID 10
 1.3.1   Allow device replacement/rebuild for RAID 10
 1.3.2   Fix/improve redundancy checking for RAID10
+1.4.0   Non-functional change. Removes arg from mapping function.
+1.4.1   Add RAID10 "far" and "offset" algorithm support.
diff --git a/Documentation/hwmon/adm1275 b/Documentation/hwmon/adm1275
index 2cfa25667123..15b4a20d5062 100644
--- a/Documentation/hwmon/adm1275
+++ b/Documentation/hwmon/adm1275
@@ -15,7 +15,7 @@ Supported chips:
15 Addresses scanned: - 15 Addresses scanned: -
16 Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1276.pdf 16 Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1276.pdf
17 17
18Author: Guenter Roeck <guenter.roeck@ericsson.com> 18Author: Guenter Roeck <linux@roeck-us.net>
19 19
20 20
21Description 21Description
diff --git a/Documentation/hwmon/adt7410 b/Documentation/hwmon/adt7410
index 96004000dc2a..58150c480e56 100644
--- a/Documentation/hwmon/adt7410
+++ b/Documentation/hwmon/adt7410
@@ -4,9 +4,14 @@ Kernel driver adt7410
 Supported chips:
   * Analog Devices ADT7410
     Prefix: 'adt7410'
-    Addresses scanned: I2C 0x48 - 0x4B
+    Addresses scanned: None
     Datasheet: Publicly available at the Analog Devices website
       http://www.analog.com/static/imported-files/data_sheets/ADT7410.pdf
+  * Analog Devices ADT7420
+    Prefix: 'adt7420'
+    Addresses scanned: None
+    Datasheet: Publicly available at the Analog Devices website
+      http://www.analog.com/static/imported-files/data_sheets/ADT7420.pdf
 
 Author: Hartmut Knaack <knaack.h@gmx.de>
 
@@ -27,6 +32,10 @@ value per second or even just get one sample on demand for power saving.
 Besides, it can completely power down its ADC, if power management is
 required.
 
+The ADT7420 is register compatible, the only differences being the package,
+a slightly narrower operating temperature range (-40°C to +150°C), and a
+better accuracy (0.25°C instead of 0.50°C.)
+
 Configuration Notes
 -------------------
 
diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42
index 165077121238..868d74d6b773 100644
--- a/Documentation/hwmon/jc42
+++ b/Documentation/hwmon/jc42
@@ -49,7 +49,7 @@ Supported chips:
49 Addresses scanned: I2C 0x18 - 0x1f 49 Addresses scanned: I2C 0x18 - 0x1f
50 50
51Author: 51Author:
52 Guenter Roeck <guenter.roeck@ericsson.com> 52 Guenter Roeck <linux@roeck-us.net>
53 53
54 54
55Description 55Description
diff --git a/Documentation/hwmon/lineage-pem b/Documentation/hwmon/lineage-pem
index 2ba5ed126858..83b2ddc160c8 100644
--- a/Documentation/hwmon/lineage-pem
+++ b/Documentation/hwmon/lineage-pem
@@ -8,7 +8,7 @@ Supported devices:
8 Documentation: 8 Documentation:
9 http://www.lineagepower.com/oem/pdf/CPLI2C.pdf 9 http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
10 10
11Author: Guenter Roeck <guenter.roeck@ericsson.com> 11Author: Guenter Roeck <linux@roeck-us.net>
12 12
13 13
14Description 14Description
diff --git a/Documentation/hwmon/lm25066 b/Documentation/hwmon/lm25066
index a21db81c4591..26025e419d35 100644
--- a/Documentation/hwmon/lm25066
+++ b/Documentation/hwmon/lm25066
@@ -19,7 +19,7 @@ Supported chips:
19 Datasheet: 19 Datasheet:
20 http://www.national.com/pf/LM/LM5066.html 20 http://www.national.com/pf/LM/LM5066.html
21 21
22Author: Guenter Roeck <guenter.roeck@ericsson.com> 22Author: Guenter Roeck <linux@roeck-us.net>
23 23
24 24
25Description 25Description
diff --git a/Documentation/hwmon/ltc2978 b/Documentation/hwmon/ltc2978
index c365f9beb5dd..e4d75c606c97 100644
--- a/Documentation/hwmon/ltc2978
+++ b/Documentation/hwmon/ltc2978
@@ -5,13 +5,13 @@ Supported chips:
5 * Linear Technology LTC2978 5 * Linear Technology LTC2978
6 Prefix: 'ltc2978' 6 Prefix: 'ltc2978'
7 Addresses scanned: - 7 Addresses scanned: -
8 Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf 8 Datasheet: http://www.linear.com/product/ltc2978
9 * Linear Technology LTC3880 9 * Linear Technology LTC3880
10 Prefix: 'ltc3880' 10 Prefix: 'ltc3880'
11 Addresses scanned: - 11 Addresses scanned: -
12 Datasheet: http://cds.linear.com/docs/Datasheet/3880f.pdf 12 Datasheet: http://www.linear.com/product/ltc3880
13 13
14Author: Guenter Roeck <guenter.roeck@ericsson.com> 14Author: Guenter Roeck <linux@roeck-us.net>
15 15
16 16
17Description 17Description
diff --git a/Documentation/hwmon/ltc4261 b/Documentation/hwmon/ltc4261
index eba2e2c4b94d..9378a75c6134 100644
--- a/Documentation/hwmon/ltc4261
+++ b/Documentation/hwmon/ltc4261
@@ -8,7 +8,7 @@ Supported chips:
8 Datasheet: 8 Datasheet:
9 http://cds.linear.com/docs/Datasheet/42612fb.pdf 9 http://cds.linear.com/docs/Datasheet/42612fb.pdf
10 10
11Author: Guenter Roeck <guenter.roeck@ericsson.com> 11Author: Guenter Roeck <linux@roeck-us.net>
12 12
13 13
14Description 14Description
diff --git a/Documentation/hwmon/max16064 b/Documentation/hwmon/max16064
index f8b478076f6d..d59cc7829bec 100644
--- a/Documentation/hwmon/max16064
+++ b/Documentation/hwmon/max16064
@@ -7,7 +7,7 @@ Supported chips:
7 Addresses scanned: - 7 Addresses scanned: -
8 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf 8 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
9 9
10Author: Guenter Roeck <guenter.roeck@ericsson.com> 10Author: Guenter Roeck <linux@roeck-us.net>
11 11
12 12
13Description 13Description
diff --git a/Documentation/hwmon/max16065 b/Documentation/hwmon/max16065
index c11f64a1f2ad..208a29e43010 100644
--- a/Documentation/hwmon/max16065
+++ b/Documentation/hwmon/max16065
@@ -24,7 +24,7 @@ Supported chips:
24 http://datasheets.maxim-ic.com/en/ds/MAX16070-MAX16071.pdf 24 http://datasheets.maxim-ic.com/en/ds/MAX16070-MAX16071.pdf
25 25
26 26
27Author: Guenter Roeck <guenter.roeck@ericsson.com> 27Author: Guenter Roeck <linux@roeck-us.net>
28 28
29 29
30Description 30Description
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
index 47651ff341ae..37cbf472a19d 100644
--- a/Documentation/hwmon/max34440
+++ b/Documentation/hwmon/max34440
@@ -27,7 +27,7 @@ Supported chips:
27 Addresses scanned: - 27 Addresses scanned: -
28 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34461.pdf 28 Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34461.pdf
29 29
30Author: Guenter Roeck <guenter.roeck@ericsson.com> 30Author: Guenter Roeck <linux@roeck-us.net>
31 31
32 32
33Description 33Description
diff --git a/Documentation/hwmon/max8688 b/Documentation/hwmon/max8688
index fe849871df32..e78078638b91 100644
--- a/Documentation/hwmon/max8688
+++ b/Documentation/hwmon/max8688
@@ -7,7 +7,7 @@ Supported chips:
7 Addresses scanned: - 7 Addresses scanned: -
8 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf 8 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
9 9
10Author: Guenter Roeck <guenter.roeck@ericsson.com> 10Author: Guenter Roeck <linux@roeck-us.net>
11 11
12 12
13Description 13Description
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
index 3d3a0f97f966..cf756ed48ff9 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus
@@ -34,7 +34,7 @@ Supported chips:
34 Addresses scanned: - 34 Addresses scanned: -
35 Datasheet: n.a. 35 Datasheet: n.a.
36 36
37Author: Guenter Roeck <guenter.roeck@ericsson.com> 37Author: Guenter Roeck <linux@roeck-us.net>
38 38
39 39
40Description 40Description
diff --git a/Documentation/hwmon/smm665 b/Documentation/hwmon/smm665
index 59e316140542..a341eeedab75 100644
--- a/Documentation/hwmon/smm665
+++ b/Documentation/hwmon/smm665
@@ -29,7 +29,7 @@ Supported chips:
29 http://www.summitmicro.com/prod_select/summary/SMM766/SMM766_2086.pdf 29 http://www.summitmicro.com/prod_select/summary/SMM766/SMM766_2086.pdf
30 http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf 30 http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf
31 31
32Author: Guenter Roeck <guenter.roeck@ericsson.com> 32Author: Guenter Roeck <linux@roeck-us.net>
33 33
34 34
35Module Parameters 35Module Parameters
diff --git a/Documentation/hwmon/ucd9000 b/Documentation/hwmon/ucd9000
index 0df5f276505b..805e33edb978 100644
--- a/Documentation/hwmon/ucd9000
+++ b/Documentation/hwmon/ucd9000
@@ -11,7 +11,7 @@ Supported chips:
11 http://focus.ti.com/lit/ds/symlink/ucd9090.pdf 11 http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
12 http://focus.ti.com/lit/ds/symlink/ucd90910.pdf 12 http://focus.ti.com/lit/ds/symlink/ucd90910.pdf
13 13
14Author: Guenter Roeck <guenter.roeck@ericsson.com> 14Author: Guenter Roeck <linux@roeck-us.net>
15 15
16 16
17Description 17Description
diff --git a/Documentation/hwmon/ucd9200 b/Documentation/hwmon/ucd9200
index fd7d07b1908a..1e8060e631bd 100644
--- a/Documentation/hwmon/ucd9200
+++ b/Documentation/hwmon/ucd9200
@@ -15,7 +15,7 @@ Supported chips:
15 http://focus.ti.com/lit/ds/symlink/ucd9246.pdf 15 http://focus.ti.com/lit/ds/symlink/ucd9246.pdf
16 http://focus.ti.com/lit/ds/symlink/ucd9248.pdf 16 http://focus.ti.com/lit/ds/symlink/ucd9248.pdf
17 17
18Author: Guenter Roeck <guenter.roeck@ericsson.com> 18Author: Guenter Roeck <linux@roeck-us.net>
19 19
20 20
21Description 21Description
diff --git a/Documentation/hwmon/zl6100 b/Documentation/hwmon/zl6100
index 3d924b6b59e9..756b57c6b73e 100644
--- a/Documentation/hwmon/zl6100
+++ b/Documentation/hwmon/zl6100
@@ -54,7 +54,7 @@ http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146401
54http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256 54http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256
55 55
56 56
57Author: Guenter Roeck <guenter.roeck@ericsson.com> 57Author: Guenter Roeck <linux@roeck-us.net>
58 58
59 59
60Description 60Description
diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt
index 3035d00757ad..425c51d56aef 100644
--- a/Documentation/power/opp.txt
+++ b/Documentation/power/opp.txt
@@ -1,6 +1,5 @@
-*=============*
-* OPP Library *
-*=============*
+Operating Performance Points (OPP) Library
+==========================================
 
 (C) 2009-2010 Nishanth Menon <nm@ti.com>, Texas Instruments Incorporated
 
@@ -16,15 +15,31 @@ Contents
 
 1. Introduction
 ===============
+1.1 What is an Operating Performance Point (OPP)?
+
 Complex SoCs of today consists of a multiple sub-modules working in conjunction.
 In an operational system executing varied use cases, not all modules in the SoC
 need to function at their highest performing frequency all the time. To
 facilitate this, sub-modules in a SoC are grouped into domains, allowing some
-domains to run at lower voltage and frequency while other domains are loaded
-more. The set of discrete tuples consisting of frequency and voltage pairs that
+domains to run at lower voltage and frequency while other domains run at
+voltage/frequency pairs that are higher.
+
+The set of discrete tuples consisting of frequency and voltage pairs that
 the device will support per domain are called Operating Performance Points or
 OPPs.
 
+As an example:
+Let us consider an MPU device which supports the following:
+{300MHz at minimum voltage of 1V}, {800MHz at minimum voltage of 1.2V},
+{1GHz at minimum voltage of 1.3V}
+
+We can represent these as three OPPs as the following {Hz, uV} tuples:
+{300000000, 1000000}
+{800000000, 1200000}
+{1000000000, 1300000}
+
+1.2 Operating Performance Points Library
+
 OPP library provides a set of helper functions to organize and query the OPP
 information. The library is located in drivers/base/power/opp.c and the header
 is located in include/linux/opp.h. OPP library can be enabled by enabling
diff --git a/MAINTAINERS b/MAINTAINERS
index e95b1e944eb7..95616582c728 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -114,12 +114,6 @@ Maintainers List (try to look for most precise areas first)
 
 -----------------------------------
 
-3C505 NETWORK DRIVER
-M: Philip Blundell <philb@gnu.org>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ethernet/i825xx/3c505*
-
 3C59X NETWORK DRIVER
 M: Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
 L: netdev@vger.kernel.org
@@ -2361,12 +2355,6 @@ W: http://www.arm.linux.org.uk/
 S: Maintained
 F: drivers/video/cyber2000fb.*
 
-CYCLADES 2X SYNC CARD DRIVER
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-W: http://oops.ghostprotocols.net:81/blog
-S: Maintained
-F: drivers/net/wan/cycx*
-
 CYCLADES ASYNC MUX DRIVER
 W: http://www.cyclades.com/
 S: Orphan
@@ -3067,12 +3055,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
 F: drivers/video/s1d13xxxfb.c
 F: include/video/s1d13xxxfb.h
 
-ETHEREXPRESS-16 NETWORK DRIVER
-M: Philip Blundell <philb@gnu.org>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ethernet/i825xx/eexpress.*
-
 ETHERNET BRIDGE
 M: Stephen Hemminger <stephen@networkplumber.org>
 L: bridge@lists.linux-foundation.org
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 5cad8a6dadb0..afed28e37ea5 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -120,7 +120,7 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
120KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) 120KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
121endif 121endif
122 122
123ccflags-y := -fpic -fno-builtin -I$(obj) 123ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
124asflags-y := -Wa,-march=all -DZIMAGE 124asflags-y := -Wa,-march=all -DZIMAGE
125 125
126# Supply kernel BSS size to the decompressor via a linker symbol. 126# Supply kernel BSS size to the decompressor via a linker symbol.
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 9f77e7804f3b..e3d55547e755 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,15 +5,15 @@
5 5
6typedef struct { 6typedef struct {
7#ifdef CONFIG_CPU_HAS_ASID 7#ifdef CONFIG_CPU_HAS_ASID
8 u64 id; 8 atomic64_t id;
9#endif 9#endif
10 unsigned int vmalloc_seq; 10 unsigned int vmalloc_seq;
11} mm_context_t; 11} mm_context_t;
12 12
13#ifdef CONFIG_CPU_HAS_ASID 13#ifdef CONFIG_CPU_HAS_ASID
14#define ASID_BITS 8 14#define ASID_BITS 8
15#define ASID_MASK ((~0ULL) << ASID_BITS) 15#define ASID_MASK ((~0ULL) << ASID_BITS)
16#define ASID(mm) ((mm)->context.id & ~ASID_MASK) 16#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
17#else 17#else
18#define ASID(mm) (0) 18#define ASID(mm) (0)
19#endif 19#endif
@@ -26,7 +26,7 @@ typedef struct {
26 * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com> 26 * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
27 */ 27 */
28typedef struct { 28typedef struct {
29 unsigned long end_brk; 29 unsigned long end_brk;
30} mm_context_t; 30} mm_context_t;
31 31
32#endif 32#endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index e1f644bc7cc5..863a6611323c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
25#ifdef CONFIG_CPU_HAS_ASID 25#ifdef CONFIG_CPU_HAS_ASID
26 26
27void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); 27void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
28#define init_new_context(tsk,mm) ({ mm->context.id = 0; }) 28#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
29 29
30#else /* !CONFIG_CPU_HAS_ASID */ 30#else /* !CONFIG_CPU_HAS_ASID */
31 31
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 6e924d3a77eb..4db8c8820f0d 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -34,10 +34,13 @@
34#define TLB_V6_D_ASID (1 << 17) 34#define TLB_V6_D_ASID (1 << 17)
35#define TLB_V6_I_ASID (1 << 18) 35#define TLB_V6_I_ASID (1 << 18)
36 36
37#define TLB_V6_BP (1 << 19)
38
37/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */ 39/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
38#define TLB_V7_UIS_PAGE (1 << 19) 40#define TLB_V7_UIS_PAGE (1 << 20)
39#define TLB_V7_UIS_FULL (1 << 20) 41#define TLB_V7_UIS_FULL (1 << 21)
40#define TLB_V7_UIS_ASID (1 << 21) 42#define TLB_V7_UIS_ASID (1 << 22)
43#define TLB_V7_UIS_BP (1 << 23)
41 44
42#define TLB_BARRIER (1 << 28) 45#define TLB_BARRIER (1 << 28)
43#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ 46#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
@@ -150,7 +153,8 @@
150#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ 153#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
151 TLB_V6_I_FULL | TLB_V6_D_FULL | \ 154 TLB_V6_I_FULL | TLB_V6_D_FULL | \
152 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ 155 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
153 TLB_V6_I_ASID | TLB_V6_D_ASID) 156 TLB_V6_I_ASID | TLB_V6_D_ASID | \
157 TLB_V6_BP)
154 158
155#ifdef CONFIG_CPU_TLB_V6 159#ifdef CONFIG_CPU_TLB_V6
156# define v6wbi_possible_flags v6wbi_tlb_flags 160# define v6wbi_possible_flags v6wbi_tlb_flags
@@ -166,9 +170,11 @@
166#endif 170#endif
167 171
168#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ 172#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
169 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) 173 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
174 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
170#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ 175#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
171 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID) 176 TLB_V6_U_FULL | TLB_V6_U_PAGE | \
177 TLB_V6_U_ASID | TLB_V6_BP)
172 178
173#ifdef CONFIG_CPU_TLB_V7 179#ifdef CONFIG_CPU_TLB_V7
174 180
@@ -430,6 +436,20 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
430 } 436 }
431} 437}
432 438
439static inline void local_flush_bp_all(void)
440{
441 const int zero = 0;
442 const unsigned int __tlb_flag = __cpu_tlb_flags;
443
444 if (tlb_flag(TLB_V7_UIS_BP))
445 asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
446 else if (tlb_flag(TLB_V6_BP))
447 asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
448
449 if (tlb_flag(TLB_BARRIER))
450 isb();
451}
452
433/* 453/*
434 * flush_pmd_entry 454 * flush_pmd_entry
435 * 455 *
@@ -480,6 +500,7 @@ static inline void clean_pmd_entry(void *pmd)
480#define flush_tlb_kernel_page local_flush_tlb_kernel_page 500#define flush_tlb_kernel_page local_flush_tlb_kernel_page
481#define flush_tlb_range local_flush_tlb_range 501#define flush_tlb_range local_flush_tlb_range
482#define flush_tlb_kernel_range local_flush_tlb_kernel_range 502#define flush_tlb_kernel_range local_flush_tlb_kernel_range
503#define flush_bp_all local_flush_bp_all
483#else 504#else
484extern void flush_tlb_all(void); 505extern void flush_tlb_all(void);
485extern void flush_tlb_mm(struct mm_struct *mm); 506extern void flush_tlb_mm(struct mm_struct *mm);
@@ -487,6 +508,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
487extern void flush_tlb_kernel_page(unsigned long kaddr); 508extern void flush_tlb_kernel_page(unsigned long kaddr);
488extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); 509extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
489extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 510extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
511extern void flush_bp_all(void);
490#endif 512#endif
491 513
492/* 514/*
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 4da7cde70b5d..af33b44990ed 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -404,7 +404,7 @@
404#define __NR_setns (__NR_SYSCALL_BASE+375) 404#define __NR_setns (__NR_SYSCALL_BASE+375)
405#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376) 405#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
406#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) 406#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
407 /* 378 for kcmp */ 407#define __NR_kcmp (__NR_SYSCALL_BASE+378)
408#define __NR_finit_module (__NR_SYSCALL_BASE+379) 408#define __NR_finit_module (__NR_SYSCALL_BASE+379)
409 409
410/* 410/*
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 5ce738b43508..923eec7105cf 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -110,7 +110,7 @@ int main(void)
110 BLANK(); 110 BLANK();
111#endif 111#endif
112#ifdef CONFIG_CPU_HAS_ASID 112#ifdef CONFIG_CPU_HAS_ASID
113 DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id)); 113 DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
114 BLANK(); 114 BLANK();
115#endif 115#endif
116 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); 116 DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 0cc57611fc4f..c6ca7e376773 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -387,7 +387,7 @@
387/* 375 */ CALL(sys_setns) 387/* 375 */ CALL(sys_setns)
388 CALL(sys_process_vm_readv) 388 CALL(sys_process_vm_readv)
389 CALL(sys_process_vm_writev) 389 CALL(sys_process_vm_writev)
390 CALL(sys_ni_syscall) /* reserved for sys_kcmp */ 390 CALL(sys_kcmp)
391 CALL(sys_finit_module) 391 CALL(sys_finit_module)
392#ifndef syscalls_counted 392#ifndef syscalls_counted
393.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 393.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 486a15ae9011..e0eb9a1cae77 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -184,13 +184,22 @@ __create_page_tables:
184 orr r3, r3, #3 @ PGD block type 184 orr r3, r3, #3 @ PGD block type
185 mov r6, #4 @ PTRS_PER_PGD 185 mov r6, #4 @ PTRS_PER_PGD
186 mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER 186 mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER
1871: str r3, [r0], #4 @ set bottom PGD entry bits 1871:
188#ifdef CONFIG_CPU_ENDIAN_BE8
188 str r7, [r0], #4 @ set top PGD entry bits 189 str r7, [r0], #4 @ set top PGD entry bits
190 str r3, [r0], #4 @ set bottom PGD entry bits
191#else
192 str r3, [r0], #4 @ set bottom PGD entry bits
193 str r7, [r0], #4 @ set top PGD entry bits
194#endif
189 add r3, r3, #0x1000 @ next PMD table 195 add r3, r3, #0x1000 @ next PMD table
190 subs r6, r6, #1 196 subs r6, r6, #1
191 bne 1b 197 bne 1b
192 198
193 add r4, r4, #0x1000 @ point to the PMD tables 199 add r4, r4, #0x1000 @ point to the PMD tables
200#ifdef CONFIG_CPU_ENDIAN_BE8
201 add r4, r4, #4 @ we only write the bottom word
202#endif
194#endif 203#endif
195 204
196 ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags 205 ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
@@ -258,6 +267,11 @@ __create_page_tables:
258 addne r6, r6, #1 << SECTION_SHIFT 267 addne r6, r6, #1 << SECTION_SHIFT
259 strne r6, [r3] 268 strne r6, [r3]
260 269
270#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
271 sub r4, r4, #4 @ Fixup page table pointer
272 @ for 64-bit descriptors
273#endif
274
261#ifdef CONFIG_DEBUG_LL 275#ifdef CONFIG_DEBUG_LL
262#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING) 276#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
263 /* 277 /*
@@ -276,13 +290,17 @@ __create_page_tables:
276 orr r3, r7, r3, lsl #SECTION_SHIFT 290 orr r3, r7, r3, lsl #SECTION_SHIFT
277#ifdef CONFIG_ARM_LPAE 291#ifdef CONFIG_ARM_LPAE
278 mov r7, #1 << (54 - 32) @ XN 292 mov r7, #1 << (54 - 32) @ XN
293#ifdef CONFIG_CPU_ENDIAN_BE8
294 str r7, [r0], #4
295 str r3, [r0], #4
279#else 296#else
280 orr r3, r3, #PMD_SECT_XN
281#endif
282 str r3, [r0], #4 297 str r3, [r0], #4
283#ifdef CONFIG_ARM_LPAE
284 str r7, [r0], #4 298 str r7, [r0], #4
285#endif 299#endif
300#else
301 orr r3, r3, #PMD_SECT_XN
302 str r3, [r0], #4
303#endif
286 304
287#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */ 305#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
288 /* we don't need any serial debugging mappings */ 306 /* we don't need any serial debugging mappings */
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 5eae53e7a2e1..96093b75ab90 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1023,7 +1023,7 @@ out_mdbgen:
1023static int __cpuinit dbg_reset_notify(struct notifier_block *self, 1023static int __cpuinit dbg_reset_notify(struct notifier_block *self,
1024 unsigned long action, void *cpu) 1024 unsigned long action, void *cpu)
1025{ 1025{
1026 if (action == CPU_ONLINE) 1026 if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
1027 smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1); 1027 smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
1028 1028
1029 return NOTIFY_OK; 1029 return NOTIFY_OK;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 31e0eb353cd8..146157dfe27c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -400,7 +400,7 @@ __hw_perf_event_init(struct perf_event *event)
400 } 400 }
401 401
402 if (event->group_leader != event) { 402 if (event->group_leader != event) {
403 if (validate_group(event) != 0); 403 if (validate_group(event) != 0)
404 return -EINVAL; 404 return -EINVAL;
405 } 405 }
406 406
@@ -484,7 +484,7 @@ const struct dev_pm_ops armpmu_dev_pm_ops = {
484 SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL) 484 SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
485}; 485};
486 486
487static void __init armpmu_init(struct arm_pmu *armpmu) 487static void armpmu_init(struct arm_pmu *armpmu)
488{ 488{
489 atomic_set(&armpmu->active_events, 0); 489 atomic_set(&armpmu->active_events, 0);
490 mutex_init(&armpmu->reserve_mutex); 490 mutex_init(&armpmu->reserve_mutex);
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8c79a9e70b83..039cffb053a7 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -774,7 +774,7 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
774/* 774/*
775 * PMXEVTYPER: Event selection reg 775 * PMXEVTYPER: Event selection reg
776 */ 776 */
777#define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */ 777#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
778#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ 778#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
779 779
780/* 780/*
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1bdfd87c8e41..31644f1978d5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -285,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
285 * switch away from it before attempting any exclusive accesses. 285 * switch away from it before attempting any exclusive accesses.
286 */ 286 */
287 cpu_switch_mm(mm->pgd, mm); 287 cpu_switch_mm(mm->pgd, mm);
288 local_flush_bp_all();
288 enter_lazy_tlb(mm, current); 289 enter_lazy_tlb(mm, current);
289 local_flush_tlb_all(); 290 local_flush_tlb_all();
290 291
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 02c5d2ce23bf..bd0300531399 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -64,6 +64,11 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
64 local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); 64 local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
65} 65}
66 66
67static inline void ipi_flush_bp_all(void *ignored)
68{
69 local_flush_bp_all();
70}
71
67void flush_tlb_all(void) 72void flush_tlb_all(void)
68{ 73{
69 if (tlb_ops_need_broadcast()) 74 if (tlb_ops_need_broadcast())
@@ -127,3 +132,10 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
127 local_flush_tlb_kernel_range(start, end); 132 local_flush_tlb_kernel_range(start, end);
128} 133}
129 134
135void flush_bp_all(void)
136{
137 if (tlb_ops_need_broadcast())
138 on_each_cpu(ipi_flush_bp_all, NULL, 1);
139 else
140 local_flush_bp_all();
141}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index c092115d903a..3f2565037480 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -22,6 +22,7 @@
22#include <linux/of_irq.h> 22#include <linux/of_irq.h>
23#include <linux/of_address.h> 23#include <linux/of_address.h>
24 24
25#include <asm/smp_plat.h>
25#include <asm/smp_twd.h> 26#include <asm/smp_twd.h>
26#include <asm/localtimer.h> 27#include <asm/localtimer.h>
27 28
@@ -373,6 +374,9 @@ void __init twd_local_timer_of_register(void)
373 struct device_node *np; 374 struct device_node *np;
374 int err; 375 int err;
375 376
377 if (!is_smp() || !setup_max_cpus)
378 return;
379
376 np = of_find_matching_node(NULL, twd_of_match); 380 np = of_find_matching_node(NULL, twd_of_match);
377 if (!np) 381 if (!np)
378 return; 382 return;
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 358bca3a995e..c59c97ea8268 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -68,6 +68,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
68 ret = __cpu_suspend(arg, fn); 68 ret = __cpu_suspend(arg, fn);
69 if (ret == 0) { 69 if (ret == 0) {
70 cpu_switch_mm(mm->pgd, mm); 70 cpu_switch_mm(mm->pgd, mm);
71 local_flush_bp_all();
71 local_flush_tlb_all(); 72 local_flush_tlb_all();
72 } 73 }
73 74
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 650d5923ab83..d912e7397ecc 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -19,9 +19,9 @@
191: subs r2, r2, #4 @ 1 do we have enough 191: subs r2, r2, #4 @ 1 do we have enough
20 blt 5f @ 1 bytes to align with? 20 blt 5f @ 1 bytes to align with?
21 cmp r3, #2 @ 1 21 cmp r3, #2 @ 1
22 strltb r1, [r0], #1 @ 1 22 strltb r1, [ip], #1 @ 1
23 strleb r1, [r0], #1 @ 1 23 strleb r1, [ip], #1 @ 1
24 strb r1, [r0], #1 @ 1 24 strb r1, [ip], #1 @ 1
25 add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) 25 add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
26/* 26/*
27 * The pointer is now aligned and the length is adjusted. Try doing the 27 * The pointer is now aligned and the length is adjusted. Try doing the
@@ -29,10 +29,14 @@
29 */ 29 */
30 30
31ENTRY(memset) 31ENTRY(memset)
32 ands r3, r0, #3 @ 1 unaligned? 32/*
33 * Preserve the contents of r0 for the return value.
34 */
35 mov ip, r0
36 ands r3, ip, #3 @ 1 unaligned?
33 bne 1b @ 1 37 bne 1b @ 1
34/* 38/*
35 * we know that the pointer in r0 is aligned to a word boundary. 39 * we know that the pointer in ip is aligned to a word boundary.
36 */ 40 */
37 orr r1, r1, r1, lsl #8 41 orr r1, r1, r1, lsl #8
38 orr r1, r1, r1, lsl #16 42 orr r1, r1, r1, lsl #16
@@ -43,29 +47,28 @@ ENTRY(memset)
43#if ! CALGN(1)+0 47#if ! CALGN(1)+0
44 48
45/* 49/*
46 * We need an extra register for this loop - save the return address and 50 * We need 2 extra registers for this loop - use r8 and the LR
47 * use the LR
48 */ 51 */
49 str lr, [sp, #-4]! 52 stmfd sp!, {r8, lr}
50 mov ip, r1 53 mov r8, r1
51 mov lr, r1 54 mov lr, r1
52 55
532: subs r2, r2, #64 562: subs r2, r2, #64
54 stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time. 57 stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
55 stmgeia r0!, {r1, r3, ip, lr} 58 stmgeia ip!, {r1, r3, r8, lr}
56 stmgeia r0!, {r1, r3, ip, lr} 59 stmgeia ip!, {r1, r3, r8, lr}
57 stmgeia r0!, {r1, r3, ip, lr} 60 stmgeia ip!, {r1, r3, r8, lr}
58 bgt 2b 61 bgt 2b
59 ldmeqfd sp!, {pc} @ Now <64 bytes to go. 62 ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
60/* 63/*
61 * No need to correct the count; we're only testing bits from now on 64 * No need to correct the count; we're only testing bits from now on
62 */ 65 */
63 tst r2, #32 66 tst r2, #32
64 stmneia r0!, {r1, r3, ip, lr} 67 stmneia ip!, {r1, r3, r8, lr}
65 stmneia r0!, {r1, r3, ip, lr} 68 stmneia ip!, {r1, r3, r8, lr}
66 tst r2, #16 69 tst r2, #16
67 stmneia r0!, {r1, r3, ip, lr} 70 stmneia ip!, {r1, r3, r8, lr}
68 ldr lr, [sp], #4 71 ldmfd sp!, {r8, lr}
69 72
70#else 73#else
71 74
@@ -74,54 +77,54 @@ ENTRY(memset)
74 * whole cache lines at once. 77 * whole cache lines at once.
75 */ 78 */
76 79
77 stmfd sp!, {r4-r7, lr} 80 stmfd sp!, {r4-r8, lr}
78 mov r4, r1 81 mov r4, r1
79 mov r5, r1 82 mov r5, r1
80 mov r6, r1 83 mov r6, r1
81 mov r7, r1 84 mov r7, r1
82 mov ip, r1 85 mov r8, r1
83 mov lr, r1 86 mov lr, r1
84 87
85 cmp r2, #96 88 cmp r2, #96
86 tstgt r0, #31 89 tstgt ip, #31
87 ble 3f 90 ble 3f
88 91
89 and ip, r0, #31 92 and r8, ip, #31
90 rsb ip, ip, #32 93 rsb r8, r8, #32
91 sub r2, r2, ip 94 sub r2, r2, r8
92 movs ip, ip, lsl #(32 - 4) 95 movs r8, r8, lsl #(32 - 4)
93 stmcsia r0!, {r4, r5, r6, r7} 96 stmcsia ip!, {r4, r5, r6, r7}
94 stmmiia r0!, {r4, r5} 97 stmmiia ip!, {r4, r5}
95 tst ip, #(1 << 30) 98 tst r8, #(1 << 30)
96 mov ip, r1 99 mov r8, r1
97 strne r1, [r0], #4 100 strne r1, [ip], #4
98 101
993: subs r2, r2, #64 1023: subs r2, r2, #64
100 stmgeia r0!, {r1, r3-r7, ip, lr} 103 stmgeia ip!, {r1, r3-r8, lr}
101 stmgeia r0!, {r1, r3-r7, ip, lr} 104 stmgeia ip!, {r1, r3-r8, lr}
102 bgt 3b 105 bgt 3b
103 ldmeqfd sp!, {r4-r7, pc} 106 ldmeqfd sp!, {r4-r8, pc}
104 107
105 tst r2, #32 108 tst r2, #32
106 stmneia r0!, {r1, r3-r7, ip, lr} 109 stmneia ip!, {r1, r3-r8, lr}
107 tst r2, #16 110 tst r2, #16
108 stmneia r0!, {r4-r7} 111 stmneia ip!, {r4-r7}
109 ldmfd sp!, {r4-r7, lr} 112 ldmfd sp!, {r4-r8, lr}
110 113
111#endif 114#endif
112 115
1134: tst r2, #8 1164: tst r2, #8
114 stmneia r0!, {r1, r3} 117 stmneia ip!, {r1, r3}
115 tst r2, #4 118 tst r2, #4
116 strne r1, [r0], #4 119 strne r1, [ip], #4
117/* 120/*
118 * When we get here, we've got less than 4 bytes to zero. We 121 * When we get here, we've got less than 4 bytes to zero. We
119 * may have an unaligned pointer as well. 122 * may have an unaligned pointer as well.
120 */ 123 */
1215: tst r2, #2 1245: tst r2, #2
122 strneb r1, [r0], #1 125 strneb r1, [ip], #1
123 strneb r1, [r0], #1 126 strneb r1, [ip], #1
124 tst r2, #1 127 tst r2, #1
125 strneb r1, [r0], #1 128 strneb r1, [ip], #1
126 mov pc, lr 129 mov pc, lr
127ENDPROC(memset) 130ENDPROC(memset)
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 27c2cb7ab813..1504b68f4c66 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -168,7 +168,7 @@ void __init netx_init_irq(void)
168{ 168{
169 int irq; 169 int irq;
170 170
171 vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0); 171 vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0);
172 172
173 for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) { 173 for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
174 irq_set_chip_and_handler(irq, &netx_hif_chip, 174 irq_set_chip_and_handler(irq, &netx_hif_chip,
diff --git a/arch/arm/mach-netx/include/mach/irqs.h b/arch/arm/mach-netx/include/mach/irqs.h
index 6ce914d54a30..8f74a844a775 100644
--- a/arch/arm/mach-netx/include/mach/irqs.h
+++ b/arch/arm/mach-netx/include/mach/irqs.h
@@ -17,42 +17,42 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19 19
20#define NETX_IRQ_VIC_START 0 20#define NETX_IRQ_VIC_START 64
21#define NETX_IRQ_SOFTINT 0 21#define NETX_IRQ_SOFTINT (NETX_IRQ_VIC_START + 0)
22#define NETX_IRQ_TIMER0 1 22#define NETX_IRQ_TIMER0 (NETX_IRQ_VIC_START + 1)
23#define NETX_IRQ_TIMER1 2 23#define NETX_IRQ_TIMER1 (NETX_IRQ_VIC_START + 2)
24#define NETX_IRQ_TIMER2 3 24#define NETX_IRQ_TIMER2 (NETX_IRQ_VIC_START + 3)
25#define NETX_IRQ_SYSTIME_NS 4 25#define NETX_IRQ_SYSTIME_NS (NETX_IRQ_VIC_START + 4)
26#define NETX_IRQ_SYSTIME_S 5 26#define NETX_IRQ_SYSTIME_S (NETX_IRQ_VIC_START + 5)
27#define NETX_IRQ_GPIO_15 6 27#define NETX_IRQ_GPIO_15 (NETX_IRQ_VIC_START + 6)
28#define NETX_IRQ_WATCHDOG 7 28#define NETX_IRQ_WATCHDOG (NETX_IRQ_VIC_START + 7)
29#define NETX_IRQ_UART0 8 29#define NETX_IRQ_UART0 (NETX_IRQ_VIC_START + 8)
30#define NETX_IRQ_UART1 9 30#define NETX_IRQ_UART1 (NETX_IRQ_VIC_START + 9)
31#define NETX_IRQ_UART2 10 31#define NETX_IRQ_UART2 (NETX_IRQ_VIC_START + 10)
32#define NETX_IRQ_USB 11 32#define NETX_IRQ_USB (NETX_IRQ_VIC_START + 11)
33#define NETX_IRQ_SPI 12 33#define NETX_IRQ_SPI (NETX_IRQ_VIC_START + 12)
34#define NETX_IRQ_I2C 13 34#define NETX_IRQ_I2C (NETX_IRQ_VIC_START + 13)
35#define NETX_IRQ_LCD 14 35#define NETX_IRQ_LCD (NETX_IRQ_VIC_START + 14)
36#define NETX_IRQ_HIF 15 36#define NETX_IRQ_HIF (NETX_IRQ_VIC_START + 15)
37#define NETX_IRQ_GPIO_0_14 16 37#define NETX_IRQ_GPIO_0_14 (NETX_IRQ_VIC_START + 16)
38#define NETX_IRQ_XPEC0 17 38#define NETX_IRQ_XPEC0 (NETX_IRQ_VIC_START + 17)
39#define NETX_IRQ_XPEC1 18 39#define NETX_IRQ_XPEC1 (NETX_IRQ_VIC_START + 18)
40#define NETX_IRQ_XPEC2 19 40#define NETX_IRQ_XPEC2 (NETX_IRQ_VIC_START + 19)
41#define NETX_IRQ_XPEC3 20 41#define NETX_IRQ_XPEC3 (NETX_IRQ_VIC_START + 20)
42#define NETX_IRQ_XPEC(no) (17 + (no)) 42#define NETX_IRQ_XPEC(no) (NETX_IRQ_VIC_START + 17 + (no))
43#define NETX_IRQ_MSYNC0 21 43#define NETX_IRQ_MSYNC0 (NETX_IRQ_VIC_START + 21)
44#define NETX_IRQ_MSYNC1 22 44#define NETX_IRQ_MSYNC1 (NETX_IRQ_VIC_START + 22)
45#define NETX_IRQ_MSYNC2 23 45#define NETX_IRQ_MSYNC2 (NETX_IRQ_VIC_START + 23)
46#define NETX_IRQ_MSYNC3 24 46#define NETX_IRQ_MSYNC3 (NETX_IRQ_VIC_START + 24)
47#define NETX_IRQ_IRQ_PHY 25 47#define NETX_IRQ_IRQ_PHY (NETX_IRQ_VIC_START + 25)
48#define NETX_IRQ_ISO_AREA 26 48#define NETX_IRQ_ISO_AREA (NETX_IRQ_VIC_START + 26)
49/* int 27 is reserved */ 49/* int 27 is reserved */
50/* int 28 is reserved */ 50/* int 28 is reserved */
51#define NETX_IRQ_TIMER3 29 51#define NETX_IRQ_TIMER3 (NETX_IRQ_VIC_START + 29)
52#define NETX_IRQ_TIMER4 30 52#define NETX_IRQ_TIMER4 (NETX_IRQ_VIC_START + 30)
53/* int 31 is reserved */ 53/* int 31 is reserved */
54 54
55#define NETX_IRQS 32 55#define NETX_IRQS (NETX_IRQ_VIC_START + 32)
56 56
57/* for multiplexed irqs on gpio 0..14 */ 57/* for multiplexed irqs on gpio 0..14 */
58#define NETX_IRQ_GPIO(x) (NETX_IRQS + (x)) 58#define NETX_IRQ_GPIO(x) (NETX_IRQS + (x))
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a0511191f6b..a5a4b2bc42ba 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
152 return 0; 152 return 0;
153} 153}
154 154
155static void new_context(struct mm_struct *mm, unsigned int cpu) 155static u64 new_context(struct mm_struct *mm, unsigned int cpu)
156{ 156{
157 u64 asid = mm->context.id; 157 u64 asid = atomic64_read(&mm->context.id);
158 u64 generation = atomic64_read(&asid_generation); 158 u64 generation = atomic64_read(&asid_generation);
159 159
160 if (asid != 0 && is_reserved_asid(asid)) { 160 if (asid != 0 && is_reserved_asid(asid)) {
@@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
181 cpumask_clear(mm_cpumask(mm)); 181 cpumask_clear(mm_cpumask(mm));
182 } 182 }
183 183
184 mm->context.id = asid; 184 return asid;
185} 185}
186 186
187void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) 187void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
188{ 188{
189 unsigned long flags; 189 unsigned long flags;
190 unsigned int cpu = smp_processor_id(); 190 unsigned int cpu = smp_processor_id();
191 u64 asid;
191 192
192 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) 193 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
193 __check_vmalloc_seq(mm); 194 __check_vmalloc_seq(mm);
@@ -198,20 +199,26 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
198 */ 199 */
199 cpu_set_reserved_ttbr0(); 200 cpu_set_reserved_ttbr0();
200 201
201 if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS) 202 asid = atomic64_read(&mm->context.id);
202 && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id)) 203 if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
204 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
203 goto switch_mm_fastpath; 205 goto switch_mm_fastpath;
204 206
205 raw_spin_lock_irqsave(&cpu_asid_lock, flags); 207 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
206 /* Check that our ASID belongs to the current generation. */ 208 /* Check that our ASID belongs to the current generation. */
207 if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS) 209 asid = atomic64_read(&mm->context.id);
208 new_context(mm, cpu); 210 if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
209 211 asid = new_context(mm, cpu);
210 atomic64_set(&per_cpu(active_asids, cpu), mm->context.id); 212 atomic64_set(&mm->context.id, asid);
211 cpumask_set_cpu(cpu, mm_cpumask(mm)); 213 }
212 214
213 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) 215 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
216 local_flush_bp_all();
214 local_flush_tlb_all(); 217 local_flush_tlb_all();
218 }
219
220 atomic64_set(&per_cpu(active_asids, cpu), asid);
221 cpumask_set_cpu(cpu, mm_cpumask(mm));
215 raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); 222 raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
216 223
217switch_mm_fastpath: 224switch_mm_fastpath:
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 2dffc010cc41..5ee505c937d1 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -141,6 +141,7 @@ void setup_mm_for_reboot(void)
141{ 141{
142 /* Switch to the identity mapping. */ 142 /* Switch to the identity mapping. */
143 cpu_switch_mm(idmap_pgd, &init_mm); 143 cpu_switch_mm(idmap_pgd, &init_mm);
144 local_flush_bp_all();
144 145
145#ifdef CONFIG_CPU_HAS_ASID 146#ifdef CONFIG_CPU_HAS_ASID
146 /* 147 /*
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 50bf1dafc9ea..6ffd78c0f9ab 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -48,7 +48,7 @@
48ENTRY(cpu_v7_switch_mm) 48ENTRY(cpu_v7_switch_mm)
49#ifdef CONFIG_MMU 49#ifdef CONFIG_MMU
50 mmid r1, r1 @ get mm->context.id 50 mmid r1, r1 @ get mm->context.id
51 and r3, r1, #0xff 51 asid r3, r1
52 mov r3, r3, lsl #(48 - 32) @ ASID 52 mov r3, r3, lsl #(48 - 32) @ ASID
53 mcrr p15, 0, r0, r3, c2 @ set TTB 0 53 mcrr p15, 0, r0, r3, c2 @ set TTB 0
54 isb 54 isb
diff --git a/arch/metag/include/asm/elf.h b/arch/metag/include/asm/elf.h
index d63b9d0e57dd..d2baf6961794 100644
--- a/arch/metag/include/asm/elf.h
+++ b/arch/metag/include/asm/elf.h
@@ -100,9 +100,6 @@ typedef unsigned long elf_fpregset_t;
100 100
101#define ELF_PLATFORM (NULL) 101#define ELF_PLATFORM (NULL)
102 102
103#define SET_PERSONALITY(ex) \
104 set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
105
106#define STACK_RND_MASK (0) 103#define STACK_RND_MASK (0)
107 104
108#ifdef CONFIG_METAG_USER_TCM 105#ifdef CONFIG_METAG_USER_TCM
diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig
index cd7f2f2ad416..975f2f4e3ecf 100644
--- a/arch/metag/mm/Kconfig
+++ b/arch/metag/mm/Kconfig
@@ -40,6 +40,7 @@ endchoice
40 40
41config NUMA 41config NUMA
42 bool "Non Uniform Memory Access (NUMA) Support" 42 bool "Non Uniform Memory Access (NUMA) Support"
43 select ARCH_WANT_NUMA_VARIABLE_LOCALITY
43 help 44 help
44 Some Meta systems have MMU-mappable on-chip memories with 45 Some Meta systems have MMU-mappable on-chip memories with
45 lower latencies than main memory. This enables support for 46 lower latencies than main memory. This enables support for
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index a5f8264d2d3c..125e16520061 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -113,7 +113,7 @@
113 STEPUP4((t)+16, fn) 113 STEPUP4((t)+16, fn)
114 114
115_GLOBAL(powerpc_sha_transform) 115_GLOBAL(powerpc_sha_transform)
116 PPC_STLU r1,-STACKFRAMESIZE(r1) 116 PPC_STLU r1,-INT_FRAME_SIZE(r1)
117 SAVE_8GPRS(14, r1) 117 SAVE_8GPRS(14, r1)
118 SAVE_10GPRS(22, r1) 118 SAVE_10GPRS(22, r1)
119 119
@@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform)
175 175
176 REST_8GPRS(14, r1) 176 REST_8GPRS(14, r1)
177 REST_10GPRS(22, r1) 177 REST_10GPRS(22, r1)
178 addi r1,r1,STACKFRAMESIZE 178 addi r1,r1,INT_FRAME_SIZE
179 blr 179 blr
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index ef918a2328bb..08bd299c75b1 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -52,8 +52,6 @@
52#define smp_mb__before_clear_bit() smp_mb() 52#define smp_mb__before_clear_bit() smp_mb()
53#define smp_mb__after_clear_bit() smp_mb() 53#define smp_mb__after_clear_bit() smp_mb()
54 54
55#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
56
57/* Macro for generating the ***_bits() functions */ 55/* Macro for generating the ***_bits() functions */
58#define DEFINE_BITOP(fn, op, prefix, postfix) \ 56#define DEFINE_BITOP(fn, op, prefix, postfix) \
59static __inline__ void fn(unsigned long mask, \ 57static __inline__ void fn(unsigned long mask, \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e66586122030..c9c67fc888c9 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -266,7 +266,8 @@
266#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ 266#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
267#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ 267#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
268#define SPRN_FSCR 0x099 /* Facility Status & Control Register */ 268#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
269#define FSCR_TAR (1<<8) /* Enable Target Adress Register */ 269#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
270#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
270#define SPRN_TAR 0x32f /* Target Address Register */ 271#define SPRN_TAR 0x32f /* Target Address Register */
271#define SPRN_LPCR 0x13E /* LPAR Control Register */ 272#define SPRN_LPCR 0x13E /* LPAR Control Register */
272#define LPCR_VPM0 (1ul << (63-0)) 273#define LPCR_VPM0 (1ul << (63-0))
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 535b6d8a41cc..ebbec52d21bd 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -358,3 +358,4 @@ SYSCALL_SPU(setns)
358COMPAT_SYS(process_vm_readv) 358COMPAT_SYS(process_vm_readv)
359COMPAT_SYS(process_vm_writev) 359COMPAT_SYS(process_vm_writev)
360SYSCALL(finit_module) 360SYSCALL(finit_module)
361SYSCALL(ni_syscall) /* sys_kcmp */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index f25b5c45c435..1487f0f12293 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 354 15#define __NR_syscalls 355
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 8c478c6c6b1e..74cb4d72d673 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -376,6 +376,7 @@
376#define __NR_process_vm_readv 351 376#define __NR_process_vm_readv 351
377#define __NR_process_vm_writev 352 377#define __NR_process_vm_writev 352
378#define __NR_finit_module 353 378#define __NR_finit_module 353
379#define __NR_kcmp 354
379 380
380 381
381#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 382#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index d29facbf9a28..ea847abb0d0a 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7)
48 48
49_GLOBAL(__setup_cpu_power8) 49_GLOBAL(__setup_cpu_power8)
50 mflr r11 50 mflr r11
51 bl __init_FSCR
51 bl __init_hvmode_206 52 bl __init_hvmode_206
52 mtlr r11 53 mtlr r11
53 beqlr 54 beqlr
@@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8)
56 mfspr r3,SPRN_LPCR 57 mfspr r3,SPRN_LPCR
57 oris r3, r3, LPCR_AIL_3@h 58 oris r3, r3, LPCR_AIL_3@h
58 bl __init_LPCR 59 bl __init_LPCR
59 bl __init_FSCR
60 bl __init_TLB 60 bl __init_TLB
61 mtlr r11 61 mtlr r11
62 blr 62 blr
63 63
64_GLOBAL(__restore_cpu_power8) 64_GLOBAL(__restore_cpu_power8)
65 mflr r11 65 mflr r11
66 bl __init_FSCR
66 mfmsr r3 67 mfmsr r3
67 rldicl. r0,r3,4,63 68 rldicl. r0,r3,4,63
68 beqlr 69 beqlr
@@ -115,7 +116,7 @@ __init_LPCR:
115 116
116__init_FSCR: 117__init_FSCR:
117 mfspr r3,SPRN_FSCR 118 mfspr r3,SPRN_FSCR
118 ori r3,r3,FSCR_TAR 119 ori r3,r3,FSCR_TAR|FSCR_DSCR
119 mtspr SPRN_FSCR,r3 120 mtspr SPRN_FSCR,r3
120 blr 121 blr
121 122
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a8a5361fb70c..87ef8f5ee5bc 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
74 mflr r10 ; \ 74 mflr r10 ; \
75 ld r12,PACAKBASE(r13) ; \ 75 ld r12,PACAKBASE(r13) ; \
76 LOAD_HANDLER(r12, system_call_entry_direct) ; \ 76 LOAD_HANDLER(r12, system_call_entry_direct) ; \
77 mtlr r12 ; \ 77 mtctr r12 ; \
78 mfspr r12,SPRN_SRR1 ; \ 78 mfspr r12,SPRN_SRR1 ; \
79 /* Re-use of r13... No spare regs to do this */ \ 79 /* Re-use of r13... No spare regs to do this */ \
80 li r13,MSR_RI ; \ 80 li r13,MSR_RI ; \
81 mtmsrd r13,1 ; \ 81 mtmsrd r13,1 ; \
82 GET_PACA(r13) ; /* get r13 back */ \ 82 GET_PACA(r13) ; /* get r13 back */ \
83 blr ; 83 bctr ;
84#else 84#else
85 /* We can branch directly */ 85 /* We can branch directly */
86#define SYSCALL_PSERIES_2_DIRECT \ 86#define SYSCALL_PSERIES_2_DIRECT \
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
index fcf4b4cbeaf3..4557e91626c4 100644
--- a/arch/powerpc/platforms/pseries/hvcserver.c
+++ b/arch/powerpc/platforms/pseries/hvcserver.c
@@ -23,6 +23,7 @@
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/string.h>
26 27
27#include <asm/hvcall.h> 28#include <asm/hvcall.h>
28#include <asm/hvcserver.h> 29#include <asm/hvcserver.h>
@@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
188 = (unsigned int)last_p_partition_ID; 189 = (unsigned int)last_p_partition_ID;
189 190
190 /* copy the Null-term char too */ 191 /* copy the Null-term char too */
191 strncpy(&next_partner_info->location_code[0], 192 strlcpy(&next_partner_info->location_code[0],
192 (char *)&pi_buff[2], 193 (char *)&pi_buff[2],
193 strlen((char *)&pi_buff[2]) + 1); 194 sizeof(next_partner_info->location_code));
194 195
195 list_add_tail(&(next_partner_info->node), head); 196 list_add_tail(&(next_partner_info->node), head);
196 next_partner_info = NULL; 197 next_partner_info = NULL;
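
The hvcserver.c hunk swaps a strncpy() bounded by the source string for a copy bounded by the destination, so location_code can never be overrun and is always NUL-terminated. A small sketch of that idea; strlcpy() is a BSD extension that not every libc ships, so the example carries its own minimal helper, and the sample string is invented:

#include <stdio.h>
#include <string.h>

static size_t copy_truncate(char *dst, const char *src, size_t dstsize)
{
        size_t len = strlen(src);

        if (dstsize) {
                size_t n = len < dstsize - 1 ? len : dstsize - 1;
                memcpy(dst, src, n);
                dst[n] = '\0';          /* always terminated */
        }
        return len;                     /* length the caller tried to copy */
}

int main(void)
{
        char location_code[8];
        const char *from_firmware = "U9117.MMA.10ABCDE-V1-C0";

        copy_truncate(location_code, from_firmware, sizeof(location_code));
        printf("%s\n", location_code);  /* "U9117.M", truncated safely */
        return 0;
}
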
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 5b5e9cb774b5..653668d140f9 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -14,13 +14,29 @@
14 * analysis of kexec-tools; if other broken bootloaders initialize a 14 * analysis of kexec-tools; if other broken bootloaders initialize a
15 * different set of fields we will need to figure out how to disambiguate. 15 * different set of fields we will need to figure out how to disambiguate.
16 * 16 *
17 * Note: efi_info is commonly left uninitialized, but that field has a
18 * private magic, so it is better to leave it unchanged.
17 */ 19 */
18static void sanitize_boot_params(struct boot_params *boot_params) 20static void sanitize_boot_params(struct boot_params *boot_params)
19{ 21{
22 /*
23 * IMPORTANT NOTE TO BOOTLOADER AUTHORS: do not simply clear
24 * this field. The purpose of this field is to guarantee
25 * compliance with the x86 boot spec located in
26 * Documentation/x86/boot.txt . That spec says that the
27 * *whole* structure should be cleared, after which only the
28 * portion defined by struct setup_header (boot_params->hdr)
29 * should be copied in.
30 *
31 * If you're having an issue because the sentinel is set, you
32 * need to change the whole structure to be cleared, not this
33 * (or any other) individual field, or you will soon have
34 * problems again.
35 */
20 if (boot_params->sentinel) { 36 if (boot_params->sentinel) {
21 /*fields in boot_params are not valid, clear them */ 37 /* fields in boot_params are left uninitialized, clear them */
22 memset(&boot_params->olpc_ofw_header, 0, 38 memset(&boot_params->olpc_ofw_header, 0,
23 (char *)&boot_params->alt_mem_k - 39 (char *)&boot_params->efi_info -
24 (char *)&boot_params->olpc_ofw_header); 40 (char *)&boot_params->olpc_ofw_header);
25 memset(&boot_params->kbd_status, 0, 41 memset(&boot_params->kbd_status, 0,
26 (char *)&boot_params->hdr - 42 (char *)&boot_params->hdr -
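
The bootparam_utils.h hunk shrinks the first memset so it stops at efi_info instead of alt_mem_k, leaving the EFI data (which carries its own magic) untouched while still clearing the fields broken bootloaders leave stale. The clearing idiom itself is plain pointer arithmetic between two members of the same struct; a self-contained sketch with an invented struct:

#include <stdio.h>
#include <string.h>

struct fake_params {
        int sentinel;
        int scratch_a;          /* start of the range to clear        */
        int scratch_b;
        int keep_me;            /* first field that must be preserved */
};

int main(void)
{
        struct fake_params p = { 1, 11, 22, 33 };

        if (p.sentinel)
                memset(&p.scratch_a, 0,
                       (char *)&p.keep_me - (char *)&p.scratch_a);

        printf("%d %d %d\n", p.scratch_a, p.scratch_b, p.keep_me); /* 0 0 33 */
        return 0;
}
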
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 84d32855f65c..90d8cc930f5e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -171,9 +171,15 @@ static struct resource bss_resource = {
171 171
172#ifdef CONFIG_X86_32 172#ifdef CONFIG_X86_32
173/* cpu data as detected by the assembly code in head.S */ 173/* cpu data as detected by the assembly code in head.S */
174struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1}; 174struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
175 .wp_works_ok = -1,
176 .fdiv_bug = -1,
177};
175/* common cpu data for all cpus */ 178/* common cpu data for all cpus */
176struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1}; 179struct cpuinfo_x86 boot_cpu_data __read_mostly = {
180 .wp_works_ok = -1,
181 .fdiv_bug = -1,
182};
177EXPORT_SYMBOL(boot_cpu_data); 183EXPORT_SYMBOL(boot_cpu_data);
178 184
179unsigned int def_to_bigsmp; 185unsigned int def_to_bigsmp;
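
The setup.c hunk replaces the positional initializer lists with designated initializers, so the two -1 defaults stay attached to wp_works_ok and fdiv_bug even if cpuinfo_x86 gains or reorders members. A stand-alone sketch of the form (the struct below is a stand-in, not the real cpuinfo_x86):

#include <stdio.h>

struct fake_cpuinfo {
        int family;
        int model;
        int wp_works_ok;
        int fdiv_bug;
};

/* Unnamed members are implicitly zeroed, the named ones get -1. */
static struct fake_cpuinfo boot_cpu = {
        .wp_works_ok = -1,
        .fdiv_bug = -1,
};

int main(void)
{
        printf("%d %d %d\n", boot_cpu.family, boot_cpu.wp_works_ok,
               boot_cpu.fdiv_bug);      /* 0 -1 -1 */
        return 0;
}
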
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a6ceaedc396a..9f190a2a00e9 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1365,9 +1365,8 @@ static inline void mwait_play_dead(void)
1365 unsigned int eax, ebx, ecx, edx; 1365 unsigned int eax, ebx, ecx, edx;
1366 unsigned int highest_cstate = 0; 1366 unsigned int highest_cstate = 0;
1367 unsigned int highest_subcstate = 0; 1367 unsigned int highest_subcstate = 0;
1368 int i;
1369 void *mwait_ptr; 1368 void *mwait_ptr;
1370 struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); 1369 int i;
1371 1370
1372 if (!this_cpu_has(X86_FEATURE_MWAIT)) 1371 if (!this_cpu_has(X86_FEATURE_MWAIT))
1373 return; 1372 return;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 4903a03ae876..59b7fc453277 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -410,9 +410,8 @@ void __init init_mem_mapping(void)
410 /* the ISA range is always mapped regardless of memory holes */ 410 /* the ISA range is always mapped regardless of memory holes */
411 init_memory_mapping(0, ISA_END_ADDRESS); 411 init_memory_mapping(0, ISA_END_ADDRESS);
412 412
413 /* xen has big range in reserved near end of ram, skip it at first */ 413 /* xen has big range in reserved near end of ram, skip it at first.*/
414 addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, 414 addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
415 PAGE_SIZE);
416 real_end = addr + PMD_SIZE; 415 real_end = addr + PMD_SIZE;
417 416
418 /* step_size need to be small so pgt_buf from BRK could cover it */ 417 /* step_size need to be small so pgt_buf from BRK could cover it */
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2610bd93c896..657438858e83 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -563,6 +563,13 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
563 if (base > __pa(high_memory-1)) 563 if (base > __pa(high_memory-1))
564 return 0; 564 return 0;
565 565
566 /*
567 * some areas in the middle of the kernel identity range
568 * are not mapped, like the PCI space.
569 */
570 if (!page_is_ram(base >> PAGE_SHIFT))
571 return 0;
572
566 id_sz = (__pa(high_memory-1) <= base + size) ? 573 id_sz = (__pa(high_memory-1) <= base + size) ?
567 __pa(high_memory) - base : 574 __pa(high_memory) - base :
568 size; 575 size;
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index ef6f155469b5..40a84cc6740c 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -36,12 +36,11 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
36{ 36{
37 if (acpi_disabled) 37 if (acpi_disabled)
38 return -ENODEV; 38 return -ENODEV;
39 if (type && type->bus && type->find_device) { 39 if (type && type->match && type->find_device) {
40 down_write(&bus_type_sem); 40 down_write(&bus_type_sem);
41 list_add_tail(&type->list, &bus_type_list); 41 list_add_tail(&type->list, &bus_type_list);
42 up_write(&bus_type_sem); 42 up_write(&bus_type_sem);
43 printk(KERN_INFO PREFIX "bus type %s registered\n", 43 printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
44 type->bus->name);
45 return 0; 44 return 0;
46 } 45 }
47 return -ENODEV; 46 return -ENODEV;
@@ -56,24 +55,21 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
56 down_write(&bus_type_sem); 55 down_write(&bus_type_sem);
57 list_del_init(&type->list); 56 list_del_init(&type->list);
58 up_write(&bus_type_sem); 57 up_write(&bus_type_sem);
59 printk(KERN_INFO PREFIX "ACPI bus type %s unregistered\n", 58 printk(KERN_INFO PREFIX "bus type %s unregistered\n",
60 type->bus->name); 59 type->name);
61 return 0; 60 return 0;
62 } 61 }
63 return -ENODEV; 62 return -ENODEV;
64} 63}
65EXPORT_SYMBOL_GPL(unregister_acpi_bus_type); 64EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
66 65
67static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type) 66static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
68{ 67{
69 struct acpi_bus_type *tmp, *ret = NULL; 68 struct acpi_bus_type *tmp, *ret = NULL;
70 69
71 if (!type)
72 return NULL;
73
74 down_read(&bus_type_sem); 70 down_read(&bus_type_sem);
75 list_for_each_entry(tmp, &bus_type_list, list) { 71 list_for_each_entry(tmp, &bus_type_list, list) {
76 if (tmp->bus == type) { 72 if (tmp->match(dev)) {
77 ret = tmp; 73 ret = tmp;
78 break; 74 break;
79 } 75 }
@@ -82,22 +78,6 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
82 return ret; 78 return ret;
83} 79}
84 80
85static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
86{
87 struct acpi_bus_type *tmp;
88 int ret = -ENODEV;
89
90 down_read(&bus_type_sem);
91 list_for_each_entry(tmp, &bus_type_list, list) {
92 if (tmp->find_bridge && !tmp->find_bridge(dev, handle)) {
93 ret = 0;
94 break;
95 }
96 }
97 up_read(&bus_type_sem);
98 return ret;
99}
100
101static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used, 81static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
102 void *addr_p, void **ret_p) 82 void *addr_p, void **ret_p)
103{ 83{
@@ -261,29 +241,12 @@ err:
261 241
262static int acpi_platform_notify(struct device *dev) 242static int acpi_platform_notify(struct device *dev)
263{ 243{
264 struct acpi_bus_type *type; 244 struct acpi_bus_type *type = acpi_get_bus_type(dev);
265 acpi_handle handle; 245 acpi_handle handle;
266 int ret; 246 int ret;
267 247
268 ret = acpi_bind_one(dev, NULL); 248 ret = acpi_bind_one(dev, NULL);
269 if (ret && (!dev->bus || !dev->parent)) { 249 if (ret && type) {
270 /* bridge devices genernally haven't bus or parent */
271 ret = acpi_find_bridge_device(dev, &handle);
272 if (!ret) {
273 ret = acpi_bind_one(dev, handle);
274 if (ret)
275 goto out;
276 }
277 }
278
279 type = acpi_get_bus_type(dev->bus);
280 if (ret) {
281 if (!type || !type->find_device) {
282 DBG("No ACPI bus support for %s\n", dev_name(dev));
283 ret = -EINVAL;
284 goto out;
285 }
286
287 ret = type->find_device(dev, &handle); 250 ret = type->find_device(dev, &handle);
288 if (ret) { 251 if (ret) {
289 DBG("Unable to get handle for %s\n", dev_name(dev)); 252 DBG("Unable to get handle for %s\n", dev_name(dev));
@@ -316,7 +279,7 @@ static int acpi_platform_notify_remove(struct device *dev)
316{ 279{
317 struct acpi_bus_type *type; 280 struct acpi_bus_type *type;
318 281
319 type = acpi_get_bus_type(dev->bus); 282 type = acpi_get_bus_type(dev);
320 if (type && type->cleanup) 283 if (type && type->cleanup)
321 type->cleanup(dev); 284 type->cleanup(dev);
322 285
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index eff722278ff5..164d49569aeb 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -158,8 +158,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
158 } 158 }
159 159
160exit: 160exit:
161 if (buffer.pointer) 161 kfree(buffer.pointer);
162 kfree(buffer.pointer);
163 return apic_id; 162 return apic_id;
164} 163}
165 164
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index df34bd04ae62..bec717ffd25f 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -559,7 +559,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
559 return 0; 559 return 0;
560#endif 560#endif
561 561
562 BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0)); 562 BUG_ON(pr->id >= nr_cpu_ids);
563 563
564 /* 564 /*
565 * Buggy BIOS check 565 * Buggy BIOS check
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6d3a06a629a1..24213033fbae 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -599,7 +599,6 @@ static void acpi_sleep_suspend_setup(void)
599 status = acpi_get_sleep_type_data(i, &type_a, &type_b); 599 status = acpi_get_sleep_type_data(i, &type_a, &type_b);
600 if (ACPI_SUCCESS(status)) { 600 if (ACPI_SUCCESS(status)) {
601 sleep_states[i] = 1; 601 sleep_states[i] = 1;
602 pr_cont(" S%d", i);
603 } 602 }
604 } 603 }
605 604
@@ -742,7 +741,6 @@ static void acpi_sleep_hibernate_setup(void)
742 hibernation_set_ops(old_suspend_ordering ? 741 hibernation_set_ops(old_suspend_ordering ?
743 &acpi_hibernation_ops_old : &acpi_hibernation_ops); 742 &acpi_hibernation_ops_old : &acpi_hibernation_ops);
744 sleep_states[ACPI_STATE_S4] = 1; 743 sleep_states[ACPI_STATE_S4] = 1;
745 pr_cont(KERN_CONT " S4");
746 if (nosigcheck) 744 if (nosigcheck)
747 return; 745 return;
748 746
@@ -788,6 +786,9 @@ int __init acpi_sleep_init(void)
788{ 786{
789 acpi_status status; 787 acpi_status status;
790 u8 type_a, type_b; 788 u8 type_a, type_b;
789 char supported[ACPI_S_STATE_COUNT * 3 + 1];
790 char *pos = supported;
791 int i;
791 792
792 if (acpi_disabled) 793 if (acpi_disabled)
793 return 0; 794 return 0;
@@ -795,7 +796,6 @@ int __init acpi_sleep_init(void)
795 acpi_sleep_dmi_check(); 796 acpi_sleep_dmi_check();
796 797
797 sleep_states[ACPI_STATE_S0] = 1; 798 sleep_states[ACPI_STATE_S0] = 1;
798 pr_info(PREFIX "(supports S0");
799 799
800 acpi_sleep_suspend_setup(); 800 acpi_sleep_suspend_setup();
801 acpi_sleep_hibernate_setup(); 801 acpi_sleep_hibernate_setup();
@@ -803,11 +803,17 @@ int __init acpi_sleep_init(void)
803 status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); 803 status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
804 if (ACPI_SUCCESS(status)) { 804 if (ACPI_SUCCESS(status)) {
805 sleep_states[ACPI_STATE_S5] = 1; 805 sleep_states[ACPI_STATE_S5] = 1;
806 pr_cont(" S5");
807 pm_power_off_prepare = acpi_power_off_prepare; 806 pm_power_off_prepare = acpi_power_off_prepare;
808 pm_power_off = acpi_power_off; 807 pm_power_off = acpi_power_off;
809 } 808 }
810 pr_cont(")\n"); 809
810 supported[0] = 0;
811 for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
812 if (sleep_states[i])
813 pos += sprintf(pos, " S%d", i);
814 }
815 pr_info(PREFIX "(supports%s)\n", supported);
816
811 /* 817 /*
812 * Register the tts_notifier to reboot notifier list so that the _TTS 818 * Register the tts_notifier to reboot notifier list so that the _TTS
813 * object can also be evaluated when the system enters S5. 819 * object can also be evaluated when the system enters S5.
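
The sleep.c hunk drops the pr_cont() chain, which other printk() callers can interleave with, and instead builds the list of supported states in a local buffer that is printed once. Each entry " S<n>" takes three bytes while n is a single digit, hence the ACPI_S_STATE_COUNT * 3 + 1 sizing. A userspace sketch of the same pos += sprintf() pattern:

#include <stdio.h>

#define STATE_COUNT 6

int main(void)
{
        int sleep_states[STATE_COUNT] = { 1, 1, 0, 1, 1, 1 };
        char supported[STATE_COUNT * 3 + 1];
        char *pos = supported;
        int i;

        supported[0] = '\0';
        for (i = 0; i < STATE_COUNT; i++)
                if (sleep_states[i])
                        pos += sprintf(pos, " S%d", i);

        printf("ACPI: (supports%s)\n", supported);  /* " S0 S1 S3 S4 S5" */
        return 0;
}
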
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 0ea1018280bd..beea3115577e 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -1144,13 +1144,8 @@ static int ata_acpi_find_device(struct device *dev, acpi_handle *handle)
1144 return -ENODEV; 1144 return -ENODEV;
1145} 1145}
1146 1146
1147static int ata_acpi_find_dummy(struct device *dev, acpi_handle *handle)
1148{
1149 return -ENODEV;
1150}
1151
1152static struct acpi_bus_type ata_acpi_bus = { 1147static struct acpi_bus_type ata_acpi_bus = {
1153 .find_bridge = ata_acpi_find_dummy, 1148 .name = "ATA",
1154 .find_device = ata_acpi_find_device, 1149 .find_device = ata_acpi_find_device,
1155}; 1150};
1156 1151
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2b7f77d3fcb0..15beb500a4e4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
99 dev_warn(dev, "parent %s should not be sleeping\n", 99 dev_warn(dev, "parent %s should not be sleeping\n",
100 dev_name(dev->parent)); 100 dev_name(dev->parent));
101 list_add_tail(&dev->power.entry, &dpm_list); 101 list_add_tail(&dev->power.entry, &dpm_list);
102 dev_pm_qos_constraints_init(dev);
103 mutex_unlock(&dpm_list_mtx); 102 mutex_unlock(&dpm_list_mtx);
104} 103}
105 104
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
113 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 112 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
114 complete_all(&dev->power.completion); 113 complete_all(&dev->power.completion);
115 mutex_lock(&dpm_list_mtx); 114 mutex_lock(&dpm_list_mtx);
116 dev_pm_qos_constraints_destroy(dev);
117 list_del_init(&dev->power.entry); 115 list_del_init(&dev->power.entry);
118 mutex_unlock(&dpm_list_mtx); 116 mutex_unlock(&dpm_list_mtx);
119 device_wakeup_disable(dev); 117 device_wakeup_disable(dev);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b16686a0a5a2..cfc3226ec492 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
4{ 4{
5 if (!dev->power.early_init) { 5 if (!dev->power.early_init) {
6 spin_lock_init(&dev->power.lock); 6 spin_lock_init(&dev->power.lock);
7 dev->power.power_state = PMSG_INVALID; 7 dev->power.qos = NULL;
8 dev->power.early_init = true; 8 dev->power.early_init = true;
9 } 9 }
10} 10}
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
56 56
57static inline void device_pm_sleep_init(struct device *dev) {} 57static inline void device_pm_sleep_init(struct device *dev) {}
58 58
59static inline void device_pm_add(struct device *dev) 59static inline void device_pm_add(struct device *dev) {}
60{
61 dev_pm_qos_constraints_init(dev);
62}
63 60
64static inline void device_pm_remove(struct device *dev) 61static inline void device_pm_remove(struct device *dev)
65{ 62{
66 dev_pm_qos_constraints_destroy(dev);
67 pm_runtime_remove(dev); 63 pm_runtime_remove(dev);
68} 64}
69 65
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3d4d1f8aac5c..5f74587ef258 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <linux/export.h> 42#include <linux/export.h>
43#include <linux/pm_runtime.h> 43#include <linux/pm_runtime.h>
44#include <linux/err.h>
44 45
45#include "power.h" 46#include "power.h"
46 47
@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
61 struct pm_qos_flags *pqf; 62 struct pm_qos_flags *pqf;
62 s32 val; 63 s32 val;
63 64
64 if (!qos) 65 if (IS_ERR_OR_NULL(qos))
65 return PM_QOS_FLAGS_UNDEFINED; 66 return PM_QOS_FLAGS_UNDEFINED;
66 67
67 pqf = &qos->flags; 68 pqf = &qos->flags;
@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
101 */ 102 */
102s32 __dev_pm_qos_read_value(struct device *dev) 103s32 __dev_pm_qos_read_value(struct device *dev)
103{ 104{
104 return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0; 105 return IS_ERR_OR_NULL(dev->power.qos) ?
106 0 : pm_qos_read_value(&dev->power.qos->latency);
105} 107}
106 108
107/** 109/**
@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
198 return 0; 200 return 0;
199} 201}
200 202
201/** 203static void __dev_pm_qos_hide_latency_limit(struct device *dev);
202 * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer. 204static void __dev_pm_qos_hide_flags(struct device *dev);
203 * @dev: target device
204 *
205 * Called from the device PM subsystem during device insertion under
206 * device_pm_lock().
207 */
208void dev_pm_qos_constraints_init(struct device *dev)
209{
210 mutex_lock(&dev_pm_qos_mtx);
211 dev->power.qos = NULL;
212 dev->power.power_state = PMSG_ON;
213 mutex_unlock(&dev_pm_qos_mtx);
214}
215 205
216/** 206/**
217 * dev_pm_qos_constraints_destroy 207 * dev_pm_qos_constraints_destroy
@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
226 struct pm_qos_constraints *c; 216 struct pm_qos_constraints *c;
227 struct pm_qos_flags *f; 217 struct pm_qos_flags *f;
228 218
219 mutex_lock(&dev_pm_qos_mtx);
220
229 /* 221 /*
230 * If the device's PM QoS resume latency limit or PM QoS flags have been 222 * If the device's PM QoS resume latency limit or PM QoS flags have been
231 * exposed to user space, they have to be hidden at this point. 223 * exposed to user space, they have to be hidden at this point.
232 */ 224 */
233 dev_pm_qos_hide_latency_limit(dev); 225 __dev_pm_qos_hide_latency_limit(dev);
234 dev_pm_qos_hide_flags(dev); 226 __dev_pm_qos_hide_flags(dev);
235 227
236 mutex_lock(&dev_pm_qos_mtx);
237
238 dev->power.power_state = PMSG_INVALID;
239 qos = dev->power.qos; 228 qos = dev->power.qos;
240 if (!qos) 229 if (!qos)
241 goto out; 230 goto out;
@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
257 } 246 }
258 247
259 spin_lock_irq(&dev->power.lock); 248 spin_lock_irq(&dev->power.lock);
260 dev->power.qos = NULL; 249 dev->power.qos = ERR_PTR(-ENODEV);
261 spin_unlock_irq(&dev->power.lock); 250 spin_unlock_irq(&dev->power.lock);
262 251
263 kfree(c->notifiers); 252 kfree(c->notifiers);
@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
301 "%s() called for already added request\n", __func__)) 290 "%s() called for already added request\n", __func__))
302 return -EINVAL; 291 return -EINVAL;
303 292
304 req->dev = dev;
305
306 mutex_lock(&dev_pm_qos_mtx); 293 mutex_lock(&dev_pm_qos_mtx);
307 294
308 if (!dev->power.qos) { 295 if (IS_ERR(dev->power.qos))
309 if (dev->power.power_state.event == PM_EVENT_INVALID) { 296 ret = -ENODEV;
310 /* The device has been removed from the system. */ 297 else if (!dev->power.qos)
311 req->dev = NULL; 298 ret = dev_pm_qos_constraints_allocate(dev);
312 ret = -ENODEV;
313 goto out;
314 } else {
315 /*
316 * Allocate the constraints data on the first call to
317 * add_request, i.e. only if the data is not already
318 * allocated and if the device has not been removed.
319 */
320 ret = dev_pm_qos_constraints_allocate(dev);
321 }
322 }
323 299
324 if (!ret) { 300 if (!ret) {
301 req->dev = dev;
325 req->type = type; 302 req->type = type;
326 ret = apply_constraint(req, PM_QOS_ADD_REQ, value); 303 ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
327 } 304 }
328 305
329 out:
330 mutex_unlock(&dev_pm_qos_mtx); 306 mutex_unlock(&dev_pm_qos_mtx);
331 307
332 return ret; 308 return ret;
@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
344 s32 curr_value; 320 s32 curr_value;
345 int ret = 0; 321 int ret = 0;
346 322
347 if (!req->dev->power.qos) 323 if (!req) /*guard against callers passing in null */
324 return -EINVAL;
325
326 if (WARN(!dev_pm_qos_request_active(req),
327 "%s() called for unknown object\n", __func__))
328 return -EINVAL;
329
330 if (IS_ERR_OR_NULL(req->dev->power.qos))
348 return -ENODEV; 331 return -ENODEV;
349 332
350 switch(req->type) { 333 switch(req->type) {
@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
386{ 369{
387 int ret; 370 int ret;
388 371
372 mutex_lock(&dev_pm_qos_mtx);
373 ret = __dev_pm_qos_update_request(req, new_value);
374 mutex_unlock(&dev_pm_qos_mtx);
375 return ret;
376}
377EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
378
379static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
380{
381 int ret;
382
389 if (!req) /*guard against callers passing in null */ 383 if (!req) /*guard against callers passing in null */
390 return -EINVAL; 384 return -EINVAL;
391 385
@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
393 "%s() called for unknown object\n", __func__)) 387 "%s() called for unknown object\n", __func__))
394 return -EINVAL; 388 return -EINVAL;
395 389
396 mutex_lock(&dev_pm_qos_mtx); 390 if (IS_ERR_OR_NULL(req->dev->power.qos))
397 ret = __dev_pm_qos_update_request(req, new_value); 391 return -ENODEV;
398 mutex_unlock(&dev_pm_qos_mtx);
399 392
393 ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
394 memset(req, 0, sizeof(*req));
400 return ret; 395 return ret;
401} 396}
402EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
403 397
404/** 398/**
405 * dev_pm_qos_remove_request - modifies an existing qos request 399 * dev_pm_qos_remove_request - modifies an existing qos request
@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
418 */ 412 */
419int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) 413int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
420{ 414{
421 int ret = 0; 415 int ret;
422
423 if (!req) /*guard against callers passing in null */
424 return -EINVAL;
425
426 if (WARN(!dev_pm_qos_request_active(req),
427 "%s() called for unknown object\n", __func__))
428 return -EINVAL;
429 416
430 mutex_lock(&dev_pm_qos_mtx); 417 mutex_lock(&dev_pm_qos_mtx);
431 418 ret = __dev_pm_qos_remove_request(req);
432 if (req->dev->power.qos) {
433 ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
434 PM_QOS_DEFAULT_VALUE);
435 memset(req, 0, sizeof(*req));
436 } else {
437 /* Return if the device has been removed */
438 ret = -ENODEV;
439 }
440
441 mutex_unlock(&dev_pm_qos_mtx); 419 mutex_unlock(&dev_pm_qos_mtx);
442 return ret; 420 return ret;
443} 421}
@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
462 440
463 mutex_lock(&dev_pm_qos_mtx); 441 mutex_lock(&dev_pm_qos_mtx);
464 442
465 if (!dev->power.qos) 443 if (IS_ERR(dev->power.qos))
466 ret = dev->power.power_state.event != PM_EVENT_INVALID ? 444 ret = -ENODEV;
467 dev_pm_qos_constraints_allocate(dev) : -ENODEV; 445 else if (!dev->power.qos)
446 ret = dev_pm_qos_constraints_allocate(dev);
468 447
469 if (!ret) 448 if (!ret)
470 ret = blocking_notifier_chain_register( 449 ret = blocking_notifier_chain_register(
@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
493 mutex_lock(&dev_pm_qos_mtx); 472 mutex_lock(&dev_pm_qos_mtx);
494 473
495 /* Silently return if the constraints object is not present. */ 474 /* Silently return if the constraints object is not present. */
496 if (dev->power.qos) 475 if (!IS_ERR_OR_NULL(dev->power.qos))
497 retval = blocking_notifier_chain_unregister( 476 retval = blocking_notifier_chain_unregister(
498 dev->power.qos->latency.notifiers, 477 dev->power.qos->latency.notifiers,
499 notifier); 478 notifier);
@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
563static void __dev_pm_qos_drop_user_request(struct device *dev, 542static void __dev_pm_qos_drop_user_request(struct device *dev,
564 enum dev_pm_qos_req_type type) 543 enum dev_pm_qos_req_type type)
565{ 544{
545 struct dev_pm_qos_request *req = NULL;
546
566 switch(type) { 547 switch(type) {
567 case DEV_PM_QOS_LATENCY: 548 case DEV_PM_QOS_LATENCY:
568 dev_pm_qos_remove_request(dev->power.qos->latency_req); 549 req = dev->power.qos->latency_req;
569 dev->power.qos->latency_req = NULL; 550 dev->power.qos->latency_req = NULL;
570 break; 551 break;
571 case DEV_PM_QOS_FLAGS: 552 case DEV_PM_QOS_FLAGS:
572 dev_pm_qos_remove_request(dev->power.qos->flags_req); 553 req = dev->power.qos->flags_req;
573 dev->power.qos->flags_req = NULL; 554 dev->power.qos->flags_req = NULL;
574 break; 555 break;
575 } 556 }
557 __dev_pm_qos_remove_request(req);
558 kfree(req);
576} 559}
577 560
578/** 561/**
@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
588 if (!device_is_registered(dev) || value < 0) 571 if (!device_is_registered(dev) || value < 0)
589 return -EINVAL; 572 return -EINVAL;
590 573
591 if (dev->power.qos && dev->power.qos->latency_req)
592 return -EEXIST;
593
594 req = kzalloc(sizeof(*req), GFP_KERNEL); 574 req = kzalloc(sizeof(*req), GFP_KERNEL);
595 if (!req) 575 if (!req)
596 return -ENOMEM; 576 return -ENOMEM;
597 577
598 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); 578 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
599 if (ret < 0) 579 if (ret < 0) {
580 kfree(req);
600 return ret; 581 return ret;
582 }
583
584 mutex_lock(&dev_pm_qos_mtx);
585
586 if (IS_ERR_OR_NULL(dev->power.qos))
587 ret = -ENODEV;
588 else if (dev->power.qos->latency_req)
589 ret = -EEXIST;
590
591 if (ret < 0) {
592 __dev_pm_qos_remove_request(req);
593 kfree(req);
594 goto out;
595 }
601 596
602 dev->power.qos->latency_req = req; 597 dev->power.qos->latency_req = req;
603 ret = pm_qos_sysfs_add_latency(dev); 598 ret = pm_qos_sysfs_add_latency(dev);
604 if (ret) 599 if (ret)
605 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 600 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
606 601
602 out:
603 mutex_unlock(&dev_pm_qos_mtx);
607 return ret; 604 return ret;
608} 605}
609EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); 606EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
610 607
608static void __dev_pm_qos_hide_latency_limit(struct device *dev)
609{
610 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
611 pm_qos_sysfs_remove_latency(dev);
612 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
613 }
614}
615
611/** 616/**
612 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space. 617 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
613 * @dev: Device whose PM QoS latency limit is to be hidden from user space. 618 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
614 */ 619 */
615void dev_pm_qos_hide_latency_limit(struct device *dev) 620void dev_pm_qos_hide_latency_limit(struct device *dev)
616{ 621{
617 if (dev->power.qos && dev->power.qos->latency_req) { 622 mutex_lock(&dev_pm_qos_mtx);
618 pm_qos_sysfs_remove_latency(dev); 623 __dev_pm_qos_hide_latency_limit(dev);
619 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 624 mutex_unlock(&dev_pm_qos_mtx);
620 }
621} 625}
622EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); 626EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
623 627
@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
634 if (!device_is_registered(dev)) 638 if (!device_is_registered(dev))
635 return -EINVAL; 639 return -EINVAL;
636 640
637 if (dev->power.qos && dev->power.qos->flags_req)
638 return -EEXIST;
639
640 req = kzalloc(sizeof(*req), GFP_KERNEL); 641 req = kzalloc(sizeof(*req), GFP_KERNEL);
641 if (!req) 642 if (!req)
642 return -ENOMEM; 643 return -ENOMEM;
643 644
644 pm_runtime_get_sync(dev);
645 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); 645 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
646 if (ret < 0) 646 if (ret < 0) {
647 goto fail; 647 kfree(req);
648 return ret;
649 }
650
651 pm_runtime_get_sync(dev);
652 mutex_lock(&dev_pm_qos_mtx);
653
654 if (IS_ERR_OR_NULL(dev->power.qos))
655 ret = -ENODEV;
656 else if (dev->power.qos->flags_req)
657 ret = -EEXIST;
658
659 if (ret < 0) {
660 __dev_pm_qos_remove_request(req);
661 kfree(req);
662 goto out;
663 }
648 664
649 dev->power.qos->flags_req = req; 665 dev->power.qos->flags_req = req;
650 ret = pm_qos_sysfs_add_flags(dev); 666 ret = pm_qos_sysfs_add_flags(dev);
651 if (ret) 667 if (ret)
652 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 668 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
653 669
654fail: 670 out:
671 mutex_unlock(&dev_pm_qos_mtx);
655 pm_runtime_put(dev); 672 pm_runtime_put(dev);
656 return ret; 673 return ret;
657} 674}
658EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); 675EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
659 676
677static void __dev_pm_qos_hide_flags(struct device *dev)
678{
679 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
680 pm_qos_sysfs_remove_flags(dev);
681 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
682 }
683}
684
660/** 685/**
661 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space. 686 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
662 * @dev: Device whose PM QoS flags are to be hidden from user space. 687 * @dev: Device whose PM QoS flags are to be hidden from user space.
663 */ 688 */
664void dev_pm_qos_hide_flags(struct device *dev) 689void dev_pm_qos_hide_flags(struct device *dev)
665{ 690{
666 if (dev->power.qos && dev->power.qos->flags_req) { 691 pm_runtime_get_sync(dev);
667 pm_qos_sysfs_remove_flags(dev); 692 mutex_lock(&dev_pm_qos_mtx);
668 pm_runtime_get_sync(dev); 693 __dev_pm_qos_hide_flags(dev);
669 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 694 mutex_unlock(&dev_pm_qos_mtx);
670 pm_runtime_put(dev); 695 pm_runtime_put(dev);
671 }
672} 696}
673EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); 697EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
674 698
@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
683 s32 value; 707 s32 value;
684 int ret; 708 int ret;
685 709
686 if (!dev->power.qos || !dev->power.qos->flags_req)
687 return -EINVAL;
688
689 pm_runtime_get_sync(dev); 710 pm_runtime_get_sync(dev);
690 mutex_lock(&dev_pm_qos_mtx); 711 mutex_lock(&dev_pm_qos_mtx);
691 712
713 if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
714 ret = -EINVAL;
715 goto out;
716 }
717
692 value = dev_pm_qos_requested_flags(dev); 718 value = dev_pm_qos_requested_flags(dev);
693 if (set) 719 if (set)
694 value |= mask; 720 value |= mask;
@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
697 723
698 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); 724 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
699 725
726 out:
700 mutex_unlock(&dev_pm_qos_mtx); 727 mutex_unlock(&dev_pm_qos_mtx);
701 pm_runtime_put(dev); 728 pm_runtime_put(dev);
702
703 return ret; 729 return ret;
704} 730}
731#else /* !CONFIG_PM_RUNTIME */
732static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
733static void __dev_pm_qos_hide_flags(struct device *dev) {}
705#endif /* CONFIG_PM_RUNTIME */ 734#endif /* CONFIG_PM_RUNTIME */
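
Across the qos.c changes, dev->power.qos replaces the old power_state bookkeeping as the single source of truth: NULL means the constraints were never allocated, ERR_PTR(-ENODEV) means the device is going away, and anything else is a live object, which is why the checks become IS_ERR() / IS_ERR_OR_NULL(). A minimal userspace model of that ERR_PTR() convention (the real macros live in <linux/err.h>; the ones below are simplified stand-ins):

#include <stdio.h>

#define MAX_ERRNO 4095
#define ENODEV 19

static inline void *err_ptr(long error) { return (void *)error; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
        /* errno values occupy the top MAX_ERRNO addresses */
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}
static inline int is_err_or_null(const void *p) { return !p || is_err(p); }

int main(void)
{
        void *qos = NULL;                       /* not allocated yet  */
        printf("null: %d\n", is_err_or_null(qos));

        qos = err_ptr(-ENODEV);                 /* device was removed */
        printf("removed: %d err=%ld\n", is_err(qos), ptr_err(qos));
        return 0;
}
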
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 50d16e3cb0a9..a53ebd265701 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
708 708
709void dpm_sysfs_remove(struct device *dev) 709void dpm_sysfs_remove(struct device *dev)
710{ 710{
711 dev_pm_qos_constraints_destroy(dev);
711 rpm_sysfs_remove(dev); 712 rpm_sysfs_remove(dev);
712 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 713 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
713 sysfs_remove_group(&dev->kobj, &pm_attr_group); 714 sysfs_remove_group(&dev->kobj, &pm_attr_group);
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4706c63d0bc6..020ea2b9fd2f 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -184,6 +184,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
184 if (ret < 0) { 184 if (ret < 0) {
185 dev_err(map->dev, "IRQ thread failed to resume: %d\n", 185 dev_err(map->dev, "IRQ thread failed to resume: %d\n",
186 ret); 186 ret);
187 pm_runtime_put(map->dev);
187 return IRQ_NONE; 188 return IRQ_NONE;
188 } 189 }
189 } 190 }
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index d3bde6cec927..30629a3d44cc 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -404,6 +404,8 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
404 return; 404 return;
405 } 405 }
406 406
407 spin_lock_init(&pc_host->cfgspace_lock);
408
407 pc->host_controller = pc_host; 409 pc->host_controller = pc_host;
408 pc_host->pci_controller.io_resource = &pc_host->io_resource; 410 pc_host->pci_controller.io_resource = &pc_host->io_resource;
409 pc_host->pci_controller.mem_resource = &pc_host->mem_resource; 411 pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 1bafb40ec8a2..69ae5972713c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -40,6 +40,7 @@
40#include <linux/init.h> 40#include <linux/init.h>
41#include <linux/miscdevice.h> 41#include <linux/miscdevice.h>
42#include <linux/delay.h> 42#include <linux/delay.h>
43#include <linux/slab.h>
43#include <asm/uaccess.h> 44#include <asm/uaccess.h>
44 45
45 46
@@ -52,8 +53,12 @@ static struct hwrng *current_rng;
52static LIST_HEAD(rng_list); 53static LIST_HEAD(rng_list);
53static DEFINE_MUTEX(rng_mutex); 54static DEFINE_MUTEX(rng_mutex);
54static int data_avail; 55static int data_avail;
55static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES] 56static u8 *rng_buffer;
56 __cacheline_aligned; 57
58static size_t rng_buffer_size(void)
59{
60 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
61}
57 62
58static inline int hwrng_init(struct hwrng *rng) 63static inline int hwrng_init(struct hwrng *rng)
59{ 64{
@@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
116 121
117 if (!data_avail) { 122 if (!data_avail) {
118 bytes_read = rng_get_data(current_rng, rng_buffer, 123 bytes_read = rng_get_data(current_rng, rng_buffer,
119 sizeof(rng_buffer), 124 rng_buffer_size(),
120 !(filp->f_flags & O_NONBLOCK)); 125 !(filp->f_flags & O_NONBLOCK));
121 if (bytes_read < 0) { 126 if (bytes_read < 0) {
122 err = bytes_read; 127 err = bytes_read;
@@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng)
307 312
308 mutex_lock(&rng_mutex); 313 mutex_lock(&rng_mutex);
309 314
315 /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
316 err = -ENOMEM;
317 if (!rng_buffer) {
318 rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
319 if (!rng_buffer)
320 goto out_unlock;
321 }
322
310 /* Must not register two RNGs with the same name. */ 323 /* Must not register two RNGs with the same name. */
311 err = -EEXIST; 324 err = -EEXIST;
312 list_for_each_entry(tmp, &rng_list, list) { 325 list_for_each_entry(tmp, &rng_list, list) {
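
The hw_random hunk turns the static cacheline-aligned array into a buffer allocated on the first hwrng_register() call while rng_mutex is held, so the memory comes from kmalloc() and virt_to_page() works on it. A sketch of that allocate-once-under-the-lock pattern, with a pthread mutex standing in for rng_mutex and invented names throughout:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t rng_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned char *rng_buffer;

static size_t rng_buffer_size(void)
{
        return 64;      /* stand-in for max(SMP_CACHE_BYTES, 32) */
}

static int register_source(const char *name)
{
        int err = 0;

        pthread_mutex_lock(&rng_mutex);
        if (!rng_buffer) {              /* allocate once, on first use */
                rng_buffer = malloc(rng_buffer_size());
                if (!rng_buffer)
                        err = -1;
        }
        if (!err)
                printf("registered %s, buffer %zu bytes\n",
                       name, rng_buffer_size());
        pthread_mutex_unlock(&rng_mutex);
        return err;
}

int main(void)
{
        register_source("example-rng");
        register_source("another-rng"); /* reuses the same buffer */
        free(rng_buffer);
        return 0;
}
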
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 594bda9dcfc8..32a6c5764950 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -852,6 +852,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
852 int reserved) 852 int reserved)
853{ 853{
854 unsigned long flags; 854 unsigned long flags;
855 int wakeup_write = 0;
855 856
856 /* Hold lock while accounting */ 857 /* Hold lock while accounting */
857 spin_lock_irqsave(&r->lock, flags); 858 spin_lock_irqsave(&r->lock, flags);
@@ -873,10 +874,8 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
873 else 874 else
874 r->entropy_count = reserved; 875 r->entropy_count = reserved;
875 876
876 if (r->entropy_count < random_write_wakeup_thresh) { 877 if (r->entropy_count < random_write_wakeup_thresh)
877 wake_up_interruptible(&random_write_wait); 878 wakeup_write = 1;
878 kill_fasync(&fasync, SIGIO, POLL_OUT);
879 }
880 } 879 }
881 880
882 DEBUG_ENT("debiting %zu entropy credits from %s%s\n", 881 DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
@@ -884,6 +883,11 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
884 883
885 spin_unlock_irqrestore(&r->lock, flags); 884 spin_unlock_irqrestore(&r->lock, flags);
886 885
886 if (wakeup_write) {
887 wake_up_interruptible(&random_write_wait);
888 kill_fasync(&fasync, SIGIO, POLL_OUT);
889 }
890
887 return nbytes; 891 return nbytes;
888} 892}
889 893
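
The random.c hunk keeps the decision to wake writers inside the spinlocked section but moves the wake_up_interruptible()/kill_fasync() calls after the unlock, so the wait-queue work no longer runs under r->lock. A userspace sketch of that decide-under-the-lock, notify-after-it shape (mutex and numbers are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t write_wait = PTHREAD_COND_INITIALIZER;
static int entropy_count = 100;

static void debit(int nbytes, int wakeup_thresh)
{
        int wakeup_write = 0;

        pthread_mutex_lock(&lock);
        entropy_count -= nbytes * 8;
        if (entropy_count < 0)
                entropy_count = 0;
        if (entropy_count < wakeup_thresh)
                wakeup_write = 1;       /* only remember the decision */
        pthread_mutex_unlock(&lock);

        if (wakeup_write) {             /* notify with the lock dropped */
                pthread_cond_broadcast(&write_wait);
                printf("woke writers, entropy_count=%d\n", entropy_count);
        }
}

int main(void)
{
        debit(4, 128);
        return 0;
}
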
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index fce2000eec31..1110478dd0fd 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
313 (task_active_pid_ns(current) != &init_pid_ns)) 313 (task_active_pid_ns(current) != &init_pid_ns))
314 return; 314 return;
315 315
316 /* Can only change if privileged. */
317 if (!capable(CAP_NET_ADMIN)) {
318 err = EPERM;
319 goto out;
320 }
321
316 mc_op = (enum proc_cn_mcast_op *)msg->data; 322 mc_op = (enum proc_cn_mcast_op *)msg->data;
317 switch (*mc_op) { 323 switch (*mc_op) {
318 case PROC_CN_MCAST_LISTEN: 324 case PROC_CN_MCAST_LISTEN:
@@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
325 err = EINVAL; 331 err = EINVAL;
326 break; 332 break;
327 } 333 }
334
335out:
328 cn_proc_ack(err, msg->seq, msg->ack); 336 cn_proc_ack(err, msg->seq, msg->ack);
329} 337}
330 338
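
The cn_proc.c hunk rejects multicast-control requests from unprivileged callers with EPERM (the connector reports positive errno values) while still routing the result through the common ack. A sketch of that shape, with a stub standing in for the kernel's capable(CAP_NET_ADMIN) check:

#include <errno.h>
#include <stdio.h>

static int caller_is_privileged;        /* stand-in for capable(CAP_NET_ADMIN) */

static void ack(int err) { printf("ack err=%d\n", err); }

static void mcast_ctl(int op)
{
        int err = 0;

        if (!caller_is_privileged) {
                err = EPERM;            /* reject, but still ack below */
                goto out;
        }

        switch (op) {
        case 1: printf("listen\n"); break;
        case 2: printf("ignore\n"); break;
        default: err = EINVAL; break;
        }
out:
        ack(err);
}

int main(void)
{
        mcast_ctl(1);                   /* rejected: EPERM */
        caller_is_privileged = 1;
        mcast_ctl(1);                   /* accepted */
        return 0;
}
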
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index d2ac91150600..46bde01eee62 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -64,7 +64,7 @@ static void *get_cpu_dbs_info_s(int cpu) \
64 * dbs: used as a shortform for demand based switching It helps to keep variable 64 * dbs: used as a shortform for demand based switching It helps to keep variable
65 * names smaller, simpler 65 * names smaller, simpler
66 * cdbs: common dbs 66 * cdbs: common dbs
67 * on_*: On-demand governor 67 * od_*: On-demand governor
68 * cs_*: Conservative governor 68 * cs_*: Conservative governor
69 */ 69 */
70 70
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index 66e3a71b81a3..b61b5a3fad64 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -28,13 +28,7 @@
28 28
29static int hb_voltage_change(unsigned int freq) 29static int hb_voltage_change(unsigned int freq)
30{ 30{
31 int i; 31 u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
32 u32 msg[HB_CPUFREQ_IPC_LEN];
33
34 msg[0] = HB_CPUFREQ_CHANGE_NOTE;
35 msg[1] = freq / 1000000;
36 for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
37 msg[i] = 0;
38 32
39 return pl320_ipc_transmit(msg); 33 return pl320_ipc_transmit(msg);
40} 34}
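
The highbank hunk relies on a C guarantee: an array initializer that names fewer elements than the array zero-fills the rest, so the one-line msg[] form is equivalent to the old explicit loop. A tiny sketch (the length and values are illustrative):

#include <stdio.h>

#define IPC_LEN 7

int main(void)
{
        unsigned int freq = 1500000000u;
        unsigned int msg[IPC_LEN] = { 0x27, freq / 1000000 };
        int i;

        for (i = 0; i < IPC_LEN; i++)
                printf("msg[%d] = %u\n", i, msg[i]);    /* [2..6] are 0 */
        return 0;
}
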
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 096fde0ebcb5..f6dd1e761129 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -662,6 +662,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
662 662
663 cpu = all_cpu_data[policy->cpu]; 663 cpu = all_cpu_data[policy->cpu];
664 664
665 if (!policy->cpuinfo.max_freq)
666 return -ENODEV;
667
665 intel_pstate_get_min_max(cpu, &min, &max); 668 intel_pstate_get_min_max(cpu, &min, &max);
666 669
667 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 670 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -747,37 +750,11 @@ static struct cpufreq_driver intel_pstate_driver = {
747 .owner = THIS_MODULE, 750 .owner = THIS_MODULE,
748}; 751};
749 752
750static void intel_pstate_exit(void)
751{
752 int cpu;
753
754 sysfs_remove_group(intel_pstate_kobject,
755 &intel_pstate_attr_group);
756 debugfs_remove_recursive(debugfs_parent);
757
758 cpufreq_unregister_driver(&intel_pstate_driver);
759
760 if (!all_cpu_data)
761 return;
762
763 get_online_cpus();
764 for_each_online_cpu(cpu) {
765 if (all_cpu_data[cpu]) {
766 del_timer_sync(&all_cpu_data[cpu]->timer);
767 kfree(all_cpu_data[cpu]);
768 }
769 }
770
771 put_online_cpus();
772 vfree(all_cpu_data);
773}
774module_exit(intel_pstate_exit);
775
776static int __initdata no_load; 753static int __initdata no_load;
777 754
778static int __init intel_pstate_init(void) 755static int __init intel_pstate_init(void)
779{ 756{
780 int rc = 0; 757 int cpu, rc = 0;
781 const struct x86_cpu_id *id; 758 const struct x86_cpu_id *id;
782 759
783 if (no_load) 760 if (no_load)
@@ -802,7 +779,16 @@ static int __init intel_pstate_init(void)
802 intel_pstate_sysfs_expose_params(); 779 intel_pstate_sysfs_expose_params();
803 return rc; 780 return rc;
804out: 781out:
805 intel_pstate_exit(); 782 get_online_cpus();
783 for_each_online_cpu(cpu) {
784 if (all_cpu_data[cpu]) {
785 del_timer_sync(&all_cpu_data[cpu]->timer);
786 kfree(all_cpu_data[cpu]);
787 }
788 }
789
790 put_online_cpus();
791 vfree(all_cpu_data);
806 return -ENODEV; 792 return -ENODEV;
807} 793}
808device_initcall(intel_pstate_init); 794device_initcall(intel_pstate_init);
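
With intel_pstate_exit() gone, the init error path unwinds inline: stop each per-CPU timer, free each per-CPU structure, then free the pointer array. A rough userspace sketch of that unwind order, with malloc()/free() standing in for kzalloc()/kfree()/vfree() and the timers omitted:

#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

int main(void)
{
        struct cpudata { int pstate; } **all_cpu_data;
        int cpu;

        all_cpu_data = calloc(NCPUS, sizeof(*all_cpu_data));
        if (!all_cpu_data)
                return 1;
        all_cpu_data[0] = malloc(sizeof(struct cpudata));   /* partial init */

        /* ... registration fails, unwind ... */
        for (cpu = 0; cpu < NCPUS; cpu++)
                free(all_cpu_data[cpu]);        /* free(NULL) is a no-op */
        free(all_cpu_data);
        printf("unwound\n");
        return 0;
}
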
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 7320bf891706..bea32d1ef7d5 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -426,6 +426,44 @@ get_var_data(struct efivars *efivars, struct efi_variable *var)
426 return status; 426 return status;
427} 427}
428 428
429static efi_status_t
430check_var_size_locked(struct efivars *efivars, u32 attributes,
431 unsigned long size)
432{
433 u64 storage_size, remaining_size, max_size;
434 efi_status_t status;
435 const struct efivar_operations *fops = efivars->ops;
436
437 if (!efivars->ops->query_variable_info)
438 return EFI_UNSUPPORTED;
439
440 status = fops->query_variable_info(attributes, &storage_size,
441 &remaining_size, &max_size);
442
443 if (status != EFI_SUCCESS)
444 return status;
445
446 if (!storage_size || size > remaining_size || size > max_size ||
447 (remaining_size - size) < (storage_size / 2))
448 return EFI_OUT_OF_RESOURCES;
449
450 return status;
451}
452
453
454static efi_status_t
455check_var_size(struct efivars *efivars, u32 attributes, unsigned long size)
456{
457 efi_status_t status;
458 unsigned long flags;
459
460 spin_lock_irqsave(&efivars->lock, flags);
461 status = check_var_size_locked(efivars, attributes, size);
462 spin_unlock_irqrestore(&efivars->lock, flags);
463
464 return status;
465}
466
429static ssize_t 467static ssize_t
430efivar_guid_read(struct efivar_entry *entry, char *buf) 468efivar_guid_read(struct efivar_entry *entry, char *buf)
431{ 469{
@@ -547,11 +585,16 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
547 } 585 }
548 586
549 spin_lock_irq(&efivars->lock); 587 spin_lock_irq(&efivars->lock);
550 status = efivars->ops->set_variable(new_var->VariableName, 588
551 &new_var->VendorGuid, 589 status = check_var_size_locked(efivars, new_var->Attributes,
552 new_var->Attributes, 590 new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
553 new_var->DataSize, 591
554 new_var->Data); 592 if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
593 status = efivars->ops->set_variable(new_var->VariableName,
594 &new_var->VendorGuid,
595 new_var->Attributes,
596 new_var->DataSize,
597 new_var->Data);
555 598
556 spin_unlock_irq(&efivars->lock); 599 spin_unlock_irq(&efivars->lock);
557 600
@@ -702,8 +745,7 @@ static ssize_t efivarfs_file_write(struct file *file,
702 u32 attributes; 745 u32 attributes;
703 struct inode *inode = file->f_mapping->host; 746 struct inode *inode = file->f_mapping->host;
704 unsigned long datasize = count - sizeof(attributes); 747 unsigned long datasize = count - sizeof(attributes);
705 unsigned long newdatasize; 748 unsigned long newdatasize, varsize;
706 u64 storage_size, remaining_size, max_size;
707 ssize_t bytes = 0; 749 ssize_t bytes = 0;
708 750
709 if (count < sizeof(attributes)) 751 if (count < sizeof(attributes))
@@ -722,28 +764,18 @@ static ssize_t efivarfs_file_write(struct file *file,
722 * amounts of memory. Pick a default size of 64K if 764 * amounts of memory. Pick a default size of 64K if
723 * QueryVariableInfo() isn't supported by the firmware. 765 * QueryVariableInfo() isn't supported by the firmware.
724 */ 766 */
725 spin_lock_irq(&efivars->lock);
726
727 if (!efivars->ops->query_variable_info)
728 status = EFI_UNSUPPORTED;
729 else {
730 const struct efivar_operations *fops = efivars->ops;
731 status = fops->query_variable_info(attributes, &storage_size,
732 &remaining_size, &max_size);
733 }
734 767
735 spin_unlock_irq(&efivars->lock); 768 varsize = datasize + utf16_strsize(var->var.VariableName, 1024);
769 status = check_var_size(efivars, attributes, varsize);
736 770
737 if (status != EFI_SUCCESS) { 771 if (status != EFI_SUCCESS) {
738 if (status != EFI_UNSUPPORTED) 772 if (status != EFI_UNSUPPORTED)
739 return efi_status_to_err(status); 773 return efi_status_to_err(status);
740 774
741 remaining_size = 65536; 775 if (datasize > 65536)
776 return -ENOSPC;
742 } 777 }
743 778
744 if (datasize > remaining_size)
745 return -ENOSPC;
746
747 data = kmalloc(datasize, GFP_KERNEL); 779 data = kmalloc(datasize, GFP_KERNEL);
748 if (!data) 780 if (!data)
749 return -ENOMEM; 781 return -ENOMEM;
@@ -765,6 +797,19 @@ static ssize_t efivarfs_file_write(struct file *file,
765 */ 797 */
766 spin_lock_irq(&efivars->lock); 798 spin_lock_irq(&efivars->lock);
767 799
800 /*
801 * Ensure that the available space hasn't shrunk below the safe level
802 */
803
804 status = check_var_size_locked(efivars, attributes, varsize);
805
806 if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED) {
807 spin_unlock_irq(&efivars->lock);
808 kfree(data);
809
810 return efi_status_to_err(status);
811 }
812
768 status = efivars->ops->set_variable(var->var.VariableName, 813 status = efivars->ops->set_variable(var->var.VariableName,
769 &var->var.VendorGuid, 814 &var->var.VendorGuid,
770 attributes, datasize, 815 attributes, datasize,
@@ -929,8 +974,8 @@ static bool efivarfs_valid_name(const char *str, int len)
929 if (len < GUID_LEN + 2) 974 if (len < GUID_LEN + 2)
930 return false; 975 return false;
931 976
932 /* GUID should be right after the first '-' */ 977 /* GUID must be preceded by a '-' */
933 if (s - 1 != strchr(str, '-')) 978 if (*(s - 1) != '-')
934 return false; 979 return false;
935 980
936 /* 981 /*
@@ -1118,15 +1163,22 @@ static struct dentry_operations efivarfs_d_ops = {
1118 1163
1119static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name) 1164static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
1120{ 1165{
1166 struct dentry *d;
1121 struct qstr q; 1167 struct qstr q;
1168 int err;
1122 1169
1123 q.name = name; 1170 q.name = name;
1124 q.len = strlen(name); 1171 q.len = strlen(name);
1125 1172
1126 if (efivarfs_d_hash(NULL, NULL, &q)) 1173 err = efivarfs_d_hash(NULL, NULL, &q);
1127 return NULL; 1174 if (err)
1175 return ERR_PTR(err);
1176
1177 d = d_alloc(parent, &q);
1178 if (d)
1179 return d;
1128 1180
1129 return d_alloc(parent, &q); 1181 return ERR_PTR(-ENOMEM);
1130} 1182}
1131 1183
1132static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) 1184static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
@@ -1136,6 +1188,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
1136 struct efivar_entry *entry, *n; 1188 struct efivar_entry *entry, *n;
1137 struct efivars *efivars = &__efivars; 1189 struct efivars *efivars = &__efivars;
1138 char *name; 1190 char *name;
1191 int err = -ENOMEM;
1139 1192
1140 efivarfs_sb = sb; 1193 efivarfs_sb = sb;
1141 1194
@@ -1186,8 +1239,10 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
1186 goto fail_name; 1239 goto fail_name;
1187 1240
1188 dentry = efivarfs_alloc_dentry(root, name); 1241 dentry = efivarfs_alloc_dentry(root, name);
1189 if (!dentry) 1242 if (IS_ERR(dentry)) {
1243 err = PTR_ERR(dentry);
1190 goto fail_inode; 1244 goto fail_inode;
1245 }
1191 1246
1192 /* copied by the above to local storage in the dentry. */ 1247 /* copied by the above to local storage in the dentry. */
1193 kfree(name); 1248 kfree(name);
@@ -1214,7 +1269,7 @@ fail_inode:
1214fail_name: 1269fail_name:
1215 kfree(name); 1270 kfree(name);
1216fail: 1271fail:
1217 return -ENOMEM; 1272 return err;
1218} 1273}
1219 1274
1220static struct dentry *efivarfs_mount(struct file_system_type *fs_type, 1275static struct dentry *efivarfs_mount(struct file_system_type *fs_type,
@@ -1345,7 +1400,6 @@ static int efi_pstore_write(enum pstore_type_id type,
1345 efi_guid_t vendor = LINUX_EFI_CRASH_GUID; 1400 efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
1346 struct efivars *efivars = psi->data; 1401 struct efivars *efivars = psi->data;
1347 int i, ret = 0; 1402 int i, ret = 0;
1348 u64 storage_space, remaining_space, max_variable_size;
1349 efi_status_t status = EFI_NOT_FOUND; 1403 efi_status_t status = EFI_NOT_FOUND;
1350 unsigned long flags; 1404 unsigned long flags;
1351 1405
@@ -1365,11 +1419,11 @@ static int efi_pstore_write(enum pstore_type_id type,
1365 * size: a size of logging data 1419 * size: a size of logging data
1366 * DUMP_NAME_LEN * 2: a maximum size of variable name 1420 * DUMP_NAME_LEN * 2: a maximum size of variable name
1367 */ 1421 */
1368 status = efivars->ops->query_variable_info(PSTORE_EFI_ATTRIBUTES, 1422
1369 &storage_space, 1423 status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
1370 &remaining_space, 1424 size + DUMP_NAME_LEN * 2);
1371 &max_variable_size); 1425
1372 if (status || remaining_space < size + DUMP_NAME_LEN * 2) { 1426 if (status) {
1373 spin_unlock_irqrestore(&efivars->lock, flags); 1427 spin_unlock_irqrestore(&efivars->lock, flags);
1374 *id = part; 1428 *id = part;
1375 return -ENOSPC; 1429 return -ENOSPC;
@@ -1544,6 +1598,14 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
1544 return -EINVAL; 1598 return -EINVAL;
1545 } 1599 }
1546 1600
1601 status = check_var_size_locked(efivars, new_var->Attributes,
1602 new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
1603
1604 if (status && status != EFI_UNSUPPORTED) {
1605 spin_unlock_irq(&efivars->lock);
1606 return efi_status_to_err(status);
1607 }
1608
1547 /* now *really* create the variable via EFI */ 1609 /* now *really* create the variable via EFI */
1548 status = efivars->ops->set_variable(new_var->VariableName, 1610 status = efivars->ops->set_variable(new_var->VariableName,
1549 &new_var->VendorGuid, 1611 &new_var->VendorGuid,
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 6f2306db8591..f9dbd503fc40 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -128,9 +128,9 @@ static int ichx_read_bit(int reg, unsigned nr)
128 return data & (1 << bit) ? 1 : 0; 128 return data & (1 << bit) ? 1 : 0;
129} 129}
130 130
131static int ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr) 131static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
132{ 132{
133 return (ichx_priv.use_gpio & (1 << (nr / 32))) ? 0 : -ENXIO; 133 return ichx_priv.use_gpio & (1 << (nr / 32));
134} 134}
135 135
136static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr) 136static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index fff9786cdc64..c2534d62911c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -88,13 +88,14 @@ static int gpiod_request(struct gpio_desc *desc, const char *label);
88static void gpiod_free(struct gpio_desc *desc); 88static void gpiod_free(struct gpio_desc *desc);
89static int gpiod_direction_input(struct gpio_desc *desc); 89static int gpiod_direction_input(struct gpio_desc *desc);
90static int gpiod_direction_output(struct gpio_desc *desc, int value); 90static int gpiod_direction_output(struct gpio_desc *desc, int value);
91static int gpiod_get_direction(const struct gpio_desc *desc);
91static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); 92static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
92static int gpiod_get_value_cansleep(struct gpio_desc *desc); 93static int gpiod_get_value_cansleep(const struct gpio_desc *desc);
93static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); 94static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
94static int gpiod_get_value(struct gpio_desc *desc); 95static int gpiod_get_value(const struct gpio_desc *desc);
95static void gpiod_set_value(struct gpio_desc *desc, int value); 96static void gpiod_set_value(struct gpio_desc *desc, int value);
96static int gpiod_cansleep(struct gpio_desc *desc); 97static int gpiod_cansleep(const struct gpio_desc *desc);
97static int gpiod_to_irq(struct gpio_desc *desc); 98static int gpiod_to_irq(const struct gpio_desc *desc);
98static int gpiod_export(struct gpio_desc *desc, bool direction_may_change); 99static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
99static int gpiod_export_link(struct device *dev, const char *name, 100static int gpiod_export_link(struct device *dev, const char *name,
100 struct gpio_desc *desc); 101 struct gpio_desc *desc);
@@ -171,12 +172,12 @@ static int gpio_ensure_requested(struct gpio_desc *desc)
171 return 0; 172 return 0;
172} 173}
173 174
174/* caller holds gpio_lock *OR* gpio is marked as requested */ 175static struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
175static struct gpio_chip *gpiod_to_chip(struct gpio_desc *desc)
176{ 176{
177 return desc->chip; 177 return desc ? desc->chip : NULL;
178} 178}
179 179
180/* caller holds gpio_lock *OR* gpio is marked as requested */
180struct gpio_chip *gpio_to_chip(unsigned gpio) 181struct gpio_chip *gpio_to_chip(unsigned gpio)
181{ 182{
182 return gpiod_to_chip(gpio_to_desc(gpio)); 183 return gpiod_to_chip(gpio_to_desc(gpio));
@@ -207,7 +208,7 @@ static int gpiochip_find_base(int ngpio)
207} 208}
208 209
209/* caller ensures gpio is valid and requested, chip->get_direction may sleep */ 210/* caller ensures gpio is valid and requested, chip->get_direction may sleep */
210static int gpiod_get_direction(struct gpio_desc *desc) 211static int gpiod_get_direction(const struct gpio_desc *desc)
211{ 212{
212 struct gpio_chip *chip; 213 struct gpio_chip *chip;
213 unsigned offset; 214 unsigned offset;
@@ -223,11 +224,13 @@ static int gpiod_get_direction(struct gpio_desc *desc)
223 if (status > 0) { 224 if (status > 0) {
224 /* GPIOF_DIR_IN, or other positive */ 225 /* GPIOF_DIR_IN, or other positive */
225 status = 1; 226 status = 1;
226 clear_bit(FLAG_IS_OUT, &desc->flags); 227 /* FLAG_IS_OUT is just a cache of the result of get_direction(),
228 * so it does not affect constness per se */
229 clear_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
227 } 230 }
228 if (status == 0) { 231 if (status == 0) {
229 /* GPIOF_DIR_OUT */ 232 /* GPIOF_DIR_OUT */
230 set_bit(FLAG_IS_OUT, &desc->flags); 233 set_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
231 } 234 }
232 return status; 235 return status;
233} 236}
@@ -263,7 +266,7 @@ static DEFINE_MUTEX(sysfs_lock);
263static ssize_t gpio_direction_show(struct device *dev, 266static ssize_t gpio_direction_show(struct device *dev,
264 struct device_attribute *attr, char *buf) 267 struct device_attribute *attr, char *buf)
265{ 268{
266 struct gpio_desc *desc = dev_get_drvdata(dev); 269 const struct gpio_desc *desc = dev_get_drvdata(dev);
267 ssize_t status; 270 ssize_t status;
268 271
269 mutex_lock(&sysfs_lock); 272 mutex_lock(&sysfs_lock);
@@ -654,6 +657,11 @@ static ssize_t export_store(struct class *class,
654 goto done; 657 goto done;
655 658
656 desc = gpio_to_desc(gpio); 659 desc = gpio_to_desc(gpio);
660 /* reject invalid GPIOs */
661 if (!desc) {
662 pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
663 return -EINVAL;
664 }
657 665
658 /* No extra locking here; FLAG_SYSFS just signifies that the 666 /* No extra locking here; FLAG_SYSFS just signifies that the
659 * request and export were done on behalf of userspace, so 667 * request and export were done on behalf of userspace, so
@@ -690,12 +698,14 @@ static ssize_t unexport_store(struct class *class,
690 if (status < 0) 698 if (status < 0)
691 goto done; 699 goto done;
692 700
693 status = -EINVAL;
694
695 desc = gpio_to_desc(gpio); 701 desc = gpio_to_desc(gpio);
696 /* reject bogus commands (gpio_unexport ignores them) */ 702 /* reject bogus commands (gpio_unexport ignores them) */
697 if (!desc) 703 if (!desc) {
698 goto done; 704 pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
705 return -EINVAL;
706 }
707
708 status = -EINVAL;
699 709
700 /* No extra locking here; FLAG_SYSFS just signifies that the 710 /* No extra locking here; FLAG_SYSFS just signifies that the
701 * request and export were done on behalf of userspace, so 711 * request and export were done on behalf of userspace, so
@@ -846,8 +856,10 @@ static int gpiod_export_link(struct device *dev, const char *name,
846{ 856{
847 int status = -EINVAL; 857 int status = -EINVAL;
848 858
849 if (!desc) 859 if (!desc) {
850 goto done; 860 pr_warn("%s: invalid GPIO\n", __func__);
861 return -EINVAL;
862 }
851 863
852 mutex_lock(&sysfs_lock); 864 mutex_lock(&sysfs_lock);
853 865
@@ -865,7 +877,6 @@ static int gpiod_export_link(struct device *dev, const char *name,
865 877
866 mutex_unlock(&sysfs_lock); 878 mutex_unlock(&sysfs_lock);
867 879
868done:
869 if (status) 880 if (status)
870 pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc), 881 pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
871 status); 882 status);
@@ -896,8 +907,10 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
896 struct device *dev = NULL; 907 struct device *dev = NULL;
897 int status = -EINVAL; 908 int status = -EINVAL;
898 909
899 if (!desc) 910 if (!desc) {
900 goto done; 911 pr_warn("%s: invalid GPIO\n", __func__);
912 return -EINVAL;
913 }
901 914
902 mutex_lock(&sysfs_lock); 915 mutex_lock(&sysfs_lock);
903 916
@@ -914,7 +927,6 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
914unlock: 927unlock:
915 mutex_unlock(&sysfs_lock); 928 mutex_unlock(&sysfs_lock);
916 929
917done:
918 if (status) 930 if (status)
919 pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc), 931 pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
920 status); 932 status);
@@ -940,8 +952,8 @@ static void gpiod_unexport(struct gpio_desc *desc)
940 struct device *dev = NULL; 952 struct device *dev = NULL;
941 953
942 if (!desc) { 954 if (!desc) {
943 status = -EINVAL; 955 pr_warn("%s: invalid GPIO\n", __func__);
944 goto done; 956 return;
945 } 957 }
946 958
947 mutex_lock(&sysfs_lock); 959 mutex_lock(&sysfs_lock);
@@ -962,7 +974,7 @@ static void gpiod_unexport(struct gpio_desc *desc)
962 device_unregister(dev); 974 device_unregister(dev);
963 put_device(dev); 975 put_device(dev);
964 } 976 }
965done: 977
966 if (status) 978 if (status)
967 pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc), 979 pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
968 status); 980 status);
@@ -1384,12 +1396,13 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
1384 int status = -EPROBE_DEFER; 1396 int status = -EPROBE_DEFER;
1385 unsigned long flags; 1397 unsigned long flags;
1386 1398
1387 spin_lock_irqsave(&gpio_lock, flags);
1388
1389 if (!desc) { 1399 if (!desc) {
1390 status = -EINVAL; 1400 pr_warn("%s: invalid GPIO\n", __func__);
1391 goto done; 1401 return -EINVAL;
1392 } 1402 }
1403
1404 spin_lock_irqsave(&gpio_lock, flags);
1405
1393 chip = desc->chip; 1406 chip = desc->chip;
1394 if (chip == NULL) 1407 if (chip == NULL)
1395 goto done; 1408 goto done;
@@ -1432,8 +1445,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
1432done: 1445done:
1433 if (status) 1446 if (status)
1434 pr_debug("_gpio_request: gpio-%d (%s) status %d\n", 1447 pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
1435 desc ? desc_to_gpio(desc) : -1, 1448 desc_to_gpio(desc), label ? : "?", status);
1436 label ? : "?", status);
1437 spin_unlock_irqrestore(&gpio_lock, flags); 1449 spin_unlock_irqrestore(&gpio_lock, flags);
1438 return status; 1450 return status;
1439} 1451}
@@ -1616,10 +1628,13 @@ static int gpiod_direction_input(struct gpio_desc *desc)
1616 int status = -EINVAL; 1628 int status = -EINVAL;
1617 int offset; 1629 int offset;
1618 1630
1631 if (!desc) {
1632 pr_warn("%s: invalid GPIO\n", __func__);
1633 return -EINVAL;
1634 }
1635
1619 spin_lock_irqsave(&gpio_lock, flags); 1636 spin_lock_irqsave(&gpio_lock, flags);
1620 1637
1621 if (!desc)
1622 goto fail;
1623 chip = desc->chip; 1638 chip = desc->chip;
1624 if (!chip || !chip->get || !chip->direction_input) 1639 if (!chip || !chip->get || !chip->direction_input)
1625 goto fail; 1640 goto fail;
@@ -1655,13 +1670,9 @@ lose:
1655 return status; 1670 return status;
1656fail: 1671fail:
1657 spin_unlock_irqrestore(&gpio_lock, flags); 1672 spin_unlock_irqrestore(&gpio_lock, flags);
1658 if (status) { 1673 if (status)
1659 int gpio = -1; 1674 pr_debug("%s: gpio-%d status %d\n", __func__,
1660 if (desc) 1675 desc_to_gpio(desc), status);
1661 gpio = desc_to_gpio(desc);
1662 pr_debug("%s: gpio-%d status %d\n",
1663 __func__, gpio, status);
1664 }
1665 return status; 1676 return status;
1666} 1677}
1667 1678
@@ -1678,6 +1689,11 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
1678 int status = -EINVAL; 1689 int status = -EINVAL;
1679 int offset; 1690 int offset;
1680 1691
1692 if (!desc) {
1693 pr_warn("%s: invalid GPIO\n", __func__);
1694 return -EINVAL;
1695 }
1696
1681 /* Open drain pin should not be driven to 1 */ 1697 /* Open drain pin should not be driven to 1 */
1682 if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags)) 1698 if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
1683 return gpiod_direction_input(desc); 1699 return gpiod_direction_input(desc);
@@ -1688,8 +1704,6 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
1688 1704
1689 spin_lock_irqsave(&gpio_lock, flags); 1705 spin_lock_irqsave(&gpio_lock, flags);
1690 1706
1691 if (!desc)
1692 goto fail;
1693 chip = desc->chip; 1707 chip = desc->chip;
1694 if (!chip || !chip->set || !chip->direction_output) 1708 if (!chip || !chip->set || !chip->direction_output)
1695 goto fail; 1709 goto fail;
@@ -1725,13 +1739,9 @@ lose:
1725 return status; 1739 return status;
1726fail: 1740fail:
1727 spin_unlock_irqrestore(&gpio_lock, flags); 1741 spin_unlock_irqrestore(&gpio_lock, flags);
1728 if (status) { 1742 if (status)
1729 int gpio = -1; 1743 pr_debug("%s: gpio-%d status %d\n", __func__,
1730 if (desc) 1744 desc_to_gpio(desc), status);
1731 gpio = desc_to_gpio(desc);
1732 pr_debug("%s: gpio-%d status %d\n",
1733 __func__, gpio, status);
1734 }
1735 return status; 1745 return status;
1736} 1746}
1737 1747
@@ -1753,10 +1763,13 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
1753 int status = -EINVAL; 1763 int status = -EINVAL;
1754 int offset; 1764 int offset;
1755 1765
1766 if (!desc) {
1767 pr_warn("%s: invalid GPIO\n", __func__);
1768 return -EINVAL;
1769 }
1770
1756 spin_lock_irqsave(&gpio_lock, flags); 1771 spin_lock_irqsave(&gpio_lock, flags);
1757 1772
1758 if (!desc)
1759 goto fail;
1760 chip = desc->chip; 1773 chip = desc->chip;
1761 if (!chip || !chip->set || !chip->set_debounce) 1774 if (!chip || !chip->set || !chip->set_debounce)
1762 goto fail; 1775 goto fail;
@@ -1776,13 +1789,9 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
1776 1789
1777fail: 1790fail:
1778 spin_unlock_irqrestore(&gpio_lock, flags); 1791 spin_unlock_irqrestore(&gpio_lock, flags);
1779 if (status) { 1792 if (status)
1780 int gpio = -1; 1793 pr_debug("%s: gpio-%d status %d\n", __func__,
1781 if (desc) 1794 desc_to_gpio(desc), status);
1782 gpio = desc_to_gpio(desc);
1783 pr_debug("%s: gpio-%d status %d\n",
1784 __func__, gpio, status);
1785 }
1786 1795
1787 return status; 1796 return status;
1788} 1797}
@@ -1824,12 +1833,14 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
1824 * It returns the zero or nonzero value provided by the associated 1833 * It returns the zero or nonzero value provided by the associated
1825 * gpio_chip.get() method; or zero if no such method is provided. 1834 * gpio_chip.get() method; or zero if no such method is provided.
1826 */ 1835 */
1827static int gpiod_get_value(struct gpio_desc *desc) 1836static int gpiod_get_value(const struct gpio_desc *desc)
1828{ 1837{
1829 struct gpio_chip *chip; 1838 struct gpio_chip *chip;
1830 int value; 1839 int value;
1831 int offset; 1840 int offset;
1832 1841
1842 if (!desc)
1843 return 0;
1833 chip = desc->chip; 1844 chip = desc->chip;
1834 offset = gpio_chip_hwgpio(desc); 1845 offset = gpio_chip_hwgpio(desc);
1835 /* Should be using gpio_get_value_cansleep() */ 1846 /* Should be using gpio_get_value_cansleep() */
@@ -1912,6 +1923,8 @@ static void gpiod_set_value(struct gpio_desc *desc, int value)
1912{ 1923{
1913 struct gpio_chip *chip; 1924 struct gpio_chip *chip;
1914 1925
1926 if (!desc)
1927 return;
1915 chip = desc->chip; 1928 chip = desc->chip;
1916 /* Should be using gpio_set_value_cansleep() */ 1929 /* Should be using gpio_set_value_cansleep() */
1917 WARN_ON(chip->can_sleep); 1930 WARN_ON(chip->can_sleep);
@@ -1938,8 +1951,10 @@ EXPORT_SYMBOL_GPL(__gpio_set_value);
1938 * This is used directly or indirectly to implement gpio_cansleep(). It 1951 * This is used directly or indirectly to implement gpio_cansleep(). It
1939 * returns nonzero if access reading or writing the GPIO value can sleep. 1952 * returns nonzero if access reading or writing the GPIO value can sleep.
1940 */ 1953 */
1941static int gpiod_cansleep(struct gpio_desc *desc) 1954static int gpiod_cansleep(const struct gpio_desc *desc)
1942{ 1955{
1956 if (!desc)
1957 return 0;
1943 /* only call this on GPIOs that are valid! */ 1958 /* only call this on GPIOs that are valid! */
1944 return desc->chip->can_sleep; 1959 return desc->chip->can_sleep;
1945} 1960}
@@ -1959,11 +1974,13 @@ EXPORT_SYMBOL_GPL(__gpio_cansleep);
1959 * It returns the number of the IRQ signaled by this (input) GPIO, 1974 * It returns the number of the IRQ signaled by this (input) GPIO,
1960 * or a negative errno. 1975 * or a negative errno.
1961 */ 1976 */
1962static int gpiod_to_irq(struct gpio_desc *desc) 1977static int gpiod_to_irq(const struct gpio_desc *desc)
1963{ 1978{
1964 struct gpio_chip *chip; 1979 struct gpio_chip *chip;
1965 int offset; 1980 int offset;
1966 1981
1982 if (!desc)
1983 return -EINVAL;
1967 chip = desc->chip; 1984 chip = desc->chip;
1968 offset = gpio_chip_hwgpio(desc); 1985 offset = gpio_chip_hwgpio(desc);
1969 return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO; 1986 return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
@@ -1980,13 +1997,15 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq);
1980 * Common examples include ones connected to I2C or SPI chips. 1997 * Common examples include ones connected to I2C or SPI chips.
1981 */ 1998 */
1982 1999
1983static int gpiod_get_value_cansleep(struct gpio_desc *desc) 2000static int gpiod_get_value_cansleep(const struct gpio_desc *desc)
1984{ 2001{
1985 struct gpio_chip *chip; 2002 struct gpio_chip *chip;
1986 int value; 2003 int value;
1987 int offset; 2004 int offset;
1988 2005
1989 might_sleep_if(extra_checks); 2006 might_sleep_if(extra_checks);
2007 if (!desc)
2008 return 0;
1990 chip = desc->chip; 2009 chip = desc->chip;
1991 offset = gpio_chip_hwgpio(desc); 2010 offset = gpio_chip_hwgpio(desc);
1992 value = chip->get ? chip->get(chip, offset) : 0; 2011 value = chip->get ? chip->get(chip, offset) : 0;
@@ -2005,6 +2024,8 @@ static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
2005 struct gpio_chip *chip; 2024 struct gpio_chip *chip;
2006 2025
2007 might_sleep_if(extra_checks); 2026 might_sleep_if(extra_checks);
2027 if (!desc)
2028 return;
2008 chip = desc->chip; 2029 chip = desc->chip;
2009 trace_gpio_value(desc_to_gpio(desc), 0, value); 2030 trace_gpio_value(desc_to_gpio(desc), 0, value);
2010 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) 2031 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c5b8c81b9440..0a8eceb75902 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -379,15 +379,15 @@ static const struct pci_device_id pciidlist[] = { /* aka */
379 INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ 379 INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
380 INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ 380 INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
381 INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ 381 INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
382 INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */ 382 INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
383 INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
383 INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ 384 INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
384 INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */ 385 INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
385 INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */ 386 INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
386 INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ 387 INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
387 INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */ 388 INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
388 INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */ 389 INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
389 INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ 390 INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
390 INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
391 INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), 391 INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
392 INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), 392 INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
393 INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), 393 INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
@@ -495,6 +495,7 @@ static int i915_drm_freeze(struct drm_device *dev)
495 intel_modeset_disable(dev); 495 intel_modeset_disable(dev);
496 496
497 drm_irq_uninstall(dev); 497 drm_irq_uninstall(dev);
498 dev_priv->enable_hotplug_processing = false;
498 } 499 }
499 500
500 i915_save_state(dev); 501 i915_save_state(dev);
@@ -568,10 +569,20 @@ static int __i915_drm_thaw(struct drm_device *dev)
568 error = i915_gem_init_hw(dev); 569 error = i915_gem_init_hw(dev);
569 mutex_unlock(&dev->struct_mutex); 570 mutex_unlock(&dev->struct_mutex);
570 571
572 /* We need working interrupts for modeset enabling ... */
573 drm_irq_install(dev);
574
571 intel_modeset_init_hw(dev); 575 intel_modeset_init_hw(dev);
572 intel_modeset_setup_hw_state(dev, false); 576 intel_modeset_setup_hw_state(dev, false);
573 drm_irq_install(dev); 577
578 /*
579 * ... but also need to make sure that hotplug processing
580 * doesn't cause havoc. Like in the driver load code we don't
581 * bother with the tiny race here where we might lose hotplug
582 * notifications.
583 */
574 intel_hpd_init(dev); 584 intel_hpd_init(dev);
585 dev_priv->enable_hotplug_processing = true;
575 } 586 }
576 587
577 intel_opregion_init(dev); 588 intel_opregion_init(dev);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2cd97d1cc920..3c7bb0410b51 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -701,7 +701,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
701{ 701{
702 struct drm_device *dev = (struct drm_device *) arg; 702 struct drm_device *dev = (struct drm_device *) arg;
703 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 703 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
704 u32 de_iir, gt_iir, de_ier, pm_iir; 704 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
705 irqreturn_t ret = IRQ_NONE; 705 irqreturn_t ret = IRQ_NONE;
706 int i; 706 int i;
707 707
@@ -711,6 +711,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
711 de_ier = I915_READ(DEIER); 711 de_ier = I915_READ(DEIER);
712 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 712 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
713 713
714 /* Disable south interrupts. We'll only write to SDEIIR once, so further
715 * interrupts will be stored on its back queue, and then we'll be
716 * able to process them after we restore SDEIER (as soon as we restore
717 * it, we'll get an interrupt if SDEIIR still has something to process
718 * due to its back queue). */
719 sde_ier = I915_READ(SDEIER);
720 I915_WRITE(SDEIER, 0);
721 POSTING_READ(SDEIER);
722
714 gt_iir = I915_READ(GTIIR); 723 gt_iir = I915_READ(GTIIR);
715 if (gt_iir) { 724 if (gt_iir) {
716 snb_gt_irq_handler(dev, dev_priv, gt_iir); 725 snb_gt_irq_handler(dev, dev_priv, gt_iir);
@@ -759,6 +768,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
759 768
760 I915_WRITE(DEIER, de_ier); 769 I915_WRITE(DEIER, de_ier);
761 POSTING_READ(DEIER); 770 POSTING_READ(DEIER);
771 I915_WRITE(SDEIER, sde_ier);
772 POSTING_READ(SDEIER);
762 773
763 return ret; 774 return ret;
764} 775}
@@ -778,7 +789,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
778 struct drm_device *dev = (struct drm_device *) arg; 789 struct drm_device *dev = (struct drm_device *) arg;
779 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 790 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
780 int ret = IRQ_NONE; 791 int ret = IRQ_NONE;
781 u32 de_iir, gt_iir, de_ier, pm_iir; 792 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
782 793
783 atomic_inc(&dev_priv->irq_received); 794 atomic_inc(&dev_priv->irq_received);
784 795
@@ -787,6 +798,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
787 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 798 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
788 POSTING_READ(DEIER); 799 POSTING_READ(DEIER);
789 800
801 /* Disable south interrupts. We'll only write to SDEIIR once, so further
802 * interrupts will be stored on its back queue, and then we'll be
803 * able to process them after we restore SDEIER (as soon as we restore
804 * it, we'll get an interrupt if SDEIIR still has something to process
805 * due to its back queue). */
806 sde_ier = I915_READ(SDEIER);
807 I915_WRITE(SDEIER, 0);
808 POSTING_READ(SDEIER);
809
790 de_iir = I915_READ(DEIIR); 810 de_iir = I915_READ(DEIIR);
791 gt_iir = I915_READ(GTIIR); 811 gt_iir = I915_READ(GTIIR);
792 pm_iir = I915_READ(GEN6_PMIIR); 812 pm_iir = I915_READ(GEN6_PMIIR);
@@ -849,6 +869,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
849done: 869done:
850 I915_WRITE(DEIER, de_ier); 870 I915_WRITE(DEIER, de_ier);
851 POSTING_READ(DEIER); 871 POSTING_READ(DEIER);
872 I915_WRITE(SDEIER, sde_ier);
873 POSTING_READ(SDEIER);
852 874
853 return ret; 875 return ret;
854} 876}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 527b664d3434..848992f67d56 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1613,9 +1613,9 @@
1613#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) 1613#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
1614#define ADPA_USE_VGA_HVPOLARITY (1<<15) 1614#define ADPA_USE_VGA_HVPOLARITY (1<<15)
1615#define ADPA_SETS_HVPOLARITY 0 1615#define ADPA_SETS_HVPOLARITY 0
1616#define ADPA_VSYNC_CNTL_DISABLE (1<<11) 1616#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
1617#define ADPA_VSYNC_CNTL_ENABLE 0 1617#define ADPA_VSYNC_CNTL_ENABLE 0
1618#define ADPA_HSYNC_CNTL_DISABLE (1<<10) 1618#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
1619#define ADPA_HSYNC_CNTL_ENABLE 0 1619#define ADPA_HSYNC_CNTL_ENABLE 0
1620#define ADPA_VSYNC_ACTIVE_HIGH (1<<4) 1620#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
1621#define ADPA_VSYNC_ACTIVE_LOW 0 1621#define ADPA_VSYNC_ACTIVE_LOW 0
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 969d08c72d10..32a3693905ec 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -88,7 +88,7 @@ static void intel_disable_crt(struct intel_encoder *encoder)
88 u32 temp; 88 u32 temp;
89 89
90 temp = I915_READ(crt->adpa_reg); 90 temp = I915_READ(crt->adpa_reg);
91 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); 91 temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
92 temp &= ~ADPA_DAC_ENABLE; 92 temp &= ~ADPA_DAC_ENABLE;
93 I915_WRITE(crt->adpa_reg, temp); 93 I915_WRITE(crt->adpa_reg, temp);
94} 94}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d64af5aa4a1c..8d0bac3c35d7 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1391,8 +1391,8 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
1391 struct intel_dp *intel_dp = &intel_dig_port->dp; 1391 struct intel_dp *intel_dp = &intel_dig_port->dp;
1392 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 1392 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
1393 enum port port = intel_dig_port->port; 1393 enum port port = intel_dig_port->port;
1394 bool wait;
1395 uint32_t val; 1394 uint32_t val;
1395 bool wait = false;
1396 1396
1397 if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { 1397 if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
1398 val = I915_READ(DDI_BUF_CTL(port)); 1398 val = I915_READ(DDI_BUF_CTL(port));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a05ac2c91ba2..287b42c9d1a8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3604,6 +3604,30 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3604 */ 3604 */
3605} 3605}
3606 3606
3607/**
3608 * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
3609 * cursor plane briefly if not already running after enabling the display
3610 * plane.
3611 * This workaround avoids occasional blank screens when self refresh is
3612 * enabled.
3613 */
3614static void
3615g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
3616{
3617 u32 cntl = I915_READ(CURCNTR(pipe));
3618
3619 if ((cntl & CURSOR_MODE) == 0) {
3620 u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
3621
3622 I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
3623 I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
3624 intel_wait_for_vblank(dev_priv->dev, pipe);
3625 I915_WRITE(CURCNTR(pipe), cntl);
3626 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3627 I915_WRITE(FW_BLC_SELF, fw_bcl_self);
3628 }
3629}
3630
3607static void i9xx_crtc_enable(struct drm_crtc *crtc) 3631static void i9xx_crtc_enable(struct drm_crtc *crtc)
3608{ 3632{
3609 struct drm_device *dev = crtc->dev; 3633 struct drm_device *dev = crtc->dev;
@@ -3629,6 +3653,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3629 3653
3630 intel_enable_pipe(dev_priv, pipe, false); 3654 intel_enable_pipe(dev_priv, pipe, false);
3631 intel_enable_plane(dev_priv, plane, pipe); 3655 intel_enable_plane(dev_priv, plane, pipe);
3656 if (IS_G4X(dev))
3657 g4x_fixup_plane(dev_priv, pipe);
3632 3658
3633 intel_crtc_load_lut(crtc); 3659 intel_crtc_load_lut(crtc);
3634 intel_update_fbc(dev); 3660 intel_update_fbc(dev);
@@ -7256,8 +7282,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7256{ 7282{
7257 struct drm_device *dev = crtc->dev; 7283 struct drm_device *dev = crtc->dev;
7258 struct drm_i915_private *dev_priv = dev->dev_private; 7284 struct drm_i915_private *dev_priv = dev->dev_private;
7259 struct intel_framebuffer *intel_fb; 7285 struct drm_framebuffer *old_fb = crtc->fb;
7260 struct drm_i915_gem_object *obj; 7286 struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
7261 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7287 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7262 struct intel_unpin_work *work; 7288 struct intel_unpin_work *work;
7263 unsigned long flags; 7289 unsigned long flags;
@@ -7282,8 +7308,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7282 7308
7283 work->event = event; 7309 work->event = event;
7284 work->crtc = crtc; 7310 work->crtc = crtc;
7285 intel_fb = to_intel_framebuffer(crtc->fb); 7311 work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
7286 work->old_fb_obj = intel_fb->obj;
7287 INIT_WORK(&work->work, intel_unpin_work_fn); 7312 INIT_WORK(&work->work, intel_unpin_work_fn);
7288 7313
7289 ret = drm_vblank_get(dev, intel_crtc->pipe); 7314 ret = drm_vblank_get(dev, intel_crtc->pipe);
@@ -7303,9 +7328,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7303 intel_crtc->unpin_work = work; 7328 intel_crtc->unpin_work = work;
7304 spin_unlock_irqrestore(&dev->event_lock, flags); 7329 spin_unlock_irqrestore(&dev->event_lock, flags);
7305 7330
7306 intel_fb = to_intel_framebuffer(fb);
7307 obj = intel_fb->obj;
7308
7309 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 7331 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
7310 flush_workqueue(dev_priv->wq); 7332 flush_workqueue(dev_priv->wq);
7311 7333
@@ -7340,6 +7362,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7340 7362
7341cleanup_pending: 7363cleanup_pending:
7342 atomic_dec(&intel_crtc->unpin_work_count); 7364 atomic_dec(&intel_crtc->unpin_work_count);
7365 crtc->fb = old_fb;
7343 drm_gem_object_unreference(&work->old_fb_obj->base); 7366 drm_gem_object_unreference(&work->old_fb_obj->base);
7344 drm_gem_object_unreference(&obj->base); 7367 drm_gem_object_unreference(&obj->base);
7345 mutex_unlock(&dev->struct_mutex); 7368 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f61cb7998c72..6f728e5ee793 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -353,7 +353,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
353 353
354#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) 354#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
355 if (has_aux_irq) 355 if (has_aux_irq)
356 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10); 356 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
357 msecs_to_jiffies(10));
357 else 358 else
358 done = wait_for_atomic(C, 10) == 0; 359 done = wait_for_atomic(C, 10) == 0;
359 if (!done) 360 if (!done)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 61fee7fcdc2c..a1794c6df1bf 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2574,7 +2574,7 @@ static void gen6_enable_rps(struct drm_device *dev)
2574 I915_WRITE(GEN6_RC_SLEEP, 0); 2574 I915_WRITE(GEN6_RC_SLEEP, 0);
2575 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 2575 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
2576 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 2576 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
2577 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); 2577 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
2578 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 2578 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
2579 2579
2580 /* Check if we are enabling RC6 */ 2580 /* Check if we are enabling RC6 */
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 5ea5033eae0a..4d932c46725d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -112,7 +112,6 @@ struct mga_framebuffer {
112struct mga_fbdev { 112struct mga_fbdev {
113 struct drm_fb_helper helper; 113 struct drm_fb_helper helper;
114 struct mga_framebuffer mfb; 114 struct mga_framebuffer mfb;
115 struct list_head fbdev_list;
116 void *sysram; 115 void *sysram;
117 int size; 116 int size;
118 struct ttm_bo_kmap_obj mapping; 117 struct ttm_bo_kmap_obj mapping;
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 5a88ec51b513..d3dcf54e6233 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -92,6 +92,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
92 int ret; 92 int ret;
93 int data, clock; 93 int data, clock;
94 94
95 WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
95 WREG_DAC(MGA1064_GEN_IO_DATA, 0xff); 96 WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
96 WREG_DAC(MGA1064_GEN_IO_CTL, 0); 97 WREG_DAC(MGA1064_GEN_IO_CTL, 0);
97 98
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d3d99a28ddef..a274b9906ef8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1406,6 +1406,14 @@ static int mga_vga_get_modes(struct drm_connector *connector)
1406static int mga_vga_mode_valid(struct drm_connector *connector, 1406static int mga_vga_mode_valid(struct drm_connector *connector,
1407 struct drm_display_mode *mode) 1407 struct drm_display_mode *mode)
1408{ 1408{
1409 struct drm_device *dev = connector->dev;
1410 struct mga_device *mdev = (struct mga_device*)dev->dev_private;
1411 struct mga_fbdev *mfbdev = mdev->mfbdev;
1412 struct drm_fb_helper *fb_helper = &mfbdev->helper;
1413 struct drm_fb_helper_connector *fb_helper_conn = NULL;
1414 int bpp = 32;
1415 int i = 0;
1416
1409 /* FIXME: Add bandwidth and g200se limitations */ 1417 /* FIXME: Add bandwidth and g200se limitations */
1410 1418
1411 if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 || 1419 if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
@@ -1415,6 +1423,25 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
1415 return MODE_BAD; 1423 return MODE_BAD;
1416 } 1424 }
1417 1425
1426 /* Validate the mode input by the user */
1427 for (i = 0; i < fb_helper->connector_count; i++) {
1428 if (fb_helper->connector_info[i]->connector == connector) {
1429 /* Found the helper for this connector */
1430 fb_helper_conn = fb_helper->connector_info[i];
1431 if (fb_helper_conn->cmdline_mode.specified) {
1432 if (fb_helper_conn->cmdline_mode.bpp_specified) {
1433 bpp = fb_helper_conn->cmdline_mode.bpp;
1434 }
1435 }
1436 }
1437 }
1438
1439 if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
1440 if (fb_helper_conn)
1441 fb_helper_conn->cmdline_mode.specified = false;
1442 return MODE_BAD;
1443 }
1444
1418 return MODE_OK; 1445 return MODE_OK;
1419} 1446}
1420 1447
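
For illustration (not from the patch): the new rejection in mga_vga_mode_valid() comes down to comparing hdisplay * vdisplay * (bpp / 8) bytes of scanout memory against mdev->mc.vram_size. A standalone sketch of that arithmetic (the VRAM size below is made up for the example):

#include <stdio.h>
#include <stdint.h>

static int mode_fits(unsigned int hdisplay, unsigned int vdisplay,
		     unsigned int bpp, uint64_t vram_size)
{
	uint64_t needed = (uint64_t)hdisplay * vdisplay * (bpp / 8);

	return needed <= vram_size;
}

int main(void)
{
	/* Assume 8 MiB of VRAM: 1920x1200@32bpp needs ~9.2 MB (rejected),
	 * 1280x1024@16bpp needs ~2.6 MB (accepted). */
	printf("%d\n", mode_fits(1920, 1200, 32, 8ull << 20));	/* 0 */
	printf("%d\n", mode_fits(1280, 1024, 16, 8ull << 20));	/* 1 */
	return 0;
}
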
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 61cec0f6ff1c..4857f913efdd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -350,7 +350,7 @@ nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
350 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918); 350 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
351 } 351 }
352 352
353 nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918); 353 nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
354 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800)); 354 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
355} 355}
356 356
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2cc1e6a5eb6a..9c41b58d57e2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -869,7 +869,7 @@ init_idx_addr_latched(struct nvbios_init *init)
869 init->offset += 2; 869 init->offset += 2;
870 870
871 init_wr32(init, dreg, idata); 871 init_wr32(init, dreg, idata);
872 init_mask(init, creg, ~mask, data | idata); 872 init_mask(init, creg, ~mask, data | iaddr);
873 } 873 }
874} 874}
875 875
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index a114a0ed7e98..2e98e8a3f1aa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -142,6 +142,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
142 /* drop port's i2c subdev refcount, i2c handles this itself */ 142 /* drop port's i2c subdev refcount, i2c handles this itself */
143 if (ret == 0) { 143 if (ret == 0) {
144 list_add_tail(&port->head, &i2c->ports); 144 list_add_tail(&port->head, &i2c->ports);
145 atomic_dec(&parent->refcount);
145 atomic_dec(&engine->refcount); 146 atomic_dec(&engine->refcount);
146 } 147 }
147 148
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index d28430cd2ba6..6e7a55f93a85 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -47,6 +47,18 @@ nouveau_agp_enabled(struct nouveau_drm *drm)
47 if (drm->agp.stat == UNKNOWN) { 47 if (drm->agp.stat == UNKNOWN) {
48 if (!nouveau_agpmode) 48 if (!nouveau_agpmode)
49 return false; 49 return false;
50#ifdef __powerpc__
51 /* Disable AGP by default on all PowerPC machines for
52 * now -- At least some UniNorth-2 AGP bridges are
53 * known to be broken: DMA from the host to the card
54 * works just fine, but writeback from the card to the
55 * host goes straight to memory untranslated bypassing
56 * the GATT somehow, making them quite painful to deal
57 * with...
58 */
59 if (nouveau_agpmode == -1)
60 return false;
61#endif
50 return true; 62 return true;
51 } 63 }
52 64
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a6237c9cbbc3..87a5a56ed358 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -55,9 +55,9 @@
55 55
56/* offsets in shared sync bo of various structures */ 56/* offsets in shared sync bo of various structures */
57#define EVO_SYNC(c, o) ((c) * 0x0100 + (o)) 57#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
58#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00) 58#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
59#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00) 59#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
60#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10) 60#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
61 61
62#define EVO_CORE_HANDLE (0xd1500000) 62#define EVO_CORE_HANDLE (0xd1500000)
63#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i)) 63#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
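
For illustration (not from the patch): folding the "+ 1" into EVO_FLIP_SEM0/1 keeps slot 0 of the shared sync buffer free for EVO_MAST_NTFY, which the old code achieved by open-coding EVO_SYNC(1 + index, 0x00) at the use site. A standalone check of the resulting offsets:

#include <stdio.h>

/* Same macro shapes as above; offsets only, no hardware access. */
#define EVO_SYNC(c, o)     ((c) * 0x0100 + (o))
#define EVO_MAST_NTFY      EVO_SYNC(0, 0x00)
#define EVO_FLIP_SEM0(c)   EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c)   EVO_SYNC((c) + 1, 0x10)

int main(void)
{
	int crtc;

	printf("master notify at 0x%03x\n", EVO_MAST_NTFY);	/* 0x000 */
	for (crtc = 0; crtc < 2; crtc++)
		printf("crtc %d: sem0 0x%03x, sem1 0x%03x\n", crtc,
		       EVO_FLIP_SEM0(crtc), EVO_FLIP_SEM1(crtc));
	/* crtc 0 -> 0x100/0x110, crtc 1 -> 0x200/0x210 */
	return 0;
}
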
@@ -341,10 +341,8 @@ struct nv50_curs {
341 341
342struct nv50_sync { 342struct nv50_sync {
343 struct nv50_dmac base; 343 struct nv50_dmac base;
344 struct { 344 u32 addr;
345 u32 offset; 345 u32 data;
346 u16 value;
347 } sem;
348}; 346};
349 347
350struct nv50_ovly { 348struct nv50_ovly {
@@ -471,13 +469,33 @@ nv50_display_crtc_sema(struct drm_device *dev, int crtc)
471 return nv50_disp(dev)->sync; 469 return nv50_disp(dev)->sync;
472} 470}
473 471
472struct nv50_display_flip {
473 struct nv50_disp *disp;
474 struct nv50_sync *chan;
475};
476
477static bool
478nv50_display_flip_wait(void *data)
479{
480 struct nv50_display_flip *flip = data;
481 if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
 482						      flip->chan->data)
483 return true;
484 usleep_range(1, 2);
485 return false;
486}
487
474void 488void
475nv50_display_flip_stop(struct drm_crtc *crtc) 489nv50_display_flip_stop(struct drm_crtc *crtc)
476{ 490{
477 struct nv50_sync *sync = nv50_sync(crtc); 491 struct nouveau_device *device = nouveau_dev(crtc->dev);
492 struct nv50_display_flip flip = {
493 .disp = nv50_disp(crtc->dev),
494 .chan = nv50_sync(crtc),
495 };
478 u32 *push; 496 u32 *push;
479 497
480 push = evo_wait(sync, 8); 498 push = evo_wait(flip.chan, 8);
481 if (push) { 499 if (push) {
482 evo_mthd(push, 0x0084, 1); 500 evo_mthd(push, 0x0084, 1);
483 evo_data(push, 0x00000000); 501 evo_data(push, 0x00000000);
@@ -487,8 +505,10 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
487 evo_data(push, 0x00000000); 505 evo_data(push, 0x00000000);
488 evo_mthd(push, 0x0080, 1); 506 evo_mthd(push, 0x0080, 1);
489 evo_data(push, 0x00000000); 507 evo_data(push, 0x00000000);
490 evo_kick(push, sync); 508 evo_kick(push, flip.chan);
491 } 509 }
510
511 nv_wait_cb(device, nv50_display_flip_wait, &flip);
492} 512}
493 513
494int 514int
@@ -496,11 +516,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
496 struct nouveau_channel *chan, u32 swap_interval) 516 struct nouveau_channel *chan, u32 swap_interval)
497{ 517{
498 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 518 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
499 struct nv50_disp *disp = nv50_disp(crtc->dev);
500 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 519 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
501 struct nv50_sync *sync = nv50_sync(crtc); 520 struct nv50_sync *sync = nv50_sync(crtc);
521 int head = nv_crtc->index, ret;
502 u32 *push; 522 u32 *push;
503 int ret;
504 523
505 swap_interval <<= 4; 524 swap_interval <<= 4;
506 if (swap_interval == 0) 525 if (swap_interval == 0)
@@ -510,58 +529,64 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
510 if (unlikely(push == NULL)) 529 if (unlikely(push == NULL))
511 return -EBUSY; 530 return -EBUSY;
512 531
513 /* synchronise with the rendering channel, if necessary */ 532 if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
514 if (likely(chan)) { 533 ret = RING_SPACE(chan, 8);
534 if (ret)
535 return ret;
536
537 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
538 OUT_RING (chan, NvEvoSema0 + head);
539 OUT_RING (chan, sync->addr ^ 0x10);
540 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
541 OUT_RING (chan, sync->data + 1);
542 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
543 OUT_RING (chan, sync->addr);
544 OUT_RING (chan, sync->data);
545 } else
546 if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
547 u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
548 ret = RING_SPACE(chan, 12);
549 if (ret)
550 return ret;
551
552 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
553 OUT_RING (chan, chan->vram);
554 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
555 OUT_RING (chan, upper_32_bits(addr ^ 0x10));
556 OUT_RING (chan, lower_32_bits(addr ^ 0x10));
557 OUT_RING (chan, sync->data + 1);
558 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
559 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
560 OUT_RING (chan, upper_32_bits(addr));
561 OUT_RING (chan, lower_32_bits(addr));
562 OUT_RING (chan, sync->data);
563 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
564 } else
565 if (chan) {
566 u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
515 ret = RING_SPACE(chan, 10); 567 ret = RING_SPACE(chan, 10);
516 if (ret) 568 if (ret)
517 return ret; 569 return ret;
518 570
519 if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) { 571 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
520 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); 572 OUT_RING (chan, upper_32_bits(addr ^ 0x10));
521 OUT_RING (chan, NvEvoSema0 + nv_crtc->index); 573 OUT_RING (chan, lower_32_bits(addr ^ 0x10));
522 OUT_RING (chan, sync->sem.offset); 574 OUT_RING (chan, sync->data + 1);
523 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); 575 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
524 OUT_RING (chan, 0xf00d0000 | sync->sem.value); 576 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
525 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2); 577 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
526 OUT_RING (chan, sync->sem.offset ^ 0x10); 578 OUT_RING (chan, upper_32_bits(addr));
527 OUT_RING (chan, 0x74b1e000); 579 OUT_RING (chan, lower_32_bits(addr));
528 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); 580 OUT_RING (chan, sync->data);
529 OUT_RING (chan, NvSema); 581 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
530 } else 582 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
531 if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { 583 }
532 u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
533 offset += sync->sem.offset;
534
535 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
536 OUT_RING (chan, upper_32_bits(offset));
537 OUT_RING (chan, lower_32_bits(offset));
538 OUT_RING (chan, 0xf00d0000 | sync->sem.value);
539 OUT_RING (chan, 0x00000002);
540 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
541 OUT_RING (chan, upper_32_bits(offset));
542 OUT_RING (chan, lower_32_bits(offset ^ 0x10));
543 OUT_RING (chan, 0x74b1e000);
544 OUT_RING (chan, 0x00000001);
545 } else {
546 u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
547 offset += sync->sem.offset;
548
549 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
550 OUT_RING (chan, upper_32_bits(offset));
551 OUT_RING (chan, lower_32_bits(offset));
552 OUT_RING (chan, 0xf00d0000 | sync->sem.value);
553 OUT_RING (chan, 0x00001002);
554 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
555 OUT_RING (chan, upper_32_bits(offset));
556 OUT_RING (chan, lower_32_bits(offset ^ 0x10));
557 OUT_RING (chan, 0x74b1e000);
558 OUT_RING (chan, 0x00001001);
559 }
560 584
585 if (chan) {
586 sync->addr ^= 0x10;
587 sync->data++;
561 FIRE_RING (chan); 588 FIRE_RING (chan);
562 } else { 589 } else {
563 nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
564 0xf00d0000 | sync->sem.value);
565 evo_sync(crtc->dev); 590 evo_sync(crtc->dev);
566 } 591 }
567 592
@@ -575,9 +600,9 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
575 evo_data(push, 0x40000000); 600 evo_data(push, 0x40000000);
576 } 601 }
577 evo_mthd(push, 0x0088, 4); 602 evo_mthd(push, 0x0088, 4);
578 evo_data(push, sync->sem.offset); 603 evo_data(push, sync->addr);
579 evo_data(push, 0xf00d0000 | sync->sem.value); 604 evo_data(push, sync->data++);
580 evo_data(push, 0x74b1e000); 605 evo_data(push, sync->data);
581 evo_data(push, NvEvoSync); 606 evo_data(push, NvEvoSync);
582 evo_mthd(push, 0x00a0, 2); 607 evo_mthd(push, 0x00a0, 2);
583 evo_data(push, 0x00000000); 608 evo_data(push, 0x00000000);
@@ -605,9 +630,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
605 evo_mthd(push, 0x0080, 1); 630 evo_mthd(push, 0x0080, 1);
606 evo_data(push, 0x00000000); 631 evo_data(push, 0x00000000);
607 evo_kick(push, sync); 632 evo_kick(push, sync);
608
609 sync->sem.offset ^= 0x10;
610 sync->sem.value++;
611 return 0; 633 return 0;
612} 634}
613 635
@@ -1379,7 +1401,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
1379 if (ret) 1401 if (ret)
1380 goto out; 1402 goto out;
1381 1403
1382 head->sync.sem.offset = EVO_SYNC(1 + index, 0x00); 1404 head->sync.addr = EVO_FLIP_SEM0(index);
1405 head->sync.data = 0x00000000;
1383 1406
1384 /* allocate overlay resources */ 1407 /* allocate overlay resources */
1385 ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index, 1408 ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
@@ -2112,15 +2135,23 @@ nv50_display_fini(struct drm_device *dev)
2112int 2135int
2113nv50_display_init(struct drm_device *dev) 2136nv50_display_init(struct drm_device *dev)
2114{ 2137{
2115 u32 *push = evo_wait(nv50_mast(dev), 32); 2138 struct nv50_disp *disp = nv50_disp(dev);
2116 if (push) { 2139 struct drm_crtc *crtc;
2117 evo_mthd(push, 0x0088, 1); 2140 u32 *push;
2118 evo_data(push, NvEvoSync); 2141
2119 evo_kick(push, nv50_mast(dev)); 2142 push = evo_wait(nv50_mast(dev), 32);
2120 return 0; 2143 if (!push)
2144 return -EBUSY;
2145
2146 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2147 struct nv50_sync *sync = nv50_sync(crtc);
2148 nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
2121 } 2149 }
2122 2150
2123 return -EBUSY; 2151 evo_mthd(push, 0x0088, 1);
2152 evo_data(push, NvEvoSync);
2153 evo_kick(push, nv50_mast(dev));
2154 return 0;
2124} 2155}
2125 2156
2126void 2157void
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3c38ea46531c..305a657bf215 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2438,6 +2438,12 @@ static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
2438 if (tmp & L2_BUSY) 2438 if (tmp & L2_BUSY)
2439 reset_mask |= RADEON_RESET_VMC; 2439 reset_mask |= RADEON_RESET_VMC;
2440 2440
2441 /* Skip MC reset as it's most likely not hung, just busy */
2442 if (reset_mask & RADEON_RESET_MC) {
2443 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
2444 reset_mask &= ~RADEON_RESET_MC;
2445 }
2446
2441 return reset_mask; 2447 return reset_mask;
2442} 2448}
2443 2449
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 99fb13286fd0..eb8ac315f92f 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -834,7 +834,7 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
834 __func__, __LINE__, toffset, surf.base_align); 834 __func__, __LINE__, toffset, surf.base_align);
835 return -EINVAL; 835 return -EINVAL;
836 } 836 }
837 if (moffset & (surf.base_align - 1)) { 837 if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
838 dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n", 838 dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
839 __func__, __LINE__, moffset, surf.base_align); 839 __func__, __LINE__, moffset, surf.base_align);
840 return -EINVAL; 840 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7cead763be9e..d4c633e12863 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1381,6 +1381,12 @@ static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
1381 if (tmp & L2_BUSY) 1381 if (tmp & L2_BUSY)
1382 reset_mask |= RADEON_RESET_VMC; 1382 reset_mask |= RADEON_RESET_VMC;
1383 1383
1384 /* Skip MC reset as it's most likely not hung, just busy */
1385 if (reset_mask & RADEON_RESET_MC) {
1386 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1387 reset_mask &= ~RADEON_RESET_MC;
1388 }
1389
1384 return reset_mask; 1390 return reset_mask;
1385} 1391}
1386 1392
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6d4b5611daf4..0740db3fcd22 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1394,6 +1394,12 @@ static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
1394 if (r600_is_display_hung(rdev)) 1394 if (r600_is_display_hung(rdev))
1395 reset_mask |= RADEON_RESET_DISPLAY; 1395 reset_mask |= RADEON_RESET_DISPLAY;
1396 1396
1397 /* Skip MC reset as it's most likely not hung, just busy */
1398 if (reset_mask & RADEON_RESET_MC) {
1399 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1400 reset_mask &= ~RADEON_RESET_MC;
1401 }
1402
1397 return reset_mask; 1403 return reset_mask;
1398} 1404}
1399 1405
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e403bdda58f..78edadc9e86b 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -970,6 +970,15 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
970 found = 1; 970 found = 1;
971 } 971 }
972 972
973 /* quirks */
974 /* Radeon 9100 (R200) */
975 if ((dev->pdev->device == 0x514D) &&
976 (dev->pdev->subsystem_vendor == 0x174B) &&
977 (dev->pdev->subsystem_device == 0x7149)) {
978 /* vbios value is bad, use the default */
979 found = 0;
980 }
981
973 if (!found) /* fallback to defaults */ 982 if (!found) /* fallback to defaults */
974 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); 983 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
975 984
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 167758488ed6..66a7f0fd9620 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -70,9 +70,10 @@
70 * 2.27.0 - r600-SI: Add CS ioctl support for async DMA 70 * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support 71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support
72 * 2.29.0 - R500 FP16 color clear registers 72 * 2.29.0 - R500 FP16 color clear registers
73 * 2.30.0 - fix for FMASK texturing
73 */ 74 */
74#define KMS_DRIVER_MAJOR 2 75#define KMS_DRIVER_MAJOR 2
75#define KMS_DRIVER_MINOR 29 76#define KMS_DRIVER_MINOR 30
76#define KMS_DRIVER_PATCHLEVEL 0 77#define KMS_DRIVER_PATCHLEVEL 0
77int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 78int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
78int radeon_driver_unload_kms(struct drm_device *dev); 79int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 90374dd77960..48f80cd42d8f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -400,6 +400,9 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
400{ 400{
401 unsigned long irqflags; 401 unsigned long irqflags;
402 402
403 if (!rdev->ddev->irq_enabled)
404 return;
405
403 spin_lock_irqsave(&rdev->irq.lock, irqflags); 406 spin_lock_irqsave(&rdev->irq.lock, irqflags);
404 rdev->irq.afmt[block] = true; 407 rdev->irq.afmt[block] = true;
405 radeon_irq_set(rdev); 408 radeon_irq_set(rdev);
@@ -419,6 +422,9 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
419{ 422{
420 unsigned long irqflags; 423 unsigned long irqflags;
421 424
425 if (!rdev->ddev->irq_enabled)
426 return;
427
422 spin_lock_irqsave(&rdev->irq.lock, irqflags); 428 spin_lock_irqsave(&rdev->irq.lock, irqflags);
423 rdev->irq.afmt[block] = false; 429 rdev->irq.afmt[block] = false;
424 radeon_irq_set(rdev); 430 radeon_irq_set(rdev);
@@ -438,6 +444,9 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
438 unsigned long irqflags; 444 unsigned long irqflags;
439 int i; 445 int i;
440 446
447 if (!rdev->ddev->irq_enabled)
448 return;
449
441 spin_lock_irqsave(&rdev->irq.lock, irqflags); 450 spin_lock_irqsave(&rdev->irq.lock, irqflags);
442 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i) 451 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
443 rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i)); 452 rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
@@ -458,6 +467,9 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
458 unsigned long irqflags; 467 unsigned long irqflags;
459 int i; 468 int i;
460 469
470 if (!rdev->ddev->irq_enabled)
471 return;
472
461 spin_lock_irqsave(&rdev->irq.lock, irqflags); 473 spin_lock_irqsave(&rdev->irq.lock, irqflags);
462 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i) 474 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
463 rdev->irq.hpd[i] &= !(hpd_mask & (1 << i)); 475 rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 80979ed951eb..9128120da044 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2284,6 +2284,12 @@ static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
2284 if (tmp & L2_BUSY) 2284 if (tmp & L2_BUSY)
2285 reset_mask |= RADEON_RESET_VMC; 2285 reset_mask |= RADEON_RESET_VMC;
2286 2286
2287 /* Skip MC reset as it's most likely not hung, just busy */
2288 if (reset_mask & RADEON_RESET_MC) {
2289 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
2290 reset_mask &= ~RADEON_RESET_MC;
2291 }
2292
2287 return reset_mask; 2293 return reset_mask;
2288} 2294}
2289 2295
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index c92955df0658..be1daf7344d3 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -4,7 +4,6 @@ config DRM_TEGRA
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_GEM_CMA_HELPER 5 select DRM_GEM_CMA_HELPER
6 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
7 select DRM_HDMI
8 select FB_CFB_FILLRECT 7 select FB_CFB_FILLRECT
9 select FB_CFB_COPYAREA 8 select FB_CFB_COPYAREA
10 select FB_CFB_IMAGEBLIT 9 select FB_CFB_IMAGEBLIT
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 9500f2f3f8fe..8758f38c948c 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -459,19 +459,25 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
459 struct dj_report *dj_report) 459 struct dj_report *dj_report)
460{ 460{
461 struct hid_device *hdev = djrcv_dev->hdev; 461 struct hid_device *hdev = djrcv_dev->hdev;
462 int sent_bytes; 462 struct hid_report *report;
463 struct hid_report_enum *output_report_enum;
464 u8 *data = (u8 *)(&dj_report->device_index);
465 int i;
463 466
464 if (!hdev->hid_output_raw_report) { 467 output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
465 dev_err(&hdev->dev, "%s:" 468 report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
466 "hid_output_raw_report is null\n", __func__); 469
470 if (!report) {
471 dev_err(&hdev->dev, "%s: unable to find dj report\n", __func__);
467 return -ENODEV; 472 return -ENODEV;
468 } 473 }
469 474
470 sent_bytes = hdev->hid_output_raw_report(hdev, (u8 *) dj_report, 475 for (i = 0; i < report->field[0]->report_count; i++)
471 sizeof(struct dj_report), 476 report->field[0]->value[i] = data[i];
472 HID_OUTPUT_REPORT); 477
478 usbhid_submit_report(hdev, report, USB_DIR_OUT);
473 479
474 return (sent_bytes < 0) ? sent_bytes : 0; 480 return 0;
475} 481}
476 482
477static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) 483static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 9652a2c92a24..a58de38e23d8 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -62,7 +62,7 @@ struct ltc2978_data {
62 int temp_min, temp_max; 62 int temp_min, temp_max;
63 int vout_min[8], vout_max[8]; 63 int vout_min[8], vout_max[8];
64 int iout_max[2]; 64 int iout_max[2];
65 int temp2_max[2]; 65 int temp2_max;
66 struct pmbus_driver_info info; 66 struct pmbus_driver_info info;
67}; 67};
68 68
@@ -204,10 +204,9 @@ static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
204 ret = pmbus_read_word_data(client, page, 204 ret = pmbus_read_word_data(client, page,
205 LTC3880_MFR_TEMPERATURE2_PEAK); 205 LTC3880_MFR_TEMPERATURE2_PEAK);
206 if (ret >= 0) { 206 if (ret >= 0) {
207 if (lin11_to_val(ret) 207 if (lin11_to_val(ret) > lin11_to_val(data->temp2_max))
208 > lin11_to_val(data->temp2_max[page])) 208 data->temp2_max = ret;
209 data->temp2_max[page] = ret; 209 ret = data->temp2_max;
210 ret = data->temp2_max[page];
211 } 210 }
212 break; 211 break;
213 case PMBUS_VIRT_READ_VIN_MIN: 212 case PMBUS_VIRT_READ_VIN_MIN:
@@ -248,11 +247,11 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
248 247
249 switch (reg) { 248 switch (reg) {
250 case PMBUS_VIRT_RESET_IOUT_HISTORY: 249 case PMBUS_VIRT_RESET_IOUT_HISTORY:
251 data->iout_max[page] = 0x7fff; 250 data->iout_max[page] = 0x7c00;
252 ret = ltc2978_clear_peaks(client, page, data->id); 251 ret = ltc2978_clear_peaks(client, page, data->id);
253 break; 252 break;
254 case PMBUS_VIRT_RESET_TEMP2_HISTORY: 253 case PMBUS_VIRT_RESET_TEMP2_HISTORY:
255 data->temp2_max[page] = 0x7fff; 254 data->temp2_max = 0x7c00;
256 ret = ltc2978_clear_peaks(client, page, data->id); 255 ret = ltc2978_clear_peaks(client, page, data->id);
257 break; 256 break;
258 case PMBUS_VIRT_RESET_VOUT_HISTORY: 257 case PMBUS_VIRT_RESET_VOUT_HISTORY:
@@ -262,12 +261,12 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
262 break; 261 break;
263 case PMBUS_VIRT_RESET_VIN_HISTORY: 262 case PMBUS_VIRT_RESET_VIN_HISTORY:
264 data->vin_min = 0x7bff; 263 data->vin_min = 0x7bff;
265 data->vin_max = 0; 264 data->vin_max = 0x7c00;
266 ret = ltc2978_clear_peaks(client, page, data->id); 265 ret = ltc2978_clear_peaks(client, page, data->id);
267 break; 266 break;
268 case PMBUS_VIRT_RESET_TEMP_HISTORY: 267 case PMBUS_VIRT_RESET_TEMP_HISTORY:
269 data->temp_min = 0x7bff; 268 data->temp_min = 0x7bff;
270 data->temp_max = 0x7fff; 269 data->temp_max = 0x7c00;
271 ret = ltc2978_clear_peaks(client, page, data->id); 270 ret = ltc2978_clear_peaks(client, page, data->id);
272 break; 271 break;
273 default: 272 default:
@@ -321,12 +320,13 @@ static int ltc2978_probe(struct i2c_client *client,
321 info = &data->info; 320 info = &data->info;
322 info->write_word_data = ltc2978_write_word_data; 321 info->write_word_data = ltc2978_write_word_data;
323 322
324 data->vout_min[0] = 0xffff;
325 data->vin_min = 0x7bff; 323 data->vin_min = 0x7bff;
324 data->vin_max = 0x7c00;
326 data->temp_min = 0x7bff; 325 data->temp_min = 0x7bff;
327 data->temp_max = 0x7fff; 326 data->temp_max = 0x7c00;
327 data->temp2_max = 0x7c00;
328 328
329 switch (id->driver_data) { 329 switch (data->id) {
330 case ltc2978: 330 case ltc2978:
331 info->read_word_data = ltc2978_read_word_data; 331 info->read_word_data = ltc2978_read_word_data;
332 info->pages = 8; 332 info->pages = 8;
@@ -336,7 +336,6 @@ static int ltc2978_probe(struct i2c_client *client,
336 for (i = 1; i < 8; i++) { 336 for (i = 1; i < 8; i++) {
337 info->func[i] = PMBUS_HAVE_VOUT 337 info->func[i] = PMBUS_HAVE_VOUT
338 | PMBUS_HAVE_STATUS_VOUT; 338 | PMBUS_HAVE_STATUS_VOUT;
339 data->vout_min[i] = 0xffff;
340 } 339 }
341 break; 340 break;
342 case ltc3880: 341 case ltc3880:
@@ -352,11 +351,14 @@ static int ltc2978_probe(struct i2c_client *client,
352 | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT 351 | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
353 | PMBUS_HAVE_POUT 352 | PMBUS_HAVE_POUT
354 | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP; 353 | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
355 data->vout_min[1] = 0xffff; 354 data->iout_max[0] = 0x7c00;
355 data->iout_max[1] = 0x7c00;
356 break; 356 break;
357 default: 357 default:
358 return -ENODEV; 358 return -ENODEV;
359 } 359 }
360 for (i = 0; i < info->pages; i++)
361 data->vout_min[i] = 0xffff;
360 362
361 return pmbus_do_probe(client, id, info); 363 return pmbus_do_probe(client, id, info);
362} 364}
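
The reset values used above are the two extremes of the PMBus LINEAR11 encoding: 0x7bff decodes to the largest positive value (+1023 * 2^15) and 0x7c00 to the most negative (-1024 * 2^15), so newly cleared minimums (seeded with 0x7bff) and maximums (seeded with 0x7c00) are always replaced by the first real sample. A minimal sketch of the decode the driver's lin11_to_val() helper performs, assuming the standard 5-bit exponent / 11-bit mantissa split; lin11_to_long is an illustrative name, not the driver's function:

#include <linux/types.h>

/*
 * Illustrative LINEAR11 decode: the top 5 bits are a signed exponent,
 * the low 11 bits a signed mantissa, and the value is mantissa * 2^exponent.
 */
static long lin11_to_long(u16 word)
{
	int exponent = (s16)word >> 11;				/* sign-extend top 5 bits */
	long mantissa = ((s16)((word & 0x7ff) << 5)) >> 5;	/* sign-extend low 11 bits */

	return exponent >= 0 ? mantissa << exponent : mantissa >> -exponent;
}

/*
 * lin11_to_long(0x7bff) ==  1023 * 2^15  (largest positive LINEAR11 value)
 * lin11_to_long(0x7c00) == -1024 * 2^15  (most negative LINEAR11 value)
 */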
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index bfe326e896df..2507f902fb7a 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -965,7 +965,13 @@ static int sht15_probe(struct platform_device *pdev)
965 if (voltage) 965 if (voltage)
966 data->supply_uv = voltage; 966 data->supply_uv = voltage;
967 967
968 regulator_enable(data->reg); 968 ret = regulator_enable(data->reg);
969 if (ret != 0) {
970 dev_err(&pdev->dev,
971 "failed to enable regulator: %d\n", ret);
972 return ret;
973 }
974
969 /* 975 /*
970 * Setup a notifier block to update this if another device 976 * Setup a notifier block to update this if another device
971 * causes the voltage to change 977 * causes the voltage to change
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index dc7e478b7e5f..e5cdaf87822c 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1083,6 +1083,7 @@ static const char *dma_remap_fault_reasons[] =
1083 "non-zero reserved fields in RTP", 1083 "non-zero reserved fields in RTP",
1084 "non-zero reserved fields in CTP", 1084 "non-zero reserved fields in CTP",
1085 "non-zero reserved fields in PTE", 1085 "non-zero reserved fields in PTE",
1086 "PCE for translation request specifies blocking",
1086}; 1087};
1087 1088
1088static const char *irq_remap_fault_reasons[] = 1089static const char *irq_remap_fault_reasons[] =
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index 017c67ea3f4c..ead0a4fb7448 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -294,13 +294,13 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
294 // Allocate URBs and buffers for interrupt endpoint 294 // Allocate URBs and buffers for interrupt endpoint
295 urb = usb_alloc_urb(0, GFP_KERNEL); 295 urb = usb_alloc_urb(0, GFP_KERNEL);
296 if (!urb) { 296 if (!urb) {
297 return -ENOMEM; 297 goto err1;
298 } 298 }
299 intr->urb = urb; 299 intr->urb = urb;
300 300
301 buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL); 301 buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL);
302 if (!buf) { 302 if (!buf) {
303 return -ENOMEM; 303 goto err2;
304 } 304 }
305 305
306 endpoint = &altsetting->endpoint[EP_INT-1]; 306 endpoint = &altsetting->endpoint[EP_INT-1];
@@ -313,6 +313,14 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
313 endpoint->desc.bInterval); 313 endpoint->desc.bInterval);
314 314
315 return 0; 315 return 0;
316err2:
317 usb_free_urb(intr->urb);
318 intr->urb = NULL;
319err1:
320 usb_free_urb(ctrl->urb);
321 ctrl->urb = NULL;
322
323 return -ENOMEM;
316} 324}
317 325
318/* 326/*
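
The error path now follows the usual kernel unwind idiom: each failed allocation jumps to a label that frees only what was set up before it, instead of returning -ENOMEM and leaking the earlier URB. A generic sketch of the pattern with illustrative names (struct example and setup_example are not part of the driver):

#include <linux/slab.h>
#include <linux/usb.h>

struct example {
	struct urb *ctrl_urb;
	struct urb *int_urb;
	u8 *buf;
};

/* Illustrative only: allocate two URBs and a buffer, unwinding on failure. */
static int setup_example(struct example *ex, size_t bufsize)
{
	ex->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!ex->ctrl_urb)
		return -ENOMEM;			/* nothing to undo yet */

	ex->int_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!ex->int_urb)
		goto err_free_ctrl;

	ex->buf = kmalloc(bufsize, GFP_KERNEL);
	if (!ex->buf)
		goto err_free_int;

	return 0;

err_free_int:
	usb_free_urb(ex->int_urb);
	ex->int_urb = NULL;
err_free_ctrl:
	usb_free_urb(ex->ctrl_urb);
	ex->ctrl_urb = NULL;
	return -ENOMEM;
}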
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
index c45b3aedafba..d873cbae2fbb 100644
--- a/drivers/mailbox/pl320-ipc.c
+++ b/drivers/mailbox/pl320-ipc.c
@@ -138,8 +138,7 @@ int pl320_ipc_unregister_notifier(struct notifier_block *nb)
138} 138}
139EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier); 139EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
140 140
141static int __init pl320_probe(struct amba_device *adev, 141static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
142 const struct amba_id *id)
143{ 142{
144 int ret; 143 int ret;
145 144
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index e30b490055aa..4d8d90b4fe78 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -154,17 +154,6 @@ config MD_RAID456
154 154
155 If unsure, say Y. 155 If unsure, say Y.
156 156
157config MULTICORE_RAID456
158 bool "RAID-4/RAID-5/RAID-6 Multicore processing (EXPERIMENTAL)"
159 depends on MD_RAID456
160 depends on SMP
161 depends on EXPERIMENTAL
162 ---help---
163 Enable the raid456 module to dispatch per-stripe raid operations to a
164 thread pool.
165
166 If unsure, say N.
167
168config MD_MULTIPATH 157config MD_MULTIPATH
169 tristate "Multipath I/O support" 158 tristate "Multipath I/O support"
170 depends on BLK_DEV_MD 159 depends on BLK_DEV_MD
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9a01d1e4c783..311e3d35b272 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -91,15 +91,44 @@ static struct raid_type {
91 {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE} 91 {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
92}; 92};
93 93
94static char *raid10_md_layout_to_format(int layout)
95{
96 /*
97 * Bit 16 and 17 stand for "offset" and "use_far_sets"
98 * Refer to MD's raid10.c for details
99 */
100 if ((layout & 0x10000) && (layout & 0x20000))
101 return "offset";
102
103 if ((layout & 0xFF) > 1)
104 return "near";
105
106 return "far";
107}
108
94static unsigned raid10_md_layout_to_copies(int layout) 109static unsigned raid10_md_layout_to_copies(int layout)
95{ 110{
96 return layout & 0xFF; 111 if ((layout & 0xFF) > 1)
112 return layout & 0xFF;
113 return (layout >> 8) & 0xFF;
97} 114}
98 115
99static int raid10_format_to_md_layout(char *format, unsigned copies) 116static int raid10_format_to_md_layout(char *format, unsigned copies)
100{ 117{
101 /* 1 "far" copy, and 'copies' "near" copies */ 118 unsigned n = 1, f = 1;
102 return (1 << 8) | (copies & 0xFF); 119
120 if (!strcmp("near", format))
121 n = copies;
122 else
123 f = copies;
124
125 if (!strcmp("offset", format))
126 return 0x30000 | (f << 8) | n;
127
128 if (!strcmp("far", format))
129 return 0x20000 | (f << 8) | n;
130
131 return (f << 8) | n;
103} 132}
104 133
105static struct raid_type *get_raid_type(char *name) 134static struct raid_type *get_raid_type(char *name)
@@ -352,6 +381,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
352{ 381{
353 unsigned i, rebuild_cnt = 0; 382 unsigned i, rebuild_cnt = 0;
354 unsigned rebuilds_per_group, copies, d; 383 unsigned rebuilds_per_group, copies, d;
384 unsigned group_size, last_group_start;
355 385
356 for (i = 0; i < rs->md.raid_disks; i++) 386 for (i = 0; i < rs->md.raid_disks; i++)
357 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || 387 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
@@ -379,9 +409,6 @@ static int validate_raid_redundancy(struct raid_set *rs)
379 * as long as the failed devices occur in different mirror 409 * as long as the failed devices occur in different mirror
380 * groups (i.e. different stripes). 410 * groups (i.e. different stripes).
381 * 411 *
382 * Right now, we only allow for "near" copies. When other
383 * formats are added, we will have to check those too.
384 *
385 * When checking "near" format, make sure no adjacent devices 412 * When checking "near" format, make sure no adjacent devices
386 * have failed beyond what can be handled. In addition to the 413 * have failed beyond what can be handled. In addition to the
387 * simple case where the number of devices is a multiple of the 414 * simple case where the number of devices is a multiple of the
@@ -391,14 +418,41 @@ static int validate_raid_redundancy(struct raid_set *rs)
391 * A A B B C 418 * A A B B C
392 * C D D E E 419 * C D D E E
393 */ 420 */
394 for (i = 0; i < rs->md.raid_disks * copies; i++) { 421 if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
395 if (!(i % copies)) 422 for (i = 0; i < rs->md.raid_disks * copies; i++) {
423 if (!(i % copies))
424 rebuilds_per_group = 0;
425 d = i % rs->md.raid_disks;
426 if ((!rs->dev[d].rdev.sb_page ||
427 !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
428 (++rebuilds_per_group >= copies))
429 goto too_many;
430 }
431 break;
432 }
433
434 /*
435 * When checking "far" and "offset" formats, we need to ensure
436 * that the device that holds its copy is not also dead or
437 * being rebuilt. (Note that "far" and "offset" formats only
438 * support two copies right now. These formats also only ever
439 * use the 'use_far_sets' variant.)
440 *
441 * This check is somewhat complicated by the need to account
442 * for arrays that are not a multiple of (far) copies. This
443 * results in the need to treat the last (potentially larger)
444 * set differently.
445 */
446 group_size = (rs->md.raid_disks / copies);
447 last_group_start = (rs->md.raid_disks / group_size) - 1;
448 last_group_start *= group_size;
449 for (i = 0; i < rs->md.raid_disks; i++) {
450 if (!(i % copies) && !(i > last_group_start))
396 rebuilds_per_group = 0; 451 rebuilds_per_group = 0;
397 d = i % rs->md.raid_disks; 452 if ((!rs->dev[i].rdev.sb_page ||
398 if ((!rs->dev[d].rdev.sb_page || 453 !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
399 !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
400 (++rebuilds_per_group >= copies)) 454 (++rebuilds_per_group >= copies))
401 goto too_many; 455 goto too_many;
402 } 456 }
403 break; 457 break;
404 default: 458 default:
@@ -433,7 +487,7 @@ too_many:
433 * 487 *
434 * RAID10-only options: 488 * RAID10-only options:
435 * [raid10_copies <# copies>] Number of copies. (Default: 2) 489 * [raid10_copies <# copies>] Number of copies. (Default: 2)
436 * [raid10_format <near>] Layout algorithm. (Default: near) 490 * [raid10_format <near|far|offset>] Layout algorithm. (Default: near)
437 */ 491 */
438static int parse_raid_params(struct raid_set *rs, char **argv, 492static int parse_raid_params(struct raid_set *rs, char **argv,
439 unsigned num_raid_params) 493 unsigned num_raid_params)
@@ -520,7 +574,9 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
520 rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; 574 rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
521 return -EINVAL; 575 return -EINVAL;
522 } 576 }
523 if (strcmp("near", argv[i])) { 577 if (strcmp("near", argv[i]) &&
578 strcmp("far", argv[i]) &&
579 strcmp("offset", argv[i])) {
524 rs->ti->error = "Invalid 'raid10_format' value given"; 580 rs->ti->error = "Invalid 'raid10_format' value given";
525 return -EINVAL; 581 return -EINVAL;
526 } 582 }
@@ -644,6 +700,15 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
644 return -EINVAL; 700 return -EINVAL;
645 } 701 }
646 702
703 /*
704 * If the format is not "near", we only support
705 * two copies at the moment.
706 */
707 if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
708 rs->ti->error = "Too many copies for given RAID10 format.";
709 return -EINVAL;
710 }
711
647 /* (Len * #mirrors) / #devices */ 712 /* (Len * #mirrors) / #devices */
648 sectors_per_dev = rs->ti->len * raid10_copies; 713 sectors_per_dev = rs->ti->len * raid10_copies;
649 sector_div(sectors_per_dev, rs->md.raid_disks); 714 sector_div(sectors_per_dev, rs->md.raid_disks);
@@ -854,17 +919,30 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
854 /* 919 /*
855 * Reshaping is not currently allowed 920 * Reshaping is not currently allowed
856 */ 921 */
857 if ((le32_to_cpu(sb->level) != mddev->level) || 922 if (le32_to_cpu(sb->level) != mddev->level) {
858 (le32_to_cpu(sb->layout) != mddev->layout) || 923 DMERR("Reshaping arrays not yet supported. (RAID level change)");
859 (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) { 924 return -EINVAL;
860 DMERR("Reshaping arrays not yet supported."); 925 }
926 if (le32_to_cpu(sb->layout) != mddev->layout) {
927 DMERR("Reshaping arrays not yet supported. (RAID layout change)");
928 DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
929 DMERR(" Old layout: %s w/ %d copies",
930 raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
931 raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
932 DMERR(" New layout: %s w/ %d copies",
933 raid10_md_layout_to_format(mddev->layout),
934 raid10_md_layout_to_copies(mddev->layout));
935 return -EINVAL;
936 }
937 if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
938 DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
861 return -EINVAL; 939 return -EINVAL;
862 } 940 }
863 941
864 /* We can only change the number of devices in RAID1 right now */ 942 /* We can only change the number of devices in RAID1 right now */
865 if ((rs->raid_type->level != 1) && 943 if ((rs->raid_type->level != 1) &&
866 (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) { 944 (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
867 DMERR("Reshaping arrays not yet supported."); 945 DMERR("Reshaping arrays not yet supported. (device count change)");
868 return -EINVAL; 946 return -EINVAL;
869 } 947 }
870 948
@@ -1329,7 +1407,8 @@ static void raid_status(struct dm_target *ti, status_type_t type,
1329 raid10_md_layout_to_copies(rs->md.layout)); 1407 raid10_md_layout_to_copies(rs->md.layout));
1330 1408
1331 if (rs->print_flags & DMPF_RAID10_FORMAT) 1409 if (rs->print_flags & DMPF_RAID10_FORMAT)
1332 DMEMIT(" raid10_format near"); 1410 DMEMIT(" raid10_format %s",
1411 raid10_md_layout_to_format(rs->md.layout));
1333 1412
1334 DMEMIT(" %d", rs->md.raid_disks); 1413 DMEMIT(" %d", rs->md.raid_disks);
1335 for (i = 0; i < rs->md.raid_disks; i++) { 1414 for (i = 0; i < rs->md.raid_disks; i++) {
@@ -1418,6 +1497,10 @@ static struct target_type raid_target = {
1418 1497
1419static int __init dm_raid_init(void) 1498static int __init dm_raid_init(void)
1420{ 1499{
1500 DMINFO("Loading target version %u.%u.%u",
1501 raid_target.version[0],
1502 raid_target.version[1],
1503 raid_target.version[2]);
1421 return dm_register_target(&raid_target); 1504 return dm_register_target(&raid_target);
1422} 1505}
1423 1506
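
For reference, the layout words built by raid10_format_to_md_layout() above work out to: "near" with 2 copies -> 0x00102 (near count in the low byte), "far" with 2 copies -> 0x20201 (far count in byte 1 plus bit 17 for use_far_sets), and "offset" with 2 copies -> 0x30201 (bits 16 and 17 set). A small, illustrative round-trip through the new helpers, assuming it sits alongside them in dm-raid.c; raid10_layout_demo is a made-up name, not part of the target:

/* Illustrative only: print the layout word for each supported format. */
static void __maybe_unused raid10_layout_demo(void)
{
	static const char * const formats[] = { "near", "far", "offset" };
	int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		/* Expected: near -> 0x00102, far -> 0x20201, offset -> 0x30201 */
		int layout = raid10_format_to_md_layout((char *)formats[i], 2);

		DMINFO("%s with 2 copies -> layout 0x%05x (%s, %u copies)",
		       formats[i], layout,
		       raid10_md_layout_to_format(layout),
		       raid10_md_layout_to_copies(layout));
	}
}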
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3db3d1b271f7..fcb878f88796 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -307,6 +307,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
307 bio_io_error(bio); 307 bio_io_error(bio);
308 return; 308 return;
309 } 309 }
310 if (mddev->ro == 1 && unlikely(rw == WRITE)) {
311 bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
312 return;
313 }
310 smp_rmb(); /* Ensure implications of 'active' are visible */ 314 smp_rmb(); /* Ensure implications of 'active' are visible */
311 rcu_read_lock(); 315 rcu_read_lock();
312 if (mddev->suspended) { 316 if (mddev->suspended) {
@@ -2994,6 +2998,9 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2994 } else if (!sectors) 2998 } else if (!sectors)
2995 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - 2999 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2996 rdev->data_offset; 3000 rdev->data_offset;
3001 if (!my_mddev->pers->resize)
3002 /* Cannot change size for RAID0 or Linear etc */
3003 return -EINVAL;
2997 } 3004 }
2998 if (sectors < my_mddev->dev_sectors) 3005 if (sectors < my_mddev->dev_sectors)
2999 return -EINVAL; /* component must fit device */ 3006 return -EINVAL; /* component must fit device */
@@ -6525,7 +6532,17 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
6525 mddev->ro = 0; 6532 mddev->ro = 0;
6526 sysfs_notify_dirent_safe(mddev->sysfs_state); 6533 sysfs_notify_dirent_safe(mddev->sysfs_state);
6527 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6534 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6528 md_wakeup_thread(mddev->thread); 6535 /* mddev_unlock will wake thread */
6536 /* If a device failed while we were read-only, we
6537 * need to make sure the metadata is updated now.
6538 */
6539 if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
6540 mddev_unlock(mddev);
6541 wait_event(mddev->sb_wait,
6542 !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
6543 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6544 mddev_lock(mddev);
6545 }
6529 } else { 6546 } else {
6530 err = -EROFS; 6547 err = -EROFS;
6531 goto abort_unlock; 6548 goto abort_unlock;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 24b359717a7e..0505452de8d6 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -175,7 +175,13 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
175 rdev1->new_raid_disk = j; 175 rdev1->new_raid_disk = j;
176 } 176 }
177 177
178 if (j < 0 || j >= mddev->raid_disks) { 178 if (j < 0) {
179 printk(KERN_ERR
180 "md/raid0:%s: remove inactive devices before converting to RAID0\n",
181 mdname(mddev));
182 goto abort;
183 }
184 if (j >= mddev->raid_disks) {
179 printk(KERN_ERR "md/raid0:%s: bad disk number %d - " 185 printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
180 "aborting!\n", mdname(mddev), j); 186 "aborting!\n", mdname(mddev), j);
181 goto abort; 187 goto abort;
@@ -289,7 +295,7 @@ abort:
289 kfree(conf->strip_zone); 295 kfree(conf->strip_zone);
290 kfree(conf->devlist); 296 kfree(conf->devlist);
291 kfree(conf); 297 kfree(conf);
292 *private_conf = NULL; 298 *private_conf = ERR_PTR(err);
293 return err; 299 return err;
294} 300}
295 301
@@ -411,7 +417,8 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
411 "%s does not support generic reshape\n", __func__); 417 "%s does not support generic reshape\n", __func__);
412 418
413 rdev_for_each(rdev, mddev) 419 rdev_for_each(rdev, mddev)
414 array_sectors += rdev->sectors; 420 array_sectors += (rdev->sectors &
421 ~(sector_t)(mddev->chunk_sectors-1));
415 422
416 return array_sectors; 423 return array_sectors;
417} 424}
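
The new size calculation rounds each member down to a whole number of chunks; because mddev->chunk_sectors is a power of two, the bitmask is exact. A minimal illustration of the arithmetic (usable_sectors is a hypothetical helper, not in the driver):

/* Illustrative only: drop the partial trailing chunk of a member device. */
static sector_t usable_sectors(sector_t rdev_sectors, unsigned int chunk_sectors)
{
	return rdev_sectors & ~(sector_t)(chunk_sectors - 1);
}

/*
 * Example with 64 KiB chunks (chunk_sectors = 128):
 *   usable_sectors(1000, 128) == 896 -- seven full chunks; the 104-sector
 *   tail cannot be striped and no longer inflates the reported array size.
 */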
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d5bddfc4010e..fd86b372692d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -967,6 +967,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
967 bio_list_merge(&conf->pending_bio_list, &plug->pending); 967 bio_list_merge(&conf->pending_bio_list, &plug->pending);
968 conf->pending_count += plug->pending_cnt; 968 conf->pending_count += plug->pending_cnt;
969 spin_unlock_irq(&conf->device_lock); 969 spin_unlock_irq(&conf->device_lock);
970 wake_up(&conf->wait_barrier);
970 md_wakeup_thread(mddev->thread); 971 md_wakeup_thread(mddev->thread);
971 kfree(plug); 972 kfree(plug);
972 return; 973 return;
@@ -1000,6 +1001,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1000 const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); 1001 const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
1001 const unsigned long do_discard = (bio->bi_rw 1002 const unsigned long do_discard = (bio->bi_rw
1002 & (REQ_DISCARD | REQ_SECURE)); 1003 & (REQ_DISCARD | REQ_SECURE));
1004 const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
1003 struct md_rdev *blocked_rdev; 1005 struct md_rdev *blocked_rdev;
1004 struct blk_plug_cb *cb; 1006 struct blk_plug_cb *cb;
1005 struct raid1_plug_cb *plug = NULL; 1007 struct raid1_plug_cb *plug = NULL;
@@ -1301,7 +1303,8 @@ read_again:
1301 conf->mirrors[i].rdev->data_offset); 1303 conf->mirrors[i].rdev->data_offset);
1302 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1304 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1303 mbio->bi_end_io = raid1_end_write_request; 1305 mbio->bi_end_io = raid1_end_write_request;
1304 mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard; 1306 mbio->bi_rw =
1307 WRITE | do_flush_fua | do_sync | do_discard | do_same;
1305 mbio->bi_private = r1_bio; 1308 mbio->bi_private = r1_bio;
1306 1309
1307 atomic_inc(&r1_bio->remaining); 1310 atomic_inc(&r1_bio->remaining);
@@ -2818,6 +2821,9 @@ static int run(struct mddev *mddev)
2818 if (IS_ERR(conf)) 2821 if (IS_ERR(conf))
2819 return PTR_ERR(conf); 2822 return PTR_ERR(conf);
2820 2823
2824 if (mddev->queue)
2825 blk_queue_max_write_same_sectors(mddev->queue,
2826 mddev->chunk_sectors);
2821 rdev_for_each(rdev, mddev) { 2827 rdev_for_each(rdev, mddev) {
2822 if (!mddev->gendisk) 2828 if (!mddev->gendisk)
2823 continue; 2829 continue;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 64d48249c03b..77b562d18a90 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -38,21 +38,36 @@
38 * near_copies (stored in low byte of layout) 38 * near_copies (stored in low byte of layout)
39 * far_copies (stored in second byte of layout) 39 * far_copies (stored in second byte of layout)
40 * far_offset (stored in bit 16 of layout ) 40 * far_offset (stored in bit 16 of layout )
41 * use_far_sets (stored in bit 17 of layout )
41 * 42 *
42 * The data to be stored is divided into chunks using chunksize. 43 * The data to be stored is divided into chunks using chunksize. Each device
43 * Each device is divided into far_copies sections. 44 * is divided into far_copies sections. In each section, chunks are laid out
44 * In each section, chunks are laid out in a style similar to raid0, but 45 * in a style similar to raid0, but near_copies copies of each chunk is stored
45 * near_copies copies of each chunk is stored (each on a different drive). 46 * (each on a different drive). The starting device for each section is offset
46 * The starting device for each section is offset near_copies from the starting 47 * near_copies from the starting device of the previous section. Thus there
47 * device of the previous section. 48 * are (near_copies * far_copies) of each chunk, and each is on a different
48 * Thus they are (near_copies*far_copies) of each chunk, and each is on a different 49 * drive. near_copies and far_copies must be at least one, and their product
49 * drive. 50 * is at most raid_disks.
50 * near_copies and far_copies must be at least one, and their product is at most
51 * raid_disks.
52 * 51 *
53 * If far_offset is true, then the far_copies are handled a bit differently. 52 * If far_offset is true, then the far_copies are handled a bit differently.
54 * The copies are still in different stripes, but instead of be very far apart 53 * The copies are still in different stripes, but instead of being very far
55 * on disk, there are adjacent stripes. 54 * apart on disk, there are adjacent stripes.
55 *
56 * The far and offset algorithms are handled slightly differently if
57 * 'use_far_sets' is true. In this case, the array's devices are grouped into
58 * sets that are (near_copies * far_copies) in size. The far copied stripes
59 * are still shifted by 'near_copies' devices, but this shifting stays confined
60 * to the set rather than the entire array. This is done to improve the number
61 * of device combinations that can fail without causing the array to fail.
62 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
63 * on a device):
64 * A B C D A B C D E
65 * ... ...
66 * D A B C E A B C D
67 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
68 * [A B] [C D] [A B] [C D E]
69 * |...| |...| |...| | ... |
70 * [B A] [D C] [B A] [E C D]
56 */ 71 */
57 72
58/* 73/*
@@ -535,6 +550,13 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
535 sector_t stripe; 550 sector_t stripe;
536 int dev; 551 int dev;
537 int slot = 0; 552 int slot = 0;
553 int last_far_set_start, last_far_set_size;
554
555 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
556 last_far_set_start *= geo->far_set_size;
557
558 last_far_set_size = geo->far_set_size;
559 last_far_set_size += (geo->raid_disks % geo->far_set_size);
538 560
539 /* now calculate first sector/dev */ 561 /* now calculate first sector/dev */
540 chunk = r10bio->sector >> geo->chunk_shift; 562 chunk = r10bio->sector >> geo->chunk_shift;
@@ -551,15 +573,25 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
551 /* and calculate all the others */ 573 /* and calculate all the others */
552 for (n = 0; n < geo->near_copies; n++) { 574 for (n = 0; n < geo->near_copies; n++) {
553 int d = dev; 575 int d = dev;
576 int set;
554 sector_t s = sector; 577 sector_t s = sector;
555 r10bio->devs[slot].addr = sector;
556 r10bio->devs[slot].devnum = d; 578 r10bio->devs[slot].devnum = d;
579 r10bio->devs[slot].addr = s;
557 slot++; 580 slot++;
558 581
559 for (f = 1; f < geo->far_copies; f++) { 582 for (f = 1; f < geo->far_copies; f++) {
583 set = d / geo->far_set_size;
560 d += geo->near_copies; 584 d += geo->near_copies;
561 if (d >= geo->raid_disks) 585
562 d -= geo->raid_disks; 586 if ((geo->raid_disks % geo->far_set_size) &&
587 (d > last_far_set_start)) {
588 d -= last_far_set_start;
589 d %= last_far_set_size;
590 d += last_far_set_start;
591 } else {
592 d %= geo->far_set_size;
593 d += geo->far_set_size * set;
594 }
563 s += geo->stride; 595 s += geo->stride;
564 r10bio->devs[slot].devnum = d; 596 r10bio->devs[slot].devnum = d;
565 r10bio->devs[slot].addr = s; 597 r10bio->devs[slot].addr = s;
@@ -595,6 +627,20 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
595 * or recovery, so reshape isn't happening 627 * or recovery, so reshape isn't happening
596 */ 628 */
597 struct geom *geo = &conf->geo; 629 struct geom *geo = &conf->geo;
630 int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
631 int far_set_size = geo->far_set_size;
632 int last_far_set_start;
633
634 if (geo->raid_disks % geo->far_set_size) {
635 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
636 last_far_set_start *= geo->far_set_size;
637
638 if (dev >= last_far_set_start) {
639 far_set_size = geo->far_set_size;
640 far_set_size += (geo->raid_disks % geo->far_set_size);
641 far_set_start = last_far_set_start;
642 }
643 }
598 644
599 offset = sector & geo->chunk_mask; 645 offset = sector & geo->chunk_mask;
600 if (geo->far_offset) { 646 if (geo->far_offset) {
@@ -602,13 +648,13 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
602 chunk = sector >> geo->chunk_shift; 648 chunk = sector >> geo->chunk_shift;
603 fc = sector_div(chunk, geo->far_copies); 649 fc = sector_div(chunk, geo->far_copies);
604 dev -= fc * geo->near_copies; 650 dev -= fc * geo->near_copies;
605 if (dev < 0) 651 if (dev < far_set_start)
606 dev += geo->raid_disks; 652 dev += far_set_size;
607 } else { 653 } else {
608 while (sector >= geo->stride) { 654 while (sector >= geo->stride) {
609 sector -= geo->stride; 655 sector -= geo->stride;
610 if (dev < geo->near_copies) 656 if (dev < (geo->near_copies + far_set_start))
611 dev += geo->raid_disks - geo->near_copies; 657 dev += far_set_size - geo->near_copies;
612 else 658 else
613 dev -= geo->near_copies; 659 dev -= geo->near_copies;
614 } 660 }
@@ -1073,6 +1119,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1073 bio_list_merge(&conf->pending_bio_list, &plug->pending); 1119 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1074 conf->pending_count += plug->pending_cnt; 1120 conf->pending_count += plug->pending_cnt;
1075 spin_unlock_irq(&conf->device_lock); 1121 spin_unlock_irq(&conf->device_lock);
1122 wake_up(&conf->wait_barrier);
1076 md_wakeup_thread(mddev->thread); 1123 md_wakeup_thread(mddev->thread);
1077 kfree(plug); 1124 kfree(plug);
1078 return; 1125 return;
@@ -1105,6 +1152,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1105 const unsigned long do_fua = (bio->bi_rw & REQ_FUA); 1152 const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
1106 const unsigned long do_discard = (bio->bi_rw 1153 const unsigned long do_discard = (bio->bi_rw
1107 & (REQ_DISCARD | REQ_SECURE)); 1154 & (REQ_DISCARD | REQ_SECURE));
1155 const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
1108 unsigned long flags; 1156 unsigned long flags;
1109 struct md_rdev *blocked_rdev; 1157 struct md_rdev *blocked_rdev;
1110 struct blk_plug_cb *cb; 1158 struct blk_plug_cb *cb;
@@ -1460,7 +1508,8 @@ retry_write:
1460 rdev)); 1508 rdev));
1461 mbio->bi_bdev = rdev->bdev; 1509 mbio->bi_bdev = rdev->bdev;
1462 mbio->bi_end_io = raid10_end_write_request; 1510 mbio->bi_end_io = raid10_end_write_request;
1463 mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; 1511 mbio->bi_rw =
1512 WRITE | do_sync | do_fua | do_discard | do_same;
1464 mbio->bi_private = r10_bio; 1513 mbio->bi_private = r10_bio;
1465 1514
1466 atomic_inc(&r10_bio->remaining); 1515 atomic_inc(&r10_bio->remaining);
@@ -1502,7 +1551,8 @@ retry_write:
1502 r10_bio, rdev)); 1551 r10_bio, rdev));
1503 mbio->bi_bdev = rdev->bdev; 1552 mbio->bi_bdev = rdev->bdev;
1504 mbio->bi_end_io = raid10_end_write_request; 1553 mbio->bi_end_io = raid10_end_write_request;
1505 mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; 1554 mbio->bi_rw =
1555 WRITE | do_sync | do_fua | do_discard | do_same;
1506 mbio->bi_private = r10_bio; 1556 mbio->bi_private = r10_bio;
1507 1557
1508 atomic_inc(&r10_bio->remaining); 1558 atomic_inc(&r10_bio->remaining);
@@ -3436,7 +3486,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3436 disks = mddev->raid_disks + mddev->delta_disks; 3486 disks = mddev->raid_disks + mddev->delta_disks;
3437 break; 3487 break;
3438 } 3488 }
3439 if (layout >> 17) 3489 if (layout >> 18)
3440 return -1; 3490 return -1;
3441 if (chunk < (PAGE_SIZE >> 9) || 3491 if (chunk < (PAGE_SIZE >> 9) ||
3442 !is_power_of_2(chunk)) 3492 !is_power_of_2(chunk))
@@ -3448,6 +3498,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3448 geo->near_copies = nc; 3498 geo->near_copies = nc;
3449 geo->far_copies = fc; 3499 geo->far_copies = fc;
3450 geo->far_offset = fo; 3500 geo->far_offset = fo;
3501 geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
3451 geo->chunk_mask = chunk - 1; 3502 geo->chunk_mask = chunk - 1;
3452 geo->chunk_shift = ffz(~chunk); 3503 geo->chunk_shift = ffz(~chunk);
3453 return nc*fc; 3504 return nc*fc;
@@ -3569,6 +3620,8 @@ static int run(struct mddev *mddev)
3569 if (mddev->queue) { 3620 if (mddev->queue) {
3570 blk_queue_max_discard_sectors(mddev->queue, 3621 blk_queue_max_discard_sectors(mddev->queue,
3571 mddev->chunk_sectors); 3622 mddev->chunk_sectors);
3623 blk_queue_max_write_same_sectors(mddev->queue,
3624 mddev->chunk_sectors);
3572 blk_queue_io_min(mddev->queue, chunk_size); 3625 blk_queue_io_min(mddev->queue, chunk_size);
3573 if (conf->geo.raid_disks % conf->geo.near_copies) 3626 if (conf->geo.raid_disks % conf->geo.near_copies)
3574 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); 3627 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
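
With use_far_sets, the far-copy rotation wraps inside a group of far_set_size devices rather than across the whole array, which is what the new modulo arithmetic in __raid10_find_phys() implements. A simplified sketch, assuming raid_disks is an exact multiple of far_set_size so the special handling of the last, larger set can be ignored; next_far_copy_dev is an illustrative name, not in the driver:

/* Illustrative only: device holding the next far copy of a chunk on 'd'. */
static int next_far_copy_dev(int d, int near_copies, int far_set_size)
{
	int set_start = (d / far_set_size) * far_set_size;

	d += near_copies;
	return set_start + ((d - set_start) % far_set_size);
}

/*
 * With near_copies = 1 and far_set_size = 2, devices pair up as
 * 0 <-> 1, 2 <-> 3, ... which matches the "[A B] [C D]" / "[B A] [D C]"
 * example in the comment block added above.
 */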
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 1054cf602345..157d69e83ff4 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -33,6 +33,11 @@ struct r10conf {
33 * far_offset, in which case it is 33 * far_offset, in which case it is
34 * 1 stripe. 34 * 1 stripe.
35 */ 35 */
36 int far_set_size; /* The number of devices in a set,
37 * where a 'set' are devices that
38 * contain far/offset copies of
39 * each other.
40 */
36 int chunk_shift; /* shift from chunks to sectors */ 41 int chunk_shift; /* shift from chunks to sectors */
37 sector_t chunk_mask; 42 sector_t chunk_mask;
38 } prev, geo; 43 } prev, geo;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5af2d2709081..3ee2912889e7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1403,7 +1403,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
1403 &sh->ops.zero_sum_result, percpu->spare_page, &submit); 1403 &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1404} 1404}
1405 1405
1406static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1406static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1407{ 1407{
1408 int overlap_clear = 0, i, disks = sh->disks; 1408 int overlap_clear = 0, i, disks = sh->disks;
1409 struct dma_async_tx_descriptor *tx = NULL; 1409 struct dma_async_tx_descriptor *tx = NULL;
@@ -1468,36 +1468,6 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1468 put_cpu(); 1468 put_cpu();
1469} 1469}
1470 1470
1471#ifdef CONFIG_MULTICORE_RAID456
1472static void async_run_ops(void *param, async_cookie_t cookie)
1473{
1474 struct stripe_head *sh = param;
1475 unsigned long ops_request = sh->ops.request;
1476
1477 clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
1478 wake_up(&sh->ops.wait_for_ops);
1479
1480 __raid_run_ops(sh, ops_request);
1481 release_stripe(sh);
1482}
1483
1484static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1485{
1486 /* since handle_stripe can be called outside of raid5d context
1487 * we need to ensure sh->ops.request is de-staged before another
1488 * request arrives
1489 */
1490 wait_event(sh->ops.wait_for_ops,
1491 !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
1492 sh->ops.request = ops_request;
1493
1494 atomic_inc(&sh->count);
1495 async_schedule(async_run_ops, sh);
1496}
1497#else
1498#define raid_run_ops __raid_run_ops
1499#endif
1500
1501static int grow_one_stripe(struct r5conf *conf) 1471static int grow_one_stripe(struct r5conf *conf)
1502{ 1472{
1503 struct stripe_head *sh; 1473 struct stripe_head *sh;
@@ -1506,9 +1476,6 @@ static int grow_one_stripe(struct r5conf *conf)
1506 return 0; 1476 return 0;
1507 1477
1508 sh->raid_conf = conf; 1478 sh->raid_conf = conf;
1509 #ifdef CONFIG_MULTICORE_RAID456
1510 init_waitqueue_head(&sh->ops.wait_for_ops);
1511 #endif
1512 1479
1513 spin_lock_init(&sh->stripe_lock); 1480 spin_lock_init(&sh->stripe_lock);
1514 1481
@@ -1627,9 +1594,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1627 break; 1594 break;
1628 1595
1629 nsh->raid_conf = conf; 1596 nsh->raid_conf = conf;
1630 #ifdef CONFIG_MULTICORE_RAID456
1631 init_waitqueue_head(&nsh->ops.wait_for_ops);
1632 #endif
1633 spin_lock_init(&nsh->stripe_lock); 1597 spin_lock_init(&nsh->stripe_lock);
1634 1598
1635 list_add(&nsh->lru, &newstripes); 1599 list_add(&nsh->lru, &newstripes);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 11d01d67b3f5..7bd068a6056a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1629,7 +1629,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1629 1629
1630 /* If this is the first slave, then we need to set the master's hardware 1630 /* If this is the first slave, then we need to set the master's hardware
1631 * address to be the same as the slave's. */ 1631 * address to be the same as the slave's. */
1632 if (bond->dev_addr_from_first) 1632 if (bond->slave_cnt == 0 && bond->dev_addr_from_first)
1633 bond_set_dev_addr(bond->dev, slave_dev); 1633 bond_set_dev_addr(bond->dev, slave_dev);
1634 1634
1635 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); 1635 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 639049d7e92d..da5f4397f87c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
301 bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", 301 bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
302 ring->start); 302 ring->start);
303 } else { 303 } else {
304 /* Omit CRC. */
305 len -= ETH_FCS_LEN;
306
304 new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len); 307 new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
305 if (new_skb) { 308 if (new_skb) {
306 skb_put(new_skb, len); 309 skb_put(new_skb, len);
307 skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET, 310 skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
308 new_skb->data, 311 new_skb->data,
309 len); 312 len);
313 skb_checksum_none_assert(skb);
310 new_skb->protocol = 314 new_skb->protocol =
311 eth_type_trans(new_skb, bgmac->net_dev); 315 eth_type_trans(new_skb, bgmac->net_dev);
312 netif_receive_skb(new_skb); 316 netif_receive_skb(new_skb);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ecac04a3687c..a923bc4d5a1f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3142,7 +3142,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3142 tsum = ~csum_fold(csum_add((__force __wsum) csum, 3142 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3143 csum_partial(t_header, -fix, 0))); 3143 csum_partial(t_header, -fix, 0)));
3144 3144
3145 return bswab16(csum); 3145 return bswab16(tsum);
3146} 3146}
3147 3147
3148static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) 3148static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 9a674b14b403..edfa67adf2f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
281 cmd->lp_advertising |= ADVERTISED_2500baseX_Full; 281 cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
282 if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) 282 if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
283 cmd->lp_advertising |= ADVERTISED_10000baseT_Full; 283 cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
284 if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
285 cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
284 } 286 }
285 287
286 cmd->maxtxpkt = 0; 288 cmd->maxtxpkt = 0;
@@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
463 ADVERTISED_10000baseKR_Full)) 465 ADVERTISED_10000baseKR_Full))
464 bp->link_params.speed_cap_mask[cfg_idx] |= 466 bp->link_params.speed_cap_mask[cfg_idx] |=
465 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; 467 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
468
469 if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
470 bp->link_params.speed_cap_mask[cfg_idx] |=
471 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
466 } 472 }
467 } else { /* forced speed */ 473 } else { /* forced speed */
468 /* advertise the requested speed and duplex if supported */ 474 /* advertise the requested speed and duplex if supported */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 1663e0b6b5a0..31c5787970db 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -10422,6 +10422,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10422 MDIO_PMA_DEVAD, 10422 MDIO_PMA_DEVAD,
10423 MDIO_PMA_REG_8481_LED1_MASK, 10423 MDIO_PMA_REG_8481_LED1_MASK,
10424 0x0); 10424 0x0);
10425 if (phy->type ==
10426 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
10427 /* Disable MI_INT interrupt before setting LED4
10428 * source to constant off.
10429 */
10430 if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
10431 params->port*4) &
10432 NIG_MASK_MI_INT) {
10433 params->link_flags |=
10434 LINK_FLAGS_INT_DISABLED;
10435
10436 bnx2x_bits_dis(
10437 bp,
10438 NIG_REG_MASK_INTERRUPT_PORT0 +
10439 params->port*4,
10440 NIG_MASK_MI_INT);
10441 }
10442 bnx2x_cl45_write(bp, phy,
10443 MDIO_PMA_DEVAD,
10444 MDIO_PMA_REG_8481_SIGNAL_MASK,
10445 0x0);
10446 }
10425 } 10447 }
10426 break; 10448 break;
10427 case LED_MODE_ON: 10449 case LED_MODE_ON:
@@ -10468,6 +10490,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10468 MDIO_PMA_DEVAD, 10490 MDIO_PMA_DEVAD,
10469 MDIO_PMA_REG_8481_LED1_MASK, 10491 MDIO_PMA_REG_8481_LED1_MASK,
10470 0x20); 10492 0x20);
10493 if (phy->type ==
10494 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
10495 /* Disable MI_INT interrupt before setting LED4
10496 * source to constant on.
10497 */
10498 if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
10499 params->port*4) &
10500 NIG_MASK_MI_INT) {
10501 params->link_flags |=
10502 LINK_FLAGS_INT_DISABLED;
10503
10504 bnx2x_bits_dis(
10505 bp,
10506 NIG_REG_MASK_INTERRUPT_PORT0 +
10507 params->port*4,
10508 NIG_MASK_MI_INT);
10509 }
10510 bnx2x_cl45_write(bp, phy,
10511 MDIO_PMA_DEVAD,
10512 MDIO_PMA_REG_8481_SIGNAL_MASK,
10513 0x20);
10514 }
10471 } 10515 }
10472 break; 10516 break;
10473 10517
@@ -10532,6 +10576,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10532 MDIO_PMA_DEVAD, 10576 MDIO_PMA_DEVAD,
10533 MDIO_PMA_REG_8481_LINK_SIGNAL, 10577 MDIO_PMA_REG_8481_LINK_SIGNAL,
10534 val); 10578 val);
10579 if (phy->type ==
10580 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
10581 /* Restore LED4 source to external link,
10582 * and re-enable interrupts.
10583 */
10584 bnx2x_cl45_write(bp, phy,
10585 MDIO_PMA_DEVAD,
10586 MDIO_PMA_REG_8481_SIGNAL_MASK,
10587 0x40);
10588 if (params->link_flags &
10589 LINK_FLAGS_INT_DISABLED) {
10590 bnx2x_link_int_enable(params);
10591 params->link_flags &=
10592 ~LINK_FLAGS_INT_DISABLED;
10593 }
10594 }
10535 } 10595 }
10536 break; 10596 break;
10537 } 10597 }
@@ -11791,6 +11851,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11791 phy->media_type = ETH_PHY_KR; 11851 phy->media_type = ETH_PHY_KR;
11792 phy->flags |= FLAGS_WC_DUAL_MODE; 11852 phy->flags |= FLAGS_WC_DUAL_MODE;
11793 phy->supported &= (SUPPORTED_20000baseKR2_Full | 11853 phy->supported &= (SUPPORTED_20000baseKR2_Full |
11854 SUPPORTED_10000baseT_Full |
11855 SUPPORTED_1000baseT_Full |
11794 SUPPORTED_Autoneg | 11856 SUPPORTED_Autoneg |
11795 SUPPORTED_FIBRE | 11857 SUPPORTED_FIBRE |
11796 SUPPORTED_Pause | 11858 SUPPORTED_Pause |
@@ -13437,7 +13499,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
13437 struct bnx2x_phy *phy = &params->phy[INT_PHY]; 13499 struct bnx2x_phy *phy = &params->phy[INT_PHY];
13438 bnx2x_set_aer_mmd(params, phy); 13500 bnx2x_set_aer_mmd(params, phy);
13439 if ((phy->supported & SUPPORTED_20000baseKR2_Full) && 13501 if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
13440 (phy->speed_cap_mask & SPEED_20000)) 13502 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
13441 bnx2x_check_kr2_wa(params, vars, phy); 13503 bnx2x_check_kr2_wa(params, vars, phy);
13442 bnx2x_check_over_curr(params, vars); 13504 bnx2x_check_over_curr(params, vars);
13443 if (vars->rx_tx_asic_rst) 13505 if (vars->rx_tx_asic_rst)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index d25c7d79787a..be5c195d03dd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -307,7 +307,8 @@ struct link_params {
307 struct bnx2x *bp; 307 struct bnx2x *bp;
308 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when 308 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
309 req_flow_ctrl is set to AUTO */ 309 req_flow_ctrl is set to AUTO */
310 u16 rsrv1; 310 u16 link_flags;
311#define LINK_FLAGS_INT_DISABLED (1<<0)
311 u32 lfa_base; 312 u32 lfa_base;
312}; 313};
313 314
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fccc3bf2141d..069a155d16ed 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
246 struct bufdesc *bdp; 246 struct bufdesc *bdp;
247 void *bufaddr; 247 void *bufaddr;
248 unsigned short status; 248 unsigned short status;
249 unsigned long flags; 249 unsigned int index;
250 250
251 if (!fep->link) { 251 if (!fep->link) {
252 /* Link is down or autonegotiation is in progress. */ 252 /* Link is down or autonegotiation is in progress. */
253 return NETDEV_TX_BUSY; 253 return NETDEV_TX_BUSY;
254 } 254 }
255 255
256 spin_lock_irqsave(&fep->hw_lock, flags);
257 /* Fill in a Tx ring entry */ 256 /* Fill in a Tx ring entry */
258 bdp = fep->cur_tx; 257 bdp = fep->cur_tx;
259 258
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
264 * This should not happen, since ndev->tbusy should be set. 263 * This should not happen, since ndev->tbusy should be set.
265 */ 264 */
266 printk("%s: tx queue full!.\n", ndev->name); 265 printk("%s: tx queue full!.\n", ndev->name);
267 spin_unlock_irqrestore(&fep->hw_lock, flags);
268 return NETDEV_TX_BUSY; 266 return NETDEV_TX_BUSY;
269 } 267 }
270 268
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
280 * 4-byte boundaries. Use bounce buffers to copy data 278 * 4-byte boundaries. Use bounce buffers to copy data
281 * and get it aligned. Ugh. 279 * and get it aligned. Ugh.
282 */ 280 */
281 if (fep->bufdesc_ex)
282 index = (struct bufdesc_ex *)bdp -
283 (struct bufdesc_ex *)fep->tx_bd_base;
284 else
285 index = bdp - fep->tx_bd_base;
286
283 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { 287 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
284 unsigned int index;
285 if (fep->bufdesc_ex)
286 index = (struct bufdesc_ex *)bdp -
287 (struct bufdesc_ex *)fep->tx_bd_base;
288 else
289 index = bdp - fep->tx_bd_base;
290 memcpy(fep->tx_bounce[index], skb->data, skb->len); 288 memcpy(fep->tx_bounce[index], skb->data, skb->len);
291 bufaddr = fep->tx_bounce[index]; 289 bufaddr = fep->tx_bounce[index];
292 } 290 }
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
300 swap_buffer(bufaddr, skb->len); 298 swap_buffer(bufaddr, skb->len);
301 299
302 /* Save skb pointer */ 300 /* Save skb pointer */
303 fep->tx_skbuff[fep->skb_cur] = skb; 301 fep->tx_skbuff[index] = skb;
304
305 ndev->stats.tx_bytes += skb->len;
306 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
307 302
308 /* Push the data cache so the CPM does not get stale memory 303 /* Push the data cache so the CPM does not get stale memory
309 * data. 304 * data.
@@ -331,26 +326,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
331 ebdp->cbd_esc = BD_ENET_TX_INT; 326 ebdp->cbd_esc = BD_ENET_TX_INT;
332 } 327 }
333 } 328 }
334 /* Trigger transmission start */
335 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
336
337 /* If this was the last BD in the ring, start at the beginning again. */ 329 /* If this was the last BD in the ring, start at the beginning again. */
338 if (status & BD_ENET_TX_WRAP) 330 if (status & BD_ENET_TX_WRAP)
339 bdp = fep->tx_bd_base; 331 bdp = fep->tx_bd_base;
340 else 332 else
341 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); 333 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
342 334
343 if (bdp == fep->dirty_tx) { 335 fep->cur_tx = bdp;
344 fep->tx_full = 1; 336
337 if (fep->cur_tx == fep->dirty_tx)
345 netif_stop_queue(ndev); 338 netif_stop_queue(ndev);
346 }
347 339
348 fep->cur_tx = bdp; 340 /* Trigger transmission start */
341 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
349 342
350 skb_tx_timestamp(skb); 343 skb_tx_timestamp(skb);
351 344
352 spin_unlock_irqrestore(&fep->hw_lock, flags);
353
354 return NETDEV_TX_OK; 345 return NETDEV_TX_OK;
355} 346}
356 347
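
The xmit path above stops maintaining skb_cur as a separate cursor and instead derives the tx_skbuff[] slot from the descriptor pointer itself, so the skb table and the hardware ring can no longer drift apart. A minimal sketch of that calculation, condensed from the hunks above; fec_txdesc_index() is an illustrative name, not a function the patch adds:

static unsigned int fec_txdesc_index(struct fec_enet_private *fep,
				     struct bufdesc *bdp)
{
	/* With extended descriptors the ring is an array of struct
	 * bufdesc_ex, so the pointer difference must be scaled by the
	 * larger element size.
	 */
	if (fep->bufdesc_ex)
		return (struct bufdesc_ex *)bdp -
		       (struct bufdesc_ex *)fep->tx_bd_base;

	return bdp - fep->tx_bd_base;
}

/* Usage, as open-coded in fec_enet_start_xmit() and fec_enet_tx():
 *	index = fec_txdesc_index(fep, bdp);
 *	fep->tx_skbuff[index] = skb;
 */
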
@@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
406 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) 397 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
407 * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); 398 * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
408 399
409 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
410 fep->cur_rx = fep->rx_bd_base; 400 fep->cur_rx = fep->rx_bd_base;
411 401
412 /* Reset SKB transmit buffers. */
413 fep->skb_cur = fep->skb_dirty = 0;
414 for (i = 0; i <= TX_RING_MOD_MASK; i++) { 402 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
415 if (fep->tx_skbuff[i]) { 403 if (fep->tx_skbuff[i]) {
416 dev_kfree_skb_any(fep->tx_skbuff[i]); 404 dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
573 struct bufdesc *bdp; 561 struct bufdesc *bdp;
574 unsigned short status; 562 unsigned short status;
575 struct sk_buff *skb; 563 struct sk_buff *skb;
564 int index = 0;
576 565
577 fep = netdev_priv(ndev); 566 fep = netdev_priv(ndev);
578 spin_lock(&fep->hw_lock);
579 bdp = fep->dirty_tx; 567 bdp = fep->dirty_tx;
580 568
569 /* get next bdp of dirty_tx */
570 if (bdp->cbd_sc & BD_ENET_TX_WRAP)
571 bdp = fep->tx_bd_base;
572 else
573 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
574
581 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { 575 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
582 if (bdp == fep->cur_tx && fep->tx_full == 0) 576
577 /* current queue is empty */
578 if (bdp == fep->cur_tx)
583 break; 579 break;
584 580
581 if (fep->bufdesc_ex)
582 index = (struct bufdesc_ex *)bdp -
583 (struct bufdesc_ex *)fep->tx_bd_base;
584 else
585 index = bdp - fep->tx_bd_base;
586
585 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, 587 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
586 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 588 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
587 bdp->cbd_bufaddr = 0; 589 bdp->cbd_bufaddr = 0;
588 590
589 skb = fep->tx_skbuff[fep->skb_dirty]; 591 skb = fep->tx_skbuff[index];
592
590 /* Check for errors. */ 593 /* Check for errors. */
591 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 594 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
592 BD_ENET_TX_RL | BD_ENET_TX_UN | 595 BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
631 634
632 /* Free the sk buffer associated with this last transmit */ 635 /* Free the sk buffer associated with this last transmit */
633 dev_kfree_skb_any(skb); 636 dev_kfree_skb_any(skb);
634 fep->tx_skbuff[fep->skb_dirty] = NULL; 637 fep->tx_skbuff[index] = NULL;
635 fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; 638
639 fep->dirty_tx = bdp;
636 640
637 /* Update pointer to next buffer descriptor to be transmitted */ 641 /* Update pointer to next buffer descriptor to be transmitted */
638 if (status & BD_ENET_TX_WRAP) 642 if (status & BD_ENET_TX_WRAP)
@@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
642 646
643 /* Since we have freed up a buffer, the ring is no longer full 647 /* Since we have freed up a buffer, the ring is no longer full
644 */ 648 */
645 if (fep->tx_full) { 649 if (fep->dirty_tx != fep->cur_tx) {
646 fep->tx_full = 0;
647 if (netif_queue_stopped(ndev)) 650 if (netif_queue_stopped(ndev))
648 netif_wake_queue(ndev); 651 netif_wake_queue(ndev);
649 } 652 }
650 } 653 }
651 fep->dirty_tx = bdp; 654 return;
652 spin_unlock(&fep->hw_lock);
653} 655}
654 656
655 657
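
With tx_full (and the hw_lock in this path) gone, ring occupancy is inferred purely from the two descriptor pointers: cur_tx is the next slot the xmit path will fill and dirty_tx is the last slot the completion path has reclaimed. The queue is stopped when advancing cur_tx lands on dirty_tx, and woken again once reclaim moves dirty_tx away, so one descriptor is effectively sacrificed to keep "full" and "empty" distinguishable. A hedged sketch of that convention; the helper names are illustrative:

/* Producer side: checked after advancing cur_tx in fec_enet_start_xmit(). */
static bool fec_tx_ring_full(const struct fec_enet_private *fep)
{
	return fep->cur_tx == fep->dirty_tx;
}

/* Consumer side: checked after stepping one descriptor past dirty_tx in
 * fec_enet_tx().
 */
static bool fec_tx_ring_empty(const struct fec_enet_private *fep,
			      const struct bufdesc *next_dirty)
{
	return next_dirty == fep->cur_tx;
}
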
@@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
816 int_events = readl(fep->hwp + FEC_IEVENT); 818 int_events = readl(fep->hwp + FEC_IEVENT);
817 writel(int_events, fep->hwp + FEC_IEVENT); 819 writel(int_events, fep->hwp + FEC_IEVENT);
818 820
819 if (int_events & FEC_ENET_RXF) { 821 if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
820 ret = IRQ_HANDLED; 822 ret = IRQ_HANDLED;
821 823
822 /* Disable the RX interrupt */ 824 /* Disable the RX interrupt */
@@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
827 } 829 }
828 } 830 }
829 831
830 /* Transmit OK, or non-fatal error. Update the buffer
831 * descriptors. FEC handles all errors, we just discover
832 * them as part of the transmit process.
833 */
834 if (int_events & FEC_ENET_TXF) {
835 ret = IRQ_HANDLED;
836 fec_enet_tx(ndev);
837 }
838
839 if (int_events & FEC_ENET_MII) { 832 if (int_events & FEC_ENET_MII) {
840 ret = IRQ_HANDLED; 833 ret = IRQ_HANDLED;
841 complete(&fep->mdio_done); 834 complete(&fep->mdio_done);
@@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
851 int pkts = fec_enet_rx(ndev, budget); 844 int pkts = fec_enet_rx(ndev, budget);
852 struct fec_enet_private *fep = netdev_priv(ndev); 845 struct fec_enet_private *fep = netdev_priv(ndev);
853 846
847 fec_enet_tx(ndev);
848
854 if (pkts < budget) { 849 if (pkts < budget) {
855 napi_complete(napi); 850 napi_complete(napi);
856 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 851 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
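
FEC_ENET_TXF is no longer serviced in the hard interrupt handler: the handler just acknowledges RXF/TXF and defers to NAPI, and the poll callback reclaims TX descriptors before spending its RX budget. A condensed sketch of that shape, assuming the driver's existing fep->napi context and the IEVENT/IMASK accesses visible above; interrupt masking details and the MII event are elided:

static irqreturn_t fec_isr_sketch(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint events = readl(fep->hwp + FEC_IEVENT);

	writel(events, fep->hwp + FEC_IEVENT);	/* acknowledge */

	if (events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
		/* mask RX/TX completion here, then let NAPI do the work */
		napi_schedule(&fep->napi);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int fec_poll_sketch(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int pkts = fec_enet_rx(ndev, budget);

	fec_enet_tx(ndev);	/* TX completion now runs in softirq context */

	if (pkts < budget) {
		napi_complete(napi);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}

	return pkts;
}
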
@@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)
1646 1641
1647 /* ...and the same for transmit */ 1642 /* ...and the same for transmit */
1648 bdp = fep->tx_bd_base; 1643 bdp = fep->tx_bd_base;
1644 fep->cur_tx = bdp;
1649 for (i = 0; i < TX_RING_SIZE; i++) { 1645 for (i = 0; i < TX_RING_SIZE; i++) {
1650 1646
1651 /* Initialize the BD for every fragment in the page. */ 1647 /* Initialize the BD for every fragment in the page. */
@@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)
1657 /* Set the last buffer to wrap */ 1653 /* Set the last buffer to wrap */
1658 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); 1654 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
1659 bdp->cbd_sc |= BD_SC_WRAP; 1655 bdp->cbd_sc |= BD_SC_WRAP;
1656 fep->dirty_tx = bdp;
1660 1657
1661 fec_restart(ndev, 0); 1658 fec_restart(ndev, 0);
1662 1659
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 01579b8e37c4..f5390071efd0 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -97,6 +97,13 @@ struct bufdesc {
97 unsigned short cbd_sc; /* Control and status info */ 97 unsigned short cbd_sc; /* Control and status info */
98 unsigned long cbd_bufaddr; /* Buffer address */ 98 unsigned long cbd_bufaddr; /* Buffer address */
99}; 99};
100#else
101struct bufdesc {
102 unsigned short cbd_sc; /* Control and status info */
103 unsigned short cbd_datlen; /* Data length */
104 unsigned long cbd_bufaddr; /* Buffer address */
105};
106#endif
100 107
101struct bufdesc_ex { 108struct bufdesc_ex {
102 struct bufdesc desc; 109 struct bufdesc desc;
@@ -107,14 +114,6 @@ struct bufdesc_ex {
107 unsigned short res0[4]; 114 unsigned short res0[4];
108}; 115};
109 116
110#else
111struct bufdesc {
112 unsigned short cbd_sc; /* Control and status info */
113 unsigned short cbd_datlen; /* Data length */
114 unsigned long cbd_bufaddr; /* Buffer address */
115};
116#endif
117
118/* 117/*
119 * The following definitions courtesy of commproc.h, which where 118 * The following definitions courtesy of commproc.h, which where
120 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net). 119 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
@@ -214,8 +213,6 @@ struct fec_enet_private {
214 unsigned char *tx_bounce[TX_RING_SIZE]; 213 unsigned char *tx_bounce[TX_RING_SIZE];
215 struct sk_buff *tx_skbuff[TX_RING_SIZE]; 214 struct sk_buff *tx_skbuff[TX_RING_SIZE];
216 struct sk_buff *rx_skbuff[RX_RING_SIZE]; 215 struct sk_buff *rx_skbuff[RX_RING_SIZE];
217 ushort skb_cur;
218 ushort skb_dirty;
219 216
220 /* CPM dual port RAM relative addresses */ 217 /* CPM dual port RAM relative addresses */
221 dma_addr_t bd_dma; 218 dma_addr_t bd_dma;
@@ -227,7 +224,6 @@ struct fec_enet_private {
227 /* The ring entries to be free()ed */ 224 /* The ring entries to be free()ed */
228 struct bufdesc *dirty_tx; 225 struct bufdesc *dirty_tx;
229 226
230 uint tx_full;
231 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ 227 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
232 spinlock_t hw_lock; 228 spinlock_t hw_lock;
233 229
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 8900398ba103..28fb50a1e9c3 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4765,8 +4765,10 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4765 4765
4766 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4766 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4767 4767
4768 rtl_tx_performance_tweak(pdev, 4768 if (tp->dev->mtu <= ETH_DATA_LEN) {
4769 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); 4769 rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
4770 PCI_EXP_DEVCTL_NOSNOOP_EN);
4771 }
4770} 4772}
4771 4773
4772static void rtl_hw_start_8168bef(struct rtl8169_private *tp) 4774static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
@@ -4789,7 +4791,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4789 4791
4790 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4792 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4791 4793
4792 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4794 if (tp->dev->mtu <= ETH_DATA_LEN)
4795 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4793 4796
4794 rtl_disable_clock_request(pdev); 4797 rtl_disable_clock_request(pdev);
4795 4798
@@ -4822,7 +4825,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4822 4825
4823 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4826 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4824 4827
4825 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4828 if (tp->dev->mtu <= ETH_DATA_LEN)
4829 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4826 4830
4827 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4831 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4828} 4832}
@@ -4841,7 +4845,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4841 4845
4842 RTL_W8(MaxTxPacketSize, TxPacketMax); 4846 RTL_W8(MaxTxPacketSize, TxPacketMax);
4843 4847
4844 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4848 if (tp->dev->mtu <= ETH_DATA_LEN)
4849 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4845 4850
4846 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4851 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4847} 4852}
@@ -4901,7 +4906,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4901 4906
4902 RTL_W8(MaxTxPacketSize, TxPacketMax); 4907 RTL_W8(MaxTxPacketSize, TxPacketMax);
4903 4908
4904 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4909 if (tp->dev->mtu <= ETH_DATA_LEN)
4910 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4905 4911
4906 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4912 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4907} 4913}
@@ -4913,7 +4919,8 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4913 4919
4914 rtl_csi_access_enable_1(tp); 4920 rtl_csi_access_enable_1(tp);
4915 4921
4916 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4922 if (tp->dev->mtu <= ETH_DATA_LEN)
4923 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4917 4924
4918 RTL_W8(MaxTxPacketSize, TxPacketMax); 4925 RTL_W8(MaxTxPacketSize, TxPacketMax);
4919 4926
@@ -4972,7 +4979,8 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4972 4979
4973 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); 4980 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
4974 4981
4975 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4982 if (tp->dev->mtu <= ETH_DATA_LEN)
4983 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4976 4984
4977 RTL_W8(MaxTxPacketSize, TxPacketMax); 4985 RTL_W8(MaxTxPacketSize, TxPacketMax);
4978 4986
@@ -4998,7 +5006,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
4998 5006
4999 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); 5007 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
5000 5008
5001 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 5009 if (tp->dev->mtu <= ETH_DATA_LEN)
5010 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5002 5011
5003 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5012 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5004 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5013 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
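
Each of the rtl_hw_start_8168*() paths above now applies the PCIe max-read-request tweak only while the MTU is at or below ETH_DATA_LEN, so the tweak is skipped once jumbo frames are configured. The repeated guard could be collected into one helper; this is a sketch only, under the assumption that tp->pci_dev is the device the existing pdev locals refer to, and the helper name is illustrative:

static void rtl_std_mtu_tx_tweak(struct rtl8169_private *tp, u32 force)
{
	/* Apply the read-request tweak only for standard-size frames. */
	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(tp->pci_dev, force);
}

/* e.g. rtl_std_mtu_tx_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); */
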
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index bf57b3cb16ab..0bc00991d310 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
779 tx_queue->txd.entries); 779 tx_queue->txd.entries);
780 } 780 }
781 781
782 efx_device_detach_sync(efx);
782 efx_stop_all(efx); 783 efx_stop_all(efx);
783 efx_stop_interrupts(efx, true); 784 efx_stop_interrupts(efx, true);
784 785
@@ -832,6 +833,7 @@ out:
832 833
833 efx_start_interrupts(efx, true); 834 efx_start_interrupts(efx, true);
834 efx_start_all(efx); 835 efx_start_all(efx);
836 netif_device_attach(efx->net_dev);
835 return rc; 837 return rc;
836 838
837rollback: 839rollback:
@@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
1641 /* Flush efx_mac_work(), refill_workqueue, monitor_work */ 1643 /* Flush efx_mac_work(), refill_workqueue, monitor_work */
1642 efx_flush_all(efx); 1644 efx_flush_all(efx);
1643 1645
1644 /* Stop the kernel transmit interface late, so the watchdog 1646 /* Stop the kernel transmit interface. This is only valid if
1645 * timer isn't ticking over the flush */ 1647 * the device is stopped or detached; otherwise the watchdog
1648 * may fire immediately.
1649 */
1650 WARN_ON(netif_running(efx->net_dev) &&
1651 netif_device_present(efx->net_dev));
1646 netif_tx_disable(efx->net_dev); 1652 netif_tx_disable(efx->net_dev);
1647 1653
1648 efx_stop_datapath(efx); 1654 efx_stop_datapath(efx);
@@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1963 if (new_mtu > EFX_MAX_MTU) 1969 if (new_mtu > EFX_MAX_MTU)
1964 return -EINVAL; 1970 return -EINVAL;
1965 1971
1966 efx_stop_all(efx);
1967
1968 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); 1972 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
1969 1973
1974 efx_device_detach_sync(efx);
1975 efx_stop_all(efx);
1976
1970 mutex_lock(&efx->mac_lock); 1977 mutex_lock(&efx->mac_lock);
1971 net_dev->mtu = new_mtu; 1978 net_dev->mtu = new_mtu;
1972 efx->type->reconfigure_mac(efx); 1979 efx->type->reconfigure_mac(efx);
1973 mutex_unlock(&efx->mac_lock); 1980 mutex_unlock(&efx->mac_lock);
1974 1981
1975 efx_start_all(efx); 1982 efx_start_all(efx);
1983 netif_device_attach(efx->net_dev);
1976 return 0; 1984 return 0;
1977} 1985}
1978 1986
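
Both efx_realloc_channels() and efx_change_mtu() now bracket the stop/start sequence with efx_device_detach_sync() and netif_device_attach(), so the device is marked absent while its queues are torn down and the TX watchdog that efx_stop_all() now warns about cannot fire mid-reconfiguration. A generic sketch of the same bracket built from the core netdev helpers; the body of the efx-specific detach helper is not shown in the diff, so this describes the overall shape rather than the driver's exact code:

#include <linux/netdevice.h>

/* netif_device_detach() clears the device's "present" bit and stops the
 * TX queues, which also keeps the netdev watchdog quiet; attach restores
 * both if the interface is still up.
 */
static int reconfigure_detached(struct net_device *ndev,
				int (*reconfigure)(struct net_device *))
{
	int rc;

	netif_device_detach(ndev);
	rc = reconfigure(ndev);		/* e.g. resize rings, change MTU */
	netif_device_attach(ndev);

	return rc;
}
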
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 2d756c1d7142..0a90abd2421b 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -210,6 +210,7 @@ struct efx_tx_queue {
210 * Will be %NULL if the buffer slot is currently free. 210 * Will be %NULL if the buffer slot is currently free.
211 * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE. 211 * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
212 * Will be %NULL if the buffer slot is currently free. 212 * Will be %NULL if the buffer slot is currently free.
213 * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
213 * @len: Buffer length, in bytes. 214 * @len: Buffer length, in bytes.
214 * @flags: Flags for buffer and packet state. 215 * @flags: Flags for buffer and packet state.
215 */ 216 */
@@ -219,7 +220,8 @@ struct efx_rx_buffer {
219 struct sk_buff *skb; 220 struct sk_buff *skb;
220 struct page *page; 221 struct page *page;
221 } u; 222 } u;
222 unsigned int len; 223 u16 page_offset;
224 u16 len;
223 u16 flags; 225 u16 flags;
224}; 226};
225#define EFX_RX_BUF_PAGE 0x0001 227#define EFX_RX_BUF_PAGE 0x0001
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d780a0d096b4..879ff5849bbd 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
90static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx, 90static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
91 struct efx_rx_buffer *buf) 91 struct efx_rx_buffer *buf)
92{ 92{
93 /* Offset is always within one page, so we don't need to consider 93 return buf->page_offset + efx->type->rx_buffer_hash_size;
94 * the page order.
95 */
96 return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
97 efx->type->rx_buffer_hash_size;
98} 94}
99static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) 95static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
100{ 96{
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
187 struct efx_nic *efx = rx_queue->efx; 183 struct efx_nic *efx = rx_queue->efx;
188 struct efx_rx_buffer *rx_buf; 184 struct efx_rx_buffer *rx_buf;
189 struct page *page; 185 struct page *page;
186 unsigned int page_offset;
190 struct efx_rx_page_state *state; 187 struct efx_rx_page_state *state;
191 dma_addr_t dma_addr; 188 dma_addr_t dma_addr;
192 unsigned index, count; 189 unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
211 state->dma_addr = dma_addr; 208 state->dma_addr = dma_addr;
212 209
213 dma_addr += sizeof(struct efx_rx_page_state); 210 dma_addr += sizeof(struct efx_rx_page_state);
211 page_offset = sizeof(struct efx_rx_page_state);
214 212
215 split: 213 split:
216 index = rx_queue->added_count & rx_queue->ptr_mask; 214 index = rx_queue->added_count & rx_queue->ptr_mask;
217 rx_buf = efx_rx_buffer(rx_queue, index); 215 rx_buf = efx_rx_buffer(rx_queue, index);
218 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 216 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
219 rx_buf->u.page = page; 217 rx_buf->u.page = page;
218 rx_buf->page_offset = page_offset;
220 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 219 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
221 rx_buf->flags = EFX_RX_BUF_PAGE; 220 rx_buf->flags = EFX_RX_BUF_PAGE;
222 ++rx_queue->added_count; 221 ++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
227 /* Use the second half of the page */ 226 /* Use the second half of the page */
228 get_page(page); 227 get_page(page);
229 dma_addr += (PAGE_SIZE >> 1); 228 dma_addr += (PAGE_SIZE >> 1);
229 page_offset += (PAGE_SIZE >> 1);
230 ++count; 230 ++count;
231 goto split; 231 goto split;
232 } 232 }
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
236} 236}
237 237
238static void efx_unmap_rx_buffer(struct efx_nic *efx, 238static void efx_unmap_rx_buffer(struct efx_nic *efx,
239 struct efx_rx_buffer *rx_buf) 239 struct efx_rx_buffer *rx_buf,
240 unsigned int used_len)
240{ 241{
241 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { 242 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
242 struct efx_rx_page_state *state; 243 struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
247 state->dma_addr, 248 state->dma_addr,
248 efx_rx_buf_size(efx), 249 efx_rx_buf_size(efx),
249 DMA_FROM_DEVICE); 250 DMA_FROM_DEVICE);
251 } else if (used_len) {
252 dma_sync_single_for_cpu(&efx->pci_dev->dev,
253 rx_buf->dma_addr, used_len,
254 DMA_FROM_DEVICE);
250 } 255 }
251 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) { 256 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
252 dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr, 257 dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
269static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, 274static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
270 struct efx_rx_buffer *rx_buf) 275 struct efx_rx_buffer *rx_buf)
271{ 276{
272 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); 277 efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
273 efx_free_rx_buffer(rx_queue->efx, rx_buf); 278 efx_free_rx_buffer(rx_queue->efx, rx_buf);
274} 279}
275 280
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
535 goto out; 540 goto out;
536 } 541 }
537 542
538 /* Release card resources - assumes all RX buffers consumed in-order 543 /* Release and/or sync DMA mapping - assumes all RX buffers
539 * per RX queue 544 * consumed in-order per RX queue
540 */ 545 */
541 efx_unmap_rx_buffer(efx, rx_buf); 546 efx_unmap_rx_buffer(efx, rx_buf, len);
542 547
543 /* Prefetch nice and early so data will (hopefully) be in cache by 548 /* Prefetch nice and early so data will (hopefully) be in cache by
544 * the time we look at it. 549 * the time we look at it.
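
Since each page now backs two RX buffers under a single DMA mapping, completion of one buffer can no longer unconditionally unmap the page: the new used_len argument lets efx_unmap_rx_buffer() merely sync the bytes the NIC wrote while the page is still shared, and the full unmap happens only for the last user. A hedged sketch of that decision, with a plain page refcount standing in for the driver's efx_rx_page_state accounting:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static void rx_buf_finish_dma(struct device *dma_dev, struct page *page,
			      dma_addr_t page_dma, size_t page_map_len,
			      dma_addr_t buf_dma, unsigned int used_len)
{
	if (page_count(page) == 1) {
		/* Last buffer using this page: drop the whole mapping. */
		dma_unmap_page(dma_dev, page_dma, page_map_len,
			       DMA_FROM_DEVICE);
	} else if (used_len) {
		/* Page still shared: only make the received bytes
		 * visible to the CPU.
		 */
		dma_sync_single_for_cpu(dma_dev, buf_dma, used_len,
					DMA_FROM_DEVICE);
	}
}
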
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7e93df6585e7..01ffbc486982 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -731,7 +731,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
731 731
732 writel(vlan, &priv->host_port_regs->port_vlan); 732 writel(vlan, &priv->host_port_regs->port_vlan);
733 733
734 for (i = 0; i < 2; i++) 734 for (i = 0; i < priv->data.slaves; i++)
735 slave_write(priv->slaves + i, vlan, reg); 735 slave_write(priv->slaves + i, vlan, reg);
736 736
737 cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, 737 cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 29934446436a..abf7b6153d00 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -257,8 +257,7 @@ static struct phy_driver ksphy_driver[] = {
257 .phy_id = PHY_ID_KSZ9021, 257 .phy_id = PHY_ID_KSZ9021,
258 .phy_id_mask = 0x000ffffe, 258 .phy_id_mask = 0x000ffffe,
259 .name = "Micrel KSZ9021 Gigabit PHY", 259 .name = "Micrel KSZ9021 Gigabit PHY",
260 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause 260 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
261 | SUPPORTED_Asym_Pause),
262 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 261 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
263 .config_init = kszphy_config_init, 262 .config_init = kszphy_config_init,
264 .config_aneg = genphy_config_aneg, 263 .config_aneg = genphy_config_aneg,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 9930f9999561..3657b4a29124 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -44,13 +44,13 @@ MODULE_LICENSE("GPL");
44 44
45void phy_device_free(struct phy_device *phydev) 45void phy_device_free(struct phy_device *phydev)
46{ 46{
47 kfree(phydev); 47 put_device(&phydev->dev);
48} 48}
49EXPORT_SYMBOL(phy_device_free); 49EXPORT_SYMBOL(phy_device_free);
50 50
51static void phy_device_release(struct device *dev) 51static void phy_device_release(struct device *dev)
52{ 52{
53 phy_device_free(to_phy_device(dev)); 53 kfree(to_phy_device(dev));
54} 54}
55 55
56static struct phy_driver genphy_driver; 56static struct phy_driver genphy_driver;
@@ -201,6 +201,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
201 there's no driver _already_ loaded. */ 201 there's no driver _already_ loaded. */
202 request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id)); 202 request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
203 203
204 device_initialize(&dev->dev);
205
204 return dev; 206 return dev;
205} 207}
206EXPORT_SYMBOL(phy_device_create); 208EXPORT_SYMBOL(phy_device_create);
@@ -363,9 +365,9 @@ int phy_device_register(struct phy_device *phydev)
363 /* Run all of the fixups for this PHY */ 365 /* Run all of the fixups for this PHY */
364 phy_scan_fixups(phydev); 366 phy_scan_fixups(phydev);
365 367
366 err = device_register(&phydev->dev); 368 err = device_add(&phydev->dev);
367 if (err) { 369 if (err) {
368 pr_err("phy %d failed to register\n", phydev->addr); 370 pr_err("PHY %d failed to add\n", phydev->addr);
369 goto out; 371 goto out;
370 } 372 }
371 373
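
The phy_device lifetime now follows the usual driver-core split: phy_device_create() calls device_initialize() so the embedded struct device is refcounted from the start, phy_device_register() switches from device_register() to device_add(), and phy_device_free() drops a reference with put_device() while the real kfree() moves into the release callback. A condensed sketch of that pattern for a generic wrapper object; the names are illustrative, not the phylib ones:

#include <linux/device.h>
#include <linux/slab.h>

struct wrapped_dev {
	struct device dev;
	/* driver-specific state ... */
};

static void wrapped_dev_release(struct device *dev)
{
	/* Runs when the last reference is dropped. */
	kfree(container_of(dev, struct wrapped_dev, dev));
}

static struct wrapped_dev *wrapped_dev_create(void)
{
	struct wrapped_dev *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return NULL;

	w->dev.release = wrapped_dev_release;
	device_initialize(&w->dev);	/* refcount held, not yet visible */
	return w;
}

static int wrapped_dev_register(struct wrapped_dev *w)
{
	return device_add(&w->dev);	/* make it visible (sysfs, uevents) */
}

static void wrapped_dev_free(struct wrapped_dev *w)
{
	put_device(&w->dev);		/* kfree happens via release() */
}
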
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index da92ed3797aa..3b6e9b83342d 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -156,6 +156,24 @@ config USB_NET_AX8817X
156 This driver creates an interface named "ethX", where X depends on 156 This driver creates an interface named "ethX", where X depends on
157 what other networking devices you have in use. 157 what other networking devices you have in use.
158 158
159config USB_NET_AX88179_178A
160 tristate "ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet"
161 depends on USB_USBNET
162 select CRC32
163 select PHYLIB
164 default y
165 help
166 This option adds support for ASIX AX88179 based USB 3.0/2.0
167 to Gigabit Ethernet adapters.
168
169 This driver should work with at least the following devices:
170 * ASIX AX88179
171 * ASIX AX88178A
172	    * Sitecom LN-032
173
174 This driver creates an interface named "ethX", where X depends on
175 what other networking devices you have in use.
176
159config USB_NET_CDCETHER 177config USB_NET_CDCETHER
160 tristate "CDC Ethernet support (smart devices such as cable modems)" 178 tristate "CDC Ethernet support (smart devices such as cable modems)"
161 depends on USB_USBNET 179 depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 478691326f37..119b06c9aa16 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150) += rtl8150.o
9obj-$(CONFIG_USB_HSO) += hso.o 9obj-$(CONFIG_USB_HSO) += hso.o
10obj-$(CONFIG_USB_NET_AX8817X) += asix.o 10obj-$(CONFIG_USB_NET_AX8817X) += asix.o
11asix-y := asix_devices.o asix_common.o ax88172a.o 11asix-y := asix_devices.o asix_common.o ax88172a.o
12obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
12obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o 13obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
13obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o 14obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
14obj-$(CONFIG_USB_NET_DM9601) += dm9601.o 15obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 2205dbc8d32f..709753469099 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -924,6 +924,29 @@ static const struct driver_info ax88178_info = {
924 .tx_fixup = asix_tx_fixup, 924 .tx_fixup = asix_tx_fixup,
925}; 925};
926 926
927/*
928 * USBLINK 20F9 "USB 2.0 LAN" USB ethernet adapter, typically found in
929 * no-name packaging.
930 * USB device strings are:
931 * 1: Manufacturer: USBLINK
932 * 2: Product: HG20F9 USB2.0
933 * 3: Serial: 000003
934 * Appears to be compatible with Asix 88772B.
935 */
936static const struct driver_info hg20f9_info = {
937 .description = "HG20F9 USB 2.0 Ethernet",
938 .bind = ax88772_bind,
939 .unbind = ax88772_unbind,
940 .status = asix_status,
941 .link_reset = ax88772_link_reset,
942 .reset = ax88772_reset,
943 .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
944 FLAG_MULTI_PACKET,
945 .rx_fixup = asix_rx_fixup_common,
946 .tx_fixup = asix_tx_fixup,
947 .data = FLAG_EEPROM_MAC,
948};
949
927extern const struct driver_info ax88172a_info; 950extern const struct driver_info ax88172a_info;
928 951
929static const struct usb_device_id products [] = { 952static const struct usb_device_id products [] = {
@@ -1063,6 +1086,14 @@ static const struct usb_device_id products [] = {
1063 /* ASIX 88172a demo board */ 1086 /* ASIX 88172a demo board */
1064 USB_DEVICE(0x0b95, 0x172a), 1087 USB_DEVICE(0x0b95, 0x172a),
1065 .driver_info = (unsigned long) &ax88172a_info, 1088 .driver_info = (unsigned long) &ax88172a_info,
1089}, {
1090 /*
1091 * USBLINK HG20F9 "USB 2.0 LAN"
1092 * Appears to have gazumped Linksys's manufacturer ID but
1093 * doesn't (yet) conflict with any known Linksys product.
1094 */
1095 USB_DEVICE(0x066b, 0x20f9),
1096 .driver_info = (unsigned long) &hg20f9_info,
1066}, 1097},
1067 { }, // END 1098 { }, // END
1068}; 1099};
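
The new HG20F9 entry uses the standard usbnet convention: the VID/PID match in the usb_device_id table carries a pointer to the driver_info structure defined above, and usbnet hands it back to the bind/unbind callbacks at probe time. A minimal sketch of how such a table is wired to a usb_driver; this wiring already exists elsewhere in asix_devices.c, and the example_* names are illustrative:

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>

static const struct usb_device_id example_ids[] = {
	{
		/* USBLINK HG20F9 "USB 2.0 LAN" */
		USB_DEVICE(0x066b, 0x20f9),
		.driver_info = (unsigned long)&hg20f9_info,
	},
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);

static struct usb_driver example_driver = {
	.name		= "example_usbnet",
	.id_table	= example_ids,
	.probe		= usbnet_probe,		/* looks up driver_info */
	.disconnect	= usbnet_disconnect,
	.suspend	= usbnet_suspend,
	.resume		= usbnet_resume,
};
module_usb_driver(example_driver);
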
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
new file mode 100644
index 000000000000..71c27d8d214f
--- /dev/null
+++ b/drivers/net/usb/ax88179_178a.c
@@ -0,0 +1,1448 @@
1/*
2 * ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet Devices
3 *
4 * Copyright (C) 2011-2013 ASIX
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/etherdevice.h>
23#include <linux/mii.h>
24#include <linux/usb.h>
25#include <linux/crc32.h>
26#include <linux/usb/usbnet.h>
27
28#define AX88179_PHY_ID 0x03
29#define AX_EEPROM_LEN 0x100
30#define AX88179_EEPROM_MAGIC 0x17900b95
31#define AX_MCAST_FLTSIZE 8
32#define AX_MAX_MCAST 64
33#define AX_INT_PPLS_LINK ((u32)BIT(16))
34#define AX_RXHDR_L4_TYPE_MASK 0x1c
35#define AX_RXHDR_L4_TYPE_UDP 4
36#define AX_RXHDR_L4_TYPE_TCP 16
37#define AX_RXHDR_L3CSUM_ERR 2
38#define AX_RXHDR_L4CSUM_ERR 1
39#define AX_RXHDR_CRC_ERR ((u32)BIT(31))
40#define AX_RXHDR_DROP_ERR ((u32)BIT(30))
41#define AX_ACCESS_MAC 0x01
42#define AX_ACCESS_PHY 0x02
43#define AX_ACCESS_EEPROM 0x04
44#define AX_ACCESS_EFUS 0x05
45#define AX_PAUSE_WATERLVL_HIGH 0x54
46#define AX_PAUSE_WATERLVL_LOW 0x55
47
48#define PHYSICAL_LINK_STATUS 0x02
49 #define AX_USB_SS 0x04
50 #define AX_USB_HS 0x02
51
52#define GENERAL_STATUS 0x03
53/* Check AX88179 version. UA1:Bit2 = 0, UA2:Bit2 = 1 */
54 #define AX_SECLD 0x04
55
56#define AX_SROM_ADDR 0x07
57#define AX_SROM_CMD 0x0a
58 #define EEP_RD 0x04
59 #define EEP_BUSY 0x10
60
61#define AX_SROM_DATA_LOW 0x08
62#define AX_SROM_DATA_HIGH 0x09
63
64#define AX_RX_CTL 0x0b
65 #define AX_RX_CTL_DROPCRCERR 0x0100
66 #define AX_RX_CTL_IPE 0x0200
67 #define AX_RX_CTL_START 0x0080
68 #define AX_RX_CTL_AP 0x0020
69 #define AX_RX_CTL_AM 0x0010
70 #define AX_RX_CTL_AB 0x0008
71 #define AX_RX_CTL_AMALL 0x0002
72 #define AX_RX_CTL_PRO 0x0001
73 #define AX_RX_CTL_STOP 0x0000
74
75#define AX_NODE_ID 0x10
76#define AX_MULFLTARY 0x16
77
78#define AX_MEDIUM_STATUS_MODE 0x22
79 #define AX_MEDIUM_GIGAMODE 0x01
80 #define AX_MEDIUM_FULL_DUPLEX 0x02
81 #define AX_MEDIUM_ALWAYS_ONE 0x04
82 #define AX_MEDIUM_EN_125MHZ 0x08
83 #define AX_MEDIUM_RXFLOW_CTRLEN 0x10
84 #define AX_MEDIUM_TXFLOW_CTRLEN 0x20
85 #define AX_MEDIUM_RECEIVE_EN 0x100
86 #define AX_MEDIUM_PS 0x200
87 #define AX_MEDIUM_JUMBO_EN 0x8040
88
89#define AX_MONITOR_MOD 0x24
90 #define AX_MONITOR_MODE_RWLC 0x02
91 #define AX_MONITOR_MODE_RWMP 0x04
92 #define AX_MONITOR_MODE_PMEPOL 0x20
93 #define AX_MONITOR_MODE_PMETYPE 0x40
94
95#define AX_GPIO_CTRL 0x25
96 #define AX_GPIO_CTRL_GPIO3EN 0x80
97 #define AX_GPIO_CTRL_GPIO2EN 0x40
98 #define AX_GPIO_CTRL_GPIO1EN 0x20
99
100#define AX_PHYPWR_RSTCTL 0x26
101 #define AX_PHYPWR_RSTCTL_BZ 0x0010
102 #define AX_PHYPWR_RSTCTL_IPRL 0x0020
103 #define AX_PHYPWR_RSTCTL_AT 0x1000
104
105#define AX_RX_BULKIN_QCTRL 0x2e
106#define AX_CLK_SELECT 0x33
107 #define AX_CLK_SELECT_BCS 0x01
108 #define AX_CLK_SELECT_ACS 0x02
109 #define AX_CLK_SELECT_ULR 0x08
110
111#define AX_RXCOE_CTL 0x34
112 #define AX_RXCOE_IP 0x01
113 #define AX_RXCOE_TCP 0x02
114 #define AX_RXCOE_UDP 0x04
115 #define AX_RXCOE_TCPV6 0x20
116 #define AX_RXCOE_UDPV6 0x40
117
118#define AX_TXCOE_CTL 0x35
119 #define AX_TXCOE_IP 0x01
120 #define AX_TXCOE_TCP 0x02
121 #define AX_TXCOE_UDP 0x04
122 #define AX_TXCOE_TCPV6 0x20
123 #define AX_TXCOE_UDPV6 0x40
124
125#define AX_LEDCTRL 0x73
126
127#define GMII_PHY_PHYSR 0x11
128 #define GMII_PHY_PHYSR_SMASK 0xc000
129 #define GMII_PHY_PHYSR_GIGA 0x8000
130 #define GMII_PHY_PHYSR_100 0x4000
131 #define GMII_PHY_PHYSR_FULL 0x2000
132 #define GMII_PHY_PHYSR_LINK 0x400
133
134#define GMII_LED_ACT 0x1a
135 #define GMII_LED_ACTIVE_MASK 0xff8f
136 #define GMII_LED0_ACTIVE BIT(4)
137 #define GMII_LED1_ACTIVE BIT(5)
138 #define GMII_LED2_ACTIVE BIT(6)
139
140#define GMII_LED_LINK 0x1c
141 #define GMII_LED_LINK_MASK 0xf888
142 #define GMII_LED0_LINK_10 BIT(0)
143 #define GMII_LED0_LINK_100 BIT(1)
144 #define GMII_LED0_LINK_1000 BIT(2)
145 #define GMII_LED1_LINK_10 BIT(4)
146 #define GMII_LED1_LINK_100 BIT(5)
147 #define GMII_LED1_LINK_1000 BIT(6)
148 #define GMII_LED2_LINK_10 BIT(8)
149 #define GMII_LED2_LINK_100 BIT(9)
150 #define GMII_LED2_LINK_1000 BIT(10)
151 #define LED0_ACTIVE BIT(0)
152 #define LED0_LINK_10 BIT(1)
153 #define LED0_LINK_100 BIT(2)
154 #define LED0_LINK_1000 BIT(3)
155 #define LED0_FD BIT(4)
156 #define LED0_USB3_MASK 0x001f
157 #define LED1_ACTIVE BIT(5)
158 #define LED1_LINK_10 BIT(6)
159 #define LED1_LINK_100 BIT(7)
160 #define LED1_LINK_1000 BIT(8)
161 #define LED1_FD BIT(9)
162 #define LED1_USB3_MASK 0x03e0
163 #define LED2_ACTIVE BIT(10)
164 #define LED2_LINK_1000 BIT(13)
165 #define LED2_LINK_100 BIT(12)
166 #define LED2_LINK_10 BIT(11)
167 #define LED2_FD BIT(14)
168 #define LED_VALID BIT(15)
169 #define LED2_USB3_MASK 0x7c00
170
171#define GMII_PHYPAGE 0x1e
172#define GMII_PHY_PAGE_SELECT 0x1f
173 #define GMII_PHY_PGSEL_EXT 0x0007
174 #define GMII_PHY_PGSEL_PAGE0 0x0000
175
176struct ax88179_data {
177 u16 rxctl;
178 u16 reserved;
179};
180
181struct ax88179_int_data {
182 __le32 intdata1;
183 __le32 intdata2;
184};
185
186static const struct {
187 unsigned char ctrl, timer_l, timer_h, size, ifg;
188} AX88179_BULKIN_SIZE[] = {
189 {7, 0x4f, 0, 0x12, 0xff},
190 {7, 0x20, 3, 0x16, 0xff},
191 {7, 0xae, 7, 0x18, 0xff},
192 {7, 0xcc, 0x4c, 0x18, 8},
193};
194
195static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
196 u16 size, void *data, int in_pm)
197{
198 int ret;
199 int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
200
201 BUG_ON(!dev);
202
203 if (!in_pm)
204 fn = usbnet_read_cmd;
205 else
206 fn = usbnet_read_cmd_nopm;
207
208 ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
209 value, index, data, size);
210
211 if (unlikely(ret < 0))
212 netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
213 index, ret);
214
215 return ret;
216}
217
218static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
219 u16 size, void *data, int in_pm)
220{
221 int ret;
222 int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
223
224 BUG_ON(!dev);
225
226 if (!in_pm)
227 fn = usbnet_write_cmd;
228 else
229 fn = usbnet_write_cmd_nopm;
230
231 ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
232 value, index, data, size);
233
234 if (unlikely(ret < 0))
235 netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
236 index, ret);
237
238 return ret;
239}
240
241static void ax88179_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
242 u16 index, u16 size, void *data)
243{
244 u16 buf;
245
246 if (2 == size) {
247 buf = *((u16 *)data);
248 cpu_to_le16s(&buf);
249 usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
250 USB_RECIP_DEVICE, value, index, &buf,
251 size);
252 } else {
253 usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
254 USB_RECIP_DEVICE, value, index, data,
255 size);
256 }
257}
258
259static int ax88179_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
260 u16 index, u16 size, void *data)
261{
262 int ret;
263
264 if (2 == size) {
265 u16 buf;
266 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
267 le16_to_cpus(&buf);
268 *((u16 *)data) = buf;
269 } else if (4 == size) {
270 u32 buf;
271 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
272 le32_to_cpus(&buf);
273 *((u32 *)data) = buf;
274 } else {
275 ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 1);
276 }
277
278 return ret;
279}
280
281static int ax88179_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
282 u16 index, u16 size, void *data)
283{
284 int ret;
285
286 if (2 == size) {
287 u16 buf;
288 buf = *((u16 *)data);
289 cpu_to_le16s(&buf);
290 ret = __ax88179_write_cmd(dev, cmd, value, index,
291 size, &buf, 1);
292 } else {
293 ret = __ax88179_write_cmd(dev, cmd, value, index,
294 size, data, 1);
295 }
296
297 return ret;
298}
299
300static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
301 u16 size, void *data)
302{
303 int ret;
304
305 if (2 == size) {
306 u16 buf;
307 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
308 le16_to_cpus(&buf);
309 *((u16 *)data) = buf;
310 } else if (4 == size) {
311 u32 buf;
312 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
313 le32_to_cpus(&buf);
314 *((u32 *)data) = buf;
315 } else {
316 ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 0);
317 }
318
319 return ret;
320}
321
322static int ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
323 u16 size, void *data)
324{
325 int ret;
326
327 if (2 == size) {
328 u16 buf;
329 buf = *((u16 *)data);
330 cpu_to_le16s(&buf);
331 ret = __ax88179_write_cmd(dev, cmd, value, index,
332 size, &buf, 0);
333 } else {
334 ret = __ax88179_write_cmd(dev, cmd, value, index,
335 size, data, 0);
336 }
337
338 return ret;
339}
340
341static void ax88179_status(struct usbnet *dev, struct urb *urb)
342{
343 struct ax88179_int_data *event;
344 u32 link;
345
346 if (urb->actual_length < 8)
347 return;
348
349 event = urb->transfer_buffer;
350 le32_to_cpus((void *)&event->intdata1);
351
352 link = (((__force u32)event->intdata1) & AX_INT_PPLS_LINK) >> 16;
353
354 if (netif_carrier_ok(dev->net) != link) {
355 if (link)
356 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
357 else
358 netif_carrier_off(dev->net);
359
360 netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
361 }
362}
363
364static int ax88179_mdio_read(struct net_device *netdev, int phy_id, int loc)
365{
366 struct usbnet *dev = netdev_priv(netdev);
367 u16 res;
368
369 ax88179_read_cmd(dev, AX_ACCESS_PHY, phy_id, (__u16)loc, 2, &res);
370 return res;
371}
372
373static void ax88179_mdio_write(struct net_device *netdev, int phy_id, int loc,
374 int val)
375{
376 struct usbnet *dev = netdev_priv(netdev);
377 u16 res = (u16) val;
378
379 ax88179_write_cmd(dev, AX_ACCESS_PHY, phy_id, (__u16)loc, 2, &res);
380}
381
382static int ax88179_suspend(struct usb_interface *intf, pm_message_t message)
383{
384 struct usbnet *dev = usb_get_intfdata(intf);
385 u16 tmp16;
386 u8 tmp8;
387
388 usbnet_suspend(intf, message);
389
390 /* Disable RX path */
391 ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
392 2, 2, &tmp16);
393 tmp16 &= ~AX_MEDIUM_RECEIVE_EN;
394 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
395 2, 2, &tmp16);
396
397 /* Force bulk-in zero length */
398 ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
399 2, 2, &tmp16);
400
401 tmp16 |= AX_PHYPWR_RSTCTL_BZ | AX_PHYPWR_RSTCTL_IPRL;
402 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
403 2, 2, &tmp16);
404
405 /* change clock */
406 tmp8 = 0;
407 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
408
409 /* Configure RX control register => stop operation */
410 tmp16 = AX_RX_CTL_STOP;
411 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
412
413 return 0;
414}
415
416/* This function is used to enable the autodetach function. */
417/* This function is determined by offset 0x43 of EEPROM */
418static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
419{
420 u16 tmp16;
421 u8 tmp8;
422 int (*fnr)(struct usbnet *, u8, u16, u16, u16, void *);
423 int (*fnw)(struct usbnet *, u8, u16, u16, u16, void *);
424
425 if (!in_pm) {
426 fnr = ax88179_read_cmd;
427 fnw = ax88179_write_cmd;
428 } else {
429 fnr = ax88179_read_cmd_nopm;
430 fnw = ax88179_write_cmd_nopm;
431 }
432
433 if (fnr(dev, AX_ACCESS_EEPROM, 0x43, 1, 2, &tmp16) < 0)
434 return 0;
435
436 if ((tmp16 == 0xFFFF) || (!(tmp16 & 0x0100)))
437 return 0;
438
439 /* Enable Auto Detach bit */
440 tmp8 = 0;
441 fnr(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
442 tmp8 |= AX_CLK_SELECT_ULR;
443 fnw(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
444
445 fnr(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
446 tmp16 |= AX_PHYPWR_RSTCTL_AT;
447 fnw(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
448
449 return 0;
450}
451
452static int ax88179_resume(struct usb_interface *intf)
453{
454 struct usbnet *dev = usb_get_intfdata(intf);
455 u16 tmp16;
456 u8 tmp8;
457
458 netif_carrier_off(dev->net);
459
460 /* Power up ethernet PHY */
461 tmp16 = 0;
462 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
463 2, 2, &tmp16);
464 udelay(1000);
465
466 tmp16 = AX_PHYPWR_RSTCTL_IPRL;
467 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
468 2, 2, &tmp16);
469 msleep(200);
470
471 /* Ethernet PHY Auto Detach*/
472 ax88179_auto_detach(dev, 1);
473
474 /* Enable clock */
475 ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
476 tmp8 |= AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
477 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
478 msleep(100);
479
480 /* Configure RX control register => start operation */
481 tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
482 AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
483 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
484
485 return usbnet_resume(intf);
486}
487
488static void
489ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
490{
491 struct usbnet *dev = netdev_priv(net);
492 u8 opt;
493
494 if (ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
495 1, 1, &opt) < 0) {
496 wolinfo->supported = 0;
497 wolinfo->wolopts = 0;
498 return;
499 }
500
501 wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
502 wolinfo->wolopts = 0;
503 if (opt & AX_MONITOR_MODE_RWLC)
504 wolinfo->wolopts |= WAKE_PHY;
505 if (opt & AX_MONITOR_MODE_RWMP)
506 wolinfo->wolopts |= WAKE_MAGIC;
507}
508
509static int
510ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
511{
512 struct usbnet *dev = netdev_priv(net);
513 u8 opt = 0;
514
515 if (wolinfo->wolopts & WAKE_PHY)
516 opt |= AX_MONITOR_MODE_RWLC;
517 if (wolinfo->wolopts & WAKE_MAGIC)
518 opt |= AX_MONITOR_MODE_RWMP;
519
520 if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
521 1, 1, &opt) < 0)
522 return -EINVAL;
523
524 return 0;
525}
526
527static int ax88179_get_eeprom_len(struct net_device *net)
528{
529 return AX_EEPROM_LEN;
530}
531
532static int
533ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
534 u8 *data)
535{
536 struct usbnet *dev = netdev_priv(net);
537 u16 *eeprom_buff;
538 int first_word, last_word;
539 int i, ret;
540
541 if (eeprom->len == 0)
542 return -EINVAL;
543
544 eeprom->magic = AX88179_EEPROM_MAGIC;
545
546 first_word = eeprom->offset >> 1;
547 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
548 eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
549 GFP_KERNEL);
550 if (!eeprom_buff)
551 return -ENOMEM;
552
553 /* ax88179/178A returns 2 bytes from eeprom on read */
554 for (i = first_word; i <= last_word; i++) {
555 ret = __ax88179_read_cmd(dev, AX_ACCESS_EEPROM, i, 1, 2,
556 &eeprom_buff[i - first_word],
557 0);
558 if (ret < 0) {
559 kfree(eeprom_buff);
560 return -EIO;
561 }
562 }
563
564 memcpy(data, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
565 kfree(eeprom_buff);
566 return 0;
567}
568
569static int ax88179_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
570{
571 struct usbnet *dev = netdev_priv(net);
572 return mii_ethtool_gset(&dev->mii, cmd);
573}
574
575static int ax88179_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
576{
577 struct usbnet *dev = netdev_priv(net);
578 return mii_ethtool_sset(&dev->mii, cmd);
579}
580
581
582static int ax88179_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
583{
584 struct usbnet *dev = netdev_priv(net);
585 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
586}
587
588static const struct ethtool_ops ax88179_ethtool_ops = {
589 .get_link = ethtool_op_get_link,
590 .get_msglevel = usbnet_get_msglevel,
591 .set_msglevel = usbnet_set_msglevel,
592 .get_wol = ax88179_get_wol,
593 .set_wol = ax88179_set_wol,
594 .get_eeprom_len = ax88179_get_eeprom_len,
595 .get_eeprom = ax88179_get_eeprom,
596 .get_settings = ax88179_get_settings,
597 .set_settings = ax88179_set_settings,
598 .nway_reset = usbnet_nway_reset,
599};
600
601static void ax88179_set_multicast(struct net_device *net)
602{
603 struct usbnet *dev = netdev_priv(net);
604 struct ax88179_data *data = (struct ax88179_data *)dev->data;
605 u8 *m_filter = ((u8 *)dev->data) + 12;
606
607 data->rxctl = (AX_RX_CTL_START | AX_RX_CTL_AB | AX_RX_CTL_IPE);
608
609 if (net->flags & IFF_PROMISC) {
610 data->rxctl |= AX_RX_CTL_PRO;
611 } else if (net->flags & IFF_ALLMULTI ||
612 netdev_mc_count(net) > AX_MAX_MCAST) {
613 data->rxctl |= AX_RX_CTL_AMALL;
614 } else if (netdev_mc_empty(net)) {
615 /* just broadcast and directed */
616 } else {
617 /* We use the 20 byte dev->data for our 8 byte filter buffer
618 * to avoid allocating memory that is tricky to free later
619 */
620 u32 crc_bits;
621 struct netdev_hw_addr *ha;
622
623 memset(m_filter, 0, AX_MCAST_FLTSIZE);
624
625 netdev_for_each_mc_addr(ha, net) {
626 crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
627 *(m_filter + (crc_bits >> 3)) |= (1 << (crc_bits & 7));
628 }
629
630 ax88179_write_cmd_async(dev, AX_ACCESS_MAC, AX_MULFLTARY,
631 AX_MCAST_FLTSIZE, AX_MCAST_FLTSIZE,
632 m_filter);
633
634 data->rxctl |= AX_RX_CTL_AM;
635 }
636
637 ax88179_write_cmd_async(dev, AX_ACCESS_MAC, AX_RX_CTL,
638 2, 2, &data->rxctl);
639}
640
641static int
642ax88179_set_features(struct net_device *net, netdev_features_t features)
643{
644 u8 tmp;
645 struct usbnet *dev = netdev_priv(net);
646 netdev_features_t changed = net->features ^ features;
647
648 if (changed & NETIF_F_IP_CSUM) {
649 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
650 tmp ^= AX_TXCOE_TCP | AX_TXCOE_UDP;
651 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
652 }
653
654 if (changed & NETIF_F_IPV6_CSUM) {
655 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
656 tmp ^= AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
657 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
658 }
659
660 if (changed & NETIF_F_RXCSUM) {
661 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, &tmp);
662 tmp ^= AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
663 AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
664 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, &tmp);
665 }
666
667 return 0;
668}
669
670static int ax88179_change_mtu(struct net_device *net, int new_mtu)
671{
672 struct usbnet *dev = netdev_priv(net);
673 u16 tmp16;
674
675 if (new_mtu <= 0 || new_mtu > 4088)
676 return -EINVAL;
677
678 net->mtu = new_mtu;
679 dev->hard_mtu = net->mtu + net->hard_header_len;
680
681 if (net->mtu > 1500) {
682 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
683 2, 2, &tmp16);
684 tmp16 |= AX_MEDIUM_JUMBO_EN;
685 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
686 2, 2, &tmp16);
687 } else {
688 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
689 2, 2, &tmp16);
690 tmp16 &= ~AX_MEDIUM_JUMBO_EN;
691 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
692 2, 2, &tmp16);
693 }
694
695 return 0;
696}
697
698static int ax88179_set_mac_addr(struct net_device *net, void *p)
699{
700 struct usbnet *dev = netdev_priv(net);
701 struct sockaddr *addr = p;
702
703 if (netif_running(net))
704 return -EBUSY;
705 if (!is_valid_ether_addr(addr->sa_data))
706 return -EADDRNOTAVAIL;
707
708 memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
709
710 /* Set the MAC address */
711 return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
712 ETH_ALEN, net->dev_addr);
713}
714
715static const struct net_device_ops ax88179_netdev_ops = {
716 .ndo_open = usbnet_open,
717 .ndo_stop = usbnet_stop,
718 .ndo_start_xmit = usbnet_start_xmit,
719 .ndo_tx_timeout = usbnet_tx_timeout,
720 .ndo_change_mtu = ax88179_change_mtu,
721 .ndo_set_mac_address = ax88179_set_mac_addr,
722 .ndo_validate_addr = eth_validate_addr,
723 .ndo_do_ioctl = ax88179_ioctl,
724 .ndo_set_rx_mode = ax88179_set_multicast,
725 .ndo_set_features = ax88179_set_features,
726};
727
728static int ax88179_check_eeprom(struct usbnet *dev)
729{
730 u8 i, buf, eeprom[20];
731 u16 csum, delay = HZ / 10;
732 unsigned long jtimeout;
733
734 /* Read EEPROM content */
735 for (i = 0; i < 6; i++) {
736 buf = i;
737 if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_ADDR,
738 1, 1, &buf) < 0)
739 return -EINVAL;
740
741 buf = EEP_RD;
742 if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
743 1, 1, &buf) < 0)
744 return -EINVAL;
745
746 jtimeout = jiffies + delay;
747 do {
748 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
749 1, 1, &buf);
750
751 if (time_after(jiffies, jtimeout))
752 return -EINVAL;
753
754 } while (buf & EEP_BUSY);
755
756 __ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_LOW,
757 2, 2, &eeprom[i * 2], 0);
758
759 if ((i == 0) && (eeprom[0] == 0xFF))
760 return -EINVAL;
761 }
762
763 csum = eeprom[6] + eeprom[7] + eeprom[8] + eeprom[9];
764 csum = (csum >> 8) + (csum & 0xff);
765 if ((csum + eeprom[10]) != 0xff)
766 return -EINVAL;
767
768 return 0;
769}
770
771static int ax88179_check_efuse(struct usbnet *dev, u16 *ledmode)
772{
773 u8 i;
774 u8 efuse[64];
775 u16 csum = 0;
776
777 if (ax88179_read_cmd(dev, AX_ACCESS_EFUS, 0, 64, 64, efuse) < 0)
778 return -EINVAL;
779
780 if (*efuse == 0xFF)
781 return -EINVAL;
782
783 for (i = 0; i < 64; i++)
784 csum = csum + efuse[i];
785
786 while (csum > 255)
787 csum = (csum & 0x00FF) + ((csum >> 8) & 0x00FF);
788
789 if (csum != 0xFF)
790 return -EINVAL;
791
792 *ledmode = (efuse[51] << 8) | efuse[52];
793
794 return 0;
795}
796
797static int ax88179_convert_old_led(struct usbnet *dev, u16 *ledvalue)
798{
799 u16 led;
800
801 /* Loaded the old eFuse LED Mode */
802 if (ax88179_read_cmd(dev, AX_ACCESS_EEPROM, 0x3C, 1, 2, &led) < 0)
803 return -EINVAL;
804
805 led >>= 8;
806 switch (led) {
807 case 0xFF:
808 led = LED0_ACTIVE | LED1_LINK_10 | LED1_LINK_100 |
809 LED1_LINK_1000 | LED2_ACTIVE | LED2_LINK_10 |
810 LED2_LINK_100 | LED2_LINK_1000 | LED_VALID;
811 break;
812 case 0xFE:
813 led = LED0_ACTIVE | LED1_LINK_1000 | LED2_LINK_100 | LED_VALID;
814 break;
815 case 0xFD:
816 led = LED0_ACTIVE | LED1_LINK_1000 | LED2_LINK_100 |
817 LED2_LINK_10 | LED_VALID;
818 break;
819 case 0xFC:
820 led = LED0_ACTIVE | LED1_ACTIVE | LED1_LINK_1000 | LED2_ACTIVE |
821 LED2_LINK_100 | LED2_LINK_10 | LED_VALID;
822 break;
823 default:
824 led = LED0_ACTIVE | LED1_LINK_10 | LED1_LINK_100 |
825 LED1_LINK_1000 | LED2_ACTIVE | LED2_LINK_10 |
826 LED2_LINK_100 | LED2_LINK_1000 | LED_VALID;
827 break;
828 }
829
830 *ledvalue = led;
831
832 return 0;
833}
834
835static int ax88179_led_setting(struct usbnet *dev)
836{
837 u8 ledfd, value = 0;
838 u16 tmp, ledact, ledlink, ledvalue = 0, delay = HZ / 10;
839 unsigned long jtimeout;
840
841 /* Check AX88179 version. UA1 or UA2*/
842 ax88179_read_cmd(dev, AX_ACCESS_MAC, GENERAL_STATUS, 1, 1, &value);
843
844 if (!(value & AX_SECLD)) { /* UA1 */
845 value = AX_GPIO_CTRL_GPIO3EN | AX_GPIO_CTRL_GPIO2EN |
846 AX_GPIO_CTRL_GPIO1EN;
847 if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_GPIO_CTRL,
848 1, 1, &value) < 0)
849 return -EINVAL;
850 }
851
852 /* Check EEPROM */
853 if (!ax88179_check_eeprom(dev)) {
854 value = 0x42;
855 if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_ADDR,
856 1, 1, &value) < 0)
857 return -EINVAL;
858
859 value = EEP_RD;
860 if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
861 1, 1, &value) < 0)
862 return -EINVAL;
863
864 jtimeout = jiffies + delay;
865 do {
866 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
867 1, 1, &value);
868
869 if (time_after(jiffies, jtimeout))
870 return -EINVAL;
871
872 } while (value & EEP_BUSY);
873
874 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_HIGH,
875 1, 1, &value);
876 ledvalue = (value << 8);
877
878 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_LOW,
879 1, 1, &value);
880 ledvalue |= value;
881
 882		/* load internal ROM for default setting */
883 if ((ledvalue == 0xFFFF) || ((ledvalue & LED_VALID) == 0))
884 ax88179_convert_old_led(dev, &ledvalue);
885
886 } else if (!ax88179_check_efuse(dev, &ledvalue)) {
887 if ((ledvalue == 0xFFFF) || ((ledvalue & LED_VALID) == 0))
888 ax88179_convert_old_led(dev, &ledvalue);
889 } else {
890 ax88179_convert_old_led(dev, &ledvalue);
891 }
892
893 tmp = GMII_PHY_PGSEL_EXT;
894 ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
895 GMII_PHY_PAGE_SELECT, 2, &tmp);
896
897 tmp = 0x2c;
898 ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
899 GMII_PHYPAGE, 2, &tmp);
900
901 ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
902 GMII_LED_ACT, 2, &ledact);
903
904 ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
905 GMII_LED_LINK, 2, &ledlink);
906
907 ledact &= GMII_LED_ACTIVE_MASK;
908 ledlink &= GMII_LED_LINK_MASK;
909
910 if (ledvalue & LED0_ACTIVE)
911 ledact |= GMII_LED0_ACTIVE;
912
913 if (ledvalue & LED1_ACTIVE)
914 ledact |= GMII_LED1_ACTIVE;
915
916 if (ledvalue & LED2_ACTIVE)
917 ledact |= GMII_LED2_ACTIVE;
918
919 if (ledvalue & LED0_LINK_10)
920 ledlink |= GMII_LED0_LINK_10;
921
922 if (ledvalue & LED1_LINK_10)
923 ledlink |= GMII_LED1_LINK_10;
924
925 if (ledvalue & LED2_LINK_10)
926 ledlink |= GMII_LED2_LINK_10;
927
928 if (ledvalue & LED0_LINK_100)
929 ledlink |= GMII_LED0_LINK_100;
930
931 if (ledvalue & LED1_LINK_100)
932 ledlink |= GMII_LED1_LINK_100;
933
934 if (ledvalue & LED2_LINK_100)
935 ledlink |= GMII_LED2_LINK_100;
936
937 if (ledvalue & LED0_LINK_1000)
938 ledlink |= GMII_LED0_LINK_1000;
939
940 if (ledvalue & LED1_LINK_1000)
941 ledlink |= GMII_LED1_LINK_1000;
942
943 if (ledvalue & LED2_LINK_1000)
944 ledlink |= GMII_LED2_LINK_1000;
945
946 tmp = ledact;
947 ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
948 GMII_LED_ACT, 2, &tmp);
949
950 tmp = ledlink;
951 ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
952 GMII_LED_LINK, 2, &tmp);
953
954 tmp = GMII_PHY_PGSEL_PAGE0;
955 ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
956 GMII_PHY_PAGE_SELECT, 2, &tmp);
957
958 /* LED full duplex setting */
959 ledfd = 0;
960 if (ledvalue & LED0_FD)
961 ledfd |= 0x01;
962 else if ((ledvalue & LED0_USB3_MASK) == 0)
963 ledfd |= 0x02;
964
965 if (ledvalue & LED1_FD)
966 ledfd |= 0x04;
967 else if ((ledvalue & LED1_USB3_MASK) == 0)
968 ledfd |= 0x08;
969
970 if (ledvalue & LED2_FD)
971 ledfd |= 0x10;
972 else if ((ledvalue & LED2_USB3_MASK) == 0)
973 ledfd |= 0x20;
974
975 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_LEDCTRL, 1, 1, &ledfd);
976
977 return 0;
978}
979
980static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
981{
982 u8 buf[5];
983 u16 *tmp16;
984 u8 *tmp;
985 struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
986
987 usbnet_get_endpoints(dev, intf);
988
989 tmp16 = (u16 *)buf;
990 tmp = (u8 *)buf;
991
992 memset(ax179_data, 0, sizeof(*ax179_data));
993
994 /* Power up ethernet PHY */
995 *tmp16 = 0;
996 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
997 *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
998 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
999 msleep(200);
1000
1001 *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
1002 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
1003 msleep(100);
1004
1005 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
1006 ETH_ALEN, dev->net->dev_addr);
1007 memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
1008
1009 /* RX bulk configuration */
1010 memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
1011 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
1012
1013 dev->rx_urb_size = 1024 * 20;
1014
1015 *tmp = 0x34;
1016 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_LOW, 1, 1, tmp);
1017
1018 *tmp = 0x52;
1019 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
1020 1, 1, tmp);
1021
1022 dev->net->netdev_ops = &ax88179_netdev_ops;
1023 dev->net->ethtool_ops = &ax88179_ethtool_ops;
1024 dev->net->needed_headroom = 8;
1025
1026 /* Initialize MII structure */
1027 dev->mii.dev = dev->net;
1028 dev->mii.mdio_read = ax88179_mdio_read;
1029 dev->mii.mdio_write = ax88179_mdio_write;
1030 dev->mii.phy_id_mask = 0xff;
1031 dev->mii.reg_num_mask = 0xff;
1032 dev->mii.phy_id = 0x03;
1033 dev->mii.supports_gmii = 1;
1034
1035 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1036 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
1037
1038 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1039 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
1040
1041 /* Enable checksum offload */
1042 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
1043 AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
1044 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, tmp);
1045
1046 *tmp = AX_TXCOE_IP | AX_TXCOE_TCP | AX_TXCOE_UDP |
1047 AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
1048 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, tmp);
1049
1050 /* Configure RX control register => start operation */
1051 *tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
1052 AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
1053 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, tmp16);
1054
1055 *tmp = AX_MONITOR_MODE_PMETYPE | AX_MONITOR_MODE_PMEPOL |
1056 AX_MONITOR_MODE_RWMP;
1057 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD, 1, 1, tmp);
1058
1059 /* Configure default medium type => giga */
1060 *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
1061 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
1062 AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
1063 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
1064 2, 2, tmp16);
1065
1066 ax88179_led_setting(dev);
1067
1068 /* Restart autoneg */
1069 mii_nway_restart(&dev->mii);
1070
1071 netif_carrier_off(dev->net);
1072
1073 return 0;
1074}
1075
1076static void ax88179_unbind(struct usbnet *dev, struct usb_interface *intf)
1077{
1078 u16 tmp16;
1079
1080 /* Configure RX control register => stop operation */
1081 tmp16 = AX_RX_CTL_STOP;
1082 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
1083
1084 tmp16 = 0;
1085 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp16);
1086
1087 /* Power down ethernet PHY */
1088 tmp16 = 0;
1089 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
1090}
1091
1092static void
1093ax88179_rx_checksum(struct sk_buff *skb, u32 *pkt_hdr)
1094{
1095 skb->ip_summed = CHECKSUM_NONE;
1096
1097 /* checksum error bit is set */
1098 if ((*pkt_hdr & AX_RXHDR_L3CSUM_ERR) ||
1099 (*pkt_hdr & AX_RXHDR_L4CSUM_ERR))
1100 return;
1101
1102 /* It must be a TCP or UDP packet with a valid checksum */
1103 if (((*pkt_hdr & AX_RXHDR_L4_TYPE_MASK) == AX_RXHDR_L4_TYPE_TCP) ||
1104 ((*pkt_hdr & AX_RXHDR_L4_TYPE_MASK) == AX_RXHDR_L4_TYPE_UDP))
1105 skb->ip_summed = CHECKSUM_UNNECESSARY;
1106}
1107
1108static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1109{
1110 struct sk_buff *ax_skb;
1111 int pkt_cnt;
1112 u32 rx_hdr;
1113 u16 hdr_off;
1114 u32 *pkt_hdr;
1115
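	/*
	 * Each bulk-in transfer ends with a 4-byte RX header: the low 16 bits
	 * hold the packet count, the high 16 bits the offset of the
	 * per-packet header array within the buffer.
	 */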
1116 skb_trim(skb, skb->len - 4);
1117 memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
1118 le32_to_cpus(&rx_hdr);
1119
1120 pkt_cnt = (u16)rx_hdr;
1121 hdr_off = (u16)(rx_hdr >> 16);
1122 pkt_hdr = (u32 *)(skb->data + hdr_off);
1123
1124 while (pkt_cnt--) {
1125 u16 pkt_len;
1126
1127 le32_to_cpus(pkt_hdr);
1128 pkt_len = (*pkt_hdr >> 16) & 0x1fff;
1129
1130 /* Check CRC or runt packet */
1131 if ((*pkt_hdr & AX_RXHDR_CRC_ERR) ||
1132 (*pkt_hdr & AX_RXHDR_DROP_ERR)) {
1133 skb_pull(skb, (pkt_len + 7) & 0xFFF8);
1134 pkt_hdr++;
1135 continue;
1136 }
1137
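		/* The final packet reuses the original skb; earlier packets are
		 * handed up via clones below.
		 */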
1138 if (pkt_cnt == 0) {
1139 /* Skip IP alignment pseudo header */
1140 skb_pull(skb, 2);
1141 skb->len = pkt_len;
1142 skb_set_tail_pointer(skb, pkt_len);
1143 skb->truesize = pkt_len + sizeof(struct sk_buff);
1144 ax88179_rx_checksum(skb, pkt_hdr);
1145 return 1;
1146 }
1147
1148 ax_skb = skb_clone(skb, GFP_ATOMIC);
1149 if (ax_skb) {
1150 ax_skb->len = pkt_len;
1151 ax_skb->data = skb->data + 2;
1152 skb_set_tail_pointer(ax_skb, pkt_len);
1153 ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
1154 ax88179_rx_checksum(ax_skb, pkt_hdr);
1155 usbnet_skb_return(dev, ax_skb);
1156 } else {
1157 return 0;
1158 }
1159
1160 skb_pull(skb, (pkt_len + 7) & 0xFFF8);
1161 pkt_hdr++;
1162 }
1163 return 1;
1164}
1165
1166static struct sk_buff *
1167ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
1168{
1169 u32 tx_hdr1, tx_hdr2;
1170 int frame_size = dev->maxpacket;
1171 int mss = skb_shinfo(skb)->gso_size;
1172 int headroom;
1173 int tailroom;
1174
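	/*
	 * Build the 8-byte TX header: the first dword carries the frame
	 * length, the second the TSO MSS plus a padding-enable flag
	 * (0x80008000) set when the frame plus the 8-byte header is an
	 * exact multiple of the USB max packet size.
	 */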
1175 tx_hdr1 = skb->len;
1176 tx_hdr2 = mss;
1177 if (((skb->len + 8) % frame_size) == 0)
1178 tx_hdr2 |= 0x80008000; /* Enable padding */
1179
1180 skb_linearize(skb);
1181 headroom = skb_headroom(skb);
1182 tailroom = skb_tailroom(skb);
1183
1184 if (!skb_header_cloned(skb) &&
1185 !skb_cloned(skb) &&
1186 (headroom + tailroom) >= 8) {
1187 if (headroom < 8) {
1188 skb->data = memmove(skb->head + 8, skb->data, skb->len);
1189 skb_set_tail_pointer(skb, skb->len);
1190 }
1191 } else {
1192 struct sk_buff *skb2;
1193
1194 skb2 = skb_copy_expand(skb, 8, 0, flags);
1195 dev_kfree_skb_any(skb);
1196 skb = skb2;
1197 if (!skb)
1198 return NULL;
1199 }
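	/* Prepend tx_hdr2 first, then tx_hdr1, so the finished header begins
	 * with the length dword.
	 */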
1200
1201 skb_push(skb, 4);
1202 cpu_to_le32s(&tx_hdr2);
1203 skb_copy_to_linear_data(skb, &tx_hdr2, 4);
1204
1205 skb_push(skb, 4);
1206 cpu_to_le32s(&tx_hdr1);
1207 skb_copy_to_linear_data(skb, &tx_hdr1, 4);
1208
1209 return skb;
1210}
1211
1212static int ax88179_link_reset(struct usbnet *dev)
1213{
1214 struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
1215 u8 tmp[5], link_sts;
1216 u16 mode, tmp16, delay = HZ / 10;
1217 u32 tmp32 = 0x40000000;
1218 unsigned long jtimeout;
1219
1220 jtimeout = jiffies + delay;
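	/* Toggle the RX path until the device's control TX FIFO status bit
	 * (bit 30) clears, or about 100 ms (HZ / 10) pass.
	 */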
1221 while (tmp32 & 0x40000000) {
1222 mode = 0;
1223 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &mode);
1224 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2,
1225 &ax179_data->rxctl);
1226
1227 /* link up, check whether the USB device control TX FIFO is full or empty */
1228 ax88179_read_cmd(dev, 0x81, 0x8c, 0, 4, &tmp32);
1229
1230 if (time_after(jiffies, jtimeout))
1231 return 0;
1232 }
1233
1234 mode = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
1235 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE;
1236
1237 ax88179_read_cmd(dev, AX_ACCESS_MAC, PHYSICAL_LINK_STATUS,
1238 1, 1, &link_sts);
1239
1240 ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
1241 GMII_PHY_PHYSR, 2, &tmp16);
1242
1243 if (!(tmp16 & GMII_PHY_PHYSR_LINK)) {
1244 return 0;
1245 } else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
1246 mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ;
1247 if (dev->net->mtu > 1500)
1248 mode |= AX_MEDIUM_JUMBO_EN;
1249
1250 if (link_sts & AX_USB_SS)
1251 memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
1252 else if (link_sts & AX_USB_HS)
1253 memcpy(tmp, &AX88179_BULKIN_SIZE[1], 5);
1254 else
1255 memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
1256 } else if (GMII_PHY_PHYSR_100 == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
1257 mode |= AX_MEDIUM_PS;
1258
1259 if (link_sts & (AX_USB_SS | AX_USB_HS))
1260 memcpy(tmp, &AX88179_BULKIN_SIZE[2], 5);
1261 else
1262 memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
1263 } else {
1264 memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
1265 }
1266
1267 /* RX bulk configuration */
1268 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
1269
1270 dev->rx_urb_size = (1024 * (tmp[3] + 2));
1271
1272 if (tmp16 & GMII_PHY_PHYSR_FULL)
1273 mode |= AX_MEDIUM_FULL_DUPLEX;
1274 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
1275 2, 2, &mode);
1276
1277 netif_carrier_on(dev->net);
1278
1279 return 0;
1280}
1281
1282static int ax88179_reset(struct usbnet *dev)
1283{
1284 u8 buf[5];
1285 u16 *tmp16;
1286 u8 *tmp;
1287
1288 tmp16 = (u16 *)buf;
1289 tmp = (u8 *)buf;
1290
1291 /* Power up ethernet PHY */
1292 *tmp16 = 0;
1293 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
1294
1295 *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
1296 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
1297 msleep(200);
1298
1299 *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
1300 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
1301 msleep(100);
1302
1303 /* Ethernet PHY Auto Detach */
1304 ax88179_auto_detach(dev, 0);
1305
1306 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
1307 dev->net->dev_addr);
1308 memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
1309
1310 /* RX bulk configuration */
1311 memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
1312 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
1313
1314 dev->rx_urb_size = 1024 * 20;
1315
1316 *tmp = 0x34;
1317 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_LOW, 1, 1, tmp);
1318
1319 *tmp = 0x52;
1320 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
1321 1, 1, tmp);
1322
1323 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1324 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
1325
1326 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1327 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
1328
1329 /* Enable checksum offload */
1330 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
1331 AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
1332 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, tmp);
1333
1334 *tmp = AX_TXCOE_IP | AX_TXCOE_TCP | AX_TXCOE_UDP |
1335 AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
1336 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, tmp);
1337
1338 /* Configure RX control register => start operation */
1339 *tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
1340 AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
1341 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, tmp16);
1342
1343 *tmp = AX_MONITOR_MODE_PMETYPE | AX_MONITOR_MODE_PMEPOL |
1344 AX_MONITOR_MODE_RWMP;
1345 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD, 1, 1, tmp);
1346
1347 /* Configure default medium type => giga */
1348 *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
1349 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
1350 AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
1351 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
1352 2, 2, tmp16);
1353
1354 ax88179_led_setting(dev);
1355
1356 /* Restart autoneg */
1357 mii_nway_restart(&dev->mii);
1358
1359 netif_carrier_off(dev->net);
1360
1361 return 0;
1362}
1363
1364static int ax88179_stop(struct usbnet *dev)
1365{
1366 u16 tmp16;
1367
1368 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
1369 2, 2, &tmp16);
1370 tmp16 &= ~AX_MEDIUM_RECEIVE_EN;
1371 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
1372 2, 2, &tmp16);
1373
1374 return 0;
1375}
1376
1377static const struct driver_info ax88179_info = {
1378 .description = "ASIX AX88179 USB 3.0 Gigibit Ethernet",
1379 .bind = ax88179_bind,
1380 .unbind = ax88179_unbind,
1381 .status = ax88179_status,
1382 .link_reset = ax88179_link_reset,
1383 .reset = ax88179_reset,
1384 .stop = ax88179_stop,
1385 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1386 .rx_fixup = ax88179_rx_fixup,
1387 .tx_fixup = ax88179_tx_fixup,
1388};
1389
1390static const struct driver_info ax88178a_info = {
1391 .description = "ASIX AX88178A USB 2.0 Gigibit Ethernet",
1392 .bind = ax88179_bind,
1393 .unbind = ax88179_unbind,
1394 .status = ax88179_status,
1395 .link_reset = ax88179_link_reset,
1396 .reset = ax88179_reset,
1397 .stop = ax88179_stop,
1398 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1399 .rx_fixup = ax88179_rx_fixup,
1400 .tx_fixup = ax88179_tx_fixup,
1401};
1402
1403static const struct driver_info sitecom_info = {
1404 .description = "Sitecom USB 3.0 to Gigabit Adapter",
1405 .bind = ax88179_bind,
1406 .unbind = ax88179_unbind,
1407 .status = ax88179_status,
1408 .link_reset = ax88179_link_reset,
1409 .reset = ax88179_reset,
1410 .stop = ax88179_stop,
1411 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1412 .rx_fixup = ax88179_rx_fixup,
1413 .tx_fixup = ax88179_tx_fixup,
1414};
1415
1416static const struct usb_device_id products[] = {
1417{
1418 /* ASIX AX88179 10/100/1000 */
1419 USB_DEVICE(0x0b95, 0x1790),
1420 .driver_info = (unsigned long)&ax88179_info,
1421}, {
1422 /* ASIX AX88178A 10/100/1000 */
1423 USB_DEVICE(0x0b95, 0x178a),
1424 .driver_info = (unsigned long)&ax88178a_info,
1425}, {
1426 /* Sitecom USB 3.0 to Gigabit Adapter */
1427 USB_DEVICE(0x0df6, 0x0072),
1428 .driver_info = (unsigned long) &sitecom_info,
1429},
1430 { },
1431};
1432MODULE_DEVICE_TABLE(usb, products);
1433
1434static struct usb_driver ax88179_178a_driver = {
1435 .name = "ax88179_178a",
1436 .id_table = products,
1437 .probe = usbnet_probe,
1438 .suspend = ax88179_suspend,
1439 .resume = ax88179_resume,
1440 .disconnect = usbnet_disconnect,
1441 .supports_autosuspend = 1,
1442 .disable_hub_initiated_lpm = 1,
1443};
1444
1445module_usb_driver(ax88179_178a_driver);
1446
1447MODULE_DESCRIPTION("ASIX AX88179/178A based USB 3.0/2.0 Gigabit Ethernet Devices");
1448MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 4a8c25a22294..61b74a2b89ac 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1213,6 +1213,14 @@ static const struct usb_device_id cdc_devs[] = {
1213 .driver_info = (unsigned long) &wwan_info, 1213 .driver_info = (unsigned long) &wwan_info,
1214 }, 1214 },
1215 1215
1216 /* tag Huawei devices as wwan */
1217 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1,
1218 USB_CLASS_COMM,
1219 USB_CDC_SUBCLASS_NCM,
1220 USB_CDC_PROTO_NONE),
1221 .driver_info = (unsigned long)&wwan_info,
1222 },
1223
1216 /* Huawei NCM devices disguised as vendor specific */ 1224 /* Huawei NCM devices disguised as vendor specific */
1217 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16), 1225 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
1218 .driver_info = (unsigned long)&wwan_info, 1226 .driver_info = (unsigned long)&wwan_info,
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 5f845beeb18b..050ca4a4850d 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -27,7 +27,7 @@
27#define WME_MAX_BA WME_BA_BMP_SIZE 27#define WME_MAX_BA WME_BA_BMP_SIZE
28#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA) 28#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
29 29
30#define ATH_RSSI_DUMMY_MARKER 0x127 30#define ATH_RSSI_DUMMY_MARKER 127
31#define ATH_RSSI_LPF_LEN 10 31#define ATH_RSSI_LPF_LEN 10
32#define RSSI_LPF_THRESHOLD -20 32#define RSSI_LPF_THRESHOLD -20
33#define ATH_RSSI_EP_MULTIPLIER (1<<7) 33#define ATH_RSSI_EP_MULTIPLIER (1<<7)
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 96bfb18078fa..d3b099d7898b 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -22,6 +22,7 @@
22#include <linux/firmware.h> 22#include <linux/firmware.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
25#include <linux/leds.h> 26#include <linux/leds.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27#include <net/mac80211.h> 28#include <net/mac80211.h>
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 3ad1fd05c5e7..bd8251c1c749 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1067,15 +1067,19 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
1067 1067
1068 last_rssi = priv->rx.last_rssi; 1068 last_rssi = priv->rx.last_rssi;
1069 1069
1070 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 1070 if (ieee80211_is_beacon(hdr->frame_control) &&
1071 rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi, 1071 !is_zero_ether_addr(common->curbssid) &&
1072 ATH_RSSI_EP_MULTIPLIER); 1072 ether_addr_equal(hdr->addr3, common->curbssid)) {
1073 s8 rssi = rxbuf->rxstatus.rs_rssi;
1073 1074
1074 if (rxbuf->rxstatus.rs_rssi < 0) 1075 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
1075 rxbuf->rxstatus.rs_rssi = 0; 1076 rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
1076 1077
1077 if (ieee80211_is_beacon(fc)) 1078 if (rssi < 0)
1078 priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi; 1079 rssi = 0;
1080
1081 priv->ah->stats.avgbrssi = rssi;
1082 }
1079 1083
1080 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp); 1084 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
1081 rx_status->band = hw->conf.channel->band; 1085 rx_status->band = hw->conf.channel->band;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2a2ae403e0e5..07e25260c31d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1463,7 +1463,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
1463 reset_type = ATH9K_RESET_POWER_ON; 1463 reset_type = ATH9K_RESET_POWER_ON;
1464 else 1464 else
1465 reset_type = ATH9K_RESET_COLD; 1465 reset_type = ATH9K_RESET_COLD;
1466 } 1466 } else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) ||
1467 (REG_READ(ah, AR_CR) & AR_CR_RXE))
1468 reset_type = ATH9K_RESET_COLD;
1467 1469
1468 if (!ath9k_hw_set_reset_reg(ah, reset_type)) 1470 if (!ath9k_hw_set_reset_reg(ah, reset_type))
1469 return false; 1471 return false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 9a0f45ec9e01..10f01793d7a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -349,25 +349,23 @@ TRACE_EVENT(iwlwifi_dev_rx_data,
349TRACE_EVENT(iwlwifi_dev_hcmd, 349TRACE_EVENT(iwlwifi_dev_hcmd,
350 TP_PROTO(const struct device *dev, 350 TP_PROTO(const struct device *dev,
351 struct iwl_host_cmd *cmd, u16 total_size, 351 struct iwl_host_cmd *cmd, u16 total_size,
352 const void *hdr, size_t hdr_len), 352 struct iwl_cmd_header *hdr),
353 TP_ARGS(dev, cmd, total_size, hdr, hdr_len), 353 TP_ARGS(dev, cmd, total_size, hdr),
354 TP_STRUCT__entry( 354 TP_STRUCT__entry(
355 DEV_ENTRY 355 DEV_ENTRY
356 __dynamic_array(u8, hcmd, total_size) 356 __dynamic_array(u8, hcmd, total_size)
357 __field(u32, flags) 357 __field(u32, flags)
358 ), 358 ),
359 TP_fast_assign( 359 TP_fast_assign(
360 int i, offset = hdr_len; 360 int i, offset = sizeof(*hdr);
361 361
362 DEV_ASSIGN; 362 DEV_ASSIGN;
363 __entry->flags = cmd->flags; 363 __entry->flags = cmd->flags;
364 memcpy(__get_dynamic_array(hcmd), hdr, hdr_len); 364 memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
365 365
366 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 366 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
367 if (!cmd->len[i]) 367 if (!cmd->len[i])
368 continue; 368 continue;
369 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
370 continue;
371 memcpy((u8 *)__get_dynamic_array(hcmd) + offset, 369 memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
372 cmd->data[i], cmd->len[i]); 370 cmd->data[i], cmd->len[i]);
373 offset += cmd->len[i]; 371 offset += cmd->len[i];
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index 14fc8d39fc28..3392011a8768 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -136,12 +136,6 @@ struct iwl_calib_res_notif_phy_db {
136 u8 data[]; 136 u8 data[];
137} __packed; 137} __packed;
138 138
139#define IWL_PHY_DB_STATIC_PIC cpu_to_le32(0x21436587)
140static inline void iwl_phy_db_test_pic(__le32 pic)
141{
142 WARN_ON(IWL_PHY_DB_STATIC_PIC != pic);
143}
144
145struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans) 139struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
146{ 140{
147 struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db), 141 struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
@@ -260,11 +254,6 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
260 (size - CHANNEL_NUM_SIZE) / phy_db->channel_num; 254 (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
261 } 255 }
262 256
263 /* Test PIC */
264 if (type != IWL_PHY_DB_CFG)
265 iwl_phy_db_test_pic(*(((__le32 *)phy_db_notif->data) +
266 (size / sizeof(__le32)) - 1));
267
268 IWL_DEBUG_INFO(phy_db->trans, 257 IWL_DEBUG_INFO(phy_db->trans,
269 "%s(%d): [PHYDB]SET: Type %d , Size: %d\n", 258 "%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
270 __func__, __LINE__, type, size); 259 __func__, __LINE__, type, size);
@@ -372,11 +361,6 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
372 *size = entry->size; 361 *size = entry->size;
373 } 362 }
374 363
375 /* Test PIC */
376 if (type != IWL_PHY_DB_CFG)
377 iwl_phy_db_test_pic(*(((__le32 *)*data) +
378 (*size / sizeof(__le32)) - 1));
379
380 IWL_DEBUG_INFO(phy_db->trans, 364 IWL_DEBUG_INFO(phy_db->trans,
381 "%s(%d): [PHYDB] GET: Type %d , Size: %d\n", 365 "%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
382 __func__, __LINE__, type, *size); 366 __func__, __LINE__, type, *size);
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index c64d864799cd..994c8c263dc0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -61,6 +61,7 @@
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63 63
64#include <linux/etherdevice.h>
64#include <net/cfg80211.h> 65#include <net/cfg80211.h>
65#include <net/ipv6.h> 66#include <net/ipv6.h>
66#include "iwl-modparams.h" 67#include "iwl-modparams.h"
@@ -192,6 +193,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
192 sizeof(wkc), &wkc); 193 sizeof(wkc), &wkc);
193 data->error = ret != 0; 194 data->error = ret != 0;
194 195
196 mvm->ptk_ivlen = key->iv_len;
197 mvm->ptk_icvlen = key->icv_len;
198 mvm->gtk_ivlen = key->iv_len;
199 mvm->gtk_icvlen = key->icv_len;
200
195 /* don't upload key again */ 201 /* don't upload key again */
196 goto out_unlock; 202 goto out_unlock;
197 } 203 }
@@ -304,9 +310,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
304 */ 310 */
305 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 311 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
306 key->hw_key_idx = 0; 312 key->hw_key_idx = 0;
313 mvm->ptk_ivlen = key->iv_len;
314 mvm->ptk_icvlen = key->icv_len;
307 } else { 315 } else {
308 data->gtk_key_idx++; 316 data->gtk_key_idx++;
309 key->hw_key_idx = data->gtk_key_idx; 317 key->hw_key_idx = data->gtk_key_idx;
318 mvm->gtk_ivlen = key->iv_len;
319 mvm->gtk_icvlen = key->icv_len;
310 } 320 }
311 321
312 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true); 322 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
@@ -649,6 +659,11 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
649 /* We reprogram keys and shouldn't allocate new key indices */ 659 /* We reprogram keys and shouldn't allocate new key indices */
650 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); 660 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
651 661
662 mvm->ptk_ivlen = 0;
663 mvm->ptk_icvlen = 0;
664 mvm->gtk_ivlen = 0;
665 mvm->gtk_icvlen = 0;
666
652 /* 667 /*
653 * The D3 firmware still hardcodes the AP station ID for the 668 * The D3 firmware still hardcodes the AP station ID for the
654 * BSS we're associated with as 0. As a result, we have to move 669 * BSS we're associated with as 0. As a result, we have to move
@@ -783,7 +798,6 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
783 struct iwl_wowlan_status *status; 798 struct iwl_wowlan_status *status;
784 u32 reasons; 799 u32 reasons;
785 int ret, len; 800 int ret, len;
786 bool pkt8023 = false;
787 struct sk_buff *pkt = NULL; 801 struct sk_buff *pkt = NULL;
788 802
789 iwl_trans_read_mem_bytes(mvm->trans, base, 803 iwl_trans_read_mem_bytes(mvm->trans, base,
@@ -824,7 +838,8 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
824 status = (void *)cmd.resp_pkt->data; 838 status = (void *)cmd.resp_pkt->data;
825 839
826 if (len - sizeof(struct iwl_cmd_header) != 840 if (len - sizeof(struct iwl_cmd_header) !=
827 sizeof(*status) + le32_to_cpu(status->wake_packet_bufsize)) { 841 sizeof(*status) +
842 ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
828 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 843 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
829 goto out; 844 goto out;
830 } 845 }
@@ -836,61 +851,96 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
836 goto report; 851 goto report;
837 } 852 }
838 853
839 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) { 854 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
840 wakeup.magic_pkt = true; 855 wakeup.magic_pkt = true;
841 pkt8023 = true;
842 }
843 856
844 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) { 857 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
845 wakeup.pattern_idx = 858 wakeup.pattern_idx =
846 le16_to_cpu(status->pattern_number); 859 le16_to_cpu(status->pattern_number);
847 pkt8023 = true;
848 }
849 860
850 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | 861 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
851 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) 862 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
852 wakeup.disconnect = true; 863 wakeup.disconnect = true;
853 864
854 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) { 865 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
855 wakeup.gtk_rekey_failure = true; 866 wakeup.gtk_rekey_failure = true;
856 pkt8023 = true;
857 }
858 867
859 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) { 868 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
860 wakeup.rfkill_release = true; 869 wakeup.rfkill_release = true;
861 pkt8023 = true;
862 }
863 870
864 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) { 871 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
865 wakeup.eap_identity_req = true; 872 wakeup.eap_identity_req = true;
866 pkt8023 = true;
867 }
868 873
869 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) { 874 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
870 wakeup.four_way_handshake = true; 875 wakeup.four_way_handshake = true;
871 pkt8023 = true;
872 }
873 876
874 if (status->wake_packet_bufsize) { 877 if (status->wake_packet_bufsize) {
875 u32 pktsize = le32_to_cpu(status->wake_packet_bufsize); 878 int pktsize = le32_to_cpu(status->wake_packet_bufsize);
876 u32 pktlen = le32_to_cpu(status->wake_packet_length); 879 int pktlen = le32_to_cpu(status->wake_packet_length);
880 const u8 *pktdata = status->wake_packet;
881 struct ieee80211_hdr *hdr = (void *)pktdata;
882 int truncated = pktlen - pktsize;
883
884 /* this would be a firmware bug */
885 if (WARN_ON_ONCE(truncated < 0))
886 truncated = 0;
887
888 if (ieee80211_is_data(hdr->frame_control)) {
889 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
890 int ivlen = 0, icvlen = 4; /* also FCS */
877 891
878 if (pkt8023) {
879 pkt = alloc_skb(pktsize, GFP_KERNEL); 892 pkt = alloc_skb(pktsize, GFP_KERNEL);
880 if (!pkt) 893 if (!pkt)
881 goto report; 894 goto report;
882 memcpy(skb_put(pkt, pktsize), status->wake_packet, 895
883 pktsize); 896 memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen);
897 pktdata += hdrlen;
898 pktsize -= hdrlen;
899
900 if (ieee80211_has_protected(hdr->frame_control)) {
901 if (is_multicast_ether_addr(hdr->addr1)) {
902 ivlen = mvm->gtk_ivlen;
903 icvlen += mvm->gtk_icvlen;
904 } else {
905 ivlen = mvm->ptk_ivlen;
906 icvlen += mvm->ptk_icvlen;
907 }
908 }
909
910 /* if truncated, FCS/ICV is (partially) gone */
911 if (truncated >= icvlen) {
912 icvlen = 0;
913 truncated -= icvlen;
914 } else {
915 icvlen -= truncated;
916 truncated = 0;
917 }
918
919 pktsize -= ivlen + icvlen;
920 pktdata += ivlen;
921
922 memcpy(skb_put(pkt, pktsize), pktdata, pktsize);
923
884 if (ieee80211_data_to_8023(pkt, vif->addr, vif->type)) 924 if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
885 goto report; 925 goto report;
886 wakeup.packet = pkt->data; 926 wakeup.packet = pkt->data;
887 wakeup.packet_present_len = pkt->len; 927 wakeup.packet_present_len = pkt->len;
888 wakeup.packet_len = pkt->len - (pktlen - pktsize); 928 wakeup.packet_len = pkt->len - truncated;
889 wakeup.packet_80211 = false; 929 wakeup.packet_80211 = false;
890 } else { 930 } else {
931 int fcslen = 4;
932
933 if (truncated >= 4) {
934 truncated -= 4;
935 fcslen = 0;
936 } else {
937 fcslen -= truncated;
938 truncated = 0;
939 }
940 pktsize -= fcslen;
891 wakeup.packet = status->wake_packet; 941 wakeup.packet = status->wake_packet;
892 wakeup.packet_present_len = pktsize; 942 wakeup.packet_present_len = pktsize;
893 wakeup.packet_len = pktlen; 943 wakeup.packet_len = pktlen - truncated;
894 wakeup.packet_80211 = true; 944 wakeup.packet_80211 = true;
895 } 945 }
896 } 946 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index e8264e11b12d..7e169b085afe 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -557,11 +557,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
557 return ret; 557 return ret;
558} 558}
559 559
560static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, 560static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
561 struct ieee80211_vif *vif) 561 struct ieee80211_vif *vif)
562{ 562{
563 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
564 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
565 u32 tfd_msk = 0, ac; 563 u32 tfd_msk = 0, ac;
566 564
567 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 565 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -594,12 +592,21 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
594 */ 592 */
595 flush_work(&mvm->sta_drained_wk); 593 flush_work(&mvm->sta_drained_wk);
596 } 594 }
595}
596
597static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
598 struct ieee80211_vif *vif)
599{
600 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
601 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
602
603 iwl_mvm_prepare_mac_removal(mvm, vif);
597 604
598 mutex_lock(&mvm->mutex); 605 mutex_lock(&mvm->mutex);
599 606
600 /* 607 /*
601 * For AP/GO interface, the tear down of the resources allocated to the 608 * For AP/GO interface, the tear down of the resources allocated to the
602 * interface should be handled as part of the bss_info_changed flow. 609 * interface is handled as part of the stop_ap flow.
603 */ 610 */
604 if (vif->type == NL80211_IFTYPE_AP) { 611 if (vif->type == NL80211_IFTYPE_AP) {
605 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); 612 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
@@ -763,6 +770,8 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
763 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 770 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
764 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 771 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
765 772
773 iwl_mvm_prepare_mac_removal(mvm, vif);
774
766 mutex_lock(&mvm->mutex); 775 mutex_lock(&mvm->mutex);
767 776
768 mvmvif->ap_active = false; 777 mvmvif->ap_active = false;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 4e339ccfa800..537711b10478 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -327,6 +327,10 @@ struct iwl_mvm {
327 struct led_classdev led; 327 struct led_classdev led;
328 328
329 struct ieee80211_vif *p2p_device_vif; 329 struct ieee80211_vif *p2p_device_vif;
330
331#ifdef CONFIG_PM_SLEEP
332 int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
333#endif
330}; 334};
331 335
332/* Extract MVM priv from op_mode and _hw */ 336/* Extract MVM priv from op_mode and _hw */
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index aa2a39a637dd..3d62e8055352 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -182,6 +182,15 @@ struct iwl_queue {
182#define TFD_TX_CMD_SLOTS 256 182#define TFD_TX_CMD_SLOTS 256
183#define TFD_CMD_SLOTS 32 183#define TFD_CMD_SLOTS 32
184 184
185/*
186 * The FH will write back to the first TB only, so we need
187 * to copy some data into the buffer regardless of whether
188 * it should be mapped or not. This indicates how much to
189 * copy; even for HCMDs it must be big enough to fit the
190 * DRAM scratch from the TX cmd, at least 16 bytes.
191 */
192#define IWL_HCMD_MIN_COPY_SIZE 16
193
185struct iwl_pcie_txq_entry { 194struct iwl_pcie_txq_entry {
186 struct iwl_device_cmd *cmd; 195 struct iwl_device_cmd *cmd;
187 struct iwl_device_cmd *copy_cmd; 196 struct iwl_device_cmd *copy_cmd;
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8e9e3212fe78..8b625a7f5685 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1152,10 +1152,12 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1152 void *dup_buf = NULL; 1152 void *dup_buf = NULL;
1153 dma_addr_t phys_addr; 1153 dma_addr_t phys_addr;
1154 int idx; 1154 int idx;
1155 u16 copy_size, cmd_size; 1155 u16 copy_size, cmd_size, dma_size;
1156 bool had_nocopy = false; 1156 bool had_nocopy = false;
1157 int i; 1157 int i;
1158 u32 cmd_pos; 1158 u32 cmd_pos;
1159 const u8 *cmddata[IWL_MAX_CMD_TFDS];
1160 u16 cmdlen[IWL_MAX_CMD_TFDS];
1159 1161
1160 copy_size = sizeof(out_cmd->hdr); 1162 copy_size = sizeof(out_cmd->hdr);
1161 cmd_size = sizeof(out_cmd->hdr); 1163 cmd_size = sizeof(out_cmd->hdr);
@@ -1164,8 +1166,23 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1164 BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); 1166 BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
1165 1167
1166 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1168 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
1169 cmddata[i] = cmd->data[i];
1170 cmdlen[i] = cmd->len[i];
1171
1167 if (!cmd->len[i]) 1172 if (!cmd->len[i])
1168 continue; 1173 continue;
1174
1175 /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
1176 if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
1177 int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
1178
1179 if (copy > cmdlen[i])
1180 copy = cmdlen[i];
1181 cmdlen[i] -= copy;
1182 cmddata[i] += copy;
1183 copy_size += copy;
1184 }
1185
1169 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1186 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
1170 had_nocopy = true; 1187 had_nocopy = true;
1171 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 1188 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
@@ -1185,7 +1202,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1185 goto free_dup_buf; 1202 goto free_dup_buf;
1186 } 1203 }
1187 1204
1188 dup_buf = kmemdup(cmd->data[i], cmd->len[i], 1205 dup_buf = kmemdup(cmddata[i], cmdlen[i],
1189 GFP_ATOMIC); 1206 GFP_ATOMIC);
1190 if (!dup_buf) 1207 if (!dup_buf)
1191 return -ENOMEM; 1208 return -ENOMEM;
@@ -1195,7 +1212,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1195 idx = -EINVAL; 1212 idx = -EINVAL;
1196 goto free_dup_buf; 1213 goto free_dup_buf;
1197 } 1214 }
1198 copy_size += cmd->len[i]; 1215 copy_size += cmdlen[i];
1199 } 1216 }
1200 cmd_size += cmd->len[i]; 1217 cmd_size += cmd->len[i];
1201 } 1218 }
@@ -1242,14 +1259,31 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1242 1259
1243 /* and copy the data that needs to be copied */ 1260 /* and copy the data that needs to be copied */
1244 cmd_pos = offsetof(struct iwl_device_cmd, payload); 1261 cmd_pos = offsetof(struct iwl_device_cmd, payload);
1262 copy_size = sizeof(out_cmd->hdr);
1245 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1263 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
1246 if (!cmd->len[i]) 1264 int copy = 0;
1265
1266 if (!cmd->len[i])
1247 continue; 1267 continue;
1248 if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1268
1249 IWL_HCMD_DFL_DUP)) 1269 /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
1250 break; 1270 if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
1251 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]); 1271 copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
1252 cmd_pos += cmd->len[i]; 1272
1273 if (copy > cmd->len[i])
1274 copy = cmd->len[i];
1275 }
1276
1277 /* copy everything if not nocopy/dup */
1278 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1279 IWL_HCMD_DFL_DUP)))
1280 copy = cmd->len[i];
1281
1282 if (copy) {
1283 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1284 cmd_pos += copy;
1285 copy_size += copy;
1286 }
1253 } 1287 }
1254 1288
1255 WARN_ON_ONCE(txq->entries[idx].copy_cmd); 1289 WARN_ON_ONCE(txq->entries[idx].copy_cmd);
@@ -1275,7 +1309,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1275 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), 1309 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
1276 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); 1310 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
1277 1311
1278 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size, 1312 /*
1313 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
1314 * still map at least that many bytes for the hardware to write back to.
1315 * We have enough space, so that's not a problem.
1316 */
1317 dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
1318
1319 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
1279 DMA_BIDIRECTIONAL); 1320 DMA_BIDIRECTIONAL);
1280 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { 1321 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
1281 idx = -ENOMEM; 1322 idx = -ENOMEM;
@@ -1283,14 +1324,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1283 } 1324 }
1284 1325
1285 dma_unmap_addr_set(out_meta, mapping, phys_addr); 1326 dma_unmap_addr_set(out_meta, mapping, phys_addr);
1286 dma_unmap_len_set(out_meta, len, copy_size); 1327 dma_unmap_len_set(out_meta, len, dma_size);
1287 1328
1288 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1); 1329 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
1289 1330
1331 /* map the remaining (adjusted) nocopy/dup fragments */
1290 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1332 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
1291 const void *data = cmd->data[i]; 1333 const void *data = cmddata[i];
1292 1334
1293 if (!cmd->len[i]) 1335 if (!cmdlen[i])
1294 continue; 1336 continue;
1295 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1337 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1296 IWL_HCMD_DFL_DUP))) 1338 IWL_HCMD_DFL_DUP)))
@@ -1298,7 +1340,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1298 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1340 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1299 data = dup_buf; 1341 data = dup_buf;
1300 phys_addr = dma_map_single(trans->dev, (void *)data, 1342 phys_addr = dma_map_single(trans->dev, (void *)data,
1301 cmd->len[i], DMA_BIDIRECTIONAL); 1343 cmdlen[i], DMA_BIDIRECTIONAL);
1302 if (dma_mapping_error(trans->dev, phys_addr)) { 1344 if (dma_mapping_error(trans->dev, phys_addr)) {
1303 iwl_pcie_tfd_unmap(trans, out_meta, 1345 iwl_pcie_tfd_unmap(trans, out_meta,
1304 &txq->tfds[q->write_ptr], 1346 &txq->tfds[q->write_ptr],
@@ -1307,7 +1349,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1307 goto out; 1349 goto out;
1308 } 1350 }
1309 1351
1310 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0); 1352 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
1311 } 1353 }
1312 1354
1313 out_meta->flags = cmd->flags; 1355 out_meta->flags = cmd->flags;
@@ -1317,8 +1359,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1317 1359
1318 txq->need_update = 1; 1360 txq->need_update = 1;
1319 1361
1320 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, 1362 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
1321 &out_cmd->hdr, copy_size);
1322 1363
1323 /* start timer if queue currently empty */ 1364 /* start timer if queue currently empty */
1324 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) 1365 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 739309e70d8b..45578335e420 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -825,6 +825,11 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
825 825
826 sdio_release_host(func); 826 sdio_release_host(func);
827 827
828 /* Set fw_ready before queuing any commands so that
829 * lbs_thread won't block from sending them to firmware.
830 */
831 priv->fw_ready = 1;
832
828 /* 833 /*
829 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions 834 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
830 */ 835 */
@@ -839,7 +844,6 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
839 netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n"); 844 netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n");
840 } 845 }
841 846
842 priv->fw_ready = 1;
843 wake_up(&card->pwron_waitq); 847 wake_up(&card->pwron_waitq);
844 848
845 if (!card->started) { 849 if (!card->started) {
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 35c79722c361..5c395e2e6a2b 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -302,7 +302,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
302 i++; 302 i++;
303 usleep_range(10, 20); 303 usleep_range(10, 20);
304 /* 50ms max wait */ 304 /* 50ms max wait */
305 if (i == 50000) 305 if (i == 5000)
306 break; 306 break;
307 } 307 }
308 308
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 1031db66474a..189744db65e0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1236,8 +1236,10 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
1236 */ 1236 */
1237 if_limit = &rt2x00dev->if_limits_ap; 1237 if_limit = &rt2x00dev->if_limits_ap;
1238 if_limit->max = rt2x00dev->ops->max_ap_intf; 1238 if_limit->max = rt2x00dev->ops->max_ap_intf;
1239 if_limit->types = BIT(NL80211_IFTYPE_AP) | 1239 if_limit->types = BIT(NL80211_IFTYPE_AP);
1240 BIT(NL80211_IFTYPE_MESH_POINT); 1240#ifdef CONFIG_MAC80211_MESH
1241 if_limit->types |= BIT(NL80211_IFTYPE_MESH_POINT);
1242#endif
1241 1243
1242 /* 1244 /*
1243 * Build up AP interface combinations structure. 1245 * Build up AP interface combinations structure.
@@ -1309,7 +1311,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1309 rt2x00dev->hw->wiphy->interface_modes |= 1311 rt2x00dev->hw->wiphy->interface_modes |=
1310 BIT(NL80211_IFTYPE_ADHOC) | 1312 BIT(NL80211_IFTYPE_ADHOC) |
1311 BIT(NL80211_IFTYPE_AP) | 1313 BIT(NL80211_IFTYPE_AP) |
1314#ifdef CONFIG_MAC80211_MESH
1312 BIT(NL80211_IFTYPE_MESH_POINT) | 1315 BIT(NL80211_IFTYPE_MESH_POINT) |
1316#endif
1313 BIT(NL80211_IFTYPE_WDS); 1317 BIT(NL80211_IFTYPE_WDS);
1314 1318
1315 rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 1319 rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 39c937f9b426..dee5dddaa292 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -331,8 +331,14 @@ static void pci_acpi_cleanup(struct device *dev)
331 } 331 }
332} 332}
333 333
334static bool pci_acpi_bus_match(struct device *dev)
335{
336 return dev->bus == &pci_bus_type;
337}
338
334static struct acpi_bus_type acpi_pci_bus = { 339static struct acpi_bus_type acpi_pci_bus = {
335 .bus = &pci_bus_type, 340 .name = "PCI",
341 .match = pci_acpi_bus_match,
336 .find_device = acpi_pci_find_device, 342 .find_device = acpi_pci_find_device,
337 .setup = pci_acpi_setup, 343 .setup = pci_acpi_setup,
338 .cleanup = pci_acpi_cleanup, 344 .cleanup = pci_acpi_cleanup,
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 8813fc03aa09..55cd459a3908 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -353,8 +353,14 @@ static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
353/* complete initialization of a PNPACPI device includes having 353/* complete initialization of a PNPACPI device includes having
354 * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling. 354 * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling.
355 */ 355 */
356static bool acpi_pnp_bus_match(struct device *dev)
357{
358 return dev->bus == &pnp_bus_type;
359}
360
356static struct acpi_bus_type __initdata acpi_pnp_bus = { 361static struct acpi_bus_type __initdata acpi_pnp_bus = {
357 .bus = &pnp_bus_type, 362 .name = "PNP",
363 .match = acpi_pnp_bus_match,
358 .find_device = acpi_pnp_find_device, 364 .find_device = acpi_pnp_find_device,
359}; 365};
360 366
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index da9782bd27d0..e3661c20cf38 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2830,7 +2830,7 @@ EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap);
2830 * regulator_allow_bypass - allow the regulator to go into bypass mode 2830 * regulator_allow_bypass - allow the regulator to go into bypass mode
2831 * 2831 *
2832 * @regulator: Regulator to configure 2832 * @regulator: Regulator to configure
2833 * @allow: enable or disable bypass mode 2833 * @enable: enable or disable bypass mode
2834 * 2834 *
2835 * Allow the regulator to go into bypass mode if all other consumers 2835 * Allow the regulator to go into bypass mode if all other consumers
2836 * for the regulator also enable bypass mode and the machine 2836 * for the regulator also enable bypass mode and the machine
@@ -3057,9 +3057,13 @@ int regulator_bulk_enable(int num_consumers,
3057 return 0; 3057 return 0;
3058 3058
3059err: 3059err:
3060 pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret); 3060 for (i = 0; i < num_consumers; i++) {
3061 while (--i >= 0) 3061 if (consumers[i].ret < 0)
3062 regulator_disable(consumers[i].consumer); 3062 pr_err("Failed to enable %s: %d\n", consumers[i].supply,
3063 consumers[i].ret);
3064 else
3065 regulator_disable(consumers[i].consumer);
3066 }
3063 3067
3064 return ret; 3068 return ret;
3065} 3069}
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 219d162b651e..a53c11a529d5 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -528,7 +528,7 @@ static int db8500_regulator_probe(struct platform_device *pdev)
528 return 0; 528 return 0;
529} 529}
530 530
531static int __exit db8500_regulator_remove(struct platform_device *pdev) 531static int db8500_regulator_remove(struct platform_device *pdev)
532{ 532{
533 int i; 533 int i;
534 534
@@ -553,7 +553,7 @@ static struct platform_driver db8500_regulator_driver = {
553 .owner = THIS_MODULE, 553 .owner = THIS_MODULE,
554 }, 554 },
555 .probe = db8500_regulator_probe, 555 .probe = db8500_regulator_probe,
556 .remove = __exit_p(db8500_regulator_remove), 556 .remove = db8500_regulator_remove,
557}; 557};
558 558
559static int __init db8500_regulator_init(void) 559static int __init db8500_regulator_init(void)
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index cde13bb5a8fb..39cf14606784 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -4,6 +4,7 @@
4 * Copyright 2011-2012 Texas Instruments Inc. 4 * Copyright 2011-2012 Texas Instruments Inc.
5 * 5 *
6 * Author: Graeme Gregory <gg@slimlogic.co.uk> 6 * Author: Graeme Gregory <gg@slimlogic.co.uk>
7 * Author: Ian Lartey <ian@slimlogic.co.uk>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 10 * under the terms of the GNU General Public License as published by the
@@ -156,7 +157,7 @@ static const struct regs_info palmas_regs_info[] = {
156 * 157 *
157 * So they are basically (maxV-minV)/stepV 158 * So they are basically (maxV-minV)/stepV
158 */ 159 */
159#define PALMAS_SMPS_NUM_VOLTAGES 116 160#define PALMAS_SMPS_NUM_VOLTAGES 117
160#define PALMAS_SMPS10_NUM_VOLTAGES 2 161#define PALMAS_SMPS10_NUM_VOLTAGES 2
161#define PALMAS_LDO_NUM_VOLTAGES 50 162#define PALMAS_LDO_NUM_VOLTAGES 50
162 163
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 74508cc62d67..f705d25b437c 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -471,24 +471,23 @@ twl4030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
471 selector); 471 selector);
472} 472}
473 473
474static int twl4030ldo_get_voltage(struct regulator_dev *rdev) 474static int twl4030ldo_get_voltage_sel(struct regulator_dev *rdev)
475{ 475{
476 struct twlreg_info *info = rdev_get_drvdata(rdev); 476 struct twlreg_info *info = rdev_get_drvdata(rdev);
477 int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, 477 int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE);
478 VREG_VOLTAGE);
479 478
480 if (vsel < 0) 479 if (vsel < 0)
481 return vsel; 480 return vsel;
482 481
483 vsel &= info->table_len - 1; 482 vsel &= info->table_len - 1;
484 return LDO_MV(info->table[vsel]) * 1000; 483 return vsel;
485} 484}
486 485
487static struct regulator_ops twl4030ldo_ops = { 486static struct regulator_ops twl4030ldo_ops = {
488 .list_voltage = twl4030ldo_list_voltage, 487 .list_voltage = twl4030ldo_list_voltage,
489 488
490 .set_voltage_sel = twl4030ldo_set_voltage_sel, 489 .set_voltage_sel = twl4030ldo_set_voltage_sel,
491 .get_voltage = twl4030ldo_get_voltage, 490 .get_voltage_sel = twl4030ldo_get_voltage_sel,
492 491
493 .enable = twl4030reg_enable, 492 .enable = twl4030reg_enable,
494 .disable = twl4030reg_disable, 493 .disable = twl4030reg_disable,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 765398c063c7..c31187d79343 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -71,9 +71,14 @@ struct kmem_cache *scsi_sdb_cache;
71#ifdef CONFIG_ACPI 71#ifdef CONFIG_ACPI
72#include <acpi/acpi_bus.h> 72#include <acpi/acpi_bus.h>
73 73
74static bool acpi_scsi_bus_match(struct device *dev)
75{
76 return dev->bus == &scsi_bus_type;
77}
78
74int scsi_register_acpi_bus_type(struct acpi_bus_type *bus) 79int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
75{ 80{
76 bus->bus = &scsi_bus_type; 81 bus->match = acpi_scsi_bus_match;
77 return register_acpi_bus_type(bus); 82 return register_acpi_bus_type(bus);
78} 83}
79EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type); 84EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 1956593ee89d..81e939e90c4c 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -881,17 +881,12 @@ static struct vio_driver hvcs_vio_driver = {
881/* Only called from hvcs_get_pi please */ 881/* Only called from hvcs_get_pi please */
882static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd) 882static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
883{ 883{
884 int clclength;
885
886 hvcsd->p_unit_address = pi->unit_address; 884 hvcsd->p_unit_address = pi->unit_address;
887 hvcsd->p_partition_ID = pi->partition_ID; 885 hvcsd->p_partition_ID = pi->partition_ID;
888 clclength = strlen(&pi->location_code[0]);
889 if (clclength > HVCS_CLC_LENGTH)
890 clclength = HVCS_CLC_LENGTH;
891 886
892 /* copy the null-term char too */ 887 /* copy the null-term char too */
893 strncpy(&hvcsd->p_location_code[0], 888 strlcpy(&hvcsd->p_location_code[0],
894 &pi->location_code[0], clclength + 1); 889 &pi->location_code[0], sizeof(hvcsd->p_location_code));
895} 890}
896 891
897/* 892/*
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index cef4252bb31a..b6f4bad3f756 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -210,9 +210,14 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
210 return 0; 210 return 0;
211} 211}
212 212
213static bool usb_acpi_bus_match(struct device *dev)
214{
215 return is_usb_device(dev) || is_usb_port(dev);
216}
217
213static struct acpi_bus_type usb_acpi_bus = { 218static struct acpi_bus_type usb_acpi_bus = {
214 .bus = &usb_bus_type, 219 .name = "USB",
215 .find_bridge = usb_acpi_find_device, 220 .match = usb_acpi_bus_match,
216 .find_device = usb_acpi_find_device, 221 .find_device = usb_acpi_find_device,
217}; 222};
218 223
diff --git a/fs/ecryptfs/Kconfig b/fs/ecryptfs/Kconfig
index e15ef38c24fa..434aa313f077 100644
--- a/fs/ecryptfs/Kconfig
+++ b/fs/ecryptfs/Kconfig
@@ -12,3 +12,11 @@ config ECRYPT_FS
12 12
13 To compile this file system support as a module, choose M here: the 13 To compile this file system support as a module, choose M here: the
14 module will be called ecryptfs. 14 module will be called ecryptfs.
15
16config ECRYPT_FS_MESSAGING
17 bool "Enable notifications for userspace key wrap/unwrap"
18 depends on ECRYPT_FS
19 help
20 Enables the /dev/ecryptfs entry for use by ecryptfsd. This allows
21 userspace to wrap/unwrap file encryption keys with other
22 backends, such as OpenSSL.
diff --git a/fs/ecryptfs/Makefile b/fs/ecryptfs/Makefile
index 2cc9ee4ad2eb..49678a69947d 100644
--- a/fs/ecryptfs/Makefile
+++ b/fs/ecryptfs/Makefile
@@ -1,7 +1,10 @@
1# 1#
2# Makefile for the Linux 2.6 eCryptfs 2# Makefile for the Linux eCryptfs
3# 3#
4 4
5obj-$(CONFIG_ECRYPT_FS) += ecryptfs.o 5obj-$(CONFIG_ECRYPT_FS) += ecryptfs.o
6 6
7ecryptfs-objs := dentry.o file.o inode.o main.o super.o mmap.o read_write.o crypto.o keystore.o messaging.o miscdev.o kthread.o debug.o 7ecryptfs-y := dentry.o file.o inode.o main.o super.o mmap.o read_write.o \
8 crypto.o keystore.o kthread.o debug.o
9
10ecryptfs-$(CONFIG_ECRYPT_FS_MESSAGING) += messaging.o miscdev.o
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index a7b0c2dfb3db..d5c25db4398f 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -301,17 +301,14 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
301 while (size > 0 && i < sg_size) { 301 while (size > 0 && i < sg_size) {
302 pg = virt_to_page(addr); 302 pg = virt_to_page(addr);
303 offset = offset_in_page(addr); 303 offset = offset_in_page(addr);
304 if (sg) 304 sg_set_page(&sg[i], pg, 0, offset);
305 sg_set_page(&sg[i], pg, 0, offset);
306 remainder_of_page = PAGE_CACHE_SIZE - offset; 305 remainder_of_page = PAGE_CACHE_SIZE - offset;
307 if (size >= remainder_of_page) { 306 if (size >= remainder_of_page) {
308 if (sg) 307 sg[i].length = remainder_of_page;
309 sg[i].length = remainder_of_page;
310 addr += remainder_of_page; 308 addr += remainder_of_page;
311 size -= remainder_of_page; 309 size -= remainder_of_page;
312 } else { 310 } else {
313 if (sg) 311 sg[i].length = size;
314 sg[i].length = size;
315 addr += size; 312 addr += size;
316 size = 0; 313 size = 0;
317 } 314 }
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 1b5d9af937df..bf12ba5dd223 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -45,14 +45,12 @@
45static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags) 45static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
46{ 46{
47 struct dentry *lower_dentry; 47 struct dentry *lower_dentry;
48 struct vfsmount *lower_mnt;
49 int rc = 1; 48 int rc = 1;
50 49
51 if (flags & LOOKUP_RCU) 50 if (flags & LOOKUP_RCU)
52 return -ECHILD; 51 return -ECHILD;
53 52
54 lower_dentry = ecryptfs_dentry_to_lower(dentry); 53 lower_dentry = ecryptfs_dentry_to_lower(dentry);
55 lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
56 if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) 54 if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
57 goto out; 55 goto out;
58 rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags); 56 rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 7e2c6f5d7985..dd299b389d4e 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -172,6 +172,19 @@ ecryptfs_get_key_payload_data(struct key *key)
172#define ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE 24 172#define ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE 24
173#define ECRYPTFS_ENCRYPTED_DENTRY_NAME_LEN (18 + 1 + 4 + 1 + 32) 173#define ECRYPTFS_ENCRYPTED_DENTRY_NAME_LEN (18 + 1 + 4 + 1 + 32)
174 174
175#ifdef CONFIG_ECRYPT_FS_MESSAGING
176# define ECRYPTFS_VERSIONING_MASK_MESSAGING (ECRYPTFS_VERSIONING_DEVMISC \
177 | ECRYPTFS_VERSIONING_PUBKEY)
178#else
179# define ECRYPTFS_VERSIONING_MASK_MESSAGING 0
180#endif
181
182#define ECRYPTFS_VERSIONING_MASK (ECRYPTFS_VERSIONING_PASSPHRASE \
183 | ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH \
184 | ECRYPTFS_VERSIONING_XATTR \
185 | ECRYPTFS_VERSIONING_MULTKEY \
186 | ECRYPTFS_VERSIONING_MASK_MESSAGING \
187 | ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION)
175struct ecryptfs_key_sig { 188struct ecryptfs_key_sig {
176 struct list_head crypt_stat_list; 189 struct list_head crypt_stat_list;
177 char keysig[ECRYPTFS_SIG_SIZE_HEX + 1]; 190 char keysig[ECRYPTFS_SIG_SIZE_HEX + 1];
@@ -399,7 +412,9 @@ struct ecryptfs_daemon {
399 struct hlist_node euid_chain; 412 struct hlist_node euid_chain;
400}; 413};
401 414
415#ifdef CONFIG_ECRYPT_FS_MESSAGING
402extern struct mutex ecryptfs_daemon_hash_mux; 416extern struct mutex ecryptfs_daemon_hash_mux;
417#endif
403 418
404static inline size_t 419static inline size_t
405ecryptfs_lower_header_size(struct ecryptfs_crypt_stat *crypt_stat) 420ecryptfs_lower_header_size(struct ecryptfs_crypt_stat *crypt_stat)
@@ -610,6 +625,7 @@ int
610ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, 625ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
611 size_t size, int flags); 626 size_t size, int flags);
612int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode); 627int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode);
628#ifdef CONFIG_ECRYPT_FS_MESSAGING
613int ecryptfs_process_response(struct ecryptfs_daemon *daemon, 629int ecryptfs_process_response(struct ecryptfs_daemon *daemon,
614 struct ecryptfs_message *msg, u32 seq); 630 struct ecryptfs_message *msg, u32 seq);
615int ecryptfs_send_message(char *data, int data_len, 631int ecryptfs_send_message(char *data, int data_len,
@@ -618,6 +634,24 @@ int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx,
618 struct ecryptfs_message **emsg); 634 struct ecryptfs_message **emsg);
619int ecryptfs_init_messaging(void); 635int ecryptfs_init_messaging(void);
620void ecryptfs_release_messaging(void); 636void ecryptfs_release_messaging(void);
637#else
638static inline int ecryptfs_init_messaging(void)
639{
640 return 0;
641}
642static inline void ecryptfs_release_messaging(void)
643{ }
644static inline int ecryptfs_send_message(char *data, int data_len,
645 struct ecryptfs_msg_ctx **msg_ctx)
646{
647 return -ENOTCONN;
648}
649static inline int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx,
650 struct ecryptfs_message **emsg)
651{
652 return -ENOMSG;
653}
654#endif
621 655
622void 656void
623ecryptfs_write_header_metadata(char *virt, 657ecryptfs_write_header_metadata(char *virt,
@@ -655,12 +689,11 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
655 size_t offset_in_page, size_t size, 689 size_t offset_in_page, size_t size,
656 struct inode *ecryptfs_inode); 690 struct inode *ecryptfs_inode);
657struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index); 691struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index);
658int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon);
659int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon);
660int ecryptfs_parse_packet_length(unsigned char *data, size_t *size, 692int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
661 size_t *length_size); 693 size_t *length_size);
662int ecryptfs_write_packet_length(char *dest, size_t size, 694int ecryptfs_write_packet_length(char *dest, size_t size,
663 size_t *packet_size_length); 695 size_t *packet_size_length);
696#ifdef CONFIG_ECRYPT_FS_MESSAGING
664int ecryptfs_init_ecryptfs_miscdev(void); 697int ecryptfs_init_ecryptfs_miscdev(void);
665void ecryptfs_destroy_ecryptfs_miscdev(void); 698void ecryptfs_destroy_ecryptfs_miscdev(void);
666int ecryptfs_send_miscdev(char *data, size_t data_size, 699int ecryptfs_send_miscdev(char *data, size_t data_size,
@@ -669,6 +702,9 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
669void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx); 702void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx);
670int 703int
671ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, struct file *file); 704ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, struct file *file);
705int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon);
706int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon);
707#endif
672int ecryptfs_init_kthread(void); 708int ecryptfs_init_kthread(void);
673void ecryptfs_destroy_kthread(void); 709void ecryptfs_destroy_kthread(void);
674int ecryptfs_privileged_open(struct file **lower_file, 710int ecryptfs_privileged_open(struct file **lower_file,
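The header hunk builds ECRYPTFS_VERSIONING_MASK from a messaging contribution that collapses to 0 when the feature is compiled out, and stubs the messaging entry points so callers need no #ifdefs. A small userspace sketch of the mask half, with shortened hypothetical macro names:

/* Sketch (userspace, shortened hypothetical macros): one contribution to the
 * advertised feature mask collapses to 0 when the feature is compiled out. */
#include <stdio.h>

#define VER_PASSPHRASE	0x01
#define VER_PUBKEY	0x02
#define VER_DEVMISC	0x10

#ifdef FEATURE_MESSAGING
# define VER_MASK_MESSAGING	(VER_DEVMISC | VER_PUBKEY)
#else
# define VER_MASK_MESSAGING	0
#endif

#define VER_MASK	(VER_PASSPHRASE | VER_MASK_MESSAGING)

int main(void)
{
	printf("advertised feature mask: 0x%x\n", VER_MASK);
	return 0;
}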
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 53acc9d0c138..63b1f54b6a1f 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -199,7 +199,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
199 struct dentry *ecryptfs_dentry = file->f_path.dentry; 199 struct dentry *ecryptfs_dentry = file->f_path.dentry;
200 /* Private value of ecryptfs_dentry allocated in 200 /* Private value of ecryptfs_dentry allocated in
201 * ecryptfs_lookup() */ 201 * ecryptfs_lookup() */
202 struct dentry *lower_dentry;
203 struct ecryptfs_file_info *file_info; 202 struct ecryptfs_file_info *file_info;
204 203
205 mount_crypt_stat = &ecryptfs_superblock_to_private( 204 mount_crypt_stat = &ecryptfs_superblock_to_private(
@@ -222,7 +221,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
222 rc = -ENOMEM; 221 rc = -ENOMEM;
223 goto out; 222 goto out;
224 } 223 }
225 lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
226 crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; 224 crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
227 mutex_lock(&crypt_stat->cs_mutex); 225 mutex_lock(&crypt_stat->cs_mutex);
228 if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)) { 226 if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)) {
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index e0f07fb6d56b..5eab400e2590 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -999,8 +999,8 @@ out:
999 return rc; 999 return rc;
1000} 1000}
1001 1001
1002int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry, 1002static int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
1003 struct kstat *stat) 1003 struct kstat *stat)
1004{ 1004{
1005 struct ecryptfs_mount_crypt_stat *mount_crypt_stat; 1005 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
1006 int rc = 0; 1006 int rc = 0;
@@ -1021,8 +1021,8 @@ int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
1021 return rc; 1021 return rc;
1022} 1022}
1023 1023
1024int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, 1024static int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1025 struct kstat *stat) 1025 struct kstat *stat)
1026{ 1026{
1027 struct kstat lower_stat; 1027 struct kstat lower_stat;
1028 int rc; 1028 int rc;
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 2333203a120b..7d52806c2119 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1150,7 +1150,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
1150 struct ecryptfs_message *msg = NULL; 1150 struct ecryptfs_message *msg = NULL;
1151 char *auth_tok_sig; 1151 char *auth_tok_sig;
1152 char *payload; 1152 char *payload;
1153 size_t payload_len; 1153 size_t payload_len = 0;
1154 int rc; 1154 int rc;
1155 1155
1156 rc = ecryptfs_get_auth_tok_sig(&auth_tok_sig, auth_tok); 1156 rc = ecryptfs_get_auth_tok_sig(&auth_tok_sig, auth_tok);
@@ -1168,7 +1168,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
1168 rc = ecryptfs_send_message(payload, payload_len, &msg_ctx); 1168 rc = ecryptfs_send_message(payload, payload_len, &msg_ctx);
1169 if (rc) { 1169 if (rc) {
1170 ecryptfs_printk(KERN_ERR, "Error sending message to " 1170 ecryptfs_printk(KERN_ERR, "Error sending message to "
1171 "ecryptfsd\n"); 1171 "ecryptfsd: %d\n", rc);
1172 goto out; 1172 goto out;
1173 } 1173 }
1174 rc = ecryptfs_wait_for_response(msg_ctx, &msg); 1174 rc = ecryptfs_wait_for_response(msg_ctx, &msg);
@@ -1202,8 +1202,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
1202 crypt_stat->key_size); 1202 crypt_stat->key_size);
1203 } 1203 }
1204out: 1204out:
1205 if (msg) 1205 kfree(msg);
1206 kfree(msg);
1207 return rc; 1206 return rc;
1208} 1207}
1209 1208
@@ -1989,7 +1988,7 @@ pki_encrypt_session_key(struct key *auth_tok_key,
1989 rc = ecryptfs_send_message(payload, payload_len, &msg_ctx); 1988 rc = ecryptfs_send_message(payload, payload_len, &msg_ctx);
1990 if (rc) { 1989 if (rc) {
1991 ecryptfs_printk(KERN_ERR, "Error sending message to " 1990 ecryptfs_printk(KERN_ERR, "Error sending message to "
1992 "ecryptfsd\n"); 1991 "ecryptfsd: %d\n", rc);
1993 goto out; 1992 goto out;
1994 } 1993 }
1995 rc = ecryptfs_wait_for_response(msg_ctx, &msg); 1994 rc = ecryptfs_wait_for_response(msg_ctx, &msg);
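One of the keystore.c cleanups relies on kfree(NULL) being a no-op, so the "if (msg)" guard before kfree() can go; the messaging.c hunk below makes the same change. The userspace free() gives the same guarantee:

/* Sketch (userspace analogue): free(NULL), like kfree(NULL), is defined to be
 * a no-op, so the "if (ptr)" guard before the call is redundant. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *msg = NULL;

	if (0)			/* pretend the allocation path was never taken */
		msg = malloc(64);

	free(msg);		/* safe even while msg is still NULL */
	printf("free(NULL) is a no-op\n");
	return 0;
}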
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 8d7a577ae497..49ff8ea08f1c 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -97,8 +97,7 @@ static void ecryptfs_msg_ctx_free_to_alloc(struct ecryptfs_msg_ctx *msg_ctx)
97void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx) 97void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
98{ 98{
99 list_move(&(msg_ctx->node), &ecryptfs_msg_ctx_free_list); 99 list_move(&(msg_ctx->node), &ecryptfs_msg_ctx_free_list);
100 if (msg_ctx->msg) 100 kfree(msg_ctx->msg);
101 kfree(msg_ctx->msg);
102 msg_ctx->msg = NULL; 101 msg_ctx->msg = NULL;
103 msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_FREE; 102 msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_FREE;
104} 103}
@@ -283,7 +282,7 @@ ecryptfs_send_message_locked(char *data, int data_len, u8 msg_type,
283 int rc; 282 int rc;
284 283
285 rc = ecryptfs_find_daemon_by_euid(&daemon); 284 rc = ecryptfs_find_daemon_by_euid(&daemon);
286 if (rc || !daemon) { 285 if (rc) {
287 rc = -ENOTCONN; 286 rc = -ENOTCONN;
288 goto out; 287 goto out;
289 } 288 }
diff --git a/fs/namei.c b/fs/namei.c
index 961bc1268366..57ae9c8c66bf 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -689,8 +689,6 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
689 nd->path = *path; 689 nd->path = *path;
690 nd->inode = nd->path.dentry->d_inode; 690 nd->inode = nd->path.dentry->d_inode;
691 nd->flags |= LOOKUP_JUMPED; 691 nd->flags |= LOOKUP_JUMPED;
692
693 BUG_ON(nd->inode->i_op->follow_link);
694} 692}
695 693
696static inline void put_link(struct nameidata *nd, struct path *link, void *cookie) 694static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index e65278f560c4..22ba56e834e2 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -437,11 +437,9 @@ void acpi_remove_dir(struct acpi_device *);
437 */ 437 */
438struct acpi_bus_type { 438struct acpi_bus_type {
439 struct list_head list; 439 struct list_head list;
440 struct bus_type *bus; 440 const char *name;
441 /* For general devices under the bus */ 441 bool (*match)(struct device *dev);
442 int (*find_device) (struct device *, acpi_handle *); 442 int (*find_device) (struct device *, acpi_handle *);
443 /* For bridges, such as PCI root bridge, IDE controller */
444 int (*find_bridge) (struct device *, acpi_handle *);
445 void (*setup)(struct device *); 443 void (*setup)(struct device *);
446 void (*cleanup)(struct device *); 444 void (*cleanup)(struct device *);
447}; 445};
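struct acpi_bus_type loses its bus pointer and find_bridge() hook; users now supply a name plus a match() callback. A userspace sketch of that registration shape (the struct and function names here are stand-ins, not the real ACPI API):

/* Sketch (userspace, stand-in types): identify devices with a name plus a
 * match() callback rather than a bus pointer and a separate find_bridge(). */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct device {
	const char *bus_name;
};

struct acpi_bus_type_sketch {
	const char *name;
	bool (*match)(struct device *dev);
};

static bool pci_match(struct device *dev)
{
	return strcmp(dev->bus_name, "pci") == 0;
}

static struct acpi_bus_type_sketch pci_type = {
	.name	= "pci",
	.match	= pci_match,
};

int main(void)
{
	struct device dev = { .bus_name = "pci" };

	if (pci_type.match(&dev))
		printf("device claimed by ACPI bus type '%s'\n", pci_type.name);
	return 0;
}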
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 8839b3a24660..e3e0d651c6ca 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -443,12 +443,12 @@ struct drm_crtc {
443 * @dpms: set power state (see drm_crtc_funcs above) 443 * @dpms: set power state (see drm_crtc_funcs above)
444 * @save: save connector state 444 * @save: save connector state
445 * @restore: restore connector state 445 * @restore: restore connector state
446 * @reset: reset connector after state has been invalidate (e.g. resume) 446 * @reset: reset connector after state has been invalidated (e.g. resume)
447 * @detect: is this connector active? 447 * @detect: is this connector active?
448 * @fill_modes: fill mode list for this connector 448 * @fill_modes: fill mode list for this connector
449 * @set_property: property for this connector may need update 449 * @set_property: property for this connector may need an update
450 * @destroy: make object go away 450 * @destroy: make object go away
451 * @force: notify the driver the connector is forced on 451 * @force: notify the driver that the connector is forced on
452 * 452 *
453 * Each CRTC may have one or more connectors attached to it. The functions 453 * Each CRTC may have one or more connectors attached to it. The functions
454 * below allow the core DRM code to control connectors, enumerate available modes, 454 * below allow the core DRM code to control connectors, enumerate available modes,
diff --git a/include/linux/ecryptfs.h b/include/linux/ecryptfs.h
index 2224a8c0cb64..8d5ab998a222 100644
--- a/include/linux/ecryptfs.h
+++ b/include/linux/ecryptfs.h
@@ -6,9 +6,8 @@
6#define ECRYPTFS_VERSION_MINOR 0x04 6#define ECRYPTFS_VERSION_MINOR 0x04
7#define ECRYPTFS_SUPPORTED_FILE_VERSION 0x03 7#define ECRYPTFS_SUPPORTED_FILE_VERSION 0x03
8/* These flags indicate which features are supported by the kernel 8/* These flags indicate which features are supported by the kernel
9 * module; userspace tools such as the mount helper read 9 * module; userspace tools such as the mount helper read the feature
10 * ECRYPTFS_VERSIONING_MASK from a sysfs handle in order to determine 10 * bits from a sysfs handle in order to determine how to behave. */
11 * how to behave. */
12#define ECRYPTFS_VERSIONING_PASSPHRASE 0x00000001 11#define ECRYPTFS_VERSIONING_PASSPHRASE 0x00000001
13#define ECRYPTFS_VERSIONING_PUBKEY 0x00000002 12#define ECRYPTFS_VERSIONING_PUBKEY 0x00000002
14#define ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH 0x00000004 13#define ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH 0x00000004
@@ -19,13 +18,6 @@
19#define ECRYPTFS_VERSIONING_HMAC 0x00000080 18#define ECRYPTFS_VERSIONING_HMAC 0x00000080
20#define ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION 0x00000100 19#define ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION 0x00000100
21#define ECRYPTFS_VERSIONING_GCM 0x00000200 20#define ECRYPTFS_VERSIONING_GCM 0x00000200
22#define ECRYPTFS_VERSIONING_MASK (ECRYPTFS_VERSIONING_PASSPHRASE \
23 | ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH \
24 | ECRYPTFS_VERSIONING_PUBKEY \
25 | ECRYPTFS_VERSIONING_XATTR \
26 | ECRYPTFS_VERSIONING_MULTKEY \
27 | ECRYPTFS_VERSIONING_DEVMISC \
28 | ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION)
29#define ECRYPTFS_MAX_PASSWORD_LENGTH 64 21#define ECRYPTFS_MAX_PASSWORD_LENGTH 64
30#define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH 22#define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH
31#define ECRYPTFS_SALT_SIZE 8 23#define ECRYPTFS_SALT_SIZE 8
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 29eb805ea4a6..c1d6555d2567 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -118,10 +118,8 @@
118 118
119#ifdef CONFIG_PREEMPT_COUNT 119#ifdef CONFIG_PREEMPT_COUNT
120# define preemptible() (preempt_count() == 0 && !irqs_disabled()) 120# define preemptible() (preempt_count() == 0 && !irqs_disabled())
121# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
122#else 121#else
123# define preemptible() 0 122# define preemptible() 0
124# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
125#endif 123#endif
126 124
127#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS) 125#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index ef9acd3c8450..01d25e6fc792 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -854,6 +854,8 @@ type_pf_tresize(struct ip_set *set, bool retried)
854retry: 854retry:
855 ret = 0; 855 ret = 0;
856 htable_bits++; 856 htable_bits++;
857 pr_debug("attempt to resize set %s from %u to %u, t %p\n",
858 set->name, orig->htable_bits, htable_bits, orig);
857 if (!htable_bits) { 859 if (!htable_bits) {
858 /* In case we have plenty of memory :-) */ 860 /* In case we have plenty of memory :-) */
859 pr_warning("Cannot increase the hashsize of set %s further\n", 861 pr_warning("Cannot increase the hashsize of set %s further\n",
@@ -873,7 +875,7 @@ retry:
873 data = ahash_tdata(n, j); 875 data = ahash_tdata(n, j);
874 m = hbucket(t, HKEY(data, h->initval, htable_bits)); 876 m = hbucket(t, HKEY(data, h->initval, htable_bits));
875 ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0, 877 ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0,
876 type_pf_data_timeout(data)); 878 ip_set_timeout_get(type_pf_data_timeout(data)));
877 if (ret < 0) { 879 if (ret < 0) {
878 read_unlock_bh(&set->lock); 880 read_unlock_bh(&set->lock);
879 ahash_destroy(t); 881 ahash_destroy(t);
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 23070fd83872..7df93f52db08 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -199,6 +199,8 @@ enum regulator_type {
199 * output when using regulator_set_voltage_sel_regmap 199 * output when using regulator_set_voltage_sel_regmap
200 * @enable_reg: Register for control when using regmap enable/disable ops 200 * @enable_reg: Register for control when using regmap enable/disable ops
201 * @enable_mask: Mask for control when using regmap enable/disable ops 201 * @enable_mask: Mask for control when using regmap enable/disable ops
202 * @bypass_reg: Register for control when using regmap set_bypass
203 * @bypass_mask: Mask for control when using regmap set_bypass
202 * 204 *
203 * @enable_time: Time taken for initial enable of regulator (in uS). 205 * @enable_time: Time taken for initial enable of regulator (in uS).
204 */ 206 */
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index c65dee059913..13e929679550 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -24,6 +24,9 @@ struct smpboot_thread_data;
24 * parked (cpu offline) 24 * parked (cpu offline)
25 * @unpark: Optional unpark function, called when the thread is 25 * @unpark: Optional unpark function, called when the thread is
26 * unparked (cpu online) 26 * unparked (cpu online)
27 * @pre_unpark: Optional unpark function, called before the thread is
28 * unparked (cpu online). This is not guaranteed to be
29 * called on the target cpu of the thread. Careful!
27 * @selfparking: Thread is not parked by the park function. 30 * @selfparking: Thread is not parked by the park function.
28 * @thread_comm: The base name of the thread 31 * @thread_comm: The base name of the thread
29 */ 32 */
@@ -37,6 +40,7 @@ struct smp_hotplug_thread {
37 void (*cleanup)(unsigned int cpu, bool online); 40 void (*cleanup)(unsigned int cpu, bool online);
38 void (*park)(unsigned int cpu); 41 void (*park)(unsigned int cpu);
39 void (*unpark)(unsigned int cpu); 42 void (*unpark)(unsigned int cpu);
43 void (*pre_unpark)(unsigned int cpu);
40 bool selfparking; 44 bool selfparking;
41 const char *thread_comm; 45 const char *thread_comm;
42}; 46};
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 23f2e98d4b65..cf0694d4ad60 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1045,6 +1045,10 @@ static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1045 if (sysctl_tcp_low_latency || !tp->ucopy.task) 1045 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1046 return false; 1046 return false;
1047 1047
1048 if (skb->len <= tcp_hdrlen(skb) &&
1049 skb_queue_len(&tp->ucopy.prequeue) == 0)
1050 return false;
1051
1048 __skb_queue_tail(&tp->ucopy.prequeue, skb); 1052 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1049 tp->ucopy.memory += skb->truesize; 1053 tp->ucopy.memory += skb->truesize;
1050 if (tp->ucopy.memory > sk->sk_rcvbuf) { 1054 if (tp->ucopy.memory > sk->sk_rcvbuf) {
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index b9bde5727829..25d3d8b6e4e1 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -209,6 +209,8 @@ static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cp
209{ 209{
210 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); 210 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
211 211
212 if (ht->pre_unpark)
213 ht->pre_unpark(cpu);
212 kthread_unpark(tsk); 214 kthread_unpark(tsk);
213} 215}
214 216
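Together with the smpboot.h hunk above, this adds an optional pre_unpark() hook that runs before kthread_unpark(); stop_machine.c below moves cpu_stop_unpark() onto it. A userspace sketch of the optional-hook call pattern, with hypothetical type and function names:

/* Sketch (userspace, hypothetical struct): invoke the optional pre_unpark()
 * hook only when the client supplied one, before the thread is unparked. */
#include <stdio.h>

struct hotplug_thread_sketch {
	void (*pre_unpark)(unsigned int cpu);
	void (*unpark)(unsigned int cpu);
};

static void unpark_thread(struct hotplug_thread_sketch *ht, unsigned int cpu)
{
	if (ht->pre_unpark)	/* optional; may run on a different CPU */
		ht->pre_unpark(cpu);
	printf("kthread for cpu %u unparked\n", cpu);
	if (ht->unpark)		/* also optional */
		ht->unpark(cpu);
}

static void stop_unpark(unsigned int cpu)
{
	printf("cpu_stop-style setup for cpu %u before it runs\n", cpu);
}

int main(void)
{
	struct hotplug_thread_sketch stopper = { .pre_unpark = stop_unpark };

	unpark_thread(&stopper, 1);
	return 0;
}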
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b4d252fd195b..14d7758074aa 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -323,18 +323,10 @@ void irq_enter(void)
323 323
324static inline void invoke_softirq(void) 324static inline void invoke_softirq(void)
325{ 325{
326 if (!force_irqthreads) { 326 if (!force_irqthreads)
327#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
328 __do_softirq(); 327 __do_softirq();
329#else 328 else
330 do_softirq();
331#endif
332 } else {
333 __local_bh_disable((unsigned long)__builtin_return_address(0),
334 SOFTIRQ_OFFSET);
335 wakeup_softirqd(); 329 wakeup_softirqd();
336 __local_bh_enable(SOFTIRQ_OFFSET);
337 }
338} 330}
339 331
340/* 332/*
@@ -342,9 +334,15 @@ static inline void invoke_softirq(void)
342 */ 334 */
343void irq_exit(void) 335void irq_exit(void)
344{ 336{
337#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
338 local_irq_disable();
339#else
340 WARN_ON_ONCE(!irqs_disabled());
341#endif
342
345 account_irq_exit_time(current); 343 account_irq_exit_time(current);
346 trace_hardirq_exit(); 344 trace_hardirq_exit();
347 sub_preempt_count(IRQ_EXIT_OFFSET); 345 sub_preempt_count(HARDIRQ_OFFSET);
348 if (!in_interrupt() && local_softirq_pending()) 346 if (!in_interrupt() && local_softirq_pending())
349 invoke_softirq(); 347 invoke_softirq();
350 348
@@ -354,7 +352,6 @@ void irq_exit(void)
354 tick_nohz_irq_exit(); 352 tick_nohz_irq_exit();
355#endif 353#endif
356 rcu_irq_exit(); 354 rcu_irq_exit();
357 sched_preempt_enable_no_resched();
358} 355}
359 356
360/* 357/*
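irq_exit() now either disables interrupts itself or warns when they are unexpectedly enabled, depending on __ARCH_IRQ_EXIT_IRQS_DISABLED, and drops the IRQ_EXIT_OFFSET bookkeeping removed from hardirq.h above. A loose userspace analogue of that compile-time guard (the macro, flag and assert() stand in for the real primitives):

/* Sketch (userspace analogue): either enforce the "interrupts disabled"
 * invariant ourselves or merely check it, chosen at compile time. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;	/* stand-in for the CPU interrupt flag */

static void local_irq_disable(void)
{
	irqs_enabled = false;
}

static void irq_exit_sketch(void)
{
#ifndef ARCH_IRQ_EXIT_IRQS_DISABLED	/* hypothetical config macro */
	local_irq_disable();		/* enforce the invariant here */
#else
	assert(!irqs_enabled);		/* caller must have enforced it */
#endif
	printf("irq_exit body runs with interrupts off\n");
}

int main(void)
{
#ifdef ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();		/* the architecture guarantees this */
#endif
	irq_exit_sketch();
	return 0;
}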
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 95d178c62d5a..c09f2955ae30 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -336,7 +336,7 @@ static struct smp_hotplug_thread cpu_stop_threads = {
336 .create = cpu_stop_create, 336 .create = cpu_stop_create,
337 .setup = cpu_stop_unpark, 337 .setup = cpu_stop_unpark,
338 .park = cpu_stop_park, 338 .park = cpu_stop_park,
339 .unpark = cpu_stop_unpark, 339 .pre_unpark = cpu_stop_unpark,
340 .selfparking = true, 340 .selfparking = true,
341}; 341};
342 342
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 1ae1d9cb278d..21760f008974 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -118,7 +118,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
118 return NULL; 118 return NULL;
119} 119}
120 120
121void caif_flow_cb(struct sk_buff *skb) 121static void caif_flow_cb(struct sk_buff *skb)
122{ 122{
123 struct caif_device_entry *caifd; 123 struct caif_device_entry *caifd;
124 void (*dtor)(struct sk_buff *skb) = NULL; 124 void (*dtor)(struct sk_buff *skb) = NULL;
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index 3ebc8cbc91ff..ef8ebaa993cf 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -81,8 +81,8 @@ static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
81 layr->up->ctrlcmd(layr->up, ctrl, layr->id); 81 layr->up->ctrlcmd(layr->up, ctrl, layr->id);
82} 82}
83 83
84struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN], 84static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
85 u8 braddr[ETH_ALEN]) 85 u8 braddr[ETH_ALEN])
86{ 86{
87 struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC); 87 struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
88 88
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 87abd3e2bd32..2bdf802e28e2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -228,9 +228,11 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
228 icmp_send(skb, ICMP_DEST_UNREACH, 228 icmp_send(skb, ICMP_DEST_UNREACH,
229 ICMP_PROT_UNREACH, 0); 229 ICMP_PROT_UNREACH, 0);
230 } 230 }
231 } else 231 kfree_skb(skb);
232 } else {
232 IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS); 233 IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
233 kfree_skb(skb); 234 consume_skb(skb);
235 }
234 } 236 }
235 } 237 }
236 out: 238 out:
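The ip_input.c change (mirrored in the ip6_input.c hunk below) frees undeliverable packets with kfree_skb() but switches delivered-then-released ones to consume_skb(), so drop monitoring no longer counts normal deliveries. A userspace analogue of keeping those two release paths distinct:

/* Sketch (userspace analogue): keep the "dropped" and "delivered, now being
 * released" paths separate so drop accounting stays meaningful. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long drops, deliveries;

static void drop_pkt(void *pkt)		/* kfree_skb() role */
{
	drops++;
	free(pkt);
}

static void consume_pkt(void *pkt)	/* consume_skb() role */
{
	deliveries++;
	free(pkt);
}

int main(void)
{
	void *p1 = malloc(64), *p2 = malloc(64);

	drop_pkt(p1);		/* no handler for the protocol: a real drop */
	consume_pkt(p2);	/* delivered, then released: not a drop */
	printf("drops=%lu deliveries=%lu\n", drops, deliveries);
	return 0;
}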
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a759e19496d2..0d9bdacce99f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5485,6 +5485,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5485 if (tcp_checksum_complete_user(sk, skb)) 5485 if (tcp_checksum_complete_user(sk, skb))
5486 goto csum_error; 5486 goto csum_error;
5487 5487
5488 if ((int)skb->truesize > sk->sk_forward_alloc)
5489 goto step5;
5490
5488 /* Predicted packet is in window by definition. 5491 /* Predicted packet is in window by definition.
5489 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5492 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
5490 * Hence, check seq<=rcv_wup reduces to: 5493 * Hence, check seq<=rcv_wup reduces to:
@@ -5496,9 +5499,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5496 5499
5497 tcp_rcv_rtt_measure_ts(sk, skb); 5500 tcp_rcv_rtt_measure_ts(sk, skb);
5498 5501
5499 if ((int)skb->truesize > sk->sk_forward_alloc)
5500 goto step5;
5501
5502 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); 5502 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
5503 5503
5504 /* Bulk data transfer: receiver */ 5504 /* Bulk data transfer: receiver */
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 5b10414e619e..b1876e52091e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -241,9 +241,11 @@ resubmit:
241 icmpv6_send(skb, ICMPV6_PARAMPROB, 241 icmpv6_send(skb, ICMPV6_PARAMPROB,
242 ICMPV6_UNK_NEXTHDR, nhoff); 242 ICMPV6_UNK_NEXTHDR, nhoff);
243 } 243 }
244 } else 244 kfree_skb(skb);
245 } else {
245 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); 246 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
246 kfree_skb(skb); 247 consume_skb(skb);
248 }
247 } 249 }
248 rcu_read_unlock(); 250 rcu_read_unlock();
249 return 0; 251 return 0;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 928266569689..e5fe0041adfa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1915,7 +1915,8 @@ void rt6_purge_dflt_routers(struct net *net)
1915restart: 1915restart:
1916 read_lock_bh(&table->tb6_lock); 1916 read_lock_bh(&table->tb6_lock);
1917 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { 1917 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1918 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) { 1918 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
1919 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
1919 dst_hold(&rt->dst); 1920 dst_hold(&rt->dst);
1920 read_unlock_bh(&table->tb6_lock); 1921 read_unlock_bh(&table->tb6_lock);
1921 ip6_del_rt(rt); 1922 ip6_del_rt(rt);
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index e71e85ba2bf1..29340a9a6fb9 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
495/* case CS_ISO_8859_9: */ 495/* case CS_ISO_8859_9: */
496/* case CS_UNICODE: */ 496/* case CS_UNICODE: */
497 default: 497 default:
498 IRDA_DEBUG(0, "%s(), charset %s, not supported\n", 498 IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
499 __func__, ias_charset_types[charset]); 499 __func__, charset,
500 charset < ARRAY_SIZE(ias_charset_types) ?
501 ias_charset_types[charset] :
502 "(unknown)");
500 503
501 /* Aborting, close connection! */ 504 /* Aborting, close connection! */
502 iriap_disconnect_request(self); 505 iriap_disconnect_request(self);
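The iriap.c hunk stops indexing ias_charset_types[] with an unvalidated charset value. The same ARRAY_SIZE()-style bounds check in plain userspace C (the table contents are made up):

/* Sketch (userspace): validate the index against ARRAY_SIZE() before using
 * it as a table lookup. */
#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const char *charset_names[] = { "ASCII", "ISO-8859-1", "ISO-8859-2" };

static const char *charset_name(unsigned int charset)
{
	return charset < ARRAY_SIZE(charset_names) ?
	       charset_names[charset] : "(unknown)";
}

int main(void)
{
	printf("%u -> %s\n", 1u, charset_name(1));
	printf("%u -> %s\n", 200u, charset_name(200));	/* out of range */
	return 0;
}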
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 3f4e3afc191a..6a53371dba1f 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -355,6 +355,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
355 l2tp_xmit_skb(session, skb, session->hdr_len); 355 l2tp_xmit_skb(session, skb, session->hdr_len);
356 356
357 sock_put(ps->tunnel_sock); 357 sock_put(ps->tunnel_sock);
358 sock_put(sk);
358 359
359 return error; 360 return error;
360 361
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 09d96a8f6c2c..808f5fcd1ced 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3285,13 +3285,19 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
3285 struct cfg80211_chan_def *chandef) 3285 struct cfg80211_chan_def *chandef)
3286{ 3286{
3287 struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); 3287 struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
3288 struct ieee80211_local *local = wiphy_priv(wiphy);
3288 struct ieee80211_chanctx_conf *chanctx_conf; 3289 struct ieee80211_chanctx_conf *chanctx_conf;
3289 int ret = -ENODATA; 3290 int ret = -ENODATA;
3290 3291
3291 rcu_read_lock(); 3292 rcu_read_lock();
3292 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 3293 if (local->use_chanctx) {
3293 if (chanctx_conf) { 3294 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
3294 *chandef = chanctx_conf->def; 3295 if (chanctx_conf) {
3296 *chandef = chanctx_conf->def;
3297 ret = 0;
3298 }
3299 } else if (local->open_count == local->monitors) {
3300 *chandef = local->monitor_chandef;
3295 ret = 0; 3301 ret = 0;
3296 } 3302 }
3297 rcu_read_unlock(); 3303 rcu_read_unlock();
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 2c059e54e885..640afab304d7 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -107,7 +107,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
107 107
108 lockdep_assert_held(&local->mtx); 108 lockdep_assert_held(&local->mtx);
109 109
110 active = !list_empty(&local->chanctx_list); 110 active = !list_empty(&local->chanctx_list) || local->monitors;
111 111
112 if (!local->ops->remain_on_channel) { 112 if (!local->ops->remain_on_channel) {
113 list_for_each_entry(roc, &local->roc_list, list) { 113 list_for_each_entry(roc, &local->roc_list, list) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index de8548bf0a7f..ce78d1149f1d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1231,34 +1231,40 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1231 if (local->queue_stop_reasons[q] || 1231 if (local->queue_stop_reasons[q] ||
1232 (!txpending && !skb_queue_empty(&local->pending[q]))) { 1232 (!txpending && !skb_queue_empty(&local->pending[q]))) {
1233 if (unlikely(info->flags & 1233 if (unlikely(info->flags &
1234 IEEE80211_TX_INTFL_OFFCHAN_TX_OK && 1234 IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
1235 local->queue_stop_reasons[q] & 1235 if (local->queue_stop_reasons[q] &
1236 ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL))) { 1236 ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
1237 /*
1238 * Drop off-channel frames if queues
1239 * are stopped for any reason other
1240 * than off-channel operation. Never
1241 * queue them.
1242 */
1243 spin_unlock_irqrestore(
1244 &local->queue_stop_reason_lock,
1245 flags);
1246 ieee80211_purge_tx_queue(&local->hw,
1247 skbs);
1248 return true;
1249 }
1250 } else {
1251
1237 /* 1252 /*
1238 * Drop off-channel frames if queues are stopped 1253 * Since queue is stopped, queue up frames for
1239 * for any reason other than off-channel 1254 * later transmission from the tx-pending
1240 * operation. Never queue them. 1255 * tasklet when the queue is woken again.
1241 */ 1256 */
1242 spin_unlock_irqrestore( 1257 if (txpending)
1243 &local->queue_stop_reason_lock, flags); 1258 skb_queue_splice_init(skbs,
1244 ieee80211_purge_tx_queue(&local->hw, skbs); 1259 &local->pending[q]);
1245 return true; 1260 else
1261 skb_queue_splice_tail_init(skbs,
1262 &local->pending[q]);
1263
1264 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1265 flags);
1266 return false;
1246 } 1267 }
1247
1248 /*
1249 * Since queue is stopped, queue up frames for later
1250 * transmission from the tx-pending tasklet when the
1251 * queue is woken again.
1252 */
1253 if (txpending)
1254 skb_queue_splice_init(skbs, &local->pending[q]);
1255 else
1256 skb_queue_splice_tail_init(skbs,
1257 &local->pending[q]);
1258
1259 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1260 flags);
1261 return false;
1262 } 1268 }
1263 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 1269 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1264 1270
@@ -1844,9 +1850,24 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1844 } 1850 }
1845 1851
1846 if (!is_multicast_ether_addr(skb->data)) { 1852 if (!is_multicast_ether_addr(skb->data)) {
1853 struct sta_info *next_hop;
1854 bool mpp_lookup = true;
1855
1847 mpath = mesh_path_lookup(sdata, skb->data); 1856 mpath = mesh_path_lookup(sdata, skb->data);
1848 if (!mpath) 1857 if (mpath) {
1858 mpp_lookup = false;
1859 next_hop = rcu_dereference(mpath->next_hop);
1860 if (!next_hop ||
1861 !(mpath->flags & (MESH_PATH_ACTIVE |
1862 MESH_PATH_RESOLVING)))
1863 mpp_lookup = true;
1864 }
1865
1866 if (mpp_lookup)
1849 mppath = mpp_path_lookup(sdata, skb->data); 1867 mppath = mpp_path_lookup(sdata, skb->data);
1868
1869 if (mppath && mpath)
1870 mesh_path_del(mpath->sdata, mpath->dst);
1850 } 1871 }
1851 1872
1852 /* 1873 /*
@@ -2350,9 +2371,9 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2350 if (local->tim_in_locked_section) { 2371 if (local->tim_in_locked_section) {
2351 __ieee80211_beacon_add_tim(sdata, ps, skb); 2372 __ieee80211_beacon_add_tim(sdata, ps, skb);
2352 } else { 2373 } else {
2353 spin_lock(&local->tim_lock); 2374 spin_lock_bh(&local->tim_lock);
2354 __ieee80211_beacon_add_tim(sdata, ps, skb); 2375 __ieee80211_beacon_add_tim(sdata, ps, skb);
2355 spin_unlock(&local->tim_lock); 2376 spin_unlock_bh(&local->tim_lock);
2356 } 2377 }
2357 2378
2358 return 0; 2379 return 0;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index f82b2e606cfd..1ba9dbc0e107 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1470,7 +1470,8 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
1470 if (ret == -EAGAIN) 1470 if (ret == -EAGAIN)
1471 ret = 1; 1471 ret = 1;
1472 1472
1473 return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST; 1473 return (ret < 0 && ret != -ENOTEMPTY) ? ret :
1474 ret > 0 ? 0 : -IPSET_ERR_EXIST;
1474} 1475}
1475 1476
1476/* Get headed data of a set */ 1477/* Get headed data of a set */
diff --git a/net/rds/message.c b/net/rds/message.c
index f0a4658f3273..aba232f9f308 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -82,10 +82,7 @@ static void rds_message_purge(struct rds_message *rm)
82void rds_message_put(struct rds_message *rm) 82void rds_message_put(struct rds_message *rm)
83{ 83{
84 rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount)); 84 rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
85 if (atomic_read(&rm->m_refcount) == 0) { 85 WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
86printk(KERN_CRIT "danger refcount zero on %p\n", rm);
87WARN_ON(1);
88 }
89 if (atomic_dec_and_test(&rm->m_refcount)) { 86 if (atomic_dec_and_test(&rm->m_refcount)) {
90 BUG_ON(!list_empty(&rm->m_sock_item)); 87 BUG_ON(!list_empty(&rm->m_sock_item));
91 BUG_ON(!list_empty(&rm->m_conn_item)); 88 BUG_ON(!list_empty(&rm->m_conn_item));
@@ -197,6 +194,9 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
197{ 194{
198 struct rds_message *rm; 195 struct rds_message *rm;
199 196
197 if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
198 return NULL;
199
200 rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp); 200 rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
201 if (!rm) 201 if (!rm)
202 goto out; 202 goto out;
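rds_message_alloc() now rejects an extra_len that would push the allocation past KMALLOC_MAX_SIZE or wrap the addition. A userspace sketch of checking the length before forming header + payload, with an illustrative MAX_ALLOC limit standing in for the kernel constant:

/* Sketch (userspace analogue): reject a caller-controlled extra length before
 * computing header + payload, so the sum can neither wrap nor exceed the
 * allocator limit. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_ALLOC	(128UL * 1024)

struct msg_hdr {
	unsigned int len;
	unsigned int flags;
};

static void *msg_alloc(size_t extra_len)
{
	if (extra_len > MAX_ALLOC - sizeof(struct msg_hdr))
		return NULL;			/* too big or would overflow */
	return calloc(1, sizeof(struct msg_hdr) + extra_len);
}

int main(void)
{
	void *a = msg_alloc(512);
	void *b = msg_alloc((size_t)-1);	/* hostile length */

	printf("small: %s\n", a ? "ok" : "rejected");
	printf("huge:  %s\n", b ? "ok" : "rejected");
	free(a);
	free(b);
	return 0;
}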
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2b3ef03c6098..12ed45dbe75d 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -155,7 +155,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
155 155
156 /* SCTP-AUTH extensions*/ 156 /* SCTP-AUTH extensions*/
157 INIT_LIST_HEAD(&ep->endpoint_shared_keys); 157 INIT_LIST_HEAD(&ep->endpoint_shared_keys);
158 null_key = sctp_auth_shkey_create(0, GFP_KERNEL); 158 null_key = sctp_auth_shkey_create(0, gfp);
159 if (!null_key) 159 if (!null_key)
160 goto nomem; 160 goto nomem;
161 161
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c99458df3f3f..b9070736b8d9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5653,6 +5653,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
5653 if (len < sizeof(sctp_assoc_t)) 5653 if (len < sizeof(sctp_assoc_t))
5654 return -EINVAL; 5654 return -EINVAL;
5655 5655
5656 /* Allow the struct to grow and fill in as much as possible */
5657 len = min_t(size_t, len, sizeof(sas));
5658
5656 if (copy_from_user(&sas, optval, len)) 5659 if (copy_from_user(&sas, optval, len))
5657 return -EFAULT; 5660 return -EFAULT;
5658 5661
@@ -5686,9 +5689,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
5686 /* Mark beginning of a new observation period */ 5689 /* Mark beginning of a new observation period */
5687 asoc->stats.max_obs_rto = asoc->rto_min; 5690 asoc->stats.max_obs_rto = asoc->rto_min;
5688 5691
5689 /* Allow the struct to grow and fill in as much as possible */
5690 len = min_t(size_t, len, sizeof(sas));
5691
5692 if (put_user(len, optlen)) 5692 if (put_user(len, optlen))
5693 return -EFAULT; 5693 return -EFAULT;
5694 5694
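The sctp_getsockopt_assoc_stats() fix clamps the user-supplied length to sizeof(sas) before copy_from_user(), not after, so an oversized len can no longer overrun the on-stack struct. A userspace analogue of clamp-then-copy (the struct layout and -22/-EINVAL value are illustrative):

/* Sketch (userspace analogue): clamp the requested length to the destination
 * struct before copying, not after. */
#include <stdio.h>
#include <string.h>

struct assoc_stats {
	unsigned long sas[16];
};

static int get_stats(void *dst, const void *src, size_t len)
{
	struct assoc_stats out = { { 0 } };

	if (len < sizeof(unsigned long))
		return -22;			/* request too short */
	if (len > sizeof(out))
		len = sizeof(out);		/* clamp before the copy */
	memcpy(&out, src, len);			/* cannot overrun "out" now */
	memcpy(dst, &out, len);
	return (int)len;
}

int main(void)
{
	struct assoc_stats src = { { 1, 2, 3 } }, dst;
	int n = get_stats(&dst, &src, 4096);	/* oversized request */

	printf("copied %d bytes, first word %lu\n", n, dst.sas[0]);
	return 0;
}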
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index 442ad4ed6315..825ea94415b3 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -41,8 +41,6 @@
41#include <net/sctp/sctp.h> 41#include <net/sctp/sctp.h>
42#include <net/sctp/sm.h> 42#include <net/sctp/sm.h>
43 43
44#define MAX_KMALLOC_SIZE 131072
45
46static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, 44static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
47 __u16 out); 45 __u16 out);
48 46
@@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
65 int size; 63 int size;
66 64
67 size = sctp_ssnmap_size(in, out); 65 size = sctp_ssnmap_size(in, out);
68 if (size <= MAX_KMALLOC_SIZE) 66 if (size <= KMALLOC_MAX_SIZE)
69 retval = kmalloc(size, gfp); 67 retval = kmalloc(size, gfp);
70 else 68 else
71 retval = (struct sctp_ssnmap *) 69 retval = (struct sctp_ssnmap *)
@@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
82 return retval; 80 return retval;
83 81
84fail_map: 82fail_map:
85 if (size <= MAX_KMALLOC_SIZE) 83 if (size <= KMALLOC_MAX_SIZE)
86 kfree(retval); 84 kfree(retval);
87 else 85 else
88 free_pages((unsigned long)retval, get_order(size)); 86 free_pages((unsigned long)retval, get_order(size));
@@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map)
124 int size; 122 int size;
125 123
126 size = sctp_ssnmap_size(map->in.len, map->out.len); 124 size = sctp_ssnmap_size(map->in.len, map->out.len);
127 if (size <= MAX_KMALLOC_SIZE) 125 if (size <= KMALLOC_MAX_SIZE)
128 kfree(map); 126 kfree(map);
129 else 127 else
130 free_pages((unsigned long)map, get_order(size)); 128 free_pages((unsigned long)map, get_order(size));
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 5f25e0c92c31..396c45174e5b 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -51,7 +51,7 @@
51static void sctp_tsnmap_update(struct sctp_tsnmap *map); 51static void sctp_tsnmap_update(struct sctp_tsnmap *map);
52static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off, 52static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
53 __u16 len, __u16 *start, __u16 *end); 53 __u16 len, __u16 *start, __u16 *end);
54static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap); 54static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
55 55
56/* Initialize a block of memory as a tsnmap. */ 56/* Initialize a block of memory as a tsnmap. */
57struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len, 57struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
@@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
124 124
125 gap = tsn - map->base_tsn; 125 gap = tsn - map->base_tsn;
126 126
127 if (gap >= map->len && !sctp_tsnmap_grow(map, gap)) 127 if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
128 return -ENOMEM; 128 return -ENOMEM;
129 129
130 if (!sctp_tsnmap_has_gap(map) && gap == 0) { 130 if (!sctp_tsnmap_has_gap(map) && gap == 0) {
@@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
360 return ngaps; 360 return ngaps;
361} 361}
362 362
363static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap) 363static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
364{ 364{
365 unsigned long *new; 365 unsigned long *new;
366 unsigned long inc; 366 unsigned long inc;
367 u16 len; 367 u16 len;
368 368
369 if (gap >= SCTP_TSN_MAP_SIZE) 369 if (size > SCTP_TSN_MAP_SIZE)
370 return 0; 370 return 0;
371 371
372 inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT; 372 inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
373 len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE); 373 len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
374 374
375 new = kzalloc(len>>3, GFP_ATOMIC); 375 new = kzalloc(len>>3, GFP_ATOMIC);
376 if (!new) 376 if (!new)
377 return 0; 377 return 0;
378 378
379 bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn); 379 bitmap_copy(new, map->tsn_map,
380 map->max_tsn_seen - map->cumulative_tsn_ack_point);
380 kfree(map->tsn_map); 381 kfree(map->tsn_map);
381 map->tsn_map = new; 382 map->tsn_map = new;
382 map->len = len; 383 map->len = len;
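sctp_tsnmap_grow() now receives the required size (gap + 1) rather than the highest index, since marking bit "gap" needs at least gap + 1 slots. A userspace sketch of that grow-by-size contract (sizes and the limit are illustrative; error handling is trimmed):

/* Sketch (userspace analogue): grow the map to a requested *size*, and ask
 * for gap + 1 slots when index "gap" must be stored. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAP_LIMIT	4096u

static unsigned char *grow(unsigned char *map, unsigned int *len,
			   unsigned int size)
{
	unsigned char *n;

	if (size > MAP_LIMIT)
		return NULL;
	n = calloc(size, 1);
	if (!n)
		return NULL;
	memcpy(n, map, *len);
	free(map);
	*len = size;
	return n;
}

int main(void)
{
	unsigned int len = 8, gap = 8;	/* index 8 does not fit in 8 slots */
	unsigned char *map = calloc(len, 1);

	if (map && gap >= len)
		map = grow(map, &len, gap + 1);	/* request gap + 1, not gap */
	if (map) {
		map[gap] = 1;
		printf("len=%u, marked index %u\n", len, gap);
	}
	free(map);
	return 0;
}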
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ada17464b65b..0fd5b3d2df03 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
106{ 106{
107 struct sk_buff_head temp; 107 struct sk_buff_head temp;
108 struct sctp_ulpevent *event; 108 struct sctp_ulpevent *event;
109 int event_eor = 0;
109 110
110 /* Create an event from the incoming chunk. */ 111 /* Create an event from the incoming chunk. */
111 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); 112 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
127 /* Send event to the ULP. 'event' is the sctp_ulpevent for 128 /* Send event to the ULP. 'event' is the sctp_ulpevent for
128 * very first SKB on the 'temp' list. 129 * very first SKB on the 'temp' list.
129 */ 130 */
130 if (event) 131 if (event) {
132 event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
131 sctp_ulpq_tail_event(ulpq, event); 133 sctp_ulpq_tail_event(ulpq, event);
134 }
132 135
133 return 0; 136 return event_eor;
134} 137}
135 138
136/* Add a new event for propagation to the ULP. */ 139/* Add a new event for propagation to the ULP. */
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
540 ctsn = cevent->tsn; 543 ctsn = cevent->tsn;
541 544
542 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { 545 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
546 case SCTP_DATA_FIRST_FRAG:
547 if (!first_frag)
548 return NULL;
549 goto done;
543 case SCTP_DATA_MIDDLE_FRAG: 550 case SCTP_DATA_MIDDLE_FRAG:
544 if (!first_frag) { 551 if (!first_frag) {
545 first_frag = pos; 552 first_frag = pos;
546 next_tsn = ctsn + 1; 553 next_tsn = ctsn + 1;
547 last_frag = pos; 554 last_frag = pos;
548 } else if (next_tsn == ctsn) 555 } else if (next_tsn == ctsn) {
549 next_tsn++; 556 next_tsn++;
550 else 557 last_frag = pos;
558 } else
551 goto done; 559 goto done;
552 break; 560 break;
553 case SCTP_DATA_LAST_FRAG: 561 case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
651 } else 659 } else
652 goto done; 660 goto done;
653 break; 661 break;
662
663 case SCTP_DATA_LAST_FRAG:
664 if (!first_frag)
665 return NULL;
666 else
667 goto done;
668 break;
669
654 default: 670 default:
655 return NULL; 671 return NULL;
656 } 672 }
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
962 struct sk_buff_head *list, __u16 needed) 978 struct sk_buff_head *list, __u16 needed)
963{ 979{
964 __u16 freed = 0; 980 __u16 freed = 0;
965 __u32 tsn; 981 __u32 tsn, last_tsn;
966 struct sk_buff *skb; 982 struct sk_buff *skb, *flist, *last;
967 struct sctp_ulpevent *event; 983 struct sctp_ulpevent *event;
968 struct sctp_tsnmap *tsnmap; 984 struct sctp_tsnmap *tsnmap;
969 985
970 tsnmap = &ulpq->asoc->peer.tsn_map; 986 tsnmap = &ulpq->asoc->peer.tsn_map;
971 987
972 while ((skb = __skb_dequeue_tail(list)) != NULL) { 988 while ((skb = skb_peek_tail(list)) != NULL) {
973 freed += skb_headlen(skb);
974 event = sctp_skb2event(skb); 989 event = sctp_skb2event(skb);
975 tsn = event->tsn; 990 tsn = event->tsn;
976 991
992 /* Don't renege below the Cumulative TSN ACK Point. */
993 if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
994 break;
995
996 /* Events in ordering queue may have multiple fragments
997 * corresponding to additional TSNs. Sum the total
998 * freed space; find the last TSN.
999 */
1000 freed += skb_headlen(skb);
1001 flist = skb_shinfo(skb)->frag_list;
1002 for (last = flist; flist; flist = flist->next) {
1003 last = flist;
1004 freed += skb_headlen(last);
1005 }
1006 if (last)
1007 last_tsn = sctp_skb2event(last)->tsn;
1008 else
1009 last_tsn = tsn;
1010
1011 /* Unlink the event, then renege all applicable TSNs. */
1012 __skb_unlink(skb, list);
977 sctp_ulpevent_free(event); 1013 sctp_ulpevent_free(event);
978 sctp_tsnmap_renege(tsnmap, tsn); 1014 while (TSN_lte(tsn, last_tsn)) {
1015 sctp_tsnmap_renege(tsnmap, tsn);
1016 tsn++;
1017 }
979 if (freed >= needed) 1018 if (freed >= needed)
980 return freed; 1019 return freed;
981 } 1020 }
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1002 struct sctp_ulpevent *event; 1041 struct sctp_ulpevent *event;
1003 struct sctp_association *asoc; 1042 struct sctp_association *asoc;
1004 struct sctp_sock *sp; 1043 struct sctp_sock *sp;
1044 __u32 ctsn;
1045 struct sk_buff *skb;
1005 1046
1006 asoc = ulpq->asoc; 1047 asoc = ulpq->asoc;
1007 sp = sctp_sk(asoc->base.sk); 1048 sp = sctp_sk(asoc->base.sk);
1008 1049
1009 /* If the association is already in Partial Delivery mode 1050 /* If the association is already in Partial Delivery mode
1010 * we have noting to do. 1051 * we have nothing to do.
1011 */ 1052 */
1012 if (ulpq->pd_mode) 1053 if (ulpq->pd_mode)
1013 return; 1054 return;
1014 1055
1056 /* Data must be at or below the Cumulative TSN ACK Point to
1057 * start partial delivery.
1058 */
1059 skb = skb_peek(&asoc->ulpq.reasm);
1060 if (skb != NULL) {
1061 ctsn = sctp_skb2event(skb)->tsn;
1062 if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
1063 return;
1064 }
1065
1015 /* If the user enabled fragment interleave socket option, 1066 /* If the user enabled fragment interleave socket option,
1016 * multiple associations can enter partial delivery. 1067 * multiple associations can enter partial delivery.
1017 * Otherwise, we can only enter partial delivery if the 1068 * Otherwise, we can only enter partial delivery if the
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1054 } 1105 }
1055 /* If able to free enough room, accept this chunk. */ 1106 /* If able to free enough room, accept this chunk. */
1056 if (chunk && (freed >= needed)) { 1107 if (chunk && (freed >= needed)) {
1057 __u32 tsn; 1108 int retval;
1058 tsn = ntohl(chunk->subh.data_hdr->tsn); 1109 retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1059 sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport); 1110 /*
1060 sctp_ulpq_tail_data(ulpq, chunk, gfp); 1111 * Enter partial delivery if chunk has not been
1061 1112 * delivered; otherwise, drain the reassembly queue.
1062 sctp_ulpq_partial_delivery(ulpq, gfp); 1113 */
1114 if (retval <= 0)
1115 sctp_ulpq_partial_delivery(ulpq, gfp);
1116 else if (retval == 1)
1117 sctp_ulpq_reasm_drain(ulpq);
1063 } 1118 }
1064 1119
1065 sk_mem_reclaim(asoc->base.sk); 1120 sk_mem_reclaim(asoc->base.sk);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 35545ccc30fd..e652d05ff712 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -554,16 +554,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
554 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && 554 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
555 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS)) 555 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
556 goto nla_put_failure; 556 goto nla_put_failure;
557 if (chan->flags & IEEE80211_CHAN_RADAR) { 557 if ((chan->flags & IEEE80211_CHAN_RADAR) &&
558 u32 time = elapsed_jiffies_msecs(chan->dfs_state_entered); 558 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
559 if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) 559 goto nla_put_failure;
560 goto nla_put_failure;
561 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE,
562 chan->dfs_state))
563 goto nla_put_failure;
564 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time))
565 goto nla_put_failure;
566 }
567 if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) && 560 if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
568 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS)) 561 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
569 goto nla_put_failure; 562 goto nla_put_failure;
@@ -900,9 +893,6 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
900 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, 893 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
901 c->max_interfaces)) 894 c->max_interfaces))
902 goto nla_put_failure; 895 goto nla_put_failure;
903 if (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
904 c->radar_detect_widths))
905 goto nla_put_failure;
906 896
907 nla_nest_end(msg, nl_combi); 897 nla_nest_end(msg, nl_combi);
908 } 898 }
@@ -914,48 +904,6 @@ nla_put_failure:
914 return -ENOBUFS; 904 return -ENOBUFS;
915} 905}
916 906
917#ifdef CONFIG_PM
918static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
919 struct sk_buff *msg)
920{
921 const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp;
922 struct nlattr *nl_tcp;
923
924 if (!tcp)
925 return 0;
926
927 nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
928 if (!nl_tcp)
929 return -ENOBUFS;
930
931 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
932 tcp->data_payload_max))
933 return -ENOBUFS;
934
935 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
936 tcp->data_payload_max))
937 return -ENOBUFS;
938
939 if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ))
940 return -ENOBUFS;
941
942 if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
943 sizeof(*tcp->tok), tcp->tok))
944 return -ENOBUFS;
945
946 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
947 tcp->data_interval_max))
948 return -ENOBUFS;
949
950 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
951 tcp->wake_payload_max))
952 return -ENOBUFS;
953
954 nla_nest_end(msg, nl_tcp);
955 return 0;
956}
957#endif
958
959static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags, 907static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags,
960 struct cfg80211_registered_device *dev) 908 struct cfg80211_registered_device *dev)
961{ 909{
@@ -1330,9 +1278,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
1330 goto nla_put_failure; 1278 goto nla_put_failure;
1331 } 1279 }
1332 1280
1333 if (nl80211_send_wowlan_tcp_caps(dev, msg))
1334 goto nla_put_failure;
1335
1336 nla_nest_end(msg, nl_wowlan); 1281 nla_nest_end(msg, nl_wowlan);
1337 } 1282 }
1338#endif 1283#endif
diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
index 066f5f3e3f4c..c3908862bc8b 100644
--- a/sound/core/seq/oss/seq_oss_event.c
+++ b/sound/core/seq/oss/seq_oss_event.c
@@ -285,7 +285,12 @@ local_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev
285static int 285static int
286note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, struct snd_seq_event *ev) 286note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, struct snd_seq_event *ev)
287{ 287{
288 struct seq_oss_synthinfo *info = &dp->synths[dev]; 288 struct seq_oss_synthinfo *info;
289
290 if (!snd_seq_oss_synth_is_valid(dp, dev))
291 return -ENXIO;
292
293 info = &dp->synths[dev];
289 switch (info->arg.event_passing) { 294 switch (info->arg.event_passing) {
290 case SNDRV_SEQ_OSS_PROCESS_EVENTS: 295 case SNDRV_SEQ_OSS_PROCESS_EVENTS:
291 if (! info->ch || ch < 0 || ch >= info->nr_voices) { 296 if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -340,7 +345,12 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
340static int 345static int
341note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, struct snd_seq_event *ev) 346note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, struct snd_seq_event *ev)
342{ 347{
343 struct seq_oss_synthinfo *info = &dp->synths[dev]; 348 struct seq_oss_synthinfo *info;
349
350 if (!snd_seq_oss_synth_is_valid(dp, dev))
351 return -ENXIO;
352
353 info = &dp->synths[dev];
344 switch (info->arg.event_passing) { 354 switch (info->arg.event_passing) {
345 case SNDRV_SEQ_OSS_PROCESS_EVENTS: 355 case SNDRV_SEQ_OSS_PROCESS_EVENTS:
346 if (! info->ch || ch < 0 || ch >= info->nr_voices) { 356 if (! info->ch || ch < 0 || ch >= info->nr_voices) {
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 857586135d18..0097f3619faa 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -213,7 +213,10 @@ static int slave_put(struct snd_kcontrol *kcontrol,
213 } 213 }
214 if (!changed) 214 if (!changed)
215 return 0; 215 return 0;
216 return slave_put_val(slave, ucontrol); 216 err = slave_put_val(slave, ucontrol);
217 if (err < 0)
218 return err;
219 return 1;
217} 220}
218 221
219static int slave_tlv_cmd(struct snd_kcontrol *kcontrol, 222static int slave_tlv_cmd(struct snd_kcontrol *kcontrol,
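slave_put() now follows the ALSA control convention of returning a negative error, 0 for "unchanged" or 1 for "changed", instead of passing back 0 from slave_put_val() on success. A userspace sketch of that three-way return contract (the range check and -22/-EINVAL value are illustrative):

/* Sketch (userspace analogue): a "put" callback that returns a negative
 * error, 0 when nothing changed or 1 when the value changed. */
#include <stdio.h>

static int current_val;

static int put_val(int v)
{
	if (v < 0 || v > 100)
		return -22;		/* reject out-of-range input */
	if (v == current_val)
		return 0;		/* unchanged */
	current_val = v;
	return 1;			/* changed */
}

int main(void)
{
	printf("put 50  -> %d\n", put_val(50));
	printf("put 50  -> %d\n", put_val(50));
	printf("put 999 -> %d\n", put_val(999));
	return 0;
}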
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 04b57383e8cb..97c68dd24ef5 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3334,6 +3334,8 @@ int snd_hda_create_dig_out_ctls(struct hda_codec *codec,
3334 return -EBUSY; 3334 return -EBUSY;
3335 } 3335 }
3336 spdif = snd_array_new(&codec->spdif_out); 3336 spdif = snd_array_new(&codec->spdif_out);
3337 if (!spdif)
3338 return -ENOMEM;
3337 for (dig_mix = dig_mixes; dig_mix->name; dig_mix++) { 3339 for (dig_mix = dig_mixes; dig_mix->name; dig_mix++) {
3338 kctl = snd_ctl_new1(dig_mix, codec); 3340 kctl = snd_ctl_new1(dig_mix, codec);
3339 if (!kctl) 3341 if (!kctl)
@@ -3431,11 +3433,16 @@ static struct snd_kcontrol_new spdif_share_sw = {
 int snd_hda_create_spdif_share_sw(struct hda_codec *codec,
 				  struct hda_multi_out *mout)
 {
+	struct snd_kcontrol *kctl;
+
 	if (!mout->dig_out_nid)
 		return 0;
+
+	kctl = snd_ctl_new1(&spdif_share_sw, mout);
+	if (!kctl)
+		return -ENOMEM;
 	/* ATTENTION: here mout is passed as private_data, instead of codec */
-	return snd_hda_ctl_add(codec, mout->dig_out_nid,
-			       snd_ctl_new1(&spdif_share_sw, mout));
+	return snd_hda_ctl_add(codec, mout->dig_out_nid, kctl);
 }
 EXPORT_SYMBOL_HDA(snd_hda_create_spdif_share_sw);
 
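
Both hda_codec.c hunks add the same missing step: snd_array_new() and snd_ctl_new1() can return NULL, and the old code either used the result unchecked or passed it directly into snd_hda_ctl_add(). A standalone sketch of the check-allocation-before-use pattern, with generic names standing in for the HDA helpers:

#include <errno.h>
#include <stdlib.h>

struct kctl { int id; };

/* Hypothetical allocator standing in for snd_ctl_new1()/snd_array_new(). */
static struct kctl *ctl_new(void)
{
	return calloc(1, sizeof(struct kctl));
}

static int ctl_register(struct kctl *kctl)
{
	/* the real code would hand the control to the core here */
	return 0;
}

static int create_control(void)
{
	struct kctl *kctl = ctl_new();

	if (!kctl)			/* check before handing it on */
		return -ENOMEM;
	return ctl_register(kctl);
}
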
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index db02c1e96b08..eefc4563b2f9 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -2298,6 +2298,11 @@ static int dspxfr_one_seg(struct hda_codec *codec,
 	hda_frame_size_words = ((sample_rate_div == 0) ? 0 :
 				(num_chans * sample_rate_mul / sample_rate_div));
 
+	if (hda_frame_size_words == 0) {
+		snd_printdd(KERN_ERR "frmsz zero\n");
+		return -EINVAL;
+	}
+
 	buffer_size_words = min(buffer_size_words,
 				(unsigned int)(UC_RANGE(chip_addx, 1) ?
 				65536 : 32768));
@@ -2308,8 +2313,7 @@ static int dspxfr_one_seg(struct hda_codec *codec,
 		    chip_addx, hda_frame_size_words, num_chans,
 		    sample_rate_mul, sample_rate_div, buffer_size_words);
 
-	if ((buffer_addx == NULL) || (hda_frame_size_words == 0) ||
-	    (buffer_size_words < hda_frame_size_words)) {
+	if (buffer_size_words < hda_frame_size_words) {
 		snd_printdd(KERN_ERR "dspxfr_one_seg:failed\n");
 		return -EINVAL;
 	}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2d4237bc0d8e..563c24df4d6f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3163,6 +3163,7 @@ static int patch_alc269(struct hda_codec *codec)
 	case 0x10ec0290:
 		spec->codec_variant = ALC269_TYPE_ALC280;
 		break;
+	case 0x10ec0233:
 	case 0x10ec0282:
 	case 0x10ec0283:
 		spec->codec_variant = ALC269_TYPE_ALC282;
@@ -3862,6 +3863,7 @@ static int patch_alc680(struct hda_codec *codec)
  */
 static const struct hda_codec_preset snd_hda_preset_realtek[] = {
 	{ .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 },
+	{ .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
 	{ .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
 	{ .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 },
 	{ .id = 0x10ec0267, .name = "ALC267", .patch = patch_alc268 },
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 2ffdc35d5ffd..806407a3973e 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -2594,6 +2594,8 @@ static int snd_ice1712_create(struct snd_card *card,
 	snd_ice1712_proc_init(ice);
 	synchronize_irq(pci->irq);
 
+	card->private_data = ice;
+
 	err = pci_request_regions(pci, "ICE1712");
 	if (err < 0) {
 		kfree(ice);
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index b8d461db369f..b82bbf584146 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -573,6 +573,13 @@ static const struct reg_default wm5102_sysclk_reva_patch[] = {
 	{ 0x025e, 0x0112 },
 };
 
+static const struct reg_default wm5102_sysclk_revb_patch[] = {
+	{ 0x3081, 0x08FE },
+	{ 0x3083, 0x00ED },
+	{ 0x30C1, 0x08FE },
+	{ 0x30C3, 0x00ED },
+};
+
 static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
 			    struct snd_kcontrol *kcontrol, int event)
 {
@@ -587,6 +594,10 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
 		patch = wm5102_sysclk_reva_patch;
 		patch_size = ARRAY_SIZE(wm5102_sysclk_reva_patch);
 		break;
+	default:
+		patch = wm5102_sysclk_revb_patch;
+		patch_size = ARRAY_SIZE(wm5102_sysclk_revb_patch);
+		break;
 	}
 
 	switch (event) {
@@ -755,7 +766,7 @@ SOC_SINGLE("SPKDAT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_5L,
 
 SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L,
 	     ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1),
-SOC_DOUBLE_R("OUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L,
+SOC_DOUBLE_R("HPOUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L,
 	     ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_OUT2L_MUTE_SHIFT, 1, 1),
 SOC_SINGLE("EPOUT Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_3L,
 	   ARIZONA_OUT3L_MUTE_SHIFT, 1, 1),
@@ -767,7 +778,7 @@ SOC_DOUBLE_R("SPKDAT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_5L,
 SOC_DOUBLE_R_TLV("HPOUT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_1L,
 		 ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_VOL_SHIFT,
 		 0xbf, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("OUT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_2L,
+SOC_DOUBLE_R_TLV("HPOUT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_2L,
 		 ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_OUT2L_VOL_SHIFT,
 		 0xbf, 0, digital_tlv),
 SOC_SINGLE_TLV("EPOUT Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_3L,
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index cd17b477781d..cdeb301da1f6 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -213,9 +213,9 @@ ARIZONA_MIXER_CONTROLS("SPKDAT2R", ARIZONA_OUT6RMIX_INPUT_1_SOURCE),
 
 SOC_SINGLE("HPOUT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_1L,
 	   ARIZONA_OUT1_OSR_SHIFT, 1, 0),
-SOC_SINGLE("OUT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_2L,
+SOC_SINGLE("HPOUT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_2L,
 	   ARIZONA_OUT2_OSR_SHIFT, 1, 0),
-SOC_SINGLE("OUT3 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_3L,
+SOC_SINGLE("HPOUT3 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_3L,
 	   ARIZONA_OUT3_OSR_SHIFT, 1, 0),
 SOC_SINGLE("Speaker High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_4L,
 	   ARIZONA_OUT4_OSR_SHIFT, 1, 0),
@@ -226,9 +226,9 @@ SOC_SINGLE("SPKDAT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_6L,
 
 SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L,
 	     ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1),
-SOC_DOUBLE_R("OUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L,
+SOC_DOUBLE_R("HPOUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L,
 	     ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_OUT2L_MUTE_SHIFT, 1, 1),
-SOC_DOUBLE_R("OUT3 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_3L,
+SOC_DOUBLE_R("HPOUT3 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_3L,
 	     ARIZONA_DAC_DIGITAL_VOLUME_3R, ARIZONA_OUT3L_MUTE_SHIFT, 1, 1),
 SOC_DOUBLE_R("Speaker Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_4L,
 	     ARIZONA_DAC_DIGITAL_VOLUME_4R, ARIZONA_OUT4L_MUTE_SHIFT, 1, 1),
@@ -240,10 +240,10 @@ SOC_DOUBLE_R("SPKDAT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_6L,
 SOC_DOUBLE_R_TLV("HPOUT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_1L,
 		 ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_VOL_SHIFT,
 		 0xbf, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("OUT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_2L,
+SOC_DOUBLE_R_TLV("HPOUT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_2L,
 		 ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_OUT2L_VOL_SHIFT,
 		 0xbf, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("OUT3 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_3L,
+SOC_DOUBLE_R_TLV("HPOUT3 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_3L,
 		 ARIZONA_DAC_DIGITAL_VOLUME_3R, ARIZONA_OUT3L_VOL_SHIFT,
 		 0xbf, 0, digital_tlv),
 SOC_DOUBLE_R_TLV("Speaker Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_4L,
@@ -260,11 +260,11 @@ SOC_DOUBLE_R_RANGE_TLV("HPOUT1 Volume", ARIZONA_OUTPUT_PATH_CONFIG_1L,
 		       ARIZONA_OUTPUT_PATH_CONFIG_1R,
 		       ARIZONA_OUT1L_PGA_VOL_SHIFT,
 		       0x34, 0x40, 0, ana_tlv),
-SOC_DOUBLE_R_RANGE_TLV("OUT2 Volume", ARIZONA_OUTPUT_PATH_CONFIG_2L,
+SOC_DOUBLE_R_RANGE_TLV("HPOUT2 Volume", ARIZONA_OUTPUT_PATH_CONFIG_2L,
 		       ARIZONA_OUTPUT_PATH_CONFIG_2R,
 		       ARIZONA_OUT2L_PGA_VOL_SHIFT,
 		       0x34, 0x40, 0, ana_tlv),
-SOC_DOUBLE_R_RANGE_TLV("OUT3 Volume", ARIZONA_OUTPUT_PATH_CONFIG_3L,
+SOC_DOUBLE_R_RANGE_TLV("HPOUT3 Volume", ARIZONA_OUTPUT_PATH_CONFIG_3L,
 		       ARIZONA_OUTPUT_PATH_CONFIG_3R,
 		       ARIZONA_OUT3L_PGA_VOL_SHIFT, 0x34, 0x40, 0, ana_tlv),
 
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index ec0efc1443ba..0e8b3aaf6c8d 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1301,7 +1301,7 @@ static irqreturn_t wm8350_hpl_jack_handler(int irq, void *data)
 	if (device_may_wakeup(wm8350->dev))
 		pm_wakeup_event(wm8350->dev, 250);
 
-	schedule_delayed_work(&priv->hpl.work, 200);
+	schedule_delayed_work(&priv->hpl.work, msecs_to_jiffies(200));
 
 	return IRQ_HANDLED;
 }
@@ -1318,7 +1318,7 @@ static irqreturn_t wm8350_hpr_jack_handler(int irq, void *data)
 	if (device_may_wakeup(wm8350->dev))
 		pm_wakeup_event(wm8350->dev, 250);
 
-	schedule_delayed_work(&priv->hpr.work, 200);
+	schedule_delayed_work(&priv->hpr.work, msecs_to_jiffies(200));
 
 	return IRQ_HANDLED;
 }
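
schedule_delayed_work() takes its delay in jiffies, so the raw constant 200 meant 200 timer ticks, whose wall-clock length depends on the tick rate (2 s at HZ=100, 200 ms only at HZ=1000); wrapping it in msecs_to_jiffies(200) pins the intent to 200 ms regardless of CONFIG_HZ. A small userspace sketch of the conversion; the HZ value and rounding here are illustrative approximations of what msecs_to_jiffies() does:

#include <stdio.h>

#define HZ 250	/* example tick rate; the kernel's is CONFIG_HZ */

/* Convert milliseconds to ticks, rounding up. */
static unsigned long ms_to_ticks(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
	printf("a raw delay of 200 ticks lasts %u ms at HZ=%d\n",
	       200 * 1000 / HZ, HZ);
	printf("200 ms is %lu ticks at HZ=%d\n", ms_to_ticks(200), HZ);
	return 0;
}
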
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 9bb927325993..a64b93425ae3 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -53,8 +53,8 @@
  * using 2 wire for device control, so we cache them instead.
  */
 static const struct reg_default wm8960_reg_defaults[] = {
-	{ 0x0, 0x0097 },
-	{ 0x1, 0x0097 },
+	{ 0x0, 0x00a7 },
+	{ 0x1, 0x00a7 },
 	{ 0x2, 0x0000 },
 	{ 0x3, 0x0000 },
 	{ 0x4, 0x0000 },
@@ -323,8 +323,8 @@ SND_SOC_DAPM_MIXER("Left Input Mixer", WM8960_POWER3, 5, 0,
 SND_SOC_DAPM_MIXER("Right Input Mixer", WM8960_POWER3, 4, 0,
 		   wm8960_rin, ARRAY_SIZE(wm8960_rin)),
 
-SND_SOC_DAPM_ADC("Left ADC", "Capture", WM8960_POWER2, 3, 0),
-SND_SOC_DAPM_ADC("Right ADC", "Capture", WM8960_POWER2, 2, 0),
+SND_SOC_DAPM_ADC("Left ADC", "Capture", WM8960_POWER1, 3, 0),
+SND_SOC_DAPM_ADC("Right ADC", "Capture", WM8960_POWER1, 2, 0),
 
 SND_SOC_DAPM_DAC("Left DAC", "Playback", WM8960_POWER2, 8, 0),
 SND_SOC_DAPM_DAC("Right DAC", "Playback", WM8960_POWER2, 7, 0),
diff --git a/sound/soc/tegra/tegra20_i2s.h b/sound/soc/tegra/tegra20_i2s.h
index c27069d24d77..729958713cd4 100644
--- a/sound/soc/tegra/tegra20_i2s.h
+++ b/sound/soc/tegra/tegra20_i2s.h
@@ -121,7 +121,7 @@
 
 #define TEGRA20_I2S_TIMING_NON_SYM_ENABLE		(1 << 12)
 #define TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT	0
-#define TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US	0x7fff
+#define TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US	0x7ff
 #define TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_MASK	(TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US << TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT)
 
 /* Fields in TEGRA20_I2S_FIFO_SCR */
diff --git a/sound/soc/tegra/tegra30_i2s.h b/sound/soc/tegra/tegra30_i2s.h
index 34dc47b9581c..a294d942b9f7 100644
--- a/sound/soc/tegra/tegra30_i2s.h
+++ b/sound/soc/tegra/tegra30_i2s.h
@@ -110,7 +110,7 @@
 
 #define TEGRA30_I2S_TIMING_NON_SYM_ENABLE		(1 << 12)
 #define TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT	0
-#define TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US	0x7fff
+#define TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US	0x7ff
 #define TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_MASK	(TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US << TEGRA30_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT)
 
 /* Fields in TEGRA30_I2S_OFFSET */
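
In both Tegra headers the bit-count field starts at bit 0 and the NON_SYM_ENABLE flag sits at bit 12, so the old 0x7fff mask (bits 0-14) would let a masked write to the bit-count field disturb that flag; the corrected 0x7ff covers only bits 0-10. A standalone sketch of the width-to-mask relationship and a masked field update, with generic macro names and the 11-bit width implied by the new mask:

#include <assert.h>

#define FIELD_SHIFT	0
#define FIELD_WIDTH	11				/* implied by the new 0x7ff mask */
#define FIELD_MASK	(((1u << FIELD_WIDTH) - 1) << FIELD_SHIFT)
#define NON_SYM_ENABLE	(1u << 12)			/* neighbouring flag bit */

/* Update only the field bits, leaving the rest of the register alone. */
static unsigned int set_field(unsigned int reg, unsigned int val)
{
	return (reg & ~FIELD_MASK) | ((val << FIELD_SHIFT) & FIELD_MASK);
}

int main(void)
{
	/* With FIELD_MASK == 0x7ff the flag at bit 12 survives the update;
	 * an over-wide 0x7fff mask could clear it whenever the field was
	 * rewritten. */
	assert(FIELD_MASK == 0x7ff);
	assert(set_field(NON_SYM_ENABLE | 0x3ff, 0x123) ==
	       (NON_SYM_ENABLE | 0x123));
	return 0;
}
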
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
index 880cdd5dc63f..77edcdcc016b 100644
--- a/tools/testing/selftests/efivarfs/efivarfs.sh
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh
@@ -125,6 +125,63 @@ test_open_unlink()
 	./open-unlink $file
 }
 
+# test that we can create a range of filenames
+test_valid_filenames()
+{
+	local attrs='\x07\x00\x00\x00'
+	local ret=0
+
+	local file_list="abc dump-type0-11-1-1362436005 1234 -"
+	for f in $file_list; do
+		local file=$efivarfs_mount/$f-$test_guid
+
+		printf "$attrs\x00" > $file
+
+		if [ ! -e $file ]; then
+			echo "$file could not be created" >&2
+			ret=1
+		else
+			rm $file
+		fi
+	done
+
+	exit $ret
+}
+
+test_invalid_filenames()
+{
+	local attrs='\x07\x00\x00\x00'
+	local ret=0
+
+	local file_list="
+		-1234-1234-1234-123456789abc
+		foo
+		foo-bar
+		-foo-
+		foo-barbazba-foob-foob-foob-foobarbazfoo
+		foo-------------------------------------
+		-12345678-1234-1234-1234-123456789abc
+		a-12345678=1234-1234-1234-123456789abc
+		a-12345678-1234=1234-1234-123456789abc
+		a-12345678-1234-1234=1234-123456789abc
+		a-12345678-1234-1234-1234=123456789abc
+		1112345678-1234-1234-1234-123456789abc"
+
+	for f in $file_list; do
+		local file=$efivarfs_mount/$f
+
+		printf "$attrs\x00" 2>/dev/null > $file
+
+		if [ -e $file ]; then
+			echo "Creating $file should have failed" >&2
+			rm $file
+			ret=1
+		fi
+	done
+
+	exit $ret
+}
+
 check_prereqs
 
 rc=0
@@ -135,5 +192,7 @@ run_test test_create_read
 run_test test_delete
 run_test test_zero_size_delete
 run_test test_open_unlink
+run_test test_valid_filenames
+run_test test_invalid_filenames
 
 exit $rc