aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/networking/ip-sysctl.txt4
-rw-r--r--MAINTAINERS2
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig14
-rw-r--r--arch/arm/include/asm/cputype.h2
-rw-r--r--arch/arm/include/asm/glue-proc.h9
-rw-r--r--arch/arm/include/asm/smp_plat.h2
-rw-r--r--arch/arm/kernel/devtree.c10
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/mm/nommu.c6
-rw-r--r--arch/arm/mm/proc-fa526.S1
-rw-r--r--arch/arm/mm/proc-macros.S5
-rw-r--r--arch/arm/mm/proc-v7.S34
-rw-r--r--arch/mn10300/include/asm/uaccess.h2
-rw-r--r--arch/mn10300/kernel/setup.c54
-rw-r--r--arch/powerpc/kernel/pci-common.c17
-rw-r--r--arch/powerpc/platforms/pseries/eeh_cache.c4
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pe.c3
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c24
-rw-r--r--arch/s390/include/asm/dma-mapping.h3
-rw-r--r--arch/s390/kernel/ipl.c8
-rw-r--r--arch/s390/kernel/irq.c2
-rw-r--r--arch/s390/mm/mem_detect.c3
-rw-r--r--arch/x86/kernel/kprobes/core.c14
-rw-r--r--crypto/algboss.c15
-rw-r--r--crypto/api.c6
-rw-r--r--crypto/internal.h6
-rw-r--r--drivers/acpi/dock.c179
-rw-r--r--drivers/acpi/internal.h5
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/ata/libata-acpi.c37
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/libata.h2
-rw-r--r--drivers/block/rbd.c14
-rw-r--r--drivers/bluetooth/btmrvl_main.c9
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c17
-rw-r--r--drivers/gpio/gpio-omap.c22
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c37
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c5
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/keyboard/Kconfig1
-rw-r--r--drivers/input/serio/Kconfig1
-rw-r--r--drivers/input/tablet/wacom_wac.c2
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c28
-rw-r--r--drivers/input/touchscreen/cyttsp_core.h2
-rw-r--r--drivers/mfd/tps6586x.c2
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/can/usb/usb_8dev.c5
-rw-r--r--drivers/net/ethernet/atheros/Kconfig18
-rw-r--r--drivers/net/ethernet/atheros/Makefile1
-rw-r--r--drivers/net/ethernet/atheros/alx/Makefile3
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h114
-rw-r--r--drivers/net/ethernet/atheros/alx/ethtool.c272
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.c1226
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.h499
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c1625
-rw-r--r--drivers/net/ethernet/atheros/alx/reg.h810
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c36
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c14
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c3
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c31
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c38
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h2
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c66
-rw-r--r--drivers/net/ethernet/ti/cpsw.c5
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c7
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/macvtap.c6
-rw-r--r--drivers/net/tun.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c8
-rw-r--r--drivers/net/vxlan.c40
-rw-r--r--drivers/net/wan/dlci.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c17
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c29
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c53
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pci/setup-bus.c8
-rw-r--r--drivers/regulator/tps6586x-regulator.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c7
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c15
-rw-r--r--drivers/scsi/ipr.c16
-rw-r--r--drivers/scsi/ipr.h6
-rw-r--r--drivers/scsi/libfc/fc_exch.c37
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h11
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c26
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c2
-rw-r--r--drivers/spi/spi-s3c64xx.c2
-rw-r--r--fs/exec.c16
-rw-r--r--fs/fuse/file.c12
-rw-r--r--fs/splice.c1
-rw-r--r--fs/ubifs/dir.c54
-rw-r--r--include/acpi/acpi_drivers.h8
-rw-r--r--include/linux/if_vlan.h2
-rw-r--r--include/linux/netdevice.h1
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--kernel/events/hw_breakpoint.c6
-rw-r--r--kernel/ptrace.c20
-rw-r--r--kernel/time/tick-broadcast.c7
-rw-r--r--net/bluetooth/hci_core.c15
-rw-r--r--net/bluetooth/l2cap_core.c5
-rw-r--r--net/bridge/br_multicast.c5
-rw-r--r--net/core/dev.c34
-rw-r--r--net/core/dev_ioctl.c19
-rw-r--r--net/core/ethtool.c6
-rw-r--r--net/core/skbuff.c20
-rw-r--r--net/core/sock.c17
-rw-r--r--net/ipv4/gre.c2
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c12
-rw-r--r--net/ipv4/tcp_ipv4.c4
-rw-r--r--net/ipv6/addrconf.c12
-rw-r--r--net/ipv6/ip6_output.c13
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c2
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/mac80211/cfg.c6
-rw-r--r--net/mac80211/ieee80211_i.h5
-rw-r--r--net/mac80211/mlme.c87
-rw-r--r--net/mac80211/rate.c2
-rw-r--r--net/mac80211/util.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c3
-rw-r--r--net/netfilter/nf_conntrack_labels.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c1
-rw-r--r--net/netfilter/nf_nat_sip.c3
-rw-r--r--net/netfilter/xt_TCPMSS.c25
-rw-r--r--net/netfilter/xt_TCPOPTSTRIP.c6
-rw-r--r--net/wireless/nl80211.c11
-rw-r--r--sound/soc/codecs/wm8962.c2
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c4
-rw-r--r--sound/soc/mxs/mxs-saif.c35
-rw-r--r--sound/soc/samsung/i2s.c66
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.c4
154 files changed, 5702 insertions, 674 deletions
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f98ca633b528..3458d6343e01 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -420,10 +420,10 @@ tcp_synack_retries - INTEGER
420 for a passive TCP connection will happen after 63seconds. 420 for a passive TCP connection will happen after 63seconds.
421 421
422tcp_syncookies - BOOLEAN 422tcp_syncookies - BOOLEAN
423 Only valid when the kernel was compiled with CONFIG_SYNCOOKIES 423 Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
424 Send out syncookies when the syn backlog queue of a socket 424 Send out syncookies when the syn backlog queue of a socket
425 overflows. This is to prevent against the common 'SYN flood attack' 425 overflows. This is to prevent against the common 'SYN flood attack'
426 Default: FALSE 426 Default: 1
427 427
428 Note, that syncookies is fallback facility. 428 Note, that syncookies is fallback facility.
429 It MUST NOT be used to help highly loaded servers to stand 429 It MUST NOT be used to help highly loaded servers to stand
diff --git a/MAINTAINERS b/MAINTAINERS
index 5be702cc8449..ad7e322ad17b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3220,7 +3220,7 @@ F: lib/fault-inject.c
3220 3220
3221FCOE SUBSYSTEM (libfc, libfcoe, fcoe) 3221FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
3222M: Robert Love <robert.w.love@intel.com> 3222M: Robert Love <robert.w.love@intel.com>
3223L: devel@open-fcoe.org 3223L: fcoe-devel@open-fcoe.org
3224W: www.Open-FCoE.org 3224W: www.Open-FCoE.org
3225S: Supported 3225S: Supported
3226F: drivers/scsi/libfc/ 3226F: drivers/scsi/libfc/
diff --git a/Makefile b/Makefile
index 0142c934adbd..e5e3ba085191 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 10 2PATCHLEVEL = 10
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc7 4EXTRAVERSION =
5NAME = Unicycling Gorilla 5NAME = Unicycling Gorilla
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2651b1da1c56..136f263ed47b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1087,6 +1087,20 @@ if !MMU
1087source "arch/arm/Kconfig-nommu" 1087source "arch/arm/Kconfig-nommu"
1088endif 1088endif
1089 1089
1090config PJ4B_ERRATA_4742
1091 bool "PJ4B Errata 4742: IDLE Wake Up Commands can Cause the CPU Core to Cease Operation"
1092 depends on CPU_PJ4B && MACH_ARMADA_370
1093 default y
1094 help
1095 When coming out of either a Wait for Interrupt (WFI) or a Wait for
1096 Event (WFE) IDLE states, a specific timing sensitivity exists between
1097 the retiring WFI/WFE instructions and the newly issued subsequent
1098 instructions. This sensitivity can result in a CPU hang scenario.
1099 Workaround:
1100 The software must insert either a Data Synchronization Barrier (DSB)
1101 or Data Memory Barrier (DMB) command immediately after the WFI/WFE
1102 instruction
1103
1090config ARM_ERRATA_326103 1104config ARM_ERRATA_326103
1091 bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory" 1105 bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
1092 depends on CPU_V6 1106 depends on CPU_V6
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 7652712d1d14..dba62cb1ad08 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -32,6 +32,8 @@
32 32
33#define MPIDR_HWID_BITMASK 0xFFFFFF 33#define MPIDR_HWID_BITMASK 0xFFFFFF
34 34
35#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
36
35#define MPIDR_LEVEL_BITS 8 37#define MPIDR_LEVEL_BITS 8
36#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1) 38#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
37 39
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index ac1dd54724b6..8017e94acc5e 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -230,6 +230,15 @@
230# endif 230# endif
231#endif 231#endif
232 232
233#ifdef CONFIG_CPU_PJ4B
234# ifdef CPU_NAME
235# undef MULTI_CPU
236# define MULTI_CPU
237# else
238# define CPU_NAME cpu_pj4b
239# endif
240#endif
241
233#ifndef MULTI_CPU 242#ifndef MULTI_CPU
234#define cpu_proc_init __glue(CPU_NAME,_proc_init) 243#define cpu_proc_init __glue(CPU_NAME,_proc_init)
235#define cpu_proc_fin __glue(CPU_NAME,_proc_fin) 244#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index aaa61b6f50ff..e78983202737 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -49,7 +49,7 @@ static inline int cache_ops_need_broadcast(void)
49/* 49/*
50 * Logical CPU mapping. 50 * Logical CPU mapping.
51 */ 51 */
52extern int __cpu_logical_map[]; 52extern u32 __cpu_logical_map[];
53#define cpu_logical_map(cpu) __cpu_logical_map[cpu] 53#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
54/* 54/*
55 * Retrieve logical cpu index corresponding to a given MPIDR[23:0] 55 * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 5af04f6daa33..5859c8bc727c 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -82,7 +82,7 @@ void __init arm_dt_init_cpu_maps(void)
82 u32 i, j, cpuidx = 1; 82 u32 i, j, cpuidx = 1;
83 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0; 83 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
84 84
85 u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX }; 85 u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
86 bool bootcpu_valid = false; 86 bool bootcpu_valid = false;
87 cpus = of_find_node_by_path("/cpus"); 87 cpus = of_find_node_by_path("/cpus");
88 88
@@ -92,6 +92,9 @@ void __init arm_dt_init_cpu_maps(void)
92 for_each_child_of_node(cpus, cpu) { 92 for_each_child_of_node(cpus, cpu) {
93 u32 hwid; 93 u32 hwid;
94 94
95 if (of_node_cmp(cpu->type, "cpu"))
96 continue;
97
95 pr_debug(" * %s...\n", cpu->full_name); 98 pr_debug(" * %s...\n", cpu->full_name);
96 /* 99 /*
97 * A device tree containing CPU nodes with missing "reg" 100 * A device tree containing CPU nodes with missing "reg"
@@ -149,9 +152,10 @@ void __init arm_dt_init_cpu_maps(void)
149 tmp_map[i] = hwid; 152 tmp_map[i] = hwid;
150 } 153 }
151 154
152 if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], " 155 if (!bootcpu_valid) {
153 "fall back to default cpu_logical_map\n")) 156 pr_warn("DT missing boot CPU MPIDR[23:0], fall back to default cpu_logical_map\n");
154 return; 157 return;
158 }
155 159
156 /* 160 /*
157 * Since the boot CPU node contains proper data, and all nodes have 161 * Since the boot CPU node contains proper data, and all nodes have
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1522c7ae31b0..b4b1d397592b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -444,7 +444,7 @@ void notrace cpu_init(void)
444 : "r14"); 444 : "r14");
445} 445}
446 446
447int __cpu_logical_map[NR_CPUS]; 447u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
448 448
449void __init smp_setup_processor_id(void) 449void __init smp_setup_processor_id(void)
450{ 450{
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index d51225f90ae2..eb5293a69a84 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -57,6 +57,12 @@ void flush_dcache_page(struct page *page)
57} 57}
58EXPORT_SYMBOL(flush_dcache_page); 58EXPORT_SYMBOL(flush_dcache_page);
59 59
60void flush_kernel_dcache_page(struct page *page)
61{
62 __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
63}
64EXPORT_SYMBOL(flush_kernel_dcache_page);
65
60void copy_to_user_page(struct vm_area_struct *vma, struct page *page, 66void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
61 unsigned long uaddr, void *dst, const void *src, 67 unsigned long uaddr, void *dst, const void *src,
62 unsigned long len) 68 unsigned long len)
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index d217e9795d74..aaeb6c127c7a 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -81,7 +81,6 @@ ENDPROC(cpu_fa526_reset)
81 */ 81 */
82 .align 4 82 .align 4
83ENTRY(cpu_fa526_do_idle) 83ENTRY(cpu_fa526_do_idle)
84 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
85 mov pc, lr 84 mov pc, lr
86 85
87 86
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index f9a0aa725ea9..e3c48a3fe063 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -333,3 +333,8 @@ ENTRY(\name\()_tlb_fns)
333 .endif 333 .endif
334 .size \name\()_tlb_fns, . - \name\()_tlb_fns 334 .size \name\()_tlb_fns, . - \name\()_tlb_fns
335.endm 335.endm
336
337.macro globl_equ x, y
338 .globl \x
339 .equ \x, \y
340.endm
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 4c8c9c10a388..e35fec34453e 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -140,6 +140,29 @@ ENTRY(cpu_v7_do_resume)
140ENDPROC(cpu_v7_do_resume) 140ENDPROC(cpu_v7_do_resume)
141#endif 141#endif
142 142
143#ifdef CONFIG_CPU_PJ4B
144 globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm
145 globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext
146 globl_equ cpu_pj4b_proc_init, cpu_v7_proc_init
147 globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin
148 globl_equ cpu_pj4b_reset, cpu_v7_reset
149#ifdef CONFIG_PJ4B_ERRATA_4742
150ENTRY(cpu_pj4b_do_idle)
151 dsb @ WFI may enter a low-power mode
152 wfi
153 dsb @barrier
154 mov pc, lr
155ENDPROC(cpu_pj4b_do_idle)
156#else
157 globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle
158#endif
159 globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area
160 globl_equ cpu_pj4b_do_suspend, cpu_v7_do_suspend
161 globl_equ cpu_pj4b_do_resume, cpu_v7_do_resume
162 globl_equ cpu_pj4b_suspend_size, cpu_v7_suspend_size
163
164#endif
165
143 __CPUINIT 166 __CPUINIT
144 167
145/* 168/*
@@ -350,6 +373,9 @@ __v7_setup_stack:
350 373
351 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) 374 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
352 define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 375 define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
376#ifdef CONFIG_CPU_PJ4B
377 define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
378#endif
353 379
354 .section ".rodata" 380 .section ".rodata"
355 381
@@ -362,7 +388,7 @@ __v7_setup_stack:
362 /* 388 /*
363 * Standard v7 proc info content 389 * Standard v7 proc info content
364 */ 390 */
365.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 391.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
366 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ 392 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
367 PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags) 393 PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
368 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ 394 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
@@ -375,7 +401,7 @@ __v7_setup_stack:
375 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \ 401 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
376 HWCAP_EDSP | HWCAP_TLS | \hwcaps 402 HWCAP_EDSP | HWCAP_TLS | \hwcaps
377 .long cpu_v7_name 403 .long cpu_v7_name
378 .long v7_processor_functions 404 .long \proc_fns
379 .long v7wbi_tlb_fns 405 .long v7wbi_tlb_fns
380 .long v6_user_fns 406 .long v6_user_fns
381 .long v7_cache_fns 407 .long v7_cache_fns
@@ -407,12 +433,14 @@ __v7_ca9mp_proc_info:
407 /* 433 /*
408 * Marvell PJ4B processor. 434 * Marvell PJ4B processor.
409 */ 435 */
436#ifdef CONFIG_CPU_PJ4B
410 .type __v7_pj4b_proc_info, #object 437 .type __v7_pj4b_proc_info, #object
411__v7_pj4b_proc_info: 438__v7_pj4b_proc_info:
412 .long 0x560f5800 439 .long 0x560f5800
413 .long 0xff0fff00 440 .long 0xff0fff00
414 __v7_proc __v7_pj4b_setup 441 __v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions
415 .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info 442 .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
443#endif
416 444
417 /* 445 /*
418 * ARM Ltd. Cortex A7 processor. 446 * ARM Ltd. Cortex A7 processor.
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 780560b330d9..d7966e0f7698 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -161,7 +161,7 @@ struct __large_struct { unsigned long buf[100]; };
161 161
162#define __get_user_check(x, ptr, size) \ 162#define __get_user_check(x, ptr, size) \
163({ \ 163({ \
164 const __typeof__(ptr) __guc_ptr = (ptr); \ 164 const __typeof__(*(ptr))* __guc_ptr = (ptr); \
165 int _e; \ 165 int _e; \
166 if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \ 166 if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
167 _e = __get_user_nocheck((x), __guc_ptr, (size)); \ 167 _e = __get_user_nocheck((x), __guc_ptr, (size)); \
diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c
index 33c3bd1e5c6d..ebac9c11f796 100644
--- a/arch/mn10300/kernel/setup.c
+++ b/arch/mn10300/kernel/setup.c
@@ -38,6 +38,7 @@ struct mn10300_cpuinfo boot_cpu_data;
38/* For PCI or other memory-mapped resources */ 38/* For PCI or other memory-mapped resources */
39unsigned long pci_mem_start = 0x18000000; 39unsigned long pci_mem_start = 0x18000000;
40 40
41static char __initdata cmd_line[COMMAND_LINE_SIZE];
41char redboot_command_line[COMMAND_LINE_SIZE] = 42char redboot_command_line[COMMAND_LINE_SIZE] =
42 "console=ttyS0,115200 root=/dev/mtdblock3 rw"; 43 "console=ttyS0,115200 root=/dev/mtdblock3 rw";
43 44
@@ -74,45 +75,19 @@ static const char *const mn10300_cputypes[] = {
74}; 75};
75 76
76/* 77/*
77 * 78 * Pick out the memory size. We look for mem=size,
79 * where size is "size[KkMm]"
78 */ 80 */
79static void __init parse_mem_cmdline(char **cmdline_p) 81static int __init early_mem(char *p)
80{ 82{
81 char *from, *to, c; 83 memory_size = memparse(p, &p);
82
83 /* save unparsed command line copy for /proc/cmdline */
84 strcpy(boot_command_line, redboot_command_line);
85
86 /* see if there's an explicit memory size option */
87 from = redboot_command_line;
88 to = redboot_command_line;
89 c = ' ';
90
91 for (;;) {
92 if (c == ' ' && !memcmp(from, "mem=", 4)) {
93 if (to != redboot_command_line)
94 to--;
95 memory_size = memparse(from + 4, &from);
96 }
97
98 c = *(from++);
99 if (!c)
100 break;
101
102 *(to++) = c;
103 }
104
105 *to = '\0';
106 *cmdline_p = redboot_command_line;
107 84
108 if (memory_size == 0) 85 if (memory_size == 0)
109 panic("Memory size not known\n"); 86 panic("Memory size not known\n");
110 87
111 memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS + 88 return 0;
112 memory_size;
113 if (memory_end > phys_memory_end)
114 memory_end = phys_memory_end;
115} 89}
90early_param("mem", early_mem);
116 91
117/* 92/*
118 * architecture specific setup 93 * architecture specific setup
@@ -125,7 +100,20 @@ void __init setup_arch(char **cmdline_p)
125 cpu_init(); 100 cpu_init();
126 unit_setup(); 101 unit_setup();
127 smp_init_cpus(); 102 smp_init_cpus();
128 parse_mem_cmdline(cmdline_p); 103
104 /* save unparsed command line copy for /proc/cmdline */
105 strlcpy(boot_command_line, redboot_command_line, COMMAND_LINE_SIZE);
106
107 /* populate cmd_line too for later use, preserving boot_command_line */
108 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
109 *cmdline_p = cmd_line;
110
111 parse_early_param();
112
113 memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS +
114 memory_size;
115 if (memory_end > phys_memory_end)
116 memory_end = phys_memory_end;
129 117
130 init_mm.start_code = (unsigned long)&_text; 118 init_mm.start_code = (unsigned long)&_text;
131 init_mm.end_code = (unsigned long) &_etext; 119 init_mm.end_code = (unsigned long) &_etext;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index eabeec991016..f46914a0f33e 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -994,7 +994,7 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
994 ppc_md.pci_dma_bus_setup(bus); 994 ppc_md.pci_dma_bus_setup(bus);
995} 995}
996 996
997void pcibios_setup_device(struct pci_dev *dev) 997static void pcibios_setup_device(struct pci_dev *dev)
998{ 998{
999 /* Fixup NUMA node as it may not be setup yet by the generic 999 /* Fixup NUMA node as it may not be setup yet by the generic
1000 * code and is needed by the DMA init 1000 * code and is needed by the DMA init
@@ -1015,6 +1015,17 @@ void pcibios_setup_device(struct pci_dev *dev)
1015 ppc_md.pci_irq_fixup(dev); 1015 ppc_md.pci_irq_fixup(dev);
1016} 1016}
1017 1017
1018int pcibios_add_device(struct pci_dev *dev)
1019{
1020 /*
1021 * We can only call pcibios_setup_device() after bus setup is complete,
1022 * since some of the platform specific DMA setup code depends on it.
1023 */
1024 if (dev->bus->is_added)
1025 pcibios_setup_device(dev);
1026 return 0;
1027}
1028
1018void pcibios_setup_bus_devices(struct pci_bus *bus) 1029void pcibios_setup_bus_devices(struct pci_bus *bus)
1019{ 1030{
1020 struct pci_dev *dev; 1031 struct pci_dev *dev;
@@ -1469,10 +1480,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
1469 if (ppc_md.pcibios_enable_device_hook(dev)) 1480 if (ppc_md.pcibios_enable_device_hook(dev))
1470 return -EINVAL; 1481 return -EINVAL;
1471 1482
1472 /* avoid pcie irq fix up impact on cardbus */
1473 if (dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
1474 pcibios_setup_device(dev);
1475
1476 return pci_enable_resources(dev, mask); 1483 return pci_enable_resources(dev, mask);
1477} 1484}
1478 1485
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index 5a4c87903057..5ce3ba7ad137 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -294,8 +294,6 @@ void __init eeh_addr_cache_build(void)
294 spin_lock_init(&pci_io_addr_cache_root.piar_lock); 294 spin_lock_init(&pci_io_addr_cache_root.piar_lock);
295 295
296 for_each_pci_dev(dev) { 296 for_each_pci_dev(dev) {
297 eeh_addr_cache_insert_dev(dev);
298
299 dn = pci_device_to_OF_node(dev); 297 dn = pci_device_to_OF_node(dev);
300 if (!dn) 298 if (!dn)
301 continue; 299 continue;
@@ -308,6 +306,8 @@ void __init eeh_addr_cache_build(void)
308 dev->dev.archdata.edev = edev; 306 dev->dev.archdata.edev = edev;
309 edev->pdev = dev; 307 edev->pdev = dev;
310 308
309 eeh_addr_cache_insert_dev(dev);
310
311 eeh_sysfs_add_device(dev); 311 eeh_sysfs_add_device(dev);
312 } 312 }
313 313
diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c
index fe43d1aa2cf1..9d4a9e8562b2 100644
--- a/arch/powerpc/platforms/pseries/eeh_pe.c
+++ b/arch/powerpc/platforms/pseries/eeh_pe.c
@@ -639,7 +639,8 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
639 639
640 if (pe->type & EEH_PE_PHB) { 640 if (pe->type & EEH_PE_PHB) {
641 bus = pe->phb->bus; 641 bus = pe->phb->bus;
642 } else if (pe->type & EEH_PE_BUS) { 642 } else if (pe->type & EEH_PE_BUS ||
643 pe->type & EEH_PE_DEVICE) {
643 edev = list_first_entry(&pe->edevs, struct eeh_dev, list); 644 edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
644 pdev = eeh_dev_to_pci_dev(edev); 645 pdev = eeh_dev_to_pci_dev(edev);
645 if (pdev) 646 if (pdev)
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 028ac1f71b51..46ac1ddea683 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -97,22 +97,14 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
97 return indirect_read_config(bus, devfn, offset, len, val); 97 return indirect_read_config(bus, devfn, offset, len, val);
98} 98}
99 99
100static struct pci_ops fsl_indirect_pci_ops = 100#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
101
102static struct pci_ops fsl_indirect_pcie_ops =
101{ 103{
102 .read = fsl_indirect_read_config, 104 .read = fsl_indirect_read_config,
103 .write = indirect_write_config, 105 .write = indirect_write_config,
104}; 106};
105 107
106static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
107 resource_size_t cfg_addr,
108 resource_size_t cfg_data, u32 flags)
109{
110 setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
111 hose->ops = &fsl_indirect_pci_ops;
112}
113
114#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
115
116#define MAX_PHYS_ADDR_BITS 40 108#define MAX_PHYS_ADDR_BITS 40
117static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS; 109static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
118 110
@@ -504,13 +496,15 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
504 if (!hose->private_data) 496 if (!hose->private_data)
505 goto no_bridge; 497 goto no_bridge;
506 498
507 fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4, 499 setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
508 PPC_INDIRECT_TYPE_BIG_ENDIAN); 500 PPC_INDIRECT_TYPE_BIG_ENDIAN);
509 501
510 if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) 502 if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
511 hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK; 503 hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
512 504
513 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { 505 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
506 /* use fsl_indirect_read_config for PCIe */
507 hose->ops = &fsl_indirect_pcie_ops;
514 /* For PCIE read HEADER_TYPE to identify controler mode */ 508 /* For PCIE read HEADER_TYPE to identify controler mode */
515 early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type); 509 early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
516 if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) 510 if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
@@ -814,8 +808,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
814 if (ret) 808 if (ret)
815 goto err0; 809 goto err0;
816 } else { 810 } else {
817 fsl_setup_indirect_pci(hose, rsrc_cfg.start, 811 setup_indirect_pci(hose, rsrc_cfg.start,
818 rsrc_cfg.start + 4, 0); 812 rsrc_cfg.start + 4, 0);
819 } 813 }
820 814
821 printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. " 815 printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 886ac7d4937a..2f8c1abeb086 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -50,9 +50,10 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
50{ 50{
51 struct dma_map_ops *dma_ops = get_dma_ops(dev); 51 struct dma_map_ops *dma_ops = get_dma_ops(dev);
52 52
53 debug_dma_mapping_error(dev, dma_addr);
53 if (dma_ops->mapping_error) 54 if (dma_ops->mapping_error)
54 return dma_ops->mapping_error(dev, dma_addr); 55 return dma_ops->mapping_error(dev, dma_addr);
55 return (dma_addr == 0UL); 56 return (dma_addr == DMA_ERROR_CODE);
56} 57}
57 58
58static inline void *dma_alloc_coherent(struct device *dev, size_t size, 59static inline void *dma_alloc_coherent(struct device *dev, size_t size,
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index d8a6a385d048..feb719d3c851 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -754,9 +754,9 @@ static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
754 .write = reipl_fcp_scpdata_write, 754 .write = reipl_fcp_scpdata_write,
755}; 755};
756 756
757DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", 757DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
758 reipl_block_fcp->ipl_info.fcp.wwpn); 758 reipl_block_fcp->ipl_info.fcp.wwpn);
759DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", 759DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
760 reipl_block_fcp->ipl_info.fcp.lun); 760 reipl_block_fcp->ipl_info.fcp.lun);
761DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n", 761DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
762 reipl_block_fcp->ipl_info.fcp.bootprog); 762 reipl_block_fcp->ipl_info.fcp.bootprog);
@@ -1323,9 +1323,9 @@ static struct shutdown_action __refdata reipl_action = {
1323 1323
1324/* FCP dump device attributes */ 1324/* FCP dump device attributes */
1325 1325
1326DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n", 1326DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
1327 dump_block_fcp->ipl_info.fcp.wwpn); 1327 dump_block_fcp->ipl_info.fcp.wwpn);
1328DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n", 1328DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
1329 dump_block_fcp->ipl_info.fcp.lun); 1329 dump_block_fcp->ipl_info.fcp.lun);
1330DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n", 1330DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
1331 dump_block_fcp->ipl_info.fcp.bootprog); 1331 dump_block_fcp->ipl_info.fcp.bootprog);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 408e866ae548..dd3c1994b8bd 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -312,6 +312,7 @@ void measurement_alert_subclass_unregister(void)
312} 312}
313EXPORT_SYMBOL(measurement_alert_subclass_unregister); 313EXPORT_SYMBOL(measurement_alert_subclass_unregister);
314 314
315#ifdef CONFIG_SMP
315void synchronize_irq(unsigned int irq) 316void synchronize_irq(unsigned int irq)
316{ 317{
317 /* 318 /*
@@ -320,6 +321,7 @@ void synchronize_irq(unsigned int irq)
320 */ 321 */
321} 322}
322EXPORT_SYMBOL_GPL(synchronize_irq); 323EXPORT_SYMBOL_GPL(synchronize_irq);
324#endif
323 325
324#ifndef CONFIG_PCI 326#ifndef CONFIG_PCI
325 327
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 3cbd3b8bf311..cca388253a39 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -123,7 +123,8 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
123 continue; 123 continue;
124 } else if ((addr <= chunk->addr) && 124 } else if ((addr <= chunk->addr) &&
125 (addr + size >= chunk->addr + chunk->size)) { 125 (addr + size >= chunk->addr + chunk->size)) {
126 memset(chunk, 0 , sizeof(*chunk)); 126 memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
127 memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
127 } else if (addr + size < chunk->addr + chunk->size) { 128 } else if (addr + size < chunk->addr + chunk->size) {
128 chunk->size = chunk->addr + chunk->size - addr - size; 129 chunk->size = chunk->addr + chunk->size - addr - size;
129 chunk->addr = addr + size; 130 chunk->addr = addr + size;
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 9895a9a41380..211bce445522 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -365,10 +365,14 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
365 return insn.length; 365 return insn.length;
366} 366}
367 367
368static void __kprobes arch_copy_kprobe(struct kprobe *p) 368static int __kprobes arch_copy_kprobe(struct kprobe *p)
369{ 369{
370 int ret;
371
370 /* Copy an instruction with recovering if other optprobe modifies it.*/ 372 /* Copy an instruction with recovering if other optprobe modifies it.*/
371 __copy_instruction(p->ainsn.insn, p->addr); 373 ret = __copy_instruction(p->ainsn.insn, p->addr);
374 if (!ret)
375 return -EINVAL;
372 376
373 /* 377 /*
374 * __copy_instruction can modify the displacement of the instruction, 378 * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
384 388
385 /* Also, displacement change doesn't affect the first byte */ 389 /* Also, displacement change doesn't affect the first byte */
386 p->opcode = p->ainsn.insn[0]; 390 p->opcode = p->ainsn.insn[0];
391
392 return 0;
387} 393}
388 394
389int __kprobes arch_prepare_kprobe(struct kprobe *p) 395int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
397 p->ainsn.insn = get_insn_slot(); 403 p->ainsn.insn = get_insn_slot();
398 if (!p->ainsn.insn) 404 if (!p->ainsn.insn)
399 return -ENOMEM; 405 return -ENOMEM;
400 arch_copy_kprobe(p); 406
401 return 0; 407 return arch_copy_kprobe(p);
402} 408}
403 409
404void __kprobes arch_arm_kprobe(struct kprobe *p) 410void __kprobes arch_arm_kprobe(struct kprobe *p)
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 769219b29309..76fc0b23fc6c 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -45,10 +45,9 @@ struct cryptomgr_param {
45 } nu32; 45 } nu32;
46 } attrs[CRYPTO_MAX_ATTRS]; 46 } attrs[CRYPTO_MAX_ATTRS];
47 47
48 char larval[CRYPTO_MAX_ALG_NAME];
49 char template[CRYPTO_MAX_ALG_NAME]; 48 char template[CRYPTO_MAX_ALG_NAME];
50 49
51 struct completion *completion; 50 struct crypto_larval *larval;
52 51
53 u32 otype; 52 u32 otype;
54 u32 omask; 53 u32 omask;
@@ -87,7 +86,8 @@ static int cryptomgr_probe(void *data)
87 crypto_tmpl_put(tmpl); 86 crypto_tmpl_put(tmpl);
88 87
89out: 88out:
90 complete_all(param->completion); 89 complete_all(&param->larval->completion);
90 crypto_alg_put(&param->larval->alg);
91 kfree(param); 91 kfree(param);
92 module_put_and_exit(0); 92 module_put_and_exit(0);
93} 93}
@@ -187,18 +187,19 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
187 param->otype = larval->alg.cra_flags; 187 param->otype = larval->alg.cra_flags;
188 param->omask = larval->mask; 188 param->omask = larval->mask;
189 189
190 memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); 190 crypto_alg_get(&larval->alg);
191 191 param->larval = larval;
192 param->completion = &larval->completion;
193 192
194 thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe"); 193 thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe");
195 if (IS_ERR(thread)) 194 if (IS_ERR(thread))
196 goto err_free_param; 195 goto err_put_larval;
197 196
198 wait_for_completion_interruptible(&larval->completion); 197 wait_for_completion_interruptible(&larval->completion);
199 198
200 return NOTIFY_STOP; 199 return NOTIFY_STOP;
201 200
201err_put_larval:
202 crypto_alg_put(&larval->alg);
202err_free_param: 203err_free_param:
203 kfree(param); 204 kfree(param);
204err_put_module: 205err_put_module:
diff --git a/crypto/api.c b/crypto/api.c
index 033a7147e5eb..3b6180336d3d 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -34,12 +34,6 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
34BLOCKING_NOTIFIER_HEAD(crypto_chain); 34BLOCKING_NOTIFIER_HEAD(crypto_chain);
35EXPORT_SYMBOL_GPL(crypto_chain); 35EXPORT_SYMBOL_GPL(crypto_chain);
36 36
37static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
38{
39 atomic_inc(&alg->cra_refcnt);
40 return alg;
41}
42
43struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) 37struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
44{ 38{
45 return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL; 39 return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
diff --git a/crypto/internal.h b/crypto/internal.h
index 9ebedae3fb54..bd39bfc92eab 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -103,6 +103,12 @@ int crypto_register_notifier(struct notifier_block *nb);
103int crypto_unregister_notifier(struct notifier_block *nb); 103int crypto_unregister_notifier(struct notifier_block *nb);
104int crypto_probing_notify(unsigned long val, void *v); 104int crypto_probing_notify(unsigned long val, void *v);
105 105
106static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
107{
108 atomic_inc(&alg->cra_refcnt);
109 return alg;
110}
111
106static inline void crypto_alg_put(struct crypto_alg *alg) 112static inline void crypto_alg_put(struct crypto_alg *alg)
107{ 113{
108 if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) 114 if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index ec117c6c996c..14de9f46972e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -66,20 +66,21 @@ struct dock_station {
66 spinlock_t dd_lock; 66 spinlock_t dd_lock;
67 struct mutex hp_lock; 67 struct mutex hp_lock;
68 struct list_head dependent_devices; 68 struct list_head dependent_devices;
69 struct list_head hotplug_devices;
70 69
71 struct list_head sibling; 70 struct list_head sibling;
72 struct platform_device *dock_device; 71 struct platform_device *dock_device;
73}; 72};
74static LIST_HEAD(dock_stations); 73static LIST_HEAD(dock_stations);
75static int dock_station_count; 74static int dock_station_count;
75static DEFINE_MUTEX(hotplug_lock);
76 76
77struct dock_dependent_device { 77struct dock_dependent_device {
78 struct list_head list; 78 struct list_head list;
79 struct list_head hotplug_list;
80 acpi_handle handle; 79 acpi_handle handle;
81 const struct acpi_dock_ops *ops; 80 const struct acpi_dock_ops *hp_ops;
82 void *context; 81 void *hp_context;
82 unsigned int hp_refcount;
83 void (*hp_release)(void *);
83}; 84};
84 85
85#define DOCK_DOCKING 0x00000001 86#define DOCK_DOCKING 0x00000001
@@ -111,7 +112,6 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
111 112
112 dd->handle = handle; 113 dd->handle = handle;
113 INIT_LIST_HEAD(&dd->list); 114 INIT_LIST_HEAD(&dd->list);
114 INIT_LIST_HEAD(&dd->hotplug_list);
115 115
116 spin_lock(&ds->dd_lock); 116 spin_lock(&ds->dd_lock);
117 list_add_tail(&dd->list, &ds->dependent_devices); 117 list_add_tail(&dd->list, &ds->dependent_devices);
@@ -121,35 +121,90 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
121} 121}
122 122
123/** 123/**
124 * dock_add_hotplug_device - associate a hotplug handler with the dock station 124 * dock_init_hotplug - Initialize a hotplug device on a docking station.
125 * @ds: The dock station 125 * @dd: Dock-dependent device.
126 * @dd: The dependent device struct 126 * @ops: Dock operations to attach to the dependent device.
127 * 127 * @context: Data to pass to the @ops callbacks and @release.
128 * Add the dependent device to the dock's hotplug device list 128 * @init: Optional initialization routine to run after setting up context.
129 * @release: Optional release routine to run on removal.
129 */ 130 */
130static void 131static int dock_init_hotplug(struct dock_dependent_device *dd,
131dock_add_hotplug_device(struct dock_station *ds, 132 const struct acpi_dock_ops *ops, void *context,
132 struct dock_dependent_device *dd) 133 void (*init)(void *), void (*release)(void *))
133{ 134{
134 mutex_lock(&ds->hp_lock); 135 int ret = 0;
135 list_add_tail(&dd->hotplug_list, &ds->hotplug_devices); 136
136 mutex_unlock(&ds->hp_lock); 137 mutex_lock(&hotplug_lock);
138
139 if (dd->hp_context) {
140 ret = -EEXIST;
141 } else {
142 dd->hp_refcount = 1;
143 dd->hp_ops = ops;
144 dd->hp_context = context;
145 dd->hp_release = release;
146 }
147
148 if (!WARN_ON(ret) && init)
149 init(context);
150
151 mutex_unlock(&hotplug_lock);
152 return ret;
137} 153}
138 154
139/** 155/**
140 * dock_del_hotplug_device - remove a hotplug handler from the dock station 156 * dock_release_hotplug - Decrement hotplug reference counter of dock device.
141 * @ds: The dock station 157 * @dd: Dock-dependent device.
142 * @dd: the dependent device struct
143 * 158 *
144 * Delete the dependent device from the dock's hotplug device list 159 * Decrement the reference counter of @dd and if 0, detach its hotplug
160 * operations from it, reset its context pointer and run the optional release
161 * routine if present.
145 */ 162 */
146static void 163static void dock_release_hotplug(struct dock_dependent_device *dd)
147dock_del_hotplug_device(struct dock_station *ds,
148 struct dock_dependent_device *dd)
149{ 164{
150 mutex_lock(&ds->hp_lock); 165 void (*release)(void *) = NULL;
151 list_del(&dd->hotplug_list); 166 void *context = NULL;
152 mutex_unlock(&ds->hp_lock); 167
168 mutex_lock(&hotplug_lock);
169
170 if (dd->hp_context && !--dd->hp_refcount) {
171 dd->hp_ops = NULL;
172 context = dd->hp_context;
173 dd->hp_context = NULL;
174 release = dd->hp_release;
175 dd->hp_release = NULL;
176 }
177
178 if (release && context)
179 release(context);
180
181 mutex_unlock(&hotplug_lock);
182}
183
184static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
185 bool uevent)
186{
187 acpi_notify_handler cb = NULL;
188 bool run = false;
189
190 mutex_lock(&hotplug_lock);
191
192 if (dd->hp_context) {
193 run = true;
194 dd->hp_refcount++;
195 if (dd->hp_ops)
196 cb = uevent ? dd->hp_ops->uevent : dd->hp_ops->handler;
197 }
198
199 mutex_unlock(&hotplug_lock);
200
201 if (!run)
202 return;
203
204 if (cb)
205 cb(dd->handle, event, dd->hp_context);
206
207 dock_release_hotplug(dd);
153} 208}
154 209
155/** 210/**
@@ -360,9 +415,8 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
360 /* 415 /*
361 * First call driver specific hotplug functions 416 * First call driver specific hotplug functions
362 */ 417 */
363 list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) 418 list_for_each_entry(dd, &ds->dependent_devices, list)
364 if (dd->ops && dd->ops->handler) 419 dock_hotplug_event(dd, event, false);
365 dd->ops->handler(dd->handle, event, dd->context);
366 420
367 /* 421 /*
368 * Now make sure that an acpi_device is created for each 422 * Now make sure that an acpi_device is created for each
@@ -398,9 +452,8 @@ static void dock_event(struct dock_station *ds, u32 event, int num)
398 if (num == DOCK_EVENT) 452 if (num == DOCK_EVENT)
399 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); 453 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
400 454
401 list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) 455 list_for_each_entry(dd, &ds->dependent_devices, list)
402 if (dd->ops && dd->ops->uevent) 456 dock_hotplug_event(dd, event, true);
403 dd->ops->uevent(dd->handle, event, dd->context);
404 457
405 if (num != DOCK_EVENT) 458 if (num != DOCK_EVENT)
406 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); 459 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
@@ -570,19 +623,24 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
570 * @handle: the handle of the device 623 * @handle: the handle of the device
571 * @ops: handlers to call after docking 624 * @ops: handlers to call after docking
572 * @context: device specific data 625 * @context: device specific data
626 * @init: Optional initialization routine to run after registration
627 * @release: Optional release routine to run on unregistration
573 * 628 *
574 * If a driver would like to perform a hotplug operation after a dock 629 * If a driver would like to perform a hotplug operation after a dock
575 * event, they can register an acpi_notifiy_handler to be called by 630 * event, they can register an acpi_notifiy_handler to be called by
576 * the dock driver after _DCK is executed. 631 * the dock driver after _DCK is executed.
577 */ 632 */
578int 633int register_hotplug_dock_device(acpi_handle handle,
579register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops, 634 const struct acpi_dock_ops *ops, void *context,
580 void *context) 635 void (*init)(void *), void (*release)(void *))
581{ 636{
582 struct dock_dependent_device *dd; 637 struct dock_dependent_device *dd;
583 struct dock_station *dock_station; 638 struct dock_station *dock_station;
584 int ret = -EINVAL; 639 int ret = -EINVAL;
585 640
641 if (WARN_ON(!context))
642 return -EINVAL;
643
586 if (!dock_station_count) 644 if (!dock_station_count)
587 return -ENODEV; 645 return -ENODEV;
588 646
@@ -597,12 +655,8 @@ register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops
597 * ops 655 * ops
598 */ 656 */
599 dd = find_dock_dependent_device(dock_station, handle); 657 dd = find_dock_dependent_device(dock_station, handle);
600 if (dd) { 658 if (dd && !dock_init_hotplug(dd, ops, context, init, release))
601 dd->ops = ops;
602 dd->context = context;
603 dock_add_hotplug_device(dock_station, dd);
604 ret = 0; 659 ret = 0;
605 }
606 } 660 }
607 661
608 return ret; 662 return ret;
@@ -624,7 +678,7 @@ void unregister_hotplug_dock_device(acpi_handle handle)
624 list_for_each_entry(dock_station, &dock_stations, sibling) { 678 list_for_each_entry(dock_station, &dock_stations, sibling) {
625 dd = find_dock_dependent_device(dock_station, handle); 679 dd = find_dock_dependent_device(dock_station, handle);
626 if (dd) 680 if (dd)
627 dock_del_hotplug_device(dock_station, dd); 681 dock_release_hotplug(dd);
628 } 682 }
629} 683}
630EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device); 684EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
@@ -953,7 +1007,6 @@ static int __init dock_add(acpi_handle handle)
953 mutex_init(&dock_station->hp_lock); 1007 mutex_init(&dock_station->hp_lock);
954 spin_lock_init(&dock_station->dd_lock); 1008 spin_lock_init(&dock_station->dd_lock);
955 INIT_LIST_HEAD(&dock_station->sibling); 1009 INIT_LIST_HEAD(&dock_station->sibling);
956 INIT_LIST_HEAD(&dock_station->hotplug_devices);
957 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); 1010 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
958 INIT_LIST_HEAD(&dock_station->dependent_devices); 1011 INIT_LIST_HEAD(&dock_station->dependent_devices);
959 1012
@@ -994,30 +1047,6 @@ err_unregister:
994} 1047}
995 1048
996/** 1049/**
997 * dock_remove - free up resources related to the dock station
998 */
999static int dock_remove(struct dock_station *ds)
1000{
1001 struct dock_dependent_device *dd, *tmp;
1002 struct platform_device *dock_device = ds->dock_device;
1003
1004 if (!dock_station_count)
1005 return 0;
1006
1007 /* remove dependent devices */
1008 list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, list)
1009 kfree(dd);
1010
1011 list_del(&ds->sibling);
1012
1013 /* cleanup sysfs */
1014 sysfs_remove_group(&dock_device->dev.kobj, &dock_attribute_group);
1015 platform_device_unregister(dock_device);
1016
1017 return 0;
1018}
1019
1020/**
1021 * find_dock_and_bay - look for dock stations and bays 1050 * find_dock_and_bay - look for dock stations and bays
1022 * @handle: acpi handle of a device 1051 * @handle: acpi handle of a device
1023 * @lvl: unused 1052 * @lvl: unused
@@ -1035,7 +1064,7 @@ find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
1035 return AE_OK; 1064 return AE_OK;
1036} 1065}
1037 1066
1038static int __init dock_init(void) 1067int __init acpi_dock_init(void)
1039{ 1068{
1040 if (acpi_disabled) 1069 if (acpi_disabled)
1041 return 0; 1070 return 0;
@@ -1054,19 +1083,3 @@ static int __init dock_init(void)
1054 ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count); 1083 ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
1055 return 0; 1084 return 0;
1056} 1085}
1057
1058static void __exit dock_exit(void)
1059{
1060 struct dock_station *tmp, *dock_station;
1061
1062 unregister_acpi_bus_notifier(&dock_acpi_notifier);
1063 list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
1064 dock_remove(dock_station);
1065}
1066
1067/*
1068 * Must be called before drivers of devices in dock, otherwise we can't know
1069 * which devices are in a dock
1070 */
1071subsys_initcall(dock_init);
1072module_exit(dock_exit);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 297cbf456f86..c610a76d92c4 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -40,6 +40,11 @@ void acpi_container_init(void);
40#else 40#else
41static inline void acpi_container_init(void) {} 41static inline void acpi_container_init(void) {}
42#endif 42#endif
43#ifdef CONFIG_ACPI_DOCK
44void acpi_dock_init(void);
45#else
46static inline void acpi_dock_init(void) {}
47#endif
43#ifdef CONFIG_ACPI_HOTPLUG_MEMORY 48#ifdef CONFIG_ACPI_HOTPLUG_MEMORY
44void acpi_memory_hotplug_init(void); 49void acpi_memory_hotplug_init(void);
45#else 50#else
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b14ac46948c9..27da63061e11 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2042,6 +2042,7 @@ int __init acpi_scan_init(void)
2042 acpi_lpss_init(); 2042 acpi_lpss_init();
2043 acpi_container_init(); 2043 acpi_container_init();
2044 acpi_memory_hotplug_init(); 2044 acpi_memory_hotplug_init();
2045 acpi_dock_init();
2045 2046
2046 mutex_lock(&acpi_scan_lock); 2047 mutex_lock(&acpi_scan_lock);
2047 /* 2048 /*
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 87f2f395d79a..cf4e7020adac 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -156,8 +156,10 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
156 156
157 spin_unlock_irqrestore(ap->lock, flags); 157 spin_unlock_irqrestore(ap->lock, flags);
158 158
159 if (wait) 159 if (wait) {
160 ata_port_wait_eh(ap); 160 ata_port_wait_eh(ap);
161 flush_work(&ap->hotplug_task.work);
162 }
161} 163}
162 164
163static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data) 165static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
@@ -214,6 +216,39 @@ static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
214 .uevent = ata_acpi_ap_uevent, 216 .uevent = ata_acpi_ap_uevent,
215}; 217};
216 218
219void ata_acpi_hotplug_init(struct ata_host *host)
220{
221 int i;
222
223 for (i = 0; i < host->n_ports; i++) {
224 struct ata_port *ap = host->ports[i];
225 acpi_handle handle;
226 struct ata_device *dev;
227
228 if (!ap)
229 continue;
230
231 handle = ata_ap_acpi_handle(ap);
232 if (handle) {
233 /* we might be on a docking station */
234 register_hotplug_dock_device(handle,
235 &ata_acpi_ap_dock_ops, ap,
236 NULL, NULL);
237 }
238
239 ata_for_each_dev(dev, &ap->link, ALL) {
240 handle = ata_dev_acpi_handle(dev);
241 if (!handle)
242 continue;
243
244 /* we might be on a docking station */
245 register_hotplug_dock_device(handle,
246 &ata_acpi_dev_dock_ops,
247 dev, NULL, NULL);
248 }
249 }
250}
251
217/** 252/**
218 * ata_acpi_dissociate - dissociate ATA host from ACPI objects 253 * ata_acpi_dissociate - dissociate ATA host from ACPI objects
219 * @host: target ATA host 254 * @host: target ATA host
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f2184276539d..adf002a3c584 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6148,6 +6148,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6148 if (rc) 6148 if (rc)
6149 goto err_tadd; 6149 goto err_tadd;
6150 6150
6151 ata_acpi_hotplug_init(host);
6152
6151 /* set cable, sata_spd_limit and report */ 6153 /* set cable, sata_spd_limit and report */
6152 for (i = 0; i < host->n_ports; i++) { 6154 for (i = 0; i < host->n_ports; i++) {
6153 struct ata_port *ap = host->ports[i]; 6155 struct ata_port *ap = host->ports[i];
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index c949dd311b2e..577d902bc4de 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -122,6 +122,7 @@ extern int ata_acpi_register(void);
122extern void ata_acpi_unregister(void); 122extern void ata_acpi_unregister(void);
123extern void ata_acpi_bind(struct ata_device *dev); 123extern void ata_acpi_bind(struct ata_device *dev);
124extern void ata_acpi_unbind(struct ata_device *dev); 124extern void ata_acpi_unbind(struct ata_device *dev);
125extern void ata_acpi_hotplug_init(struct ata_host *host);
125#else 126#else
126static inline void ata_acpi_dissociate(struct ata_host *host) { } 127static inline void ata_acpi_dissociate(struct ata_host *host) { }
127static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } 128static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -134,6 +135,7 @@ static inline int ata_acpi_register(void) { return 0; }
134static inline void ata_acpi_unregister(void) { } 135static inline void ata_acpi_unregister(void) { }
135static inline void ata_acpi_bind(struct ata_device *dev) { } 136static inline void ata_acpi_bind(struct ata_device *dev) { }
136static inline void ata_acpi_unbind(struct ata_device *dev) { } 137static inline void ata_acpi_unbind(struct ata_device *dev) { }
138static inline void ata_acpi_hotplug_init(struct ata_host *host) {}
137#endif 139#endif
138 140
139/* libata-scsi.c */ 141/* libata-scsi.c */
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 49394e3f31bc..aff789d6fccd 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2252,13 +2252,17 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
2252 obj_request->pages, length, 2252 obj_request->pages, length,
2253 offset & ~PAGE_MASK, false, false); 2253 offset & ~PAGE_MASK, false, false);
2254 2254
2255 /*
2256 * set obj_request->img_request before formatting
2257 * the osd_request so that it gets the right snapc
2258 */
2259 rbd_img_obj_request_add(img_request, obj_request);
2255 if (write_request) 2260 if (write_request)
2256 rbd_osd_req_format_write(obj_request); 2261 rbd_osd_req_format_write(obj_request);
2257 else 2262 else
2258 rbd_osd_req_format_read(obj_request); 2263 rbd_osd_req_format_read(obj_request);
2259 2264
2260 obj_request->img_offset = img_offset; 2265 obj_request->img_offset = img_offset;
2261 rbd_img_obj_request_add(img_request, obj_request);
2262 2266
2263 img_offset += length; 2267 img_offset += length;
2264 resid -= length; 2268 resid -= length;
@@ -4243,6 +4247,10 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4243 4247
4244 down_write(&rbd_dev->header_rwsem); 4248 down_write(&rbd_dev->header_rwsem);
4245 4249
4250 ret = rbd_dev_v2_image_size(rbd_dev);
4251 if (ret)
4252 goto out;
4253
4246 if (first_time) { 4254 if (first_time) {
4247 ret = rbd_dev_v2_header_onetime(rbd_dev); 4255 ret = rbd_dev_v2_header_onetime(rbd_dev);
4248 if (ret) 4256 if (ret)
@@ -4276,10 +4284,6 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4276 "is EXPERIMENTAL!"); 4284 "is EXPERIMENTAL!");
4277 } 4285 }
4278 4286
4279 ret = rbd_dev_v2_image_size(rbd_dev);
4280 if (ret)
4281 goto out;
4282
4283 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) 4287 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4284 if (rbd_dev->mapping.size != rbd_dev->header.image_size) 4288 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4285 rbd_dev->mapping.size = rbd_dev->header.image_size; 4289 rbd_dev->mapping.size = rbd_dev->header.image_size;
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 3a4343b3bd6d..9a9f51875df5 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -498,6 +498,10 @@ static int btmrvl_service_main_thread(void *data)
 		add_wait_queue(&thread->wait_q, &wait);
 
 		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop()) {
+			BT_DBG("main_thread: break from main thread");
+			break;
+		}
 
 		if (adapter->wakeup_tries ||
 		    ((!adapter->int_count) &&
@@ -513,11 +517,6 @@ static int btmrvl_service_main_thread(void *data)
 
 		BT_DBG("main_thread woke up");
 
-		if (kthread_should_stop()) {
-			BT_DBG("main_thread: break from main thread");
-			break;
-		}
-
 		spin_lock_irqsave(&priv->driver_lock, flags);
 		if (adapter->int_count) {
 			adapter->int_count = 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b9bb5def6f1..93eb5cbcc1f6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,6 +47,8 @@ static struct od_ops od_ops;
 static struct cpufreq_governor cpufreq_gov_ondemand;
 #endif
 
+static unsigned int default_powersave_bias;
+
 static void ondemand_powersave_bias_init_cpu(int cpu)
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
@@ -543,7 +545,7 @@ static int od_init(struct dbs_data *dbs_data)
 
 	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
 	tuners->ignore_nice = 0;
-	tuners->powersave_bias = 0;
+	tuners->powersave_bias = default_powersave_bias;
 	tuners->io_is_busy = should_io_be_busy();
 
 	dbs_data->tuners = tuners;
@@ -585,6 +587,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
 	unsigned int cpu;
 	cpumask_t done;
 
+	default_powersave_bias = powersave_bias;
 	cpumask_clear(&done);
 
 	get_online_cpus();
@@ -593,11 +596,17 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
 			continue;
 
 		policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
-		dbs_data = policy->governor_data;
-		od_tuners = dbs_data->tuners;
-		od_tuners->powersave_bias = powersave_bias;
+		if (!policy)
+			continue;
 
 		cpumask_or(&done, &done, policy->cpus);
+
+		if (policy->governor != &cpufreq_gov_ondemand)
+			continue;
+
+		dbs_data = policy->governor_data;
+		od_tuners = dbs_data->tuners;
+		od_tuners->powersave_bias = default_powersave_bias;
 	}
 	put_online_cpus();
 }
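The ondemand change caches the last value passed to od_set_powersave_bias() in default_powersave_bias, so CPUs whose policy is not yet governed by ondemand pick the value up later in od_init(), while CPUs already running the governor get it immediately. A small standalone sketch of that pattern (cache a requested default, apply it now where possible and at init time otherwise); the names are illustrative, not the kernel's:

```
#include <stdio.h>

/* stand-in for per-policy governor state */
struct policy { int has_ondemand; unsigned int powersave_bias; };

static unsigned int default_powersave_bias;

/* runs when a policy later switches to the governor (cf. od_init()) */
static void init_policy(struct policy *p)
{
	p->has_ondemand = 1;
	p->powersave_bias = default_powersave_bias;
}

/* runs for policies already using the governor (cf. od_set_powersave_bias()) */
static void set_powersave_bias(struct policy *pols, int n, unsigned int bias)
{
	default_powersave_bias = bias;
	for (int i = 0; i < n; i++) {
		if (!pols[i].has_ondemand)
			continue;	/* picked up later by init_policy() */
		pols[i].powersave_bias = bias;
	}
}

int main(void)
{
	struct policy pols[2] = { { .has_ondemand = 1 } };

	set_powersave_bias(pols, 2, 100);	/* pols[1] not managed yet */
	init_policy(&pols[1]);			/* inherits the cached default */
	printf("%u %u\n", pols[0].powersave_bias, pols[1].powersave_bias);
	return 0;
}
```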
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index d3f7d2db870f..4a430360af5a 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1094,6 +1094,9 @@ static int omap_gpio_probe(struct platform_device *pdev)
 	const struct omap_gpio_platform_data *pdata;
 	struct resource *res;
 	struct gpio_bank *bank;
+#ifdef CONFIG_ARCH_OMAP1
+	int irq_base;
+#endif
 
 	match = of_match_device(of_match_ptr(omap_gpio_match), dev);
 
@@ -1135,11 +1138,28 @@ static int omap_gpio_probe(struct platform_device *pdev)
 			pdata->get_context_loss_count;
 	}
 
+#ifdef CONFIG_ARCH_OMAP1
+	/*
+	 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
+	 * irq_alloc_descs() and irq_domain_add_legacy() and just use a
+	 * linear IRQ domain mapping for all OMAP platforms.
+	 */
+	irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
+	if (irq_base < 0) {
+		dev_err(dev, "Couldn't allocate IRQ numbers\n");
+		return -ENODEV;
+	}
 
+	bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
+					     0, &irq_domain_simple_ops, NULL);
+#else
 	bank->domain = irq_domain_add_linear(node, bank->width,
 					     &irq_domain_simple_ops, NULL);
-	if (!bank->domain)
+#endif
+	if (!bank->domain) {
+		dev_err(dev, "Couldn't register an IRQ domain\n");
 		return -ENODEV;
+	}
 
 	if (bank->regs->set_dataout && bank->regs->clr_dataout)
 		bank->set_dataout = _set_gpio_dataout_reg;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b9d00dcf9a2d..9669a0b8b440 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1697,6 +1697,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				struct drm_gem_object *gem_obj, int flags);
 
+void i915_gem_restore_fences(struct drm_device *dev);
+
 /* i915_gem_context.c */
 void i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 970ad17c99ab..9e35dafc5807 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1801,7 +1801,14 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
 			gfp &= ~(__GFP_IO | __GFP_WAIT);
 		}
-
+#ifdef CONFIG_SWIOTLB
+		if (swiotlb_nr_tbl()) {
+			st->nents++;
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+			sg = sg_next(sg);
+			continue;
+		}
+#endif
 		if (!i || page_to_pfn(page) != last_pfn + 1) {
 			if (i)
 				sg = sg_next(sg);
@@ -1812,8 +1819,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 		}
 		last_pfn = page_to_pfn(page);
 	}
-
-	sg_mark_end(sg);
+#ifdef CONFIG_SWIOTLB
+	if (!swiotlb_nr_tbl())
+#endif
+		sg_mark_end(sg);
 	obj->pages = st;
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -2117,25 +2126,15 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 	}
 }
 
-static void i915_gem_reset_fences(struct drm_device *dev)
+void i915_gem_restore_fences(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-
-		if (reg->obj)
-			i915_gem_object_fence_lost(reg->obj);
-
-		i915_gem_write_fence(dev, i, NULL);
-
-		reg->pin_count = 0;
-		reg->obj = NULL;
-		INIT_LIST_HEAD(&reg->lru_list);
+		i915_gem_write_fence(dev, i, reg->obj);
 	}
-
-	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -2158,8 +2157,7 @@ void i915_gem_reset(struct drm_device *dev)
 		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 	}
 
-	/* The fence registers are invalidated so clear them out */
-	i915_gem_reset_fences(dev);
+	i915_gem_restore_fences(dev);
 }
 
 /**
@@ -3865,8 +3863,6 @@ i915_gem_idle(struct drm_device *dev)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_gem_evict_everything(dev);
 
-	i915_gem_reset_fences(dev);
-
 	/* Hack! Don't let anybody do execbuf while we don't control the chip.
 	 * We need to replace this with a semaphore, or something.
 	 * And not confound mm.suspended!
@@ -4193,7 +4189,8 @@ i915_gem_load(struct drm_device *dev)
 	dev_priv->num_fence_regs = 8;
 
 	/* Initialize fence registers to zero */
-	i915_gem_reset_fences(dev);
+	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	i915_gem_restore_fences(dev);
 
 	i915_gem_detect_bit_6_swizzle(dev);
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
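For context on the sg-table change above: without SWIOTLB the loop coalesces physically contiguous pages into one scatterlist segment (the page_to_pfn(page) != last_pfn + 1 test), while with SWIOTLB active it falls back to one page per segment so bounce buffering never sees oversized segments. A standalone sketch of the coalescing test alone, using plain integers in place of page frame numbers (hypothetical names, not the driver's):

```
#include <stdio.h>

/* count how many contiguous runs (i.e. scatterlist segments) a list of
 * page frame numbers would collapse into */
static int count_segments(const unsigned long *pfn, int n)
{
	int segs = 0;
	unsigned long last_pfn = 0;

	for (int i = 0; i < n; i++) {
		/* start a new segment on the first page or on a discontinuity */
		if (!i || pfn[i] != last_pfn + 1)
			segs++;
		last_pfn = pfn[i];
	}
	return segs;
}

int main(void)
{
	unsigned long pfns[] = { 100, 101, 102, 200, 201, 300 };

	/* 3 runs: 100-102, 200-201, 300 */
	printf("%d\n", count_segments(pfns, 6));
	return 0;
}
```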
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 41f0fdecfbdc..369b3d8776ab 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -384,6 +384,7 @@ int i915_restore_state(struct drm_device *dev)
 
 	mutex_lock(&dev->struct_mutex);
 
+	i915_gem_restore_fences(dev);
 	i915_restore_display(dev);
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index a4b71b25fa53..a30f29425c21 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -171,6 +171,11 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
 	if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
 		return -EINVAL;
 
+	if (!access_ok(VERIFY_READ,
+		       (void *)(unsigned long)user_cmd.command,
+		       user_cmd.command_size))
+		return -EFAULT;
+
 	ret = qxl_alloc_release_reserved(qdev,
 					 sizeof(union qxl_release_info) +
 					 user_cmd.command_size,
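The qxl fix validates the user-supplied command pointer and size with access_ok() before the release buffer is allocated and the command data is used. As a rough userspace illustration of the same defensive pattern, validate an untrusted (offset, length) pair against the buffer you actually own before copying anything; this is only an analogy, since access_ok() itself has no userspace equivalent:

```
#include <stdio.h>
#include <string.h>

/* reject an untrusted (offset, len) request that would read past the end
 * of the backing buffer before doing any work with it */
static int copy_checked(char *dst, const char *src, size_t src_size,
			size_t offset, size_t len)
{
	if (offset > src_size || len > src_size - offset)
		return -1;	/* cf. returning -EFAULT before allocating */
	memcpy(dst, src + offset, len);
	return 0;
}

int main(void)
{
	char src[16] = "abcdefghijklmno";
	char dst[16];

	printf("%d\n", copy_checked(dst, src, sizeof(src), 4, 4));	/* 0 */
	printf("%d\n", copy_checked(dst, src, sizeof(src), 12, 8));	/* -1 */
	return 0;
}
```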
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d6cbfe9df218..fa061d46527f 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -137,7 +137,7 @@ static const struct xpad_device {
 	{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
 	{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
-	{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 },
+	{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 62a2c0e4cc99..7ac9c9818d55 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -431,6 +431,7 @@ config KEYBOARD_TEGRA
 
 config KEYBOARD_OPENCORES
 	tristate "OpenCores Keyboard Controller"
+	depends on HAS_IOMEM
 	help
 	  Say Y here if you want to use the OpenCores Keyboard Controller
 	  http://www.opencores.org/project,keyboardcontroller
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index aebfe3ecb945..1bda828f4b55 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -205,6 +205,7 @@ config SERIO_XILINX_XPS_PS2
 
 config SERIO_ALTERA_PS2
 	tristate "Altera UP PS/2 controller"
+	depends on HAS_IOMEM
 	help
 	  Say Y here if you have Altera University Program PS/2 ports.
 
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 518282da6d85..384fbcd0cee0 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -363,6 +363,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
 	case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
 	case 0x160802: /* Cintiq 13HD Pro Pen */
 	case 0x180802: /* DTH2242 Pen */
+	case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
 		wacom->tool[idx] = BTN_TOOL_PEN;
 		break;
 
@@ -401,6 +402,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
 	case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
 	case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
 	case 0x18080a: /* DTH2242 Eraser */
+	case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
 		wacom->tool[idx] = BTN_TOOL_RUBBER;
 		break;
 
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 8e60437ac85b..ae89d2609ab0 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -116,6 +116,15 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd)
 	return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
 }
 
+static int cyttsp_handshake(struct cyttsp *ts)
+{
+	if (ts->pdata->use_hndshk)
+		return ttsp_send_command(ts,
+				ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
+
+	return 0;
+}
+
 static int cyttsp_load_bl_regs(struct cyttsp *ts)
 {
 	memset(&ts->bl_data, 0, sizeof(ts->bl_data));
@@ -133,7 +142,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts)
 	memcpy(bl_cmd, bl_command, sizeof(bl_command));
 	if (ts->pdata->bl_keys)
 		memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
-			ts->pdata->bl_keys, sizeof(bl_command));
+			ts->pdata->bl_keys, CY_NUM_BL_KEYS);
 
 	error = ttsp_write_block_data(ts, CY_REG_BASE,
 				      sizeof(bl_cmd), bl_cmd);
@@ -167,6 +176,10 @@ static int cyttsp_set_operational_mode(struct cyttsp *ts)
 	if (error)
 		return error;
 
+	error = cyttsp_handshake(ts);
+	if (error)
+		return error;
+
 	return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0;
 }
 
@@ -188,6 +201,10 @@ static int cyttsp_set_sysinfo_mode(struct cyttsp *ts)
 	if (error)
 		return error;
 
+	error = cyttsp_handshake(ts);
+	if (error)
+		return error;
+
 	if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl)
 		return -EIO;
 
@@ -344,12 +361,9 @@ static irqreturn_t cyttsp_irq(int irq, void *handle)
 		goto out;
 
 	/* provide flow control handshake */
-	if (ts->pdata->use_hndshk) {
-		error = ttsp_send_command(ts,
-				ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
-		if (error)
-			goto out;
-	}
+	error = cyttsp_handshake(ts);
+	if (error)
+		goto out;
 
 	if (unlikely(ts->state == CY_IDLE_STATE))
 		goto out;
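Two things happen in the cyttsp diff: the repeated handshake sequence is factored into cyttsp_handshake(), and the bootloader-key memcpy() now copies CY_NUM_BL_KEYS bytes instead of sizeof(bl_command), which over-read the key array and over-wrote the tail of the destination. A minimal sketch of that second bug class, with made-up sizes rather than the driver's real values:

```
#include <stdio.h>
#include <string.h>

#define NUM_KEYS 8				/* cf. CY_NUM_BL_KEYS */

static const char command[11] = { 0x00, 0x01, 0x3b };	/* cf. bl_command: header + key area */

int main(void)
{
	char cmd[sizeof(command)];
	char keys[NUM_KEYS] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	memcpy(cmd, command, sizeof(command));

	/* the buggy form copied sizeof(command) bytes here, over-reading the
	 * 8-byte keys[] source and over-writing the tail of cmd[] */
	memcpy(&cmd[sizeof(command) - NUM_KEYS], keys, NUM_KEYS);

	printf("%zu byte command assembled\n", sizeof(cmd));
	return 0;
}
```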
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
index 1aa3c6967e70..f1ebde369f86 100644
--- a/drivers/input/touchscreen/cyttsp_core.h
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -67,8 +67,8 @@ struct cyttsp_xydata {
 /* TTSP System Information interface definition */
 struct cyttsp_sysinfo_data {
 	u8 hst_mode;
-	u8 mfg_cmd;
 	u8 mfg_stat;
+	u8 mfg_cmd;
 	u8 cid[3];
 	u8 tt_undef1;
 	u8 uid[8];
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 721b9186a5d1..4b93ed4d5cd6 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -107,7 +107,7 @@ static struct mfd_cell tps6586x_cell[] = {
 		.name = "tps6586x-gpio",
 	},
 	{
-		.name = "tps6586x-pmic",
+		.name = "tps6586x-regulator",
 	},
 	{
 		.name = "tps6586x-rtc",
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 02d9ae7d527e..f97569613526 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2413,7 +2413,8 @@ static void bond_miimon_commit(struct bonding *bond)
 
 			pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
 				bond->dev->name, slave->dev->name,
-				slave->speed, slave->duplex ? "full" : "half");
+				slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
+				slave->duplex ? "full" : "half");
 
 			/* notify ad that the link status has changed */
 			if (bond->params.mode == BOND_MODE_8023AD)
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 6e15ef08f301..cbd388eea682 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
 	err = usb_8dev_cmd_version(priv, &version);
 	if (err) {
 		netdev_err(netdev, "can't get firmware version\n");
-		goto cleanup_cmd_msg_buffer;
+		goto cleanup_unregister_candev;
 	} else {
 		netdev_info(netdev,
 			    "firmware: %d.%d, hardware: %d.%d\n",
@@ -989,6 +989,9 @@ static int usb_8dev_probe(struct usb_interface *intf,
 
 	return 0;
 
+cleanup_unregister_candev:
+	unregister_netdev(priv->netdev);
+
 cleanup_cmd_msg_buffer:
 	kfree(priv->cmd_msg_buffer);
 
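The usb_8dev fix adds a cleanup_unregister_candev label so that a failure after the device has been registered also unwinds the registration, not just the buffer allocation. The usual shape of this goto-based error-unwind idiom, sketched with placeholder setup and teardown steps rather than the driver's real calls:

```
#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("setup %s\n", name);
	return fail ? -1 : 0;
}

/* unwind in reverse order of setup; later failures jump to labels that
 * undo everything acquired so far (cf. cleanup_unregister_candev) */
static int probe(int fail_late)
{
	if (step("buffer", 0))
		goto err;
	if (step("register", 0))
		goto err_free_buffer;
	if (step("query version", fail_late))
		goto err_unregister;
	return 0;

err_unregister:
	printf("unregister\n");
err_free_buffer:
	printf("free buffer\n");
err:
	return -1;
}

int main(void)
{
	int ret = probe(1);	/* fail at the last step to show the unwind */

	printf("probe returned %d\n", ret);
	return 0;
}
```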
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 36d6abd1cfff..ad6aa1e98348 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -67,4 +67,22 @@ config ATL1C
 	  To compile this driver as a module, choose M here. The module
 	  will be called atl1c.
 
+config ALX
+	tristate "Qualcomm Atheros AR816x/AR817x support"
+	depends on PCI
+	select CRC32
+	select NET_CORE
+	select MDIO
+	help
+	  This driver supports the Qualcomm Atheros L1F ethernet adapter,
+	  i.e. the following chipsets:
+
+	    1969:1091 - AR8161 Gigabit Ethernet
+	    1969:1090 - AR8162 Fast Ethernet
+	    1969:10A1 - AR8171 Gigabit Ethernet
+	    1969:10A0 - AR8172 Fast Ethernet
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called alx.
+
 endif # NET_VENDOR_ATHEROS
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
index e7e76fb576ff..5cf1c65bbce9 100644
--- a/drivers/net/ethernet/atheros/Makefile
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/
 obj-$(CONFIG_ATL2) += atlx/
 obj-$(CONFIG_ATL1E) += atl1e/
 obj-$(CONFIG_ATL1C) += atl1c/
+obj-$(CONFIG_ALX) += alx/
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
new file mode 100644
index 000000000000..5901fa407d52
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ALX) += alx.o
+alx-objs := main.o ethtool.o hw.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
new file mode 100644
index 000000000000..50b3ae2b143d
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#ifndef _ALX_H_
36#define _ALX_H_
37
38#include <linux/types.h>
39#include <linux/etherdevice.h>
40#include <linux/dma-mapping.h>
41#include <linux/spinlock.h>
42#include "hw.h"
43
44#define ALX_WATCHDOG_TIME (5 * HZ)
45
46struct alx_buffer {
47 struct sk_buff *skb;
48 DEFINE_DMA_UNMAP_ADDR(dma);
49 DEFINE_DMA_UNMAP_LEN(size);
50};
51
52struct alx_rx_queue {
53 struct alx_rrd *rrd;
54 dma_addr_t rrd_dma;
55
56 struct alx_rfd *rfd;
57 dma_addr_t rfd_dma;
58
59 struct alx_buffer *bufs;
60
61 u16 write_idx, read_idx;
62 u16 rrd_read_idx;
63};
64#define ALX_RX_ALLOC_THRESH 32
65
66struct alx_tx_queue {
67 struct alx_txd *tpd;
68 dma_addr_t tpd_dma;
69 struct alx_buffer *bufs;
70 u16 write_idx, read_idx;
71};
72
73#define ALX_DEFAULT_TX_WORK 128
74
75enum alx_device_quirks {
76 ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
77};
78
79struct alx_priv {
80 struct net_device *dev;
81
82 struct alx_hw hw;
83
84 /* all descriptor memory */
85 struct {
86 dma_addr_t dma;
87 void *virt;
88 int size;
89 } descmem;
90
91 /* protect int_mask updates */
92 spinlock_t irq_lock;
93 u32 int_mask;
94
95 int tx_ringsz;
96 int rx_ringsz;
97 int rxbuf_size;
98
99 struct napi_struct napi;
100 struct alx_tx_queue txq;
101 struct alx_rx_queue rxq;
102
103 struct work_struct link_check_wk;
104 struct work_struct reset_wk;
105
106 u16 msg_enable;
107
108 bool msi;
109};
110
111extern const struct ethtool_ops alx_ethtool_ops;
112extern const char alx_drv_name[];
113
114#endif
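alx_rx_queue and alx_tx_queue above track the descriptor rings with a write_idx/read_idx pair over a fixed-size array. A tiny standalone model of that index scheme, with illustrative names and sizes rather than the hardware layout:

```
#include <stdio.h>

#define RING_SIZE 8		/* stand-in for tx_ringsz/rx_ringsz */

struct ring {
	int buf[RING_SIZE];
	unsigned int write_idx, read_idx;	/* producer / consumer positions */
};

/* number of used slots; one slot is kept free to tell full from empty */
static unsigned int ring_used(const struct ring *r)
{
	return (r->write_idx - r->read_idx + RING_SIZE) % RING_SIZE;
}

static int ring_put(struct ring *r, int v)
{
	if (ring_used(r) == RING_SIZE - 1)
		return -1;			/* full */
	r->buf[r->write_idx] = v;
	r->write_idx = (r->write_idx + 1) % RING_SIZE;
	return 0;
}

static int ring_get(struct ring *r, int *v)
{
	if (r->read_idx == r->write_idx)
		return -1;			/* empty */
	*v = r->buf[r->read_idx];
	r->read_idx = (r->read_idx + 1) % RING_SIZE;
	return 0;
}

int main(void)
{
	struct ring r = { .write_idx = 0, .read_idx = 0 };
	int v;

	ring_put(&r, 42);
	ring_put(&r, 43);
	while (!ring_get(&r, &v))
		printf("%d\n", v);
	return 0;
}
```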
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
new file mode 100644
index 000000000000..6fa2aec2bc81
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -0,0 +1,272 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/ethtool.h>
41#include <linux/mdio.h>
42#include <linux/interrupt.h>
43#include <asm/byteorder.h>
44
45#include "alx.h"
46#include "reg.h"
47#include "hw.h"
48
49
50static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
51{
52 struct alx_priv *alx = netdev_priv(netdev);
53 struct alx_hw *hw = &alx->hw;
54
55 ecmd->supported = SUPPORTED_10baseT_Half |
56 SUPPORTED_10baseT_Full |
57 SUPPORTED_100baseT_Half |
58 SUPPORTED_100baseT_Full |
59 SUPPORTED_Autoneg |
60 SUPPORTED_TP |
61 SUPPORTED_Pause;
62 if (alx_hw_giga(hw))
63 ecmd->supported |= SUPPORTED_1000baseT_Full;
64
65 ecmd->advertising = ADVERTISED_TP;
66 if (hw->adv_cfg & ADVERTISED_Autoneg)
67 ecmd->advertising |= hw->adv_cfg;
68
69 ecmd->port = PORT_TP;
70 ecmd->phy_address = 0;
71 if (hw->adv_cfg & ADVERTISED_Autoneg)
72 ecmd->autoneg = AUTONEG_ENABLE;
73 else
74 ecmd->autoneg = AUTONEG_DISABLE;
75 ecmd->transceiver = XCVR_INTERNAL;
76
77 if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
78 if (hw->flowctrl & ALX_FC_RX) {
79 ecmd->advertising |= ADVERTISED_Pause;
80
81 if (!(hw->flowctrl & ALX_FC_TX))
82 ecmd->advertising |= ADVERTISED_Asym_Pause;
83 } else if (hw->flowctrl & ALX_FC_TX) {
84 ecmd->advertising |= ADVERTISED_Asym_Pause;
85 }
86 }
87
88 if (hw->link_speed != SPEED_UNKNOWN) {
89 ethtool_cmd_speed_set(ecmd,
90 hw->link_speed - hw->link_speed % 10);
91 ecmd->duplex = hw->link_speed % 10;
92 } else {
93 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
94 ecmd->duplex = DUPLEX_UNKNOWN;
95 }
96
97 return 0;
98}
99
100static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
101{
102 struct alx_priv *alx = netdev_priv(netdev);
103 struct alx_hw *hw = &alx->hw;
104 u32 adv_cfg;
105
106 ASSERT_RTNL();
107
108 if (ecmd->autoneg == AUTONEG_ENABLE) {
109 if (ecmd->advertising & ADVERTISED_1000baseT_Half)
110 return -EINVAL;
111 adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
112 } else {
113 int speed = ethtool_cmd_speed(ecmd);
114
115 switch (speed + ecmd->duplex) {
116 case SPEED_10 + DUPLEX_HALF:
117 adv_cfg = ADVERTISED_10baseT_Half;
118 break;
119 case SPEED_10 + DUPLEX_FULL:
120 adv_cfg = ADVERTISED_10baseT_Full;
121 break;
122 case SPEED_100 + DUPLEX_HALF:
123 adv_cfg = ADVERTISED_100baseT_Half;
124 break;
125 case SPEED_100 + DUPLEX_FULL:
126 adv_cfg = ADVERTISED_100baseT_Full;
127 break;
128 default:
129 return -EINVAL;
130 }
131 }
132
133 hw->adv_cfg = adv_cfg;
134 return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
135}
136
137static void alx_get_pauseparam(struct net_device *netdev,
138 struct ethtool_pauseparam *pause)
139{
140 struct alx_priv *alx = netdev_priv(netdev);
141 struct alx_hw *hw = &alx->hw;
142
143 if (hw->flowctrl & ALX_FC_ANEG &&
144 hw->adv_cfg & ADVERTISED_Autoneg)
145 pause->autoneg = AUTONEG_ENABLE;
146 else
147 pause->autoneg = AUTONEG_DISABLE;
148
149 if (hw->flowctrl & ALX_FC_TX)
150 pause->tx_pause = 1;
151 else
152 pause->tx_pause = 0;
153
154 if (hw->flowctrl & ALX_FC_RX)
155 pause->rx_pause = 1;
156 else
157 pause->rx_pause = 0;
158}
159
160
161static int alx_set_pauseparam(struct net_device *netdev,
162 struct ethtool_pauseparam *pause)
163{
164 struct alx_priv *alx = netdev_priv(netdev);
165 struct alx_hw *hw = &alx->hw;
166 int err = 0;
167 bool reconfig_phy = false;
168 u8 fc = 0;
169
170 if (pause->tx_pause)
171 fc |= ALX_FC_TX;
172 if (pause->rx_pause)
173 fc |= ALX_FC_RX;
174 if (pause->autoneg)
175 fc |= ALX_FC_ANEG;
176
177 ASSERT_RTNL();
178
179 /* restart auto-neg for auto-mode */
180 if (hw->adv_cfg & ADVERTISED_Autoneg) {
181 if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
182 reconfig_phy = true;
183 if (fc & hw->flowctrl & ALX_FC_ANEG &&
184 (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
185 reconfig_phy = true;
186 }
187
188 if (reconfig_phy) {
189 err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
190 return err;
191 }
192
193 /* flow control on mac */
194 if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
195 alx_cfg_mac_flowcontrol(hw, fc);
196
197 hw->flowctrl = fc;
198
199 return 0;
200}
201
202static u32 alx_get_msglevel(struct net_device *netdev)
203{
204 struct alx_priv *alx = netdev_priv(netdev);
205
206 return alx->msg_enable;
207}
208
209static void alx_set_msglevel(struct net_device *netdev, u32 data)
210{
211 struct alx_priv *alx = netdev_priv(netdev);
212
213 alx->msg_enable = data;
214}
215
216static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
217{
218 struct alx_priv *alx = netdev_priv(netdev);
219 struct alx_hw *hw = &alx->hw;
220
221 wol->supported = WAKE_MAGIC | WAKE_PHY;
222 wol->wolopts = 0;
223
224 if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
225 wol->wolopts |= WAKE_MAGIC;
226 if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
227 wol->wolopts |= WAKE_PHY;
228}
229
230static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
231{
232 struct alx_priv *alx = netdev_priv(netdev);
233 struct alx_hw *hw = &alx->hw;
234
235 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
236 WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
237 return -EOPNOTSUPP;
238
239 hw->sleep_ctrl = 0;
240
241 if (wol->wolopts & WAKE_MAGIC)
242 hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
243 if (wol->wolopts & WAKE_PHY)
244 hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
245
246 device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
247
248 return 0;
249}
250
251static void alx_get_drvinfo(struct net_device *netdev,
252 struct ethtool_drvinfo *drvinfo)
253{
254 struct alx_priv *alx = netdev_priv(netdev);
255
256 strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
257 strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
258 sizeof(drvinfo->bus_info));
259}
260
261const struct ethtool_ops alx_ethtool_ops = {
262 .get_settings = alx_get_settings,
263 .set_settings = alx_set_settings,
264 .get_pauseparam = alx_get_pauseparam,
265 .set_pauseparam = alx_set_pauseparam,
266 .get_drvinfo = alx_get_drvinfo,
267 .get_msglevel = alx_get_msglevel,
268 .set_msglevel = alx_set_msglevel,
269 .get_wol = alx_get_wol,
270 .set_wol = alx_set_wol,
271 .get_link = ethtool_op_get_link,
272};
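alx_ethtool_ops at the end of ethtool.c is the standard "ops table" pattern: a const table of function pointers that the core calls through, letting each driver plug in its own handlers (the table is exported via alx.h and presumably attached to the netdev in main.c, which is not shown in this excerpt). A self-contained illustration of the pattern with generic names, not the ethtool API:

```
#include <stdio.h>

/* an ops table: the caller only knows this interface */
struct dev_ops {
	int (*get_link)(void *priv);
	void (*get_drvinfo)(void *priv, char *buf, size_t len);
};

struct mydev { int link_up; };

static int mydev_get_link(void *priv)
{
	return ((struct mydev *)priv)->link_up;
}

static void mydev_get_drvinfo(void *priv, char *buf, size_t len)
{
	(void)priv;
	snprintf(buf, len, "mydev example driver");
}

/* const table, same shape as alx_ethtool_ops */
static const struct dev_ops mydev_ops = {
	.get_link = mydev_get_link,
	.get_drvinfo = mydev_get_drvinfo,
};

int main(void)
{
	struct mydev dev = { .link_up = 1 };
	char info[64];

	mydev_ops.get_drvinfo(&dev, info, sizeof(info));
	printf("%s, link=%d\n", info, mydev_ops.get_link(&dev));
	return 0;
}
```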
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
new file mode 100644
index 000000000000..220a16ad0e49
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -0,0 +1,1226 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36#include <linux/pci.h>
37#include <linux/mdio.h>
38#include "reg.h"
39#include "hw.h"
40
41static inline bool alx_is_rev_a(u8 rev)
42{
43 return rev == ALX_REV_A0 || rev == ALX_REV_A1;
44}
45
46static int alx_wait_mdio_idle(struct alx_hw *hw)
47{
48 u32 val;
49 int i;
50
51 for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
52 val = alx_read_mem32(hw, ALX_MDIO);
53 if (!(val & ALX_MDIO_BUSY))
54 return 0;
55 udelay(10);
56 }
57
58 return -ETIMEDOUT;
59}
60
61static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
62 u16 reg, u16 *phy_data)
63{
64 u32 val, clk_sel;
65 int err;
66
67 *phy_data = 0;
68
69 /* use slow clock when it's in hibernation status */
70 clk_sel = hw->link_speed != SPEED_UNKNOWN ?
71 ALX_MDIO_CLK_SEL_25MD4 :
72 ALX_MDIO_CLK_SEL_25MD128;
73
74 if (ext) {
75 val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
76 reg << ALX_MDIO_EXTN_REG_SHIFT;
77 alx_write_mem32(hw, ALX_MDIO_EXTN, val);
78
79 val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
80 ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
81 clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
82 } else {
83 val = ALX_MDIO_SPRES_PRMBL |
84 clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
85 reg << ALX_MDIO_REG_SHIFT |
86 ALX_MDIO_START | ALX_MDIO_OP_READ;
87 }
88 alx_write_mem32(hw, ALX_MDIO, val);
89
90 err = alx_wait_mdio_idle(hw);
91 if (err)
92 return err;
93 val = alx_read_mem32(hw, ALX_MDIO);
94 *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);
95 return 0;
96}
97
98static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
99 u16 reg, u16 phy_data)
100{
101 u32 val, clk_sel;
102
103 /* use slow clock when it's in hibernation status */
104 clk_sel = hw->link_speed != SPEED_UNKNOWN ?
105 ALX_MDIO_CLK_SEL_25MD4 :
106 ALX_MDIO_CLK_SEL_25MD128;
107
108 if (ext) {
109 val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
110 reg << ALX_MDIO_EXTN_REG_SHIFT;
111 alx_write_mem32(hw, ALX_MDIO_EXTN, val);
112
113 val = ALX_MDIO_SPRES_PRMBL |
114 clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
115 phy_data << ALX_MDIO_DATA_SHIFT |
116 ALX_MDIO_START | ALX_MDIO_MODE_EXT;
117 } else {
118 val = ALX_MDIO_SPRES_PRMBL |
119 clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
120 reg << ALX_MDIO_REG_SHIFT |
121 phy_data << ALX_MDIO_DATA_SHIFT |
122 ALX_MDIO_START;
123 }
124 alx_write_mem32(hw, ALX_MDIO, val);
125
126 return alx_wait_mdio_idle(hw);
127}
128
129static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
130{
131 return alx_read_phy_core(hw, false, 0, reg, phy_data);
132}
133
134static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
135{
136 return alx_write_phy_core(hw, false, 0, reg, phy_data);
137}
138
139static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
140{
141 return alx_read_phy_core(hw, true, dev, reg, pdata);
142}
143
144static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
145{
146 return alx_write_phy_core(hw, true, dev, reg, data);
147}
148
149static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
150{
151 int err;
152
153 err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
154 if (err)
155 return err;
156
157 return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
158}
159
160static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
161{
162 int err;
163
164 err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
165 if (err)
166 return err;
167
168 return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
169}
170
171int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
172{
173 int err;
174
175 spin_lock(&hw->mdio_lock);
176 err = __alx_read_phy_reg(hw, reg, phy_data);
177 spin_unlock(&hw->mdio_lock);
178
179 return err;
180}
181
182int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
183{
184 int err;
185
186 spin_lock(&hw->mdio_lock);
187 err = __alx_write_phy_reg(hw, reg, phy_data);
188 spin_unlock(&hw->mdio_lock);
189
190 return err;
191}
192
193int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
194{
195 int err;
196
197 spin_lock(&hw->mdio_lock);
198 err = __alx_read_phy_ext(hw, dev, reg, pdata);
199 spin_unlock(&hw->mdio_lock);
200
201 return err;
202}
203
204int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
205{
206 int err;
207
208 spin_lock(&hw->mdio_lock);
209 err = __alx_write_phy_ext(hw, dev, reg, data);
210 spin_unlock(&hw->mdio_lock);
211
212 return err;
213}
214
215static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
216{
217 int err;
218
219 spin_lock(&hw->mdio_lock);
220 err = __alx_read_phy_dbg(hw, reg, pdata);
221 spin_unlock(&hw->mdio_lock);
222
223 return err;
224}
225
226static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
227{
228 int err;
229
230 spin_lock(&hw->mdio_lock);
231 err = __alx_write_phy_dbg(hw, reg, data);
232 spin_unlock(&hw->mdio_lock);
233
234 return err;
235}
236
237static u16 alx_get_phy_config(struct alx_hw *hw)
238{
239 u32 val;
240 u16 phy_val;
241
242 val = alx_read_mem32(hw, ALX_PHY_CTRL);
243 /* phy in reset */
244 if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
245 return ALX_DRV_PHY_UNKNOWN;
246
247 val = alx_read_mem32(hw, ALX_DRV);
248 val = ALX_GET_FIELD(val, ALX_DRV_PHY);
249 if (ALX_DRV_PHY_UNKNOWN == val)
250 return ALX_DRV_PHY_UNKNOWN;
251
252 alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
253 if (ALX_PHY_INITED == phy_val)
254 return val;
255
256 return ALX_DRV_PHY_UNKNOWN;
257}
258
259static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
260{
261 u32 read;
262 int i;
263
264 for (i = 0; i < ALX_SLD_MAX_TO; i++) {
265 read = alx_read_mem32(hw, reg);
266 if ((read & wait) == 0) {
267 if (val)
268 *val = read;
269 return true;
270 }
271 mdelay(1);
272 }
273
274 return false;
275}
276
277static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
278{
279 u32 mac0, mac1;
280
281 mac0 = alx_read_mem32(hw, ALX_STAD0);
282 mac1 = alx_read_mem32(hw, ALX_STAD1);
283
284 /* addr should be big-endian */
285 *(__be32 *)(addr + 2) = cpu_to_be32(mac0);
286 *(__be16 *)addr = cpu_to_be16(mac1);
287
288 return is_valid_ether_addr(addr);
289}
290
291int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
292{
293 u32 val;
294
295 /* try to get it from register first */
296 if (alx_read_macaddr(hw, addr))
297 return 0;
298
299 /* try to load from efuse */
300 if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
301 return -EIO;
302 alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
303 if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
304 return -EIO;
305 if (alx_read_macaddr(hw, addr))
306 return 0;
307
308 /* try to load from flash/eeprom (if present) */
309 val = alx_read_mem32(hw, ALX_EFLD);
310 if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
311 if (!alx_wait_reg(hw, ALX_EFLD,
312 ALX_EFLD_STAT | ALX_EFLD_START, &val))
313 return -EIO;
314 alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
315 if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
316 return -EIO;
317 if (alx_read_macaddr(hw, addr))
318 return 0;
319 }
320
321 return -EIO;
322}
323
324void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
325{
326 u32 val;
327
328 /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */
329 val = be32_to_cpu(*(__be32 *)(addr + 2));
330 alx_write_mem32(hw, ALX_STAD0, val);
331 val = be16_to_cpu(*(__be16 *)addr);
332 alx_write_mem32(hw, ALX_STAD1, val);
333}
334
335static void alx_enable_osc(struct alx_hw *hw)
336{
337 u32 val;
338
339 /* rising edge */
340 val = alx_read_mem32(hw, ALX_MISC);
341 alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
342 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
343}
344
345static void alx_reset_osc(struct alx_hw *hw, u8 rev)
346{
347 u32 val, val2;
348
349 /* clear Internal OSC settings, switching OSC by hw itself */
350 val = alx_read_mem32(hw, ALX_MISC3);
351 alx_write_mem32(hw, ALX_MISC3,
352 (val & ~ALX_MISC3_25M_BY_SW) |
353 ALX_MISC3_25M_NOTO_INTNL);
354
355 /* 25M clk from chipset may be unstable 1s after de-assert of
356 * PERST, driver need re-calibrate before enter Sleep for WoL
357 */
358 val = alx_read_mem32(hw, ALX_MISC);
359 if (rev >= ALX_REV_B0) {
360 /* restore over current protection def-val,
361 * this val could be reset by MAC-RST
362 */
363 ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
364 /* a 0->1 change will update the internal val of osc */
365 val &= ~ALX_MISC_INTNLOSC_OPEN;
366 alx_write_mem32(hw, ALX_MISC, val);
367 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
368 /* hw will automatically dis OSC after cab. */
369 val2 = alx_read_mem32(hw, ALX_MSIC2);
370 val2 &= ~ALX_MSIC2_CALB_START;
371 alx_write_mem32(hw, ALX_MSIC2, val2);
372 alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
373 } else {
374 val &= ~ALX_MISC_INTNLOSC_OPEN;
375 /* disable isolate for rev A devices */
376 if (alx_is_rev_a(rev))
377 val &= ~ALX_MISC_ISO_EN;
378
379 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
380 alx_write_mem32(hw, ALX_MISC, val);
381 }
382
383 udelay(20);
384}
385
386static int alx_stop_mac(struct alx_hw *hw)
387{
388 u32 rxq, txq, val;
389 u16 i;
390
391 rxq = alx_read_mem32(hw, ALX_RXQ0);
392 alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
393 txq = alx_read_mem32(hw, ALX_TXQ0);
394 alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);
395
396 udelay(40);
397
398 hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
399 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
400
401 for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
402 val = alx_read_mem32(hw, ALX_MAC_STS);
403 if (!(val & ALX_MAC_STS_IDLE))
404 return 0;
405 udelay(10);
406 }
407
408 return -ETIMEDOUT;
409}
410
411int alx_reset_mac(struct alx_hw *hw)
412{
413 u32 val, pmctrl;
414 int i, ret;
415 u8 rev;
416 bool a_cr;
417
418 pmctrl = 0;
419 rev = alx_hw_revision(hw);
420 a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);
421
422 /* disable all interrupts, RXQ/TXQ */
423 alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
424 alx_write_mem32(hw, ALX_IMR, 0);
425 alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
426
427 ret = alx_stop_mac(hw);
428 if (ret)
429 return ret;
430
431 /* mac reset workaroud */
432 alx_write_mem32(hw, ALX_RFD_PIDX, 1);
433
434 /* dis l0s/l1 before mac reset */
435 if (a_cr) {
436 pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
437 if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
438 alx_write_mem32(hw, ALX_PMCTRL,
439 pmctrl & ~(ALX_PMCTRL_L1_EN |
440 ALX_PMCTRL_L0S_EN));
441 }
442
443 /* reset whole mac safely */
444 val = alx_read_mem32(hw, ALX_MASTER);
445 alx_write_mem32(hw, ALX_MASTER,
446 val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);
447
448 /* make sure it's real idle */
449 udelay(10);
450 for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
451 val = alx_read_mem32(hw, ALX_RFD_PIDX);
452 if (val == 0)
453 break;
454 udelay(10);
455 }
456 for (; i < ALX_DMA_MAC_RST_TO; i++) {
457 val = alx_read_mem32(hw, ALX_MASTER);
458 if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
459 break;
460 udelay(10);
461 }
462 if (i == ALX_DMA_MAC_RST_TO)
463 return -EIO;
464 udelay(10);
465
466 if (a_cr) {
467 alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
468 /* restore l0s / l1 */
469 if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
470 alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
471 }
472
473 alx_reset_osc(hw, rev);
474
475 /* clear Internal OSC settings, switching OSC by hw itself,
476 * disable isolate for rev A devices
477 */
478 val = alx_read_mem32(hw, ALX_MISC3);
479 alx_write_mem32(hw, ALX_MISC3,
480 (val & ~ALX_MISC3_25M_BY_SW) |
481 ALX_MISC3_25M_NOTO_INTNL);
482 val = alx_read_mem32(hw, ALX_MISC);
483 val &= ~ALX_MISC_INTNLOSC_OPEN;
484 if (alx_is_rev_a(rev))
485 val &= ~ALX_MISC_ISO_EN;
486 alx_write_mem32(hw, ALX_MISC, val);
487 udelay(20);
488
489 /* driver control speed/duplex, hash-alg */
490 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
491
492 val = alx_read_mem32(hw, ALX_SERDES);
493 alx_write_mem32(hw, ALX_SERDES,
494 val | ALX_SERDES_MACCLK_SLWDWN |
495 ALX_SERDES_PHYCLK_SLWDWN);
496
497 return 0;
498}
499
500void alx_reset_phy(struct alx_hw *hw)
501{
502 int i;
503 u32 val;
504 u16 phy_val;
505
506 /* (DSP)reset PHY core */
507 val = alx_read_mem32(hw, ALX_PHY_CTRL);
508 val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
509 ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
510 ALX_PHY_CTRL_CLS);
511 val |= ALX_PHY_CTRL_RST_ANALOG;
512
513 val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
514 alx_write_mem32(hw, ALX_PHY_CTRL, val);
515 udelay(10);
516 alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);
517
518 for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
519 udelay(10);
520
521 /* phy power saving & hib */
522 alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
523 alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
524 ALX_SYSMODCTRL_IECHOADJ_DEF);
525 alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
526 ALX_VDRVBIAS_DEF);
527
528 /* EEE advertisement */
529 val = alx_read_mem32(hw, ALX_LPI_CTRL);
530 alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
531 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);
532
533 /* phy power saving */
534 alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
535 alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
536 alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
537 alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
538 alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
539 alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
540 phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
541 /* rtl8139c, 120m issue */
542 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
543 ALX_MIIEXT_NLP78_120M_DEF);
544 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
545 ALX_MIIEXT_S3DIG10_DEF);
546
547 if (hw->lnk_patch) {
548 /* Turn off half amplitude */
549 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
550 &phy_val);
551 alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
552 phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
553 /* Turn off Green feature */
554 alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
555 alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
556 phy_val | ALX_GREENCFG2_BP_GREEN);
557 /* Turn off half Bias */
558 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
559 &phy_val);
560 alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
561 phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
562 }
563
564 /* set phy interrupt mask */
565 alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
566}
567
568#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
569
570void alx_reset_pcie(struct alx_hw *hw)
571{
572 u8 rev = alx_hw_revision(hw);
573 u32 val;
574 u16 val16;
575
576 /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
577 pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
578 if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
579 val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
580 pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
581 }
582
583 /* clear WoL setting/status */
584 val = alx_read_mem32(hw, ALX_WOL0);
585 alx_write_mem32(hw, ALX_WOL0, 0);
586
587 val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
588 alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);
589
590 /* mask some pcie error bits */
591 val = alx_read_mem32(hw, ALX_UE_SVRT);
592 val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
593 alx_write_mem32(hw, ALX_UE_SVRT, val);
594
595 /* wol 25M & pclk */
596 val = alx_read_mem32(hw, ALX_MASTER);
597 if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
598 if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
599 (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
600 alx_write_mem32(hw, ALX_MASTER,
601 val | ALX_MASTER_PCLKSEL_SRDS |
602 ALX_MASTER_WAKEN_25M);
603 } else {
604 if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
605 (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
606 alx_write_mem32(hw, ALX_MASTER,
607 (val & ~ALX_MASTER_PCLKSEL_SRDS) |
608 ALX_MASTER_WAKEN_25M);
609 }
610
611 /* ASPM setting */
612 alx_enable_aspm(hw, true, true);
613
614 udelay(10);
615}
616
617void alx_start_mac(struct alx_hw *hw)
618{
619 u32 mac, txq, rxq;
620
621 rxq = alx_read_mem32(hw, ALX_RXQ0);
622 alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
623 txq = alx_read_mem32(hw, ALX_TXQ0);
624 alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);
625
626 mac = hw->rx_ctrl;
627 if (hw->link_speed % 10 == DUPLEX_FULL)
628 mac |= ALX_MAC_CTRL_FULLD;
629 else
630 mac &= ~ALX_MAC_CTRL_FULLD;
631 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
632 hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
633 ALX_MAC_CTRL_SPEED_10_100);
634 mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
635 hw->rx_ctrl = mac;
636 alx_write_mem32(hw, ALX_MAC_CTRL, mac);
637}
638
639void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
640{
641 if (fc & ALX_FC_RX)
642 hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
643 else
644 hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;
645
646 if (fc & ALX_FC_TX)
647 hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
648 else
649 hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;
650
651 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
652}
653
654void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
655{
656 u32 pmctrl;
657 u8 rev = alx_hw_revision(hw);
658
659 pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
660
661 ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
662 ALX_PMCTRL_LCKDET_TIMER_DEF);
663 pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
664 ALX_PMCTRL_L1_CLKSW_EN |
665 ALX_PMCTRL_L1_SRDSRX_PWD;
666 ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
667 ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
668 pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
669 ALX_PMCTRL_L1_SRDSPLL_EN |
670 ALX_PMCTRL_L1_BUFSRX_EN |
671 ALX_PMCTRL_SADLY_EN |
672 ALX_PMCTRL_HOTRST_WTEN|
673 ALX_PMCTRL_L0S_EN |
674 ALX_PMCTRL_L1_EN |
675 ALX_PMCTRL_ASPM_FCEN |
676 ALX_PMCTRL_TXL1_AFTER_L0S |
677 ALX_PMCTRL_RXL1_AFTER_L0S);
678 if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
679 pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;
680
681 if (l0s_en)
682 pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
683 if (l1_en)
684 pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);
685
686 alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
687}
688
689
690static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
691{
692 u32 cfg = 0;
693
694 if (ethadv_cfg & ADVERTISED_Autoneg) {
695 cfg |= ALX_DRV_PHY_AUTO;
696 if (ethadv_cfg & ADVERTISED_10baseT_Half)
697 cfg |= ALX_DRV_PHY_10;
698 if (ethadv_cfg & ADVERTISED_10baseT_Full)
699 cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
700 if (ethadv_cfg & ADVERTISED_100baseT_Half)
701 cfg |= ALX_DRV_PHY_100;
702 if (ethadv_cfg & ADVERTISED_100baseT_Full)
703 cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
704 if (ethadv_cfg & ADVERTISED_1000baseT_Half)
705 cfg |= ALX_DRV_PHY_1000;
706 if (ethadv_cfg & ADVERTISED_1000baseT_Full)
707 cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
708 if (ethadv_cfg & ADVERTISED_Pause)
709 cfg |= ADVERTISE_PAUSE_CAP;
710 if (ethadv_cfg & ADVERTISED_Asym_Pause)
711 cfg |= ADVERTISE_PAUSE_ASYM;
712 } else {
713 switch (ethadv_cfg) {
714 case ADVERTISED_10baseT_Half:
715 cfg |= ALX_DRV_PHY_10;
716 break;
717 case ADVERTISED_100baseT_Half:
718 cfg |= ALX_DRV_PHY_100;
719 break;
720 case ADVERTISED_10baseT_Full:
721 cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
722 break;
723 case ADVERTISED_100baseT_Full:
724 cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
725 break;
726 }
727 }
728
729 return cfg;
730}
731
732int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
733{
734 u16 adv, giga, cr;
735 u32 val;
736 int err = 0;
737
738 alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
739 val = alx_read_mem32(hw, ALX_DRV);
740 ALX_SET_FIELD(val, ALX_DRV_PHY, 0);
741
742 if (ethadv & ADVERTISED_Autoneg) {
743 adv = ADVERTISE_CSMA;
744 adv |= ethtool_adv_to_mii_adv_t(ethadv);
745
746 if (flowctrl & ALX_FC_ANEG) {
747 if (flowctrl & ALX_FC_RX) {
748 adv |= ADVERTISED_Pause;
749 if (!(flowctrl & ALX_FC_TX))
750 adv |= ADVERTISED_Asym_Pause;
751 } else if (flowctrl & ALX_FC_TX) {
752 adv |= ADVERTISED_Asym_Pause;
753 }
754 }
755 giga = 0;
756 if (alx_hw_giga(hw))
757 giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);
758
759 cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
760
761 if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
762 alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
763 alx_write_phy_reg(hw, MII_BMCR, cr))
764 err = -EBUSY;
765 } else {
766 cr = BMCR_RESET;
767 if (ethadv == ADVERTISED_100baseT_Half ||
768 ethadv == ADVERTISED_100baseT_Full)
769 cr |= BMCR_SPEED100;
770 if (ethadv == ADVERTISED_10baseT_Full ||
771 ethadv == ADVERTISED_100baseT_Full)
772 cr |= BMCR_FULLDPLX;
773
774 err = alx_write_phy_reg(hw, MII_BMCR, cr);
775 }
776
777 if (!err) {
778 alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
779 val |= ethadv_to_hw_cfg(hw, ethadv);
780 }
781
782 alx_write_mem32(hw, ALX_DRV, val);
783
784 return err;
785}
786
787
788void alx_post_phy_link(struct alx_hw *hw)
789{
790 u16 phy_val, len, agc;
791 u8 revid = alx_hw_revision(hw);
792 bool adj_th = revid == ALX_REV_B0;
793 int speed;
794
795 if (hw->link_speed == SPEED_UNKNOWN)
796 speed = SPEED_UNKNOWN;
797 else
798 speed = hw->link_speed - hw->link_speed % 10;
799
800 if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
801 return;
802
803 /* 1000BT/AZ, wrong cable length */
804 if (speed != SPEED_UNKNOWN) {
805 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
806 &phy_val);
807 len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
808 alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
809 agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);
810
811 if ((speed == SPEED_1000 &&
812 (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
813 (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
814 (speed == SPEED_100 &&
815 (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
816 (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
817 alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
818 ALX_AZ_ANADECT_LONG);
819 alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
820 &phy_val);
821 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
822 phy_val | ALX_AFE_10BT_100M_TH);
823 } else {
824 alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
825 ALX_AZ_ANADECT_DEF);
826 alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
827 ALX_MIIEXT_AFE, &phy_val);
828 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
829 phy_val & ~ALX_AFE_10BT_100M_TH);
830 }
831
832 /* threshold adjust */
833 if (adj_th && hw->lnk_patch) {
834 if (speed == SPEED_100) {
835 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
836 ALX_MSE16DB_UP);
837 } else if (speed == SPEED_1000) {
838 /*
839 * Gigabit link threshold: raise the noise
840 * tolerance by 50%
841 */
842 alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
843 &phy_val);
844 ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
845 ALX_MSE20DB_TH_HI);
846 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
847 phy_val);
848 }
849 }
850 } else {
851 alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
852 &phy_val);
853 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
854 phy_val & ~ALX_AFE_10BT_100M_TH);
855
856 if (adj_th && hw->lnk_patch) {
857 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
858 ALX_MSE16DB_DOWN);
859 alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
860 ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
861 ALX_MSE20DB_TH_DEF);
862 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
863 }
864 }
865}
866
867
868/* NOTE:
869 * 1. the PHY link must be established before calling this function
870 * 2. WoL options (pattern, magic, link, etc.) must be configured before calling it
871 */
872int alx_pre_suspend(struct alx_hw *hw, int speed)
873{
874 u32 master, mac, phy, val;
875 int err = 0;
876
877 master = alx_read_mem32(hw, ALX_MASTER);
878 master &= ~ALX_MASTER_PCLKSEL_SRDS;
879 mac = hw->rx_ctrl;
880 /* 10/100 half */
881 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100);
882 mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
883
884 phy = alx_read_mem32(hw, ALX_PHY_CTRL);
885 phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
886 phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
887 ALX_PHY_CTRL_HIB_EN;
888
889 /* no wake-up events configured: power the PHY down */
890 if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
891 err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
892 if (err)
893 return err;
894 phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
895 } else {
896 if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
897 mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
898 if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
899 mac |= ALX_MAC_CTRL_TX_EN;
900 if (speed % 10 == DUPLEX_FULL)
901 mac |= ALX_MAC_CTRL_FULLD;
902 if (speed >= SPEED_1000)
903 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
904 ALX_MAC_CTRL_SPEED_1000);
905 phy |= ALX_PHY_CTRL_DSPRST_OUT;
906 err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
907 ALX_MIIEXT_S3DIG10,
908 ALX_MIIEXT_S3DIG10_SL);
909 if (err)
910 return err;
911 }
912
913 alx_enable_osc(hw);
914 hw->rx_ctrl = mac;
915 alx_write_mem32(hw, ALX_MASTER, master);
916 alx_write_mem32(hw, ALX_MAC_CTRL, mac);
917 alx_write_mem32(hw, ALX_PHY_CTRL, phy);
918
919 /* set the PDLL D3PLLOFF enable bit */
920 val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
921 val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
922 alx_write_mem32(hw, ALX_PDLL_TRNS1, val);
923
924 return 0;
925}
926
927bool alx_phy_configured(struct alx_hw *hw)
928{
929 u32 cfg, hw_cfg;
930
931 cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
932 cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
933 hw_cfg = alx_get_phy_config(hw);
934
935 if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
936 return false;
937
938 return cfg == hw_cfg;
939}
940
941int alx_get_phy_link(struct alx_hw *hw, int *speed)
942{
943 struct pci_dev *pdev = hw->pdev;
944 u16 bmsr, giga;
945 int err;
946
947 err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
948 if (err)
949 return err;
950
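 /* the BMSR link-status bit is latched-low, so read the register
  * twice: the second read reflects the current link state
  */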
951 err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
952 if (err)
953 return err;
954
955 if (!(bmsr & BMSR_LSTATUS)) {
956 *speed = SPEED_UNKNOWN;
957 return 0;
958 }
959
960 /* speed/duplex result is saved in PHY Specific Status Register */
961 err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
962 if (err)
963 return err;
964
965 if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
966 goto wrong_speed;
967
968 switch (giga & ALX_GIGA_PSSR_SPEED) {
969 case ALX_GIGA_PSSR_1000MBS:
970 *speed = SPEED_1000;
971 break;
972 case ALX_GIGA_PSSR_100MBS:
973 *speed = SPEED_100;
974 break;
975 case ALX_GIGA_PSSR_10MBS:
976 *speed = SPEED_10;
977 break;
978 default:
979 goto wrong_speed;
980 }
981
982 *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
983 return 1;
984
985wrong_speed:
986 dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
987 return -EINVAL;
988}
989
990int alx_clear_phy_intr(struct alx_hw *hw)
991{
992 u16 isr;
993
994 /* clear interrupt status by reading it */
995 return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
996}
997
998int alx_config_wol(struct alx_hw *hw)
999{
1000 u32 wol = 0;
1001 int err = 0;
1002
1003 /* turn on magic packet event */
1004 if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
1005 wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;
1006
1007 /* turn on link up event */
1008 if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
1009 wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
1010 /* only a link-up event can wake the system */
1011 err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
1012 }
1013 alx_write_mem32(hw, ALX_WOL0, wol);
1014
1015 return err;
1016}
1017
1018void alx_disable_rss(struct alx_hw *hw)
1019{
1020 u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);
1021
1022 ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
1023 alx_write_mem32(hw, ALX_RXQ0, ctrl);
1024}
1025
1026void alx_configure_basic(struct alx_hw *hw)
1027{
1028 u32 val, raw_mtu, max_payload;
1029 u16 val16;
1030 u8 chip_rev = alx_hw_revision(hw);
1031
1032 alx_set_macaddr(hw, hw->mac_addr);
1033
1034 alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);
1035
1036 /* idle timeout to switch clk_125M */
1037 if (chip_rev >= ALX_REV_B0)
1038 alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
1039 ALX_IDLE_DECISN_TIMER_DEF);
1040
1041 alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);
1042
1043 val = alx_read_mem32(hw, ALX_MASTER);
1044 val |= ALX_MASTER_IRQMOD2_EN |
1045 ALX_MASTER_IRQMOD1_EN |
1046 ALX_MASTER_SYSALVTIMER_EN;
1047 alx_write_mem32(hw, ALX_MASTER, val);
1048 alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
1049 (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
1050 /* intr re-trig timeout */
1051 alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
1052 /* tpd threshold to trig int */
1053 alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
1054 alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);
1055
1056 raw_mtu = hw->mtu + ETH_HLEN;
1057 alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
1058 if (raw_mtu > ALX_MTU_JUMBO_TH)
1059 hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;
1060
1061 if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
1062 val = (raw_mtu + 8 + 7) >> 3;
1063 else
1064 val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
1065 alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);
1066
1067 max_payload = pcie_get_readrq(hw->pdev) >> 8;
1068 /*
1069 * if the BIOS has changed the default DMA max read request size,
1070 * restore it to the default value
1071 */
1072 if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
1073 pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);
1074
1075 val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
1076 ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
1077 ALX_TXQ0_SUPT_IPOPT |
1078 ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
1079 alx_write_mem32(hw, ALX_TXQ0, val);
1080 val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
1081 ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
1082 ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
1083 ALX_HQTPD_BURST_EN;
1084 alx_write_mem32(hw, ALX_HQTPD, val);
1085
1086 /* rxq, flow control */
1087 val = alx_read_mem32(hw, ALX_SRAM5);
1088 val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
1089 if (val > ALX_SRAM_RXF_LEN_8K) {
1090 val16 = ALX_MTU_STD_ALGN >> 3;
1091 val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
1092 } else {
1093 val16 = ALX_MTU_STD_ALGN >> 3;
1094 val = (val - ALX_MTU_STD_ALGN) >> 3;
1095 }
1096 alx_write_mem32(hw, ALX_RXQ2,
1097 val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
1098 val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
1099 val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
1100 ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
1101 ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
1102 ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
1103 ALX_RXQ0_IPV6_PARSE_EN;
1104
1105 if (alx_hw_giga(hw))
1106 ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
1107 ALX_RXQ0_ASPM_THRESH_100M);
1108
1109 alx_write_mem32(hw, ALX_RXQ0, val);
1110
1111 val = alx_read_mem32(hw, ALX_DMA);
1112 val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
1113 ALX_DMA_RREQ_PRI_DATA |
1114 max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
1115 ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
1116 ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
1117 (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
1118 alx_write_mem32(hw, ALX_DMA, val);
1119
1120 /* default multi-tx-q weights */
1121 val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
1122 4 << ALX_WRR_PRI0_SHIFT |
1123 4 << ALX_WRR_PRI1_SHIFT |
1124 4 << ALX_WRR_PRI2_SHIFT |
1125 4 << ALX_WRR_PRI3_SHIFT;
1126 alx_write_mem32(hw, ALX_WRR, val);
1127}
1128
1129static inline u32 alx_speed_to_ethadv(int speed)
1130{
1131 switch (speed) {
1132 case SPEED_1000 + DUPLEX_FULL:
1133 return ADVERTISED_1000baseT_Full;
1134 case SPEED_100 + DUPLEX_FULL:
1135 return ADVERTISED_100baseT_Full;
1136 case SPEED_100 + DUPLEX_HALF:
1137 return ADVERTISED_100baseT_Half;
1138 case SPEED_10 + DUPLEX_FULL:
1139 return ADVERTISED_10baseT_Full;
1140 case SPEED_10 + DUPLEX_HALF:
1141 return ADVERTISED_10baseT_Half;
1142 default:
1143 return 0;
1144 }
1145}
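/* For example: since DUPLEX_HALF == 0 and DUPLEX_FULL == 1, an encoded
 * value of SPEED_1000 + DUPLEX_FULL is 1001 (gigabit, full duplex) and
 * SPEED_100 + DUPLEX_HALF is simply 100.
 */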
1146
1147int alx_select_powersaving_speed(struct alx_hw *hw, int *speed)
1148{
1149 int i, err, spd;
1150 u16 lpa;
1151
1152 err = alx_get_phy_link(hw, &spd);
1153 if (err < 0)
1154 return err;
1155
1156 if (spd == SPEED_UNKNOWN)
1157 return 0;
1158
1159 err = alx_read_phy_reg(hw, MII_LPA, &lpa);
1160 if (err)
1161 return err;
1162
1163 if (!(lpa & LPA_LPACK)) {
1164 *speed = spd;
1165 return 0;
1166 }
1167
1168 if (lpa & LPA_10FULL)
1169 *speed = SPEED_10 + DUPLEX_FULL;
1170 else if (lpa & LPA_10HALF)
1171 *speed = SPEED_10 + DUPLEX_HALF;
1172 else if (lpa & LPA_100FULL)
1173 *speed = SPEED_100 + DUPLEX_FULL;
1174 else
1175 *speed = SPEED_100 + DUPLEX_HALF;
1176
1177 if (*speed != spd) {
1178 err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
1179 if (err)
1180 return err;
1181 err = alx_setup_speed_duplex(hw,
1182 alx_speed_to_ethadv(*speed) |
1183 ADVERTISED_Autoneg,
1184 ALX_FC_ANEG | ALX_FC_RX |
1185 ALX_FC_TX);
1186 if (err)
1187 return err;
1188
1189 /* wait for linkup */
1190 for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
1191 int speed2;
1192
1193 msleep(100);
1194
1195 err = alx_get_phy_link(hw, &speed2);
1196 if (err < 0)
1197 return err;
1198 if (speed2 != SPEED_UNKNOWN)
1199 break;
1200 }
1201 if (i == ALX_MAX_SETUP_LNK_CYCLE)
1202 return -ETIMEDOUT;
1203 }
1204
1205 return 0;
1206}
1207
1208bool alx_get_phy_info(struct alx_hw *hw)
1209{
1210 u16 devs1, devs2;
1211
1212 if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
1213 alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
1214 return false;
1215
1216 /* since there is no PMA/PMD status2 register, we can't use
1217 * mdio45_probe() to discover prtad and mmds;
1218 * read the fixed MMD3 to get the mmds instead.
1219 */
1220 if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
1221 alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
1222 return false;
1223 hw->mdio.mmds = devs1 | devs2 << 16;
1224
1225 return true;
1226}
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
new file mode 100644
index 000000000000..65e723d2172a
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -0,0 +1,499 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#ifndef ALX_HW_H_
36#define ALX_HW_H_
37#include <linux/types.h>
38#include <linux/mdio.h>
39#include <linux/pci.h>
40#include "reg.h"
41
42/* Transmit Packet Descriptor, contains 4 32-bit words.
43 *
44 * 31 16 0
45 * +----------------+----------------+
46 * | vlan-tag | buf length |
47 * +----------------+----------------+
48 * | Word 1 |
49 * +----------------+----------------+
50 * | Word 2: buf addr lo |
51 * +----------------+----------------+
52 * | Word 3: buf addr hi |
53 * +----------------+----------------+
54 *
55 * Word 2 and 3 combine to form a 64-bit buffer address
56 *
57 * Word 1 has three forms, depending on the state of bit 8/12/13:
58 * if bit8 == '1', the definition is just for custom checksum offload.
59 * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor
60 * for the skb is special for LSO V2: Word 2 becomes the total skb length
61 * and Word 3 is meaningless.
62 * In all other cases, the definition is for a general skb, ip/tcp/udp
63 * checksum, or LSO (TSO) offload.
64 *
65 * Here is the depiction:
66 *
67 * 0-+ 0-+
68 * 1 | 1 |
69 * 2 | 2 |
70 * 3 | Payload offset 3 | L4 header offset
71 * 4 | (7:0) 4 | (7:0)
72 * 5 | 5 |
73 * 6 | 6 |
74 * 7-+ 7-+
75 * 8 Custom csum enable = 1 8 Custom csum enable = 0
76 * 9 General IPv4 checksum 9 General IPv4 checksum
77 * 10 General TCP checksum 10 General TCP checksum
78 * 11 General UDP checksum 11 General UDP checksum
79 * 12 Large Send Segment enable 12 Large Send Segment enable
80 * 13 Large Send Segment type 13 Large Send Segment type
81 * 14 VLAN tagged 14 VLAN tagged
82 * 15 Insert VLAN tag 15 Insert VLAN tag
83 * 16 IPv4 packet 16 IPv4 packet
84 * 17 Ethernet frame type 17 Ethernet frame type
85 * 18-+ 18-+
86 * 19 | 19 |
87 * 20 | 20 |
88 * 21 | Custom csum offset 21 |
89 * 22 | (25:18) 22 |
90 * 23 | 23 | MSS (30:18)
91 * 24 | 24 |
92 * 25-+ 25 |
93 * 26-+ 26 |
94 * 27 | 27 |
95 * 28 | Reserved 28 |
96 * 29 | 29 |
97 * 30-+ 30-+
98 * 31 End of packet 31 End of packet
99 */
100struct alx_txd {
101 __le16 len;
102 __le16 vlan_tag;
103 __le32 word1;
104 union {
105 __le64 addr;
106 struct {
107 __le32 pkt_len;
108 __le32 resvd;
109 } l;
110 } adrl;
111} __packed;
112
113/* tpd word 1 */
114#define TPD_CXSUMSTART_MASK 0x00FF
115#define TPD_CXSUMSTART_SHIFT 0
116#define TPD_L4HDROFFSET_MASK 0x00FF
117#define TPD_L4HDROFFSET_SHIFT 0
118#define TPD_CXSUM_EN_MASK 0x0001
119#define TPD_CXSUM_EN_SHIFT 8
120#define TPD_IP_XSUM_MASK 0x0001
121#define TPD_IP_XSUM_SHIFT 9
122#define TPD_TCP_XSUM_MASK 0x0001
123#define TPD_TCP_XSUM_SHIFT 10
124#define TPD_UDP_XSUM_MASK 0x0001
125#define TPD_UDP_XSUM_SHIFT 11
126#define TPD_LSO_EN_MASK 0x0001
127#define TPD_LSO_EN_SHIFT 12
128#define TPD_LSO_V2_MASK 0x0001
129#define TPD_LSO_V2_SHIFT 13
130#define TPD_VLTAGGED_MASK 0x0001
131#define TPD_VLTAGGED_SHIFT 14
132#define TPD_INS_VLTAG_MASK 0x0001
133#define TPD_INS_VLTAG_SHIFT 15
134#define TPD_IPV4_MASK 0x0001
135#define TPD_IPV4_SHIFT 16
136#define TPD_ETHTYPE_MASK 0x0001
137#define TPD_ETHTYPE_SHIFT 17
138#define TPD_CXSUMOFFSET_MASK 0x00FF
139#define TPD_CXSUMOFFSET_SHIFT 18
140#define TPD_MSS_MASK 0x1FFF
141#define TPD_MSS_SHIFT 18
142#define TPD_EOP_MASK 0x0001
143#define TPD_EOP_SHIFT 31
144
145#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)
146
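/* Illustrative sketch of how word1 is composed for custom-checksum
 * offload using the shifts above; the helper name is hypothetical, and
 * the driver's tx path (alx_tx_csum in main.c) does the equivalent.
 * Start/offset are expressed in units of 16-bit words.
 */
#if 0	/* example only */
static inline void example_tpd_set_cxsum(struct alx_txd *tpd,
					 u8 start, u8 offset)
{
	tpd->word1 |= cpu_to_le32(start << TPD_CXSUMSTART_SHIFT);
	tpd->word1 |= cpu_to_le32(offset << TPD_CXSUMOFFSET_SHIFT);
	tpd->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
}
#endif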
147/* Receive Free Descriptor */
148struct alx_rfd {
149 __le64 addr; /* data buffer address, length is
150 * declared in register --- every
151 * buffer has the same size
152 */
153} __packed;
154
155/* Receive Return Descriptor, contains 4 32-bit words.
156 *
157 * 31 16 0
158 * +----------------+----------------+
159 * | Word 0 |
160 * +----------------+----------------+
161 * | Word 1: RSS Hash value |
162 * +----------------+----------------+
163 * | Word 2 |
164 * +----------------+----------------+
165 * | Word 3 |
166 * +----------------+----------------+
167 *
168 * Word 0 depiction & Word 2 depiction:
169 *
170 * 0--+ 0--+
171 * 1 | 1 |
172 * 2 | 2 |
173 * 3 | 3 |
174 * 4 | 4 |
175 * 5 | 5 |
176 * 6 | 6 |
177 * 7 | IP payload checksum 7 | VLAN tag
178 * 8 | (15:0) 8 | (15:0)
179 * 9 | 9 |
180 * 10 | 10 |
181 * 11 | 11 |
182 * 12 | 12 |
183 * 13 | 13 |
184 * 14 | 14 |
185 * 15-+ 15-+
186 * 16-+ 16-+
187 * 17 | Number of RFDs 17 |
188 * 18 | (19:16) 18 |
189 * 19-+ 19 | Protocol ID
190 * 20-+ 20 | (23:16)
191 * 21 | 21 |
192 * 22 | 22 |
193 * 23 | 23-+
194 * 24 | 24 | Reserved
195 * 25 | Start index of RFD-ring 25-+
196 * 26 | (31:20) 26 | RSS Q-num (27:25)
197 * 27 | 27-+
198 * 28 | 28-+
199 * 29 | 29 | RSS Hash algorithm
200 * 30 | 30 | (31:28)
201 * 31-+ 31-+
202 *
203 * Word 3 depiction:
204 *
205 * 0--+
206 * 1 |
207 * 2 |
208 * 3 |
209 * 4 |
210 * 5 |
211 * 6 |
212 * 7 | Packet length (include FCS)
213 * 8 | (13:0)
214 * 9 |
215 * 10 |
216 * 11 |
217 * 12 |
218 * 13-+
219 * 14 L4 Header checksum error
220 * 15 IPv4 checksum error
221 * 16 VLAN tagged
222 * 17-+
223 * 18 | Protocol ID (19:17)
224 * 19-+
225 * 20 Receive error summary
226 * 21 FCS(CRC) error
227 * 22 Frame alignment error
228 * 23 Truncated packet
229 * 24 Runt packet
230 * 25 Incomplete packet due to insufficient rx-desc
231 * 26 Broadcast packet
232 * 27 Multicast packet
233 * 28 Ethernet type (EII or 802.3)
234 * 29 FIFO overflow
235 * 30 Length error (for 802.3, length field mismatch with actual len)
236 * 31 Updated: indicates to the driver that this RRD has been refreshed.
237 */
238struct alx_rrd {
239 __le32 word0;
240 __le32 rss_hash;
241 __le32 word2;
242 __le32 word3;
243} __packed;
244
245/* rrd word 0 */
246#define RRD_XSUM_MASK 0xFFFF
247#define RRD_XSUM_SHIFT 0
248#define RRD_NOR_MASK 0x000F
249#define RRD_NOR_SHIFT 16
250#define RRD_SI_MASK 0x0FFF
251#define RRD_SI_SHIFT 20
252
253/* rrd word 2 */
254#define RRD_VLTAG_MASK 0xFFFF
255#define RRD_VLTAG_SHIFT 0
256#define RRD_PID_MASK 0x00FF
257#define RRD_PID_SHIFT 16
258/* non-ip packet */
259#define RRD_PID_NONIP 0
260/* ipv4(only) */
261#define RRD_PID_IPV4 1
262/* tcp/ipv6 */
263#define RRD_PID_IPV6TCP 2
264/* tcp/ipv4 */
265#define RRD_PID_IPV4TCP 3
266/* udp/ipv6 */
267#define RRD_PID_IPV6UDP 4
268/* udp/ipv4 */
269#define RRD_PID_IPV4UDP 5
270/* ipv6(only) */
271#define RRD_PID_IPV6 6
272/* LLDP packet */
273#define RRD_PID_LLDP 7
274/* 1588 packet */
275#define RRD_PID_1588 8
276#define RRD_RSSQ_MASK 0x0007
277#define RRD_RSSQ_SHIFT 25
278#define RRD_RSSALG_MASK 0x000F
279#define RRD_RSSALG_SHIFT 28
280#define RRD_RSSALG_TCPV6 0x1
281#define RRD_RSSALG_IPV6 0x2
282#define RRD_RSSALG_TCPV4 0x4
283#define RRD_RSSALG_IPV4 0x8
284
285/* rrd word 3 */
286#define RRD_PKTLEN_MASK 0x3FFF
287#define RRD_PKTLEN_SHIFT 0
288#define RRD_ERR_L4_MASK 0x0001
289#define RRD_ERR_L4_SHIFT 14
290#define RRD_ERR_IPV4_MASK 0x0001
291#define RRD_ERR_IPV4_SHIFT 15
292#define RRD_VLTAGGED_MASK 0x0001
293#define RRD_VLTAGGED_SHIFT 16
294#define RRD_OLD_PID_MASK 0x0007
295#define RRD_OLD_PID_SHIFT 17
296#define RRD_ERR_RES_MASK 0x0001
297#define RRD_ERR_RES_SHIFT 20
298#define RRD_ERR_FCS_MASK 0x0001
299#define RRD_ERR_FCS_SHIFT 21
300#define RRD_ERR_FAE_MASK 0x0001
301#define RRD_ERR_FAE_SHIFT 22
302#define RRD_ERR_TRUNC_MASK 0x0001
303#define RRD_ERR_TRUNC_SHIFT 23
304#define RRD_ERR_RUNT_MASK 0x0001
305#define RRD_ERR_RUNT_SHIFT 24
306#define RRD_ERR_ICMP_MASK 0x0001
307#define RRD_ERR_ICMP_SHIFT 25
308#define RRD_BCAST_MASK 0x0001
309#define RRD_BCAST_SHIFT 26
310#define RRD_MCAST_MASK 0x0001
311#define RRD_MCAST_SHIFT 27
312#define RRD_ETHTYPE_MASK 0x0001
313#define RRD_ETHTYPE_SHIFT 28
314#define RRD_ERR_FIFOV_MASK 0x0001
315#define RRD_ERR_FIFOV_SHIFT 29
316#define RRD_ERR_LEN_MASK 0x0001
317#define RRD_ERR_LEN_SHIFT 30
318#define RRD_UPDATED_MASK 0x0001
319#define RRD_UPDATED_SHIFT 31
320
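/* Illustrative sketch of consuming an RRD with the shift/mask pairs
 * above; the helper name is hypothetical, and the real rx clean path
 * in main.c does the same thing via ALX_GET_FIELD.
 */
#if 0	/* example only */
static inline bool example_rrd_ready(const struct alx_rrd *rrd, u16 *len)
{
	u32 w3 = le32_to_cpu(rrd->word3);

	if (!((w3 >> RRD_UPDATED_SHIFT) & RRD_UPDATED_MASK))
		return false;	/* hardware has not refreshed this RRD yet */

	/* packet length includes the FCS */
	*len = (w3 >> RRD_PKTLEN_SHIFT) & RRD_PKTLEN_MASK;
	return true;
}
#endif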
321
322#define ALX_MAX_SETUP_LNK_CYCLE 50
323
324/* for FlowControl */
325#define ALX_FC_RX 0x01
326#define ALX_FC_TX 0x02
327#define ALX_FC_ANEG 0x04
328
329/* for sleep control */
330#define ALX_SLEEP_WOL_PHY 0x00000001
331#define ALX_SLEEP_WOL_MAGIC 0x00000002
332#define ALX_SLEEP_CIFS 0x00000004
333#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \
334 ALX_SLEEP_WOL_MAGIC | \
335 ALX_SLEEP_CIFS)
336
337/* for RSS hash type */
338#define ALX_RSS_HASH_TYPE_IPV4 0x1
339#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2
340#define ALX_RSS_HASH_TYPE_IPV6 0x4
341#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8
342#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \
343 ALX_RSS_HASH_TYPE_IPV4_TCP | \
344 ALX_RSS_HASH_TYPE_IPV6 | \
345 ALX_RSS_HASH_TYPE_IPV6_TCP)
346#define ALX_DEF_RXBUF_SIZE 1536
347#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
348#define ALX_MAX_TSO_PKT_SIZE (7*1024)
349#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE
350#define ALX_MIN_FRAME_SIZE 68
351#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
352
353#define ALX_MAX_RX_QUEUES 8
354#define ALX_MAX_TX_QUEUES 4
355#define ALX_MAX_HANDLED_INTRS 5
356
357#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \
358 ALX_ISR_DMAW | \
359 ALX_ISR_DMAR | \
360 ALX_ISR_SMB | \
361 ALX_ISR_MANU | \
362 ALX_ISR_TIMER)
363
364#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \
365 ALX_ISR_DMAW | ALX_ISR_DMAR)
366
367#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \
368 ALX_ISR_TXF_UR | \
369 ALX_ISR_RFD_UR)
370
371#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \
372 ALX_ISR_TX_Q1 | \
373 ALX_ISR_TX_Q2 | \
374 ALX_ISR_TX_Q3 | \
375 ALX_ISR_RX_Q0 | \
376 ALX_ISR_RX_Q1 | \
377 ALX_ISR_RX_Q2 | \
378 ALX_ISR_RX_Q3 | \
379 ALX_ISR_RX_Q4 | \
380 ALX_ISR_RX_Q5 | \
381 ALX_ISR_RX_Q6 | \
382 ALX_ISR_RX_Q7)
383
384/* maximum interrupt vectors for msix */
385#define ALX_MAX_MSIX_INTRS 16
386
387#define ALX_GET_FIELD(_data, _field) \
388 (((_data) >> _field ## _SHIFT) & _field ## _MASK)
389
390#define ALX_SET_FIELD(_data, _field, _value) do { \
391 (_data) &= ~(_field ## _MASK << _field ## _SHIFT); \
392 (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\
393 } while (0)
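/* For example, for a hypothetical field FOO with FOO_MASK 0x0F and
 * FOO_SHIFT 8: ALX_GET_FIELD(x, FOO) expands to ((x) >> 8) & 0x0F, and
 * ALX_SET_FIELD(x, FOO, 5) clears bits 11:8 of x and then ORs in
 * (5 & 0x0F) << 8.
 */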
394
395struct alx_hw {
396 struct pci_dev *pdev;
397 u8 __iomem *hw_addr;
398
399 /* current & permanent mac addr */
400 u8 mac_addr[ETH_ALEN];
401 u8 perm_addr[ETH_ALEN];
402
403 u16 mtu;
404 u16 imt;
405 u8 dma_chnl;
406 u8 max_dma_chnl;
407 /* tpd threshold to trig INT */
408 u32 ith_tpd;
409 u32 rx_ctrl;
410 u32 mc_hash[2];
411
412 u32 smb_timer;
413 /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
414 int link_speed;
415
416 /* auto-neg advertisement or force mode config */
417 u32 adv_cfg;
418 u8 flowctrl;
419
420 u32 sleep_ctrl;
421
422 spinlock_t mdio_lock;
423 struct mdio_if_info mdio;
424 u16 phy_id[2];
425
426 /* PHY link patch flag */
427 bool lnk_patch;
428};
429
430static inline int alx_hw_revision(struct alx_hw *hw)
431{
432 return hw->pdev->revision >> ALX_PCI_REVID_SHIFT;
433}
434
435static inline bool alx_hw_with_cr(struct alx_hw *hw)
436{
437 return hw->pdev->revision & 1;
438}
439
440static inline bool alx_hw_giga(struct alx_hw *hw)
441{
442 return hw->pdev->device & 1;
443}
444
445static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val)
446{
447 writeb(val, hw->hw_addr + reg);
448}
449
450static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val)
451{
452 writew(val, hw->hw_addr + reg);
453}
454
455static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg)
456{
457 return readw(hw->hw_addr + reg);
458}
459
460static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val)
461{
462 writel(val, hw->hw_addr + reg);
463}
464
465static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg)
466{
467 return readl(hw->hw_addr + reg);
468}
469
470static inline void alx_post_write(struct alx_hw *hw)
471{
472 readl(hw->hw_addr);
473}
474
475int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
476void alx_reset_phy(struct alx_hw *hw);
477void alx_reset_pcie(struct alx_hw *hw);
478void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
479int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
480void alx_post_phy_link(struct alx_hw *hw);
481int alx_pre_suspend(struct alx_hw *hw, int speed);
482int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
483int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
484int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
485int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
486int alx_get_phy_link(struct alx_hw *hw, int *speed);
487int alx_clear_phy_intr(struct alx_hw *hw);
488int alx_config_wol(struct alx_hw *hw);
489void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
490void alx_start_mac(struct alx_hw *hw);
491int alx_reset_mac(struct alx_hw *hw);
492void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
493bool alx_phy_configured(struct alx_hw *hw);
494void alx_configure_basic(struct alx_hw *hw);
495void alx_disable_rss(struct alx_hw *hw);
496int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
497bool alx_get_phy_info(struct alx_hw *hw);
498
499#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
new file mode 100644
index 000000000000..418de8b13165
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -0,0 +1,1625 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/interrupt.h>
38#include <linux/ip.h>
39#include <linux/ipv6.h>
40#include <linux/if_vlan.h>
41#include <linux/mdio.h>
42#include <linux/aer.h>
43#include <linux/bitops.h>
44#include <linux/netdevice.h>
45#include <linux/etherdevice.h>
46#include <net/ip6_checksum.h>
47#include <linux/crc32.h>
48#include "alx.h"
49#include "hw.h"
50#include "reg.h"
51
52const char alx_drv_name[] = "alx";
53
54
55static void alx_free_txbuf(struct alx_priv *alx, int entry)
56{
57 struct alx_buffer *txb = &alx->txq.bufs[entry];
58
59 if (dma_unmap_len(txb, size)) {
60 dma_unmap_single(&alx->hw.pdev->dev,
61 dma_unmap_addr(txb, dma),
62 dma_unmap_len(txb, size),
63 DMA_TO_DEVICE);
64 dma_unmap_len_set(txb, size, 0);
65 }
66
67 if (txb->skb) {
68 dev_kfree_skb_any(txb->skb);
69 txb->skb = NULL;
70 }
71}
72
73static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
74{
75 struct alx_rx_queue *rxq = &alx->rxq;
76 struct sk_buff *skb;
77 struct alx_buffer *cur_buf;
78 dma_addr_t dma;
79 u16 cur, next, count = 0;
80
81 next = cur = rxq->write_idx;
82 if (++next == alx->rx_ringsz)
83 next = 0;
84 cur_buf = &rxq->bufs[cur];
85
86 while (!cur_buf->skb && next != rxq->read_idx) {
87 struct alx_rfd *rfd = &rxq->rfd[cur];
88
89 skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
90 if (!skb)
91 break;
92 dma = dma_map_single(&alx->hw.pdev->dev,
93 skb->data, alx->rxbuf_size,
94 DMA_FROM_DEVICE);
95 if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
96 dev_kfree_skb(skb);
97 break;
98 }
99
100 /* Unfortunately, RX descriptor buffers must be 4-byte
101 * aligned, so we can't use IP alignment.
102 */
103 if (WARN_ON(dma & 3)) {
104 dev_kfree_skb(skb);
105 break;
106 }
107
108 cur_buf->skb = skb;
109 dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
110 dma_unmap_addr_set(cur_buf, dma, dma);
111 rfd->addr = cpu_to_le64(dma);
112
113 cur = next;
114 if (++next == alx->rx_ringsz)
115 next = 0;
116 cur_buf = &rxq->bufs[cur];
117 count++;
118 }
119
120 if (count) {
121 /* flush all updates before updating hardware */
122 wmb();
123 rxq->write_idx = cur;
124 alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
125 }
126
127 return count;
128}
129
130static inline int alx_tpd_avail(struct alx_priv *alx)
131{
132 struct alx_tx_queue *txq = &alx->txq;
133
134 if (txq->write_idx >= txq->read_idx)
135 return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
136 return txq->read_idx - txq->write_idx - 1;
137}
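/* For example, with tx_ringsz = 256, write_idx = 10 and read_idx = 5
 * this reports 256 + 5 - 10 - 1 = 250 free descriptors; one slot is
 * always left unused so that a full ring can be distinguished from an
 * empty one.
 */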
138
139static bool alx_clean_tx_irq(struct alx_priv *alx)
140{
141 struct alx_tx_queue *txq = &alx->txq;
142 u16 hw_read_idx, sw_read_idx;
143 unsigned int total_bytes = 0, total_packets = 0;
144 int budget = ALX_DEFAULT_TX_WORK;
145
146 sw_read_idx = txq->read_idx;
147 hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);
148
149 if (sw_read_idx != hw_read_idx) {
150 while (sw_read_idx != hw_read_idx && budget > 0) {
151 struct sk_buff *skb;
152
153 skb = txq->bufs[sw_read_idx].skb;
154 if (skb) {
155 total_bytes += skb->len;
156 total_packets++;
157 budget--;
158 }
159
160 alx_free_txbuf(alx, sw_read_idx);
161
162 if (++sw_read_idx == alx->tx_ringsz)
163 sw_read_idx = 0;
164 }
165 txq->read_idx = sw_read_idx;
166
167 netdev_completed_queue(alx->dev, total_packets, total_bytes);
168 }
169
170 if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
171 alx_tpd_avail(alx) > alx->tx_ringsz/4)
172 netif_wake_queue(alx->dev);
173
174 return sw_read_idx == hw_read_idx;
175}
176
177static void alx_schedule_link_check(struct alx_priv *alx)
178{
179 schedule_work(&alx->link_check_wk);
180}
181
182static void alx_schedule_reset(struct alx_priv *alx)
183{
184 schedule_work(&alx->reset_wk);
185}
186
187static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
188{
189 struct alx_rx_queue *rxq = &alx->rxq;
190 struct alx_rrd *rrd;
191 struct alx_buffer *rxb;
192 struct sk_buff *skb;
193 u16 length, rfd_cleaned = 0;
194
195 while (budget > 0) {
196 rrd = &rxq->rrd[rxq->rrd_read_idx];
197 if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
198 break;
199 rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);
200
201 if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
202 RRD_SI) != rxq->read_idx ||
203 ALX_GET_FIELD(le32_to_cpu(rrd->word0),
204 RRD_NOR) != 1) {
205 alx_schedule_reset(alx);
206 return 0;
207 }
208
209 rxb = &rxq->bufs[rxq->read_idx];
210 dma_unmap_single(&alx->hw.pdev->dev,
211 dma_unmap_addr(rxb, dma),
212 dma_unmap_len(rxb, size),
213 DMA_FROM_DEVICE);
214 dma_unmap_len_set(rxb, size, 0);
215 skb = rxb->skb;
216 rxb->skb = NULL;
217
218 if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
219 rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
220 rrd->word3 = 0;
221 dev_kfree_skb_any(skb);
222 goto next_pkt;
223 }
224
225 length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
226 RRD_PKTLEN) - ETH_FCS_LEN;
227 skb_put(skb, length);
228 skb->protocol = eth_type_trans(skb, alx->dev);
229
230 skb_checksum_none_assert(skb);
231 if (alx->dev->features & NETIF_F_RXCSUM &&
232 !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
233 cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
234 switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
235 RRD_PID)) {
236 case RRD_PID_IPV6UDP:
237 case RRD_PID_IPV4UDP:
238 case RRD_PID_IPV4TCP:
239 case RRD_PID_IPV6TCP:
240 skb->ip_summed = CHECKSUM_UNNECESSARY;
241 break;
242 }
243 }
244
245 napi_gro_receive(&alx->napi, skb);
246 budget--;
247
248next_pkt:
249 if (++rxq->read_idx == alx->rx_ringsz)
250 rxq->read_idx = 0;
251 if (++rxq->rrd_read_idx == alx->rx_ringsz)
252 rxq->rrd_read_idx = 0;
253
254 if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
255 rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
256 }
257
258 if (rfd_cleaned)
259 alx_refill_rx_ring(alx, GFP_ATOMIC);
260
261 return budget > 0;
262}
263
264static int alx_poll(struct napi_struct *napi, int budget)
265{
266 struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
267 struct alx_hw *hw = &alx->hw;
268 bool complete = true;
269 unsigned long flags;
270
271 complete = alx_clean_tx_irq(alx) &&
272 alx_clean_rx_irq(alx, budget);
273
274 if (!complete)
275 return 1;
276
277 napi_complete(&alx->napi);
278
279 /* enable interrupt */
280 spin_lock_irqsave(&alx->irq_lock, flags);
281 alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
282 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
283 spin_unlock_irqrestore(&alx->irq_lock, flags);
284
285 alx_post_write(hw);
286
287 return 0;
288}
289
290static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
291{
292 struct alx_hw *hw = &alx->hw;
293 bool write_int_mask = false;
294
295 spin_lock(&alx->irq_lock);
296
297 /* ACK interrupt */
298 alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
299 intr &= alx->int_mask;
300
301 if (intr & ALX_ISR_FATAL) {
302 netif_warn(alx, hw, alx->dev,
303 "fatal interrupt 0x%x, resetting\n", intr);
304 alx_schedule_reset(alx);
305 goto out;
306 }
307
308 if (intr & ALX_ISR_ALERT)
309 netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
310
311 if (intr & ALX_ISR_PHY) {
312 /* suppress the PHY interrupt: its source is internal to the
313 * PHY, so the top-level interrupt status can only be cleared
314 * once the PHY's internal status has been cleared.
315 */
316 alx->int_mask &= ~ALX_ISR_PHY;
317 write_int_mask = true;
318 alx_schedule_link_check(alx);
319 }
320
321 if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
322 napi_schedule(&alx->napi);
323 /* mask rx/tx interrupts; they are re-enabled when NAPI completes */
324 alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
325 write_int_mask = true;
326 }
327
328 if (write_int_mask)
329 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
330
331 alx_write_mem32(hw, ALX_ISR, 0);
332
333 out:
334 spin_unlock(&alx->irq_lock);
335 return IRQ_HANDLED;
336}
337
338static irqreturn_t alx_intr_msi(int irq, void *data)
339{
340 struct alx_priv *alx = data;
341
342 return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
343}
344
345static irqreturn_t alx_intr_legacy(int irq, void *data)
346{
347 struct alx_priv *alx = data;
348 struct alx_hw *hw = &alx->hw;
349 u32 intr;
350
351 intr = alx_read_mem32(hw, ALX_ISR);
352
353 if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
354 return IRQ_NONE;
355
356 return alx_intr_handle(alx, intr);
357}
358
359static void alx_init_ring_ptrs(struct alx_priv *alx)
360{
361 struct alx_hw *hw = &alx->hw;
362 u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
363
364 alx->rxq.read_idx = 0;
365 alx->rxq.write_idx = 0;
366 alx->rxq.rrd_read_idx = 0;
367 alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
368 alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
369 alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
370 alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
371 alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
372 alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
373
374 alx->txq.read_idx = 0;
375 alx->txq.write_idx = 0;
376 alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
377 alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
378 alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
379
380 /* load these pointers into the chip */
381 alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
382}
383
384static void alx_free_txring_buf(struct alx_priv *alx)
385{
386 struct alx_tx_queue *txq = &alx->txq;
387 int i;
388
389 if (!txq->bufs)
390 return;
391
392 for (i = 0; i < alx->tx_ringsz; i++)
393 alx_free_txbuf(alx, i);
394
395 memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
396 memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
397 txq->write_idx = 0;
398 txq->read_idx = 0;
399
400 netdev_reset_queue(alx->dev);
401}
402
403static void alx_free_rxring_buf(struct alx_priv *alx)
404{
405 struct alx_rx_queue *rxq = &alx->rxq;
406 struct alx_buffer *cur_buf;
407 u16 i;
408
409 if (rxq == NULL)
410 return;
411
412 for (i = 0; i < alx->rx_ringsz; i++) {
413 cur_buf = rxq->bufs + i;
414 if (cur_buf->skb) {
415 dma_unmap_single(&alx->hw.pdev->dev,
416 dma_unmap_addr(cur_buf, dma),
417 dma_unmap_len(cur_buf, size),
418 DMA_FROM_DEVICE);
419 dev_kfree_skb(cur_buf->skb);
420 cur_buf->skb = NULL;
421 dma_unmap_len_set(cur_buf, size, 0);
422 dma_unmap_addr_set(cur_buf, dma, 0);
423 }
424 }
425
426 rxq->write_idx = 0;
427 rxq->read_idx = 0;
428 rxq->rrd_read_idx = 0;
429}
430
431static void alx_free_buffers(struct alx_priv *alx)
432{
433 alx_free_txring_buf(alx);
434 alx_free_rxring_buf(alx);
435}
436
437static int alx_reinit_rings(struct alx_priv *alx)
438{
439 alx_free_buffers(alx);
440
441 alx_init_ring_ptrs(alx);
442
443 if (!alx_refill_rx_ring(alx, GFP_KERNEL))
444 return -ENOMEM;
445
446 return 0;
447}
448
449static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
450{
451 u32 crc32, bit, reg;
452
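 /* the top CRC bit selects HASH_TBL0 or HASH_TBL1 and bits 30:26
  * select one of its 32 bits, giving a 64-bucket imperfect
  * multicast filter
  */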
453 crc32 = ether_crc(ETH_ALEN, addr);
454 reg = (crc32 >> 31) & 0x1;
455 bit = (crc32 >> 26) & 0x1F;
456
457 mc_hash[reg] |= BIT(bit);
458}
459
460static void __alx_set_rx_mode(struct net_device *netdev)
461{
462 struct alx_priv *alx = netdev_priv(netdev);
463 struct alx_hw *hw = &alx->hw;
464 struct netdev_hw_addr *ha;
465 u32 mc_hash[2] = {};
466
467 if (!(netdev->flags & IFF_ALLMULTI)) {
468 netdev_for_each_mc_addr(ha, netdev)
469 alx_add_mc_addr(hw, ha->addr, mc_hash);
470
471 alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
472 alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
473 }
474
475 hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
476 if (netdev->flags & IFF_PROMISC)
477 hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
478 if (netdev->flags & IFF_ALLMULTI)
479 hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;
480
481 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
482}
483
484static void alx_set_rx_mode(struct net_device *netdev)
485{
486 __alx_set_rx_mode(netdev);
487}
488
489static int alx_set_mac_address(struct net_device *netdev, void *data)
490{
491 struct alx_priv *alx = netdev_priv(netdev);
492 struct alx_hw *hw = &alx->hw;
493 struct sockaddr *addr = data;
494
495 if (!is_valid_ether_addr(addr->sa_data))
496 return -EADDRNOTAVAIL;
497
498 if (netdev->addr_assign_type & NET_ADDR_RANDOM)
499 netdev->addr_assign_type ^= NET_ADDR_RANDOM;
500
501 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
502 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
503 alx_set_macaddr(hw, hw->mac_addr);
504
505 return 0;
506}
507
508static int alx_alloc_descriptors(struct alx_priv *alx)
509{
510 alx->txq.bufs = kcalloc(alx->tx_ringsz,
511 sizeof(struct alx_buffer),
512 GFP_KERNEL);
513 if (!alx->txq.bufs)
514 return -ENOMEM;
515
516 alx->rxq.bufs = kcalloc(alx->rx_ringsz,
517 sizeof(struct alx_buffer),
518 GFP_KERNEL);
519 if (!alx->rxq.bufs)
520 goto out_free;
521
522 /* physical tx/rx ring descriptors
523 *
524 * Allocate them as a single chunk because they must not cross a
525 * 4G boundary (hardware has a single register for high 32 bits
526 * of addresses only)
527 */
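 /* resulting layout within the single allocation:
  *   offset 0                             TPD (tx) ring
  *   + tx_ringsz * sizeof(alx_txd)        RRD (rx return) ring
  *   + rx_ringsz * sizeof(alx_rrd)        RFD (rx free) ring
  */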
528 alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
529 sizeof(struct alx_rrd) * alx->rx_ringsz +
530 sizeof(struct alx_rfd) * alx->rx_ringsz;
531 alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
532 alx->descmem.size,
533 &alx->descmem.dma,
534 GFP_KERNEL);
535 if (!alx->descmem.virt)
536 goto out_free;
537
538 alx->txq.tpd = (void *)alx->descmem.virt;
539 alx->txq.tpd_dma = alx->descmem.dma;
540
541 /* alignment requirement for next block */
542 BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
543
544 alx->rxq.rrd =
545 (void *)((u8 *)alx->descmem.virt +
546 sizeof(struct alx_txd) * alx->tx_ringsz);
547 alx->rxq.rrd_dma = alx->descmem.dma +
548 sizeof(struct alx_txd) * alx->tx_ringsz;
549
550 /* alignment requirement for next block */
551 BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
552
553 alx->rxq.rfd =
554 (void *)((u8 *)alx->descmem.virt +
555 sizeof(struct alx_txd) * alx->tx_ringsz +
556 sizeof(struct alx_rrd) * alx->rx_ringsz);
557 alx->rxq.rfd_dma = alx->descmem.dma +
558 sizeof(struct alx_txd) * alx->tx_ringsz +
559 sizeof(struct alx_rrd) * alx->rx_ringsz;
560
561 return 0;
562out_free:
563 kfree(alx->txq.bufs);
564 kfree(alx->rxq.bufs);
565 return -ENOMEM;
566}
567
568static int alx_alloc_rings(struct alx_priv *alx)
569{
570 int err;
571
572 err = alx_alloc_descriptors(alx);
573 if (err)
574 return err;
575
576 alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
577 alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
578 alx->tx_ringsz = alx->tx_ringsz;
579
580 netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
581
582 alx_reinit_rings(alx);
583 return 0;
584}
585
586static void alx_free_rings(struct alx_priv *alx)
587{
588 netif_napi_del(&alx->napi);
589 alx_free_buffers(alx);
590
591 kfree(alx->txq.bufs);
592 kfree(alx->rxq.bufs);
593
594 dma_free_coherent(&alx->hw.pdev->dev,
595 alx->descmem.size,
596 alx->descmem.virt,
597 alx->descmem.dma);
598}
599
600static void alx_config_vector_mapping(struct alx_priv *alx)
601{
602 struct alx_hw *hw = &alx->hw;
603
604 alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
605 alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
606 alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
607}
608
609static void alx_irq_enable(struct alx_priv *alx)
610{
611 struct alx_hw *hw = &alx->hw;
612
613 /* level-1 interrupt switch */
614 alx_write_mem32(hw, ALX_ISR, 0);
615 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
616 alx_post_write(hw);
617}
618
619static void alx_irq_disable(struct alx_priv *alx)
620{
621 struct alx_hw *hw = &alx->hw;
622
623 alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
624 alx_write_mem32(hw, ALX_IMR, 0);
625 alx_post_write(hw);
626
627 synchronize_irq(alx->hw.pdev->irq);
628}
629
630static int alx_request_irq(struct alx_priv *alx)
631{
632 struct pci_dev *pdev = alx->hw.pdev;
633 struct alx_hw *hw = &alx->hw;
634 int err;
635 u32 msi_ctrl;
636
637 msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
638
639 if (!pci_enable_msi(alx->hw.pdev)) {
640 alx->msi = true;
641
642 alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
643 msi_ctrl | ALX_MSI_MASK_SEL_LINE);
644 err = request_irq(pdev->irq, alx_intr_msi, 0,
645 alx->dev->name, alx);
646 if (!err)
647 goto out;
648 /* fall back to legacy interrupt */
649 pci_disable_msi(alx->hw.pdev);
650 }
651
652 alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
653 err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
654 alx->dev->name, alx);
655out:
656 if (!err)
657 alx_config_vector_mapping(alx);
658 return err;
659}
660
661static void alx_free_irq(struct alx_priv *alx)
662{
663 struct pci_dev *pdev = alx->hw.pdev;
664
665 free_irq(pdev->irq, alx);
666
667 if (alx->msi) {
668 pci_disable_msi(alx->hw.pdev);
669 alx->msi = false;
670 }
671}
672
673static int alx_identify_hw(struct alx_priv *alx)
674{
675 struct alx_hw *hw = &alx->hw;
676 int rev = alx_hw_revision(hw);
677
678 if (rev > ALX_REV_C0)
679 return -EINVAL;
680
681 hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
682
683 return 0;
684}
685
686static int alx_init_sw(struct alx_priv *alx)
687{
688 struct pci_dev *pdev = alx->hw.pdev;
689 struct alx_hw *hw = &alx->hw;
690 int err;
691
692 err = alx_identify_hw(alx);
693 if (err) {
694 dev_err(&pdev->dev, "unrecognized chip, aborting\n");
695 return err;
696 }
697
698 alx->hw.lnk_patch =
699 pdev->device == ALX_DEV_ID_AR8161 &&
700 pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
701 pdev->subsystem_device == 0x0091 &&
702 pdev->revision == 0;
703
704 hw->smb_timer = 400;
705 hw->mtu = alx->dev->mtu;
706 alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
707 alx->tx_ringsz = 256;
708 alx->rx_ringsz = 512;
709 hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
710 hw->imt = 200;
711 alx->int_mask = ALX_ISR_MISC;
712 hw->dma_chnl = hw->max_dma_chnl;
713 hw->ith_tpd = alx->tx_ringsz / 3;
714 hw->link_speed = SPEED_UNKNOWN;
715 hw->adv_cfg = ADVERTISED_Autoneg |
716 ADVERTISED_10baseT_Half |
717 ADVERTISED_10baseT_Full |
718 ADVERTISED_100baseT_Full |
719 ADVERTISED_100baseT_Half |
720 ADVERTISED_1000baseT_Full;
721 hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
722
723 hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
724 ALX_MAC_CTRL_MHASH_ALG_HI5B |
725 ALX_MAC_CTRL_BRD_EN |
726 ALX_MAC_CTRL_PCRCE |
727 ALX_MAC_CTRL_CRCE |
728 ALX_MAC_CTRL_RXFC_EN |
729 ALX_MAC_CTRL_TXFC_EN |
730 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
731
732 return err;
733}
734
735
736static netdev_features_t alx_fix_features(struct net_device *netdev,
737 netdev_features_t features)
738{
739 if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
740 features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
741
742 return features;
743}
744
745static void alx_netif_stop(struct alx_priv *alx)
746{
747 alx->dev->trans_start = jiffies;
748 if (netif_carrier_ok(alx->dev)) {
749 netif_carrier_off(alx->dev);
750 netif_tx_disable(alx->dev);
751 napi_disable(&alx->napi);
752 }
753}
754
755static void alx_halt(struct alx_priv *alx)
756{
757 struct alx_hw *hw = &alx->hw;
758
759 alx_netif_stop(alx);
760 hw->link_speed = SPEED_UNKNOWN;
761
762 alx_reset_mac(hw);
763
764 /* disable l0s/l1 */
765 alx_enable_aspm(hw, false, false);
766 alx_irq_disable(alx);
767 alx_free_buffers(alx);
768}
769
770static void alx_configure(struct alx_priv *alx)
771{
772 struct alx_hw *hw = &alx->hw;
773
774 alx_configure_basic(hw);
775 alx_disable_rss(hw);
776 __alx_set_rx_mode(alx->dev);
777
778 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
779}
780
781static void alx_activate(struct alx_priv *alx)
782{
783 /* hardware settings were lost, restore them */
784 alx_reinit_rings(alx);
785 alx_configure(alx);
786
787 /* clear old interrupts */
788 alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
789
790 alx_irq_enable(alx);
791
792 alx_schedule_link_check(alx);
793}
794
795static void alx_reinit(struct alx_priv *alx)
796{
797 ASSERT_RTNL();
798
799 alx_halt(alx);
800 alx_activate(alx);
801}
802
803static int alx_change_mtu(struct net_device *netdev, int mtu)
804{
805 struct alx_priv *alx = netdev_priv(netdev);
806 int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
807
808 if ((max_frame < ALX_MIN_FRAME_SIZE) ||
809 (max_frame > ALX_MAX_FRAME_SIZE))
810 return -EINVAL;
811
812 if (netdev->mtu == mtu)
813 return 0;
814
815 netdev->mtu = mtu;
816 alx->hw.mtu = mtu;
817 alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
818 ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
819 netdev_update_features(netdev);
820 if (netif_running(netdev))
821 alx_reinit(alx);
822 return 0;
823}
824
825static void alx_netif_start(struct alx_priv *alx)
826{
827 netif_tx_wake_all_queues(alx->dev);
828 napi_enable(&alx->napi);
829 netif_carrier_on(alx->dev);
830}
831
832static int __alx_open(struct alx_priv *alx, bool resume)
833{
834 int err;
835
836 if (!resume)
837 netif_carrier_off(alx->dev);
838
839 err = alx_alloc_rings(alx);
840 if (err)
841 return err;
842
843 alx_configure(alx);
844
845 err = alx_request_irq(alx);
846 if (err)
847 goto out_free_rings;
848
849 /* clear old interrupts */
850 alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
851
852 alx_irq_enable(alx);
853
854 if (!resume)
855 netif_tx_start_all_queues(alx->dev);
856
857 alx_schedule_link_check(alx);
858 return 0;
859
860out_free_rings:
861 alx_free_rings(alx);
862 return err;
863}
864
865static void __alx_stop(struct alx_priv *alx)
866{
867 alx_halt(alx);
868 alx_free_irq(alx);
869 alx_free_rings(alx);
870}
871
872static const char *alx_speed_desc(u16 speed)
873{
874 switch (speed) {
875 case SPEED_1000 + DUPLEX_FULL:
876 return "1 Gbps Full";
877 case SPEED_100 + DUPLEX_FULL:
878 return "100 Mbps Full";
879 case SPEED_100 + DUPLEX_HALF:
880 return "100 Mbps Half";
881 case SPEED_10 + DUPLEX_FULL:
882 return "10 Mbps Full";
883 case SPEED_10 + DUPLEX_HALF:
884 return "10 Mbps Half";
885 default:
886 return "Unknown speed";
887 }
888}
889
890static void alx_check_link(struct alx_priv *alx)
891{
892 struct alx_hw *hw = &alx->hw;
893 unsigned long flags;
894 int speed, old_speed;
895 int err;
896
897 /* clear PHY internal interrupt status, otherwise the main
898 * interrupt status will be asserted forever
899 */
900 alx_clear_phy_intr(hw);
901
902 err = alx_get_phy_link(hw, &speed);
903 if (err < 0)
904 goto reset;
905
906 spin_lock_irqsave(&alx->irq_lock, flags);
907 alx->int_mask |= ALX_ISR_PHY;
908 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
909 spin_unlock_irqrestore(&alx->irq_lock, flags);
910
911 old_speed = hw->link_speed;
912
913 if (old_speed == speed)
914 return;
915 hw->link_speed = speed;
916
917 if (speed != SPEED_UNKNOWN) {
918 netif_info(alx, link, alx->dev,
919 "NIC Up: %s\n", alx_speed_desc(speed));
920 alx_post_phy_link(hw);
921 alx_enable_aspm(hw, true, true);
922 alx_start_mac(hw);
923
924 if (old_speed == SPEED_UNKNOWN)
925 alx_netif_start(alx);
926 } else {
927 /* link is now down */
928 alx_netif_stop(alx);
929 netif_info(alx, link, alx->dev, "Link Down\n");
930 err = alx_reset_mac(hw);
931 if (err)
932 goto reset;
933 alx_irq_disable(alx);
934
935 /* MAC reset causes all HW settings to be lost, restore all */
936 err = alx_reinit_rings(alx);
937 if (err)
938 goto reset;
939 alx_configure(alx);
940 alx_enable_aspm(hw, false, true);
941 alx_post_phy_link(hw);
942 alx_irq_enable(alx);
943 }
944
945 return;
946
947reset:
948 alx_schedule_reset(alx);
949}
950
951static int alx_open(struct net_device *netdev)
952{
953 return __alx_open(netdev_priv(netdev), false);
954}
955
956static int alx_stop(struct net_device *netdev)
957{
958 __alx_stop(netdev_priv(netdev));
959 return 0;
960}
961
962static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
963{
964 struct alx_priv *alx = pci_get_drvdata(pdev);
965 struct net_device *netdev = alx->dev;
966 struct alx_hw *hw = &alx->hw;
967 int err, speed;
968
969 netif_device_detach(netdev);
970
971 if (netif_running(netdev))
972 __alx_stop(alx);
973
974#ifdef CONFIG_PM_SLEEP
975 err = pci_save_state(pdev);
976 if (err)
977 return err;
978#endif
979
980 err = alx_select_powersaving_speed(hw, &speed);
981 if (err)
982 return err;
983 err = alx_clear_phy_intr(hw);
984 if (err)
985 return err;
986 err = alx_pre_suspend(hw, speed);
987 if (err)
988 return err;
989 err = alx_config_wol(hw);
990 if (err)
991 return err;
992
993 *wol_en = false;
994 if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) {
995 netif_info(alx, wol, netdev,
996 "wol: ctrl=%X, speed=%X\n",
997 hw->sleep_ctrl, speed);
998 device_set_wakeup_enable(&pdev->dev, true);
999 *wol_en = true;
1000 }
1001
1002 pci_disable_device(pdev);
1003
1004 return 0;
1005}
1006
1007static void alx_shutdown(struct pci_dev *pdev)
1008{
1009 int err;
1010 bool wol_en;
1011
1012 err = __alx_shutdown(pdev, &wol_en);
1013 if (!err) {
1014 pci_wake_from_d3(pdev, wol_en);
1015 pci_set_power_state(pdev, PCI_D3hot);
1016 } else {
1017 dev_err(&pdev->dev, "shutdown fail %d\n", err);
1018 }
1019}
1020
1021static void alx_link_check(struct work_struct *work)
1022{
1023 struct alx_priv *alx;
1024
1025 alx = container_of(work, struct alx_priv, link_check_wk);
1026
1027 rtnl_lock();
1028 alx_check_link(alx);
1029 rtnl_unlock();
1030}
1031
1032static void alx_reset(struct work_struct *work)
1033{
1034 struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
1035
1036 rtnl_lock();
1037 alx_reinit(alx);
1038 rtnl_unlock();
1039}
1040
1041static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
1042{
1043 u8 cso, css;
1044
1045 if (skb->ip_summed != CHECKSUM_PARTIAL)
1046 return 0;
1047
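 /* the TPD carries the checksum start and offset in units of 16-bit
  * words (hence the >> 1 below), so an odd byte offset cannot be
  * expressed and is rejected
  */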
1048 cso = skb_checksum_start_offset(skb);
1049 if (cso & 1)
1050 return -EINVAL;
1051
1052 css = cso + skb->csum_offset;
1053 first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
1054 first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
1055 first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
1056
1057 return 0;
1058}
1059
1060static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
1061{
1062 struct alx_tx_queue *txq = &alx->txq;
1063 struct alx_txd *tpd, *first_tpd;
1064 dma_addr_t dma;
1065 int maplen, f, first_idx = txq->write_idx;
1066
1067 first_tpd = &txq->tpd[txq->write_idx];
1068 tpd = first_tpd;
1069
1070 maplen = skb_headlen(skb);
1071 dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
1072 DMA_TO_DEVICE);
1073 if (dma_mapping_error(&alx->hw.pdev->dev, dma))
1074 goto err_dma;
1075
1076 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
1077 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
1078
1079 tpd->adrl.addr = cpu_to_le64(dma);
1080 tpd->len = cpu_to_le16(maplen);
1081
1082 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
1083 struct skb_frag_struct *frag;
1084
1085 frag = &skb_shinfo(skb)->frags[f];
1086
1087 if (++txq->write_idx == alx->tx_ringsz)
1088 txq->write_idx = 0;
1089 tpd = &txq->tpd[txq->write_idx];
1090
1091 tpd->word1 = first_tpd->word1;
1092
1093 maplen = skb_frag_size(frag);
1094 dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
1095 maplen, DMA_TO_DEVICE);
1096 if (dma_mapping_error(&alx->hw.pdev->dev, dma))
1097 goto err_dma;
1098 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
1099 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
1100
1101 tpd->adrl.addr = cpu_to_le64(dma);
1102 tpd->len = cpu_to_le16(maplen);
1103 }
1104
1105 /* last TPD, set EOP flag and store skb */
1106 tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
1107 txq->bufs[txq->write_idx].skb = skb;
1108
1109 if (++txq->write_idx == alx->tx_ringsz)
1110 txq->write_idx = 0;
1111
1112 return 0;
1113
1114err_dma:
1115 f = first_idx;
1116 while (f != txq->write_idx) {
1117 alx_free_txbuf(alx, f);
1118 if (++f == alx->tx_ringsz)
1119 f = 0;
1120 }
1121 return -ENOMEM;
1122}
1123
1124static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
1125 struct net_device *netdev)
1126{
1127 struct alx_priv *alx = netdev_priv(netdev);
1128 struct alx_tx_queue *txq = &alx->txq;
1129 struct alx_txd *first;
1130 int tpdreq = skb_shinfo(skb)->nr_frags + 1;
1131
1132 if (alx_tpd_avail(alx) < tpdreq) {
1133 netif_stop_queue(alx->dev);
1134 goto drop;
1135 }
1136
1137 first = &txq->tpd[txq->write_idx];
1138 memset(first, 0, sizeof(*first));
1139
1140 if (alx_tx_csum(skb, first))
1141 goto drop;
1142
1143 if (alx_map_tx_skb(alx, skb) < 0)
1144 goto drop;
1145
1146 netdev_sent_queue(alx->dev, skb->len);
1147
1148 /* flush updates before updating hardware */
1149 wmb();
1150 alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);
1151
1152 if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
1153 netif_stop_queue(alx->dev);
1154
1155 return NETDEV_TX_OK;
1156
1157drop:
1158 dev_kfree_skb(skb);
1159 return NETDEV_TX_OK;
1160}
1161
1162static void alx_tx_timeout(struct net_device *dev)
1163{
1164 struct alx_priv *alx = netdev_priv(dev);
1165
1166 alx_schedule_reset(alx);
1167}
1168
1169static int alx_mdio_read(struct net_device *netdev,
1170 int prtad, int devad, u16 addr)
1171{
1172 struct alx_priv *alx = netdev_priv(netdev);
1173 struct alx_hw *hw = &alx->hw;
1174 u16 val;
1175 int err;
1176
1177 if (prtad != hw->mdio.prtad)
1178 return -EINVAL;
1179
1180 if (devad == MDIO_DEVAD_NONE)
1181 err = alx_read_phy_reg(hw, addr, &val);
1182 else
1183 err = alx_read_phy_ext(hw, devad, addr, &val);
1184
1185 if (err)
1186 return err;
1187 return val;
1188}
1189
1190static int alx_mdio_write(struct net_device *netdev,
1191 int prtad, int devad, u16 addr, u16 val)
1192{
1193 struct alx_priv *alx = netdev_priv(netdev);
1194 struct alx_hw *hw = &alx->hw;
1195
1196 if (prtad != hw->mdio.prtad)
1197 return -EINVAL;
1198
1199 if (devad == MDIO_DEVAD_NONE)
1200 return alx_write_phy_reg(hw, addr, val);
1201
1202 return alx_write_phy_ext(hw, devad, addr, val);
1203}
1204
1205static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1206{
1207 struct alx_priv *alx = netdev_priv(netdev);
1208
1209 if (!netif_running(netdev))
1210 return -EAGAIN;
1211
1212 return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
1213}
1214
1215#ifdef CONFIG_NET_POLL_CONTROLLER
1216static void alx_poll_controller(struct net_device *netdev)
1217{
1218 struct alx_priv *alx = netdev_priv(netdev);
1219
1220 if (alx->msi)
1221 alx_intr_msi(0, alx);
1222 else
1223 alx_intr_legacy(0, alx);
1224}
1225#endif
1226
1227static const struct net_device_ops alx_netdev_ops = {
1228 .ndo_open = alx_open,
1229 .ndo_stop = alx_stop,
1230 .ndo_start_xmit = alx_start_xmit,
1231 .ndo_set_rx_mode = alx_set_rx_mode,
1232 .ndo_validate_addr = eth_validate_addr,
1233 .ndo_set_mac_address = alx_set_mac_address,
1234 .ndo_change_mtu = alx_change_mtu,
1235 .ndo_do_ioctl = alx_ioctl,
1236 .ndo_tx_timeout = alx_tx_timeout,
1237 .ndo_fix_features = alx_fix_features,
1238#ifdef CONFIG_NET_POLL_CONTROLLER
1239 .ndo_poll_controller = alx_poll_controller,
1240#endif
1241};
1242
1243static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1244{
1245 struct net_device *netdev;
1246 struct alx_priv *alx;
1247 struct alx_hw *hw;
1248 bool phy_configured;
1249 int bars, pm_cap, err;
1250
1251 err = pci_enable_device_mem(pdev);
1252 if (err)
1253 return err;
1254
1255 /* The alx chip can DMA to 64-bit addresses, but it uses a single
1256 * shared register for the high 32 bits, so only a single, aligned,
1257 * 4 GB physical address range can be used for descriptors.
1258 */
1259 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
1260 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1261 dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
1262 } else {
1263 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1264 if (err) {
1265 err = dma_set_coherent_mask(&pdev->dev,
1266 DMA_BIT_MASK(32));
1267 if (err) {
1268 dev_err(&pdev->dev,
1269 "No usable DMA config, aborting\n");
1270 goto out_pci_disable;
1271 }
1272 }
1273 }
1274
1275 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1276 err = pci_request_selected_regions(pdev, bars, alx_drv_name);
1277 if (err) {
1278 dev_err(&pdev->dev,
1279 "pci_request_selected_regions failed(bars:%d)\n", bars);
1280 goto out_pci_disable;
1281 }
1282
1283 pci_enable_pcie_error_reporting(pdev);
1284 pci_set_master(pdev);
1285
1286 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
1287 if (pm_cap == 0) {
1288 dev_err(&pdev->dev,
1289 "Can't find power management capability, aborting\n");
1290 err = -EIO;
1291 goto out_pci_release;
1292 }
1293
1294 err = pci_set_power_state(pdev, PCI_D0);
1295 if (err)
1296 goto out_pci_release;
1297
1298 netdev = alloc_etherdev(sizeof(*alx));
1299 if (!netdev) {
1300 err = -ENOMEM;
1301 goto out_pci_release;
1302 }
1303
1304 SET_NETDEV_DEV(netdev, &pdev->dev);
1305 alx = netdev_priv(netdev);
1306 alx->dev = netdev;
1307 alx->hw.pdev = pdev;
1308 alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
1309 NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
1310 hw = &alx->hw;
1311 pci_set_drvdata(pdev, alx);
1312
1313 hw->hw_addr = pci_ioremap_bar(pdev, 0);
1314 if (!hw->hw_addr) {
1315 dev_err(&pdev->dev, "cannot map device registers\n");
1316 err = -EIO;
1317 goto out_free_netdev;
1318 }
1319
1320 netdev->netdev_ops = &alx_netdev_ops;
1321 SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
1322 netdev->irq = pdev->irq;
1323 netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
1324
1325 if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
1326 pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
1327
1328 err = alx_init_sw(alx);
1329 if (err) {
1330 dev_err(&pdev->dev, "net device private data init failed\n");
1331 goto out_unmap;
1332 }
1333
1334 alx_reset_pcie(hw);
1335
1336 phy_configured = alx_phy_configured(hw);
1337
1338 if (!phy_configured)
1339 alx_reset_phy(hw);
1340
1341 err = alx_reset_mac(hw);
1342 if (err) {
1343 dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
1344 goto out_unmap;
1345 }
1346
1347 /* setup link to put it in a known good starting state */
1348 if (!phy_configured) {
1349 err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
1350 if (err) {
1351 dev_err(&pdev->dev,
1352 "failed to configure PHY speed/duplex (err=%d)\n",
1353 err);
1354 goto out_unmap;
1355 }
1356 }
1357
1358 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
1359
1360 if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
1361 dev_warn(&pdev->dev,
1362 "Invalid permanent address programmed, using random one\n");
1363 eth_hw_addr_random(netdev);
1364 memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
1365 }
1366
1367 memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
1368 memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
1369 memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
1370
1371 hw->mdio.prtad = 0;
1372 hw->mdio.mmds = 0;
1373 hw->mdio.dev = netdev;
1374 hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
1375 MDIO_SUPPORTS_C22 |
1376 MDIO_EMULATE_C22;
1377 hw->mdio.mdio_read = alx_mdio_read;
1378 hw->mdio.mdio_write = alx_mdio_write;
1379
1380 if (!alx_get_phy_info(hw)) {
1381 dev_err(&pdev->dev, "failed to identify PHY\n");
1382 err = -EIO;
1383 goto out_unmap;
1384 }
1385
1386 INIT_WORK(&alx->link_check_wk, alx_link_check);
1387 INIT_WORK(&alx->reset_wk, alx_reset);
1388 spin_lock_init(&alx->hw.mdio_lock);
1389 spin_lock_init(&alx->irq_lock);
1390
1391 netif_carrier_off(netdev);
1392
1393 err = register_netdev(netdev);
1394 if (err) {
1395 dev_err(&pdev->dev, "register netdevice failed\n");
1396 goto out_unmap;
1397 }
1398
1399 device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl);
1400
1401 netdev_info(netdev,
1402 "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
1403 netdev->dev_addr);
1404
1405 return 0;
1406
1407out_unmap:
1408 iounmap(hw->hw_addr);
1409out_free_netdev:
1410 free_netdev(netdev);
1411out_pci_release:
1412 pci_release_selected_regions(pdev, bars);
1413out_pci_disable:
1414 pci_disable_device(pdev);
1415 return err;
1416}
1417
1418static void alx_remove(struct pci_dev *pdev)
1419{
1420 struct alx_priv *alx = pci_get_drvdata(pdev);
1421 struct alx_hw *hw = &alx->hw;
1422
1423 cancel_work_sync(&alx->link_check_wk);
1424 cancel_work_sync(&alx->reset_wk);
1425
1426 /* restore permanent mac address */
1427 alx_set_macaddr(hw, hw->perm_addr);
1428
1429 unregister_netdev(alx->dev);
1430 iounmap(hw->hw_addr);
1431 pci_release_selected_regions(pdev,
1432 pci_select_bars(pdev, IORESOURCE_MEM));
1433
1434 pci_disable_pcie_error_reporting(pdev);
1435 pci_disable_device(pdev);
1436 pci_set_drvdata(pdev, NULL);
1437
1438 free_netdev(alx->dev);
1439}
1440
1441#ifdef CONFIG_PM_SLEEP
1442static int alx_suspend(struct device *dev)
1443{
1444 struct pci_dev *pdev = to_pci_dev(dev);
1445 int err;
1446 bool wol_en;
1447
1448 err = __alx_shutdown(pdev, &wol_en);
1449 if (err) {
1450 dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err);
1451 return err;
1452 }
1453
1454 if (wol_en) {
1455 pci_prepare_to_sleep(pdev);
1456 } else {
1457 pci_wake_from_d3(pdev, false);
1458 pci_set_power_state(pdev, PCI_D3hot);
1459 }
1460
1461 return 0;
1462}
1463
1464static int alx_resume(struct device *dev)
1465{
1466 struct pci_dev *pdev = to_pci_dev(dev);
1467 struct alx_priv *alx = pci_get_drvdata(pdev);
1468 struct net_device *netdev = alx->dev;
1469 struct alx_hw *hw = &alx->hw;
1470 int err;
1471
1472 pci_set_power_state(pdev, PCI_D0);
1473 pci_restore_state(pdev);
1474 pci_save_state(pdev);
1475
1476 pci_enable_wake(pdev, PCI_D3hot, 0);
1477 pci_enable_wake(pdev, PCI_D3cold, 0);
1478
1479 hw->link_speed = SPEED_UNKNOWN;
1480 alx->int_mask = ALX_ISR_MISC;
1481
1482 alx_reset_pcie(hw);
1483 alx_reset_phy(hw);
1484
1485 err = alx_reset_mac(hw);
1486 if (err) {
1487 netif_err(alx, hw, alx->dev,
1488 "resume:reset_mac fail %d\n", err);
1489 return -EIO;
1490 }
1491
1492 err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
1493 if (err) {
1494 netif_err(alx, hw, alx->dev,
1495 "resume:setup_speed_duplex fail %d\n", err);
1496 return -EIO;
1497 }
1498
1499 if (netif_running(netdev)) {
1500 err = __alx_open(alx, true);
1501 if (err)
1502 return err;
1503 }
1504
1505 netif_device_attach(netdev);
1506
1507 return err;
1508}
1509#endif
1510
1511static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
1512 pci_channel_state_t state)
1513{
1514 struct alx_priv *alx = pci_get_drvdata(pdev);
1515 struct net_device *netdev = alx->dev;
1516 pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;
1517
1518 dev_info(&pdev->dev, "pci error detected\n");
1519
1520 rtnl_lock();
1521
1522 if (netif_running(netdev)) {
1523 netif_device_detach(netdev);
1524 alx_halt(alx);
1525 }
1526
1527 if (state == pci_channel_io_perm_failure)
1528 rc = PCI_ERS_RESULT_DISCONNECT;
1529 else
1530 pci_disable_device(pdev);
1531
1532 rtnl_unlock();
1533
1534 return rc;
1535}
1536
1537static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
1538{
1539 struct alx_priv *alx = pci_get_drvdata(pdev);
1540 struct alx_hw *hw = &alx->hw;
1541 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
1542
1543 dev_info(&pdev->dev, "pci error slot reset\n");
1544
1545 rtnl_lock();
1546
1547 if (pci_enable_device(pdev)) {
1548 dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
1549 goto out;
1550 }
1551
1552 pci_set_master(pdev);
1553 pci_enable_wake(pdev, PCI_D3hot, 0);
1554 pci_enable_wake(pdev, PCI_D3cold, 0);
1555
1556 alx_reset_pcie(hw);
1557 if (!alx_reset_mac(hw))
1558 rc = PCI_ERS_RESULT_RECOVERED;
1559out:
1560 pci_cleanup_aer_uncorrect_error_status(pdev);
1561
1562 rtnl_unlock();
1563
1564 return rc;
1565}
1566
1567static void alx_pci_error_resume(struct pci_dev *pdev)
1568{
1569 struct alx_priv *alx = pci_get_drvdata(pdev);
1570 struct net_device *netdev = alx->dev;
1571
1572 dev_info(&pdev->dev, "pci error resume\n");
1573
1574 rtnl_lock();
1575
1576 if (netif_running(netdev)) {
1577 alx_activate(alx);
1578 netif_device_attach(netdev);
1579 }
1580
1581 rtnl_unlock();
1582}
1583
1584static const struct pci_error_handlers alx_err_handlers = {
1585 .error_detected = alx_pci_error_detected,
1586 .slot_reset = alx_pci_error_slot_reset,
1587 .resume = alx_pci_error_resume,
1588};
1589
1590#ifdef CONFIG_PM_SLEEP
1591static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
1592#define ALX_PM_OPS (&alx_pm_ops)
1593#else
1594#define ALX_PM_OPS NULL
1595#endif
1596
1597static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
1598 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
1599 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1600 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
1601 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1602 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
1603 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1604 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
1605 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
1606 {}
1607};
1608
1609static struct pci_driver alx_driver = {
1610 .name = alx_drv_name,
1611 .id_table = alx_pci_tbl,
1612 .probe = alx_probe,
1613 .remove = alx_remove,
1614 .shutdown = alx_shutdown,
1615 .err_handler = &alx_err_handlers,
1616 .driver.pm = ALX_PM_OPS,
1617};
1618
1619module_pci_driver(alx_driver);
1620MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
1621MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
1622MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
1623MODULE_DESCRIPTION(
1624 "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
1625MODULE_LICENSE("GPL");
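
As a quick illustration of what alx_tx_csum() above encodes: the hardware takes the checksum start and the checksum result position as 16-bit word offsets inside the TPD, so the byte offsets must be even and are stored divided by two. The standalone sketch below is not part of the patch; the TPD_* shift values are placeholders (the driver's real ones live in hw.h) and demo_encode_csum() is a made-up name.

#include <stdint.h>
#include <stdio.h>

/* placeholder shift values -- the driver's real ones are defined in hw.h */
#define TPD_CXSUMSTART_SHIFT	18
#define TPD_CXSUMOFFSET_SHIFT	25
#define TPD_CXSUM_EN_SHIFT	8

static int demo_encode_csum(uint32_t *word1, unsigned int cso,
			    unsigned int csum_offset)
{
	unsigned int css = cso + csum_offset;

	if (cso & 1)		/* odd byte offsets cannot be expressed */
		return -1;

	*word1 |= (cso >> 1) << TPD_CXSUMSTART_SHIFT;	/* start, in words */
	*word1 |= (css >> 1) << TPD_CXSUMOFFSET_SHIFT;	/* result, in words */
	*word1 |= 1u << TPD_CXSUM_EN_SHIFT;		/* enable csum offload */
	return 0;
}

int main(void)
{
	uint32_t word1 = 0;

	/* e.g. checksum starts 34 bytes into the frame, result 6 bytes later */
	if (!demo_encode_csum(&word1, 34, 6))
		printf("word1 = 0x%08x\n", (unsigned int)word1);
	return 0;
}
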
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
new file mode 100644
index 000000000000..e4358c98bc4e
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -0,0 +1,810 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#ifndef ALX_REG_H
36#define ALX_REG_H
37
38#define ALX_DEV_ID_AR8161 0x1091
39#define ALX_DEV_ID_E2200 0xe091
40#define ALX_DEV_ID_AR8162 0x1090
41#define ALX_DEV_ID_AR8171 0x10A1
42#define ALX_DEV_ID_AR8172 0x10A0
43
44/* rev definition,
45 * bit(0): with xD support
46 * bit(1): with Card Reader function
47 * bit(7:2): real revision
48 */
49#define ALX_PCI_REVID_SHIFT 3
50#define ALX_REV_A0 0
51#define ALX_REV_A1 1
52#define ALX_REV_B0 2
53#define ALX_REV_C0 3
54
55#define ALX_DEV_CTRL 0x0060
56#define ALX_DEV_CTRL_MAXRRS_MIN 2
57
58#define ALX_MSIX_MASK 0x0090
59
60#define ALX_UE_SVRT 0x010C
61#define ALX_UE_SVRT_FCPROTERR BIT(13)
62#define ALX_UE_SVRT_DLPROTERR BIT(4)
63
64/* eeprom & flash load register */
65#define ALX_EFLD 0x0204
66#define ALX_EFLD_F_EXIST BIT(10)
67#define ALX_EFLD_E_EXIST BIT(9)
68#define ALX_EFLD_STAT BIT(5)
69#define ALX_EFLD_START BIT(0)
70
71/* eFuse load register */
72#define ALX_SLD 0x0218
73#define ALX_SLD_STAT BIT(12)
74#define ALX_SLD_START BIT(11)
75#define ALX_SLD_MAX_TO 100
76
77#define ALX_PDLL_TRNS1 0x1104
78#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11)
79
80#define ALX_PMCTRL 0x12F8
81#define ALX_PMCTRL_HOTRST_WTEN BIT(31)
82/* bit30: L0s/L1 controlled by MAC based on throughput(setting in 15A0) */
83#define ALX_PMCTRL_ASPM_FCEN BIT(30)
84#define ALX_PMCTRL_SADLY_EN BIT(29)
85#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF
86#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24
87#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC
88/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */
89#define ALX_PMCTRL_L1REQ_TO_MASK 0xF
90#define ALX_PMCTRL_L1REQ_TO_SHIFT 20
91#define ALX_PMCTRL_L1REG_TO_DEF 0xF
92#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19)
93#define ALX_PMCTRL_L1_TIMER_MASK 0x7
94#define ALX_PMCTRL_L1_TIMER_SHIFT 16
95#define ALX_PMCTRL_L1_TIMER_16US 4
96#define ALX_PMCTRL_RCVR_WT_1US BIT(15)
97/* bit13: enable pcie clk switch in L1 state */
98#define ALX_PMCTRL_L1_CLKSW_EN BIT(13)
99#define ALX_PMCTRL_L0S_EN BIT(12)
100#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11)
101#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7)
102/* bit6: power down serdes RX */
103#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6)
104#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5)
105#define ALX_PMCTRL_L1_SRDS_EN BIT(4)
106#define ALX_PMCTRL_L1_EN BIT(3)
107
108/*******************************************************/
109/* following registers are mapped only to memory space */
110/*******************************************************/
111
112#define ALX_MASTER 0x1400
114/* bit12: 1:always select pclk from serdes, not sw to 25M */
115#define ALX_MASTER_PCLKSEL_SRDS BIT(12)
116/* bit11: irq moderation for rx */
117#define ALX_MASTER_IRQMOD2_EN BIT(11)
118/* bit10: irq moderation for tx/rx */
118#define ALX_MASTER_IRQMOD1_EN BIT(10)
119#define ALX_MASTER_SYSALVTIMER_EN BIT(7)
120#define ALX_MASTER_OOB_DIS BIT(6)
121/* bit5: wakeup without pcie clk */
122#define ALX_MASTER_WAKEN_25M BIT(5)
123/* bit0: MAC & DMA reset */
124#define ALX_MASTER_DMA_MAC_RST BIT(0)
125#define ALX_DMA_MAC_RST_TO 50
126
127#define ALX_IRQ_MODU_TIMER 0x1408
128#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF
129#define ALX_IRQ_MODU_TIMER1_SHIFT 0
130
131#define ALX_PHY_CTRL 0x140C
132#define ALX_PHY_CTRL_100AB_EN BIT(17)
133/* bit14: affect MAC & PHY, go to low power sts */
134#define ALX_PHY_CTRL_POWER_DOWN BIT(14)
135/* bit13: 1:pll always ON, 0:can switch in lpw */
136#define ALX_PHY_CTRL_PLL_ON BIT(13)
137#define ALX_PHY_CTRL_RST_ANALOG BIT(12)
138#define ALX_PHY_CTRL_HIB_PULSE BIT(11)
139#define ALX_PHY_CTRL_HIB_EN BIT(10)
140#define ALX_PHY_CTRL_IDDQ BIT(7)
141#define ALX_PHY_CTRL_GATE_25M BIT(5)
142#define ALX_PHY_CTRL_LED_MODE BIT(2)
143/* bit0: out of dsp RST state */
144#define ALX_PHY_CTRL_DSPRST_OUT BIT(0)
145#define ALX_PHY_CTRL_DSPRST_TO 80
146#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \
147 ALX_PHY_CTRL_100AB_EN | \
148 ALX_PHY_CTRL_PLL_ON)
149
150#define ALX_MAC_STS 0x1410
151#define ALX_MAC_STS_TXQ_BUSY BIT(3)
152#define ALX_MAC_STS_RXQ_BUSY BIT(2)
153#define ALX_MAC_STS_TXMAC_BUSY BIT(1)
154#define ALX_MAC_STS_RXMAC_BUSY BIT(0)
155#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \
156 ALX_MAC_STS_RXQ_BUSY | \
157 ALX_MAC_STS_TXMAC_BUSY | \
158 ALX_MAC_STS_RXMAC_BUSY)
159
160#define ALX_MDIO 0x1414
161#define ALX_MDIO_MODE_EXT BIT(30)
162#define ALX_MDIO_BUSY BIT(27)
163#define ALX_MDIO_CLK_SEL_MASK 0x7
164#define ALX_MDIO_CLK_SEL_SHIFT 24
165#define ALX_MDIO_CLK_SEL_25MD4 0
166#define ALX_MDIO_CLK_SEL_25MD128 7
167#define ALX_MDIO_START BIT(23)
168#define ALX_MDIO_SPRES_PRMBL BIT(22)
169/* bit21: 1:read,0:write */
170#define ALX_MDIO_OP_READ BIT(21)
171#define ALX_MDIO_REG_MASK 0x1F
172#define ALX_MDIO_REG_SHIFT 16
173#define ALX_MDIO_DATA_MASK 0xFFFF
174#define ALX_MDIO_DATA_SHIFT 0
175#define ALX_MDIO_MAX_AC_TO 120
176
177#define ALX_MDIO_EXTN 0x1448
178#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F
179#define ALX_MDIO_EXTN_DEVAD_SHIFT 16
180#define ALX_MDIO_EXTN_REG_MASK 0xFFFF
181#define ALX_MDIO_EXTN_REG_SHIFT 0
182
183#define ALX_SERDES 0x1424
184#define ALX_SERDES_PHYCLK_SLWDWN BIT(18)
185#define ALX_SERDES_MACCLK_SLWDWN BIT(17)
186
187#define ALX_LPI_CTRL 0x1440
188#define ALX_LPI_CTRL_EN BIT(0)
189
190/* for B0+, bit[13..] for C0+ */
191#define ALX_HRTBT_EXT_CTRL 0x1AD0
192#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F
193#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24
194#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23)
195#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22)
196#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21)
197#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20)
198#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19)
199#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18)
200#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17)
201#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16)
202#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15)
203#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14)
204#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13)
205#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12)
206#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF
207#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4
208#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3)
209#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2)
210#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1)
211#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0)
212
213#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4
214#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478
215#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8
216#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC
217#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0
218#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4
219
220/* 1B8C ~ 1B94 for C0+ */
221#define ALX_SWOI_ACER_CTRL 0x1B8C
222#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20)
223#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFF
224#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12
225#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFF
226#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0
227
228#define ALX_SWOI_IOAC_CTRL_2 0x1B90
229#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF
230#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24
231#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF
232#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12
233#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF
234#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0
235
236#define ALX_SWOI_IOAC_CTRL_3 0x1B94
237#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF
238#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24
239#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF
240#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12
241#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF
242#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0
243
244/* for B0 */
245#define ALX_IDLE_DECISN_TIMER 0x1474
246/* 1ms */
247#define ALX_IDLE_DECISN_TIMER_DEF 0x400
248
249#define ALX_MAC_CTRL 0x1480
250#define ALX_MAC_CTRL_FAST_PAUSE BIT(31)
251#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30)
252/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/
253#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29)
254#define ALX_MAC_CTRL_BRD_EN BIT(26)
255#define ALX_MAC_CTRL_MULTIALL_EN BIT(25)
256#define ALX_MAC_CTRL_SPEED_MASK 0x3
257#define ALX_MAC_CTRL_SPEED_SHIFT 20
258#define ALX_MAC_CTRL_SPEED_10_100 1
259#define ALX_MAC_CTRL_SPEED_1000 2
260#define ALX_MAC_CTRL_PROMISC_EN BIT(15)
261#define ALX_MAC_CTRL_VLANSTRIP BIT(14)
262#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF
263#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10
264#define ALX_MAC_CTRL_PCRCE BIT(7)
265#define ALX_MAC_CTRL_CRCE BIT(6)
266#define ALX_MAC_CTRL_FULLD BIT(5)
267#define ALX_MAC_CTRL_RXFC_EN BIT(3)
268#define ALX_MAC_CTRL_TXFC_EN BIT(2)
269#define ALX_MAC_CTRL_RX_EN BIT(1)
270#define ALX_MAC_CTRL_TX_EN BIT(0)
271
272#define ALX_STAD0 0x1488
273#define ALX_STAD1 0x148C
274
275#define ALX_HASH_TBL0 0x1490
276#define ALX_HASH_TBL1 0x1494
277
278#define ALX_MTU 0x149C
279#define ALX_MTU_JUMBO_TH 1514
280#define ALX_MTU_STD_ALGN 1536
281
282#define ALX_SRAM5 0x1524
283#define ALX_SRAM_RXF_LEN_MASK 0xFFF
284#define ALX_SRAM_RXF_LEN_SHIFT 0
285#define ALX_SRAM_RXF_LEN_8K (8*1024)
286
287#define ALX_SRAM9 0x1534
288#define ALX_SRAM_LOAD_PTR BIT(0)
289
290#define ALX_RX_BASE_ADDR_HI 0x1540
291
292#define ALX_TX_BASE_ADDR_HI 0x1544
293
294#define ALX_RFD_ADDR_LO 0x1550
295#define ALX_RFD_RING_SZ 0x1560
296#define ALX_RFD_BUF_SZ 0x1564
297
298#define ALX_RRD_ADDR_LO 0x1568
299#define ALX_RRD_RING_SZ 0x1578
300
301/* pri3: highest, pri0: lowest */
302#define ALX_TPD_PRI3_ADDR_LO 0x14E4
303#define ALX_TPD_PRI2_ADDR_LO 0x14E0
304#define ALX_TPD_PRI1_ADDR_LO 0x157C
305#define ALX_TPD_PRI0_ADDR_LO 0x1580
306
307/* producer index is 16bit */
308#define ALX_TPD_PRI3_PIDX 0x1618
309#define ALX_TPD_PRI2_PIDX 0x161A
310#define ALX_TPD_PRI1_PIDX 0x15F0
311#define ALX_TPD_PRI0_PIDX 0x15F2
312
313/* consumer index is 16bit */
314#define ALX_TPD_PRI3_CIDX 0x161C
315#define ALX_TPD_PRI2_CIDX 0x161E
316#define ALX_TPD_PRI1_CIDX 0x15F4
317#define ALX_TPD_PRI0_CIDX 0x15F6
318
319#define ALX_TPD_RING_SZ 0x1584
320
321#define ALX_TXQ0 0x1590
322#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF
323#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16
324#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200
325#define ALX_TXQ0_LSO_8023_EN BIT(7)
326#define ALX_TXQ0_MODE_ENHANCE BIT(6)
327#define ALX_TXQ0_EN BIT(5)
328#define ALX_TXQ0_SUPT_IPOPT BIT(4)
329#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF
330#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0
331#define ALX_TXQ_TPD_BURSTPREF_DEF 5
332
333#define ALX_TXQ1 0x1594
334/* bit11: drop large packet, len > (rfd buf) */
335#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11)
336#define ALX_TXQ1_JUMBO_TSO_TH (7*1024)
337
338#define ALX_RXQ0 0x15A0
339#define ALX_RXQ0_EN BIT(31)
340#define ALX_RXQ0_RSS_HASH_EN BIT(29)
341#define ALX_RXQ0_RSS_MODE_MASK 0x3
342#define ALX_RXQ0_RSS_MODE_SHIFT 26
343#define ALX_RXQ0_RSS_MODE_DIS 0
344#define ALX_RXQ0_RSS_MODE_MQMI 3
345#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F
346#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20
347#define ALX_RXQ0_NUM_RFD_PREF_DEF 8
348#define ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF
349#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8
350#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100
351#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128
352#define ALX_RXQ0_IPV6_PARSE_EN BIT(7)
353#define ALX_RXQ0_RSS_HSTYP_MASK 0xF
354#define ALX_RXQ0_RSS_HSTYP_SHIFT 2
355#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5)
356#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4)
357#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3)
358#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2)
359#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \
360 ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \
361 ALX_RXQ0_RSS_HSTYP_IPV6_EN | \
362 ALX_RXQ0_RSS_HSTYP_IPV4_EN)
363#define ALX_RXQ0_ASPM_THRESH_MASK 0x3
364#define ALX_RXQ0_ASPM_THRESH_SHIFT 0
365#define ALX_RXQ0_ASPM_THRESH_100M 3
366
367#define ALX_RXQ2 0x15A8
368#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF
369#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16
370#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF
371#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0
372/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
373 * rx-packet(1522) + delay-of-link(64)
374 * = 3212.
375 */
376#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212
377
378#define ALX_DMA 0x15C0
379#define ALX_DMA_RCHNL_SEL_MASK 0x3
380#define ALX_DMA_RCHNL_SEL_SHIFT 26
381#define ALX_DMA_WDLY_CNT_MASK 0xF
382#define ALX_DMA_WDLY_CNT_SHIFT 16
383#define ALX_DMA_WDLY_CNT_DEF 4
384#define ALX_DMA_RDLY_CNT_MASK 0x1F
385#define ALX_DMA_RDLY_CNT_SHIFT 11
386#define ALX_DMA_RDLY_CNT_DEF 15
387/* bit10: 0:tpd with pri, 1: data */
388#define ALX_DMA_RREQ_PRI_DATA BIT(10)
389#define ALX_DMA_RREQ_BLEN_MASK 0x7
390#define ALX_DMA_RREQ_BLEN_SHIFT 4
391#define ALX_DMA_RORDER_MODE_MASK 0x7
392#define ALX_DMA_RORDER_MODE_SHIFT 0
393#define ALX_DMA_RORDER_MODE_OUT 4
394
395#define ALX_WOL0 0x14A0
396#define ALX_WOL0_PME_LINK BIT(5)
397#define ALX_WOL0_LINK_EN BIT(4)
398#define ALX_WOL0_PME_MAGIC_EN BIT(3)
399#define ALX_WOL0_MAGIC_EN BIT(2)
400
401#define ALX_RFD_PIDX 0x15E0
402
403#define ALX_RFD_CIDX 0x15F8
404
405/* MIB */
406#define ALX_MIB_BASE 0x1700
407#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0)
408#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92)
409#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96)
410#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192)
411
412#define ALX_RX_STATS_BIN ALX_MIB_RX_OK
413#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR
414#define ALX_TX_STATS_BIN ALX_MIB_TX_OK
415#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT
416
417#define ALX_ISR 0x1600
418#define ALX_ISR_DIS BIT(31)
419#define ALX_ISR_RX_Q7 BIT(30)
420#define ALX_ISR_RX_Q6 BIT(29)
421#define ALX_ISR_RX_Q5 BIT(28)
422#define ALX_ISR_RX_Q4 BIT(27)
423#define ALX_ISR_PCIE_LNKDOWN BIT(26)
424#define ALX_ISR_RX_Q3 BIT(19)
425#define ALX_ISR_RX_Q2 BIT(18)
426#define ALX_ISR_RX_Q1 BIT(17)
427#define ALX_ISR_RX_Q0 BIT(16)
428#define ALX_ISR_TX_Q0 BIT(15)
429#define ALX_ISR_PHY BIT(12)
430#define ALX_ISR_DMAW BIT(10)
431#define ALX_ISR_DMAR BIT(9)
432#define ALX_ISR_TXF_UR BIT(8)
433#define ALX_ISR_TX_Q3 BIT(7)
434#define ALX_ISR_TX_Q2 BIT(6)
435#define ALX_ISR_TX_Q1 BIT(5)
436#define ALX_ISR_RFD_UR BIT(4)
437#define ALX_ISR_RXF_OV BIT(3)
438#define ALX_ISR_MANU BIT(2)
439#define ALX_ISR_TIMER BIT(1)
440#define ALX_ISR_SMB BIT(0)
441
442#define ALX_IMR 0x1604
443
444/* re-send assert msg if SW does not respond */
445#define ALX_INT_RETRIG 0x1608
446/* 40ms */
447#define ALX_INT_RETRIG_TO 20000
448
449#define ALX_SMB_TIMER 0x15C4
450
451#define ALX_TINT_TPD_THRSHLD 0x15C8
452
453#define ALX_TINT_TIMER 0x15CC
454
455#define ALX_CLK_GATE 0x1814
456#define ALX_CLK_GATE_RXMAC BIT(5)
457#define ALX_CLK_GATE_TXMAC BIT(4)
458#define ALX_CLK_GATE_RXQ BIT(3)
459#define ALX_CLK_GATE_TXQ BIT(2)
460#define ALX_CLK_GATE_DMAR BIT(1)
461#define ALX_CLK_GATE_DMAW BIT(0)
462#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | \
463 ALX_CLK_GATE_TXMAC | \
464 ALX_CLK_GATE_RXQ | \
465 ALX_CLK_GATE_TXQ | \
466 ALX_CLK_GATE_DMAR | \
467 ALX_CLK_GATE_DMAW)
468
469/* interop between drivers */
470#define ALX_DRV 0x1804
471#define ALX_DRV_PHY_AUTO BIT(28)
472#define ALX_DRV_PHY_1000 BIT(27)
473#define ALX_DRV_PHY_100 BIT(26)
474#define ALX_DRV_PHY_10 BIT(25)
475#define ALX_DRV_PHY_DUPLEX BIT(24)
476/* bit23: adv Pause */
477#define ALX_DRV_PHY_PAUSE BIT(23)
478/* bit22: adv Asym Pause */
479#define ALX_DRV_PHY_MASK 0xFF
480#define ALX_DRV_PHY_SHIFT 21
481#define ALX_DRV_PHY_UNKNOWN 0
482
483/* flag of phy inited */
484#define ALX_PHY_INITED 0x003F
485
486/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
487#define ALX_WOL_CTRL2 0x1830
488#define ALX_WOL_CTRL2_DATA_STORE BIT(3)
489#define ALX_WOL_CTRL2_PTRN_EVT BIT(2)
490#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1)
491#define ALX_WOL_CTRL2_PTRN_EN BIT(0)
492
493#define ALX_WOL_CTRL3 0x1834
494#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF
495#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0
496
497#define ALX_WOL_CTRL4 0x1838
498#define ALX_WOL_CTRL4_PT15_MATCH BIT(31)
499#define ALX_WOL_CTRL4_PT14_MATCH BIT(30)
500#define ALX_WOL_CTRL4_PT13_MATCH BIT(29)
501#define ALX_WOL_CTRL4_PT12_MATCH BIT(28)
502#define ALX_WOL_CTRL4_PT11_MATCH BIT(27)
503#define ALX_WOL_CTRL4_PT10_MATCH BIT(26)
504#define ALX_WOL_CTRL4_PT9_MATCH BIT(25)
505#define ALX_WOL_CTRL4_PT8_MATCH BIT(24)
506#define ALX_WOL_CTRL4_PT7_MATCH BIT(23)
507#define ALX_WOL_CTRL4_PT6_MATCH BIT(22)
508#define ALX_WOL_CTRL4_PT5_MATCH BIT(21)
509#define ALX_WOL_CTRL4_PT4_MATCH BIT(20)
510#define ALX_WOL_CTRL4_PT3_MATCH BIT(19)
511#define ALX_WOL_CTRL4_PT2_MATCH BIT(18)
512#define ALX_WOL_CTRL4_PT1_MATCH BIT(17)
513#define ALX_WOL_CTRL4_PT0_MATCH BIT(16)
514#define ALX_WOL_CTRL4_PT15_EN BIT(15)
515#define ALX_WOL_CTRL4_PT14_EN BIT(14)
516#define ALX_WOL_CTRL4_PT13_EN BIT(13)
517#define ALX_WOL_CTRL4_PT12_EN BIT(12)
518#define ALX_WOL_CTRL4_PT11_EN BIT(11)
519#define ALX_WOL_CTRL4_PT10_EN BIT(10)
520#define ALX_WOL_CTRL4_PT9_EN BIT(9)
521#define ALX_WOL_CTRL4_PT8_EN BIT(8)
522#define ALX_WOL_CTRL4_PT7_EN BIT(7)
523#define ALX_WOL_CTRL4_PT6_EN BIT(6)
524#define ALX_WOL_CTRL4_PT5_EN BIT(5)
525#define ALX_WOL_CTRL4_PT4_EN BIT(4)
526#define ALX_WOL_CTRL4_PT3_EN BIT(3)
527#define ALX_WOL_CTRL4_PT2_EN BIT(2)
528#define ALX_WOL_CTRL4_PT1_EN BIT(1)
529#define ALX_WOL_CTRL4_PT0_EN BIT(0)
530
531#define ALX_WOL_CTRL5 0x183C
532#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF
533#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24
534#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF
535#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16
536#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF
537#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8
538#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF
539#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0
540
541#define ALX_WOL_CTRL6 0x1840
542#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF
543#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24
544#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF
545#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16
546#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF
547#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8
548#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF
549#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0
550
551#define ALX_WOL_CTRL7 0x1844
552#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF
553#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24
554#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF
555#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16
556#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF
557#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8
558#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF
559#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0
560
561#define ALX_WOL_CTRL8 0x1848
562#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF
563#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24
564#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF
565#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16
566#define ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF
567#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8
568#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF
569#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0
570
571#define ALX_ACER_FIXED_PTN0 0x1850
572#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF
573#define ALX_ACER_FIXED_PTN0_SHIFT 0
574
575#define ALX_ACER_FIXED_PTN1 0x1854
576#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF
577#define ALX_ACER_FIXED_PTN1_SHIFT 0
578
579#define ALX_ACER_RANDOM_NUM0 0x1858
580#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF
581#define ALX_ACER_RANDOM_NUM0_SHIFT 0
582
583#define ALX_ACER_RANDOM_NUM1 0x185C
584#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF
585#define ALX_ACER_RANDOM_NUM1_SHIFT 0
586
587#define ALX_ACER_RANDOM_NUM2 0x1860
588#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF
589#define ALX_ACER_RANDOM_NUM2_SHIFT 0
590
591#define ALX_ACER_RANDOM_NUM3 0x1864
592#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF
593#define ALX_ACER_RANDOM_NUM3_SHIFT 0
594
595#define ALX_ACER_MAGIC 0x1868
596#define ALX_ACER_MAGIC_EN BIT(31)
597#define ALX_ACER_MAGIC_PME_EN BIT(30)
598#define ALX_ACER_MAGIC_MATCH BIT(29)
599#define ALX_ACER_MAGIC_FF_CHECK BIT(10)
600#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F
601#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5
602#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F
603#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0
604
605#define ALX_ACER_TIMER 0x186C
606#define ALX_ACER_TIMER_EN BIT(31)
607#define ALX_ACER_TIMER_PME_EN BIT(30)
608#define ALX_ACER_TIMER_MATCH BIT(29)
609#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF
610#define ALX_ACER_TIMER_THRES_SHIFT 0
611#define ALX_ACER_TIMER_THRES_DEF 1
612
613/* RSS definitions */
614#define ALX_RSS_KEY0 0x14B0
615#define ALX_RSS_KEY1 0x14B4
616#define ALX_RSS_KEY2 0x14B8
617#define ALX_RSS_KEY3 0x14BC
618#define ALX_RSS_KEY4 0x14C0
619#define ALX_RSS_KEY5 0x14C4
620#define ALX_RSS_KEY6 0x14C8
621#define ALX_RSS_KEY7 0x14CC
622#define ALX_RSS_KEY8 0x14D0
623#define ALX_RSS_KEY9 0x14D4
624
625#define ALX_RSS_IDT_TBL0 0x1B00
626
627#define ALX_MSI_MAP_TBL1 0x15D0
628#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20
629#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16
630#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12
631#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8
632#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4
633#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0
634
635#define ALX_MSI_MAP_TBL2 0x15D8
636#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20
637#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16
638#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12
639#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8
640#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4
641#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0
642
643#define ALX_MSI_ID_MAP 0x15D4
644
645#define ALX_MSI_RETRANS_TIMER 0x1920
646/* bit16: 1:line,0:standard */
647#define ALX_MSI_MASK_SEL_LINE BIT(16)
648#define ALX_MSI_RETRANS_TM_MASK 0xFFFF
649#define ALX_MSI_RETRANS_TM_SHIFT 0
650
651/* CR DMA ctrl */
652
653/* TX QoS */
654#define ALX_WRR 0x1938
655#define ALX_WRR_PRI_MASK 0x3
656#define ALX_WRR_PRI_SHIFT 29
657#define ALX_WRR_PRI_RESTRICT_NONE 3
658#define ALX_WRR_PRI3_MASK 0x1F
659#define ALX_WRR_PRI3_SHIFT 24
660#define ALX_WRR_PRI2_MASK 0x1F
661#define ALX_WRR_PRI2_SHIFT 16
662#define ALX_WRR_PRI1_MASK 0x1F
663#define ALX_WRR_PRI1_SHIFT 8
664#define ALX_WRR_PRI0_MASK 0x1F
665#define ALX_WRR_PRI0_SHIFT 0
666
667#define ALX_HQTPD 0x193C
668#define ALX_HQTPD_BURST_EN BIT(31)
669#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF
670#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8
671#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF
672#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4
673#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF
674#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0
675
676#define ALX_MISC 0x19C0
677#define ALX_MISC_PSW_OCP_MASK 0x7
678#define ALX_MISC_PSW_OCP_SHIFT 21
679#define ALX_MISC_PSW_OCP_DEF 0x7
680#define ALX_MISC_ISO_EN BIT(12)
681#define ALX_MISC_INTNLOSC_OPEN BIT(3)
682
683#define ALX_MSIC2 0x19C8
684#define ALX_MSIC2_CALB_START BIT(0)
685
686#define ALX_MISC3 0x19CC
687/* bit1: 1:Software control 25M */
688#define ALX_MISC3_25M_BY_SW BIT(1)
689/* bit0: 25M switch to intnl OSC */
690#define ALX_MISC3_25M_NOTO_INTNL BIT(0)
691
692/* MSIX tbl in memory space */
693#define ALX_MSIX_ENTRY_BASE 0x2000
694
695/********************* PHY regs definition ***************************/
696
697/* PHY Specific Status Register */
698#define ALX_MII_GIGA_PSSR 0x11
699#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800
700#define ALX_GIGA_PSSR_DPLX 0x2000
701#define ALX_GIGA_PSSR_SPEED 0xC000
702#define ALX_GIGA_PSSR_10MBS 0x0000
703#define ALX_GIGA_PSSR_100MBS 0x4000
704#define ALX_GIGA_PSSR_1000MBS 0x8000
705
706/* PHY Interrupt Enable Register */
707#define ALX_MII_IER 0x12
708#define ALX_IER_LINK_UP 0x0400
709#define ALX_IER_LINK_DOWN 0x0800
710
711/* PHY Interrupt Status Register */
712#define ALX_MII_ISR 0x13
713
714#define ALX_MII_DBG_ADDR 0x1D
715#define ALX_MII_DBG_DATA 0x1E
716
717/***************************** debug port *************************************/
718
719#define ALX_MIIDBG_ANACTRL 0x00
720#define ALX_ANACTRL_DEF 0x02EF
721
722#define ALX_MIIDBG_SYSMODCTRL 0x04
723/* en half bias */
724#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B
725
726#define ALX_MIIDBG_SRDSYSMOD 0x05
727#define ALX_SRDSYSMOD_DEEMP_EN 0x0040
728#define ALX_SRDSYSMOD_DEF 0x2C46
729
730#define ALX_MIIDBG_HIBNEG 0x0B
731#define ALX_HIBNEG_PSHIB_EN 0x8000
732#define ALX_HIBNEG_HIB_PSE 0x1000
733#define ALX_HIBNEG_DEF 0xBC40
734#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \
735 ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
736
737#define ALX_MIIDBG_TST10BTCFG 0x12
738#define ALX_TST10BTCFG_DEF 0x4C04
739
740#define ALX_MIIDBG_AZ_ANADECT 0x15
741#define ALX_AZ_ANADECT_DEF 0x3220
742#define ALX_AZ_ANADECT_LONG 0x3210
743
744#define ALX_MIIDBG_MSE16DB 0x18
745#define ALX_MSE16DB_UP 0x05EA
746#define ALX_MSE16DB_DOWN 0x02EA
747
748#define ALX_MIIDBG_MSE20DB 0x1C
749#define ALX_MSE20DB_TH_MASK 0x7F
750#define ALX_MSE20DB_TH_SHIFT 2
751#define ALX_MSE20DB_TH_DEF 0x2E
752#define ALX_MSE20DB_TH_HI 0x54
753
754#define ALX_MIIDBG_AGC 0x23
755#define ALX_AGC_2_VGA_MASK 0x3FU
756#define ALX_AGC_2_VGA_SHIFT 8
757#define ALX_AGC_LONG1G_LIMT 40
758#define ALX_AGC_LONG100M_LIMT 44
759
760#define ALX_MIIDBG_LEGCYPS 0x29
761#define ALX_LEGCYPS_EN 0x8000
762#define ALX_LEGCYPS_DEF 0x129D
763
764#define ALX_MIIDBG_TST100BTCFG 0x36
765#define ALX_TST100BTCFG_DEF 0xE12C
766
767#define ALX_MIIDBG_GREENCFG 0x3B
768#define ALX_GREENCFG_DEF 0x7078
769
770#define ALX_MIIDBG_GREENCFG2 0x3D
771#define ALX_GREENCFG2_BP_GREEN 0x8000
772#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080
773
774/******* dev 3 *********/
775#define ALX_MIIEXT_PCS 3
776
777#define ALX_MIIEXT_CLDCTRL3 0x8003
778#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000
779
780#define ALX_MIIEXT_CLDCTRL5 0x8005
781#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000
782
783#define ALX_MIIEXT_CLDCTRL6 0x8006
784#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF
785#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0
786#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116
787#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152
788
789#define ALX_MIIEXT_VDRVBIAS 0x8062
790#define ALX_VDRVBIAS_DEF 0x3
791
792/********* dev 7 **********/
793#define ALX_MIIEXT_ANEG 7
794
795#define ALX_MIIEXT_LOCAL_EEEADV 0x3C
796#define ALX_LOCAL_EEEADV_1000BT 0x0004
797#define ALX_LOCAL_EEEADV_100BT 0x0002
798
799#define ALX_MIIEXT_AFE 0x801A
800#define ALX_AFE_10BT_100M_TH 0x0040
801
802#define ALX_MIIEXT_S3DIG10 0x8023
803/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */
804#define ALX_MIIEXT_S3DIG10_SL 0x0001
805#define ALX_MIIEXT_S3DIG10_DEF 0
806
807#define ALX_MIIEXT_NLP78 0x8027
808#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05
809
810#endif
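
For readers skimming reg.h, the _MASK/_SHIFT pairs above are meant to be combined in the usual clear-then-set way. The helper below is only an illustration (alx_mdio_clk_demo() is a made-up name); the three ALX_MDIO_CLK_SEL_* values are taken verbatim from the header above.

#include <stdint.h>

#define ALX_MDIO_CLK_SEL_MASK	0x7
#define ALX_MDIO_CLK_SEL_SHIFT	24
#define ALX_MDIO_CLK_SEL_25MD4	0

/* clear the clock-select field of an ALX_MDIO value, then program 25M/4 */
static uint32_t alx_mdio_clk_demo(uint32_t reg)
{
	reg &= ~((uint32_t)ALX_MDIO_CLK_SEL_MASK << ALX_MDIO_CLK_SEL_SHIFT);
	reg |= ((uint32_t)ALX_MDIO_CLK_SEL_25MD4 & ALX_MDIO_CLK_SEL_MASK)
	       << ALX_MDIO_CLK_SEL_SHIFT;
	return reg;
}
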
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index c777b9013164..a13463e8a2c3 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -744,6 +744,9 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
 		status = tg3_ape_read32(tp, gnt + off);
 		if (status == bit)
 			break;
+		if (pci_channel_offline(tp->pdev))
+			break;
+
 		udelay(10);
 	}
 
@@ -1635,6 +1638,9 @@ static void tg3_wait_for_event_ack(struct tg3 *tp)
 	for (i = 0; i < delay_cnt; i++) {
 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
 			break;
+		if (pci_channel_offline(tp->pdev))
+			break;
+
 		udelay(8);
 	}
 }
@@ -1813,6 +1819,9 @@ static int tg3_poll_fw(struct tg3 *tp)
 		for (i = 0; i < 200; i++) {
 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
 				return 0;
+			if (pci_channel_offline(tp->pdev))
+				return -ENODEV;
+
 			udelay(100);
 		}
 		return -ENODEV;
@@ -1823,6 +1832,15 @@ static int tg3_poll_fw(struct tg3 *tp)
 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 			break;
+		if (pci_channel_offline(tp->pdev)) {
+			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
+				tg3_flag_set(tp, NO_FWARE_REPORTED);
+				netdev_info(tp->dev, "No firmware running\n");
+			}
+
+			break;
+		}
+
 		udelay(10);
 	}
 
@@ -3520,6 +3538,8 @@ static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
 		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
 			break;
+		if (pci_channel_offline(tp->pdev))
+			return -EBUSY;
 	}
 
 	return (i == iters) ? -EBUSY : 0;
@@ -8589,6 +8609,14 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, boo
 	tw32_f(ofs, val);
 
 	for (i = 0; i < MAX_WAIT_CNT; i++) {
+		if (pci_channel_offline(tp->pdev)) {
+			dev_err(&tp->pdev->dev,
+				"tg3_stop_block device offline, "
+				"ofs=%lx enable_bit=%x\n",
+				ofs, enable_bit);
+			return -ENODEV;
+		}
+
 		udelay(100);
 		val = tr32(ofs);
 		if ((val & enable_bit) == 0)
@@ -8612,6 +8640,13 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent)
 
 	tg3_disable_ints(tp);
 
+	if (pci_channel_offline(tp->pdev)) {
+		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
+		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
+		err = -ENODEV;
+		goto err_no_dev;
+	}
+
 	tp->rx_mode &= ~RX_MODE_ENABLE;
 	tw32_f(MAC_RX_MODE, tp->rx_mode);
 	udelay(10);
@@ -8660,6 +8695,7 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent)
 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
 
+err_no_dev:
 	for (i = 0; i < tp->irq_cnt; i++) {
 		struct tg3_napi *tnapi = &tp->napi[i];
 		if (tnapi->hw_status)
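
All of the tg3 hunks above follow one pattern: a bounded register-poll loop additionally checks pci_channel_offline() so that, once the device has fallen off the bus (EEH/AER recovery), the driver fails fast instead of sitting out the full timeout. A generic kernel-style sketch of that pattern, with demo_dev, demo_poll_until_ready(), DEMO_MAX_WAIT and DEMO_READY as hypothetical stand-ins; pci_channel_offline(), readl() and udelay() are the real kernel APIs.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>

#define DEMO_MAX_WAIT	1000
#define DEMO_READY	0x1

struct demo_dev {			/* hypothetical device state */
	struct pci_dev *pdev;
	void __iomem *regs;
};

static int demo_poll_until_ready(struct demo_dev *dev)
{
	int i;

	for (i = 0; i < DEMO_MAX_WAIT; i++) {
		if (readl(dev->regs) & DEMO_READY)
			return 0;
		/* device dropped off the bus: fail fast, do not eat the timeout */
		if (pci_channel_offline(dev->pdev))
			return -ENODEV;
		udelay(10);
	}

	return -ETIMEDOUT;
}
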
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a667015be22a..d48099f03b7f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -516,6 +516,7 @@ fec_restart(struct net_device *ndev, int duplex)
 	/* Set MII speed */
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
+#if !defined(CONFIG_M5272)
 	/* set RX checksum */
 	val = readl(fep->hwp + FEC_RACC);
 	if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
@@ -523,6 +524,7 @@ fec_restart(struct net_device *ndev, int duplex)
 	else
 		val &= ~FEC_RACC_OPTIONS;
 	writel(val, fep->hwp + FEC_RACC);
+#endif
 
 	/*
 	 * The phy interface and speed need to get configured
@@ -575,6 +577,7 @@ fec_restart(struct net_device *ndev, int duplex)
 #endif
 	}
 
+#if !defined(CONFIG_M5272)
 	/* enable pause frame*/
 	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
 	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
@@ -592,6 +595,7 @@ fec_restart(struct net_device *ndev, int duplex)
 	} else {
 		rcntl &= ~FEC_ENET_FCE;
 	}
+#endif /* !defined(CONFIG_M5272) */
 
 	writel(rcntl, fep->hwp + FEC_R_CNTRL);
 
@@ -1205,7 +1209,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
 	/* mask with MAC supported features */
 	if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
 		phy_dev->supported &= PHY_GBIT_FEATURES;
+#if !defined(CONFIG_M5272)
 		phy_dev->supported |= SUPPORTED_Pause;
+#endif
 	}
 	else
 		phy_dev->supported &= PHY_BASIC_FEATURES;
@@ -1390,6 +1396,8 @@ static int fec_enet_get_ts_info(struct net_device *ndev,
 	}
 }
 
+#if !defined(CONFIG_M5272)
+
 static void fec_enet_get_pauseparam(struct net_device *ndev,
 				    struct ethtool_pauseparam *pause)
 {
@@ -1436,9 +1444,13 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
 	return 0;
 }
 
+#endif /* !defined(CONFIG_M5272) */
+
 static const struct ethtool_ops fec_enet_ethtool_ops = {
+#if !defined(CONFIG_M5272)
 	.get_pauseparam = fec_enet_get_pauseparam,
 	.set_pauseparam = fec_enet_set_pauseparam,
+#endif
 	.get_settings = fec_enet_get_settings,
 	.set_settings = fec_enet_set_settings,
 	.get_drvinfo = fec_enet_get_drvinfo,
@@ -1874,10 +1886,12 @@ fec_probe(struct platform_device *pdev)
 	/* setup board info structure */
 	fep = netdev_priv(ndev);
 
+#if !defined(CONFIG_M5272)
 	/* default enable pause frame auto negotiation */
 	if (pdev->id_entry &&
 	    (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
 		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
+#endif
 
 	fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
 	fep->pdev = pdev;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2ad1494efbb3..d1cbfb12c1ca 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1757,7 +1757,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
 	memset(rxq->rx_desc_area, 0, size);
 
 	rxq->rx_desc_area_size = size;
-	rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
+	rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
 			      GFP_KERNEL);
 	if (rxq->rx_skb == NULL)
 		goto out_free;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 339bb323cb0c..1c8af8ba08d9 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1015,7 +1015,7 @@ static int rxq_init(struct net_device *dev)
 	int rx_desc_num = pep->rx_ring_size;
 
 	/* Allocate RX skb rings */
-	pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+	pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
 			      GFP_KERNEL);
 	if (!pep->rx_skb)
 		return -ENOMEM;
@@ -1076,7 +1076,7 @@ static int txq_init(struct net_device *dev)
 	int size = 0, i = 0;
 	int tx_desc_num = pep->tx_ring_size;
 
-	pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+	pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
 			      GFP_KERNEL);
 	if (!pep->tx_skb)
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2f4a26039e80..8a434997a0df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -632,6 +632,9 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 		dev->caps.cqe_size = 32;
 	}
 
+	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
+	mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
+
 	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
 
 	return 0;
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 921729f9c85c..91a8a5d28037 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -46,17 +46,25 @@
 union mgmt_port_ring_entry {
 	u64 d64;
 	struct {
-		u64 reserved_62_63:2;
+#define RING_ENTRY_CODE_DONE 0xf
+#define RING_ENTRY_CODE_MORE 0x10
+#ifdef __BIG_ENDIAN_BITFIELD
+		u64 reserved_62_63:2;
 		/* Length of the buffer/packet in bytes */
 		u64 len:14;
 		/* For TX, signals that the packet should be timestamped */
 		u64 tstamp:1;
 		/* The RX error code */
 		u64 code:7;
-#define RING_ENTRY_CODE_DONE 0xf
-#define RING_ENTRY_CODE_MORE 0x10
 		/* Physical address of the buffer */
 		u64 addr:40;
+#else
+		u64 addr:40;
+		u64 code:7;
+		u64 tstamp:1;
+		u64 len:14;
+		u64 reserved_62_63:2;
+#endif
 	} s;
 };
 
@@ -1141,10 +1149,13 @@ static int octeon_mgmt_open(struct net_device *netdev)
 		/* For compensation state to lock. */
 		ndelay(1040 * NS_PER_PHY_CLK);
 
-		/* Some Ethernet switches cannot handle standard
-		 * Interframe Gap, increase to 16 bytes.
+		/* Default Interframe Gaps are too small. Recommended
+		 * workaround is:
+		 *
+		 * AGL_GMX_TX_IFG[IFG1]=14
+		 * AGL_GMX_TX_IFG[IFG2]=10
 		 */
-		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
+		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
 	}
 
 	octeon_mgmt_rx_fill_ring(netdev);
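
The octeon_mgmt hunk above fixes the descriptor layout by declaring the bitfields in opposite order depending on __BIG_ENDIAN_BITFIELD, because C bitfields fill a word from different ends on big- and little-endian targets. A minimal sketch of the same technique (demo_ring_entry is a made-up name, not the driver's union; __BIG_ENDIAN_BITFIELD and u64 come from the kernel headers):

#include <linux/types.h>
#include <asm/byteorder.h>

union demo_ring_entry {
	u64 d64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u64 flags:24;	/* most-significant bits declared first */
		u64 addr:40;
#else
		u64 addr:40;	/* least-significant bits declared first */
		u64 flags:24;
#endif
	} s;
};
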
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 43562c256379..6acf82b9f018 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -642,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
 			qlcnic_83xx_config_intrpt(adapter, 0);
 		}
 		/* Allow dma queues to drain after context reset */
-		msleep(20);
+		mdelay(20);
 	}
 }
 
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5e3982fc5398..e29fe8dbd226 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -380,8 +380,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 	.eesipr_value = 0x01ff009f,
 
 	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
-	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
-			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+			  EESR_ECI,
 	.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 
 	.apr = 1,
@@ -427,8 +428,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
 
 	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
-	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
-			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+			  EESR_ECI,
 	.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 
 	.apr = 1,
@@ -478,8 +480,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 	.rmcr_value = 0x00000001,
 
 	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
-	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
-			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+			  EESR_ECI,
 	.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 
 	.apr = 1,
@@ -592,9 +595,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
 	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
 	.tx_check = EESR_TC1 | EESR_FTC,
-	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
-			  EESR_ECI,
+	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+			  EESR_TDE | EESR_ECI,
 	.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
 			  EESR_TFE,
 	.fdr_value = 0x0000072f,
@@ -674,9 +677,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
 	.tx_check = EESR_TC1 | EESR_FTC,
-	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
-			  EESR_ECI,
+	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+			  EESR_TDE | EESR_ECI,
 	.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
 			  EESR_TFE,
 
@@ -811,9 +814,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
 	.tx_check = EESR_TC1 | EESR_FTC,
-	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
-			  EESR_ECI,
+	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+			  EESR_TDE | EESR_ECI,
 	.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
 			  EESR_TFE,
 
@@ -1549,11 +1552,12 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 
 ignore_link:
	if (intr_status & EESR_TWB) {
-		/* Write buck end. unused write back interrupt */
-		if (intr_status & EESR_TABT)	/* Transmit Abort int */
+		/* Unused write back interrupt */
+		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
 			ndev->stats.tx_aborted_errors++;
 			if (netif_msg_tx_err(mdp))
 				dev_err(&ndev->dev, "Transmit Abort\n");
+		}
 	}
 
 	if (intr_status & EESR_RABT) {
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 1ddc9f235bcb..62689a5823be 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -253,7 +253,7 @@ enum EESR_BIT {
253 253
254#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ 254#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
255 EESR_RTO) 255 EESR_RTO)
256#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \ 256#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
257 EESR_RDE | EESR_RFRMER | EESR_ADE | \ 257 EESR_RDE | EESR_RFRMER | EESR_ADE | \
258 EESR_TFE | EESR_TDE | EESR_ECI) 258 EESR_TFE | EESR_TDE | EESR_ECI)
259#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \ 259#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 39e4cb39de29..4a14a940c65e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2139,7 +2139,7 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
2139 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2139 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2140 return sprintf(buf, "%d\n", efx->phy_type); 2140 return sprintf(buf, "%d\n", efx->phy_type);
2141} 2141}
2142static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); 2142static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
2143 2143
2144static int efx_register_netdev(struct efx_nic *efx) 2144static int efx_register_netdev(struct efx_nic *efx)
2145{ 2145{
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7788fbe44f0a..95176979b2d2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -297,8 +297,8 @@ struct dma_features {
297#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ 297#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
298 298
299/* Default LPI timers */ 299/* Default LPI timers */
300#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8 300#define STMMAC_DEFAULT_LIT_LS 0x3E8
301#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0 301#define STMMAC_DEFAULT_TWT_LS 0x0
302 302
303#define STMMAC_CHAIN_MODE 0x1 303#define STMMAC_CHAIN_MODE 0x1
304#define STMMAC_RING_MODE 0x2 304#define STMMAC_RING_MODE 0x2
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ee919ca8b8a0..e9eab29db7be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -130,7 +130,7 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
130static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; 130static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
131module_param(eee_timer, int, S_IRUGO | S_IWUSR); 131module_param(eee_timer, int, S_IRUGO | S_IWUSR);
132MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); 132MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
133#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x)) 133#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
134 134
135/* By default the driver will use the ring mode to manage tx and rx descriptors 135/* By default the driver will use the ring mode to manage tx and rx descriptors
136 * but passing this value so user can force to use the chain instead of the ring 136 * but passing this value so user can force to use the chain instead of the ring
@@ -288,7 +288,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
288 struct stmmac_priv *priv = (struct stmmac_priv *)arg; 288 struct stmmac_priv *priv = (struct stmmac_priv *)arg;
289 289
290 stmmac_enable_eee_mode(priv); 290 stmmac_enable_eee_mode(priv);
291 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer)); 291 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
292} 292}
293 293
294/** 294/**
@@ -304,22 +304,34 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
304{ 304{
305 bool ret = false; 305 bool ret = false;
306 306
307 /* Using PCS we cannot deal with the phy registers at this stage
308 * so we do not support extra feature like EEE.
309 */
310 if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
311 (priv->pcs == STMMAC_PCS_RTBI))
312 goto out;
313
307 /* MAC core supports the EEE feature. */ 314 /* MAC core supports the EEE feature. */
308 if (priv->dma_cap.eee) { 315 if (priv->dma_cap.eee) {
309 /* Check if the PHY supports EEE */ 316 /* Check if the PHY supports EEE */
310 if (phy_init_eee(priv->phydev, 1)) 317 if (phy_init_eee(priv->phydev, 1))
311 goto out; 318 goto out;
312 319
313 priv->eee_active = 1; 320 if (!priv->eee_active) {
314 init_timer(&priv->eee_ctrl_timer); 321 priv->eee_active = 1;
315 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; 322 init_timer(&priv->eee_ctrl_timer);
316 priv->eee_ctrl_timer.data = (unsigned long)priv; 323 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
317 priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer); 324 priv->eee_ctrl_timer.data = (unsigned long)priv;
318 add_timer(&priv->eee_ctrl_timer); 325 priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
319 326 add_timer(&priv->eee_ctrl_timer);
320 priv->hw->mac->set_eee_timer(priv->ioaddr, 327
321 STMMAC_DEFAULT_LIT_LS_TIMER, 328 priv->hw->mac->set_eee_timer(priv->ioaddr,
322 priv->tx_lpi_timer); 329 STMMAC_DEFAULT_LIT_LS,
330 priv->tx_lpi_timer);
331 } else
332 /* Set HW EEE according to the speed */
333 priv->hw->mac->set_eee_pls(priv->ioaddr,
334 priv->phydev->link);
323 335
324 pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); 336 pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
325 337
@@ -329,20 +341,6 @@ out:
329 return ret; 341 return ret;
330} 342}
331 343
332/**
333 * stmmac_eee_adjust: adjust HW EEE according to the speed
334 * @priv: driver private structure
335 * Description:
336 * When the EEE has been already initialised we have to
337 * modify the PLS bit in the LPI ctrl & status reg according
338 * to the PHY link status. For this reason.
339 */
340static void stmmac_eee_adjust(struct stmmac_priv *priv)
341{
342 if (priv->eee_enabled)
343 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
344}
345
346/* stmmac_get_tx_hwtstamp: get HW TX timestamps 344/* stmmac_get_tx_hwtstamp: get HW TX timestamps
347 * @priv: driver private structure 345 * @priv: driver private structure
348 * @entry : descriptor index to be used. 346 * @entry : descriptor index to be used.
@@ -769,7 +767,10 @@ static void stmmac_adjust_link(struct net_device *dev)
769 if (new_state && netif_msg_link(priv)) 767 if (new_state && netif_msg_link(priv))
770 phy_print_status(phydev); 768 phy_print_status(phydev);
771 769
772 stmmac_eee_adjust(priv); 770 /* At this stage, the EEE may need to be set up or some MAC-related
 771 * HW registers adjusted.
772 */
773 priv->eee_enabled = stmmac_eee_init(priv);
773 774
774 spin_unlock_irqrestore(&priv->lock, flags); 775 spin_unlock_irqrestore(&priv->lock, flags);
775 776
@@ -1277,7 +1278,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1277 1278
1278 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { 1279 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1279 stmmac_enable_eee_mode(priv); 1280 stmmac_enable_eee_mode(priv);
1280 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer)); 1281 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1281 } 1282 }
1282 spin_unlock(&priv->tx_lock); 1283 spin_unlock(&priv->tx_lock);
1283} 1284}
@@ -1671,14 +1672,9 @@ static int stmmac_open(struct net_device *dev)
1671 if (priv->phydev) 1672 if (priv->phydev)
1672 phy_start(priv->phydev); 1673 phy_start(priv->phydev);
1673 1674
1674 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER; 1675 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1675 1676
1676 /* Using PCS we cannot dial with the phy registers at this stage 1677 priv->eee_enabled = stmmac_eee_init(priv);
1677 * so we do not support extra feature like EEE.
1678 */
1679 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1680 priv->pcs != STMMAC_PCS_RTBI)
1681 priv->eee_enabled = stmmac_eee_init(priv);
1682 1678
1683 stmmac_init_tx_coalesce(priv); 1679 stmmac_init_tx_coalesce(priv);
1684 1680
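
The net effect of the stmmac hunks is that stmmac_eee_init() becomes the single entry point called from both open and adjust_link: it bails out for PCS modes, performs the timer setup exactly once, and on later calls only updates the PLS bit, which is why the separate stmmac_eee_adjust() helper could be removed. A simplified userspace model of that flow, with hypothetical names standing in for the driver structures:

#include <stdbool.h>

/* Hypothetical stand-ins for the driver state and MAC callbacks. */
struct eee_state {
	bool pcs_mode;		/* RGMII/TBI/RTBI: no PHY access, no EEE */
	bool hw_supports_eee;
	bool phy_supports_eee;
	bool active;		/* timers already programmed */
	bool link_up;
};

static void program_eee_timers(struct eee_state *s) { s->active = true; }
static void update_pls_bit(struct eee_state *s, bool link) { (void)s; (void)link; }

static bool example_eee_init(struct eee_state *s)
{
	if (s->pcs_mode)
		return false;			/* cannot talk to the PHY here */
	if (!s->hw_supports_eee || !s->phy_supports_eee)
		return false;

	if (!s->active)
		program_eee_timers(s);		/* one-time setup */
	else
		update_pls_bit(s, s->link_up);	/* later calls track link state */

	return true;
}
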
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 21a5b291b4b3..d1a769f35f9d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1679,7 +1679,7 @@ static int cpsw_probe(struct platform_device *pdev)
1679 priv->rx_packet_max = max(rx_packet_max, 128); 1679 priv->rx_packet_max = max(rx_packet_max, 128);
1680 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); 1680 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
1681 priv->irq_enabled = true; 1681 priv->irq_enabled = true;
1682 if (!ndev) { 1682 if (!priv->cpts) {
1683 pr_err("error allocating cpts\n"); 1683 pr_err("error allocating cpts\n");
1684 goto clean_ndev_ret; 1684 goto clean_ndev_ret;
1685 } 1685 }
@@ -1973,9 +1973,12 @@ static int cpsw_suspend(struct device *dev)
1973{ 1973{
1974 struct platform_device *pdev = to_platform_device(dev); 1974 struct platform_device *pdev = to_platform_device(dev);
1975 struct net_device *ndev = platform_get_drvdata(pdev); 1975 struct net_device *ndev = platform_get_drvdata(pdev);
1976 struct cpsw_priv *priv = netdev_priv(ndev);
1976 1977
1977 if (netif_running(ndev)) 1978 if (netif_running(ndev))
1978 cpsw_ndo_stop(ndev); 1979 cpsw_ndo_stop(ndev);
1980 soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset);
1981 soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset);
1979 pm_runtime_put_sync(&pdev->dev); 1982 pm_runtime_put_sync(&pdev->dev);
1980 1983
1981 return 0; 1984 return 0;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 49dfd592ac1e..053c84fd0853 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -705,6 +705,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
705 } 705 }
706 706
707 buffer = dma_map_single(ctlr->dev, data, len, chan->dir); 707 buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
708 ret = dma_mapping_error(ctlr->dev, buffer);
709 if (ret) {
710 cpdma_desc_free(ctlr->pool, desc, 1);
711 ret = -EINVAL;
712 goto unlock_ret;
713 }
714
708 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; 715 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
709 cpdma_desc_to_port(chan, mode, directed); 716 cpdma_desc_to_port(chan, mode, directed);
710 717
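
The cpdma hunk adds the check the DMA-API requires after every dma_map_single(): the returned handle must be validated with dma_mapping_error() before it is handed to the hardware, and the descriptor reserved for the buffer is released on failure. The general shape of that pattern, sketched with placeholder arguments rather than the cpdma structures:

#include <linux/dma-mapping.h>

/* Sketch only: map a TX buffer and refuse to queue it if the mapping
 * failed, mirroring the error path added to cpdma_chan_submit(). */
static int example_map_tx_buffer(struct device *dev, void *data, size_t len,
				 dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -EINVAL;		/* caller frees its descriptor here */

	*handle = addr;
	return 0;
}
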
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index ab2307b5d9a7..4dccead586be 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -285,7 +285,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
285 285
286 skb->protocol = eth_type_trans(skb, net); 286 skb->protocol = eth_type_trans(skb, net);
287 skb->ip_summed = CHECKSUM_NONE; 287 skb->ip_summed = CHECKSUM_NONE;
288 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci); 288 if (packet->vlan_tci & VLAN_TAG_PRESENT)
289 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
290 packet->vlan_tci);
289 291
290 net->stats.rx_packets++; 292 net->stats.rx_packets++;
291 net->stats.rx_bytes += packet->total_data_buflen; 293 net->stats.rx_bytes += packet->total_data_buflen;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 59e9605de316..b6dd6a75919a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -524,8 +524,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
524 return -EMSGSIZE; 524 return -EMSGSIZE;
525 num_pages = get_user_pages_fast(base, size, 0, &page[i]); 525 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
526 if (num_pages != size) { 526 if (num_pages != size) {
527 for (i = 0; i < num_pages; i++) 527 int j;
528 put_page(page[i]); 528
529 for (j = 0; j < num_pages; j++)
530 put_page(page[i + j]);
529 return -EFAULT; 531 return -EFAULT;
530 } 532 }
531 truesize = size * PAGE_SIZE; 533 truesize = size * PAGE_SIZE;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bfa9bb48e42d..9c61f8734a40 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1010,8 +1010,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
1010 return -EMSGSIZE; 1010 return -EMSGSIZE;
1011 num_pages = get_user_pages_fast(base, size, 0, &page[i]); 1011 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
1012 if (num_pages != size) { 1012 if (num_pages != size) {
1013 for (i = 0; i < num_pages; i++) 1013 int j;
1014 put_page(page[i]); 1014
1015 for (j = 0; j < num_pages; j++)
1016 put_page(page[i + j]);
1015 return -EFAULT; 1017 return -EFAULT;
1016 } 1018 }
1017 truesize = size * PAGE_SIZE; 1019 truesize = size * PAGE_SIZE;
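
macvtap and tun get the identical fix: when get_user_pages_fast() pins fewer pages than requested, the old error path reused i as its loop counter and released page[0..num_pages-1] instead of the slots just filled at page[i..]. The corrected unwind, shown as a small helper with illustrative names:

#include <linux/mm.h>

/* Pin 'size' user pages into page[i .. i + size - 1]; on a short pin,
 * release exactly the pages this call pinned and report -EFAULT. */
static int example_pin_user_pages(unsigned long base, int size,
				  struct page **page, int i)
{
	int num_pages = get_user_pages_fast(base, size, 0, &page[i]);

	if (num_pages != size) {
		int j;

		for (j = 0; j < num_pages; j++)
			put_page(page[i + j]);	/* not page[j]: offset by i */
		return -EFAULT;
	}
	return 0;
}
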
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d095d0d3056b..56459215a22b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -590,7 +590,13 @@ static const struct usb_device_id products[] = {
590 {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ 590 {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
591 {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ 591 {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
592 {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ 592 {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
593 {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ 593 {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */
594 {QMI_GOBI1K_DEVICE(0x1410, 0xa002)}, /* Novatel Gobi Modem device */
595 {QMI_GOBI1K_DEVICE(0x1410, 0xa003)}, /* Novatel Gobi Modem device */
596 {QMI_GOBI1K_DEVICE(0x1410, 0xa004)}, /* Novatel Gobi Modem device */
597 {QMI_GOBI1K_DEVICE(0x1410, 0xa005)}, /* Novatel Gobi Modem device */
598 {QMI_GOBI1K_DEVICE(0x1410, 0xa006)}, /* Novatel Gobi Modem device */
599 {QMI_GOBI1K_DEVICE(0x1410, 0xa007)}, /* Novatel Gobi Modem device */
594 {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ 600 {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
595 {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ 601 {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
596 {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ 602 {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3b1d2ee7156b..57325f356d4f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -565,18 +565,22 @@ skip:
565 565
566/* Watch incoming packets to learn mapping between Ethernet address 566/* Watch incoming packets to learn mapping between Ethernet address
567 * and Tunnel endpoint. 567 * and Tunnel endpoint.
568 * Return true if packet is bogus and should be dropped.
568 */ 569 */
569static void vxlan_snoop(struct net_device *dev, 570static bool vxlan_snoop(struct net_device *dev,
570 __be32 src_ip, const u8 *src_mac) 571 __be32 src_ip, const u8 *src_mac)
571{ 572{
572 struct vxlan_dev *vxlan = netdev_priv(dev); 573 struct vxlan_dev *vxlan = netdev_priv(dev);
573 struct vxlan_fdb *f; 574 struct vxlan_fdb *f;
574 int err;
575 575
576 f = vxlan_find_mac(vxlan, src_mac); 576 f = vxlan_find_mac(vxlan, src_mac);
577 if (likely(f)) { 577 if (likely(f)) {
578 if (likely(f->remote.remote_ip == src_ip)) 578 if (likely(f->remote.remote_ip == src_ip))
579 return; 579 return false;
580
581 /* Don't migrate static entries, drop packets */
582 if (f->state & NUD_NOARP)
583 return true;
580 584
581 if (net_ratelimit()) 585 if (net_ratelimit())
582 netdev_info(dev, 586 netdev_info(dev,
@@ -588,14 +592,19 @@ static void vxlan_snoop(struct net_device *dev,
588 } else { 592 } else {
589 /* learned new entry */ 593 /* learned new entry */
590 spin_lock(&vxlan->hash_lock); 594 spin_lock(&vxlan->hash_lock);
591 err = vxlan_fdb_create(vxlan, src_mac, src_ip, 595
592 NUD_REACHABLE, 596 /* close off race between vxlan_flush and incoming packets */
593 NLM_F_EXCL|NLM_F_CREATE, 597 if (netif_running(dev))
594 vxlan->dst_port, 598 vxlan_fdb_create(vxlan, src_mac, src_ip,
595 vxlan->default_dst.remote_vni, 599 NUD_REACHABLE,
596 0, NTF_SELF); 600 NLM_F_EXCL|NLM_F_CREATE,
601 vxlan->dst_port,
602 vxlan->default_dst.remote_vni,
603 0, NTF_SELF);
597 spin_unlock(&vxlan->hash_lock); 604 spin_unlock(&vxlan->hash_lock);
598 } 605 }
606
607 return false;
599} 608}
600 609
601 610
@@ -727,8 +736,9 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
727 vxlan->dev->dev_addr) == 0) 736 vxlan->dev->dev_addr) == 0)
728 goto drop; 737 goto drop;
729 738
730 if (vxlan->flags & VXLAN_F_LEARN) 739 if ((vxlan->flags & VXLAN_F_LEARN) &&
731 vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source); 740 vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
741 goto drop;
732 742
733 __skb_tunnel_rx(skb, vxlan->dev); 743 __skb_tunnel_rx(skb, vxlan->dev);
734 skb_reset_network_header(skb); 744 skb_reset_network_header(skb);
@@ -1151,9 +1161,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1151 struct sk_buff *skb1; 1161 struct sk_buff *skb1;
1152 1162
1153 skb1 = skb_clone(skb, GFP_ATOMIC); 1163 skb1 = skb_clone(skb, GFP_ATOMIC);
1154 rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); 1164 if (skb1) {
1155 if (rc == NETDEV_TX_OK) 1165 rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
1156 rc = rc1; 1166 if (rc == NETDEV_TX_OK)
1167 rc = rc1;
1168 }
1157 } 1169 }
1158 1170
1159 rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc); 1171 rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
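
vxlan_snoop() now returns a verdict instead of being fire-and-forget: the receive path drops any frame whose source MAC would migrate a static (NUD_NOARP) forwarding entry, and learning only happens while the device is running. A reduced model of the decision, using a hypothetical entry type in place of struct vxlan_fdb:

#include <linux/types.h>
#include <net/neighbour.h>	/* NUD_NOARP */

struct example_fdb {
	__be32	remote_ip;
	u16	state;		/* NUD_* flags */
};

/* Return true if the frame should be dropped, false if it may be
 * delivered (and possibly learned), mirroring the new vxlan_snoop(). */
static bool example_snoop(const struct example_fdb *f, __be32 src_ip)
{
	if (!f)
		return false;	/* unknown source: learn it elsewhere */
	if (f->remote_ip == src_ip)
		return false;	/* nothing changed */
	if (f->state & NUD_NOARP)
		return true;	/* static entry must not migrate: drop */
	return false;		/* dynamic entry: allow the move */
}
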
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 147614ed86aa..6a8a382c5f4c 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -384,21 +384,37 @@ static int dlci_del(struct dlci_add *dlci)
384 struct frad_local *flp; 384 struct frad_local *flp;
385 struct net_device *master, *slave; 385 struct net_device *master, *slave;
386 int err; 386 int err;
387 bool found = false;
388
389 rtnl_lock();
387 390
388 /* validate slave device */ 391 /* validate slave device */
389 master = __dev_get_by_name(&init_net, dlci->devname); 392 master = __dev_get_by_name(&init_net, dlci->devname);
390 if (!master) 393 if (!master) {
391 return -ENODEV; 394 err = -ENODEV;
395 goto out;
396 }
397
398 list_for_each_entry(dlp, &dlci_devs, list) {
399 if (dlp->master == master) {
400 found = true;
401 break;
402 }
403 }
404 if (!found) {
405 err = -ENODEV;
406 goto out;
407 }
392 408
393 if (netif_running(master)) { 409 if (netif_running(master)) {
394 return -EBUSY; 410 err = -EBUSY;
411 goto out;
395 } 412 }
396 413
397 dlp = netdev_priv(master); 414 dlp = netdev_priv(master);
398 slave = dlp->slave; 415 slave = dlp->slave;
399 flp = netdev_priv(slave); 416 flp = netdev_priv(slave);
400 417
401 rtnl_lock();
402 err = (*flp->deassoc)(slave, master); 418 err = (*flp->deassoc)(slave, master);
403 if (!err) { 419 if (!err) {
404 list_del(&dlp->list); 420 list_del(&dlp->list);
@@ -407,8 +423,8 @@ static int dlci_del(struct dlci_add *dlci)
407 423
408 dev_put(slave); 424 dev_put(slave);
409 } 425 }
426out:
410 rtnl_unlock(); 427 rtnl_unlock();
411
412 return err; 428 return err;
413} 429}
414 430
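
The dlci_del() rework moves rtnl_lock() up so the lookup, the membership check and the teardown all happen under one lock, and it refuses to treat an arbitrary device named by userspace as a DLCI master before confirming it is on the driver's own list. A sketch of that validate-under-the-lock pattern (the list and priv layout are illustrative, not the dlci structures):

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>

struct example_priv {
	struct list_head	list;
	struct net_device	*master;
};

static LIST_HEAD(example_devs);

static int example_del(const char *name)
{
	struct example_priv *p;
	struct net_device *master;
	bool found = false;
	int err = -ENODEV;

	rtnl_lock();
	master = __dev_get_by_name(&init_net, name);
	if (!master)
		goto out;

	list_for_each_entry(p, &example_devs, list) {
		if (p->master == master) {
			found = true;
			break;
		}
	}
	if (!found)
		goto out;	/* not ours: never touch its netdev_priv() */

	err = 0;		/* ... deassociate and unlink under rtnl here ... */
out:
	rtnl_unlock();
	return err;
}
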
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 0743a47cef8f..62f1b7636c92 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1174,7 +1174,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1174 mutex_lock(&priv->htc_pm_lock); 1174 mutex_lock(&priv->htc_pm_lock);
1175 1175
1176 priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1176 priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1177 if (priv->ps_idle) 1177 if (!priv->ps_idle)
1178 chip_reset = true; 1178 chip_reset = true;
1179 1179
1180 mutex_unlock(&priv->htc_pm_lock); 1180 mutex_unlock(&priv->htc_pm_lock);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 1c9b1bac8b0d..83ab6be3fe6d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1570,6 +1570,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1570 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1570 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1571 return; 1571 return;
1572 1572
1573 rcu_read_lock();
1574
1573 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); 1575 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1574 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); 1576 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
1575 1577
@@ -1608,8 +1610,10 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1608 1610
1609 if (ac == last_ac || 1611 if (ac == last_ac ||
1610 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1612 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1611 return; 1613 break;
1612 } 1614 }
1615
1616 rcu_read_unlock();
1613} 1617}
1614 1618
1615/***********/ 1619/***********/
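
The ath_txq_schedule() change is a lock-pairing fix: once the AC walk is bracketed by rcu_read_lock()/rcu_read_unlock(), the early return inside the loop has to become a break so the unlock is always reached. The shape of that rule, on a hypothetical RCU-protected list rather than the ath9k queues:

#include <linux/list.h>
#include <linux/rcupdate.h>

struct example_node {
	struct list_head list;
	bool done;
};

/* Every exit from the critical section must pass the unlock, so the
 * loop uses 'break' rather than 'return'. */
static void example_walk(struct list_head *head)
{
	struct example_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, head, list) {
		if (n->done)
			break;		/* not return: unlock below must run */
	}
	rcu_read_unlock();
}
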
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index b98f2235978e..2c593570497c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -930,6 +930,10 @@ fail:
930 brcmf_fws_del_interface(ifp); 930 brcmf_fws_del_interface(ifp);
931 brcmf_fws_deinit(drvr); 931 brcmf_fws_deinit(drvr);
932 } 932 }
933 if (drvr->iflist[0]) {
934 free_netdev(ifp->ndev);
935 drvr->iflist[0] = NULL;
936 }
933 if (p2p_ifp) { 937 if (p2p_ifp) {
934 free_netdev(p2p_ifp->ndev); 938 free_netdev(p2p_ifp->ndev);
935 drvr->iflist[1] = NULL; 939 drvr->iflist[1] = NULL;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 28e7aeedd184..9fd6f2fef11b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -3074,21 +3074,8 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
3074 */ 3074 */
3075static bool brcms_c_ps_allowed(struct brcms_c_info *wlc) 3075static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
3076{ 3076{
3077 /* disallow PS when one of the following global conditions meets */ 3077 /* not supporting PS so always return false for now */
3078 if (!wlc->pub->associated) 3078 return false;
3079 return false;
3080
3081 /* disallow PS when one of these meets when not scanning */
3082 if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
3083 return false;
3084
3085 if (wlc->bsscfg->type == BRCMS_TYPE_AP)
3086 return false;
3087
3088 if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
3089 return false;
3090
3091 return true;
3092} 3079}
3093 3080
3094static void brcms_c_statsupd(struct brcms_c_info *wlc) 3081static void brcms_c_statsupd(struct brcms_c_info *wlc)
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index c9f197d9ca1e..fe31590a51b2 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -816,6 +816,7 @@ out:
816 rs_sta->last_txrate_idx = idx; 816 rs_sta->last_txrate_idx = idx;
817 info->control.rates[0].idx = rs_sta->last_txrate_idx; 817 info->control.rates[0].idx = rs_sta->last_txrate_idx;
818 } 818 }
819 info->control.rates[0].count = 1;
819 820
820 D_RATE("leave: %d\n", idx); 821 D_RATE("leave: %d\n", idx);
821} 822}
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 1fc0b227e120..ed3c42a63a43 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2268,7 +2268,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
2268 info->control.rates[0].flags = 0; 2268 info->control.rates[0].flags = 0;
2269 } 2269 }
2270 info->control.rates[0].idx = rate_idx; 2270 info->control.rates[0].idx = rate_idx;
2271 2271 info->control.rates[0].count = 1;
2272} 2272}
2273 2273
2274static void * 2274static void *
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 907bd6e50aad..10fbb176cc8e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -2799,7 +2799,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2799 info->control.rates[0].flags = 0; 2799 info->control.rates[0].flags = 0;
2800 } 2800 }
2801 info->control.rates[0].idx = rate_idx; 2801 info->control.rates[0].idx = rate_idx;
2802 2802 info->control.rates[0].count = 1;
2803} 2803}
2804 2804
2805static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, 2805static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 707446fa00bd..cd1ad0019185 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1378,7 +1378,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
1378 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 1378 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1379 int ret; 1379 int ret;
1380 1380
1381 if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)) 1381 if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
1382 return; 1382 return;
1383 1383
1384 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && 1384 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 39aad9893e0b..40fed1f511e2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1000,10 +1000,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1000 */ 1000 */
1001 if (load_module) { 1001 if (load_module) {
1002 err = request_module("%s", op->name); 1002 err = request_module("%s", op->name);
1003#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
1003 if (err) 1004 if (err)
1004 IWL_ERR(drv, 1005 IWL_ERR(drv,
1005 "failed to load module %s (error %d), is dynamic loading enabled?\n", 1006 "failed to load module %s (error %d), is dynamic loading enabled?\n",
1006 op->name, err); 1007 op->name, err);
1008#endif
1007 } 1009 }
1008 return; 1010 return;
1009 1011
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 55334d542e26..b99fe3163866 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -2546,6 +2546,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
2546 info->control.rates[0].flags = 0; 2546 info->control.rates[0].flags = 0;
2547 } 2547 }
2548 info->control.rates[0].idx = rate_idx; 2548 info->control.rates[0].idx = rate_idx;
2549 info->control.rates[0].count = 1;
2549} 2550}
2550 2551
2551static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, 2552static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index f212f16502ff..48c1891e3df6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -180,7 +180,8 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
180 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); 180 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
181 return; 181 return;
182 } else if (ieee80211_is_back_req(fc)) { 182 } else if (ieee80211_is_back_req(fc)) {
183 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); 183 tx_cmd->tx_flags |=
184 cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
184 } 185 }
185 186
186 /* HT rate doesn't make sense for a non data frame */ 187 /* HT rate doesn't make sense for a non data frame */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b52d70c75e1a..72f32e5caa4d 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3027,19 +3027,26 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
3027 * TODO: we do not use +6 dBm option to do not increase power beyond 3027 * TODO: we do not use +6 dBm option to do not increase power beyond
3028 * regulatory limit, however this could be utilized for devices with 3028 * regulatory limit, however this could be utilized for devices with
3029 * CAPABILITY_POWER_LIMIT. 3029 * CAPABILITY_POWER_LIMIT.
3030 *
3031 * TODO: add different temperature compensation code for RT3290 & RT5390
3032 * to allow BBP_R1 to be used for those chips.
3030 */ 3033 */
3031 rt2800_bbp_read(rt2x00dev, 1, &r1); 3034 if (!rt2x00_rt(rt2x00dev, RT3290) &&
3032 if (delta <= -12) { 3035 !rt2x00_rt(rt2x00dev, RT5390)) {
3033 power_ctrl = 2; 3036 rt2800_bbp_read(rt2x00dev, 1, &r1);
3034 delta += 12; 3037 if (delta <= -12) {
3035 } else if (delta <= -6) { 3038 power_ctrl = 2;
3036 power_ctrl = 1; 3039 delta += 12;
3037 delta += 6; 3040 } else if (delta <= -6) {
3038 } else { 3041 power_ctrl = 1;
3039 power_ctrl = 0; 3042 delta += 6;
3043 } else {
3044 power_ctrl = 0;
3045 }
3046 rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
3047 rt2800_bbp_write(rt2x00dev, 1, r1);
3040 } 3048 }
3041 rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); 3049
3042 rt2800_bbp_write(rt2x00dev, 1, r1);
3043 offset = TX_PWR_CFG_0; 3050 offset = TX_PWR_CFG_0;
3044 3051
3045 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { 3052 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 716aa93fff76..59df8575a48c 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -61,6 +61,7 @@ static DEFINE_MUTEX(bridge_mutex);
61static void handle_hotplug_event_bridge (acpi_handle, u32, void *); 61static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
62static void acpiphp_sanitize_bus(struct pci_bus *bus); 62static void acpiphp_sanitize_bus(struct pci_bus *bus);
63static void acpiphp_set_hpp_values(struct pci_bus *bus); 63static void acpiphp_set_hpp_values(struct pci_bus *bus);
64static void hotplug_event_func(acpi_handle handle, u32 type, void *context);
64static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context); 65static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
65static void free_bridge(struct kref *kref); 66static void free_bridge(struct kref *kref);
66 67
@@ -147,7 +148,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
147 148
148 149
149static const struct acpi_dock_ops acpiphp_dock_ops = { 150static const struct acpi_dock_ops acpiphp_dock_ops = {
150 .handler = handle_hotplug_event_func, 151 .handler = hotplug_event_func,
151}; 152};
152 153
153/* Check whether the PCI device is managed by native PCIe hotplug driver */ 154/* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -179,6 +180,20 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
179 return true; 180 return true;
180} 181}
181 182
183static void acpiphp_dock_init(void *data)
184{
185 struct acpiphp_func *func = data;
186
187 get_bridge(func->slot->bridge);
188}
189
190static void acpiphp_dock_release(void *data)
191{
192 struct acpiphp_func *func = data;
193
194 put_bridge(func->slot->bridge);
195}
196
182/* callback routine to register each ACPI PCI slot object */ 197/* callback routine to register each ACPI PCI slot object */
183static acpi_status 198static acpi_status
184register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) 199register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
@@ -298,7 +313,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
298 */ 313 */
299 newfunc->flags &= ~FUNC_HAS_EJ0; 314 newfunc->flags &= ~FUNC_HAS_EJ0;
300 if (register_hotplug_dock_device(handle, 315 if (register_hotplug_dock_device(handle,
301 &acpiphp_dock_ops, newfunc)) 316 &acpiphp_dock_ops, newfunc,
317 acpiphp_dock_init, acpiphp_dock_release))
302 dbg("failed to register dock device\n"); 318 dbg("failed to register dock device\n");
303 319
304 /* we need to be notified when dock events happen 320 /* we need to be notified when dock events happen
@@ -670,6 +686,7 @@ static int __ref enable_device(struct acpiphp_slot *slot)
670 struct pci_bus *bus = slot->bridge->pci_bus; 686 struct pci_bus *bus = slot->bridge->pci_bus;
671 struct acpiphp_func *func; 687 struct acpiphp_func *func;
672 int num, max, pass; 688 int num, max, pass;
689 LIST_HEAD(add_list);
673 690
674 if (slot->flags & SLOT_ENABLED) 691 if (slot->flags & SLOT_ENABLED)
675 goto err_exit; 692 goto err_exit;
@@ -694,13 +711,15 @@ static int __ref enable_device(struct acpiphp_slot *slot)
694 max = pci_scan_bridge(bus, dev, max, pass); 711 max = pci_scan_bridge(bus, dev, max, pass);
695 if (pass && dev->subordinate) { 712 if (pass && dev->subordinate) {
696 check_hotplug_bridge(slot, dev); 713 check_hotplug_bridge(slot, dev);
697 pci_bus_size_bridges(dev->subordinate); 714 pcibios_resource_survey_bus(dev->subordinate);
715 __pci_bus_size_bridges(dev->subordinate,
716 &add_list);
698 } 717 }
699 } 718 }
700 } 719 }
701 } 720 }
702 721
703 pci_bus_assign_resources(bus); 722 __pci_bus_assign_resources(bus, &add_list, NULL);
704 acpiphp_sanitize_bus(bus); 723 acpiphp_sanitize_bus(bus);
705 acpiphp_set_hpp_values(bus); 724 acpiphp_set_hpp_values(bus);
706 acpiphp_set_acpi_region(slot); 725 acpiphp_set_acpi_region(slot);
@@ -1065,22 +1084,12 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
1065 alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge); 1084 alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
1066} 1085}
1067 1086
1068static void _handle_hotplug_event_func(struct work_struct *work) 1087static void hotplug_event_func(acpi_handle handle, u32 type, void *context)
1069{ 1088{
1070 struct acpiphp_func *func; 1089 struct acpiphp_func *func = context;
1071 char objname[64]; 1090 char objname[64];
1072 struct acpi_buffer buffer = { .length = sizeof(objname), 1091 struct acpi_buffer buffer = { .length = sizeof(objname),
1073 .pointer = objname }; 1092 .pointer = objname };
1074 struct acpi_hp_work *hp_work;
1075 acpi_handle handle;
1076 u32 type;
1077
1078 hp_work = container_of(work, struct acpi_hp_work, work);
1079 handle = hp_work->handle;
1080 type = hp_work->type;
1081 func = (struct acpiphp_func *)hp_work->context;
1082
1083 acpi_scan_lock_acquire();
1084 1093
1085 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); 1094 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1086 1095
@@ -1113,6 +1122,18 @@ static void _handle_hotplug_event_func(struct work_struct *work)
1113 warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); 1122 warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
1114 break; 1123 break;
1115 } 1124 }
1125}
1126
1127static void _handle_hotplug_event_func(struct work_struct *work)
1128{
1129 struct acpi_hp_work *hp_work;
1130 struct acpiphp_func *func;
1131
1132 hp_work = container_of(work, struct acpi_hp_work, work);
1133 func = hp_work->context;
1134 acpi_scan_lock_acquire();
1135
1136 hotplug_event_func(hp_work->handle, hp_work->type, func);
1116 1137
1117 acpi_scan_lock_release(); 1138 acpi_scan_lock_release();
1118 kfree(hp_work); /* allocated in handle_hotplug_event_func */ 1139 kfree(hp_work); /* allocated in handle_hotplug_event_func */
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 68678ed76b0d..d1182c4a754e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -202,6 +202,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
202 struct resource *res, unsigned int reg); 202 struct resource *res, unsigned int reg);
203int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type); 203int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
204void pci_configure_ari(struct pci_dev *dev); 204void pci_configure_ari(struct pci_dev *dev);
205void __ref __pci_bus_size_bridges(struct pci_bus *bus,
206 struct list_head *realloc_head);
207void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
208 struct list_head *realloc_head,
209 struct list_head *fail_head);
205 210
206/** 211/**
207 * pci_ari_enabled - query ARI forwarding status 212 * pci_ari_enabled - query ARI forwarding status
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 16abaaa1f83c..d254e2379533 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1044,7 +1044,7 @@ handle_done:
1044 ; 1044 ;
1045} 1045}
1046 1046
1047static void __ref __pci_bus_size_bridges(struct pci_bus *bus, 1047void __ref __pci_bus_size_bridges(struct pci_bus *bus,
1048 struct list_head *realloc_head) 1048 struct list_head *realloc_head)
1049{ 1049{
1050 struct pci_dev *dev; 1050 struct pci_dev *dev;
@@ -1115,9 +1115,9 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
1115} 1115}
1116EXPORT_SYMBOL(pci_bus_size_bridges); 1116EXPORT_SYMBOL(pci_bus_size_bridges);
1117 1117
1118static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, 1118void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
1119 struct list_head *realloc_head, 1119 struct list_head *realloc_head,
1120 struct list_head *fail_head) 1120 struct list_head *fail_head)
1121{ 1121{
1122 struct pci_bus *b; 1122 struct pci_bus *b;
1123 struct pci_dev *dev; 1123 struct pci_dev *dev;
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index d8fa37d5c734..2c9155b66f09 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -439,7 +439,7 @@ static int tps6586x_regulator_remove(struct platform_device *pdev)
439 439
440static struct platform_driver tps6586x_regulator_driver = { 440static struct platform_driver tps6586x_regulator_driver = {
441 .driver = { 441 .driver = {
442 .name = "tps6586x-pmic", 442 .name = "tps6586x-regulator",
443 .owner = THIS_MODULE, 443 .owner = THIS_MODULE,
444 }, 444 },
445 .probe = tps6586x_regulator_probe, 445 .probe = tps6586x_regulator_probe,
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 292b24f9bf93..32ae6c67ea3a 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1656,9 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1656 1656
1657 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && 1657 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
1658 fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) { 1658 fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
1659 skb->vlan_tci = VLAN_TAG_PRESENT | 1659 /* must set skb->dev before calling vlan_put_tag */
1660 vlan_dev_vlan_id(fcoe->netdev);
1661 skb->dev = fcoe->realdev; 1660 skb->dev = fcoe->realdev;
1661 skb = __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1662 vlan_dev_vlan_id(fcoe->netdev));
1663 if (!skb)
1664 return -ENOMEM;
1662 } else 1665 } else
1663 skb->dev = fcoe->netdev; 1666 skb->dev = fcoe->netdev;
1664 1667
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index cd743c545ce9..795843dde8ec 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1548,9 +1548,6 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
1548{ 1548{
1549 struct fcoe_fcf *fcf; 1549 struct fcoe_fcf *fcf;
1550 struct fcoe_fcf *best = fip->sel_fcf; 1550 struct fcoe_fcf *best = fip->sel_fcf;
1551 struct fcoe_fcf *first;
1552
1553 first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list);
1554 1551
1555 list_for_each_entry(fcf, &fip->fcfs, list) { 1552 list_for_each_entry(fcf, &fip->fcfs, list) {
1556 LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx " 1553 LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx "
@@ -1568,17 +1565,15 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
1568 "" : "un"); 1565 "" : "un");
1569 continue; 1566 continue;
1570 } 1567 }
1571 if (fcf->fabric_name != first->fabric_name || 1568 if (!best || fcf->pri < best->pri || best->flogi_sent)
1572 fcf->vfid != first->vfid || 1569 best = fcf;
1573 fcf->fc_map != first->fc_map) { 1570 if (fcf->fabric_name != best->fabric_name ||
1571 fcf->vfid != best->vfid ||
1572 fcf->fc_map != best->fc_map) {
1574 LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " 1573 LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
1575 "or FC-MAP\n"); 1574 "or FC-MAP\n");
1576 return NULL; 1575 return NULL;
1577 } 1576 }
1578 if (fcf->flogi_sent)
1579 continue;
1580 if (!best || fcf->pri < best->pri || best->flogi_sent)
1581 best = fcf;
1582 } 1577 }
1583 fip->sel_fcf = best; 1578 fip->sel_fcf = best;
1584 if (best) { 1579 if (best) {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 82a3c1ec8706..6c4cedb44c07 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -8980,19 +8980,6 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8980 if (!ioa_cfg->res_entries) 8980 if (!ioa_cfg->res_entries)
8981 goto out; 8981 goto out;
8982 8982
8983 if (ioa_cfg->sis64) {
8984 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8985 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8986 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8987 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8988 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8989 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8990
8991 if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
8992 || !ioa_cfg->vset_ids)
8993 goto out_free_res_entries;
8994 }
8995
8996 for (i = 0; i < ioa_cfg->max_devs_supported; i++) { 8983 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8997 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); 8984 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8998 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; 8985 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
@@ -9089,9 +9076,6 @@ out_free_vpd_cbs:
9089 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); 9076 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9090out_free_res_entries: 9077out_free_res_entries:
9091 kfree(ioa_cfg->res_entries); 9078 kfree(ioa_cfg->res_entries);
9092 kfree(ioa_cfg->target_ids);
9093 kfree(ioa_cfg->array_ids);
9094 kfree(ioa_cfg->vset_ids);
9095 goto out; 9079 goto out;
9096} 9080}
9097 9081
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index a1fb840596ef..07a85ce41782 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1440,9 +1440,9 @@ struct ipr_ioa_cfg {
1440 /* 1440 /*
1441 * Bitmaps for SIS64 generated target values 1441 * Bitmaps for SIS64 generated target values
1442 */ 1442 */
1443 unsigned long *target_ids; 1443 unsigned long target_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
1444 unsigned long *array_ids; 1444 unsigned long array_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
1445 unsigned long *vset_ids; 1445 unsigned long vset_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
1446 1446
1447 u16 type; /* CCIN of the card */ 1447 u16 type; /* CCIN of the card */
1448 1448
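
The ipr change trades three runtime-allocated bitmaps for fixed arrays sized at compile time; "unsigned long x[BITS_TO_LONGS(N)]" is exactly what DECLARE_BITMAP() expands to, so an equivalent declaration (with a made-up size standing in for IPR_MAX_SIS64_DEVS) would be:

#include <linux/bitmap.h>

#define EXAMPLE_MAX_DEVS	1024	/* stand-in for IPR_MAX_SIS64_DEVS */

struct example_cfg {
	/* same storage as: unsigned long target_ids[BITS_TO_LONGS(EXAMPLE_MAX_DEVS)]; */
	DECLARE_BITMAP(target_ids, EXAMPLE_MAX_DEVS);
	DECLARE_BITMAP(array_ids, EXAMPLE_MAX_DEVS);
	DECLARE_BITMAP(vset_ids, EXAMPLE_MAX_DEVS);
};
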
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index c772d8d27159..8b928c67e4b9 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -463,13 +463,7 @@ static void fc_exch_delete(struct fc_exch *ep)
463 fc_exch_release(ep); /* drop hold for exch in mp */ 463 fc_exch_release(ep); /* drop hold for exch in mp */
464} 464}
465 465
466/** 466static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
467 * fc_seq_send() - Send a frame using existing sequence/exchange pair
468 * @lport: The local port that the exchange will be sent on
469 * @sp: The sequence to be sent
470 * @fp: The frame to be sent on the exchange
471 */
472static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
473 struct fc_frame *fp) 467 struct fc_frame *fp)
474{ 468{
475 struct fc_exch *ep; 469 struct fc_exch *ep;
@@ -479,7 +473,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
479 u8 fh_type = fh->fh_type; 473 u8 fh_type = fh->fh_type;
480 474
481 ep = fc_seq_exch(sp); 475 ep = fc_seq_exch(sp);
482 WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); 476 WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
483 477
484 f_ctl = ntoh24(fh->fh_f_ctl); 478 f_ctl = ntoh24(fh->fh_f_ctl);
485 fc_exch_setup_hdr(ep, fp, f_ctl); 479 fc_exch_setup_hdr(ep, fp, f_ctl);
@@ -502,17 +496,34 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
502 error = lport->tt.frame_send(lport, fp); 496 error = lport->tt.frame_send(lport, fp);
503 497
504 if (fh_type == FC_TYPE_BLS) 498 if (fh_type == FC_TYPE_BLS)
505 return error; 499 goto out;
506 500
507 /* 501 /*
508 * Update the exchange and sequence flags, 502 * Update the exchange and sequence flags,
509 * assuming all frames for the sequence have been sent. 503 * assuming all frames for the sequence have been sent.
510 * We can only be called to send once for each sequence. 504 * We can only be called to send once for each sequence.
511 */ 505 */
512 spin_lock_bh(&ep->ex_lock);
513 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ 506 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
514 if (f_ctl & FC_FC_SEQ_INIT) 507 if (f_ctl & FC_FC_SEQ_INIT)
515 ep->esb_stat &= ~ESB_ST_SEQ_INIT; 508 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
509out:
510 return error;
511}
512
513/**
514 * fc_seq_send() - Send a frame using existing sequence/exchange pair
515 * @lport: The local port that the exchange will be sent on
516 * @sp: The sequence to be sent
517 * @fp: The frame to be sent on the exchange
518 */
519static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
520 struct fc_frame *fp)
521{
522 struct fc_exch *ep;
523 int error;
524 ep = fc_seq_exch(sp);
525 spin_lock_bh(&ep->ex_lock);
526 error = fc_seq_send_locked(lport, sp, fp);
516 spin_unlock_bh(&ep->ex_lock); 527 spin_unlock_bh(&ep->ex_lock);
517 return error; 528 return error;
518} 529}
@@ -629,7 +640,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
629 if (fp) { 640 if (fp) {
630 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, 641 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
631 FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 642 FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
632 error = fc_seq_send(ep->lp, sp, fp); 643 error = fc_seq_send_locked(ep->lp, sp, fp);
633 } else 644 } else
634 error = -ENOBUFS; 645 error = -ENOBUFS;
635 return error; 646 return error;
@@ -1132,7 +1143,7 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
1132 f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; 1143 f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1133 f_ctl |= ep->f_ctl; 1144 f_ctl |= ep->f_ctl;
1134 fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0); 1145 fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
1135 fc_seq_send(ep->lp, sp, fp); 1146 fc_seq_send_locked(ep->lp, sp, fp);
1136} 1147}
1137 1148
1138/** 1149/**
@@ -1307,8 +1318,8 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1307 ap->ba_low_seq_cnt = htons(sp->cnt); 1318 ap->ba_low_seq_cnt = htons(sp->cnt);
1308 } 1319 }
1309 sp = fc_seq_start_next_locked(sp); 1320 sp = fc_seq_start_next_locked(sp);
1310 spin_unlock_bh(&ep->ex_lock);
1311 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); 1321 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1322 spin_unlock_bh(&ep->ex_lock);
1312 fc_frame_free(rx_fp); 1323 fc_frame_free(rx_fp);
1313 return; 1324 return;
1314 1325
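
The fc_exch.c hunks apply the usual "_locked variant" split: the body that manipulates the exchange under ep->ex_lock becomes fc_seq_send_locked(), callers that already hold the lock (the ABTS path and fc_seq_send_last) switch to it, and fc_seq_send() shrinks to a take-lock/call/release wrapper. The pattern in miniature, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_exch {
	spinlock_t	ex_lock;
	u32		f_ctl;
};

/* Caller must hold ep->ex_lock. */
static int example_send_locked(struct example_exch *ep, u32 f_ctl)
{
	ep->f_ctl = f_ctl;	/* state update protected by ex_lock */
	return 0;
}

/* Public entry point: just wraps the locked helper. */
static int example_send(struct example_exch *ep, u32 f_ctl)
{
	int error;

	spin_lock_bh(&ep->ex_lock);
	error = example_send_locked(ep, f_ctl);
	spin_unlock_bh(&ep->ex_lock);
	return error;
}
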
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index d518d17e940f..6bbb9447b75d 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1962,7 +1962,7 @@ static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
1962 rdata->flags |= FC_RP_FLAGS_RETRY; 1962 rdata->flags |= FC_RP_FLAGS_RETRY;
1963 rdata->supported_classes = FC_COS_CLASS3; 1963 rdata->supported_classes = FC_COS_CLASS3;
1964 1964
1965 if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR)) 1965 if (!(lport->service_params & FCP_SPPF_INIT_FCN))
1966 return 0; 1966 return 0;
1967 1967
1968 spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR; 1968 spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 98ab921070d2..0a5c8951cebb 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -278,3 +278,14 @@ qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
278 278
279 set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags); 279 set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
280} 280}
281
282static inline void
283qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
284{
285 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
286 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
287 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
288 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
289 complete(&ha->mbx_intr_comp);
290 }
291}
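
qla2x00_handle_mbx_completion() collects the test_bit/set_bit/complete() sequence that every qla2xxx interrupt handler used to open-code, and the later hunks call it while the hardware lock is still held; MBX_INTR_WAIT is now cleared by the helper rather than by the waiter in qla2x00_mailbox_command(). A stripped-down model of that handshake, with made-up names in place of struct qla_hw_data:

#include <linux/bitops.h>
#include <linux/completion.h>

#define EX_INTR_WAIT	0	/* bit index, stand-in for MBX_INTR_WAIT */

struct example_hw {
	unsigned long		mbx_flags;
	struct completion	mbx_done;
};

/* Called from every interrupt path, under the hardware lock: wake the
 * mailbox waiter exactly once and clear the wait flag on its behalf. */
static inline void example_handle_mbx_completion(struct example_hw *hw,
						 bool mbx_interrupt)
{
	if (test_bit(EX_INTR_WAIT, &hw->mbx_flags) && mbx_interrupt) {
		clear_bit(EX_INTR_WAIT, &hw->mbx_flags);
		complete(&hw->mbx_done);
	}
}
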
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 259d9205d876..d2a4c75e5b8f 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -104,14 +104,9 @@ qla2100_intr_handler(int irq, void *dev_id)
104 RD_REG_WORD(&reg->hccr); 104 RD_REG_WORD(&reg->hccr);
105 } 105 }
106 } 106 }
107 qla2x00_handle_mbx_completion(ha, status);
107 spin_unlock_irqrestore(&ha->hardware_lock, flags); 108 spin_unlock_irqrestore(&ha->hardware_lock, flags);
108 109
109 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
110 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
111 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
112 complete(&ha->mbx_intr_comp);
113 }
114
115 return (IRQ_HANDLED); 110 return (IRQ_HANDLED);
116} 111}
117 112
@@ -221,14 +216,9 @@ qla2300_intr_handler(int irq, void *dev_id)
221 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 216 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
222 RD_REG_WORD_RELAXED(&reg->hccr); 217 RD_REG_WORD_RELAXED(&reg->hccr);
223 } 218 }
219 qla2x00_handle_mbx_completion(ha, status);
224 spin_unlock_irqrestore(&ha->hardware_lock, flags); 220 spin_unlock_irqrestore(&ha->hardware_lock, flags);
225 221
226 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
227 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
228 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
229 complete(&ha->mbx_intr_comp);
230 }
231
232 return (IRQ_HANDLED); 222 return (IRQ_HANDLED);
233} 223}
234 224
@@ -2613,14 +2603,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
2613 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 2603 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2614 ndelay(3500); 2604 ndelay(3500);
2615 } 2605 }
2606 qla2x00_handle_mbx_completion(ha, status);
2616 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2607 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2617 2608
2618 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2619 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2620 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2621 complete(&ha->mbx_intr_comp);
2622 }
2623
2624 return IRQ_HANDLED; 2609 return IRQ_HANDLED;
2625} 2610}
2626 2611
@@ -2763,13 +2748,9 @@ qla24xx_msix_default(int irq, void *dev_id)
2763 } 2748 }
2764 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2749 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2765 } while (0); 2750 } while (0);
2751 qla2x00_handle_mbx_completion(ha, status);
2766 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2752 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2767 2753
2768 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2769 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2770 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2771 complete(&ha->mbx_intr_comp);
2772 }
2773 return IRQ_HANDLED; 2754 return IRQ_HANDLED;
2774} 2755}
2775 2756
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 9e5d89db7272..3587ec267fa6 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -179,8 +179,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
179 179
180 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); 180 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
181 181
182 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
183
184 } else { 182 } else {
185 ql_dbg(ql_dbg_mbx, vha, 0x1011, 183 ql_dbg(ql_dbg_mbx, vha, 0x1011,
186 "Cmd=%x Polling Mode.\n", command); 184 "Cmd=%x Polling Mode.\n", command);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 937fed8cb038..a6df55838365 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -148,9 +148,6 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
148 spin_unlock_irqrestore(&ha->hardware_lock, flags); 148 spin_unlock_irqrestore(&ha->hardware_lock, flags);
149 149
150 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); 150 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
151
152 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
153
154 } else { 151 } else {
155 ql_dbg(ql_dbg_mbx, vha, 0x112c, 152 ql_dbg(ql_dbg_mbx, vha, 0x112c,
156 "Cmd=%x Polling Mode.\n", command); 153 "Cmd=%x Polling Mode.\n", command);
@@ -2934,13 +2931,10 @@ qlafx00_intr_handler(int irq, void *dev_id)
2934 QLAFX00_CLR_INTR_REG(ha, clr_intr); 2931 QLAFX00_CLR_INTR_REG(ha, clr_intr);
2935 QLAFX00_RD_INTR_REG(ha); 2932 QLAFX00_RD_INTR_REG(ha);
2936 } 2933 }
2934
2935 qla2x00_handle_mbx_completion(ha, status);
2937 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2936 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2938 2937
2939 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2940 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2941 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2942 complete(&ha->mbx_intr_comp);
2943 }
2944 return IRQ_HANDLED; 2938 return IRQ_HANDLED;
2945} 2939}
2946 2940
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 10754f518303..cce0cd0d7ec4 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2074,9 +2074,6 @@ qla82xx_intr_handler(int irq, void *dev_id)
2074 } 2074 }
2075 WRT_REG_DWORD(&reg->host_int, 0); 2075 WRT_REG_DWORD(&reg->host_int, 0);
2076 } 2076 }
2077 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2078 if (!ha->flags.msi_enabled)
2079 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2080 2077
2081#ifdef QL_DEBUG_LEVEL_17 2078#ifdef QL_DEBUG_LEVEL_17
2082 if (!irq && ha->flags.eeh_busy) 2079 if (!irq && ha->flags.eeh_busy)
@@ -2085,11 +2082,12 @@ qla82xx_intr_handler(int irq, void *dev_id)
2085 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2082 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2086#endif 2083#endif
2087 2084
2088 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2085 qla2x00_handle_mbx_completion(ha, status);
2089 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2086 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2090 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2087
2091 complete(&ha->mbx_intr_comp); 2088 if (!ha->flags.msi_enabled)
2092 } 2089 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2090
2093 return IRQ_HANDLED; 2091 return IRQ_HANDLED;
2094} 2092}
2095 2093
@@ -2149,8 +2147,6 @@ qla82xx_msix_default(int irq, void *dev_id)
2149 WRT_REG_DWORD(&reg->host_int, 0); 2147 WRT_REG_DWORD(&reg->host_int, 0);
2150 } while (0); 2148 } while (0);
2151 2149
2152 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2153
2154#ifdef QL_DEBUG_LEVEL_17 2150#ifdef QL_DEBUG_LEVEL_17
2155 if (!irq && ha->flags.eeh_busy) 2151 if (!irq && ha->flags.eeh_busy)
2156 ql_log(ql_log_warn, vha, 0x5044, 2152 ql_log(ql_log_warn, vha, 0x5044,
@@ -2158,11 +2154,9 @@ qla82xx_msix_default(int irq, void *dev_id)
2158 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2154 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2159#endif 2155#endif
2160 2156
2161 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2157 qla2x00_handle_mbx_completion(ha, status);
2162 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2158 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2163 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2159
2164 complete(&ha->mbx_intr_comp);
2165 }
2166 return IRQ_HANDLED; 2160 return IRQ_HANDLED;
2167} 2161}
2168 2162
@@ -3345,7 +3339,7 @@ void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
3345 ha->flags.mbox_busy = 0; 3339 ha->flags.mbox_busy = 0;
3346 ql_log(ql_log_warn, vha, 0x6010, 3340 ql_log(ql_log_warn, vha, 0x6010,
3347 "Doing premature completion of mbx command.\n"); 3341 "Doing premature completion of mbx command.\n");
3348 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) 3342 if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
3349 complete(&ha->mbx_intr_comp); 3343 complete(&ha->mbx_intr_comp);
3350 } 3344 }
3351} 3345}
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index c735c5a008a2..6427600b5bbe 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -59,7 +59,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
59 int ret; 59 int ret;
60 60
61 sg_free_table(sgt); 61 sg_free_table(sgt);
62 ret = sg_alloc_table(sgt, nents, GFP_KERNEL); 62 ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
63 if (ret) 63 if (ret)
64 return ret; 64 return ret;
65 } 65 }
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index f5d84d6f8222..48b396fced0a 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1075,7 +1075,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1075 acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev)) 1075 acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
1076 return NULL; 1076 return NULL;
1077 1077
1078 pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL); 1078 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1079 if (!pdata) { 1079 if (!pdata) {
1080 dev_err(&pdev->dev, 1080 dev_err(&pdev->dev,
1081 "failed to allocate memory for platform data\n"); 1081 "failed to allocate memory for platform data\n");
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 5000586cb98d..71cc3e6ef47c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -444,7 +444,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
444 } 444 }
445 445
446 ret = pm_runtime_get_sync(&sdd->pdev->dev); 446 ret = pm_runtime_get_sync(&sdd->pdev->dev);
447 if (ret != 0) { 447 if (ret < 0) {
448 dev_err(dev, "Failed to enable device: %d\n", ret); 448 dev_err(dev, "Failed to enable device: %d\n", ret);
449 goto out_tx; 449 goto out_tx;
450 } 450 }
diff --git a/fs/exec.c b/fs/exec.c
index 643019585574..ffd7a813ad3d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1135,13 +1135,6 @@ void setup_new_exec(struct linux_binprm * bprm)
1135 set_dumpable(current->mm, suid_dumpable); 1135 set_dumpable(current->mm, suid_dumpable);
1136 } 1136 }
1137 1137
1138 /*
1139 * Flush performance counters when crossing a
1140 * security domain:
1141 */
1142 if (!get_dumpable(current->mm))
1143 perf_event_exit_task(current);
1144
1145 /* An exec changes our domain. We are no longer part of the thread 1138 /* An exec changes our domain. We are no longer part of the thread
1146 group */ 1139 group */
1147 1140
@@ -1205,6 +1198,15 @@ void install_exec_creds(struct linux_binprm *bprm)
1205 1198
1206 commit_creds(bprm->cred); 1199 commit_creds(bprm->cred);
1207 bprm->cred = NULL; 1200 bprm->cred = NULL;
1201
1202 /*
1203 * Disable monitoring for regular users
1204 * when executing setuid binaries. Must
1205 * wait until new credentials are committed
1206 * by commit_creds() above
1207 */
1208 if (get_dumpable(current->mm) != SUID_DUMP_USER)
1209 perf_event_exit_task(current);
1208 /* 1210 /*
1209 * cred_guard_mutex must be held at least to this point to prevent 1211 * cred_guard_mutex must be held at least to this point to prevent
1210 * ptrace_attach() from altering our determination of the task's 1212 * ptrace_attach() from altering our determination of the task's
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e570081f9f76..35f281033142 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2470,13 +2470,16 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2470 .mode = mode 2470 .mode = mode
2471 }; 2471 };
2472 int err; 2472 int err;
2473 bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
2474 (mode & FALLOC_FL_PUNCH_HOLE);
2473 2475
2474 if (fc->no_fallocate) 2476 if (fc->no_fallocate)
2475 return -EOPNOTSUPP; 2477 return -EOPNOTSUPP;
2476 2478
2477 if (mode & FALLOC_FL_PUNCH_HOLE) { 2479 if (lock_inode) {
2478 mutex_lock(&inode->i_mutex); 2480 mutex_lock(&inode->i_mutex);
2479 fuse_set_nowrite(inode); 2481 if (mode & FALLOC_FL_PUNCH_HOLE)
2482 fuse_set_nowrite(inode);
2480 } 2483 }
2481 2484
2482 req = fuse_get_req_nopages(fc); 2485 req = fuse_get_req_nopages(fc);
@@ -2511,8 +2514,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2511 fuse_invalidate_attr(inode); 2514 fuse_invalidate_attr(inode);
2512 2515
2513out: 2516out:
2514 if (mode & FALLOC_FL_PUNCH_HOLE) { 2517 if (lock_inode) {
2515 fuse_release_nowrite(inode); 2518 if (mode & FALLOC_FL_PUNCH_HOLE)
2519 fuse_release_nowrite(inode);
2516 mutex_unlock(&inode->i_mutex); 2520 mutex_unlock(&inode->i_mutex);
2517 } 2521 }
2518 2522
diff --git a/fs/splice.c b/fs/splice.c
index 9eca476227d5..d37431dd60a1 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1283,6 +1283,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
1283 * @in: file to splice from 1283 * @in: file to splice from
1284 * @ppos: input file offset 1284 * @ppos: input file offset
1285 * @out: file to splice to 1285 * @out: file to splice to
1286 * @opos: output file offset
1286 * @len: number of bytes to splice 1287 * @len: number of bytes to splice
1287 * @flags: splice modifier flags 1288 * @flags: splice modifier flags
1288 * 1289 *
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index de08c92f2e23..605af512aec2 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -349,31 +349,50 @@ static unsigned int vfs_dent_type(uint8_t type)
349static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) 349static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
350{ 350{
351 int err, over = 0; 351 int err, over = 0;
352 loff_t pos = file->f_pos;
352 struct qstr nm; 353 struct qstr nm;
353 union ubifs_key key; 354 union ubifs_key key;
354 struct ubifs_dent_node *dent; 355 struct ubifs_dent_node *dent;
355 struct inode *dir = file_inode(file); 356 struct inode *dir = file_inode(file);
356 struct ubifs_info *c = dir->i_sb->s_fs_info; 357 struct ubifs_info *c = dir->i_sb->s_fs_info;
357 358
358 dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos); 359 dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, pos);
359 360
360 if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2) 361 if (pos > UBIFS_S_KEY_HASH_MASK || pos == 2)
361 /* 362 /*
362 * The directory was seek'ed to a senseless position or there 363 * The directory was seek'ed to a senseless position or there
363 * are no more entries. 364 * are no more entries.
364 */ 365 */
365 return 0; 366 return 0;
366 367
368 if (file->f_version == 0) {
369 /*
370 * The file was seek'ed, which means that @file->private_data
371 * is now invalid. This may also be just the first
372 * 'ubifs_readdir()' invocation, in which case
373 * @file->private_data is NULL, and the below code is
374 * basically a no-op.
375 */
376 kfree(file->private_data);
377 file->private_data = NULL;
378 }
379
380 /*
381 * 'generic_file_llseek()' unconditionally sets @file->f_version to
382 * zero, and we use this for detecting whether the file was seek'ed.
383 */
384 file->f_version = 1;
385
367 /* File positions 0 and 1 correspond to "." and ".." */ 386 /* File positions 0 and 1 correspond to "." and ".." */
368 if (file->f_pos == 0) { 387 if (pos == 0) {
369 ubifs_assert(!file->private_data); 388 ubifs_assert(!file->private_data);
370 over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR); 389 over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR);
371 if (over) 390 if (over)
372 return 0; 391 return 0;
373 file->f_pos = 1; 392 file->f_pos = pos = 1;
374 } 393 }
375 394
376 if (file->f_pos == 1) { 395 if (pos == 1) {
377 ubifs_assert(!file->private_data); 396 ubifs_assert(!file->private_data);
378 over = filldir(dirent, "..", 2, 1, 397 over = filldir(dirent, "..", 2, 1,
379 parent_ino(file->f_path.dentry), DT_DIR); 398 parent_ino(file->f_path.dentry), DT_DIR);
@@ -389,7 +408,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
389 goto out; 408 goto out;
390 } 409 }
391 410
392 file->f_pos = key_hash_flash(c, &dent->key); 411 file->f_pos = pos = key_hash_flash(c, &dent->key);
393 file->private_data = dent; 412 file->private_data = dent;
394 } 413 }
395 414
@@ -397,17 +416,16 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
397 if (!dent) { 416 if (!dent) {
398 /* 417 /*
399 * The directory was seek'ed to and is now readdir'ed. 418 * The directory was seek'ed to and is now readdir'ed.
400 * Find the entry corresponding to @file->f_pos or the 419 * Find the entry corresponding to @pos or the closest one.
401 * closest one.
402 */ 420 */
403 dent_key_init_hash(c, &key, dir->i_ino, file->f_pos); 421 dent_key_init_hash(c, &key, dir->i_ino, pos);
404 nm.name = NULL; 422 nm.name = NULL;
405 dent = ubifs_tnc_next_ent(c, &key, &nm); 423 dent = ubifs_tnc_next_ent(c, &key, &nm);
406 if (IS_ERR(dent)) { 424 if (IS_ERR(dent)) {
407 err = PTR_ERR(dent); 425 err = PTR_ERR(dent);
408 goto out; 426 goto out;
409 } 427 }
410 file->f_pos = key_hash_flash(c, &dent->key); 428 file->f_pos = pos = key_hash_flash(c, &dent->key);
411 file->private_data = dent; 429 file->private_data = dent;
412 } 430 }
413 431
@@ -419,7 +437,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
419 ubifs_inode(dir)->creat_sqnum); 437 ubifs_inode(dir)->creat_sqnum);
420 438
421 nm.len = le16_to_cpu(dent->nlen); 439 nm.len = le16_to_cpu(dent->nlen);
422 over = filldir(dirent, dent->name, nm.len, file->f_pos, 440 over = filldir(dirent, dent->name, nm.len, pos,
423 le64_to_cpu(dent->inum), 441 le64_to_cpu(dent->inum),
424 vfs_dent_type(dent->type)); 442 vfs_dent_type(dent->type));
425 if (over) 443 if (over)
@@ -435,9 +453,17 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
435 } 453 }
436 454
437 kfree(file->private_data); 455 kfree(file->private_data);
438 file->f_pos = key_hash_flash(c, &dent->key); 456 file->f_pos = pos = key_hash_flash(c, &dent->key);
439 file->private_data = dent; 457 file->private_data = dent;
440 cond_resched(); 458 cond_resched();
459
460 if (file->f_version == 0)
461 /*
 462 * The file was seek'ed meanwhile, let's return and start
463 * reading direntries from the new position on the next
464 * invocation.
465 */
466 return 0;
441 } 467 }
442 468
443out: 469out:
@@ -448,15 +474,13 @@ out:
448 474
449 kfree(file->private_data); 475 kfree(file->private_data);
450 file->private_data = NULL; 476 file->private_data = NULL;
477 /* 2 is a special value indicating that there are no more direntries */
451 file->f_pos = 2; 478 file->f_pos = 2;
452 return 0; 479 return 0;
453} 480}
454 481
455/* If a directory is seeked, we have to free saved readdir() state */
456static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence) 482static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence)
457{ 483{
458 kfree(file->private_data);
459 file->private_data = NULL;
460 return generic_file_llseek(file, offset, whence); 484 return generic_file_llseek(file, offset, whence);
461} 485}
462 486
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index e6168a24b9f0..b420939f5eb5 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -123,7 +123,9 @@ extern int register_dock_notifier(struct notifier_block *nb);
123extern void unregister_dock_notifier(struct notifier_block *nb); 123extern void unregister_dock_notifier(struct notifier_block *nb);
124extern int register_hotplug_dock_device(acpi_handle handle, 124extern int register_hotplug_dock_device(acpi_handle handle,
125 const struct acpi_dock_ops *ops, 125 const struct acpi_dock_ops *ops,
126 void *context); 126 void *context,
127 void (*init)(void *),
128 void (*release)(void *));
127extern void unregister_hotplug_dock_device(acpi_handle handle); 129extern void unregister_hotplug_dock_device(acpi_handle handle);
128#else 130#else
129static inline int is_dock_device(acpi_handle handle) 131static inline int is_dock_device(acpi_handle handle)
@@ -139,7 +141,9 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
139} 141}
140static inline int register_hotplug_dock_device(acpi_handle handle, 142static inline int register_hotplug_dock_device(acpi_handle handle,
141 const struct acpi_dock_ops *ops, 143 const struct acpi_dock_ops *ops,
142 void *context) 144 void *context,
145 void (*init)(void *),
146 void (*release)(void *))
143{ 147{
144 return -ENODEV; 148 return -ENODEV;
145} 149}
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 52bd03b38962..637fa71de0c7 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -44,7 +44,7 @@ struct vlan_hdr {
44 * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr) 44 * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
45 * @h_dest: destination ethernet address 45 * @h_dest: destination ethernet address
46 * @h_source: source ethernet address 46 * @h_source: source ethernet address
47 * @h_vlan_proto: ethernet protocol (always 0x8100) 47 * @h_vlan_proto: ethernet protocol
48 * @h_vlan_TCI: priority and VLAN ID 48 * @h_vlan_TCI: priority and VLAN ID
49 * @h_vlan_encapsulated_proto: packet type ID or len 49 * @h_vlan_encapsulated_proto: packet type ID or len
50 */ 50 */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 60584b185a0c..96e4c21e15e0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1695,6 +1695,7 @@ extern int init_dummy_netdev(struct net_device *dev);
1695extern struct net_device *dev_get_by_index(struct net *net, int ifindex); 1695extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1696extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); 1696extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
1697extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 1697extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
1698extern int netdev_get_name(struct net *net, char *name, int ifindex);
1698extern int dev_restart(struct net_device *dev); 1699extern int dev_restart(struct net_device *dev);
1699#ifdef CONFIG_NETPOLL_TRAP 1700#ifdef CONFIG_NETPOLL_TRAP
1700extern int netpoll_trap(void); 1701extern int netpoll_trap(void);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9c676eae3968..dec1748cd002 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -627,6 +627,7 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
627} 627}
628 628
629extern void kfree_skb(struct sk_buff *skb); 629extern void kfree_skb(struct sk_buff *skb);
630extern void kfree_skb_list(struct sk_buff *segs);
630extern void skb_tx_error(struct sk_buff *skb); 631extern void skb_tx_error(struct sk_buff *skb);
631extern void consume_skb(struct sk_buff *skb); 632extern void consume_skb(struct sk_buff *skb);
632extern void __kfree_skb(struct sk_buff *skb); 633extern void __kfree_skb(struct sk_buff *skb);
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index ab5d4992e568..bdc6e87ff3eb 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -261,6 +261,7 @@ header-y += net_dropmon.h
261header-y += net_tstamp.h 261header-y += net_tstamp.h
262header-y += netconf.h 262header-y += netconf.h
263header-y += netdevice.h 263header-y += netdevice.h
264header-y += netlink_diag.h
264header-y += netfilter.h 265header-y += netfilter.h
265header-y += netfilter_arp.h 266header-y += netfilter_arp.h
266header-y += netfilter_bridge.h 267header-y += netfilter_bridge.h
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index a64f8aeb5c1f..20185ea64aa6 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -120,7 +120,7 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
120 list_for_each_entry(iter, &bp_task_head, hw.bp_list) { 120 list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
121 if (iter->hw.bp_target == tsk && 121 if (iter->hw.bp_target == tsk &&
122 find_slot_idx(iter) == type && 122 find_slot_idx(iter) == type &&
123 cpu == iter->cpu) 123 (iter->cpu < 0 || cpu == iter->cpu))
124 count += hw_breakpoint_weight(iter); 124 count += hw_breakpoint_weight(iter);
125 } 125 }
126 126
@@ -149,7 +149,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
149 return; 149 return;
150 } 150 }
151 151
152 for_each_online_cpu(cpu) { 152 for_each_possible_cpu(cpu) {
153 unsigned int nr; 153 unsigned int nr;
154 154
155 nr = per_cpu(nr_cpu_bp_pinned[type], cpu); 155 nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -235,7 +235,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
235 if (cpu >= 0) { 235 if (cpu >= 0) {
236 toggle_bp_task_slot(bp, cpu, enable, type, weight); 236 toggle_bp_task_slot(bp, cpu, enable, type, weight);
237 } else { 237 } else {
238 for_each_online_cpu(cpu) 238 for_each_possible_cpu(cpu)
239 toggle_bp_task_slot(bp, cpu, enable, type, weight); 239 toggle_bp_task_slot(bp, cpu, enable, type, weight);
240 } 240 }
241 241
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index aed981a3f69c..335a7ae697f5 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -665,20 +665,22 @@ static int ptrace_peek_siginfo(struct task_struct *child,
665 if (unlikely(is_compat_task())) { 665 if (unlikely(is_compat_task())) {
666 compat_siginfo_t __user *uinfo = compat_ptr(data); 666 compat_siginfo_t __user *uinfo = compat_ptr(data);
667 667
668 ret = copy_siginfo_to_user32(uinfo, &info); 668 if (copy_siginfo_to_user32(uinfo, &info) ||
669 ret |= __put_user(info.si_code, &uinfo->si_code); 669 __put_user(info.si_code, &uinfo->si_code)) {
670 ret = -EFAULT;
671 break;
672 }
673
670 } else 674 } else
671#endif 675#endif
672 { 676 {
673 siginfo_t __user *uinfo = (siginfo_t __user *) data; 677 siginfo_t __user *uinfo = (siginfo_t __user *) data;
674 678
675 ret = copy_siginfo_to_user(uinfo, &info); 679 if (copy_siginfo_to_user(uinfo, &info) ||
676 ret |= __put_user(info.si_code, &uinfo->si_code); 680 __put_user(info.si_code, &uinfo->si_code)) {
677 } 681 ret = -EFAULT;
678 682 break;
679 if (ret) { 683 }
680 ret = -EFAULT;
681 break;
682 } 684 }
683 685
684 data += sizeof(siginfo_t); 686 data += sizeof(siginfo_t);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index b4c245580b79..20d6fba70652 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -599,8 +599,6 @@ void tick_broadcast_oneshot_control(unsigned long reason)
599 } else { 599 } else {
600 if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { 600 if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
601 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); 601 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
602 if (dev->next_event.tv64 == KTIME_MAX)
603 goto out;
604 /* 602 /*
605 * The cpu which was handling the broadcast 603 * The cpu which was handling the broadcast
606 * timer marked this cpu in the broadcast 604 * timer marked this cpu in the broadcast
@@ -615,6 +613,11 @@ void tick_broadcast_oneshot_control(unsigned long reason)
615 goto out; 613 goto out;
616 614
617 /* 615 /*
616 * Bail out if there is no next event.
617 */
618 if (dev->next_event.tv64 == KTIME_MAX)
619 goto out;
620 /*
618 * If the pending bit is not set, then we are 621 * If the pending bit is not set, then we are
619 * either the CPU handling the broadcast 622 * either the CPU handling the broadcast
620 * interrupt or we got woken by something else. 623 * interrupt or we got woken by something else.
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index d817c932d634..ace5e55fe5a3 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -341,7 +341,6 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
341 341
342static void bredr_setup(struct hci_request *req) 342static void bredr_setup(struct hci_request *req)
343{ 343{
344 struct hci_cp_delete_stored_link_key cp;
345 __le16 param; 344 __le16 param;
346 __u8 flt_type; 345 __u8 flt_type;
347 346
@@ -365,10 +364,6 @@ static void bredr_setup(struct hci_request *req)
365 param = __constant_cpu_to_le16(0x7d00); 364 param = __constant_cpu_to_le16(0x7d00);
366 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); 365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
367 366
368 bacpy(&cp.bdaddr, BDADDR_ANY);
369 cp.delete_all = 0x01;
370 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
371
372 /* Read page scan parameters */ 367 /* Read page scan parameters */
373 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) { 368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
374 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); 369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
@@ -602,6 +597,16 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
602 struct hci_dev *hdev = req->hdev; 597 struct hci_dev *hdev = req->hdev;
603 u8 p; 598 u8 p;
604 599
600 /* Only send HCI_Delete_Stored_Link_Key if it is supported */
601 if (hdev->commands[6] & 0x80) {
602 struct hci_cp_delete_stored_link_key cp;
603
604 bacpy(&cp.bdaddr, BDADDR_ANY);
605 cp.delete_all = 0x01;
606 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
607 sizeof(cp), &cp);
608 }
609
605 if (hdev->commands[5] & 0x10) 610 if (hdev->commands[5] & 0x10)
606 hci_setup_link_policy(req); 611 hci_setup_link_policy(req);
607 612
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 24bee07ee4ce..68843a28a7af 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2852,6 +2852,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2852 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u", 2852 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2853 conn, code, ident, dlen); 2853 conn, code, ident, dlen);
2854 2854
2855 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2856 return NULL;
2857
2855 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; 2858 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2856 count = min_t(unsigned int, conn->mtu, len); 2859 count = min_t(unsigned int, conn->mtu, len);
2857 2860
@@ -4330,7 +4333,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4330 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; 4333 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4331 u16 type, result; 4334 u16 type, result;
4332 4335
4333 if (cmd_len != sizeof(*rsp)) 4336 if (cmd_len < sizeof(*rsp))
4334 return -EPROTO; 4337 return -EPROTO;
4335 4338
4336 type = __le16_to_cpu(rsp->type); 4339 type = __le16_to_cpu(rsp->type);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 81f2389f78eb..d6448e35e027 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -465,8 +465,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
465 skb_set_transport_header(skb, skb->len); 465 skb_set_transport_header(skb, skb->len);
466 mldq = (struct mld_msg *) icmp6_hdr(skb); 466 mldq = (struct mld_msg *) icmp6_hdr(skb);
467 467
468 interval = ipv6_addr_any(group) ? br->multicast_last_member_interval : 468 interval = ipv6_addr_any(group) ?
469 br->multicast_query_response_interval; 469 br->multicast_query_response_interval :
470 br->multicast_last_member_interval;
470 471
471 mldq->mld_type = ICMPV6_MGM_QUERY; 472 mldq->mld_type = ICMPV6_MGM_QUERY;
472 mldq->mld_code = 0; 473 mldq->mld_code = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index fc1e289397f5..faebb398fb46 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -792,6 +792,40 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
792EXPORT_SYMBOL(dev_get_by_index); 792EXPORT_SYMBOL(dev_get_by_index);
793 793
794/** 794/**
795 * netdev_get_name - get a netdevice name, knowing its ifindex.
796 * @net: network namespace
797 * @name: a pointer to the buffer where the name will be stored.
798 * @ifindex: the ifindex of the interface to get the name from.
799 *
800 * The use of raw_seqcount_begin() and cond_resched() before
801 * retrying is required as we want to give the writers a chance
802 * to complete when CONFIG_PREEMPT is not set.
803 */
804int netdev_get_name(struct net *net, char *name, int ifindex)
805{
806 struct net_device *dev;
807 unsigned int seq;
808
809retry:
810 seq = raw_seqcount_begin(&devnet_rename_seq);
811 rcu_read_lock();
812 dev = dev_get_by_index_rcu(net, ifindex);
813 if (!dev) {
814 rcu_read_unlock();
815 return -ENODEV;
816 }
817
818 strcpy(name, dev->name);
819 rcu_read_unlock();
820 if (read_seqcount_retry(&devnet_rename_seq, seq)) {
821 cond_resched();
822 goto retry;
823 }
824
825 return 0;
826}
827
828/**
795 * dev_getbyhwaddr_rcu - find a device by its hardware address 829 * dev_getbyhwaddr_rcu - find a device by its hardware address
796 * @net: the applicable net namespace 830 * @net: the applicable net namespace
797 * @type: media type of device 831 * @type: media type of device
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 6cc0481faade..5b7d0e1d0664 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -19,9 +19,8 @@
19 19
20static int dev_ifname(struct net *net, struct ifreq __user *arg) 20static int dev_ifname(struct net *net, struct ifreq __user *arg)
21{ 21{
22 struct net_device *dev;
23 struct ifreq ifr; 22 struct ifreq ifr;
24 unsigned seq; 23 int error;
25 24
26 /* 25 /*
27 * Fetch the caller's info block. 26 * Fetch the caller's info block.
@@ -30,19 +29,9 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
30 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 29 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
31 return -EFAULT; 30 return -EFAULT;
32 31
33retry: 32 error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
34 seq = read_seqcount_begin(&devnet_rename_seq); 33 if (error)
35 rcu_read_lock(); 34 return error;
36 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
37 if (!dev) {
38 rcu_read_unlock();
39 return -ENODEV;
40 }
41
42 strcpy(ifr.ifr_name, dev->name);
43 rcu_read_unlock();
44 if (read_seqcount_retry(&devnet_rename_seq, seq))
45 goto retry;
46 35
47 if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) 36 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
48 return -EFAULT; 37 return -EFAULT;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 22efdaa76ebf..ce91766eeca9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -60,10 +60,10 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
60 [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", 60 [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6",
61 [NETIF_F_HIGHDMA_BIT] = "highdma", 61 [NETIF_F_HIGHDMA_BIT] = "highdma",
62 [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", 62 [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist",
63 [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-ctag-hw-insert", 63 [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-hw-insert",
64 64
65 [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-ctag-hw-parse", 65 [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-hw-parse",
66 [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter", 66 [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter",
67 [NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert", 67 [NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert",
68 [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse", 68 [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse",
69 [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter", 69 [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter",
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index cfd777bd6bd0..1c1738cc4538 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -483,15 +483,8 @@ EXPORT_SYMBOL(skb_add_rx_frag);
483 483
484static void skb_drop_list(struct sk_buff **listp) 484static void skb_drop_list(struct sk_buff **listp)
485{ 485{
486 struct sk_buff *list = *listp; 486 kfree_skb_list(*listp);
487
488 *listp = NULL; 487 *listp = NULL;
489
490 do {
491 struct sk_buff *this = list;
492 list = list->next;
493 kfree_skb(this);
494 } while (list);
495} 488}
496 489
497static inline void skb_drop_fraglist(struct sk_buff *skb) 490static inline void skb_drop_fraglist(struct sk_buff *skb)
@@ -651,6 +644,17 @@ void kfree_skb(struct sk_buff *skb)
651} 644}
652EXPORT_SYMBOL(kfree_skb); 645EXPORT_SYMBOL(kfree_skb);
653 646
647void kfree_skb_list(struct sk_buff *segs)
648{
649 while (segs) {
650 struct sk_buff *next = segs->next;
651
652 kfree_skb(segs);
653 segs = next;
654 }
655}
656EXPORT_SYMBOL(kfree_skb_list);
657
654/** 658/**
655 * skb_tx_error - report an sk_buff xmit error 659 * skb_tx_error - report an sk_buff xmit error
656 * @skb: buffer that triggered an error 660 * @skb: buffer that triggered an error
diff --git a/net/core/sock.c b/net/core/sock.c
index 88868a9d21da..d6d024cfaaaf 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -571,9 +571,7 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
571 int ret = -ENOPROTOOPT; 571 int ret = -ENOPROTOOPT;
572#ifdef CONFIG_NETDEVICES 572#ifdef CONFIG_NETDEVICES
573 struct net *net = sock_net(sk); 573 struct net *net = sock_net(sk);
574 struct net_device *dev;
575 char devname[IFNAMSIZ]; 574 char devname[IFNAMSIZ];
576 unsigned seq;
577 575
578 if (sk->sk_bound_dev_if == 0) { 576 if (sk->sk_bound_dev_if == 0) {
579 len = 0; 577 len = 0;
@@ -584,20 +582,9 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
584 if (len < IFNAMSIZ) 582 if (len < IFNAMSIZ)
585 goto out; 583 goto out;
586 584
587retry: 585 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
588 seq = read_seqcount_begin(&devnet_rename_seq); 586 if (ret)
589 rcu_read_lock();
590 dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
591 ret = -ENODEV;
592 if (!dev) {
593 rcu_read_unlock();
594 goto out; 587 goto out;
595 }
596
597 strcpy(devname, dev->name);
598 rcu_read_unlock();
599 if (read_seqcount_retry(&devnet_rename_seq, seq))
600 goto retry;
601 588
602 len = strlen(devname) + 1; 589 len = strlen(devname) + 1;
603 590
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index b2e805af9b87..7856d1651d05 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -178,7 +178,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
178 178
179 err = __skb_linearize(skb); 179 err = __skb_linearize(skb);
180 if (err) { 180 if (err) {
181 kfree_skb(segs); 181 kfree_skb_list(segs);
182 segs = ERR_PTR(err); 182 segs = ERR_PTR(err);
183 goto out; 183 goto out;
184 } 184 }
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index ff4b781b1056..32b0e978c8e0 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -125,15 +125,16 @@ static void ulog_send(struct ulog_net *ulog, unsigned int nlgroupnum)
125/* timer function to flush queue in flushtimeout time */ 125/* timer function to flush queue in flushtimeout time */
126static void ulog_timer(unsigned long data) 126static void ulog_timer(unsigned long data)
127{ 127{
128 unsigned int groupnum = *((unsigned int *)data);
128 struct ulog_net *ulog = container_of((void *)data, 129 struct ulog_net *ulog = container_of((void *)data,
129 struct ulog_net, 130 struct ulog_net,
130 nlgroup[*(unsigned int *)data]); 131 nlgroup[groupnum]);
131 pr_debug("timer function called, calling ulog_send\n"); 132 pr_debug("timer function called, calling ulog_send\n");
132 133
133 /* lock to protect against somebody modifying our structure 134 /* lock to protect against somebody modifying our structure
134 * from ipt_ulog_target at the same time */ 135 * from ipt_ulog_target at the same time */
135 spin_lock_bh(&ulog->lock); 136 spin_lock_bh(&ulog->lock);
136 ulog_send(ulog, data); 137 ulog_send(ulog, groupnum);
137 spin_unlock_bh(&ulog->lock); 138 spin_unlock_bh(&ulog->lock);
138} 139}
139 140
@@ -407,8 +408,11 @@ static int __net_init ulog_tg_net_init(struct net *net)
407 408
408 spin_lock_init(&ulog->lock); 409 spin_lock_init(&ulog->lock);
409 /* initialize ulog_buffers */ 410 /* initialize ulog_buffers */
410 for (i = 0; i < ULOG_MAXNLGROUPS; i++) 411 for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
411 setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer, i); 412 ulog->nlgroup[i] = i;
413 setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer,
414 (unsigned long)&ulog->nlgroup[i]);
415 }
412 416
413 ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg); 417 ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
414 if (!ulog->nflognl) 418 if (!ulog->nflognl)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 719652305a29..7999fc55c83b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1003,7 +1003,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1003 struct tcp_sock *tp = tcp_sk(sk); 1003 struct tcp_sock *tp = tcp_sk(sk);
1004 struct tcp_md5sig_info *md5sig; 1004 struct tcp_md5sig_info *md5sig;
1005 1005
1006 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); 1006 key = tcp_md5_do_lookup(sk, addr, family);
1007 if (key) { 1007 if (key) {
1008 /* Pre-existing entry - just update that one. */ 1008 /* Pre-existing entry - just update that one. */
1009 memcpy(key->key, newkey, newkeylen); 1009 memcpy(key->key, newkey, newkeylen);
@@ -1048,7 +1048,7 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1048 struct tcp_md5sig_key *key; 1048 struct tcp_md5sig_key *key;
1049 struct tcp_md5sig_info *md5sig; 1049 struct tcp_md5sig_info *md5sig;
1050 1050
1051 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); 1051 key = tcp_md5_do_lookup(sk, addr, family);
1052 if (!key) 1052 if (!key)
1053 return -ENOENT; 1053 return -ENOENT;
1054 hlist_del_rcu(&key->node); 1054 hlist_del_rcu(&key->node);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 1bbf744c2cc3..4ab4c38958c6 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2655,6 +2655,9 @@ static void init_loopback(struct net_device *dev)
2655 if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) 2655 if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
2656 continue; 2656 continue;
2657 2657
2658 if (sp_ifa->rt)
2659 continue;
2660
2658 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); 2661 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
2659 2662
2660 /* Failure cases are ignored */ 2663 /* Failure cases are ignored */
@@ -4303,6 +4306,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
4303 struct inet6_ifaddr *ifp; 4306 struct inet6_ifaddr *ifp;
4304 struct net_device *dev = idev->dev; 4307 struct net_device *dev = idev->dev;
4305 bool update_rs = false; 4308 bool update_rs = false;
4309 struct in6_addr ll_addr;
4306 4310
4307 if (token == NULL) 4311 if (token == NULL)
4308 return -EINVAL; 4312 return -EINVAL;
@@ -4322,11 +4326,9 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
4322 4326
4323 write_unlock_bh(&idev->lock); 4327 write_unlock_bh(&idev->lock);
4324 4328
4325 if (!idev->dead && (idev->if_flags & IF_READY)) { 4329 if (!idev->dead && (idev->if_flags & IF_READY) &&
4326 struct in6_addr ll_addr; 4330 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
4327 4331 IFA_F_OPTIMISTIC)) {
4328 ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
4329 IFA_F_OPTIMISTIC);
4330 4332
4331 /* If we're not ready, then normal ifup will take care 4333 /* If we're not ready, then normal ifup will take care
4332 * of this. Otherwise, we need to request our rs here. 4334 * of this. Otherwise, we need to request our rs here.
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index dae1949019d7..d5d20cde8d92 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -381,9 +381,8 @@ int ip6_forward(struct sk_buff *skb)
381 * cannot be fragmented, because there is no warranty 381 * cannot be fragmented, because there is no warranty
382 * that different fragments will go along one path. --ANK 382 * that different fragments will go along one path. --ANK
383 */ 383 */
384 if (opt->ra) { 384 if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
385 u8 *ptr = skb_network_header(skb) + opt->ra; 385 if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
386 if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
387 return 0; 386 return 0;
388 } 387 }
389 388
@@ -822,11 +821,17 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
822 const struct flowi6 *fl6) 821 const struct flowi6 *fl6)
823{ 822{
824 struct ipv6_pinfo *np = inet6_sk(sk); 823 struct ipv6_pinfo *np = inet6_sk(sk);
825 struct rt6_info *rt = (struct rt6_info *)dst; 824 struct rt6_info *rt;
826 825
827 if (!dst) 826 if (!dst)
828 goto out; 827 goto out;
829 828
829 if (dst->ops->family != AF_INET6) {
830 dst_release(dst);
831 return NULL;
832 }
833
834 rt = (struct rt6_info *)dst;
830 /* Yes, checking route validity in not connected 835 /* Yes, checking route validity in not connected
831 * case is not very simple. Take into account, 836 * case is not very simple. Take into account,
832 * that we do not support routing by source, TOS, 837 * that we do not support routing by source, TOS,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 2712ab22a174..ca4ffcc287f1 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1493,7 +1493,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1493 */ 1493 */
1494 1494
1495 if (ha) 1495 if (ha)
1496 ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, ha); 1496 ndisc_fill_addr_option(buff, ND_OPT_TARGET_LL_ADDR, ha);
1497 1497
1498 /* 1498 /*
1499 * build redirect option and copy skb over to the new packet. 1499 * build redirect option and copy skb over to the new packet.
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 97bcf2bae857..c9b6a6e6a1e8 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -204,7 +204,7 @@ static unsigned int __ipv6_conntrack_in(struct net *net,
204 if (ct != NULL && !nf_ct_is_untracked(ct)) { 204 if (ct != NULL && !nf_ct_is_untracked(ct)) {
205 help = nfct_help(ct); 205 help = nfct_help(ct);
206 if ((help && help->helper) || !nf_ct_is_confirmed(ct)) { 206 if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
207 nf_conntrack_get_reasm(skb); 207 nf_conntrack_get_reasm(reasm);
208 NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm, 208 NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
209 (struct net_device *)in, 209 (struct net_device *)in,
210 (struct net_device *)out, 210 (struct net_device *)out,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index c5fbd7589681..9da862070dd8 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1710,6 +1710,7 @@ static int key_notify_sa_flush(const struct km_event *c)
1710 hdr->sadb_msg_version = PF_KEY_V2; 1710 hdr->sadb_msg_version = PF_KEY_V2;
1711 hdr->sadb_msg_errno = (uint8_t) 0; 1711 hdr->sadb_msg_errno = (uint8_t) 0;
1712 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); 1712 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
1713 hdr->sadb_msg_reserved = 0;
1713 1714
1714 pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); 1715 pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
1715 1716
@@ -2699,6 +2700,7 @@ static int key_notify_policy_flush(const struct km_event *c)
2699 hdr->sadb_msg_errno = (uint8_t) 0; 2700 hdr->sadb_msg_errno = (uint8_t) 0;
2700 hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; 2701 hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
2701 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); 2702 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
2703 hdr->sadb_msg_reserved = 0;
2702 pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); 2704 pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
2703 return 0; 2705 return 0;
2704 2706
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 1a89c80e6407..4fdb306e42e0 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1057,6 +1057,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1057 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); 1057 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
1058 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); 1058 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
1059 1059
1060 if (sdata->wdev.cac_started) {
1061 cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
1062 cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED,
1063 GFP_KERNEL);
1064 }
1065
1060 drv_stop_ap(sdata->local, sdata); 1066 drv_stop_ap(sdata->local, sdata);
1061 1067
1062 /* free all potentially still buffered bcast frames */ 1068 /* free all potentially still buffered bcast frames */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 44be28cfc6c4..9ca8e3278cc0 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1497,10 +1497,11 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
1497 ieee80211_tx_skb_tid(sdata, skb, 7); 1497 ieee80211_tx_skb_tid(sdata, skb, 7);
1498} 1498}
1499 1499
1500u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, 1500u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
1501 struct ieee802_11_elems *elems, 1501 struct ieee802_11_elems *elems,
1502 u64 filter, u32 crc); 1502 u64 filter, u32 crc);
1503static inline void ieee802_11_parse_elems(u8 *start, size_t len, bool action, 1503static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
1504 bool action,
1504 struct ieee802_11_elems *elems) 1505 struct ieee802_11_elems *elems)
1505{ 1506{
1506 ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0); 1507 ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a8c2130c8ba4..741448b30825 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2522,8 +2522,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2522 u16 capab_info, aid; 2522 u16 capab_info, aid;
2523 struct ieee802_11_elems elems; 2523 struct ieee802_11_elems elems;
2524 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 2524 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
2525 const struct cfg80211_bss_ies *bss_ies = NULL;
2526 struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
2525 u32 changed = 0; 2527 u32 changed = 0;
2526 int err; 2528 int err;
2529 bool ret;
2527 2530
2528 /* AssocResp and ReassocResp have identical structure */ 2531 /* AssocResp and ReassocResp have identical structure */
2529 2532
@@ -2555,21 +2558,86 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2555 ifmgd->aid = aid; 2558 ifmgd->aid = aid;
2556 2559
2557 /* 2560 /*
2561 * Some APs are erroneously not including some information in their
2562 * (re)association response frames. Try to recover by using the data
2563 * from the beacon or probe response. This seems to afflict mobile
2564 * 2G/3G/4G wifi routers, reported models include the "Onda PN51T",
2565 * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device.
2566 */
2567 if ((assoc_data->wmm && !elems.wmm_param) ||
2568 (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
2569 (!elems.ht_cap_elem || !elems.ht_operation)) ||
2570 (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
2571 (!elems.vht_cap_elem || !elems.vht_operation))) {
2572 const struct cfg80211_bss_ies *ies;
2573 struct ieee802_11_elems bss_elems;
2574
2575 rcu_read_lock();
2576 ies = rcu_dereference(cbss->ies);
2577 if (ies)
2578 bss_ies = kmemdup(ies, sizeof(*ies) + ies->len,
2579 GFP_ATOMIC);
2580 rcu_read_unlock();
2581 if (!bss_ies)
2582 return false;
2583
2584 ieee802_11_parse_elems(bss_ies->data, bss_ies->len,
2585 false, &bss_elems);
2586 if (assoc_data->wmm &&
2587 !elems.wmm_param && bss_elems.wmm_param) {
2588 elems.wmm_param = bss_elems.wmm_param;
2589 sdata_info(sdata,
2590 "AP bug: WMM param missing from AssocResp\n");
2591 }
2592
2593 /*
2594 * Also check if we requested HT/VHT, otherwise the AP doesn't
2595 * have to include the IEs in the (re)association response.
2596 */
2597 if (!elems.ht_cap_elem && bss_elems.ht_cap_elem &&
2598 !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
2599 elems.ht_cap_elem = bss_elems.ht_cap_elem;
2600 sdata_info(sdata,
2601 "AP bug: HT capability missing from AssocResp\n");
2602 }
2603 if (!elems.ht_operation && bss_elems.ht_operation &&
2604 !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
2605 elems.ht_operation = bss_elems.ht_operation;
2606 sdata_info(sdata,
2607 "AP bug: HT operation missing from AssocResp\n");
2608 }
2609 if (!elems.vht_cap_elem && bss_elems.vht_cap_elem &&
2610 !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
2611 elems.vht_cap_elem = bss_elems.vht_cap_elem;
2612 sdata_info(sdata,
2613 "AP bug: VHT capa missing from AssocResp\n");
2614 }
2615 if (!elems.vht_operation && bss_elems.vht_operation &&
2616 !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
2617 elems.vht_operation = bss_elems.vht_operation;
2618 sdata_info(sdata,
2619 "AP bug: VHT operation missing from AssocResp\n");
2620 }
2621 }
2622
2623 /*
2558 * We previously checked these in the beacon/probe response, so 2624 * We previously checked these in the beacon/probe response, so
2559 * they should be present here. This is just a safety net. 2625 * they should be present here. This is just a safety net.
2560 */ 2626 */
2561 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && 2627 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
2562 (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) { 2628 (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) {
2563 sdata_info(sdata, 2629 sdata_info(sdata,
2564 "HT AP is missing WMM params or HT capability/operation in AssocResp\n"); 2630 "HT AP is missing WMM params or HT capability/operation\n");
2565 return false; 2631 ret = false;
2632 goto out;
2566 } 2633 }
2567 2634
2568 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && 2635 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
2569 (!elems.vht_cap_elem || !elems.vht_operation)) { 2636 (!elems.vht_cap_elem || !elems.vht_operation)) {
2570 sdata_info(sdata, 2637 sdata_info(sdata,
2571 "VHT AP is missing VHT capability/operation in AssocResp\n"); 2638 "VHT AP is missing VHT capability/operation\n");
2572 return false; 2639 ret = false;
2640 goto out;
2573 } 2641 }
2574 2642
2575 mutex_lock(&sdata->local->sta_mtx); 2643 mutex_lock(&sdata->local->sta_mtx);
@@ -2580,7 +2648,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2580 sta = sta_info_get(sdata, cbss->bssid); 2648 sta = sta_info_get(sdata, cbss->bssid);
2581 if (WARN_ON(!sta)) { 2649 if (WARN_ON(!sta)) {
2582 mutex_unlock(&sdata->local->sta_mtx); 2650 mutex_unlock(&sdata->local->sta_mtx);
2583 return false; 2651 ret = false;
2652 goto out;
2584 } 2653 }
2585 2654
2586 sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)]; 2655 sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
@@ -2633,7 +2702,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2633 sta->sta.addr); 2702 sta->sta.addr);
2634 WARN_ON(__sta_info_destroy(sta)); 2703 WARN_ON(__sta_info_destroy(sta));
2635 mutex_unlock(&sdata->local->sta_mtx); 2704 mutex_unlock(&sdata->local->sta_mtx);
2636 return false; 2705 ret = false;
2706 goto out;
2637 } 2707 }
2638 2708
2639 mutex_unlock(&sdata->local->sta_mtx); 2709 mutex_unlock(&sdata->local->sta_mtx);
@@ -2673,7 +2743,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2673 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); 2743 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
2674 ieee80211_sta_reset_beacon_monitor(sdata); 2744 ieee80211_sta_reset_beacon_monitor(sdata);
2675 2745
2676 return true; 2746 ret = true;
2747 out:
2748 kfree(bss_ies);
2749 return ret;
2677} 2750}
2678 2751
2679static enum rx_mgmt_action __must_check 2752static enum rx_mgmt_action __must_check
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index d3f414fe67e0..a02bef35b134 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -615,7 +615,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
615 if (rates[i].idx < 0) 615 if (rates[i].idx < 0)
616 break; 616 break;
617 617
618 rate_idx_match_mask(&rates[i], sband, mask, chan_width, 618 rate_idx_match_mask(&rates[i], sband, chan_width, mask,
619 mcs_mask); 619 mcs_mask);
620 } 620 }
621} 621}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 27e07150eb46..72e6292955bb 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -661,12 +661,12 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
661} 661}
662EXPORT_SYMBOL(ieee80211_queue_delayed_work); 662EXPORT_SYMBOL(ieee80211_queue_delayed_work);
663 663
664u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, 664u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
665 struct ieee802_11_elems *elems, 665 struct ieee802_11_elems *elems,
666 u64 filter, u32 crc) 666 u64 filter, u32 crc)
667{ 667{
668 size_t left = len; 668 size_t left = len;
669 u8 *pos = start; 669 const u8 *pos = start;
670 bool calc_crc = filter != 0; 670 bool calc_crc = filter != 0;
671 DECLARE_BITMAP(seen_elems, 256); 671 DECLARE_BITMAP(seen_elems, 256);
672 const u8 *ie; 672 const u8 *ie;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 05565d2b3a61..23b8eb53a569 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1442,7 +1442,8 @@ ignore_ipip:
1442 1442
1443 /* do the statistics and put it back */ 1443 /* do the statistics and put it back */
1444 ip_vs_in_stats(cp, skb); 1444 ip_vs_in_stats(cp, skb);
1445 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) 1445 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
1446 IPPROTO_SCTP == cih->protocol)
1446 offset += 2 * sizeof(__u16); 1447 offset += 2 * sizeof(__u16);
1447 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph); 1448 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1448 1449
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
index 8fe2e99428b7..355d2ef08094 100644
--- a/net/netfilter/nf_conntrack_labels.c
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -45,7 +45,7 @@ int nf_connlabel_set(struct nf_conn *ct, u16 bit)
45 if (test_bit(bit, labels->bits)) 45 if (test_bit(bit, labels->bits))
46 return 0; 46 return 0;
47 47
48 if (test_and_set_bit(bit, labels->bits)) 48 if (!test_and_set_bit(bit, labels->bits))
49 nf_conntrack_event_cache(IPCT_LABEL, ct); 49 nf_conntrack_event_cache(IPCT_LABEL, ct);
50 50
51 return 0; 51 return 0;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6d0f8a17c5b7..ecf065f94032 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1825,6 +1825,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1825 nf_conntrack_eventmask_report((1 << IPCT_REPLY) | 1825 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1826 (1 << IPCT_ASSURED) | 1826 (1 << IPCT_ASSURED) |
1827 (1 << IPCT_HELPER) | 1827 (1 << IPCT_HELPER) |
1828 (1 << IPCT_LABEL) |
1828 (1 << IPCT_PROTOINFO) | 1829 (1 << IPCT_PROTOINFO) |
1829 (1 << IPCT_NATSEQADJ) | 1830 (1 << IPCT_NATSEQADJ) |
1830 (1 << IPCT_MARK), 1831 (1 << IPCT_MARK),
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index 96ccdf78a29f..dac11f73868e 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -230,9 +230,10 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
230 &ct->tuplehash[!dir].tuple.src.u3, 230 &ct->tuplehash[!dir].tuple.src.u3,
231 false); 231 false);
232 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, 232 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
233 poff, plen, buffer, buflen)) 233 poff, plen, buffer, buflen)) {
234 nf_ct_helper_log(skb, ct, "cannot mangle received"); 234 nf_ct_helper_log(skb, ct, "cannot mangle received");
235 return NF_DROP; 235 return NF_DROP;
236 }
236 } 237 }
237 238
238 /* The rport= parameter (RFC 3581) contains the port number 239 /* The rport= parameter (RFC 3581) contains the port number
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index afaebc766933..7011c71646f0 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -45,17 +45,22 @@ optlen(const u_int8_t *opt, unsigned int offset)
 
 static int
 tcpmss_mangle_packet(struct sk_buff *skb,
-		     const struct xt_tcpmss_info *info,
+		     const struct xt_action_param *par,
 		     unsigned int in_mtu,
 		     unsigned int tcphoff,
 		     unsigned int minlen)
 {
+	const struct xt_tcpmss_info *info = par->targinfo;
 	struct tcphdr *tcph;
 	unsigned int tcplen, i;
 	__be16 oldval;
 	u16 newmss;
 	u8 *opt;
 
+	/* This is a fragment, no TCP header is available */
+	if (par->fragoff != 0)
+		return XT_CONTINUE;
+
 	if (!skb_make_writable(skb, skb->len))
 		return -1;
 
@@ -125,11 +130,17 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 
 	skb_put(skb, TCPOLEN_MSS);
 
-	/* RFC 879 states that the default MSS is 536 without specific
-	 * knowledge that the destination host is prepared to accept larger.
-	 * Since no MSS was provided, we MUST NOT set a value > 536.
+	/*
+	 * IPv4: RFC 1122 states "If an MSS option is not received at
+	 * connection setup, TCP MUST assume a default send MSS of 536".
+	 * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280 and a minimum
+	 * length IPv6 header of 60, ergo the default MSS value is 1220
+	 * Since no MSS was provided, we must use the default values
 	 */
-	newmss = min(newmss, (u16)536);
+	if (par->family == NFPROTO_IPV4)
+		newmss = min(newmss, (u16)536);
+	else
+		newmss = min(newmss, (u16)1220);
 
 	opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
 	memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
@@ -188,7 +199,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 	__be16 newlen;
 	int ret;
 
-	ret = tcpmss_mangle_packet(skb, par->targinfo,
+	ret = tcpmss_mangle_packet(skb, par,
 				   tcpmss_reverse_mtu(skb, PF_INET),
 				   iph->ihl * 4,
 				   sizeof(*iph) + sizeof(struct tcphdr));
@@ -217,7 +228,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
 	if (tcphoff < 0)
 		return NF_DROP;
-	ret = tcpmss_mangle_packet(skb, par->targinfo,
+	ret = tcpmss_mangle_packet(skb, par,
 				   tcpmss_reverse_mtu(skb, PF_INET6),
 				   tcphoff,
 				   sizeof(*ipv6h) + sizeof(struct tcphdr));
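
For reference, the two defaults the xt_TCPMSS hunk clamps to follow directly from the RFCs cited in the new comment: IPv4 assumes a send MSS of 536 when none is advertised (RFC 1122), and IPv6's 1280-byte minimum MTU minus 40 bytes of IPv6 header and 20 bytes of TCP header leaves 1220. A standalone sketch of that arithmetic follows; clamp_default_mss() is a hypothetical helper, not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define IPV4_DEFAULT_MSS	536U	/* RFC 1122 default send MSS */
#define IPV6_MIN_MTU		1280U	/* RFC 2460 minimum link MTU */
#define IPV6_HDR_LEN		40U
#define TCP_HDR_LEN		20U

/* Clamp a guessed MSS to the protocol default when the peer never
 * advertised one, mirroring the if/else added in tcpmss_mangle_packet(). */
static uint16_t clamp_default_mss(uint16_t newmss, int is_ipv6)
{
	uint16_t limit = is_ipv6 ? IPV6_MIN_MTU - IPV6_HDR_LEN - TCP_HDR_LEN
				 : IPV4_DEFAULT_MSS;

	return newmss < limit ? newmss : limit;
}

int main(void)
{
	printf("IPv4: %u\n", (unsigned)clamp_default_mss(1460, 0));	/* 536 */
	printf("IPv6: %u\n", (unsigned)clamp_default_mss(1460, 1));	/* 1220 */
	return 0;
}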
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 1eb1a44bfd3d..b68fa191710f 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -48,11 +48,13 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
 		return NF_DROP;
 
 	len = skb->len - tcphoff;
-	if (len < (int)sizeof(struct tcphdr) ||
-	    tcp_hdr(skb)->doff * 4 > len)
+	if (len < (int)sizeof(struct tcphdr))
 		return NF_DROP;
 
 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+	if (tcph->doff * 4 > len)
+		return NF_DROP;
+
 	opt = (u_int8_t *)tcph;
 
 	/*
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d5aed3bb3945..b14b7e3cb6e6 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1564,12 +1564,17 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 	struct cfg80211_registered_device *dev;
 	s64 filter_wiphy = -1;
 	bool split = false;
-	struct nlattr **tb = nl80211_fam.attrbuf;
+	struct nlattr **tb;
 	int res;
 
+	/* will be zeroed in nlmsg_parse() */
+	tb = kmalloc(sizeof(*tb) * (NL80211_ATTR_MAX + 1), GFP_KERNEL);
+	if (!tb)
+		return -ENOMEM;
+
 	mutex_lock(&cfg80211_mutex);
 	res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
-			  tb, nl80211_fam.maxattr, nl80211_policy);
+			  tb, NL80211_ATTR_MAX, nl80211_policy);
 	if (res == 0) {
 		split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP];
 		if (tb[NL80211_ATTR_WIPHY])
@@ -1583,6 +1588,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 			netdev = dev_get_by_index(sock_net(skb->sk), ifidx);
 			if (!netdev) {
 				mutex_unlock(&cfg80211_mutex);
+				kfree(tb);
 				return -ENODEV;
 			}
 			if (netdev->ieee80211_ptr) {
@@ -1593,6 +1599,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 			dev_put(netdev);
 		}
 	}
+	kfree(tb);
 
 	list_for_each_entry(dev, &cfg80211_rdev_list, list) {
 		if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
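
The nl80211 hunks above replace the family-wide static attrbuf with a buffer allocated per dump call, parsed into, and freed on every exit path, so two concurrent wiphy dumps can no longer overwrite each other's parsed attributes. A small userspace sketch of that allocate/parse/free shape is given below; parse_attrs(), dump_one() and MAX_ATTRS are stand-ins for illustration, not cfg80211 API.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ATTRS	16	/* stand-in for NL80211_ATTR_MAX + 1 */

/* Stand-in parser: zeroes the table, then fills it from the message. */
static int parse_attrs(const char **tb, const char *msg)
{
	memset(tb, 0, sizeof(*tb) * MAX_ATTRS);
	if (msg && msg[0])
		tb[0] = msg;	/* pretend attribute 0 was present */
	return 0;
}

/* One buffer per call instead of one shared static buffer, so parallel
 * dumps cannot trample each other; every return path releases it, just
 * as the patch adds kfree(tb) before each return in nl80211_dump_wiphy(). */
static int dump_one(const char *msg)
{
	const char **tb = malloc(sizeof(*tb) * MAX_ATTRS);
	int err;

	if (!tb)
		return -ENOMEM;

	err = parse_attrs(tb, msg);
	free(tb);
	return err;
}

int main(void)
{
	return dump_one("wiphy0") ? 1 : 0;
}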
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index b1dc7d426438..e2de9ecfd641 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3377,7 +3377,7 @@ static int wm8962_probe(struct snd_soc_codec *codec)
 {
 	int ret;
 	struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
-	struct wm8962_pdata *pdata = dev_get_platdata(codec->dev);
+	struct wm8962_pdata *pdata = &wm8962->pdata;
 	int i, trigger, irq_pol;
 	bool dmicclk, dmicdat;
 
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index 7a8bc1220b2e..3f726e4f88db 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -113,13 +113,13 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
 	ssi_pdev = of_find_device_by_node(ssi_np);
 	if (!ssi_pdev) {
 		dev_err(&pdev->dev, "failed to find SSI platform device\n");
-		ret = -EINVAL;
+		ret = -EPROBE_DEFER;
 		goto fail;
 	}
 	codec_dev = of_find_i2c_device_by_node(codec_np);
 	if (!codec_dev) {
 		dev_err(&pdev->dev, "failed to find codec platform device\n");
-		return -EINVAL;
+		return -EPROBE_DEFER;
 	}
 
 	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index 49d870034bc3..54511c5e6a7c 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/time.h>
 #include <sound/core.h>
@@ -658,6 +659,33 @@ static irqreturn_t mxs_saif_irq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static int mxs_saif_mclk_init(struct platform_device *pdev)
+{
+	struct mxs_saif *saif = platform_get_drvdata(pdev);
+	struct device_node *np = pdev->dev.of_node;
+	struct clk *clk;
+	int ret;
+
+	clk = clk_register_divider(&pdev->dev, "mxs_saif_mclk",
+				   __clk_get_name(saif->clk), 0,
+				   saif->base + SAIF_CTRL,
+				   BP_SAIF_CTRL_BITCLK_MULT_RATE, 3,
+				   0, NULL);
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		if (ret == -EEXIST)
+			return 0;
+		dev_err(&pdev->dev, "failed to register mclk: %d\n", ret);
+		return PTR_ERR(clk);
+	}
+
+	ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static int mxs_saif_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
@@ -734,6 +762,13 @@ static int mxs_saif_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, saif);
 
+	/* We only support saif0 being tx and clock master */
+	if (saif->id == 0) {
+		ret = mxs_saif_mclk_init(pdev);
+		if (ret)
+			dev_warn(&pdev->dev, "failed to init clocks\n");
+	}
+
 	ret = snd_soc_register_component(&pdev->dev, &mxs_saif_component,
 					 &mxs_saif_dai, 1);
 	if (ret) {
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 82ebb1a51479..7a1734697434 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1016,52 +1016,6 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
 	return i2s;
 }
 
-#ifdef CONFIG_OF
-static int samsung_i2s_parse_dt_gpio(struct i2s_dai *i2s)
-{
-	struct device *dev = &i2s->pdev->dev;
-	int index, gpio, ret;
-
-	for (index = 0; index < 7; index++) {
-		gpio = of_get_gpio(dev->of_node, index);
-		if (!gpio_is_valid(gpio)) {
-			dev_err(dev, "invalid gpio[%d]: %d\n", index, gpio);
-			goto free_gpio;
-		}
-
-		ret = gpio_request(gpio, dev_name(dev));
-		if (ret) {
-			dev_err(dev, "gpio [%d] request failed\n", gpio);
-			goto free_gpio;
-		}
-		i2s->gpios[index] = gpio;
-	}
-	return 0;
-
-free_gpio:
-	while (--index >= 0)
-		gpio_free(i2s->gpios[index]);
-	return -EINVAL;
-}
-
-static void samsung_i2s_dt_gpio_free(struct i2s_dai *i2s)
-{
-	unsigned int index;
-	for (index = 0; index < 7; index++)
-		gpio_free(i2s->gpios[index]);
-}
-#else
-static int samsung_i2s_parse_dt_gpio(struct i2s_dai *dai)
-{
-	return -EINVAL;
-}
-
-static void samsung_i2s_dt_gpio_free(struct i2s_dai *dai)
-{
-}
-
-#endif
-
 static const struct of_device_id exynos_i2s_match[];
 
 static inline int samsung_i2s_get_driver_data(struct platform_device *pdev)
@@ -1235,18 +1189,10 @@ static int samsung_i2s_probe(struct platform_device *pdev)
 		pri_dai->sec_dai = sec_dai;
 	}
 
-	if (np) {
-		if (samsung_i2s_parse_dt_gpio(pri_dai)) {
-			dev_err(&pdev->dev, "Unable to configure gpio\n");
-			ret = -EINVAL;
-			goto err;
-		}
-	} else {
-		if (i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
-			dev_err(&pdev->dev, "Unable to configure gpio\n");
-			ret = -EINVAL;
-			goto err;
-		}
+	if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
+		dev_err(&pdev->dev, "Unable to configure gpio\n");
+		ret = -EINVAL;
+		goto err;
 	}
 
 	snd_soc_register_component(&pri_dai->pdev->dev, &samsung_i2s_component,
@@ -1267,14 +1213,10 @@ static int samsung_i2s_remove(struct platform_device *pdev)
 {
 	struct i2s_dai *i2s, *other;
 	struct resource *res;
-	struct s3c_audio_pdata *i2s_pdata = pdev->dev.platform_data;
 
 	i2s = dev_get_drvdata(&pdev->dev);
 	other = i2s->pri_dai ? : i2s->sec_dai;
 
-	if (!i2s_pdata->cfg_gpio && pdev->dev.of_node)
-		samsung_i2s_dt_gpio_free(i2s->pri_dai);
-
 	if (other) {
 		other->pri_dai = NULL;
 		other->sec_dai = NULL;
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
index 20e98d1dded2..e5e81b111001 100644
--- a/sound/soc/samsung/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -1,6 +1,4 @@
-/* sound/soc/samsung/s3c-i2c-v2.c
- *
- * ALSA Soc Audio Layer - I2S core for newer Samsung SoCs.
+/* ALSA Soc Audio Layer - I2S core for newer Samsung SoCs.
  *
  * Copyright (c) 2006 Wolfson Microelectronics PLC.
  *	Graeme Gregory graeme.gregory@wolfsonmicro.com