aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/mutex-debug.c
blob: 50d022e5a5606dd64d985afbff9d33621f06ab96 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
/*
 * kernel/mutex-debug.c
 *
 * Debugging code for mutexes
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * lock debugging, locking tree, deadlock detection started by:
 *
 *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
 *  Released under the General Public License (GPL).
 */
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

#include "mutex-debug.h"

/*
 * Must be called with lock->wait_lock held.
 */
void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
{
	/*
	 * Poison the entire waiter first so that any stale field that is
	 * never explicitly initialized below stands out, then set up the
	 * fields that are actually used: the list head and the magic
	 * cookie (the waiter's own address) used by later sanity checks.
	 */
	memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
	INIT_LIST_HEAD(&waiter->list);
	waiter->magic = waiter;
}

/*
 * Sanity-check a waiter that is about to be woken up.
 * Caller must hold lock->wait_lock (checked on SMP below).
 */
void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	/* The wait list may only be inspected under wait_lock: */
	SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
	/* Waking a waiter implies the wait list cannot be empty: */
	DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
	/* The magic cookie stamped in debug_mutex_lock_common() must be intact: */
	DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
	/* The waiter itself must still be queued on a wait list: */
	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
}

/*
 * Tear down a waiter: it must already be unlinked from any wait list;
 * its memory is then poisoned so later use of the stale waiter trips
 * the debug checks.
 */
void debug_mutex_free_waiter(struct mutex_waiter *waiter)
{
	/* Must have been removed from the wait list first: */
	DEBUG_LOCKS_WARN_ON(!list_empty(&waiter->list));
	/* Poison the structure to make use-after-free detectable: */
	memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
}

/*
 * Record that @ti's task is now blocked waiting on @lock via @waiter.
 * Caller must hold lock->wait_lock (checked on SMP below).
 */
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			    struct thread_info *ti)
{
	SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));

	/* Mark the current thread as blocked on the lock: */
	ti->task->blocked_on = waiter;
}

/*
 * Undo debug_mutex_add_waiter(): validate the waiter/task linkage,
 * clear the task's blocked_on marker and unlink the waiter.
 */
void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			 struct thread_info *ti)
{
	/* The waiter must still be queued on the wait list... */
	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
	/* ...must belong to the task removing it... */
	DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
	/* ...and be the waiter this task is recorded as blocked on: */
	DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
	ti->task->blocked_on = NULL;

	/* list_del_init() leaves waiter->list empty, as free_waiter expects: */
	list_del_init(&waiter->list);
	waiter->task = NULL;
}

/*
 * Debug checks performed on mutex unlock: the mutex must be initialized
 * (magic cookie intact), owned by the current thread, and have a sane
 * wait-list head. Finally the debug owner field is cleared.
 */
void debug_mutex_unlock(struct mutex *lock)
{
	/* All checks are moot once lock debugging has been disabled: */
	if (unlikely(!debug_locks))
		return;

	/* Magic cookie set by debug_mutex_init() must still be intact: */
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
	/* Only the owning thread may unlock: */
	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
	/* The wait-list head must at least have been initialized: */
	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
	mutex_clear_owner(lock);
}

/*
 * Initialize the debug state of a freshly-initialized mutex: register
 * it with lockdep (when CONFIG_DEBUG_LOCK_ALLOC is set) and stamp the
 * magic cookie that the other debug hooks check for corruption.
 */
void debug_mutex_init(struct mutex *lock, const char *name,
		      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	/* Magic cookie: the lock's own address marks it as initialized: */
	lock->magic = lock;
}

/**
 * mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void mutex_destroy(struct mutex *lock)
{
	/* Destroying a locked mutex is a caller bug: */
	DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
	/* Clear the magic cookie so later debug checks trip on this lock: */
	lock->magic = NULL;
}

EXPORT_SYMBOL_GPL(mutex_destroy);
-rw-r--r--drivers/net/can/xilinx_can.c784
-rw-r--r--drivers/net/dsa/Kconfig32
-rw-r--r--drivers/net/dsa/Makefile4
-rw-r--r--drivers/net/dsa/b53/Kconfig10
-rw-r--r--drivers/net/dsa/b53/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c356
-rw-r--r--drivers/net/dsa/b53/b53_priv.h44
-rw-r--r--drivers/net/dsa/b53/b53_serdes.c214
-rw-r--r--drivers/net/dsa/b53/b53_serdes.h128
-rw-r--r--drivers/net/dsa/b53/b53_srab.c220
-rw-r--r--drivers/net/dsa/bcm_sf2.c242
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c46
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h2
-rw-r--r--drivers/net/dsa/dsa_loop.c43
-rw-r--r--drivers/net/dsa/lan9303-core.c11
-rw-r--r--drivers/net/dsa/lantiq_gswip.c1167
-rw-r--r--drivers/net/dsa/lantiq_pce.h153
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c30
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c1
-rw-r--r--drivers/net/dsa/mt7530.c17
-rw-r--r--drivers/net/dsa/mv88e6060.c10
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c677
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h65
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c111
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h47
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c65
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h28
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2_avb.c25
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c134
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h14
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c169
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h25
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c101
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h71
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c548
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h43
-rw-r--r--drivers/net/dsa/qca8k.c80
-rw-r--r--drivers/net/dsa/qca8k.h7
-rw-r--r--drivers/net/dsa/realtek-smi.c489
-rw-r--r--drivers/net/dsa/realtek-smi.h144
-rw-r--r--drivers/net/dsa/rtl8366.c515
-rw-r--r--drivers/net/dsa/rtl8366rb.c1454
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.c1365
-rw-r--r--drivers/net/ethernet/3com/3c59x.c62
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/8390/Kconfig17
-rw-r--r--drivers/net/ethernet/8390/Makefile1
-rw-r--r--drivers/net/ethernet/8390/ax88796.c232
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c1
-rw-r--r--drivers/net/ethernet/8390/etherh.c35
-rw-r--r--drivers/net/ethernet/8390/mac8390.c20
-rw-r--r--drivers/net/ethernet/8390/xsurf100.c382
-rw-r--r--drivers/net/ethernet/Kconfig22
-rw-r--r--drivers/net/ethernet/Makefile11
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c4
-rw-r--r--drivers/net/ethernet/agere/et131x.c12
-rw-r--r--drivers/net/ethernet/alacritech/slic.h1
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c3
-rw-r--r--drivers/net/ethernet/alteon/acenic.c6
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c5
-rw-r--r--drivers/net/ethernet/amazon/Kconfig2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h425
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c325
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h72
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_common_defs.h4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c289
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h78
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h229
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c10
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c625
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h31
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h206
-rw-r--r--drivers/net/ethernet/amd/Kconfig6
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c16
-rw-r--r--drivers/net/ethernet/amd/atarilance.c6
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c12
-rw-r--r--drivers/net/ethernet/amd/declance.c12
-rw-r--r--drivers/net/ethernet/amd/lance.c8
-rw-r--r--drivers/net/ethernet/amd/ni65.c4
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c8
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c6
-rw-r--r--drivers/net/ethernet/amd/sunlance.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c7
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c142
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c217
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c20
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c171
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c36
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c362
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h31
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/Kconfig1
-rw-r--r--drivers/net/ethernet/apm/xgene/Kconfig1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c30
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c16
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c3
-rw-r--r--drivers/net/ethernet/apple/bmac.c12
-rw-r--r--drivers/net/ethernet/apple/mace.c4
-rw-r--r--drivers/net/ethernet/apple/macmace.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c230
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h41
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c21
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c95
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c23
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c71
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c90
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c155
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h31
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c211
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h165
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c321
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h4
-rw-r--r--drivers/net/ethernet/arc/Kconfig6
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h1
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c14
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c1
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c22
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c4
-rw-r--r--drivers/net/ethernet/aurora/Kconfig1
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c11
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig14
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c26
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c315
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h16
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c60
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c138
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c95
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c2274
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h307
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h66
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c257
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c124
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c202
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h22
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c555
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h37
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h1506
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c69
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h19
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c87
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c31
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c5
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c19
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c19
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c28
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c20
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c124
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c4
-rw-r--r--drivers/net/ethernet/cadence/Kconfig6
-rw-r--r--drivers/net/ethernet/cadence/macb.h11
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c230
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c8
-rw-r--r--drivers/net/ethernet/calxeda/Kconfig2
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c5
-rw-r--r--drivers/net/ethernet/cavium/Kconfig18
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c107
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h14
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c39
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c797
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c1038
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c807
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c481
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c70
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h119
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c24
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h32
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c131
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h31
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c52
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h98
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h90
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.c61
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.h9
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c156
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c82
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.h4
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c19
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c3
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c62
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h73
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c382
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h106
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h99
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c38
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c33
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c711
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c208
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c100
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c406
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c36
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c114
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c70
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c71
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c505
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/srq.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c301
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h34
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h63
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c73
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h15
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c4
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig1
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c18
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c112
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.c7
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c20
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h20
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_nic.h3
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c152
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/dnet.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig40
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h56
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c96
-rw-r--r--drivers/net/ethernet/ethoc.c14
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c28
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c15
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c121
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c67
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig16
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Makefile13
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h158
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c2829
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h446
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c630
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c222
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h14
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpkg.h480
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h569
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.c1752
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.h921
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h40
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc.c194
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc.h45
-rw-r--r--drivers/net/ethernet/freescale/fec.h6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c79
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c3
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c27
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c5
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c28
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c21
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.h1
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c9
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fec.h3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c18
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c80
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c572
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c31
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig17
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c100
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c52
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c86
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c581
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c34
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c27
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c242
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c120
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h107
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c1611
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h50
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c248
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c146
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h275
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c24
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c1090
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h83
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c3522
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h448
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c84
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c90
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c85
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h28
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c38
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h22
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c751
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c26
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/Kconfig2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c157
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h33
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c14
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h97
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c52
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c32
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h18
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c314
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c5
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c4
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c19
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c34
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h3
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.h2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c653
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h46
-rw-r--r--drivers/net/ethernet/intel/Kconfig79
-rw-r--r--drivers/net/ethernet/intel/Makefile3
-rw-r--r--drivers/net/ethernet/intel/e100.c30
-rw-r--r--drivers/net/ethernet/intel/e1000/Makefile26
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h29
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c34
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c28
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.h28
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c33
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_osdep.h29
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c28
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile27
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c25
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c42
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h21
-rw-r--r--drivers/net/ethernet/intel/fm10k/Makefile23
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h23
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c138
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c71
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c31
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c97
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c46
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h22
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.h20
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile29
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h52
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c32
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c98
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c117
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c37
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c842
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c650
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h29
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c87
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_trace.h23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c394
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h51
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx_common.h91
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c558
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c959
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h25
-rw-r--r--drivers/net/ethernet/intel/i40evf/Makefile40
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2739
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_alloc.h59
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c1412
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_devids.h58
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_hmc.h239
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h182
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_osdep.h76
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h154
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_register.h337
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h548
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h1512
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h452
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c841
-rw-r--r--drivers/net/ethernet/intel/iavf/Makefile15
-rw-r--r--drivers/net/ethernet/intel/iavf/i40e_adminq.c (renamed from drivers/net/ethernet/intel/i40evf/i40e_adminq.c)339
-rw-r--r--drivers/net/ethernet/intel/iavf/i40e_adminq.h (renamed from drivers/net/ethernet/intel/i40evf/i40e_adminq.h)65
-rw-r--r--drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h530
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h418
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_alloc.h31
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.c (renamed from drivers/net/ethernet/intel/i40evf/i40evf_client.c)230
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.h (renamed from drivers/net/ethernet/intel/i40evf/i40evf_client.h)32
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c955
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_devids.h12
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c1036
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c (renamed from drivers/net/ethernet/intel/i40evf/i40evf_main.c)1813
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_osdep.h52
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_prototype.h67
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_register.h68
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_status.h (renamed from drivers/net/ethernet/intel/i40evf/i40e_status.h)34
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_trace.h (renamed from drivers/net/ethernet/intel/i40evf/i40e_trace.h)109
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c (renamed from drivers/net/ethernet/intel/i40evf/i40e_txrx.c)843
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h523
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h688
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c (renamed from drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c)567
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h91
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h162
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c971
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c110
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c877
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h548
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c2621
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h76
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c3802
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c380
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c127
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c1702
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h99
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h42
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h129
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c2673
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h173
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile28
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c36
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h41
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c24
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c24
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h23
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h37
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c114
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c23
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c714
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c29
-rw-r--r--drivers/net/ethernet/intel/igbvf/Makefile28
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h26
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h26
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.h26
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c29
-rw-r--r--drivers/net/ethernet/intel/igbvf/regs.h26
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h26
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile10
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h443
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c541
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h107
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h389
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h321
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c490
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.h13
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c806
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.h41
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c3901
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c215
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.h14
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c791
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.h21
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h221
-rw-r--r--drivers/net/ethernet/intel/ixgb/Makefile27
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.c29
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c34
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c29
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ids.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c66
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_osdep.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_param.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h92
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c41
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c85
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c84
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c459
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h40
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c54
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c1331
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h33
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_model.h42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c100
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h50
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h39
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h24
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c54
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c801
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile29
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h36
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c37
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c670
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.h66
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h60
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c176
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h31
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h26
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c31
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h26
-rw-r--r--drivers/net/ethernet/jme.c14
-rw-r--r--drivers/net/ethernet/lantiq_etop.c21
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c568
-rw-r--r--drivers/net/ethernet/marvell/Kconfig12
-rw-r--r--drivers/net/ethernet/marvell/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c6
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c5
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c468
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.c15
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.h8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c8956
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/Makefile7
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h1117
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c1074
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h233
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c703
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c5550
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c2496
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h333
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Makefile6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c721
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h111
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h186
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h211
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c303
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h525
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h262
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h5709
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c1772
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h368
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c515
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c1959
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c472
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c816
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c71
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h502
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h917
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c91
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c239
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c268
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c110
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c947
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h175
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h444
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h212
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c439
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c329
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h106
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c383
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/vxlan.h)42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c181
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c392
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c583
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1282
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c380
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c835
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c285
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1301
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c367
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c83
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h106
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c578
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c627
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c130
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c370
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c208
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c219
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c230
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c93
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c190
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c121
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h100
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h83
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c175
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h91
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/ib.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c337
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h111
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h1629
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h63
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c735
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h369
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c428
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c342
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c239
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c272
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c132
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c536
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c1168
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c285
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h124
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c438
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h228
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c523
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c302
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c260
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c199
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c463
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c354
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c982
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c249
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c673
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c245
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c770
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h36
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c7
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c5
-rw-r--r--drivers/net/ethernet/microchip/Kconfig2
-rw-r--r--drivers/net/ethernet/microchip/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c723
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.h11
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c313
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h235
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c1159
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h74
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c8
-rw-r--r--drivers/net/ethernet/mscc/Kconfig32
-rw-r--r--drivers/net/ethernet/mscc/Makefile5
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1789
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h503
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ana.h625
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c363
-rw-r--r--drivers/net/ethernet/mscc/ocelot_dev.h275
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c116
-rw-r--r--drivers/net/ethernet/mscc/ocelot_qs.h78
-rw-r--r--drivers/net/ethernet/mscc/ocelot_qsys.h270
-rw-r--r--drivers/net/ethernet/mscc/ocelot_regs.c424
-rw-r--r--drivers/net/ethernet/mscc/ocelot_rew.h81
-rw-r--r--drivers/net/ethernet/mscc/ocelot_sys.h144
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c1
-rw-r--r--drivers/net/ethernet/neterion/Kconfig23
-rw-r--r--drivers/net/ethernet/neterion/s2io.c7
-rw-r--r--drivers/net/ethernet/neterion/s2io.h22
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c67
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h22
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c3
-rw-r--r--drivers/net/ethernet/netronome/Kconfig13
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile8
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/ctrl.c292
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c750
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.h111
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c142
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h65
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c1460
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c168
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h185
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c282
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c424
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c427
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c45
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h85
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c697
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c128
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h122
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c84
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c200
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c220
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c41
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_abi.h112
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c83
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h91
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app_nic.c39
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.h112
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c97
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c209
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h61
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h64
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c335
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c41
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h44
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c84
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c38
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c102
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c114
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c53
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h45
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c36
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c42
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h42
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c149
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h36
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c195
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h57
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c92
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c46
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c79
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c74
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h74
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c364
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h46
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c100
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c330
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c46
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.c34
-rw-r--r--drivers/net/ethernet/ni/Kconfig3
-rw-r--r--drivers/net/ethernet/ni/nixge.c185
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c10
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c78
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Makefile2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h40
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c262
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h35
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c19
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c193
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h2
-rw-r--r--drivers/net/ethernet/packetengines/Kconfig6
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h91
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c103
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c866
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c335
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h1045
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c63
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c69
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c107
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c86
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c480
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c752
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h175
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c1337
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c243
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c120
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h26
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c100
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c72
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c277
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c42
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h28
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h29
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c246
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c991
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c258
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c372
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c10
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c20
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c61
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c16
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c30
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_mpi.c1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c11
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c141
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.h32
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c9
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k.c110
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k.h2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c159
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h6
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h13
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c28
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c24
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c64
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c63
-rw-r--r--drivers/net/ethernet/rdc/r6040.c12
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c5
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/Kconfig3
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2301
-rw-r--r--drivers/net/ethernet/renesas/Kconfig3
-rw-r--r--drivers/net/ethernet/renesas/Makefile1
-rw-r--r--drivers/net/ethernet/renesas/ravb.h16
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c268
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c8
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c302
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h67
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c15
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c4
-rw-r--r--drivers/net/ethernet/seeq/ether3.c5
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c3
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c33
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c5
-rw-r--r--drivers/net/ethernet/sfc/efx.c105
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c39
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c3
-rw-r--r--drivers/net/ethernet/sfc/farch.c4
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h3
-rw-r--r--drivers/net/ethernet/sfc/rx.c7
-rw-r--r--drivers/net/ethernet/sfc/tx.c33
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/smsc/epic100.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c16
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c15
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h1
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c9
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c6
-rw-r--r--drivers/net/ethernet/socionext/Kconfig2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c100
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c305
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c34
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h258
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c121
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c93
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c267
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c92
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c38
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c269
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h240
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c388
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c280
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c430
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c290
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h491
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h59
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c94
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c34
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c1073
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c140
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c40
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c354
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c5
-rw-r--r--drivers/net/ethernet/sun/niu.c16
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c3
-rw-r--r--drivers/net/ethernet/sun/sungem.c22
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c5
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c14
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.h7
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c4
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig11
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c17
-rw-r--r--drivers/net/ethernet/ti/cpsw.c674
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h8
-rw-r--r--drivers/net/ethernet/ti/cpts.c11
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c43
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c29
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c27
-rw-r--r--drivers/net/ethernet/ti/netcp.h3
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c67
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c248
-rw-r--r--drivers/net/ethernet/ti/tlan.c1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c14
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c4
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c8
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c8
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c136
-rw-r--r--drivers/net/fddi/Kconfig11
-rw-r--r--drivers/net/fddi/Makefile1
-rw-r--r--drivers/net/fddi/defza.c1565
-rw-r--r--drivers/net/fddi/defza.h792
-rw-r--r--drivers/net/fddi/skfp/ecm.c3
-rw-r--r--drivers/net/fddi/skfp/h/cmtdef.h9
-rw-r--r--drivers/net/fddi/skfp/pcmplc.c11
-rw-r--r--drivers/net/fddi/skfp/skfddi.c55
-rw-r--r--drivers/net/fjes/fjes_main.c8
-rw-r--r--drivers/net/geneve.c144
-rw-r--r--drivers/net/gtp.c8
-rw-r--r--drivers/net/hamradio/6pack.c26
-rw-r--r--drivers/net/hamradio/bpqether.c24
-rw-r--r--drivers/net/hamradio/mkiss.c23
-rw-r--r--drivers/net/hamradio/scc.c17
-rw-r--r--drivers/net/hamradio/yam.c20
-rw-r--r--drivers/net/hippi/rrunner.c4
-rw-r--r--drivers/net/hyperv/Kconfig1
-rw-r--r--drivers/net/hyperv/hyperv_net.h259
-rw-r--r--drivers/net/hyperv/netvsc.c152
-rw-r--r--drivers/net/hyperv/netvsc_drv.c375
-rw-r--r--drivers/net/hyperv/rndis_filter.c192
-rw-r--r--drivers/net/ieee802154/Kconfig11
-rw-r--r--drivers/net/ieee802154/Makefile1
-rw-r--r--drivers/net/ieee802154/adf7242.c37
-rw-r--r--drivers/net/ieee802154/at86rf230.c15
-rw-r--r--drivers/net/ieee802154/ca8210.c10
-rw-r--r--drivers/net/ieee802154/fakelb.c5
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c929
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.h73
-rw-r--r--drivers/net/ieee802154/mcr20a.c75
-rw-r--r--drivers/net/ifb.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c40
-rw-r--r--drivers/net/loopback.c10
-rw-r--r--drivers/net/macsec.c38
-rw-r--r--drivers/net/macvlan.c72
-rw-r--r--drivers/net/net_failover.c842
-rw-r--r--drivers/net/netdevsim/Makefile4
-rw-r--r--drivers/net/netdevsim/bpf.c99
-rw-r--r--drivers/net/netdevsim/devlink.c8
-rw-r--r--drivers/net/netdevsim/fib.c9
-rw-r--r--drivers/net/netdevsim/ipsec.c297
-rw-r--r--drivers/net/netdevsim/netdev.c119
-rw-r--r--drivers/net/netdevsim/netdevsim.h74
-rw-r--r--drivers/net/nlmon.c6
-rw-r--r--drivers/net/ntb_netdev.c32
-rw-r--r--drivers/net/phy/Kconfig35
-rw-r--r--drivers/net/phy/Makefile4
-rw-r--r--drivers/net/phy/aquantia.c12
-rw-r--r--drivers/net/phy/asix.c63
-rw-r--r--drivers/net/phy/at803x.c2
-rw-r--r--drivers/net/phy/bcm-phy-lib.c6
-rw-r--r--drivers/net/phy/bcm63xx.c9
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/broadcom.c34
-rw-r--r--drivers/net/phy/dp83640.c28
-rw-r--r--drivers/net/phy/dp83848.c35
-rw-r--r--drivers/net/phy/dp83tc811.c371
-rw-r--r--drivers/net/phy/et1011c.c3
-rw-r--r--drivers/net/phy/fixed_phy.c7
-rw-r--r--drivers/net/phy/marvell.c126
-rw-r--r--drivers/net/phy/marvell10g.c17
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c83
-rw-r--r--drivers/net/phy/mdio-bitbang.c9
-rw-r--r--drivers/net/phy/mdio-boardinfo.c5
-rw-r--r--drivers/net/phy/mdio-gpio.c141
-rw-r--r--drivers/net/phy/mdio-mscc-miim.c193
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c108
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c21
-rw-r--r--drivers/net/phy/mdio-thunder.c4
-rw-r--r--drivers/net/phy/mdio_bus.c49
-rw-r--r--drivers/net/phy/micrel.c135
-rw-r--r--drivers/net/phy/microchip.c58
-rw-r--r--drivers/net/phy/microchip_t1.c74
-rw-r--r--drivers/net/phy/mscc.c1528
-rw-r--r--drivers/net/phy/phy-c45.c2
-rw-r--r--drivers/net/phy/phy.c221
-rw-r--r--drivers/net/phy/phy_device.c442
-rw-r--r--drivers/net/phy/phy_led_triggers.c6
-rw-r--r--drivers/net/phy/phylink.c128
-rw-r--r--drivers/net/phy/realtek.c98
-rw-r--r--drivers/net/phy/sfp-bus.c42
-rw-r--r--drivers/net/phy/sfp.c857
-rw-r--r--drivers/net/phy/smsc.c5
-rw-r--r--drivers/net/phy/ste10Xp.c4
-rw-r--r--drivers/net/phy/vitesse.c175
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c20
-rw-r--r--drivers/net/ppp/bsd_comp.c4
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/ppp/ppp_mppe.c83
-rw-r--r--drivers/net/ppp/pppoe.c21
-rw-r--r--drivers/net/ppp/pptp.c3
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/slip/slip.c27
-rw-r--r--drivers/net/tap.c99
-rw-r--r--drivers/net/team/team.c39
-rw-r--r--drivers/net/thunderbolt.c5
-rw-r--r--drivers/net/tun.c582
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/asix_common.c11
-rw-r--r--drivers/net/usb/asix_devices.c42
-rw-r--r--drivers/net/usb/ax88179_178a.c7
-rw-r--r--drivers/net/usb/catc.c1
-rw-r--r--drivers/net/usb/cdc-phonet.c6
-rw-r--r--drivers/net/usb/cdc_ncm.c13
-rw-r--r--drivers/net/usb/hso.c62
-rw-r--r--drivers/net/usb/ipheth.c10
-rw-r--r--drivers/net/usb/kaweth.c8
-rw-r--r--drivers/net/usb/lan78xx.c345
-rw-r--r--drivers/net/usb/lan78xx.h14
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c67
-rw-r--r--drivers/net/usb/r8152.c58
-rw-r--r--drivers/net/usb/rtl8150.c7
-rw-r--r--drivers/net/usb/smsc75xx.c66
-rw-r--r--drivers/net/usb/smsc95xx.c14
-rw-r--r--drivers/net/usb/sr9700.c2
-rw-r--r--drivers/net/usb/sr9800.c3
-rw-r--r--drivers/net/usb/usbnet.c17
-rw-r--r--drivers/net/veth.c916
-rw-r--r--drivers/net/virtio_net.c511
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c6
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c2
-rw-r--r--drivers/net/vrf.c38
-rw-r--r--drivers/net/vsockmon.c14
-rw-r--r--drivers/net/vxlan.c371
-rw-r--r--drivers/net/wan/c101.c1
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c100
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.h1
-rw-r--r--drivers/net/wan/lmc/lmc_main.c4
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wan/x25_asy.c19
-rw-r--r--drivers/net/wimax/i2400m/control.c5
-rw-r--r--drivers/net/wimax/i2400m/fw.c3
-rw-r--r--drivers/net/wimax/i2400m/netdev.c2
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c2
-rw-r--r--drivers/net/wimax/i2400m/usb-tx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig26
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile9
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c27
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c23
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h36
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c311
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h77
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c981
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h101
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c98
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c247
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c160
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c92
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h163
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c336
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c190
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c200
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h67
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c290
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c109
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h45
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c1019
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h129
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c2072
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h677
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h151
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c48
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c1655
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h98
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h111
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c381
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h647
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c417
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h187
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c308
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c19
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c19
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c83
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h20
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c85
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c7
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c4
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c5
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.h3
-rw-r--r--drivers/net/wireless/ath/dfs_pri_detector.h3
-rw-r--r--drivers/net/wireless/ath/regd.h7
-rw-r--r--drivers/net/wireless/ath/regd_common.h60
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c219
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h24
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c83
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c241
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c149
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.h46
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode_i.h29
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c15
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h12
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig2
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c458
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c937
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c289
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c437
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c81
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c130
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c9
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c72
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h59
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c818
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h112
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c1624
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h568
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h348
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c1244
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h1195
-rw-r--r--drivers/net/wireless/atmel/atmel.c33
-rw-r--r--drivers/net/wireless/atmel/atmel_pci.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/b43.h2
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c18
-rw-r--r--drivers/net/wireless/broadcom/b43/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c10
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c4
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/Kconfig1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c112
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c78
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h43
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c35
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c50
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c32
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c37
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h3
-rw-r--r--drivers/net/wireless/cisco/airo.c10
-rw-r--r--drivers/net/wireless/cisco/airo_cs.c3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c38
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.h14
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c23
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-debug.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c10
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c229
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h186
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h123
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h264
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h392
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/common_rx.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c769
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h177
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c189
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/nvm.c162
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h286
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c181
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c552
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-scd.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c234
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c156
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c396
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h191
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c243
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c190
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c657
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c292
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c905
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/testmode.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c323
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c831
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c207
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c109
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h277
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c441
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c629
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c417
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c252
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c94
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c34
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_info.c5
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ioctl.c4
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c153
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c6
-rw-r--r--drivers/net/wireless/intersil/p54/eeprom.c12
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c4
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_eth.c6
-rw-r--r--drivers/net/wireless/intersil/prism54/oid_mgt.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c203
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c12
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/if_cs.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c38
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c14
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c17
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c52
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c35
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c33
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c79
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h25
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c33
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c33
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c25
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c52
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c54
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c21
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig25
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile21
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c97
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c98
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h284
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Makefile12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c87
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c348
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c396
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h234
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h772
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.c197
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c152
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c221
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c147
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c930
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.h81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.h312
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c353
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c175
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h214
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h140
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dma.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_dma.h)37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c153
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h)139
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_mac.c)956
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_mac.h)141
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c224
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h113
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c260
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c258
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_regs.h)102
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_trace.c)2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_trace.h)33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c202
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c107
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c359
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c446
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2.h231
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Makefile16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c)56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c)355
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h94
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c222
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.c54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c123
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h)62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h105
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_pci.c)8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c)432
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c531
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c203
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c310
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c188
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c405
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c142
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c252
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c153
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c309
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c222
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c179
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c290
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c230
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_core.c88
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.c184
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init.c879
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_main.c624
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c444
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_phy.c763
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx.c263
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c143
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c886
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_mcu.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.h71
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c17
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mt7601u.h5
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c15
-rw-r--r--drivers/net/wireless/quantenna/Kconfig2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Kconfig4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Makefile3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c339
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c737
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c70
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h13
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c72
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c392
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h85
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c1249
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h (renamed from drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h)80
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h121
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c1481
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h91
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h356
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h119
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h14
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c13
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/trans.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800.h17
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c174
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c278
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c149
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00debug.c20
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c25
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00pci.h6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c4
-rw-r--r--drivers/net/wireless/ray_cs.c21
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c4
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c13
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c228
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c205
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c78
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c69
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h33
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c69
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c71
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c31
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h65
-rw-r--r--drivers/net/wireless/rndis_wlan.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c33
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c138
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c78
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c58
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c32
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c25
-rw-r--r--drivers/net/wireless/rsi/rsi_boot_params.h3
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h1
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h3
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h7
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h21
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h5
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h4
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c10
-rw-r--r--drivers/net/wireless/st/cw1200/scan.c6
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c6
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c29
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c90
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c578
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c146
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c66
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c30
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h1
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_chip.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c5
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c21
-rw-r--r--drivers/net/xen-netback/common.h3
-rw-r--r--drivers/net/xen-netback/hash.c51
-rw-r--r--drivers/net/xen-netback/interface.c10
-rw-r--r--drivers/net/xen-netback/netback.c7
-rw-r--r--drivers/net/xen-netback/xenbus.c7
-rw-r--r--drivers/net/xen-netfront.c60
1983 files changed, 220060 insertions, 79249 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 891846655000..d03775100f7d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -198,6 +198,7 @@ config VXLAN
198config GENEVE 198config GENEVE
199 tristate "Generic Network Virtualization Encapsulation" 199 tristate "Generic Network Virtualization Encapsulation"
200 depends on INET && NET_UDP_TUNNEL 200 depends on INET && NET_UDP_TUNNEL
201 depends on IPV6 || !IPV6
201 select NET_IP_TUNNEL 202 select NET_IP_TUNNEL
202 select GRO_CELLS 203 select GRO_CELLS
203 ---help--- 204 ---help---
@@ -331,6 +332,7 @@ config VETH
331config VIRTIO_NET 332config VIRTIO_NET
332 tristate "Virtio network driver" 333 tristate "Virtio network driver"
333 depends on VIRTIO 334 depends on VIRTIO
335 select NET_FAILOVER
334 ---help--- 336 ---help---
335 This is the virtual network driver for virtio. It can be used with 337 This is the virtual network driver for virtio. It can be used with
336 QEMU based VMMs (like KVM or Xen). Say Y or M. 338 QEMU based VMMs (like KVM or Xen). Say Y or M.
@@ -509,4 +511,16 @@ config NETDEVSIM
509 To compile this driver as a module, choose M here: the module 511 To compile this driver as a module, choose M here: the module
510 will be called netdevsim. 512 will be called netdevsim.
511 513
514config NET_FAILOVER
515 tristate "Failover driver"
516 select FAILOVER
517 help
518 This provides an automated failover mechanism via APIs to create
519 and destroy a failover master netdev and manages a primary and
520 standby slave netdevs that get registered via the generic failover
521 infrastructure. This can be used by paravirtual drivers to enable
522 an alternate low latency datapath. It alsoenables live migration of
523 a VM with direct attached VF by failing over to the paravirtual
524 datapath when the VF is unplugged.
525
512endif # NETDEVICES 526endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 91e67e375dd4..21cde7e78621 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_FUJITSU_ES) += fjes/
78thunderbolt-net-y += thunderbolt.o 78thunderbolt-net-y += thunderbolt.o
79obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o 79obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o
80obj-$(CONFIG_NETDEVSIM) += netdevsim/ 80obj-$(CONFIG_NETDEVSIM) += netdevsim/
81obj-$(CONFIG_NET_FAILOVER) += net_failover.o
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 9375cef22420..3d27616d9c85 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
283 case SIOCFINDIPDDPRT: 283 case SIOCFINDIPDDPRT:
284 spin_lock_bh(&ipddp_route_lock); 284 spin_lock_bh(&ipddp_route_lock);
285 rp = __ipddp_find_route(&rcp); 285 rp = __ipddp_find_route(&rcp);
286 if (rp) 286 if (rp) {
287 memcpy(&rcp2, rp, sizeof(rcp2)); 287 memset(&rcp2, 0, sizeof(rcp2));
288 rcp2.ip = rp->ip;
289 rcp2.at = rp->at;
290 rcp2.flags = rp->flags;
291 }
288 spin_unlock_bh(&ipddp_route_lock); 292 spin_unlock_bh(&ipddp_route_lock);
289 293
290 if (rp) { 294 if (rp) {
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index a07e24970be4..11c5bad95226 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -33,7 +33,7 @@
33#include <linux/ioport.h> 33#include <linux/ioport.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/netdevice.h> 35#include <linux/netdevice.h>
36#include <linux/bootmem.h> 36#include <linux/memblock.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/io.h> 39#include <linux/io.h>
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 38fa60ddaf2e..28510e33924f 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -38,7 +38,7 @@
38#include <linux/netdevice.h> 38#include <linux/netdevice.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/interrupt.h> 40#include <linux/interrupt.h>
41#include <linux/bootmem.h> 41#include <linux/memblock.h>
42#include <linux/io.h> 42#include <linux/io.h>
43 43
44#include "arcdevice.h" 44#include "arcdevice.h"
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 4e56aaf2b984..2c546013a980 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -34,7 +34,7 @@
34#include <linux/ioport.h> 34#include <linux/ioport.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/netdevice.h> 36#include <linux/netdevice.h>
37#include <linux/bootmem.h> 37#include <linux/memblock.h>
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/io.h> 40#include <linux/io.h>
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index f43fb2f958a5..93dfcef8afc4 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2086,6 +2086,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
2086 aggregator->aggregator_identifier); 2086 aggregator->aggregator_identifier);
2087 2087
2088 /* Tell the partner that this port is not suitable for aggregation */ 2088 /* Tell the partner that this port is not suitable for aggregation */
2089 port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
2090 port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
2091 port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
2089 port->actor_oper_port_state &= ~AD_STATE_AGGREGATION; 2092 port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
2090 __update_lacpdu_from_port(port); 2093 __update_lacpdu_from_port(port);
2091 ad_lacpdu_send(port); 2094 ad_lacpdu_send(port);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 5eb0df2e5464..e82108c917a6 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -40,11 +40,6 @@
40#include <net/bonding.h> 40#include <net/bonding.h>
41#include <net/bond_alb.h> 41#include <net/bond_alb.h>
42 42
43
44
45static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
46 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
47};
48static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = { 43static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
49 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 44 0x33, 0x33, 0x00, 0x00, 0x00, 0x01
50}; 45};
@@ -420,8 +415,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
420 415
421 if (assigned_slave) { 416 if (assigned_slave) {
422 rx_hash_table[index].slave = assigned_slave; 417 rx_hash_table[index].slave = assigned_slave;
423 if (!ether_addr_equal_64bits(rx_hash_table[index].mac_dst, 418 if (is_valid_ether_addr(rx_hash_table[index].mac_dst)) {
424 mac_bcast)) {
425 bond_info->rx_hashtbl[index].ntt = 1; 419 bond_info->rx_hashtbl[index].ntt = 1;
426 bond_info->rx_ntt = 1; 420 bond_info->rx_ntt = 1;
427 /* A slave has been removed from the 421 /* A slave has been removed from the
@@ -524,7 +518,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
524 client_info = &(bond_info->rx_hashtbl[hash_index]); 518 client_info = &(bond_info->rx_hashtbl[hash_index]);
525 519
526 if ((client_info->slave == slave) && 520 if ((client_info->slave == slave) &&
527 !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { 521 is_valid_ether_addr(client_info->mac_dst)) {
528 client_info->ntt = 1; 522 client_info->ntt = 1;
529 ntt = 1; 523 ntt = 1;
530 } 524 }
@@ -565,7 +559,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
565 if ((client_info->ip_src == src_ip) && 559 if ((client_info->ip_src == src_ip) &&
566 !ether_addr_equal_64bits(client_info->slave->dev->dev_addr, 560 !ether_addr_equal_64bits(client_info->slave->dev->dev_addr,
567 bond->dev->dev_addr) && 561 bond->dev->dev_addr) &&
568 !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { 562 is_valid_ether_addr(client_info->mac_dst)) {
569 client_info->ntt = 1; 563 client_info->ntt = 1;
570 bond_info->rx_ntt = 1; 564 bond_info->rx_ntt = 1;
571 } 565 }
@@ -593,7 +587,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
593 if ((client_info->ip_src == arp->ip_src) && 587 if ((client_info->ip_src == arp->ip_src) &&
594 (client_info->ip_dst == arp->ip_dst)) { 588 (client_info->ip_dst == arp->ip_dst)) {
595 /* the entry is already assigned to this client */ 589 /* the entry is already assigned to this client */
596 if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) { 590 if (!is_broadcast_ether_addr(arp->mac_dst)) {
597 /* update mac address from arp */ 591 /* update mac address from arp */
598 ether_addr_copy(client_info->mac_dst, arp->mac_dst); 592 ether_addr_copy(client_info->mac_dst, arp->mac_dst);
599 } 593 }
@@ -641,7 +635,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
641 ether_addr_copy(client_info->mac_src, arp->mac_src); 635 ether_addr_copy(client_info->mac_src, arp->mac_src);
642 client_info->slave = assigned_slave; 636 client_info->slave = assigned_slave;
643 637
644 if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { 638 if (is_valid_ether_addr(client_info->mac_dst)) {
645 client_info->ntt = 1; 639 client_info->ntt = 1;
646 bond->alb_info.rx_ntt = 1; 640 bond->alb_info.rx_ntt = 1;
647 } else { 641 } else {
@@ -733,8 +727,10 @@ static void rlb_rebalance(struct bonding *bond)
733 assigned_slave = __rlb_next_rx_slave(bond); 727 assigned_slave = __rlb_next_rx_slave(bond);
734 if (assigned_slave && (client_info->slave != assigned_slave)) { 728 if (assigned_slave && (client_info->slave != assigned_slave)) {
735 client_info->slave = assigned_slave; 729 client_info->slave = assigned_slave;
736 client_info->ntt = 1; 730 if (!is_zero_ether_addr(client_info->mac_dst)) {
737 ntt = 1; 731 client_info->ntt = 1;
732 ntt = 1;
733 }
738 } 734 }
739 } 735 }
740 736
@@ -1319,8 +1315,8 @@ void bond_alb_deinitialize(struct bonding *bond)
1319 rlb_deinitialize(bond); 1315 rlb_deinitialize(bond);
1320} 1316}
1321 1317
1322static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, 1318static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
1323 struct slave *tx_slave) 1319 struct slave *tx_slave)
1324{ 1320{
1325 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); 1321 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1326 struct ethhdr *eth_data = eth_hdr(skb); 1322 struct ethhdr *eth_data = eth_hdr(skb);
@@ -1354,7 +1350,7 @@ out:
1354 return NETDEV_TX_OK; 1350 return NETDEV_TX_OK;
1355} 1351}
1356 1352
1357int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) 1353netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1358{ 1354{
1359 struct bonding *bond = netdev_priv(bond_dev); 1355 struct bonding *bond = netdev_priv(bond_dev);
1360 struct ethhdr *eth_data; 1356 struct ethhdr *eth_data;
@@ -1392,7 +1388,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1392 return bond_do_alb_xmit(skb, bond, tx_slave); 1388 return bond_do_alb_xmit(skb, bond, tx_slave);
1393} 1389}
1394 1390
1395int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) 1391netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1396{ 1392{
1397 struct bonding *bond = netdev_priv(bond_dev); 1393 struct bonding *bond = netdev_priv(bond_dev);
1398 struct ethhdr *eth_data; 1394 struct ethhdr *eth_data;
@@ -1412,9 +1408,9 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1412 case ETH_P_IP: { 1408 case ETH_P_IP: {
1413 const struct iphdr *iph = ip_hdr(skb); 1409 const struct iphdr *iph = ip_hdr(skb);
1414 1410
1415 if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) || 1411 if (is_broadcast_ether_addr(eth_data->h_dest) ||
1416 (iph->daddr == ip_bcast) || 1412 iph->daddr == ip_bcast ||
1417 (iph->protocol == IPPROTO_IGMP)) { 1413 iph->protocol == IPPROTO_IGMP) {
1418 do_tx_balance = false; 1414 do_tx_balance = false;
1419 break; 1415 break;
1420 } 1416 }
@@ -1426,7 +1422,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1426 /* IPv6 doesn't really use broadcast mac address, but leave 1422 /* IPv6 doesn't really use broadcast mac address, but leave
1427 * that here just in case. 1423 * that here just in case.
1428 */ 1424 */
1429 if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) { 1425 if (is_broadcast_ether_addr(eth_data->h_dest)) {
1430 do_tx_balance = false; 1426 do_tx_balance = false;
1431 break; 1427 break;
1432 } 1428 }
@@ -1482,8 +1478,24 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1482 } 1478 }
1483 1479
1484 if (do_tx_balance) { 1480 if (do_tx_balance) {
1485 hash_index = _simple_hash(hash_start, hash_size); 1481 if (bond->params.tlb_dynamic_lb) {
1486 tx_slave = tlb_choose_channel(bond, hash_index, skb->len); 1482 hash_index = _simple_hash(hash_start, hash_size);
1483 tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
1484 } else {
1485 /*
1486 * do_tx_balance means we are free to select the tx_slave
1487 * So we do exactly what tlb would do for hash selection
1488 */
1489
1490 struct bond_up_slave *slaves;
1491 unsigned int count;
1492
1493 slaves = rcu_dereference(bond->slave_arr);
1494 count = slaves ? READ_ONCE(slaves->count) : 0;
1495 if (likely(count))
1496 tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
1497 count];
1498 }
1487 } 1499 }
1488 1500
1489 return bond_do_alb_xmit(skb, bond, tx_slave); 1501 return bond_do_alb_xmit(skb, bond, tx_slave);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1f1e97b26f95..333387f1f1fe 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -159,7 +159,7 @@ module_param(min_links, int, 0);
159MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier"); 159MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
160 160
161module_param(xmit_hash_policy, charp, 0); 161module_param(xmit_hash_policy, charp, 0);
162MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; " 162MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
163 "0 for layer 2 (default), 1 for layer 3+4, " 163 "0 for layer 2 (default), 1 for layer 3+4, "
164 "2 for layer 2+3, 3 for encap layer 2+3, " 164 "2 for layer 2+3, 3 for encap layer 2+3, "
165 "4 for encap layer 3+4"); 165 "4 for encap layer 3+4");
@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
210static void bond_slave_arr_handler(struct work_struct *work); 210static void bond_slave_arr_handler(struct work_struct *work);
211static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, 211static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
212 int mod); 212 int mod);
213static void bond_netdev_notify_work(struct work_struct *work);
213 214
214/*---------------------------- General routines -----------------------------*/ 215/*---------------------------- General routines -----------------------------*/
215 216
@@ -247,7 +248,7 @@ void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
247 248
248 BUILD_BUG_ON(sizeof(skb->queue_mapping) != 249 BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
249 sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); 250 sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
250 skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; 251 skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
251 252
252 if (unlikely(netpoll_tx_running(bond->dev))) 253 if (unlikely(netpoll_tx_running(bond->dev)))
253 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); 254 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -962,7 +963,8 @@ static inline void slave_disable_netpoll(struct slave *slave)
962 return; 963 return;
963 964
964 slave->np = NULL; 965 slave->np = NULL;
965 __netpoll_free_async(np); 966
967 __netpoll_free(np);
966} 968}
967 969
968static void bond_poll_controller(struct net_device *bond_dev) 970static void bond_poll_controller(struct net_device *bond_dev)
@@ -971,16 +973,13 @@ static void bond_poll_controller(struct net_device *bond_dev)
971 struct slave *slave = NULL; 973 struct slave *slave = NULL;
972 struct list_head *iter; 974 struct list_head *iter;
973 struct ad_info ad_info; 975 struct ad_info ad_info;
974 struct netpoll_info *ni;
975 const struct net_device_ops *ops;
976 976
977 if (BOND_MODE(bond) == BOND_MODE_8023AD) 977 if (BOND_MODE(bond) == BOND_MODE_8023AD)
978 if (bond_3ad_get_active_agg_info(bond, &ad_info)) 978 if (bond_3ad_get_active_agg_info(bond, &ad_info))
979 return; 979 return;
980 980
981 bond_for_each_slave_rcu(bond, slave, iter) { 981 bond_for_each_slave_rcu(bond, slave, iter) {
982 ops = slave->dev->netdev_ops; 982 if (!bond_slave_is_up(slave))
983 if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
984 continue; 983 continue;
985 984
986 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 985 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -992,11 +991,7 @@ static void bond_poll_controller(struct net_device *bond_dev)
992 continue; 991 continue;
993 } 992 }
994 993
995 ni = rcu_dereference_bh(slave->dev->npinfo); 994 netpoll_poll_dev(slave->dev);
996 if (down_trylock(&ni->dev_lock))
997 continue;
998 ops->ndo_poll_controller(slave->dev);
999 up(&ni->dev_lock);
1000 } 995 }
1001} 996}
1002 997
@@ -1107,7 +1102,8 @@ static void bond_compute_features(struct bonding *bond)
1107 1102
1108done: 1103done:
1109 bond_dev->vlan_features = vlan_features; 1104 bond_dev->vlan_features = vlan_features;
1110 bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL; 1105 bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1106 NETIF_F_GSO_UDP_L4;
1111 bond_dev->gso_max_segs = gso_max_segs; 1107 bond_dev->gso_max_segs = gso_max_segs;
1112 netif_set_gso_max_size(bond_dev, gso_max_size); 1108 netif_set_gso_max_size(bond_dev, gso_max_size);
1113 1109
@@ -1176,9 +1172,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1176 } 1172 }
1177 } 1173 }
1178 1174
1179 /* don't change skb->dev for link-local packets */ 1175 /* Link-local multicast packets should be passed to the
1180 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) 1176 * stack on the link they arrive as well as pass them to the
1177 * bond-master device. These packets are mostly usable when
1178 * stack receives it with the link on which they arrive
1179 * (e.g. LLDP) they also must be available on master. Some of
1180 * the use cases include (but are not limited to): LLDP agents
1181 * that must be able to operate both on enslaved interfaces as
1182 * well as on bonds themselves; linux bridges that must be able
1183 * to process/pass BPDUs from attached bonds when any kind of
1184 * STP version is enabled on the network.
1185 */
1186 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
1187 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1188
1189 if (nskb) {
1190 nskb->dev = bond->dev;
1191 nskb->queue_mapping = 0;
1192 netif_rx(nskb);
1193 }
1181 return RX_HANDLER_PASS; 1194 return RX_HANDLER_PASS;
1195 }
1182 if (bond_should_deliver_exact_match(skb, slave, bond)) 1196 if (bond_should_deliver_exact_match(skb, slave, bond))
1183 return RX_HANDLER_EXACT; 1197 return RX_HANDLER_EXACT;
1184 1198
@@ -1217,12 +1231,37 @@ static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
1217 } 1231 }
1218} 1232}
1219 1233
1234static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
1235 enum netdev_lag_tx_type type)
1236{
1237 if (type != NETDEV_LAG_TX_TYPE_HASH)
1238 return NETDEV_LAG_HASH_NONE;
1239
1240 switch (bond->params.xmit_policy) {
1241 case BOND_XMIT_POLICY_LAYER2:
1242 return NETDEV_LAG_HASH_L2;
1243 case BOND_XMIT_POLICY_LAYER34:
1244 return NETDEV_LAG_HASH_L34;
1245 case BOND_XMIT_POLICY_LAYER23:
1246 return NETDEV_LAG_HASH_L23;
1247 case BOND_XMIT_POLICY_ENCAP23:
1248 return NETDEV_LAG_HASH_E23;
1249 case BOND_XMIT_POLICY_ENCAP34:
1250 return NETDEV_LAG_HASH_E34;
1251 default:
1252 return NETDEV_LAG_HASH_UNKNOWN;
1253 }
1254}
1255
1220static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave, 1256static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
1221 struct netlink_ext_ack *extack) 1257 struct netlink_ext_ack *extack)
1222{ 1258{
1223 struct netdev_lag_upper_info lag_upper_info; 1259 struct netdev_lag_upper_info lag_upper_info;
1260 enum netdev_lag_tx_type type;
1224 1261
1225 lag_upper_info.tx_type = bond_lag_tx_type(bond); 1262 type = bond_lag_tx_type(bond);
1263 lag_upper_info.tx_type = type;
1264 lag_upper_info.hash_type = bond_lag_hash_type(bond, type);
1226 1265
1227 return netdev_master_upper_dev_link(slave->dev, bond->dev, slave, 1266 return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
1228 &lag_upper_info, extack); 1267 &lag_upper_info, extack);
@@ -1250,6 +1289,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
1250 return NULL; 1289 return NULL;
1251 } 1290 }
1252 } 1291 }
1292 INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
1293
1253 return slave; 1294 return slave;
1254} 1295}
1255 1296
@@ -1257,6 +1298,7 @@ static void bond_free_slave(struct slave *slave)
1257{ 1298{
1258 struct bonding *bond = bond_get_bond_by_slave(slave); 1299 struct bonding *bond = bond_get_bond_by_slave(slave);
1259 1300
1301 cancel_delayed_work_sync(&slave->notify_work);
1260 if (BOND_MODE(bond) == BOND_MODE_8023AD) 1302 if (BOND_MODE(bond) == BOND_MODE_8023AD)
1261 kfree(SLAVE_AD_INFO(slave)); 1303 kfree(SLAVE_AD_INFO(slave));
1262 1304
@@ -1278,39 +1320,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
1278 info->link_failure_count = slave->link_failure_count; 1320 info->link_failure_count = slave->link_failure_count;
1279} 1321}
1280 1322
1281static void bond_netdev_notify(struct net_device *dev,
1282 struct netdev_bonding_info *info)
1283{
1284 rtnl_lock();
1285 netdev_bonding_info_change(dev, info);
1286 rtnl_unlock();
1287}
1288
1289static void bond_netdev_notify_work(struct work_struct *_work) 1323static void bond_netdev_notify_work(struct work_struct *_work)
1290{ 1324{
1291 struct netdev_notify_work *w = 1325 struct slave *slave = container_of(_work, struct slave,
1292 container_of(_work, struct netdev_notify_work, work.work); 1326 notify_work.work);
1293 1327
1294 bond_netdev_notify(w->dev, &w->bonding_info); 1328 if (rtnl_trylock()) {
1295 dev_put(w->dev); 1329 struct netdev_bonding_info binfo;
1296 kfree(w); 1330
1331 bond_fill_ifslave(slave, &binfo.slave);
1332 bond_fill_ifbond(slave->bond, &binfo.master);
1333 netdev_bonding_info_change(slave->dev, &binfo);
1334 rtnl_unlock();
1335 } else {
1336 queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
1337 }
1297} 1338}
1298 1339
1299void bond_queue_slave_event(struct slave *slave) 1340void bond_queue_slave_event(struct slave *slave)
1300{ 1341{
1301 struct bonding *bond = slave->bond; 1342 queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
1302 struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
1303
1304 if (!nnw)
1305 return;
1306
1307 dev_hold(slave->dev);
1308 nnw->dev = slave->dev;
1309 bond_fill_ifslave(slave, &nnw->bonding_info.slave);
1310 bond_fill_ifbond(bond, &nnw->bonding_info.master);
1311 INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
1312
1313 queue_delayed_work(slave->bond->wq, &nnw->work, 0);
1314} 1343}
1315 1344
1316void bond_lower_state_changed(struct slave *slave) 1345void bond_lower_state_changed(struct slave *slave)
@@ -1691,6 +1720,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1691 goto err_upper_unlink; 1720 goto err_upper_unlink;
1692 } 1721 }
1693 1722
1723 bond->nest_level = dev_get_nest_level(bond_dev) + 1;
1724
1694 /* If the mode uses primary, then the following is handled by 1725 /* If the mode uses primary, then the following is handled by
1695 * bond_change_active_slave(). 1726 * bond_change_active_slave().
1696 */ 1727 */
@@ -1735,10 +1766,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1735 unblock_netpoll_tx(); 1766 unblock_netpoll_tx();
1736 } 1767 }
1737 1768
1738 if (bond_mode_uses_xmit_hash(bond)) 1769 if (bond_mode_can_use_xmit_hash(bond))
1739 bond_update_slave_arr(bond, NULL); 1770 bond_update_slave_arr(bond, NULL);
1740 1771
1741 bond->nest_level = dev_get_nest_level(bond_dev);
1742 1772
1743 netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", 1773 netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
1744 slave_dev->name, 1774 slave_dev->name,
@@ -1870,7 +1900,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1870 if (BOND_MODE(bond) == BOND_MODE_8023AD) 1900 if (BOND_MODE(bond) == BOND_MODE_8023AD)
1871 bond_3ad_unbind_slave(slave); 1901 bond_3ad_unbind_slave(slave);
1872 1902
1873 if (bond_mode_uses_xmit_hash(bond)) 1903 if (bond_mode_can_use_xmit_hash(bond))
1874 bond_update_slave_arr(bond, slave); 1904 bond_update_slave_arr(bond, slave);
1875 1905
1876 netdev_info(bond_dev, "Releasing %s interface %s\n", 1906 netdev_info(bond_dev, "Releasing %s interface %s\n",
@@ -2137,6 +2167,24 @@ static int bond_miimon_inspect(struct bonding *bond)
2137 return commit; 2167 return commit;
2138} 2168}
2139 2169
2170static void bond_miimon_link_change(struct bonding *bond,
2171 struct slave *slave,
2172 char link)
2173{
2174 switch (BOND_MODE(bond)) {
2175 case BOND_MODE_8023AD:
2176 bond_3ad_handle_link_change(slave, link);
2177 break;
2178 case BOND_MODE_TLB:
2179 case BOND_MODE_ALB:
2180 bond_alb_handle_link_change(bond, slave, link);
2181 break;
2182 case BOND_MODE_XOR:
2183 bond_update_slave_arr(bond, NULL);
2184 break;
2185 }
2186}
2187
2140static void bond_miimon_commit(struct bonding *bond) 2188static void bond_miimon_commit(struct bonding *bond)
2141{ 2189{
2142 struct list_head *iter; 2190 struct list_head *iter;
@@ -2178,16 +2226,7 @@ static void bond_miimon_commit(struct bonding *bond)
2178 slave->speed == SPEED_UNKNOWN ? 0 : slave->speed, 2226 slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2179 slave->duplex ? "full" : "half"); 2227 slave->duplex ? "full" : "half");
2180 2228
2181 /* notify ad that the link status has changed */ 2229 bond_miimon_link_change(bond, slave, BOND_LINK_UP);
2182 if (BOND_MODE(bond) == BOND_MODE_8023AD)
2183 bond_3ad_handle_link_change(slave, BOND_LINK_UP);
2184
2185 if (bond_is_lb(bond))
2186 bond_alb_handle_link_change(bond, slave,
2187 BOND_LINK_UP);
2188
2189 if (BOND_MODE(bond) == BOND_MODE_XOR)
2190 bond_update_slave_arr(bond, NULL);
2191 2230
2192 if (!bond->curr_active_slave || slave == primary) 2231 if (!bond->curr_active_slave || slave == primary)
2193 goto do_failover; 2232 goto do_failover;
@@ -2209,16 +2248,7 @@ static void bond_miimon_commit(struct bonding *bond)
2209 netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n", 2248 netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
2210 slave->dev->name); 2249 slave->dev->name);
2211 2250
2212 if (BOND_MODE(bond) == BOND_MODE_8023AD) 2251 bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
2213 bond_3ad_handle_link_change(slave,
2214 BOND_LINK_DOWN);
2215
2216 if (bond_is_lb(bond))
2217 bond_alb_handle_link_change(bond, slave,
2218 BOND_LINK_DOWN);
2219
2220 if (BOND_MODE(bond) == BOND_MODE_XOR)
2221 bond_update_slave_arr(bond, NULL);
2222 2252
2223 if (slave == rcu_access_pointer(bond->curr_active_slave)) 2253 if (slave == rcu_access_pointer(bond->curr_active_slave))
2224 goto do_failover; 2254 goto do_failover;
@@ -2392,7 +2422,7 @@ struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
2392 struct list_head *iter; 2422 struct list_head *iter;
2393 2423
2394 if (start_dev == end_dev) { 2424 if (start_dev == end_dev) {
2395 tags = kzalloc(sizeof(*tags) * (level + 1), GFP_ATOMIC); 2425 tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
2396 if (!tags) 2426 if (!tags)
2397 return ERR_PTR(-ENOMEM); 2427 return ERR_PTR(-ENOMEM);
2398 tags[level].vlan_proto = VLAN_N_VID; 2428 tags[level].vlan_proto = VLAN_N_VID;
@@ -3082,13 +3112,13 @@ static int bond_slave_netdev_event(unsigned long event,
3082 case NETDEV_CHANGE: 3112 case NETDEV_CHANGE:
3083 /* For 802.3ad mode only: 3113 /* For 802.3ad mode only:
3084 * Getting invalid Speed/Duplex values here will put slave 3114 * Getting invalid Speed/Duplex values here will put slave
3085 * in weird state. So mark it as link-down for the time 3115 * in weird state. So mark it as link-fail for the time
3086 * being and let link-monitoring (miimon) set it right when 3116 * being and let link-monitoring (miimon) set it right when
3087 * correct speeds/duplex are available. 3117 * correct speeds/duplex are available.
3088 */ 3118 */
3089 if (bond_update_speed_duplex(slave) && 3119 if (bond_update_speed_duplex(slave) &&
3090 BOND_MODE(bond) == BOND_MODE_8023AD) 3120 BOND_MODE(bond) == BOND_MODE_8023AD)
3091 slave->link = BOND_LINK_DOWN; 3121 slave->link = BOND_LINK_FAIL;
3092 3122
3093 if (BOND_MODE(bond) == BOND_MODE_8023AD) 3123 if (BOND_MODE(bond) == BOND_MODE_8023AD)
3094 bond_3ad_adapter_speed_duplex_changed(slave); 3124 bond_3ad_adapter_speed_duplex_changed(slave);
@@ -3102,7 +3132,7 @@ static int bond_slave_netdev_event(unsigned long event,
3102 * events. If these (miimon/arpmon) parameters are configured 3132 * events. If these (miimon/arpmon) parameters are configured
3103 * then array gets refreshed twice and that should be fine! 3133 * then array gets refreshed twice and that should be fine!
3104 */ 3134 */
3105 if (bond_mode_uses_xmit_hash(bond)) 3135 if (bond_mode_can_use_xmit_hash(bond))
3106 bond_update_slave_arr(bond, NULL); 3136 bond_update_slave_arr(bond, NULL);
3107 break; 3137 break;
3108 case NETDEV_CHANGEMTU: 3138 case NETDEV_CHANGEMTU:
@@ -3322,7 +3352,7 @@ static int bond_open(struct net_device *bond_dev)
3322 */ 3352 */
3323 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB))) 3353 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
3324 return -ENOMEM; 3354 return -ENOMEM;
3325 if (bond->params.tlb_dynamic_lb) 3355 if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
3326 queue_delayed_work(bond->wq, &bond->alb_work, 0); 3356 queue_delayed_work(bond->wq, &bond->alb_work, 0);
3327 } 3357 }
3328 3358
@@ -3341,7 +3371,7 @@ static int bond_open(struct net_device *bond_dev)
3341 bond_3ad_initiate_agg_selection(bond, 1); 3371 bond_3ad_initiate_agg_selection(bond, 1);
3342 } 3372 }
3343 3373
3344 if (bond_mode_uses_xmit_hash(bond)) 3374 if (bond_mode_can_use_xmit_hash(bond))
3345 bond_update_slave_arr(bond, NULL); 3375 bond_update_slave_arr(bond, NULL);
3346 3376
3347 return 0; 3377 return 0;
@@ -3389,6 +3419,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
3389 } 3419 }
3390} 3420}
3391 3421
3422static int bond_get_nest_level(struct net_device *bond_dev)
3423{
3424 struct bonding *bond = netdev_priv(bond_dev);
3425
3426 return bond->nest_level;
3427}
3428
3392static void bond_get_stats(struct net_device *bond_dev, 3429static void bond_get_stats(struct net_device *bond_dev,
3393 struct rtnl_link_stats64 *stats) 3430 struct rtnl_link_stats64 *stats)
3394{ 3431{
@@ -3397,7 +3434,7 @@ static void bond_get_stats(struct net_device *bond_dev,
3397 struct list_head *iter; 3434 struct list_head *iter;
3398 struct slave *slave; 3435 struct slave *slave;
3399 3436
3400 spin_lock(&bond->stats_lock); 3437 spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
3401 memcpy(stats, &bond->bond_stats, sizeof(*stats)); 3438 memcpy(stats, &bond->bond_stats, sizeof(*stats));
3402 3439
3403 rcu_read_lock(); 3440 rcu_read_lock();
@@ -3807,7 +3844,8 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
3807 return slave_id; 3844 return slave_id;
3808} 3845}
3809 3846
3810static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev) 3847static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
3848 struct net_device *bond_dev)
3811{ 3849{
3812 struct bonding *bond = netdev_priv(bond_dev); 3850 struct bonding *bond = netdev_priv(bond_dev);
3813 struct iphdr *iph = ip_hdr(skb); 3851 struct iphdr *iph = ip_hdr(skb);
@@ -3843,7 +3881,8 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
3843/* In active-backup mode, we know that bond->curr_active_slave is always valid if 3881/* In active-backup mode, we know that bond->curr_active_slave is always valid if
3844 * the bond has a usable interface. 3882 * the bond has a usable interface.
3845 */ 3883 */
3846static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev) 3884static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
3885 struct net_device *bond_dev)
3847{ 3886{
3848 struct bonding *bond = netdev_priv(bond_dev); 3887 struct bonding *bond = netdev_priv(bond_dev);
3849 struct slave *slave; 3888 struct slave *slave;
@@ -3892,7 +3931,7 @@ err:
3892 * to determine the slave interface - 3931 * to determine the slave interface -
3893 * (a) BOND_MODE_8023AD 3932 * (a) BOND_MODE_8023AD
3894 * (b) BOND_MODE_XOR 3933 * (b) BOND_MODE_XOR
3895 * (c) BOND_MODE_TLB && tlb_dynamic_lb == 0 3934 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
3896 * 3935 *
3897 * The caller is expected to hold RTNL only and NO other lock! 3936 * The caller is expected to hold RTNL only and NO other lock!
3898 */ 3937 */
@@ -3945,6 +3984,11 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
3945 continue; 3984 continue;
3946 if (skipslave == slave) 3985 if (skipslave == slave)
3947 continue; 3986 continue;
3987
3988 netdev_dbg(bond->dev,
3989 "Adding slave dev %s to tx hash array[%d]\n",
3990 slave->dev->name, new_arr->count);
3991
3948 new_arr->arr[new_arr->count++] = slave; 3992 new_arr->arr[new_arr->count++] = slave;
3949 } 3993 }
3950 3994
@@ -3981,7 +4025,8 @@ out:
3981 * usable slave array is formed in the control path. The xmit function 4025 * usable slave array is formed in the control path. The xmit function
3982 * just calculates hash and sends the packet out. 4026 * just calculates hash and sends the packet out.
3983 */ 4027 */
3984static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) 4028static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
4029 struct net_device *dev)
3985{ 4030{
3986 struct bonding *bond = netdev_priv(dev); 4031 struct bonding *bond = netdev_priv(dev);
3987 struct slave *slave; 4032 struct slave *slave;
@@ -4001,7 +4046,8 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
4001} 4046}
4002 4047
4003/* in broadcast mode, we send everything to all usable interfaces. */ 4048/* in broadcast mode, we send everything to all usable interfaces. */
4004static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) 4049static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
4050 struct net_device *bond_dev)
4005{ 4051{
4006 struct bonding *bond = netdev_priv(bond_dev); 4052 struct bonding *bond = netdev_priv(bond_dev);
4007 struct slave *slave = NULL; 4053 struct slave *slave = NULL;
@@ -4038,12 +4084,12 @@ static inline int bond_slave_override(struct bonding *bond,
4038 struct slave *slave = NULL; 4084 struct slave *slave = NULL;
4039 struct list_head *iter; 4085 struct list_head *iter;
4040 4086
4041 if (!skb->queue_mapping) 4087 if (!skb_rx_queue_recorded(skb))
4042 return 1; 4088 return 1;
4043 4089
4044 /* Find out if any slaves have the same mapping as this skb. */ 4090 /* Find out if any slaves have the same mapping as this skb. */
4045 bond_for_each_slave_rcu(bond, slave, iter) { 4091 bond_for_each_slave_rcu(bond, slave, iter) {
4046 if (slave->queue_id == skb->queue_mapping) { 4092 if (slave->queue_id == skb_get_queue_mapping(skb)) {
4047 if (bond_slave_is_up(slave) && 4093 if (bond_slave_is_up(slave) &&
4048 slave->link == BOND_LINK_UP) { 4094 slave->link == BOND_LINK_UP) {
4049 bond_dev_queue_xmit(bond, skb, slave->dev); 4095 bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -4059,7 +4105,8 @@ static inline int bond_slave_override(struct bonding *bond,
4059 4105
4060 4106
4061static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, 4107static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
4062 void *accel_priv, select_queue_fallback_t fallback) 4108 struct net_device *sb_dev,
4109 select_queue_fallback_t fallback)
4063{ 4110{
4064 /* This helper function exists to help dev_pick_tx get the correct 4111 /* This helper function exists to help dev_pick_tx get the correct
4065 * destination queue. Using a helper function skips a call to 4112 * destination queue. Using a helper function skips a call to
@@ -4069,7 +4116,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
4069 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; 4116 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
4070 4117
4071 /* Save the original txq to restore before passing to the driver */ 4118 /* Save the original txq to restore before passing to the driver */
4072 qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; 4119 qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
4073 4120
4074 if (unlikely(txq >= dev->real_num_tx_queues)) { 4121 if (unlikely(txq >= dev->real_num_tx_queues)) {
4075 do { 4122 do {
@@ -4192,6 +4239,7 @@ static const struct net_device_ops bond_netdev_ops = {
4192 .ndo_neigh_setup = bond_neigh_setup, 4239 .ndo_neigh_setup = bond_neigh_setup,
4193 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, 4240 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
4194 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, 4241 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
4242 .ndo_get_lock_subclass = bond_get_nest_level,
4195#ifdef CONFIG_NET_POLL_CONTROLLER 4243#ifdef CONFIG_NET_POLL_CONTROLLER
4196 .ndo_netpoll_setup = bond_netpoll_setup, 4244 .ndo_netpoll_setup = bond_netpoll_setup,
4197 .ndo_netpoll_cleanup = bond_netpoll_cleanup, 4245 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
@@ -4259,7 +4307,7 @@ void bond_setup(struct net_device *bond_dev)
4259 NETIF_F_HW_VLAN_CTAG_RX | 4307 NETIF_F_HW_VLAN_CTAG_RX |
4260 NETIF_F_HW_VLAN_CTAG_FILTER; 4308 NETIF_F_HW_VLAN_CTAG_FILTER;
4261 4309
4262 bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; 4310 bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
4263 bond_dev->features |= bond_dev->hw_features; 4311 bond_dev->features |= bond_dev->hw_features;
4264} 4312}
4265 4313
@@ -4320,9 +4368,9 @@ static int bond_check_params(struct bond_params *params)
4320 } 4368 }
4321 4369
4322 if (xmit_hash_policy) { 4370 if (xmit_hash_policy) {
4323 if ((bond_mode != BOND_MODE_XOR) && 4371 if (bond_mode == BOND_MODE_ROUNDROBIN ||
4324 (bond_mode != BOND_MODE_8023AD) && 4372 bond_mode == BOND_MODE_ACTIVEBACKUP ||
4325 (bond_mode != BOND_MODE_TLB)) { 4373 bond_mode == BOND_MODE_BROADCAST) {
4326 pr_info("xmit_hash_policy param is irrelevant in mode %s\n", 4374 pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
4327 bond_mode_name(bond_mode)); 4375 bond_mode_name(bond_mode));
4328 } else { 4376 } else {
@@ -4690,6 +4738,7 @@ static int bond_init(struct net_device *bond_dev)
4690 if (!bond->wq) 4738 if (!bond->wq)
4691 return -ENOMEM; 4739 return -ENOMEM;
4692 4740
4741 bond->nest_level = SINGLE_DEPTH_NESTING;
4693 netdev_lockdep_set_classes(bond_dev); 4742 netdev_lockdep_set_classes(bond_dev);
4694 4743
4695 list_add_tail(&bond->bond_list, &bn->dev_list); 4744 list_add_tail(&bond->bond_list, &bn->dev_list);
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 9697977b80f0..6b9ad8673218 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -638,8 +638,7 @@ static int bond_fill_info(struct sk_buff *skb,
638 goto nla_put_failure; 638 goto nla_put_failure;
639 639
640 if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM, 640 if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
641 sizeof(bond->params.ad_actor_system), 641 ETH_ALEN, &bond->params.ad_actor_system))
642 &bond->params.ad_actor_system))
643 goto nla_put_failure; 642 goto nla_put_failure;
644 } 643 }
645 if (!bond_3ad_get_active_agg_info(bond, &info)) { 644 if (!bond_3ad_get_active_agg_info(bond, &info)) {
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 58c705f24f96..4d5d01cb8141 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -395,7 +395,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
395 .id = BOND_OPT_TLB_DYNAMIC_LB, 395 .id = BOND_OPT_TLB_DYNAMIC_LB,
396 .name = "tlb_dynamic_lb", 396 .name = "tlb_dynamic_lb",
397 .desc = "Enable dynamic flow shuffling", 397 .desc = "Enable dynamic flow shuffling",
398 .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)), 398 .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB) | BIT(BOND_MODE_ALB)),
399 .values = bond_tlb_dynamic_lb_tbl, 399 .values = bond_tlb_dynamic_lb_tbl,
400 .flags = BOND_OPTFLAG_IFDOWN, 400 .flags = BOND_OPTFLAG_IFDOWN,
401 .set = bond_option_tlb_dynamic_lb_set, 401 .set = bond_option_tlb_dynamic_lb_set,
@@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
743static int bond_option_mode_set(struct bonding *bond, 743static int bond_option_mode_set(struct bonding *bond,
744 const struct bond_opt_value *newval) 744 const struct bond_opt_value *newval)
745{ 745{
746 if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) { 746 if (!bond_mode_uses_arp(newval->value)) {
747 netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", 747 if (bond->params.arp_interval) {
748 newval->string); 748 netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
749 /* disable arp monitoring */ 749 newval->string);
750 bond->params.arp_interval = 0; 750 /* disable arp monitoring */
751 /* set miimon to default value */ 751 bond->params.arp_interval = 0;
752 bond->params.miimon = BOND_DEFAULT_MIIMON; 752 }
753 netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n", 753
754 bond->params.miimon); 754 if (!bond->params.miimon) {
755 /* set miimon to default value */
756 bond->params.miimon = BOND_DEFAULT_MIIMON;
757 netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
758 bond->params.miimon);
759 }
755 } 760 }
756 761
757 if (newval->value == BOND_MODE_ALB) 762 if (newval->value == BOND_MODE_ALB)
@@ -1142,6 +1147,7 @@ static int bond_option_primary_set(struct bonding *bond,
1142 slave->dev->name); 1147 slave->dev->name);
1143 rcu_assign_pointer(bond->primary_slave, slave); 1148 rcu_assign_pointer(bond->primary_slave, slave);
1144 strcpy(bond->params.primary, slave->dev->name); 1149 strcpy(bond->params.primary, slave->dev->name);
1150 bond->force_primary = true;
1145 bond_select_active_slave(bond); 1151 bond_select_active_slave(bond);
1146 goto out; 1152 goto out;
1147 } 1153 }
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 01059f1a7bca..9f7d83e827c3 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -10,7 +10,7 @@
10static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) 10static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
11 __acquires(RCU) 11 __acquires(RCU)
12{ 12{
13 struct bonding *bond = seq->private; 13 struct bonding *bond = PDE_DATA(file_inode(seq->file));
14 struct list_head *iter; 14 struct list_head *iter;
15 struct slave *slave; 15 struct slave *slave;
16 loff_t off = 0; 16 loff_t off = 0;
@@ -29,7 +29,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
29 29
30static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) 30static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
31{ 31{
32 struct bonding *bond = seq->private; 32 struct bonding *bond = PDE_DATA(file_inode(seq->file));
33 struct list_head *iter; 33 struct list_head *iter;
34 struct slave *slave; 34 struct slave *slave;
35 bool found = false; 35 bool found = false;
@@ -56,7 +56,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
56 56
57static void bond_info_show_master(struct seq_file *seq) 57static void bond_info_show_master(struct seq_file *seq)
58{ 58{
59 struct bonding *bond = seq->private; 59 struct bonding *bond = PDE_DATA(file_inode(seq->file));
60 const struct bond_opt_value *optval; 60 const struct bond_opt_value *optval;
61 struct slave *curr, *primary; 61 struct slave *curr, *primary;
62 int i; 62 int i;
@@ -167,7 +167,7 @@ static void bond_info_show_master(struct seq_file *seq)
167static void bond_info_show_slave(struct seq_file *seq, 167static void bond_info_show_slave(struct seq_file *seq,
168 const struct slave *slave) 168 const struct slave *slave)
169{ 169{
170 struct bonding *bond = seq->private; 170 struct bonding *bond = PDE_DATA(file_inode(seq->file));
171 171
172 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); 172 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
173 seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link)); 173 seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
@@ -257,38 +257,14 @@ static const struct seq_operations bond_info_seq_ops = {
257 .show = bond_info_seq_show, 257 .show = bond_info_seq_show,
258}; 258};
259 259
260static int bond_info_open(struct inode *inode, struct file *file)
261{
262 struct seq_file *seq;
263 int res;
264
265 res = seq_open(file, &bond_info_seq_ops);
266 if (!res) {
267 /* recover the pointer buried in proc_dir_entry data */
268 seq = file->private_data;
269 seq->private = PDE_DATA(inode);
270 }
271
272 return res;
273}
274
275static const struct file_operations bond_info_fops = {
276 .owner = THIS_MODULE,
277 .open = bond_info_open,
278 .read = seq_read,
279 .llseek = seq_lseek,
280 .release = seq_release,
281};
282
283void bond_create_proc_entry(struct bonding *bond) 260void bond_create_proc_entry(struct bonding *bond)
284{ 261{
285 struct net_device *bond_dev = bond->dev; 262 struct net_device *bond_dev = bond->dev;
286 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); 263 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
287 264
288 if (bn->proc_dir) { 265 if (bn->proc_dir) {
289 bond->proc_entry = proc_create_data(bond_dev->name, 266 bond->proc_entry = proc_create_seq_data(bond_dev->name, 0444,
290 0444, bn->proc_dir, 267 bn->proc_dir, &bond_info_seq_ops, bond);
291 &bond_info_fops, bond);
292 if (bond->proc_entry == NULL) 268 if (bond->proc_entry == NULL)
293 netdev_warn(bond_dev, "Cannot create /proc/net/%s/%s\n", 269 netdev_warn(bond_dev, "Cannot create /proc/net/%s/%s\n",
294 DRV_NAME, bond_dev->name); 270 DRV_NAME, bond_dev->name);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6096440e96ea..35847250da5a 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -160,14 +160,19 @@ static ssize_t bonding_sysfs_store_option(struct device *d,
160{ 160{
161 struct bonding *bond = to_bond(d); 161 struct bonding *bond = to_bond(d);
162 const struct bond_option *opt; 162 const struct bond_option *opt;
163 char *buffer_clone;
163 int ret; 164 int ret;
164 165
165 opt = bond_opt_get_by_name(attr->attr.name); 166 opt = bond_opt_get_by_name(attr->attr.name);
166 if (WARN_ON(!opt)) 167 if (WARN_ON(!opt))
167 return -ENOENT; 168 return -ENOENT;
168 ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer); 169 buffer_clone = kstrndup(buffer, count, GFP_KERNEL);
170 if (!buffer_clone)
171 return -ENOMEM;
172 ret = bond_opt_tryset_rtnl(bond, opt->id, buffer_clone);
169 if (!ret) 173 if (!ret)
170 ret = count; 174 ret = count;
175 kfree(buffer_clone);
171 176
172 return ret; 177 return ret;
173} 178}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 2cb75988b328..7cdd0cead693 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -73,6 +73,12 @@ config CAN_CALC_BITTIMING
73config CAN_LEDS 73config CAN_LEDS
74 bool "Enable LED triggers for Netlink based drivers" 74 bool "Enable LED triggers for Netlink based drivers"
75 depends on LEDS_CLASS 75 depends on LEDS_CLASS
76 # The netdev trigger (LEDS_TRIGGER_NETDEV) should be able to do
77 # everything that this driver is doing. This is marked as broken
78 # because it uses stuff that is intended to be changed or removed.
79 # Please consider switching to the netdev trigger and confirm it
80 # fulfills your needs instead of fixing this driver.
81 depends on BROKEN
76 select LEDS_TRIGGERS 82 select LEDS_TRIGGERS
77 ---help--- 83 ---help---
78 This option adds two LED triggers for packet receive and transmit 84 This option adds two LED triggers for packet receive and transmit
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index d4dd4da23997..da636a22c542 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
73 73
74static int i82527_compat; 74static int i82527_compat;
75module_param(i82527_compat, int, 0444); 75module_param(i82527_compat, int, 0444);
76MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 comptibility mode " 76MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode "
77 "without using additional functions"); 77 "without using additional functions");
78 78
79/* 79/*
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3c71f1cb205f..3b3f88ffab53 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -477,6 +477,34 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
477} 477}
478EXPORT_SYMBOL_GPL(can_put_echo_skb); 478EXPORT_SYMBOL_GPL(can_put_echo_skb);
479 479
480struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
481{
482 struct can_priv *priv = netdev_priv(dev);
483 struct sk_buff *skb = priv->echo_skb[idx];
484 struct canfd_frame *cf;
485
486 if (idx >= priv->echo_skb_max) {
487 netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
488 __func__, idx, priv->echo_skb_max);
489 return NULL;
490 }
491
492 if (!skb) {
493 netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
494 __func__, idx);
495 return NULL;
496 }
497
498 /* Using "struct canfd_frame::len" for the frame
499 * length is supported on both CAN and CANFD frames.
500 */
501 cf = (struct canfd_frame *)skb->data;
502 *len_ptr = cf->len;
503 priv->echo_skb[idx] = NULL;
504
505 return skb;
506}
507
480/* 508/*
481 * Get the skb from the stack and loop it back locally 509 * Get the skb from the stack and loop it back locally
482 * 510 *
@@ -486,22 +514,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
486 */ 514 */
487unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) 515unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
488{ 516{
489 struct can_priv *priv = netdev_priv(dev); 517 struct sk_buff *skb;
490 518 u8 len;
491 BUG_ON(idx >= priv->echo_skb_max);
492
493 if (priv->echo_skb[idx]) {
494 struct sk_buff *skb = priv->echo_skb[idx];
495 struct can_frame *cf = (struct can_frame *)skb->data;
496 u8 dlc = cf->can_dlc;
497 519
498 netif_rx(priv->echo_skb[idx]); 520 skb = __can_get_echo_skb(dev, idx, &len);
499 priv->echo_skb[idx] = NULL; 521 if (!skb)
522 return 0;
500 523
501 return dlc; 524 netif_rx(skb);
502 }
503 525
504 return 0; 526 return len;
505} 527}
506EXPORT_SYMBOL_GPL(can_get_echo_skb); 528EXPORT_SYMBOL_GPL(can_get_echo_skb);
507 529
@@ -649,8 +671,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
649 can_skb_prv(skb)->ifindex = dev->ifindex; 671 can_skb_prv(skb)->ifindex = dev->ifindex;
650 can_skb_prv(skb)->skbcnt = 0; 672 can_skb_prv(skb)->skbcnt = 0;
651 673
652 *cf = skb_put(skb, sizeof(struct can_frame)); 674 *cf = skb_put_zero(skb, sizeof(struct can_frame));
653 memset(*cf, 0, sizeof(struct can_frame));
654 675
655 return skb; 676 return skb;
656} 677}
@@ -678,8 +699,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
678 can_skb_prv(skb)->ifindex = dev->ifindex; 699 can_skb_prv(skb)->ifindex = dev->ifindex;
679 can_skb_prv(skb)->skbcnt = 0; 700 can_skb_prv(skb)->skbcnt = 0;
680 701
681 *cfd = skb_put(skb, sizeof(struct canfd_frame)); 702 *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
682 memset(*cfd, 0, sizeof(struct canfd_frame));
683 703
684 return skb; 704 return skb;
685} 705}
@@ -703,7 +723,8 @@ EXPORT_SYMBOL_GPL(alloc_can_err_skb);
703/* 723/*
704 * Allocate and setup space for the CAN network device 724 * Allocate and setup space for the CAN network device
705 */ 725 */
706struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) 726struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
727 unsigned int txqs, unsigned int rxqs)
707{ 728{
708 struct net_device *dev; 729 struct net_device *dev;
709 struct can_priv *priv; 730 struct can_priv *priv;
@@ -715,7 +736,8 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
715 else 736 else
716 size = sizeof_priv; 737 size = sizeof_priv;
717 738
718 dev = alloc_netdev(size, "can%d", NET_NAME_UNKNOWN, can_setup); 739 dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
740 txqs, rxqs);
719 if (!dev) 741 if (!dev)
720 return NULL; 742 return NULL;
721 743
@@ -734,7 +756,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
734 756
735 return dev; 757 return dev;
736} 758}
737EXPORT_SYMBOL_GPL(alloc_candev); 759EXPORT_SYMBOL_GPL(alloc_candev_mqs);
738 760
739/* 761/*
740 * Free space of the CAN network device 762 * Free space of the CAN network device
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index d53a45bf2a72..75ce11395ee8 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1,24 +1,13 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * flexcan.c - FLEXCAN CAN controller driver 2//
3 * 3// flexcan.c - FLEXCAN CAN controller driver
4 * Copyright (c) 2005-2006 Varma Electronics Oy 4//
5 * Copyright (c) 2009 Sascha Hauer, Pengutronix 5// Copyright (c) 2005-2006 Varma Electronics Oy
6 * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> 6// Copyright (c) 2009 Sascha Hauer, Pengutronix
7 * Copyright (c) 2014 David Jander, Protonic Holland 7// Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
8 * 8// Copyright (c) 2014 David Jander, Protonic Holland
9 * Based on code originally by Andrey Volkov <avolkov@varma-el.com> 9//
10 * 10// Based on code originally by Andrey Volkov <avolkov@varma-el.com>
11 * LICENCE:
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation version 2.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 */
22 11
23#include <linux/netdevice.h> 12#include <linux/netdevice.h>
24#include <linux/can.h> 13#include <linux/can.h>
@@ -146,13 +135,12 @@
146 135
147/* FLEXCAN interrupt flag register (IFLAG) bits */ 136/* FLEXCAN interrupt flag register (IFLAG) bits */
148/* Errata ERR005829 step7: Reserve first valid MB */ 137/* Errata ERR005829 step7: Reserve first valid MB */
149#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 138#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
150#define FLEXCAN_TX_MB_OFF_FIFO 9
151#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 139#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
152#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1 140#define FLEXCAN_TX_MB 63
153#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1) 141#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
154#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63 142#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST (FLEXCAN_TX_MB - 1)
155#define FLEXCAN_IFLAG_MB(x) BIT(x) 143#define FLEXCAN_IFLAG_MB(x) BIT(x & 0x1f)
156#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) 144#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
157#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) 145#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
158#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) 146#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
@@ -270,9 +258,7 @@ struct flexcan_priv {
270 struct can_rx_offload offload; 258 struct can_rx_offload offload;
271 259
272 struct flexcan_regs __iomem *regs; 260 struct flexcan_regs __iomem *regs;
273 struct flexcan_mb __iomem *tx_mb;
274 struct flexcan_mb __iomem *tx_mb_reserved; 261 struct flexcan_mb __iomem *tx_mb_reserved;
275 u8 tx_mb_idx;
276 u32 reg_ctrl_default; 262 u32 reg_ctrl_default;
277 u32 reg_imask1_default; 263 u32 reg_imask1_default;
278 u32 reg_imask2_default; 264 u32 reg_imask2_default;
@@ -523,9 +509,10 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
523 return err; 509 return err;
524} 510}
525 511
526static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) 512static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
527{ 513{
528 const struct flexcan_priv *priv = netdev_priv(dev); 514 const struct flexcan_priv *priv = netdev_priv(dev);
515 struct flexcan_regs __iomem *regs = priv->regs;
529 struct can_frame *cf = (struct can_frame *)skb->data; 516 struct can_frame *cf = (struct can_frame *)skb->data;
530 u32 can_id; 517 u32 can_id;
531 u32 data; 518 u32 data;
@@ -548,17 +535,17 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
548 535
549 if (cf->can_dlc > 0) { 536 if (cf->can_dlc > 0) {
550 data = be32_to_cpup((__be32 *)&cf->data[0]); 537 data = be32_to_cpup((__be32 *)&cf->data[0]);
551 priv->write(data, &priv->tx_mb->data[0]); 538 priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[0]);
552 } 539 }
553 if (cf->can_dlc > 4) { 540 if (cf->can_dlc > 4) {
554 data = be32_to_cpup((__be32 *)&cf->data[4]); 541 data = be32_to_cpup((__be32 *)&cf->data[4]);
555 priv->write(data, &priv->tx_mb->data[1]); 542 priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[1]);
556 } 543 }
557 544
558 can_put_echo_skb(skb, dev, 0); 545 can_put_echo_skb(skb, dev, 0);
559 546
560 priv->write(can_id, &priv->tx_mb->can_id); 547 priv->write(can_id, &regs->mb[FLEXCAN_TX_MB].can_id);
561 priv->write(ctrl, &priv->tx_mb->can_ctrl); 548 priv->write(ctrl, &regs->mb[FLEXCAN_TX_MB].can_ctrl);
562 549
563 /* Errata ERR005829 step8: 550 /* Errata ERR005829 step8:
564 * Write twice INACTIVE(0x8) code to first MB. 551 * Write twice INACTIVE(0x8) code to first MB.
@@ -574,9 +561,13 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
574static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) 561static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
575{ 562{
576 struct flexcan_priv *priv = netdev_priv(dev); 563 struct flexcan_priv *priv = netdev_priv(dev);
564 struct flexcan_regs __iomem *regs = priv->regs;
577 struct sk_buff *skb; 565 struct sk_buff *skb;
578 struct can_frame *cf; 566 struct can_frame *cf;
579 bool rx_errors = false, tx_errors = false; 567 bool rx_errors = false, tx_errors = false;
568 u32 timestamp;
569
570 timestamp = priv->read(&regs->timer) << 16;
580 571
581 skb = alloc_can_err_skb(dev, &cf); 572 skb = alloc_can_err_skb(dev, &cf);
582 if (unlikely(!skb)) 573 if (unlikely(!skb))
@@ -623,17 +614,21 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
623 if (tx_errors) 614 if (tx_errors)
624 dev->stats.tx_errors++; 615 dev->stats.tx_errors++;
625 616
626 can_rx_offload_irq_queue_err_skb(&priv->offload, skb); 617 can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
627} 618}
628 619
629static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) 620static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
630{ 621{
631 struct flexcan_priv *priv = netdev_priv(dev); 622 struct flexcan_priv *priv = netdev_priv(dev);
623 struct flexcan_regs __iomem *regs = priv->regs;
632 struct sk_buff *skb; 624 struct sk_buff *skb;
633 struct can_frame *cf; 625 struct can_frame *cf;
634 enum can_state new_state, rx_state, tx_state; 626 enum can_state new_state, rx_state, tx_state;
635 int flt; 627 int flt;
636 struct can_berr_counter bec; 628 struct can_berr_counter bec;
629 u32 timestamp;
630
631 timestamp = priv->read(&regs->timer) << 16;
637 632
638 flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; 633 flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
639 if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { 634 if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
@@ -663,7 +658,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
663 if (unlikely(new_state == CAN_STATE_BUS_OFF)) 658 if (unlikely(new_state == CAN_STATE_BUS_OFF))
664 can_bus_off(dev); 659 can_bus_off(dev);
665 660
666 can_rx_offload_irq_queue_err_skb(&priv->offload, skb); 661 can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
667} 662}
668 663
669static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) 664static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -731,9 +726,14 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
731 priv->write(BIT(n - 32), &regs->iflag2); 726 priv->write(BIT(n - 32), &regs->iflag2);
732 } else { 727 } else {
733 priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1); 728 priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
734 priv->read(&regs->timer);
735 } 729 }
736 730
731 /* Read the Free Running Timer. It is optional but recommended
732 * to unlock Mailbox as soon as possible and make it available
733 * for reception.
734 */
735 priv->read(&regs->timer);
736
737 return 1; 737 return 1;
738} 738}
739 739
@@ -743,9 +743,9 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
743 struct flexcan_regs __iomem *regs = priv->regs; 743 struct flexcan_regs __iomem *regs = priv->regs;
744 u32 iflag1, iflag2; 744 u32 iflag1, iflag2;
745 745
746 iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default; 746 iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default &
747 iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default & 747 ~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
748 ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); 748 iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default;
749 749
750 return (u64)iflag2 << 32 | iflag1; 750 return (u64)iflag2 << 32 | iflag1;
751} 751}
@@ -757,11 +757,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
757 struct flexcan_priv *priv = netdev_priv(dev); 757 struct flexcan_priv *priv = netdev_priv(dev);
758 struct flexcan_regs __iomem *regs = priv->regs; 758 struct flexcan_regs __iomem *regs = priv->regs;
759 irqreturn_t handled = IRQ_NONE; 759 irqreturn_t handled = IRQ_NONE;
760 u32 reg_iflag1, reg_esr; 760 u32 reg_iflag2, reg_esr;
761 enum can_state last_state = priv->can.state; 761 enum can_state last_state = priv->can.state;
762 762
763 reg_iflag1 = priv->read(&regs->iflag1);
764
765 /* reception interrupt */ 763 /* reception interrupt */
766 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 764 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
767 u64 reg_iflag; 765 u64 reg_iflag;
@@ -775,6 +773,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
775 break; 773 break;
776 } 774 }
777 } else { 775 } else {
776 u32 reg_iflag1;
777
778 reg_iflag1 = priv->read(&regs->iflag1);
778 if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) { 779 if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
779 handled = IRQ_HANDLED; 780 handled = IRQ_HANDLED;
780 can_rx_offload_irq_offload_fifo(&priv->offload); 781 can_rx_offload_irq_offload_fifo(&priv->offload);
@@ -790,17 +791,22 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
790 } 791 }
791 } 792 }
792 793
794 reg_iflag2 = priv->read(&regs->iflag2);
795
793 /* transmission complete interrupt */ 796 /* transmission complete interrupt */
794 if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { 797 if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) {
798 u32 reg_ctrl = priv->read(&regs->mb[FLEXCAN_TX_MB].can_ctrl);
799
795 handled = IRQ_HANDLED; 800 handled = IRQ_HANDLED;
796 stats->tx_bytes += can_get_echo_skb(dev, 0); 801 stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
802 0, reg_ctrl << 16);
797 stats->tx_packets++; 803 stats->tx_packets++;
798 can_led_event(dev, CAN_LED_EVENT_TX); 804 can_led_event(dev, CAN_LED_EVENT_TX);
799 805
800 /* after sending a RTR frame MB is in RX mode */ 806 /* after sending a RTR frame MB is in RX mode */
801 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, 807 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
802 &priv->tx_mb->can_ctrl); 808 &regs->mb[FLEXCAN_TX_MB].can_ctrl);
803 priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1); 809 priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), &regs->iflag2);
804 netif_wake_queue(dev); 810 netif_wake_queue(dev);
805 } 811 }
806 812
@@ -942,15 +948,13 @@ static int flexcan_chip_start(struct net_device *dev)
942 reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); 948 reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
943 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | 949 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
944 FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ | 950 FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
945 FLEXCAN_MCR_IDAM_C; 951 FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB);
946 952
947 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 953 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
948 reg_mcr &= ~FLEXCAN_MCR_FEN; 954 reg_mcr &= ~FLEXCAN_MCR_FEN;
949 reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last); 955 else
950 } else { 956 reg_mcr |= FLEXCAN_MCR_FEN;
951 reg_mcr |= FLEXCAN_MCR_FEN | 957
952 FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
953 }
954 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); 958 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
955 priv->write(reg_mcr, &regs->mcr); 959 priv->write(reg_mcr, &regs->mcr);
956 960
@@ -993,16 +997,17 @@ static int flexcan_chip_start(struct net_device *dev)
993 priv->write(reg_ctrl2, &regs->ctrl2); 997 priv->write(reg_ctrl2, &regs->ctrl2);
994 } 998 }
995 999
996 /* clear and invalidate all mailboxes first */
997 for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
998 priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
999 &regs->mb[i].can_ctrl);
1000 }
1001
1002 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 1000 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
1003 for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) 1001 for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
1004 priv->write(FLEXCAN_MB_CODE_RX_EMPTY, 1002 priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
1005 &regs->mb[i].can_ctrl); 1003 &regs->mb[i].can_ctrl);
1004 }
1005 } else {
1006 /* clear and invalidate unused mailboxes first */
1007 for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) {
1008 priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
1009 &regs->mb[i].can_ctrl);
1010 }
1006 } 1011 }
1007 1012
1008 /* Errata ERR005829: mark first TX mailbox as INACTIVE */ 1013 /* Errata ERR005829: mark first TX mailbox as INACTIVE */
@@ -1011,7 +1016,7 @@ static int flexcan_chip_start(struct net_device *dev)
1011 1016
1012 /* mark TX mailbox as INACTIVE */ 1017 /* mark TX mailbox as INACTIVE */
1013 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, 1018 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
1014 &priv->tx_mb->can_ctrl); 1019 &regs->mb[FLEXCAN_TX_MB].can_ctrl);
1015 1020
1016 /* acceptance mask/acceptance code (accept everything) */ 1021 /* acceptance mask/acceptance code (accept everything) */
1017 priv->write(0x0, &regs->rxgmask); 1022 priv->write(0x0, &regs->rxgmask);
@@ -1366,17 +1371,13 @@ static int flexcan_probe(struct platform_device *pdev)
1366 priv->devtype_data = devtype_data; 1371 priv->devtype_data = devtype_data;
1367 priv->reg_xceiver = reg_xceiver; 1372 priv->reg_xceiver = reg_xceiver;
1368 1373
1369 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 1374 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
1370 priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP;
1371 priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP]; 1375 priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
1372 } else { 1376 else
1373 priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO;
1374 priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; 1377 priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
1375 }
1376 priv->tx_mb = &regs->mb[priv->tx_mb_idx];
1377 1378
1378 priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); 1379 priv->reg_imask1_default = 0;
1379 priv->reg_imask2_default = 0; 1380 priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
1380 1381
1381 priv->offload.mailbox_read = flexcan_mailbox_read; 1382 priv->offload.mailbox_read = flexcan_mailbox_read;
1382 1383
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 2d3046afa80d..7eec1d9f86a0 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1057,7 +1057,7 @@ static int grcan_open(struct net_device *dev)
1057 return err; 1057 return err;
1058 } 1058 }
1059 1059
1060 priv->echo_skb = kzalloc(dma->tx.size * sizeof(*priv->echo_skb), 1060 priv->echo_skb = kcalloc(dma->tx.size, sizeof(*priv->echo_skb),
1061 GFP_KERNEL); 1061 GFP_KERNEL);
1062 if (!priv->echo_skb) { 1062 if (!priv->echo_skb) {
1063 err = -ENOMEM; 1063 err = -ENOMEM;
@@ -1066,7 +1066,7 @@ static int grcan_open(struct net_device *dev)
1066 priv->can.echo_skb_max = dma->tx.size; 1066 priv->can.echo_skb_max = dma->tx.size;
1067 priv->can.echo_skb = priv->echo_skb; 1067 priv->can.echo_skb = priv->echo_skb;
1068 1068
1069 priv->txdlc = kzalloc(dma->tx.size * sizeof(*priv->txdlc), GFP_KERNEL); 1069 priv->txdlc = kcalloc(dma->tx.size, sizeof(*priv->txdlc), GFP_KERNEL);
1070 if (!priv->txdlc) { 1070 if (!priv->txdlc) {
1071 err = -ENOMEM; 1071 err = -ENOMEM;
1072 goto exit_free_echo_skb; 1072 goto exit_free_echo_skb;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index adfdb66a486e..02042cb09bd2 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1684,7 +1684,7 @@ static int ican3_stop(struct net_device *ndev)
1684 return 0; 1684 return 0;
1685} 1685}
1686 1686
1687static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev) 1687static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
1688{ 1688{
1689 struct ican3_dev *mod = netdev_priv(ndev); 1689 struct ican3_dev *mod = netdev_priv(ndev);
1690 struct can_frame *cf = (struct can_frame *)skb->data; 1690 struct can_frame *cf = (struct can_frame *)skb->data;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index b397a33f3d32..9b449400376b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
634 int err; 634 int err;
635 635
636 err = pm_runtime_get_sync(priv->device); 636 err = pm_runtime_get_sync(priv->device);
637 if (err) 637 if (err < 0) {
638 pm_runtime_put_noidle(priv->device); 638 pm_runtime_put_noidle(priv->device);
639 return err;
640 }
639 641
640 return err; 642 return 0;
641} 643}
642 644
643static void m_can_clk_stop(struct m_can_priv *priv) 645static void m_can_clk_stop(struct m_can_priv *priv)
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
1109 1111
1110 } else { 1112 } else {
1111 /* Version 3.1.x or 3.2.x */ 1113 /* Version 3.1.x or 3.2.x */
1112 cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE); 1114 cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
1115 CCCR_NISO);
1113 1116
1114 /* Only 3.2.x has NISO Bit implemented */ 1117 /* Only 3.2.x has NISO Bit implemented */
1115 if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) 1118 if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
1642 priv->can.clock.freq = clk_get_rate(cclk); 1645 priv->can.clock.freq = clk_get_rate(cclk);
1643 priv->mram_base = mram_addr; 1646 priv->mram_base = mram_addr;
1644 1647
1645 m_can_of_parse_mram(priv, mram_config_vals);
1646
1647 platform_set_drvdata(pdev, dev); 1648 platform_set_drvdata(pdev, dev);
1648 SET_NETDEV_DEV(dev, &pdev->dev); 1649 SET_NETDEV_DEV(dev, &pdev->dev);
1649 1650
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
1666 goto clk_disable; 1667 goto clk_disable;
1667 } 1668 }
1668 1669
1670 m_can_of_parse_mram(priv, mram_config_vals);
1671
1669 devm_can_led_init(dev); 1672 devm_can_led_init(dev);
1670 1673
1671 of_can_transceiver(dev); 1674 of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ failed_ret:
1687 return ret; 1690 return ret;
1688} 1691}
1689 1692
1690/* TODO: runtime PM with power down or sleep mode */
1691
1692static __maybe_unused int m_can_suspend(struct device *dev) 1693static __maybe_unused int m_can_suspend(struct device *dev)
1693{ 1694{
1694 struct net_device *ndev = dev_get_drvdata(dev); 1695 struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
1715 1716
1716 pinctrl_pm_select_default_state(dev); 1717 pinctrl_pm_select_default_state(dev);
1717 1718
1718 m_can_init_ram(priv);
1719
1720 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1719 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1721 1720
1722 if (netif_running(ndev)) { 1721 if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
1726 if (ret) 1725 if (ret)
1727 return ret; 1726 return ret;
1728 1727
1728 m_can_init_ram(priv);
1729 m_can_start(ndev); 1729 m_can_start(ndev);
1730 netif_device_attach(ndev); 1730 netif_device_attach(ndev);
1731 netif_start_queue(ndev); 1731 netif_start_queue(ndev);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c7427bdd3a4b..2949a381a94d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
86 return 0; 86 return 0;
87 } 87 }
88 cdm = of_iomap(np_cdm, 0); 88 cdm = of_iomap(np_cdm, 0);
89 if (!cdm) {
90 of_node_put(np_cdm);
91 dev_err(&ofdev->dev, "can't map clock node!\n");
92 return 0;
93 }
89 94
90 if (in_8(&cdm->ipb_clk_sel) & 0x1) 95 if (in_8(&cdm->ipb_clk_sel) & 0x1)
91 freq *= 2; 96 freq *= 2;
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index ed8561d4a90f..5696d7e80751 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -486,7 +486,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
486 if (msg_size <= 0) 486 if (msg_size <= 0)
487 break; 487 break;
488 488
489 msg_ptr += msg_size; 489 msg_ptr += ALIGN(msg_size, 4);
490 } 490 }
491 491
492 if (msg_size < 0) 492 if (msg_size < 0)
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 3c51a884db87..c458d5fdc8d3 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
58#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */ 58#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */
59#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */ 59#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */
60 60
61#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \
62 ((u32)(y) << 16) | \
63 ((u32)(z) << 8))
64
61/* System Control Registers Bits */ 65/* System Control Registers Bits */
62#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */ 66#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */
63#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */ 67#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */
@@ -170,9 +174,6 @@ struct pciefd_page {
170 u32 size; 174 u32 size;
171}; 175};
172 176
173#define CANFD_IRQ_SET 0x00000001
174#define CANFD_TX_PATH_SET 0x00000002
175
176/* CAN-FD channel object */ 177/* CAN-FD channel object */
177struct pciefd_board; 178struct pciefd_board;
178struct pciefd_can { 179struct pciefd_can {
@@ -414,7 +415,7 @@ static int pciefd_pre_cmd(struct peak_canfd_priv *ucan)
414 break; 415 break;
415 416
416 /* going into operational mode: setup IRQ handler */ 417 /* going into operational mode: setup IRQ handler */
417 err = request_irq(priv->board->pci_dev->irq, 418 err = request_irq(priv->ucan.ndev->irq,
418 pciefd_irq_handler, 419 pciefd_irq_handler,
419 IRQF_SHARED, 420 IRQF_SHARED,
420 PCIEFD_DRV_NAME, 421 PCIEFD_DRV_NAME,
@@ -487,15 +488,18 @@ static int pciefd_post_cmd(struct peak_canfd_priv *ucan)
487 488
488 /* controller now in reset mode: */ 489 /* controller now in reset mode: */
489 490
491 /* disable IRQ for this CAN */
492 pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
493 PCIEFD_REG_CAN_RX_CTL_CLR);
494
490 /* stop and reset DMA addresses in Tx/Rx engines */ 495 /* stop and reset DMA addresses in Tx/Rx engines */
491 pciefd_can_clear_tx_dma(priv); 496 pciefd_can_clear_tx_dma(priv);
492 pciefd_can_clear_rx_dma(priv); 497 pciefd_can_clear_rx_dma(priv);
493 498
494 /* disable IRQ for this CAN */ 499 /* wait for above commands to complete (read cycle) */
495 pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT, 500 (void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1);
496 PCIEFD_REG_CAN_RX_CTL_CLR);
497 501
498 free_irq(priv->board->pci_dev->irq, priv); 502 free_irq(priv->ucan.ndev->irq, priv);
499 503
500 ucan->can.state = CAN_STATE_STOPPED; 504 ucan->can.state = CAN_STATE_STOPPED;
501 505
@@ -634,7 +638,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd)
634 GFP_KERNEL); 638 GFP_KERNEL);
635 if (!priv->tx_dma_vaddr) { 639 if (!priv->tx_dma_vaddr) {
636 dev_err(&pciefd->pci_dev->dev, 640 dev_err(&pciefd->pci_dev->dev,
637 "Tx dmaim_alloc_coherent(%u) failure\n", 641 "Tx dmam_alloc_coherent(%u) failure\n",
638 PCIEFD_TX_DMA_SIZE); 642 PCIEFD_TX_DMA_SIZE);
639 goto err_free_candev; 643 goto err_free_candev;
640 } 644 }
@@ -687,7 +691,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd)
687 pciefd->can[pciefd->can_count] = priv; 691 pciefd->can[pciefd->can_count] = priv;
688 692
689 dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n", 693 dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n",
690 ndev->name, priv->reg_base, pciefd->pci_dev->irq); 694 ndev->name, priv->reg_base, ndev->irq);
691 695
692 return 0; 696 return 0;
693 697
@@ -752,8 +756,7 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
752 can_count = 1; 756 can_count = 1;
753 757
754 /* allocate board structure object */ 758 /* allocate board structure object */
755 pciefd = devm_kzalloc(&pdev->dev, sizeof(*pciefd) + 759 pciefd = devm_kzalloc(&pdev->dev, struct_size(pciefd, can, can_count),
756 can_count * sizeof(*pciefd->can),
757 GFP_KERNEL); 760 GFP_KERNEL);
758 if (!pciefd) { 761 if (!pciefd) {
759 err = -ENOMEM; 762 err = -ENOMEM;
@@ -783,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
783 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count, 786 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
784 hw_ver_major, hw_ver_minor, hw_ver_sub); 787 hw_ver_major, hw_ver_minor, hw_ver_sub);
785 788
789#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
790 /* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and
791 * 64-bit logical addresses: this workaround forces usage of 32-bit
792 * DMA addresses only when such a fw is detected.
793 */
794 if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
795 PCIEFD_FW_VERSION(3, 3, 0)) {
796 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
797 if (err)
798 dev_warn(&pdev->dev,
799 "warning: can't set DMA mask %llxh (err %d)\n",
800 DMA_BIT_MASK(32), err);
801 }
802#endif
803
786 /* stop system clock */ 804 /* stop system clock */
787 pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, 805 pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
788 PCIEFD_REG_SYS_CTL_CLR); 806 PCIEFD_REG_SYS_CTL_CLR);
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 11662f479e76..771a46083739 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -24,6 +24,9 @@
24 24
25#define RCAR_CAN_DRV_NAME "rcar_can" 25#define RCAR_CAN_DRV_NAME "rcar_can"
26 26
27#define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
28 BIT(CLKR_CLKEXT))
29
27/* Mailbox configuration: 30/* Mailbox configuration:
28 * mailbox 60 - 63 - Rx FIFO mailboxes 31 * mailbox 60 - 63 - Rx FIFO mailboxes
29 * mailbox 56 - 59 - Tx FIFO mailboxes 32 * mailbox 56 - 59 - Tx FIFO mailboxes
@@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev)
789 goto fail_clk; 792 goto fail_clk;
790 } 793 }
791 794
792 if (clock_select >= ARRAY_SIZE(clock_names)) { 795 if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
793 err = -EINVAL; 796 err = -EINVAL;
794 dev_err(&pdev->dev, "invalid CAN clock selected\n"); 797 dev_err(&pdev->dev, "invalid CAN clock selected\n");
795 goto fail_clk; 798 goto fail_clk;
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index d94dae216820..2ce4fa8698c7 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -79,7 +79,7 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
79static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new, 79static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
80 int (*compare)(struct sk_buff *a, struct sk_buff *b)) 80 int (*compare)(struct sk_buff *a, struct sk_buff *b))
81{ 81{
82 struct sk_buff *pos, *insert = (struct sk_buff *)head; 82 struct sk_buff *pos, *insert = NULL;
83 83
84 skb_queue_reverse_walk(head, pos) { 84 skb_queue_reverse_walk(head, pos) {
85 const struct can_rx_offload_cb *cb_pos, *cb_new; 85 const struct can_rx_offload_cb *cb_pos, *cb_new;
@@ -99,8 +99,10 @@ static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buf
99 insert = pos; 99 insert = pos;
100 break; 100 break;
101 } 101 }
102 102 if (!insert)
103 __skb_queue_after(head, insert, new); 103 __skb_queue_head(head, new);
104 else
105 __skb_queue_after(head, insert, new);
104} 106}
105 107
106static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b) 108static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
@@ -209,7 +211,54 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
209} 211}
210EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); 212EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
211 213
212int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb) 214int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
215 struct sk_buff *skb, u32 timestamp)
216{
217 struct can_rx_offload_cb *cb;
218 unsigned long flags;
219
220 if (skb_queue_len(&offload->skb_queue) >
221 offload->skb_queue_len_max)
222 return -ENOMEM;
223
224 cb = can_rx_offload_get_cb(skb);
225 cb->timestamp = timestamp;
226
227 spin_lock_irqsave(&offload->skb_queue.lock, flags);
228 __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
229 spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
230
231 can_rx_offload_schedule(offload);
232
233 return 0;
234}
235EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
236
237unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
238 unsigned int idx, u32 timestamp)
239{
240 struct net_device *dev = offload->dev;
241 struct net_device_stats *stats = &dev->stats;
242 struct sk_buff *skb;
243 u8 len;
244 int err;
245
246 skb = __can_get_echo_skb(dev, idx, &len);
247 if (!skb)
248 return 0;
249
250 err = can_rx_offload_queue_sorted(offload, skb, timestamp);
251 if (err) {
252 stats->rx_errors++;
253 stats->tx_fifo_errors++;
254 }
255
256 return len;
257}
258EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
259
260int can_rx_offload_queue_tail(struct can_rx_offload *offload,
261 struct sk_buff *skb)
213{ 262{
214 if (skb_queue_len(&offload->skb_queue) > 263 if (skb_queue_len(&offload->skb_queue) >
215 offload->skb_queue_len_max) 264 offload->skb_queue_len_max)
@@ -220,7 +269,7 @@ int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_b
220 269
221 return 0; 270 return 0;
222} 271}
223EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb); 272EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
224 273
225static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) 274static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
226{ 275{
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 5adc95c922ee..a97b81d1d0da 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -608,7 +608,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
608 writeb(0x00, cfg_base + PITA_GPIOICR); 608 writeb(0x00, cfg_base + PITA_GPIOICR);
609 /* Toggle reset */ 609 /* Toggle reset */
610 writeb(0x05, cfg_base + PITA_MISC + 3); 610 writeb(0x05, cfg_base + PITA_MISC + 3);
611 mdelay(5); 611 usleep_range(5000, 6000);
612 /* Leave parport mux mode */ 612 /* Leave parport mux mode */
613 writeb(0x04, cfg_base + PITA_MISC + 3); 613 writeb(0x04, cfg_base + PITA_MISC + 3);
614 614
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 485b19c9ae47..b8c39ede7cd5 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -530,7 +530,7 @@ static int pcan_add_channels(struct pcan_pccard *card)
530 pcan_write_reg(card, PCC_CCR, ccr); 530 pcan_write_reg(card, PCC_CCR, ccr);
531 531
532 /* wait 2ms before unresetting channels */ 532 /* wait 2ms before unresetting channels */
533 mdelay(2); 533 usleep_range(2000, 3000);
534 534
535 ccr &= ~PCC_CCR_RST_ALL; 535 ccr &= ~PCC_CCR_RST_ALL;
536 pcan_write_reg(card, PCC_CCR, ccr); 536 pcan_write_reg(card, PCC_CCR, ccr);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 89d60d8e467c..aa97dbc797b6 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -703,7 +703,7 @@ static int __init slcan_init(void)
703 pr_info("slcan: serial line CAN interface driver\n"); 703 pr_info("slcan: serial line CAN interface driver\n");
704 pr_info("slcan: %d dynamic interface channels.\n", maxdev); 704 pr_info("slcan: %d dynamic interface channels.\n", maxdev);
705 705
706 slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL); 706 slcan_devs = kcalloc(maxdev, sizeof(struct net_device *), GFP_KERNEL);
707 if (!slcan_devs) 707 if (!slcan_devs)
708 return -ENOMEM; 708 return -ENOMEM;
709 709
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 53e320c92a8b..ddaf46239e39 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -760,7 +760,7 @@ static int hi3110_open(struct net_device *net)
760{ 760{
761 struct hi3110_priv *priv = netdev_priv(net); 761 struct hi3110_priv *priv = netdev_priv(net);
762 struct spi_device *spi = priv->spi; 762 struct spi_device *spi = priv->spi;
763 unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING; 763 unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH;
764 int ret; 764 int ret;
765 765
766 ret = open_candev(net); 766 ret = open_candev(net);
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 1ac2090a1721..093fc9a529f0 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -409,7 +409,7 @@ static int sun4ican_set_mode(struct net_device *dev, enum can_mode mode)
409 * xx xx xx xx ff ll 00 11 22 33 44 55 66 77 409 * xx xx xx xx ff ll 00 11 22 33 44 55 66 77
410 * [ can_id ] [flags] [len] [can data (up to 8 bytes] 410 * [ can_id ] [flags] [len] [can data (up to 8 bytes]
411 */ 411 */
412static int sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev) 412static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev)
413{ 413{
414 struct sun4ican_priv *priv = netdev_priv(dev); 414 struct sun4ican_priv *priv = netdev_priv(dev);
415 struct can_frame *cf = (struct can_frame *)skb->data; 415 struct can_frame *cf = (struct can_frame *)skb->data;
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index c36f4bdcbf4f..750d04d9e2ae 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -1,6 +1,12 @@
1menu "CAN USB interfaces" 1menu "CAN USB interfaces"
2 depends on USB 2 depends on USB
3 3
4config CAN_8DEV_USB
5 tristate "8 devices USB2CAN interface"
6 ---help---
7 This driver supports the USB2CAN interface
8 from 8 devices (http://www.8devices.com).
9
4config CAN_EMS_USB 10config CAN_EMS_USB
5 tristate "EMS CPC-USB/ARM7 CAN/USB interface" 11 tristate "EMS CPC-USB/ARM7 CAN/USB interface"
6 ---help--- 12 ---help---
@@ -26,7 +32,7 @@ config CAN_KVASER_USB
26 tristate "Kvaser CAN/USB interface" 32 tristate "Kvaser CAN/USB interface"
27 ---help--- 33 ---help---
28 This driver adds support for Kvaser CAN/USB devices like Kvaser 34 This driver adds support for Kvaser CAN/USB devices like Kvaser
29 Leaf Light and Kvaser USBcan II. 35 Leaf Light, Kvaser USBcan II and Kvaser Memorator Pro 5xHS.
30 36
31 The driver provides support for the following devices: 37 The driver provides support for the following devices:
32 - Kvaser Leaf Light 38 - Kvaser Leaf Light
@@ -55,12 +61,30 @@ config CAN_KVASER_USB
55 - Kvaser Memorator HS/HS 61 - Kvaser Memorator HS/HS
56 - Kvaser Memorator HS/LS 62 - Kvaser Memorator HS/LS
57 - Scania VCI2 (if you have the Kvaser logo on top) 63 - Scania VCI2 (if you have the Kvaser logo on top)
64 - Kvaser BlackBird v2
65 - Kvaser Leaf Pro HS v2
66 - Kvaser Hybrid 2xCAN/LIN
67 - Kvaser Hybrid Pro 2xCAN/LIN
68 - Kvaser Memorator 2xHS v2
69 - Kvaser Memorator Pro 2xHS v2
70 - Kvaser Memorator Pro 5xHS
71 - Kvaser USBcan Light 4xHS
72 - Kvaser USBcan Pro 2xHS v2
73 - Kvaser USBcan Pro 5xHS
74 - ATI Memorator Pro 2xHS v2
75 - ATI USBcan Pro 2xHS v2
58 76
59 If unsure, say N. 77 If unsure, say N.
60 78
61 To compile this driver as a module, choose M here: the 79 To compile this driver as a module, choose M here: the
62 module will be called kvaser_usb. 80 module will be called kvaser_usb.
63 81
82config CAN_MCBA_USB
83 tristate "Microchip CAN BUS Analyzer interface"
84 ---help---
85 This driver supports the CAN BUS Analyzer interface
86 from Microchip (http://www.microchip.com/development-tools/).
87
64config CAN_PEAK_USB 88config CAN_PEAK_USB
65 tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD" 89 tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
66 ---help--- 90 ---help---
@@ -77,16 +101,26 @@ config CAN_PEAK_USB
77 101
78 (see also http://www.peak-system.com). 102 (see also http://www.peak-system.com).
79 103
80config CAN_8DEV_USB
81 tristate "8 devices USB2CAN interface"
82 ---help---
83 This driver supports the USB2CAN interface
84 from 8 devices (http://www.8devices.com).
85
86config CAN_MCBA_USB 104config CAN_MCBA_USB
87 tristate "Microchip CAN BUS Analyzer interface" 105 tristate "Microchip CAN BUS Analyzer interface"
88 ---help--- 106 ---help---
89 This driver supports the CAN BUS Analyzer interface 107 This driver supports the CAN BUS Analyzer interface
90 from Microchip (http://www.microchip.com/development-tools/). 108 from Microchip (http://www.microchip.com/development-tools/).
91 109
110config CAN_UCAN
111 tristate "Theobroma Systems UCAN interface"
112 ---help---
113 This driver supports the Theobroma Systems
114 UCAN USB-CAN interface.
115
116 The UCAN driver supports the microcontroller-based USB/CAN
117 adapters from Theobroma Systems. There are two form-factors
118 that run essentially the same firmware:
119
120 * Seal: standalone USB stick
121 https://www.theobroma-systems.com/seal)
122 * Mule: integrated on the PCB of various System-on-Modules
123 from Theobroma Systems like the A31-µQ7 and the RK3399-Q7
124 (https://www.theobroma-systems.com/rk3399-q7)
125
92endmenu 126endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 49ac7b99ba32..aa0f17c0b2ed 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -3,10 +3,11 @@
3# Makefile for the Linux Controller Area Network USB drivers. 3# Makefile for the Linux Controller Area Network USB drivers.
4# 4#
5 5
6obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
6obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o 7obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
7obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o 8obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
8obj-$(CONFIG_CAN_GS_USB) += gs_usb.o 9obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
9obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o 10obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
10obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
11obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
12obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o 11obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o
12obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
13obj-$(CONFIG_CAN_UCAN) += ucan.o
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 12ff0020ecd6..b7dfd4109d24 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1072,6 +1072,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
1072 usb_free_urb(dev->intr_urb); 1072 usb_free_urb(dev->intr_urb);
1073 1073
1074 kfree(dev->intr_in_buffer); 1074 kfree(dev->intr_in_buffer);
1075 kfree(dev->tx_msg_buffer);
1075 } 1076 }
1076} 1077}
1077 1078
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
deleted file mode 100644
index daed57d3d209..000000000000
--- a/drivers/net/can/usb/kvaser_usb.c
+++ /dev/null
@@ -1,2085 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License as
4 * published by the Free Software Foundation version 2.
5 *
6 * Parts of this driver are based on the following:
7 * - Kvaser linux leaf driver (version 4.78)
8 * - CAN driver for esd CAN-USB/2
9 * - Kvaser linux usbcanII driver (version 5.3)
10 *
11 * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved.
12 * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
13 * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
14 * Copyright (C) 2015 Valeo S.A.
15 */
16
17#include <linux/spinlock.h>
18#include <linux/kernel.h>
19#include <linux/completion.h>
20#include <linux/module.h>
21#include <linux/netdevice.h>
22#include <linux/usb.h>
23
24#include <linux/can.h>
25#include <linux/can/dev.h>
26#include <linux/can/error.h>
27
28#define MAX_RX_URBS 4
29#define START_TIMEOUT 1000 /* msecs */
30#define STOP_TIMEOUT 1000 /* msecs */
31#define USB_SEND_TIMEOUT 1000 /* msecs */
32#define USB_RECV_TIMEOUT 1000 /* msecs */
33#define RX_BUFFER_SIZE 3072
34#define CAN_USB_CLOCK 8000000
35#define MAX_NET_DEVICES 3
36#define MAX_USBCAN_NET_DEVICES 2
37
38/* Kvaser Leaf USB devices */
39#define KVASER_VENDOR_ID 0x0bfd
40#define USB_LEAF_DEVEL_PRODUCT_ID 10
41#define USB_LEAF_LITE_PRODUCT_ID 11
42#define USB_LEAF_PRO_PRODUCT_ID 12
43#define USB_LEAF_SPRO_PRODUCT_ID 14
44#define USB_LEAF_PRO_LS_PRODUCT_ID 15
45#define USB_LEAF_PRO_SWC_PRODUCT_ID 16
46#define USB_LEAF_PRO_LIN_PRODUCT_ID 17
47#define USB_LEAF_SPRO_LS_PRODUCT_ID 18
48#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19
49#define USB_MEMO2_DEVEL_PRODUCT_ID 22
50#define USB_MEMO2_HSHS_PRODUCT_ID 23
51#define USB_UPRO_HSHS_PRODUCT_ID 24
52#define USB_LEAF_LITE_GI_PRODUCT_ID 25
53#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26
54#define USB_MEMO2_HSLS_PRODUCT_ID 27
55#define USB_LEAF_LITE_CH_PRODUCT_ID 28
56#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29
57#define USB_OEM_MERCURY_PRODUCT_ID 34
58#define USB_OEM_LEAF_PRODUCT_ID 35
59#define USB_CAN_R_PRODUCT_ID 39
60#define USB_LEAF_LITE_V2_PRODUCT_ID 288
61#define USB_MINI_PCIE_HS_PRODUCT_ID 289
62#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
63#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
64#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
65
66static inline bool kvaser_is_leaf(const struct usb_device_id *id)
67{
68 return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
69 id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
70}
71
72/* Kvaser USBCan-II devices */
73#define USB_USBCAN_REVB_PRODUCT_ID 2
74#define USB_VCI2_PRODUCT_ID 3
75#define USB_USBCAN2_PRODUCT_ID 4
76#define USB_MEMORATOR_PRODUCT_ID 5
77
78static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
79{
80 return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
81 id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
82}
83
84/* USB devices features */
85#define KVASER_HAS_SILENT_MODE BIT(0)
86#define KVASER_HAS_TXRX_ERRORS BIT(1)
87
88/* Message header size */
89#define MSG_HEADER_LEN 2
90
91/* Can message flags */
92#define MSG_FLAG_ERROR_FRAME BIT(0)
93#define MSG_FLAG_OVERRUN BIT(1)
94#define MSG_FLAG_NERR BIT(2)
95#define MSG_FLAG_WAKEUP BIT(3)
96#define MSG_FLAG_REMOTE_FRAME BIT(4)
97#define MSG_FLAG_RESERVED BIT(5)
98#define MSG_FLAG_TX_ACK BIT(6)
99#define MSG_FLAG_TX_REQUEST BIT(7)
100
101/* Can states (M16C CxSTRH register) */
102#define M16C_STATE_BUS_RESET BIT(0)
103#define M16C_STATE_BUS_ERROR BIT(4)
104#define M16C_STATE_BUS_PASSIVE BIT(5)
105#define M16C_STATE_BUS_OFF BIT(6)
106
107/* Can msg ids */
108#define CMD_RX_STD_MESSAGE 12
109#define CMD_TX_STD_MESSAGE 13
110#define CMD_RX_EXT_MESSAGE 14
111#define CMD_TX_EXT_MESSAGE 15
112#define CMD_SET_BUS_PARAMS 16
113#define CMD_GET_BUS_PARAMS 17
114#define CMD_GET_BUS_PARAMS_REPLY 18
115#define CMD_GET_CHIP_STATE 19
116#define CMD_CHIP_STATE_EVENT 20
117#define CMD_SET_CTRL_MODE 21
118#define CMD_GET_CTRL_MODE 22
119#define CMD_GET_CTRL_MODE_REPLY 23
120#define CMD_RESET_CHIP 24
121#define CMD_RESET_CARD 25
122#define CMD_START_CHIP 26
123#define CMD_START_CHIP_REPLY 27
124#define CMD_STOP_CHIP 28
125#define CMD_STOP_CHIP_REPLY 29
126
127#define CMD_LEAF_GET_CARD_INFO2 32
128#define CMD_USBCAN_RESET_CLOCK 32
129#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33
130
131#define CMD_GET_CARD_INFO 34
132#define CMD_GET_CARD_INFO_REPLY 35
133#define CMD_GET_SOFTWARE_INFO 38
134#define CMD_GET_SOFTWARE_INFO_REPLY 39
135#define CMD_ERROR_EVENT 45
136#define CMD_FLUSH_QUEUE 48
137#define CMD_RESET_ERROR_COUNTER 49
138#define CMD_TX_ACKNOWLEDGE 50
139#define CMD_CAN_ERROR_EVENT 51
140#define CMD_FLUSH_QUEUE_REPLY 68
141
142#define CMD_LEAF_USB_THROTTLE 77
143#define CMD_LEAF_LOG_MESSAGE 106
144
145/* error factors */
146#define M16C_EF_ACKE BIT(0)
147#define M16C_EF_CRCE BIT(1)
148#define M16C_EF_FORME BIT(2)
149#define M16C_EF_STFE BIT(3)
150#define M16C_EF_BITE0 BIT(4)
151#define M16C_EF_BITE1 BIT(5)
152#define M16C_EF_RCVE BIT(6)
153#define M16C_EF_TRE BIT(7)
154
155/* Only Leaf-based devices can report M16C error factors,
156 * thus define our own error status flags for USBCANII
157 */
158#define USBCAN_ERROR_STATE_NONE 0
159#define USBCAN_ERROR_STATE_TX_ERROR BIT(0)
160#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
161#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
162
163/* bittiming parameters */
164#define KVASER_USB_TSEG1_MIN 1
165#define KVASER_USB_TSEG1_MAX 16
166#define KVASER_USB_TSEG2_MIN 1
167#define KVASER_USB_TSEG2_MAX 8
168#define KVASER_USB_SJW_MAX 4
169#define KVASER_USB_BRP_MIN 1
170#define KVASER_USB_BRP_MAX 64
171#define KVASER_USB_BRP_INC 1
172
173/* ctrl modes */
174#define KVASER_CTRL_MODE_NORMAL 1
175#define KVASER_CTRL_MODE_SILENT 2
176#define KVASER_CTRL_MODE_SELFRECEPTION 3
177#define KVASER_CTRL_MODE_OFF 4
178
179/* Extended CAN identifier flag */
180#define KVASER_EXTENDED_FRAME BIT(31)
181
182/* Kvaser USB CAN dongles are divided into two major families:
183 * - Leaf: Based on Renesas M32C, running firmware labeled as 'filo'
184 * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios'
185 */
186enum kvaser_usb_family {
187 KVASER_LEAF,
188 KVASER_USBCAN,
189};
190
191struct kvaser_msg_simple {
192 u8 tid;
193 u8 channel;
194} __packed;
195
196struct kvaser_msg_cardinfo {
197 u8 tid;
198 u8 nchannels;
199 union {
200 struct {
201 __le32 serial_number;
202 __le32 padding;
203 } __packed leaf0;
204 struct {
205 __le32 serial_number_low;
206 __le32 serial_number_high;
207 } __packed usbcan0;
208 } __packed;
209 __le32 clock_resolution;
210 __le32 mfgdate;
211 u8 ean[8];
212 u8 hw_revision;
213 union {
214 struct {
215 u8 usb_hs_mode;
216 } __packed leaf1;
217 struct {
218 u8 padding;
219 } __packed usbcan1;
220 } __packed;
221 __le16 padding;
222} __packed;
223
224struct kvaser_msg_cardinfo2 {
225 u8 tid;
226 u8 reserved;
227 u8 pcb_id[24];
228 __le32 oem_unlock_code;
229} __packed;
230
231struct leaf_msg_softinfo {
232 u8 tid;
233 u8 padding0;
234 __le32 sw_options;
235 __le32 fw_version;
236 __le16 max_outstanding_tx;
237 __le16 padding1[9];
238} __packed;
239
240struct usbcan_msg_softinfo {
241 u8 tid;
242 u8 fw_name[5];
243 __le16 max_outstanding_tx;
244 u8 padding[6];
245 __le32 fw_version;
246 __le16 checksum;
247 __le16 sw_options;
248} __packed;
249
250struct kvaser_msg_busparams {
251 u8 tid;
252 u8 channel;
253 __le32 bitrate;
254 u8 tseg1;
255 u8 tseg2;
256 u8 sjw;
257 u8 no_samp;
258} __packed;
259
260struct kvaser_msg_tx_can {
261 u8 channel;
262 u8 tid;
263 u8 msg[14];
264 union {
265 struct {
266 u8 padding;
267 u8 flags;
268 } __packed leaf;
269 struct {
270 u8 flags;
271 u8 padding;
272 } __packed usbcan;
273 } __packed;
274} __packed;
275
276struct kvaser_msg_rx_can_header {
277 u8 channel;
278 u8 flag;
279} __packed;
280
281struct leaf_msg_rx_can {
282 u8 channel;
283 u8 flag;
284
285 __le16 time[3];
286 u8 msg[14];
287} __packed;
288
289struct usbcan_msg_rx_can {
290 u8 channel;
291 u8 flag;
292
293 u8 msg[14];
294 __le16 time;
295} __packed;
296
297struct leaf_msg_chip_state_event {
298 u8 tid;
299 u8 channel;
300
301 __le16 time[3];
302 u8 tx_errors_count;
303 u8 rx_errors_count;
304
305 u8 status;
306 u8 padding[3];
307} __packed;
308
309struct usbcan_msg_chip_state_event {
310 u8 tid;
311 u8 channel;
312
313 u8 tx_errors_count;
314 u8 rx_errors_count;
315 __le16 time;
316
317 u8 status;
318 u8 padding[3];
319} __packed;
320
321struct kvaser_msg_tx_acknowledge_header {
322 u8 channel;
323 u8 tid;
324} __packed;
325
326struct leaf_msg_tx_acknowledge {
327 u8 channel;
328 u8 tid;
329
330 __le16 time[3];
331 u8 flags;
332 u8 time_offset;
333} __packed;
334
335struct usbcan_msg_tx_acknowledge {
336 u8 channel;
337 u8 tid;
338
339 __le16 time;
340 __le16 padding;
341} __packed;
342
343struct leaf_msg_error_event {
344 u8 tid;
345 u8 flags;
346 __le16 time[3];
347 u8 channel;
348 u8 padding;
349 u8 tx_errors_count;
350 u8 rx_errors_count;
351 u8 status;
352 u8 error_factor;
353} __packed;
354
355struct usbcan_msg_error_event {
356 u8 tid;
357 u8 padding;
358 u8 tx_errors_count_ch0;
359 u8 rx_errors_count_ch0;
360 u8 tx_errors_count_ch1;
361 u8 rx_errors_count_ch1;
362 u8 status_ch0;
363 u8 status_ch1;
364 __le16 time;
365} __packed;
366
367struct kvaser_msg_ctrl_mode {
368 u8 tid;
369 u8 channel;
370 u8 ctrl_mode;
371 u8 padding[3];
372} __packed;
373
374struct kvaser_msg_flush_queue {
375 u8 tid;
376 u8 channel;
377 u8 flags;
378 u8 padding[3];
379} __packed;
380
381struct leaf_msg_log_message {
382 u8 channel;
383 u8 flags;
384 __le16 time[3];
385 u8 dlc;
386 u8 time_offset;
387 __le32 id;
388 u8 data[8];
389} __packed;
390
391struct kvaser_msg {
392 u8 len;
393 u8 id;
394 union {
395 struct kvaser_msg_simple simple;
396 struct kvaser_msg_cardinfo cardinfo;
397 struct kvaser_msg_cardinfo2 cardinfo2;
398 struct kvaser_msg_busparams busparams;
399
400 struct kvaser_msg_rx_can_header rx_can_header;
401 struct kvaser_msg_tx_acknowledge_header tx_acknowledge_header;
402
403 union {
404 struct leaf_msg_softinfo softinfo;
405 struct leaf_msg_rx_can rx_can;
406 struct leaf_msg_chip_state_event chip_state_event;
407 struct leaf_msg_tx_acknowledge tx_acknowledge;
408 struct leaf_msg_error_event error_event;
409 struct leaf_msg_log_message log_message;
410 } __packed leaf;
411
412 union {
413 struct usbcan_msg_softinfo softinfo;
414 struct usbcan_msg_rx_can rx_can;
415 struct usbcan_msg_chip_state_event chip_state_event;
416 struct usbcan_msg_tx_acknowledge tx_acknowledge;
417 struct usbcan_msg_error_event error_event;
418 } __packed usbcan;
419
420 struct kvaser_msg_tx_can tx_can;
421 struct kvaser_msg_ctrl_mode ctrl_mode;
422 struct kvaser_msg_flush_queue flush_queue;
423 } u;
424} __packed;
425
426/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
427 * handling. Some discrepancies between the two families exist:
428 *
429 * - USBCAN firmware does not report M16C "error factors"
430 * - USBCAN controllers has difficulties reporting if the raised error
431 * event is for ch0 or ch1. They leave such arbitration to the OS
432 * driver by letting it compare error counters with previous values
433 * and decide the error event's channel. Thus for USBCAN, the channel
434 * field is only advisory.
435 */
436struct kvaser_usb_error_summary {
437 u8 channel, status, txerr, rxerr;
438 union {
439 struct {
440 u8 error_factor;
441 } leaf;
442 struct {
443 u8 other_ch_status;
444 u8 error_state;
445 } usbcan;
446 };
447};
448
449/* Context for an outstanding, not yet ACKed, transmission */
450struct kvaser_usb_tx_urb_context {
451 struct kvaser_usb_net_priv *priv;
452 u32 echo_index;
453 int dlc;
454};
455
456struct kvaser_usb {
457 struct usb_device *udev;
458 struct kvaser_usb_net_priv *nets[MAX_NET_DEVICES];
459
460 struct usb_endpoint_descriptor *bulk_in, *bulk_out;
461 struct usb_anchor rx_submitted;
462
463 /* @max_tx_urbs: Firmware-reported maximum number of outstanding,
464 * not yet ACKed, transmissions on this device. This value is
465 * also used as a sentinel for marking free tx contexts.
466 */
467 u32 fw_version;
468 unsigned int nchannels;
469 unsigned int max_tx_urbs;
470 enum kvaser_usb_family family;
471
472 bool rxinitdone;
473 void *rxbuf[MAX_RX_URBS];
474 dma_addr_t rxbuf_dma[MAX_RX_URBS];
475};
476
477struct kvaser_usb_net_priv {
478 struct can_priv can;
479 struct can_berr_counter bec;
480
481 struct kvaser_usb *dev;
482 struct net_device *netdev;
483 int channel;
484
485 struct completion start_comp, stop_comp;
486 struct usb_anchor tx_submitted;
487
488 spinlock_t tx_contexts_lock;
489 int active_tx_contexts;
490 struct kvaser_usb_tx_urb_context tx_contexts[];
491};
492
493static const struct usb_device_id kvaser_usb_table[] = {
494 /* Leaf family IDs */
495 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
496 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
497 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
498 .driver_info = KVASER_HAS_TXRX_ERRORS |
499 KVASER_HAS_SILENT_MODE },
500 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
501 .driver_info = KVASER_HAS_TXRX_ERRORS |
502 KVASER_HAS_SILENT_MODE },
503 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
504 .driver_info = KVASER_HAS_TXRX_ERRORS |
505 KVASER_HAS_SILENT_MODE },
506 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
507 .driver_info = KVASER_HAS_TXRX_ERRORS |
508 KVASER_HAS_SILENT_MODE },
509 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
510 .driver_info = KVASER_HAS_TXRX_ERRORS |
511 KVASER_HAS_SILENT_MODE },
512 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
513 .driver_info = KVASER_HAS_TXRX_ERRORS |
514 KVASER_HAS_SILENT_MODE },
515 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
516 .driver_info = KVASER_HAS_TXRX_ERRORS |
517 KVASER_HAS_SILENT_MODE },
518 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
519 .driver_info = KVASER_HAS_TXRX_ERRORS |
520 KVASER_HAS_SILENT_MODE },
521 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
522 .driver_info = KVASER_HAS_TXRX_ERRORS |
523 KVASER_HAS_SILENT_MODE },
524 { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
525 .driver_info = KVASER_HAS_TXRX_ERRORS },
526 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
527 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
528 .driver_info = KVASER_HAS_TXRX_ERRORS |
529 KVASER_HAS_SILENT_MODE },
530 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
531 .driver_info = KVASER_HAS_TXRX_ERRORS },
532 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
533 .driver_info = KVASER_HAS_TXRX_ERRORS },
534 { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
535 .driver_info = KVASER_HAS_TXRX_ERRORS },
536 { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
537 .driver_info = KVASER_HAS_TXRX_ERRORS },
538 { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
539 .driver_info = KVASER_HAS_TXRX_ERRORS },
540 { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
541 .driver_info = KVASER_HAS_TXRX_ERRORS },
542 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
543 { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
544 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
545 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
546 { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
547
548 /* USBCANII family IDs */
549 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
550 .driver_info = KVASER_HAS_TXRX_ERRORS },
551 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
552 .driver_info = KVASER_HAS_TXRX_ERRORS },
553 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
554 .driver_info = KVASER_HAS_TXRX_ERRORS },
555 { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
556 .driver_info = KVASER_HAS_TXRX_ERRORS },
557
558 { }
559};
/* Expose the USB ID table for module autoloading (udev/modprobe). */
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
561
562static inline int kvaser_usb_send_msg(const struct kvaser_usb *dev,
563 struct kvaser_msg *msg)
564{
565 int actual_len;
566
567 return usb_bulk_msg(dev->udev,
568 usb_sndbulkpipe(dev->udev,
569 dev->bulk_out->bEndpointAddress),
570 msg, msg->len, &actual_len,
571 USB_SEND_TIMEOUT);
572}
573
/* Synchronously poll the bulk-in endpoint until a message with id @id
 * arrives or USB_RECV_TIMEOUT ms elapse.
 *
 * The matching message is copied into @msg; any other messages received
 * while waiting are silently discarded.  Returns 0 on success, -ENOMEM,
 * a usb_bulk_msg() error, or -EINVAL on timeout.
 */
static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
			       struct kvaser_msg *msg)
{
	struct kvaser_msg *tmp;
	void *buf;
	int actual_len;
	int err;
	int pos;
	unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT);

	buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	do {
		err = usb_bulk_msg(dev->udev,
				   usb_rcvbulkpipe(dev->udev,
					dev->bulk_in->bEndpointAddress),
				   buf, RX_BUFFER_SIZE, &actual_len,
				   USB_RECV_TIMEOUT);
		if (err < 0)
			goto end;

		/* Walk all complete message headers in the buffer. */
		pos = 0;
		while (pos <= actual_len - MSG_HEADER_LEN) {
			tmp = buf + pos;

			/* Handle messages crossing the USB endpoint max packet
			 * size boundary. Check kvaser_usb_read_bulk_callback()
			 * for further details.
			 */
			if (tmp->len == 0) {
				pos = round_up(pos, le16_to_cpu(dev->bulk_in->
								wMaxPacketSize));
				continue;
			}

			if (pos + tmp->len > actual_len) {
				dev_err_ratelimited(dev->udev->dev.parent,
						    "Format error\n");
				break;
			}

			if (tmp->id == id) {
				/* NOTE(review): tmp->len comes from the device
				 * and is trusted here; assumed never to exceed
				 * sizeof(*msg) — confirm against the firmware
				 * protocol.
				 */
				memcpy(msg, tmp, tmp->len);
				goto end;
			}

			pos += tmp->len;
		}
	} while (time_before(jiffies, to));

	/* No matching reply before the deadline. */
	err = -EINVAL;

end:
	kfree(buf);

	return err;
}
633
634static int kvaser_usb_send_simple_msg(const struct kvaser_usb *dev,
635 u8 msg_id, int channel)
636{
637 struct kvaser_msg *msg;
638 int rc;
639
640 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
641 if (!msg)
642 return -ENOMEM;
643
644 msg->id = msg_id;
645 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
646 msg->u.simple.channel = channel;
647 msg->u.simple.tid = 0xff;
648
649 rc = kvaser_usb_send_msg(dev, msg);
650
651 kfree(msg);
652 return rc;
653}
654
/* Query the firmware for software info and cache the firmware version
 * and the maximum number of outstanding tx URBs the device supports.
 * Returns 0 or a negative error from the send/wait helpers.
 */
static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
{
	struct kvaser_msg msg;
	int err;

	err = kvaser_usb_send_simple_msg(dev, CMD_GET_SOFTWARE_INFO, 0);
	if (err)
		return err;

	err = kvaser_usb_wait_msg(dev, CMD_GET_SOFTWARE_INFO_REPLY, &msg);
	if (err)
		return err;

	/* The reply payload layout differs between the two device families. */
	switch (dev->family) {
	case KVASER_LEAF:
		dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
		dev->max_tx_urbs =
			le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
		break;
	case KVASER_USBCAN:
		dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
		dev->max_tx_urbs =
			le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
		break;
	}

	return 0;
}
683
684static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
685{
686 struct kvaser_msg msg;
687 int err;
688
689 err = kvaser_usb_send_simple_msg(dev, CMD_GET_CARD_INFO, 0);
690 if (err)
691 return err;
692
693 err = kvaser_usb_wait_msg(dev, CMD_GET_CARD_INFO_REPLY, &msg);
694 if (err)
695 return err;
696
697 dev->nchannels = msg.u.cardinfo.nchannels;
698 if ((dev->nchannels > MAX_NET_DEVICES) ||
699 (dev->family == KVASER_USBCAN &&
700 dev->nchannels > MAX_USBCAN_NET_DEVICES))
701 return -EINVAL;
702
703 return 0;
704}
705
/* Handle a CMD_TX_ACKNOWLEDGE message: account the completed frame,
 * hand the echoed skb back to the stack, free its tx context, and wake
 * the tx queue.
 */
static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
				      const struct kvaser_msg *msg)
{
	struct net_device_stats *stats;
	struct kvaser_usb_tx_urb_context *context;
	struct kvaser_usb_net_priv *priv;
	struct sk_buff *skb;
	struct can_frame *cf;
	unsigned long flags;
	u8 channel, tid;

	channel = msg->u.tx_acknowledge_header.channel;
	tid = msg->u.tx_acknowledge_header.tid;

	if (channel >= dev->nchannels) {
		dev_err(dev->udev->dev.parent,
			"Invalid channel number (%d)\n", channel);
		return;
	}

	priv = dev->nets[channel];

	if (!netif_device_present(priv->netdev))
		return;

	stats = &priv->netdev->stats;

	/* tid was set to the context's echo_index on transmit
	 * (see kvaser_usb_start_xmit()).
	 */
	context = &priv->tx_contexts[tid % dev->max_tx_urbs];

	/* Sometimes the state change doesn't come after a bus-off event */
	if (priv->can.restart_ms &&
	    (priv->can.state >= CAN_STATE_BUS_OFF)) {
		skb = alloc_can_err_skb(priv->netdev, &cf);
		if (skb) {
			cf->can_id |= CAN_ERR_RESTARTED;

			stats->rx_packets++;
			stats->rx_bytes += cf->can_dlc;
			netif_rx(skb);
		} else {
			netdev_err(priv->netdev,
				   "No memory left for err_skb\n");
		}

		priv->can.can_stats.restarts++;
		netif_carrier_on(priv->netdev);

		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}

	stats->tx_packets++;
	stats->tx_bytes += context->dlc;

	spin_lock_irqsave(&priv->tx_contexts_lock, flags);

	/* Release the context; echo_index == max_tx_urbs marks it unused. */
	can_get_echo_skb(priv->netdev, context->echo_index);
	context->echo_index = dev->max_tx_urbs;
	--priv->active_tx_contexts;
	netif_wake_queue(priv->netdev);

	spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
}
768
769static void kvaser_usb_simple_msg_callback(struct urb *urb)
770{
771 struct net_device *netdev = urb->context;
772
773 kfree(urb->transfer_buffer);
774
775 if (urb->status)
776 netdev_warn(netdev, "urb status received: %d\n",
777 urb->status);
778}
779
780static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
781 u8 msg_id)
782{
783 struct kvaser_usb *dev = priv->dev;
784 struct net_device *netdev = priv->netdev;
785 struct kvaser_msg *msg;
786 struct urb *urb;
787 void *buf;
788 int err;
789
790 urb = usb_alloc_urb(0, GFP_ATOMIC);
791 if (!urb)
792 return -ENOMEM;
793
794 buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
795 if (!buf) {
796 usb_free_urb(urb);
797 return -ENOMEM;
798 }
799
800 msg = (struct kvaser_msg *)buf;
801 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
802 msg->id = msg_id;
803 msg->u.simple.channel = priv->channel;
804
805 usb_fill_bulk_urb(urb, dev->udev,
806 usb_sndbulkpipe(dev->udev,
807 dev->bulk_out->bEndpointAddress),
808 buf, msg->len,
809 kvaser_usb_simple_msg_callback, netdev);
810 usb_anchor_urb(urb, &priv->tx_submitted);
811
812 err = usb_submit_urb(urb, GFP_ATOMIC);
813 if (err) {
814 netdev_err(netdev, "Error transmitting URB\n");
815 usb_unanchor_urb(urb);
816 kfree(buf);
817 usb_free_urb(urb);
818 return err;
819 }
820
821 usb_free_urb(urb);
822
823 return 0;
824}
825
/* Derive the new CAN controller state from an error summary and feed
 * the state change plus error counters into the can subsystem.
 *
 * Called from kvaser_usb_rx_error() before any skb allocation, so the
 * interface state stays correct even under -ENOMEM; @cf may be a
 * stack-allocated frame (see the caller's comment).
 */
static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
						 const struct kvaser_usb_error_summary *es,
						 struct can_frame *cf)
{
	struct kvaser_usb *dev = priv->dev;
	struct net_device_stats *stats = &priv->netdev->stats;
	enum can_state cur_state, new_state, tx_state, rx_state;

	netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status);

	new_state = cur_state = priv->can.state;

	if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET))
		new_state = CAN_STATE_BUS_OFF;
	else if (es->status & M16C_STATE_BUS_PASSIVE)
		new_state = CAN_STATE_ERROR_PASSIVE;
	else if (es->status & M16C_STATE_BUS_ERROR) {
		/* Guard against spurious error events after a busoff */
		if (cur_state < CAN_STATE_BUS_OFF) {
			/* Thresholds 128/96 match the CAN error-passive and
			 * error-warning counter limits.
			 */
			if ((es->txerr >= 128) || (es->rxerr >= 128))
				new_state = CAN_STATE_ERROR_PASSIVE;
			else if ((es->txerr >= 96) || (es->rxerr >= 96))
				new_state = CAN_STATE_ERROR_WARNING;
			else if (cur_state > CAN_STATE_ERROR_ACTIVE)
				new_state = CAN_STATE_ERROR_ACTIVE;
		}
	}

	/* No status flags at all: the controller is back to error-active. */
	if (!es->status)
		new_state = CAN_STATE_ERROR_ACTIVE;

	if (new_state != cur_state) {
		/* Attribute the change to the tx and/or rx side depending on
		 * which error counter is higher.
		 */
		tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
		rx_state = (es->txerr <= es->rxerr) ? new_state : 0;

		can_change_state(priv->netdev, cf, tx_state, rx_state);
	}

	/* Count a restart when we come back from bus-off with auto-restart on. */
	if (priv->can.restart_ms &&
	    (cur_state >= CAN_STATE_BUS_OFF) &&
	    (new_state < CAN_STATE_BUS_OFF)) {
		priv->can.can_stats.restarts++;
	}

	/* Per-family error accounting. */
	switch (dev->family) {
	case KVASER_LEAF:
		if (es->leaf.error_factor) {
			priv->can.can_stats.bus_error++;
			stats->rx_errors++;
		}
		break;
	case KVASER_USBCAN:
		if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR)
			stats->tx_errors++;
		if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR)
			stats->rx_errors++;
		if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
			priv->can.can_stats.bus_error++;
		}
		break;
	}

	/* Remember the counters so the USBCAN path can detect changes
	 * (see kvaser_usbcan_conditionally_rx_error()).
	 */
	priv->bec.txerr = es->txerr;
	priv->bec.rxerr = es->rxerr;
}
891
/* Turn an error summary into a CAN error frame delivered to userspace,
 * after updating the interface's state and counters.
 */
static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
				const struct kvaser_usb_error_summary *es)
{
	struct can_frame *cf, tmp_cf = { .can_id = CAN_ERR_FLAG, .can_dlc = CAN_ERR_DLC };
	struct sk_buff *skb;
	struct net_device_stats *stats;
	struct kvaser_usb_net_priv *priv;
	enum can_state old_state, new_state;

	if (es->channel >= dev->nchannels) {
		dev_err(dev->udev->dev.parent,
			"Invalid channel number (%d)\n", es->channel);
		return;
	}

	priv = dev->nets[es->channel];
	stats = &priv->netdev->stats;

	/* Update all of the can interface's state and error counters before
	 * trying any memory allocation that can actually fail with -ENOMEM.
	 *
	 * We send a temporary stack-allocated error can frame to
	 * can_change_state() for the very same reason.
	 *
	 * TODO: Split can_change_state() responsibility between updating the
	 * can interface's state and counters, and the setting up of can error
	 * frame ID and data to userspace. Remove stack allocation afterwards.
	 */
	old_state = priv->can.state;
	kvaser_usb_rx_error_update_can_state(priv, es, &tmp_cf);
	new_state = priv->can.state;

	skb = alloc_can_err_skb(priv->netdev, &cf);
	if (!skb) {
		stats->rx_dropped++;
		return;
	}
	/* Copy what can_change_state() wrote into the real error frame. */
	memcpy(cf, &tmp_cf, sizeof(*cf));

	if (new_state != old_state) {
		if (es->status &
		    (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
			/* Without auto-restart, stop the chip on bus-off. */
			if (!priv->can.restart_ms)
				kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
			netif_carrier_off(priv->netdev);
		}

		/* Recovered from bus-off via auto-restart. */
		if (priv->can.restart_ms &&
		    (old_state >= CAN_STATE_BUS_OFF) &&
		    (new_state < CAN_STATE_BUS_OFF)) {
			cf->can_id |= CAN_ERR_RESTARTED;
			netif_carrier_on(priv->netdev);
		}
	}

	/* Translate family-specific error causes into error-frame bits. */
	switch (dev->family) {
	case KVASER_LEAF:
		if (es->leaf.error_factor) {
			cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;

			if (es->leaf.error_factor & M16C_EF_ACKE)
				cf->data[3] = CAN_ERR_PROT_LOC_ACK;
			if (es->leaf.error_factor & M16C_EF_CRCE)
				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			if (es->leaf.error_factor & M16C_EF_FORME)
				cf->data[2] |= CAN_ERR_PROT_FORM;
			if (es->leaf.error_factor & M16C_EF_STFE)
				cf->data[2] |= CAN_ERR_PROT_STUFF;
			if (es->leaf.error_factor & M16C_EF_BITE0)
				cf->data[2] |= CAN_ERR_PROT_BIT0;
			if (es->leaf.error_factor & M16C_EF_BITE1)
				cf->data[2] |= CAN_ERR_PROT_BIT1;
			if (es->leaf.error_factor & M16C_EF_TRE)
				cf->data[2] |= CAN_ERR_PROT_TX;
		}
		break;
	case KVASER_USBCAN:
		if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
			cf->can_id |= CAN_ERR_BUSERROR;
		}
		break;
	}

	/* data[6]/data[7] carry the raw tx/rx error counters. */
	cf->data[6] = es->txerr;
	cf->data[7] = es->rxerr;

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_rx(skb);
}
982
983/* For USBCAN, report error to userspace iff the channels's errors counter
984 * has changed, or we're the only channel seeing a bus error state.
985 */
986static void kvaser_usbcan_conditionally_rx_error(const struct kvaser_usb *dev,
987 struct kvaser_usb_error_summary *es)
988{
989 struct kvaser_usb_net_priv *priv;
990 int channel;
991 bool report_error;
992
993 channel = es->channel;
994 if (channel >= dev->nchannels) {
995 dev_err(dev->udev->dev.parent,
996 "Invalid channel number (%d)\n", channel);
997 return;
998 }
999
1000 priv = dev->nets[channel];
1001 report_error = false;
1002
1003 if (es->txerr != priv->bec.txerr) {
1004 es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR;
1005 report_error = true;
1006 }
1007 if (es->rxerr != priv->bec.rxerr) {
1008 es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR;
1009 report_error = true;
1010 }
1011 if ((es->status & M16C_STATE_BUS_ERROR) &&
1012 !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) {
1013 es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR;
1014 report_error = true;
1015 }
1016
1017 if (report_error)
1018 kvaser_usb_rx_error(dev, es);
1019}
1020
/* Translate a USBCAN-family error/state message into one error summary
 * per affected channel and conditionally report each to userspace.
 */
static void kvaser_usbcan_rx_error(const struct kvaser_usb *dev,
				   const struct kvaser_msg *msg)
{
	struct kvaser_usb_error_summary es = { };

	switch (msg->id) {
	/* Sometimes errors are sent as unsolicited chip state events */
	case CMD_CHIP_STATE_EVENT:
		es.channel = msg->u.usbcan.chip_state_event.channel;
		es.status =  msg->u.usbcan.chip_state_event.status;
		es.txerr = msg->u.usbcan.chip_state_event.tx_errors_count;
		es.rxerr = msg->u.usbcan.chip_state_event.rx_errors_count;
		kvaser_usbcan_conditionally_rx_error(dev, &es);
		break;

	case CMD_CAN_ERROR_EVENT:
		/* A single error event carries both channels' status. */
		es.channel = 0;
		es.status = msg->u.usbcan.error_event.status_ch0;
		es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch0;
		es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch0;
		es.usbcan.other_ch_status =
			msg->u.usbcan.error_event.status_ch1;
		kvaser_usbcan_conditionally_rx_error(dev, &es);

		/* The USBCAN firmware supports up to 2 channels.
		 * Now that ch0 was checked, check if ch1 has any errors.
		 */
		if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
			es.channel = 1;
			es.status = msg->u.usbcan.error_event.status_ch1;
			es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch1;
			es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch1;
			es.usbcan.other_ch_status =
				msg->u.usbcan.error_event.status_ch0;
			kvaser_usbcan_conditionally_rx_error(dev, &es);
		}
		break;

	default:
		dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
			msg->id);
	}
}
1064
/* Translate a Leaf-family error/state message into an error summary and
 * report it to userspace via kvaser_usb_rx_error().
 */
static void kvaser_leaf_rx_error(const struct kvaser_usb *dev,
				 const struct kvaser_msg *msg)
{
	struct kvaser_usb_error_summary es = { };

	switch (msg->id) {
	case CMD_CAN_ERROR_EVENT:
		es.channel = msg->u.leaf.error_event.channel;
		es.status =  msg->u.leaf.error_event.status;
		es.txerr = msg->u.leaf.error_event.tx_errors_count;
		es.rxerr = msg->u.leaf.error_event.rx_errors_count;
		es.leaf.error_factor = msg->u.leaf.error_event.error_factor;
		break;
	case CMD_LEAF_LOG_MESSAGE:
		/* Log messages pack status/counters into the data bytes. */
		es.channel = msg->u.leaf.log_message.channel;
		es.status = msg->u.leaf.log_message.data[0];
		es.txerr = msg->u.leaf.log_message.data[2];
		es.rxerr = msg->u.leaf.log_message.data[3];
		es.leaf.error_factor = msg->u.leaf.log_message.data[1];
		break;
	case CMD_CHIP_STATE_EVENT:
		es.channel = msg->u.leaf.chip_state_event.channel;
		es.status =  msg->u.leaf.chip_state_event.status;
		es.txerr = msg->u.leaf.chip_state_event.tx_errors_count;
		es.rxerr = msg->u.leaf.chip_state_event.rx_errors_count;
		es.leaf.error_factor = 0;
		break;
	default:
		dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
			msg->id);
		return;
	}

	kvaser_usb_rx_error(dev, &es);
}
1100
/* Handle error/overrun flags attached to a received CAN message:
 * count the error and, for an rx overrun, deliver a controller error
 * frame to userspace.
 */
static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
				  const struct kvaser_msg *msg)
{
	struct can_frame *cf;
	struct sk_buff *skb;
	struct net_device_stats *stats = &priv->netdev->stats;

	if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
					 MSG_FLAG_NERR)) {
		netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
			   msg->u.rx_can_header.flag);

		stats->rx_errors++;
		return;
	}

	if (msg->u.rx_can_header.flag & MSG_FLAG_OVERRUN) {
		stats->rx_over_errors++;
		stats->rx_errors++;

		skb = alloc_can_err_skb(priv->netdev, &cf);
		if (!skb) {
			stats->rx_dropped++;
			return;
		}

		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_rx(skb);
	}
}
1135
/* Handle a received CAN message: route error/overrun variants to the
 * error handlers, otherwise decode the frame (standard, extended, or
 * Leaf log message) and pass it up the network stack.
 */
static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
				  const struct kvaser_msg *msg)
{
	struct kvaser_usb_net_priv *priv;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct net_device_stats *stats;
	u8 channel = msg->u.rx_can_header.channel;
	const u8 *rx_msg = NULL;	/* GCC */

	if (channel >= dev->nchannels) {
		dev_err(dev->udev->dev.parent,
			"Invalid channel number (%d)\n", channel);
		return;
	}

	priv = dev->nets[channel];
	stats = &priv->netdev->stats;

	/* Error-flagged Leaf log messages are full error events. */
	if ((msg->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
	    (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE)) {
		kvaser_leaf_rx_error(dev, msg);
		return;
	} else if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
						MSG_FLAG_NERR |
						MSG_FLAG_OVERRUN)) {
		kvaser_usb_rx_can_err(priv, msg);
		return;
	} else if (msg->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) {
		netdev_warn(priv->netdev,
			    "Unhandled frame (flags: 0x%02x)",
			    msg->u.rx_can_header.flag);
		return;
	}

	/* The raw frame bytes live at a family-specific offset. */
	switch (dev->family) {
	case KVASER_LEAF:
		rx_msg = msg->u.leaf.rx_can.msg;
		break;
	case KVASER_USBCAN:
		rx_msg = msg->u.usbcan.rx_can.msg;
		break;
	}

	skb = alloc_can_skb(priv->netdev, &cf);
	if (!skb) {
		stats->rx_dropped++;
		return;
	}

	if (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE) {
		/* Log messages carry a pre-assembled 32-bit id. */
		cf->can_id = le32_to_cpu(msg->u.leaf.log_message.id);
		if (cf->can_id & KVASER_EXTENDED_FRAME)
			cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
		else
			cf->can_id &= CAN_SFF_MASK;

		cf->can_dlc = get_can_dlc(msg->u.leaf.log_message.dlc);

		if (msg->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
			cf->can_id |= CAN_RTR_FLAG;
		else
			memcpy(cf->data, &msg->u.leaf.log_message.data,
			       cf->can_dlc);
	} else {
		/* 11-bit base id is packed 5+6 bits across rx_msg[0..1]. */
		cf->can_id = ((rx_msg[0] & 0x1f) << 6) | (rx_msg[1] & 0x3f);

		if (msg->id == CMD_RX_EXT_MESSAGE) {
			/* Extended frames add 18 more bits (4+8+6) from
			 * rx_msg[2..4].
			 */
			cf->can_id <<= 18;
			cf->can_id |= ((rx_msg[2] & 0x0f) << 14) |
				      ((rx_msg[3] & 0xff) << 6) |
				      (rx_msg[4] & 0x3f);
			cf->can_id |= CAN_EFF_FLAG;
		}

		cf->can_dlc = get_can_dlc(rx_msg[5]);

		if (msg->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
			cf->can_id |= CAN_RTR_FLAG;
		else
			memcpy(cf->data, &rx_msg[6],
			       cf->can_dlc);
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_rx(skb);
}
1224
/* Handle CMD_START_CHIP_REPLY: either wake a stopped tx queue (when the
 * start completion was already consumed) or start the queue and
 * complete the waiter in kvaser_usb_start_chip().
 */
static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
					const struct kvaser_msg *msg)
{
	struct kvaser_usb_net_priv *priv;
	u8 channel = msg->u.simple.channel;

	if (channel >= dev->nchannels) {
		dev_err(dev->udev->dev.parent,
			"Invalid channel number (%d)\n", channel);
		return;
	}

	priv = dev->nets[channel];

	/* NOTE(review): the completion_done() branch presumably covers a
	 * restart while the interface is already up — confirm against the
	 * restart path.
	 */
	if (completion_done(&priv->start_comp) &&
	    netif_queue_stopped(priv->netdev)) {
		netif_wake_queue(priv->netdev);
	} else {
		netif_start_queue(priv->netdev);
		complete(&priv->start_comp);
	}
}
1247
1248static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev,
1249 const struct kvaser_msg *msg)
1250{
1251 struct kvaser_usb_net_priv *priv;
1252 u8 channel = msg->u.simple.channel;
1253
1254 if (channel >= dev->nchannels) {
1255 dev_err(dev->udev->dev.parent,
1256 "Invalid channel number (%d)\n", channel);
1257 return;
1258 }
1259
1260 priv = dev->nets[channel];
1261
1262 complete(&priv->stop_comp);
1263}
1264
/* Dispatch a single incoming message to its handler based on the
 * message id and the device family.  Unknown ids, and family-specific
 * ids seen on the wrong family, are logged via the warn label.
 */
static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
				      const struct kvaser_msg *msg)
{
	switch (msg->id) {
	case CMD_START_CHIP_REPLY:
		kvaser_usb_start_chip_reply(dev, msg);
		break;

	case CMD_STOP_CHIP_REPLY:
		kvaser_usb_stop_chip_reply(dev, msg);
		break;

	case CMD_RX_STD_MESSAGE:
	case CMD_RX_EXT_MESSAGE:
		kvaser_usb_rx_can_msg(dev, msg);
		break;

	case CMD_LEAF_LOG_MESSAGE:
		/* Leaf-only message. */
		if (dev->family != KVASER_LEAF)
			goto warn;
		kvaser_usb_rx_can_msg(dev, msg);
		break;

	case CMD_CHIP_STATE_EVENT:
	case CMD_CAN_ERROR_EVENT:
		if (dev->family == KVASER_LEAF)
			kvaser_leaf_rx_error(dev, msg);
		else
			kvaser_usbcan_rx_error(dev, msg);
		break;

	case CMD_TX_ACKNOWLEDGE:
		kvaser_usb_tx_acknowledge(dev, msg);
		break;

	/* Ignored messages */
	case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
		if (dev->family != KVASER_USBCAN)
			goto warn;
		break;

	case CMD_FLUSH_QUEUE_REPLY:
		if (dev->family != KVASER_LEAF)
			goto warn;
		break;

	default:
warn:		dev_warn(dev->udev->dev.parent,
			 "Unhandled message (%d)\n", msg->id);
		break;
	}
}
1317
/* Completion handler for the bulk-in rx URBs: parse every complete
 * message in the transfer buffer, dispatch each one, then resubmit the
 * URB.  On permanent failure (-ENODEV) all netdevs are detached.
 */
static void kvaser_usb_read_bulk_callback(struct urb *urb)
{
	struct kvaser_usb *dev = urb->context;
	struct kvaser_msg *msg;
	int pos = 0;
	int err, i;

	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -EPIPE:
	case -EPROTO:
	case -ESHUTDOWN:
		/* URB killed or device gone: do not resubmit. */
		return;
	default:
		dev_info(dev->udev->dev.parent, "Rx URB aborted (%d)\n",
			 urb->status);
		goto resubmit_urb;
	}

	while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
		msg = urb->transfer_buffer + pos;

		/* The Kvaser firmware can only read and write messages that
		 * does not cross the USB's endpoint wMaxPacketSize boundary.
		 * If a follow-up command crosses such boundary, firmware puts
		 * a placeholder zero-length command in its place then aligns
		 * the real command to the next max packet size.
		 *
		 * Handle such cases or we're going to miss a significant
		 * number of events in case of a heavy rx load on the bus.
		 */
		if (msg->len == 0) {
			pos = round_up(pos, le16_to_cpu(dev->bulk_in->
							wMaxPacketSize));
			continue;
		}

		if (pos + msg->len > urb->actual_length) {
			dev_err_ratelimited(dev->udev->dev.parent,
					    "Format error\n");
			break;
		}

		kvaser_usb_handle_message(dev, msg);
		pos += msg->len;
	}

resubmit_urb:
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_rcvbulkpipe(dev->udev,
					  dev->bulk_in->bEndpointAddress),
			  urb->transfer_buffer, RX_BUFFER_SIZE,
			  kvaser_usb_read_bulk_callback, dev);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err == -ENODEV) {
		/* Device is gone: take every channel's netdev down. */
		for (i = 0; i < dev->nchannels; i++) {
			if (!dev->nets[i])
				continue;

			netif_device_detach(dev->nets[i]->netdev);
		}
	} else if (err) {
		dev_err(dev->udev->dev.parent,
			"Failed resubmitting read bulk urb: %d\n", err);
	}

	return;
}
1389
/* Allocate and submit up to MAX_RX_URBS bulk-in URBs with coherent DMA
 * buffers.  Partial success (at least one URB) is accepted with a
 * warning; the buffers are recorded in dev->rxbuf[]/rxbuf_dma[] for
 * later release by kvaser_usb_unlink_all_urbs().  Idempotent via
 * dev->rxinitdone.
 */
static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
{
	int i, err = 0;

	if (dev->rxinitdone)
		return 0;

	for (i = 0; i < MAX_RX_URBS; i++) {
		struct urb *urb = NULL;
		u8 *buf = NULL;
		dma_addr_t buf_dma;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			err = -ENOMEM;
			break;
		}

		buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE,
					 GFP_KERNEL, &buf_dma);
		if (!buf) {
			dev_warn(dev->udev->dev.parent,
				 "No memory left for USB buffer\n");
			usb_free_urb(urb);
			err = -ENOMEM;
			break;
		}

		usb_fill_bulk_urb(urb, dev->udev,
				  usb_rcvbulkpipe(dev->udev,
						  dev->bulk_in->bEndpointAddress),
				  buf, RX_BUFFER_SIZE,
				  kvaser_usb_read_bulk_callback,
				  dev);
		urb->transfer_dma = buf_dma;
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		usb_anchor_urb(urb, &dev->rx_submitted);

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err) {
			usb_unanchor_urb(urb);
			usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
					  buf_dma);
			usb_free_urb(urb);
			break;
		}

		/* Remember the buffer so it can be freed on teardown. */
		dev->rxbuf[i] = buf;
		dev->rxbuf_dma[i] = buf_dma;

		/* Drop our reference; the USB core holds one while submitted. */
		usb_free_urb(urb);
	}

	if (i == 0) {
		dev_warn(dev->udev->dev.parent,
			 "Cannot setup read URBs, error %d\n", err);
		return err;
	} else if (i < MAX_RX_URBS) {
		dev_warn(dev->udev->dev.parent,
			 "RX performances may be slow\n");
	}

	dev->rxinitdone = true;

	return 0;
}
1456
1457static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv)
1458{
1459 struct kvaser_msg *msg;
1460 int rc;
1461
1462 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1463 if (!msg)
1464 return -ENOMEM;
1465
1466 msg->id = CMD_SET_CTRL_MODE;
1467 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_ctrl_mode);
1468 msg->u.ctrl_mode.tid = 0xff;
1469 msg->u.ctrl_mode.channel = priv->channel;
1470
1471 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
1472 msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT;
1473 else
1474 msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL;
1475
1476 rc = kvaser_usb_send_msg(priv->dev, msg);
1477
1478 kfree(msg);
1479 return rc;
1480}
1481
1482static int kvaser_usb_start_chip(struct kvaser_usb_net_priv *priv)
1483{
1484 int err;
1485
1486 init_completion(&priv->start_comp);
1487
1488 err = kvaser_usb_send_simple_msg(priv->dev, CMD_START_CHIP,
1489 priv->channel);
1490 if (err)
1491 return err;
1492
1493 if (!wait_for_completion_timeout(&priv->start_comp,
1494 msecs_to_jiffies(START_TIMEOUT)))
1495 return -ETIMEDOUT;
1496
1497 return 0;
1498}
1499
/* Bring a channel up: open the candev, submit rx URBs, configure
 * silent/normal mode, and start the chip.  On any failure the candev is
 * closed again and the error returned.
 */
static int kvaser_usb_open(struct net_device *netdev)
{
	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
	struct kvaser_usb *dev = priv->dev;
	int err;

	err = open_candev(netdev);
	if (err)
		return err;

	/* Shared across channels; a no-op after the first open. */
	err = kvaser_usb_setup_rx_urbs(dev);
	if (err)
		goto error;

	err = kvaser_usb_set_opt_mode(priv);
	if (err)
		goto error;

	err = kvaser_usb_start_chip(priv);
	if (err) {
		netdev_warn(netdev, "Cannot start device, error %d\n", err);
		goto error;
	}

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return 0;

error:
	close_candev(netdev);
	return err;
}
1532
1533static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
1534{
1535 int i, max_tx_urbs;
1536
1537 max_tx_urbs = priv->dev->max_tx_urbs;
1538
1539 priv->active_tx_contexts = 0;
1540 for (i = 0; i < max_tx_urbs; i++)
1541 priv->tx_contexts[i].echo_index = max_tx_urbs;
1542}
1543
/* Kill all pending tx URBs and mark their contexts free again.
 *
 * This method might sleep. Do not call it in the atomic context
 * of URB completions.
 */
static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
{
	usb_kill_anchored_urbs(&priv->tx_submitted);
	kvaser_usb_reset_tx_urb_contexts(priv);
}
1552
/* Kill every rx URB, free the coherent rx buffers, and unlink all
 * channels' tx URBs.  Like kvaser_usb_unlink_tx_urbs(), this may sleep.
 */
static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
{
	int i;

	usb_kill_anchored_urbs(&dev->rx_submitted);

	/* Buffers were recorded in kvaser_usb_setup_rx_urbs(). */
	for (i = 0; i < MAX_RX_URBS; i++)
		usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
				  dev->rxbuf[i],
				  dev->rxbuf_dma[i]);

	for (i = 0; i < dev->nchannels; i++) {
		struct kvaser_usb_net_priv *priv = dev->nets[i];

		if (priv)
			kvaser_usb_unlink_tx_urbs(priv);
	}
}
1571
/* Send CMD_STOP_CHIP and block until the reply handler completes
 * stop_comp, or STOP_TIMEOUT ms pass (-ETIMEDOUT).
 */
static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv)
{
	int err;

	init_completion(&priv->stop_comp);

	err = kvaser_usb_send_simple_msg(priv->dev, CMD_STOP_CHIP,
					 priv->channel);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&priv->stop_comp,
					 msecs_to_jiffies(STOP_TIMEOUT)))
		return -ETIMEDOUT;

	return 0;
}
1589
1590static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv)
1591{
1592 struct kvaser_msg *msg;
1593 int rc;
1594
1595 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1596 if (!msg)
1597 return -ENOMEM;
1598
1599 msg->id = CMD_FLUSH_QUEUE;
1600 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_flush_queue);
1601 msg->u.flush_queue.channel = priv->channel;
1602 msg->u.flush_queue.flags = 0x00;
1603
1604 rc = kvaser_usb_send_msg(priv->dev, msg);
1605
1606 kfree(msg);
1607 return rc;
1608}
1609
/* Take a channel down: stop the queue, flush/reset/stop the chip, then
 * tear down tx state.  Failures of the individual firmware commands are
 * only logged — shutdown proceeds regardless and always returns 0.
 */
static int kvaser_usb_close(struct net_device *netdev)
{
	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
	struct kvaser_usb *dev = priv->dev;
	int err;

	netif_stop_queue(netdev);

	err = kvaser_usb_flush_queue(priv);
	if (err)
		netdev_warn(netdev, "Cannot flush queue, error %d\n", err);

	err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
	if (err)
		netdev_warn(netdev, "Cannot reset card, error %d\n", err);

	err = kvaser_usb_stop_chip(priv);
	if (err)
		netdev_warn(netdev, "Cannot stop device, error %d\n", err);

	/* reset tx contexts */
	kvaser_usb_unlink_tx_urbs(priv);

	priv->can.state = CAN_STATE_STOPPED;
	close_candev(priv->netdev);

	return 0;
}
1638
1639static void kvaser_usb_write_bulk_callback(struct urb *urb)
1640{
1641 struct kvaser_usb_tx_urb_context *context = urb->context;
1642 struct kvaser_usb_net_priv *priv;
1643 struct net_device *netdev;
1644
1645 if (WARN_ON(!context))
1646 return;
1647
1648 priv = context->priv;
1649 netdev = priv->netdev;
1650
1651 kfree(urb->transfer_buffer);
1652
1653 if (!netif_device_present(netdev))
1654 return;
1655
1656 if (urb->status)
1657 netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
1658}
1659
/* ndo_start_xmit: encode one CAN frame as a device command and submit it
 * on the bulk OUT endpoint.
 *
 * A free tx context (marked by the sentinel echo_index == max_tx_urbs) is
 * claimed under tx_contexts_lock, and the queue is stopped when the last
 * context is taken. If the URB submission fails, the context and echo skb
 * are rolled back under the same lock. Returns NETDEV_TX_OK in all cases
 * except the "no free context" path, which indicates a flow-control bug.
 */
static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
					 struct net_device *netdev)
{
	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
	struct kvaser_usb *dev = priv->dev;
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct kvaser_usb_tx_urb_context *context = NULL;
	struct urb *urb;
	void *buf;
	struct kvaser_msg *msg;
	int i, err, ret = NETDEV_TX_OK;
	u8 *msg_tx_can_flags = NULL; /* GCC */
	unsigned long flags;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		stats->tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Freed by kvaser_usb_write_bulk_callback() on completion */
	buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
	if (!buf) {
		stats->tx_dropped++;
		dev_kfree_skb(skb);
		goto freeurb;
	}

	msg = buf;
	msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can);
	msg->u.tx_can.channel = priv->channel;

	/* Leaf and USBCAN keep the flags byte in different spots */
	switch (dev->family) {
	case KVASER_LEAF:
		msg_tx_can_flags = &msg->u.tx_can.leaf.flags;
		break;
	case KVASER_USBCAN:
		msg_tx_can_flags = &msg->u.tx_can.usbcan.flags;
		break;
	}

	*msg_tx_can_flags = 0;

	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended frame: 29-bit ID packed into msg[0..4] */
		msg->id = CMD_TX_EXT_MESSAGE;
		msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f;
		msg->u.tx_can.msg[1] = (cf->can_id >> 18) & 0x3f;
		msg->u.tx_can.msg[2] = (cf->can_id >> 14) & 0x0f;
		msg->u.tx_can.msg[3] = (cf->can_id >> 6) & 0xff;
		msg->u.tx_can.msg[4] = cf->can_id & 0x3f;
	} else {
		/* Standard frame: 11-bit ID packed into msg[0..1] */
		msg->id = CMD_TX_STD_MESSAGE;
		msg->u.tx_can.msg[0] = (cf->can_id >> 6) & 0x1f;
		msg->u.tx_can.msg[1] = cf->can_id & 0x3f;
	}

	msg->u.tx_can.msg[5] = cf->can_dlc;
	memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc);

	if (cf->can_id & CAN_RTR_FLAG)
		*msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;

	/* Claim a free tx context; stop the queue when the last one goes */
	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
	for (i = 0; i < dev->max_tx_urbs; i++) {
		if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
			context = &priv->tx_contexts[i];

			context->echo_index = i;
			can_put_echo_skb(skb, netdev, context->echo_index);
			++priv->active_tx_contexts;
			if (priv->active_tx_contexts >= dev->max_tx_urbs)
				netif_stop_queue(netdev);

			break;
		}
	}
	spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);

	/* This should never happen; it implies a flow control bug */
	if (!context) {
		netdev_warn(netdev, "cannot find free context\n");

		kfree(buf);
		ret = NETDEV_TX_BUSY;
		goto freeurb;
	}

	context->priv = priv;
	context->dlc = cf->can_dlc;

	/* The device echoes the tid back, matching the ACK to this context */
	msg->u.tx_can.tid = context->echo_index;

	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev,
					  dev->bulk_out->bEndpointAddress),
			  buf, msg->len,
			  kvaser_usb_write_bulk_callback, context);
	usb_anchor_urb(urb, &priv->tx_submitted);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		/* Roll back the context and echo skb under the lock */
		spin_lock_irqsave(&priv->tx_contexts_lock, flags);

		can_free_echo_skb(netdev, context->echo_index);
		context->echo_index = dev->max_tx_urbs;
		--priv->active_tx_contexts;
		netif_wake_queue(netdev);

		spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);

		usb_unanchor_urb(urb);
		kfree(buf);

		stats->tx_dropped++;

		if (err == -ENODEV)
			netif_device_detach(netdev);
		else
			netdev_warn(netdev, "Failed tx_urb %d\n", err);

		goto freeurb;
	}

	ret = NETDEV_TX_OK;

freeurb:
	usb_free_urb(urb);
	return ret;
}
1793
/* net_device callbacks shared by every Kvaser CAN channel */
static const struct net_device_ops kvaser_usb_netdev_ops = {
	.ndo_open = kvaser_usb_open,
	.ndo_stop = kvaser_usb_close,
	.ndo_start_xmit = kvaser_usb_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
1800
/* Bit timing limits advertised to the CAN core for netlink validation */
static const struct can_bittiming_const kvaser_usb_bittiming_const = {
	.name = "kvaser_usb",
	.tseg1_min = KVASER_USB_TSEG1_MIN,
	.tseg1_max = KVASER_USB_TSEG1_MAX,
	.tseg2_min = KVASER_USB_TSEG2_MIN,
	.tseg2_max = KVASER_USB_TSEG2_MAX,
	.sjw_max = KVASER_USB_SJW_MAX,
	.brp_min = KVASER_USB_BRP_MIN,
	.brp_max = KVASER_USB_BRP_MAX,
	.brp_inc = KVASER_USB_BRP_INC,
};
1812
1813static int kvaser_usb_set_bittiming(struct net_device *netdev)
1814{
1815 struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
1816 struct can_bittiming *bt = &priv->can.bittiming;
1817 struct kvaser_usb *dev = priv->dev;
1818 struct kvaser_msg *msg;
1819 int rc;
1820
1821 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1822 if (!msg)
1823 return -ENOMEM;
1824
1825 msg->id = CMD_SET_BUS_PARAMS;
1826 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_busparams);
1827 msg->u.busparams.channel = priv->channel;
1828 msg->u.busparams.tid = 0xff;
1829 msg->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
1830 msg->u.busparams.sjw = bt->sjw;
1831 msg->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
1832 msg->u.busparams.tseg2 = bt->phase_seg2;
1833
1834 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
1835 msg->u.busparams.no_samp = 3;
1836 else
1837 msg->u.busparams.no_samp = 1;
1838
1839 rc = kvaser_usb_send_msg(dev, msg);
1840
1841 kfree(msg);
1842 return rc;
1843}
1844
1845static int kvaser_usb_set_mode(struct net_device *netdev,
1846 enum can_mode mode)
1847{
1848 struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
1849 int err;
1850
1851 switch (mode) {
1852 case CAN_MODE_START:
1853 err = kvaser_usb_simple_msg_async(priv, CMD_START_CHIP);
1854 if (err)
1855 return err;
1856 break;
1857 default:
1858 return -EOPNOTSUPP;
1859 }
1860
1861 return 0;
1862}
1863
/* Used as can.do_get_berr_counter on devices with KVASER_HAS_TXRX_ERRORS.
 * Reports the counters cached in priv->bec (presumably updated from
 * device error notifications elsewhere in the driver — not visible here).
 */
static int kvaser_usb_get_berr_counter(const struct net_device *netdev,
				       struct can_berr_counter *bec)
{
	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);

	*bec = priv->bec;

	return 0;
}
1873
/* Unregister and free every per-channel netdev.
 *
 * Two passes on purpose: unregister all candevs first (stops new
 * activity), kill every outstanding URB, and only then free the netdevs
 * so a late URB completion cannot touch freed memory. Channels that
 * never came up (dev->nets[i] == NULL) are skipped.
 */
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
	int i;

	for (i = 0; i < dev->nchannels; i++) {
		if (!dev->nets[i])
			continue;

		unregister_candev(dev->nets[i]->netdev);
	}

	kvaser_usb_unlink_all_urbs(dev);

	for (i = 0; i < dev->nchannels; i++) {
		if (!dev->nets[i])
			continue;

		free_candev(dev->nets[i]->netdev);
	}
}
1894
/* Allocate, initialize and register the CAN netdev for one channel.
 *
 * Resets the channel first so it starts from a known state. On failure
 * everything allocated here is released and dev->nets[channel] is left
 * NULL, so kvaser_usb_remove_interfaces() can skip it safely.
 */
static int kvaser_usb_init_one(struct usb_interface *intf,
			       const struct usb_device_id *id, int channel)
{
	struct kvaser_usb *dev = usb_get_intfdata(intf);
	struct net_device *netdev;
	struct kvaser_usb_net_priv *priv;
	int err;

	err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
	if (err)
		return err;

	/* priv carries a trailing flexible array: one tx context per URB */
	netdev = alloc_candev(sizeof(*priv) +
			      dev->max_tx_urbs * sizeof(*priv->tx_contexts),
			      dev->max_tx_urbs);
	if (!netdev) {
		dev_err(&intf->dev, "Cannot alloc candev\n");
		return -ENOMEM;
	}

	priv = netdev_priv(netdev);

	init_usb_anchor(&priv->tx_submitted);
	init_completion(&priv->start_comp);
	init_completion(&priv->stop_comp);

	priv->dev = dev;
	priv->netdev = netdev;
	priv->channel = channel;

	spin_lock_init(&priv->tx_contexts_lock);
	/* Mark all tx contexts free (echo_index == max_tx_urbs sentinel) */
	kvaser_usb_reset_tx_urb_contexts(priv);

	priv->can.state = CAN_STATE_STOPPED;
	priv->can.clock.freq = CAN_USB_CLOCK;
	priv->can.bittiming_const = &kvaser_usb_bittiming_const;
	priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
	priv->can.do_set_mode = kvaser_usb_set_mode;
	/* Optional features, gated by the per-product driver_info flags */
	if (id->driver_info & KVASER_HAS_TXRX_ERRORS)
		priv->can.do_get_berr_counter = kvaser_usb_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
	if (id->driver_info & KVASER_HAS_SILENT_MODE)
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;

	/* Echo successfully transmitted frames back to the stack */
	netdev->flags |= IFF_ECHO;

	netdev->netdev_ops = &kvaser_usb_netdev_ops;

	SET_NETDEV_DEV(netdev, &intf->dev);
	netdev->dev_id = channel;

	dev->nets[channel] = priv;

	err = register_candev(netdev);
	if (err) {
		dev_err(&intf->dev, "Failed to register can device\n");
		free_candev(netdev);
		dev->nets[channel] = NULL;
		return err;
	}

	netdev_dbg(netdev, "device registered\n");

	return 0;
}
1960
1961static int kvaser_usb_get_endpoints(const struct usb_interface *intf,
1962 struct usb_endpoint_descriptor **in,
1963 struct usb_endpoint_descriptor **out)
1964{
1965 const struct usb_host_interface *iface_desc;
1966 struct usb_endpoint_descriptor *endpoint;
1967 int i;
1968
1969 iface_desc = &intf->altsetting[0];
1970
1971 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
1972 endpoint = &iface_desc->endpoint[i].desc;
1973
1974 if (!*in && usb_endpoint_is_bulk_in(endpoint))
1975 *in = endpoint;
1976
1977 if (!*out && usb_endpoint_is_bulk_out(endpoint))
1978 *out = endpoint;
1979
1980 /* use first bulk endpoint for in and out */
1981 if (*in && *out)
1982 return 0;
1983 }
1984
1985 return -ENODEV;
1986}
1987
/* USB probe: identify the device family, discover the bulk endpoints,
 * query firmware/card information and register one CAN netdev per
 * channel. dev itself is devm-allocated, so it needs no explicit free
 * on the error paths.
 */
static int kvaser_usb_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	struct kvaser_usb *dev;
	int err = -ENOMEM;
	int i, retry = 3;

	dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* Family is decided purely from the product ID range */
	if (kvaser_is_leaf(id)) {
		dev->family = KVASER_LEAF;
	} else if (kvaser_is_usbcan(id)) {
		dev->family = KVASER_USBCAN;
	} else {
		dev_err(&intf->dev,
			"Product ID (%d) does not belong to any known Kvaser USB family",
			id->idProduct);
		return -ENODEV;
	}

	err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
	if (err) {
		dev_err(&intf->dev, "Cannot get usb endpoint(s)");
		return err;
	}

	dev->udev = interface_to_usbdev(intf);

	init_usb_anchor(&dev->rx_submitted);

	usb_set_intfdata(intf, dev);

	/* On some x86 laptops, plugging a Kvaser device again after
	 * an unplug makes the firmware always ignore the very first
	 * command. For such a case, provide some room for retries
	 * instead of completely exiting the driver.
	 */
	do {
		err = kvaser_usb_get_software_info(dev);
	} while (--retry && err == -ETIMEDOUT);

	if (err) {
		dev_err(&intf->dev,
			"Cannot get software infos, error %d\n", err);
		return err;
	}

	dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
		((dev->fw_version >> 24) & 0xff),
		((dev->fw_version >> 16) & 0xff),
		(dev->fw_version & 0xffff));

	dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);

	err = kvaser_usb_get_card_info(dev);
	if (err) {
		dev_err(&intf->dev,
			"Cannot get card infos, error %d\n", err);
		return err;
	}

	for (i = 0; i < dev->nchannels; i++) {
		err = kvaser_usb_init_one(intf, id, i);
		if (err) {
			/* Tear down the channels registered so far */
			kvaser_usb_remove_interfaces(dev);
			return err;
		}
	}

	return 0;
}
2061
2062static void kvaser_usb_disconnect(struct usb_interface *intf)
2063{
2064 struct kvaser_usb *dev = usb_get_intfdata(intf);
2065
2066 usb_set_intfdata(intf, NULL);
2067
2068 if (!dev)
2069 return;
2070
2071 kvaser_usb_remove_interfaces(dev);
2072}
2073
2074static struct usb_driver kvaser_usb_driver = {
2075 .name = "kvaser_usb",
2076 .probe = kvaser_usb_probe,
2077 .disconnect = kvaser_usb_disconnect,
2078 .id_table = kvaser_usb_table,
2079};
2080
2081module_usb_driver(kvaser_usb_driver);
2082
2083MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
2084MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices");
2085MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile
new file mode 100644
index 000000000000..9f41ddab6a5a
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
2kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
new file mode 100644
index 000000000000..390b6bde883c
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -0,0 +1,188 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Parts of this driver are based on the following:
3 * - Kvaser linux leaf driver (version 4.78)
4 * - CAN driver for esd CAN-USB/2
5 * - Kvaser linux usbcanII driver (version 5.3)
6 * - Kvaser linux mhydra driver (version 5.24)
7 *
8 * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
9 * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
10 * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
11 * Copyright (C) 2015 Valeo S.A.
12 */
13
14#ifndef KVASER_USB_H
15#define KVASER_USB_H
16
17/* Kvaser USB CAN dongles are divided into three major platforms:
18 * - Hydra: Running firmware labeled as 'mhydra'
19 * - Leaf: Based on Renesas M32C or Freescale i.MX28, running firmware labeled
20 * as 'filo'
21 * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios'
22 */
23
24#include <linux/completion.h>
25#include <linux/spinlock.h>
26#include <linux/types.h>
27#include <linux/usb.h>
28
29#include <linux/can.h>
30#include <linux/can/dev.h>
31
32#define KVASER_USB_MAX_RX_URBS 4
33#define KVASER_USB_MAX_TX_URBS 128
34#define KVASER_USB_TIMEOUT 1000 /* msecs */
35#define KVASER_USB_RX_BUFFER_SIZE 3072
36#define KVASER_USB_MAX_NET_DEVICES 5
37
38/* USB devices features */
39#define KVASER_USB_HAS_SILENT_MODE BIT(0)
40#define KVASER_USB_HAS_TXRX_ERRORS BIT(1)
41
42/* Device capabilities */
43#define KVASER_USB_CAP_BERR_CAP 0x01
44#define KVASER_USB_CAP_EXT_CAP 0x02
45#define KVASER_USB_HYDRA_CAP_EXT_CMD 0x04
46
47struct kvaser_usb_dev_cfg;
48
49enum kvaser_usb_leaf_family {
50 KVASER_LEAF,
51 KVASER_USBCAN,
52};
53
54#define KVASER_USB_HYDRA_MAX_CMD_LEN 128
55struct kvaser_usb_dev_card_data_hydra {
56 u8 channel_to_he[KVASER_USB_MAX_NET_DEVICES];
57 u8 sysdbg_he;
58 spinlock_t transid_lock; /* lock for transid */
59 u16 transid;
60 /* lock for usb_rx_leftover and usb_rx_leftover_len */
61 spinlock_t usb_rx_leftover_lock;
62 u8 usb_rx_leftover[KVASER_USB_HYDRA_MAX_CMD_LEN];
63 u8 usb_rx_leftover_len;
64};
65struct kvaser_usb_dev_card_data {
66 u32 ctrlmode_supported;
67 u32 capabilities;
68 union {
69 struct {
70 enum kvaser_usb_leaf_family family;
71 } leaf;
72 struct kvaser_usb_dev_card_data_hydra hydra;
73 };
74};
75
76/* Context for an outstanding, not yet ACKed, transmission */
77struct kvaser_usb_tx_urb_context {
78 struct kvaser_usb_net_priv *priv;
79 u32 echo_index;
80 int dlc;
81};
82
/* Per-USB-device state, shared by all of its CAN channels */
struct kvaser_usb {
	struct usb_device *udev;
	struct usb_interface *intf;
	struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES];
	const struct kvaser_usb_dev_ops *ops;
	const struct kvaser_usb_dev_cfg *cfg;

	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	struct usb_anchor rx_submitted;

	u32 fw_version;
	unsigned int nchannels;
	/* @max_tx_urbs: Firmware-reported maximum number of outstanding,
	 * not yet ACKed, transmissions on this device. This value is
	 * also used as a sentinel for marking free tx contexts.
	 * (Comment moved next to the member it documents.)
	 */
	unsigned int max_tx_urbs;
	struct kvaser_usb_dev_card_data card_data;

	bool rxinitdone;
	void *rxbuf[KVASER_USB_MAX_RX_URBS];
	dma_addr_t rxbuf_dma[KVASER_USB_MAX_RX_URBS];
};
106
107struct kvaser_usb_net_priv {
108 struct can_priv can;
109 struct can_berr_counter bec;
110
111 struct kvaser_usb *dev;
112 struct net_device *netdev;
113 int channel;
114
115 struct completion start_comp, stop_comp, flush_comp;
116 struct usb_anchor tx_submitted;
117
118 spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */
119 int active_tx_contexts;
120 struct kvaser_usb_tx_urb_context tx_contexts[];
121};
122
123/**
124 * struct kvaser_usb_dev_ops - Device specific functions
125 * @dev_set_mode: used for can.do_set_mode
126 * @dev_set_bittiming: used for can.do_set_bittiming
127 * @dev_set_data_bittiming: used for can.do_set_data_bittiming
128 * @dev_get_berr_counter: used for can.do_get_berr_counter
129 *
130 * @dev_setup_endpoints: setup USB in and out endpoints
131 * @dev_init_card: initialize card
132 * @dev_get_software_info: get software info
133 * @dev_get_software_details: get software details
134 * @dev_get_card_info: get card info
135 * @dev_get_capabilities: discover device capabilities
136 *
137 * @dev_set_opt_mode: set ctrlmod
138 * @dev_start_chip: start the CAN controller
139 * @dev_stop_chip: stop the CAN controller
140 * @dev_reset_chip: reset the CAN controller
141 * @dev_flush_queue: flush outstanding CAN messages
142 * @dev_read_bulk_callback: handle incoming commands
143 * @dev_frame_to_cmd: translate struct can_frame into device command
144 */
145struct kvaser_usb_dev_ops {
146 int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
147 int (*dev_set_bittiming)(struct net_device *netdev);
148 int (*dev_set_data_bittiming)(struct net_device *netdev);
149 int (*dev_get_berr_counter)(const struct net_device *netdev,
150 struct can_berr_counter *bec);
151 int (*dev_setup_endpoints)(struct kvaser_usb *dev);
152 int (*dev_init_card)(struct kvaser_usb *dev);
153 int (*dev_get_software_info)(struct kvaser_usb *dev);
154 int (*dev_get_software_details)(struct kvaser_usb *dev);
155 int (*dev_get_card_info)(struct kvaser_usb *dev);
156 int (*dev_get_capabilities)(struct kvaser_usb *dev);
157 int (*dev_set_opt_mode)(const struct kvaser_usb_net_priv *priv);
158 int (*dev_start_chip)(struct kvaser_usb_net_priv *priv);
159 int (*dev_stop_chip)(struct kvaser_usb_net_priv *priv);
160 int (*dev_reset_chip)(struct kvaser_usb *dev, int channel);
161 int (*dev_flush_queue)(struct kvaser_usb_net_priv *priv);
162 void (*dev_read_bulk_callback)(struct kvaser_usb *dev, void *buf,
163 int len);
164 void *(*dev_frame_to_cmd)(const struct kvaser_usb_net_priv *priv,
165 const struct sk_buff *skb, int *frame_len,
166 int *cmd_len, u16 transid);
167};
168
169struct kvaser_usb_dev_cfg {
170 const struct can_clock clock;
171 const unsigned int timestamp_freq;
172 const struct can_bittiming_const * const bittiming_const;
173 const struct can_bittiming_const * const data_bittiming_const;
174};
175
176extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
177extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
178
179int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
180 int *actual_len);
181
182int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len);
183
184int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
185 int len);
186
187int kvaser_usb_can_rx_over_error(struct net_device *netdev);
188#endif /* KVASER_USB_H */
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
new file mode 100644
index 000000000000..c89c7d4900d7
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -0,0 +1,835 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Parts of this driver are based on the following:
3 * - Kvaser linux leaf driver (version 4.78)
4 * - CAN driver for esd CAN-USB/2
5 * - Kvaser linux usbcanII driver (version 5.3)
6 * - Kvaser linux mhydra driver (version 5.24)
7 *
8 * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
9 * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
10 * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
11 * Copyright (C) 2015 Valeo S.A.
12 */
13
14#include <linux/completion.h>
15#include <linux/device.h>
16#include <linux/gfp.h>
17#include <linux/if.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/netdevice.h>
21#include <linux/spinlock.h>
22#include <linux/types.h>
23#include <linux/usb.h>
24
25#include <linux/can.h>
26#include <linux/can/dev.h>
27#include <linux/can/error.h>
28#include <linux/can/netlink.h>
29
30#include "kvaser_usb.h"
31
32/* Kvaser USB vendor id. */
33#define KVASER_VENDOR_ID 0x0bfd
34
35/* Kvaser Leaf USB devices product ids */
36#define USB_LEAF_DEVEL_PRODUCT_ID 10
37#define USB_LEAF_LITE_PRODUCT_ID 11
38#define USB_LEAF_PRO_PRODUCT_ID 12
39#define USB_LEAF_SPRO_PRODUCT_ID 14
40#define USB_LEAF_PRO_LS_PRODUCT_ID 15
41#define USB_LEAF_PRO_SWC_PRODUCT_ID 16
42#define USB_LEAF_PRO_LIN_PRODUCT_ID 17
43#define USB_LEAF_SPRO_LS_PRODUCT_ID 18
44#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19
45#define USB_MEMO2_DEVEL_PRODUCT_ID 22
46#define USB_MEMO2_HSHS_PRODUCT_ID 23
47#define USB_UPRO_HSHS_PRODUCT_ID 24
48#define USB_LEAF_LITE_GI_PRODUCT_ID 25
49#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26
50#define USB_MEMO2_HSLS_PRODUCT_ID 27
51#define USB_LEAF_LITE_CH_PRODUCT_ID 28
52#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29
53#define USB_OEM_MERCURY_PRODUCT_ID 34
54#define USB_OEM_LEAF_PRODUCT_ID 35
55#define USB_CAN_R_PRODUCT_ID 39
56#define USB_LEAF_LITE_V2_PRODUCT_ID 288
57#define USB_MINI_PCIE_HS_PRODUCT_ID 289
58#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
59#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
60#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
61
62/* Kvaser USBCan-II devices product ids */
63#define USB_USBCAN_REVB_PRODUCT_ID 2
64#define USB_VCI2_PRODUCT_ID 3
65#define USB_USBCAN2_PRODUCT_ID 4
66#define USB_MEMORATOR_PRODUCT_ID 5
67
68/* Kvaser Minihydra USB devices product ids */
69#define USB_BLACKBIRD_V2_PRODUCT_ID 258
70#define USB_MEMO_PRO_5HS_PRODUCT_ID 260
71#define USB_USBCAN_PRO_5HS_PRODUCT_ID 261
72#define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 262
73#define USB_LEAF_PRO_HS_V2_PRODUCT_ID 263
74#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 264
75#define USB_MEMO_2HS_PRODUCT_ID 265
76#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 266
77#define USB_HYBRID_CANLIN_PRODUCT_ID 267
78#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268
79#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269
80#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270
81
82static inline bool kvaser_is_leaf(const struct usb_device_id *id)
83{
84 return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
85 id->idProduct <= USB_CAN_R_PRODUCT_ID) ||
86 (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID &&
87 id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID);
88}
89
90static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
91{
92 return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
93 id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
94}
95
96static inline bool kvaser_is_hydra(const struct usb_device_id *id)
97{
98 return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
99 id->idProduct <= USB_HYBRID_PRO_CANLIN_PRODUCT_ID;
100}
101
102static const struct usb_device_id kvaser_usb_table[] = {
103 /* Leaf USB product IDs */
104 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
105 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
106 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
107 .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
108 KVASER_USB_HAS_SILENT_MODE },
109 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
110 .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
111 KVASER_USB_HAS_SILENT_MODE },
112 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
113 .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
114 KVASER_USB_HAS_SILENT_MODE },
115 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
116 .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
117 KVASER_USB_HAS_SILENT_MODE },
118 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
119 .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
120 KVASER_USB_HAS_SILENT_MODE },
121 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
122 .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
123 KVASER_USB_HAS_SILENT_MODE },
124 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
125 .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
126 KVASER_USB_HAS_SILENT_MODE },
127 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
128 .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
129 KVASER_USB_HAS_SILENT_MODE },
130 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
131 .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
132 KVASER_USB_HAS_SILENT_MODE },
133 { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
134 .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
135 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
136 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
137 .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
138 KVASER_USB_HAS_SILENT_MODE },
139 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
140 .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
141 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
142 .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
143 { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
144 .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
145 { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
146 .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
147 { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
148 .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
149 { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
150 .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
151 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
152 { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
153 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
154 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
155 { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
156
157 /* USBCANII USB product IDs */
158 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
159 .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
160 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
161 .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
162 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
163 .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
164 { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
165 .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
166
167 /* Minihydra USB product IDs */
168 { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) },
169 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) },
170 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) },
171 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) },
172 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) },
173 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
174 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
175 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
176 { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
177 { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
178 { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
179 { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
180 { }
181};
182MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
183
/* Synchronously send a command on the bulk OUT endpoint.
 *
 * Blocks for up to KVASER_USB_TIMEOUT ms (usb_bulk_msg may sleep, so do
 * not call from atomic context). Returns 0 or a negative errno.
 */
int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len)
{
	int actual_len; /* Not used */

	return usb_bulk_msg(dev->udev,
			    usb_sndbulkpipe(dev->udev,
					    dev->bulk_out->bEndpointAddress),
			    cmd, len, &actual_len, KVASER_USB_TIMEOUT);
}
193
/* Synchronously read up to @len bytes from the bulk IN endpoint into
 * @cmd; the number of bytes actually received is stored in *actual_len.
 *
 * Blocks for up to KVASER_USB_TIMEOUT ms; do not call from atomic
 * context. Returns 0 or a negative errno.
 */
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
			int *actual_len)
{
	return usb_bulk_msg(dev->udev,
			    usb_rcvbulkpipe(dev->udev,
					    dev->bulk_in->bEndpointAddress),
			    cmd, len, actual_len, KVASER_USB_TIMEOUT);
}
202
/* Completion handler for URBs submitted by kvaser_usb_send_cmd_async().
 * Frees the command buffer that was handed over as transfer_buffer.
 */
static void kvaser_usb_send_cmd_callback(struct urb *urb)
{
	struct net_device *netdev = urb->context;

	kfree(urb->transfer_buffer);

	if (urb->status)
		netdev_warn(netdev, "urb status received: %d\n", urb->status);
}
212
213int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
214 int len)
215{
216 struct kvaser_usb *dev = priv->dev;
217 struct net_device *netdev = priv->netdev;
218 struct urb *urb;
219 int err;
220
221 urb = usb_alloc_urb(0, GFP_ATOMIC);
222 if (!urb)
223 return -ENOMEM;
224
225 usb_fill_bulk_urb(urb, dev->udev,
226 usb_sndbulkpipe(dev->udev,
227 dev->bulk_out->bEndpointAddress),
228 cmd, len, kvaser_usb_send_cmd_callback, netdev);
229 usb_anchor_urb(urb, &priv->tx_submitted);
230
231 err = usb_submit_urb(urb, GFP_ATOMIC);
232 if (err) {
233 netdev_err(netdev, "Error transmitting URB\n");
234 usb_unanchor_urb(urb);
235 }
236 usb_free_urb(urb);
237
238 return 0;
239}
240
/* Account an RX overrun reported by the device and feed a CAN error
 * frame (CAN_ERR_CRTL_RX_OVERFLOW) up the stack.
 *
 * Returns 0, or -ENOMEM when no error skb could be allocated; the
 * overrun is counted in the interface statistics either way.
 */
int kvaser_usb_can_rx_over_error(struct net_device *netdev)
{
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	stats->rx_over_errors++;
	stats->rx_errors++;

	skb = alloc_can_err_skb(netdev, &cf);
	if (!skb) {
		stats->rx_dropped++;
		netdev_warn(netdev, "No memory left for err_skb\n");
		return -ENOMEM;
	}

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_rx(skb);

	return 0;
}
266
/* Completion handler for the RX bulk URBs.
 *
 * Hands received data to the device-specific dispatcher and resubmits
 * the URB. Unlink/shutdown status codes end the RX stream; any other
 * error skips dispatch but still resubmits. If resubmission fails with
 * -ENODEV, every registered netdev is detached.
 */
static void kvaser_usb_read_bulk_callback(struct urb *urb)
{
	struct kvaser_usb *dev = urb->context;
	int err;
	unsigned int i;

	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -EPIPE:
	case -EPROTO:
	case -ESHUTDOWN:
		/* URB was unlinked or the device is gone; do not resubmit */
		return;
	default:
		dev_info(&dev->intf->dev, "Rx URB aborted (%d)\n", urb->status);
		goto resubmit_urb;
	}

	dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
					 urb->actual_length);

resubmit_urb:
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_rcvbulkpipe(dev->udev,
					  dev->bulk_in->bEndpointAddress),
			  urb->transfer_buffer, KVASER_USB_RX_BUFFER_SIZE,
			  kvaser_usb_read_bulk_callback, dev);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err == -ENODEV) {
		for (i = 0; i < dev->nchannels; i++) {
			if (!dev->nets[i])
				continue;

			netif_device_detach(dev->nets[i]->netdev);
		}
	} else if (err) {
		dev_err(&dev->intf->dev,
			"Failed resubmitting read bulk urb: %d\n", err);
	}
}
309
/* Allocate, fill and submit the pool of RX bulk URBs (once per device;
 * guarded by dev->rxinitdone).
 *
 * Buffers are DMA-coherent and their pointers/handles are stashed in
 * dev->rxbuf[]/rxbuf_dma[] for release in kvaser_usb_unlink_all_urbs().
 * Succeeds (with a warning) as long as at least one URB was submitted;
 * fails only when none could be set up.
 */
static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
{
	int i, err = 0;

	if (dev->rxinitdone)
		return 0;

	for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++) {
		struct urb *urb = NULL;
		u8 *buf = NULL;
		dma_addr_t buf_dma;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			err = -ENOMEM;
			break;
		}

		buf = usb_alloc_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE,
					 GFP_KERNEL, &buf_dma);
		if (!buf) {
			dev_warn(&dev->intf->dev,
				 "No memory left for USB buffer\n");
			usb_free_urb(urb);
			err = -ENOMEM;
			break;
		}

		usb_fill_bulk_urb(urb, dev->udev,
				  usb_rcvbulkpipe
					(dev->udev,
					 dev->bulk_in->bEndpointAddress),
				  buf, KVASER_USB_RX_BUFFER_SIZE,
				  kvaser_usb_read_bulk_callback, dev);
		urb->transfer_dma = buf_dma;
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		usb_anchor_urb(urb, &dev->rx_submitted);

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err) {
			usb_unanchor_urb(urb);
			usb_free_coherent(dev->udev,
					  KVASER_USB_RX_BUFFER_SIZE, buf,
					  buf_dma);
			usb_free_urb(urb);
			break;
		}

		dev->rxbuf[i] = buf;
		dev->rxbuf_dma[i] = buf_dma;

		/* Drop our reference; the anchor keeps the URB alive */
		usb_free_urb(urb);
	}

	if (i == 0) {
		dev_warn(&dev->intf->dev, "Cannot setup read URBs, error %d\n",
			 err);
		return err;
	} else if (i < KVASER_USB_MAX_RX_URBS) {
		dev_warn(&dev->intf->dev, "RX performances may be slow\n");
	}

	dev->rxinitdone = true;

	return 0;
}
376
/* ndo_open: bring the channel up.
 *
 * RX URBs are set up before the chip is started so no early traffic or
 * command replies are lost. On any failure the candev is closed again
 * and the error is returned.
 */
static int kvaser_usb_open(struct net_device *netdev)
{
	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
	struct kvaser_usb *dev = priv->dev;
	int err;

	err = open_candev(netdev);
	if (err)
		return err;

	err = kvaser_usb_setup_rx_urbs(dev);
	if (err)
		goto error;

	err = dev->ops->dev_set_opt_mode(priv);
	if (err)
		goto error;

	err = dev->ops->dev_start_chip(priv);
	if (err) {
		netdev_warn(netdev, "Cannot start device, error %d\n", err);
		goto error;
	}

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return 0;

error:
	close_candev(netdev);
	return err;
}
409
410static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
411{
412 int i, max_tx_urbs;
413
414 max_tx_urbs = priv->dev->max_tx_urbs;
415
416 priv->active_tx_contexts = 0;
417 for (i = 0; i < max_tx_urbs; i++)
418 priv->tx_contexts[i].echo_index = max_tx_urbs;
419}
420
/* This method might sleep. Do not call it in the atomic context
 * of URB completions.
 */
static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
{
	/* Cancel in-flight TX URBs, then mark every tx context free again */
	usb_kill_anchored_urbs(&priv->tx_submitted);
	kvaser_usb_reset_tx_urb_contexts(priv);
}
429
/* Cancel every outstanding URB (device RX pool and per-channel TX) and
 * release the DMA-coherent RX buffers. Might sleep (see
 * kvaser_usb_unlink_tx_urbs()); do not call from URB completion context.
 */
static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
{
	int i;

	usb_kill_anchored_urbs(&dev->rx_submitted);

	for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++)
		usb_free_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE,
				  dev->rxbuf[i], dev->rxbuf_dma[i]);

	for (i = 0; i < dev->nchannels; i++) {
		struct kvaser_usb_net_priv *priv = dev->nets[i];

		if (priv)
			kvaser_usb_unlink_tx_urbs(priv);
	}
}
447
/* Network device stop callback (ndo_stop).
 *
 * Stops the tx queue, asks the device to flush, (optionally) reset and
 * stop the channel — failures there are only logged since close must
 * not fail — then cancels pending tx URBs and reports CAN_STATE_STOPPED
 * to the CAN dev layer. Always returns 0.
 */
static int kvaser_usb_close(struct net_device *netdev)
{
	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
	struct kvaser_usb *dev = priv->dev;
	int err;

	netif_stop_queue(netdev);

	err = dev->ops->dev_flush_queue(priv);
	if (err)
		netdev_warn(netdev, "Cannot flush queue, error %d\n", err);

	/* dev_reset_chip is optional (not provided by every family) */
	if (dev->ops->dev_reset_chip) {
		err = dev->ops->dev_reset_chip(dev, priv->channel);
		if (err)
			netdev_warn(netdev, "Cannot reset card, error %d\n",
				    err);
	}

	err = dev->ops->dev_stop_chip(priv);
	if (err)
		netdev_warn(netdev, "Cannot stop device, error %d\n", err);

	/* reset tx contexts */
	kvaser_usb_unlink_tx_urbs(priv);

	priv->can.state = CAN_STATE_STOPPED;
	close_candev(priv->netdev);

	return 0;
}
479
/* Completion handler for tx bulk URBs (runs in atomic context).
 *
 * Frees the command buffer allocated by dev_frame_to_cmd() in
 * kvaser_usb_start_xmit() and logs aborted transfers. The tx context
 * is NOT released here (echo_index stays claimed) —
 * NOTE(review): presumably the device's tx-acknowledge path frees it;
 * confirm against the family-specific ack handlers.
 */
static void kvaser_usb_write_bulk_callback(struct urb *urb)
{
	struct kvaser_usb_tx_urb_context *context = urb->context;
	struct kvaser_usb_net_priv *priv;
	struct net_device *netdev;

	if (WARN_ON(!context))
		return;

	priv = context->priv;
	netdev = priv->netdev;

	kfree(urb->transfer_buffer);

	if (!netif_device_present(netdev))
		return;

	if (urb->status)
		netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
}
500
/* Network device transmit callback (ndo_start_xmit).
 *
 * Claims a free tx context under tx_contexts_lock, translates the skb
 * into a device command via dev_frame_to_cmd(), queues the skb for
 * loopback with can_put_echo_skb() and submits the bulk URB. The queue
 * is stopped as soon as the last context gets claimed, and woken again
 * on every failure path that releases the context.
 *
 * Buffer ownership: on successful submit the command buffer is freed by
 * kvaser_usb_write_bulk_callback(); on submit failure it is freed here.
 * The URB itself is always released through the final usb_free_urb()
 * (the USB core holds its own reference while the URB is in flight).
 */
static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
					 struct net_device *netdev)
{
	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
	struct kvaser_usb *dev = priv->dev;
	struct net_device_stats *stats = &netdev->stats;
	struct kvaser_usb_tx_urb_context *context = NULL;
	struct urb *urb;
	void *buf;
	int cmd_len = 0;
	int err, ret = NETDEV_TX_OK;
	unsigned int i;
	unsigned long flags;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		stats->tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Claim a free context; an echo_index == max_tx_urbs marks free */
	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
	for (i = 0; i < dev->max_tx_urbs; i++) {
		if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
			context = &priv->tx_contexts[i];

			context->echo_index = i;
			++priv->active_tx_contexts;
			if (priv->active_tx_contexts >= (int)dev->max_tx_urbs)
				netif_stop_queue(netdev);

			break;
		}
	}
	spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);

	/* This should never happen; it implies a flow control bug */
	if (!context) {
		netdev_warn(netdev, "cannot find free context\n");

		ret = NETDEV_TX_BUSY;
		goto freeurb;
	}

	buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len,
					 context->echo_index);
	if (!buf) {
		/* Translation failed: drop the frame, release the context */
		stats->tx_dropped++;
		dev_kfree_skb(skb);
		spin_lock_irqsave(&priv->tx_contexts_lock, flags);

		context->echo_index = dev->max_tx_urbs;
		--priv->active_tx_contexts;
		netif_wake_queue(netdev);

		spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
		goto freeurb;
	}

	context->priv = priv;

	can_put_echo_skb(skb, netdev, context->echo_index);

	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev,
					  dev->bulk_out->bEndpointAddress),
			  buf, cmd_len, kvaser_usb_write_bulk_callback,
			  context);
	usb_anchor_urb(urb, &priv->tx_submitted);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		/* Undo everything: echo skb, context, anchor, buffer */
		spin_lock_irqsave(&priv->tx_contexts_lock, flags);

		can_free_echo_skb(netdev, context->echo_index);
		context->echo_index = dev->max_tx_urbs;
		--priv->active_tx_contexts;
		netif_wake_queue(netdev);

		spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);

		usb_unanchor_urb(urb);
		kfree(buf);

		stats->tx_dropped++;

		if (err == -ENODEV)
			netif_device_detach(netdev);
		else
			netdev_warn(netdev, "Failed tx_urb %d\n", err);

		goto freeurb;
	}

	ret = NETDEV_TX_OK;

freeurb:
	usb_free_urb(urb);
	return ret;
}
604
/* net_device callbacks shared by all Kvaser USB CAN channels */
static const struct net_device_ops kvaser_usb_netdev_ops = {
	.ndo_open = kvaser_usb_open,
	.ndo_stop = kvaser_usb_close,
	.ndo_start_xmit = kvaser_usb_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
611
/* Unregister and free every per-channel CAN device and release all
 * URBs and DMA buffers.
 *
 * All channels are unregistered before any free_candev() so that no
 * netdev is freed while URBs are still being cancelled in between.
 */
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
	int i;

	for (i = 0; i < dev->nchannels; i++) {
		if (!dev->nets[i])
			continue;

		unregister_candev(dev->nets[i]->netdev);
	}

	kvaser_usb_unlink_all_urbs(dev);

	for (i = 0; i < dev->nchannels; i++) {
		if (!dev->nets[i])
			continue;

		free_candev(dev->nets[i]->netdev);
	}
}
632
633static int kvaser_usb_init_one(struct kvaser_usb *dev,
634 const struct usb_device_id *id, int channel)
635{
636 struct net_device *netdev;
637 struct kvaser_usb_net_priv *priv;
638 int err;
639
640 if (dev->ops->dev_reset_chip) {
641 err = dev->ops->dev_reset_chip(dev, channel);
642 if (err)
643 return err;
644 }
645
646 netdev = alloc_candev(sizeof(*priv) +
647 dev->max_tx_urbs * sizeof(*priv->tx_contexts),
648 dev->max_tx_urbs);
649 if (!netdev) {
650 dev_err(&dev->intf->dev, "Cannot alloc candev\n");
651 return -ENOMEM;
652 }
653
654 priv = netdev_priv(netdev);
655
656 init_usb_anchor(&priv->tx_submitted);
657 init_completion(&priv->start_comp);
658 init_completion(&priv->stop_comp);
659 priv->can.ctrlmode_supported = 0;
660
661 priv->dev = dev;
662 priv->netdev = netdev;
663 priv->channel = channel;
664
665 spin_lock_init(&priv->tx_contexts_lock);
666 kvaser_usb_reset_tx_urb_contexts(priv);
667
668 priv->can.state = CAN_STATE_STOPPED;
669 priv->can.clock.freq = dev->cfg->clock.freq;
670 priv->can.bittiming_const = dev->cfg->bittiming_const;
671 priv->can.do_set_bittiming = dev->ops->dev_set_bittiming;
672 priv->can.do_set_mode = dev->ops->dev_set_mode;
673 if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) ||
674 (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
675 priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter;
676 if (id->driver_info & KVASER_USB_HAS_SILENT_MODE)
677 priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
678
679 priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
680
681 if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
682 priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
683 priv->can.do_set_data_bittiming =
684 dev->ops->dev_set_data_bittiming;
685 }
686
687 netdev->flags |= IFF_ECHO;
688
689 netdev->netdev_ops = &kvaser_usb_netdev_ops;
690
691 SET_NETDEV_DEV(netdev, &dev->intf->dev);
692 netdev->dev_id = channel;
693
694 dev->nets[channel] = priv;
695
696 err = register_candev(netdev);
697 if (err) {
698 dev_err(&dev->intf->dev, "Failed to register CAN device\n");
699 free_candev(netdev);
700 dev->nets[channel] = NULL;
701 return err;
702 }
703
704 netdev_dbg(netdev, "device registered\n");
705
706 return 0;
707}
708
/* USB probe callback.
 *
 * Selects the family-specific ops table from the device id, sets up
 * endpoints, queries firmware/card information and capabilities, and
 * registers one CAN netdev per channel. Returns 0 on success or a
 * negative error code (devm_ handles the dev allocation cleanup).
 */
static int kvaser_usb_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	struct kvaser_usb *dev;
	int err;
	int i;

	dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* Pick the ops table for the device family */
	if (kvaser_is_leaf(id)) {
		dev->card_data.leaf.family = KVASER_LEAF;
		dev->ops = &kvaser_usb_leaf_dev_ops;
	} else if (kvaser_is_usbcan(id)) {
		dev->card_data.leaf.family = KVASER_USBCAN;
		dev->ops = &kvaser_usb_leaf_dev_ops;
	} else if (kvaser_is_hydra(id)) {
		dev->ops = &kvaser_usb_hydra_dev_ops;
	} else {
		dev_err(&intf->dev,
			"Product ID (%d) is not a supported Kvaser USB device\n",
			id->idProduct);
		return -ENODEV;
	}

	dev->intf = intf;

	err = dev->ops->dev_setup_endpoints(dev);
	if (err) {
		dev_err(&intf->dev, "Cannot get usb endpoint(s)");
		return err;
	}

	dev->udev = interface_to_usbdev(intf);

	init_usb_anchor(&dev->rx_submitted);

	usb_set_intfdata(intf, dev);

	dev->card_data.ctrlmode_supported = 0;
	dev->card_data.capabilities = 0;
	err = dev->ops->dev_init_card(dev);
	if (err) {
		dev_err(&intf->dev,
			"Failed to initialize card, error %d\n", err);
		return err;
	}

	err = dev->ops->dev_get_software_info(dev);
	if (err) {
		dev_err(&intf->dev,
			"Cannot get software info, error %d\n", err);
		return err;
	}

	/* dev_get_software_details is optional; when present it is
	 * expected to fill in dev->cfg (checked right below)
	 */
	if (dev->ops->dev_get_software_details) {
		err = dev->ops->dev_get_software_details(dev);
		if (err) {
			dev_err(&intf->dev,
				"Cannot get software details, error %d\n", err);
			return err;
		}
	}

	if (WARN_ON(!dev->cfg))
		return -ENODEV;

	dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
		((dev->fw_version >> 24) & 0xff),
		((dev->fw_version >> 16) & 0xff),
		(dev->fw_version & 0xffff));

	dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);

	err = dev->ops->dev_get_card_info(dev);
	if (err) {
		dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
		return err;
	}

	if (dev->ops->dev_get_capabilities) {
		err = dev->ops->dev_get_capabilities(dev);
		if (err) {
			dev_err(&intf->dev,
				"Cannot get capabilities, error %d\n", err);
			kvaser_usb_remove_interfaces(dev);
			return err;
		}
	}

	/* Partial failure tears down every channel registered so far */
	for (i = 0; i < dev->nchannels; i++) {
		err = kvaser_usb_init_one(dev, id, i);
		if (err) {
			kvaser_usb_remove_interfaces(dev);
			return err;
		}
	}

	return 0;
}
810
811static void kvaser_usb_disconnect(struct usb_interface *intf)
812{
813 struct kvaser_usb *dev = usb_get_intfdata(intf);
814
815 usb_set_intfdata(intf, NULL);
816
817 if (!dev)
818 return;
819
820 kvaser_usb_remove_interfaces(dev);
821}
822
/* USB driver glue. NOTE(review): kvaser_usb_table is presumably the
 * device-id table defined earlier in this file — confirm.
 */
static struct usb_driver kvaser_usb_driver = {
	.name = "kvaser_usb",
	.probe = kvaser_usb_probe,
	.disconnect = kvaser_usb_disconnect,
	.id_table = kvaser_usb_table,
};

module_usb_driver(kvaser_usb_driver);

MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
new file mode 100644
index 000000000000..5fc0be564274
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -0,0 +1,2028 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Parts of this driver are based on the following:
3 * - Kvaser linux mhydra driver (version 5.24)
4 * - CAN driver for esd CAN-USB/2
5 *
6 * Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
7 * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
8 *
9 * Known issues:
10 * - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only
11 * reported after a call to do_get_berr_counter(), since firmware does not
12 * distinguish between ERROR_WARNING and ERROR_ACTIVE.
13 * - Hardware timestamps are not set for CAN Tx frames.
14 */
15
16#include <linux/completion.h>
17#include <linux/device.h>
18#include <linux/gfp.h>
19#include <linux/jiffies.h>
20#include <linux/kernel.h>
21#include <linux/netdevice.h>
22#include <linux/spinlock.h>
23#include <linux/string.h>
24#include <linux/types.h>
25#include <linux/usb.h>
26
27#include <linux/can.h>
28#include <linux/can/dev.h>
29#include <linux/can/error.h>
30#include <linux/can/netlink.h>
31
32#include "kvaser_usb.h"
33
34/* Forward declarations */
35static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan;
36static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc;
37
38#define KVASER_USB_HYDRA_BULK_EP_IN_ADDR 0x82
39#define KVASER_USB_HYDRA_BULK_EP_OUT_ADDR 0x02
40
41#define KVASER_USB_HYDRA_MAX_TRANSID 0xff
42#define KVASER_USB_HYDRA_MIN_TRANSID 0x01
43
44/* Minihydra command IDs */
45#define CMD_SET_BUSPARAMS_REQ 16
46#define CMD_GET_CHIP_STATE_REQ 19
47#define CMD_CHIP_STATE_EVENT 20
48#define CMD_SET_DRIVERMODE_REQ 21
49#define CMD_START_CHIP_REQ 26
50#define CMD_START_CHIP_RESP 27
51#define CMD_STOP_CHIP_REQ 28
52#define CMD_STOP_CHIP_RESP 29
53#define CMD_TX_CAN_MESSAGE 33
54#define CMD_GET_CARD_INFO_REQ 34
55#define CMD_GET_CARD_INFO_RESP 35
56#define CMD_GET_SOFTWARE_INFO_REQ 38
57#define CMD_GET_SOFTWARE_INFO_RESP 39
58#define CMD_ERROR_EVENT 45
59#define CMD_FLUSH_QUEUE 48
60#define CMD_TX_ACKNOWLEDGE 50
61#define CMD_FLUSH_QUEUE_RESP 66
62#define CMD_SET_BUSPARAMS_FD_REQ 69
63#define CMD_SET_BUSPARAMS_FD_RESP 70
64#define CMD_SET_BUSPARAMS_RESP 85
65#define CMD_GET_CAPABILITIES_REQ 95
66#define CMD_GET_CAPABILITIES_RESP 96
67#define CMD_RX_MESSAGE 106
68#define CMD_MAP_CHANNEL_REQ 200
69#define CMD_MAP_CHANNEL_RESP 201
70#define CMD_GET_SOFTWARE_DETAILS_REQ 202
71#define CMD_GET_SOFTWARE_DETAILS_RESP 203
72#define CMD_EXTENDED 255
73
74/* Minihydra extended command IDs */
75#define CMD_TX_CAN_MESSAGE_FD 224
76#define CMD_TX_ACKNOWLEDGE_FD 225
77#define CMD_RX_MESSAGE_FD 226
78
79/* Hydra commands are handled by different threads in firmware.
80 * The threads are denoted hydra entity (HE). Each HE got a unique 6-bit
81 * address. The address is used in hydra commands to get/set source and
82 * destination HE. There are two predefined HE addresses, the remaining
83 * addresses are different between devices and firmware versions. Hence, we need
84 * to enumerate the addresses (see kvaser_usb_hydra_map_channel()).
85 */
86
87/* Well-known HE addresses */
88#define KVASER_USB_HYDRA_HE_ADDRESS_ROUTER 0x00
89#define KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL 0x3e
90
91#define KVASER_USB_HYDRA_TRANSID_CANHE 0x40
92#define KVASER_USB_HYDRA_TRANSID_SYSDBG 0x61
93
94struct kvaser_cmd_map_ch_req {
95 char name[16];
96 u8 channel;
97 u8 reserved[11];
98} __packed;
99
100struct kvaser_cmd_map_ch_res {
101 u8 he_addr;
102 u8 channel;
103 u8 reserved[26];
104} __packed;
105
106struct kvaser_cmd_card_info {
107 __le32 serial_number;
108 __le32 clock_res;
109 __le32 mfg_date;
110 __le32 ean[2];
111 u8 hw_version;
112 u8 usb_mode;
113 u8 hw_type;
114 u8 reserved0;
115 u8 nchannels;
116 u8 reserved1[3];
117} __packed;
118
119struct kvaser_cmd_sw_info {
120 u8 reserved0[8];
121 __le16 max_outstanding_tx;
122 u8 reserved1[18];
123} __packed;
124
125struct kvaser_cmd_sw_detail_req {
126 u8 use_ext_cmd;
127 u8 reserved[27];
128} __packed;
129
130/* Software detail flags */
131#define KVASER_USB_HYDRA_SW_FLAG_FW_BETA BIT(2)
132#define KVASER_USB_HYDRA_SW_FLAG_FW_BAD BIT(4)
133#define KVASER_USB_HYDRA_SW_FLAG_FREQ_80M BIT(5)
134#define KVASER_USB_HYDRA_SW_FLAG_EXT_CMD BIT(9)
135#define KVASER_USB_HYDRA_SW_FLAG_CANFD BIT(10)
136#define KVASER_USB_HYDRA_SW_FLAG_NONISO BIT(11)
137#define KVASER_USB_HYDRA_SW_FLAG_EXT_CAP BIT(12)
138struct kvaser_cmd_sw_detail_res {
139 __le32 sw_flags;
140 __le32 sw_version;
141 __le32 sw_name;
142 __le32 ean[2];
143 __le32 max_bitrate;
144 u8 reserved[4];
145} __packed;
146
147/* Sub commands for cap_req and cap_res */
148#define KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE 0x02
149#define KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT 0x05
150#define KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT 0x06
151struct kvaser_cmd_cap_req {
152 __le16 cap_cmd;
153 u8 reserved[26];
154} __packed;
155
156/* Status codes for cap_res */
157#define KVASER_USB_HYDRA_CAP_STAT_OK 0x00
158#define KVASER_USB_HYDRA_CAP_STAT_NOT_IMPL 0x01
159#define KVASER_USB_HYDRA_CAP_STAT_UNAVAIL 0x02
160struct kvaser_cmd_cap_res {
161 __le16 cap_cmd;
162 __le16 status;
163 __le32 mask;
164 __le32 value;
165 u8 reserved[16];
166} __packed;
167
168/* CMD_ERROR_EVENT error codes */
169#define KVASER_USB_HYDRA_ERROR_EVENT_CAN 0x01
170#define KVASER_USB_HYDRA_ERROR_EVENT_PARAM 0x09
171struct kvaser_cmd_error_event {
172 __le16 timestamp[3];
173 u8 reserved;
174 u8 error_code;
175 __le16 info1;
176 __le16 info2;
177} __packed;
178
179/* Chip state status flags. Used for chip_state_event and err_frame_data. */
180#define KVASER_USB_HYDRA_BUS_ERR_ACT 0x00
181#define KVASER_USB_HYDRA_BUS_ERR_PASS BIT(5)
182#define KVASER_USB_HYDRA_BUS_BUS_OFF BIT(6)
183struct kvaser_cmd_chip_state_event {
184 __le16 timestamp[3];
185 u8 tx_err_counter;
186 u8 rx_err_counter;
187 u8 bus_status;
188 u8 reserved[19];
189} __packed;
190
191/* Busparam modes */
192#define KVASER_USB_HYDRA_BUS_MODE_CAN 0x00
193#define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01
194#define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02
195struct kvaser_cmd_set_busparams {
196 __le32 bitrate;
197 u8 tseg1;
198 u8 tseg2;
199 u8 sjw;
200 u8 nsamples;
201 u8 reserved0[4];
202 __le32 bitrate_d;
203 u8 tseg1_d;
204 u8 tseg2_d;
205 u8 sjw_d;
206 u8 nsamples_d;
207 u8 canfd_mode;
208 u8 reserved1[7];
209} __packed;
210
211/* Ctrl modes */
212#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
213#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
214struct kvaser_cmd_set_ctrlmode {
215 u8 mode;
216 u8 reserved[27];
217} __packed;
218
219struct kvaser_err_frame_data {
220 u8 bus_status;
221 u8 reserved0;
222 u8 tx_err_counter;
223 u8 rx_err_counter;
224 u8 reserved1[4];
225} __packed;
226
227struct kvaser_cmd_rx_can {
228 u8 cmd_len;
229 u8 cmd_no;
230 u8 channel;
231 u8 flags;
232 __le16 timestamp[3];
233 u8 dlc;
234 u8 padding;
235 __le32 id;
236 union {
237 u8 data[8];
238 struct kvaser_err_frame_data err_frame_data;
239 };
240} __packed;
241
242/* Extended CAN ID flag. Used in rx_can and tx_can */
243#define KVASER_USB_HYDRA_EXTENDED_FRAME_ID BIT(31)
244struct kvaser_cmd_tx_can {
245 __le32 id;
246 u8 data[8];
247 u8 dlc;
248 u8 flags;
249 __le16 transid;
250 u8 channel;
251 u8 reserved[11];
252} __packed;
253
254struct kvaser_cmd_header {
255 u8 cmd_no;
256 /* The destination HE address is stored in 0..5 of he_addr.
257 * The upper part of source HE address is stored in 6..7 of he_addr, and
258 * the lower part is stored in 12..15 of transid.
259 */
260 u8 he_addr;
261 __le16 transid;
262} __packed;
263
264struct kvaser_cmd {
265 struct kvaser_cmd_header header;
266 union {
267 struct kvaser_cmd_map_ch_req map_ch_req;
268 struct kvaser_cmd_map_ch_res map_ch_res;
269
270 struct kvaser_cmd_card_info card_info;
271 struct kvaser_cmd_sw_info sw_info;
272 struct kvaser_cmd_sw_detail_req sw_detail_req;
273 struct kvaser_cmd_sw_detail_res sw_detail_res;
274
275 struct kvaser_cmd_cap_req cap_req;
276 struct kvaser_cmd_cap_res cap_res;
277
278 struct kvaser_cmd_error_event error_event;
279
280 struct kvaser_cmd_set_busparams set_busparams_req;
281
282 struct kvaser_cmd_chip_state_event chip_state_event;
283
284 struct kvaser_cmd_set_ctrlmode set_ctrlmode;
285
286 struct kvaser_cmd_rx_can rx_can;
287 struct kvaser_cmd_tx_can tx_can;
288 } __packed;
289} __packed;
290
291/* CAN frame flags. Used in rx_can, ext_rx_can, tx_can and ext_tx_can */
292#define KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME BIT(0)
293#define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1)
294#define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4)
295#define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5)
296/* CAN frame flags. Used in ext_rx_can and ext_tx_can */
297#define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12)
298#define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13)
299#define KVASER_USB_HYDRA_CF_FLAG_FDF BIT(16)
300#define KVASER_USB_HYDRA_CF_FLAG_BRS BIT(17)
301#define KVASER_USB_HYDRA_CF_FLAG_ESI BIT(18)
302
303/* KCAN packet header macros. Used in ext_rx_can and ext_tx_can */
304#define KVASER_USB_KCAN_DATA_DLC_BITS 4
305#define KVASER_USB_KCAN_DATA_DLC_SHIFT 8
306#define KVASER_USB_KCAN_DATA_DLC_MASK \
307 GENMASK(KVASER_USB_KCAN_DATA_DLC_BITS - 1 + \
308 KVASER_USB_KCAN_DATA_DLC_SHIFT, \
309 KVASER_USB_KCAN_DATA_DLC_SHIFT)
310
311#define KVASER_USB_KCAN_DATA_BRS BIT(14)
312#define KVASER_USB_KCAN_DATA_FDF BIT(15)
313#define KVASER_USB_KCAN_DATA_OSM BIT(16)
314#define KVASER_USB_KCAN_DATA_AREQ BIT(31)
315#define KVASER_USB_KCAN_DATA_SRR BIT(31)
316#define KVASER_USB_KCAN_DATA_RTR BIT(29)
317#define KVASER_USB_KCAN_DATA_IDE BIT(30)
318struct kvaser_cmd_ext_rx_can {
319 __le32 flags;
320 __le32 id;
321 __le32 kcan_id;
322 __le32 kcan_header;
323 __le64 timestamp;
324 union {
325 u8 kcan_payload[64];
326 struct kvaser_err_frame_data err_frame_data;
327 };
328} __packed;
329
330struct kvaser_cmd_ext_tx_can {
331 __le32 flags;
332 __le32 id;
333 __le32 kcan_id;
334 __le32 kcan_header;
335 u8 databytes;
336 u8 dlc;
337 u8 reserved[6];
338 u8 kcan_payload[64];
339} __packed;
340
341struct kvaser_cmd_ext_tx_ack {
342 __le32 flags;
343 u8 reserved0[4];
344 __le64 timestamp;
345 u8 reserved1[8];
346} __packed;
347
348/* struct for extended commands (CMD_EXTENDED) */
349struct kvaser_cmd_ext {
350 struct kvaser_cmd_header header;
351 __le16 len;
352 u8 cmd_no_ext;
353 u8 reserved;
354
355 union {
356 struct kvaser_cmd_ext_rx_can rx_can;
357 struct kvaser_cmd_ext_tx_can tx_can;
358 struct kvaser_cmd_ext_tx_ack tx_ack;
359 } __packed;
360} __packed;
361
/* Nominal bittiming limits for KCAN-based hydra controllers */
static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
	.name = "kvaser_usb_kcan",
	.tseg1_min = 1,
	.tseg1_max = 255,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 4096,
	.brp_inc = 1,
};
373
/* Bittiming limits for FlexCAN-based hydra controllers */
static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = {
	.name = "kvaser_usb_flex",
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
385
386#define KVASER_USB_HYDRA_TRANSID_BITS 12
387#define KVASER_USB_HYDRA_TRANSID_MASK \
388 GENMASK(KVASER_USB_HYDRA_TRANSID_BITS - 1, 0)
389#define KVASER_USB_HYDRA_HE_ADDR_SRC_MASK GENMASK(7, 6)
390#define KVASER_USB_HYDRA_HE_ADDR_DEST_MASK GENMASK(5, 0)
391#define KVASER_USB_HYDRA_HE_ADDR_SRC_BITS 2
392static inline u16 kvaser_usb_hydra_get_cmd_transid(const struct kvaser_cmd *cmd)
393{
394 return le16_to_cpu(cmd->header.transid) & KVASER_USB_HYDRA_TRANSID_MASK;
395}
396
397static inline void kvaser_usb_hydra_set_cmd_transid(struct kvaser_cmd *cmd,
398 u16 transid)
399{
400 cmd->header.transid =
401 cpu_to_le16(transid & KVASER_USB_HYDRA_TRANSID_MASK);
402}
403
404static inline u8 kvaser_usb_hydra_get_cmd_src_he(const struct kvaser_cmd *cmd)
405{
406 return (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) >>
407 KVASER_USB_HYDRA_HE_ADDR_SRC_BITS |
408 le16_to_cpu(cmd->header.transid) >>
409 KVASER_USB_HYDRA_TRANSID_BITS;
410}
411
412static inline void kvaser_usb_hydra_set_cmd_dest_he(struct kvaser_cmd *cmd,
413 u8 dest_he)
414{
415 cmd->header.he_addr =
416 (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) |
417 (dest_he & KVASER_USB_HYDRA_HE_ADDR_DEST_MASK);
418}
419
420static u8 kvaser_usb_hydra_channel_from_cmd(const struct kvaser_usb *dev,
421 const struct kvaser_cmd *cmd)
422{
423 int i;
424 u8 channel = 0xff;
425 u8 src_he = kvaser_usb_hydra_get_cmd_src_he(cmd);
426
427 for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) {
428 if (dev->card_data.hydra.channel_to_he[i] == src_he) {
429 channel = i;
430 break;
431 }
432 }
433
434 return channel;
435}
436
437static u16 kvaser_usb_hydra_get_next_transid(struct kvaser_usb *dev)
438{
439 unsigned long flags;
440 u16 transid;
441 struct kvaser_usb_dev_card_data_hydra *card_data =
442 &dev->card_data.hydra;
443
444 spin_lock_irqsave(&card_data->transid_lock, flags);
445 transid = card_data->transid;
446 if (transid >= KVASER_USB_HYDRA_MAX_TRANSID)
447 transid = KVASER_USB_HYDRA_MIN_TRANSID;
448 else
449 transid++;
450 card_data->transid = transid;
451 spin_unlock_irqrestore(&card_data->transid_lock, flags);
452
453 return transid;
454}
455
456static size_t kvaser_usb_hydra_cmd_size(struct kvaser_cmd *cmd)
457{
458 size_t ret;
459
460 if (cmd->header.cmd_no == CMD_EXTENDED)
461 ret = le16_to_cpu(((struct kvaser_cmd_ext *)cmd)->len);
462 else
463 ret = sizeof(struct kvaser_cmd);
464
465 return ret;
466}
467
468static struct kvaser_usb_net_priv *
469kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev,
470 const struct kvaser_cmd *cmd)
471{
472 struct kvaser_usb_net_priv *priv = NULL;
473 u8 channel = kvaser_usb_hydra_channel_from_cmd(dev, cmd);
474
475 if (channel >= dev->nchannels)
476 dev_err(&dev->intf->dev,
477 "Invalid channel number (%d)\n", channel);
478 else
479 priv = dev->nets[channel];
480
481 return priv;
482}
483
/* Convert a rx command's device timestamp (in timestamp_freq ticks) to
 * a ktime_t.
 *
 * Extended commands carry a single little-endian 64-bit timestamp;
 * standard commands split a 48-bit value over three little-endian
 * 16-bit words, least significant word first.
 */
static ktime_t
kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg,
				   const struct kvaser_cmd *cmd)
{
	u64 ticks;

	if (cmd->header.cmd_no == CMD_EXTENDED) {
		struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;

		ticks = le64_to_cpu(cmd_ext->rx_can.timestamp);
	} else {
		ticks = le16_to_cpu(cmd->rx_can.timestamp[0]);
		ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16;
		ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32;
	}

	/* ticks * 1000 / freq -> ns; NOTE(review): implies timestamp_freq
	 * is in MHz — confirm against kvaser_usb.h.
	 */
	return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
}
502
503static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
504 u8 cmd_no, int channel)
505{
506 struct kvaser_cmd *cmd;
507 int err;
508
509 cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
510 if (!cmd)
511 return -ENOMEM;
512
513 cmd->header.cmd_no = cmd_no;
514 if (channel < 0) {
515 kvaser_usb_hydra_set_cmd_dest_he
516 (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
517 } else {
518 if (channel >= KVASER_USB_MAX_NET_DEVICES) {
519 dev_err(&dev->intf->dev, "channel (%d) out of range.\n",
520 channel);
521 err = -EINVAL;
522 goto end;
523 }
524 kvaser_usb_hydra_set_cmd_dest_he
525 (cmd, dev->card_data.hydra.channel_to_he[channel]);
526 }
527 kvaser_usb_hydra_set_cmd_transid
528 (cmd, kvaser_usb_hydra_get_next_transid(dev));
529
530 err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
531 if (err)
532 goto end;
533
534end:
535 kfree(cmd);
536
537 return err;
538}
539
540static int
541kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
542 u8 cmd_no)
543{
544 struct kvaser_cmd *cmd;
545 struct kvaser_usb *dev = priv->dev;
546 int err;
547
548 cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
549 if (!cmd)
550 return -ENOMEM;
551
552 cmd->header.cmd_no = cmd_no;
553
554 kvaser_usb_hydra_set_cmd_dest_he
555 (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
556 kvaser_usb_hydra_set_cmd_transid
557 (cmd, kvaser_usb_hydra_get_next_transid(dev));
558
559 err = kvaser_usb_send_cmd_async(priv, cmd,
560 kvaser_usb_hydra_cmd_size(cmd));
561 if (err)
562 kfree(cmd);
563
564 return err;
565}
566
/* This function is used for synchronously waiting on hydra control commands.
 * Note: Compared to kvaser_usb_hydra_read_bulk_callback(), we never need to
 * handle partial hydra commands. Since hydra control commands are always
 * non-extended commands.
 *
 * Polls the device until a command with @cmd_no arrives (copied into
 * *cmd) or KVASER_USB_TIMEOUT expires (-EINVAL). Unrelated commands
 * received while waiting are skipped over and dropped.
 */
static int kvaser_usb_hydra_wait_cmd(const struct kvaser_usb *dev, u8 cmd_no,
				     struct kvaser_cmd *cmd)
{
	void *buf;
	int err;
	unsigned long timeout = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT);

	/* Extended commands can span transfers; refuse to wait on them */
	if (cmd->header.cmd_no == CMD_EXTENDED) {
		dev_err(&dev->intf->dev, "Wait for CMD_EXTENDED not allowed\n");
		return -EINVAL;
	}

	buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	do {
		int actual_len = 0;
		int pos = 0;

		err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE,
					  &actual_len);
		if (err < 0)
			goto end;

		/* A transfer may hold several back-to-back commands */
		while (pos < actual_len) {
			struct kvaser_cmd *tmp_cmd;
			size_t cmd_len;

			tmp_cmd = buf + pos;
			cmd_len = kvaser_usb_hydra_cmd_size(tmp_cmd);
			/* Truncated trailing command: discard the rest */
			if (pos + cmd_len > actual_len) {
				dev_err_ratelimited(&dev->intf->dev,
						    "Format error\n");
				break;
			}

			if (tmp_cmd->header.cmd_no == cmd_no) {
				memcpy(cmd, tmp_cmd, cmd_len);
				goto end;
			}
			pos += cmd_len;
		}
	} while (time_before(jiffies, timeout));

	err = -EINVAL;

end:
	kfree(buf);

	return err;
}
624
/* Decode a CMD_MAP_CHANNEL_RESP and record the reported HE address.
 *
 * The firmware echoes back the request's transid, which identifies the
 * mapped entity: KVASER_USB_HYDRA_TRANSID_CANHE + n for CAN channel n,
 * or KVASER_USB_HYDRA_TRANSID_SYSDBG for the sysdbg entity. Both fall
 * inside the accepted window [0x0040, 0x007f].
 *
 * Returns 0 (unknown-but-in-window transids are only warned about) or
 * -EINVAL for a transid outside the window.
 */
static int kvaser_usb_hydra_map_channel_resp(struct kvaser_usb *dev,
					     const struct kvaser_cmd *cmd)
{
	u8 he, channel;
	u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
	struct kvaser_usb_dev_card_data_hydra *card_data =
		&dev->card_data.hydra;

	if (transid > 0x007f || transid < 0x0040) {
		dev_err(&dev->intf->dev,
			"CMD_MAP_CHANNEL_RESP, invalid transid: 0x%x\n",
			transid);
		return -EINVAL;
	}

	switch (transid) {
	case KVASER_USB_HYDRA_TRANSID_CANHE:
	case KVASER_USB_HYDRA_TRANSID_CANHE + 1:
	case KVASER_USB_HYDRA_TRANSID_CANHE + 2:
	case KVASER_USB_HYDRA_TRANSID_CANHE + 3:
	case KVASER_USB_HYDRA_TRANSID_CANHE + 4:
		/* low nibble of the transid carries the channel index */
		channel = transid & 0x000f;
		he = cmd->map_ch_res.he_addr;
		card_data->channel_to_he[channel] = he;
		break;
	case KVASER_USB_HYDRA_TRANSID_SYSDBG:
		card_data->sysdbg_he = cmd->map_ch_res.he_addr;
		break;
	default:
		dev_warn(&dev->intf->dev,
			 "Unknown CMD_MAP_CHANNEL_RESP transid=0x%x\n",
			 transid);
		break;
	}

	return 0;
}
662
663static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid,
664 u8 channel, const char *name)
665{
666 struct kvaser_cmd *cmd;
667 int err;
668
669 cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
670 if (!cmd)
671 return -ENOMEM;
672
673 strcpy(cmd->map_ch_req.name, name);
674 cmd->header.cmd_no = CMD_MAP_CHANNEL_REQ;
675 kvaser_usb_hydra_set_cmd_dest_he
676 (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ROUTER);
677 cmd->map_ch_req.channel = channel;
678
679 kvaser_usb_hydra_set_cmd_transid(cmd, transid);
680
681 err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
682 if (err)
683 goto end;
684
685 err = kvaser_usb_hydra_wait_cmd(dev, CMD_MAP_CHANNEL_RESP, cmd);
686 if (err)
687 goto end;
688
689 err = kvaser_usb_hydra_map_channel_resp(dev, cmd);
690 if (err)
691 goto end;
692
693end:
694 kfree(cmd);
695
696 return err;
697}
698
/* Query one capability (@cap_cmd_req) from the device and fold the
 * result into card_data->ctrlmode_supported / capabilities for every
 * channel whose bit is set in (value & mask).
 *
 * *status receives the device-side status word. Note the function can
 * return 0 while *status != KVASER_USB_HYDRA_CAP_STAT_OK — callers
 * must check both.
 */
static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
						  u16 cap_cmd_req, u16 *status)
{
	struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
	struct kvaser_cmd *cmd;
	u32 value = 0;
	u32 mask = 0;
	u16 cap_cmd_res;
	int err;
	int i;

	cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
	cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);

	/* Capability requests go to the sysdbg HE, not a CAN channel */
	kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
	kvaser_usb_hydra_set_cmd_transid
		(cmd, kvaser_usb_hydra_get_next_transid(dev));

	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
	if (err)
		goto end;

	err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
	if (err)
		goto end;

	*status = le16_to_cpu(cmd->cap_res.status);

	if (*status != KVASER_USB_HYDRA_CAP_STAT_OK)
		goto end;

	cap_cmd_res = le16_to_cpu(cmd->cap_res.cap_cmd);
	switch (cap_cmd_res) {
	case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE:
	case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT:
	case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT:
		value = le32_to_cpu(cmd->cap_res.value);
		mask = le32_to_cpu(cmd->cap_res.mask);
		break;
	default:
		dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
			 cap_cmd_res);
		break;
	}

	/* value/mask are per-channel bitmaps; bit i == channel i */
	for (i = 0; i < dev->nchannels; i++) {
		if (BIT(i) & (value & mask)) {
			switch (cap_cmd_res) {
			case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE:
				card_data->ctrlmode_supported |=
					CAN_CTRLMODE_LISTENONLY;
				break;
			case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT:
				card_data->capabilities |=
					KVASER_USB_CAP_BERR_CAP;
				break;
			case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT:
				card_data->ctrlmode_supported |=
					CAN_CTRLMODE_ONE_SHOT;
				break;
			}
		}
	}

end:
	kfree(cmd);

	return err;
}
772
/* Handle CMD_START_CHIP_RESP.
 *
 * Normally wakes the waiter in dev_start_chip() and starts the queue.
 * If start_comp is already completed (a duplicate/late response) and
 * the queue is stopped, only the queue is woken — NOTE(review):
 * presumably guards against double-complete on repeated responses;
 * confirm intent.
 */
static void kvaser_usb_hydra_start_chip_reply(const struct kvaser_usb *dev,
					      const struct kvaser_cmd *cmd)
{
	struct kvaser_usb_net_priv *priv;

	priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
	if (!priv)
		return;

	if (completion_done(&priv->start_comp) &&
	    netif_queue_stopped(priv->netdev)) {
		netif_wake_queue(priv->netdev);
	} else {
		netif_start_queue(priv->netdev);
		complete(&priv->start_comp);
	}
}
790
791static void kvaser_usb_hydra_stop_chip_reply(const struct kvaser_usb *dev,
792 const struct kvaser_cmd *cmd)
793{
794 struct kvaser_usb_net_priv *priv;
795
796 priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
797 if (!priv)
798 return;
799
800 complete(&priv->stop_comp);
801}
802
803static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev,
804 const struct kvaser_cmd *cmd)
805{
806 struct kvaser_usb_net_priv *priv;
807
808 priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
809 if (!priv)
810 return;
811
812 complete(&priv->flush_comp);
813}
814
/* Translate a hydra bus status byte plus error counters into a CAN state.
 *
 * *new_state is only written for the statuses handled below; it is left
 * untouched when bus_status matches none of the cases, so callers must
 * initialize it or tolerate that.
 */
static void
kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
					 u8 bus_status,
					 const struct can_berr_counter *bec,
					 enum can_state *new_state)
{
	if (bus_status & KVASER_USB_HYDRA_BUS_BUS_OFF) {
		*new_state = CAN_STATE_BUS_OFF;
	} else if (bus_status & KVASER_USB_HYDRA_BUS_ERR_PASS) {
		*new_state = CAN_STATE_ERROR_PASSIVE;
	} else if (bus_status == KVASER_USB_HYDRA_BUS_ERR_ACT) {
		/* Device claims error-active; refine with the error counters,
		 * which may already be in the passive (>= 128) or warning
		 * (>= 96) range.
		 */
		if (bec->txerr >= 128 || bec->rxerr >= 128) {
			netdev_warn(priv->netdev,
				    "ERR_ACTIVE but err tx=%u or rx=%u >=128\n",
				    bec->txerr, bec->rxerr);
			*new_state = CAN_STATE_ERROR_PASSIVE;
		} else if (bec->txerr >= 96 || bec->rxerr >= 96) {
			*new_state = CAN_STATE_ERROR_WARNING;
		} else {
			*new_state = CAN_STATE_ERROR_ACTIVE;
		}
	}
}
838
/* Process a bus-state report from the device and, on an actual state
 * change, emit a CAN error frame describing the transition and update
 * driver state/statistics.
 */
static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
					  u8 bus_status,
					  const struct can_berr_counter *bec)
{
	struct net_device *netdev = priv->netdev;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct net_device_stats *stats;
	enum can_state new_state, old_state;

	old_state = priv->can.state;

	/* NOTE(review): new_state stays unwritten when bus_status matches no
	 * case in kvaser_usb_hydra_bus_status_to_can_state(); presumably the
	 * firmware always reports a handled status -- confirm.
	 */
	kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, bec,
						 &new_state);

	if (new_state == old_state)
		return;

	/* Ignore state change if previous state was STOPPED and the new state
	 * is BUS_OFF. Firmware always report this as BUS_OFF, since firmware
	 * does not distinguish between BUS_OFF and STOPPED.
	 */
	if (old_state == CAN_STATE_STOPPED && new_state == CAN_STATE_BUS_OFF)
		return;

	skb = alloc_can_err_skb(netdev, &cf);
	if (skb) {
		enum can_state tx_state, rx_state;

		/* Attribute the new state to the side whose error counter
		 * dominates; the other side is reported ERROR_ACTIVE.
		 */
		tx_state = (bec->txerr >= bec->rxerr) ?
					new_state : CAN_STATE_ERROR_ACTIVE;
		rx_state = (bec->txerr <= bec->rxerr) ?
					new_state : CAN_STATE_ERROR_ACTIVE;
		can_change_state(netdev, cf, tx_state, rx_state);
	}

	if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
		/* Without automatic restart configured, stop the chip
		 * explicitly when entering bus-off.
		 */
		if (!priv->can.restart_ms)
			kvaser_usb_hydra_send_simple_cmd_async
						(priv, CMD_STOP_CHIP_REQ);

		can_bus_off(netdev);
	}

	if (!skb) {
		netdev_warn(netdev, "No memory left for err_skb\n");
		return;
	}

	/* Leaving bus-off while automatic restart is enabled counts as a
	 * restart.
	 */
	if (priv->can.restart_ms &&
	    old_state >= CAN_STATE_BUS_OFF &&
	    new_state < CAN_STATE_BUS_OFF)
		priv->can.can_stats.restarts++;

	/* Error counters are carried in data[6]/data[7] of the error frame */
	cf->data[6] = bec->txerr;
	cf->data[7] = bec->rxerr;

	stats = &netdev->stats;
	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_rx(skb);
}
901
902static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev,
903 const struct kvaser_cmd *cmd)
904{
905 struct kvaser_usb_net_priv *priv;
906 struct can_berr_counter bec;
907 u8 bus_status;
908
909 priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
910 if (!priv)
911 return;
912
913 bus_status = cmd->chip_state_event.bus_status;
914 bec.txerr = cmd->chip_state_event.tx_err_counter;
915 bec.rxerr = cmd->chip_state_event.rx_err_counter;
916
917 kvaser_usb_hydra_update_state(priv, bus_status, &bec);
918 priv->bec.txerr = bec.txerr;
919 priv->bec.rxerr = bec.rxerr;
920}
921
/* Log a KVASER_USB_HYDRA_ERROR_EVENT_PARAM error event.
 *
 * The device rejected a command due to a bad parameter; info1 carries the
 * offending command number, which is mapped to a human-readable warning.
 */
static void kvaser_usb_hydra_error_event_parameter(const struct kvaser_usb *dev,
						   const struct kvaser_cmd *cmd)
{
	/* info1 will contain the offending cmd_no */
	switch (le16_to_cpu(cmd->error_event.info1)) {
	case CMD_START_CHIP_REQ:
		dev_warn(&dev->intf->dev,
			 "CMD_START_CHIP_REQ error in parameter\n");
		break;

	case CMD_STOP_CHIP_REQ:
		dev_warn(&dev->intf->dev,
			 "CMD_STOP_CHIP_REQ error in parameter\n");
		break;

	case CMD_FLUSH_QUEUE:
		dev_warn(&dev->intf->dev,
			 "CMD_FLUSH_QUEUE error in parameter\n");
		break;

	case CMD_SET_BUSPARAMS_REQ:
		dev_warn(&dev->intf->dev,
			 "Set bittiming failed. Error in parameter\n");
		break;

	case CMD_SET_BUSPARAMS_FD_REQ:
		dev_warn(&dev->intf->dev,
			 "Set data bittiming failed. Error in parameter\n");
		break;

	default:
		dev_warn(&dev->intf->dev,
			 "Unhandled parameter error event cmd_no (%u)\n",
			 le16_to_cpu(cmd->error_event.info1));
		break;
	}
}
959
/* Dispatch a CMD_ERROR_EVENT by its error_code.
 *
 * Parameter errors get detailed logging; CAN error events on this path
 * indicate a mapping problem and are logged as driver errors; anything else
 * is reported as unhandled.
 */
static void kvaser_usb_hydra_error_event(const struct kvaser_usb *dev,
					 const struct kvaser_cmd *cmd)
{
	switch (cmd->error_event.error_code) {
	case KVASER_USB_HYDRA_ERROR_EVENT_PARAM:
		kvaser_usb_hydra_error_event_parameter(dev, cmd);
		break;

	case KVASER_USB_HYDRA_ERROR_EVENT_CAN:
		/* Wrong channel mapping?! This should never happen!
		 * info1 will contain the offending cmd_no
		 */
		dev_err(&dev->intf->dev,
			"Received CAN error event for cmd_no (%u)\n",
			le16_to_cpu(cmd->error_event.info1));
		break;

	default:
		dev_warn(&dev->intf->dev,
			 "Unhandled error event (%d)\n",
			 cmd->error_event.error_code);
		break;
	}
}
984
/* Process an error frame reported by the device.
 *
 * Updates bus-error statistics, runs the CAN state machine on the reported
 * bus status/counters, and delivers a timestamped CAN error frame to the
 * stack. Also caches the error counters in priv->bec.
 */
static void
kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
			     const struct kvaser_err_frame_data *err_frame_data,
			     ktime_t hwtstamp)
{
	struct net_device *netdev = priv->netdev;
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct can_berr_counter bec;
	enum can_state new_state, old_state;
	u8 bus_status;

	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	bus_status = err_frame_data->bus_status;
	bec.txerr = err_frame_data->tx_err_counter;
	bec.rxerr = err_frame_data->rx_err_counter;

	old_state = priv->can.state;
	/* NOTE(review): new_state stays unwritten for unhandled bus_status
	 * values (see kvaser_usb_hydra_bus_status_to_can_state()) --
	 * presumably the firmware always reports a handled status.
	 */
	kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec,
						 &new_state);

	skb = alloc_can_err_skb(netdev, &cf);

	if (new_state != old_state) {
		if (skb) {
			enum can_state tx_state, rx_state;

			/* Attribute the new state to the side whose error
			 * counter dominates.
			 */
			tx_state = (bec.txerr >= bec.rxerr) ?
					new_state : CAN_STATE_ERROR_ACTIVE;
			rx_state = (bec.txerr <= bec.rxerr) ?
					new_state : CAN_STATE_ERROR_ACTIVE;

			can_change_state(netdev, cf, tx_state, rx_state);

			/* Leaving bus-off with restart_ms set means the
			 * controller was restarted.
			 */
			if (priv->can.restart_ms &&
			    old_state >= CAN_STATE_BUS_OFF &&
			    new_state < CAN_STATE_BUS_OFF)
				cf->can_id |= CAN_ERR_RESTARTED;
		}

		if (new_state == CAN_STATE_BUS_OFF) {
			/* Without automatic restart, stop the chip when
			 * entering bus-off.
			 */
			if (!priv->can.restart_ms)
				kvaser_usb_hydra_send_simple_cmd_async
						(priv, CMD_STOP_CHIP_REQ);

			can_bus_off(netdev);
		}
	}

	if (!skb) {
		stats->rx_dropped++;
		netdev_warn(netdev, "No memory left for err_skb\n");
		return;
	}

	/* Propagate the device-provided hardware timestamp */
	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp = hwtstamp;

	cf->can_id |= CAN_ERR_BUSERROR;
	/* Error counters travel in data[6]/data[7] of the error frame */
	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_rx(skb);

	priv->bec.txerr = bec.txerr;
	priv->bec.rxerr = bec.rxerr;
}
1058
/* Report a failed one-shot transmission as a CAN error frame.
 *
 * Called from the TX-ack path when the extended ack carries OSM_NACK
 * (no acknowledgment) and/or ABL (arbitration lost) flags.
 */
static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv,
					   const struct kvaser_cmd_ext *cmd)
{
	struct net_device *netdev = priv->netdev;
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 flags;

	skb = alloc_can_err_skb(netdev, &cf);
	if (!skb) {
		stats->rx_dropped++;
		netdev_warn(netdev, "No memory left for err_skb\n");
		return;
	}

	cf->can_id |= CAN_ERR_BUSERROR;
	flags = le32_to_cpu(cmd->tx_ack.flags);

	/* Map device flags to CAN error-frame bits */
	if (flags & KVASER_USB_HYDRA_CF_FLAG_OSM_NACK)
		cf->can_id |= CAN_ERR_ACK;
	if (flags & KVASER_USB_HYDRA_CF_FLAG_ABL) {
		cf->can_id |= CAN_ERR_LOSTARB;
		priv->can.can_stats.arbitration_lost++;
	}

	stats->tx_errors++;
	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_rx(skb);
}
1090
/* Handle a TX acknowledgment (standard or extended) from the device.
 *
 * Updates TX statistics (unless the frame was a failed one-shot, which is
 * reported as an error frame instead), releases the matching echo skb and
 * TX context, and wakes the TX queue.
 */
static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
					    const struct kvaser_cmd *cmd)
{
	struct kvaser_usb_tx_urb_context *context;
	struct kvaser_usb_net_priv *priv;
	unsigned long irq_flags;
	bool one_shot_fail = false;
	u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);

	priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
	if (!priv)
		return;

	/* Drop acks that arrive after the netdev was detached */
	if (!netif_device_present(priv->netdev))
		return;

	if (cmd->header.cmd_no == CMD_EXTENDED) {
		struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
		u32 flags = le32_to_cpu(cmd_ext->tx_ack.flags);

		/* One-shot frames that were NACKed or lost arbitration are
		 * turned into error frames and not counted as sent.
		 */
		if (flags & (KVASER_USB_HYDRA_CF_FLAG_OSM_NACK |
			     KVASER_USB_HYDRA_CF_FLAG_ABL)) {
			kvaser_usb_hydra_one_shot_fail(priv, cmd_ext);
			one_shot_fail = true;
		}
	}

	/* The transaction id encodes the TX context slot */
	context = &priv->tx_contexts[transid % dev->max_tx_urbs];
	if (!one_shot_fail) {
		struct net_device_stats *stats = &priv->netdev->stats;

		stats->tx_packets++;
		stats->tx_bytes += can_dlc2len(context->dlc);
	}

	spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);

	/* Release the echoed skb and mark the context slot free; echo_index
	 * == max_tx_urbs denotes an unused slot.
	 */
	can_get_echo_skb(priv->netdev, context->echo_index);
	context->echo_index = dev->max_tx_urbs;
	--priv->active_tx_contexts;
	netif_wake_queue(priv->netdev);

	spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags);
}
1135
/* Handle a standard (classic CAN) CMD_RX_MESSAGE.
 *
 * Error frames are diverted to kvaser_usb_hydra_error_frame(); data/remote
 * frames are decoded into a timestamped skb and handed to the stack.
 */
static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
					const struct kvaser_cmd *cmd)
{
	struct kvaser_usb_net_priv *priv = NULL;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct net_device_stats *stats;
	u8 flags;
	ktime_t hwtstamp;

	priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
	if (!priv)
		return;

	stats = &priv->netdev->stats;

	flags = cmd->rx_can.flags;
	hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd);

	if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
		kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
					     hwtstamp);
		return;
	}

	skb = alloc_can_skb(priv->netdev, &cf);
	if (!skb) {
		stats->rx_dropped++;
		return;
	}

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp = hwtstamp;

	cf->can_id = le32_to_cpu(cmd->rx_can.id);

	/* The device marks 29-bit identifiers with its own bit; translate
	 * into the stack's EFF flag.
	 */
	if (cf->can_id & KVASER_USB_HYDRA_EXTENDED_FRAME_ID) {
		cf->can_id &= CAN_EFF_MASK;
		cf->can_id |= CAN_EFF_FLAG;
	} else {
		cf->can_id &= CAN_SFF_MASK;
	}

	if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN)
		kvaser_usb_can_rx_over_error(priv->netdev);

	cf->can_dlc = get_can_dlc(cmd->rx_can.dlc);

	/* Remote frames carry no payload */
	if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
		cf->can_id |= CAN_RTR_FLAG;
	else
		memcpy(cf->data, cmd->rx_can.data, cf->can_dlc);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_rx(skb);
}
1194
/* Handle an extended CMD_RX_MESSAGE_FD (classic CAN or CAN FD payload).
 *
 * Error frames are diverted to kvaser_usb_hydra_error_frame(); otherwise a
 * CAN or CAN FD skb is allocated depending on the FDF flag, decoded and
 * passed to the stack with the hardware timestamp attached.
 */
static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
					const struct kvaser_cmd_ext *cmd)
{
	struct kvaser_cmd *std_cmd = (struct kvaser_cmd *)cmd;
	struct kvaser_usb_net_priv *priv;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct net_device_stats *stats;
	u32 flags;
	u8 dlc;
	u32 kcan_header;
	ktime_t hwtstamp;

	priv = kvaser_usb_hydra_net_priv_from_cmd(dev, std_cmd);
	if (!priv)
		return;

	stats = &priv->netdev->stats;

	/* The DLC is packed into the KCAN header word */
	kcan_header = le32_to_cpu(cmd->rx_can.kcan_header);
	dlc = (kcan_header & KVASER_USB_KCAN_DATA_DLC_MASK) >>
		KVASER_USB_KCAN_DATA_DLC_SHIFT;

	flags = le32_to_cpu(cmd->rx_can.flags);
	hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd);

	if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
		kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
					     hwtstamp);
		return;
	}

	/* CAN FD and classic frames use different skb allocators; for the
	 * classic case cf still points at a struct canfd_frame-compatible
	 * layout (can_frame is a prefix).
	 */
	if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF)
		skb = alloc_canfd_skb(priv->netdev, &cf);
	else
		skb = alloc_can_skb(priv->netdev, (struct can_frame **)&cf);

	if (!skb) {
		stats->rx_dropped++;
		return;
	}

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp = hwtstamp;

	cf->can_id = le32_to_cpu(cmd->rx_can.id);

	if (flags & KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID) {
		cf->can_id &= CAN_EFF_MASK;
		cf->can_id |= CAN_EFF_FLAG;
	} else {
		cf->can_id &= CAN_SFF_MASK;
	}

	if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN)
		kvaser_usb_can_rx_over_error(priv->netdev);

	if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) {
		/* CAN FD: translate DLC to byte length and map BRS/ESI */
		cf->len = can_dlc2len(get_canfd_dlc(dlc));
		if (flags & KVASER_USB_HYDRA_CF_FLAG_BRS)
			cf->flags |= CANFD_BRS;
		if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI)
			cf->flags |= CANFD_ESI;
	} else {
		cf->len = get_can_dlc(dlc);
	}

	/* Remote frames carry no payload */
	if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
		cf->can_id |= CAN_RTR_FLAG;
	else
		memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len);

	stats->rx_packets++;
	stats->rx_bytes += cf->len;
	netif_rx(skb);
}
1272
/* Dispatch a standard (non-extended) hydra command to its handler.
 *
 * Bus-parameter responses are acknowledged implicitly by the set_bittiming
 * paths and deliberately ignored here; anything else unknown is logged.
 */
static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
					    const struct kvaser_cmd *cmd)
{
	switch (cmd->header.cmd_no) {
	case CMD_START_CHIP_RESP:
		kvaser_usb_hydra_start_chip_reply(dev, cmd);
		break;

	case CMD_STOP_CHIP_RESP:
		kvaser_usb_hydra_stop_chip_reply(dev, cmd);
		break;

	case CMD_FLUSH_QUEUE_RESP:
		kvaser_usb_hydra_flush_queue_reply(dev, cmd);
		break;

	case CMD_CHIP_STATE_EVENT:
		kvaser_usb_hydra_state_event(dev, cmd);
		break;

	case CMD_ERROR_EVENT:
		kvaser_usb_hydra_error_event(dev, cmd);
		break;

	case CMD_TX_ACKNOWLEDGE:
		kvaser_usb_hydra_tx_acknowledge(dev, cmd);
		break;

	case CMD_RX_MESSAGE:
		kvaser_usb_hydra_rx_msg_std(dev, cmd);
		break;

	/* Ignored commands */
	case CMD_SET_BUSPARAMS_RESP:
	case CMD_SET_BUSPARAMS_FD_RESP:
		break;

	default:
		dev_warn(&dev->intf->dev, "Unhandled command (%d)\n",
			 cmd->header.cmd_no);
		break;
	}
}
1316
1317static void kvaser_usb_hydra_handle_cmd_ext(const struct kvaser_usb *dev,
1318 const struct kvaser_cmd_ext *cmd)
1319{
1320 switch (cmd->cmd_no_ext) {
1321 case CMD_TX_ACKNOWLEDGE_FD:
1322 kvaser_usb_hydra_tx_acknowledge(dev, (struct kvaser_cmd *)cmd);
1323 break;
1324
1325 case CMD_RX_MESSAGE_FD:
1326 kvaser_usb_hydra_rx_msg_ext(dev, cmd);
1327 break;
1328
1329 default:
1330 dev_warn(&dev->intf->dev, "Unhandled extended command (%d)\n",
1331 cmd->header.cmd_no);
1332 break;
1333 }
1334}
1335
1336static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev,
1337 const struct kvaser_cmd *cmd)
1338{
1339 if (cmd->header.cmd_no == CMD_EXTENDED)
1340 kvaser_usb_hydra_handle_cmd_ext
1341 (dev, (struct kvaser_cmd_ext *)cmd);
1342 else
1343 kvaser_usb_hydra_handle_cmd_std(dev, cmd);
1344}
1345
1346static void *
1347kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
1348 const struct sk_buff *skb, int *frame_len,
1349 int *cmd_len, u16 transid)
1350{
1351 struct kvaser_usb *dev = priv->dev;
1352 struct kvaser_cmd_ext *cmd;
1353 struct canfd_frame *cf = (struct canfd_frame *)skb->data;
1354 u8 dlc = can_len2dlc(cf->len);
1355 u8 nbr_of_bytes = cf->len;
1356 u32 flags;
1357 u32 id;
1358 u32 kcan_id;
1359 u32 kcan_header;
1360
1361 *frame_len = nbr_of_bytes;
1362
1363 cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC);
1364 if (!cmd)
1365 return NULL;
1366
1367 kvaser_usb_hydra_set_cmd_dest_he
1368 ((struct kvaser_cmd *)cmd,
1369 dev->card_data.hydra.channel_to_he[priv->channel]);
1370 kvaser_usb_hydra_set_cmd_transid((struct kvaser_cmd *)cmd, transid);
1371
1372 cmd->header.cmd_no = CMD_EXTENDED;
1373 cmd->cmd_no_ext = CMD_TX_CAN_MESSAGE_FD;
1374
1375 *cmd_len = ALIGN(sizeof(struct kvaser_cmd_ext) -
1376 sizeof(cmd->tx_can.kcan_payload) + nbr_of_bytes,
1377 8);
1378
1379 cmd->len = cpu_to_le16(*cmd_len);
1380
1381 cmd->tx_can.databytes = nbr_of_bytes;
1382 cmd->tx_can.dlc = dlc;
1383
1384 if (cf->can_id & CAN_EFF_FLAG) {
1385 id = cf->can_id & CAN_EFF_MASK;
1386 flags = KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID;
1387 kcan_id = (cf->can_id & CAN_EFF_MASK) |
1388 KVASER_USB_KCAN_DATA_IDE | KVASER_USB_KCAN_DATA_SRR;
1389 } else {
1390 id = cf->can_id & CAN_SFF_MASK;
1391 flags = 0;
1392 kcan_id = cf->can_id & CAN_SFF_MASK;
1393 }
1394
1395 if (cf->can_id & CAN_ERR_FLAG)
1396 flags |= KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME;
1397
1398 kcan_header = ((dlc << KVASER_USB_KCAN_DATA_DLC_SHIFT) &
1399 KVASER_USB_KCAN_DATA_DLC_MASK) |
1400 KVASER_USB_KCAN_DATA_AREQ |
1401 (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT ?
1402 KVASER_USB_KCAN_DATA_OSM : 0);
1403
1404 if (can_is_canfd_skb(skb)) {
1405 kcan_header |= KVASER_USB_KCAN_DATA_FDF |
1406 (cf->flags & CANFD_BRS ?
1407 KVASER_USB_KCAN_DATA_BRS : 0);
1408 } else {
1409 if (cf->can_id & CAN_RTR_FLAG) {
1410 kcan_id |= KVASER_USB_KCAN_DATA_RTR;
1411 cmd->tx_can.databytes = 0;
1412 flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME;
1413 }
1414 }
1415
1416 cmd->tx_can.kcan_id = cpu_to_le32(kcan_id);
1417 cmd->tx_can.id = cpu_to_le32(id);
1418 cmd->tx_can.flags = cpu_to_le32(flags);
1419 cmd->tx_can.kcan_header = cpu_to_le32(kcan_header);
1420
1421 memcpy(cmd->tx_can.kcan_payload, cf->data, nbr_of_bytes);
1422
1423 return cmd;
1424}
1425
1426static void *
1427kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
1428 const struct sk_buff *skb, int *frame_len,
1429 int *cmd_len, u16 transid)
1430{
1431 struct kvaser_usb *dev = priv->dev;
1432 struct kvaser_cmd *cmd;
1433 struct can_frame *cf = (struct can_frame *)skb->data;
1434 u32 flags;
1435 u32 id;
1436
1437 *frame_len = cf->can_dlc;
1438
1439 cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
1440 if (!cmd)
1441 return NULL;
1442
1443 kvaser_usb_hydra_set_cmd_dest_he
1444 (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
1445 kvaser_usb_hydra_set_cmd_transid(cmd, transid);
1446
1447 cmd->header.cmd_no = CMD_TX_CAN_MESSAGE;
1448
1449 *cmd_len = ALIGN(sizeof(struct kvaser_cmd), 8);
1450
1451 if (cf->can_id & CAN_EFF_FLAG) {
1452 id = (cf->can_id & CAN_EFF_MASK);
1453 id |= KVASER_USB_HYDRA_EXTENDED_FRAME_ID;
1454 } else {
1455 id = cf->can_id & CAN_SFF_MASK;
1456 }
1457
1458 cmd->tx_can.dlc = cf->can_dlc;
1459
1460 flags = (cf->can_id & CAN_EFF_FLAG ?
1461 KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0);
1462
1463 if (cf->can_id & CAN_RTR_FLAG)
1464 flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME;
1465
1466 flags |= (cf->can_id & CAN_ERR_FLAG ?
1467 KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME : 0);
1468
1469 cmd->tx_can.id = cpu_to_le32(id);
1470 cmd->tx_can.flags = flags;
1471
1472 memcpy(cmd->tx_can.data, cf->data, *frame_len);
1473
1474 return cmd;
1475}
1476
1477static int kvaser_usb_hydra_set_mode(struct net_device *netdev,
1478 enum can_mode mode)
1479{
1480 int err = 0;
1481
1482 switch (mode) {
1483 case CAN_MODE_START:
1484 /* CAN controller automatically recovers from BUS_OFF */
1485 break;
1486 default:
1487 err = -EOPNOTSUPP;
1488 }
1489
1490 return err;
1491}
1492
1493static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
1494{
1495 struct kvaser_cmd *cmd;
1496 struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
1497 struct can_bittiming *bt = &priv->can.bittiming;
1498 struct kvaser_usb *dev = priv->dev;
1499 int tseg1 = bt->prop_seg + bt->phase_seg1;
1500 int tseg2 = bt->phase_seg2;
1501 int sjw = bt->sjw;
1502 int err;
1503
1504 cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
1505 if (!cmd)
1506 return -ENOMEM;
1507
1508 cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
1509 cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate);
1510 cmd->set_busparams_req.sjw = (u8)sjw;
1511 cmd->set_busparams_req.tseg1 = (u8)tseg1;
1512 cmd->set_busparams_req.tseg2 = (u8)tseg2;
1513 cmd->set_busparams_req.nsamples = 1;
1514
1515 kvaser_usb_hydra_set_cmd_dest_he
1516 (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
1517 kvaser_usb_hydra_set_cmd_transid
1518 (cmd, kvaser_usb_hydra_get_next_transid(dev));
1519
1520 err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
1521
1522 kfree(cmd);
1523
1524 return err;
1525}
1526
1527static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
1528{
1529 struct kvaser_cmd *cmd;
1530 struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
1531 struct can_bittiming *dbt = &priv->can.data_bittiming;
1532 struct kvaser_usb *dev = priv->dev;
1533 int tseg1 = dbt->prop_seg + dbt->phase_seg1;
1534 int tseg2 = dbt->phase_seg2;
1535 int sjw = dbt->sjw;
1536 int err;
1537
1538 cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
1539 if (!cmd)
1540 return -ENOMEM;
1541
1542 cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
1543 cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate);
1544 cmd->set_busparams_req.sjw_d = (u8)sjw;
1545 cmd->set_busparams_req.tseg1_d = (u8)tseg1;
1546 cmd->set_busparams_req.tseg2_d = (u8)tseg2;
1547 cmd->set_busparams_req.nsamples_d = 1;
1548
1549 if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
1550 if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
1551 cmd->set_busparams_req.canfd_mode =
1552 KVASER_USB_HYDRA_BUS_MODE_NONISO;
1553 else
1554 cmd->set_busparams_req.canfd_mode =
1555 KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO;
1556 }
1557
1558 kvaser_usb_hydra_set_cmd_dest_he
1559 (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
1560 kvaser_usb_hydra_set_cmd_transid
1561 (cmd, kvaser_usb_hydra_get_next_transid(dev));
1562
1563 err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
1564
1565 kfree(cmd);
1566
1567 return err;
1568}
1569
1570static int kvaser_usb_hydra_get_berr_counter(const struct net_device *netdev,
1571 struct can_berr_counter *bec)
1572{
1573 struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
1574 int err;
1575
1576 err = kvaser_usb_hydra_send_simple_cmd(priv->dev,
1577 CMD_GET_CHIP_STATE_REQ,
1578 priv->channel);
1579 if (err)
1580 return err;
1581
1582 *bec = priv->bec;
1583
1584 return 0;
1585}
1586
1587static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev)
1588{
1589 const struct usb_host_interface *iface_desc;
1590 struct usb_endpoint_descriptor *ep;
1591 int i;
1592
1593 iface_desc = &dev->intf->altsetting[0];
1594
1595 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
1596 ep = &iface_desc->endpoint[i].desc;
1597
1598 if (!dev->bulk_in && usb_endpoint_is_bulk_in(ep) &&
1599 ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_IN_ADDR)
1600 dev->bulk_in = ep;
1601
1602 if (!dev->bulk_out && usb_endpoint_is_bulk_out(ep) &&
1603 ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_OUT_ADDR)
1604 dev->bulk_out = ep;
1605
1606 if (dev->bulk_in && dev->bulk_out)
1607 return 0;
1608 }
1609
1610 return -ENODEV;
1611}
1612
/* Initialize hydra-specific card data and map all channels plus the SYSDBG
 * entity to hydra addresses (he = "hydra entity").
 *
 * Returns 0 on success or the first mapping error.
 */
static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev)
{
	int err;
	unsigned int i;
	struct kvaser_usb_dev_card_data_hydra *card_data =
							&dev->card_data.hydra;

	card_data->transid = KVASER_USB_HYDRA_MIN_TRANSID;
	spin_lock_init(&card_data->transid_lock);

	/* No partial command buffered yet (see the bulk read callback) */
	memset(card_data->usb_rx_leftover, 0, KVASER_USB_HYDRA_MAX_CMD_LEN);
	card_data->usb_rx_leftover_len = 0;
	spin_lock_init(&card_data->usb_rx_leftover_lock);

	/* All channel slots start unmapped until CMD_MAP_CHANNEL_REQ fills
	 * them in.
	 */
	memset(card_data->channel_to_he, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL,
	       sizeof(card_data->channel_to_he));
	card_data->sysdbg_he = 0;

	for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) {
		err = kvaser_usb_hydra_map_channel
				(dev,
				 (KVASER_USB_HYDRA_TRANSID_CANHE | i),
				 i, "CAN");
		if (err) {
			dev_err(&dev->intf->dev,
				"CMD_MAP_CHANNEL_REQ failed for CAN%u\n", i);
			return err;
		}
	}

	err = kvaser_usb_hydra_map_channel(dev, KVASER_USB_HYDRA_TRANSID_SYSDBG,
					   0, "SYSDBG");
	if (err) {
		dev_err(&dev->intf->dev,
			"CMD_MAP_CHANNEL_REQ failed for SYSDBG\n");
		return err;
	}

	return 0;
}
1653
/* Query CMD_GET_SOFTWARE_INFO_REQ and record the number of outstanding TX
 * transfers the device supports, capped at KVASER_USB_MAX_TX_URBS.
 *
 * Returns 0 on success or a negative error from send/wait.
 */
static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
{
	struct kvaser_cmd cmd;
	int err;

	/* -1: command is not addressed to a specific channel */
	err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO_REQ,
					       -1);
	if (err)
		return err;

	memset(&cmd, 0, sizeof(struct kvaser_cmd));
	err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_RESP, &cmd);
	if (err)
		return err;

	dev->max_tx_urbs = min_t(unsigned int, KVASER_USB_MAX_TX_URBS,
				 le16_to_cpu(cmd.sw_info.max_outstanding_tx));

	return 0;
}
1674
1675static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
1676{
1677 struct kvaser_cmd *cmd;
1678 int err;
1679 u32 flags;
1680 struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
1681
1682 cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
1683 if (!cmd)
1684 return -ENOMEM;
1685
1686 cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ;
1687 cmd->sw_detail_req.use_ext_cmd = 1;
1688 kvaser_usb_hydra_set_cmd_dest_he
1689 (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
1690
1691 kvaser_usb_hydra_set_cmd_transid
1692 (cmd, kvaser_usb_hydra_get_next_transid(dev));
1693
1694 err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
1695 if (err)
1696 goto end;
1697
1698 err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_DETAILS_RESP,
1699 cmd);
1700 if (err)
1701 goto end;
1702
1703 dev->fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version);
1704 flags = le32_to_cpu(cmd->sw_detail_res.sw_flags);
1705
1706 if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BAD) {
1707 dev_err(&dev->intf->dev,
1708 "Bad firmware, device refuse to run!\n");
1709 err = -EINVAL;
1710 goto end;
1711 }
1712
1713 if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BETA)
1714 dev_info(&dev->intf->dev, "Beta firmware in use\n");
1715
1716 if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CAP)
1717 card_data->capabilities |= KVASER_USB_CAP_EXT_CAP;
1718
1719 if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CMD)
1720 card_data->capabilities |= KVASER_USB_HYDRA_CAP_EXT_CMD;
1721
1722 if (flags & KVASER_USB_HYDRA_SW_FLAG_CANFD)
1723 card_data->ctrlmode_supported |= CAN_CTRLMODE_FD;
1724
1725 if (flags & KVASER_USB_HYDRA_SW_FLAG_NONISO)
1726 card_data->ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
1727
1728 if (flags & KVASER_USB_HYDRA_SW_FLAG_FREQ_80M)
1729 dev->cfg = &kvaser_usb_hydra_dev_cfg_kcan;
1730 else
1731 dev->cfg = &kvaser_usb_hydra_dev_cfg_flexc;
1732
1733end:
1734 kfree(cmd);
1735
1736 return err;
1737}
1738
/* Query CMD_GET_CARD_INFO_REQ and record the channel count.
 *
 * Returns 0 on success, -EINVAL if the device reports more channels than
 * the driver supports, or a negative error from send/wait.
 */
static int kvaser_usb_hydra_get_card_info(struct kvaser_usb *dev)
{
	struct kvaser_cmd cmd;
	int err;

	/* -1: command is not addressed to a specific channel */
	err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_CARD_INFO_REQ, -1);
	if (err)
		return err;

	memset(&cmd, 0, sizeof(struct kvaser_cmd));
	err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CARD_INFO_RESP, &cmd);
	if (err)
		return err;

	dev->nchannels = cmd.card_info.nchannels;
	if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES)
		return -EINVAL;

	return 0;
}
1759
/* Query the device's extended capabilities (listen-only, error reporting,
 * one-shot) via kvaser_usb_hydra_get_single_capability().
 *
 * A device without extended capability support is not an error; each
 * individual capability failure is logged but also non-fatal. Returns 0 or
 * a negative transfer error.
 */
static int kvaser_usb_hydra_get_capabilities(struct kvaser_usb *dev)
{
	int err;
	u16 status;

	if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) {
		dev_info(&dev->intf->dev,
			 "No extended capability support. Upgrade your device.\n");
		return 0;
	}

	err = kvaser_usb_hydra_get_single_capability
					(dev,
					 KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE,
					 &status);
	if (err)
		return err;
	if (status)
		dev_info(&dev->intf->dev,
			 "KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE failed %u\n",
			 status);

	err = kvaser_usb_hydra_get_single_capability
					(dev,
					 KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT,
					 &status);
	if (err)
		return err;
	if (status)
		dev_info(&dev->intf->dev,
			 "KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT failed %u\n",
			 status);

	err = kvaser_usb_hydra_get_single_capability
					(dev, KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT,
					 &status);
	if (err)
		return err;
	if (status)
		dev_info(&dev->intf->dev,
			 "KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT failed %u\n",
			 status);

	return 0;
}
1805
1806static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
1807{
1808 struct kvaser_usb *dev = priv->dev;
1809 struct kvaser_cmd *cmd;
1810 int err;
1811
1812 if ((priv->can.ctrlmode &
1813 (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) ==
1814 CAN_CTRLMODE_FD_NON_ISO) {
1815 netdev_warn(priv->netdev,
1816 "CTRLMODE_FD shall be on if CTRLMODE_FD_NON_ISO is on\n");
1817 return -EINVAL;
1818 }
1819
1820 cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
1821 if (!cmd)
1822 return -ENOMEM;
1823
1824 cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
1825 kvaser_usb_hydra_set_cmd_dest_he
1826 (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
1827 kvaser_usb_hydra_set_cmd_transid
1828 (cmd, kvaser_usb_hydra_get_next_transid(dev));
1829 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
1830 cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_LISTEN;
1831 else
1832 cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;
1833
1834 err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
1835 kfree(cmd);
1836
1837 return err;
1838}
1839
1840static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv)
1841{
1842 int err;
1843
1844 init_completion(&priv->start_comp);
1845
1846 err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ,
1847 priv->channel);
1848 if (err)
1849 return err;
1850
1851 if (!wait_for_completion_timeout(&priv->start_comp,
1852 msecs_to_jiffies(KVASER_USB_TIMEOUT)))
1853 return -ETIMEDOUT;
1854
1855 return 0;
1856}
1857
/* Stop the CAN controller for this channel and wait for the device's
 * CMD_STOP_CHIP_RESP. Returns 0, a negative send error, or -ETIMEDOUT.
 */
static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv)
{
	int err;

	init_completion(&priv->stop_comp);

	/* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT
	 * see comment in kvaser_usb_hydra_update_state()
	 */
	priv->can.state = CAN_STATE_STOPPED;

	err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_STOP_CHIP_REQ,
					       priv->channel);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&priv->stop_comp,
					 msecs_to_jiffies(KVASER_USB_TIMEOUT)))
		return -ETIMEDOUT;

	return 0;
}
1880
1881static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv)
1882{
1883 int err;
1884
1885 init_completion(&priv->flush_comp);
1886
1887 err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE,
1888 priv->channel);
1889 if (err)
1890 return err;
1891
1892 if (!wait_for_completion_timeout(&priv->flush_comp,
1893 msecs_to_jiffies(KVASER_USB_TIMEOUT)))
1894 return -ETIMEDOUT;
1895
1896 return 0;
1897}
1898
/* A single extended hydra command can be transmitted in multiple transfers
 * We have to buffer partial hydra commands, and handle them on next callback.
 */
static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev,
						void *buf, int len)
{
	unsigned long irq_flags;
	struct kvaser_cmd *cmd;
	int pos = 0;
	size_t cmd_len;
	struct kvaser_usb_dev_card_data_hydra *card_data =
							&dev->card_data.hydra;
	int usb_rx_leftover_len;
	spinlock_t *usb_rx_leftover_lock = &card_data->usb_rx_leftover_lock;

	/* Step 1: if the previous URB ended mid-command, the partial command
	 * is buffered in card_data->usb_rx_leftover. Complete it with bytes
	 * from the start of this transfer, under usb_rx_leftover_lock.
	 */
	spin_lock_irqsave(usb_rx_leftover_lock, irq_flags);
	usb_rx_leftover_len = card_data->usb_rx_leftover_len;
	if (usb_rx_leftover_len) {
		int remaining_bytes;

		cmd = (struct kvaser_cmd *)card_data->usb_rx_leftover;

		cmd_len = kvaser_usb_hydra_cmd_size(cmd);

		/* Take at most what's needed to finish the buffered command */
		remaining_bytes = min_t(unsigned int, len,
					cmd_len - usb_rx_leftover_len);
		/* Make sure we do not overflow usb_rx_leftover */
		if (remaining_bytes + usb_rx_leftover_len >
		    KVASER_USB_HYDRA_MAX_CMD_LEN) {
			dev_err(&dev->intf->dev, "Format error\n");
			spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags);
			return;
		}

		memcpy(card_data->usb_rx_leftover + usb_rx_leftover_len, buf,
		       remaining_bytes);
		pos += remaining_bytes;

		if (remaining_bytes + usb_rx_leftover_len == cmd_len) {
			/* Buffered command is now complete: dispatch it */
			kvaser_usb_hydra_handle_cmd(dev, cmd);
			usb_rx_leftover_len = 0;
		} else {
			/* Command still not complete */
			usb_rx_leftover_len += remaining_bytes;
		}
		card_data->usb_rx_leftover_len = usb_rx_leftover_len;
	}
	spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags);

	/* Step 2: dispatch every complete command in the remainder of the
	 * transfer; a trailing partial command is saved for the next URB.
	 */
	while (pos < len) {
		cmd = buf + pos;

		cmd_len = kvaser_usb_hydra_cmd_size(cmd);

		if (pos + cmd_len > len) {
			/* We got first part of a command */
			int leftover_bytes;

			leftover_bytes = len - pos;
			/* Make sure we do not overflow usb_rx_leftover */
			if (leftover_bytes > KVASER_USB_HYDRA_MAX_CMD_LEN) {
				dev_err(&dev->intf->dev, "Format error\n");
				return;
			}
			spin_lock_irqsave(usb_rx_leftover_lock, irq_flags);
			memcpy(card_data->usb_rx_leftover, buf + pos,
			       leftover_bytes);
			card_data->usb_rx_leftover_len = leftover_bytes;
			spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags);
			break;
		}

		kvaser_usb_hydra_handle_cmd(dev, cmd);
		pos += cmd_len;
	}
}
1975
1976static void *
1977kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
1978 const struct sk_buff *skb, int *frame_len,
1979 int *cmd_len, u16 transid)
1980{
1981 void *buf;
1982
1983 if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD)
1984 buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len,
1985 cmd_len, transid);
1986 else
1987 buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len,
1988 cmd_len, transid);
1989
1990 return buf;
1991}
1992
/* Device-operations table for the hydra family. Note that
 * .dev_reset_chip is intentionally NULL: no reset command is issued
 * for these devices.
 */
const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
	.dev_set_mode = kvaser_usb_hydra_set_mode,
	.dev_set_bittiming = kvaser_usb_hydra_set_bittiming,
	.dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming,
	.dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter,
	.dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints,
	.dev_init_card = kvaser_usb_hydra_init_card,
	.dev_get_software_info = kvaser_usb_hydra_get_software_info,
	.dev_get_software_details = kvaser_usb_hydra_get_software_details,
	.dev_get_card_info = kvaser_usb_hydra_get_card_info,
	.dev_get_capabilities = kvaser_usb_hydra_get_capabilities,
	.dev_set_opt_mode = kvaser_usb_hydra_set_opt_mode,
	.dev_start_chip = kvaser_usb_hydra_start_chip,
	.dev_stop_chip = kvaser_usb_hydra_stop_chip,
	.dev_reset_chip = NULL,
	.dev_flush_queue = kvaser_usb_hydra_flush_queue,
	.dev_read_bulk_callback = kvaser_usb_hydra_read_bulk_callback,
	.dev_frame_to_cmd = kvaser_usb_hydra_frame_to_cmd,
};
2012
/* Configuration for KCAN-based hydra devices: 80 MHz CAN clock.
 * The same bittiming constants are used for both arbitration and data
 * phase (CAN FD).
 */
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = {
	.clock = {
		.freq = 80000000,
	},
	.timestamp_freq = 80,
	.bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c,
	.data_bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c,
};
2021
/* Configuration for FlexC-based hydra devices: 24 MHz CAN clock.
 * No .data_bittiming_const is set — presumably these devices do not
 * support CAN FD data-phase bittiming; verify against the probe code.
 */
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
	.clock = {
		.freq = 24000000,
	},
	.timestamp_freq = 1,
	.bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
};
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
new file mode 100644
index 000000000000..07d2f3aa2c02
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -0,0 +1,1358 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Parts of this driver are based on the following:
3 * - Kvaser linux leaf driver (version 4.78)
4 * - CAN driver for esd CAN-USB/2
5 * - Kvaser linux usbcanII driver (version 5.3)
6 *
7 * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
8 * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
9 * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
10 * Copyright (C) 2015 Valeo S.A.
11 */
12
13#include <linux/completion.h>
14#include <linux/device.h>
15#include <linux/gfp.h>
16#include <linux/jiffies.h>
17#include <linux/kernel.h>
18#include <linux/netdevice.h>
19#include <linux/spinlock.h>
20#include <linux/string.h>
21#include <linux/types.h>
22#include <linux/usb.h>
23
24#include <linux/can.h>
25#include <linux/can/dev.h>
26#include <linux/can/error.h>
27#include <linux/can/netlink.h>
28
29#include "kvaser_usb.h"
30
/* Forward declaration */
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;

/* CAN clock frequency used by the bittiming config below */
#define CAN_USB_CLOCK			8000000
/* USBCAN firmware supports at most two channels */
#define MAX_USBCAN_NET_DEVICES		2

/* Command header size */
#define CMD_HEADER_LEN			2

/* Kvaser CAN message flags */
#define MSG_FLAG_ERROR_FRAME		BIT(0)
#define MSG_FLAG_OVERRUN		BIT(1)
#define MSG_FLAG_NERR			BIT(2)
#define MSG_FLAG_WAKEUP			BIT(3)
#define MSG_FLAG_REMOTE_FRAME		BIT(4)
#define MSG_FLAG_RESERVED		BIT(5)
#define MSG_FLAG_TX_ACK			BIT(6)
#define MSG_FLAG_TX_REQUEST		BIT(7)

/* CAN states (M16C CxSTRH register) */
#define M16C_STATE_BUS_RESET		BIT(0)
#define M16C_STATE_BUS_ERROR		BIT(4)
#define M16C_STATE_BUS_PASSIVE		BIT(5)
#define M16C_STATE_BUS_OFF		BIT(6)

/* Leaf/usbcan command ids */
#define CMD_RX_STD_MESSAGE		12
#define CMD_TX_STD_MESSAGE		13
#define CMD_RX_EXT_MESSAGE		14
#define CMD_TX_EXT_MESSAGE		15
#define CMD_SET_BUS_PARAMS		16
#define CMD_CHIP_STATE_EVENT		20
#define CMD_SET_CTRL_MODE		21
#define CMD_RESET_CHIP			24
#define CMD_START_CHIP			26
#define CMD_START_CHIP_REPLY		27
#define CMD_STOP_CHIP			28
#define CMD_STOP_CHIP_REPLY		29

#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT	33

#define CMD_GET_CARD_INFO		34
#define CMD_GET_CARD_INFO_REPLY		35
#define CMD_GET_SOFTWARE_INFO		38
#define CMD_GET_SOFTWARE_INFO_REPLY	39
#define CMD_FLUSH_QUEUE			48
#define CMD_TX_ACKNOWLEDGE		50
#define CMD_CAN_ERROR_EVENT		51
#define CMD_FLUSH_QUEUE_REPLY		68

#define CMD_LEAF_LOG_MESSAGE		106

/* error factors */
#define M16C_EF_ACKE			BIT(0)
#define M16C_EF_CRCE			BIT(1)
#define M16C_EF_FORME			BIT(2)
#define M16C_EF_STFE			BIT(3)
#define M16C_EF_BITE0			BIT(4)
#define M16C_EF_BITE1			BIT(5)
#define M16C_EF_RCVE			BIT(6)
#define M16C_EF_TRE			BIT(7)

/* Only Leaf-based devices can report M16C error factors,
 * thus define our own error status flags for USBCANII
 */
#define USBCAN_ERROR_STATE_NONE		0
#define USBCAN_ERROR_STATE_TX_ERROR	BIT(0)
#define USBCAN_ERROR_STATE_RX_ERROR	BIT(1)
#define USBCAN_ERROR_STATE_BUSERROR	BIT(2)

/* bittiming parameters */
#define KVASER_USB_TSEG1_MIN		1
#define KVASER_USB_TSEG1_MAX		16
#define KVASER_USB_TSEG2_MIN		1
#define KVASER_USB_TSEG2_MAX		8
#define KVASER_USB_SJW_MAX		4
#define KVASER_USB_BRP_MIN		1
#define KVASER_USB_BRP_MAX		64
#define KVASER_USB_BRP_INC		1

/* ctrl modes */
#define KVASER_CTRL_MODE_NORMAL		1
#define KVASER_CTRL_MODE_SILENT		2
#define KVASER_CTRL_MODE_SELFRECEPTION	3
#define KVASER_CTRL_MODE_OFF		4

/* Extended CAN identifier flag */
#define KVASER_EXTENDED_FRAME		BIT(31)

/* Payload of commands that only carry a transaction id and a channel */
struct kvaser_cmd_simple {
	u8 tid;
	u8 channel;
} __packed;
124
/* Payload of CMD_GET_CARD_INFO_REPLY; the union tail differs between
 * the Leaf and USBCANII families.
 */
struct kvaser_cmd_cardinfo {
	u8 tid;
	u8 nchannels;
	__le32 serial_number;
	__le32 padding0;
	__le32 clock_resolution;
	__le32 mfgdate;
	u8 ean[8];
	u8 hw_revision;
	union {
		struct {
			u8 usb_hs_mode;
		} __packed leaf1;
		struct {
			u8 padding;
		} __packed usbcan1;
	} __packed;
	__le16 padding1;
} __packed;
144
/* Leaf-family payload of CMD_GET_SOFTWARE_INFO_REPLY */
struct leaf_cmd_softinfo {
	u8 tid;
	u8 padding0;
	__le32 sw_options;
	__le32 fw_version;
	__le16 max_outstanding_tx;
	__le16 padding1[9];
} __packed;
153
/* USBCANII-family payload of CMD_GET_SOFTWARE_INFO_REPLY */
struct usbcan_cmd_softinfo {
	u8 tid;
	u8 fw_name[5];
	__le16 max_outstanding_tx;
	u8 padding[6];
	__le32 fw_version;
	__le16 checksum;
	__le16 sw_options;
} __packed;
163
/* Payload of CMD_SET_BUS_PARAMS (bittiming configuration) */
struct kvaser_cmd_busparams {
	u8 tid;
	u8 channel;
	__le32 bitrate;
	u8 tseg1;
	u8 tseg2;
	u8 sjw;
	u8 no_samp;
} __packed;
173
/* Payload of CMD_TX_STD_MESSAGE/CMD_TX_EXT_MESSAGE. data[] holds the
 * encoded CAN id, dlc and frame bytes (see kvaser_usb_leaf_frame_to_cmd);
 * the MSG_FLAG_* byte sits at a family-specific offset in the tail union.
 */
struct kvaser_cmd_tx_can {
	u8 channel;
	u8 tid;
	u8 data[14];
	union {
		struct {
			u8 padding;
			u8 flags;
		} __packed leaf;
		struct {
			u8 flags;
			u8 padding;
		} __packed usbcan;
	} __packed;
} __packed;
189
/* Common prefix of both families' RX-message payloads */
struct kvaser_cmd_rx_can_header {
	u8 channel;
	u8 flag;
} __packed;
194
/* Leaf-family RX-message payload (timestamp before data) */
struct leaf_cmd_rx_can {
	u8 channel;
	u8 flag;

	__le16 time[3];
	u8 data[14];
} __packed;
202
/* USBCANII-family RX-message payload (timestamp after data) */
struct usbcan_cmd_rx_can {
	u8 channel;
	u8 flag;

	u8 data[14];
	__le16 time;
} __packed;
210
/* Leaf-family payload of CMD_CHIP_STATE_EVENT */
struct leaf_cmd_chip_state_event {
	u8 tid;
	u8 channel;

	__le16 time[3];
	u8 tx_errors_count;
	u8 rx_errors_count;

	u8 status;
	u8 padding[3];
} __packed;
222
/* USBCANII-family payload of CMD_CHIP_STATE_EVENT */
struct usbcan_cmd_chip_state_event {
	u8 tid;
	u8 channel;

	u8 tx_errors_count;
	u8 rx_errors_count;
	__le16 time;

	u8 status;
	u8 padding[3];
} __packed;
234
/* Common prefix of CMD_TX_ACKNOWLEDGE payloads */
struct kvaser_cmd_tx_acknowledge_header {
	u8 channel;
	u8 tid;
} __packed;
239
/* Leaf-family payload of CMD_CAN_ERROR_EVENT; includes the M16C
 * error-factor byte, which USBCANII cannot report.
 */
struct leaf_cmd_error_event {
	u8 tid;
	u8 flags;
	__le16 time[3];
	u8 channel;
	u8 padding;
	u8 tx_errors_count;
	u8 rx_errors_count;
	u8 status;
	u8 error_factor;
} __packed;
251
/* USBCANII-family payload of CMD_CAN_ERROR_EVENT: one event carries
 * counters and status for both channels (ch0 and ch1).
 */
struct usbcan_cmd_error_event {
	u8 tid;
	u8 padding;
	u8 tx_errors_count_ch0;
	u8 rx_errors_count_ch0;
	u8 tx_errors_count_ch1;
	u8 rx_errors_count_ch1;
	u8 status_ch0;
	u8 status_ch1;
	__le16 time;
} __packed;
263
/* Payload of CMD_SET_CTRL_MODE (KVASER_CTRL_MODE_* values) */
struct kvaser_cmd_ctrl_mode {
	u8 tid;
	u8 channel;
	u8 ctrl_mode;
	u8 padding[3];
} __packed;
270
/* Payload of CMD_FLUSH_QUEUE */
struct kvaser_cmd_flush_queue {
	u8 tid;
	u8 channel;
	u8 flags;
	u8 padding[3];
} __packed;
277
/* Leaf-family payload of CMD_LEAF_LOG_MESSAGE; can carry either a CAN
 * frame or an error frame (see kvaser_usb_leaf_rx_can_msg).
 */
struct leaf_cmd_log_message {
	u8 channel;
	u8 flags;
	__le16 time[3];
	u8 dlc;
	u8 time_offset;
	__le32 id;
	u8 data[8];
} __packed;
287
/* Generic Leaf/USBCANII command: a 2-byte header (total length in
 * bytes, command id) followed by a command-specific payload. Family
 * specific payloads live in the nested leaf/usbcan unions.
 */
struct kvaser_cmd {
	u8 len;
	u8 id;
	union	{
		struct kvaser_cmd_simple simple;
		struct kvaser_cmd_cardinfo cardinfo;
		struct kvaser_cmd_busparams busparams;

		struct kvaser_cmd_rx_can_header rx_can_header;
		struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header;

		union {
			struct leaf_cmd_softinfo softinfo;
			struct leaf_cmd_rx_can rx_can;
			struct leaf_cmd_chip_state_event chip_state_event;
			struct leaf_cmd_error_event error_event;
			struct leaf_cmd_log_message log_message;
		} __packed leaf;

		union {
			struct usbcan_cmd_softinfo softinfo;
			struct usbcan_cmd_rx_can rx_can;
			struct usbcan_cmd_chip_state_event chip_state_event;
			struct usbcan_cmd_error_event error_event;
		} __packed usbcan;

		struct kvaser_cmd_tx_can tx_can;
		struct kvaser_cmd_ctrl_mode ctrl_mode;
		struct kvaser_cmd_flush_queue flush_queue;
	} u;
} __packed;
319
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
 * handling. Some discrepancies between the two families exist:
 *
 * - USBCAN firmware does not report M16C "error factors"
 * - USBCAN controllers has difficulties reporting if the raised error
 *   event is for ch0 or ch1. They leave such arbitration to the OS
 *   driver by letting it compare error counters with previous values
 *   and decide the error event's channel. Thus for USBCAN, the channel
 *   field is only advisory.
 */
struct kvaser_usb_err_summary {
	u8 channel, status, txerr, rxerr;
	union {
		struct {
			u8 error_factor;	/* M16C_EF_* bits */
		} leaf;
		struct {
			u8 other_ch_status;	/* status of the sibling channel */
			u8 error_state;		/* USBCAN_ERROR_STATE_* bits */
		} usbcan;
	};
};
342
343static void *
344kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
345 const struct sk_buff *skb, int *frame_len,
346 int *cmd_len, u16 transid)
347{
348 struct kvaser_usb *dev = priv->dev;
349 struct kvaser_cmd *cmd;
350 u8 *cmd_tx_can_flags = NULL; /* GCC */
351 struct can_frame *cf = (struct can_frame *)skb->data;
352
353 *frame_len = cf->can_dlc;
354
355 cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
356 if (cmd) {
357 cmd->u.tx_can.tid = transid & 0xff;
358 cmd->len = *cmd_len = CMD_HEADER_LEN +
359 sizeof(struct kvaser_cmd_tx_can);
360 cmd->u.tx_can.channel = priv->channel;
361
362 switch (dev->card_data.leaf.family) {
363 case KVASER_LEAF:
364 cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags;
365 break;
366 case KVASER_USBCAN:
367 cmd_tx_can_flags = &cmd->u.tx_can.usbcan.flags;
368 break;
369 }
370
371 *cmd_tx_can_flags = 0;
372
373 if (cf->can_id & CAN_EFF_FLAG) {
374 cmd->id = CMD_TX_EXT_MESSAGE;
375 cmd->u.tx_can.data[0] = (cf->can_id >> 24) & 0x1f;
376 cmd->u.tx_can.data[1] = (cf->can_id >> 18) & 0x3f;
377 cmd->u.tx_can.data[2] = (cf->can_id >> 14) & 0x0f;
378 cmd->u.tx_can.data[3] = (cf->can_id >> 6) & 0xff;
379 cmd->u.tx_can.data[4] = cf->can_id & 0x3f;
380 } else {
381 cmd->id = CMD_TX_STD_MESSAGE;
382 cmd->u.tx_can.data[0] = (cf->can_id >> 6) & 0x1f;
383 cmd->u.tx_can.data[1] = cf->can_id & 0x3f;
384 }
385
386 cmd->u.tx_can.data[5] = cf->can_dlc;
387 memcpy(&cmd->u.tx_can.data[6], cf->data, cf->can_dlc);
388
389 if (cf->can_id & CAN_RTR_FLAG)
390 *cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
391 }
392 return cmd;
393}
394
/* Synchronously receive bulk data until a command with the given id is
 * found, or KVASER_USB_TIMEOUT ms elapse.
 *
 * @dev: device to read from
 * @id:  command id to wait for
 * @cmd: out, filled with the matching command (up to its reported len)
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL on
 * timeout, or the error from kvaser_usb_recv_cmd().
 */
static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
				    struct kvaser_cmd *cmd)
{
	struct kvaser_cmd *tmp;
	void *buf;
	int actual_len;
	int err;
	int pos;
	unsigned long to = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT);

	buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	do {
		err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE,
					  &actual_len);
		if (err < 0)
			goto end;

		/* Walk the received buffer command by command */
		pos = 0;
		while (pos <= actual_len - CMD_HEADER_LEN) {
			tmp = buf + pos;

			/* Handle commands crossing the USB endpoint max packet
			 * size boundary. Check kvaser_usb_read_bulk_callback()
			 * for further details.
			 */
			if (tmp->len == 0) {
				pos = round_up(pos,
					       le16_to_cpu
						(dev->bulk_in->wMaxPacketSize));
				continue;
			}

			/* A command's reported length must not run past the
			 * end of the received data.
			 */
			if (pos + tmp->len > actual_len) {
				dev_err_ratelimited(&dev->intf->dev,
						    "Format error\n");
				break;
			}

			if (tmp->id == id) {
				memcpy(cmd, tmp, tmp->len);
				goto end;
			}

			pos += tmp->len;
		}
	} while (time_before(jiffies, to));

	err = -EINVAL;

end:
	kfree(buf);

	return err;
}
452
453static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
454 u8 cmd_id, int channel)
455{
456 struct kvaser_cmd *cmd;
457 int rc;
458
459 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
460 if (!cmd)
461 return -ENOMEM;
462
463 cmd->id = cmd_id;
464 cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple);
465 cmd->u.simple.channel = channel;
466 cmd->u.simple.tid = 0xff;
467
468 rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
469
470 kfree(cmd);
471 return rc;
472}
473
/* Query the firmware version and the number of outstanding TX URBs the
 * device supports, decoding the family-specific reply layout.
 *
 * Returns 0 on success or a negative errno from the send/wait helpers.
 */
static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
{
	struct kvaser_cmd cmd;
	int err;

	err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0);
	if (err)
		return err;

	err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_REPLY, &cmd);
	if (err)
		return err;

	/* Reply layout differs between the two families */
	switch (dev->card_data.leaf.family) {
	case KVASER_LEAF:
		dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
		dev->max_tx_urbs =
			le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
		break;
	case KVASER_USBCAN:
		dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
		dev->max_tx_urbs =
			le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
		break;
	}

	return 0;
}
502
503static int kvaser_usb_leaf_get_software_info(struct kvaser_usb *dev)
504{
505 int err;
506 int retry = 3;
507
508 /* On some x86 laptops, plugging a Kvaser device again after
509 * an unplug makes the firmware always ignore the very first
510 * command. For such a case, provide some room for retries
511 * instead of completely exiting the driver.
512 */
513 do {
514 err = kvaser_usb_leaf_get_software_info_inner(dev);
515 } while (--retry && err == -ETIMEDOUT);
516
517 return err;
518}
519
520static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
521{
522 struct kvaser_cmd cmd;
523 int err;
524
525 err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_CARD_INFO, 0);
526 if (err)
527 return err;
528
529 err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CARD_INFO_REPLY, &cmd);
530 if (err)
531 return err;
532
533 dev->nchannels = cmd.u.cardinfo.nchannels;
534 if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES ||
535 (dev->card_data.leaf.family == KVASER_USBCAN &&
536 dev->nchannels > MAX_USBCAN_NET_DEVICES))
537 return -EINVAL;
538
539 return 0;
540}
541
/* Handle CMD_TX_ACKNOWLEDGE: complete the matching TX echo skb, free
 * its context slot and wake the TX queue. Also synthesizes a
 * CAN_ERR_RESTARTED frame when the ack arrives while we believe the
 * bus is off and automatic restart is enabled.
 */
static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
					   const struct kvaser_cmd *cmd)
{
	struct net_device_stats *stats;
	struct kvaser_usb_tx_urb_context *context;
	struct kvaser_usb_net_priv *priv;
	unsigned long flags;
	u8 channel, tid;

	channel = cmd->u.tx_acknowledge_header.channel;
	tid = cmd->u.tx_acknowledge_header.tid;

	if (channel >= dev->nchannels) {
		dev_err(&dev->intf->dev,
			"Invalid channel number (%d)\n", channel);
		return;
	}

	priv = dev->nets[channel];

	if (!netif_device_present(priv->netdev))
		return;

	stats = &priv->netdev->stats;

	/* tid was assigned as echo_index modulo max_tx_urbs, so this maps
	 * the ack back to its TX context.
	 */
	context = &priv->tx_contexts[tid % dev->max_tx_urbs];

	/* Sometimes the state change doesn't come after a bus-off event */
	if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(priv->netdev, &cf);
		if (skb) {
			cf->can_id |= CAN_ERR_RESTARTED;

			stats->rx_packets++;
			stats->rx_bytes += cf->can_dlc;
			netif_rx(skb);
		} else {
			netdev_err(priv->netdev,
				   "No memory left for err_skb\n");
		}

		priv->can.can_stats.restarts++;
		netif_carrier_on(priv->netdev);

		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}

	stats->tx_packets++;
	stats->tx_bytes += context->dlc;

	/* Release the context slot under tx_contexts_lock; echo_index set
	 * to max_tx_urbs marks the slot as free.
	 */
	spin_lock_irqsave(&priv->tx_contexts_lock, flags);

	can_get_echo_skb(priv->netdev, context->echo_index);
	context->echo_index = dev->max_tx_urbs;
	--priv->active_tx_contexts;
	netif_wake_queue(priv->netdev);

	spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
}
604
605static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
606 u8 cmd_id)
607{
608 struct kvaser_cmd *cmd;
609 int err;
610
611 cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
612 if (!cmd)
613 return -ENOMEM;
614
615 cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple);
616 cmd->id = cmd_id;
617 cmd->u.simple.channel = priv->channel;
618
619 err = kvaser_usb_send_cmd_async(priv, cmd, cmd->len);
620 if (err)
621 kfree(cmd);
622
623 return err;
624}
625
/* Derive the new CAN interface state from an error summary, update the
 * interface state/counters via can_change_state(), and account bus
 * errors per family. Fills @cf with the state-change error frame data
 * but performs no allocation itself (see kvaser_usb_leaf_rx_error).
 */
static void
kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
					  const struct kvaser_usb_err_summary *es,
					  struct can_frame *cf)
{
	struct kvaser_usb *dev = priv->dev;
	struct net_device_stats *stats = &priv->netdev->stats;
	enum can_state cur_state, new_state, tx_state, rx_state;

	netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status);

	new_state = priv->can.state;
	cur_state = priv->can.state;

	if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
		new_state = CAN_STATE_BUS_OFF;
	} else if (es->status & M16C_STATE_BUS_PASSIVE) {
		new_state = CAN_STATE_ERROR_PASSIVE;
	} else if (es->status & M16C_STATE_BUS_ERROR) {
		/* Guard against spurious error events after a busoff */
		if (cur_state < CAN_STATE_BUS_OFF) {
			/* Classify by the standard CAN error-counter
			 * thresholds (96 warning, 128 passive).
			 */
			if (es->txerr >= 128 || es->rxerr >= 128)
				new_state = CAN_STATE_ERROR_PASSIVE;
			else if (es->txerr >= 96 || es->rxerr >= 96)
				new_state = CAN_STATE_ERROR_WARNING;
			else if (cur_state > CAN_STATE_ERROR_ACTIVE)
				new_state = CAN_STATE_ERROR_ACTIVE;
		}
	}

	/* No status bits set at all means the controller is error active */
	if (!es->status)
		new_state = CAN_STATE_ERROR_ACTIVE;

	if (new_state != cur_state) {
		/* Attribute the state change to the side(s) with the higher
		 * error counter.
		 */
		tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
		rx_state = (es->txerr <= es->rxerr) ? new_state : 0;

		can_change_state(priv->netdev, cf, tx_state, rx_state);
	}

	if (priv->can.restart_ms &&
	    cur_state >= CAN_STATE_BUS_OFF &&
	    new_state < CAN_STATE_BUS_OFF)
		priv->can.can_stats.restarts++;

	/* Bus-error accounting differs per family (see struct
	 * kvaser_usb_err_summary).
	 */
	switch (dev->card_data.leaf.family) {
	case KVASER_LEAF:
		if (es->leaf.error_factor) {
			priv->can.can_stats.bus_error++;
			stats->rx_errors++;
		}
		break;
	case KVASER_USBCAN:
		if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR)
			stats->tx_errors++;
		if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR)
			stats->rx_errors++;
		if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR)
			priv->can.can_stats.bus_error++;
		break;
	}

	/* Remember the counters so USBCAN channel arbitration can detect
	 * changes on the next event.
	 */
	priv->bec.txerr = es->txerr;
	priv->bec.rxerr = es->rxerr;
}
691
/* Process a unified error summary: update interface state, then build
 * and deliver a CAN error frame to userspace with the decoded error
 * cause (M16C error factors for Leaf, coarse flags for USBCAN).
 */
static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
				     const struct kvaser_usb_err_summary *es)
{
	struct can_frame *cf;
	struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG,
				    .can_dlc = CAN_ERR_DLC };
	struct sk_buff *skb;
	struct net_device_stats *stats;
	struct kvaser_usb_net_priv *priv;
	enum can_state old_state, new_state;

	if (es->channel >= dev->nchannels) {
		dev_err(&dev->intf->dev,
			"Invalid channel number (%d)\n", es->channel);
		return;
	}

	priv = dev->nets[es->channel];
	stats = &priv->netdev->stats;

	/* Update all of the CAN interface's state and error counters before
	 * trying any memory allocation that can actually fail with -ENOMEM.
	 *
	 * We send a temporary stack-allocated error CAN frame to
	 * can_change_state() for the very same reason.
	 *
	 * TODO: Split can_change_state() responsibility between updating the
	 * CAN interface's state and counters, and the setting up of CAN error
	 * frame ID and data to userspace. Remove stack allocation afterwards.
	 */
	old_state = priv->can.state;
	kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
	new_state = priv->can.state;

	skb = alloc_can_err_skb(priv->netdev, &cf);
	if (!skb) {
		stats->rx_dropped++;
		return;
	}
	memcpy(cf, &tmp_cf, sizeof(*cf));

	if (new_state != old_state) {
		if (es->status &
		    (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
			/* Without automatic restart, stop the chip so the
			 * controller stays off the bus until user action.
			 */
			if (!priv->can.restart_ms)
				kvaser_usb_leaf_simple_cmd_async(priv,
								 CMD_STOP_CHIP);
			netif_carrier_off(priv->netdev);
		}

		if (priv->can.restart_ms &&
		    old_state >= CAN_STATE_BUS_OFF &&
		    new_state < CAN_STATE_BUS_OFF) {
			cf->can_id |= CAN_ERR_RESTARTED;
			netif_carrier_on(priv->netdev);
		}
	}

	switch (dev->card_data.leaf.family) {
	case KVASER_LEAF:
		/* Translate M16C error factors into CAN_ERR_PROT_* bits */
		if (es->leaf.error_factor) {
			cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;

			if (es->leaf.error_factor & M16C_EF_ACKE)
				cf->data[3] = CAN_ERR_PROT_LOC_ACK;
			if (es->leaf.error_factor & M16C_EF_CRCE)
				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			if (es->leaf.error_factor & M16C_EF_FORME)
				cf->data[2] |= CAN_ERR_PROT_FORM;
			if (es->leaf.error_factor & M16C_EF_STFE)
				cf->data[2] |= CAN_ERR_PROT_STUFF;
			if (es->leaf.error_factor & M16C_EF_BITE0)
				cf->data[2] |= CAN_ERR_PROT_BIT0;
			if (es->leaf.error_factor & M16C_EF_BITE1)
				cf->data[2] |= CAN_ERR_PROT_BIT1;
			if (es->leaf.error_factor & M16C_EF_TRE)
				cf->data[2] |= CAN_ERR_PROT_TX;
		}
		break;
	case KVASER_USBCAN:
		if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR)
			cf->can_id |= CAN_ERR_BUSERROR;
		break;
	}

	cf->data[6] = es->txerr;
	cf->data[7] = es->rxerr;

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_rx(skb);
}
784
785/* For USBCAN, report error to userspace if the channels's errors counter
786 * has changed, or we're the only channel seeing a bus error state.
787 */
788static void
789kvaser_usb_leaf_usbcan_conditionally_rx_error(const struct kvaser_usb *dev,
790 struct kvaser_usb_err_summary *es)
791{
792 struct kvaser_usb_net_priv *priv;
793 unsigned int channel;
794 bool report_error;
795
796 channel = es->channel;
797 if (channel >= dev->nchannels) {
798 dev_err(&dev->intf->dev,
799 "Invalid channel number (%d)\n", channel);
800 return;
801 }
802
803 priv = dev->nets[channel];
804 report_error = false;
805
806 if (es->txerr != priv->bec.txerr) {
807 es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR;
808 report_error = true;
809 }
810 if (es->rxerr != priv->bec.rxerr) {
811 es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR;
812 report_error = true;
813 }
814 if ((es->status & M16C_STATE_BUS_ERROR) &&
815 !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) {
816 es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR;
817 report_error = true;
818 }
819
820 if (report_error)
821 kvaser_usb_leaf_rx_error(dev, es);
822}
823
/* Decode a USBCANII error-related command into per-channel error
 * summaries. A single CMD_CAN_ERROR_EVENT carries data for both
 * channels; the es struct is reused for ch1 after ch0 was handled.
 */
static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
					    const struct kvaser_cmd *cmd)
{
	struct kvaser_usb_err_summary es = { };

	switch (cmd->id) {
	/* Sometimes errors are sent as unsolicited chip state events */
	case CMD_CHIP_STATE_EVENT:
		es.channel = cmd->u.usbcan.chip_state_event.channel;
		es.status = cmd->u.usbcan.chip_state_event.status;
		es.txerr = cmd->u.usbcan.chip_state_event.tx_errors_count;
		es.rxerr = cmd->u.usbcan.chip_state_event.rx_errors_count;
		kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
		break;

	case CMD_CAN_ERROR_EVENT:
		es.channel = 0;
		es.status = cmd->u.usbcan.error_event.status_ch0;
		es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
		es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
		es.usbcan.other_ch_status =
			cmd->u.usbcan.error_event.status_ch1;
		kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);

		/* The USBCAN firmware supports up to 2 channels.
		 * Now that ch0 was checked, check if ch1 has any errors.
		 */
		if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
			es.channel = 1;
			es.status = cmd->u.usbcan.error_event.status_ch1;
			es.txerr =
				cmd->u.usbcan.error_event.tx_errors_count_ch1;
			es.rxerr =
				cmd->u.usbcan.error_event.rx_errors_count_ch1;
			es.usbcan.other_ch_status =
				cmd->u.usbcan.error_event.status_ch0;
			kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
		}
		break;

	default:
		dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id);
	}
}
868
/* Decode a Leaf-family error-related command into an error summary and
 * hand it to the unified error handler. CMD_LEAF_LOG_MESSAGE encodes
 * status/counters/factor in its data bytes — assumed layout per the
 * vendor driver; no public documentation available to confirm.
 */
static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
					  const struct kvaser_cmd *cmd)
{
	struct kvaser_usb_err_summary es = { };

	switch (cmd->id) {
	case CMD_CAN_ERROR_EVENT:
		es.channel = cmd->u.leaf.error_event.channel;
		es.status = cmd->u.leaf.error_event.status;
		es.txerr = cmd->u.leaf.error_event.tx_errors_count;
		es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
		es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
		break;
	case CMD_LEAF_LOG_MESSAGE:
		es.channel = cmd->u.leaf.log_message.channel;
		es.status = cmd->u.leaf.log_message.data[0];
		es.txerr = cmd->u.leaf.log_message.data[2];
		es.rxerr = cmd->u.leaf.log_message.data[3];
		es.leaf.error_factor = cmd->u.leaf.log_message.data[1];
		break;
	case CMD_CHIP_STATE_EVENT:
		es.channel = cmd->u.leaf.chip_state_event.channel;
		es.status = cmd->u.leaf.chip_state_event.status;
		es.txerr = cmd->u.leaf.chip_state_event.tx_errors_count;
		es.rxerr = cmd->u.leaf.chip_state_event.rx_errors_count;
		es.leaf.error_factor = 0;
		break;
	default:
		dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id);
		return;
	}

	kvaser_usb_leaf_rx_error(dev, &es);
}
903
904static void kvaser_usb_leaf_rx_can_err(const struct kvaser_usb_net_priv *priv,
905 const struct kvaser_cmd *cmd)
906{
907 if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
908 MSG_FLAG_NERR)) {
909 struct net_device_stats *stats = &priv->netdev->stats;
910
911 netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
912 cmd->u.rx_can_header.flag);
913
914 stats->rx_errors++;
915 return;
916 }
917
918 if (cmd->u.rx_can_header.flag & MSG_FLAG_OVERRUN)
919 kvaser_usb_can_rx_over_error(priv->netdev);
920}
921
/* Handle a received CAN message command: route error/overrun flags to
 * the error handlers, otherwise decode the wire format (or the
 * CMD_LEAF_LOG_MESSAGE format) into a CAN frame and pass it up.
 */
static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
				       const struct kvaser_cmd *cmd)
{
	struct kvaser_usb_net_priv *priv;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct net_device_stats *stats;
	u8 channel = cmd->u.rx_can_header.channel;
	const u8 *rx_data = NULL; /* GCC */

	if (channel >= dev->nchannels) {
		dev_err(&dev->intf->dev,
			"Invalid channel number (%d)\n", channel);
		return;
	}

	priv = dev->nets[channel];
	stats = &priv->netdev->stats;

	/* On Leaf, an error frame inside a log message is a bus error */
	if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
	    (dev->card_data.leaf.family == KVASER_LEAF &&
	     cmd->id == CMD_LEAF_LOG_MESSAGE)) {
		kvaser_usb_leaf_leaf_rx_error(dev, cmd);
		return;
	} else if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
						MSG_FLAG_NERR |
						MSG_FLAG_OVERRUN)) {
		kvaser_usb_leaf_rx_can_err(priv, cmd);
		return;
	} else if (cmd->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) {
		netdev_warn(priv->netdev,
			    "Unhandled frame (flags: 0x%02x)\n",
			    cmd->u.rx_can_header.flag);
		return;
	}

	/* The encoded message bytes sit at family-specific offsets */
	switch (dev->card_data.leaf.family) {
	case KVASER_LEAF:
		rx_data = cmd->u.leaf.rx_can.data;
		break;
	case KVASER_USBCAN:
		rx_data = cmd->u.usbcan.rx_can.data;
		break;
	}

	skb = alloc_can_skb(priv->netdev, &cf);
	if (!skb) {
		stats->rx_dropped++;
		return;
	}

	if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id ==
	    CMD_LEAF_LOG_MESSAGE) {
		/* Log messages carry the id as a plain __le32 with a
		 * vendor "extended" flag in bit 31.
		 */
		cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id);
		if (cf->can_id & KVASER_EXTENDED_FRAME)
			cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
		else
			cf->can_id &= CAN_SFF_MASK;

		cf->can_dlc = get_can_dlc(cmd->u.leaf.log_message.dlc);

		if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
			cf->can_id |= CAN_RTR_FLAG;
		else
			memcpy(cf->data, &cmd->u.leaf.log_message.data,
			       cf->can_dlc);
	} else {
		/* Reassemble the id from the split wire encoding (inverse
		 * of kvaser_usb_leaf_frame_to_cmd).
		 */
		cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f);

		if (cmd->id == CMD_RX_EXT_MESSAGE) {
			cf->can_id <<= 18;
			cf->can_id |= ((rx_data[2] & 0x0f) << 14) |
				      ((rx_data[3] & 0xff) << 6) |
				      (rx_data[4] & 0x3f);
			cf->can_id |= CAN_EFF_FLAG;
		}

		cf->can_dlc = get_can_dlc(rx_data[5]);

		if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
			cf->can_id |= CAN_RTR_FLAG;
		else
			memcpy(cf->data, &rx_data[6], cf->can_dlc);
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_rx(skb);
}
1011
/* Handle CMD_START_CHIP_REPLY. The first reply completes start_comp
 * and starts the TX queue; a reply arriving when start_comp is already
 * done (presumably after a restart — verify against the restart path)
 * only re-wakes a stopped queue.
 */
static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
					     const struct kvaser_cmd *cmd)
{
	struct kvaser_usb_net_priv *priv;
	u8 channel = cmd->u.simple.channel;

	if (channel >= dev->nchannels) {
		dev_err(&dev->intf->dev,
			"Invalid channel number (%d)\n", channel);
		return;
	}

	priv = dev->nets[channel];

	if (completion_done(&priv->start_comp) &&
	    netif_queue_stopped(priv->netdev)) {
		netif_wake_queue(priv->netdev);
	} else {
		netif_start_queue(priv->netdev);
		complete(&priv->start_comp);
	}
}
1034
1035static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
1036 const struct kvaser_cmd *cmd)
1037{
1038 struct kvaser_usb_net_priv *priv;
1039 u8 channel = cmd->u.simple.channel;
1040
1041 if (channel >= dev->nchannels) {
1042 dev_err(&dev->intf->dev,
1043 "Invalid channel number (%d)\n", channel);
1044 return;
1045 }
1046
1047 priv = dev->nets[channel];
1048
1049 complete(&priv->stop_comp);
1050}
1051
/* Dispatch one command received from the device to its handler.
 *
 * Commands that are only meaningful for one hardware family are
 * checked against dev->card_data.leaf.family; family mismatches and
 * unknown ids fall through to the "warn" label.
 */
static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
					   const struct kvaser_cmd *cmd)
{
	switch (cmd->id) {
	case CMD_START_CHIP_REPLY:
		kvaser_usb_leaf_start_chip_reply(dev, cmd);
		break;

	case CMD_STOP_CHIP_REPLY:
		kvaser_usb_leaf_stop_chip_reply(dev, cmd);
		break;

	case CMD_RX_STD_MESSAGE:
	case CMD_RX_EXT_MESSAGE:
		kvaser_usb_leaf_rx_can_msg(dev, cmd);
		break;

	case CMD_LEAF_LOG_MESSAGE:
		/* log messages only exist on Leaf hardware */
		if (dev->card_data.leaf.family != KVASER_LEAF)
			goto warn;
		kvaser_usb_leaf_rx_can_msg(dev, cmd);
		break;

	case CMD_CHIP_STATE_EVENT:
	case CMD_CAN_ERROR_EVENT:
		/* error decoding differs between the two families */
		if (dev->card_data.leaf.family == KVASER_LEAF)
			kvaser_usb_leaf_leaf_rx_error(dev, cmd);
		else
			kvaser_usb_leaf_usbcan_rx_error(dev, cmd);
		break;

	case CMD_TX_ACKNOWLEDGE:
		kvaser_usb_leaf_tx_acknowledge(dev, cmd);
		break;

	/* Ignored commands */
	case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
		if (dev->card_data.leaf.family != KVASER_USBCAN)
			goto warn;
		break;

	case CMD_FLUSH_QUEUE_REPLY:
		if (dev->card_data.leaf.family != KVASER_LEAF)
			goto warn;
		break;

	default:
warn:		dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id);
		break;
	}
}
1103
/* Parse a bulk-in buffer containing zero or more concatenated commands
 * and hand each one to kvaser_usb_leaf_handle_command().
 */
static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev,
					       void *buf, int len)
{
	struct kvaser_cmd *cmd;
	int pos = 0;

	/* stop once there is no room left for even a command header */
	while (pos <= len - CMD_HEADER_LEN) {
		cmd = buf + pos;

		/* The Kvaser firmware can only read and write commands that
		 * does not cross the USB's endpoint wMaxPacketSize boundary.
		 * If a follow-up command crosses such boundary, firmware puts
		 * a placeholder zero-length command in its place then aligns
		 * the real command to the next max packet size.
		 *
		 * Handle such cases or we're going to miss a significant
		 * number of events in case of a heavy rx load on the bus.
		 */
		if (cmd->len == 0) {
			/* skip ahead to the next wMaxPacketSize boundary */
			pos = round_up(pos, le16_to_cpu
						(dev->bulk_in->wMaxPacketSize));
			continue;
		}

		/* a command claiming to extend past the buffer is malformed */
		if (pos + cmd->len > len) {
			dev_err_ratelimited(&dev->intf->dev, "Format error\n");
			break;
		}

		kvaser_usb_leaf_handle_command(dev, cmd);
		pos += cmd->len;
	}
}
1137
1138static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
1139{
1140 struct kvaser_cmd *cmd;
1141 int rc;
1142
1143 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1144 if (!cmd)
1145 return -ENOMEM;
1146
1147 cmd->id = CMD_SET_CTRL_MODE;
1148 cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_ctrl_mode);
1149 cmd->u.ctrl_mode.tid = 0xff;
1150 cmd->u.ctrl_mode.channel = priv->channel;
1151
1152 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
1153 cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT;
1154 else
1155 cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL;
1156
1157 rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len);
1158
1159 kfree(cmd);
1160 return rc;
1161}
1162
1163static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
1164{
1165 int err;
1166
1167 init_completion(&priv->start_comp);
1168
1169 err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
1170 priv->channel);
1171 if (err)
1172 return err;
1173
1174 if (!wait_for_completion_timeout(&priv->start_comp,
1175 msecs_to_jiffies(KVASER_USB_TIMEOUT)))
1176 return -ETIMEDOUT;
1177
1178 return 0;
1179}
1180
1181static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
1182{
1183 int err;
1184
1185 init_completion(&priv->stop_comp);
1186
1187 err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
1188 priv->channel);
1189 if (err)
1190 return err;
1191
1192 if (!wait_for_completion_timeout(&priv->stop_comp,
1193 msecs_to_jiffies(KVASER_USB_TIMEOUT)))
1194 return -ETIMEDOUT;
1195
1196 return 0;
1197}
1198
/* Send a fire-and-forget CMD_RESET_CHIP for the given channel. */
static int kvaser_usb_leaf_reset_chip(struct kvaser_usb *dev, int channel)
{
	return kvaser_usb_leaf_send_simple_cmd(dev, CMD_RESET_CHIP, channel);
}
1203
1204static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv)
1205{
1206 struct kvaser_cmd *cmd;
1207 int rc;
1208
1209 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1210 if (!cmd)
1211 return -ENOMEM;
1212
1213 cmd->id = CMD_FLUSH_QUEUE;
1214 cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_flush_queue);
1215 cmd->u.flush_queue.channel = priv->channel;
1216 cmd->u.flush_queue.flags = 0x00;
1217
1218 rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len);
1219
1220 kfree(cmd);
1221 return rc;
1222}
1223
1224static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
1225{
1226 struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
1227
1228 dev->cfg = &kvaser_usb_leaf_dev_cfg;
1229 card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
1230
1231 return 0;
1232}
1233
/* Bit timing limits advertised to the CAN core for Leaf/USBcan devices. */
static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
	.name = "kvaser_usb",
	.tseg1_min = KVASER_USB_TSEG1_MIN,
	.tseg1_max = KVASER_USB_TSEG1_MAX,
	.tseg2_min = KVASER_USB_TSEG2_MIN,
	.tseg2_max = KVASER_USB_TSEG2_MAX,
	.sjw_max = KVASER_USB_SJW_MAX,
	.brp_min = KVASER_USB_BRP_MIN,
	.brp_max = KVASER_USB_BRP_MAX,
	.brp_inc = KVASER_USB_BRP_INC,
};
1245
1246static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
1247{
1248 struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
1249 struct can_bittiming *bt = &priv->can.bittiming;
1250 struct kvaser_usb *dev = priv->dev;
1251 struct kvaser_cmd *cmd;
1252 int rc;
1253
1254 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1255 if (!cmd)
1256 return -ENOMEM;
1257
1258 cmd->id = CMD_SET_BUS_PARAMS;
1259 cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
1260 cmd->u.busparams.channel = priv->channel;
1261 cmd->u.busparams.tid = 0xff;
1262 cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
1263 cmd->u.busparams.sjw = bt->sjw;
1264 cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
1265 cmd->u.busparams.tseg2 = bt->phase_seg2;
1266
1267 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
1268 cmd->u.busparams.no_samp = 3;
1269 else
1270 cmd->u.busparams.no_samp = 1;
1271
1272 rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
1273
1274 kfree(cmd);
1275 return rc;
1276}
1277
1278static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
1279 enum can_mode mode)
1280{
1281 struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
1282 int err;
1283
1284 switch (mode) {
1285 case CAN_MODE_START:
1286 err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
1287 if (err)
1288 return err;
1289 break;
1290 default:
1291 return -EOPNOTSUPP;
1292 }
1293
1294 return 0;
1295}
1296
1297static int kvaser_usb_leaf_get_berr_counter(const struct net_device *netdev,
1298 struct can_berr_counter *bec)
1299{
1300 struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
1301
1302 *bec = priv->bec;
1303
1304 return 0;
1305}
1306
1307static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
1308{
1309 const struct usb_host_interface *iface_desc;
1310 struct usb_endpoint_descriptor *endpoint;
1311 int i;
1312
1313 iface_desc = &dev->intf->altsetting[0];
1314
1315 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
1316 endpoint = &iface_desc->endpoint[i].desc;
1317
1318 if (!dev->bulk_in && usb_endpoint_is_bulk_in(endpoint))
1319 dev->bulk_in = endpoint;
1320
1321 if (!dev->bulk_out && usb_endpoint_is_bulk_out(endpoint))
1322 dev->bulk_out = endpoint;
1323
1324 /* use first bulk endpoint for in and out */
1325 if (dev->bulk_in && dev->bulk_out)
1326 return 0;
1327 }
1328
1329 return -ENODEV;
1330}
1331
/* Device-specific operations table for Leaf/USBcan based hardware;
 * NULL entries are features this family does not implement (CAN FD
 * data bittiming, software details, capabilities query).
 */
const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
	.dev_set_mode = kvaser_usb_leaf_set_mode,
	.dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
	.dev_set_data_bittiming = NULL,
	.dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
	.dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
	.dev_init_card = kvaser_usb_leaf_init_card,
	.dev_get_software_info = kvaser_usb_leaf_get_software_info,
	.dev_get_software_details = NULL,
	.dev_get_card_info = kvaser_usb_leaf_get_card_info,
	.dev_get_capabilities = NULL,
	.dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
	.dev_start_chip = kvaser_usb_leaf_start_chip,
	.dev_stop_chip = kvaser_usb_leaf_stop_chip,
	.dev_reset_chip = kvaser_usb_leaf_reset_chip,
	.dev_flush_queue = kvaser_usb_leaf_flush_queue,
	.dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
	.dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
};
1351
/* Static device configuration selected by kvaser_usb_leaf_init_card(). */
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
	.clock = {
		.freq = CAN_USB_CLOCK,
	},
	.timestamp_freq = 1,
	.bittiming_const = &kvaser_usb_leaf_bittiming_const,
};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index f530a80f5051..13238a72a338 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -423,6 +423,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
423 new_state = CAN_STATE_ERROR_WARNING; 423 new_state = CAN_STATE_ERROR_WARNING;
424 break; 424 break;
425 } 425 }
426 /* else: fall through */
426 427
427 case CAN_STATE_ERROR_WARNING: 428 case CAN_STATE_ERROR_WARNING:
428 if (n & PCAN_USB_ERROR_BUS_HEAVY) { 429 if (n & PCAN_USB_ERROR_BUS_HEAVY) {
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 50e911428638..611f9d31be5d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -353,6 +353,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
353 default: 353 default:
354 netdev_warn(netdev, "tx urb submitting failed err=%d\n", 354 netdev_warn(netdev, "tx urb submitting failed err=%d\n",
355 err); 355 err);
356 /* fall through */
356 case -ENOENT: 357 case -ENOENT:
357 /* cable unplugged */ 358 /* cable unplugged */
358 stats->tx_dropped++; 359 stats->tx_dropped++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 0105fbfea273..d516def846ab 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -141,8 +141,10 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...)
141 switch (id) { 141 switch (id) {
142 case PCAN_USBPRO_TXMSG8: 142 case PCAN_USBPRO_TXMSG8:
143 i += 4; 143 i += 4;
144 /* fall through */
144 case PCAN_USBPRO_TXMSG4: 145 case PCAN_USBPRO_TXMSG4:
145 i += 4; 146 i += 4;
147 /* fall through */
146 case PCAN_USBPRO_TXMSG0: 148 case PCAN_USBPRO_TXMSG0:
147 *pc++ = va_arg(ap, int); 149 *pc++ = va_arg(ap, int);
148 *pc++ = va_arg(ap, int); 150 *pc++ = va_arg(ap, int);
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
new file mode 100644
index 000000000000..f3d5bda012a1
--- /dev/null
+++ b/drivers/net/can/usb/ucan.c
@@ -0,0 +1,1606 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/* Driver for Theobroma Systems UCAN devices, Protocol Version 3
4 *
5 * Copyright (C) 2018 Theobroma Systems Design und Consulting GmbH
6 *
7 *
8 * General Description:
9 *
10 * The USB Device uses three Endpoints:
11 *
 * CONTROL Endpoint: is used to set up the device (start, stop,
 * info, configure).
14 *
15 * IN Endpoint: The device sends CAN Frame Messages and Device
16 * Information using the IN endpoint.
17 *
18 * OUT Endpoint: The driver sends configuration requests, and CAN
19 * Frames on the out endpoint.
20 *
21 * Error Handling:
22 *
 * If error reporting is turned on, the device encodes errors into CAN
 * error frames (see uapi/linux/can/error.h) and sends them using the
 * IN Endpoint. The driver updates its statistics and forwards them.
 */
27
28#include <linux/can.h>
29#include <linux/can/dev.h>
30#include <linux/can/error.h>
31#include <linux/module.h>
32#include <linux/netdevice.h>
33#include <linux/signal.h>
34#include <linux/skbuff.h>
35#include <linux/slab.h>
36#include <linux/usb.h>
37
#define UCAN_DRIVER_NAME "ucan"
/* maximum number of receive URBs kept in flight */
#define UCAN_MAX_RX_URBS 8
/* the CAN controller needs a while to enable/disable the bus */
#define UCAN_USB_CTL_PIPE_TIMEOUT 1000
/* this driver currently supports protocol version 3 only */
#define UCAN_PROTOCOL_VERSION_MIN 3
#define UCAN_PROTOCOL_VERSION_MAX 3
45
46/* UCAN Message Definitions
47 * ------------------------
48 *
49 * ucan_message_out_t and ucan_message_in_t define the messages
50 * transmitted on the OUT and IN endpoint.
51 *
52 * Multibyte fields are transmitted with little endianness
53 *
54 * INTR Endpoint: a single uint32_t storing the current space in the fifo
55 *
56 * OUT Endpoint: single message of type ucan_message_out_t is
57 * transmitted on the out endpoint
58 *
 * IN Endpoint: multiple messages ucan_message_in_t concatenated in
 * the following way:
 *
 * m[n].len <=> the length of message n (including the header, in bytes)
 * m[n] is aligned to a 4 byte boundary, hence
 * offset(m[0]) := 0;
 * offset(m[n+1]) := offset(m[n]) + ((m[n].len + 3) & ~3)
 *
 * this implies that
 * offset(m[n]) % 4 <=> 0
 */
70
/* Device Global Commands (control transfers addressed to the device) */
enum {
	UCAN_DEVICE_GET_FW_STRING = 0,
};

/* UCAN Commands (control transfers addressed to the interface) */
enum {
	/* start the can transceiver - val defines the operation mode */
	UCAN_COMMAND_START = 0,
	/* cancel pending transmissions and stop the can transceiver */
	UCAN_COMMAND_STOP = 1,
	/* send can transceiver into low-power sleep mode */
	UCAN_COMMAND_SLEEP = 2,
	/* wake up can transceiver from low-power sleep mode */
	UCAN_COMMAND_WAKEUP = 3,
	/* reset the can transceiver */
	UCAN_COMMAND_RESET = 4,
	/* get piece of info from the can transceiver - subcmd defines what
	 * piece
	 */
	UCAN_COMMAND_GET = 5,
	/* clear or disable hardware filter - subcmd defines which of the two */
	UCAN_COMMAND_FILTER = 6,
	/* Setup bittiming */
	UCAN_COMMAND_SET_BITTIMING = 7,
	/* recover from bus-off state */
	UCAN_COMMAND_RESTART = 8,
};

/* UCAN_COMMAND_START and UCAN_COMMAND_GET_INFO operation modes (bitmap).
 * Undefined bits must be set to 0.
 */
enum {
	UCAN_MODE_LOOPBACK = BIT(0),
	UCAN_MODE_SILENT = BIT(1),
	UCAN_MODE_3_SAMPLES = BIT(2),
	UCAN_MODE_ONE_SHOT = BIT(3),
	UCAN_MODE_BERR_REPORT = BIT(4),
};

/* UCAN_COMMAND_GET subcommands */
enum {
	UCAN_COMMAND_GET_INFO = 0,
	UCAN_COMMAND_GET_PROTOCOL_VERSION = 1,
};

/* UCAN_COMMAND_FILTER subcommands */
enum {
	UCAN_FILTER_CLEAR = 0,
	UCAN_FILTER_DISABLE = 1,
	UCAN_FILTER_ENABLE = 2,
};

/* OUT endpoint message types */
enum {
	UCAN_OUT_TX = 2, /* transmit a CAN frame */
};

/* IN endpoint message types */
enum {
	UCAN_IN_TX_COMPLETE = 1, /* CAN frame transmission completed */
	UCAN_IN_RX = 2, /* CAN frame received */
};
134
/* payload of UCAN_COMMAND_START */
struct ucan_ctl_cmd_start {
	__le16 mode; /* OR-ing any of UCAN_MODE_* */
} __packed;

/* payload of UCAN_COMMAND_SET_BITTIMING */
struct ucan_ctl_cmd_set_bittiming {
	__le32 tq; /* Time quanta (TQ) in nanoseconds */
	__le16 brp; /* TQ Prescaler */
	__le16 sample_point; /* Samplepoint on tenth percent */
	u8 prop_seg; /* Propagation segment in TQs */
	u8 phase_seg1; /* Phase buffer segment 1 in TQs */
	u8 phase_seg2; /* Phase buffer segment 2 in TQs */
	u8 sjw; /* Synchronisation jump width in TQs */
} __packed;

/* reply payload of UCAN_COMMAND_GET / UCAN_COMMAND_GET_INFO */
struct ucan_ctl_cmd_device_info {
	__le32 freq; /* Clock Frequency for tq generation */
	u8 tx_fifo; /* Size of the transmission fifo */
	u8 sjw_max; /* can_bittiming fields... */
	u8 tseg1_min;
	u8 tseg1_max;
	u8 tseg2_min;
	u8 tseg2_max;
	__le16 brp_inc;
	__le32 brp_min;
	__le32 brp_max; /* ...can_bittiming fields */
	__le16 ctrlmodes; /* supported control modes */
	__le16 hwfilter; /* Number of HW filter banks */
	__le16 rxmboxes; /* Number of receive Mailboxes */
} __packed;

/* reply payload of UCAN_COMMAND_GET / UCAN_COMMAND_GET_PROTOCOL_VERSION */
struct ucan_ctl_cmd_get_protocol_version {
	__le32 version;
} __packed;

/* union of all control-transfer payloads, padded to 128 bytes */
union ucan_ctl_payload {
	/* Start the CAN transceiver
	 * bmRequest == UCAN_COMMAND_START
	 */
	struct ucan_ctl_cmd_start cmd_start;
	/* Setup Bittiming
	 * bmRequest == UCAN_COMMAND_SET_BITTIMING
	 */
	struct ucan_ctl_cmd_set_bittiming cmd_set_bittiming;
	/* Get Device Information
	 * bmRequest == UCAN_COMMAND_GET; wValue = UCAN_COMMAND_GET_INFO
	 */
	struct ucan_ctl_cmd_device_info cmd_get_device_info;
	/* Get Protocol Version
	 * bmRequest == UCAN_COMMAND_GET;
	 * wValue = UCAN_COMMAND_GET_PROTOCOL_VERSION
	 */
	struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;

	u8 raw[128];
} __packed;
190
/* flags in ucan_tx_complete_entry_t */
enum {
	UCAN_TX_COMPLETE_SUCCESS = BIT(0),
};

/* Transmission Complete within ucan_message_in */
struct ucan_tx_complete_entry_t {
	u8 echo_index;
	u8 flags;
} __packed __aligned(0x2);

/* CAN Data message format within ucan_message_in/out */
struct ucan_can_msg {
	/* note DLC is computed by
	 * msg.len - sizeof (msg.len)
	 * - sizeof (msg.type)
	 * - sizeof (msg.can_msg.id)
	 */
	__le32 id;

	union {
		u8 data[CAN_MAX_DLEN]; /* Data of CAN frames */
		u8 dlc; /* RTR dlc */
	};
} __packed;
215
/* OUT Endpoint, outbound messages */
struct ucan_message_out {
	__le16 len; /* Length of the content include header */
	u8 type; /* UCAN_OUT_TX and friends */
	u8 subtype; /* command sub type */

	union {
		/* Transmit CAN frame
		 * (type == UCAN_TX) && ((msg.can_msg.id & CAN_RTR_FLAG) == 0)
		 * subtype stores the echo id
		 */
		struct ucan_can_msg can_msg;
	} msg;
} __packed __aligned(0x4);

/* IN Endpoint, inbound messages */
struct ucan_message_in {
	__le16 len; /* Length of the content include header */
	u8 type; /* UCAN_IN_RX and friends */
	u8 subtype; /* command sub type */

	union {
		/* CAN Frame received
		 * (type == UCAN_IN_RX)
		 * && ((msg.can_msg.id & CAN_RTR_FLAG) == 0)
		 */
		struct ucan_can_msg can_msg;

		/* CAN transmission complete
		 * (type == UCAN_IN_TX_COMPLETE)
		 */
		struct ucan_tx_complete_entry_t can_tx_complete_msg[0];
	} __aligned(0x4) msg;
} __packed;
250
/* Macros to calculate message lengths */
#define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg)

#define UCAN_IN_HDR_SIZE offsetof(struct ucan_message_in, msg)
/* Length of an IN message is computed from the IN header. Both headers
 * are currently 4 bytes, so this is value-identical to the previous
 * UCAN_OUT_HDR_SIZE-based definition, but no longer depends on the two
 * layouts staying in sync.
 */
#define UCAN_IN_LEN(member) (UCAN_IN_HDR_SIZE + sizeof(member))
256
struct ucan_priv;

/* Context Information for transmission URBs */
struct ucan_urb_context {
	struct ucan_priv *up;	/* back-pointer to the owning device */
	u8 dlc;			/* DLC of the frame in flight (for stats) */
	bool allocated;		/* slot currently holds an in-flight frame */
};

/* Information reported by the USB device */
struct ucan_device_info {
	struct can_bittiming_const bittiming_const;
	u8 tx_fifo;		/* depth of the device's TX fifo */
};
271
/* Driver private data */
struct ucan_priv {
	/* must be the first member */
	struct can_priv can;

	/* linux USB device structures */
	struct usb_device *udev;
	struct usb_interface *intf;
	struct net_device *netdev;

	/* lock for can->echo_skb (used around
	 * can_put/get/free_echo_skb
	 */
	spinlock_t echo_skb_lock;

	/* usb device information information */
	u8 intf_index;
	u8 in_ep_addr;
	u8 out_ep_addr;
	u16 in_ep_size;

	/* transmission and reception buffers */
	struct usb_anchor rx_urbs;
	struct usb_anchor tx_urbs;

	/* single buffer reused for all control transfers */
	union ucan_ctl_payload *ctl_msg_buffer;
	struct ucan_device_info device_info;

	/* transmission control information and locks */
	spinlock_t context_lock;
	/* free slots in context_array; queue stops when it reaches 0 */
	unsigned int available_tx_urbs;
	struct ucan_urb_context *context_array;
};
305
306static u8 ucan_get_can_dlc(struct ucan_can_msg *msg, u16 len)
307{
308 if (le32_to_cpu(msg->id) & CAN_RTR_FLAG)
309 return get_can_dlc(msg->dlc);
310 else
311 return get_can_dlc(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id)));
312}
313
314static void ucan_release_context_array(struct ucan_priv *up)
315{
316 if (!up->context_array)
317 return;
318
319 /* lock is not needed because, driver is currently opening or closing */
320 up->available_tx_urbs = 0;
321
322 kfree(up->context_array);
323 up->context_array = NULL;
324}
325
326static int ucan_alloc_context_array(struct ucan_priv *up)
327{
328 int i;
329
330 /* release contexts if any */
331 ucan_release_context_array(up);
332
333 up->context_array = kcalloc(up->device_info.tx_fifo,
334 sizeof(*up->context_array),
335 GFP_KERNEL);
336 if (!up->context_array) {
337 netdev_err(up->netdev,
338 "Not enough memory to allocate tx contexts\n");
339 return -ENOMEM;
340 }
341
342 for (i = 0; i < up->device_info.tx_fifo; i++) {
343 up->context_array[i].allocated = false;
344 up->context_array[i].up = up;
345 }
346
347 /* lock is not needed because, driver is currently opening */
348 up->available_tx_urbs = up->device_info.tx_fifo;
349
350 return 0;
351}
352
/* Claim a free TX context slot under context_lock.
 *
 * Decrements available_tx_urbs and stops the netdev queue when the
 * last slot is taken. Returns NULL when all contexts are in use.
 */
static struct ucan_urb_context *ucan_alloc_context(struct ucan_priv *up)
{
	int i;
	unsigned long flags;
	struct ucan_urb_context *ret = NULL;

	if (WARN_ON_ONCE(!up->context_array))
		return NULL;

	/* execute context operation atomically */
	spin_lock_irqsave(&up->context_lock, flags);

	for (i = 0; i < up->device_info.tx_fifo; i++) {
		if (!up->context_array[i].allocated) {
			/* update context */
			ret = &up->context_array[i];
			up->context_array[i].allocated = true;

			/* stop queue if necessary */
			up->available_tx_urbs--;
			if (!up->available_tx_urbs)
				netif_stop_queue(up->netdev);

			break;
		}
	}

	spin_unlock_irqrestore(&up->context_lock, flags);
	return ret;
}
383
/* Return a TX context slot under context_lock.
 *
 * Wakes the netdev queue if it was stopped for lack of slots. Returns
 * false if the context was not marked allocated (e.g. the device sent
 * a bogus completion), true otherwise.
 */
static bool ucan_release_context(struct ucan_priv *up,
				 struct ucan_urb_context *ctx)
{
	unsigned long flags;
	bool ret = false;

	if (WARN_ON_ONCE(!up->context_array))
		return false;

	/* execute context operation atomically */
	spin_lock_irqsave(&up->context_lock, flags);

	/* context was not allocated, maybe the device sent garbage */
	if (ctx->allocated) {
		ctx->allocated = false;

		/* check if the queue needs to be woken */
		if (!up->available_tx_urbs)
			netif_wake_queue(up->netdev);
		up->available_tx_urbs++;

		ret = true;
	}

	spin_unlock_irqrestore(&up->context_lock, flags);
	return ret;
}
411
/* Issue a vendor control transfer to the interface (OUT direction).
 * The payload (datalen bytes) is taken from up->ctl_msg_buffer.
 * Returns the usb_control_msg() result (bytes transferred or -errno).
 */
static int ucan_ctrl_command_out(struct ucan_priv *up,
				 u8 cmd, u16 subcmd, u16 datalen)
{
	return usb_control_msg(up->udev,
			       usb_sndctrlpipe(up->udev, 0),
			       cmd,
			       USB_DIR_OUT | USB_TYPE_VENDOR |
						USB_RECIP_INTERFACE,
			       subcmd,
			       up->intf_index,
			       up->ctl_msg_buffer,
			       datalen,
			       UCAN_USB_CTL_PIPE_TIMEOUT);
}
426
/* Issue a vendor control transfer to the device (IN direction),
 * reading up to datalen bytes into up->ctl_msg_buffer.
 * Returns the usb_control_msg() result (bytes transferred or -errno).
 */
static int ucan_device_request_in(struct ucan_priv *up,
				  u8 cmd, u16 subcmd, u16 datalen)
{
	return usb_control_msg(up->udev,
			       usb_rcvctrlpipe(up->udev, 0),
			       cmd,
			       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       subcmd,
			       0,
			       up->ctl_msg_buffer,
			       datalen,
			       UCAN_USB_CTL_PIPE_TIMEOUT);
}
440
441/* Parse the device information structure reported by the device and
442 * setup private variables accordingly
443 */
444static void ucan_parse_device_info(struct ucan_priv *up,
445 struct ucan_ctl_cmd_device_info *device_info)
446{
447 struct can_bittiming_const *bittiming =
448 &up->device_info.bittiming_const;
449 u16 ctrlmodes;
450
451 /* store the data */
452 up->can.clock.freq = le32_to_cpu(device_info->freq);
453 up->device_info.tx_fifo = device_info->tx_fifo;
454 strcpy(bittiming->name, "ucan");
455 bittiming->tseg1_min = device_info->tseg1_min;
456 bittiming->tseg1_max = device_info->tseg1_max;
457 bittiming->tseg2_min = device_info->tseg2_min;
458 bittiming->tseg2_max = device_info->tseg2_max;
459 bittiming->sjw_max = device_info->sjw_max;
460 bittiming->brp_min = le32_to_cpu(device_info->brp_min);
461 bittiming->brp_max = le32_to_cpu(device_info->brp_max);
462 bittiming->brp_inc = le16_to_cpu(device_info->brp_inc);
463
464 ctrlmodes = le16_to_cpu(device_info->ctrlmodes);
465
466 up->can.ctrlmode_supported = 0;
467
468 if (ctrlmodes & UCAN_MODE_LOOPBACK)
469 up->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
470 if (ctrlmodes & UCAN_MODE_SILENT)
471 up->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
472 if (ctrlmodes & UCAN_MODE_3_SAMPLES)
473 up->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
474 if (ctrlmodes & UCAN_MODE_ONE_SHOT)
475 up->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
476 if (ctrlmodes & UCAN_MODE_BERR_REPORT)
477 up->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING;
478}
479
/* Handle a CAN error frame that we have received from the device.
 * Returns true if the can state has changed.
 *
 * Error details follow the layout in uapi/linux/can/error.h:
 * controller state bits in data[1], protocol error bits in data[2].
 */
static bool ucan_handle_error_frame(struct ucan_priv *up,
				    struct ucan_message_in *m,
				    canid_t canid)
{
	enum can_state new_state = up->can.state;
	struct net_device_stats *net_stats = &up->netdev->stats;
	struct can_device_stats *can_stats = &up->can.can_stats;

	if (canid & CAN_ERR_LOSTARB)
		can_stats->arbitration_lost++;

	if (canid & CAN_ERR_BUSERROR)
		can_stats->bus_error++;

	if (canid & CAN_ERR_ACK)
		net_stats->tx_errors++;

	if (canid & CAN_ERR_BUSOFF)
		new_state = CAN_STATE_BUS_OFF;

	/* controller problems, details in data[1] */
	if (canid & CAN_ERR_CRTL) {
		u8 d1 = m->msg.can_msg.data[1];

		if (d1 & CAN_ERR_CRTL_RX_OVERFLOW)
			net_stats->rx_over_errors++;

		/* controller state bits: if multiple are set the worst wins */
		if (d1 & CAN_ERR_CRTL_ACTIVE)
			new_state = CAN_STATE_ERROR_ACTIVE;

		if (d1 & (CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING))
			new_state = CAN_STATE_ERROR_WARNING;

		if (d1 & (CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE))
			new_state = CAN_STATE_ERROR_PASSIVE;
	}

	/* protocol error, details in data[2] */
	if (canid & CAN_ERR_PROT) {
		u8 d2 = m->msg.can_msg.data[2];

		if (d2 & CAN_ERR_PROT_TX)
			net_stats->tx_errors++;
		else
			net_stats->rx_errors++;
	}

	/* no state change - we are done */
	if (up->can.state == new_state)
		return false;

	/* we switched into a better state */
	if (up->can.state > new_state) {
		up->can.state = new_state;
		return true;
	}

	/* we switched into a worse state */
	up->can.state = new_state;
	switch (new_state) {
	case CAN_STATE_BUS_OFF:
		can_stats->bus_off++;
		can_bus_off(up->netdev);
		break;
	case CAN_STATE_ERROR_PASSIVE:
		can_stats->error_passive++;
		break;
	case CAN_STATE_ERROR_WARNING:
		can_stats->error_warning++;
		break;
	default:
		break;
	}
	return true;
}
559
560/* Callback on reception of a can frame via the IN endpoint
561 *
562 * This function allocates an skb and transferres it to the Linux
563 * network stack
564 */
565static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m)
566{
567 int len;
568 canid_t canid;
569 struct can_frame *cf;
570 struct sk_buff *skb;
571 struct net_device_stats *stats = &up->netdev->stats;
572
573 /* get the contents of the length field */
574 len = le16_to_cpu(m->len);
575
576 /* check sanity */
577 if (len < UCAN_IN_HDR_SIZE + sizeof(m->msg.can_msg.id)) {
578 netdev_warn(up->netdev, "invalid input message len: %d\n", len);
579 return;
580 }
581
582 /* handle error frames */
583 canid = le32_to_cpu(m->msg.can_msg.id);
584 if (canid & CAN_ERR_FLAG) {
585 bool busstate_changed = ucan_handle_error_frame(up, m, canid);
586
587 /* if berr-reporting is off only state changes get through */
588 if (!(up->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
589 !busstate_changed)
590 return;
591 } else {
592 canid_t canid_mask;
593 /* compute the mask for canid */
594 canid_mask = CAN_RTR_FLAG;
595 if (canid & CAN_EFF_FLAG)
596 canid_mask |= CAN_EFF_MASK | CAN_EFF_FLAG;
597 else
598 canid_mask |= CAN_SFF_MASK;
599
600 if (canid & ~canid_mask)
601 netdev_warn(up->netdev,
602 "unexpected bits set (canid %x, mask %x)",
603 canid, canid_mask);
604
605 canid &= canid_mask;
606 }
607
608 /* allocate skb */
609 skb = alloc_can_skb(up->netdev, &cf);
610 if (!skb)
611 return;
612
613 /* fill the can frame */
614 cf->can_id = canid;
615
616 /* compute DLC taking RTR_FLAG into account */
617 cf->can_dlc = ucan_get_can_dlc(&m->msg.can_msg, len);
618
619 /* copy the payload of non RTR frames */
620 if (!(cf->can_id & CAN_RTR_FLAG) || (cf->can_id & CAN_ERR_FLAG))
621 memcpy(cf->data, m->msg.can_msg.data, cf->can_dlc);
622
623 /* don't count error frames as real packets */
624 stats->rx_packets++;
625 stats->rx_bytes += cf->can_dlc;
626
627 /* pass it to Linux */
628 netif_rx(skb);
629}
630
/* callback indicating completed transmission
 *
 * The message body is an array of 2-byte (echo_index, flags) entries;
 * each entry releases its TX context and either harvests the echo skb
 * (success) or frees it (failure), updating the statistics.
 */
static void ucan_tx_complete_msg(struct ucan_priv *up,
				 struct ucan_message_in *m)
{
	unsigned long flags;
	u16 count, i;
	u8 echo_index, dlc;
	u16 len = le16_to_cpu(m->len);

	struct ucan_urb_context *context;

	/* body must exist and be a whole number of 2-byte entries */
	if (len < UCAN_IN_HDR_SIZE || (len % 2 != 0)) {
		netdev_err(up->netdev, "invalid tx complete length\n");
		return;
	}

	count = (len - UCAN_IN_HDR_SIZE) / 2;
	for (i = 0; i < count; i++) {
		/* we did not submit such echo ids */
		echo_index = m->msg.can_tx_complete_msg[i].echo_index;
		if (echo_index >= up->device_info.tx_fifo) {
			up->netdev->stats.tx_errors++;
			netdev_err(up->netdev,
				   "invalid echo_index %d received\n",
				   echo_index);
			continue;
		}

		/* gather information from the context */
		context = &up->context_array[echo_index];
		dlc = READ_ONCE(context->dlc);

		/* Release context and restart queue if necessary.
		 * Also check if the context was allocated
		 */
		if (!ucan_release_context(up, context))
			continue;

		spin_lock_irqsave(&up->echo_skb_lock, flags);
		if (m->msg.can_tx_complete_msg[i].flags &
		    UCAN_TX_COMPLETE_SUCCESS) {
			/* update statistics */
			up->netdev->stats.tx_packets++;
			up->netdev->stats.tx_bytes += dlc;
			can_get_echo_skb(up->netdev, echo_index);
		} else {
			up->netdev->stats.tx_dropped++;
			can_free_echo_skb(up->netdev, echo_index);
		}
		spin_unlock_irqrestore(&up->echo_skb_lock, flags);
	}
}
683
684/* callback on reception of a USB message */
685static void ucan_read_bulk_callback(struct urb *urb)
686{
687 int ret;
688 int pos;
689 struct ucan_priv *up = urb->context;
690 struct net_device *netdev = up->netdev;
691 struct ucan_message_in *m;
692
693 /* the device is not up and the driver should not receive any
694 * data on the bulk in pipe
695 */
696 if (WARN_ON(!up->context_array)) {
697 usb_free_coherent(up->udev,
698 up->in_ep_size,
699 urb->transfer_buffer,
700 urb->transfer_dma);
701 return;
702 }
703
704 /* check URB status */
705 switch (urb->status) {
706 case 0:
707 break;
708 case -ENOENT:
709 case -EPIPE:
710 case -EPROTO:
711 case -ESHUTDOWN:
712 case -ETIME:
713 /* urb is not resubmitted -> free dma data */
714 usb_free_coherent(up->udev,
715 up->in_ep_size,
716 urb->transfer_buffer,
717 urb->transfer_dma);
718 netdev_dbg(up->netdev, "not resumbmitting urb; status: %d\n",
719 urb->status);
720 return;
721 default:
722 goto resubmit;
723 }
724
725 /* sanity check */
726 if (!netif_device_present(netdev))
727 return;
728
729 /* iterate over input */
730 pos = 0;
731 while (pos < urb->actual_length) {
732 int len;
733
734 /* check sanity (length of header) */
735 if ((urb->actual_length - pos) < UCAN_IN_HDR_SIZE) {
736 netdev_warn(up->netdev,
737 "invalid message (short; no hdr; l:%d)\n",
738 urb->actual_length);
739 goto resubmit;
740 }
741
742 /* setup the message address */
743 m = (struct ucan_message_in *)
744 ((u8 *)urb->transfer_buffer + pos);
745 len = le16_to_cpu(m->len);
746
747 /* check sanity (length of content) */
748 if (urb->actual_length - pos < len) {
749 netdev_warn(up->netdev,
750 "invalid message (short; no data; l:%d)\n",
751 urb->actual_length);
752 print_hex_dump(KERN_WARNING,
753 "raw data: ",
754 DUMP_PREFIX_ADDRESS,
755 16,
756 1,
757 urb->transfer_buffer,
758 urb->actual_length,
759 true);
760
761 goto resubmit;
762 }
763
764 switch (m->type) {
765 case UCAN_IN_RX:
766 ucan_rx_can_msg(up, m);
767 break;
768 case UCAN_IN_TX_COMPLETE:
769 ucan_tx_complete_msg(up, m);
770 break;
771 default:
772 netdev_warn(up->netdev,
773 "invalid message (type; t:%d)\n",
774 m->type);
775 break;
776 }
777
778 /* proceed to next message */
779 pos += len;
780 /* align to 4 byte boundary */
781 pos = round_up(pos, 4);
782 }
783
784resubmit:
785 /* resubmit urb when done */
786 usb_fill_bulk_urb(urb, up->udev,
787 usb_rcvbulkpipe(up->udev,
788 up->in_ep_addr),
789 urb->transfer_buffer,
790 up->in_ep_size,
791 ucan_read_bulk_callback,
792 up);
793
794 usb_anchor_urb(urb, &up->rx_urbs);
795 ret = usb_submit_urb(urb, GFP_KERNEL);
796
797 if (ret < 0) {
798 netdev_err(up->netdev,
799 "failed resubmitting read bulk urb: %d\n",
800 ret);
801
802 usb_unanchor_urb(urb);
803 usb_free_coherent(up->udev,
804 up->in_ep_size,
805 urb->transfer_buffer,
806 urb->transfer_dma);
807
808 if (ret == -ENODEV)
809 netif_device_detach(netdev);
810 }
811}
812
/* callback after transmission of a USB message
 *
 * Always releases the coherent TX buffer. If the USB transfer itself
 * failed, the device will never send a TX-complete message for this
 * frame, so the echoed skb is dropped here, the frame is counted as
 * tx_dropped and the urb context is released.
 */
static void ucan_write_bulk_callback(struct urb *urb)
{
	unsigned long flags;
	struct ucan_priv *up;
	struct ucan_urb_context *context = urb->context;

	/* get the urb context */
	if (WARN_ON_ONCE(!context))
		return;

	/* free up our allocated buffer; done before the remaining
	 * sanity checks so the buffer is not leaked on early return
	 */
	usb_free_coherent(urb->dev,
			  sizeof(struct ucan_message_out),
			  urb->transfer_buffer,
			  urb->transfer_dma);

	up = context->up;
	if (WARN_ON_ONCE(!up))
		return;

	/* sanity check */
	if (!netif_device_present(up->netdev))
		return;

	/* transmission failed (USB - the device will not send a TX complete) */
	if (urb->status) {
		netdev_warn(up->netdev,
			    "failed to transmit USB message to device: %d\n",
			    urb->status);

		/* update counters and cleanup; the echo slot index is
		 * recovered from the context's array position
		 */
		spin_lock_irqsave(&up->echo_skb_lock, flags);
		can_free_echo_skb(up->netdev, context - up->context_array);
		spin_unlock_irqrestore(&up->echo_skb_lock, flags);

		up->netdev->stats.tx_dropped++;

		/* release context and restart the queue if necessary */
		if (!ucan_release_context(up, context))
			netdev_err(up->netdev,
				   "urb failed, failed to release context\n");
	}
}
857
858static void ucan_cleanup_rx_urbs(struct ucan_priv *up, struct urb **urbs)
859{
860 int i;
861
862 for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
863 if (urbs[i]) {
864 usb_unanchor_urb(urbs[i]);
865 usb_free_coherent(up->udev,
866 up->in_ep_size,
867 urbs[i]->transfer_buffer,
868 urbs[i]->transfer_dma);
869 usb_free_urb(urbs[i]);
870 }
871 }
872
873 memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS);
874}
875
876static int ucan_prepare_and_anchor_rx_urbs(struct ucan_priv *up,
877 struct urb **urbs)
878{
879 int i;
880
881 memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS);
882
883 for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
884 void *buf;
885
886 urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
887 if (!urbs[i])
888 goto err;
889
890 buf = usb_alloc_coherent(up->udev,
891 up->in_ep_size,
892 GFP_KERNEL, &urbs[i]->transfer_dma);
893 if (!buf) {
894 /* cleanup this urb */
895 usb_free_urb(urbs[i]);
896 urbs[i] = NULL;
897 goto err;
898 }
899
900 usb_fill_bulk_urb(urbs[i], up->udev,
901 usb_rcvbulkpipe(up->udev,
902 up->in_ep_addr),
903 buf,
904 up->in_ep_size,
905 ucan_read_bulk_callback,
906 up);
907
908 urbs[i]->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
909
910 usb_anchor_urb(urbs[i], &up->rx_urbs);
911 }
912 return 0;
913
914err:
915 /* cleanup other unsubmitted urbs */
916 ucan_cleanup_rx_urbs(up, urbs);
917 return -ENOMEM;
918}
919
920/* Submits rx urbs with the semantic: Either submit all, or cleanup
921 * everything. I case of errors submitted urbs are killed and all urbs in
922 * the array are freed. I case of no errors every entry in the urb
923 * array is set to NULL.
924 */
925static int ucan_submit_rx_urbs(struct ucan_priv *up, struct urb **urbs)
926{
927 int i, ret;
928
929 /* Iterate over all urbs to submit. On success remove the urb
930 * from the list.
931 */
932 for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
933 ret = usb_submit_urb(urbs[i], GFP_KERNEL);
934 if (ret) {
935 netdev_err(up->netdev,
936 "could not submit urb; code: %d\n",
937 ret);
938 goto err;
939 }
940
941 /* Anchor URB and drop reference, USB core will take
942 * care of freeing it
943 */
944 usb_free_urb(urbs[i]);
945 urbs[i] = NULL;
946 }
947 return 0;
948
949err:
950 /* Cleanup unsubmitted urbs */
951 ucan_cleanup_rx_urbs(up, urbs);
952
953 /* Kill urbs that are already submitted */
954 usb_kill_anchored_urbs(&up->rx_urbs);
955
956 return ret;
957}
958
/* Open the network device
 *
 * Allocates the tx context array and rx urbs, translates the CAN core
 * control mode into device flags, starts the device, opens the CAN
 * layer and submits the rx urbs. Error paths unwind in reverse order;
 * err_stop additionally sends STOP because the device may already be
 * running, and err_reset sends RESET to restore a consistent state.
 */
static int ucan_open(struct net_device *netdev)
{
	int ret, ret_cleanup;
	u16 ctrlmode;
	struct urb *urbs[UCAN_MAX_RX_URBS];
	struct ucan_priv *up = netdev_priv(netdev);

	ret = ucan_alloc_context_array(up);
	if (ret)
		return ret;

	/* Allocate and prepare IN URBS - allocated and anchored
	 * urbs are stored in urbs[] for cleanup
	 */
	ret = ucan_prepare_and_anchor_rx_urbs(up, urbs);
	if (ret)
		goto err_contexts;

	/* Check the control mode */
	ctrlmode = 0;
	if (up->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		ctrlmode |= UCAN_MODE_LOOPBACK;
	if (up->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		ctrlmode |= UCAN_MODE_SILENT;
	if (up->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		ctrlmode |= UCAN_MODE_3_SAMPLES;
	if (up->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		ctrlmode |= UCAN_MODE_ONE_SHOT;

	/* Enable this in any case - filtering is done within the
	 * receive path
	 */
	ctrlmode |= UCAN_MODE_BERR_REPORT;
	up->ctl_msg_buffer->cmd_start.mode = cpu_to_le16(ctrlmode);

	/* Driver is ready to receive data - start the USB device */
	ret = ucan_ctrl_command_out(up, UCAN_COMMAND_START, 0, 2);
	if (ret < 0) {
		netdev_err(up->netdev,
			   "could not start device, code: %d\n",
			   ret);
		goto err_reset;
	}

	/* Call CAN layer open */
	ret = open_candev(netdev);
	if (ret)
		goto err_stop;

	/* Driver is ready to receive data. Submit RX URBS */
	ret = ucan_submit_rx_urbs(up, urbs);
	if (ret)
		goto err_stop;

	up->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Start the network queue */
	netif_start_queue(netdev);

	return 0;

err_stop:
	/* The device may have started already, stop it */
	ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0);
	if (ret_cleanup < 0)
		netdev_err(up->netdev,
			   "could not stop device, code: %d\n",
			   ret_cleanup);

err_reset:
	/* The device might have received data, reset it for
	 * consistent state
	 */
	ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
	if (ret_cleanup < 0)
		netdev_err(up->netdev,
			   "could not reset device, code: %d\n",
			   ret_cleanup);

	/* clean up unsubmitted urbs */
	ucan_cleanup_rx_urbs(up, urbs);

err_contexts:
	ucan_release_context_array(up);
	return ret;
}
1046
/* Allocate and fill a bulk-out urb carrying one CAN frame.
 *
 * Allocates the urb and a coherent DMA buffer (both GFP_ATOMIC since
 * this is called from the xmit path), encodes @cf as a
 * ucan_message_out and returns the ready-to-submit urb, or NULL on
 * allocation failure. @echo_index travels to the device in the
 * message subtype and comes back in the TX-complete message.
 */
static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up,
				       struct ucan_urb_context *context,
				       struct can_frame *cf,
				       u8 echo_index)
{
	int mlen;
	struct urb *urb;
	struct ucan_message_out *m;

	/* create a URB, and a buffer for it, and copy the data to the URB */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(up->netdev, "no memory left for URBs\n");
		return NULL;
	}

	m = usb_alloc_coherent(up->udev,
			       sizeof(struct ucan_message_out),
			       GFP_ATOMIC,
			       &urb->transfer_dma);
	if (!m) {
		netdev_err(up->netdev, "no memory left for USB buffer\n");
		usb_free_urb(urb);
		return NULL;
	}

	/* build the USB message */
	m->type = UCAN_OUT_TX;
	m->msg.can_msg.id = cpu_to_le32(cf->can_id);

	if (cf->can_id & CAN_RTR_FLAG) {
		/* RTR frames carry no payload: the message ends right
		 * after the dlc field
		 */
		mlen = UCAN_OUT_HDR_SIZE +
			offsetof(struct ucan_can_msg, dlc) +
			sizeof(m->msg.can_msg.dlc);
		m->msg.can_msg.dlc = cf->can_dlc;
	} else {
		/* data frames: id plus can_dlc payload bytes */
		mlen = UCAN_OUT_HDR_SIZE +
			sizeof(m->msg.can_msg.id) + cf->can_dlc;
		memcpy(m->msg.can_msg.data, cf->data, cf->can_dlc);
	}
	m->len = cpu_to_le16(mlen);

	/* remember the dlc for the tx_bytes statistics updated on
	 * TX-complete
	 */
	context->dlc = cf->can_dlc;

	m->subtype = echo_index;

	/* build the urb */
	usb_fill_bulk_urb(urb, up->udev,
			  usb_sndbulkpipe(up->udev,
					  up->out_ep_addr),
			  m, mlen, ucan_write_bulk_callback, context);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	return urb;
}
1102
1103static void ucan_clean_up_tx_urb(struct ucan_priv *up, struct urb *urb)
1104{
1105 usb_free_coherent(up->udev, sizeof(struct ucan_message_out),
1106 urb->transfer_buffer, urb->transfer_dma);
1107 usb_free_urb(urb);
1108}
1109
1110/* callback when Linux needs to send a can frame */
1111static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
1112 struct net_device *netdev)
1113{
1114 unsigned long flags;
1115 int ret;
1116 u8 echo_index;
1117 struct urb *urb;
1118 struct ucan_urb_context *context;
1119 struct ucan_priv *up = netdev_priv(netdev);
1120 struct can_frame *cf = (struct can_frame *)skb->data;
1121
1122 /* check skb */
1123 if (can_dropped_invalid_skb(netdev, skb))
1124 return NETDEV_TX_OK;
1125
1126 /* allocate a context and slow down tx path, if fifo state is low */
1127 context = ucan_alloc_context(up);
1128 echo_index = context - up->context_array;
1129
1130 if (WARN_ON_ONCE(!context))
1131 return NETDEV_TX_BUSY;
1132
1133 /* prepare urb for transmission */
1134 urb = ucan_prepare_tx_urb(up, context, cf, echo_index);
1135 if (!urb)
1136 goto drop;
1137
1138 /* put the skb on can loopback stack */
1139 spin_lock_irqsave(&up->echo_skb_lock, flags);
1140 can_put_echo_skb(skb, up->netdev, echo_index);
1141 spin_unlock_irqrestore(&up->echo_skb_lock, flags);
1142
1143 /* transmit it */
1144 usb_anchor_urb(urb, &up->tx_urbs);
1145 ret = usb_submit_urb(urb, GFP_ATOMIC);
1146
1147 /* cleanup urb */
1148 if (ret) {
1149 /* on error, clean up */
1150 usb_unanchor_urb(urb);
1151 ucan_clean_up_tx_urb(up, urb);
1152 if (!ucan_release_context(up, context))
1153 netdev_err(up->netdev,
1154 "xmit err: failed to release context\n");
1155
1156 /* remove the skb from the echo stack - this also
1157 * frees the skb
1158 */
1159 spin_lock_irqsave(&up->echo_skb_lock, flags);
1160 can_free_echo_skb(up->netdev, echo_index);
1161 spin_unlock_irqrestore(&up->echo_skb_lock, flags);
1162
1163 if (ret == -ENODEV) {
1164 netif_device_detach(up->netdev);
1165 } else {
1166 netdev_warn(up->netdev,
1167 "xmit err: failed to submit urb %d\n",
1168 ret);
1169 up->netdev->stats.tx_dropped++;
1170 }
1171 return NETDEV_TX_OK;
1172 }
1173
1174 netif_trans_update(netdev);
1175
1176 /* release ref, as we do not need the urb anymore */
1177 usb_free_urb(urb);
1178
1179 return NETDEV_TX_OK;
1180
1181drop:
1182 if (!ucan_release_context(up, context))
1183 netdev_err(up->netdev,
1184 "xmit drop: failed to release context\n");
1185 dev_kfree_skb(skb);
1186 up->netdev->stats.tx_dropped++;
1187
1188 return NETDEV_TX_OK;
1189}
1190
/* Device goes down
 *
 * Clean up used resources: kill in-flight urbs, stop and reset the
 * device, release the tx context array and close the CAN layer.
 * Errors from the STOP/RESET commands are logged but do not abort the
 * shutdown.
 */
static int ucan_close(struct net_device *netdev)
{
	int ret;
	struct ucan_priv *up = netdev_priv(netdev);

	up->can.state = CAN_STATE_STOPPED;

	/* stop sending data */
	usb_kill_anchored_urbs(&up->tx_urbs);

	/* stop receiving data */
	usb_kill_anchored_urbs(&up->rx_urbs);

	/* stop and reset can device */
	ret = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0);
	if (ret < 0)
		netdev_err(up->netdev,
			   "could not stop device, code: %d\n",
			   ret);

	ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
	if (ret < 0)
		netdev_err(up->netdev,
			   "could not reset device, code: %d\n",
			   ret);

	netif_stop_queue(netdev);

	ucan_release_context_array(up);

	close_candev(up->netdev);
	return 0;
}
1228
/* CAN driver callbacks (net_device interface) */
static const struct net_device_ops ucan_netdev_ops = {
	.ndo_open = ucan_open,
	.ndo_stop = ucan_close,
	.ndo_start_xmit = ucan_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
1236
1237/* Request to set bittiming
1238 *
1239 * This function generates an USB set bittiming message and transmits
1240 * it to the device
1241 */
1242static int ucan_set_bittiming(struct net_device *netdev)
1243{
1244 int ret;
1245 struct ucan_priv *up = netdev_priv(netdev);
1246 struct ucan_ctl_cmd_set_bittiming *cmd_set_bittiming;
1247
1248 cmd_set_bittiming = &up->ctl_msg_buffer->cmd_set_bittiming;
1249 cmd_set_bittiming->tq = cpu_to_le32(up->can.bittiming.tq);
1250 cmd_set_bittiming->brp = cpu_to_le16(up->can.bittiming.brp);
1251 cmd_set_bittiming->sample_point =
1252 cpu_to_le16(up->can.bittiming.sample_point);
1253 cmd_set_bittiming->prop_seg = up->can.bittiming.prop_seg;
1254 cmd_set_bittiming->phase_seg1 = up->can.bittiming.phase_seg1;
1255 cmd_set_bittiming->phase_seg2 = up->can.bittiming.phase_seg2;
1256 cmd_set_bittiming->sjw = up->can.bittiming.sjw;
1257
1258 ret = ucan_ctrl_command_out(up, UCAN_COMMAND_SET_BITTIMING, 0,
1259 sizeof(*cmd_set_bittiming));
1260 return (ret < 0) ? ret : 0;
1261}
1262
/* Restart the device to get it out of BUS-OFF state.
 * Called when the user runs "ip link set can1 type can restart".
 */
static int ucan_set_mode(struct net_device *netdev, enum can_mode mode)
{
	int ret;
	unsigned long flags;
	struct ucan_priv *up = netdev_priv(netdev);

	switch (mode) {
	case CAN_MODE_START:
		netdev_dbg(up->netdev, "restarting device\n");

		/* ask the device to leave BUS-OFF; state is set
		 * optimistically even if the command failed (ret is
		 * still returned to the caller)
		 */
		ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESTART, 0, 0);
		up->can.state = CAN_STATE_ERROR_ACTIVE;

		/* check if queue can be restarted,
		 * up->available_tx_urbs must be protected by the
		 * lock
		 */
		spin_lock_irqsave(&up->context_lock, flags);

		if (up->available_tx_urbs > 0)
			netif_wake_queue(up->netdev);

		spin_unlock_irqrestore(&up->context_lock, flags);

		return ret;
	default:
		/* only restart is supported */
		return -EOPNOTSUPP;
	}
}
1295
/* Probe the device, reset it and gather general device information */
static int ucan_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	int ret;
	int i;
	u32 protocol_version;
	struct usb_device *udev;
	struct net_device *netdev;
	struct usb_host_interface *iface_desc;
	struct ucan_priv *up;
	struct usb_endpoint_descriptor *ep;
	u16 in_ep_size;
	u16 out_ep_size;
	u8 in_ep_addr;
	u8 out_ep_addr;
	union ucan_ctl_payload *ctl_msg_buffer;
	char firmware_str[sizeof(union ucan_ctl_payload) + 1];

	udev = interface_to_usbdev(intf);

	/* Stage 1 - Interface Parsing
	 * ---------------------------
	 *
	 * Identify the device USB interface descriptor and its
	 * endpoints. Probing is aborted on errors.
	 */

	/* check if the interface is sane */
	iface_desc = intf->cur_altsetting;
	if (!iface_desc)
		return -ENODEV;

	dev_info(&udev->dev,
		 "%s: probing device on interface #%d\n",
		 UCAN_DRIVER_NAME,
		 iface_desc->desc.bInterfaceNumber);

	/* interface sanity check: exactly one bulk-in and one
	 * bulk-out endpoint are expected
	 */
	if (iface_desc->desc.bNumEndpoints != 2) {
		dev_err(&udev->dev,
			"%s: invalid EP count (%d)",
			UCAN_DRIVER_NAME, iface_desc->desc.bNumEndpoints);
		goto err_firmware_needs_update;
	}

	/* check interface endpoints */
	in_ep_addr = 0;
	out_ep_addr = 0;
	in_ep_size = 0;
	out_ep_size = 0;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
		ep = &iface_desc->endpoint[i].desc;

		if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != 0) &&
		    ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
		     USB_ENDPOINT_XFER_BULK)) {
			/* In Endpoint */
			in_ep_addr = ep->bEndpointAddress;
			in_ep_addr &= USB_ENDPOINT_NUMBER_MASK;
			in_ep_size = le16_to_cpu(ep->wMaxPacketSize);
		} else if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ==
			    0) &&
			   ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
			    USB_ENDPOINT_XFER_BULK)) {
			/* Out Endpoint */
			out_ep_addr = ep->bEndpointAddress;
			out_ep_addr &= USB_ENDPOINT_NUMBER_MASK;
			out_ep_size = le16_to_cpu(ep->wMaxPacketSize);
		}
	}

	/* check if interface is sane */
	if (!in_ep_addr || !out_ep_addr) {
		dev_err(&udev->dev, "%s: invalid endpoint configuration\n",
			UCAN_DRIVER_NAME);
		goto err_firmware_needs_update;
	}
	if (in_ep_size < sizeof(struct ucan_message_in)) {
		dev_err(&udev->dev, "%s: invalid in_ep MaxPacketSize\n",
			UCAN_DRIVER_NAME);
		goto err_firmware_needs_update;
	}
	if (out_ep_size < sizeof(struct ucan_message_out)) {
		dev_err(&udev->dev, "%s: invalid out_ep MaxPacketSize\n",
			UCAN_DRIVER_NAME);
		goto err_firmware_needs_update;
	}

	/* Stage 2 - Device Identification
	 * -------------------------------
	 *
	 * The device interface seems to be a ucan device. Do further
	 * compatibility checks. On error probing is aborted, on
	 * success this stage leaves the ctl_msg_buffer with the
	 * reported contents of a GET_INFO command (supported
	 * bittimings, tx_fifo depth). This information is used in
	 * Stage 3 for the final driver initialisation.
	 */

	/* Prepare memory for control transfers */
	ctl_msg_buffer = devm_kzalloc(&udev->dev,
				      sizeof(union ucan_ctl_payload),
				      GFP_KERNEL);
	if (!ctl_msg_buffer) {
		dev_err(&udev->dev,
			"%s: failed to allocate control pipe memory\n",
			UCAN_DRIVER_NAME);
		return -ENOMEM;
	}

	/* get protocol version
	 *
	 * note: ucan_ctrl_command_* wrappers cannot be used yet
	 * because `up` is initialised in Stage 3
	 */
	ret = usb_control_msg(udev,
			      usb_rcvctrlpipe(udev, 0),
			      UCAN_COMMAND_GET,
			      USB_DIR_IN | USB_TYPE_VENDOR |
			      USB_RECIP_INTERFACE,
			      UCAN_COMMAND_GET_PROTOCOL_VERSION,
			      iface_desc->desc.bInterfaceNumber,
			      ctl_msg_buffer,
			      sizeof(union ucan_ctl_payload),
			      UCAN_USB_CTL_PIPE_TIMEOUT);

	/* older firmware versions do not support this command - those
	 * are not supported by this driver.
	 * 4 == size of the le32 version word read below
	 */
	if (ret != 4) {
		dev_err(&udev->dev,
			"%s: could not read protocol version, ret=%d\n",
			UCAN_DRIVER_NAME, ret);
		if (ret >= 0)
			ret = -EINVAL;
		goto err_firmware_needs_update;
	}

	/* this driver currently supports protocol version 3 only */
	protocol_version =
		le32_to_cpu(ctl_msg_buffer->cmd_get_protocol_version.version);
	if (protocol_version < UCAN_PROTOCOL_VERSION_MIN ||
	    protocol_version > UCAN_PROTOCOL_VERSION_MAX) {
		dev_err(&udev->dev,
			"%s: device protocol version %d is not supported\n",
			UCAN_DRIVER_NAME, protocol_version);
		goto err_firmware_needs_update;
	}

	/* request the device information and store it in ctl_msg_buffer
	 *
	 * note: ucan_ctrl_command_* wrappers cannot be used yet
	 * because `up` is initialised in Stage 3
	 */
	ret = usb_control_msg(udev,
			      usb_rcvctrlpipe(udev, 0),
			      UCAN_COMMAND_GET,
			      USB_DIR_IN | USB_TYPE_VENDOR |
			      USB_RECIP_INTERFACE,
			      UCAN_COMMAND_GET_INFO,
			      iface_desc->desc.bInterfaceNumber,
			      ctl_msg_buffer,
			      sizeof(ctl_msg_buffer->cmd_get_device_info),
			      UCAN_USB_CTL_PIPE_TIMEOUT);

	if (ret < 0) {
		dev_err(&udev->dev, "%s: failed to retrieve device info\n",
			UCAN_DRIVER_NAME);
		goto err_firmware_needs_update;
	}
	if (ret < sizeof(ctl_msg_buffer->cmd_get_device_info)) {
		dev_err(&udev->dev, "%s: device reported invalid device info\n",
			UCAN_DRIVER_NAME);
		goto err_firmware_needs_update;
	}
	if (ctl_msg_buffer->cmd_get_device_info.tx_fifo == 0) {
		dev_err(&udev->dev,
			"%s: device reported invalid tx-fifo size\n",
			UCAN_DRIVER_NAME);
		goto err_firmware_needs_update;
	}

	/* Stage 3 - Driver Initialisation
	 * -------------------------------
	 *
	 * Register device to Linux, prepare private structures and
	 * reset the device.
	 */

	/* allocate driver resources; echo skb count matches the
	 * device-reported tx fifo depth
	 */
	netdev = alloc_candev(sizeof(struct ucan_priv),
			      ctl_msg_buffer->cmd_get_device_info.tx_fifo);
	if (!netdev) {
		dev_err(&udev->dev,
			"%s: cannot allocate candev\n", UCAN_DRIVER_NAME);
		return -ENOMEM;
	}

	up = netdev_priv(netdev);

	/* initialize data */
	up->udev = udev;
	up->intf = intf;
	up->netdev = netdev;
	up->intf_index = iface_desc->desc.bInterfaceNumber;
	up->in_ep_addr = in_ep_addr;
	up->out_ep_addr = out_ep_addr;
	up->in_ep_size = in_ep_size;
	up->ctl_msg_buffer = ctl_msg_buffer;
	up->context_array = NULL;
	up->available_tx_urbs = 0;

	up->can.state = CAN_STATE_STOPPED;
	up->can.bittiming_const = &up->device_info.bittiming_const;
	up->can.do_set_bittiming = ucan_set_bittiming;
	up->can.do_set_mode = &ucan_set_mode;
	spin_lock_init(&up->context_lock);
	spin_lock_init(&up->echo_skb_lock);
	netdev->netdev_ops = &ucan_netdev_ops;

	usb_set_intfdata(intf, up);
	SET_NETDEV_DEV(netdev, &intf->dev);

	/* parse device information
	 * the data retrieved in Stage 2 is still available in
	 * up->ctl_msg_buffer
	 */
	ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);

	/* just print some device information - if available */
	ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
				     sizeof(union ucan_ctl_payload));
	if (ret > 0) {
		/* copy string while ensuring zero termination;
		 * firmware_str is one byte larger than the payload
		 */
		strncpy(firmware_str, up->ctl_msg_buffer->raw,
			sizeof(union ucan_ctl_payload));
		firmware_str[sizeof(union ucan_ctl_payload)] = '\0';
	} else {
		strcpy(firmware_str, "unknown");
	}

	/* device is compatible, reset it */
	ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
	if (ret < 0)
		goto err_free_candev;

	init_usb_anchor(&up->rx_urbs);
	init_usb_anchor(&up->tx_urbs);

	up->can.state = CAN_STATE_STOPPED;

	/* register the device */
	ret = register_candev(netdev);
	if (ret)
		goto err_free_candev;

	/* initialisation complete, log device info */
	netdev_info(up->netdev, "registered device\n");
	netdev_info(up->netdev, "firmware string: %s\n", firmware_str);

	/* success */
	return 0;

err_free_candev:
	free_candev(netdev);
	return ret;

err_firmware_needs_update:
	dev_err(&udev->dev,
		"%s: probe failed; try to update the device firmware\n",
		UCAN_DRIVER_NAME);
	return -ENODEV;
}
1570
1571/* disconnect the device */
1572static void ucan_disconnect(struct usb_interface *intf)
1573{
1574 struct ucan_priv *up = usb_get_intfdata(intf);
1575
1576 usb_set_intfdata(intf, NULL);
1577
1578 if (up) {
1579 unregister_netdev(up->netdev);
1580 free_candev(up->netdev);
1581 }
1582}
1583
/* supported devices (vendor id 0x2294, Theobroma Systems) */
static struct usb_device_id ucan_table[] = {
	/* Mule (soldered onto compute modules) */
	{USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425a, 0)},
	/* Seal (standalone USB stick) */
	{USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425b, 0)},
	{} /* Terminating entry */
};
1591
1592MODULE_DEVICE_TABLE(usb, ucan_table);
/* driver callbacks registered with the USB core */
static struct usb_driver ucan_driver = {
	.name = UCAN_DRIVER_NAME,
	.probe = ucan_probe,
	.disconnect = ucan_disconnect,
	.id_table = ucan_table,
};
1600
1601module_usb_driver(ucan_driver);
1602
1603MODULE_LICENSE("GPL v2");
1604MODULE_AUTHOR("Martin Elshuber <martin.elshuber@theobroma-systems.com>");
1605MODULE_AUTHOR("Jakob Unterwurzacher <jakob.unterwurzacher@theobroma-systems.com>");
1606MODULE_DESCRIPTION("Driver for Theobroma Systems UCAN devices");
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 89aec07c225f..045f0845e665 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2,6 +2,7 @@
2 * 2 *
3 * Copyright (C) 2012 - 2014 Xilinx, Inc. 3 * Copyright (C) 2012 - 2014 Xilinx, Inc.
4 * Copyright (C) 2009 PetaLogix. All rights reserved. 4 * Copyright (C) 2009 PetaLogix. All rights reserved.
5 * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
5 * 6 *
6 * Description: 7 * Description:
7 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. 8 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/netdevice.h> 27#include <linux/netdevice.h>
27#include <linux/of.h> 28#include <linux/of.h>
29#include <linux/of_device.h>
28#include <linux/platform_device.h> 30#include <linux/platform_device.h>
29#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
30#include <linux/string.h> 33#include <linux/string.h>
31#include <linux/types.h> 34#include <linux/types.h>
32#include <linux/can/dev.h> 35#include <linux/can/dev.h>
@@ -48,16 +51,34 @@ enum xcan_reg {
48 XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */ 51 XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
49 XCAN_IER_OFFSET = 0x20, /* Interrupt enable */ 52 XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
50 XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */ 53 XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
51 XCAN_TXFIFO_ID_OFFSET = 0x30,/* TX FIFO ID */ 54
52 XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */ 55 /* not on CAN FD cores */
53 XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */ 56 XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */
54 XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */ 57 XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */
55 XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */ 58 XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */
56 XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */ 59
57 XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data Word 1 */ 60 /* only on CAN FD cores */
58 XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */ 61 XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
62 XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
63 XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
64 XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
65 XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */
59}; 66};
60 67
68#define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
69#define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
70#define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
71#define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)
72
73#define XCAN_CANFD_FRAME_SIZE 0x48
74#define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
75 XCAN_CANFD_FRAME_SIZE * (n))
76#define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \
77 XCAN_CANFD_FRAME_SIZE * (n))
78
79/* the single TX mailbox used by this driver on CAN FD HW */
80#define XCAN_TX_MAILBOX_IDX 0
81
61/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */ 82/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
62#define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */ 83#define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
63#define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */ 84#define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
@@ -67,6 +88,9 @@ enum xcan_reg {
67#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */ 88#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
68#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */ 89#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
69#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */ 90#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
91#define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */
92#define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */
93#define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */
70#define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */ 94#define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
71#define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */ 95#define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
72#define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */ 96#define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
@@ -80,6 +104,7 @@ enum xcan_reg {
80#define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */ 104#define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
81#define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */ 105#define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
82#define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */ 106#define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
107#define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */
83#define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */ 108#define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
84#define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */ 109#define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
85#define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */ 110#define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
@@ -97,15 +122,15 @@ enum xcan_reg {
97#define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */ 122#define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
98#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */ 123#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
99#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */ 124#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
100 125#define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */
101#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\ 126#define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */
102 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \ 127#define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */
103 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
104 XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
105 128
106/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ 129/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
107#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ 130#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
108#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */ 131#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
132#define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
133#define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */
109#define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */ 134#define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */
110#define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */ 135#define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
111#define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */ 136#define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
@@ -115,9 +140,31 @@ enum xcan_reg {
115#define XCAN_FRAME_MAX_DATA_LEN 8 140#define XCAN_FRAME_MAX_DATA_LEN 8
116#define XCAN_TIMEOUT (1 * HZ) 141#define XCAN_TIMEOUT (1 * HZ)
117 142
143/* TX-FIFO-empty interrupt available */
144#define XCAN_FLAG_TXFEMP 0x0001
145/* RX Match Not Finished interrupt available */
146#define XCAN_FLAG_RXMNF 0x0002
147/* Extended acceptance filters with control at 0xE0 */
148#define XCAN_FLAG_EXT_FILTERS 0x0004
149/* TX mailboxes instead of TX FIFO */
150#define XCAN_FLAG_TX_MAILBOXES 0x0008
151/* RX FIFO with each buffer in separate registers at 0x1100
152 * instead of the regular FIFO at 0x50
153 */
154#define XCAN_FLAG_RX_FIFO_MULTI 0x0010
155
156struct xcan_devtype_data {
157 unsigned int flags;
158 const struct can_bittiming_const *bittiming_const;
159 const char *bus_clk_name;
160 unsigned int btr_ts2_shift;
161 unsigned int btr_sjw_shift;
162};
163
118/** 164/**
119 * struct xcan_priv - This definition define CAN driver instance 165 * struct xcan_priv - This definition define CAN driver instance
120 * @can: CAN private data structure. 166 * @can: CAN private data structure.
167 * @tx_lock: Lock for synchronizing TX interrupt handling
121 * @tx_head: Tx CAN packets ready to send on the queue 168 * @tx_head: Tx CAN packets ready to send on the queue
122 * @tx_tail: Tx CAN packets successfully sended on the queue 169 * @tx_tail: Tx CAN packets successfully sended on the queue
123 * @tx_max: Maximum number packets the driver can send 170 * @tx_max: Maximum number packets the driver can send
@@ -129,9 +176,11 @@ enum xcan_reg {
129 * @irq_flags: For request_irq() 176 * @irq_flags: For request_irq()
130 * @bus_clk: Pointer to struct clk 177 * @bus_clk: Pointer to struct clk
131 * @can_clk: Pointer to struct clk 178 * @can_clk: Pointer to struct clk
179 * @devtype: Device type specific constants
132 */ 180 */
133struct xcan_priv { 181struct xcan_priv {
134 struct can_priv can; 182 struct can_priv can;
183 spinlock_t tx_lock;
135 unsigned int tx_head; 184 unsigned int tx_head;
136 unsigned int tx_tail; 185 unsigned int tx_tail;
137 unsigned int tx_max; 186 unsigned int tx_max;
@@ -144,6 +193,7 @@ struct xcan_priv {
144 unsigned long irq_flags; 193 unsigned long irq_flags;
145 struct clk *bus_clk; 194 struct clk *bus_clk;
146 struct clk *can_clk; 195 struct clk *can_clk;
196 struct xcan_devtype_data devtype;
147}; 197};
148 198
149/* CAN Bittiming constants as per Xilinx CAN specs */ 199/* CAN Bittiming constants as per Xilinx CAN specs */
@@ -159,6 +209,18 @@ static const struct can_bittiming_const xcan_bittiming_const = {
159 .brp_inc = 1, 209 .brp_inc = 1,
160}; 210};
161 211
212static const struct can_bittiming_const xcan_bittiming_const_canfd = {
213 .name = DRIVER_NAME,
214 .tseg1_min = 1,
215 .tseg1_max = 64,
216 .tseg2_min = 1,
217 .tseg2_max = 16,
218 .sjw_max = 16,
219 .brp_min = 1,
220 .brp_max = 256,
221 .brp_inc = 1,
222};
223
162/** 224/**
163 * xcan_write_reg_le - Write a value to the device register little endian 225 * xcan_write_reg_le - Write a value to the device register little endian
164 * @priv: Driver private data structure 226 * @priv: Driver private data structure
@@ -214,6 +276,23 @@ static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
214} 276}
215 277
216/** 278/**
279 * xcan_rx_int_mask - Get the mask for the receive interrupt
280 * @priv: Driver private data structure
281 *
282 * Return: The receive interrupt mask used by the driver on this HW
283 */
284static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
285{
286 /* RXNEMP is better suited for our use case as it cannot be cleared
287 * while the FIFO is non-empty, but CAN FD HW does not have it
288 */
289 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
290 return XCAN_IXR_RXOK_MASK;
291 else
292 return XCAN_IXR_RXNEMP_MASK;
293}
294
295/**
217 * set_reset_mode - Resets the CAN device mode 296 * set_reset_mode - Resets the CAN device mode
218 * @ndev: Pointer to net_device structure 297 * @ndev: Pointer to net_device structure
219 * 298 *
@@ -238,6 +317,10 @@ static int set_reset_mode(struct net_device *ndev)
238 usleep_range(500, 10000); 317 usleep_range(500, 10000);
239 } 318 }
240 319
320 /* reset clears FIFOs */
321 priv->tx_head = 0;
322 priv->tx_tail = 0;
323
241 return 0; 324 return 0;
242} 325}
243 326
@@ -273,10 +356,10 @@ static int xcan_set_bittiming(struct net_device *ndev)
273 btr1 = (bt->prop_seg + bt->phase_seg1 - 1); 356 btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
274 357
275 /* Setting Time Segment 2 in BTR Register */ 358 /* Setting Time Segment 2 in BTR Register */
276 btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT; 359 btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
277 360
278 /* Setting Synchronous jump width in BTR Register */ 361 /* Setting Synchronous jump width in BTR Register */
279 btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT; 362 btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
280 363
281 priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0); 364 priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
282 priv->write_reg(priv, XCAN_BTR_OFFSET, btr1); 365 priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
@@ -304,6 +387,7 @@ static int xcan_chip_start(struct net_device *ndev)
304 u32 reg_msr, reg_sr_mask; 387 u32 reg_msr, reg_sr_mask;
305 int err; 388 int err;
306 unsigned long timeout; 389 unsigned long timeout;
390 u32 ier;
307 391
308 /* Check if it is in reset mode */ 392 /* Check if it is in reset mode */
309 err = set_reset_mode(ndev); 393 err = set_reset_mode(ndev);
@@ -315,7 +399,15 @@ static int xcan_chip_start(struct net_device *ndev)
315 return err; 399 return err;
316 400
317 /* Enable interrupts */ 401 /* Enable interrupts */
318 priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL); 402 ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
403 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
404 XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
405 XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
406
407 if (priv->devtype.flags & XCAN_FLAG_RXMNF)
408 ier |= XCAN_IXR_RXMNF_MASK;
409
410 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
319 411
320 /* Check whether it is loopback mode or normal mode */ 412 /* Check whether it is loopback mode or normal mode */
321 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { 413 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
@@ -326,6 +418,12 @@ static int xcan_chip_start(struct net_device *ndev)
326 reg_sr_mask = XCAN_SR_NORMAL_MASK; 418 reg_sr_mask = XCAN_SR_NORMAL_MASK;
327 } 419 }
328 420
421 /* enable the first extended filter, if any, as cores with extended
422 * filtering default to non-receipt if all filters are disabled
423 */
424 if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
425 priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
426
329 priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr); 427 priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
330 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK); 428 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
331 429
@@ -376,33 +474,15 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
376} 474}
377 475
378/** 476/**
379 * xcan_start_xmit - Starts the transmission 477 * xcan_write_frame - Write a frame to HW
380 * @skb: sk_buff pointer that contains data to be Txed 478 * @skb: sk_buff pointer that contains data to be Txed
381 * @ndev: Pointer to net_device structure 479 * @frame_offset: Register offset to write the frame to
382 *
383 * This function is invoked from upper layers to initiate transmission. This
384 * function uses the next available free txbuff and populates their fields to
385 * start the transmission.
386 *
387 * Return: 0 on success and failure value on error
388 */ 480 */
389static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) 481static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
482 int frame_offset)
390{ 483{
391 struct xcan_priv *priv = netdev_priv(ndev);
392 struct net_device_stats *stats = &ndev->stats;
393 struct can_frame *cf = (struct can_frame *)skb->data;
394 u32 id, dlc, data[2] = {0, 0}; 484 u32 id, dlc, data[2] = {0, 0};
395 485 struct can_frame *cf = (struct can_frame *)skb->data;
396 if (can_dropped_invalid_skb(ndev, skb))
397 return NETDEV_TX_OK;
398
399 /* Check if the TX buffer is full */
400 if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
401 XCAN_SR_TXFLL_MASK)) {
402 netif_stop_queue(ndev);
403 netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n");
404 return NETDEV_TX_BUSY;
405 }
406 486
407 /* Watch carefully on the bit sequence */ 487 /* Watch carefully on the bit sequence */
408 if (cf->can_id & CAN_EFF_FLAG) { 488 if (cf->can_id & CAN_EFF_FLAG) {
@@ -438,26 +518,119 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
438 if (cf->can_dlc > 4) 518 if (cf->can_dlc > 4)
439 data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); 519 data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
440 520
441 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); 521 priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
442 priv->tx_head++; 522 /* If the CAN frame is RTR frame this write triggers transmission
443 523 * (not on CAN FD)
444 /* Write the Frame to Xilinx CAN TX FIFO */ 524 */
445 priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id); 525 priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
446 /* If the CAN frame is RTR frame this write triggers tranmission */
447 priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
448 if (!(cf->can_id & CAN_RTR_FLAG)) { 526 if (!(cf->can_id & CAN_RTR_FLAG)) {
449 priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]); 527 priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset),
528 data[0]);
450 /* If the CAN frame is Standard/Extended frame this 529 /* If the CAN frame is Standard/Extended frame this
451 * write triggers tranmission 530 * write triggers transmission (not on CAN FD)
452 */ 531 */
453 priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]); 532 priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset),
454 stats->tx_bytes += cf->can_dlc; 533 data[1]);
455 } 534 }
535}
536
537/**
538 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
539 *
540 * Return: 0 on success, -ENOSPC if FIFO is full.
541 */
542static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
543{
544 struct xcan_priv *priv = netdev_priv(ndev);
545 unsigned long flags;
546
547 /* Check if the TX buffer is full */
548 if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
549 XCAN_SR_TXFLL_MASK))
550 return -ENOSPC;
551
552 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
553
554 spin_lock_irqsave(&priv->tx_lock, flags);
555
556 priv->tx_head++;
557
558 xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);
559
560 /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
561 if (priv->tx_max > 1)
562 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
456 563
457 /* Check if the TX buffer is full */ 564 /* Check if the TX buffer is full */
458 if ((priv->tx_head - priv->tx_tail) == priv->tx_max) 565 if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
459 netif_stop_queue(ndev); 566 netif_stop_queue(ndev);
460 567
568 spin_unlock_irqrestore(&priv->tx_lock, flags);
569
570 return 0;
571}
572
573/**
574 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
575 *
576 * Return: 0 on success, -ENOSPC if there is no space
577 */
578static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
579{
580 struct xcan_priv *priv = netdev_priv(ndev);
581 unsigned long flags;
582
583 if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
584 BIT(XCAN_TX_MAILBOX_IDX)))
585 return -ENOSPC;
586
587 can_put_echo_skb(skb, ndev, 0);
588
589 spin_lock_irqsave(&priv->tx_lock, flags);
590
591 priv->tx_head++;
592
593 xcan_write_frame(priv, skb,
594 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
595
596 /* Mark buffer as ready for transmit */
597 priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
598
599 netif_stop_queue(ndev);
600
601 spin_unlock_irqrestore(&priv->tx_lock, flags);
602
603 return 0;
604}
605
606/**
607 * xcan_start_xmit - Starts the transmission
608 * @skb: sk_buff pointer that contains data to be Txed
609 * @ndev: Pointer to net_device structure
610 *
611 * This function is invoked from upper layers to initiate transmission.
612 *
613 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
614 */
615static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
616{
617 struct xcan_priv *priv = netdev_priv(ndev);
618 int ret;
619
620 if (can_dropped_invalid_skb(ndev, skb))
621 return NETDEV_TX_OK;
622
623 if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
624 ret = xcan_start_xmit_mailbox(skb, ndev);
625 else
626 ret = xcan_start_xmit_fifo(skb, ndev);
627
628 if (ret < 0) {
629 netdev_err(ndev, "BUG!, TX full when queue awake!\n");
630 netif_stop_queue(ndev);
631 return NETDEV_TX_BUSY;
632 }
633
461 return NETDEV_TX_OK; 634 return NETDEV_TX_OK;
462} 635}
463 636
@@ -465,13 +638,14 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
465 * xcan_rx - Is called from CAN isr to complete the received 638 * xcan_rx - Is called from CAN isr to complete the received
466 * frame processing 639 * frame processing
467 * @ndev: Pointer to net_device structure 640 * @ndev: Pointer to net_device structure
641 * @frame_base: Register offset to the frame to be read
468 * 642 *
469 * This function is invoked from the CAN isr(poll) to process the Rx frames. It 643 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
470 * does minimal processing and invokes "netif_receive_skb" to complete further 644 * does minimal processing and invokes "netif_receive_skb" to complete further
471 * processing. 645 * processing.
472 * Return: 1 on success and 0 on failure. 646 * Return: 1 on success and 0 on failure.
473 */ 647 */
474static int xcan_rx(struct net_device *ndev) 648static int xcan_rx(struct net_device *ndev, int frame_base)
475{ 649{
476 struct xcan_priv *priv = netdev_priv(ndev); 650 struct xcan_priv *priv = netdev_priv(ndev);
477 struct net_device_stats *stats = &ndev->stats; 651 struct net_device_stats *stats = &ndev->stats;
@@ -486,9 +660,9 @@ static int xcan_rx(struct net_device *ndev)
486 } 660 }
487 661
488 /* Read a frame from Xilinx zynq CANPS */ 662 /* Read a frame from Xilinx zynq CANPS */
489 id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET); 663 id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
490 dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >> 664 dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
491 XCAN_DLCR_DLC_SHIFT; 665 XCAN_DLCR_DLC_SHIFT;
492 666
493 /* Change Xilinx CAN data length format to socketCAN data format */ 667 /* Change Xilinx CAN data length format to socketCAN data format */
494 cf->can_dlc = get_can_dlc(dlc); 668 cf->can_dlc = get_can_dlc(dlc);
@@ -511,8 +685,8 @@ static int xcan_rx(struct net_device *ndev)
511 } 685 }
512 686
513 /* DW1/DW2 must always be read to remove message from RXFIFO */ 687 /* DW1/DW2 must always be read to remove message from RXFIFO */
514 data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET); 688 data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
515 data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET); 689 data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
516 690
517 if (!(cf->can_id & CAN_RTR_FLAG)) { 691 if (!(cf->can_id & CAN_RTR_FLAG)) {
518 /* Change Xilinx CAN data format to socketCAN data format */ 692 /* Change Xilinx CAN data format to socketCAN data format */
@@ -530,6 +704,103 @@ static int xcan_rx(struct net_device *ndev)
530} 704}
531 705
532/** 706/**
707 * xcan_current_error_state - Get current error state from HW
708 * @ndev: Pointer to net_device structure
709 *
710 * Checks the current CAN error state from the HW. Note that this
711 * only checks for ERROR_PASSIVE and ERROR_WARNING.
712 *
713 * Return:
714 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
715 * otherwise.
716 */
717static enum can_state xcan_current_error_state(struct net_device *ndev)
718{
719 struct xcan_priv *priv = netdev_priv(ndev);
720 u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
721
722 if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
723 return CAN_STATE_ERROR_PASSIVE;
724 else if (status & XCAN_SR_ERRWRN_MASK)
725 return CAN_STATE_ERROR_WARNING;
726 else
727 return CAN_STATE_ERROR_ACTIVE;
728}
729
730/**
731 * xcan_set_error_state - Set new CAN error state
732 * @ndev: Pointer to net_device structure
733 * @new_state: The new CAN state to be set
734 * @cf: Error frame to be populated or NULL
735 *
736 * Set new CAN error state for the device, updating statistics and
737 * populating the error frame if given.
738 */
739static void xcan_set_error_state(struct net_device *ndev,
740 enum can_state new_state,
741 struct can_frame *cf)
742{
743 struct xcan_priv *priv = netdev_priv(ndev);
744 u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
745 u32 txerr = ecr & XCAN_ECR_TEC_MASK;
746 u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
747 enum can_state tx_state = txerr >= rxerr ? new_state : 0;
748 enum can_state rx_state = txerr <= rxerr ? new_state : 0;
749
750 /* non-ERROR states are handled elsewhere */
751 if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
752 return;
753
754 can_change_state(ndev, cf, tx_state, rx_state);
755
756 if (cf) {
757 cf->data[6] = txerr;
758 cf->data[7] = rxerr;
759 }
760}
761
762/**
763 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
764 * @ndev: Pointer to net_device structure
765 *
766 * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
767 * the performed RX/TX has caused it to drop to a lesser state and set
768 * the interface state accordingly.
769 */
770static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
771{
772 struct xcan_priv *priv = netdev_priv(ndev);
773 enum can_state old_state = priv->can.state;
774 enum can_state new_state;
775
776 /* changing error state due to successful frame RX/TX can only
777 * occur from these states
778 */
779 if (old_state != CAN_STATE_ERROR_WARNING &&
780 old_state != CAN_STATE_ERROR_PASSIVE)
781 return;
782
783 new_state = xcan_current_error_state(ndev);
784
785 if (new_state != old_state) {
786 struct sk_buff *skb;
787 struct can_frame *cf;
788
789 skb = alloc_can_err_skb(ndev, &cf);
790
791 xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
792
793 if (skb) {
794 struct net_device_stats *stats = &ndev->stats;
795
796 stats->rx_packets++;
797 stats->rx_bytes += cf->can_dlc;
798 netif_rx(skb);
799 }
800 }
801}
802
803/**
533 * xcan_err_interrupt - error frame Isr 804 * xcan_err_interrupt - error frame Isr
534 * @ndev: net_device pointer 805 * @ndev: net_device pointer
535 * @isr: interrupt status register value 806 * @isr: interrupt status register value
@@ -544,16 +815,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
544 struct net_device_stats *stats = &ndev->stats; 815 struct net_device_stats *stats = &ndev->stats;
545 struct can_frame *cf; 816 struct can_frame *cf;
546 struct sk_buff *skb; 817 struct sk_buff *skb;
547 u32 err_status, status, txerr = 0, rxerr = 0; 818 u32 err_status;
548 819
549 skb = alloc_can_err_skb(ndev, &cf); 820 skb = alloc_can_err_skb(ndev, &cf);
550 821
551 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); 822 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
552 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); 823 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
553 txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
554 rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
555 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
556 status = priv->read_reg(priv, XCAN_SR_OFFSET);
557 824
558 if (isr & XCAN_IXR_BSOFF_MASK) { 825 if (isr & XCAN_IXR_BSOFF_MASK) {
559 priv->can.state = CAN_STATE_BUS_OFF; 826 priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +830,11 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
563 can_bus_off(ndev); 830 can_bus_off(ndev);
564 if (skb) 831 if (skb)
565 cf->can_id |= CAN_ERR_BUSOFF; 832 cf->can_id |= CAN_ERR_BUSOFF;
566 } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) { 833 } else {
567 priv->can.state = CAN_STATE_ERROR_PASSIVE; 834 enum can_state new_state = xcan_current_error_state(ndev);
568 priv->can.can_stats.error_passive++; 835
569 if (skb) { 836 if (new_state != priv->can.state)
570 cf->can_id |= CAN_ERR_CRTL; 837 xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
571 cf->data[1] = (rxerr > 127) ?
572 CAN_ERR_CRTL_RX_PASSIVE :
573 CAN_ERR_CRTL_TX_PASSIVE;
574 cf->data[6] = txerr;
575 cf->data[7] = rxerr;
576 }
577 } else if (status & XCAN_SR_ERRWRN_MASK) {
578 priv->can.state = CAN_STATE_ERROR_WARNING;
579 priv->can.can_stats.error_warning++;
580 if (skb) {
581 cf->can_id |= CAN_ERR_CRTL;
582 cf->data[1] |= (txerr > rxerr) ?
583 CAN_ERR_CRTL_TX_WARNING :
584 CAN_ERR_CRTL_RX_WARNING;
585 cf->data[6] = txerr;
586 cf->data[7] = rxerr;
587 }
588 } 838 }
589 839
590 /* Check for Arbitration lost interrupt */ 840 /* Check for Arbitration lost interrupt */
@@ -600,13 +850,23 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
600 if (isr & XCAN_IXR_RXOFLW_MASK) { 850 if (isr & XCAN_IXR_RXOFLW_MASK) {
601 stats->rx_over_errors++; 851 stats->rx_over_errors++;
602 stats->rx_errors++; 852 stats->rx_errors++;
603 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
604 if (skb) { 853 if (skb) {
605 cf->can_id |= CAN_ERR_CRTL; 854 cf->can_id |= CAN_ERR_CRTL;
606 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; 855 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
607 } 856 }
608 } 857 }
609 858
859 /* Check for RX Match Not Finished interrupt */
860 if (isr & XCAN_IXR_RXMNF_MASK) {
861 stats->rx_dropped++;
862 stats->rx_errors++;
863 netdev_err(ndev, "RX match not finished, frame discarded\n");
864 if (skb) {
865 cf->can_id |= CAN_ERR_CRTL;
866 cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
867 }
868 }
869
610 /* Check for error interrupt */ 870 /* Check for error interrupt */
611 if (isr & XCAN_IXR_ERROR_MASK) { 871 if (isr & XCAN_IXR_ERROR_MASK) {
612 if (skb) 872 if (skb)
@@ -691,6 +951,44 @@ static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
691} 951}
692 952
693/** 953/**
954 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
955 *
956 * Return: Register offset of the next frame in RX FIFO.
957 */
958static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
959{
960 int offset;
961
962 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
963 u32 fsr;
964
965 /* clear RXOK before the is-empty check so that any newly
966 * received frame will reassert it without a race
967 */
968 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
969
970 fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
971
972 /* check if RX FIFO is empty */
973 if (!(fsr & XCAN_FSR_FL_MASK))
974 return -ENOENT;
975
976 offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
977
978 } else {
979 /* check if RX FIFO is empty */
980 if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
981 XCAN_IXR_RXNEMP_MASK))
982 return -ENOENT;
983
984 /* frames are read from a static offset */
985 offset = XCAN_RXFIFO_OFFSET;
986 }
987
988 return offset;
989}
990
991/**
694 * xcan_rx_poll - Poll routine for rx packets (NAPI) 992 * xcan_rx_poll - Poll routine for rx packets (NAPI)
695 * @napi: napi structure pointer 993 * @napi: napi structure pointer
696 * @quota: Max number of rx packets to be processed. 994 * @quota: Max number of rx packets to be processed.
@@ -704,31 +1002,35 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
704{ 1002{
705 struct net_device *ndev = napi->dev; 1003 struct net_device *ndev = napi->dev;
706 struct xcan_priv *priv = netdev_priv(ndev); 1004 struct xcan_priv *priv = netdev_priv(ndev);
707 u32 isr, ier; 1005 u32 ier;
708 int work_done = 0; 1006 int work_done = 0;
709 1007 int frame_offset;
710 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 1008
711 while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { 1009 while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
712 if (isr & XCAN_IXR_RXOK_MASK) { 1010 (work_done < quota)) {
713 priv->write_reg(priv, XCAN_ICR_OFFSET, 1011 work_done += xcan_rx(ndev, frame_offset);
714 XCAN_IXR_RXOK_MASK); 1012
715 work_done += xcan_rx(ndev); 1013 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
716 } else { 1014 /* increment read index */
1015 priv->write_reg(priv, XCAN_FSR_OFFSET,
1016 XCAN_FSR_IRI_MASK);
1017 else
1018 /* clear rx-not-empty (will actually clear only if
1019 * empty)
1020 */
717 priv->write_reg(priv, XCAN_ICR_OFFSET, 1021 priv->write_reg(priv, XCAN_ICR_OFFSET,
718 XCAN_IXR_RXNEMP_MASK); 1022 XCAN_IXR_RXNEMP_MASK);
719 break;
720 }
721 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
722 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
723 } 1023 }
724 1024
725 if (work_done) 1025 if (work_done) {
726 can_led_event(ndev, CAN_LED_EVENT_RX); 1026 can_led_event(ndev, CAN_LED_EVENT_RX);
1027 xcan_update_error_state_after_rxtx(ndev);
1028 }
727 1029
728 if (work_done < quota) { 1030 if (work_done < quota) {
729 napi_complete_done(napi, work_done); 1031 napi_complete_done(napi, work_done);
730 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 1032 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
731 ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); 1033 ier |= xcan_rx_int_mask(priv);
732 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 1034 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
733 } 1035 }
734 return work_done; 1036 return work_done;
@@ -743,18 +1045,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
743{ 1045{
744 struct xcan_priv *priv = netdev_priv(ndev); 1046 struct xcan_priv *priv = netdev_priv(ndev);
745 struct net_device_stats *stats = &ndev->stats; 1047 struct net_device_stats *stats = &ndev->stats;
1048 unsigned int frames_in_fifo;
1049 int frames_sent = 1; /* TXOK => at least 1 frame was sent */
1050 unsigned long flags;
1051 int retries = 0;
1052
1053 /* Synchronize with xmit as we need to know the exact number
1054 * of frames in the FIFO to stay in sync due to the TXFEMP
1055 * handling.
1056 * This also prevents a race between netif_wake_queue() and
1057 * netif_stop_queue().
1058 */
1059 spin_lock_irqsave(&priv->tx_lock, flags);
1060
1061 frames_in_fifo = priv->tx_head - priv->tx_tail;
746 1062
747 while ((priv->tx_head - priv->tx_tail > 0) && 1063 if (WARN_ON_ONCE(frames_in_fifo == 0)) {
748 (isr & XCAN_IXR_TXOK_MASK)) { 1064 /* clear TXOK anyway to avoid getting back here */
749 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); 1065 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
750 can_get_echo_skb(ndev, priv->tx_tail % 1066 spin_unlock_irqrestore(&priv->tx_lock, flags);
751 priv->tx_max); 1067 return;
1068 }
1069
1070 /* Check if 2 frames were sent (TXOK only means that at least 1
1071 * frame was sent).
1072 */
1073 if (frames_in_fifo > 1) {
1074 WARN_ON(frames_in_fifo > priv->tx_max);
1075
1076 /* Synchronize TXOK and isr so that after the loop:
1077 * (1) isr variable is up-to-date at least up to TXOK clear
1078 * time. This avoids us clearing a TXOK of a second frame
1079 * but not noticing that the FIFO is now empty and thus
1080 * marking only a single frame as sent.
1081 * (2) No TXOK is left. Having one could mean leaving a
1082 * stray TXOK as we might process the associated frame
1083 * via TXFEMP handling as we read TXFEMP *after* TXOK
1084 * clear to satisfy (1).
1085 */
1086 while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
1087 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1088 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1089 }
1090
1091 if (isr & XCAN_IXR_TXFEMP_MASK) {
1092 /* nothing in FIFO anymore */
1093 frames_sent = frames_in_fifo;
1094 }
1095 } else {
1096 /* single frame in fifo, just clear TXOK */
1097 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1098 }
1099
1100 while (frames_sent--) {
1101 stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
1102 priv->tx_max);
752 priv->tx_tail++; 1103 priv->tx_tail++;
753 stats->tx_packets++; 1104 stats->tx_packets++;
754 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
755 } 1105 }
756 can_led_event(ndev, CAN_LED_EVENT_TX); 1106
757 netif_wake_queue(ndev); 1107 netif_wake_queue(ndev);
1108
1109 spin_unlock_irqrestore(&priv->tx_lock, flags);
1110
1111 can_led_event(ndev, CAN_LED_EVENT_TX);
1112 xcan_update_error_state_after_rxtx(ndev);
758} 1113}
759 1114
760/** 1115/**
@@ -773,6 +1128,8 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
773 struct net_device *ndev = (struct net_device *)dev_id; 1128 struct net_device *ndev = (struct net_device *)dev_id;
774 struct xcan_priv *priv = netdev_priv(ndev); 1129 struct xcan_priv *priv = netdev_priv(ndev);
775 u32 isr, ier; 1130 u32 isr, ier;
1131 u32 isr_errors;
1132 u32 rx_int_mask = xcan_rx_int_mask(priv);
776 1133
777 /* Get the interrupt status from Xilinx CAN */ 1134 /* Get the interrupt status from Xilinx CAN */
778 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 1135 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +1148,18 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
791 xcan_tx_interrupt(ndev, isr); 1148 xcan_tx_interrupt(ndev, isr);
792 1149
793 /* Check for the type of error interrupt and Processing it */ 1150 /* Check for the type of error interrupt and Processing it */
794 if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | 1151 isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
795 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) { 1152 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
796 priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK | 1153 XCAN_IXR_RXMNF_MASK);
797 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK | 1154 if (isr_errors) {
798 XCAN_IXR_ARBLST_MASK)); 1155 priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
799 xcan_err_interrupt(ndev, isr); 1156 xcan_err_interrupt(ndev, isr);
800 } 1157 }
801 1158
802 /* Check for the type of receive interrupt and Processing it */ 1159 /* Check for the type of receive interrupt and Processing it */
803 if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) { 1160 if (isr & rx_int_mask) {
804 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 1161 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
805 ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK); 1162 ier &= ~rx_int_mask;
806 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 1163 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
807 napi_schedule(&priv->napi); 1164 napi_schedule(&priv->napi);
808 } 1165 }
@@ -819,13 +1176,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
819static void xcan_chip_stop(struct net_device *ndev) 1176static void xcan_chip_stop(struct net_device *ndev)
820{ 1177{
821 struct xcan_priv *priv = netdev_priv(ndev); 1178 struct xcan_priv *priv = netdev_priv(ndev);
822 u32 ier;
823 1179
824 /* Disable interrupts and leave the can in configuration mode */ 1180 /* Disable interrupts and leave the can in configuration mode */
825 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 1181 set_reset_mode(ndev);
826 ier &= ~XCAN_INTR_ALL;
827 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
828 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
829 priv->can.state = CAN_STATE_STOPPED; 1182 priv->can.state = CAN_STATE_STOPPED;
830} 1183}
831 1184
@@ -958,10 +1311,15 @@ static const struct net_device_ops xcan_netdev_ops = {
958 */ 1311 */
959static int __maybe_unused xcan_suspend(struct device *dev) 1312static int __maybe_unused xcan_suspend(struct device *dev)
960{ 1313{
961 if (!device_may_wakeup(dev)) 1314 struct net_device *ndev = dev_get_drvdata(dev);
962 return pm_runtime_force_suspend(dev);
963 1315
964 return 0; 1316 if (netif_running(ndev)) {
1317 netif_stop_queue(ndev);
1318 netif_device_detach(ndev);
1319 xcan_chip_stop(ndev);
1320 }
1321
1322 return pm_runtime_force_suspend(dev);
965} 1323}
966 1324
967/** 1325/**
@@ -973,11 +1331,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
973 */ 1331 */
974static int __maybe_unused xcan_resume(struct device *dev) 1332static int __maybe_unused xcan_resume(struct device *dev)
975{ 1333{
976 if (!device_may_wakeup(dev)) 1334 struct net_device *ndev = dev_get_drvdata(dev);
977 return pm_runtime_force_resume(dev); 1335 int ret;
978 1336
979 return 0; 1337 ret = pm_runtime_force_resume(dev);
1338 if (ret) {
1339 dev_err(dev, "pm_runtime_force_resume failed on resume\n");
1340 return ret;
1341 }
1342
1343 if (netif_running(ndev)) {
1344 ret = xcan_chip_start(ndev);
1345 if (ret) {
1346 dev_err(dev, "xcan_chip_start failed on resume\n");
1347 return ret;
1348 }
1349
1350 netif_device_attach(ndev);
1351 netif_start_queue(ndev);
1352 }
980 1353
1354 return 0;
981} 1355}
982 1356
983/** 1357/**
@@ -992,14 +1366,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
992 struct net_device *ndev = dev_get_drvdata(dev); 1366 struct net_device *ndev = dev_get_drvdata(dev);
993 struct xcan_priv *priv = netdev_priv(ndev); 1367 struct xcan_priv *priv = netdev_priv(ndev);
994 1368
995 if (netif_running(ndev)) {
996 netif_stop_queue(ndev);
997 netif_device_detach(ndev);
998 }
999
1000 priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
1001 priv->can.state = CAN_STATE_SLEEPING;
1002
1003 clk_disable_unprepare(priv->bus_clk); 1369 clk_disable_unprepare(priv->bus_clk);
1004 clk_disable_unprepare(priv->can_clk); 1370 clk_disable_unprepare(priv->can_clk);
1005 1371
@@ -1018,7 +1384,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
1018 struct net_device *ndev = dev_get_drvdata(dev); 1384 struct net_device *ndev = dev_get_drvdata(dev);
1019 struct xcan_priv *priv = netdev_priv(ndev); 1385 struct xcan_priv *priv = netdev_priv(ndev);
1020 int ret; 1386 int ret;
1021 u32 isr, status;
1022 1387
1023 ret = clk_prepare_enable(priv->bus_clk); 1388 ret = clk_prepare_enable(priv->bus_clk);
1024 if (ret) { 1389 if (ret) {
@@ -1032,27 +1397,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
1032 return ret; 1397 return ret;
1033 } 1398 }
1034 1399
1035 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
1036 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1037 status = priv->read_reg(priv, XCAN_SR_OFFSET);
1038
1039 if (netif_running(ndev)) {
1040 if (isr & XCAN_IXR_BSOFF_MASK) {
1041 priv->can.state = CAN_STATE_BUS_OFF;
1042 priv->write_reg(priv, XCAN_SRR_OFFSET,
1043 XCAN_SRR_RESET_MASK);
1044 } else if ((status & XCAN_SR_ESTAT_MASK) ==
1045 XCAN_SR_ESTAT_MASK) {
1046 priv->can.state = CAN_STATE_ERROR_PASSIVE;
1047 } else if (status & XCAN_SR_ERRWRN_MASK) {
1048 priv->can.state = CAN_STATE_ERROR_WARNING;
1049 } else {
1050 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1051 }
1052 netif_device_attach(ndev);
1053 netif_start_queue(ndev);
1054 }
1055
1056 return 0; 1400 return 0;
1057} 1401}
1058 1402
@@ -1061,6 +1405,40 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
1061 SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL) 1405 SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
1062}; 1406};
1063 1407
1408static const struct xcan_devtype_data xcan_zynq_data = {
1409 .bittiming_const = &xcan_bittiming_const,
1410 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1411 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
1412 .bus_clk_name = "pclk",
1413};
1414
1415static const struct xcan_devtype_data xcan_axi_data = {
1416 .bittiming_const = &xcan_bittiming_const,
1417 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1418 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
1419 .bus_clk_name = "s_axi_aclk",
1420};
1421
1422static const struct xcan_devtype_data xcan_canfd_data = {
1423 .flags = XCAN_FLAG_EXT_FILTERS |
1424 XCAN_FLAG_RXMNF |
1425 XCAN_FLAG_TX_MAILBOXES |
1426 XCAN_FLAG_RX_FIFO_MULTI,
1427 .bittiming_const = &xcan_bittiming_const,
1428 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
1429 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
1430 .bus_clk_name = "s_axi_aclk",
1431};
1432
1433/* Match table for OF platform binding */
1434static const struct of_device_id xcan_of_match[] = {
1435 { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
1436 { .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
1437 { .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
1438 { /* end of list */ },
1439};
1440MODULE_DEVICE_TABLE(of, xcan_of_match);
1441
1064/** 1442/**
1065 * xcan_probe - Platform registration call 1443 * xcan_probe - Platform registration call
1066 * @pdev: Handle to the platform device structure 1444 * @pdev: Handle to the platform device structure
@@ -1075,8 +1453,13 @@ static int xcan_probe(struct platform_device *pdev)
1075 struct resource *res; /* IO mem resources */ 1453 struct resource *res; /* IO mem resources */
1076 struct net_device *ndev; 1454 struct net_device *ndev;
1077 struct xcan_priv *priv; 1455 struct xcan_priv *priv;
1456 const struct of_device_id *of_id;
1457 const struct xcan_devtype_data *devtype = &xcan_axi_data;
1078 void __iomem *addr; 1458 void __iomem *addr;
1079 int ret, rx_max, tx_max; 1459 int ret;
1460 int rx_max, tx_max;
1461 int hw_tx_max, hw_rx_max;
1462 const char *hw_tx_max_property;
1080 1463
1081 /* Get the virtual base address for the device */ 1464 /* Get the virtual base address for the device */
1082 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1465 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,13 +1469,54 @@ static int xcan_probe(struct platform_device *pdev)
1086 goto err; 1469 goto err;
1087 } 1470 }
1088 1471
1089 ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max); 1472 of_id = of_match_device(xcan_of_match, &pdev->dev);
1090 if (ret < 0) 1473 if (of_id && of_id->data)
1474 devtype = of_id->data;
1475
1476 hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
1477 "tx-mailbox-count" : "tx-fifo-depth";
1478
1479 ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
1480 &hw_tx_max);
1481 if (ret < 0) {
1482 dev_err(&pdev->dev, "missing %s property\n",
1483 hw_tx_max_property);
1091 goto err; 1484 goto err;
1485 }
1092 1486
1093 ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max); 1487 ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1094 if (ret < 0) 1488 &hw_rx_max);
1489 if (ret < 0) {
1490 dev_err(&pdev->dev,
1491 "missing rx-fifo-depth property (mailbox mode is not supported)\n");
1095 goto err; 1492 goto err;
1493 }
1494
1495 /* With TX FIFO:
1496 *
1497 * There is no way to directly figure out how many frames have been
1498 * sent when the TXOK interrupt is processed. If TXFEMP
1499 * is supported, we can have 2 frames in the FIFO and use TXFEMP
1500 * to determine if 1 or 2 frames have been sent.
1501 * Theoretically we should be able to use TXFWMEMP to determine up
1502 * to 3 frames, but it seems that after putting a second frame in the
1503 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
1504 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
1505 * sent), which is not a sensible state - possibly TXFWMEMP is not
1506 * completely synchronized with the rest of the bits?
1507 *
1508 * With TX mailboxes:
1509 *
1510 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
1511 * we submit frames one at a time.
1512 */
1513 if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
1514 (devtype->flags & XCAN_FLAG_TXFEMP))
1515 tx_max = min(hw_tx_max, 2);
1516 else
1517 tx_max = 1;
1518
1519 rx_max = hw_rx_max;
1096 1520
1097 /* Create a CAN device instance */ 1521 /* Create a CAN device instance */
1098 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); 1522 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
@@ -1101,13 +1525,15 @@ static int xcan_probe(struct platform_device *pdev)
1101 1525
1102 priv = netdev_priv(ndev); 1526 priv = netdev_priv(ndev);
1103 priv->dev = &pdev->dev; 1527 priv->dev = &pdev->dev;
1104 priv->can.bittiming_const = &xcan_bittiming_const; 1528 priv->can.bittiming_const = devtype->bittiming_const;
1105 priv->can.do_set_mode = xcan_do_set_mode; 1529 priv->can.do_set_mode = xcan_do_set_mode;
1106 priv->can.do_get_berr_counter = xcan_get_berr_counter; 1530 priv->can.do_get_berr_counter = xcan_get_berr_counter;
1107 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | 1531 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1108 CAN_CTRLMODE_BERR_REPORTING; 1532 CAN_CTRLMODE_BERR_REPORTING;
1109 priv->reg_base = addr; 1533 priv->reg_base = addr;
1110 priv->tx_max = tx_max; 1534 priv->tx_max = tx_max;
1535 priv->devtype = *devtype;
1536 spin_lock_init(&priv->tx_lock);
1111 1537
1112 /* Get IRQ for the device */ 1538 /* Get IRQ for the device */
1113 ndev->irq = platform_get_irq(pdev, 0); 1539 ndev->irq = platform_get_irq(pdev, 0);
@@ -1124,22 +1550,12 @@ static int xcan_probe(struct platform_device *pdev)
1124 ret = PTR_ERR(priv->can_clk); 1550 ret = PTR_ERR(priv->can_clk);
1125 goto err_free; 1551 goto err_free;
1126 } 1552 }
1127 /* Check for type of CAN device */ 1553
1128 if (of_device_is_compatible(pdev->dev.of_node, 1554 priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
1129 "xlnx,zynq-can-1.0")) { 1555 if (IS_ERR(priv->bus_clk)) {
1130 priv->bus_clk = devm_clk_get(&pdev->dev, "pclk"); 1556 dev_err(&pdev->dev, "bus clock not found\n");
1131 if (IS_ERR(priv->bus_clk)) { 1557 ret = PTR_ERR(priv->bus_clk);
1132 dev_err(&pdev->dev, "bus clock not found\n"); 1558 goto err_free;
1133 ret = PTR_ERR(priv->bus_clk);
1134 goto err_free;
1135 }
1136 } else {
1137 priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
1138 if (IS_ERR(priv->bus_clk)) {
1139 dev_err(&pdev->dev, "bus clock not found\n");
1140 ret = PTR_ERR(priv->bus_clk);
1141 goto err_free;
1142 }
1143 } 1559 }
1144 1560
1145 priv->write_reg = xcan_write_reg_le; 1561 priv->write_reg = xcan_write_reg_le;
@@ -1172,9 +1588,9 @@ static int xcan_probe(struct platform_device *pdev)
1172 1588
1173 pm_runtime_put(&pdev->dev); 1589 pm_runtime_put(&pdev->dev);
1174 1590
1175 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", 1591 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
1176 priv->reg_base, ndev->irq, priv->can.clock.freq, 1592 priv->reg_base, ndev->irq, priv->can.clock.freq,
1177 priv->tx_max); 1593 hw_tx_max, priv->tx_max);
1178 1594
1179 return 0; 1595 return 0;
1180 1596
@@ -1208,14 +1624,6 @@ static int xcan_remove(struct platform_device *pdev)
1208 return 0; 1624 return 0;
1209} 1625}
1210 1626
1211/* Match table for OF platform binding */
1212static const struct of_device_id xcan_of_match[] = {
1213 { .compatible = "xlnx,zynq-can-1.0", },
1214 { .compatible = "xlnx,axi-can-1.00.a", },
1215 { /* end of list */ },
1216};
1217MODULE_DEVICE_TABLE(of, xcan_of_match);
1218
1219static struct platform_driver xcan_driver = { 1627static struct platform_driver xcan_driver = {
1220 .probe = xcan_probe, 1628 .probe = xcan_probe,
1221 .remove = xcan_remove, 1629 .remove = xcan_remove,
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 2b81b97e994f..71bb3aebded4 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -5,7 +5,7 @@ source "drivers/net/dsa/b53/Kconfig"
5 5
6config NET_DSA_BCM_SF2 6config NET_DSA_BCM_SF2
7 tristate "Broadcom Starfighter 2 Ethernet switch support" 7 tristate "Broadcom Starfighter 2 Ethernet switch support"
8 depends on HAS_IOMEM && NET_DSA && OF_MDIO 8 depends on HAS_IOMEM && NET_DSA
9 select NET_DSA_TAG_BRCM 9 select NET_DSA_TAG_BRCM
10 select FIXED_PHY 10 select FIXED_PHY
11 select BCM7XXX_PHY 11 select BCM7XXX_PHY
@@ -23,6 +23,14 @@ config NET_DSA_LOOP
23 This enables support for a fake mock-up switch chip which 23 This enables support for a fake mock-up switch chip which
24 exercises the DSA APIs. 24 exercises the DSA APIs.
25 25
26config NET_DSA_LANTIQ_GSWIP
27 tristate "Lantiq / Intel GSWIP"
28 depends on HAS_IOMEM && NET_DSA
29 select NET_DSA_TAG_GSWIP
30 ---help---
31 This enables support for the Lantiq / Intel GSWIP 2.1 found in
32 the xrx200 / VR9 SoC.
33
26config NET_DSA_MT7530 34config NET_DSA_MT7530
27 tristate "Mediatek MT7530 Ethernet switch support" 35 tristate "Mediatek MT7530 Ethernet switch support"
28 depends on NET_DSA 36 depends on NET_DSA
@@ -52,6 +60,17 @@ config NET_DSA_QCA8K
52 This enables support for the Qualcomm Atheros QCA8K Ethernet 60 This enables support for the Qualcomm Atheros QCA8K Ethernet
53 switch chips. 61 switch chips.
54 62
63config NET_DSA_REALTEK_SMI
64 tristate "Realtek SMI Ethernet switch family support"
65 depends on NET_DSA
66 select FIXED_PHY
67 select IRQ_DOMAIN
68 select REALTEK_PHY
69 select REGMAP
70 ---help---
71 This enables support for the Realtek SMI-based switch
72 chips, currently only RTL8366RB.
73
55config NET_DSA_SMSC_LAN9303 74config NET_DSA_SMSC_LAN9303
56 tristate 75 tristate
57 select NET_DSA_TAG_LAN9303 76 select NET_DSA_TAG_LAN9303
@@ -76,4 +95,15 @@ config NET_DSA_SMSC_LAN9303_MDIO
76 Enable access functions if the SMSC/Microchip LAN9303 is configured 95 Enable access functions if the SMSC/Microchip LAN9303 is configured
77 for MDIO managed mode. 96 for MDIO managed mode.
78 97
98config NET_DSA_VITESSE_VSC73XX
99 tristate "Vitesse VSC7385/7388/7395/7398 support"
100 depends on OF && SPI
101 depends on NET_DSA
102 select FIXED_PHY
103 select VITESSE_PHY
104 select GPIOLIB
105 ---help---
106 This enables support for the Vitesse VSC7385, VSC7388,
107 VSC7395 and VSC7398 SparX integrated ethernet switches.
108
79endmenu 109endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 15c2a831edf1..82e5d794c41f 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -5,12 +5,16 @@ obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
5ifdef CONFIG_NET_DSA_LOOP 5ifdef CONFIG_NET_DSA_LOOP
6obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o 6obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
7endif 7endif
8obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
8obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o 9obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
9obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o 10obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
10obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o 11obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
12obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek.o
13realtek-objs := realtek-smi.o rtl8366.o rtl8366rb.o
11obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o 14obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
12obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o 15obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
13obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o 16obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
17obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx.o
14obj-y += b53/ 18obj-y += b53/
15obj-y += microchip/ 19obj-y += microchip/
16obj-y += mv88e6xxx/ 20obj-y += mv88e6xxx/
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
index 2f988216dab9..d32469283f97 100644
--- a/drivers/net/dsa/b53/Kconfig
+++ b/drivers/net/dsa/b53/Kconfig
@@ -23,6 +23,7 @@ config B53_MDIO_DRIVER
23config B53_MMAP_DRIVER 23config B53_MMAP_DRIVER
24 tristate "B53 MMAP connected switch driver" 24 tristate "B53 MMAP connected switch driver"
25 depends on B53 && HAS_IOMEM 25 depends on B53 && HAS_IOMEM
26 default BCM63XX || BMIPS_GENERIC
26 help 27 help
27 Select to enable support for memory-mapped switches like the BCM63XX 28 Select to enable support for memory-mapped switches like the BCM63XX
28 integrated switches. 29 integrated switches.
@@ -30,6 +31,15 @@ config B53_MMAP_DRIVER
30config B53_SRAB_DRIVER 31config B53_SRAB_DRIVER
31 tristate "B53 SRAB connected switch driver" 32 tristate "B53 SRAB connected switch driver"
32 depends on B53 && HAS_IOMEM 33 depends on B53 && HAS_IOMEM
34 depends on B53_SERDES || !B53_SERDES
35 default ARCH_BCM_IPROC
33 help 36 help
34 Select to enable support for memory-mapped Switch Register Access 37 Select to enable support for memory-mapped Switch Register Access
35 Bridge Registers (SRAB) like it is found on the BCM53010 38 Bridge Registers (SRAB) like it is found on the BCM53010
39
40config B53_SERDES
41 tristate "B53 SerDes support"
42 depends on B53
43 default ARCH_BCM_NSP
44 help
45 Select to enable support for SerDes on e.g: Northstar Plus SoCs.
diff --git a/drivers/net/dsa/b53/Makefile b/drivers/net/dsa/b53/Makefile
index 4256fb42a4dd..b1be13023ae4 100644
--- a/drivers/net/dsa/b53/Makefile
+++ b/drivers/net/dsa/b53/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o
5obj-$(CONFIG_B53_MDIO_DRIVER) += b53_mdio.o 5obj-$(CONFIG_B53_MDIO_DRIVER) += b53_mdio.o
6obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o 6obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o
7obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o 7obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o
8obj-$(CONFIG_B53_SERDES) += b53_serdes.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 3da5fca77cbd..0e4bbdcc614f 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -26,6 +26,7 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/platform_data/b53.h> 27#include <linux/platform_data/b53.h>
28#include <linux/phy.h> 28#include <linux/phy.h>
29#include <linux/phylink.h>
29#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
30#include <linux/if_bridge.h> 31#include <linux/if_bridge.h>
31#include <net/dsa.h> 32#include <net/dsa.h>
@@ -502,8 +503,14 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
502{ 503{
503 struct b53_device *dev = ds->priv; 504 struct b53_device *dev = ds->priv;
504 unsigned int cpu_port = ds->ports[port].cpu_dp->index; 505 unsigned int cpu_port = ds->ports[port].cpu_dp->index;
506 int ret = 0;
505 u16 pvlan; 507 u16 pvlan;
506 508
509 if (dev->ops->irq_enable)
510 ret = dev->ops->irq_enable(dev, port);
511 if (ret)
512 return ret;
513
507 /* Clear the Rx and Tx disable bits and set to no spanning tree */ 514 /* Clear the Rx and Tx disable bits and set to no spanning tree */
508 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); 515 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
509 516
@@ -536,6 +543,9 @@ void b53_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
536 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg); 543 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
537 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; 544 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
538 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); 545 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
546
547 if (dev->ops->irq_disable)
548 dev->ops->irq_disable(dev, port);
539} 549}
540EXPORT_SYMBOL(b53_disable_port); 550EXPORT_SYMBOL(b53_disable_port);
541 551
@@ -684,7 +694,8 @@ static int b53_switch_reset(struct b53_device *dev)
684 * still use this driver as a library and need to perform the reset 694 * still use this driver as a library and need to perform the reset
685 * earlier. 695 * earlier.
686 */ 696 */
687 if (dev->chip_id == BCM58XX_DEVICE_ID) { 697 if (dev->chip_id == BCM58XX_DEVICE_ID ||
698 dev->chip_id == BCM583XX_DEVICE_ID) {
688 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg); 699 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
689 reg |= SW_RST | EN_SW_RST | EN_CH_RST; 700 reg |= SW_RST | EN_SW_RST | EN_CH_RST;
690 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg); 701 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
@@ -754,6 +765,8 @@ static int b53_reset_switch(struct b53_device *priv)
754 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); 765 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
755 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); 766 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
756 767
768 priv->serdes_lane = B53_INVALID_LANE;
769
757 return b53_switch_reset(priv); 770 return b53_switch_reset(priv);
758} 771}
759 772
@@ -806,16 +819,39 @@ static unsigned int b53_get_mib_size(struct b53_device *dev)
806 return B53_MIBS_SIZE; 819 return B53_MIBS_SIZE;
807} 820}
808 821
809void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) 822static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
823{
824 /* These ports typically do not have built-in PHYs */
825 switch (port) {
826 case B53_CPU_PORT_25:
827 case 7:
828 case B53_CPU_PORT:
829 return NULL;
830 }
831
832 return mdiobus_get_phy(ds->slave_mii_bus, port);
833}
834
835void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
836 uint8_t *data)
810{ 837{
811 struct b53_device *dev = ds->priv; 838 struct b53_device *dev = ds->priv;
812 const struct b53_mib_desc *mibs = b53_get_mib(dev); 839 const struct b53_mib_desc *mibs = b53_get_mib(dev);
813 unsigned int mib_size = b53_get_mib_size(dev); 840 unsigned int mib_size = b53_get_mib_size(dev);
841 struct phy_device *phydev;
814 unsigned int i; 842 unsigned int i;
815 843
816 for (i = 0; i < mib_size; i++) 844 if (stringset == ETH_SS_STATS) {
817 strlcpy(data + i * ETH_GSTRING_LEN, 845 for (i = 0; i < mib_size; i++)
818 mibs[i].name, ETH_GSTRING_LEN); 846 strlcpy(data + i * ETH_GSTRING_LEN,
847 mibs[i].name, ETH_GSTRING_LEN);
848 } else if (stringset == ETH_SS_PHY_STATS) {
849 phydev = b53_get_phy_device(ds, port);
850 if (!phydev)
851 return;
852
853 phy_ethtool_get_strings(phydev, data);
854 }
819} 855}
820EXPORT_SYMBOL(b53_get_strings); 856EXPORT_SYMBOL(b53_get_strings);
821 857
@@ -852,11 +888,34 @@ void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
852} 888}
853EXPORT_SYMBOL(b53_get_ethtool_stats); 889EXPORT_SYMBOL(b53_get_ethtool_stats);
854 890
855int b53_get_sset_count(struct dsa_switch *ds, int port) 891void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
892{
893 struct phy_device *phydev;
894
895 phydev = b53_get_phy_device(ds, port);
896 if (!phydev)
897 return;
898
899 phy_ethtool_get_stats(phydev, NULL, data);
900}
901EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
902
903int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
856{ 904{
857 struct b53_device *dev = ds->priv; 905 struct b53_device *dev = ds->priv;
906 struct phy_device *phydev;
858 907
859 return b53_get_mib_size(dev); 908 if (sset == ETH_SS_STATS) {
909 return b53_get_mib_size(dev);
910 } else if (sset == ETH_SS_PHY_STATS) {
911 phydev = b53_get_phy_device(ds, port);
912 if (!phydev)
913 return 0;
914
915 return phy_ethtool_get_sset_count(phydev);
916 }
917
918 return 0;
860} 919}
861EXPORT_SYMBOL(b53_get_sset_count); 920EXPORT_SYMBOL(b53_get_sset_count);
862 921
@@ -891,33 +950,50 @@ static int b53_setup(struct dsa_switch *ds)
891 return ret; 950 return ret;
892} 951}
893 952
894static void b53_adjust_link(struct dsa_switch *ds, int port, 953static void b53_force_link(struct b53_device *dev, int port, int link)
895 struct phy_device *phydev)
896{ 954{
897 struct b53_device *dev = ds->priv; 955 u8 reg, val, off;
898 struct ethtool_eee *p = &dev->ports[port].eee;
899 u8 rgmii_ctrl = 0, reg = 0, off;
900
901 if (!phy_is_pseudo_fixed_link(phydev))
902 return;
903 956
904 /* Override the port settings */ 957 /* Override the port settings */
905 if (port == dev->cpu_port) { 958 if (port == dev->cpu_port) {
906 off = B53_PORT_OVERRIDE_CTRL; 959 off = B53_PORT_OVERRIDE_CTRL;
907 reg = PORT_OVERRIDE_EN; 960 val = PORT_OVERRIDE_EN;
908 } else { 961 } else {
909 off = B53_GMII_PORT_OVERRIDE_CTRL(port); 962 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
910 reg = GMII_PO_EN; 963 val = GMII_PO_EN;
911 } 964 }
912 965
913 /* Set the link UP */ 966 b53_read8(dev, B53_CTRL_PAGE, off, &reg);
914 if (phydev->link) 967 reg |= val;
968 if (link)
915 reg |= PORT_OVERRIDE_LINK; 969 reg |= PORT_OVERRIDE_LINK;
970 else
971 reg &= ~PORT_OVERRIDE_LINK;
972 b53_write8(dev, B53_CTRL_PAGE, off, reg);
973}
974
975static void b53_force_port_config(struct b53_device *dev, int port,
976 int speed, int duplex, int pause)
977{
978 u8 reg, val, off;
979
980 /* Override the port settings */
981 if (port == dev->cpu_port) {
982 off = B53_PORT_OVERRIDE_CTRL;
983 val = PORT_OVERRIDE_EN;
984 } else {
985 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
986 val = GMII_PO_EN;
987 }
916 988
917 if (phydev->duplex == DUPLEX_FULL) 989 b53_read8(dev, B53_CTRL_PAGE, off, &reg);
990 reg |= val;
991 if (duplex == DUPLEX_FULL)
918 reg |= PORT_OVERRIDE_FULL_DUPLEX; 992 reg |= PORT_OVERRIDE_FULL_DUPLEX;
993 else
994 reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
919 995
920 switch (phydev->speed) { 996 switch (speed) {
921 case 2000: 997 case 2000:
922 reg |= PORT_OVERRIDE_SPEED_2000M; 998 reg |= PORT_OVERRIDE_SPEED_2000M;
923 /* fallthrough */ 999 /* fallthrough */
@@ -931,21 +1007,41 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
931 reg |= PORT_OVERRIDE_SPEED_10M; 1007 reg |= PORT_OVERRIDE_SPEED_10M;
932 break; 1008 break;
933 default: 1009 default:
934 dev_err(ds->dev, "unknown speed: %d\n", phydev->speed); 1010 dev_err(dev->dev, "unknown speed: %d\n", speed);
935 return; 1011 return;
936 } 1012 }
937 1013
1014 if (pause & MLO_PAUSE_RX)
1015 reg |= PORT_OVERRIDE_RX_FLOW;
1016 if (pause & MLO_PAUSE_TX)
1017 reg |= PORT_OVERRIDE_TX_FLOW;
1018
1019 b53_write8(dev, B53_CTRL_PAGE, off, reg);
1020}
1021
1022static void b53_adjust_link(struct dsa_switch *ds, int port,
1023 struct phy_device *phydev)
1024{
1025 struct b53_device *dev = ds->priv;
1026 struct ethtool_eee *p = &dev->ports[port].eee;
1027 u8 rgmii_ctrl = 0, reg = 0, off;
1028 int pause = 0;
1029
1030 if (!phy_is_pseudo_fixed_link(phydev))
1031 return;
1032
938 /* Enable flow control on BCM5301x's CPU port */ 1033 /* Enable flow control on BCM5301x's CPU port */
939 if (is5301x(dev) && port == dev->cpu_port) 1034 if (is5301x(dev) && port == dev->cpu_port)
940 reg |= PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW; 1035 pause = MLO_PAUSE_TXRX_MASK;
941 1036
942 if (phydev->pause) { 1037 if (phydev->pause) {
943 if (phydev->asym_pause) 1038 if (phydev->asym_pause)
944 reg |= PORT_OVERRIDE_TX_FLOW; 1039 pause |= MLO_PAUSE_TX;
945 reg |= PORT_OVERRIDE_RX_FLOW; 1040 pause |= MLO_PAUSE_RX;
946 } 1041 }
947 1042
948 b53_write8(dev, B53_CTRL_PAGE, off, reg); 1043 b53_force_port_config(dev, port, phydev->speed, phydev->duplex, pause);
1044 b53_force_link(dev, port, phydev->link);
949 1045
950 if (is531x5(dev) && phy_interface_is_rgmii(phydev)) { 1046 if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
951 if (port == 8) 1047 if (port == 8)
@@ -1005,16 +1101,9 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
1005 } 1101 }
1006 } else if (is5301x(dev)) { 1102 } else if (is5301x(dev)) {
1007 if (port != dev->cpu_port) { 1103 if (port != dev->cpu_port) {
1008 u8 po_reg = B53_GMII_PORT_OVERRIDE_CTRL(dev->cpu_port); 1104 b53_force_port_config(dev, dev->cpu_port, 2000,
1009 u8 gmii_po; 1105 DUPLEX_FULL, MLO_PAUSE_TXRX_MASK);
1010 1106 b53_force_link(dev, dev->cpu_port, 1);
1011 b53_read8(dev, B53_CTRL_PAGE, po_reg, &gmii_po);
1012 gmii_po |= GMII_PO_LINK |
1013 GMII_PO_RX_FLOW |
1014 GMII_PO_TX_FLOW |
1015 GMII_PO_EN |
1016 GMII_PO_SPEED_2000M;
1017 b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po);
1018 } 1107 }
1019 } 1108 }
1020 1109
@@ -1022,6 +1111,148 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
1022 p->eee_enabled = b53_eee_init(ds, port, phydev); 1111 p->eee_enabled = b53_eee_init(ds, port, phydev);
1023} 1112}
1024 1113
1114void b53_port_event(struct dsa_switch *ds, int port)
1115{
1116 struct b53_device *dev = ds->priv;
1117 bool link;
1118 u16 sts;
1119
1120 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
1121 link = !!(sts & BIT(port));
1122 dsa_port_phylink_mac_change(ds, port, link);
1123}
1124EXPORT_SYMBOL(b53_port_event);
1125
1126void b53_phylink_validate(struct dsa_switch *ds, int port,
1127 unsigned long *supported,
1128 struct phylink_link_state *state)
1129{
1130 struct b53_device *dev = ds->priv;
1131 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1132
1133 if (dev->ops->serdes_phylink_validate)
1134 dev->ops->serdes_phylink_validate(dev, port, mask, state);
1135
1136 /* Allow all the expected bits */
1137 phylink_set(mask, Autoneg);
1138 phylink_set_port_modes(mask);
1139 phylink_set(mask, Pause);
1140 phylink_set(mask, Asym_Pause);
1141
1142 /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
1143 * support Gigabit, including Half duplex.
1144 */
1145 if (state->interface != PHY_INTERFACE_MODE_MII &&
1146 state->interface != PHY_INTERFACE_MODE_REVMII &&
1147 !phy_interface_mode_is_8023z(state->interface) &&
1148 !(is5325(dev) || is5365(dev))) {
1149 phylink_set(mask, 1000baseT_Full);
1150 phylink_set(mask, 1000baseT_Half);
1151 }
1152
1153 if (!phy_interface_mode_is_8023z(state->interface)) {
1154 phylink_set(mask, 10baseT_Half);
1155 phylink_set(mask, 10baseT_Full);
1156 phylink_set(mask, 100baseT_Half);
1157 phylink_set(mask, 100baseT_Full);
1158 }
1159
1160 bitmap_and(supported, supported, mask,
1161 __ETHTOOL_LINK_MODE_MASK_NBITS);
1162 bitmap_and(state->advertising, state->advertising, mask,
1163 __ETHTOOL_LINK_MODE_MASK_NBITS);
1164
1165 phylink_helper_basex_speed(state);
1166}
1167EXPORT_SYMBOL(b53_phylink_validate);
1168
1169int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
1170 struct phylink_link_state *state)
1171{
1172 struct b53_device *dev = ds->priv;
1173 int ret = -EOPNOTSUPP;
1174
1175 if ((phy_interface_mode_is_8023z(state->interface) ||
1176 state->interface == PHY_INTERFACE_MODE_SGMII) &&
1177 dev->ops->serdes_link_state)
1178 ret = dev->ops->serdes_link_state(dev, port, state);
1179
1180 return ret;
1181}
1182EXPORT_SYMBOL(b53_phylink_mac_link_state);
1183
1184void b53_phylink_mac_config(struct dsa_switch *ds, int port,
1185 unsigned int mode,
1186 const struct phylink_link_state *state)
1187{
1188 struct b53_device *dev = ds->priv;
1189
1190 if (mode == MLO_AN_PHY)
1191 return;
1192
1193 if (mode == MLO_AN_FIXED) {
1194 b53_force_port_config(dev, port, state->speed,
1195 state->duplex, state->pause);
1196 return;
1197 }
1198
1199 if ((phy_interface_mode_is_8023z(state->interface) ||
1200 state->interface == PHY_INTERFACE_MODE_SGMII) &&
1201 dev->ops->serdes_config)
1202 dev->ops->serdes_config(dev, port, mode, state);
1203}
1204EXPORT_SYMBOL(b53_phylink_mac_config);
1205
1206void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
1207{
1208 struct b53_device *dev = ds->priv;
1209
1210 if (dev->ops->serdes_an_restart)
1211 dev->ops->serdes_an_restart(dev, port);
1212}
1213EXPORT_SYMBOL(b53_phylink_mac_an_restart);
1214
1215void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
1216 unsigned int mode,
1217 phy_interface_t interface)
1218{
1219 struct b53_device *dev = ds->priv;
1220
1221 if (mode == MLO_AN_PHY)
1222 return;
1223
1224 if (mode == MLO_AN_FIXED) {
1225 b53_force_link(dev, port, false);
1226 return;
1227 }
1228
1229 if (phy_interface_mode_is_8023z(interface) &&
1230 dev->ops->serdes_link_set)
1231 dev->ops->serdes_link_set(dev, port, mode, interface, false);
1232}
1233EXPORT_SYMBOL(b53_phylink_mac_link_down);
1234
1235void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
1236 unsigned int mode,
1237 phy_interface_t interface,
1238 struct phy_device *phydev)
1239{
1240 struct b53_device *dev = ds->priv;
1241
1242 if (mode == MLO_AN_PHY)
1243 return;
1244
1245 if (mode == MLO_AN_FIXED) {
1246 b53_force_link(dev, port, true);
1247 return;
1248 }
1249
1250 if (phy_interface_mode_is_8023z(interface) &&
1251 dev->ops->serdes_link_set)
1252 dev->ops->serdes_link_set(dev, port, mode, interface, true);
1253}
1254EXPORT_SYMBOL(b53_phylink_mac_link_up);
1255
1025int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) 1256int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
1026{ 1257{
1027 return 0; 1258 return 0;
@@ -1060,7 +1291,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
1060 b53_get_vlan_entry(dev, vid, vl); 1291 b53_get_vlan_entry(dev, vid, vl);
1061 1292
1062 vl->members |= BIT(port); 1293 vl->members |= BIT(port);
1063 if (untagged) 1294 if (untagged && !dsa_is_cpu_port(ds, port))
1064 vl->untag |= BIT(port); 1295 vl->untag |= BIT(port);
1065 else 1296 else
1066 vl->untag &= ~BIT(port); 1297 vl->untag &= ~BIT(port);
@@ -1102,7 +1333,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
1102 pvid = 0; 1333 pvid = 0;
1103 } 1334 }
1104 1335
1105 if (untagged) 1336 if (untagged && !dsa_is_cpu_port(ds, port))
1106 vl->untag &= ~(BIT(port)); 1337 vl->untag &= ~(BIT(port));
1107 1338
1108 b53_set_vlan_entry(dev, vid, vl); 1339 b53_set_vlan_entry(dev, vid, vl);
@@ -1477,7 +1708,7 @@ void b53_br_fast_age(struct dsa_switch *ds, int port)
1477} 1708}
1478EXPORT_SYMBOL(b53_br_fast_age); 1709EXPORT_SYMBOL(b53_br_fast_age);
1479 1710
1480static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port) 1711static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
1481{ 1712{
1482 /* Broadcom switches will accept enabling Broadcom tags on the 1713 /* Broadcom switches will accept enabling Broadcom tags on the
1483 * following ports: 5, 7 and 8, any other port is not supported 1714 * following ports: 5, 7 and 8, any other port is not supported
@@ -1489,10 +1720,19 @@ static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
1489 return true; 1720 return true;
1490 } 1721 }
1491 1722
1492 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", port);
1493 return false; 1723 return false;
1494} 1724}
1495 1725
1726static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
1727{
1728 bool ret = b53_possible_cpu_port(ds, port);
1729
1730 if (!ret)
1731 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
1732 port);
1733 return ret;
1734}
1735
1496enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port) 1736enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
1497{ 1737{
1498 struct b53_device *dev = ds->priv; 1738 struct b53_device *dev = ds->priv;
@@ -1650,9 +1890,16 @@ static const struct dsa_switch_ops b53_switch_ops = {
1650 .get_strings = b53_get_strings, 1890 .get_strings = b53_get_strings,
1651 .get_ethtool_stats = b53_get_ethtool_stats, 1891 .get_ethtool_stats = b53_get_ethtool_stats,
1652 .get_sset_count = b53_get_sset_count, 1892 .get_sset_count = b53_get_sset_count,
1893 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
1653 .phy_read = b53_phy_read16, 1894 .phy_read = b53_phy_read16,
1654 .phy_write = b53_phy_write16, 1895 .phy_write = b53_phy_write16,
1655 .adjust_link = b53_adjust_link, 1896 .adjust_link = b53_adjust_link,
1897 .phylink_validate = b53_phylink_validate,
1898 .phylink_mac_link_state = b53_phylink_mac_link_state,
1899 .phylink_mac_config = b53_phylink_mac_config,
1900 .phylink_mac_an_restart = b53_phylink_mac_an_restart,
1901 .phylink_mac_link_down = b53_phylink_mac_link_down,
1902 .phylink_mac_link_up = b53_phylink_mac_link_up,
1656 .port_enable = b53_enable_port, 1903 .port_enable = b53_enable_port,
1657 .port_disable = b53_disable_port, 1904 .port_disable = b53_disable_port,
1658 .get_mac_eee = b53_get_mac_eee, 1905 .get_mac_eee = b53_get_mac_eee,
@@ -1880,6 +2127,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
1880 .jumbo_size_reg = B53_JUMBO_MAX_SIZE, 2127 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1881 }, 2128 },
1882 { 2129 {
2130 .chip_id = BCM583XX_DEVICE_ID,
2131 .dev_name = "BCM583xx/11360",
2132 .vlans = 4096,
2133 .enabled_ports = 0x103,
2134 .arl_entries = 4,
2135 .cpu_port = B53_CPU_PORT,
2136 .vta_regs = B53_VTA_REGS,
2137 .duplex_reg = B53_DUPLEX_STAT_GE,
2138 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2139 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2140 },
2141 {
1883 .chip_id = BCM7445_DEVICE_ID, 2142 .chip_id = BCM7445_DEVICE_ID,
1884 .dev_name = "BCM7445", 2143 .dev_name = "BCM7445",
1885 .vlans = 4096, 2144 .vlans = 4096,
@@ -1966,14 +2225,23 @@ static int b53_switch_init(struct b53_device *dev)
1966 dev->num_ports = dev->cpu_port + 1; 2225 dev->num_ports = dev->cpu_port + 1;
1967 dev->enabled_ports |= BIT(dev->cpu_port); 2226 dev->enabled_ports |= BIT(dev->cpu_port);
1968 2227
1969 dev->ports = devm_kzalloc(dev->dev, 2228 /* Include non standard CPU port built-in PHYs to be probed */
1970 sizeof(struct b53_port) * dev->num_ports, 2229 if (is539x(dev) || is531x5(dev)) {
2230 for (i = 0; i < dev->num_ports; i++) {
2231 if (!(dev->ds->phys_mii_mask & BIT(i)) &&
2232 !b53_possible_cpu_port(dev->ds, i))
2233 dev->ds->phys_mii_mask |= BIT(i);
2234 }
2235 }
2236
2237 dev->ports = devm_kcalloc(dev->dev,
2238 dev->num_ports, sizeof(struct b53_port),
1971 GFP_KERNEL); 2239 GFP_KERNEL);
1972 if (!dev->ports) 2240 if (!dev->ports)
1973 return -ENOMEM; 2241 return -ENOMEM;
1974 2242
1975 dev->vlans = devm_kzalloc(dev->dev, 2243 dev->vlans = devm_kcalloc(dev->dev,
1976 sizeof(struct b53_vlan) * dev->num_vlans, 2244 dev->num_vlans, sizeof(struct b53_vlan),
1977 GFP_KERNEL); 2245 GFP_KERNEL);
1978 if (!dev->vlans) 2246 if (!dev->vlans)
1979 return -ENOMEM; 2247 return -ENOMEM;
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 3b57f47d0e79..ec796482792d 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -29,6 +29,7 @@
29 29
30struct b53_device; 30struct b53_device;
31struct net_device; 31struct net_device;
32struct phylink_link_state;
32 33
33struct b53_io_ops { 34struct b53_io_ops {
34 int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value); 35 int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value);
@@ -43,8 +44,25 @@ struct b53_io_ops {
43 int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value); 44 int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value);
44 int (*phy_read16)(struct b53_device *dev, int addr, int reg, u16 *value); 45 int (*phy_read16)(struct b53_device *dev, int addr, int reg, u16 *value);
45 int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value); 46 int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
47 int (*irq_enable)(struct b53_device *dev, int port);
48 void (*irq_disable)(struct b53_device *dev, int port);
49 u8 (*serdes_map_lane)(struct b53_device *dev, int port);
50 int (*serdes_link_state)(struct b53_device *dev, int port,
51 struct phylink_link_state *state);
52 void (*serdes_config)(struct b53_device *dev, int port,
53 unsigned int mode,
54 const struct phylink_link_state *state);
55 void (*serdes_an_restart)(struct b53_device *dev, int port);
56 void (*serdes_link_set)(struct b53_device *dev, int port,
57 unsigned int mode, phy_interface_t interface,
58 bool link_up);
59 void (*serdes_phylink_validate)(struct b53_device *dev, int port,
60 unsigned long *supported,
61 struct phylink_link_state *state);
46}; 62};
47 63
64#define B53_INVALID_LANE 0xff
65
48enum { 66enum {
49 BCM5325_DEVICE_ID = 0x25, 67 BCM5325_DEVICE_ID = 0x25,
50 BCM5365_DEVICE_ID = 0x65, 68 BCM5365_DEVICE_ID = 0x65,
@@ -62,6 +80,7 @@ enum {
62 BCM53018_DEVICE_ID = 0x53018, 80 BCM53018_DEVICE_ID = 0x53018,
63 BCM53019_DEVICE_ID = 0x53019, 81 BCM53019_DEVICE_ID = 0x53019,
64 BCM58XX_DEVICE_ID = 0x5800, 82 BCM58XX_DEVICE_ID = 0x5800,
83 BCM583XX_DEVICE_ID = 0x58300,
65 BCM7445_DEVICE_ID = 0x7445, 84 BCM7445_DEVICE_ID = 0x7445,
66 BCM7278_DEVICE_ID = 0x7278, 85 BCM7278_DEVICE_ID = 0x7278,
67}; 86};
@@ -106,6 +125,7 @@ struct b53_device {
106 /* connect specific data */ 125 /* connect specific data */
107 u8 current_page; 126 u8 current_page;
108 struct device *dev; 127 struct device *dev;
128 u8 serdes_lane;
109 129
110 /* Master MDIO bus we got probed from */ 130 /* Master MDIO bus we got probed from */
111 struct mii_bus *bus; 131 struct mii_bus *bus;
@@ -181,6 +201,7 @@ static inline int is5301x(struct b53_device *dev)
181static inline int is58xx(struct b53_device *dev) 201static inline int is58xx(struct b53_device *dev)
182{ 202{
183 return dev->chip_id == BCM58XX_DEVICE_ID || 203 return dev->chip_id == BCM58XX_DEVICE_ID ||
204 dev->chip_id == BCM583XX_DEVICE_ID ||
184 dev->chip_id == BCM7445_DEVICE_ID || 205 dev->chip_id == BCM7445_DEVICE_ID ||
185 dev->chip_id == BCM7278_DEVICE_ID; 206 dev->chip_id == BCM7278_DEVICE_ID;
186} 207}
@@ -287,13 +308,32 @@ static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
287/* Exported functions towards other drivers */ 308/* Exported functions towards other drivers */
288void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port); 309void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port);
289int b53_configure_vlan(struct dsa_switch *ds); 310int b53_configure_vlan(struct dsa_switch *ds);
290void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data); 311void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
312 uint8_t *data);
291void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); 313void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
292int b53_get_sset_count(struct dsa_switch *ds, int port); 314int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
315void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
293int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); 316int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
294void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); 317void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
295void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); 318void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
296void b53_br_fast_age(struct dsa_switch *ds, int port); 319void b53_br_fast_age(struct dsa_switch *ds, int port);
320void b53_port_event(struct dsa_switch *ds, int port);
321void b53_phylink_validate(struct dsa_switch *ds, int port,
322 unsigned long *supported,
323 struct phylink_link_state *state);
324int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
325 struct phylink_link_state *state);
326void b53_phylink_mac_config(struct dsa_switch *ds, int port,
327 unsigned int mode,
328 const struct phylink_link_state *state);
329void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port);
330void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
331 unsigned int mode,
332 phy_interface_t interface);
333void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
334 unsigned int mode,
335 phy_interface_t interface,
336 struct phy_device *phydev);
297int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering); 337int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
298int b53_vlan_prepare(struct dsa_switch *ds, int port, 338int b53_vlan_prepare(struct dsa_switch *ds, int port,
299 const struct switchdev_obj_port_vlan *vlan); 339 const struct switchdev_obj_port_vlan *vlan);
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
new file mode 100644
index 000000000000..629bf14128a2
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -0,0 +1,214 @@
1// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
2/*
3 * Northstar Plus switch SerDes/SGMII PHY main logic
4 *
5 * Copyright (C) 2018 Florian Fainelli <f.fainelli@gmail.com>
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/delay.h>
11#include <linux/kernel.h>
12#include <linux/phy.h>
13#include <linux/phylink.h>
14#include <net/dsa.h>
15
16#include "b53_priv.h"
17#include "b53_serdes.h"
18#include "b53_regs.h"
19
20static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 block,
21 u16 value)
22{
23 b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block);
24 b53_write16(dev, B53_SERDES_PAGE, offset, value);
25}
26
27static u16 b53_serdes_read_blk(struct b53_device *dev, u8 offset, u16 block)
28{
29 u16 value;
30
31 b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block);
32 b53_read16(dev, B53_SERDES_PAGE, offset, &value);
33
34 return value;
35}
36
37static void b53_serdes_set_lane(struct b53_device *dev, u8 lane)
38{
39 if (dev->serdes_lane == lane)
40 return;
41
42 WARN_ON(lane > 1);
43
44 b53_serdes_write_blk(dev, B53_SERDES_LANE,
45 SERDES_XGXSBLK0_BLOCKADDRESS, lane);
46 dev->serdes_lane = lane;
47}
48
49static void b53_serdes_write(struct b53_device *dev, u8 lane,
50 u8 offset, u16 block, u16 value)
51{
52 b53_serdes_set_lane(dev, lane);
53 b53_serdes_write_blk(dev, offset, block, value);
54}
55
56static u16 b53_serdes_read(struct b53_device *dev, u8 lane,
57 u8 offset, u16 block)
58{
59 b53_serdes_set_lane(dev, lane);
60 return b53_serdes_read_blk(dev, offset, block);
61}
62
63void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
64 const struct phylink_link_state *state)
65{
66 u8 lane = b53_serdes_map_lane(dev, port);
67 u16 reg;
68
69 if (lane == B53_INVALID_LANE)
70 return;
71
72 reg = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
73 SERDES_DIGITAL_BLK);
74 if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
75 reg |= FIBER_MODE_1000X;
76 else
77 reg &= ~FIBER_MODE_1000X;
78 b53_serdes_write(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
79 SERDES_DIGITAL_BLK, reg);
80}
81EXPORT_SYMBOL(b53_serdes_config);
82
83void b53_serdes_an_restart(struct b53_device *dev, int port)
84{
85 u8 lane = b53_serdes_map_lane(dev, port);
86 u16 reg;
87
88 if (lane == B53_INVALID_LANE)
89 return;
90
91 reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
92 SERDES_MII_BLK);
93 reg |= BMCR_ANRESTART;
94 b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
95 SERDES_MII_BLK, reg);
96}
97EXPORT_SYMBOL(b53_serdes_an_restart);
98
99int b53_serdes_link_state(struct b53_device *dev, int port,
100 struct phylink_link_state *state)
101{
102 u8 lane = b53_serdes_map_lane(dev, port);
103 u16 dig, bmsr;
104
105 if (lane == B53_INVALID_LANE)
106 return 1;
107
108 dig = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_STATUS,
109 SERDES_DIGITAL_BLK);
110 bmsr = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMSR),
111 SERDES_MII_BLK);
112
113 switch ((dig >> SPEED_STATUS_SHIFT) & SPEED_STATUS_MASK) {
114 case SPEED_STATUS_10:
115 state->speed = SPEED_10;
116 break;
117 case SPEED_STATUS_100:
118 state->speed = SPEED_100;
119 break;
120 case SPEED_STATUS_1000:
121 state->speed = SPEED_1000;
122 break;
123 default:
124 case SPEED_STATUS_2500:
125 state->speed = SPEED_2500;
126 break;
127 }
128
129 state->duplex = dig & DUPLEX_STATUS ? DUPLEX_FULL : DUPLEX_HALF;
130 state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
131 state->link = !!(dig & LINK_STATUS);
132 if (dig & PAUSE_RESOLUTION_RX_SIDE)
133 state->pause |= MLO_PAUSE_RX;
134 if (dig & PAUSE_RESOLUTION_TX_SIDE)
135 state->pause |= MLO_PAUSE_TX;
136
137 return 0;
138}
139EXPORT_SYMBOL(b53_serdes_link_state);
140
141void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
142 phy_interface_t interface, bool link_up)
143{
144 u8 lane = b53_serdes_map_lane(dev, port);
145 u16 reg;
146
147 if (lane == B53_INVALID_LANE)
148 return;
149
150 reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
151 SERDES_MII_BLK);
152 if (link_up)
153 reg &= ~BMCR_PDOWN;
154 else
155 reg |= BMCR_PDOWN;
156 b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
157 SERDES_MII_BLK, reg);
158}
159EXPORT_SYMBOL(b53_serdes_link_set);
160
161void b53_serdes_phylink_validate(struct b53_device *dev, int port,
162 unsigned long *supported,
163 struct phylink_link_state *state)
164{
165 u8 lane = b53_serdes_map_lane(dev, port);
166
167 if (lane == B53_INVALID_LANE)
168 return;
169
170 switch (lane) {
171 case 0:
172 phylink_set(supported, 2500baseX_Full);
173 /* fallthrough */
174 case 1:
175 phylink_set(supported, 1000baseX_Full);
176 break;
177 default:
178 break;
179 }
180}
181EXPORT_SYMBOL(b53_serdes_phylink_validate);
182
183int b53_serdes_init(struct b53_device *dev, int port)
184{
185 u8 lane = b53_serdes_map_lane(dev, port);
186 u16 id0, msb, lsb;
187
188 if (lane == B53_INVALID_LANE)
189 return -EINVAL;
190
191 id0 = b53_serdes_read(dev, lane, B53_SERDES_ID0, SERDES_ID0);
192 msb = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_PHYSID1),
193 SERDES_MII_BLK);
194 lsb = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_PHYSID2),
195 SERDES_MII_BLK);
196 if (id0 == 0 || id0 == 0xffff) {
197 dev_err(dev->dev, "SerDes not initialized, check settings\n");
198 return -ENODEV;
199 }
200
201 dev_info(dev->dev,
202 "SerDes lane %d, model: %d, rev %c%d (OUI: 0x%08x)\n",
203 lane, id0 & SERDES_ID0_MODEL_MASK,
204 (id0 >> SERDES_ID0_REV_LETTER_SHIFT) + 0x41,
205 (id0 >> SERDES_ID0_REV_NUM_SHIFT) & SERDES_ID0_REV_NUM_MASK,
206 (u32)msb << 16 | lsb);
207
208 return 0;
209}
210EXPORT_SYMBOL(b53_serdes_init);
211
212MODULE_AUTHOR("Florian Fainelli <f.fainelli@gmail.com>");
213MODULE_DESCRIPTION("B53 Switch SerDes driver");
214MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h
new file mode 100644
index 000000000000..3bb4f91aec9e
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_serdes.h
@@ -0,0 +1,128 @@
1/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
2 *
3 * Northstar Plus switch SerDes/SGMII PHY definitions
4 *
5 * Copyright (C) 2018 Florian Fainelli <f.fainelli@gmail.com>
6 */
7
8#include <linux/phy.h>
9#include <linux/types.h>
10
11/* Non-standard page used to access SerDes PHY registers on NorthStar Plus */
12#define B53_SERDES_PAGE 0x16
13#define B53_SERDES_BLKADDR 0x3e
14#define B53_SERDES_LANE 0x3c
15
16#define B53_SERDES_ID0 0x20
17#define SERDES_ID0_MODEL_MASK 0x3f
18#define SERDES_ID0_REV_NUM_SHIFT 11
19#define SERDES_ID0_REV_NUM_MASK 0x7
20#define SERDES_ID0_REV_LETTER_SHIFT 14
21
22#define B53_SERDES_MII_REG(x) (0x20 + (x) * 2)
23#define B53_SERDES_DIGITAL_CONTROL(x) (0x1e + (x) * 2)
24#define B53_SERDES_DIGITAL_STATUS 0x28
25
26/* SERDES_DIGITAL_CONTROL1 */
27#define FIBER_MODE_1000X BIT(0)
28#define TBI_INTERFACE BIT(1)
29#define SIGNAL_DETECT_EN BIT(2)
30#define INVERT_SIGNAL_DETECT BIT(3)
31#define AUTODET_EN BIT(4)
32#define SGMII_MASTER_MODE BIT(5)
33#define DISABLE_DLL_PWRDOWN BIT(6)
34#define CRC_CHECKER_DIS BIT(7)
35#define COMMA_DET_EN BIT(8)
36#define ZERO_COMMA_DET_EN BIT(9)
37#define REMOTE_LOOPBACK BIT(10)
38#define SEL_RX_PKTS_FOR_CNTR BIT(11)
39#define MASTER_MDIO_PHY_SEL BIT(13)
40#define DISABLE_SIGNAL_DETECT_FLT BIT(14)
41
42/* SERDES_DIGITAL_CONTROL2 */
43#define EN_PARALLEL_DET BIT(0)
44#define DIS_FALSE_LINK BIT(1)
45#define FLT_FORCE_LINK BIT(2)
46#define EN_AUTONEG_ERR_TIMER BIT(3)
47#define DIS_REMOTE_FAULT_SENSING BIT(4)
48#define FORCE_XMIT_DATA BIT(5)
49#define AUTONEG_FAST_TIMERS BIT(6)
50#define DIS_CARRIER_EXTEND BIT(7)
51#define DIS_TRRR_GENERATION BIT(8)
52#define BYPASS_PCS_RX BIT(9)
53#define BYPASS_PCS_TX BIT(10)
54#define TEST_CNTR_EN BIT(11)
55#define TX_PACKET_SEQ_TEST BIT(12)
56#define TX_IDLE_JAM_SEQ_TEST BIT(13)
57#define CLR_BER_CNTR BIT(14)
58
59/* SERDES_DIGITAL_CONTROL3 */
60#define TX_FIFO_RST BIT(0)
61#define FIFO_ELAST_TX_RX_SHIFT 1
62#define FIFO_ELAST_TX_RX_5K 0
63#define FIFO_ELAST_TX_RX_10K 1
64#define FIFO_ELAST_TX_RX_13_5K 2
65#define FIFO_ELAST_TX_RX_18_5K 3
66#define BLOCK_TXEN_MODE BIT(9)
67#define JAM_FALSE_CARRIER_MODE BIT(10)
68#define EXT_PHY_CRS_MODE BIT(11)
69#define INVERT_EXT_PHY_CRS BIT(12)
70#define DISABLE_TX_CRS BIT(13)
71
72/* SERDES_DIGITAL_STATUS */
73#define SGMII_MODE BIT(0)
74#define LINK_STATUS BIT(1)
75#define DUPLEX_STATUS BIT(2)
76#define SPEED_STATUS_SHIFT 3
77#define SPEED_STATUS_10 0
78#define SPEED_STATUS_100 1
79#define SPEED_STATUS_1000 2
80#define SPEED_STATUS_2500 3
81#define SPEED_STATUS_MASK SPEED_STATUS_2500
82#define PAUSE_RESOLUTION_TX_SIDE BIT(5)
83#define PAUSE_RESOLUTION_RX_SIDE BIT(6)
84#define LINK_STATUS_CHANGE BIT(7)
85#define EARLY_END_EXT_DET BIT(8)
86#define CARRIER_EXT_ERR_DET BIT(9)
87#define RX_ERR_DET BIT(10)
88#define TX_ERR_DET BIT(11)
89#define CRC_ERR_DET BIT(12)
90#define FALSE_CARRIER_ERR_DET BIT(13)
91#define RXFIFO_ERR_DET BIT(14)
92#define TXFIFO_ERR_DET BIT(15)
93
94/* Block offsets */
95#define SERDES_DIGITAL_BLK 0x8300
96#define SERDES_ID0 0x8310
97#define SERDES_MII_BLK 0xffe0
98#define SERDES_XGXSBLK0_BLOCKADDRESS 0xffd0
99
100struct phylink_link_state;
101
102static inline u8 b53_serdes_map_lane(struct b53_device *dev, int port)
103{
104 if (!dev->ops->serdes_map_lane)
105 return B53_INVALID_LANE;
106
107 return dev->ops->serdes_map_lane(dev, port);
108}
109
110int b53_serdes_get_link(struct b53_device *dev, int port);
111int b53_serdes_link_state(struct b53_device *dev, int port,
112 struct phylink_link_state *state);
113void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
114 const struct phylink_link_state *state);
115void b53_serdes_an_restart(struct b53_device *dev, int port);
116void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
117 phy_interface_t interface, bool link_up);
118void b53_serdes_phylink_validate(struct b53_device *dev, int port,
119 unsigned long *supported,
120 struct phylink_link_state *state);
121#if IS_ENABLED(CONFIG_B53_SERDES)
122int b53_serdes_init(struct b53_device *dev, int port);
123#else
124static inline int b53_serdes_init(struct b53_device *dev, int port)
125{
126 return -ENODEV;
127}
128#endif
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index c37ffd1b6833..90f514252987 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -19,11 +19,13 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/interrupt.h>
22#include <linux/platform_device.h> 23#include <linux/platform_device.h>
23#include <linux/platform_data/b53.h> 24#include <linux/platform_data/b53.h>
24#include <linux/of.h> 25#include <linux/of.h>
25 26
26#include "b53_priv.h" 27#include "b53_priv.h"
28#include "b53_serdes.h"
27 29
28/* command and status register of the SRAB */ 30/* command and status register of the SRAB */
29#define B53_SRAB_CMDSTAT 0x2c 31#define B53_SRAB_CMDSTAT 0x2c
@@ -47,6 +49,7 @@
47 49
48/* command and status register of the SRAB */ 50/* command and status register of the SRAB */
49#define B53_SRAB_CTRLS 0x40 51#define B53_SRAB_CTRLS 0x40
52#define B53_SRAB_CTRLS_HOST_INTR BIT(1)
50#define B53_SRAB_CTRLS_RCAREQ BIT(3) 53#define B53_SRAB_CTRLS_RCAREQ BIT(3)
51#define B53_SRAB_CTRLS_RCAGNT BIT(4) 54#define B53_SRAB_CTRLS_RCAGNT BIT(4)
52#define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6) 55#define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6)
@@ -60,8 +63,29 @@
60#define B53_SRAB_P7_SLEEP_TIMER BIT(11) 63#define B53_SRAB_P7_SLEEP_TIMER BIT(11)
61#define B53_SRAB_IMP0_SLEEP_TIMER BIT(12) 64#define B53_SRAB_IMP0_SLEEP_TIMER BIT(12)
62 65
66/* Port mux configuration registers */
67#define B53_MUX_CONFIG_P5 0x00
68#define MUX_CONFIG_SGMII 0
69#define MUX_CONFIG_MII_LITE 1
70#define MUX_CONFIG_RGMII 2
71#define MUX_CONFIG_GMII 3
72#define MUX_CONFIG_GPHY 4
73#define MUX_CONFIG_INTERNAL 5
74#define MUX_CONFIG_MASK 0x7
75#define B53_MUX_CONFIG_P4 0x04
76
77struct b53_srab_port_priv {
78 int irq;
79 bool irq_enabled;
80 struct b53_device *dev;
81 unsigned int num;
82 phy_interface_t mode;
83};
84
63struct b53_srab_priv { 85struct b53_srab_priv {
64 void __iomem *regs; 86 void __iomem *regs;
87 void __iomem *mux_config;
88 struct b53_srab_port_priv port_intrs[B53_N_PORTS];
65}; 89};
66 90
67static int b53_srab_request_grant(struct b53_device *dev) 91static int b53_srab_request_grant(struct b53_device *dev)
@@ -344,6 +368,81 @@ err:
344 return ret; 368 return ret;
345} 369}
346 370
371static irqreturn_t b53_srab_port_thread(int irq, void *dev_id)
372{
373 struct b53_srab_port_priv *port = dev_id;
374 struct b53_device *dev = port->dev;
375
376 if (port->mode == PHY_INTERFACE_MODE_SGMII)
377 b53_port_event(dev->ds, port->num);
378
379 return IRQ_HANDLED;
380}
381
382static irqreturn_t b53_srab_port_isr(int irq, void *dev_id)
383{
384 struct b53_srab_port_priv *port = dev_id;
385 struct b53_device *dev = port->dev;
386 struct b53_srab_priv *priv = dev->priv;
387
388 /* Acknowledge the interrupt */
389 writel(BIT(port->num), priv->regs + B53_SRAB_INTR);
390
391 return IRQ_WAKE_THREAD;
392}
393
394#if IS_ENABLED(CONFIG_B53_SERDES)
395static u8 b53_srab_serdes_map_lane(struct b53_device *dev, int port)
396{
397 struct b53_srab_priv *priv = dev->priv;
398 struct b53_srab_port_priv *p = &priv->port_intrs[port];
399
400 if (p->mode != PHY_INTERFACE_MODE_SGMII)
401 return B53_INVALID_LANE;
402
403 switch (port) {
404 case 5:
405 return 0;
406 case 4:
407 return 1;
408 default:
409 return B53_INVALID_LANE;
410 }
411}
412#endif
413
414static int b53_srab_irq_enable(struct b53_device *dev, int port)
415{
416 struct b53_srab_priv *priv = dev->priv;
417 struct b53_srab_port_priv *p = &priv->port_intrs[port];
418 int ret = 0;
419
420 /* Interrupt is optional and was not specified, do not make
421 * this fatal
422 */
423 if (p->irq == -ENXIO)
424 return ret;
425
426 ret = request_threaded_irq(p->irq, b53_srab_port_isr,
427 b53_srab_port_thread, 0,
428 dev_name(dev->dev), p);
429 if (!ret)
430 p->irq_enabled = true;
431
432 return ret;
433}
434
435static void b53_srab_irq_disable(struct b53_device *dev, int port)
436{
437 struct b53_srab_priv *priv = dev->priv;
438 struct b53_srab_port_priv *p = &priv->port_intrs[port];
439
440 if (p->irq_enabled) {
441 free_irq(p->irq, p);
442 p->irq_enabled = false;
443 }
444}
445
347static const struct b53_io_ops b53_srab_ops = { 446static const struct b53_io_ops b53_srab_ops = {
348 .read8 = b53_srab_read8, 447 .read8 = b53_srab_read8,
349 .read16 = b53_srab_read16, 448 .read16 = b53_srab_read16,
@@ -355,6 +454,16 @@ static const struct b53_io_ops b53_srab_ops = {
355 .write32 = b53_srab_write32, 454 .write32 = b53_srab_write32,
356 .write48 = b53_srab_write48, 455 .write48 = b53_srab_write48,
357 .write64 = b53_srab_write64, 456 .write64 = b53_srab_write64,
457 .irq_enable = b53_srab_irq_enable,
458 .irq_disable = b53_srab_irq_disable,
459#if IS_ENABLED(CONFIG_B53_SERDES)
460 .serdes_map_lane = b53_srab_serdes_map_lane,
461 .serdes_link_state = b53_serdes_link_state,
462 .serdes_config = b53_serdes_config,
463 .serdes_an_restart = b53_serdes_an_restart,
464 .serdes_link_set = b53_serdes_link_set,
465 .serdes_phylink_validate = b53_serdes_phylink_validate,
466#endif
358}; 467};
359 468
360static const struct of_device_id b53_srab_of_match[] = { 469static const struct of_device_id b53_srab_of_match[] = {
@@ -364,7 +473,7 @@ static const struct of_device_id b53_srab_of_match[] = {
364 { .compatible = "brcm,bcm53018-srab" }, 473 { .compatible = "brcm,bcm53018-srab" },
365 { .compatible = "brcm,bcm53019-srab" }, 474 { .compatible = "brcm,bcm53019-srab" },
366 { .compatible = "brcm,bcm5301x-srab" }, 475 { .compatible = "brcm,bcm5301x-srab" },
367 { .compatible = "brcm,bcm11360-srab", .data = (void *)BCM58XX_DEVICE_ID }, 476 { .compatible = "brcm,bcm11360-srab", .data = (void *)BCM583XX_DEVICE_ID },
368 { .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID }, 477 { .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID },
369 { .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID }, 478 { .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID },
370 { .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID }, 479 { .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID },
@@ -372,12 +481,114 @@ static const struct of_device_id b53_srab_of_match[] = {
372 { .compatible = "brcm,bcm58623-srab", .data = (void *)BCM58XX_DEVICE_ID }, 481 { .compatible = "brcm,bcm58623-srab", .data = (void *)BCM58XX_DEVICE_ID },
373 { .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID }, 482 { .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID },
374 { .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID }, 483 { .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID },
375 { .compatible = "brcm,cygnus-srab", .data = (void *)BCM58XX_DEVICE_ID }, 484 { .compatible = "brcm,cygnus-srab", .data = (void *)BCM583XX_DEVICE_ID },
376 { .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID }, 485 { .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID },
486 { .compatible = "brcm,omega-srab", .data = (void *)BCM583XX_DEVICE_ID },
377 { /* sentinel */ }, 487 { /* sentinel */ },
378}; 488};
379MODULE_DEVICE_TABLE(of, b53_srab_of_match); 489MODULE_DEVICE_TABLE(of, b53_srab_of_match);
380 490
491static void b53_srab_intr_set(struct b53_srab_priv *priv, bool set)
492{
493 u32 reg;
494
495 reg = readl(priv->regs + B53_SRAB_CTRLS);
496 if (set)
497 reg |= B53_SRAB_CTRLS_HOST_INTR;
498 else
499 reg &= ~B53_SRAB_CTRLS_HOST_INTR;
500 writel(reg, priv->regs + B53_SRAB_CTRLS);
501}
502
503static void b53_srab_prepare_irq(struct platform_device *pdev)
504{
505 struct b53_device *dev = platform_get_drvdata(pdev);
506 struct b53_srab_priv *priv = dev->priv;
507 struct b53_srab_port_priv *port;
508 unsigned int i;
509 char *name;
510
511 /* Clear all pending interrupts */
512 writel(0xffffffff, priv->regs + B53_SRAB_INTR);
513
514 if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
515 return;
516
517 for (i = 0; i < B53_N_PORTS; i++) {
518 port = &priv->port_intrs[i];
519
520 /* There is no port 6 */
521 if (i == 6)
522 continue;
523
524 name = kasprintf(GFP_KERNEL, "link_state_p%d", i);
525 if (!name)
526 return;
527
528 port->num = i;
529 port->dev = dev;
530 port->irq = platform_get_irq_byname(pdev, name);
531 kfree(name);
532 }
533
534 b53_srab_intr_set(priv, true);
535}
536
537static void b53_srab_mux_init(struct platform_device *pdev)
538{
539 struct b53_device *dev = platform_get_drvdata(pdev);
540 struct b53_srab_priv *priv = dev->priv;
541 struct b53_srab_port_priv *p;
542 struct resource *r;
543 unsigned int port;
544 u32 reg, off = 0;
545 int ret;
546
547 if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
548 return;
549
550 r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
551 priv->mux_config = devm_ioremap_resource(&pdev->dev, r);
552 if (IS_ERR(priv->mux_config))
553 return;
554
555 /* Obtain the port mux configuration so we know which lanes
556 * actually map to SerDes lanes
557 */
558 for (port = 5; port > 3; port--, off += 4) {
559 p = &priv->port_intrs[port];
560
561 reg = readl(priv->mux_config + B53_MUX_CONFIG_P5 + off);
562 switch (reg & MUX_CONFIG_MASK) {
563 case MUX_CONFIG_SGMII:
564 p->mode = PHY_INTERFACE_MODE_SGMII;
565 ret = b53_serdes_init(dev, port);
566 if (ret)
567 continue;
568 break;
569 case MUX_CONFIG_MII_LITE:
570 p->mode = PHY_INTERFACE_MODE_MII;
571 break;
572 case MUX_CONFIG_GMII:
573 p->mode = PHY_INTERFACE_MODE_GMII;
574 break;
575 case MUX_CONFIG_RGMII:
576 p->mode = PHY_INTERFACE_MODE_RGMII;
577 break;
578 case MUX_CONFIG_INTERNAL:
579 p->mode = PHY_INTERFACE_MODE_INTERNAL;
580 break;
581 default:
582 p->mode = PHY_INTERFACE_MODE_NA;
583 break;
584 }
585
586 if (p->mode != PHY_INTERFACE_MODE_NA)
587 dev_info(&pdev->dev, "Port %d mode: %s\n",
588 port, phy_modes(p->mode));
589 }
590}
591
381static int b53_srab_probe(struct platform_device *pdev) 592static int b53_srab_probe(struct platform_device *pdev)
382{ 593{
383 struct b53_platform_data *pdata = pdev->dev.platform_data; 594 struct b53_platform_data *pdata = pdev->dev.platform_data;
@@ -416,13 +627,18 @@ static int b53_srab_probe(struct platform_device *pdev)
416 627
417 platform_set_drvdata(pdev, dev); 628 platform_set_drvdata(pdev, dev);
418 629
630 b53_srab_prepare_irq(pdev);
631 b53_srab_mux_init(pdev);
632
419 return b53_switch_register(dev); 633 return b53_switch_register(dev);
420} 634}
421 635
422static int b53_srab_remove(struct platform_device *pdev) 636static int b53_srab_remove(struct platform_device *pdev)
423{ 637{
424 struct b53_device *dev = platform_get_drvdata(pdev); 638 struct b53_device *dev = platform_get_drvdata(pdev);
639 struct b53_srab_priv *priv = dev->priv;
425 640
641 b53_srab_intr_set(priv, false);
426 if (dev) 642 if (dev)
427 b53_switch_remove(dev); 643 b53_switch_remove(dev);
428 644
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 0378eded31f2..2eb68769562c 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -16,6 +16,7 @@
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/phy.h> 17#include <linux/phy.h>
18#include <linux/phy_fixed.h> 18#include <linux/phy_fixed.h>
19#include <linux/phylink.h>
19#include <linux/mii.h> 20#include <linux/mii.h>
20#include <linux/of.h> 21#include <linux/of.h>
21#include <linux/of_irq.h> 22#include <linux/of_irq.h>
@@ -165,6 +166,11 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
165 reg &= ~P_TXQ_PSM_VDD(port); 166 reg &= ~P_TXQ_PSM_VDD(port);
166 core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); 167 core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
167 168
169 /* Enable learning */
170 reg = core_readl(priv, CORE_DIS_LEARN);
171 reg &= ~BIT(port);
172 core_writel(priv, reg, CORE_DIS_LEARN);
173
168 /* Enable Broadcom tags for that port if requested */ 174 /* Enable Broadcom tags for that port if requested */
169 if (priv->brcm_tag_mask & BIT(port)) 175 if (priv->brcm_tag_mask & BIT(port))
170 b53_brcm_hdr_setup(ds, port); 176 b53_brcm_hdr_setup(ds, port);
@@ -219,10 +225,15 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
219 struct phy_device *phy) 225 struct phy_device *phy)
220{ 226{
221 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 227 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
222 u32 off, reg; 228 u32 reg;
223 229
224 if (priv->wol_ports_mask & (1 << port)) 230 /* Disable learning while in WoL mode */
231 if (priv->wol_ports_mask & (1 << port)) {
232 reg = core_readl(priv, CORE_DIS_LEARN);
233 reg |= BIT(port);
234 core_writel(priv, reg, CORE_DIS_LEARN);
225 return; 235 return;
236 }
226 237
227 if (port == priv->moca_port) 238 if (port == priv->moca_port)
228 bcm_sf2_port_intr_disable(priv, port); 239 bcm_sf2_port_intr_disable(priv, port);
@@ -230,11 +241,6 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
230 if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) 241 if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
231 bcm_sf2_gphy_enable_set(ds, false); 242 bcm_sf2_gphy_enable_set(ds, false);
232 243
233 if (dsa_is_cpu_port(ds, port))
234 off = CORE_IMP_CTL;
235 else
236 off = CORE_G_PCTL_PORT(port);
237
238 b53_disable_port(ds, port, phy); 244 b53_disable_port(ds, port, phy);
239 245
240 /* Power down the port memory */ 246 /* Power down the port memory */
@@ -306,7 +312,8 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
306 312
307static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) 313static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
308{ 314{
309 struct bcm_sf2_priv *priv = dev_id; 315 struct dsa_switch *ds = dev_id;
316 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
310 317
311 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & 318 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
312 ~priv->irq0_mask; 319 ~priv->irq0_mask;
@@ -317,16 +324,21 @@ static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
317 324
318static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id) 325static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
319{ 326{
320 struct bcm_sf2_priv *priv = dev_id; 327 struct dsa_switch *ds = dev_id;
328 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
321 329
322 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) & 330 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
323 ~priv->irq1_mask; 331 ~priv->irq1_mask;
324 intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); 332 intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
325 333
326 if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) 334 if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
327 priv->port_sts[7].link = 1; 335 priv->port_sts[7].link = true;
328 if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) 336 dsa_port_phylink_mac_change(ds, 7, true);
329 priv->port_sts[7].link = 0; 337 }
338 if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
339 priv->port_sts[7].link = false;
340 dsa_port_phylink_mac_change(ds, 7, false);
341 }
330 342
331 return IRQ_HANDLED; 343 return IRQ_HANDLED;
332} 344}
@@ -443,12 +455,8 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
443 priv->slave_mii_bus->parent = ds->dev->parent; 455 priv->slave_mii_bus->parent = ds->dev->parent;
444 priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask; 456 priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
445 457
446 if (dn) 458 err = of_mdiobus_register(priv->slave_mii_bus, dn);
447 err = of_mdiobus_register(priv->slave_mii_bus, dn); 459 if (err && dn)
448 else
449 err = mdiobus_register(priv->slave_mii_bus);
450
451 if (err)
452 of_node_put(dn); 460 of_node_put(dn);
453 461
454 return err; 462 return err;
@@ -457,8 +465,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
457static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) 465static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
458{ 466{
459 mdiobus_unregister(priv->slave_mii_bus); 467 mdiobus_unregister(priv->slave_mii_bus);
460 if (priv->master_mii_dn) 468 of_node_put(priv->master_mii_dn);
461 of_node_put(priv->master_mii_dn);
462} 469}
463 470
464static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) 471static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
@@ -473,13 +480,56 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
473 return priv->hw_params.gphy_rev; 480 return priv->hw_params.gphy_rev;
474} 481}
475 482
476static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, 483static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
477 struct phy_device *phydev) 484 unsigned long *supported,
485 struct phylink_link_state *state)
486{
487 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
488
489 if (!phy_interface_mode_is_rgmii(state->interface) &&
490 state->interface != PHY_INTERFACE_MODE_MII &&
491 state->interface != PHY_INTERFACE_MODE_REVMII &&
492 state->interface != PHY_INTERFACE_MODE_GMII &&
493 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
494 state->interface != PHY_INTERFACE_MODE_MOCA) {
495 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
496 dev_err(ds->dev,
497 "Unsupported interface: %d\n", state->interface);
498 return;
499 }
500
501 /* Allow all the expected bits */
502 phylink_set(mask, Autoneg);
503 phylink_set_port_modes(mask);
504 phylink_set(mask, Pause);
505 phylink_set(mask, Asym_Pause);
506
507 /* With the exclusion of MII and Reverse MII, we support Gigabit,
508 * including Half duplex
509 */
510 if (state->interface != PHY_INTERFACE_MODE_MII &&
511 state->interface != PHY_INTERFACE_MODE_REVMII) {
512 phylink_set(mask, 1000baseT_Full);
513 phylink_set(mask, 1000baseT_Half);
514 }
515
516 phylink_set(mask, 10baseT_Half);
517 phylink_set(mask, 10baseT_Full);
518 phylink_set(mask, 100baseT_Half);
519 phylink_set(mask, 100baseT_Full);
520
521 bitmap_and(supported, supported, mask,
522 __ETHTOOL_LINK_MODE_MASK_NBITS);
523 bitmap_and(state->advertising, state->advertising, mask,
524 __ETHTOOL_LINK_MODE_MASK_NBITS);
525}
526
527static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
528 unsigned int mode,
529 const struct phylink_link_state *state)
478{ 530{
479 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 531 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
480 struct ethtool_eee *p = &priv->dev->ports[port].eee;
481 u32 id_mode_dis = 0, port_mode; 532 u32 id_mode_dis = 0, port_mode;
482 const char *str = NULL;
483 u32 reg, offset; 533 u32 reg, offset;
484 534
485 if (priv->type == BCM7445_DEVICE_ID) 535 if (priv->type == BCM7445_DEVICE_ID)
@@ -487,62 +537,48 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
487 else 537 else
488 offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); 538 offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
489 539
490 switch (phydev->interface) { 540 switch (state->interface) {
491 case PHY_INTERFACE_MODE_RGMII: 541 case PHY_INTERFACE_MODE_RGMII:
492 str = "RGMII (no delay)";
493 id_mode_dis = 1; 542 id_mode_dis = 1;
543 /* fallthrough */
494 case PHY_INTERFACE_MODE_RGMII_TXID: 544 case PHY_INTERFACE_MODE_RGMII_TXID:
495 if (!str)
496 str = "RGMII (TX delay)";
497 port_mode = EXT_GPHY; 545 port_mode = EXT_GPHY;
498 break; 546 break;
499 case PHY_INTERFACE_MODE_MII: 547 case PHY_INTERFACE_MODE_MII:
500 str = "MII";
501 port_mode = EXT_EPHY; 548 port_mode = EXT_EPHY;
502 break; 549 break;
503 case PHY_INTERFACE_MODE_REVMII: 550 case PHY_INTERFACE_MODE_REVMII:
504 str = "Reverse MII";
505 port_mode = EXT_REVMII; 551 port_mode = EXT_REVMII;
506 break; 552 break;
507 default: 553 default:
508 /* All other PHYs: internal and MoCA */ 554 /* all other PHYs: internal and MoCA */
509 goto force_link;
510 }
511
512 /* If the link is down, just disable the interface to conserve power */
513 if (!phydev->link) {
514 reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
515 reg &= ~RGMII_MODE_EN;
516 reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
517 goto force_link; 555 goto force_link;
518 } 556 }
519 557
520 /* Clear id_mode_dis bit, and the existing port mode, but 558 /* Clear id_mode_dis bit, and the existing port mode, let
521 * make sure we enable the RGMII block for data to pass 559 * RGMII_MODE_EN bet set by mac_link_{up,down}
522 */ 560 */
523 reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); 561 reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
524 reg &= ~ID_MODE_DIS; 562 reg &= ~ID_MODE_DIS;
525 reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT); 563 reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
526 reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN); 564 reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
527 565
528 reg |= port_mode | RGMII_MODE_EN; 566 reg |= port_mode;
529 if (id_mode_dis) 567 if (id_mode_dis)
530 reg |= ID_MODE_DIS; 568 reg |= ID_MODE_DIS;
531 569
532 if (phydev->pause) { 570 if (state->pause & MLO_PAUSE_TXRX_MASK) {
533 if (phydev->asym_pause) 571 if (state->pause & MLO_PAUSE_TX)
534 reg |= TX_PAUSE_EN; 572 reg |= TX_PAUSE_EN;
535 reg |= RX_PAUSE_EN; 573 reg |= RX_PAUSE_EN;
536 } 574 }
537 575
538 reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); 576 reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
539 577
540 pr_info("Port %d configured for %s\n", port, str);
541
542force_link: 578force_link:
543 /* Force link settings detected from the PHY */ 579 /* Force link settings detected from the PHY */
544 reg = SW_OVERRIDE; 580 reg = SW_OVERRIDE;
545 switch (phydev->speed) { 581 switch (state->speed) {
546 case SPEED_1000: 582 case SPEED_1000:
547 reg |= SPDSTS_1000 << SPEED_SHIFT; 583 reg |= SPDSTS_1000 << SPEED_SHIFT;
548 break; 584 break;
@@ -551,33 +587,61 @@ force_link:
551 break; 587 break;
552 } 588 }
553 589
554 if (phydev->link) 590 if (state->link)
555 reg |= LINK_STS; 591 reg |= LINK_STS;
556 if (phydev->duplex == DUPLEX_FULL) 592 if (state->duplex == DUPLEX_FULL)
557 reg |= DUPLX_MODE; 593 reg |= DUPLX_MODE;
558 594
559 core_writel(priv, reg, offset); 595 core_writel(priv, reg, offset);
560
561 if (!phydev->is_pseudo_fixed_link)
562 p->eee_enabled = b53_eee_init(ds, port, phydev);
563} 596}
564 597
565static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, 598static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
566 struct fixed_phy_status *status) 599 phy_interface_t interface, bool link)
567{ 600{
568 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 601 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
569 u32 duplex, pause, offset;
570 u32 reg; 602 u32 reg;
571 603
572 if (priv->type == BCM7445_DEVICE_ID) 604 if (!phy_interface_mode_is_rgmii(interface) &&
573 offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); 605 interface != PHY_INTERFACE_MODE_MII &&
606 interface != PHY_INTERFACE_MODE_REVMII)
607 return;
608
609 /* If the link is down, just disable the interface to conserve power */
610 reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
611 if (link)
612 reg |= RGMII_MODE_EN;
574 else 613 else
575 offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); 614 reg &= ~RGMII_MODE_EN;
615 reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
616}
617
618static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
619 unsigned int mode,
620 phy_interface_t interface)
621{
622 bcm_sf2_sw_mac_link_set(ds, port, interface, false);
623}
624
625static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
626 unsigned int mode,
627 phy_interface_t interface,
628 struct phy_device *phydev)
629{
630 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
631 struct ethtool_eee *p = &priv->dev->ports[port].eee;
632
633 bcm_sf2_sw_mac_link_set(ds, port, interface, true);
634
635 if (mode == MLO_AN_PHY && phydev)
636 p->eee_enabled = b53_eee_init(ds, port, phydev);
637}
576 638
577 duplex = core_readl(priv, CORE_DUPSTS); 639static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
578 pause = core_readl(priv, CORE_PAUSESTS); 640 struct phylink_link_state *status)
641{
642 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
579 643
580 status->link = 0; 644 status->link = false;
581 645
582 /* MoCA port is special as we do not get link status from CORE_LNKSTS, 646 /* MoCA port is special as we do not get link status from CORE_LNKSTS,
583 * which means that we need to force the link at the port override 647 * which means that we need to force the link at the port override
@@ -596,28 +660,10 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
596 */ 660 */
597 if (!status->link) 661 if (!status->link)
598 netif_carrier_off(ds->ports[port].slave); 662 netif_carrier_off(ds->ports[port].slave);
599 status->duplex = 1; 663 status->duplex = DUPLEX_FULL;
600 } else { 664 } else {
601 status->link = 1; 665 status->link = true;
602 status->duplex = !!(duplex & (1 << port));
603 }
604
605 reg = core_readl(priv, offset);
606 reg |= SW_OVERRIDE;
607 if (status->link)
608 reg |= LINK_STS;
609 else
610 reg &= ~LINK_STS;
611 core_writel(priv, reg, offset);
612
613 if ((pause & (1 << port)) &&
614 (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
615 status->asym_pause = 1;
616 status->pause = 1;
617 } 666 }
618
619 if (pause & (1 << port))
620 status->pause = 1;
621} 667}
622 668
623static void bcm_sf2_enable_acb(struct dsa_switch *ds) 669static void bcm_sf2_enable_acb(struct dsa_switch *ds)
@@ -656,7 +702,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
656static int bcm_sf2_sw_resume(struct dsa_switch *ds) 702static int bcm_sf2_sw_resume(struct dsa_switch *ds)
657{ 703{
658 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 704 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
659 unsigned int port;
660 int ret; 705 int ret;
661 706
662 ret = bcm_sf2_sw_rst(priv); 707 ret = bcm_sf2_sw_rst(priv);
@@ -668,14 +713,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
668 if (priv->hw_params.num_gphy == 1) 713 if (priv->hw_params.num_gphy == 1)
669 bcm_sf2_gphy_enable_set(ds, true); 714 bcm_sf2_gphy_enable_set(ds, true);
670 715
671 for (port = 0; port < DSA_MAX_PORTS; port++) { 716 ds->ops->setup(ds);
672 if (dsa_is_user_port(ds, port))
673 bcm_sf2_port_setup(ds, port, NULL);
674 else if (dsa_is_cpu_port(ds, port))
675 bcm_sf2_imp_setup(ds, port);
676 }
677
678 bcm_sf2_enable_acb(ds);
679 717
680 return 0; 718 return 0;
681} 719}
@@ -859,9 +897,13 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
859 .get_strings = b53_get_strings, 897 .get_strings = b53_get_strings,
860 .get_ethtool_stats = b53_get_ethtool_stats, 898 .get_ethtool_stats = b53_get_ethtool_stats,
861 .get_sset_count = b53_get_sset_count, 899 .get_sset_count = b53_get_sset_count,
900 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
862 .get_phy_flags = bcm_sf2_sw_get_phy_flags, 901 .get_phy_flags = bcm_sf2_sw_get_phy_flags,
863 .adjust_link = bcm_sf2_sw_adjust_link, 902 .phylink_validate = bcm_sf2_sw_validate,
864 .fixed_link_update = bcm_sf2_sw_fixed_link_update, 903 .phylink_mac_config = bcm_sf2_sw_mac_config,
904 .phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
905 .phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
906 .phylink_fixed_state = bcm_sf2_sw_fixed_state,
865 .suspend = bcm_sf2_sw_suspend, 907 .suspend = bcm_sf2_sw_suspend,
866 .resume = bcm_sf2_sw_resume, 908 .resume = bcm_sf2_sw_resume,
867 .get_wol = bcm_sf2_sw_get_wol, 909 .get_wol = bcm_sf2_sw_get_wol,
@@ -1064,14 +1106,14 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
1064 bcm_sf2_intr_disable(priv); 1106 bcm_sf2_intr_disable(priv);
1065 1107
1066 ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0, 1108 ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
1067 "switch_0", priv); 1109 "switch_0", ds);
1068 if (ret < 0) { 1110 if (ret < 0) {
1069 pr_err("failed to request switch_0 IRQ\n"); 1111 pr_err("failed to request switch_0 IRQ\n");
1070 goto out_mdio; 1112 goto out_mdio;
1071 } 1113 }
1072 1114
1073 ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0, 1115 ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
1074 "switch_1", priv); 1116 "switch_1", ds);
1075 if (ret < 0) { 1117 if (ret < 0) {
1076 pr_err("failed to request switch_1 IRQ\n"); 1118 pr_err("failed to request switch_1 IRQ\n");
1077 goto out_mdio; 1119 goto out_mdio;
@@ -1122,10 +1164,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
1122{ 1164{
1123 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); 1165 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1124 1166
1125 /* Disable all ports and interrupts */
1126 priv->wol_ports_mask = 0; 1167 priv->wol_ports_mask = 0;
1127 bcm_sf2_sw_suspend(priv->dev->ds);
1128 dsa_unregister_switch(priv->dev->ds); 1168 dsa_unregister_switch(priv->dev->ds);
1169 /* Disable all ports and interrupts */
1170 bcm_sf2_sw_suspend(priv->dev->ds);
1129 bcm_sf2_mdio_unregister(priv); 1171 bcm_sf2_mdio_unregister(priv);
1130 1172
1131 return 0; 1173 return 0;
@@ -1148,16 +1190,14 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
1148#ifdef CONFIG_PM_SLEEP 1190#ifdef CONFIG_PM_SLEEP
1149static int bcm_sf2_suspend(struct device *dev) 1191static int bcm_sf2_suspend(struct device *dev)
1150{ 1192{
1151 struct platform_device *pdev = to_platform_device(dev); 1193 struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
1152 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1153 1194
1154 return dsa_switch_suspend(priv->dev->ds); 1195 return dsa_switch_suspend(priv->dev->ds);
1155} 1196}
1156 1197
1157static int bcm_sf2_resume(struct device *dev) 1198static int bcm_sf2_resume(struct device *dev)
1158{ 1199{
1159 struct platform_device *pdev = to_platform_device(dev); 1200 struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
1160 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1161 1201
1162 return dsa_switch_resume(priv->dev->ds); 1202 return dsa_switch_resume(priv->dev->ds);
1163} 1203}
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index b89acaee12d4..47c5f272a084 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -732,6 +732,8 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
732 struct ethtool_rx_flow_spec *fs) 732 struct ethtool_rx_flow_spec *fs)
733{ 733{
734 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 734 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
735 s8 cpu_port = ds->ports[port].cpu_dp->index;
736 __u64 ring_cookie = fs->ring_cookie;
735 unsigned int queue_num, port_num; 737 unsigned int queue_num, port_num;
736 int ret = -EINVAL; 738 int ret = -EINVAL;
737 739
@@ -748,21 +750,28 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
748 fs->location > bcm_sf2_cfp_rule_size(priv)) 750 fs->location > bcm_sf2_cfp_rule_size(priv))
749 return -EINVAL; 751 return -EINVAL;
750 752
753 /* This rule is a Wake-on-LAN filter and we must specifically
754 * target the CPU port in order for it to be working.
755 */
756 if (ring_cookie == RX_CLS_FLOW_WAKE)
757 ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;
758
751 /* We do not support discarding packets, check that the 759 /* We do not support discarding packets, check that the
752 * destination port is enabled and that we are within the 760 * destination port is enabled and that we are within the
753 * number of ports supported by the switch 761 * number of ports supported by the switch
754 */ 762 */
755 port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES; 763 port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;
756 764
757 if (fs->ring_cookie == RX_CLS_FLOW_DISC || 765 if (ring_cookie == RX_CLS_FLOW_DISC ||
758 !dsa_is_user_port(ds, port_num) || 766 !(dsa_is_user_port(ds, port_num) ||
767 dsa_is_cpu_port(ds, port_num)) ||
759 port_num >= priv->hw_params.num_ports) 768 port_num >= priv->hw_params.num_ports)
760 return -EINVAL; 769 return -EINVAL;
761 /* 770 /*
762 * We have a small oddity where Port 6 just does not have a 771 * We have a small oddity where Port 6 just does not have a
763 * valid bit here (so we substract by one). 772 * valid bit here (so we substract by one).
764 */ 773 */
765 queue_num = fs->ring_cookie % SF2_NUM_EGRESS_QUEUES; 774 queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
766 if (port_num >= 7) 775 if (port_num >= 7)
767 port_num -= 1; 776 port_num -= 1;
768 777
@@ -1187,6 +1196,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
1187int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, 1196int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
1188 struct ethtool_rxnfc *nfc, u32 *rule_locs) 1197 struct ethtool_rxnfc *nfc, u32 *rule_locs)
1189{ 1198{
1199 struct net_device *p = ds->ports[port].cpu_dp->master;
1190 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 1200 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
1191 int ret = 0; 1201 int ret = 0;
1192 1202
@@ -1213,12 +1223,23 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
1213 1223
1214 mutex_unlock(&priv->cfp.lock); 1224 mutex_unlock(&priv->cfp.lock);
1215 1225
1226 if (ret)
1227 return ret;
1228
1229 /* Pass up the commands to the attached master network device */
1230 if (p->ethtool_ops->get_rxnfc) {
1231 ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
1232 if (ret == -EOPNOTSUPP)
1233 ret = 0;
1234 }
1235
1216 return ret; 1236 return ret;
1217} 1237}
1218 1238
1219int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, 1239int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
1220 struct ethtool_rxnfc *nfc) 1240 struct ethtool_rxnfc *nfc)
1221{ 1241{
1242 struct net_device *p = ds->ports[port].cpu_dp->master;
1222 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 1243 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
1223 int ret = 0; 1244 int ret = 0;
1224 1245
@@ -1239,6 +1260,23 @@ int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
1239 1260
1240 mutex_unlock(&priv->cfp.lock); 1261 mutex_unlock(&priv->cfp.lock);
1241 1262
1263 if (ret)
1264 return ret;
1265
1266 /* Pass up the commands to the attached master network device.
1267 * This can fail, so rollback the operation if we need to.
1268 */
1269 if (p->ethtool_ops->set_rxnfc) {
1270 ret = p->ethtool_ops->set_rxnfc(p, nfc);
1271 if (ret && ret != -EOPNOTSUPP) {
1272 mutex_lock(&priv->cfp.lock);
1273 bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
1274 mutex_unlock(&priv->cfp.lock);
1275 } else {
1276 ret = 0;
1277 }
1278 }
1279
1242 return ret; 1280 return ret;
1243} 1281}
1244 1282
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 3ccd5a865dcb..0a1e530d52b7 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -168,6 +168,8 @@ enum bcm_sf2_reg_offs {
168#define CORE_SWITCH_CTRL 0x00088 168#define CORE_SWITCH_CTRL 0x00088
169#define MII_DUMB_FWDG_EN (1 << 6) 169#define MII_DUMB_FWDG_EN (1 << 6)
170 170
171#define CORE_DIS_LEARN 0x000f0
172
171#define CORE_SFT_LRN_CTRL 0x000f8 173#define CORE_SFT_LRN_CTRL 0x000f8
172#define SW_LEARN_CNTL(x) (1 << (x)) 174#define SW_LEARN_CNTL(x) (1 << (x))
173 175
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index f77be9f85cb3..816f34d64736 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -67,7 +67,7 @@ static struct phy_device *phydevs[PHY_MAX_ADDR];
67static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds, 67static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds,
68 int port) 68 int port)
69{ 69{
70 dev_dbg(ds->dev, "%s\n", __func__); 70 dev_dbg(ds->dev, "%s: port: %d\n", __func__, port);
71 71
72 return DSA_TAG_PROTO_NONE; 72 return DSA_TAG_PROTO_NONE;
73} 73}
@@ -86,16 +86,23 @@ static int dsa_loop_setup(struct dsa_switch *ds)
86 return 0; 86 return 0;
87} 87}
88 88
89static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port) 89static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port, int sset)
90{ 90{
91 if (sset != ETH_SS_STATS && sset != ETH_SS_PHY_STATS)
92 return 0;
93
91 return __DSA_LOOP_CNT_MAX; 94 return __DSA_LOOP_CNT_MAX;
92} 95}
93 96
94static void dsa_loop_get_strings(struct dsa_switch *ds, int port, uint8_t *data) 97static void dsa_loop_get_strings(struct dsa_switch *ds, int port,
98 u32 stringset, uint8_t *data)
95{ 99{
96 struct dsa_loop_priv *ps = ds->priv; 100 struct dsa_loop_priv *ps = ds->priv;
97 unsigned int i; 101 unsigned int i;
98 102
103 if (stringset != ETH_SS_STATS && stringset != ETH_SS_PHY_STATS)
104 return;
105
99 for (i = 0; i < __DSA_LOOP_CNT_MAX; i++) 106 for (i = 0; i < __DSA_LOOP_CNT_MAX; i++)
100 memcpy(data + i * ETH_GSTRING_LEN, 107 memcpy(data + i * ETH_GSTRING_LEN,
101 ps->ports[port].mib[i].name, ETH_GSTRING_LEN); 108 ps->ports[port].mib[i].name, ETH_GSTRING_LEN);
@@ -117,8 +124,6 @@ static int dsa_loop_phy_read(struct dsa_switch *ds, int port, int regnum)
117 struct mii_bus *bus = ps->bus; 124 struct mii_bus *bus = ps->bus;
118 int ret; 125 int ret;
119 126
120 dev_dbg(ds->dev, "%s\n", __func__);
121
122 ret = mdiobus_read_nested(bus, ps->port_base + port, regnum); 127 ret = mdiobus_read_nested(bus, ps->port_base + port, regnum);
123 if (ret < 0) 128 if (ret < 0)
124 ps->ports[port].mib[DSA_LOOP_PHY_READ_ERR].val++; 129 ps->ports[port].mib[DSA_LOOP_PHY_READ_ERR].val++;
@@ -135,8 +140,6 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
135 struct mii_bus *bus = ps->bus; 140 struct mii_bus *bus = ps->bus;
136 int ret; 141 int ret;
137 142
138 dev_dbg(ds->dev, "%s\n", __func__);
139
140 ret = mdiobus_write_nested(bus, ps->port_base + port, regnum, value); 143 ret = mdiobus_write_nested(bus, ps->port_base + port, regnum, value);
141 if (ret < 0) 144 if (ret < 0)
142 ps->ports[port].mib[DSA_LOOP_PHY_WRITE_ERR].val++; 145 ps->ports[port].mib[DSA_LOOP_PHY_WRITE_ERR].val++;
@@ -149,7 +152,8 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
149static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port, 152static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
150 struct net_device *bridge) 153 struct net_device *bridge)
151{ 154{
152 dev_dbg(ds->dev, "%s\n", __func__); 155 dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
156 __func__, port, bridge->name);
153 157
154 return 0; 158 return 0;
155} 159}
@@ -157,19 +161,22 @@ static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
157static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port, 161static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port,
158 struct net_device *bridge) 162 struct net_device *bridge)
159{ 163{
160 dev_dbg(ds->dev, "%s\n", __func__); 164 dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
165 __func__, port, bridge->name);
161} 166}
162 167
163static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port, 168static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
164 u8 state) 169 u8 state)
165{ 170{
166 dev_dbg(ds->dev, "%s\n", __func__); 171 dev_dbg(ds->dev, "%s: port: %d, state: %d\n",
172 __func__, port, state);
167} 173}
168 174
169static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port, 175static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
170 bool vlan_filtering) 176 bool vlan_filtering)
171{ 177{
172 dev_dbg(ds->dev, "%s\n", __func__); 178 dev_dbg(ds->dev, "%s: port: %d, vlan_filtering: %d\n",
179 __func__, port, vlan_filtering);
173 180
174 return 0; 181 return 0;
175} 182}
@@ -181,7 +188,8 @@ dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
181 struct dsa_loop_priv *ps = ds->priv; 188 struct dsa_loop_priv *ps = ds->priv;
182 struct mii_bus *bus = ps->bus; 189 struct mii_bus *bus = ps->bus;
183 190
184 dev_dbg(ds->dev, "%s\n", __func__); 191 dev_dbg(ds->dev, "%s: port: %d, vlan: %d-%d",
192 __func__, port, vlan->vid_begin, vlan->vid_end);
185 193
186 /* Just do a sleeping operation to make lockdep checks effective */ 194 /* Just do a sleeping operation to make lockdep checks effective */
187 mdiobus_read(bus, ps->port_base + port, MII_BMSR); 195 mdiobus_read(bus, ps->port_base + port, MII_BMSR);
@@ -202,8 +210,6 @@ static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
202 struct dsa_loop_vlan *vl; 210 struct dsa_loop_vlan *vl;
203 u16 vid; 211 u16 vid;
204 212
205 dev_dbg(ds->dev, "%s\n", __func__);
206
207 /* Just do a sleeping operation to make lockdep checks effective */ 213 /* Just do a sleeping operation to make lockdep checks effective */
208 mdiobus_read(bus, ps->port_base + port, MII_BMSR); 214 mdiobus_read(bus, ps->port_base + port, MII_BMSR);
209 215
@@ -215,6 +221,9 @@ static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
215 vl->untagged |= BIT(port); 221 vl->untagged |= BIT(port);
216 else 222 else
217 vl->untagged &= ~BIT(port); 223 vl->untagged &= ~BIT(port);
224
225 dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
226 __func__, port, vid, untagged ? "un" : "", pvid);
218 } 227 }
219 228
220 if (pvid) 229 if (pvid)
@@ -230,8 +239,6 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
230 struct dsa_loop_vlan *vl; 239 struct dsa_loop_vlan *vl;
231 u16 vid, pvid = ps->pvid; 240 u16 vid, pvid = ps->pvid;
232 241
233 dev_dbg(ds->dev, "%s\n", __func__);
234
235 /* Just do a sleeping operation to make lockdep checks effective */ 242 /* Just do a sleeping operation to make lockdep checks effective */
236 mdiobus_read(bus, ps->port_base + port, MII_BMSR); 243 mdiobus_read(bus, ps->port_base + port, MII_BMSR);
237 244
@@ -244,6 +251,9 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
244 251
245 if (pvid == vid) 252 if (pvid == vid)
246 pvid = 1; 253 pvid = 1;
254
255 dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
256 __func__, port, vid, untagged ? "un" : "", pvid);
247 } 257 }
248 ps->pvid = pvid; 258 ps->pvid = pvid;
249 259
@@ -256,6 +266,7 @@ static const struct dsa_switch_ops dsa_loop_driver = {
256 .get_strings = dsa_loop_get_strings, 266 .get_strings = dsa_loop_get_strings,
257 .get_ethtool_stats = dsa_loop_get_ethtool_stats, 267 .get_ethtool_stats = dsa_loop_get_ethtool_stats,
258 .get_sset_count = dsa_loop_get_sset_count, 268 .get_sset_count = dsa_loop_get_sset_count,
269 .get_ethtool_phy_stats = dsa_loop_get_ethtool_stats,
259 .phy_read = dsa_loop_phy_read, 270 .phy_read = dsa_loop_phy_read,
260 .phy_write = dsa_loop_phy_write, 271 .phy_write = dsa_loop_phy_write,
261 .port_bridge_join = dsa_loop_port_bridge_join, 272 .port_bridge_join = dsa_loop_port_bridge_join,
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index fefa454f3e56..b4f6e1a67dd9 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -977,10 +977,14 @@ static const struct lan9303_mib_desc lan9303_mib[] = {
977 { .offset = LAN9303_MAC_TX_LATECOL_0, .name = "TxLateCol", }, 977 { .offset = LAN9303_MAC_TX_LATECOL_0, .name = "TxLateCol", },
978}; 978};
979 979
980static void lan9303_get_strings(struct dsa_switch *ds, int port, uint8_t *data) 980static void lan9303_get_strings(struct dsa_switch *ds, int port,
981 u32 stringset, uint8_t *data)
981{ 982{
982 unsigned int u; 983 unsigned int u;
983 984
985 if (stringset != ETH_SS_STATS)
986 return;
987
984 for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) { 988 for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
985 strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name, 989 strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name,
986 ETH_GSTRING_LEN); 990 ETH_GSTRING_LEN);
@@ -1007,8 +1011,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
1007 } 1011 }
1008} 1012}
1009 1013
1010static int lan9303_get_sset_count(struct dsa_switch *ds, int port) 1014static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset)
1011{ 1015{
1016 if (sset != ETH_SS_STATS)
1017 return 0;
1018
1012 return ARRAY_SIZE(lan9303_mib); 1019 return ARRAY_SIZE(lan9303_mib);
1013} 1020}
1014 1021
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
new file mode 100644
index 000000000000..693a67f45bef
--- /dev/null
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -0,0 +1,1167 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Lantiq / Intel GSWIP switch driver for VRX200 SoCs
4 *
5 * Copyright (C) 2010 Lantiq Deutschland
6 * Copyright (C) 2012 John Crispin <john@phrozen.org>
7 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
8 */
9
10#include <linux/clk.h>
11#include <linux/etherdevice.h>
12#include <linux/firmware.h>
13#include <linux/if_bridge.h>
14#include <linux/if_vlan.h>
15#include <linux/iopoll.h>
16#include <linux/mfd/syscon.h>
17#include <linux/module.h>
18#include <linux/of_mdio.h>
19#include <linux/of_net.h>
20#include <linux/of_platform.h>
21#include <linux/phy.h>
22#include <linux/phylink.h>
23#include <linux/platform_device.h>
24#include <linux/regmap.h>
25#include <linux/reset.h>
26#include <net/dsa.h>
27#include <dt-bindings/mips/lantiq_rcu_gphy.h>
28
29#include "lantiq_pce.h"
30
31/* GSWIP MDIO Registers */
32#define GSWIP_MDIO_GLOB 0x00
33#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
34#define GSWIP_MDIO_CTRL 0x08
35#define GSWIP_MDIO_CTRL_BUSY BIT(12)
36#define GSWIP_MDIO_CTRL_RD BIT(11)
37#define GSWIP_MDIO_CTRL_WR BIT(10)
38#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
39#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
40#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
41#define GSWIP_MDIO_READ 0x09
42#define GSWIP_MDIO_WRITE 0x0A
43#define GSWIP_MDIO_MDC_CFG0 0x0B
44#define GSWIP_MDIO_MDC_CFG1 0x0C
45#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
46#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
47#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
48#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
49#define GSWIP_MDIO_PHY_LINK_UP 0x2000
50#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
51#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
52#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
53#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
54#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
55#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
56#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
57#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
58#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
59#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
60#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
61#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
62#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
63#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
64#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
65#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
66#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
67#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
68#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
69 GSWIP_MDIO_PHY_FCONRX_MASK | \
70 GSWIP_MDIO_PHY_FCONTX_MASK | \
71 GSWIP_MDIO_PHY_LINK_MASK | \
72 GSWIP_MDIO_PHY_SPEED_MASK | \
73 GSWIP_MDIO_PHY_FDUP_MASK)
74
75/* GSWIP MII Registers */
76#define GSWIP_MII_CFG0 0x00
77#define GSWIP_MII_CFG1 0x02
78#define GSWIP_MII_CFG5 0x04
79#define GSWIP_MII_CFG_EN BIT(14)
80#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
81#define GSWIP_MII_CFG_MODE_MIIP 0x0
82#define GSWIP_MII_CFG_MODE_MIIM 0x1
83#define GSWIP_MII_CFG_MODE_RMIIP 0x2
84#define GSWIP_MII_CFG_MODE_RMIIM 0x3
85#define GSWIP_MII_CFG_MODE_RGMII 0x4
86#define GSWIP_MII_CFG_MODE_MASK 0xf
87#define GSWIP_MII_CFG_RATE_M2P5 0x00
88#define GSWIP_MII_CFG_RATE_M25 0x10
89#define GSWIP_MII_CFG_RATE_M125 0x20
90#define GSWIP_MII_CFG_RATE_M50 0x30
91#define GSWIP_MII_CFG_RATE_AUTO 0x40
92#define GSWIP_MII_CFG_RATE_MASK 0x70
93#define GSWIP_MII_PCDU0 0x01
94#define GSWIP_MII_PCDU1 0x03
95#define GSWIP_MII_PCDU5 0x05
96#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
97#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
98
99/* GSWIP Core Registers */
100#define GSWIP_SWRES 0x000
101#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
102#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
103#define GSWIP_VERSION 0x013
104#define GSWIP_VERSION_REV_SHIFT 0
105#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
106#define GSWIP_VERSION_MOD_SHIFT 8
107#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
108#define GSWIP_VERSION_2_0 0x100
109#define GSWIP_VERSION_2_1 0x021
110#define GSWIP_VERSION_2_2 0x122
111#define GSWIP_VERSION_2_2_ETC 0x022
112
113#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
114#define GSWIP_BM_RAM_ADDR 0x044
115#define GSWIP_BM_RAM_CTRL 0x045
116#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
117#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
118#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
119#define GSWIP_BM_QUEUE_GCTRL 0x04A
120#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
121/* buffer management Port Configuration Register */
122#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
123#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
124#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingres Special Tag RMON count */
125/* buffer management Port Control Register */
126#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
127#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
128#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
129
130/* PCE */
131#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
132#define GSWIP_PCE_TBL_MASK 0x448
133#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
134#define GSWIP_PCE_TBL_ADDR 0x44E
135#define GSWIP_PCE_TBL_CTRL 0x44F
136#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
137#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
138#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
139#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
140#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
141#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
142#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
143#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
144#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
145#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
146#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
147#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
148#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
149#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
150#define GSWIP_PCE_GCTRL_0 0x456
151#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
152#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
153#define GSWIP_PCE_GCTRL_1 0x457
154#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
155#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
156#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
157#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11)
158#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
159#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
160#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
161#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
162#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
163#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
164
165#define GSWIP_MAC_FLEN 0x8C5
166#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
167#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Lnegth */
168
169/* Ethernet Switch Fetch DMA Port Control Register */
170#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
171#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
172#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
173#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
174#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
175#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
176#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
177#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
178#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
179
/* Ethernet Switch Store DMA Port Control Register */
#define GSWIP_SDMA_PCTRLp(p)	(0xBC0 + ((p) * 0x6))
#define GSWIP_SDMA_PCTRL_EN	BIT(0)	/* SDMA Port Enable */
#define GSWIP_SDMA_PCTRL_FCEN	BIT(1)	/* Flow Control Enable */
/* Pause Frame Forwarding; BIT(1) would alias GSWIP_SDMA_PCTRL_FCEN */
#define GSWIP_SDMA_PCTRL_PAUFWD	BIT(3)
185
186#define XRX200_GPHY_FW_ALIGN (16 * 1024)
187
188struct gswip_hw_info {
189 int max_ports;
190 int cpu_port;
191};
192
193struct xway_gphy_match_data {
194 char *fe_firmware_name;
195 char *ge_firmware_name;
196};
197
198struct gswip_gphy_fw {
199 struct clk *clk_gate;
200 struct reset_control *reset;
201 u32 fw_addr_offset;
202 char *fw_name;
203};
204
205struct gswip_priv {
206 __iomem void *gswip;
207 __iomem void *mdio;
208 __iomem void *mii;
209 const struct gswip_hw_info *hw_info;
210 const struct xway_gphy_match_data *gphy_fw_name_cfg;
211 struct dsa_switch *ds;
212 struct device *dev;
213 struct regmap *rcu_regmap;
214 int num_gphy_fw;
215 struct gswip_gphy_fw *gphy_fw;
216};
217
218struct gswip_rmon_cnt_desc {
219 unsigned int size;
220 unsigned int offset;
221 const char *name;
222};
223
224#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
225
226static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
227 /** Receive Packet Count (only packets that are accepted and not discarded). */
228 MIB_DESC(1, 0x1F, "RxGoodPkts"),
229 MIB_DESC(1, 0x23, "RxUnicastPkts"),
230 MIB_DESC(1, 0x22, "RxMulticastPkts"),
231 MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
232 MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
233 MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
234 MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
235 MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
236 MIB_DESC(1, 0x20, "RxGoodPausePkts"),
237 MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
238 MIB_DESC(1, 0x12, "Rx64BytePkts"),
239 MIB_DESC(1, 0x13, "Rx127BytePkts"),
240 MIB_DESC(1, 0x14, "Rx255BytePkts"),
241 MIB_DESC(1, 0x15, "Rx511BytePkts"),
242 MIB_DESC(1, 0x16, "Rx1023BytePkts"),
243 /** Receive Size 1024-1522 (or more, if configured) Packet Count. */
244 MIB_DESC(1, 0x17, "RxMaxBytePkts"),
245 MIB_DESC(1, 0x18, "RxDroppedPkts"),
246 MIB_DESC(1, 0x19, "RxFilteredPkts"),
247 MIB_DESC(2, 0x24, "RxGoodBytes"),
248 MIB_DESC(2, 0x26, "RxBadBytes"),
249 MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
250 MIB_DESC(1, 0x0C, "TxGoodPkts"),
251 MIB_DESC(1, 0x06, "TxUnicastPkts"),
252 MIB_DESC(1, 0x07, "TxMulticastPkts"),
253 MIB_DESC(1, 0x00, "Tx64BytePkts"),
254 MIB_DESC(1, 0x01, "Tx127BytePkts"),
255 MIB_DESC(1, 0x02, "Tx255BytePkts"),
256 MIB_DESC(1, 0x03, "Tx511BytePkts"),
257 MIB_DESC(1, 0x04, "Tx1023BytePkts"),
258 /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
259 MIB_DESC(1, 0x05, "TxMaxBytePkts"),
260 MIB_DESC(1, 0x08, "TxSingleCollCount"),
261 MIB_DESC(1, 0x09, "TxMultCollCount"),
262 MIB_DESC(1, 0x0A, "TxLateCollCount"),
263 MIB_DESC(1, 0x0B, "TxExcessCollCount"),
264 MIB_DESC(1, 0x0D, "TxPauseCount"),
265 MIB_DESC(1, 0x10, "TxDroppedPkts"),
266 MIB_DESC(2, 0x0E, "TxGoodBytes"),
267};
268
/* Read a 32-bit GSWIP switch-core register; @offset is in 32-bit words. */
static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->gswip + (offset * 4));
}
273
/* Write a 32-bit GSWIP switch-core register; @offset is in 32-bit words. */
static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->gswip + (offset * 4));
}
278
279static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
280 u32 offset)
281{
282 u32 val = gswip_switch_r(priv, offset);
283
284 val &= ~(clear);
285 val |= set;
286 gswip_switch_w(priv, val, offset);
287}
288
/* Poll a switch-core register until all bits in @cleared read back as 0.
 * Polls every 20us for up to 50ms. Returns 0 on success; on timeout the
 * negative errno from readx_poll_timeout() is returned through the u32
 * return type, so callers only test the result for non-zero.
 */
static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
				  u32 cleared)
{
	u32 val;

	return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
				  (val & cleared) == 0, 20, 50000);
}
297
/* Read a 32-bit register of the GSWIP MDIO block; @offset is in 32-bit words. */
static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->mdio + (offset * 4));
}
302
/* Write a 32-bit register of the GSWIP MDIO block; @offset is in 32-bit words. */
static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->mdio + (offset * 4));
}
307
308static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
309 u32 offset)
310{
311 u32 val = gswip_mdio_r(priv, offset);
312
313 val &= ~(clear);
314 val |= set;
315 gswip_mdio_w(priv, val, offset);
316}
317
/* Read a 32-bit register of the xMII block; @offset is in 32-bit words. */
static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->mii + (offset * 4));
}
322
/* Write a 32-bit register of the xMII block; @offset is in 32-bit words. */
static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->mii + (offset * 4));
}
327
328static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
329 u32 offset)
330{
331 u32 val = gswip_mii_r(priv, offset);
332
333 val &= ~(clear);
334 val |= set;
335 gswip_mii_w(priv, val, offset);
336}
337
338static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
339 int port)
340{
341 switch (port) {
342 case 0:
343 gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
344 break;
345 case 1:
346 gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
347 break;
348 case 5:
349 gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
350 break;
351 }
352}
353
354static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
355 int port)
356{
357 switch (port) {
358 case 0:
359 gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
360 break;
361 case 1:
362 gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
363 break;
364 case 5:
365 gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
366 break;
367 }
368}
369
370static int gswip_mdio_poll(struct gswip_priv *priv)
371{
372 int cnt = 100;
373
374 while (likely(cnt--)) {
375 u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
376
377 if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
378 return 0;
379 usleep_range(20, 40);
380 }
381
382 return -ETIMEDOUT;
383}
384
/* mii_bus write op: write @val to PHY @addr, register @reg via the
 * GSWIP-internal MDIO controller. Waits for a previous transaction to
 * finish but does not wait for this write itself to complete.
 * Returns 0 or a negative errno.
 */
static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	/* Latch the data word, then kick off the transaction. */
	gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	return 0;
}
404
/* mii_bus read op: read PHY @addr, register @reg via the GSWIP-internal
 * MDIO controller. Waits for the controller to go idle before and after
 * issuing the read, then returns the result register (or a negative
 * errno on timeout).
 */
static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	/* Wait for the transaction to finish before reading the result. */
	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	return gswip_mdio_r(priv, GSWIP_MDIO_READ);
}
429
/* Allocate and register the GSWIP-internal MDIO bus as the DSA slave
 * MII bus, so the DSA core can probe the PHYs listed in @mdio_np.
 * Returns 0 or a negative errno.
 */
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
	struct dsa_switch *ds = priv->ds;

	ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
	if (!ds->slave_mii_bus)
		return -ENOMEM;

	ds->slave_mii_bus->priv = priv;
	ds->slave_mii_bus->read = gswip_mdio_rd;
	ds->slave_mii_bus->write = gswip_mdio_wr;
	ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
		 dev_name(priv->dev));
	ds->slave_mii_bus->parent = priv->dev;
	/* Only scan addresses the DSA core says carry PHYs. */
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;

	return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
}
449
/* Bring up one switch port: enable RMON counters, the fetch/store DMA
 * engines and PCE ingress handling; for user ports additionally program
 * the port MAC for MDIO auto polling of the attached PHY.
 * @phydev may be NULL for the CPU port (that branch is skipped).
 * Always returns 0.
 */
static int gswip_port_enable(struct dsa_switch *ds, int port,
			     struct phy_device *phydev)
{
	struct gswip_priv *priv = ds->priv;

	/* RMON Counter Enable for port */
	gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));

	/* enable port fetch/store dma & VLAN Modification */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
				   GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));
	gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
			  GSWIP_PCE_PCTRL_0p(port));

	if (!dsa_is_cpu_port(ds, port)) {
		/* Let the hardware track link/speed/duplex/flow-control
		 * from the PHY at phydev's MDIO address.
		 */
		u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
			      GSWIP_MDIO_PHY_SPEED_AUTO |
			      GSWIP_MDIO_PHY_FDUP_AUTO |
			      GSWIP_MDIO_PHY_FCONTX_AUTO |
			      GSWIP_MDIO_PHY_FCONRX_AUTO |
			      (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);

		gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
		/* Activate MDIO auto polling */
		gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
	}

	return 0;
}
482
/* Shut down one switch port: force the PHY link down and stop MDIO auto
 * polling (user ports only), then disable the fetch/store DMA engines.
 * Also called from gswip_setup() with @phy == NULL to quiesce all ports.
 */
static void gswip_port_disable(struct dsa_switch *ds, int port,
			       struct phy_device *phy)
{
	struct gswip_priv *priv = ds->priv;

	if (!dsa_is_cpu_port(ds, port)) {
		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
				GSWIP_MDIO_PHY_LINK_MASK,
				GSWIP_MDIO_PHYp(port));
		/* Deactivate MDIO auto polling */
		gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
	}

	gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
			  GSWIP_SDMA_PCTRLp(port));
}
501
/* Load the PCE (Packet Classification Engine) microcode table into the
 * switch, one row (four 16-bit values) at a time, then mark the
 * microcode as valid. Returns 0 on success or a negative errno if a
 * table write does not complete in time.
 */
static int gswip_pce_load_microcode(struct gswip_priv *priv)
{
	int i;
	int err;

	/* Select address-based write mode for the PCE table. */
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
	gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);

	for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
		gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
		gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
			       GSWIP_PCE_TBL_VAL(0));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
			       GSWIP_PCE_TBL_VAL(1));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
			       GSWIP_PCE_TBL_VAL(2));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
			       GSWIP_PCE_TBL_VAL(3));

		/* start the table access: */
		gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
				  GSWIP_PCE_TBL_CTRL);
		/* BAS self-clears once the row has been committed. */
		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
					     GSWIP_PCE_TBL_CTRL_BAS);
		if (err)
			return err;
	}

	/* tell the switch that the microcode is loaded */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
			  GSWIP_PCE_GCTRL_0);

	return 0;
}
538
/* One-time switch bring-up for the DSA core: reset the hardware, load
 * the PCE microcode, steer unknown traffic to the CPU port, set up the
 * MDIO clock, disable all xMII links (phylink re-enables them on link
 * up) and enable special-tag handling on the CPU port.
 * Returns 0 on success or a negative errno.
 */
static int gswip_setup(struct dsa_switch *ds)
{
	struct gswip_priv *priv = ds->priv;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int i;
	int err;

	/* Hardware reset, held for 5-10ms before release. */
	gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
	usleep_range(5000, 10000);
	gswip_switch_w(priv, 0, GSWIP_SWRES);

	/* disable port fetch/store dma on all ports */
	for (i = 0; i < priv->hw_info->max_ports; i++)
		gswip_port_disable(ds, i, NULL);

	/* enable Switch */
	gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);

	err = gswip_pce_load_microcode(priv);
	if (err) {
		dev_err(priv->dev, "writing PCE microcode failed, %i", err);
		return err;
	}

	/* Default unknown Broadcast/Multicast/Unicast port maps */
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);

	/* disable PHY auto polling */
	gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
	/* Configure the MDIO Clock 2.5 MHz */
	gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);

	/* Disable the xMII link */
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);

	/* enable special tag insertion on cpu port */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
			  GSWIP_FDMA_PCTRLp(cpu_port));

	/* Accept frames that grow by the 8-byte special tag. */
	gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
			  GSWIP_MAC_CTRL_2p(cpu_port));
	gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
	gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
			  GSWIP_BM_QUEUE_GCTRL);

	/* VLAN aware Switching */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);

	/* Mac Address Table Lock */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_1_MAC_GLOCK |
				   GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD,
			  GSWIP_PCE_GCTRL_1);

	gswip_port_enable(ds, cpu_port, NULL);
	return 0;
}
599
/* All ports use the GSWIP special tag; @port is irrelevant. */
static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
						    int port)
{
	return DSA_TAG_PROTO_GSWIP;
}
605
/* phylink validate op: intersect @supported/@state->advertising with
 * what the given port can do. Ports 0/1 take (R)(G)MII variants, ports
 * 2-4 only the internal GPHY interface, port 5 RGMII or internal.
 * MII/REVMII are capped below gigabit; all other modes may advertise
 * 1000BASE-T. Unsupported ports/interfaces zero the supported mask.
 */
static void gswip_phylink_validate(struct dsa_switch *ds, int port,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	switch (port) {
	case 0:
	case 1:
		if (!phy_interface_mode_is_rgmii(state->interface) &&
		    state->interface != PHY_INTERFACE_MODE_MII &&
		    state->interface != PHY_INTERFACE_MODE_REVMII &&
		    state->interface != PHY_INTERFACE_MODE_RMII)
			goto unsupported;
		break;
	case 2:
	case 3:
	case 4:
		if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
			goto unsupported;
		break;
	case 5:
		if (!phy_interface_mode_is_rgmii(state->interface) &&
		    state->interface != PHY_INTERFACE_MODE_INTERNAL)
			goto unsupported;
		break;
	default:
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		dev_err(ds->dev, "Unsupported port: %i\n", port);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	return;

unsupported:
	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
	dev_err(ds->dev, "Unsupported interface: %d\n", state->interface);
	return;
}
669
/* phylink mac_config op: program the port's xMII mode from the phylink
 * state and clear the PCDU RX/TX clock-delay fields for the RGMII *ID
 * interface variants. Unsupported interfaces are logged and left
 * unconfigured.
 */
static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     const struct phylink_link_state *state)
{
	struct gswip_priv *priv = ds->priv;
	u32 miicfg = 0;

	miicfg |= GSWIP_MII_CFG_LDCLKDIS;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_INTERNAL:
		miicfg |= GSWIP_MII_CFG_MODE_MIIM;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		miicfg |= GSWIP_MII_CFG_MODE_MIIP;
		break;
	case PHY_INTERFACE_MODE_RMII:
		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		miicfg |= GSWIP_MII_CFG_MODE_RGMII;
		break;
	default:
		dev_err(ds->dev,
			"Unsupported interface: %d\n", state->interface);
		return;
	}
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);

	/* When the 'id' variants ask the PHY side to add the delay,
	 * zero the MAC-side delay fields.
	 */
	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
					  GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_RXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
		break;
	default:
		break;
	}
}
718
/* phylink mac_link_down op: disable the port's xMII interface. */
static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int mode,
					phy_interface_t interface)
{
	struct gswip_priv *priv = ds->priv;

	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
}
727
/* phylink mac_link_up op: re-enable the port's xMII interface, but only
 * for external interfaces; the internal GPHY link needs no xMII enable.
 */
static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int mode,
				      phy_interface_t interface,
				      struct phy_device *phydev)
{
	struct gswip_priv *priv = ds->priv;

	/* Enable the xMII interface only for the external PHY */
	if (interface != PHY_INTERFACE_MODE_INTERNAL)
		gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
739
740static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
741 uint8_t *data)
742{
743 int i;
744
745 if (stringset != ETH_SS_STATS)
746 return;
747
748 for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
749 strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
750 ETH_GSTRING_LEN);
751}
752
/* Read one entry from the BM RAM: @table selects the (per-port) counter
 * table, @index the entry within it. The 32-bit result is assembled
 * from the two 16-bit VAL registers. Returns 0 if the table access
 * times out (the error is only logged).
 */
static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
				    u32 index)
{
	u32 result;
	int err;

	gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
	gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
			  GSWIP_BM_RAM_CTRL_OPMOD,
			  table | GSWIP_BM_RAM_CTRL_BAS,
			  GSWIP_BM_RAM_CTRL);

	/* BAS self-clears once the RAM access has completed. */
	err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
				     GSWIP_BM_RAM_CTRL_BAS);
	if (err) {
		dev_err(priv->dev, "timeout while reading table: %u, index: %u",
			table, index);
		return 0;
	}

	result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
	result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;

	return result;
}
778
/* ethtool get_ethtool_stats op: fill @data with the counters described
 * by gswip_rmon_cnt[]. 64-bit counters (size == 2) occupy two
 * consecutive hardware entries, low word first.
 */
static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
				    uint64_t *data)
{
	struct gswip_priv *priv = ds->priv;
	const struct gswip_rmon_cnt_desc *rmon_cnt;
	int i;
	u64 high;

	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
		rmon_cnt = &gswip_rmon_cnt[i];

		data[i] = gswip_bcm_ram_entry_read(priv, port,
						   rmon_cnt->offset);
		if (rmon_cnt->size == 2) {
			high = gswip_bcm_ram_entry_read(priv, port,
							rmon_cnt->offset + 1);
			data[i] |= high << 32;
		}
	}
}
799
800static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
801{
802 if (sset != ETH_SS_STATS)
803 return 0;
804
805 return ARRAY_SIZE(gswip_rmon_cnt);
806}
807
/* DSA driver operations implemented by the GSWIP driver. */
static const struct dsa_switch_ops gswip_switch_ops = {
	.get_tag_protocol	= gswip_get_tag_protocol,
	.setup			= gswip_setup,
	.port_enable		= gswip_port_enable,
	.port_disable		= gswip_port_disable,
	.phylink_validate	= gswip_phylink_validate,
	.phylink_mac_config	= gswip_phylink_mac_config,
	.phylink_mac_link_down	= gswip_phylink_mac_link_down,
	.phylink_mac_link_up	= gswip_phylink_mac_link_up,
	.get_strings		= gswip_get_strings,
	.get_ethtool_stats	= gswip_get_ethtool_stats,
	.get_sset_count		= gswip_get_sset_count,
};
821
822static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
823 .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
824 .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
825};
826
827static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
828 .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
829 .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
830};
831
832static const struct xway_gphy_match_data xrx300_gphy_data = {
833 .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
834 .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
835};
836
837static const struct of_device_id xway_gphy_match[] = {
838 { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
839 { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
840 { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
841 { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
842 { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
843 {},
844};
845
/* Load one GPHY firmware image: gate the core clock on, hold the GPHY
 * in reset, copy the firmware into a 16 kB-aligned DMA buffer, point
 * the RCU at it and release the reset. Returns 0 or a negative errno.
 *
 * NOTE(review): on the error paths after clk_prepare_enable() the clock
 * is left enabled and the reset asserted; this appears to rely on
 * gswip_gphy_fw_remove() for cleanup — confirm against the probe error
 * path before "fixing", as adding a disable here could double-disable.
 */
static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
{
	struct device *dev = priv->dev;
	const struct firmware *fw;
	void *fw_addr;
	dma_addr_t dma_addr;
	dma_addr_t dev_addr;
	size_t size;
	int ret;

	ret = clk_prepare_enable(gphy_fw->clk_gate);
	if (ret)
		return ret;

	reset_control_assert(gphy_fw->reset);

	ret = request_firmware(&fw, gphy_fw->fw_name, dev);
	if (ret) {
		dev_err(dev, "failed to load firmware: %s, error: %i\n",
			gphy_fw->fw_name, ret);
		return ret;
	}

	/* GPHY cores need the firmware code in a persistent and contiguous
	 * memory area with a 16 kB boundary aligned start address.
	 */
	size = fw->size + XRX200_GPHY_FW_ALIGN;

	fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
	if (fw_addr) {
		/* Align both the CPU and device view of the buffer. */
		fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
		dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
		memcpy(fw_addr, fw->data, fw->size);
	} else {
		dev_err(dev, "failed to alloc firmware memory\n");
		release_firmware(fw);
		return -ENOMEM;
	}

	release_firmware(fw);

	/* Tell the RCU where the GPHY should fetch its firmware from. */
	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
	if (ret)
		return ret;

	reset_control_deassert(gphy_fw->reset);

	return ret;
}
895
/* Parse one "gphyN" firmware node: look up its gate clock, RCU address
 * offset and reset line, pick the FE or GE firmware image from the
 * match data (GE by default), then load the firmware.
 * Returns 0 or a negative errno (including -EPROBE_DEFER for the reset).
 */
static int gswip_gphy_fw_probe(struct gswip_priv *priv,
			       struct gswip_gphy_fw *gphy_fw,
			       struct device_node *gphy_fw_np, int i)
{
	struct device *dev = priv->dev;
	u32 gphy_mode;
	int ret;
	char gphyname[10];

	snprintf(gphyname, sizeof(gphyname), "gphy%d", i);

	gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
	if (IS_ERR(gphy_fw->clk_gate)) {
		dev_err(dev, "Failed to lookup gate clock\n");
		return PTR_ERR(gphy_fw->clk_gate);
	}

	/* "reg" is the firmware-address register offset inside the RCU. */
	ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
	if (ret)
		return ret;

	ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
	/* Default to GE mode */
	if (ret)
		gphy_mode = GPHY_MODE_GE;

	switch (gphy_mode) {
	case GPHY_MODE_FE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
		break;
	case GPHY_MODE_GE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
		break;
	default:
		dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
		return -EINVAL;
	}

	gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
	if (IS_ERR(gphy_fw->reset)) {
		if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
			dev_err(dev, "Failed to lookup gphy reset\n");
		return PTR_ERR(gphy_fw->reset);
	}

	return gswip_gphy_fw_load(priv, gphy_fw);
}
943
/* Undo gswip_gphy_fw_probe()/gswip_gphy_fw_load() for one GPHY core:
 * clear the firmware address in the RCU block, gate the clock and put
 * the reset line. Safe to call on a slot that never finished probing.
 */
static void gswip_gphy_fw_remove(struct gswip_priv *priv,
				 struct gswip_gphy_fw *gphy_fw)
{
	int ret;

	/* check if the device was fully probed */
	if (!gphy_fw->fw_name)
		return;

	/* Zero the firmware address register; failure is only logged
	 * since there is nothing more to do on the remove path.
	 */
	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
	if (ret)
		dev_err(priv->dev, "can not reset GPHY FW pointer");

	clk_disable_unprepare(gphy_fw->clk_gate);

	reset_control_put(gphy_fw->reset);
}
961
962static int gswip_gphy_fw_list(struct gswip_priv *priv,
963 struct device_node *gphy_fw_list_np, u32 version)
964{
965 struct device *dev = priv->dev;
966 struct device_node *gphy_fw_np;
967 const struct of_device_id *match;
968 int err;
969 int i = 0;
970
971 /* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
972 * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
973 * needs a different GPHY firmware.
974 */
975 if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
976 switch (version) {
977 case GSWIP_VERSION_2_0:
978 priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
979 break;
980 case GSWIP_VERSION_2_1:
981 priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
982 break;
983 default:
984 dev_err(dev, "unknown GSWIP version: 0x%x", version);
985 return -ENOENT;
986 }
987 }
988
989 match = of_match_node(xway_gphy_match, gphy_fw_list_np);
990 if (match && match->data)
991 priv->gphy_fw_name_cfg = match->data;
992
993 if (!priv->gphy_fw_name_cfg) {
994 dev_err(dev, "GPHY compatible type not supported");
995 return -ENOENT;
996 }
997
998 priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
999 if (!priv->num_gphy_fw)
1000 return -ENOENT;
1001
1002 priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
1003 "lantiq,rcu");
1004 if (IS_ERR(priv->rcu_regmap))
1005 return PTR_ERR(priv->rcu_regmap);
1006
1007 priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
1008 sizeof(*priv->gphy_fw),
1009 GFP_KERNEL | __GFP_ZERO);
1010 if (!priv->gphy_fw)
1011 return -ENOMEM;
1012
1013 for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
1014 err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
1015 gphy_fw_np, i);
1016 if (err)
1017 goto remove_gphy;
1018 i++;
1019 }
1020
1021 return 0;
1022
1023remove_gphy:
1024 for (i = 0; i < priv->num_gphy_fw; i++)
1025 gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
1026 return err;
1027}
1028
1029static int gswip_probe(struct platform_device *pdev)
1030{
1031 struct gswip_priv *priv;
1032 struct resource *gswip_res, *mdio_res, *mii_res;
1033 struct device_node *mdio_np, *gphy_fw_np;
1034 struct device *dev = &pdev->dev;
1035 int err;
1036 int i;
1037 u32 version;
1038
1039 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1040 if (!priv)
1041 return -ENOMEM;
1042
1043 gswip_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1044 priv->gswip = devm_ioremap_resource(dev, gswip_res);
1045 if (IS_ERR(priv->gswip))
1046 return PTR_ERR(priv->gswip);
1047
1048 mdio_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1049 priv->mdio = devm_ioremap_resource(dev, mdio_res);
1050 if (IS_ERR(priv->mdio))
1051 return PTR_ERR(priv->mdio);
1052
1053 mii_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1054 priv->mii = devm_ioremap_resource(dev, mii_res);
1055 if (IS_ERR(priv->mii))
1056 return PTR_ERR(priv->mii);
1057
1058 priv->hw_info = of_device_get_match_data(dev);
1059 if (!priv->hw_info)
1060 return -EINVAL;
1061
1062 priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports);
1063 if (!priv->ds)
1064 return -ENOMEM;
1065
1066 priv->ds->priv = priv;
1067 priv->ds->ops = &gswip_switch_ops;
1068 priv->dev = dev;
1069 version = gswip_switch_r(priv, GSWIP_VERSION);
1070
1071 /* bring up the mdio bus */
1072 gphy_fw_np = of_find_compatible_node(pdev->dev.of_node, NULL,
1073 "lantiq,gphy-fw");
1074 if (gphy_fw_np) {
1075 err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
1076 if (err) {
1077 dev_err(dev, "gphy fw probe failed\n");
1078 return err;
1079 }
1080 }
1081
1082 /* bring up the mdio bus */
1083 mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
1084 "lantiq,xrx200-mdio");
1085 if (mdio_np) {
1086 err = gswip_mdio(priv, mdio_np);
1087 if (err) {
1088 dev_err(dev, "mdio probe failed\n");
1089 goto gphy_fw;
1090 }
1091 }
1092
1093 err = dsa_register_switch(priv->ds);
1094 if (err) {
1095 dev_err(dev, "dsa switch register failed: %i\n", err);
1096 goto mdio_bus;
1097 }
1098 if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
1099 dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
1100 priv->hw_info->cpu_port);
1101 err = -EINVAL;
1102 goto mdio_bus;
1103 }
1104
1105 platform_set_drvdata(pdev, priv);
1106
1107 dev_info(dev, "probed GSWIP version %lx mod %lx\n",
1108 (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
1109 (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
1110 return 0;
1111
1112mdio_bus:
1113 if (mdio_np)
1114 mdiobus_unregister(priv->ds->slave_mii_bus);
1115gphy_fw:
1116 for (i = 0; i < priv->num_gphy_fw; i++)
1117 gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
1118 return err;
1119}
1120
/* Platform remove: disable the switch core, unregister the DSA switch
 * and the MDIO bus, then tear down all GPHY firmware slots. The order
 * mirrors the reverse of gswip_probe().
 */
static int gswip_remove(struct platform_device *pdev)
{
	struct gswip_priv *priv = platform_get_drvdata(pdev);
	int i;

	/* drvdata is only set once probe fully succeeded */
	if (!priv)
		return 0;

	/* disable the switch */
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);

	dsa_unregister_switch(priv->ds);

	/* the MDIO bus only exists when the devicetree described one */
	if (priv->ds->slave_mii_bus)
		mdiobus_unregister(priv->ds->slave_mii_bus);

	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);

	return 0;
}
1142
/* Hardware description for the xRX200 family: 7 ports, CPU port 6. */
static const struct gswip_hw_info gswip_xrx200 = {
	.max_ports = 7,
	.cpu_port = 6,
};

/* OF match table; .data points at the per-SoC hardware description. */
static const struct of_device_id gswip_of_match[] = {
	{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
	{},
};
MODULE_DEVICE_TABLE(of, gswip_of_match);

static struct platform_driver gswip_driver = {
	.probe = gswip_probe,
	.remove = gswip_remove,
	.driver = {
		.name = "gswip",
		.of_match_table = gswip_of_match,
	},
};

module_platform_driver(gswip_driver);

MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lantiq_pce.h b/drivers/net/dsa/lantiq_pce.h
new file mode 100644
index 000000000000..180663138e75
--- /dev/null
+++ b/drivers/net/dsa/lantiq_pce.h
@@ -0,0 +1,153 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * PCE microcode extracted from UGW 7.1.1 switch api
4 *
5 * Copyright (c) 2012, 2014, 2015 Lantiq Deutschland GmbH
6 * Copyright (C) 2012 John Crispin <john@phrozen.org>
7 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
8 */
9
/* Output field selectors used in the MC_ENTRY "out" column below;
 * OUT_NONE (63) means no output field is written. The numbers in the
 * inline comments are the decimal value of the neighbouring entry.
 */
enum {
	OUT_MAC0 = 0,
	OUT_MAC1,
	OUT_MAC2,
	OUT_MAC3,
	OUT_MAC4,
	OUT_MAC5,
	OUT_ETHTYP,
	OUT_VTAG0,
	OUT_VTAG1,
	OUT_ITAG0,
	OUT_ITAG1, /*10 */
	OUT_ITAG2,
	OUT_ITAG3,
	OUT_IP0,
	OUT_IP1,
	OUT_IP2,
	OUT_IP3,
	OUT_SIP0,
	OUT_SIP1,
	OUT_SIP2,
	OUT_SIP3, /*20*/
	OUT_SIP4,
	OUT_SIP5,
	OUT_SIP6,
	OUT_SIP7,
	OUT_DIP0,
	OUT_DIP1,
	OUT_DIP2,
	OUT_DIP3,
	OUT_DIP4,
	OUT_DIP5, /*30*/
	OUT_DIP6,
	OUT_DIP7,
	OUT_SESID,
	OUT_PROT,
	OUT_APP0,
	OUT_APP1,
	OUT_IGMP0,
	OUT_IGMP1,
	OUT_IPOFF, /*39*/
	OUT_NONE = 63,
};
53
/* parser's microcode length type (the "type" column of MC_ENTRY) */
#define INSTR 0
#define IPV6 1
#define LENACCU 2

/* parser's microcode flag type (the "flags" column of MC_ENTRY) */
enum {
	FLAG_ITAG = 0,
	FLAG_VLAN,
	FLAG_SNAP,
	FLAG_PPPOE,
	FLAG_IPV6,
	FLAG_IPV6FL,
	FLAG_IPV4,
	FLAG_IGMP,
	FLAG_TU,
	FLAG_HOP,
	FLAG_NN1, /*10 */
	FLAG_NN2,
	FLAG_END,
	FLAG_NO, /*13*/
};
76
/* One PCE parser microcode word as four 16-bit register values,
 * written in descending order (val_3 first).
 */
struct gswip_pce_microcode {
	u16 val_3;
	u16 val_2;
	u16 val_1;
	u16 val_0;
};

/* Pack one microcode entry:
 *   val_3 = compare value, val_2 = compare mask,
 *   val_1 = next state (ns) | output field (out) | length MSBs,
 *   val_0 = length LSB | length type | flags | ipv4_len.
 */
#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
	{ val, msk, ((ns) << 10 | (out) << 4 | (len) >> 1),\
		((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 }

/* Microcode table extracted from the vendor switch API (see file
 * header); do not edit entries by hand — the state numbers in the
 * "ns" column index other rows of this same table.
 */
static const struct gswip_pce_microcode gswip_pce_microcode[] = {
	/*      value    mask    ns  fields      L  type     flags       ipv4_len */
	MC_ENTRY(0x88c3, 0xFFFF,  1, OUT_ITAG0,  4, INSTR,   FLAG_ITAG,  0),
	MC_ENTRY(0x8100, 0xFFFF,  2, OUT_VTAG0,  2, INSTR,   FLAG_VLAN,  0),
	MC_ENTRY(0x88A8, 0xFFFF,  1, OUT_VTAG0,  2, INSTR,   FLAG_VLAN,  0),
	MC_ENTRY(0x8100, 0xFFFF,  1, OUT_VTAG0,  2, INSTR,   FLAG_VLAN,  0),
	MC_ENTRY(0x8864, 0xFFFF, 17, OUT_ETHTYP, 1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0800, 0xFFFF, 21, OUT_ETHTYP, 1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x86DD, 0xFFFF, 22, OUT_ETHTYP, 1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x8863, 0xFFFF, 16, OUT_ETHTYP, 1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0xF800, 10, OUT_NONE,   0, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x0000, 40, OUT_ETHTYP, 1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0600, 0x0600, 40, OUT_ETHTYP, 1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x0000, 12, OUT_NONE,   1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0xAAAA, 0xFFFF, 14, OUT_NONE,   1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0300, 0xFF00, 41, OUT_NONE,   0, INSTR,   FLAG_SNAP,  0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_DIP7,   3, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x0000, 18, OUT_DIP7,   3, INSTR,   FLAG_PPPOE, 0),
	MC_ENTRY(0x0021, 0xFFFF, 21, OUT_NONE,   1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0057, 0xFFFF, 22, OUT_NONE,   1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE,   0, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x4000, 0xF000, 24, OUT_IP0,    4, INSTR,   FLAG_IPV4,  1),
	MC_ENTRY(0x6000, 0xF000, 27, OUT_IP0,    3, INSTR,   FLAG_IPV6,  0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x0000, 25, OUT_IP3,    2, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x0000, 26, OUT_SIP0,   4, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE,   0, LENACCU, FLAG_NO,    0),
	MC_ENTRY(0x1100, 0xFF00, 39, OUT_PROT,   1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0600, 0xFF00, 39, OUT_PROT,   1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0xFF00, 33, OUT_IP3,   17, INSTR,   FLAG_HOP,   0),
	MC_ENTRY(0x2B00, 0xFF00, 33, OUT_IP3,   17, INSTR,   FLAG_NN1,   0),
	MC_ENTRY(0x3C00, 0xFF00, 33, OUT_IP3,   17, INSTR,   FLAG_NN2,   0),
	MC_ENTRY(0x0000, 0x0000, 39, OUT_PROT,   1, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x00E0, 35, OUT_NONE,   0, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE,   0, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0xFF00, 33, OUT_NONE,   0, IPV6,    FLAG_HOP,   0),
	MC_ENTRY(0x2B00, 0xFF00, 33, OUT_NONE,   0, IPV6,    FLAG_NN1,   0),
	MC_ENTRY(0x3C00, 0xFF00, 33, OUT_NONE,   0, IPV6,    FLAG_NN2,   0),
	MC_ENTRY(0x0000, 0x0000, 40, OUT_PROT,   1, IPV6,    FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x0000, 40, OUT_SIP0,  16, INSTR,   FLAG_NO,    0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_APP0,   4, INSTR,   FLAG_IGMP,  0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
	MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE,   0, INSTR,   FLAG_END,   0),
};
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index bcb3e6c734f2..86b6464b4525 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -439,15 +439,22 @@ static void ksz_disable_port(struct dsa_switch *ds, int port,
439 ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true); 439 ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true);
440} 440}
441 441
442static int ksz_sset_count(struct dsa_switch *ds, int port) 442static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
443{ 443{
444 if (sset != ETH_SS_STATS)
445 return 0;
446
444 return TOTAL_SWITCH_COUNTER_NUM; 447 return TOTAL_SWITCH_COUNTER_NUM;
445} 448}
446 449
447static void ksz_get_strings(struct dsa_switch *ds, int port, uint8_t *buf) 450static void ksz_get_strings(struct dsa_switch *ds, int port,
451 u32 stringset, uint8_t *buf)
448{ 452{
449 int i; 453 int i;
450 454
455 if (stringset != ETH_SS_STATS)
456 return;
457
451 for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { 458 for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
452 memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string, 459 memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string,
453 ETH_GSTRING_LEN); 460 ETH_GSTRING_LEN);
@@ -1095,17 +1102,21 @@ static const struct ksz_chip_data ksz_switch_chips[] = {
1095 .cpu_ports = 0x7F, /* can be configured as cpu port */ 1102 .cpu_ports = 0x7F, /* can be configured as cpu port */
1096 .port_cnt = 7, /* total physical port count */ 1103 .port_cnt = 7, /* total physical port count */
1097 }, 1104 },
1105 {
1106 .chip_id = 0x00989700,
1107 .dev_name = "KSZ9897",
1108 .num_vlans = 4096,
1109 .num_alus = 4096,
1110 .num_statics = 16,
1111 .cpu_ports = 0x7F, /* can be configured as cpu port */
1112 .port_cnt = 7, /* total physical port count */
1113 },
1098}; 1114};
1099 1115
1100static int ksz_switch_init(struct ksz_device *dev) 1116static int ksz_switch_init(struct ksz_device *dev)
1101{ 1117{
1102 int i; 1118 int i;
1103 1119
1104 mutex_init(&dev->reg_mutex);
1105 mutex_init(&dev->stats_mutex);
1106 mutex_init(&dev->alu_mutex);
1107 mutex_init(&dev->vlan_mutex);
1108
1109 dev->ds->ops = &ksz_switch_ops; 1120 dev->ds->ops = &ksz_switch_ops;
1110 1121
1111 for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) { 1122 for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
@@ -1190,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev)
1190 if (dev->pdata) 1201 if (dev->pdata)
1191 dev->chip_id = dev->pdata->chip_id; 1202 dev->chip_id = dev->pdata->chip_id;
1192 1203
1204 mutex_init(&dev->reg_mutex);
1205 mutex_init(&dev->stats_mutex);
1206 mutex_init(&dev->alu_mutex);
1207 mutex_init(&dev->vlan_mutex);
1208
1193 if (ksz_switch_detect(dev)) 1209 if (ksz_switch_detect(dev))
1194 return -EINVAL; 1210 return -EINVAL;
1195 1211
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index c51946983bed..8c1778b42701 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -195,6 +195,7 @@ static int ksz_spi_remove(struct spi_device *spi)
195 195
196static const struct of_device_id ksz_dt_ids[] = { 196static const struct of_device_id ksz_dt_ids[] = {
197 { .compatible = "microchip,ksz9477" }, 197 { .compatible = "microchip,ksz9477" },
198 { .compatible = "microchip,ksz9897" },
198 {}, 199 {},
199}; 200};
200MODULE_DEVICE_TABLE(of, ksz_dt_ids); 201MODULE_DEVICE_TABLE(of, ksz_dt_ids);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 80a4dbc3a499..a5de9bffe5be 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -573,10 +573,14 @@ static int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum,
573} 573}
574 574
575static void 575static void
576mt7530_get_strings(struct dsa_switch *ds, int port, uint8_t *data) 576mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
577 uint8_t *data)
577{ 578{
578 int i; 579 int i;
579 580
581 if (stringset != ETH_SS_STATS)
582 return;
583
580 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) 584 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
581 strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name, 585 strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name,
582 ETH_GSTRING_LEN); 586 ETH_GSTRING_LEN);
@@ -604,8 +608,11 @@ mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
604} 608}
605 609
606static int 610static int
607mt7530_get_sset_count(struct dsa_switch *ds, int port) 611mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
608{ 612{
613 if (sset != ETH_SS_STATS)
614 return 0;
615
609 return ARRAY_SIZE(mt7530_mib); 616 return ARRAY_SIZE(mt7530_mib);
610} 617}
611 618
@@ -651,11 +658,7 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
651 if (phydev->asym_pause) 658 if (phydev->asym_pause)
652 rmt_adv |= LPA_PAUSE_ASYM; 659 rmt_adv |= LPA_PAUSE_ASYM;
653 660
654 if (phydev->advertising & ADVERTISED_Pause) 661 lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
655 lcl_adv |= ADVERTISE_PAUSE_CAP;
656 if (phydev->advertising & ADVERTISED_Asym_Pause)
657 lcl_adv |= ADVERTISE_PAUSE_ASYM;
658
659 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); 662 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
660 663
661 if (flowctrl & FLOW_CTRL_TX) 664 if (flowctrl & FLOW_CTRL_TX)
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 65f10fec25b3..0b3e51f248c2 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -116,8 +116,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
116 /* Reset the switch. */ 116 /* Reset the switch. */
117 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, 117 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
118 GLOBAL_ATU_CONTROL_SWRESET | 118 GLOBAL_ATU_CONTROL_SWRESET |
119 GLOBAL_ATU_CONTROL_ATUSIZE_1024 | 119 GLOBAL_ATU_CONTROL_LEARNDIS);
120 GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
121 120
122 /* Wait up to one second for reset to complete. */ 121 /* Wait up to one second for reset to complete. */
123 timeout = jiffies + 1 * HZ; 122 timeout = jiffies + 1 * HZ;
@@ -142,13 +141,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
142 */ 141 */
143 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536); 142 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
144 143
145 /* Enable automatic address learning, set the address 144 /* Disable automatic address learning.
146 * database size to 1024 entries, and set the default aging
147 * time to 5 minutes.
148 */ 145 */
149 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, 146 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
150 GLOBAL_ATU_CONTROL_ATUSIZE_1024 | 147 GLOBAL_ATU_CONTROL_LEARNDIS);
151 GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
152 148
153 return 0; 149 return 0;
154} 150}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 5b4374f21d76..24fb6a685039 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -28,9 +28,11 @@
28#include <linux/of_device.h> 28#include <linux/of_device.h>
29#include <linux/of_irq.h> 29#include <linux/of_irq.h>
30#include <linux/of_mdio.h> 30#include <linux/of_mdio.h>
31#include <linux/platform_data/mv88e6xxx.h>
31#include <linux/netdevice.h> 32#include <linux/netdevice.h>
32#include <linux/gpio/consumer.h> 33#include <linux/gpio/consumer.h>
33#include <linux/phy.h> 34#include <linux/phy.h>
35#include <linux/phylink.h>
34#include <net/dsa.h> 36#include <net/dsa.h>
35 37
36#include "chip.h" 38#include "chip.h"
@@ -341,6 +343,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
341 .xlate = irq_domain_xlate_twocell, 343 .xlate = irq_domain_xlate_twocell,
342}; 344};
343 345
346/* To be called with reg_lock held */
344static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip) 347static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
345{ 348{
346 int irq, virq; 349 int irq, virq;
@@ -360,9 +363,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
360 363
361static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip) 364static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
362{ 365{
363 mv88e6xxx_g1_irq_free_common(chip); 366 /*
364 367 * free_irq must be called without reg_lock taken because the irq
368 * handler takes this lock, too.
369 */
365 free_irq(chip->irq, chip); 370 free_irq(chip->irq, chip);
371
372 mutex_lock(&chip->reg_lock);
373 mv88e6xxx_g1_irq_free_common(chip);
374 mutex_unlock(&chip->reg_lock);
366} 375}
367 376
368static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip) 377static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
@@ -425,7 +434,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
425 434
426 err = request_threaded_irq(chip->irq, NULL, 435 err = request_threaded_irq(chip->irq, NULL,
427 mv88e6xxx_g1_irq_thread_fn, 436 mv88e6xxx_g1_irq_thread_fn,
428 IRQF_ONESHOT, 437 IRQF_ONESHOT | IRQF_SHARED,
429 dev_name(chip->dev), chip); 438 dev_name(chip->dev), chip);
430 if (err) 439 if (err)
431 mv88e6xxx_g1_irq_free_common(chip); 440 mv88e6xxx_g1_irq_free_common(chip);
@@ -467,10 +476,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
467 476
468static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip) 477static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
469{ 478{
470 mv88e6xxx_g1_irq_free_common(chip);
471
472 kthread_cancel_delayed_work_sync(&chip->irq_poll_work); 479 kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
473 kthread_destroy_worker(chip->kworker); 480 kthread_destroy_worker(chip->kworker);
481
482 mutex_lock(&chip->reg_lock);
483 mv88e6xxx_g1_irq_free_common(chip);
484 mutex_unlock(&chip->reg_lock);
474} 485}
475 486
476int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask) 487int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
@@ -513,7 +524,7 @@ int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update)
513} 524}
514 525
515static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, 526static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
516 int link, int speed, int duplex, 527 int link, int speed, int duplex, int pause,
517 phy_interface_t mode) 528 phy_interface_t mode)
518{ 529{
519 int err; 530 int err;
@@ -532,6 +543,12 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
532 goto restore_link; 543 goto restore_link;
533 } 544 }
534 545
546 if (chip->info->ops->port_set_pause) {
547 err = chip->info->ops->port_set_pause(chip, port, pause);
548 if (err)
549 goto restore_link;
550 }
551
535 if (chip->info->ops->port_set_duplex) { 552 if (chip->info->ops->port_set_duplex) {
536 err = chip->info->ops->port_set_duplex(chip, port, duplex); 553 err = chip->info->ops->port_set_duplex(chip, port, duplex);
537 if (err && err != -EOPNOTSUPP) 554 if (err && err != -EOPNOTSUPP)
@@ -558,6 +575,13 @@ restore_link:
558 return err; 575 return err;
559} 576}
560 577
/* Return non-zero when @port is served by one of the chip's built-in
 * PHYs (ports numbered below num_internal_phys).
 */
static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_chip *chip = ds->priv;

	return port < chip->info->num_internal_phys;
}
584
561/* We expect the switch to perform auto negotiation if there is a real 585/* We expect the switch to perform auto negotiation if there is a real
562 * phy. However, in the case of a fixed link phy, we force the port 586 * phy. However, in the case of a fixed link phy, we force the port
563 * settings from the fixed link settings. 587 * settings from the fixed link settings.
@@ -568,18 +592,187 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
568 struct mv88e6xxx_chip *chip = ds->priv; 592 struct mv88e6xxx_chip *chip = ds->priv;
569 int err; 593 int err;
570 594
571 if (!phy_is_pseudo_fixed_link(phydev)) 595 if (!phy_is_pseudo_fixed_link(phydev) &&
596 mv88e6xxx_phy_is_internal(ds, port))
572 return; 597 return;
573 598
574 mutex_lock(&chip->reg_lock); 599 mutex_lock(&chip->reg_lock);
575 err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed, 600 err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed,
576 phydev->duplex, phydev->interface); 601 phydev->duplex, phydev->pause,
602 phydev->interface);
603 mutex_unlock(&chip->reg_lock);
604
605 if (err && err != -EOPNOTSUPP)
606 dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
607}
608
/* Lowest common denominator phylink validation: add the 10/100 Mbit/s
 * link modes, which are only usable outside of 802.3z operation.
 */
static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
				       unsigned long *mask,
				       struct phylink_link_state *state)
{
	if (!phy_interface_mode_is_8023z(state->interface)) {
		/* 10M and 100M are only supported in non-802.3z mode */
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
	}
}
621
/* 6185 family: gigabit capable on top of the common 10/100 modes. */
static void mv88e6185_phylink_validate(struct mv88e6xxx_chip *chip, int port,
				       unsigned long *mask,
				       struct phylink_link_state *state)
{
	/* FIXME: if the port is in 1000Base-X mode, then it only supports
	 * 1000M FD speeds. In this case, CMODE will indicate 5.
	 */
	phylink_set(mask, 1000baseT_Full);
	phylink_set(mask, 1000baseX_Full);

	mv88e6065_phylink_validate(chip, port, mask, state);
}
634
/* 6352 family: gigabit on top of the common modes; the chip's 200Mbps
 * capability cannot be expressed since ethtool has no bit for it.
 */
static void mv88e6352_phylink_validate(struct mv88e6xxx_chip *chip, int port,
				       unsigned long *mask,
				       struct phylink_link_state *state)
{
	/* No ethtool bits for 200Mbps */
	phylink_set(mask, 1000baseT_Full);
	phylink_set(mask, 1000baseX_Full);

	mv88e6065_phylink_validate(chip, port, mask, state);
}
645
/* 6390 family: ports 9 and 10 additionally support 2500Base-X. */
static void mv88e6390_phylink_validate(struct mv88e6xxx_chip *chip, int port,
				       unsigned long *mask,
				       struct phylink_link_state *state)
{
	if (port >= 9)
		phylink_set(mask, 2500baseX_Full);

	/* No ethtool bits for 200Mbps */
	phylink_set(mask, 1000baseT_Full);
	phylink_set(mask, 1000baseX_Full);

	mv88e6065_phylink_validate(chip, port, mask, state);
}
659
/* 6390X: like 6390 but ports 9 and 10 also support 10G modes. */
static void mv88e6390x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
					unsigned long *mask,
					struct phylink_link_state *state)
{
	if (port >= 9) {
		phylink_set(mask, 10000baseT_Full);
		phylink_set(mask, 10000baseKR_Full);
	}

	mv88e6390_phylink_validate(chip, port, mask, state);
}
671
/* phylink .validate callback: build a mask of everything any chip
 * supports (autoneg, pause, port modes), let the per-chip op add its
 * speed capabilities, then restrict both the supported and the
 * advertising masks to that set.
 */
static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
			       unsigned long *supported,
			       struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct mv88e6xxx_chip *chip = ds->priv;

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set(mask, Pause);
	phylink_set_port_modes(mask);

	if (chip->info->ops->phylink_validate)
		chip->info->ops->phylink_validate(chip, port, mask, state);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}
696
/* phylink link-state callback: read the port's current MAC link state
 * through the per-chip op under the register lock, or report
 * -EOPNOTSUPP when the chip has no such op.
 */
static int mv88e6xxx_link_state(struct dsa_switch *ds, int port,
				struct phylink_link_state *state)
{
	struct mv88e6xxx_chip *chip = ds->priv;
	int err;

	mutex_lock(&chip->reg_lock);
	if (chip->info->ops->port_link_state)
		err = chip->info->ops->port_link_state(chip, port, state);
	else
		err = -EOPNOTSUPP;
	mutex_unlock(&chip->reg_lock);

	return err;
}
712
713static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
714 unsigned int mode,
715 const struct phylink_link_state *state)
716{
717 struct mv88e6xxx_chip *chip = ds->priv;
718 int speed, duplex, link, pause, err;
719
720 if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
721 return;
722
723 if (mode == MLO_AN_FIXED) {
724 link = LINK_FORCED_UP;
725 speed = state->speed;
726 duplex = state->duplex;
727 } else if (!mv88e6xxx_phy_is_internal(ds, port)) {
728 link = state->link;
729 speed = state->speed;
730 duplex = state->duplex;
731 } else {
732 speed = SPEED_UNFORCED;
733 duplex = DUPLEX_UNFORCED;
734 link = LINK_UNFORCED;
735 }
736 pause = !!phylink_test(state->advertising, Pause);
737
738 mutex_lock(&chip->reg_lock);
739 err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, pause,
740 state->interface);
577 mutex_unlock(&chip->reg_lock); 741 mutex_unlock(&chip->reg_lock);
578 742
579 if (err && err != -EOPNOTSUPP) 743 if (err && err != -EOPNOTSUPP)
580 dev_err(ds->dev, "p%d: failed to configure MAC\n", port); 744 dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
581} 745}
582 746
/* Force the MAC link state of @port to @link (e.g. LINK_FORCED_UP /
 * LINK_FORCED_DOWN) under the register lock; failures are only logged.
 */
static void mv88e6xxx_mac_link_force(struct dsa_switch *ds, int port, int link)
{
	struct mv88e6xxx_chip *chip = ds->priv;
	int err;

	mutex_lock(&chip->reg_lock);
	err = chip->info->ops->port_set_link(chip, port, link);
	mutex_unlock(&chip->reg_lock);

	if (err)
		dev_err(chip->dev, "p%d: failed to force MAC link\n", port);
}
759
/* phylink mac_link_down callback: only fixed-link ports need the MAC
 * link forced down; other modes are left to the PHY/negotiation.
 */
static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
				    unsigned int mode,
				    phy_interface_t interface)
{
	if (mode == MLO_AN_FIXED)
		mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_DOWN);
}
767
/* phylink mac_link_up callback: mirror of mac_link_down — only
 * fixed-link ports get the MAC link forced up.
 */
static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
				  unsigned int mode, phy_interface_t interface,
				  struct phy_device *phydev)
{
	if (mode == MLO_AN_FIXED)
		mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_UP);
}
775
583static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port) 776static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
584{ 777{
585 if (!chip->info->ops->stats_snapshot) 778 if (!chip->info->ops->stats_snapshot)
@@ -665,13 +858,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
665 case STATS_TYPE_PORT: 858 case STATS_TYPE_PORT:
666 err = mv88e6xxx_port_read(chip, port, s->reg, &reg); 859 err = mv88e6xxx_port_read(chip, port, s->reg, &reg);
667 if (err) 860 if (err)
668 return UINT64_MAX; 861 return U64_MAX;
669 862
670 low = reg; 863 low = reg;
671 if (s->size == 4) { 864 if (s->size == 4) {
672 err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg); 865 err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
673 if (err) 866 if (err)
674 return UINT64_MAX; 867 return U64_MAX;
675 high = reg; 868 high = reg;
676 } 869 }
677 break; 870 break;
@@ -685,7 +878,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
685 mv88e6xxx_g1_stats_read(chip, reg + 1, &high); 878 mv88e6xxx_g1_stats_read(chip, reg + 1, &high);
686 break; 879 break;
687 default: 880 default:
688 return UINT64_MAX; 881 return U64_MAX;
689 } 882 }
690 value = (((u64)high) << 16) | low; 883 value = (((u64)high) << 16) | low;
691 return value; 884 return value;
@@ -742,11 +935,14 @@ static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
742} 935}
743 936
744static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, 937static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
745 uint8_t *data) 938 u32 stringset, uint8_t *data)
746{ 939{
747 struct mv88e6xxx_chip *chip = ds->priv; 940 struct mv88e6xxx_chip *chip = ds->priv;
748 int count = 0; 941 int count = 0;
749 942
943 if (stringset != ETH_SS_STATS)
944 return;
945
750 mutex_lock(&chip->reg_lock); 946 mutex_lock(&chip->reg_lock);
751 947
752 if (chip->info->ops->stats_get_strings) 948 if (chip->info->ops->stats_get_strings)
@@ -789,12 +985,15 @@ static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip)
789 STATS_TYPE_BANK1); 985 STATS_TYPE_BANK1);
790} 986}
791 987
792static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port) 988static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port, int sset)
793{ 989{
794 struct mv88e6xxx_chip *chip = ds->priv; 990 struct mv88e6xxx_chip *chip = ds->priv;
795 int serdes_count = 0; 991 int serdes_count = 0;
796 int count = 0; 992 int count = 0;
797 993
994 if (sset != ETH_SS_STATS)
995 return 0;
996
798 mutex_lock(&chip->reg_lock); 997 mutex_lock(&chip->reg_lock);
799 if (chip->info->ops->stats_get_sset_count) 998 if (chip->info->ops->stats_get_sset_count)
800 count = chip->info->ops->stats_get_sset_count(chip); 999 count = chip->info->ops->stats_get_sset_count(chip);
@@ -911,14 +1110,6 @@ static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
911 1110
912} 1111}
913 1112
914static int mv88e6xxx_stats_set_histogram(struct mv88e6xxx_chip *chip)
915{
916 if (chip->info->ops->stats_set_histogram)
917 return chip->info->ops->stats_set_histogram(chip);
918
919 return 0;
920}
921
922static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) 1113static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
923{ 1114{
924 return 32 * sizeof(u16); 1115 return 32 * sizeof(u16);
@@ -933,7 +1124,7 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
933 u16 *p = _p; 1124 u16 *p = _p;
934 int i; 1125 int i;
935 1126
936 regs->version = 0; 1127 regs->version = chip->info->prod_num;
937 1128
938 memset(p, 0xff, 32 * sizeof(u16)); 1129 memset(p, 0xff, 32 * sizeof(u16));
939 1130
@@ -1020,6 +1211,76 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
1020 dev_err(ds->dev, "p%d: failed to update state\n", port); 1211 dev_err(ds->dev, "p%d: failed to update state\n", port);
1021} 1212}
1022 1213
1214static int mv88e6xxx_pri_setup(struct mv88e6xxx_chip *chip)
1215{
1216 int err;
1217
1218 if (chip->info->ops->ieee_pri_map) {
1219 err = chip->info->ops->ieee_pri_map(chip);
1220 if (err)
1221 return err;
1222 }
1223
1224 if (chip->info->ops->ip_pri_map) {
1225 err = chip->info->ops->ip_pri_map(chip);
1226 if (err)
1227 return err;
1228 }
1229
1230 return 0;
1231}
1232
1233static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
1234{
1235 int target, port;
1236 int err;
1237
1238 if (!chip->info->global2_addr)
1239 return 0;
1240
1241 /* Initialize the routing port to the 32 possible target devices */
1242 for (target = 0; target < 32; target++) {
1243 port = 0x1f;
1244 if (target < DSA_MAX_SWITCHES)
1245 if (chip->ds->rtable[target] != DSA_RTABLE_NONE)
1246 port = chip->ds->rtable[target];
1247
1248 err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
1249 if (err)
1250 return err;
1251 }
1252
1253 if (chip->info->ops->set_cascade_port) {
1254 port = MV88E6XXX_CASCADE_PORT_MULTIPLE;
1255 err = chip->info->ops->set_cascade_port(chip, port);
1256 if (err)
1257 return err;
1258 }
1259
1260 err = mv88e6xxx_g1_set_device_number(chip, chip->ds->index);
1261 if (err)
1262 return err;
1263
1264 return 0;
1265}
1266
1267static int mv88e6xxx_trunk_setup(struct mv88e6xxx_chip *chip)
1268{
1269 /* Clear all trunk masks and mapping */
1270 if (chip->info->global2_addr)
1271 return mv88e6xxx_g2_trunk_clear(chip);
1272
1273 return 0;
1274}
1275
1276static int mv88e6xxx_rmu_setup(struct mv88e6xxx_chip *chip)
1277{
1278 if (chip->info->ops->rmu_disable)
1279 return chip->info->ops->rmu_disable(chip);
1280
1281 return 0;
1282}
1283
1023static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip) 1284static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip)
1024{ 1285{
1025 if (chip->info->ops->pot_clear) 1286 if (chip->info->ops->pot_clear)
@@ -1924,6 +2185,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
1924 int err; 2185 int err;
1925 u16 reg; 2186 u16 reg;
1926 2187
2188 chip->ports[port].chip = chip;
2189 chip->ports[port].port = port;
2190
1927 /* MAC Forcing register: don't force link, speed, duplex or flow control 2191 /* MAC Forcing register: don't force link, speed, duplex or flow control
1928 * state to any particular values on physical ports, but force the CPU 2192 * state to any particular values on physical ports, but force the CPU
1929 * port and all DSA ports to their maximum bandwidth and full duplex. 2193 * port and all DSA ports to their maximum bandwidth and full duplex.
@@ -1931,10 +2195,12 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
1931 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) 2195 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
1932 err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP, 2196 err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP,
1933 SPEED_MAX, DUPLEX_FULL, 2197 SPEED_MAX, DUPLEX_FULL,
2198 PAUSE_OFF,
1934 PHY_INTERFACE_MODE_NA); 2199 PHY_INTERFACE_MODE_NA);
1935 else 2200 else
1936 err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED, 2201 err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
1937 SPEED_UNFORCED, DUPLEX_UNFORCED, 2202 SPEED_UNFORCED, DUPLEX_UNFORCED,
2203 PAUSE_ON,
1938 PHY_INTERFACE_MODE_NA); 2204 PHY_INTERFACE_MODE_NA);
1939 if (err) 2205 if (err)
1940 return err; 2206 return err;
@@ -2083,7 +2349,12 @@ static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
2083 int err; 2349 int err;
2084 2350
2085 mutex_lock(&chip->reg_lock); 2351 mutex_lock(&chip->reg_lock);
2352
2086 err = mv88e6xxx_serdes_power(chip, port, true); 2353 err = mv88e6xxx_serdes_power(chip, port, true);
2354
2355 if (!err && chip->info->ops->serdes_irq_setup)
2356 err = chip->info->ops->serdes_irq_setup(chip, port);
2357
2087 mutex_unlock(&chip->reg_lock); 2358 mutex_unlock(&chip->reg_lock);
2088 2359
2089 return err; 2360 return err;
@@ -2095,8 +2366,13 @@ static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port,
2095 struct mv88e6xxx_chip *chip = ds->priv; 2366 struct mv88e6xxx_chip *chip = ds->priv;
2096 2367
2097 mutex_lock(&chip->reg_lock); 2368 mutex_lock(&chip->reg_lock);
2369
2370 if (chip->info->ops->serdes_irq_free)
2371 chip->info->ops->serdes_irq_free(chip, port);
2372
2098 if (mv88e6xxx_serdes_power(chip, port, false)) 2373 if (mv88e6xxx_serdes_power(chip, port, false))
2099 dev_err(chip->dev, "failed to power off SERDES\n"); 2374 dev_err(chip->dev, "failed to power off SERDES\n");
2375
2100 mutex_unlock(&chip->reg_lock); 2376 mutex_unlock(&chip->reg_lock);
2101} 2377}
2102 2378
@@ -2113,53 +2389,16 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
2113 return err; 2389 return err;
2114} 2390}
2115 2391
2116static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip) 2392static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
2117{ 2393{
2118 struct dsa_switch *ds = chip->ds;
2119 int err; 2394 int err;
2120 2395
2121 /* Disable remote management, and set the switch's DSA device number. */
2122 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2,
2123 MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE |
2124 (ds->index & 0x1f));
2125 if (err)
2126 return err;
2127
2128 /* Configure the IP ToS mapping registers. */
2129 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_0, 0x0000);
2130 if (err)
2131 return err;
2132 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_1, 0x0000);
2133 if (err)
2134 return err;
2135 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_2, 0x5555);
2136 if (err)
2137 return err;
2138 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_3, 0x5555);
2139 if (err)
2140 return err;
2141 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_4, 0xaaaa);
2142 if (err)
2143 return err;
2144 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_5, 0xaaaa);
2145 if (err)
2146 return err;
2147 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_6, 0xffff);
2148 if (err)
2149 return err;
2150 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_7, 0xffff);
2151 if (err)
2152 return err;
2153
2154 /* Configure the IEEE 802.1p priority mapping register. */
2155 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41);
2156 if (err)
2157 return err;
2158
2159 /* Initialize the statistics unit */ 2396 /* Initialize the statistics unit */
2160 err = mv88e6xxx_stats_set_histogram(chip); 2397 if (chip->info->ops->stats_set_histogram) {
2161 if (err) 2398 err = chip->info->ops->stats_set_histogram(chip);
2162 return err; 2399 if (err)
2400 return err;
2401 }
2163 2402
2164 return mv88e6xxx_g1_stats_clear(chip); 2403 return mv88e6xxx_g1_stats_clear(chip);
2165} 2404}
@@ -2167,6 +2406,7 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
2167static int mv88e6xxx_setup(struct dsa_switch *ds) 2406static int mv88e6xxx_setup(struct dsa_switch *ds)
2168{ 2407{
2169 struct mv88e6xxx_chip *chip = ds->priv; 2408 struct mv88e6xxx_chip *chip = ds->priv;
2409 u8 cmode;
2170 int err; 2410 int err;
2171 int i; 2411 int i;
2172 2412
@@ -2175,6 +2415,17 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
2175 2415
2176 mutex_lock(&chip->reg_lock); 2416 mutex_lock(&chip->reg_lock);
2177 2417
2418 /* Cache the cmode of each port. */
2419 for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
2420 if (chip->info->ops->port_get_cmode) {
2421 err = chip->info->ops->port_get_cmode(chip, i, &cmode);
2422 if (err)
2423 goto unlock;
2424
2425 chip->ports[i].cmode = cmode;
2426 }
2427 }
2428
2178 /* Setup Switch Port Registers */ 2429 /* Setup Switch Port Registers */
2179 for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { 2430 for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
2180 if (dsa_is_unused_port(ds, i)) 2431 if (dsa_is_unused_port(ds, i))
@@ -2185,18 +2436,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
2185 goto unlock; 2436 goto unlock;
2186 } 2437 }
2187 2438
2188 /* Setup Switch Global 1 Registers */
2189 err = mv88e6xxx_g1_setup(chip);
2190 if (err)
2191 goto unlock;
2192
2193 /* Setup Switch Global 2 Registers */
2194 if (chip->info->global2_addr) {
2195 err = mv88e6xxx_g2_setup(chip);
2196 if (err)
2197 goto unlock;
2198 }
2199
2200 err = mv88e6xxx_irl_setup(chip); 2439 err = mv88e6xxx_irl_setup(chip);
2201 if (err) 2440 if (err)
2202 goto unlock; 2441 goto unlock;
@@ -2229,10 +2468,26 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
2229 if (err) 2468 if (err)
2230 goto unlock; 2469 goto unlock;
2231 2470
2471 err = mv88e6xxx_rmu_setup(chip);
2472 if (err)
2473 goto unlock;
2474
2232 err = mv88e6xxx_rsvd2cpu_setup(chip); 2475 err = mv88e6xxx_rsvd2cpu_setup(chip);
2233 if (err) 2476 if (err)
2234 goto unlock; 2477 goto unlock;
2235 2478
2479 err = mv88e6xxx_trunk_setup(chip);
2480 if (err)
2481 goto unlock;
2482
2483 err = mv88e6xxx_devmap_setup(chip);
2484 if (err)
2485 goto unlock;
2486
2487 err = mv88e6xxx_pri_setup(chip);
2488 if (err)
2489 goto unlock;
2490
2236 /* Setup PTP Hardware Clock and timestamping */ 2491 /* Setup PTP Hardware Clock and timestamping */
2237 if (chip->info->ptp_support) { 2492 if (chip->info->ptp_support) {
2238 err = mv88e6xxx_ptp_setup(chip); 2493 err = mv88e6xxx_ptp_setup(chip);
@@ -2244,6 +2499,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
2244 goto unlock; 2499 goto unlock;
2245 } 2500 }
2246 2501
2502 err = mv88e6xxx_stats_setup(chip);
2503 if (err)
2504 goto unlock;
2505
2247unlock: 2506unlock:
2248 mutex_unlock(&chip->reg_lock); 2507 mutex_unlock(&chip->reg_lock);
2249 2508
@@ -2337,10 +2596,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
2337 return err; 2596 return err;
2338 } 2597 }
2339 2598
2340 if (np) 2599 err = of_mdiobus_register(bus, np);
2341 err = of_mdiobus_register(bus, np);
2342 else
2343 err = mdiobus_register(bus);
2344 if (err) { 2600 if (err) {
2345 dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); 2601 dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
2346 mv88e6xxx_g2_irq_mdio_free(chip, bus); 2602 mv88e6xxx_g2_irq_mdio_free(chip, bus);
@@ -2460,6 +2716,8 @@ static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
2460 2716
2461static const struct mv88e6xxx_ops mv88e6085_ops = { 2717static const struct mv88e6xxx_ops mv88e6085_ops = {
2462 /* MV88E6XXX_FAMILY_6097 */ 2718 /* MV88E6XXX_FAMILY_6097 */
2719 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
2720 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2463 .irl_init_all = mv88e6352_g2_irl_init_all, 2721 .irl_init_all = mv88e6352_g2_irl_init_all,
2464 .set_switch_mac = mv88e6xxx_g1_set_switch_mac, 2722 .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
2465 .phy_read = mv88e6185_phy_ppu_read, 2723 .phy_read = mv88e6185_phy_ppu_read,
@@ -2475,6 +2733,8 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
2475 .port_pause_limit = mv88e6097_port_pause_limit, 2733 .port_pause_limit = mv88e6097_port_pause_limit,
2476 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 2734 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2477 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 2735 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
2736 .port_link_state = mv88e6352_port_link_state,
2737 .port_get_cmode = mv88e6185_port_get_cmode,
2478 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 2738 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
2479 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 2739 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2480 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2740 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2488,12 +2748,16 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
2488 .ppu_enable = mv88e6185_g1_ppu_enable, 2748 .ppu_enable = mv88e6185_g1_ppu_enable,
2489 .ppu_disable = mv88e6185_g1_ppu_disable, 2749 .ppu_disable = mv88e6185_g1_ppu_disable,
2490 .reset = mv88e6185_g1_reset, 2750 .reset = mv88e6185_g1_reset,
2751 .rmu_disable = mv88e6085_g1_rmu_disable,
2491 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2752 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2492 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2753 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2754 .phylink_validate = mv88e6185_phylink_validate,
2493}; 2755};
2494 2756
2495static const struct mv88e6xxx_ops mv88e6095_ops = { 2757static const struct mv88e6xxx_ops mv88e6095_ops = {
2496 /* MV88E6XXX_FAMILY_6095 */ 2758 /* MV88E6XXX_FAMILY_6095 */
2759 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
2760 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2497 .set_switch_mac = mv88e6xxx_g1_set_switch_mac, 2761 .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
2498 .phy_read = mv88e6185_phy_ppu_read, 2762 .phy_read = mv88e6185_phy_ppu_read,
2499 .phy_write = mv88e6185_phy_ppu_write, 2763 .phy_write = mv88e6185_phy_ppu_write,
@@ -2503,6 +2767,8 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
2503 .port_set_frame_mode = mv88e6085_port_set_frame_mode, 2767 .port_set_frame_mode = mv88e6085_port_set_frame_mode,
2504 .port_set_egress_floods = mv88e6185_port_set_egress_floods, 2768 .port_set_egress_floods = mv88e6185_port_set_egress_floods,
2505 .port_set_upstream_port = mv88e6095_port_set_upstream_port, 2769 .port_set_upstream_port = mv88e6095_port_set_upstream_port,
2770 .port_link_state = mv88e6185_port_link_state,
2771 .port_get_cmode = mv88e6185_port_get_cmode,
2506 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 2772 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
2507 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 2773 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2508 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2774 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2514,10 +2780,13 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
2514 .reset = mv88e6185_g1_reset, 2780 .reset = mv88e6185_g1_reset,
2515 .vtu_getnext = mv88e6185_g1_vtu_getnext, 2781 .vtu_getnext = mv88e6185_g1_vtu_getnext,
2516 .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, 2782 .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
2783 .phylink_validate = mv88e6185_phylink_validate,
2517}; 2784};
2518 2785
2519static const struct mv88e6xxx_ops mv88e6097_ops = { 2786static const struct mv88e6xxx_ops mv88e6097_ops = {
2520 /* MV88E6XXX_FAMILY_6097 */ 2787 /* MV88E6XXX_FAMILY_6097 */
2788 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
2789 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2521 .irl_init_all = mv88e6352_g2_irl_init_all, 2790 .irl_init_all = mv88e6352_g2_irl_init_all,
2522 .set_switch_mac = mv88e6xxx_g2_set_switch_mac, 2791 .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
2523 .phy_read = mv88e6xxx_g2_smi_phy_read, 2792 .phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -2534,6 +2803,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
2534 .port_pause_limit = mv88e6097_port_pause_limit, 2803 .port_pause_limit = mv88e6097_port_pause_limit,
2535 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 2804 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2536 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 2805 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
2806 .port_link_state = mv88e6352_port_link_state,
2807 .port_get_cmode = mv88e6185_port_get_cmode,
2537 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 2808 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
2538 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 2809 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2539 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2810 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2545,12 +2816,16 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
2545 .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, 2816 .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
2546 .pot_clear = mv88e6xxx_g2_pot_clear, 2817 .pot_clear = mv88e6xxx_g2_pot_clear,
2547 .reset = mv88e6352_g1_reset, 2818 .reset = mv88e6352_g1_reset,
2819 .rmu_disable = mv88e6085_g1_rmu_disable,
2548 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2820 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2549 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2821 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2822 .phylink_validate = mv88e6185_phylink_validate,
2550}; 2823};
2551 2824
2552static const struct mv88e6xxx_ops mv88e6123_ops = { 2825static const struct mv88e6xxx_ops mv88e6123_ops = {
2553 /* MV88E6XXX_FAMILY_6165 */ 2826 /* MV88E6XXX_FAMILY_6165 */
2827 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
2828 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2554 .irl_init_all = mv88e6352_g2_irl_init_all, 2829 .irl_init_all = mv88e6352_g2_irl_init_all,
2555 .set_switch_mac = mv88e6xxx_g2_set_switch_mac, 2830 .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
2556 .phy_read = mv88e6xxx_g2_smi_phy_read, 2831 .phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -2562,6 +2837,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
2562 .port_set_egress_floods = mv88e6352_port_set_egress_floods, 2837 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
2563 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 2838 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2564 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 2839 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
2840 .port_link_state = mv88e6352_port_link_state,
2841 .port_get_cmode = mv88e6185_port_get_cmode,
2565 .stats_snapshot = mv88e6320_g1_stats_snapshot, 2842 .stats_snapshot = mv88e6320_g1_stats_snapshot,
2566 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 2843 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2567 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2844 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2575,10 +2852,13 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
2575 .reset = mv88e6352_g1_reset, 2852 .reset = mv88e6352_g1_reset,
2576 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2853 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2577 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2854 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2855 .phylink_validate = mv88e6185_phylink_validate,
2578}; 2856};
2579 2857
2580static const struct mv88e6xxx_ops mv88e6131_ops = { 2858static const struct mv88e6xxx_ops mv88e6131_ops = {
2581 /* MV88E6XXX_FAMILY_6185 */ 2859 /* MV88E6XXX_FAMILY_6185 */
2860 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
2861 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2582 .set_switch_mac = mv88e6xxx_g1_set_switch_mac, 2862 .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
2583 .phy_read = mv88e6185_phy_ppu_read, 2863 .phy_read = mv88e6185_phy_ppu_read,
2584 .phy_write = mv88e6185_phy_ppu_write, 2864 .phy_write = mv88e6185_phy_ppu_write,
@@ -2593,6 +2873,9 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
2593 .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, 2873 .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
2594 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 2874 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
2595 .port_pause_limit = mv88e6097_port_pause_limit, 2875 .port_pause_limit = mv88e6097_port_pause_limit,
2876 .port_set_pause = mv88e6185_port_set_pause,
2877 .port_link_state = mv88e6352_port_link_state,
2878 .port_get_cmode = mv88e6185_port_get_cmode,
2596 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 2879 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
2597 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 2880 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2598 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2881 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2603,14 +2886,18 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
2603 .watchdog_ops = &mv88e6097_watchdog_ops, 2886 .watchdog_ops = &mv88e6097_watchdog_ops,
2604 .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, 2887 .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
2605 .ppu_enable = mv88e6185_g1_ppu_enable, 2888 .ppu_enable = mv88e6185_g1_ppu_enable,
2889 .set_cascade_port = mv88e6185_g1_set_cascade_port,
2606 .ppu_disable = mv88e6185_g1_ppu_disable, 2890 .ppu_disable = mv88e6185_g1_ppu_disable,
2607 .reset = mv88e6185_g1_reset, 2891 .reset = mv88e6185_g1_reset,
2608 .vtu_getnext = mv88e6185_g1_vtu_getnext, 2892 .vtu_getnext = mv88e6185_g1_vtu_getnext,
2609 .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, 2893 .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
2894 .phylink_validate = mv88e6185_phylink_validate,
2610}; 2895};
2611 2896
2612static const struct mv88e6xxx_ops mv88e6141_ops = { 2897static const struct mv88e6xxx_ops mv88e6141_ops = {
2613 /* MV88E6XXX_FAMILY_6341 */ 2898 /* MV88E6XXX_FAMILY_6341 */
2899 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
2900 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2614 .irl_init_all = mv88e6352_g2_irl_init_all, 2901 .irl_init_all = mv88e6352_g2_irl_init_all,
2615 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 2902 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2616 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 2903 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -2620,7 +2907,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
2620 .port_set_link = mv88e6xxx_port_set_link, 2907 .port_set_link = mv88e6xxx_port_set_link,
2621 .port_set_duplex = mv88e6xxx_port_set_duplex, 2908 .port_set_duplex = mv88e6xxx_port_set_duplex,
2622 .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, 2909 .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
2623 .port_set_speed = mv88e6390_port_set_speed, 2910 .port_set_speed = mv88e6341_port_set_speed,
2624 .port_tag_remap = mv88e6095_port_tag_remap, 2911 .port_tag_remap = mv88e6095_port_tag_remap,
2625 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 2912 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
2626 .port_set_egress_floods = mv88e6352_port_set_egress_floods, 2913 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -2630,6 +2917,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
2630 .port_pause_limit = mv88e6097_port_pause_limit, 2917 .port_pause_limit = mv88e6097_port_pause_limit,
2631 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 2918 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2632 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 2919 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
2920 .port_link_state = mv88e6352_port_link_state,
2921 .port_get_cmode = mv88e6352_port_get_cmode,
2633 .stats_snapshot = mv88e6390_g1_stats_snapshot, 2922 .stats_snapshot = mv88e6390_g1_stats_snapshot,
2634 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 2923 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2635 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 2924 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -2643,11 +2932,15 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
2643 .reset = mv88e6352_g1_reset, 2932 .reset = mv88e6352_g1_reset,
2644 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2933 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2645 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2934 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2935 .serdes_power = mv88e6341_serdes_power,
2646 .gpio_ops = &mv88e6352_gpio_ops, 2936 .gpio_ops = &mv88e6352_gpio_ops,
2937 .phylink_validate = mv88e6390_phylink_validate,
2647}; 2938};
2648 2939
2649static const struct mv88e6xxx_ops mv88e6161_ops = { 2940static const struct mv88e6xxx_ops mv88e6161_ops = {
2650 /* MV88E6XXX_FAMILY_6165 */ 2941 /* MV88E6XXX_FAMILY_6165 */
2942 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
2943 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2651 .irl_init_all = mv88e6352_g2_irl_init_all, 2944 .irl_init_all = mv88e6352_g2_irl_init_all,
2652 .set_switch_mac = mv88e6xxx_g2_set_switch_mac, 2945 .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
2653 .phy_read = mv88e6xxx_g2_smi_phy_read, 2946 .phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -2664,6 +2957,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
2664 .port_pause_limit = mv88e6097_port_pause_limit, 2957 .port_pause_limit = mv88e6097_port_pause_limit,
2665 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 2958 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2666 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 2959 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
2960 .port_link_state = mv88e6352_port_link_state,
2961 .port_get_cmode = mv88e6185_port_get_cmode,
2667 .stats_snapshot = mv88e6320_g1_stats_snapshot, 2962 .stats_snapshot = mv88e6320_g1_stats_snapshot,
2668 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 2963 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2669 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2964 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2677,10 +2972,15 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
2677 .reset = mv88e6352_g1_reset, 2972 .reset = mv88e6352_g1_reset,
2678 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2973 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2679 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2974 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2975 .avb_ops = &mv88e6165_avb_ops,
2976 .ptp_ops = &mv88e6165_ptp_ops,
2977 .phylink_validate = mv88e6185_phylink_validate,
2680}; 2978};
2681 2979
2682static const struct mv88e6xxx_ops mv88e6165_ops = { 2980static const struct mv88e6xxx_ops mv88e6165_ops = {
2683 /* MV88E6XXX_FAMILY_6165 */ 2981 /* MV88E6XXX_FAMILY_6165 */
2982 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
2983 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2684 .irl_init_all = mv88e6352_g2_irl_init_all, 2984 .irl_init_all = mv88e6352_g2_irl_init_all,
2685 .set_switch_mac = mv88e6xxx_g2_set_switch_mac, 2985 .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
2686 .phy_read = mv88e6165_phy_read, 2986 .phy_read = mv88e6165_phy_read,
@@ -2690,6 +2990,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
2690 .port_set_speed = mv88e6185_port_set_speed, 2990 .port_set_speed = mv88e6185_port_set_speed,
2691 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 2991 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2692 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 2992 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
2993 .port_link_state = mv88e6352_port_link_state,
2994 .port_get_cmode = mv88e6185_port_get_cmode,
2693 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 2995 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
2694 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 2996 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2695 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 2997 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2703,10 +3005,15 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
2703 .reset = mv88e6352_g1_reset, 3005 .reset = mv88e6352_g1_reset,
2704 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3006 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2705 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3007 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
3008 .avb_ops = &mv88e6165_avb_ops,
3009 .ptp_ops = &mv88e6165_ptp_ops,
3010 .phylink_validate = mv88e6185_phylink_validate,
2706}; 3011};
2707 3012
2708static const struct mv88e6xxx_ops mv88e6171_ops = { 3013static const struct mv88e6xxx_ops mv88e6171_ops = {
2709 /* MV88E6XXX_FAMILY_6351 */ 3014 /* MV88E6XXX_FAMILY_6351 */
3015 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
3016 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2710 .irl_init_all = mv88e6352_g2_irl_init_all, 3017 .irl_init_all = mv88e6352_g2_irl_init_all,
2711 .set_switch_mac = mv88e6xxx_g2_set_switch_mac, 3018 .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
2712 .phy_read = mv88e6xxx_g2_smi_phy_read, 3019 .phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -2724,6 +3031,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
2724 .port_pause_limit = mv88e6097_port_pause_limit, 3031 .port_pause_limit = mv88e6097_port_pause_limit,
2725 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3032 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2726 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3033 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3034 .port_link_state = mv88e6352_port_link_state,
3035 .port_get_cmode = mv88e6352_port_get_cmode,
2727 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3036 .stats_snapshot = mv88e6320_g1_stats_snapshot,
2728 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 3037 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2729 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3038 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2737,10 +3046,13 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
2737 .reset = mv88e6352_g1_reset, 3046 .reset = mv88e6352_g1_reset,
2738 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3047 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2739 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3048 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
3049 .phylink_validate = mv88e6185_phylink_validate,
2740}; 3050};
2741 3051
2742static const struct mv88e6xxx_ops mv88e6172_ops = { 3052static const struct mv88e6xxx_ops mv88e6172_ops = {
2743 /* MV88E6XXX_FAMILY_6352 */ 3053 /* MV88E6XXX_FAMILY_6352 */
3054 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
3055 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2744 .irl_init_all = mv88e6352_g2_irl_init_all, 3056 .irl_init_all = mv88e6352_g2_irl_init_all,
2745 .get_eeprom = mv88e6xxx_g2_get_eeprom16, 3057 .get_eeprom = mv88e6xxx_g2_get_eeprom16,
2746 .set_eeprom = mv88e6xxx_g2_set_eeprom16, 3058 .set_eeprom = mv88e6xxx_g2_set_eeprom16,
@@ -2760,6 +3072,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
2760 .port_pause_limit = mv88e6097_port_pause_limit, 3072 .port_pause_limit = mv88e6097_port_pause_limit,
2761 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3073 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2762 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3074 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3075 .port_link_state = mv88e6352_port_link_state,
3076 .port_get_cmode = mv88e6352_port_get_cmode,
2763 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3077 .stats_snapshot = mv88e6320_g1_stats_snapshot,
2764 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 3078 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2765 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3079 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2771,14 +3085,18 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
2771 .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, 3085 .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
2772 .pot_clear = mv88e6xxx_g2_pot_clear, 3086 .pot_clear = mv88e6xxx_g2_pot_clear,
2773 .reset = mv88e6352_g1_reset, 3087 .reset = mv88e6352_g1_reset,
3088 .rmu_disable = mv88e6352_g1_rmu_disable,
2774 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3089 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2775 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3090 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2776 .serdes_power = mv88e6352_serdes_power, 3091 .serdes_power = mv88e6352_serdes_power,
2777 .gpio_ops = &mv88e6352_gpio_ops, 3092 .gpio_ops = &mv88e6352_gpio_ops,
3093 .phylink_validate = mv88e6352_phylink_validate,
2778}; 3094};
2779 3095
2780static const struct mv88e6xxx_ops mv88e6175_ops = { 3096static const struct mv88e6xxx_ops mv88e6175_ops = {
2781 /* MV88E6XXX_FAMILY_6351 */ 3097 /* MV88E6XXX_FAMILY_6351 */
3098 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
3099 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2782 .irl_init_all = mv88e6352_g2_irl_init_all, 3100 .irl_init_all = mv88e6352_g2_irl_init_all,
2783 .set_switch_mac = mv88e6xxx_g2_set_switch_mac, 3101 .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
2784 .phy_read = mv88e6xxx_g2_smi_phy_read, 3102 .phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -2796,6 +3114,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
2796 .port_pause_limit = mv88e6097_port_pause_limit, 3114 .port_pause_limit = mv88e6097_port_pause_limit,
2797 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3115 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2798 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3116 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3117 .port_link_state = mv88e6352_port_link_state,
3118 .port_get_cmode = mv88e6352_port_get_cmode,
2799 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3119 .stats_snapshot = mv88e6320_g1_stats_snapshot,
2800 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 3120 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2801 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3121 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2809,10 +3129,13 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
2809 .reset = mv88e6352_g1_reset, 3129 .reset = mv88e6352_g1_reset,
2810 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3130 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2811 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3131 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
3132 .phylink_validate = mv88e6185_phylink_validate,
2812}; 3133};
2813 3134
2814static const struct mv88e6xxx_ops mv88e6176_ops = { 3135static const struct mv88e6xxx_ops mv88e6176_ops = {
2815 /* MV88E6XXX_FAMILY_6352 */ 3136 /* MV88E6XXX_FAMILY_6352 */
3137 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
3138 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2816 .irl_init_all = mv88e6352_g2_irl_init_all, 3139 .irl_init_all = mv88e6352_g2_irl_init_all,
2817 .get_eeprom = mv88e6xxx_g2_get_eeprom16, 3140 .get_eeprom = mv88e6xxx_g2_get_eeprom16,
2818 .set_eeprom = mv88e6xxx_g2_set_eeprom16, 3141 .set_eeprom = mv88e6xxx_g2_set_eeprom16,
@@ -2832,6 +3155,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
2832 .port_pause_limit = mv88e6097_port_pause_limit, 3155 .port_pause_limit = mv88e6097_port_pause_limit,
2833 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3156 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2834 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3157 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3158 .port_link_state = mv88e6352_port_link_state,
3159 .port_get_cmode = mv88e6352_port_get_cmode,
2835 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3160 .stats_snapshot = mv88e6320_g1_stats_snapshot,
2836 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 3161 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2837 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3162 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2843,14 +3168,20 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
2843 .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, 3168 .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
2844 .pot_clear = mv88e6xxx_g2_pot_clear, 3169 .pot_clear = mv88e6xxx_g2_pot_clear,
2845 .reset = mv88e6352_g1_reset, 3170 .reset = mv88e6352_g1_reset,
3171 .rmu_disable = mv88e6352_g1_rmu_disable,
2846 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3172 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2847 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3173 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2848 .serdes_power = mv88e6352_serdes_power, 3174 .serdes_power = mv88e6352_serdes_power,
3175 .serdes_irq_setup = mv88e6352_serdes_irq_setup,
3176 .serdes_irq_free = mv88e6352_serdes_irq_free,
2849 .gpio_ops = &mv88e6352_gpio_ops, 3177 .gpio_ops = &mv88e6352_gpio_ops,
3178 .phylink_validate = mv88e6352_phylink_validate,
2850}; 3179};
2851 3180
2852static const struct mv88e6xxx_ops mv88e6185_ops = { 3181static const struct mv88e6xxx_ops mv88e6185_ops = {
2853 /* MV88E6XXX_FAMILY_6185 */ 3182 /* MV88E6XXX_FAMILY_6185 */
3183 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
3184 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2854 .set_switch_mac = mv88e6xxx_g1_set_switch_mac, 3185 .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
2855 .phy_read = mv88e6185_phy_ppu_read, 3186 .phy_read = mv88e6185_phy_ppu_read,
2856 .phy_write = mv88e6185_phy_ppu_write, 3187 .phy_write = mv88e6185_phy_ppu_write,
@@ -2861,6 +3192,9 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
2861 .port_set_egress_floods = mv88e6185_port_set_egress_floods, 3192 .port_set_egress_floods = mv88e6185_port_set_egress_floods,
2862 .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, 3193 .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
2863 .port_set_upstream_port = mv88e6095_port_set_upstream_port, 3194 .port_set_upstream_port = mv88e6095_port_set_upstream_port,
3195 .port_set_pause = mv88e6185_port_set_pause,
3196 .port_link_state = mv88e6185_port_link_state,
3197 .port_get_cmode = mv88e6185_port_get_cmode,
2864 .stats_snapshot = mv88e6xxx_g1_stats_snapshot, 3198 .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
2865 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 3199 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
2866 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3200 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2870,11 +3204,13 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
2870 .set_egress_port = mv88e6095_g1_set_egress_port, 3204 .set_egress_port = mv88e6095_g1_set_egress_port,
2871 .watchdog_ops = &mv88e6097_watchdog_ops, 3205 .watchdog_ops = &mv88e6097_watchdog_ops,
2872 .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, 3206 .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
3207 .set_cascade_port = mv88e6185_g1_set_cascade_port,
2873 .ppu_enable = mv88e6185_g1_ppu_enable, 3208 .ppu_enable = mv88e6185_g1_ppu_enable,
2874 .ppu_disable = mv88e6185_g1_ppu_disable, 3209 .ppu_disable = mv88e6185_g1_ppu_disable,
2875 .reset = mv88e6185_g1_reset, 3210 .reset = mv88e6185_g1_reset,
2876 .vtu_getnext = mv88e6185_g1_vtu_getnext, 3211 .vtu_getnext = mv88e6185_g1_vtu_getnext,
2877 .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, 3212 .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
3213 .phylink_validate = mv88e6185_phylink_validate,
2878}; 3214};
2879 3215
2880static const struct mv88e6xxx_ops mv88e6190_ops = { 3216static const struct mv88e6xxx_ops mv88e6190_ops = {
@@ -2896,6 +3232,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
2896 .port_pause_limit = mv88e6390_port_pause_limit, 3232 .port_pause_limit = mv88e6390_port_pause_limit,
2897 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3233 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2898 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3234 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3235 .port_link_state = mv88e6352_port_link_state,
3236 .port_get_cmode = mv88e6352_port_get_cmode,
2899 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3237 .stats_snapshot = mv88e6390_g1_stats_snapshot,
2900 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3238 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
2901 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3239 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -2907,10 +3245,14 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
2907 .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, 3245 .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
2908 .pot_clear = mv88e6xxx_g2_pot_clear, 3246 .pot_clear = mv88e6xxx_g2_pot_clear,
2909 .reset = mv88e6352_g1_reset, 3247 .reset = mv88e6352_g1_reset,
3248 .rmu_disable = mv88e6390_g1_rmu_disable,
2910 .vtu_getnext = mv88e6390_g1_vtu_getnext, 3249 .vtu_getnext = mv88e6390_g1_vtu_getnext,
2911 .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, 3250 .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
2912 .serdes_power = mv88e6390_serdes_power, 3251 .serdes_power = mv88e6390_serdes_power,
3252 .serdes_irq_setup = mv88e6390_serdes_irq_setup,
3253 .serdes_irq_free = mv88e6390_serdes_irq_free,
2913 .gpio_ops = &mv88e6352_gpio_ops, 3254 .gpio_ops = &mv88e6352_gpio_ops,
3255 .phylink_validate = mv88e6390_phylink_validate,
2914}; 3256};
2915 3257
2916static const struct mv88e6xxx_ops mv88e6190x_ops = { 3258static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -2932,6 +3274,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
2932 .port_pause_limit = mv88e6390_port_pause_limit, 3274 .port_pause_limit = mv88e6390_port_pause_limit,
2933 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3275 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2934 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3276 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3277 .port_link_state = mv88e6352_port_link_state,
3278 .port_get_cmode = mv88e6352_port_get_cmode,
2935 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3279 .stats_snapshot = mv88e6390_g1_stats_snapshot,
2936 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3280 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
2937 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3281 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -2943,10 +3287,14 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
2943 .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, 3287 .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
2944 .pot_clear = mv88e6xxx_g2_pot_clear, 3288 .pot_clear = mv88e6xxx_g2_pot_clear,
2945 .reset = mv88e6352_g1_reset, 3289 .reset = mv88e6352_g1_reset,
3290 .rmu_disable = mv88e6390_g1_rmu_disable,
2946 .vtu_getnext = mv88e6390_g1_vtu_getnext, 3291 .vtu_getnext = mv88e6390_g1_vtu_getnext,
2947 .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, 3292 .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
2948 .serdes_power = mv88e6390_serdes_power, 3293 .serdes_power = mv88e6390x_serdes_power,
3294 .serdes_irq_setup = mv88e6390_serdes_irq_setup,
3295 .serdes_irq_free = mv88e6390_serdes_irq_free,
2949 .gpio_ops = &mv88e6352_gpio_ops, 3296 .gpio_ops = &mv88e6352_gpio_ops,
3297 .phylink_validate = mv88e6390x_phylink_validate,
2950}; 3298};
2951 3299
2952static const struct mv88e6xxx_ops mv88e6191_ops = { 3300static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -2968,6 +3316,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
2968 .port_pause_limit = mv88e6390_port_pause_limit, 3316 .port_pause_limit = mv88e6390_port_pause_limit,
2969 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3317 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2970 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3318 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3319 .port_link_state = mv88e6352_port_link_state,
3320 .port_get_cmode = mv88e6352_port_get_cmode,
2971 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3321 .stats_snapshot = mv88e6390_g1_stats_snapshot,
2972 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3322 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
2973 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3323 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -2979,13 +3329,21 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
2979 .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, 3329 .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
2980 .pot_clear = mv88e6xxx_g2_pot_clear, 3330 .pot_clear = mv88e6xxx_g2_pot_clear,
2981 .reset = mv88e6352_g1_reset, 3331 .reset = mv88e6352_g1_reset,
3332 .rmu_disable = mv88e6390_g1_rmu_disable,
2982 .vtu_getnext = mv88e6390_g1_vtu_getnext, 3333 .vtu_getnext = mv88e6390_g1_vtu_getnext,
2983 .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, 3334 .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
2984 .serdes_power = mv88e6390_serdes_power, 3335 .serdes_power = mv88e6390_serdes_power,
3336 .serdes_irq_setup = mv88e6390_serdes_irq_setup,
3337 .serdes_irq_free = mv88e6390_serdes_irq_free,
3338 .avb_ops = &mv88e6390_avb_ops,
3339 .ptp_ops = &mv88e6352_ptp_ops,
3340 .phylink_validate = mv88e6390_phylink_validate,
2985}; 3341};
2986 3342
2987static const struct mv88e6xxx_ops mv88e6240_ops = { 3343static const struct mv88e6xxx_ops mv88e6240_ops = {
2988 /* MV88E6XXX_FAMILY_6352 */ 3344 /* MV88E6XXX_FAMILY_6352 */
3345 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
3346 .ip_pri_map = mv88e6085_g1_ip_pri_map,
2989 .irl_init_all = mv88e6352_g2_irl_init_all, 3347 .irl_init_all = mv88e6352_g2_irl_init_all,
2990 .get_eeprom = mv88e6xxx_g2_get_eeprom16, 3348 .get_eeprom = mv88e6xxx_g2_get_eeprom16,
2991 .set_eeprom = mv88e6xxx_g2_set_eeprom16, 3349 .set_eeprom = mv88e6xxx_g2_set_eeprom16,
@@ -3005,6 +3363,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
3005 .port_pause_limit = mv88e6097_port_pause_limit, 3363 .port_pause_limit = mv88e6097_port_pause_limit,
3006 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3364 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3007 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3365 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3366 .port_link_state = mv88e6352_port_link_state,
3367 .port_get_cmode = mv88e6352_port_get_cmode,
3008 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3368 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3009 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 3369 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
3010 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3370 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3016,11 +3376,16 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
3016 .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, 3376 .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
3017 .pot_clear = mv88e6xxx_g2_pot_clear, 3377 .pot_clear = mv88e6xxx_g2_pot_clear,
3018 .reset = mv88e6352_g1_reset, 3378 .reset = mv88e6352_g1_reset,
3379 .rmu_disable = mv88e6352_g1_rmu_disable,
3019 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3380 .vtu_getnext = mv88e6352_g1_vtu_getnext,
3020 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3381 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
3021 .serdes_power = mv88e6352_serdes_power, 3382 .serdes_power = mv88e6352_serdes_power,
3383 .serdes_irq_setup = mv88e6352_serdes_irq_setup,
3384 .serdes_irq_free = mv88e6352_serdes_irq_free,
3022 .gpio_ops = &mv88e6352_gpio_ops, 3385 .gpio_ops = &mv88e6352_gpio_ops,
3023 .avb_ops = &mv88e6352_avb_ops, 3386 .avb_ops = &mv88e6352_avb_ops,
3387 .ptp_ops = &mv88e6352_ptp_ops,
3388 .phylink_validate = mv88e6352_phylink_validate,
3024}; 3389};
3025 3390
3026static const struct mv88e6xxx_ops mv88e6290_ops = { 3391static const struct mv88e6xxx_ops mv88e6290_ops = {
@@ -3043,6 +3408,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
3043 .port_set_cmode = mv88e6390x_port_set_cmode, 3408 .port_set_cmode = mv88e6390x_port_set_cmode,
3044 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3409 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3045 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3410 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3411 .port_link_state = mv88e6352_port_link_state,
3412 .port_get_cmode = mv88e6352_port_get_cmode,
3046 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3413 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3047 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3414 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
3048 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3415 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3054,15 +3421,22 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
3054 .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, 3421 .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
3055 .pot_clear = mv88e6xxx_g2_pot_clear, 3422 .pot_clear = mv88e6xxx_g2_pot_clear,
3056 .reset = mv88e6352_g1_reset, 3423 .reset = mv88e6352_g1_reset,
3424 .rmu_disable = mv88e6390_g1_rmu_disable,
3057 .vtu_getnext = mv88e6390_g1_vtu_getnext, 3425 .vtu_getnext = mv88e6390_g1_vtu_getnext,
3058 .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, 3426 .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
3059 .serdes_power = mv88e6390_serdes_power, 3427 .serdes_power = mv88e6390_serdes_power,
3428 .serdes_irq_setup = mv88e6390_serdes_irq_setup,
3429 .serdes_irq_free = mv88e6390_serdes_irq_free,
3060 .gpio_ops = &mv88e6352_gpio_ops, 3430 .gpio_ops = &mv88e6352_gpio_ops,
3061 .avb_ops = &mv88e6390_avb_ops, 3431 .avb_ops = &mv88e6390_avb_ops,
3432 .ptp_ops = &mv88e6352_ptp_ops,
3433 .phylink_validate = mv88e6390_phylink_validate,
3062}; 3434};
3063 3435
3064static const struct mv88e6xxx_ops mv88e6320_ops = { 3436static const struct mv88e6xxx_ops mv88e6320_ops = {
3065 /* MV88E6XXX_FAMILY_6320 */ 3437 /* MV88E6XXX_FAMILY_6320 */
3438 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
3439 .ip_pri_map = mv88e6085_g1_ip_pri_map,
3066 .irl_init_all = mv88e6352_g2_irl_init_all, 3440 .irl_init_all = mv88e6352_g2_irl_init_all,
3067 .get_eeprom = mv88e6xxx_g2_get_eeprom16, 3441 .get_eeprom = mv88e6xxx_g2_get_eeprom16,
3068 .set_eeprom = mv88e6xxx_g2_set_eeprom16, 3442 .set_eeprom = mv88e6xxx_g2_set_eeprom16,
@@ -3081,6 +3455,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
3081 .port_pause_limit = mv88e6097_port_pause_limit, 3455 .port_pause_limit = mv88e6097_port_pause_limit,
3082 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3456 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3083 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3457 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3458 .port_link_state = mv88e6352_port_link_state,
3459 .port_get_cmode = mv88e6352_port_get_cmode,
3084 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3460 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3085 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 3461 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
3086 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3462 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3095,10 +3471,14 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
3095 .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, 3471 .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
3096 .gpio_ops = &mv88e6352_gpio_ops, 3472 .gpio_ops = &mv88e6352_gpio_ops,
3097 .avb_ops = &mv88e6352_avb_ops, 3473 .avb_ops = &mv88e6352_avb_ops,
3474 .ptp_ops = &mv88e6352_ptp_ops,
3475 .phylink_validate = mv88e6185_phylink_validate,
3098}; 3476};
3099 3477
3100static const struct mv88e6xxx_ops mv88e6321_ops = { 3478static const struct mv88e6xxx_ops mv88e6321_ops = {
3101 /* MV88E6XXX_FAMILY_6320 */ 3479 /* MV88E6XXX_FAMILY_6320 */
3480 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
3481 .ip_pri_map = mv88e6085_g1_ip_pri_map,
3102 .irl_init_all = mv88e6352_g2_irl_init_all, 3482 .irl_init_all = mv88e6352_g2_irl_init_all,
3103 .get_eeprom = mv88e6xxx_g2_get_eeprom16, 3483 .get_eeprom = mv88e6xxx_g2_get_eeprom16,
3104 .set_eeprom = mv88e6xxx_g2_set_eeprom16, 3484 .set_eeprom = mv88e6xxx_g2_set_eeprom16,
@@ -3117,6 +3497,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
3117 .port_pause_limit = mv88e6097_port_pause_limit, 3497 .port_pause_limit = mv88e6097_port_pause_limit,
3118 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3498 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3119 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3499 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3500 .port_link_state = mv88e6352_port_link_state,
3501 .port_get_cmode = mv88e6352_port_get_cmode,
3120 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3502 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3121 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 3503 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
3122 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3504 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3129,10 +3511,14 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
3129 .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, 3511 .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
3130 .gpio_ops = &mv88e6352_gpio_ops, 3512 .gpio_ops = &mv88e6352_gpio_ops,
3131 .avb_ops = &mv88e6352_avb_ops, 3513 .avb_ops = &mv88e6352_avb_ops,
3514 .ptp_ops = &mv88e6352_ptp_ops,
3515 .phylink_validate = mv88e6185_phylink_validate,
3132}; 3516};
3133 3517
3134static const struct mv88e6xxx_ops mv88e6341_ops = { 3518static const struct mv88e6xxx_ops mv88e6341_ops = {
3135 /* MV88E6XXX_FAMILY_6341 */ 3519 /* MV88E6XXX_FAMILY_6341 */
3520 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
3521 .ip_pri_map = mv88e6085_g1_ip_pri_map,
3136 .irl_init_all = mv88e6352_g2_irl_init_all, 3522 .irl_init_all = mv88e6352_g2_irl_init_all,
3137 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3523 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3138 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3524 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3142,7 +3528,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
3142 .port_set_link = mv88e6xxx_port_set_link, 3528 .port_set_link = mv88e6xxx_port_set_link,
3143 .port_set_duplex = mv88e6xxx_port_set_duplex, 3529 .port_set_duplex = mv88e6xxx_port_set_duplex,
3144 .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, 3530 .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
3145 .port_set_speed = mv88e6390_port_set_speed, 3531 .port_set_speed = mv88e6341_port_set_speed,
3146 .port_tag_remap = mv88e6095_port_tag_remap, 3532 .port_tag_remap = mv88e6095_port_tag_remap,
3147 .port_set_frame_mode = mv88e6351_port_set_frame_mode, 3533 .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3148 .port_set_egress_floods = mv88e6352_port_set_egress_floods, 3534 .port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3152,6 +3538,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
3152 .port_pause_limit = mv88e6097_port_pause_limit, 3538 .port_pause_limit = mv88e6097_port_pause_limit,
3153 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3539 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3154 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3540 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3541 .port_link_state = mv88e6352_port_link_state,
3542 .port_get_cmode = mv88e6352_port_get_cmode,
3155 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3543 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3156 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 3544 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
3157 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3545 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3165,12 +3553,17 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
3165 .reset = mv88e6352_g1_reset, 3553 .reset = mv88e6352_g1_reset,
3166 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3554 .vtu_getnext = mv88e6352_g1_vtu_getnext,
3167 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3555 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
3556 .serdes_power = mv88e6341_serdes_power,
3168 .gpio_ops = &mv88e6352_gpio_ops, 3557 .gpio_ops = &mv88e6352_gpio_ops,
3169 .avb_ops = &mv88e6390_avb_ops, 3558 .avb_ops = &mv88e6390_avb_ops,
3559 .ptp_ops = &mv88e6352_ptp_ops,
3560 .phylink_validate = mv88e6390_phylink_validate,
3170}; 3561};
3171 3562
3172static const struct mv88e6xxx_ops mv88e6350_ops = { 3563static const struct mv88e6xxx_ops mv88e6350_ops = {
3173 /* MV88E6XXX_FAMILY_6351 */ 3564 /* MV88E6XXX_FAMILY_6351 */
3565 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
3566 .ip_pri_map = mv88e6085_g1_ip_pri_map,
3174 .irl_init_all = mv88e6352_g2_irl_init_all, 3567 .irl_init_all = mv88e6352_g2_irl_init_all,
3175 .set_switch_mac = mv88e6xxx_g2_set_switch_mac, 3568 .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
3176 .phy_read = mv88e6xxx_g2_smi_phy_read, 3569 .phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -3188,6 +3581,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
3188 .port_pause_limit = mv88e6097_port_pause_limit, 3581 .port_pause_limit = mv88e6097_port_pause_limit,
3189 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3582 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3190 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3583 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3584 .port_link_state = mv88e6352_port_link_state,
3585 .port_get_cmode = mv88e6352_port_get_cmode,
3191 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3586 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3192 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 3587 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
3193 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3588 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3201,10 +3596,13 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
3201 .reset = mv88e6352_g1_reset, 3596 .reset = mv88e6352_g1_reset,
3202 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3597 .vtu_getnext = mv88e6352_g1_vtu_getnext,
3203 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3598 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
3599 .phylink_validate = mv88e6185_phylink_validate,
3204}; 3600};
3205 3601
3206static const struct mv88e6xxx_ops mv88e6351_ops = { 3602static const struct mv88e6xxx_ops mv88e6351_ops = {
3207 /* MV88E6XXX_FAMILY_6351 */ 3603 /* MV88E6XXX_FAMILY_6351 */
3604 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
3605 .ip_pri_map = mv88e6085_g1_ip_pri_map,
3208 .irl_init_all = mv88e6352_g2_irl_init_all, 3606 .irl_init_all = mv88e6352_g2_irl_init_all,
3209 .set_switch_mac = mv88e6xxx_g2_set_switch_mac, 3607 .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
3210 .phy_read = mv88e6xxx_g2_smi_phy_read, 3608 .phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -3222,6 +3620,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
3222 .port_pause_limit = mv88e6097_port_pause_limit, 3620 .port_pause_limit = mv88e6097_port_pause_limit,
3223 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3621 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3224 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3622 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3623 .port_link_state = mv88e6352_port_link_state,
3624 .port_get_cmode = mv88e6352_port_get_cmode,
3225 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3625 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3226 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 3626 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
3227 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3627 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3236,10 +3636,14 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
3236 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3636 .vtu_getnext = mv88e6352_g1_vtu_getnext,
3237 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3637 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
3238 .avb_ops = &mv88e6352_avb_ops, 3638 .avb_ops = &mv88e6352_avb_ops,
3639 .ptp_ops = &mv88e6352_ptp_ops,
3640 .phylink_validate = mv88e6185_phylink_validate,
3239}; 3641};
3240 3642
3241static const struct mv88e6xxx_ops mv88e6352_ops = { 3643static const struct mv88e6xxx_ops mv88e6352_ops = {
3242 /* MV88E6XXX_FAMILY_6352 */ 3644 /* MV88E6XXX_FAMILY_6352 */
3645 .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
3646 .ip_pri_map = mv88e6085_g1_ip_pri_map,
3243 .irl_init_all = mv88e6352_g2_irl_init_all, 3647 .irl_init_all = mv88e6352_g2_irl_init_all,
3244 .get_eeprom = mv88e6xxx_g2_get_eeprom16, 3648 .get_eeprom = mv88e6xxx_g2_get_eeprom16,
3245 .set_eeprom = mv88e6xxx_g2_set_eeprom16, 3649 .set_eeprom = mv88e6xxx_g2_set_eeprom16,
@@ -3259,6 +3663,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
3259 .port_pause_limit = mv88e6097_port_pause_limit, 3663 .port_pause_limit = mv88e6097_port_pause_limit,
3260 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3664 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3261 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3665 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3666 .port_link_state = mv88e6352_port_link_state,
3667 .port_get_cmode = mv88e6352_port_get_cmode,
3262 .stats_snapshot = mv88e6320_g1_stats_snapshot, 3668 .stats_snapshot = mv88e6320_g1_stats_snapshot,
3263 .stats_set_histogram = mv88e6095_g1_stats_set_histogram, 3669 .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
3264 .stats_get_sset_count = mv88e6095_stats_get_sset_count, 3670 .stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3270,14 +3676,19 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
3270 .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, 3676 .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
3271 .pot_clear = mv88e6xxx_g2_pot_clear, 3677 .pot_clear = mv88e6xxx_g2_pot_clear,
3272 .reset = mv88e6352_g1_reset, 3678 .reset = mv88e6352_g1_reset,
3679 .rmu_disable = mv88e6352_g1_rmu_disable,
3273 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3680 .vtu_getnext = mv88e6352_g1_vtu_getnext,
3274 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3681 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
3275 .serdes_power = mv88e6352_serdes_power, 3682 .serdes_power = mv88e6352_serdes_power,
3683 .serdes_irq_setup = mv88e6352_serdes_irq_setup,
3684 .serdes_irq_free = mv88e6352_serdes_irq_free,
3276 .gpio_ops = &mv88e6352_gpio_ops, 3685 .gpio_ops = &mv88e6352_gpio_ops,
3277 .avb_ops = &mv88e6352_avb_ops, 3686 .avb_ops = &mv88e6352_avb_ops,
3687 .ptp_ops = &mv88e6352_ptp_ops,
3278 .serdes_get_sset_count = mv88e6352_serdes_get_sset_count, 3688 .serdes_get_sset_count = mv88e6352_serdes_get_sset_count,
3279 .serdes_get_strings = mv88e6352_serdes_get_strings, 3689 .serdes_get_strings = mv88e6352_serdes_get_strings,
3280 .serdes_get_stats = mv88e6352_serdes_get_stats, 3690 .serdes_get_stats = mv88e6352_serdes_get_stats,
3691 .phylink_validate = mv88e6352_phylink_validate,
3281}; 3692};
3282 3693
3283static const struct mv88e6xxx_ops mv88e6390_ops = { 3694static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -3302,6 +3713,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
3302 .port_set_cmode = mv88e6390x_port_set_cmode, 3713 .port_set_cmode = mv88e6390x_port_set_cmode,
3303 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3714 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3304 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3715 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3716 .port_link_state = mv88e6352_port_link_state,
3717 .port_get_cmode = mv88e6352_port_get_cmode,
3305 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3718 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3306 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3719 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
3307 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3720 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3313,11 +3726,16 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
3313 .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, 3726 .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
3314 .pot_clear = mv88e6xxx_g2_pot_clear, 3727 .pot_clear = mv88e6xxx_g2_pot_clear,
3315 .reset = mv88e6352_g1_reset, 3728 .reset = mv88e6352_g1_reset,
3729 .rmu_disable = mv88e6390_g1_rmu_disable,
3316 .vtu_getnext = mv88e6390_g1_vtu_getnext, 3730 .vtu_getnext = mv88e6390_g1_vtu_getnext,
3317 .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, 3731 .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
3318 .serdes_power = mv88e6390_serdes_power, 3732 .serdes_power = mv88e6390_serdes_power,
3733 .serdes_irq_setup = mv88e6390_serdes_irq_setup,
3734 .serdes_irq_free = mv88e6390_serdes_irq_free,
3319 .gpio_ops = &mv88e6352_gpio_ops, 3735 .gpio_ops = &mv88e6352_gpio_ops,
3320 .avb_ops = &mv88e6390_avb_ops, 3736 .avb_ops = &mv88e6390_avb_ops,
3737 .ptp_ops = &mv88e6352_ptp_ops,
3738 .phylink_validate = mv88e6390_phylink_validate,
3321}; 3739};
3322 3740
3323static const struct mv88e6xxx_ops mv88e6390x_ops = { 3741static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -3342,6 +3760,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
3342 .port_set_cmode = mv88e6390x_port_set_cmode, 3760 .port_set_cmode = mv88e6390x_port_set_cmode,
3343 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3761 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3344 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3762 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3763 .port_link_state = mv88e6352_port_link_state,
3764 .port_get_cmode = mv88e6352_port_get_cmode,
3345 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3765 .stats_snapshot = mv88e6390_g1_stats_snapshot,
3346 .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 3766 .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
3347 .stats_get_sset_count = mv88e6320_stats_get_sset_count, 3767 .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3353,11 +3773,16 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
3353 .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, 3773 .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
3354 .pot_clear = mv88e6xxx_g2_pot_clear, 3774 .pot_clear = mv88e6xxx_g2_pot_clear,
3355 .reset = mv88e6352_g1_reset, 3775 .reset = mv88e6352_g1_reset,
3776 .rmu_disable = mv88e6390_g1_rmu_disable,
3356 .vtu_getnext = mv88e6390_g1_vtu_getnext, 3777 .vtu_getnext = mv88e6390_g1_vtu_getnext,
3357 .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, 3778 .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
3358 .serdes_power = mv88e6390_serdes_power, 3779 .serdes_power = mv88e6390x_serdes_power,
3780 .serdes_irq_setup = mv88e6390_serdes_irq_setup,
3781 .serdes_irq_free = mv88e6390_serdes_irq_free,
3359 .gpio_ops = &mv88e6352_gpio_ops, 3782 .gpio_ops = &mv88e6352_gpio_ops,
3360 .avb_ops = &mv88e6390_avb_ops, 3783 .avb_ops = &mv88e6390_avb_ops,
3784 .ptp_ops = &mv88e6352_ptp_ops,
3785 .phylink_validate = mv88e6390x_phylink_validate,
3361}; 3786};
3362 3787
3363static const struct mv88e6xxx_info mv88e6xxx_table[] = { 3788static const struct mv88e6xxx_info mv88e6xxx_table[] = {
@@ -3509,6 +3934,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3509 .pvt = true, 3934 .pvt = true,
3510 .multi_chip = true, 3935 .multi_chip = true,
3511 .tag_protocol = DSA_TAG_PROTO_EDSA, 3936 .tag_protocol = DSA_TAG_PROTO_EDSA,
3937 .ptp_support = true,
3512 .ops = &mv88e6161_ops, 3938 .ops = &mv88e6161_ops,
3513 }, 3939 },
3514 3940
@@ -3531,6 +3957,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
3531 .pvt = true, 3957 .pvt = true,
3532 .multi_chip = true, 3958 .multi_chip = true,
3533 .tag_protocol = DSA_TAG_PROTO_DSA, 3959 .tag_protocol = DSA_TAG_PROTO_DSA,
3960 .ptp_support = true,
3534 .ops = &mv88e6165_ops, 3961 .ops = &mv88e6165_ops,
3535 }, 3962 },
3536 3963
@@ -4125,6 +4552,11 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
4125 .get_tag_protocol = mv88e6xxx_get_tag_protocol, 4552 .get_tag_protocol = mv88e6xxx_get_tag_protocol,
4126 .setup = mv88e6xxx_setup, 4553 .setup = mv88e6xxx_setup,
4127 .adjust_link = mv88e6xxx_adjust_link, 4554 .adjust_link = mv88e6xxx_adjust_link,
4555 .phylink_validate = mv88e6xxx_validate,
4556 .phylink_mac_link_state = mv88e6xxx_link_state,
4557 .phylink_mac_config = mv88e6xxx_mac_config,
4558 .phylink_mac_link_down = mv88e6xxx_mac_link_down,
4559 .phylink_mac_link_up = mv88e6xxx_mac_link_up,
4128 .get_strings = mv88e6xxx_get_strings, 4560 .get_strings = mv88e6xxx_get_strings,
4129 .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, 4561 .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
4130 .get_sset_count = mv88e6xxx_get_sset_count, 4562 .get_sset_count = mv88e6xxx_get_sset_count,
@@ -4175,6 +4607,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
4175 return -ENOMEM; 4607 return -ENOMEM;
4176 4608
4177 ds->priv = chip; 4609 ds->priv = chip;
4610 ds->dev = dev;
4178 ds->ops = &mv88e6xxx_switch_ops; 4611 ds->ops = &mv88e6xxx_switch_ops;
4179 ds->ageing_time_min = chip->info->age_time_coeff; 4612 ds->ageing_time_min = chip->info->age_time_coeff;
4180 ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX; 4613 ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
@@ -4189,42 +4622,85 @@ static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip)
4189 dsa_unregister_switch(chip->ds); 4622 dsa_unregister_switch(chip->ds);
4190} 4623}
4191 4624
4625static const void *pdata_device_get_match_data(struct device *dev)
4626{
4627 const struct of_device_id *matches = dev->driver->of_match_table;
4628 const struct dsa_mv88e6xxx_pdata *pdata = dev->platform_data;
4629
4630 for (; matches->name[0] || matches->type[0] || matches->compatible[0];
4631 matches++) {
4632 if (!strcmp(pdata->compatible, matches->compatible))
4633 return matches->data;
4634 }
4635 return NULL;
4636}
4637
4192static int mv88e6xxx_probe(struct mdio_device *mdiodev) 4638static int mv88e6xxx_probe(struct mdio_device *mdiodev)
4193{ 4639{
4640 struct dsa_mv88e6xxx_pdata *pdata = mdiodev->dev.platform_data;
4641 const struct mv88e6xxx_info *compat_info = NULL;
4194 struct device *dev = &mdiodev->dev; 4642 struct device *dev = &mdiodev->dev;
4195 struct device_node *np = dev->of_node; 4643 struct device_node *np = dev->of_node;
4196 const struct mv88e6xxx_info *compat_info;
4197 struct mv88e6xxx_chip *chip; 4644 struct mv88e6xxx_chip *chip;
4198 u32 eeprom_len; 4645 int port;
4199 int err; 4646 int err;
4200 4647
4201 compat_info = of_device_get_match_data(dev); 4648 if (!np && !pdata)
4649 return -EINVAL;
4650
4651 if (np)
4652 compat_info = of_device_get_match_data(dev);
4653
4654 if (pdata) {
4655 compat_info = pdata_device_get_match_data(dev);
4656
4657 if (!pdata->netdev)
4658 return -EINVAL;
4659
4660 for (port = 0; port < DSA_MAX_PORTS; port++) {
4661 if (!(pdata->enabled_ports & (1 << port)))
4662 continue;
4663 if (strcmp(pdata->cd.port_names[port], "cpu"))
4664 continue;
4665 pdata->cd.netdev[port] = &pdata->netdev->dev;
4666 break;
4667 }
4668 }
4669
4202 if (!compat_info) 4670 if (!compat_info)
4203 return -EINVAL; 4671 return -EINVAL;
4204 4672
4205 chip = mv88e6xxx_alloc_chip(dev); 4673 chip = mv88e6xxx_alloc_chip(dev);
4206 if (!chip) 4674 if (!chip) {
4207 return -ENOMEM; 4675 err = -ENOMEM;
4676 goto out;
4677 }
4208 4678
4209 chip->info = compat_info; 4679 chip->info = compat_info;
4210 4680
4211 err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); 4681 err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
4212 if (err) 4682 if (err)
4213 return err; 4683 goto out;
4214 4684
4215 chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); 4685 chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
4216 if (IS_ERR(chip->reset)) 4686 if (IS_ERR(chip->reset)) {
4217 return PTR_ERR(chip->reset); 4687 err = PTR_ERR(chip->reset);
4688 goto out;
4689 }
4218 4690
4219 err = mv88e6xxx_detect(chip); 4691 err = mv88e6xxx_detect(chip);
4220 if (err) 4692 if (err)
4221 return err; 4693 goto out;
4222 4694
4223 mv88e6xxx_phy_init(chip); 4695 mv88e6xxx_phy_init(chip);
4224 4696
4225 if (chip->info->ops->get_eeprom && 4697 if (chip->info->ops->get_eeprom) {
4226 !of_property_read_u32(np, "eeprom-length", &eeprom_len)) 4698 if (np)
4227 chip->eeprom_len = eeprom_len; 4699 of_property_read_u32(np, "eeprom-length",
4700 &chip->eeprom_len);
4701 else
4702 chip->eeprom_len = pdata->eeprom_len;
4703 }
4228 4704
4229 mutex_lock(&chip->reg_lock); 4705 mutex_lock(&chip->reg_lock);
4230 err = mv88e6xxx_switch_reset(chip); 4706 err = mv88e6xxx_switch_reset(chip);
@@ -4286,13 +4762,14 @@ out_g2_irq:
4286 if (chip->info->g2_irqs > 0) 4762 if (chip->info->g2_irqs > 0)
4287 mv88e6xxx_g2_irq_free(chip); 4763 mv88e6xxx_g2_irq_free(chip);
4288out_g1_irq: 4764out_g1_irq:
4289 mutex_lock(&chip->reg_lock);
4290 if (chip->irq > 0) 4765 if (chip->irq > 0)
4291 mv88e6xxx_g1_irq_free(chip); 4766 mv88e6xxx_g1_irq_free(chip);
4292 else 4767 else
4293 mv88e6xxx_irq_poll_free(chip); 4768 mv88e6xxx_irq_poll_free(chip);
4294 mutex_unlock(&chip->reg_lock);
4295out: 4769out:
4770 if (pdata)
4771 dev_put(pdata->netdev);
4772
4296 return err; 4773 return err;
4297} 4774}
4298 4775
@@ -4316,12 +4793,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
4316 if (chip->info->g2_irqs > 0) 4793 if (chip->info->g2_irqs > 0)
4317 mv88e6xxx_g2_irq_free(chip); 4794 mv88e6xxx_g2_irq_free(chip);
4318 4795
4319 mutex_lock(&chip->reg_lock);
4320 if (chip->irq > 0) 4796 if (chip->irq > 0)
4321 mv88e6xxx_g1_irq_free(chip); 4797 mv88e6xxx_g1_irq_free(chip);
4322 else 4798 else
4323 mv88e6xxx_irq_poll_free(chip); 4799 mv88e6xxx_irq_poll_free(chip);
4324 mutex_unlock(&chip->reg_lock);
4325} 4800}
4326 4801
4327static const struct of_device_id mv88e6xxx_of_match[] = { 4802static const struct of_device_id mv88e6xxx_of_match[] = {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 12b7f4649b25..f9ecb7872d32 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -21,10 +21,6 @@
21#include <linux/timecounter.h> 21#include <linux/timecounter.h>
22#include <net/dsa.h> 22#include <net/dsa.h>
23 23
24#ifndef UINT64_MAX
25#define UINT64_MAX (u64)(~((u64)0))
26#endif
27
28#define SMI_CMD 0x00 24#define SMI_CMD 0x00
29#define SMI_CMD_BUSY BIT(15) 25#define SMI_CMD_BUSY BIT(15)
30#define SMI_CMD_CLAUSE_22 BIT(12) 26#define SMI_CMD_CLAUSE_22 BIT(12)
@@ -159,6 +155,7 @@ struct mv88e6xxx_bus_ops;
159struct mv88e6xxx_irq_ops; 155struct mv88e6xxx_irq_ops;
160struct mv88e6xxx_gpio_ops; 156struct mv88e6xxx_gpio_ops;
161struct mv88e6xxx_avb_ops; 157struct mv88e6xxx_avb_ops;
158struct mv88e6xxx_ptp_ops;
162 159
163struct mv88e6xxx_irq { 160struct mv88e6xxx_irq {
164 u16 masked; 161 u16 masked;
@@ -194,12 +191,16 @@ struct mv88e6xxx_port_hwtstamp {
194}; 191};
195 192
196struct mv88e6xxx_port { 193struct mv88e6xxx_port {
194 struct mv88e6xxx_chip *chip;
195 int port;
197 u64 serdes_stats[2]; 196 u64 serdes_stats[2];
198 u64 atu_member_violation; 197 u64 atu_member_violation;
199 u64 atu_miss_violation; 198 u64 atu_miss_violation;
200 u64 atu_full_violation; 199 u64 atu_full_violation;
201 u64 vtu_member_violation; 200 u64 vtu_member_violation;
202 u64 vtu_miss_violation; 201 u64 vtu_miss_violation;
202 u8 cmode;
203 int serdes_irq;
203}; 204};
204 205
205struct mv88e6xxx_chip { 206struct mv88e6xxx_chip {
@@ -242,7 +243,7 @@ struct mv88e6xxx_chip {
242 struct gpio_desc *reset; 243 struct gpio_desc *reset;
243 244
244 /* set to size of eeprom if supported by the switch */ 245 /* set to size of eeprom if supported by the switch */
245 int eeprom_len; 246 u32 eeprom_len;
246 247
247 /* List of mdio busses */ 248 /* List of mdio busses */
248 struct list_head mdios; 249 struct list_head mdios;
@@ -277,6 +278,7 @@ struct mv88e6xxx_chip {
277 struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO]; 278 struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO];
278 u16 trig_config; 279 u16 trig_config;
279 u16 evcap_config; 280 u16 evcap_config;
281 u16 enable_count;
280 282
281 /* Per-port timestamping resources. */ 283 /* Per-port timestamping resources. */
282 struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS]; 284 struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS];
@@ -298,6 +300,9 @@ struct mv88e6xxx_mdio_bus {
298}; 300};
299 301
300struct mv88e6xxx_ops { 302struct mv88e6xxx_ops {
303 int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
304 int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
305
301 /* Ingress Rate Limit unit (IRL) operations */ 306 /* Ingress Rate Limit unit (IRL) operations */
302 int (*irl_init_all)(struct mv88e6xxx_chip *chip, int port); 307 int (*irl_init_all)(struct mv88e6xxx_chip *chip, int port);
303 308
@@ -350,6 +355,13 @@ struct mv88e6xxx_ops {
350 */ 355 */
351 int (*port_set_duplex)(struct mv88e6xxx_chip *chip, int port, int dup); 356 int (*port_set_duplex)(struct mv88e6xxx_chip *chip, int port, int dup);
352 357
358#define PAUSE_ON 1
359#define PAUSE_OFF 0
360
361 /* Enable/disable sending Pause */
362 int (*port_set_pause)(struct mv88e6xxx_chip *chip, int port,
363 int pause);
364
353#define SPEED_MAX INT_MAX 365#define SPEED_MAX INT_MAX
354#define SPEED_UNFORCED -2 366#define SPEED_UNFORCED -2
355 367
@@ -382,12 +394,16 @@ struct mv88e6xxx_ops {
382 */ 394 */
383 int (*port_set_cmode)(struct mv88e6xxx_chip *chip, int port, 395 int (*port_set_cmode)(struct mv88e6xxx_chip *chip, int port,
384 phy_interface_t mode); 396 phy_interface_t mode);
397 int (*port_get_cmode)(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
385 398
386 /* Some devices have a per port register indicating what is 399 /* Some devices have a per port register indicating what is
387 * the upstream port this port should forward to. 400 * the upstream port this port should forward to.
388 */ 401 */
389 int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port, 402 int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port,
390 int upstream_port); 403 int upstream_port);
404 /* Return the port link state, as required by phylink */
405 int (*port_link_state)(struct mv88e6xxx_chip *chip, int port,
406 struct phylink_link_state *state);
391 407
392 /* Snapshot the statistics for a port. The statistics can then 408 /* Snapshot the statistics for a port. The statistics can then
393 * be read back a leisure but still with a consistent view. 409 * be read back a leisure but still with a consistent view.
@@ -406,6 +422,12 @@ struct mv88e6xxx_ops {
406 uint64_t *data); 422 uint64_t *data);
407 int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port); 423 int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
408 int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port); 424 int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port);
425
426#define MV88E6XXX_CASCADE_PORT_NONE 0xe
427#define MV88E6XXX_CASCADE_PORT_MULTIPLE 0xf
428
429 int (*set_cascade_port)(struct mv88e6xxx_chip *chip, int port);
430
409 const struct mv88e6xxx_irq_ops *watchdog_ops; 431 const struct mv88e6xxx_irq_ops *watchdog_ops;
410 432
411 int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip); 433 int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
@@ -413,6 +435,10 @@ struct mv88e6xxx_ops {
413 /* Power on/off a SERDES interface */ 435 /* Power on/off a SERDES interface */
414 int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, bool on); 436 int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, bool on);
415 437
438 /* SERDES interrupt handling */
439 int (*serdes_irq_setup)(struct mv88e6xxx_chip *chip, int port);
440 void (*serdes_irq_free)(struct mv88e6xxx_chip *chip, int port);
441
416 /* Statistics from the SERDES interface */ 442 /* Statistics from the SERDES interface */
417 int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port); 443 int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
418 int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port, 444 int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port,
@@ -431,6 +457,17 @@ struct mv88e6xxx_ops {
431 457
432 /* Interface to the AVB/PTP registers */ 458 /* Interface to the AVB/PTP registers */
433 const struct mv88e6xxx_avb_ops *avb_ops; 459 const struct mv88e6xxx_avb_ops *avb_ops;
460
461 /* Remote Management Unit operations */
462 int (*rmu_disable)(struct mv88e6xxx_chip *chip);
463
464 /* Precision Time Protocol operations */
465 const struct mv88e6xxx_ptp_ops *ptp_ops;
466
467 /* Phylink */
468 void (*phylink_validate)(struct mv88e6xxx_chip *chip, int port,
469 unsigned long *mask,
470 struct phylink_link_state *state);
434}; 471};
435 472
436struct mv88e6xxx_irq_ops { 473struct mv88e6xxx_irq_ops {
@@ -478,6 +515,24 @@ struct mv88e6xxx_avb_ops {
478 int (*tai_write)(struct mv88e6xxx_chip *chip, int addr, u16 data); 515 int (*tai_write)(struct mv88e6xxx_chip *chip, int addr, u16 data);
479}; 516};
480 517
518struct mv88e6xxx_ptp_ops {
519 u64 (*clock_read)(const struct cyclecounter *cc);
520 int (*ptp_enable)(struct ptp_clock_info *ptp,
521 struct ptp_clock_request *rq, int on);
522 int (*ptp_verify)(struct ptp_clock_info *ptp, unsigned int pin,
523 enum ptp_pin_function func, unsigned int chan);
524 void (*event_work)(struct work_struct *ugly);
525 int (*port_enable)(struct mv88e6xxx_chip *chip, int port);
526 int (*port_disable)(struct mv88e6xxx_chip *chip, int port);
527 int (*global_enable)(struct mv88e6xxx_chip *chip);
528 int (*global_disable)(struct mv88e6xxx_chip *chip);
529 int n_ext_ts;
530 int arr0_sts_reg;
531 int arr1_sts_reg;
532 int dep_sts_reg;
533 u32 rx_filters;
534};
535
481#define STATS_TYPE_PORT BIT(0) 536#define STATS_TYPE_PORT BIT(0)
482#define STATS_TYPE_BANK0 BIT(1) 537#define STATS_TYPE_BANK0 BIT(1)
483#define STATS_TYPE_BANK1 BIT(2) 538#define STATS_TYPE_BANK1 BIT(2)
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index b43bd6476632..38e399e0f30e 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -241,6 +241,64 @@ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip)
241 return mv88e6185_g1_wait_ppu_disabled(chip); 241 return mv88e6185_g1_wait_ppu_disabled(chip);
242} 242}
243 243
244/* Offset 0x10: IP-PRI Mapping Register 0
245 * Offset 0x11: IP-PRI Mapping Register 1
246 * Offset 0x12: IP-PRI Mapping Register 2
247 * Offset 0x13: IP-PRI Mapping Register 3
248 * Offset 0x14: IP-PRI Mapping Register 4
249 * Offset 0x15: IP-PRI Mapping Register 5
250 * Offset 0x16: IP-PRI Mapping Register 6
251 * Offset 0x17: IP-PRI Mapping Register 7
252 */
253
254int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip)
255{
256 int err;
257
258 /* Reset the IP TOS/DiffServ/Traffic priorities to defaults */
259 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_0, 0x0000);
260 if (err)
261 return err;
262
263 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_1, 0x0000);
264 if (err)
265 return err;
266
267 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_2, 0x5555);
268 if (err)
269 return err;
270
271 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_3, 0x5555);
272 if (err)
273 return err;
274
275 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_4, 0xaaaa);
276 if (err)
277 return err;
278
279 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_5, 0xaaaa);
280 if (err)
281 return err;
282
283 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_6, 0xffff);
284 if (err)
285 return err;
286
287 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_7, 0xffff);
288 if (err)
289 return err;
290
291 return 0;
292}
293
294/* Offset 0x18: IEEE-PRI Register */
295
296int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
297{
298 /* Reset the IEEE Tag priorities to defaults */
299 return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41);
300}
301
244/* Offset 0x1a: Monitor Control */ 302/* Offset 0x1a: Monitor Control */
245/* Offset 0x1a: Monitor & MGMT Control on some devices */ 303/* Offset 0x1a: Monitor & MGMT Control on some devices */
246 304
@@ -350,20 +408,59 @@ int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
350 408
351/* Offset 0x1c: Global Control 2 */ 409/* Offset 0x1c: Global Control 2 */
352 410
353int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip) 411static int mv88e6xxx_g1_ctl2_mask(struct mv88e6xxx_chip *chip, u16 mask,
412 u16 val)
354{ 413{
355 u16 val; 414 u16 reg;
356 int err; 415 int err;
357 416
358 err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL2, &val); 417 err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL2, &reg);
359 if (err) 418 if (err)
360 return err; 419 return err;
361 420
362 val |= MV88E6XXX_G1_CTL2_HIST_RX_TX; 421 reg &= ~mask;
422 reg |= val & mask;
363 423
364 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, val); 424 return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, reg);
425}
365 426
366 return err; 427int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port)
428{
429 const u16 mask = MV88E6185_G1_CTL2_CASCADE_PORT_MASK;
430
431 return mv88e6xxx_g1_ctl2_mask(chip, mask, port << __bf_shf(mask));
432}
433
434int mv88e6085_g1_rmu_disable(struct mv88e6xxx_chip *chip)
435{
436 return mv88e6xxx_g1_ctl2_mask(chip, MV88E6085_G1_CTL2_P10RM |
437 MV88E6085_G1_CTL2_RM_ENABLE, 0);
438}
439
440int mv88e6352_g1_rmu_disable(struct mv88e6xxx_chip *chip)
441{
442 return mv88e6xxx_g1_ctl2_mask(chip, MV88E6352_G1_CTL2_RMU_MODE_MASK,
443 MV88E6352_G1_CTL2_RMU_MODE_DISABLED);
444}
445
446int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip)
447{
448 return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_RMU_MODE_MASK,
449 MV88E6390_G1_CTL2_RMU_MODE_DISABLED);
450}
451
452int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
453{
454 return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_HIST_MODE_MASK,
455 MV88E6390_G1_CTL2_HIST_MODE_RX |
456 MV88E6390_G1_CTL2_HIST_MODE_TX);
457}
458
459int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index)
460{
461 return mv88e6xxx_g1_ctl2_mask(chip,
462 MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK,
463 index);
367} 464}
368 465
369/* Offset 0x1d: Statistics Operation 2 */ 466/* Offset 0x1d: Statistics Operation 2 */
@@ -470,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
470 if (err) 567 if (err)
471 return err; 568 return err;
472 569
570 /* Keep the histogram mode bits */
571 val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
473 val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL; 572 val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
474 573
475 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val); 574 err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 6aee7316fea6..bef01331266f 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -128,7 +128,7 @@
128#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000 128#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000
129#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7) 129#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7)
130#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6) 130#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6)
131#define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION BIT(5) 131#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5)
132#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4) 132#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4)
133 133
134/* Offset 0x0C: ATU Data Register */ 134/* Offset 0x0C: ATU Data Register */
@@ -201,11 +201,35 @@
201 201
202/* Offset 0x1C: Global Control 2 */ 202/* Offset 0x1C: Global Control 2 */
203#define MV88E6XXX_G1_CTL2 0x1c 203#define MV88E6XXX_G1_CTL2 0x1c
204#define MV88E6XXX_G1_CTL2_NO_CASCADE 0xe000 204#define MV88E6185_G1_CTL2_CASCADE_PORT_MASK 0xf000
205#define MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE 0xf000 205#define MV88E6185_G1_CTL2_CASCADE_PORT_NONE 0xe000
206#define MV88E6XXX_G1_CTL2_HIST_RX 0x0040 206#define MV88E6185_G1_CTL2_CASCADE_PORT_MULTI 0xf000
207#define MV88E6XXX_G1_CTL2_HIST_TX 0x0080 207#define MV88E6352_G1_CTL2_HEADER_TYPE_MASK 0xc000
208#define MV88E6XXX_G1_CTL2_HIST_RX_TX 0x00c0 208#define MV88E6352_G1_CTL2_HEADER_TYPE_ORIG 0x0000
209#define MV88E6352_G1_CTL2_HEADER_TYPE_MGMT 0x4000
210#define MV88E6390_G1_CTL2_HEADER_TYPE_LAG 0x8000
211#define MV88E6352_G1_CTL2_RMU_MODE_MASK 0x3000
212#define MV88E6352_G1_CTL2_RMU_MODE_DISABLED 0x0000
213#define MV88E6352_G1_CTL2_RMU_MODE_PORT_4 0x1000
214#define MV88E6352_G1_CTL2_RMU_MODE_PORT_5 0x2000
215#define MV88E6352_G1_CTL2_RMU_MODE_PORT_6 0x3000
216#define MV88E6085_G1_CTL2_DA_CHECK 0x4000
217#define MV88E6085_G1_CTL2_P10RM 0x2000
218#define MV88E6085_G1_CTL2_RM_ENABLE 0x1000
219#define MV88E6352_G1_CTL2_DA_CHECK 0x0800
220#define MV88E6390_G1_CTL2_RMU_MODE_MASK 0x0700
221#define MV88E6390_G1_CTL2_RMU_MODE_PORT_0 0x0000
222#define MV88E6390_G1_CTL2_RMU_MODE_PORT_1 0x0100
223#define MV88E6390_G1_CTL2_RMU_MODE_PORT_9 0x0200
224#define MV88E6390_G1_CTL2_RMU_MODE_PORT_10 0x0300
225#define MV88E6390_G1_CTL2_RMU_MODE_ALL_DSA 0x0600
226#define MV88E6390_G1_CTL2_RMU_MODE_DISABLED 0x0700
227#define MV88E6390_G1_CTL2_HIST_MODE_MASK 0x00c0
228#define MV88E6390_G1_CTL2_HIST_MODE_RX 0x0040
229#define MV88E6390_G1_CTL2_HIST_MODE_TX 0x0080
230#define MV88E6352_G1_CTL2_CTR_MODE_MASK 0x0060
231#define MV88E6390_G1_CTL2_CTR_MODE 0x0020
232#define MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK 0x001f
209 233
210/* Offset 0x1D: Stats Operation Register */ 234/* Offset 0x1D: Stats Operation Register */
211#define MV88E6XXX_G1_STATS_OP 0x1d 235#define MV88E6XXX_G1_STATS_OP 0x1d
@@ -253,6 +277,17 @@ int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
253int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); 277int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
254int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); 278int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
255 279
280int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip);
281int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip);
282
283int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port);
284
285int mv88e6085_g1_rmu_disable(struct mv88e6xxx_chip *chip);
286int mv88e6352_g1_rmu_disable(struct mv88e6xxx_chip *chip);
287int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip);
288
289int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index);
290
256int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all); 291int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all);
257int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip, 292int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
258 unsigned int msecs); 293 unsigned int msecs);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 307410898fc9..5200e4bdce93 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
349 chip->ports[entry.portvec].atu_member_violation++; 349 chip->ports[entry.portvec].atu_member_violation++;
350 } 350 }
351 351
352 if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { 352 if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
353 dev_err_ratelimited(chip->dev, 353 dev_err_ratelimited(chip->dev,
354 "ATU miss violation for %pM portvec %x\n", 354 "ATU miss violation for %pM portvec %x\n",
355 entry.mac, entry.portvec); 355 entry.mac, entry.portvec);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 8d22d66d84b7..91a3cb2452ac 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -119,37 +119,17 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
119 119
120/* Offset 0x06: Device Mapping Table register */ 120/* Offset 0x06: Device Mapping Table register */
121 121
122static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, 122int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
123 int target, int port) 123 int port)
124{ 124{
125 u16 val = (target << 8) | (port & 0xf); 125 u16 val = (target << 8) | (port & 0x1f);
126 /* Modern chips use 5 bits to define a device mapping port,
127 * but bit 4 is reserved on older chips, so it is safe to use.
128 */
126 129
127 return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_DEVICE_MAPPING, val); 130 return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_DEVICE_MAPPING, val);
128} 131}
129 132
130static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip)
131{
132 int target, port;
133 int err;
134
135 /* Initialize the routing port to the 32 possible target devices */
136 for (target = 0; target < 32; ++target) {
137 port = 0xf;
138
139 if (target < DSA_MAX_SWITCHES) {
140 port = chip->ds->rtable[target];
141 if (port == DSA_RTABLE_NONE)
142 port = 0xf;
143 }
144
145 err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
146 if (err)
147 break;
148 }
149
150 return err;
151}
152
153/* Offset 0x07: Trunk Mask Table register */ 133/* Offset 0x07: Trunk Mask Table register */
154 134
155static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num, 135static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
@@ -174,7 +154,7 @@ static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
174 return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MAPPING, val); 154 return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MAPPING, val);
175} 155}
176 156
177static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip) 157int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
178{ 158{
179 const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1; 159 const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
180 int i, err; 160 int i, err;
@@ -1067,9 +1047,6 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
1067{ 1047{
1068 int err, irq, virq; 1048 int err, irq, virq;
1069 1049
1070 if (!chip->dev->of_node)
1071 return -EINVAL;
1072
1073 chip->g2_irq.domain = irq_domain_add_simple( 1050 chip->g2_irq.domain = irq_domain_add_simple(
1074 chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip); 1051 chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
1075 if (!chip->g2_irq.domain) 1052 if (!chip->g2_irq.domain)
@@ -1138,31 +1115,3 @@ void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip,
1138 for (phy = 0; phy < chip->info->num_internal_phys; phy++) 1115 for (phy = 0; phy < chip->info->num_internal_phys; phy++)
1139 irq_dispose_mapping(bus->irq[phy]); 1116 irq_dispose_mapping(bus->irq[phy]);
1140} 1117}
1141
1142int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
1143{
1144 u16 reg;
1145 int err;
1146
1147 /* Ignore removed tag data on doubly tagged packets, disable
1148 * flow control messages, force flow control priority to the
1149 * highest, and send all special multicast frames to the CPU
1150 * port at the highest priority.
1151 */
1152 reg = MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI | (0x7 << 4);
1153 err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, reg);
1154 if (err)
1155 return err;
1156
1157 /* Program the DSA routing table. */
1158 err = mv88e6xxx_g2_set_device_mapping(chip);
1159 if (err)
1160 return err;
1161
1162 /* Clear all trunk masks and mapping. */
1163 err = mv88e6xxx_g2_clear_trunk(chip);
1164 if (err)
1165 return err;
1166
1167 return 0;
1168}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 520ec70d32e8..194660d8c783 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -60,7 +60,8 @@
60#define MV88E6XXX_G2_DEVICE_MAPPING 0x06 60#define MV88E6XXX_G2_DEVICE_MAPPING 0x06
61#define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000 61#define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000
62#define MV88E6XXX_G2_DEVICE_MAPPING_DEV_MASK 0x1f00 62#define MV88E6XXX_G2_DEVICE_MAPPING_DEV_MASK 0x1f00
63#define MV88E6XXX_G2_DEVICE_MAPPING_PORT_MASK 0x000f 63#define MV88E6352_G2_DEVICE_MAPPING_PORT_MASK 0x000f
64#define MV88E6390_G2_DEVICE_MAPPING_PORT_MASK 0x001f
64 65
65/* Offset 0x07: Trunk Mask Table Register */ 66/* Offset 0x07: Trunk Mask Table Register */
66#define MV88E6XXX_G2_TRUNK_MASK 0x07 67#define MV88E6XXX_G2_TRUNK_MASK 0x07
@@ -159,6 +160,7 @@
159#define MV88E6390_G2_AVB_CMD_OP_WRITE 0x6000 160#define MV88E6390_G2_AVB_CMD_OP_WRITE 0x6000
160#define MV88E6352_G2_AVB_CMD_PORT_MASK 0x0f00 161#define MV88E6352_G2_AVB_CMD_PORT_MASK 0x0f00
161#define MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL 0xe 162#define MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL 0xe
163#define MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL 0xf
162#define MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL 0xf 164#define MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL 0xf
163#define MV88E6390_G2_AVB_CMD_PORT_MASK 0x1f00 165#define MV88E6390_G2_AVB_CMD_PORT_MASK 0x1f00
164#define MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL 0x1e 166#define MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL 0x1e
@@ -313,7 +315,6 @@ int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
313 int src_port, u16 data); 315 int src_port, u16 data);
314int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip); 316int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip);
315 317
316int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip);
317int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); 318int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip);
318void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); 319void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip);
319 320
@@ -327,9 +328,15 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
327 328
328int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip); 329int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip);
329 330
331int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
332
333int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
334 int port);
335
330extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; 336extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
331extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; 337extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
332 338
339extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
333extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops; 340extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops;
334extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops; 341extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops;
335 342
@@ -441,11 +448,6 @@ static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
441 return -EOPNOTSUPP; 448 return -EOPNOTSUPP;
442} 449}
443 450
444static inline int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
445{
446 return -EOPNOTSUPP;
447}
448
449static inline int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) 451static inline int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
450{ 452{
451 return -EOPNOTSUPP; 453 return -EOPNOTSUPP;
@@ -484,6 +486,7 @@ static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
484static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {}; 486static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {};
485static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {}; 487static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {};
486 488
489static const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {};
487static const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {}; 490static const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {};
488static const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {}; 491static const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {};
489 492
@@ -495,6 +498,17 @@ static inline int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
495 return -EOPNOTSUPP; 498 return -EOPNOTSUPP;
496} 499}
497 500
501static inline int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
502{
503 return -EOPNOTSUPP;
504}
505
506static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
507 int target, int port)
508{
509 return -EOPNOTSUPP;
510}
511
498#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ 512#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
499 513
500#endif /* _MV88E6XXX_GLOBAL2_H */ 514#endif /* _MV88E6XXX_GLOBAL2_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global2_avb.c b/drivers/net/dsa/mv88e6xxx/global2_avb.c
index 2e398ccb88ca..672b503a67e1 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_avb.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_avb.c
@@ -130,6 +130,31 @@ const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {
130 .tai_write = mv88e6352_g2_avb_tai_write, 130 .tai_write = mv88e6352_g2_avb_tai_write,
131}; 131};
132 132
133static int mv88e6165_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr,
134 u16 *data, int len)
135{
136 return mv88e6352_g2_avb_port_ptp_read(chip,
137 MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL,
138 addr, data, len);
139}
140
141static int mv88e6165_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr,
142 u16 data)
143{
144 return mv88e6352_g2_avb_port_ptp_write(chip,
145 MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL,
146 addr, data);
147}
148
149const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {
150 .port_ptp_read = mv88e6352_g2_avb_port_ptp_read,
151 .port_ptp_write = mv88e6352_g2_avb_port_ptp_write,
152 .ptp_read = mv88e6352_g2_avb_ptp_read,
153 .ptp_write = mv88e6352_g2_avb_ptp_write,
154 .tai_read = mv88e6165_g2_avb_tai_read,
155 .tai_write = mv88e6165_g2_avb_tai_write,
156};
157
133static int mv88e6390_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip, 158static int mv88e6390_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip,
134 int port, int addr, u16 *data, 159 int port, int addr, u16 *data,
135 int len) 160 int len)
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index a036c490b7ce..a17c16a2ab78 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -51,17 +51,30 @@ static int mv88e6xxx_ptp_write(struct mv88e6xxx_chip *chip, int addr,
51 return chip->info->ops->avb_ops->ptp_write(chip, addr, data); 51 return chip->info->ops->avb_ops->ptp_write(chip, addr, data);
52} 52}
53 53
54static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr,
55 u16 *data)
56{
57 if (!chip->info->ops->avb_ops->ptp_read)
58 return -EOPNOTSUPP;
59
60 return chip->info->ops->avb_ops->ptp_read(chip, addr, data, 1);
61}
62
54/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX 63/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX
55 * timestamp. When working properly, hardware will produce a timestamp 64 * timestamp. When working properly, hardware will produce a timestamp
56 * within 1ms. Software may enounter delays due to MDIO contention, so 65 * within 1ms. Software may enounter delays due to MDIO contention, so
57 * the timeout is set accordingly. 66 * the timeout is set accordingly.
58 */ 67 */
59#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(20) 68#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
60 69
61int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, 70int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
62 struct ethtool_ts_info *info) 71 struct ethtool_ts_info *info)
63{ 72{
64 struct mv88e6xxx_chip *chip = ds->priv; 73 const struct mv88e6xxx_ptp_ops *ptp_ops;
74 struct mv88e6xxx_chip *chip;
75
76 chip = ds->priv;
77 ptp_ops = chip->info->ops->ptp_ops;
65 78
66 if (!chip->info->ptp_support) 79 if (!chip->info->ptp_support)
67 return -EOPNOTSUPP; 80 return -EOPNOTSUPP;
@@ -74,17 +87,7 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
74 info->tx_types = 87 info->tx_types =
75 (1 << HWTSTAMP_TX_OFF) | 88 (1 << HWTSTAMP_TX_OFF) |
76 (1 << HWTSTAMP_TX_ON); 89 (1 << HWTSTAMP_TX_ON);
77 info->rx_filters = 90 info->rx_filters = ptp_ops->rx_filters;
78 (1 << HWTSTAMP_FILTER_NONE) |
79 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
80 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
81 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
82 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
83 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
84 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
85 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
86 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
87 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
88 91
89 return 0; 92 return 0;
90} 93}
@@ -92,10 +95,9 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
92static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, 95static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
93 struct hwtstamp_config *config) 96 struct hwtstamp_config *config)
94{ 97{
98 const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
95 struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port]; 99 struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
96 bool tstamp_enable = false; 100 bool tstamp_enable = false;
97 u16 port_config0;
98 int err;
99 101
100 /* Prevent the TX/RX paths from trying to interact with the 102 /* Prevent the TX/RX paths from trying to interact with the
101 * timestamp hardware while we reconfigure it. 103 * timestamp hardware while we reconfigure it.
@@ -120,6 +122,14 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
120 /* The switch supports timestamping both L2 and L4; one cannot be 122 /* The switch supports timestamping both L2 and L4; one cannot be
121 * disabled independently of the other. 123 * disabled independently of the other.
122 */ 124 */
125
126 if (!(BIT(config->rx_filter) & ptp_ops->rx_filters)) {
127 config->rx_filter = HWTSTAMP_FILTER_NONE;
128 dev_dbg(chip->dev, "Unsupported rx_filter %d\n",
129 config->rx_filter);
130 return -ERANGE;
131 }
132
123 switch (config->rx_filter) { 133 switch (config->rx_filter) {
124 case HWTSTAMP_FILTER_NONE: 134 case HWTSTAMP_FILTER_NONE:
125 tstamp_enable = false; 135 tstamp_enable = false;
@@ -141,24 +151,22 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
141 return -ERANGE; 151 return -ERANGE;
142 } 152 }
143 153
154 mutex_lock(&chip->reg_lock);
144 if (tstamp_enable) { 155 if (tstamp_enable) {
145 /* Disable transportSpecific value matching, so that packets 156 chip->enable_count += 1;
146 * with either 1588 (0) and 802.1AS (1) will be timestamped. 157 if (chip->enable_count == 1 && ptp_ops->global_enable)
147 */ 158 ptp_ops->global_enable(chip);
148 port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH; 159 if (ptp_ops->port_enable)
160 ptp_ops->port_enable(chip, port);
149 } else { 161 } else {
150 /* Disable PTP. This disables both RX and TX timestamping. */ 162 if (ptp_ops->port_disable)
151 port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP; 163 ptp_ops->port_disable(chip, port);
164 chip->enable_count -= 1;
165 if (chip->enable_count == 0 && ptp_ops->global_disable)
166 ptp_ops->global_disable(chip);
152 } 167 }
153
154 mutex_lock(&chip->reg_lock);
155 err = mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
156 port_config0);
157 mutex_unlock(&chip->reg_lock); 168 mutex_unlock(&chip->reg_lock);
158 169
159 if (err < 0)
160 return err;
161
162 /* Once hardware has been configured, enable timestamp checks 170 /* Once hardware has been configured, enable timestamp checks
163 * in the RX/TX paths. 171 * in the RX/TX paths.
164 */ 172 */
@@ -338,17 +346,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
338static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip, 346static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip,
339 struct mv88e6xxx_port_hwtstamp *ps) 347 struct mv88e6xxx_port_hwtstamp *ps)
340{ 348{
349 const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
341 struct sk_buff *skb; 350 struct sk_buff *skb;
342 351
343 skb = skb_dequeue(&ps->rx_queue); 352 skb = skb_dequeue(&ps->rx_queue);
344 353
345 if (skb) 354 if (skb)
346 mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR0_STS, 355 mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr0_sts_reg,
347 &ps->rx_queue); 356 &ps->rx_queue);
348 357
349 skb = skb_dequeue(&ps->rx_queue2); 358 skb = skb_dequeue(&ps->rx_queue2);
350 if (skb) 359 if (skb)
351 mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR1_STS, 360 mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr1_sts_reg,
352 &ps->rx_queue2); 361 &ps->rx_queue2);
353} 362}
354 363
@@ -389,6 +398,7 @@ bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
389static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip, 398static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
390 struct mv88e6xxx_port_hwtstamp *ps) 399 struct mv88e6xxx_port_hwtstamp *ps)
391{ 400{
401 const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
392 struct skb_shared_hwtstamps shhwtstamps; 402 struct skb_shared_hwtstamps shhwtstamps;
393 u16 departure_block[4], status; 403 u16 departure_block[4], status;
394 struct sk_buff *tmp_skb; 404 struct sk_buff *tmp_skb;
@@ -401,7 +411,7 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
401 411
402 mutex_lock(&chip->reg_lock); 412 mutex_lock(&chip->reg_lock);
403 err = mv88e6xxx_port_ptp_read(chip, ps->port_id, 413 err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
404 MV88E6XXX_PORT_PTP_DEP_STS, 414 ptp_ops->dep_sts_reg,
405 departure_block, 415 departure_block,
406 ARRAY_SIZE(departure_block)); 416 ARRAY_SIZE(departure_block));
407 mutex_unlock(&chip->reg_lock); 417 mutex_unlock(&chip->reg_lock);
@@ -425,8 +435,7 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
425 435
426 /* We have the timestamp; go ahead and clear valid now */ 436 /* We have the timestamp; go ahead and clear valid now */
427 mutex_lock(&chip->reg_lock); 437 mutex_lock(&chip->reg_lock);
428 mv88e6xxx_port_ptp_write(chip, ps->port_id, 438 mv88e6xxx_port_ptp_write(chip, ps->port_id, ptp_ops->dep_sts_reg, 0);
429 MV88E6XXX_PORT_PTP_DEP_STS, 0);
430 mutex_unlock(&chip->reg_lock); 439 mutex_unlock(&chip->reg_lock);
431 440
432 status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK; 441 status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK;
@@ -522,8 +531,48 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
522 return true; 531 return true;
523} 532}
524 533
534int mv88e6165_global_disable(struct mv88e6xxx_chip *chip)
535{
536 u16 val;
537 int err;
538
539 err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val);
540 if (err)
541 return err;
542 val |= MV88E6165_PTP_CFG_DISABLE_PTP;
543
544 return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val);
545}
546
547int mv88e6165_global_enable(struct mv88e6xxx_chip *chip)
548{
549 u16 val;
550 int err;
551
552 err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val);
553 if (err)
554 return err;
555
556 val &= ~(MV88E6165_PTP_CFG_DISABLE_PTP | MV88E6165_PTP_CFG_TSPEC_MASK);
557
558 return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val);
559}
560
561int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port)
562{
563 return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
564 MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP);
565}
566
567int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port)
568{
569 return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
570 MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH);
571}
572
525static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port) 573static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port)
526{ 574{
575 const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
527 struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port]; 576 struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
528 577
529 ps->port_id = port; 578 ps->port_id = port;
@@ -531,12 +580,15 @@ static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port)
531 skb_queue_head_init(&ps->rx_queue); 580 skb_queue_head_init(&ps->rx_queue);
532 skb_queue_head_init(&ps->rx_queue2); 581 skb_queue_head_init(&ps->rx_queue2);
533 582
534 return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, 583 if (ptp_ops->port_disable)
535 MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP); 584 return ptp_ops->port_disable(chip, port);
585
586 return 0;
536} 587}
537 588
538int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip) 589int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
539{ 590{
591 const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
540 int err; 592 int err;
541 int i; 593 int i;
542 594
@@ -547,6 +599,18 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
547 return err; 599 return err;
548 } 600 }
549 601
602 /* Disable PTP globally */
603 if (ptp_ops->global_disable) {
604 err = ptp_ops->global_disable(chip);
605 if (err)
606 return err;
607 }
608
609 /* Set the ethertype of L2 PTP messages */
610 err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588);
611 if (err)
612 return err;
613
550 /* MV88E6XXX_PTP_MSG_TYPE is a mask of PTP message types to 614 /* MV88E6XXX_PTP_MSG_TYPE is a mask of PTP message types to
551 * timestamp. This affects all ports that have timestamping enabled, 615 * timestamp. This affects all ports that have timestamping enabled,
552 * but the timestamp config is per-port; thus we configure all events 616 * but the timestamp config is per-port; thus we configure all events
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index bc71c9212a08..b9a72661bcc4 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -19,7 +19,7 @@
19 19
20#include "chip.h" 20#include "chip.h"
21 21
22/* Global PTP registers */ 22/* Global 6352 PTP registers */
23/* Offset 0x00: PTP EtherType */ 23/* Offset 0x00: PTP EtherType */
24#define MV88E6XXX_PTP_ETHERTYPE 0x00 24#define MV88E6XXX_PTP_ETHERTYPE 0x00
25 25
@@ -34,6 +34,12 @@
34/* Offset 0x02: Timestamp Arrival Capture Pointers */ 34/* Offset 0x02: Timestamp Arrival Capture Pointers */
35#define MV88E6XXX_PTP_TS_ARRIVAL_PTR 0x02 35#define MV88E6XXX_PTP_TS_ARRIVAL_PTR 0x02
36 36
37/* Offset 0x05: PTP Global Configuration */
38#define MV88E6165_PTP_CFG 0x05
39#define MV88E6165_PTP_CFG_TSPEC_MASK 0xf000
40#define MV88E6165_PTP_CFG_DISABLE_TS_OVERWRITE BIT(1)
41#define MV88E6165_PTP_CFG_DISABLE_PTP BIT(0)
42
37/* Offset 0x07: PTP Global Configuration */ 43/* Offset 0x07: PTP Global Configuration */
38#define MV88E6341_PTP_CFG 0x07 44#define MV88E6341_PTP_CFG 0x07
39#define MV88E6341_PTP_CFG_UPDATE 0x8000 45#define MV88E6341_PTP_CFG_UPDATE 0x8000
@@ -46,7 +52,7 @@
46/* Offset 0x08: PTP Interrupt Status */ 52/* Offset 0x08: PTP Interrupt Status */
47#define MV88E6XXX_PTP_IRQ_STATUS 0x08 53#define MV88E6XXX_PTP_IRQ_STATUS 0x08
48 54
49/* Per-Port PTP Registers */ 55/* Per-Port 6352 PTP Registers */
50/* Offset 0x00: PTP Configuration 0 */ 56/* Offset 0x00: PTP Configuration 0 */
51#define MV88E6XXX_PORT_PTP_CFG0 0x00 57#define MV88E6XXX_PORT_PTP_CFG0 0x00
52#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_SHIFT 12 58#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_SHIFT 12
@@ -123,6 +129,10 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
123 129
124int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip); 130int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip);
125void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip); 131void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip);
132int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port);
133int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port);
134int mv88e6165_global_enable(struct mv88e6xxx_chip *chip);
135int mv88e6165_global_disable(struct mv88e6xxx_chip *chip);
126 136
127#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ 137#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
128 138
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index 46af8052e535..152a65d46e0b 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -110,6 +110,9 @@ int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
110 err = mv88e6xxx_phy_page_get(chip, phy, page); 110 err = mv88e6xxx_phy_page_get(chip, phy, page);
111 if (!err) { 111 if (!err) {
112 err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page); 112 err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
113 if (!err)
114 err = mv88e6xxx_phy_write(chip, phy, reg, val);
115
113 mv88e6xxx_phy_page_put(chip, phy); 116 mv88e6xxx_phy_page_put(chip, phy);
114 } 117 }
115 118
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 6315774d72b3..cd7db60a508b 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -15,9 +15,11 @@
15#include <linux/bitfield.h> 15#include <linux/bitfield.h>
16#include <linux/if_bridge.h> 16#include <linux/if_bridge.h>
17#include <linux/phy.h> 17#include <linux/phy.h>
18#include <linux/phylink.h>
18 19
19#include "chip.h" 20#include "chip.h"
20#include "port.h" 21#include "port.h"
22#include "serdes.h"
21 23
22int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, 24int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
23 u16 *val) 25 u16 *val)
@@ -35,6 +37,29 @@ int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
35 return mv88e6xxx_write(chip, addr, reg, val); 37 return mv88e6xxx_write(chip, addr, reg, val);
36} 38}
37 39
40/* Offset 0x00: MAC (or PCS or Physical) Status Register
41 *
42 * For most devices, this is read only. However the 6185 has the MyPause
43 * bit read/write.
44 */
45int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
46 int pause)
47{
48 u16 reg;
49 int err;
50
51 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
52 if (err)
53 return err;
54
55 if (pause)
56 reg |= MV88E6XXX_PORT_STS_MY_PAUSE;
57 else
58 reg &= ~MV88E6XXX_PORT_STS_MY_PAUSE;
59
60 return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
61}
62
38/* Offset 0x01: MAC (or PCS or Physical) Control Register 63/* Offset 0x01: MAC (or PCS or Physical) Control Register
39 * 64 *
40 * Link, Duplex and Flow Control have one force bit, one value bit. 65 * Link, Duplex and Flow Control have one force bit, one value bit.
@@ -203,8 +228,11 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
203 ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000; 228 ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000;
204 break; 229 break;
205 case 2500: 230 case 2500:
206 ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 | 231 if (alt_bit)
207 MV88E6390_PORT_MAC_CTL_ALTSPEED; 232 ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
233 MV88E6390_PORT_MAC_CTL_ALTSPEED;
234 else
235 ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000;
208 break; 236 break;
209 case 10000: 237 case 10000:
210 /* all bits set, fall through... */ 238 /* all bits set, fall through... */
@@ -266,6 +294,24 @@ int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
266 return mv88e6xxx_port_set_speed(chip, port, speed, false, false); 294 return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
267} 295}
268 296
297/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */
298int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
299{
300 if (speed == SPEED_MAX)
301 speed = port < 5 ? 1000 : 2500;
302
303 if (speed > 2500)
304 return -EOPNOTSUPP;
305
306 if (speed == 200 && port != 0)
307 return -EOPNOTSUPP;
308
309 if (speed == 2500 && port < 5)
310 return -EOPNOTSUPP;
311
312 return mv88e6xxx_port_set_speed(chip, port, speed, !port, true);
313}
314
269/* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */ 315/* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
270int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) 316int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
271{ 317{
@@ -317,8 +363,9 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
317int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, 363int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
318 phy_interface_t mode) 364 phy_interface_t mode)
319{ 365{
320 u16 reg; 366 int lane;
321 u16 cmode; 367 u16 cmode;
368 u16 reg;
322 int err; 369 int err;
323 370
324 if (mode == PHY_INTERFACE_MODE_NA) 371 if (mode == PHY_INTERFACE_MODE_NA)
@@ -348,6 +395,20 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
348 cmode = 0; 395 cmode = 0;
349 } 396 }
350 397
398 lane = mv88e6390x_serdes_get_lane(chip, port);
399 if (lane < 0)
400 return lane;
401
402 if (chip->ports[port].serdes_irq) {
403 err = mv88e6390_serdes_irq_disable(chip, port, lane);
404 if (err)
405 return err;
406 }
407
408 err = mv88e6390_serdes_power(chip, port, false);
409 if (err)
410 return err;
411
351 if (cmode) { 412 if (cmode) {
352 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg); 413 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
353 if (err) 414 if (err)
@@ -359,12 +420,38 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
359 err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg); 420 err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
360 if (err) 421 if (err)
361 return err; 422 return err;
423
424 err = mv88e6390_serdes_power(chip, port, true);
425 if (err)
426 return err;
427
428 if (chip->ports[port].serdes_irq) {
429 err = mv88e6390_serdes_irq_enable(chip, port, lane);
430 if (err)
431 return err;
432 }
362 } 433 }
363 434
435 chip->ports[port].cmode = cmode;
436
437 return 0;
438}
439
440int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
441{
442 int err;
443 u16 reg;
444
445 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
446 if (err)
447 return err;
448
449 *cmode = reg & MV88E6185_PORT_STS_CMODE_MASK;
450
364 return 0; 451 return 0;
365} 452}
366 453
367int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) 454int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
368{ 455{
369 int err; 456 int err;
370 u16 reg; 457 u16 reg;
@@ -378,6 +465,80 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
378 return 0; 465 return 0;
379} 466}
380 467
468int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
469 struct phylink_link_state *state)
470{
471 int err;
472 u16 reg;
473
474 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
475 if (err)
476 return err;
477
478 switch (reg & MV88E6XXX_PORT_STS_SPEED_MASK) {
479 case MV88E6XXX_PORT_STS_SPEED_10:
480 state->speed = SPEED_10;
481 break;
482 case MV88E6XXX_PORT_STS_SPEED_100:
483 state->speed = SPEED_100;
484 break;
485 case MV88E6XXX_PORT_STS_SPEED_1000:
486 state->speed = SPEED_1000;
487 break;
488 case MV88E6XXX_PORT_STS_SPEED_10000:
489 if ((reg & MV88E6XXX_PORT_STS_CMODE_MASK) ==
490 MV88E6XXX_PORT_STS_CMODE_2500BASEX)
491 state->speed = SPEED_2500;
492 else
493 state->speed = SPEED_10000;
494 break;
495 }
496
497 state->duplex = reg & MV88E6XXX_PORT_STS_DUPLEX ?
498 DUPLEX_FULL : DUPLEX_HALF;
499 state->link = !!(reg & MV88E6XXX_PORT_STS_LINK);
500 state->an_enabled = 1;
501 state->an_complete = state->link;
502
503 return 0;
504}
505
506int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port,
507 struct phylink_link_state *state)
508{
509 if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
510 u8 cmode = chip->ports[port].cmode;
511
512 /* When a port is in "Cross-chip serdes" mode, it uses
513 * 1000Base-X full duplex mode, but there is no automatic
514 * link detection. Use the sync OK status for link (as it
515 * would do for 1000Base-X mode.)
516 */
517 if (cmode == MV88E6185_PORT_STS_CMODE_SERDES) {
518 u16 mac;
519 int err;
520
521 err = mv88e6xxx_port_read(chip, port,
522 MV88E6XXX_PORT_MAC_CTL, &mac);
523 if (err)
524 return err;
525
526 state->link = !!(mac & MV88E6185_PORT_MAC_CTL_SYNC_OK);
527 state->an_enabled = 1;
528 state->an_complete =
529 !!(mac & MV88E6185_PORT_MAC_CTL_AN_DONE);
530 state->duplex =
531 state->link ? DUPLEX_FULL : DUPLEX_UNKNOWN;
532 state->speed =
533 state->link ? SPEED_1000 : SPEED_UNKNOWN;
534
535 return 0;
536 }
537 }
538
539 return mv88e6352_port_link_state(chip, port, state);
540}
541
381/* Offset 0x02: Jamming Control 542/* Offset 0x02: Jamming Control
382 * 543 *
383 * Do not limit the period of time that this port can be paused for by 544 * Do not limit the period of time that this port can be paused for by
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index b16d5f0e6e9c..36904c9bf955 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -29,6 +29,7 @@
29#define MV88E6XXX_PORT_STS_SPEED_10 0x0000 29#define MV88E6XXX_PORT_STS_SPEED_10 0x0000
30#define MV88E6XXX_PORT_STS_SPEED_100 0x0100 30#define MV88E6XXX_PORT_STS_SPEED_100 0x0100
31#define MV88E6XXX_PORT_STS_SPEED_1000 0x0200 31#define MV88E6XXX_PORT_STS_SPEED_1000 0x0200
32#define MV88E6XXX_PORT_STS_SPEED_10000 0x0300
32#define MV88E6352_PORT_STS_EEE 0x0040 33#define MV88E6352_PORT_STS_EEE 0x0040
33#define MV88E6165_PORT_STS_AM_DIS 0x0040 34#define MV88E6165_PORT_STS_AM_DIS 0x0040
34#define MV88E6185_PORT_STS_MGMII 0x0040 35#define MV88E6185_PORT_STS_MGMII 0x0040
@@ -41,14 +42,28 @@
41#define MV88E6XXX_PORT_STS_CMODE_2500BASEX 0x000b 42#define MV88E6XXX_PORT_STS_CMODE_2500BASEX 0x000b
42#define MV88E6XXX_PORT_STS_CMODE_XAUI 0x000c 43#define MV88E6XXX_PORT_STS_CMODE_XAUI 0x000c
43#define MV88E6XXX_PORT_STS_CMODE_RXAUI 0x000d 44#define MV88E6XXX_PORT_STS_CMODE_RXAUI 0x000d
45#define MV88E6185_PORT_STS_CDUPLEX 0x0008
46#define MV88E6185_PORT_STS_CMODE_MASK 0x0007
47#define MV88E6185_PORT_STS_CMODE_GMII_FD 0x0000
48#define MV88E6185_PORT_STS_CMODE_MII_100_FD_PS 0x0001
49#define MV88E6185_PORT_STS_CMODE_MII_100 0x0002
50#define MV88E6185_PORT_STS_CMODE_MII_10 0x0003
51#define MV88E6185_PORT_STS_CMODE_SERDES 0x0004
52#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005
53#define MV88E6185_PORT_STS_CMODE_PHY 0x0006
54#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007
44 55
45/* Offset 0x01: MAC (or PCS or Physical) Control Register */ 56/* Offset 0x01: MAC (or PCS or Physical) Control Register */
46#define MV88E6XXX_PORT_MAC_CTL 0x01 57#define MV88E6XXX_PORT_MAC_CTL 0x01
47#define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK 0x8000 58#define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK 0x8000
48#define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK 0x4000 59#define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK 0x4000
60#define MV88E6185_PORT_MAC_CTL_SYNC_OK 0x4000
49#define MV88E6390_PORT_MAC_CTL_FORCE_SPEED 0x2000 61#define MV88E6390_PORT_MAC_CTL_FORCE_SPEED 0x2000
50#define MV88E6390_PORT_MAC_CTL_ALTSPEED 0x1000 62#define MV88E6390_PORT_MAC_CTL_ALTSPEED 0x1000
51#define MV88E6352_PORT_MAC_CTL_200BASE 0x1000 63#define MV88E6352_PORT_MAC_CTL_200BASE 0x1000
64#define MV88E6185_PORT_MAC_CTL_AN_EN 0x0400
65#define MV88E6185_PORT_MAC_CTL_AN_RESTART 0x0200
66#define MV88E6185_PORT_MAC_CTL_AN_DONE 0x0100
52#define MV88E6XXX_PORT_MAC_CTL_FC 0x0080 67#define MV88E6XXX_PORT_MAC_CTL_FC 0x0080
53#define MV88E6XXX_PORT_MAC_CTL_FORCE_FC 0x0040 68#define MV88E6XXX_PORT_MAC_CTL_FORCE_FC 0x0040
54#define MV88E6XXX_PORT_MAC_CTL_LINK_UP 0x0020 69#define MV88E6XXX_PORT_MAC_CTL_LINK_UP 0x0020
@@ -241,6 +256,8 @@ int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
241int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, 256int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
242 u16 val); 257 u16 val);
243 258
259int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
260 int pause);
244int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port, 261int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
245 phy_interface_t mode); 262 phy_interface_t mode);
246int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port, 263int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
@@ -252,6 +269,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup);
252 269
253int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); 270int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
254int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); 271int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
272int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
255int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); 273int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
256int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); 274int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
257int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); 275int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
@@ -294,7 +312,12 @@ int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
294 u8 out); 312 u8 out);
295int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, 313int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
296 phy_interface_t mode); 314 phy_interface_t mode);
297int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); 315int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
316int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
317int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port,
318 struct phylink_link_state *state);
319int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
320 struct phylink_link_state *state);
298int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port); 321int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
299int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, 322int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
300 int upstream_port); 323 int upstream_port);
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index bd85e2c390e1..4b336d8d4c67 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -16,6 +16,7 @@
16 16
17#include "chip.h" 17#include "chip.h"
18#include "global2.h" 18#include "global2.h"
19#include "hwtstamp.h"
19#include "ptp.h" 20#include "ptp.h"
20 21
21/* Raw timestamps are in units of 8-ns clock periods. */ 22/* Raw timestamps are in units of 8-ns clock periods. */
@@ -50,7 +51,7 @@ static int mv88e6xxx_tai_write(struct mv88e6xxx_chip *chip, int addr, u16 data)
50} 51}
51 52
52/* TODO: places where this are called should be using pinctrl */ 53/* TODO: places where this are called should be using pinctrl */
53static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin, 54static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
54 int func, int input) 55 int func, int input)
55{ 56{
56 int err; 57 int err;
@@ -65,7 +66,7 @@ static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
65 return chip->info->ops->gpio_ops->set_pctl(chip, pin, func); 66 return chip->info->ops->gpio_ops->set_pctl(chip, pin, func);
66} 67}
67 68
68static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) 69static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
69{ 70{
70 struct mv88e6xxx_chip *chip = cc_to_chip(cc); 71 struct mv88e6xxx_chip *chip = cc_to_chip(cc);
71 u16 phc_time[2]; 72 u16 phc_time[2];
@@ -79,13 +80,27 @@ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
79 return ((u32)phc_time[1] << 16) | phc_time[0]; 80 return ((u32)phc_time[1] << 16) | phc_time[0];
80} 81}
81 82
82/* mv88e6xxx_config_eventcap - configure TAI event capture 83static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc)
84{
85 struct mv88e6xxx_chip *chip = cc_to_chip(cc);
86 u16 phc_time[2];
87 int err;
88
89 err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time,
90 ARRAY_SIZE(phc_time));
91 if (err)
92 return 0;
93 else
94 return ((u32)phc_time[1] << 16) | phc_time[0];
95}
96
97/* mv88e6352_config_eventcap - configure TAI event capture
83 * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external) 98 * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external)
84 * @rising: zero for falling-edge trigger, else rising-edge trigger 99 * @rising: zero for falling-edge trigger, else rising-edge trigger
85 * 100 *
86 * This will also reset the capture sequence counter. 101 * This will also reset the capture sequence counter.
87 */ 102 */
88static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event, 103static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event,
89 int rising) 104 int rising)
90{ 105{
91 u16 global_config; 106 u16 global_config;
@@ -118,7 +133,7 @@ static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event,
118 return err; 133 return err;
119} 134}
120 135
121static void mv88e6xxx_tai_event_work(struct work_struct *ugly) 136static void mv88e6352_tai_event_work(struct work_struct *ugly)
122{ 137{
123 struct delayed_work *dw = to_delayed_work(ugly); 138 struct delayed_work *dw = to_delayed_work(ugly);
124 struct mv88e6xxx_chip *chip = dw_tai_event_to_chip(dw); 139 struct mv88e6xxx_chip *chip = dw_tai_event_to_chip(dw);
@@ -232,7 +247,7 @@ static int mv88e6xxx_ptp_settime(struct ptp_clock_info *ptp,
232 return 0; 247 return 0;
233} 248}
234 249
235static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip, 250static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
236 struct ptp_clock_request *rq, int on) 251 struct ptp_clock_request *rq, int on)
237{ 252{
238 int rising = (rq->extts.flags & PTP_RISING_EDGE); 253 int rising = (rq->extts.flags & PTP_RISING_EDGE);
@@ -250,18 +265,18 @@ static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip,
250 if (on) { 265 if (on) {
251 func = MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ; 266 func = MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ;
252 267
253 err = mv88e6xxx_set_gpio_func(chip, pin, func, true); 268 err = mv88e6352_set_gpio_func(chip, pin, func, true);
254 if (err) 269 if (err)
255 goto out; 270 goto out;
256 271
257 schedule_delayed_work(&chip->tai_event_work, 272 schedule_delayed_work(&chip->tai_event_work,
258 TAI_EVENT_WORK_INTERVAL); 273 TAI_EVENT_WORK_INTERVAL);
259 274
260 err = mv88e6xxx_config_eventcap(chip, PTP_CLOCK_EXTTS, rising); 275 err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
261 } else { 276 } else {
262 func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO; 277 func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO;
263 278
264 err = mv88e6xxx_set_gpio_func(chip, pin, func, true); 279 err = mv88e6352_set_gpio_func(chip, pin, func, true);
265 280
266 cancel_delayed_work_sync(&chip->tai_event_work); 281 cancel_delayed_work_sync(&chip->tai_event_work);
267 } 282 }
@@ -272,20 +287,20 @@ out:
272 return err; 287 return err;
273} 288}
274 289
275static int mv88e6xxx_ptp_enable(struct ptp_clock_info *ptp, 290static int mv88e6352_ptp_enable(struct ptp_clock_info *ptp,
276 struct ptp_clock_request *rq, int on) 291 struct ptp_clock_request *rq, int on)
277{ 292{
278 struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); 293 struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
279 294
280 switch (rq->type) { 295 switch (rq->type) {
281 case PTP_CLK_REQ_EXTTS: 296 case PTP_CLK_REQ_EXTTS:
282 return mv88e6xxx_ptp_enable_extts(chip, rq, on); 297 return mv88e6352_ptp_enable_extts(chip, rq, on);
283 default: 298 default:
284 return -EOPNOTSUPP; 299 return -EOPNOTSUPP;
285 } 300 }
286} 301}
287 302
288static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, 303static int mv88e6352_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
289 enum ptp_pin_function func, unsigned int chan) 304 enum ptp_pin_function func, unsigned int chan)
290{ 305{
291 switch (func) { 306 switch (func) {
@@ -299,6 +314,55 @@ static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
299 return 0; 314 return 0;
300} 315}
301 316
317const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
318 .clock_read = mv88e6352_ptp_clock_read,
319 .ptp_enable = mv88e6352_ptp_enable,
320 .ptp_verify = mv88e6352_ptp_verify,
321 .event_work = mv88e6352_tai_event_work,
322 .port_enable = mv88e6352_hwtstamp_port_enable,
323 .port_disable = mv88e6352_hwtstamp_port_disable,
324 .n_ext_ts = 1,
325 .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
326 .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
327 .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
328 .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
329 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
330 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
331 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
332 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
333 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
334 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
335 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
336 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
337 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
338};
339
340const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
341 .clock_read = mv88e6165_ptp_clock_read,
342 .global_enable = mv88e6165_global_enable,
343 .global_disable = mv88e6165_global_disable,
344 .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS,
345 .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS,
346 .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS,
347 .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
348 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
349 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
350 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
351 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
352 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
353 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
354};
355
356static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
357{
358 struct mv88e6xxx_chip *chip = cc_to_chip(cc);
359
360 if (chip->info->ops->ptp_ops->clock_read)
361 return chip->info->ops->ptp_ops->clock_read(cc);
362
363 return 0;
364}
365
302/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3 366/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3
303 * seconds; this task forces periodic reads so that we don't miss any. 367 * seconds; this task forces periodic reads so that we don't miss any.
304 */ 368 */
@@ -317,6 +381,7 @@ static void mv88e6xxx_ptp_overflow_check(struct work_struct *work)
317 381
318int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) 382int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
319{ 383{
384 const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
320 int i; 385 int i;
321 386
322 /* Set up the cycle counter */ 387 /* Set up the cycle counter */
@@ -330,14 +395,15 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
330 ktime_to_ns(ktime_get_real())); 395 ktime_to_ns(ktime_get_real()));
331 396
332 INIT_DELAYED_WORK(&chip->overflow_work, mv88e6xxx_ptp_overflow_check); 397 INIT_DELAYED_WORK(&chip->overflow_work, mv88e6xxx_ptp_overflow_check);
333 INIT_DELAYED_WORK(&chip->tai_event_work, mv88e6xxx_tai_event_work); 398 if (ptp_ops->event_work)
399 INIT_DELAYED_WORK(&chip->tai_event_work, ptp_ops->event_work);
334 400
335 chip->ptp_clock_info.owner = THIS_MODULE; 401 chip->ptp_clock_info.owner = THIS_MODULE;
336 snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name), 402 snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name),
337 dev_name(chip->dev)); 403 dev_name(chip->dev));
338 chip->ptp_clock_info.max_adj = 1000000; 404 chip->ptp_clock_info.max_adj = 1000000;
339 405
340 chip->ptp_clock_info.n_ext_ts = 1; 406 chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts;
341 chip->ptp_clock_info.n_per_out = 0; 407 chip->ptp_clock_info.n_per_out = 0;
342 chip->ptp_clock_info.n_pins = mv88e6xxx_num_gpio(chip); 408 chip->ptp_clock_info.n_pins = mv88e6xxx_num_gpio(chip);
343 chip->ptp_clock_info.pps = 0; 409 chip->ptp_clock_info.pps = 0;
@@ -355,8 +421,8 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
355 chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime; 421 chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime;
356 chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime; 422 chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime;
357 chip->ptp_clock_info.settime64 = mv88e6xxx_ptp_settime; 423 chip->ptp_clock_info.settime64 = mv88e6xxx_ptp_settime;
358 chip->ptp_clock_info.enable = mv88e6xxx_ptp_enable; 424 chip->ptp_clock_info.enable = ptp_ops->ptp_enable;
359 chip->ptp_clock_info.verify = mv88e6xxx_ptp_verify; 425 chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
360 chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work; 426 chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
361 427
362 chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev); 428 chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev);
@@ -373,7 +439,8 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
373{ 439{
374 if (chip->ptp_clock) { 440 if (chip->ptp_clock) {
375 cancel_delayed_work_sync(&chip->overflow_work); 441 cancel_delayed_work_sync(&chip->overflow_work);
376 cancel_delayed_work_sync(&chip->tai_event_work); 442 if (chip->info->ops->ptp_ops->event_work)
443 cancel_delayed_work_sync(&chip->tai_event_work);
377 444
378 ptp_clock_unregister(chip->ptp_clock); 445 ptp_clock_unregister(chip->ptp_clock);
379 chip->ptp_clock = NULL; 446 chip->ptp_clock = NULL;
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 10f271ab650d..28a030840517 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -78,6 +78,71 @@
78/* Offset 0x12: Lock Status */ 78/* Offset 0x12: Lock Status */
79#define MV88E6XXX_TAI_LOCK_STATUS 0x12 79#define MV88E6XXX_TAI_LOCK_STATUS 0x12
80 80
81/* Offset 0x00: Ether Type */
82#define MV88E6XXX_PTP_GC_ETYPE 0x00
83
84/* 6165 Global Control Registers */
85/* Offset 0x00: Ether Type */
86#define MV88E6XXX_PTP_GC_ETYPE 0x00
87
88/* Offset 0x01: Message ID */
89#define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01
90
91/* Offset 0x02: Time Stamp Arrive Time */
92#define MV88E6XXX_PTP_GC_TS_ARR_PTR 0x02
93
94/* Offset 0x03: Port Arrival Interrupt Enable */
95#define MV88E6XXX_PTP_GC_PORT_ARR_INT_EN 0x03
96
97/* Offset 0x04: Port Departure Interrupt Enable */
98#define MV88E6XXX_PTP_GC_PORT_DEP_INT_EN 0x04
99
100/* Offset 0x05: Configuration */
101#define MV88E6XXX_PTP_GC_CONFIG 0x05
102#define MV88E6XXX_PTP_GC_CONFIG_DIS_OVERWRITE BIT(1)
103#define MV88E6XXX_PTP_GC_CONFIG_DIS_TS BIT(0)
104
105/* Offset 0x8: Interrupt Status */
106#define MV88E6XXX_PTP_GC_INT_STATUS 0x08
107
108/* Offset 0x9/0xa: Global Time */
109#define MV88E6XXX_PTP_GC_TIME_LO 0x09
110#define MV88E6XXX_PTP_GC_TIME_HI 0x0A
111
112/* 6165 Per Port Registers */
113/* Offset 0: Arrival Time 0 Status */
114#define MV88E6165_PORT_PTP_ARR0_STS 0x00
115
116/* Offset 0x01/0x02: PTP Arrival 0 Time */
117#define MV88E6165_PORT_PTP_ARR0_TIME_LO 0x01
118#define MV88E6165_PORT_PTP_ARR0_TIME_HI 0x02
119
120/* Offset 0x03: PTP Arrival 0 Sequence ID */
121#define MV88E6165_PORT_PTP_ARR0_SEQID 0x03
122
123/* Offset 0x04: PTP Arrival 1 Status */
124#define MV88E6165_PORT_PTP_ARR1_STS 0x04
125
126/* Offset 0x05/0x6E: PTP Arrival 1 Time */
127#define MV88E6165_PORT_PTP_ARR1_TIME_LO 0x05
128#define MV88E6165_PORT_PTP_ARR1_TIME_HI 0x06
129
130/* Offset 0x07: PTP Arrival 1 Sequence ID */
131#define MV88E6165_PORT_PTP_ARR1_SEQID 0x07
132
133/* Offset 0x08: PTP Departure Status */
134#define MV88E6165_PORT_PTP_DEP_STS 0x08
135
136/* Offset 0x09/0x0a: PTP Deperture Time */
137#define MV88E6165_PORT_PTP_DEP_TIME_LO 0x09
138#define MV88E6165_PORT_PTP_DEP_TIME_HI 0x0a
139
140/* Offset 0x0b: PTP Departure Sequence ID */
141#define MV88E6165_PORT_PTP_DEP_SEQID 0x0b
142
143/* Offset 0x0d: Port Status */
144#define MV88E6164_PORT_STATUS 0x0d
145
81#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP 146#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
82 147
83long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp); 148long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
@@ -87,6 +152,9 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
87#define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \ 152#define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \
88 ptp_clock_info) 153 ptp_clock_info)
89 154
155extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
156extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
157
90#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ 158#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
91 159
92static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp) 160static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
@@ -103,6 +171,9 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
103{ 171{
104} 172}
105 173
174static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
175static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
176
106#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */ 177#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
107 178
108#endif /* _MV88E6XXX_PTP_H */ 179#endif /* _MV88E6XXX_PTP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index fb058fd35c0d..bb69650ff772 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -11,6 +11,8 @@
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14#include <linux/interrupt.h>
15#include <linux/irqdomain.h>
14#include <linux/mii.h> 16#include <linux/mii.h>
15 17
16#include "chip.h" 18#include "chip.h"
@@ -35,6 +37,22 @@ static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
35 reg, val); 37 reg, val);
36} 38}
37 39
40static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip,
41 int lane, int device, int reg, u16 *val)
42{
43 int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
44
45 return mv88e6xxx_phy_read(chip, lane, reg_c45, val);
46}
47
48static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
49 int lane, int device, int reg, u16 val)
50{
51 int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
52
53 return mv88e6xxx_phy_write(chip, lane, reg_c45, val);
54}
55
38static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on) 56static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on)
39{ 57{
40 u16 val, new_val; 58 u16 val, new_val;
@@ -57,14 +75,7 @@ static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on)
57 75
58static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port) 76static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
59{ 77{
60 u8 cmode; 78 u8 cmode = chip->ports[port].cmode;
61 int err;
62
63 err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
64 if (err) {
65 dev_err(chip->dev, "failed to read cmode\n");
66 return false;
67 }
68 79
69 if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASE_X) || 80 if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASE_X) ||
70 (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) || 81 (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) ||
@@ -174,16 +185,226 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
174 return ARRAY_SIZE(mv88e6352_serdes_hw_stats); 185 return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
175} 186}
176 187
188static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
189{
190 struct dsa_switch *ds = chip->ds;
191 u16 status;
192 bool up;
193
194 mv88e6352_serdes_read(chip, MII_BMSR, &status);
195
196 /* Status must be read twice in order to give the current link
197 * status. Otherwise the change in link status since the last
198 * read of the register is returned.
199 */
200 mv88e6352_serdes_read(chip, MII_BMSR, &status);
201
202 up = status & BMSR_LSTATUS;
203
204 dsa_port_phylink_mac_change(ds, port, up);
205}
206
207static irqreturn_t mv88e6352_serdes_thread_fn(int irq, void *dev_id)
208{
209 struct mv88e6xxx_port *port = dev_id;
210 struct mv88e6xxx_chip *chip = port->chip;
211 irqreturn_t ret = IRQ_NONE;
212 u16 status;
213 int err;
214
215 mutex_lock(&chip->reg_lock);
216
217 err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_INT_STATUS, &status);
218 if (err)
219 goto out;
220
221 if (status & MV88E6352_SERDES_INT_LINK_CHANGE) {
222 ret = IRQ_HANDLED;
223 mv88e6352_serdes_irq_link(chip, port->port);
224 }
225out:
226 mutex_unlock(&chip->reg_lock);
227
228 return ret;
229}
230
231static int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip)
232{
233 return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE,
234 MV88E6352_SERDES_INT_LINK_CHANGE);
235}
236
237static int mv88e6352_serdes_irq_disable(struct mv88e6xxx_chip *chip)
238{
239 return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, 0);
240}
241
242int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
243{
244 int err;
245
246 if (!mv88e6352_port_has_serdes(chip, port))
247 return 0;
248
249 chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain,
250 MV88E6352_SERDES_IRQ);
251 if (chip->ports[port].serdes_irq < 0) {
252 dev_err(chip->dev, "Unable to map SERDES irq: %d\n",
253 chip->ports[port].serdes_irq);
254 return chip->ports[port].serdes_irq;
255 }
256
257 /* Requesting the IRQ will trigger irq callbacks. So we cannot
258 * hold the reg_lock.
259 */
260 mutex_unlock(&chip->reg_lock);
261 err = request_threaded_irq(chip->ports[port].serdes_irq, NULL,
262 mv88e6352_serdes_thread_fn,
263 IRQF_ONESHOT, "mv88e6xxx-serdes",
264 &chip->ports[port]);
265 mutex_lock(&chip->reg_lock);
266
267 if (err) {
268 dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n",
269 err);
270 return err;
271 }
272
273 return mv88e6352_serdes_irq_enable(chip);
274}
275
276void mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
277{
278 if (!mv88e6352_port_has_serdes(chip, port))
279 return;
280
281 mv88e6352_serdes_irq_disable(chip);
282
283 /* Freeing the IRQ will trigger irq callbacks. So we cannot
284 * hold the reg_lock.
285 */
286 mutex_unlock(&chip->reg_lock);
287 free_irq(chip->ports[port].serdes_irq, &chip->ports[port]);
288 mutex_lock(&chip->reg_lock);
289
290 chip->ports[port].serdes_irq = 0;
291}
292
293/* Return the SERDES lane address a port is using. Only Ports 9 and 10
294 * have SERDES lanes. Returns -ENODEV if a port does not have a lane.
295 */
296static int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
297{
298 u8 cmode = chip->ports[port].cmode;
299
300 switch (port) {
301 case 9:
302 if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
303 cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
304 cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
305 return MV88E6390_PORT9_LANE0;
306 return -ENODEV;
307 case 10:
308 if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
309 cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
310 cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
311 return MV88E6390_PORT10_LANE0;
312 return -ENODEV;
313 default:
314 return -ENODEV;
315 }
316}
317
318/* Return the SERDES lane address a port is using. Ports 9 and 10 can
319 * use multiple lanes. If so, return the first lane the port uses.
320 * Returns -ENODEV if a port does not have a lane.
321 */
322int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
323{
324 u8 cmode_port9, cmode_port10, cmode_port;
325
326 cmode_port9 = chip->ports[9].cmode;
327 cmode_port10 = chip->ports[10].cmode;
328 cmode_port = chip->ports[port].cmode;
329
330 switch (port) {
331 case 2:
332 if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
333 cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
334 cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
335 if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
336 return MV88E6390_PORT9_LANE1;
337 return -ENODEV;
338 case 3:
339 if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
340 cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
341 cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
342 cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
343 if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
344 return MV88E6390_PORT9_LANE2;
345 return -ENODEV;
346 case 4:
347 if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
348 cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
349 cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
350 cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
351 if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
352 return MV88E6390_PORT9_LANE3;
353 return -ENODEV;
354 case 5:
355 if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
356 cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
357 cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
358 if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
359 return MV88E6390_PORT10_LANE1;
360 return -ENODEV;
361 case 6:
362 if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
363 cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
364 cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
365 cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
366 if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
367 return MV88E6390_PORT10_LANE2;
368 return -ENODEV;
369 case 7:
370 if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
371 cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
372 cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
373 cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
374 if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
375 return MV88E6390_PORT10_LANE3;
376 return -ENODEV;
377 case 9:
378 if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
379 cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
380 cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
381 cmode_port9 == MV88E6XXX_PORT_STS_CMODE_XAUI ||
382 cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
383 return MV88E6390_PORT9_LANE0;
384 return -ENODEV;
385 case 10:
386 if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
387 cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
388 cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
389 cmode_port10 == MV88E6XXX_PORT_STS_CMODE_XAUI ||
390 cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
391 return MV88E6390_PORT10_LANE0;
392 return -ENODEV;
393 default:
394 return -ENODEV;
395 }
396}
397
177/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */ 398/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */
178static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on) 399static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane,
400 bool on)
179{ 401{
180 u16 val, new_val; 402 u16 val, new_val;
181 int reg_c45;
182 int err; 403 int err;
183 404
184 reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE | 405 err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
185 MV88E6390_PCS_CONTROL_1; 406 MV88E6390_PCS_CONTROL_1, &val);
186 err = mv88e6xxx_phy_read(chip, addr, reg_c45, &val); 407
187 if (err) 408 if (err)
188 return err; 409 return err;
189 410
@@ -195,22 +416,21 @@ static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on)
195 new_val = val | MV88E6390_PCS_CONTROL_1_PDOWN; 416 new_val = val | MV88E6390_PCS_CONTROL_1_PDOWN;
196 417
197 if (val != new_val) 418 if (val != new_val)
198 err = mv88e6xxx_phy_write(chip, addr, reg_c45, new_val); 419 err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
420 MV88E6390_PCS_CONTROL_1, new_val);
199 421
200 return err; 422 return err;
201} 423}
202 424
203/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */ 425/* Set the power on/off for SGMII and 1000Base-X */
204static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int addr, 426static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane,
205 bool on) 427 bool on)
206{ 428{
207 u16 val, new_val; 429 u16 val, new_val;
208 int reg_c45;
209 int err; 430 int err;
210 431
211 reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE | 432 err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
212 MV88E6390_SGMII_CONTROL; 433 MV88E6390_SGMII_CONTROL, &val);
213 err = mv88e6xxx_phy_read(chip, addr, reg_c45, &val);
214 if (err) 434 if (err)
215 return err; 435 return err;
216 436
@@ -222,107 +442,261 @@ static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int addr,
222 new_val = val | MV88E6390_SGMII_CONTROL_PDOWN; 442 new_val = val | MV88E6390_SGMII_CONTROL_PDOWN;
223 443
224 if (val != new_val) 444 if (val != new_val)
225 err = mv88e6xxx_phy_write(chip, addr, reg_c45, new_val); 445 err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
446 MV88E6390_SGMII_CONTROL, new_val);
226 447
227 return err; 448 return err;
228} 449}
229 450
230static int mv88e6390_serdes_lower(struct mv88e6xxx_chip *chip, u8 cmode, 451static int mv88e6390_serdes_power_lane(struct mv88e6xxx_chip *chip, int port,
231 int port_donor, int lane, bool rxaui, bool on) 452 int lane, bool on)
232{ 453{
233 int err; 454 u8 cmode = chip->ports[port].cmode;
234 u8 cmode_donor;
235 455
236 err = mv88e6xxx_port_get_cmode(chip, port_donor, &cmode_donor); 456 switch (cmode) {
237 if (err)
238 return err;
239
240 switch (cmode_donor) {
241 case MV88E6XXX_PORT_STS_CMODE_RXAUI:
242 if (!rxaui)
243 break;
244 /* Fall through */
245 case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
246 case MV88E6XXX_PORT_STS_CMODE_SGMII: 457 case MV88E6XXX_PORT_STS_CMODE_SGMII:
458 case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
247 case MV88E6XXX_PORT_STS_CMODE_2500BASEX: 459 case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
248 if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || 460 return mv88e6390_serdes_power_sgmii(chip, lane, on);
249 cmode == MV88E6XXX_PORT_STS_CMODE_SGMII) 461 case MV88E6XXX_PORT_STS_CMODE_XAUI:
250 return mv88e6390_serdes_sgmii(chip, lane, on); 462 case MV88E6XXX_PORT_STS_CMODE_RXAUI:
463 return mv88e6390_serdes_power_10g(chip, lane, on);
464 }
465
466 return 0;
467}
468
469int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
470{
471 int lane;
472
473 lane = mv88e6390_serdes_get_lane(chip, port);
474 if (lane == -ENODEV)
475 return 0;
476
477 if (lane < 0)
478 return lane;
479
480 switch (port) {
481 case 9 ... 10:
482 return mv88e6390_serdes_power_lane(chip, port, lane, on);
251 } 483 }
484
252 return 0; 485 return 0;
253} 486}
254 487
255static int mv88e6390_serdes_port9(struct mv88e6xxx_chip *chip, u8 cmode, 488int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
256 bool on) 489{
490 int lane;
491
492 lane = mv88e6390x_serdes_get_lane(chip, port);
493 if (lane == -ENODEV)
494 return 0;
495
496 if (lane < 0)
497 return lane;
498
499 switch (port) {
500 case 2 ... 4:
501 case 5 ... 7:
502 case 9 ... 10:
503 return mv88e6390_serdes_power_lane(chip, port, lane, on);
504 }
505
506 return 0;
507}
508
509static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
510 int port, int lane)
511{
512 struct dsa_switch *ds = chip->ds;
513 u16 status;
514 bool up;
515
516 mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
517 MV88E6390_SGMII_STATUS, &status);
518
519 /* Status must be read twice in order to give the current link
520 * status. Otherwise the change in link status since the last
521 * read of the register is returned.
522 */
523 mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
524 MV88E6390_SGMII_STATUS, &status);
525 up = status & MV88E6390_SGMII_STATUS_LINK;
526
527 dsa_port_phylink_mac_change(ds, port, up);
528}
529
530static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip,
531 int lane)
257{ 532{
533 return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
534 MV88E6390_SGMII_INT_ENABLE,
535 MV88E6390_SGMII_INT_LINK_DOWN |
536 MV88E6390_SGMII_INT_LINK_UP);
537}
538
539static int mv88e6390_serdes_irq_disable_sgmii(struct mv88e6xxx_chip *chip,
540 int lane)
541{
542 return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
543 MV88E6390_SGMII_INT_ENABLE, 0);
544}
545
546int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
547 int lane)
548{
549 u8 cmode = chip->ports[port].cmode;
550 int err = 0;
551
258 switch (cmode) { 552 switch (cmode) {
553 case MV88E6XXX_PORT_STS_CMODE_SGMII:
259 case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: 554 case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
555 case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
556 err = mv88e6390_serdes_irq_enable_sgmii(chip, lane);
557 }
558
559 return err;
560}
561
562int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port,
563 int lane)
564{
565 u8 cmode = chip->ports[port].cmode;
566 int err = 0;
567
568 switch (cmode) {
260 case MV88E6XXX_PORT_STS_CMODE_SGMII: 569 case MV88E6XXX_PORT_STS_CMODE_SGMII:
261 return mv88e6390_serdes_sgmii(chip, MV88E6390_PORT9_LANE0, on); 570 case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
262 case MV88E6XXX_PORT_STS_CMODE_XAUI:
263 case MV88E6XXX_PORT_STS_CMODE_RXAUI:
264 case MV88E6XXX_PORT_STS_CMODE_2500BASEX: 571 case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
265 return mv88e6390_serdes_10g(chip, MV88E6390_PORT9_LANE0, on); 572 err = mv88e6390_serdes_irq_disable_sgmii(chip, lane);
266 } 573 }
267 574
268 return 0; 575 return err;
269} 576}
270 577
271static int mv88e6390_serdes_port10(struct mv88e6xxx_chip *chip, u8 cmode, 578static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip,
272 bool on) 579 int lane, u16 *status)
273{ 580{
581 int err;
582
583 err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
584 MV88E6390_SGMII_INT_STATUS, status);
585
586 return err;
587}
588
589static irqreturn_t mv88e6390_serdes_thread_fn(int irq, void *dev_id)
590{
591 struct mv88e6xxx_port *port = dev_id;
592 struct mv88e6xxx_chip *chip = port->chip;
593 irqreturn_t ret = IRQ_NONE;
594 u8 cmode = port->cmode;
595 u16 status;
596 int lane;
597 int err;
598
599 lane = mv88e6390x_serdes_get_lane(chip, port->port);
600
601 mutex_lock(&chip->reg_lock);
602
274 switch (cmode) { 603 switch (cmode) {
275 case MV88E6XXX_PORT_STS_CMODE_SGMII: 604 case MV88E6XXX_PORT_STS_CMODE_SGMII:
276 return mv88e6390_serdes_sgmii(chip, MV88E6390_PORT10_LANE0, on);
277 case MV88E6XXX_PORT_STS_CMODE_XAUI:
278 case MV88E6XXX_PORT_STS_CMODE_RXAUI:
279 case MV88E6XXX_PORT_STS_CMODE_1000BASE_X: 605 case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
280 case MV88E6XXX_PORT_STS_CMODE_2500BASEX: 606 case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
281 return mv88e6390_serdes_10g(chip, MV88E6390_PORT10_LANE0, on); 607 err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status);
608 if (err)
609 goto out;
610 if (status & (MV88E6390_SGMII_INT_LINK_DOWN |
611 MV88E6390_SGMII_INT_LINK_UP)) {
612 ret = IRQ_HANDLED;
613 mv88e6390_serdes_irq_link_sgmii(chip, port->port, lane);
614 }
282 } 615 }
616out:
617 mutex_unlock(&chip->reg_lock);
283 618
284 return 0; 619 return ret;
285} 620}
286 621
287int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) 622int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
288{ 623{
289 u8 cmode; 624 int lane;
290 int err; 625 int err;
291 626
292 err = mv88e6xxx_port_get_cmode(chip, port, &cmode); 627 /* Only support ports 9 and 10 at the moment */
293 if (err) 628 if (port < 9)
294 return err; 629 return 0;
295 630
296 switch (port) { 631 lane = mv88e6390x_serdes_get_lane(chip, port);
297 case 2: 632
298 return mv88e6390_serdes_lower(chip, cmode, 9, 633 if (lane == -ENODEV)
299 MV88E6390_PORT9_LANE1, 634 return 0;
300 false, on); 635
301 case 3: 636 if (lane < 0)
302 return mv88e6390_serdes_lower(chip, cmode, 9, 637 return lane;
303 MV88E6390_PORT9_LANE2, 638
304 true, on); 639 chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain,
305 case 4: 640 port);
306 return mv88e6390_serdes_lower(chip, cmode, 9, 641 if (chip->ports[port].serdes_irq < 0) {
307 MV88E6390_PORT9_LANE3, 642 dev_err(chip->dev, "Unable to map SERDES irq: %d\n",
308 true, on); 643 chip->ports[port].serdes_irq);
309 case 5: 644 return chip->ports[port].serdes_irq;
310 return mv88e6390_serdes_lower(chip, cmode, 10, 645 }
311 MV88E6390_PORT10_LANE1, 646
312 false, on); 647 /* Requesting the IRQ will trigger irq callbacks. So we cannot
313 case 6: 648 * hold the reg_lock.
314 return mv88e6390_serdes_lower(chip, cmode, 10, 649 */
315 MV88E6390_PORT10_LANE2, 650 mutex_unlock(&chip->reg_lock);
316 true, on); 651 err = request_threaded_irq(chip->ports[port].serdes_irq, NULL,
317 case 7: 652 mv88e6390_serdes_thread_fn,
318 return mv88e6390_serdes_lower(chip, cmode, 10, 653 IRQF_ONESHOT, "mv88e6xxx-serdes",
319 MV88E6390_PORT10_LANE3, 654 &chip->ports[port]);
320 true, on); 655 mutex_lock(&chip->reg_lock);
321 case 9: 656
322 return mv88e6390_serdes_port9(chip, cmode, on); 657 if (err) {
323 case 10: 658 dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n",
324 return mv88e6390_serdes_port10(chip, cmode, on); 659 err);
660 return err;
325 } 661 }
326 662
663 return mv88e6390_serdes_irq_enable(chip, port, lane);
664}
665
666void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
667{
668 int lane = mv88e6390x_serdes_get_lane(chip, port);
669
670 if (port < 9)
671 return;
672
673 if (lane < 0)
674 return;
675
676 mv88e6390_serdes_irq_disable(chip, port, lane);
677
678 /* Freeing the IRQ will trigger irq callbacks. So we cannot
679 * hold the reg_lock.
680 */
681 mutex_unlock(&chip->reg_lock);
682 free_irq(chip->ports[port].serdes_irq, &chip->ports[port]);
683 mutex_lock(&chip->reg_lock);
684
685 chip->ports[port].serdes_irq = 0;
686}
687
688int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
689{
690 u8 cmode = chip->ports[port].cmode;
691
692 if (port != 5)
693 return 0;
694
695 if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
696 cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
697 cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
698 return mv88e6390_serdes_power_sgmii(chip, MV88E6341_ADDR_SERDES,
699 on);
700
327 return 0; 701 return 0;
328} 702}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 1897c01c6e19..7870c5a9ef12 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -18,6 +18,21 @@
18 18
19#define MV88E6352_ADDR_SERDES 0x0f 19#define MV88E6352_ADDR_SERDES 0x0f
20#define MV88E6352_SERDES_PAGE_FIBER 0x01 20#define MV88E6352_SERDES_PAGE_FIBER 0x01
21#define MV88E6352_SERDES_IRQ 0x0b
22#define MV88E6352_SERDES_INT_ENABLE 0x12
23#define MV88E6352_SERDES_INT_SPEED_CHANGE BIT(14)
24#define MV88E6352_SERDES_INT_DUPLEX_CHANGE BIT(13)
25#define MV88E6352_SERDES_INT_PAGE_RX BIT(12)
26#define MV88E6352_SERDES_INT_AN_COMPLETE BIT(11)
27#define MV88E6352_SERDES_INT_LINK_CHANGE BIT(10)
28#define MV88E6352_SERDES_INT_SYMBOL_ERROR BIT(9)
29#define MV88E6352_SERDES_INT_FALSE_CARRIER BIT(8)
30#define MV88E6352_SERDES_INT_FIFO_OVER_UNDER BIT(7)
31#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4)
32#define MV88E6352_SERDES_INT_STATUS 0x13
33
34
35#define MV88E6341_ADDR_SERDES 0x15
21 36
22#define MV88E6390_PORT9_LANE0 0x09 37#define MV88E6390_PORT9_LANE0 0x09
23#define MV88E6390_PORT9_LANE1 0x12 38#define MV88E6390_PORT9_LANE1 0x12
@@ -27,7 +42,6 @@
27#define MV88E6390_PORT10_LANE1 0x15 42#define MV88E6390_PORT10_LANE1 0x15
28#define MV88E6390_PORT10_LANE2 0x16 43#define MV88E6390_PORT10_LANE2 0x16
29#define MV88E6390_PORT10_LANE3 0x17 44#define MV88E6390_PORT10_LANE3 0x17
30#define MV88E6390_SERDES_DEVICE (4 << 16)
31 45
32/* 10GBASE-R and 10GBASE-X4/X2 */ 46/* 10GBASE-R and 10GBASE-X4/X2 */
33#define MV88E6390_PCS_CONTROL_1 0x1000 47#define MV88E6390_PCS_CONTROL_1 0x1000
@@ -41,12 +55,39 @@
41#define MV88E6390_SGMII_CONTROL_RESET BIT(15) 55#define MV88E6390_SGMII_CONTROL_RESET BIT(15)
42#define MV88E6390_SGMII_CONTROL_LOOPBACK BIT(14) 56#define MV88E6390_SGMII_CONTROL_LOOPBACK BIT(14)
43#define MV88E6390_SGMII_CONTROL_PDOWN BIT(11) 57#define MV88E6390_SGMII_CONTROL_PDOWN BIT(11)
58#define MV88E6390_SGMII_STATUS 0x2001
59#define MV88E6390_SGMII_STATUS_AN_DONE BIT(5)
60#define MV88E6390_SGMII_STATUS_REMOTE_FAULT BIT(4)
61#define MV88E6390_SGMII_STATUS_LINK BIT(2)
62#define MV88E6390_SGMII_INT_ENABLE 0xa001
63#define MV88E6390_SGMII_INT_SPEED_CHANGE BIT(14)
64#define MV88E6390_SGMII_INT_DUPLEX_CHANGE BIT(13)
65#define MV88E6390_SGMII_INT_PAGE_RX BIT(12)
66#define MV88E6390_SGMII_INT_AN_COMPLETE BIT(11)
67#define MV88E6390_SGMII_INT_LINK_DOWN BIT(10)
68#define MV88E6390_SGMII_INT_LINK_UP BIT(9)
69#define MV88E6390_SGMII_INT_SYMBOL_ERROR BIT(8)
70#define MV88E6390_SGMII_INT_FALSE_CARRIER BIT(7)
71#define MV88E6390_SGMII_INT_STATUS 0xa002
44 72
73int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
74int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
45int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); 75int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
46int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); 76int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
77int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
78int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
79void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
47int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); 80int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
48int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, 81int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
49 int port, uint8_t *data); 82 int port, uint8_t *data);
50int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, 83int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
51 uint64_t *data); 84 uint64_t *data);
85int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
86 int lane);
87int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port,
88 int lane);
89int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
90void mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
91
92
52#endif 93#endif
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 600d5ad1fbde..7e97e620bd44 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1,17 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name> 3 * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
3 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org> 4 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
4 * Copyright (c) 2015, The Linux Foundation. All rights reserved. 5 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
5 * Copyright (c) 2016 John Crispin <john@phrozen.org> 6 * Copyright (c) 2016 John Crispin <john@phrozen.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */ 7 */
16 8
17#include <linux/module.h> 9#include <linux/module.h>
@@ -473,10 +465,10 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
473static void 465static void
474qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) 466qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
475{ 467{
476 u32 mask = QCA8K_PORT_STATUS_TXMAC; 468 u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
477 469
478 /* Port 0 and 6 have no internal PHY */ 470 /* Port 0 and 6 have no internal PHY */
479 if ((port > 0) && (port < 6)) 471 if (port > 0 && port < 6)
480 mask |= QCA8K_PORT_STATUS_LINK_AUTO; 472 mask |= QCA8K_PORT_STATUS_LINK_AUTO;
481 473
482 if (enable) 474 if (enable)
@@ -490,6 +482,7 @@ qca8k_setup(struct dsa_switch *ds)
490{ 482{
491 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; 483 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
492 int ret, i, phy_mode = -1; 484 int ret, i, phy_mode = -1;
485 u32 mask;
493 486
494 /* Make sure that port 0 is the cpu port */ 487 /* Make sure that port 0 is the cpu port */
495 if (!dsa_is_cpu_port(ds, 0)) { 488 if (!dsa_is_cpu_port(ds, 0)) {
@@ -515,7 +508,10 @@ qca8k_setup(struct dsa_switch *ds)
515 if (ret < 0) 508 if (ret < 0)
516 return ret; 509 return ret;
517 510
518 /* Enable CPU Port */ 511 /* Enable CPU Port, force it to maximum bandwidth and full-duplex */
512 mask = QCA8K_PORT_STATUS_SPEED_1000 | QCA8K_PORT_STATUS_TXFLOW |
513 QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_DUPLEX;
514 qca8k_write(priv, QCA8K_REG_PORT_STATUS(QCA8K_CPU_PORT), mask);
519 qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, 515 qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
520 QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); 516 QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
521 qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1); 517 qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1);
@@ -583,6 +579,47 @@ qca8k_setup(struct dsa_switch *ds)
583 return 0; 579 return 0;
584} 580}
585 581
582static void
583qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
584{
585 struct qca8k_priv *priv = ds->priv;
586 u32 reg;
587
588 /* Force fixed-link setting for CPU port, skip others. */
589 if (!phy_is_pseudo_fixed_link(phy))
590 return;
591
592 /* Set port speed */
593 switch (phy->speed) {
594 case 10:
595 reg = QCA8K_PORT_STATUS_SPEED_10;
596 break;
597 case 100:
598 reg = QCA8K_PORT_STATUS_SPEED_100;
599 break;
600 case 1000:
601 reg = QCA8K_PORT_STATUS_SPEED_1000;
602 break;
603 default:
604 dev_dbg(priv->dev, "port%d link speed %dMbps not supported.\n",
605 port, phy->speed);
606 return;
607 }
608
609 /* Set duplex mode */
610 if (phy->duplex == DUPLEX_FULL)
611 reg |= QCA8K_PORT_STATUS_DUPLEX;
612
613 /* Force flow control */
614 if (dsa_is_cpu_port(ds, port))
615 reg |= QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_TXFLOW;
616
617 /* Force link down before changing MAC options */
618 qca8k_port_set_status(priv, port, 0);
619 qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
620 qca8k_port_set_status(priv, port, 1);
621}
622
586static int 623static int
587qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum) 624qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
588{ 625{
@@ -600,10 +637,13 @@ qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
600} 637}
601 638
602static void 639static void
603qca8k_get_strings(struct dsa_switch *ds, int port, uint8_t *data) 640qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
604{ 641{
605 int i; 642 int i;
606 643
644 if (stringset != ETH_SS_STATS)
645 return;
646
607 for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) 647 for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
608 strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, 648 strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
609 ETH_GSTRING_LEN); 649 ETH_GSTRING_LEN);
@@ -631,8 +671,11 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
631} 671}
632 672
633static int 673static int
634qca8k_get_sset_count(struct dsa_switch *ds, int port) 674qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
635{ 675{
676 if (sset != ETH_SS_STATS)
677 return 0;
678
636 return ARRAY_SIZE(ar8327_mib); 679 return ARRAY_SIZE(ar8327_mib);
637} 680}
638 681
@@ -831,6 +874,7 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port)
831static const struct dsa_switch_ops qca8k_switch_ops = { 874static const struct dsa_switch_ops qca8k_switch_ops = {
832 .get_tag_protocol = qca8k_get_tag_protocol, 875 .get_tag_protocol = qca8k_get_tag_protocol,
833 .setup = qca8k_setup, 876 .setup = qca8k_setup,
877 .adjust_link = qca8k_adjust_link,
834 .get_strings = qca8k_get_strings, 878 .get_strings = qca8k_get_strings,
835 .phy_read = qca8k_phy_read, 879 .phy_read = qca8k_phy_read,
836 .phy_write = qca8k_phy_write, 880 .phy_write = qca8k_phy_write,
@@ -862,6 +906,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
862 return -ENOMEM; 906 return -ENOMEM;
863 907
864 priv->bus = mdiodev->bus; 908 priv->bus = mdiodev->bus;
909 priv->dev = &mdiodev->dev;
865 910
866 /* read the switches ID register */ 911 /* read the switches ID register */
867 id = qca8k_read(priv, QCA8K_REG_MASK_CTRL); 912 id = qca8k_read(priv, QCA8K_REG_MASK_CTRL);
@@ -910,8 +955,7 @@ qca8k_set_pm(struct qca8k_priv *priv, int enable)
910 955
911static int qca8k_suspend(struct device *dev) 956static int qca8k_suspend(struct device *dev)
912{ 957{
913 struct platform_device *pdev = to_platform_device(dev); 958 struct qca8k_priv *priv = dev_get_drvdata(dev);
914 struct qca8k_priv *priv = platform_get_drvdata(pdev);
915 959
916 qca8k_set_pm(priv, 0); 960 qca8k_set_pm(priv, 0);
917 961
@@ -920,8 +964,7 @@ static int qca8k_suspend(struct device *dev)
920 964
921static int qca8k_resume(struct device *dev) 965static int qca8k_resume(struct device *dev)
922{ 966{
923 struct platform_device *pdev = to_platform_device(dev); 967 struct qca8k_priv *priv = dev_get_drvdata(dev);
924 struct qca8k_priv *priv = platform_get_drvdata(pdev);
925 968
926 qca8k_set_pm(priv, 1); 969 qca8k_set_pm(priv, 1);
927 970
@@ -933,6 +976,7 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
933 qca8k_suspend, qca8k_resume); 976 qca8k_suspend, qca8k_resume);
934 977
935static const struct of_device_id qca8k_of_match[] = { 978static const struct of_device_id qca8k_of_match[] = {
979 { .compatible = "qca,qca8334" },
936 { .compatible = "qca,qca8337" }, 980 { .compatible = "qca,qca8337" },
937 { /* sentinel */ }, 981 { /* sentinel */ },
938}; 982};
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 1cf8a920d4ff..613fe5c50236 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -51,8 +51,10 @@
51#define QCA8K_GOL_MAC_ADDR0 0x60 51#define QCA8K_GOL_MAC_ADDR0 0x60
52#define QCA8K_GOL_MAC_ADDR1 0x64 52#define QCA8K_GOL_MAC_ADDR1 0x64
53#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) 53#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
54#define QCA8K_PORT_STATUS_SPEED GENMASK(2, 0) 54#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0)
55#define QCA8K_PORT_STATUS_SPEED_S 0 55#define QCA8K_PORT_STATUS_SPEED_10 0
56#define QCA8K_PORT_STATUS_SPEED_100 0x1
57#define QCA8K_PORT_STATUS_SPEED_1000 0x2
56#define QCA8K_PORT_STATUS_TXMAC BIT(2) 58#define QCA8K_PORT_STATUS_TXMAC BIT(2)
57#define QCA8K_PORT_STATUS_RXMAC BIT(3) 59#define QCA8K_PORT_STATUS_RXMAC BIT(3)
58#define QCA8K_PORT_STATUS_TXFLOW BIT(4) 60#define QCA8K_PORT_STATUS_TXFLOW BIT(4)
@@ -165,6 +167,7 @@ struct qca8k_priv {
165 struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; 167 struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
166 struct dsa_switch *ds; 168 struct dsa_switch *ds;
167 struct mutex reg_mutex; 169 struct mutex reg_mutex;
170 struct device *dev;
168}; 171};
169 172
170struct qca8k_mib_desc { 173struct qca8k_mib_desc {
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
new file mode 100644
index 000000000000..b4b839a1d095
--- /dev/null
+++ b/drivers/net/dsa/realtek-smi.c
@@ -0,0 +1,489 @@
1// SPDX-License-Identifier: GPL-2.0+
2/* Realtek Simple Management Interface (SMI) driver
3 * It can be discussed how "simple" this interface is.
4 *
5 * The SMI protocol piggy-backs the MDIO MDC and MDIO signals levels
6 * but the protocol is not MDIO at all. Instead it is a Realtek
7 * pecularity that need to bit-bang the lines in a special way to
8 * communicate with the switch.
9 *
10 * ASICs we intend to support with this driver:
11 *
12 * RTL8366 - The original version, apparently
13 * RTL8369 - Similar enough to have the same datsheet as RTL8366
14 * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
15 * different register layout from the other two
16 * RTL8366S - Is this "RTL8366 super"?
17 * RTL8367 - Has an OpenWRT driver as well
18 * RTL8368S - Seems to be an alternative name for RTL8366RB
19 * RTL8370 - Also uses SMI
20 *
21 * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
22 * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
23 * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
24 * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
25 * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/device.h>
31#include <linux/spinlock.h>
32#include <linux/skbuff.h>
33#include <linux/of.h>
34#include <linux/of_device.h>
35#include <linux/of_mdio.h>
36#include <linux/delay.h>
37#include <linux/gpio/consumer.h>
38#include <linux/platform_device.h>
39#include <linux/regmap.h>
40#include <linux/bitops.h>
41#include <linux/if_bridge.h>
42
43#include "realtek-smi.h"
44
45#define REALTEK_SMI_ACK_RETRY_COUNT 5
46#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */
47#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */
48
49static inline void realtek_smi_clk_delay(struct realtek_smi *smi)
50{
51 ndelay(smi->clk_delay);
52}
53
54static void realtek_smi_start(struct realtek_smi *smi)
55{
56 /* Set GPIO pins to output mode, with initial state:
57 * SCK = 0, SDA = 1
58 */
59 gpiod_direction_output(smi->mdc, 0);
60 gpiod_direction_output(smi->mdio, 1);
61 realtek_smi_clk_delay(smi);
62
63 /* CLK 1: 0 -> 1, 1 -> 0 */
64 gpiod_set_value(smi->mdc, 1);
65 realtek_smi_clk_delay(smi);
66 gpiod_set_value(smi->mdc, 0);
67 realtek_smi_clk_delay(smi);
68
69 /* CLK 2: */
70 gpiod_set_value(smi->mdc, 1);
71 realtek_smi_clk_delay(smi);
72 gpiod_set_value(smi->mdio, 0);
73 realtek_smi_clk_delay(smi);
74 gpiod_set_value(smi->mdc, 0);
75 realtek_smi_clk_delay(smi);
76 gpiod_set_value(smi->mdio, 1);
77}
78
79static void realtek_smi_stop(struct realtek_smi *smi)
80{
81 realtek_smi_clk_delay(smi);
82 gpiod_set_value(smi->mdio, 0);
83 gpiod_set_value(smi->mdc, 1);
84 realtek_smi_clk_delay(smi);
85 gpiod_set_value(smi->mdio, 1);
86 realtek_smi_clk_delay(smi);
87 gpiod_set_value(smi->mdc, 1);
88 realtek_smi_clk_delay(smi);
89 gpiod_set_value(smi->mdc, 0);
90 realtek_smi_clk_delay(smi);
91 gpiod_set_value(smi->mdc, 1);
92
93 /* Add a click */
94 realtek_smi_clk_delay(smi);
95 gpiod_set_value(smi->mdc, 0);
96 realtek_smi_clk_delay(smi);
97 gpiod_set_value(smi->mdc, 1);
98
99 /* Set GPIO pins to input mode */
100 gpiod_direction_input(smi->mdio);
101 gpiod_direction_input(smi->mdc);
102}
103
104static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len)
105{
106 for (; len > 0; len--) {
107 realtek_smi_clk_delay(smi);
108
109 /* Prepare data */
110 gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1))));
111 realtek_smi_clk_delay(smi);
112
113 /* Clocking */
114 gpiod_set_value(smi->mdc, 1);
115 realtek_smi_clk_delay(smi);
116 gpiod_set_value(smi->mdc, 0);
117 }
118}
119
120static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data)
121{
122 gpiod_direction_input(smi->mdio);
123
124 for (*data = 0; len > 0; len--) {
125 u32 u;
126
127 realtek_smi_clk_delay(smi);
128
129 /* Clocking */
130 gpiod_set_value(smi->mdc, 1);
131 realtek_smi_clk_delay(smi);
132 u = !!gpiod_get_value(smi->mdio);
133 gpiod_set_value(smi->mdc, 0);
134
135 *data |= (u << (len - 1));
136 }
137
138 gpiod_direction_output(smi->mdio, 0);
139}
140
141static int realtek_smi_wait_for_ack(struct realtek_smi *smi)
142{
143 int retry_cnt;
144
145 retry_cnt = 0;
146 do {
147 u32 ack;
148
149 realtek_smi_read_bits(smi, 1, &ack);
150 if (ack == 0)
151 break;
152
153 if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
154 dev_err(smi->dev, "ACK timeout\n");
155 return -ETIMEDOUT;
156 }
157 } while (1);
158
159 return 0;
160}
161
162static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data)
163{
164 realtek_smi_write_bits(smi, data, 8);
165 return realtek_smi_wait_for_ack(smi);
166}
167
168static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data)
169{
170 realtek_smi_write_bits(smi, data, 8);
171 return 0;
172}
173
174static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data)
175{
176 u32 t;
177
178 /* Read data */
179 realtek_smi_read_bits(smi, 8, &t);
180 *data = (t & 0xff);
181
182 /* Send an ACK */
183 realtek_smi_write_bits(smi, 0x00, 1);
184
185 return 0;
186}
187
188static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data)
189{
190 u32 t;
191
192 /* Read data */
193 realtek_smi_read_bits(smi, 8, &t);
194 *data = (t & 0xff);
195
196 /* Send an ACK */
197 realtek_smi_write_bits(smi, 0x01, 1);
198
199 return 0;
200}
201
202static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data)
203{
204 unsigned long flags;
205 u8 lo = 0;
206 u8 hi = 0;
207 int ret;
208
209 spin_lock_irqsave(&smi->lock, flags);
210
211 realtek_smi_start(smi);
212
213 /* Send READ command */
214 ret = realtek_smi_write_byte(smi, smi->cmd_read);
215 if (ret)
216 goto out;
217
218 /* Set ADDR[7:0] */
219 ret = realtek_smi_write_byte(smi, addr & 0xff);
220 if (ret)
221 goto out;
222
223 /* Set ADDR[15:8] */
224 ret = realtek_smi_write_byte(smi, addr >> 8);
225 if (ret)
226 goto out;
227
228 /* Read DATA[7:0] */
229 realtek_smi_read_byte0(smi, &lo);
230 /* Read DATA[15:8] */
231 realtek_smi_read_byte1(smi, &hi);
232
233 *data = ((u32)lo) | (((u32)hi) << 8);
234
235 ret = 0;
236
237 out:
238 realtek_smi_stop(smi);
239 spin_unlock_irqrestore(&smi->lock, flags);
240
241 return ret;
242}
243
244static int realtek_smi_write_reg(struct realtek_smi *smi,
245 u32 addr, u32 data, bool ack)
246{
247 unsigned long flags;
248 int ret;
249
250 spin_lock_irqsave(&smi->lock, flags);
251
252 realtek_smi_start(smi);
253
254 /* Send WRITE command */
255 ret = realtek_smi_write_byte(smi, smi->cmd_write);
256 if (ret)
257 goto out;
258
259 /* Set ADDR[7:0] */
260 ret = realtek_smi_write_byte(smi, addr & 0xff);
261 if (ret)
262 goto out;
263
264 /* Set ADDR[15:8] */
265 ret = realtek_smi_write_byte(smi, addr >> 8);
266 if (ret)
267 goto out;
268
269 /* Write DATA[7:0] */
270 ret = realtek_smi_write_byte(smi, data & 0xff);
271 if (ret)
272 goto out;
273
274 /* Write DATA[15:8] */
275 if (ack)
276 ret = realtek_smi_write_byte(smi, data >> 8);
277 else
278 ret = realtek_smi_write_byte_noack(smi, data >> 8);
279 if (ret)
280 goto out;
281
282 ret = 0;
283
284 out:
285 realtek_smi_stop(smi);
286 spin_unlock_irqrestore(&smi->lock, flags);
287
288 return ret;
289}
290
291/* There is one single case when we need to use this accessor and that
292 * is when issueing soft reset. Since the device reset as soon as we write
293 * that bit, no ACK will come back for natural reasons.
294 */
295int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
296 u32 data)
297{
298 return realtek_smi_write_reg(smi, addr, data, false);
299}
300EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack);
301
302/* Regmap accessors */
303
304static int realtek_smi_write(void *ctx, u32 reg, u32 val)
305{
306 struct realtek_smi *smi = ctx;
307
308 return realtek_smi_write_reg(smi, reg, val, true);
309}
310
311static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
312{
313 struct realtek_smi *smi = ctx;
314
315 return realtek_smi_read_reg(smi, reg, val);
316}
317
318static const struct regmap_config realtek_smi_mdio_regmap_config = {
319 .reg_bits = 10, /* A4..A0 R4..R0 */
320 .val_bits = 16,
321 .reg_stride = 1,
322 /* PHY regs are at 0x8000 */
323 .max_register = 0xffff,
324 .reg_format_endian = REGMAP_ENDIAN_BIG,
325 .reg_read = realtek_smi_read,
326 .reg_write = realtek_smi_write,
327 .cache_type = REGCACHE_NONE,
328};
329
330static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
331{
332 struct realtek_smi *smi = bus->priv;
333
334 return smi->ops->phy_read(smi, addr, regnum);
335}
336
337static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
338 u16 val)
339{
340 struct realtek_smi *smi = bus->priv;
341
342 return smi->ops->phy_write(smi, addr, regnum, val);
343}
344
345int realtek_smi_setup_mdio(struct realtek_smi *smi)
346{
347 struct device_node *mdio_np;
348 int ret;
349
350 mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
351 "realtek,smi-mdio");
352 if (!mdio_np) {
353 dev_err(smi->dev, "no MDIO bus node\n");
354 return -ENODEV;
355 }
356
357 smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
358 if (!smi->slave_mii_bus)
359 return -ENOMEM;
360 smi->slave_mii_bus->priv = smi;
361 smi->slave_mii_bus->name = "SMI slave MII";
362 smi->slave_mii_bus->read = realtek_smi_mdio_read;
363 smi->slave_mii_bus->write = realtek_smi_mdio_write;
364 snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
365 smi->ds->index);
366 smi->slave_mii_bus->dev.of_node = mdio_np;
367 smi->slave_mii_bus->parent = smi->dev;
368 smi->ds->slave_mii_bus = smi->slave_mii_bus;
369
370 ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np);
371 if (ret) {
372 dev_err(smi->dev, "unable to register MDIO bus %s\n",
373 smi->slave_mii_bus->id);
374 of_node_put(mdio_np);
375 }
376
377 return 0;
378}
379
380static int realtek_smi_probe(struct platform_device *pdev)
381{
382 const struct realtek_smi_variant *var;
383 struct device *dev = &pdev->dev;
384 struct realtek_smi *smi;
385 struct device_node *np;
386 int ret;
387
388 var = of_device_get_match_data(dev);
389 np = dev->of_node;
390
391 smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL);
392 if (!smi)
393 return -ENOMEM;
394 smi->map = devm_regmap_init(dev, NULL, smi,
395 &realtek_smi_mdio_regmap_config);
396 if (IS_ERR(smi->map)) {
397 ret = PTR_ERR(smi->map);
398 dev_err(dev, "regmap init failed: %d\n", ret);
399 return ret;
400 }
401
402 /* Link forward and backward */
403 smi->dev = dev;
404 smi->clk_delay = var->clk_delay;
405 smi->cmd_read = var->cmd_read;
406 smi->cmd_write = var->cmd_write;
407 smi->ops = var->ops;
408
409 dev_set_drvdata(dev, smi);
410 spin_lock_init(&smi->lock);
411
412 /* TODO: if power is software controlled, set up any regulators here */
413
414 /* Assert then deassert RESET */
415 smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
416 if (IS_ERR(smi->reset)) {
417 dev_err(dev, "failed to get RESET GPIO\n");
418 return PTR_ERR(smi->reset);
419 }
420 msleep(REALTEK_SMI_HW_STOP_DELAY);
421 gpiod_set_value(smi->reset, 0);
422 msleep(REALTEK_SMI_HW_START_DELAY);
423 dev_info(dev, "deasserted RESET\n");
424
425 /* Fetch MDIO pins */
426 smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
427 if (IS_ERR(smi->mdc))
428 return PTR_ERR(smi->mdc);
429 smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
430 if (IS_ERR(smi->mdio))
431 return PTR_ERR(smi->mdio);
432
433 smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
434
435 ret = smi->ops->detect(smi);
436 if (ret) {
437 dev_err(dev, "unable to detect switch\n");
438 return ret;
439 }
440
441 smi->ds = dsa_switch_alloc(dev, smi->num_ports);
442 if (!smi->ds)
443 return -ENOMEM;
444 smi->ds->priv = smi;
445
446 smi->ds->ops = var->ds_ops;
447 ret = dsa_register_switch(smi->ds);
448 if (ret) {
449 dev_err(dev, "unable to register switch ret = %d\n", ret);
450 return ret;
451 }
452 return 0;
453}
454
455static int realtek_smi_remove(struct platform_device *pdev)
456{
457 struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
458
459 dsa_unregister_switch(smi->ds);
460 gpiod_set_value(smi->reset, 1);
461
462 return 0;
463}
464
465static const struct of_device_id realtek_smi_of_match[] = {
466 {
467 .compatible = "realtek,rtl8366rb",
468 .data = &rtl8366rb_variant,
469 },
470 {
471 /* FIXME: add support for RTL8366S and more */
472 .compatible = "realtek,rtl8366s",
473 .data = NULL,
474 },
475 { /* sentinel */ },
476};
477MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
478
479static struct platform_driver realtek_smi_driver = {
480 .driver = {
481 .name = "realtek-smi",
482 .of_match_table = of_match_ptr(realtek_smi_of_match),
483 },
484 .probe = realtek_smi_probe,
485 .remove = realtek_smi_remove,
486};
487module_platform_driver(realtek_smi_driver);
488
489MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek-smi.h b/drivers/net/dsa/realtek-smi.h
new file mode 100644
index 000000000000..9a63b51e1d82
--- /dev/null
+++ b/drivers/net/dsa/realtek-smi.h
@@ -0,0 +1,144 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2/* Realtek SMI interface driver defines
3 *
4 * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
5 * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
6 */
7
8#ifndef _REALTEK_SMI_H
9#define _REALTEK_SMI_H
10
11#include <linux/phy.h>
12#include <linux/platform_device.h>
13#include <linux/gpio/consumer.h>
14#include <net/dsa.h>
15
16struct realtek_smi_ops;
17struct dentry;
18struct inode;
19struct file;
20
21struct rtl8366_mib_counter {
22 unsigned int base;
23 unsigned int offset;
24 unsigned int length;
25 const char *name;
26};
27
28struct rtl8366_vlan_mc {
29 u16 vid;
30 u16 untag;
31 u16 member;
32 u8 fid;
33 u8 priority;
34};
35
36struct rtl8366_vlan_4k {
37 u16 vid;
38 u16 untag;
39 u16 member;
40 u8 fid;
41};
42
43struct realtek_smi {
44 struct device *dev;
45 struct gpio_desc *reset;
46 struct gpio_desc *mdc;
47 struct gpio_desc *mdio;
48 struct regmap *map;
49 struct mii_bus *slave_mii_bus;
50
51 unsigned int clk_delay;
52 u8 cmd_read;
53 u8 cmd_write;
54 spinlock_t lock; /* Locks around command writes */
55 struct dsa_switch *ds;
56 struct irq_domain *irqdomain;
57 bool leds_disabled;
58
59 unsigned int cpu_port;
60 unsigned int num_ports;
61 unsigned int num_vlan_mc;
62 unsigned int num_mib_counters;
63 struct rtl8366_mib_counter *mib_counters;
64
65 const struct realtek_smi_ops *ops;
66
67 int vlan_enabled;
68 int vlan4k_enabled;
69
70 char buf[4096];
71};
72
73/**
74 * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations
75 * @detect: detects the chiptype
76 */
77struct realtek_smi_ops {
78 int (*detect)(struct realtek_smi *smi);
79 int (*reset_chip)(struct realtek_smi *smi);
80 int (*setup)(struct realtek_smi *smi);
81 void (*cleanup)(struct realtek_smi *smi);
82 int (*get_mib_counter)(struct realtek_smi *smi,
83 int port,
84 struct rtl8366_mib_counter *mib,
85 u64 *mibvalue);
86 int (*get_vlan_mc)(struct realtek_smi *smi, u32 index,
87 struct rtl8366_vlan_mc *vlanmc);
88 int (*set_vlan_mc)(struct realtek_smi *smi, u32 index,
89 const struct rtl8366_vlan_mc *vlanmc);
90 int (*get_vlan_4k)(struct realtek_smi *smi, u32 vid,
91 struct rtl8366_vlan_4k *vlan4k);
92 int (*set_vlan_4k)(struct realtek_smi *smi,
93 const struct rtl8366_vlan_4k *vlan4k);
94 int (*get_mc_index)(struct realtek_smi *smi, int port, int *val);
95 int (*set_mc_index)(struct realtek_smi *smi, int port, int index);
96 bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan);
97 int (*enable_vlan)(struct realtek_smi *smi, bool enable);
98 int (*enable_vlan4k)(struct realtek_smi *smi, bool enable);
99 int (*enable_port)(struct realtek_smi *smi, int port, bool enable);
100 int (*phy_read)(struct realtek_smi *smi, int phy, int regnum);
101 int (*phy_write)(struct realtek_smi *smi, int phy, int regnum,
102 u16 val);
103};
104
105struct realtek_smi_variant {
106 const struct dsa_switch_ops *ds_ops;
107 const struct realtek_smi_ops *ops;
108 unsigned int clk_delay;
109 u8 cmd_read;
110 u8 cmd_write;
111};
112
113/* SMI core calls */
114int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
115 u32 data);
116int realtek_smi_setup_mdio(struct realtek_smi *smi);
117
118/* RTL8366 library helpers */
119int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
120int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
121 u32 untag, u32 fid);
122int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
123int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
124 unsigned int vid);
125int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
126int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
127int rtl8366_reset_vlan(struct realtek_smi *smi);
128int rtl8366_init_vlan(struct realtek_smi *smi);
129int rtl8366_vlan_filtering(struct dsa_switch *ds, int port,
130 bool vlan_filtering);
131int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
132 const struct switchdev_obj_port_vlan *vlan);
133void rtl8366_vlan_add(struct dsa_switch *ds, int port,
134 const struct switchdev_obj_port_vlan *vlan);
135int rtl8366_vlan_del(struct dsa_switch *ds, int port,
136 const struct switchdev_obj_port_vlan *vlan);
137void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
138 uint8_t *data);
139int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
140void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
141
142extern const struct realtek_smi_variant rtl8366rb_variant;
143
144#endif /* _REALTEK_SMI_H */
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
new file mode 100644
index 000000000000..6dedd43442cc
--- /dev/null
+++ b/drivers/net/dsa/rtl8366.c
@@ -0,0 +1,515 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Realtek SMI library helpers for the RTL8366x variants
3 * RTL8366RB and RTL8366S
4 *
5 * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
6 * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
7 * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
8 * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
9 * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
10 */
11#include <linux/if_bridge.h>
12#include <net/dsa.h>
13
14#include "realtek-smi.h"
15
16int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
17{
18 int ret;
19 int i;
20
21 *used = 0;
22 for (i = 0; i < smi->num_ports; i++) {
23 int index = 0;
24
25 ret = smi->ops->get_mc_index(smi, i, &index);
26 if (ret)
27 return ret;
28
29 if (mc_index == index) {
30 *used = 1;
31 break;
32 }
33 }
34
35 return 0;
36}
37EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
38
39int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
40 u32 untag, u32 fid)
41{
42 struct rtl8366_vlan_4k vlan4k;
43 int ret;
44 int i;
45
46 /* Update the 4K table */
47 ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
48 if (ret)
49 return ret;
50
51 vlan4k.member = member;
52 vlan4k.untag = untag;
53 vlan4k.fid = fid;
54 ret = smi->ops->set_vlan_4k(smi, &vlan4k);
55 if (ret)
56 return ret;
57
58 /* Try to find an existing MC entry for this VID */
59 for (i = 0; i < smi->num_vlan_mc; i++) {
60 struct rtl8366_vlan_mc vlanmc;
61
62 ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
63 if (ret)
64 return ret;
65
66 if (vid == vlanmc.vid) {
67 /* update the MC entry */
68 vlanmc.member = member;
69 vlanmc.untag = untag;
70 vlanmc.fid = fid;
71
72 ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
73 break;
74 }
75 }
76
77 return ret;
78}
79EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
80
81int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
82{
83 struct rtl8366_vlan_mc vlanmc;
84 int ret;
85 int index;
86
87 ret = smi->ops->get_mc_index(smi, port, &index);
88 if (ret)
89 return ret;
90
91 ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
92 if (ret)
93 return ret;
94
95 *val = vlanmc.vid;
96 return 0;
97}
98EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
99
100int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
101 unsigned int vid)
102{
103 struct rtl8366_vlan_mc vlanmc;
104 struct rtl8366_vlan_4k vlan4k;
105 int ret;
106 int i;
107
108 /* Try to find an existing MC entry for this VID */
109 for (i = 0; i < smi->num_vlan_mc; i++) {
110 ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
111 if (ret)
112 return ret;
113
114 if (vid == vlanmc.vid) {
115 ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
116 if (ret)
117 return ret;
118
119 ret = smi->ops->set_mc_index(smi, port, i);
120 return ret;
121 }
122 }
123
124 /* We have no MC entry for this VID, try to find an empty one */
125 for (i = 0; i < smi->num_vlan_mc; i++) {
126 ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
127 if (ret)
128 return ret;
129
130 if (vlanmc.vid == 0 && vlanmc.member == 0) {
131 /* Update the entry from the 4K table */
132 ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
133 if (ret)
134 return ret;
135
136 vlanmc.vid = vid;
137 vlanmc.member = vlan4k.member;
138 vlanmc.untag = vlan4k.untag;
139 vlanmc.fid = vlan4k.fid;
140 ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
141 if (ret)
142 return ret;
143
144 ret = smi->ops->set_mc_index(smi, port, i);
145 return ret;
146 }
147 }
148
149 /* MC table is full, try to find an unused entry and replace it */
150 for (i = 0; i < smi->num_vlan_mc; i++) {
151 int used;
152
153 ret = rtl8366_mc_is_used(smi, i, &used);
154 if (ret)
155 return ret;
156
157 if (!used) {
158 /* Update the entry from the 4K table */
159 ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
160 if (ret)
161 return ret;
162
163 vlanmc.vid = vid;
164 vlanmc.member = vlan4k.member;
165 vlanmc.untag = vlan4k.untag;
166 vlanmc.fid = vlan4k.fid;
167 ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
168 if (ret)
169 return ret;
170
171 ret = smi->ops->set_mc_index(smi, port, i);
172 return ret;
173 }
174 }
175
176 dev_err(smi->dev,
177 "all VLAN member configurations are in use\n");
178
179 return -ENOSPC;
180}
181EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
182
183int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
184{
185 int ret;
186
187 /* To enable 4k VLAN, ordinary VLAN must be enabled first,
188 * but if we disable 4k VLAN it is fine to leave ordinary
189 * VLAN enabled.
190 */
191 if (enable) {
192 /* Make sure VLAN is ON */
193 ret = smi->ops->enable_vlan(smi, true);
194 if (ret)
195 return ret;
196
197 smi->vlan_enabled = true;
198 }
199
200 ret = smi->ops->enable_vlan4k(smi, enable);
201 if (ret)
202 return ret;
203
204 smi->vlan4k_enabled = enable;
205 return 0;
206}
207EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
208
209int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable)
210{
211 int ret;
212
213 ret = smi->ops->enable_vlan(smi, enable);
214 if (ret)
215 return ret;
216
217 smi->vlan_enabled = enable;
218
219 /* If we turn VLAN off, make sure that we turn off
220 * 4k VLAN as well, if that happened to be on.
221 */
222 if (!enable) {
223 smi->vlan4k_enabled = false;
224 ret = smi->ops->enable_vlan4k(smi, false);
225 }
226
227 return ret;
228}
229EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
230
231int rtl8366_reset_vlan(struct realtek_smi *smi)
232{
233 struct rtl8366_vlan_mc vlanmc;
234 int ret;
235 int i;
236
237 rtl8366_enable_vlan(smi, false);
238 rtl8366_enable_vlan4k(smi, false);
239
240 /* Clear the 16 VLAN member configurations */
241 vlanmc.vid = 0;
242 vlanmc.priority = 0;
243 vlanmc.member = 0;
244 vlanmc.untag = 0;
245 vlanmc.fid = 0;
246 for (i = 0; i < smi->num_vlan_mc; i++) {
247 ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
248 if (ret)
249 return ret;
250 }
251
252 return 0;
253}
254EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
255
256int rtl8366_init_vlan(struct realtek_smi *smi)
257{
258 int port;
259 int ret;
260
261 ret = rtl8366_reset_vlan(smi);
262 if (ret)
263 return ret;
264
265 /* Loop over the available ports, for each port, associate
266 * it with the VLAN (port+1)
267 */
268 for (port = 0; port < smi->num_ports; port++) {
269 u32 mask;
270
271 if (port == smi->cpu_port)
272 /* For the CPU port, make all ports members of this
273 * VLAN.
274 */
275 mask = GENMASK(smi->num_ports - 1, 0);
276 else
277 /* For all other ports, enable itself plus the
278 * CPU port.
279 */
280 mask = BIT(port) | BIT(smi->cpu_port);
281
282 /* For each port, set the port as member of VLAN (port+1)
283 * and untagged, except for the CPU port: the CPU port (5) is
284 * member of VLAN 6 and so are ALL the other ports as well.
285 * Use filter 0 (no filter).
286 */
287 dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n",
288 (port + 1), port, mask);
289 ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0);
290 if (ret)
291 return ret;
292
293 dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n",
294 (port + 1), port, (port + 1));
295 ret = rtl8366_set_pvid(smi, port, (port + 1));
296 if (ret)
297 return ret;
298 }
299
300 return rtl8366_enable_vlan(smi, true);
301}
302EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
303
304int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
305{
306 struct realtek_smi *smi = ds->priv;
307 struct rtl8366_vlan_4k vlan4k;
308 int ret;
309
310 if (!smi->ops->is_vlan_valid(smi, port))
311 return -EINVAL;
312
313 dev_info(smi->dev, "%s filtering on port %d\n",
314 vlan_filtering ? "enable" : "disable",
315 port);
316
317 /* TODO:
318 * The hardware support filter ID (FID) 0..7, I have no clue how to
319 * support this in the driver when the callback only says on/off.
320 */
321 ret = smi->ops->get_vlan_4k(smi, port, &vlan4k);
322 if (ret)
323 return ret;
324
325 /* Just set the filter to FID 1 for now then */
326 ret = rtl8366_set_vlan(smi, port,
327 vlan4k.member,
328 vlan4k.untag,
329 1);
330 if (ret)
331 return ret;
332
333 return 0;
334}
335EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering);
336
337int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
338 const struct switchdev_obj_port_vlan *vlan)
339{
340 struct realtek_smi *smi = ds->priv;
341 int ret;
342
343 if (!smi->ops->is_vlan_valid(smi, port))
344 return -EINVAL;
345
346 dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
347 vlan->vid_begin, vlan->vid_end);
348
349 /* Enable VLAN in the hardware
350 * FIXME: what's with this 4k business?
351 * Just rtl8366_enable_vlan() seems inconclusive.
352 */
353 ret = rtl8366_enable_vlan4k(smi, true);
354 if (ret)
355 return ret;
356
357 return 0;
358}
359EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare);
360
361void rtl8366_vlan_add(struct dsa_switch *ds, int port,
362 const struct switchdev_obj_port_vlan *vlan)
363{
364 bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
365 bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
366 struct realtek_smi *smi = ds->priv;
367 u32 member = 0;
368 u32 untag = 0;
369 u16 vid;
370 int ret;
371
372 if (!smi->ops->is_vlan_valid(smi, port))
373 return;
374
375 dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
376 port,
377 untagged ? "untagged" : "tagged",
378 pvid ? " PVID" : "no PVID");
379
380 if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
381 dev_err(smi->dev, "port is DSA or CPU port\n");
382
383 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
384 int pvid_val = 0;
385
386 dev_info(smi->dev, "add VLAN %04x\n", vid);
387 member |= BIT(port);
388
389 if (untagged)
390 untag |= BIT(port);
391
392 /* To ensure that we have a valid MC entry for this VLAN,
393 * initialize the port VLAN ID here.
394 */
395 ret = rtl8366_get_pvid(smi, port, &pvid_val);
396 if (ret < 0) {
397 dev_err(smi->dev, "could not lookup PVID for port %d\n",
398 port);
399 return;
400 }
401 if (pvid_val == 0) {
402 ret = rtl8366_set_pvid(smi, port, vid);
403 if (ret < 0)
404 return;
405 }
406 }
407
408 ret = rtl8366_set_vlan(smi, port, member, untag, 0);
409 if (ret)
410 dev_err(smi->dev,
411 "failed to set up VLAN %04x",
412 vid);
413}
414EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
415
416int rtl8366_vlan_del(struct dsa_switch *ds, int port,
417 const struct switchdev_obj_port_vlan *vlan)
418{
419 struct realtek_smi *smi = ds->priv;
420 u16 vid;
421 int ret;
422
423 dev_info(smi->dev, "del VLAN on port %d\n", port);
424
425 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
426 int i;
427
428 dev_info(smi->dev, "del VLAN %04x\n", vid);
429
430 for (i = 0; i < smi->num_vlan_mc; i++) {
431 struct rtl8366_vlan_mc vlanmc;
432
433 ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
434 if (ret)
435 return ret;
436
437 if (vid == vlanmc.vid) {
438 /* clear VLAN member configurations */
439 vlanmc.vid = 0;
440 vlanmc.priority = 0;
441 vlanmc.member = 0;
442 vlanmc.untag = 0;
443 vlanmc.fid = 0;
444
445 ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
446 if (ret) {
447 dev_err(smi->dev,
448 "failed to remove VLAN %04x\n",
449 vid);
450 return ret;
451 }
452 break;
453 }
454 }
455 }
456
457 return 0;
458}
459EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
460
461void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
462 uint8_t *data)
463{
464 struct realtek_smi *smi = ds->priv;
465 struct rtl8366_mib_counter *mib;
466 int i;
467
468 if (port >= smi->num_ports)
469 return;
470
471 for (i = 0; i < smi->num_mib_counters; i++) {
472 mib = &smi->mib_counters[i];
473 strncpy(data + i * ETH_GSTRING_LEN,
474 mib->name, ETH_GSTRING_LEN);
475 }
476}
477EXPORT_SYMBOL_GPL(rtl8366_get_strings);
478
479int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
480{
481 struct realtek_smi *smi = ds->priv;
482
483 /* We only support SS_STATS */
484 if (sset != ETH_SS_STATS)
485 return 0;
486 if (port >= smi->num_ports)
487 return -EINVAL;
488
489 return smi->num_mib_counters;
490}
491EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
492
493void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
494{
495 struct realtek_smi *smi = ds->priv;
496 int i;
497 int ret;
498
499 if (port >= smi->num_ports)
500 return;
501
502 for (i = 0; i < smi->num_mib_counters; i++) {
503 struct rtl8366_mib_counter *mib;
504 u64 mibvalue = 0;
505
506 mib = &smi->mib_counters[i];
507 ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue);
508 if (ret) {
509 dev_err(smi->dev, "error reading MIB counter %s\n",
510 mib->name);
511 }
512 data[i] = mibvalue;
513 }
514}
515EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
new file mode 100644
index 000000000000..a4d5049df692
--- /dev/null
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -0,0 +1,1454 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Realtek SMI subdriver for the Realtek RTL8366RB ethernet switch
3 *
4 * This is a sparsely documented chip, the only viable documentation seems
5 * to be a patched up code drop from the vendor that appear in various
6 * GPL source trees.
7 *
8 * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
9 * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
10 * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
11 * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
12 * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
13 */
14
15#include <linux/bitops.h>
16#include <linux/etherdevice.h>
17#include <linux/interrupt.h>
18#include <linux/irqdomain.h>
19#include <linux/irqchip/chained_irq.h>
20#include <linux/of_irq.h>
21#include <linux/regmap.h>
22
23#include "realtek-smi.h"
24
25#define RTL8366RB_PORT_NUM_CPU 5
26#define RTL8366RB_NUM_PORTS 6
27#define RTL8366RB_PHY_NO_MAX 4
28#define RTL8366RB_PHY_ADDR_MAX 31
29
30/* Switch Global Configuration register */
31#define RTL8366RB_SGCR 0x0000
32#define RTL8366RB_SGCR_EN_BC_STORM_CTRL BIT(0)
33#define RTL8366RB_SGCR_MAX_LENGTH(a) ((a) << 4)
34#define RTL8366RB_SGCR_MAX_LENGTH_MASK RTL8366RB_SGCR_MAX_LENGTH(0x3)
35#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0)
36#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1)
37#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2)
38#define RTL8366RB_SGCR_MAX_LENGTH_9216 RTL8366RB_SGCR_MAX_LENGTH(0x3)
39#define RTL8366RB_SGCR_EN_VLAN BIT(13)
40#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14)
41
42/* Port Enable Control register */
43#define RTL8366RB_PECR 0x0001
44
45/* Switch Security Control registers */
46#define RTL8366RB_SSCR0 0x0002
47#define RTL8366RB_SSCR1 0x0003
48#define RTL8366RB_SSCR2 0x0004
49#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0)
50
51/* Port Mode Control registers */
52#define RTL8366RB_PMC0 0x0005
53#define RTL8366RB_PMC0_SPI BIT(0)
54#define RTL8366RB_PMC0_EN_AUTOLOAD BIT(1)
55#define RTL8366RB_PMC0_PROBE BIT(2)
56#define RTL8366RB_PMC0_DIS_BISR BIT(3)
57#define RTL8366RB_PMC0_ADCTEST BIT(4)
58#define RTL8366RB_PMC0_SRAM_DIAG BIT(5)
59#define RTL8366RB_PMC0_EN_SCAN BIT(6)
60#define RTL8366RB_PMC0_P4_IOMODE_SHIFT 7
61#define RTL8366RB_PMC0_P4_IOMODE_MASK GENMASK(9, 7)
62#define RTL8366RB_PMC0_P5_IOMODE_SHIFT 10
63#define RTL8366RB_PMC0_P5_IOMODE_MASK GENMASK(12, 10)
64#define RTL8366RB_PMC0_SDSMODE_SHIFT 13
65#define RTL8366RB_PMC0_SDSMODE_MASK GENMASK(15, 13)
66#define RTL8366RB_PMC1 0x0006
67
68/* Port Mirror Control Register */
69#define RTL8366RB_PMCR 0x0007
70#define RTL8366RB_PMCR_SOURCE_PORT(a) (a)
71#define RTL8366RB_PMCR_SOURCE_PORT_MASK 0x000f
72#define RTL8366RB_PMCR_MONITOR_PORT(a) ((a) << 4)
73#define RTL8366RB_PMCR_MONITOR_PORT_MASK 0x00f0
74#define RTL8366RB_PMCR_MIRROR_RX BIT(8)
75#define RTL8366RB_PMCR_MIRROR_TX BIT(9)
76#define RTL8366RB_PMCR_MIRROR_SPC BIT(10)
77#define RTL8366RB_PMCR_MIRROR_ISO BIT(11)
78
79/* bits 0..7 = port 0, bits 8..15 = port 1 */
80#define RTL8366RB_PAACR0 0x0010
81/* bits 0..7 = port 2, bits 8..15 = port 3 */
82#define RTL8366RB_PAACR1 0x0011
83/* bits 0..7 = port 4, bits 8..15 = port 5 */
84#define RTL8366RB_PAACR2 0x0012
85#define RTL8366RB_PAACR_SPEED_10M 0
86#define RTL8366RB_PAACR_SPEED_100M 1
87#define RTL8366RB_PAACR_SPEED_1000M 2
88#define RTL8366RB_PAACR_FULL_DUPLEX BIT(2)
89#define RTL8366RB_PAACR_LINK_UP BIT(4)
90#define RTL8366RB_PAACR_TX_PAUSE BIT(5)
91#define RTL8366RB_PAACR_RX_PAUSE BIT(6)
92#define RTL8366RB_PAACR_AN BIT(7)
93
94#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \
95 RTL8366RB_PAACR_FULL_DUPLEX | \
96 RTL8366RB_PAACR_LINK_UP | \
97 RTL8366RB_PAACR_TX_PAUSE | \
98 RTL8366RB_PAACR_RX_PAUSE)
99
100/* bits 0..7 = port 0, bits 8..15 = port 1 */
101#define RTL8366RB_PSTAT0 0x0014
102/* bits 0..7 = port 2, bits 8..15 = port 3 */
103#define RTL8366RB_PSTAT1 0x0015
104/* bits 0..7 = port 4, bits 8..15 = port 5 */
105#define RTL8366RB_PSTAT2 0x0016
106
107#define RTL8366RB_POWER_SAVING_REG 0x0021
108
109/* CPU port control reg */
110#define RTL8368RB_CPU_CTRL_REG 0x0061
111#define RTL8368RB_CPU_PORTS_MSK 0x00FF
112/* Enables inserting custom tag length/type 0x8899 */
113#define RTL8368RB_CPU_INSTAG BIT(15)
114
115#define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */
116#define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */
117#define RTL8366RB_SMAR2 0x0072 /* bits 32..47 */
118
119#define RTL8366RB_RESET_CTRL_REG 0x0100
120#define RTL8366RB_CHIP_CTRL_RESET_HW BIT(0)
121#define RTL8366RB_CHIP_CTRL_RESET_SW BIT(1)
122
123#define RTL8366RB_CHIP_ID_REG 0x0509
124#define RTL8366RB_CHIP_ID_8366 0x5937
125#define RTL8366RB_CHIP_VERSION_CTRL_REG 0x050A
126#define RTL8366RB_CHIP_VERSION_MASK 0xf
127
128/* PHY registers control */
129#define RTL8366RB_PHY_ACCESS_CTRL_REG 0x8000
130#define RTL8366RB_PHY_CTRL_READ BIT(0)
131#define RTL8366RB_PHY_CTRL_WRITE 0
132#define RTL8366RB_PHY_ACCESS_BUSY_REG 0x8001
133#define RTL8366RB_PHY_INT_BUSY BIT(0)
134#define RTL8366RB_PHY_EXT_BUSY BIT(4)
135#define RTL8366RB_PHY_ACCESS_DATA_REG 0x8002
136#define RTL8366RB_PHY_EXT_CTRL_REG 0x8010
137#define RTL8366RB_PHY_EXT_WRDATA_REG 0x8011
138#define RTL8366RB_PHY_EXT_RDDATA_REG 0x8012
139
140#define RTL8366RB_PHY_REG_MASK 0x1f
141#define RTL8366RB_PHY_PAGE_OFFSET 5
142#define RTL8366RB_PHY_PAGE_MASK (0xf << 5)
143#define RTL8366RB_PHY_NO_OFFSET 9
144#define RTL8366RB_PHY_NO_MASK (0x1f << 9)
145
146#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
147
148/* LED control registers */
149#define RTL8366RB_LED_BLINKRATE_REG 0x0430
150#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
151#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
152#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
153#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
154#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
155#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
156#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
157
158#define RTL8366RB_LED_CTRL_REG 0x0431
159#define RTL8366RB_LED_OFF 0x0
160#define RTL8366RB_LED_DUP_COL 0x1
161#define RTL8366RB_LED_LINK_ACT 0x2
162#define RTL8366RB_LED_SPD1000 0x3
163#define RTL8366RB_LED_SPD100 0x4
164#define RTL8366RB_LED_SPD10 0x5
165#define RTL8366RB_LED_SPD1000_ACT 0x6
166#define RTL8366RB_LED_SPD100_ACT 0x7
167#define RTL8366RB_LED_SPD10_ACT 0x8
168#define RTL8366RB_LED_SPD100_10_ACT 0x9
169#define RTL8366RB_LED_FIBER 0xa
170#define RTL8366RB_LED_AN_FAULT 0xb
171#define RTL8366RB_LED_LINK_RX 0xc
172#define RTL8366RB_LED_LINK_TX 0xd
173#define RTL8366RB_LED_MASTER 0xe
174#define RTL8366RB_LED_FORCE 0xf
175#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
176#define RTL8366RB_LED_1_OFFSET 6
177#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
178#define RTL8366RB_LED_3_OFFSET 6
179
180#define RTL8366RB_MIB_COUNT 33
181#define RTL8366RB_GLOBAL_MIB_COUNT 1
182#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
183#define RTL8366RB_MIB_COUNTER_BASE 0x1000
184#define RTL8366RB_MIB_CTRL_REG 0x13F0
185#define RTL8366RB_MIB_CTRL_USER_MASK 0x0FFC
186#define RTL8366RB_MIB_CTRL_BUSY_MASK BIT(0)
187#define RTL8366RB_MIB_CTRL_RESET_MASK BIT(1)
188#define RTL8366RB_MIB_CTRL_PORT_RESET(_p) BIT(2 + (_p))
189#define RTL8366RB_MIB_CTRL_GLOBAL_RESET BIT(11)
190
191#define RTL8366RB_PORT_VLAN_CTRL_BASE 0x0063
192#define RTL8366RB_PORT_VLAN_CTRL_REG(_p) \
193 (RTL8366RB_PORT_VLAN_CTRL_BASE + (_p) / 4)
194#define RTL8366RB_PORT_VLAN_CTRL_MASK 0xf
195#define RTL8366RB_PORT_VLAN_CTRL_SHIFT(_p) (4 * ((_p) % 4))
196
197#define RTL8366RB_VLAN_TABLE_READ_BASE 0x018C
198#define RTL8366RB_VLAN_TABLE_WRITE_BASE 0x0185
199
200#define RTL8366RB_TABLE_ACCESS_CTRL_REG 0x0180
201#define RTL8366RB_TABLE_VLAN_READ_CTRL 0x0E01
202#define RTL8366RB_TABLE_VLAN_WRITE_CTRL 0x0F01
203
204#define RTL8366RB_VLAN_MC_BASE(_x) (0x0020 + (_x) * 3)
205
206#define RTL8366RB_PORT_LINK_STATUS_BASE 0x0014
207#define RTL8366RB_PORT_STATUS_SPEED_MASK 0x0003
208#define RTL8366RB_PORT_STATUS_DUPLEX_MASK 0x0004
209#define RTL8366RB_PORT_STATUS_LINK_MASK 0x0010
210#define RTL8366RB_PORT_STATUS_TXPAUSE_MASK 0x0020
211#define RTL8366RB_PORT_STATUS_RXPAUSE_MASK 0x0040
212#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
213
214#define RTL8366RB_NUM_VLANS 16
215#define RTL8366RB_NUM_LEDGROUPS 4
216#define RTL8366RB_NUM_VIDS 4096
217#define RTL8366RB_PRIORITYMAX 7
218#define RTL8366RB_FIDMAX 7
219
220#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */
221#define RTL8366RB_PORT_2 BIT(1) /* In userspace port 1 */
222#define RTL8366RB_PORT_3 BIT(2) /* In userspace port 2 */
223#define RTL8366RB_PORT_4 BIT(3) /* In userspace port 3 */
224#define RTL8366RB_PORT_5 BIT(4) /* In userspace port 4 */
225
226#define RTL8366RB_PORT_CPU BIT(5) /* CPU port */
227
228#define RTL8366RB_PORT_ALL (RTL8366RB_PORT_1 | \
229 RTL8366RB_PORT_2 | \
230 RTL8366RB_PORT_3 | \
231 RTL8366RB_PORT_4 | \
232 RTL8366RB_PORT_5 | \
233 RTL8366RB_PORT_CPU)
234
235#define RTL8366RB_PORT_ALL_BUT_CPU (RTL8366RB_PORT_1 | \
236 RTL8366RB_PORT_2 | \
237 RTL8366RB_PORT_3 | \
238 RTL8366RB_PORT_4 | \
239 RTL8366RB_PORT_5)
240
241#define RTL8366RB_PORT_ALL_EXTERNAL (RTL8366RB_PORT_1 | \
242 RTL8366RB_PORT_2 | \
243 RTL8366RB_PORT_3 | \
244 RTL8366RB_PORT_4)
245
246#define RTL8366RB_PORT_ALL_INTERNAL RTL8366RB_PORT_CPU
247
248/* First configuration word per member config, VID and prio */
249#define RTL8366RB_VLAN_VID_MASK 0xfff
250#define RTL8366RB_VLAN_PRIORITY_SHIFT 12
251#define RTL8366RB_VLAN_PRIORITY_MASK 0x7
252/* Second configuration word per member config, member and untagged */
253#define RTL8366RB_VLAN_UNTAG_SHIFT 8
254#define RTL8366RB_VLAN_UNTAG_MASK 0xff
255#define RTL8366RB_VLAN_MEMBER_MASK 0xff
256/* Third config word per member config, STAG currently unused */
257#define RTL8366RB_VLAN_STAG_MBR_MASK 0xff
258#define RTL8366RB_VLAN_STAG_MBR_SHIFT 8
259#define RTL8366RB_VLAN_STAG_IDX_MASK 0x7
260#define RTL8366RB_VLAN_STAG_IDX_SHIFT 5
261#define RTL8366RB_VLAN_FID_MASK 0x7
262
263/* Port ingress bandwidth control */
264#define RTL8366RB_IB_BASE 0x0200
265#define RTL8366RB_IB_REG(pnum) (RTL8366RB_IB_BASE + (pnum))
266#define RTL8366RB_IB_BDTH_MASK 0x3fff
267#define RTL8366RB_IB_PREIFG BIT(14)
268
269/* Port egress bandwidth control */
270#define RTL8366RB_EB_BASE 0x02d1
271#define RTL8366RB_EB_REG(pnum) (RTL8366RB_EB_BASE + (pnum))
272#define RTL8366RB_EB_BDTH_MASK 0x3fff
273#define RTL8366RB_EB_PREIFG_REG 0x02f8
274#define RTL8366RB_EB_PREIFG BIT(9)
275
276#define RTL8366RB_BDTH_SW_MAX 1048512 /* 1048576? */
277#define RTL8366RB_BDTH_UNIT 64
278#define RTL8366RB_BDTH_REG_DEFAULT 16383
279
280/* QOS */
281#define RTL8366RB_QOS BIT(15)
282/* Include/Exclude Preamble and IFG (20 bytes). 0:Exclude, 1:Include. */
283#define RTL8366RB_QOS_DEFAULT_PREIFG 1
284
285/* Interrupt handling */
286#define RTL8366RB_INTERRUPT_CONTROL_REG 0x0440
287#define RTL8366RB_INTERRUPT_POLARITY BIT(0)
288#define RTL8366RB_P4_RGMII_LED BIT(2)
289#define RTL8366RB_INTERRUPT_MASK_REG 0x0441
290#define RTL8366RB_INTERRUPT_LINK_CHGALL GENMASK(11, 0)
291#define RTL8366RB_INTERRUPT_ACLEXCEED BIT(8)
292#define RTL8366RB_INTERRUPT_STORMEXCEED BIT(9)
293#define RTL8366RB_INTERRUPT_P4_FIBER BIT(12)
294#define RTL8366RB_INTERRUPT_P4_UTP BIT(13)
295#define RTL8366RB_INTERRUPT_VALID (RTL8366RB_INTERRUPT_LINK_CHGALL | \
296 RTL8366RB_INTERRUPT_ACLEXCEED | \
297 RTL8366RB_INTERRUPT_STORMEXCEED | \
298 RTL8366RB_INTERRUPT_P4_FIBER | \
299 RTL8366RB_INTERRUPT_P4_UTP)
300#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442
301#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */
302
303/* bits 0..5 enable force when cleared */
304#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11
305
306#define RTL8366RB_OAM_PARSER_REG 0x0F14
307#define RTL8366RB_OAM_MULTIPLEXER_REG 0x0F15
308
309#define RTL8366RB_GREEN_FEATURE_REG 0x0F51
310#define RTL8366RB_GREEN_FEATURE_MSK 0x0007
311#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
312#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
313
314static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
315 { 0, 0, 4, "IfInOctets" },
316 { 0, 4, 4, "EtherStatsOctets" },
317 { 0, 8, 2, "EtherStatsUnderSizePkts" },
318 { 0, 10, 2, "EtherFragments" },
319 { 0, 12, 2, "EtherStatsPkts64Octets" },
320 { 0, 14, 2, "EtherStatsPkts65to127Octets" },
321 { 0, 16, 2, "EtherStatsPkts128to255Octets" },
322 { 0, 18, 2, "EtherStatsPkts256to511Octets" },
323 { 0, 20, 2, "EtherStatsPkts512to1023Octets" },
324 { 0, 22, 2, "EtherStatsPkts1024to1518Octets" },
325 { 0, 24, 2, "EtherOversizeStats" },
326 { 0, 26, 2, "EtherStatsJabbers" },
327 { 0, 28, 2, "IfInUcastPkts" },
328 { 0, 30, 2, "EtherStatsMulticastPkts" },
329 { 0, 32, 2, "EtherStatsBroadcastPkts" },
330 { 0, 34, 2, "EtherStatsDropEvents" },
331 { 0, 36, 2, "Dot3StatsFCSErrors" },
332 { 0, 38, 2, "Dot3StatsSymbolErrors" },
333 { 0, 40, 2, "Dot3InPauseFrames" },
334 { 0, 42, 2, "Dot3ControlInUnknownOpcodes" },
335 { 0, 44, 4, "IfOutOctets" },
336 { 0, 48, 2, "Dot3StatsSingleCollisionFrames" },
337 { 0, 50, 2, "Dot3StatMultipleCollisionFrames" },
338 { 0, 52, 2, "Dot3sDeferredTransmissions" },
339 { 0, 54, 2, "Dot3StatsLateCollisions" },
340 { 0, 56, 2, "EtherStatsCollisions" },
341 { 0, 58, 2, "Dot3StatsExcessiveCollisions" },
342 { 0, 60, 2, "Dot3OutPauseFrames" },
343 { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" },
344 { 0, 64, 2, "Dot1dTpPortInDiscards" },
345 { 0, 66, 2, "IfOutUcastPkts" },
346 { 0, 68, 2, "IfOutMulticastPkts" },
347 { 0, 70, 2, "IfOutBroadcastPkts" },
348};
349
350static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
351 int port,
352 struct rtl8366_mib_counter *mib,
353 u64 *mibvalue)
354{
355 u32 addr, val;
356 int ret;
357 int i;
358
359 addr = RTL8366RB_MIB_COUNTER_BASE +
360 RTL8366RB_MIB_COUNTER_PORT_OFFSET * (port) +
361 mib->offset;
362
363 /* Writing access counter address first
364 * then ASIC will prepare 64bits counter wait for being retrived
365 */
366 ret = regmap_write(smi->map, addr, 0); /* Write whatever */
367 if (ret)
368 return ret;
369
370 /* Read MIB control register */
371 ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val);
372 if (ret)
373 return -EIO;
374
375 if (val & RTL8366RB_MIB_CTRL_BUSY_MASK)
376 return -EBUSY;
377
378 if (val & RTL8366RB_MIB_CTRL_RESET_MASK)
379 return -EIO;
380
381 /* Read each individual MIB 16 bits at the time */
382 *mibvalue = 0;
383 for (i = mib->length; i > 0; i--) {
384 ret = regmap_read(smi->map, addr + (i - 1), &val);
385 if (ret)
386 return ret;
387 *mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
388 }
389 return 0;
390}
391
392static u32 rtl8366rb_get_irqmask(struct irq_data *d)
393{
394 int line = irqd_to_hwirq(d);
395 u32 val;
396
397 /* For line interrupts we combine link down in bits
398 * 6..11 with link up in bits 0..5 into one interrupt.
399 */
400 if (line < 12)
401 val = BIT(line) | BIT(line + 6);
402 else
403 val = BIT(line);
404 return val;
405}
406
407static void rtl8366rb_mask_irq(struct irq_data *d)
408{
409 struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
410 int ret;
411
412 ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
413 rtl8366rb_get_irqmask(d), 0);
414 if (ret)
415 dev_err(smi->dev, "could not mask IRQ\n");
416}
417
418static void rtl8366rb_unmask_irq(struct irq_data *d)
419{
420 struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
421 int ret;
422
423 ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
424 rtl8366rb_get_irqmask(d),
425 rtl8366rb_get_irqmask(d));
426 if (ret)
427 dev_err(smi->dev, "could not unmask IRQ\n");
428}
429
/* Threaded handler for the cascaded switch interrupt.
 *
 * Reading the status register also clears it, so it must be read even
 * before we know whether any valid bit is set. Each set bit is then
 * dispatched as a nested IRQ to the matching child in the irqdomain.
 */
static irqreturn_t rtl8366rb_irq(int irq, void *data)
{
	struct realtek_smi *smi = data;
	u32 stat;
	int ret;

	/* This clears the IRQ status register */
	ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
			  &stat);
	if (ret) {
		dev_err(smi->dev, "can't read interrupt status\n");
		return IRQ_NONE;
	}
	stat &= RTL8366RB_INTERRUPT_VALID;
	if (!stat)
		return IRQ_NONE;
	while (stat) {
		int line = __ffs(stat);
		int child_irq;

		stat &= ~BIT(line);
		/* For line interrupts we combine link down in bits
		 * 6..11 with link up in bits 0..5 into one interrupt.
		 *
		 * NOTE(review): rtl8366rb_get_irqmask() maps line n to
		 * bits n and n+6, which would make "line -= 6" the exact
		 * inverse; "line -= 5" maps status bits 6..11 onto lines
		 * 1..6 — confirm against hardware before changing.
		 */
		if (line < 12 && line > 5)
			line -= 5;
		child_irq = irq_find_mapping(smi->irqdomain, line);
		handle_nested_irq(child_irq);
	}
	return IRQ_HANDLED;
}
461
/* Minimal irq_chip: only mask/unmask are needed, since the status
 * register is cleared simply by reading it in the handler.
 */
static struct irq_chip rtl8366rb_irq_chip = {
	.name = "RTL8366RB",
	.irq_mask = rtl8366rb_mask_irq,
	.irq_unmask = rtl8366rb_unmask_irq,
};
467
/* irqdomain .map callback: set up a freshly created mapping as a
 * nested, non-probeable interrupt handled by our irq_chip.
 */
static int rtl8366rb_irq_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler(irq, &rtl8366rb_irq_chip, handle_simple_irq);
	/* Handlers run from the parent's IRQ thread, not hard IRQ context */
	irq_set_nested_thread(irq, 1);
	irq_set_noprobe(irq);

	return 0;
}
478
/* irqdomain .unmap callback: undo everything rtl8366rb_irq_map() set up */
static void rtl8366rb_irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_nested_thread(irq, 0);
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
485
/* One-cell irqdomain (cell = line number) for the per-port interrupts */
static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
	.map = rtl8366rb_irq_map,
	.unmap = rtl8366rb_irq_unmap,
	.xlate = irq_domain_xlate_onecell,
};
491
492static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
493{
494 struct device_node *intc;
495 unsigned long irq_trig;
496 int irq;
497 int ret;
498 u32 val;
499 int i;
500
501 intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
502 if (!intc) {
503 dev_err(smi->dev, "missing child interrupt-controller node\n");
504 return -EINVAL;
505 }
506 /* RB8366RB IRQs cascade off this one */
507 irq = of_irq_get(intc, 0);
508 if (irq <= 0) {
509 dev_err(smi->dev, "failed to get parent IRQ\n");
510 return irq ? irq : -EINVAL;
511 }
512
513 /* This clears the IRQ status register */
514 ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
515 &val);
516 if (ret) {
517 dev_err(smi->dev, "can't read interrupt status\n");
518 return ret;
519 }
520
521 /* Fetch IRQ edge information from the descriptor */
522 irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
523 switch (irq_trig) {
524 case IRQF_TRIGGER_RISING:
525 case IRQF_TRIGGER_HIGH:
526 dev_info(smi->dev, "active high/rising IRQ\n");
527 val = 0;
528 break;
529 case IRQF_TRIGGER_FALLING:
530 case IRQF_TRIGGER_LOW:
531 dev_info(smi->dev, "active low/falling IRQ\n");
532 val = RTL8366RB_INTERRUPT_POLARITY;
533 break;
534 }
535 ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG,
536 RTL8366RB_INTERRUPT_POLARITY,
537 val);
538 if (ret) {
539 dev_err(smi->dev, "could not configure IRQ polarity\n");
540 return ret;
541 }
542
543 ret = devm_request_threaded_irq(smi->dev, irq, NULL,
544 rtl8366rb_irq, IRQF_ONESHOT,
545 "RTL8366RB", smi);
546 if (ret) {
547 dev_err(smi->dev, "unable to request irq: %d\n", ret);
548 return ret;
549 }
550 smi->irqdomain = irq_domain_add_linear(intc,
551 RTL8366RB_NUM_INTERRUPT,
552 &rtl8366rb_irqdomain_ops,
553 smi);
554 if (!smi->irqdomain) {
555 dev_err(smi->dev, "failed to create IRQ domain\n");
556 return -EINVAL;
557 }
558 for (i = 0; i < smi->num_ports; i++)
559 irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
560
561 return 0;
562}
563
564static int rtl8366rb_set_addr(struct realtek_smi *smi)
565{
566 u8 addr[ETH_ALEN];
567 u16 val;
568 int ret;
569
570 eth_random_addr(addr);
571
572 dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
573 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
574 val = addr[0] << 8 | addr[1];
575 ret = regmap_write(smi->map, RTL8366RB_SMAR0, val);
576 if (ret)
577 return ret;
578 val = addr[2] << 8 | addr[3];
579 ret = regmap_write(smi->map, RTL8366RB_SMAR1, val);
580 if (ret)
581 return ret;
582 val = addr[4] << 8 | addr[5];
583 ret = regmap_write(smi->map, RTL8366RB_SMAR2, val);
584 if (ret)
585 return ret;
586
587 return 0;
588}
589
/* Initialization "jam tables" found in a vendor driver: flat arrays of
 * {register, value} pairs written verbatim to the switch at setup time.
 */

/* For the "version 0" early silicon, appear in most source releases */
static const u16 rtl8366rb_init_jam_ver_0[] = {
	0x000B, 0x0001, 0x03A6, 0x0100, 0x03A7, 0x0001, 0x02D1, 0x3FFF,
	0x02D2, 0x3FFF, 0x02D3, 0x3FFF, 0x02D4, 0x3FFF, 0x02D5, 0x3FFF,
	0x02D6, 0x3FFF, 0x02D7, 0x3FFF, 0x02D8, 0x3FFF, 0x022B, 0x0688,
	0x022C, 0x0FAC, 0x03D0, 0x4688, 0x03D1, 0x01F5, 0x0000, 0x0830,
	0x02F9, 0x0200, 0x02F7, 0x7FFF, 0x02F8, 0x03FF, 0x0080, 0x03E8,
	0x0081, 0x00CE, 0x0082, 0x00DA, 0x0083, 0x0230, 0xBE0F, 0x2000,
	0x0231, 0x422A, 0x0232, 0x422A, 0x0233, 0x422A, 0x0234, 0x422A,
	0x0235, 0x422A, 0x0236, 0x422A, 0x0237, 0x422A, 0x0238, 0x422A,
	0x0239, 0x422A, 0x023A, 0x422A, 0x023B, 0x422A, 0x023C, 0x422A,
	0x023D, 0x422A, 0x023E, 0x422A, 0x023F, 0x422A, 0x0240, 0x422A,
	0x0241, 0x422A, 0x0242, 0x422A, 0x0243, 0x422A, 0x0244, 0x422A,
	0x0245, 0x422A, 0x0246, 0x422A, 0x0247, 0x422A, 0x0248, 0x422A,
	0x0249, 0x0146, 0x024A, 0x0146, 0x024B, 0x0146, 0xBE03, 0xC961,
	0x024D, 0x0146, 0x024E, 0x0146, 0x024F, 0x0146, 0x0250, 0x0146,
	0xBE64, 0x0226, 0x0252, 0x0146, 0x0253, 0x0146, 0x024C, 0x0146,
	0x0251, 0x0146, 0x0254, 0x0146, 0xBE62, 0x3FD0, 0x0084, 0x0320,
	0x0255, 0x0146, 0x0256, 0x0146, 0x0257, 0x0146, 0x0258, 0x0146,
	0x0259, 0x0146, 0x025A, 0x0146, 0x025B, 0x0146, 0x025C, 0x0146,
	0x025D, 0x0146, 0x025E, 0x0146, 0x025F, 0x0146, 0x0260, 0x0146,
	0x0261, 0xA23F, 0x0262, 0x0294, 0x0263, 0xA23F, 0x0264, 0x0294,
	0x0265, 0xA23F, 0x0266, 0x0294, 0x0267, 0xA23F, 0x0268, 0x0294,
	0x0269, 0xA23F, 0x026A, 0x0294, 0x026B, 0xA23F, 0x026C, 0x0294,
	0x026D, 0xA23F, 0x026E, 0x0294, 0x026F, 0xA23F, 0x0270, 0x0294,
	0x02F5, 0x0048, 0xBE09, 0x0E00, 0xBE1E, 0x0FA0, 0xBE14, 0x8448,
	0xBE15, 0x1007, 0xBE4A, 0xA284, 0xC454, 0x3F0B, 0xC474, 0x3F0B,
	0xBE48, 0x3672, 0xBE4B, 0x17A7, 0xBE4C, 0x0B15, 0xBE52, 0x0EDD,
	0xBE49, 0x8C00, 0xBE5B, 0x785C, 0xBE5C, 0x785C, 0xBE5D, 0x785C,
	0xBE61, 0x368A, 0xBE63, 0x9B84, 0xC456, 0xCC13, 0xC476, 0xCC13,
	0xBE65, 0x307D, 0xBE6D, 0x0005, 0xBE6E, 0xE120, 0xBE2E, 0x7BAF,
};
624
/* This v1 init sequence is from Belkin F5D8235 U-Boot release.
 * Pairs of {register, value}, written in order.
 */
static const u16 rtl8366rb_init_jam_ver_1[] = {
	0x0000, 0x0830, 0x0001, 0x8000, 0x0400, 0x8130, 0xBE78, 0x3C3C,
	0x0431, 0x5432, 0xBE37, 0x0CE4, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0,
	0xC44C, 0x1585, 0xC44C, 0x1185, 0xC44C, 0x1585, 0xC46C, 0x1585,
	0xC46C, 0x1185, 0xC46C, 0x1585, 0xC451, 0x2135, 0xC471, 0x2135,
	0xBE10, 0x8140, 0xBE15, 0x0007, 0xBE6E, 0xE120, 0xBE69, 0xD20F,
	0xBE6B, 0x0320, 0xBE24, 0xB000, 0xBE23, 0xFF51, 0xBE22, 0xDF20,
	0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800, 0xBE24, 0x0000,
	0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60, 0xBE21, 0x0140,
	0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000, 0xBE2E, 0x7B7A,
	0xBE36, 0x0CE4, 0x02F5, 0x0048, 0xBE77, 0x2940, 0x000A, 0x83E0,
	0xBE79, 0x3C3C, 0xBE00, 0x1340,
};
639
/* This v2 init sequence is from Belkin F5D8235 U-Boot release.
 * Pairs of {register, value}, written in order.
 */
static const u16 rtl8366rb_init_jam_ver_2[] = {
	0x0450, 0x0000, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432,
	0xC44F, 0x6250, 0xC46F, 0x6250, 0xC456, 0x0C14, 0xC476, 0x0C14,
	0xC44C, 0x1C85, 0xC44C, 0x1885, 0xC44C, 0x1C85, 0xC46C, 0x1C85,
	0xC46C, 0x1885, 0xC46C, 0x1C85, 0xC44C, 0x0885, 0xC44C, 0x0881,
	0xC44C, 0x0885, 0xC46C, 0x0885, 0xC46C, 0x0881, 0xC46C, 0x0885,
	0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001,
	0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6E, 0x0320,
	0xBE77, 0x2940, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120,
	0x8000, 0x0001, 0xBE15, 0x1007, 0x8000, 0x0000, 0xBE15, 0x1007,
	0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160, 0xBE10, 0x8140,
	0xBE00, 0x1340, 0x0F51, 0x0010,
};
654
/* Appears in a DDWRT code dump.
 * Pairs of {register, value}, written in order.
 */
static const u16 rtl8366rb_init_jam_ver_3[] = {
	0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432,
	0x0F51, 0x0017, 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0,
	0xC456, 0x0C14, 0xC476, 0x0C14, 0xC454, 0x3F8B, 0xC474, 0x3F8B,
	0xC450, 0x2071, 0xC470, 0x2071, 0xC451, 0x226B, 0xC471, 0x226B,
	0xC452, 0xA293, 0xC472, 0xA293, 0xC44C, 0x1585, 0xC44C, 0x1185,
	0xC44C, 0x1585, 0xC46C, 0x1585, 0xC46C, 0x1185, 0xC46C, 0x1585,
	0xC44C, 0x0185, 0xC44C, 0x0181, 0xC44C, 0x0185, 0xC46C, 0x0185,
	0xC46C, 0x0181, 0xC46C, 0x0185, 0xBE24, 0xB000, 0xBE23, 0xFF51,
	0xBE22, 0xDF20, 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800,
	0xBE24, 0x0000, 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60,
	0xBE21, 0x0140, 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000,
	0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001,
	0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6B, 0x0320,
	0xBE77, 0x2800, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120,
	0x8000, 0x0001, 0xBE10, 0x8140, 0x8000, 0x0000, 0xBE10, 0x8140,
	0xBE15, 0x1007, 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160,
	0xBE10, 0x8140, 0xBE00, 0x1340, 0x0450, 0x0000, 0x0401, 0x0000,
};
675
/* Belkin F5D8235 v1, "belkin,f5d8235-v1".
 * Pairs of {register, value}, written in order.
 */
static const u16 rtl8366rb_init_jam_f5d8235[] = {
	0x0242, 0x02BF, 0x0245, 0x02BF, 0x0248, 0x02BF, 0x024B, 0x02BF,
	0x024E, 0x02BF, 0x0251, 0x02BF, 0x0254, 0x0A3F, 0x0256, 0x0A3F,
	0x0258, 0x0A3F, 0x025A, 0x0A3F, 0x025C, 0x0A3F, 0x025E, 0x0A3F,
	0x0263, 0x007C, 0x0100, 0x0004, 0xBE5B, 0x3500, 0x800E, 0x200F,
	0xBE1D, 0x0F00, 0x8001, 0x5011, 0x800A, 0xA2F4, 0x800B, 0x17A3,
	0xBE4B, 0x17A3, 0xBE41, 0x5011, 0xBE17, 0x2100, 0x8000, 0x8304,
	0xBE40, 0x8304, 0xBE4A, 0xA2F4, 0x800C, 0xA8D5, 0x8014, 0x5500,
	0x8015, 0x0004, 0xBE4C, 0xA8D5, 0xBE59, 0x0008, 0xBE09, 0x0E00,
	0xBE36, 0x1036, 0xBE37, 0x1036, 0x800D, 0x00FF, 0xBE4D, 0x00FF,
};
688
/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b".
 * Pairs of {register, value}, written in order.
 */
static const u16 rtl8366rb_init_jam_dgn3500[] = {
	0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0F51, 0x0017,
	0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, 0x0450, 0x0000,
	0x0401, 0x0000, 0x0431, 0x0960,
};
695
/* This jam table activates "green ethernet", which means low power mode
 * and is claimed to detect the cable length and not use more power than
 * necessary, and the ports should enter power saving mode 10 seconds after
 * a cable is disconnected. Seems to always be the same.
 * Each entry is one {register, value} pair.
 */
static const u16 rtl8366rb_green_jam[][2] = {
	{0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7},
	{0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C},
	{0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C},
};
706
/* DSA .setup callback: bring the switch to an operational state.
 *
 * Identifies the chip, plays back the version-specific init "jam"
 * table and the "green ethernet" jam table, assigns a random MAC
 * address, enables the CPU port with Realtek tagging, and programs
 * defaults for packet length, learning, ageing, VLAN ingress
 * filtering, LEDs, VLANs, the cascaded IRQ and the MDIO bus.
 */
static int rtl8366rb_setup(struct dsa_switch *ds)
{
	struct realtek_smi *smi = ds->priv;
	const u16 *jam_table;
	u32 chip_ver = 0;
	u32 chip_id = 0;
	int jam_size;
	u32 val;
	int ret;
	int i;

	ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
	if (ret) {
		dev_err(smi->dev, "unable to read chip id\n");
		return ret;
	}

	switch (chip_id) {
	case RTL8366RB_CHIP_ID_8366:
		break;
	default:
		dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id);
		return -ENODEV;
	}

	ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
			  &chip_ver);
	if (ret) {
		dev_err(smi->dev, "unable to read chip version\n");
		return ret;
	}

	dev_info(smi->dev, "RTL%04x ver %u chip found\n",
		 chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);

	/* Do the init dance using the right jam table
	 *
	 * NOTE(review): the switch below uses the raw register value
	 * while the print above masks with RTL8366RB_CHIP_VERSION_MASK —
	 * confirm the upper bits can never be set here.
	 */
	switch (chip_ver) {
	case 0:
		jam_table = rtl8366rb_init_jam_ver_0;
		jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_0);
		break;
	case 1:
		jam_table = rtl8366rb_init_jam_ver_1;
		jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_1);
		break;
	case 2:
		jam_table = rtl8366rb_init_jam_ver_2;
		jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_2);
		break;
	default:
		jam_table = rtl8366rb_init_jam_ver_3;
		jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_3);
		break;
	}

	/* Special jam tables for special routers
	 * TODO: are these necessary? Maintainers, please test
	 * without them, using just the off-the-shelf tables.
	 */
	if (of_machine_is_compatible("belkin,f5d8235-v1")) {
		jam_table = rtl8366rb_init_jam_f5d8235;
		jam_size = ARRAY_SIZE(rtl8366rb_init_jam_f5d8235);
	}
	if (of_machine_is_compatible("netgear,dgn3500") ||
	    of_machine_is_compatible("netgear,dgn3500b")) {
		jam_table = rtl8366rb_init_jam_dgn3500;
		jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
	}

	/* Play back the jam table: entries are {register, value} pairs */
	i = 0;
	while (i < jam_size) {
		/* Registers matching the 0xBE00 bit pattern (presumably the
		 * PHY window) need the PHY access machinery armed for write
		 * first, but only when it is not busy — TODO confirm.
		 */
		if ((jam_table[i] & 0xBE00) == 0xBE00) {
			ret = regmap_read(smi->map,
					  RTL8366RB_PHY_ACCESS_BUSY_REG,
					  &val);
			if (ret)
				return ret;
			if (!(val & RTL8366RB_PHY_INT_BUSY)) {
				ret = regmap_write(smi->map,
						   RTL8366RB_PHY_ACCESS_CTRL_REG,
						   RTL8366RB_PHY_CTRL_WRITE);
				if (ret)
					return ret;
			}
		}
		dev_dbg(smi->dev, "jam %04x into register %04x\n",
			jam_table[i + 1],
			jam_table[i]);
		ret = regmap_write(smi->map,
				   jam_table[i],
				   jam_table[i + 1]);
		if (ret)
			return ret;
		i += 2;
	}

	/* Set up the "green ethernet" feature
	 *
	 * NOTE(review): i only advances when the PHY is not busy, so this
	 * loop busy-polls the busy flag with no timeout — confirm the
	 * flag is guaranteed to clear.
	 */
	i = 0;
	while (i < ARRAY_SIZE(rtl8366rb_green_jam)) {
		ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_BUSY_REG,
				  &val);
		if (ret)
			return ret;
		if (!(val & RTL8366RB_PHY_INT_BUSY)) {
			ret = regmap_write(smi->map,
					   RTL8366RB_PHY_ACCESS_CTRL_REG,
					   RTL8366RB_PHY_CTRL_WRITE);
			if (ret)
				return ret;
			ret = regmap_write(smi->map,
					   rtl8366rb_green_jam[i][0],
					   rtl8366rb_green_jam[i][1]);
			if (ret)
				return ret;
			i++;
		}
	}
	ret = regmap_write(smi->map,
			   RTL8366RB_GREEN_FEATURE_REG,
			   (chip_ver == 1) ? 0x0007 : 0x0003);
	if (ret)
		return ret;

	/* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
	ret = regmap_write(smi->map, 0x0c, 0x240);
	if (ret)
		return ret;
	ret = regmap_write(smi->map, 0x0d, 0x240);
	if (ret)
		return ret;

	/* Set some random MAC address */
	ret = rtl8366rb_set_addr(smi);
	if (ret)
		return ret;

	/* Enable CPU port and enable inserting CPU tag
	 *
	 * Disabling RTL8368RB_CPU_INSTAG here will change the behaviour
	 * of the switch totally and it will start talking Realtek RRCP
	 * internally. It is probably possible to experiment with this,
	 * but then the kernel needs to understand and handle RRCP first.
	 */
	ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG,
				 0xFFFF,
				 RTL8368RB_CPU_INSTAG | BIT(smi->cpu_port));
	if (ret)
		return ret;

	/* Make sure we default-enable the fixed CPU port */
	ret = regmap_update_bits(smi->map, RTL8366RB_PECR,
				 BIT(smi->cpu_port),
				 0);
	if (ret)
		return ret;

	/* Set maximum packet length to 1536 bytes */
	ret = regmap_update_bits(smi->map, RTL8366RB_SGCR,
				 RTL8366RB_SGCR_MAX_LENGTH_MASK,
				 RTL8366RB_SGCR_MAX_LENGTH_1536);
	if (ret)
		return ret;

	/* Enable learning for all ports */
	ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0);
	if (ret)
		return ret;

	/* Enable auto ageing for all ports */
	ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0);
	if (ret)
		return ret;

	/* Port 4 setup: this enables Port 4, usually the WAN port,
	 * common PHY IO mode is apparently mode 0, and this is not what
	 * the port is initialized to. There is no explanation of the
	 * IO modes in the Realtek source code, if your WAN port is
	 * connected to something exotic such as fiber, then this might
	 * be worth experimenting with.
	 */
	ret = regmap_update_bits(smi->map, RTL8366RB_PMC0,
				 RTL8366RB_PMC0_P4_IOMODE_MASK,
				 0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT);
	if (ret)
		return ret;

	/* Discard VLAN tagged packets if the port is not a member of
	 * the VLAN with which the packets is associated.
	 */
	ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
			   RTL8366RB_PORT_ALL);
	if (ret)
		return ret;

	/* Don't drop packets whose DA has not been learned */
	ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2,
				 RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
	if (ret)
		return ret;

	/* Set blinking, TODO: make this configurable */
	ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG,
				 RTL8366RB_LED_BLINKRATE_MASK,
				 RTL8366RB_LED_BLINKRATE_56MS);
	if (ret)
		return ret;

	/* Set up LED activity:
	 * Each port has 4 LEDs, we configure all ports to the same
	 * behaviour (no individual config) but we can set up each
	 * LED separately.
	 */
	if (smi->leds_disabled) {
		/* Turn everything off */
		regmap_update_bits(smi->map,
				   RTL8366RB_LED_0_1_CTRL_REG,
				   0x0FFF, 0);
		regmap_update_bits(smi->map,
				   RTL8366RB_LED_2_3_CTRL_REG,
				   0x0FFF, 0);
		regmap_update_bits(smi->map,
				   RTL8366RB_INTERRUPT_CONTROL_REG,
				   RTL8366RB_P4_RGMII_LED,
				   0);
		val = RTL8366RB_LED_OFF;
	} else {
		/* TODO: make this configurable per LED */
		val = RTL8366RB_LED_FORCE;
	}
	/* Apply the same mode to each of the four LEDs */
	for (i = 0; i < 4; i++) {
		ret = regmap_update_bits(smi->map,
					 RTL8366RB_LED_CTRL_REG,
					 0xf << (i * 4),
					 val << (i * 4));
		if (ret)
			return ret;
	}

	ret = rtl8366_init_vlan(smi);
	if (ret)
		return ret;

	/* IRQ support is optional: carry on without it */
	ret = rtl8366rb_setup_cascaded_irq(smi);
	if (ret)
		dev_info(smi->dev, "no interrupt support\n");

	ret = realtek_smi_setup_mdio(smi);
	if (ret) {
		dev_info(smi->dev, "could not set up MDIO bus\n");
		return -ENODEV;
	}

	return 0;
}
961
/* DSA .get_tag_protocol callback: no custom tagging is used. */
static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
						      int port)
{
	/* For now, the RTL switches are handled without any custom tags.
	 *
	 * It is possible to turn on "custom tags" by removing the
	 * RTL8368RB_CPU_INSTAG flag when enabling the port but what it
	 * does is unfamiliar to DSA: ethernet frames of type 8899, the Realtek
	 * Remote Control Protocol (RRCP) start to appear on the CPU port of
	 * the device. So this is not the ordinary few extra bytes in the
	 * frame. Instead it appears that the switch starts to talk Realtek
	 * RRCP internally which means a pretty complex RRCP implementation
	 * decoding and responding the RRCP protocol is needed to exploit this.
	 *
	 * The OpenRRCP project (dormant since 2009) have reverse-engineered
	 * parts of the protocol.
	 */
	return DSA_TAG_PROTO_NONE;
}
981
982static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port,
983 struct phy_device *phydev)
984{
985 struct realtek_smi *smi = ds->priv;
986 int ret;
987
988 if (port != smi->cpu_port)
989 return;
990
991 dev_info(smi->dev, "adjust link on CPU port (%d)\n", port);
992
993 /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
994 ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
995 BIT(port), BIT(port));
996 if (ret)
997 return;
998
999 ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
1000 0xFF00U,
1001 RTL8366RB_PAACR_CPU_PORT << 8);
1002 if (ret)
1003 return;
1004
1005 /* Enable the CPU port */
1006 ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
1007 0);
1008 if (ret)
1009 return;
1010}
1011
/* Turn the LED(s) for one port on or off.
 *
 * Ports 0..3 have their bits in the LED_0_1/LED_2_3 control registers;
 * port 4 (RGMII) is controlled by a bit in the interrupt control
 * register. No-op when LEDs are disabled in the device tree.
 *
 * NOTE(review): the "rb8366rb_" prefix looks like a typo for
 * "rtl8366rb_", but renaming would touch all callers.
 */
static void rb8366rb_set_port_led(struct realtek_smi *smi,
				  int port, bool enable)
{
	u16 val = enable ? 0x3f : 0;
	int ret;

	if (smi->leds_disabled)
		return;

	switch (port) {
	case 0:
		ret = regmap_update_bits(smi->map,
					 RTL8366RB_LED_0_1_CTRL_REG,
					 0x3F, val);
		break;
	case 1:
		ret = regmap_update_bits(smi->map,
					 RTL8366RB_LED_0_1_CTRL_REG,
					 0x3F << RTL8366RB_LED_1_OFFSET,
					 val << RTL8366RB_LED_1_OFFSET);
		break;
	case 2:
		ret = regmap_update_bits(smi->map,
					 RTL8366RB_LED_2_3_CTRL_REG,
					 0x3F, val);
		break;
	case 3:
		ret = regmap_update_bits(smi->map,
					 RTL8366RB_LED_2_3_CTRL_REG,
					 0x3F << RTL8366RB_LED_3_OFFSET,
					 val << RTL8366RB_LED_3_OFFSET);
		break;
	case 4:
		ret = regmap_update_bits(smi->map,
					 RTL8366RB_INTERRUPT_CONTROL_REG,
					 RTL8366RB_P4_RGMII_LED,
					 enable ? RTL8366RB_P4_RGMII_LED : 0);
		break;
	default:
		dev_err(smi->dev, "no LED for port %d\n", port);
		return;
	}
	if (ret)
		dev_err(smi->dev, "error updating LED on port %d\n", port);
}
1057
1058static int
1059rtl8366rb_port_enable(struct dsa_switch *ds, int port,
1060 struct phy_device *phy)
1061{
1062 struct realtek_smi *smi = ds->priv;
1063 int ret;
1064
1065 dev_dbg(smi->dev, "enable port %d\n", port);
1066 ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
1067 0);
1068 if (ret)
1069 return ret;
1070
1071 rb8366rb_set_port_led(smi, port, true);
1072 return 0;
1073}
1074
1075static void
1076rtl8366rb_port_disable(struct dsa_switch *ds, int port,
1077 struct phy_device *phy)
1078{
1079 struct realtek_smi *smi = ds->priv;
1080 int ret;
1081
1082 dev_dbg(smi->dev, "disable port %d\n", port);
1083 ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
1084 BIT(port));
1085 if (ret)
1086 return;
1087
1088 rb8366rb_set_port_led(smi, port, false);
1089}
1090
1091static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
1092 struct rtl8366_vlan_4k *vlan4k)
1093{
1094 u32 data[3];
1095 int ret;
1096 int i;
1097
1098 memset(vlan4k, '\0', sizeof(struct rtl8366_vlan_4k));
1099
1100 if (vid >= RTL8366RB_NUM_VIDS)
1101 return -EINVAL;
1102
1103 /* write VID */
1104 ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
1105 vid & RTL8366RB_VLAN_VID_MASK);
1106 if (ret)
1107 return ret;
1108
1109 /* write table access control word */
1110 ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
1111 RTL8366RB_TABLE_VLAN_READ_CTRL);
1112 if (ret)
1113 return ret;
1114
1115 for (i = 0; i < 3; i++) {
1116 ret = regmap_read(smi->map,
1117 RTL8366RB_VLAN_TABLE_READ_BASE + i,
1118 &data[i]);
1119 if (ret)
1120 return ret;
1121 }
1122
1123 vlan4k->vid = vid;
1124 vlan4k->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
1125 RTL8366RB_VLAN_UNTAG_MASK;
1126 vlan4k->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
1127 vlan4k->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
1128
1129 return 0;
1130}
1131
1132static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
1133 const struct rtl8366_vlan_4k *vlan4k)
1134{
1135 u32 data[3];
1136 int ret;
1137 int i;
1138
1139 if (vlan4k->vid >= RTL8366RB_NUM_VIDS ||
1140 vlan4k->member > RTL8366RB_VLAN_MEMBER_MASK ||
1141 vlan4k->untag > RTL8366RB_VLAN_UNTAG_MASK ||
1142 vlan4k->fid > RTL8366RB_FIDMAX)
1143 return -EINVAL;
1144
1145 data[0] = vlan4k->vid & RTL8366RB_VLAN_VID_MASK;
1146 data[1] = (vlan4k->member & RTL8366RB_VLAN_MEMBER_MASK) |
1147 ((vlan4k->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
1148 RTL8366RB_VLAN_UNTAG_SHIFT);
1149 data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
1150
1151 for (i = 0; i < 3; i++) {
1152 ret = regmap_write(smi->map,
1153 RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
1154 data[i]);
1155 if (ret)
1156 return ret;
1157 }
1158
1159 /* write table access control word */
1160 ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
1161 RTL8366RB_TABLE_VLAN_WRITE_CTRL);
1162
1163 return ret;
1164}
1165
1166static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
1167 struct rtl8366_vlan_mc *vlanmc)
1168{
1169 u32 data[3];
1170 int ret;
1171 int i;
1172
1173 memset(vlanmc, '\0', sizeof(struct rtl8366_vlan_mc));
1174
1175 if (index >= RTL8366RB_NUM_VLANS)
1176 return -EINVAL;
1177
1178 for (i = 0; i < 3; i++) {
1179 ret = regmap_read(smi->map,
1180 RTL8366RB_VLAN_MC_BASE(index) + i,
1181 &data[i]);
1182 if (ret)
1183 return ret;
1184 }
1185
1186 vlanmc->vid = data[0] & RTL8366RB_VLAN_VID_MASK;
1187 vlanmc->priority = (data[0] >> RTL8366RB_VLAN_PRIORITY_SHIFT) &
1188 RTL8366RB_VLAN_PRIORITY_MASK;
1189 vlanmc->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
1190 RTL8366RB_VLAN_UNTAG_MASK;
1191 vlanmc->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
1192 vlanmc->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
1193
1194 return 0;
1195}
1196
1197static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
1198 const struct rtl8366_vlan_mc *vlanmc)
1199{
1200 u32 data[3];
1201 int ret;
1202 int i;
1203
1204 if (index >= RTL8366RB_NUM_VLANS ||
1205 vlanmc->vid >= RTL8366RB_NUM_VIDS ||
1206 vlanmc->priority > RTL8366RB_PRIORITYMAX ||
1207 vlanmc->member > RTL8366RB_VLAN_MEMBER_MASK ||
1208 vlanmc->untag > RTL8366RB_VLAN_UNTAG_MASK ||
1209 vlanmc->fid > RTL8366RB_FIDMAX)
1210 return -EINVAL;
1211
1212 data[0] = (vlanmc->vid & RTL8366RB_VLAN_VID_MASK) |
1213 ((vlanmc->priority & RTL8366RB_VLAN_PRIORITY_MASK) <<
1214 RTL8366RB_VLAN_PRIORITY_SHIFT);
1215 data[1] = (vlanmc->member & RTL8366RB_VLAN_MEMBER_MASK) |
1216 ((vlanmc->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
1217 RTL8366RB_VLAN_UNTAG_SHIFT);
1218 data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
1219
1220 for (i = 0; i < 3; i++) {
1221 ret = regmap_write(smi->map,
1222 RTL8366RB_VLAN_MC_BASE(index) + i,
1223 data[i]);
1224 if (ret)
1225 return ret;
1226 }
1227
1228 return 0;
1229}
1230
1231static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
1232{
1233 u32 data;
1234 int ret;
1235
1236 if (port >= smi->num_ports)
1237 return -EINVAL;
1238
1239 ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
1240 &data);
1241 if (ret)
1242 return ret;
1243
1244 *val = (data >> RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)) &
1245 RTL8366RB_PORT_VLAN_CTRL_MASK;
1246
1247 return 0;
1248}
1249
1250static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
1251{
1252 if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
1253 return -EINVAL;
1254
1255 return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
1256 RTL8366RB_PORT_VLAN_CTRL_MASK <<
1257 RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
1258 (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
1259 RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
1260}
1261
1262static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
1263{
1264 unsigned int max = RTL8366RB_NUM_VLANS;
1265
1266 if (smi->vlan4k_enabled)
1267 max = RTL8366RB_NUM_VIDS - 1;
1268
1269 if (vlan == 0 || vlan >= max)
1270 return false;
1271
1272 return true;
1273}
1274
1275static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable)
1276{
1277 dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable");
1278 return regmap_update_bits(smi->map,
1279 RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
1280 enable ? RTL8366RB_SGCR_EN_VLAN : 0);
1281}
1282
1283static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable)
1284{
1285 dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
1286 return regmap_update_bits(smi->map, RTL8366RB_SGCR,
1287 RTL8366RB_SGCR_EN_VLAN_4KTB,
1288 enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
1289}
1290
/* Read PHY register @regnum of PHY @phy through the indirect access
 * window. Returns the value read or a negative errno.
 */
static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
{
	u32 val;
	u32 reg;
	int ret;

	if (phy > RTL8366RB_PHY_NO_MAX)
		return -EINVAL;

	/* Arm the access machinery for a read */
	ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
			   RTL8366RB_PHY_CTRL_READ);
	if (ret)
		return ret;

	/* Window address: bit 15 set, one bit per PHY, low bits = regnum */
	reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;

	/* Writing (any value) to the window triggers the read */
	ret = regmap_write(smi->map, reg, 0);
	if (ret) {
		dev_err(smi->dev,
			"failed to write PHY%d reg %04x @ %04x, ret %d\n",
			phy, regnum, reg, ret);
		return ret;
	}

	/* The result lands in the PHY access data register */
	ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
	if (ret)
		return ret;

	dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
		phy, regnum, reg, val);

	return val;
}
1324
1325static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
1326 u16 val)
1327{
1328 u32 reg;
1329 int ret;
1330
1331 if (phy > RTL8366RB_PHY_NO_MAX)
1332 return -EINVAL;
1333
1334 ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
1335 RTL8366RB_PHY_CTRL_WRITE);
1336 if (ret)
1337 return ret;
1338
1339 reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
1340
1341 dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
1342 phy, regnum, reg, val);
1343
1344 ret = regmap_write(smi->map, reg, val);
1345 if (ret)
1346 return ret;
1347
1348 return 0;
1349}
1350
1351static int rtl8366rb_reset_chip(struct realtek_smi *smi)
1352{
1353 int timeout = 10;
1354 u32 val;
1355 int ret;
1356
1357 realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
1358 RTL8366RB_CHIP_CTRL_RESET_HW);
1359 do {
1360 usleep_range(20000, 25000);
1361 ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val);
1362 if (ret)
1363 return ret;
1364
1365 if (!(val & RTL8366RB_CHIP_CTRL_RESET_HW))
1366 break;
1367 } while (--timeout);
1368
1369 if (!timeout) {
1370 dev_err(smi->dev, "timeout waiting for the switch to reset\n");
1371 return -EIO;
1372 }
1373
1374 return 0;
1375}
1376
/* Probe the chip identification register and populate the per-chip
 * data in *smi, then reset the switch.
 *
 * NOTE(review): an unrecognized chip ID only logs a message and
 * continues to the reset with cpu_port/num_ports etc. left unset —
 * confirm this is intended (RTL8366S explicitly bails out).
 */
static int rtl8366rb_detect(struct realtek_smi *smi)
{
	struct device *dev = smi->dev;
	int ret;
	u32 val;

	/* Detect device */
	ret = regmap_read(smi->map, 0x5c, &val);
	if (ret) {
		dev_err(dev, "can't get chip ID (%d)\n", ret);
		return ret;
	}

	switch (val) {
	case 0x6027:
		dev_info(dev, "found an RTL8366S switch\n");
		dev_err(dev, "this switch is not yet supported, submit patches!\n");
		return -ENODEV;
	case 0x5937:
		dev_info(dev, "found an RTL8366RB switch\n");
		smi->cpu_port = RTL8366RB_PORT_NUM_CPU;
		smi->num_ports = RTL8366RB_NUM_PORTS;
		smi->num_vlan_mc = RTL8366RB_NUM_VLANS;
		smi->mib_counters = rtl8366rb_mib_counters;
		smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
		break;
	default:
		dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
			 val);
		break;
	}

	ret = rtl8366rb_reset_chip(smi);
	if (ret)
		return ret;

	return 0;
}
1415
/* DSA operations: the rtl8366_* callbacks are shared helpers from the
 * common rtl8366 library, the rtl8366rb_* ones are chip-specific.
 */
static const struct dsa_switch_ops rtl8366rb_switch_ops = {
	.get_tag_protocol = rtl8366_get_tag_protocol,
	.setup = rtl8366rb_setup,
	.adjust_link = rtl8366rb_adjust_link,
	.get_strings = rtl8366_get_strings,
	.get_ethtool_stats = rtl8366_get_ethtool_stats,
	.get_sset_count = rtl8366_get_sset_count,
	.port_vlan_filtering = rtl8366_vlan_filtering,
	.port_vlan_prepare = rtl8366_vlan_prepare,
	.port_vlan_add = rtl8366_vlan_add,
	.port_vlan_del = rtl8366_vlan_del,
	.port_enable = rtl8366rb_port_enable,
	.port_disable = rtl8366rb_port_disable,
};
1430
/* Chip-specific operations used by the realtek-smi core and the shared
 * rtl8366 VLAN/MIB library.
 */
static const struct realtek_smi_ops rtl8366rb_smi_ops = {
	.detect		= rtl8366rb_detect,
	.get_vlan_mc	= rtl8366rb_get_vlan_mc,
	.set_vlan_mc	= rtl8366rb_set_vlan_mc,
	.get_vlan_4k	= rtl8366rb_get_vlan_4k,
	.set_vlan_4k	= rtl8366rb_set_vlan_4k,
	.get_mc_index	= rtl8366rb_get_mc_index,
	.set_mc_index	= rtl8366rb_set_mc_index,
	.get_mib_counter = rtl8366rb_get_mib_counter,
	.is_vlan_valid	= rtl8366rb_is_vlan_valid,
	.enable_vlan	= rtl8366rb_enable_vlan,
	.enable_vlan4k	= rtl8366rb_enable_vlan4k,
	.phy_read	= rtl8366rb_phy_read,
	.phy_write	= rtl8366rb_phy_write,
};
1446
/* Variant descriptor exported to the realtek-smi core: ties the DSA
 * and SMI ops together with the SMI bus timing and command bytes.
 */
const struct realtek_smi_variant rtl8366rb_variant = {
	.ds_ops = &rtl8366rb_switch_ops,
	.ops = &rtl8366rb_smi_ops,
	.clk_delay = 10,
	.cmd_read = 0xa9,
	.cmd_write = 0xa8,
};
EXPORT_SYMBOL_GPL(rtl8366rb_variant);
diff --git a/drivers/net/dsa/vitesse-vsc73xx.c b/drivers/net/dsa/vitesse-vsc73xx.c
new file mode 100644
index 000000000000..9f1b5f2e8a64
--- /dev/null
+++ b/drivers/net/dsa/vitesse-vsc73xx.c
@@ -0,0 +1,1365 @@
1// SPDX-License-Identifier: GPL-2.0
2/* DSA driver for:
3 * Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
4 * Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
5 * Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
6 * Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
7 *
8 * These switches have a built-in 8051 CPU and can download and execute a
9 * firmware in this CPU. They can also be configured to use an external CPU
10 * handling the switch in a memory-mapped manner by connecting to that external
11 * CPU's memory bus.
12 *
13 * This driver (currently) only takes control of the switch chip over SPI and
14 * configures it to route packages around when connected to a CPU port. The
15 * chip has embedded PHYs and VLAN support so we model it using DSA.
16 *
 * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
18 * Includes portions of code from the firmware uploader by:
19 * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
20 */
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/device.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/of_mdio.h>
27#include <linux/platform_device.h>
28#include <linux/spi/spi.h>
29#include <linux/bitops.h>
30#include <linux/if_bridge.h>
31#include <linux/etherdevice.h>
32#include <linux/gpio/consumer.h>
33#include <linux/gpio/driver.h>
34#include <linux/random.h>
35#include <net/dsa.h>
36
37#define VSC73XX_BLOCK_MAC 0x1 /* Subblocks 0-4, 6 (CPU port) */
38#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
39#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */
40#define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */
41#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */
42#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
43#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
44
45#define CPU_PORT 6 /* CPU port */
46
47/* MAC Block registers */
48#define VSC73XX_MAC_CFG 0x00
49#define VSC73XX_MACHDXGAP 0x02
50#define VSC73XX_FCCONF 0x04
51#define VSC73XX_FCMACHI 0x08
52#define VSC73XX_FCMACLO 0x0c
53#define VSC73XX_MAXLEN 0x10
54#define VSC73XX_ADVPORTM 0x19
55#define VSC73XX_TXUPDCFG 0x24
56#define VSC73XX_TXQ_SELECT_CFG 0x28
57#define VSC73XX_RXOCT 0x50
58#define VSC73XX_TXOCT 0x51
59#define VSC73XX_C_RX0 0x52
60#define VSC73XX_C_RX1 0x53
61#define VSC73XX_C_RX2 0x54
62#define VSC73XX_C_TX0 0x55
63#define VSC73XX_C_TX1 0x56
64#define VSC73XX_C_TX2 0x57
65#define VSC73XX_C_CFG 0x58
66#define VSC73XX_CAT_DROP 0x6e
67#define VSC73XX_CAT_PR_MISC_L2 0x6f
68#define VSC73XX_CAT_PR_USR_PRIO 0x75
69#define VSC73XX_Q_MISC_CONF 0xdf
70
71/* MAC_CFG register bits */
72#define VSC73XX_MAC_CFG_WEXC_DIS BIT(31)
73#define VSC73XX_MAC_CFG_PORT_RST BIT(29)
74#define VSC73XX_MAC_CFG_TX_EN BIT(28)
75#define VSC73XX_MAC_CFG_SEED_LOAD BIT(27)
76#define VSC73XX_MAC_CFG_SEED_MASK GENMASK(26, 19)
77#define VSC73XX_MAC_CFG_SEED_OFFSET 19
78#define VSC73XX_MAC_CFG_FDX BIT(18)
79#define VSC73XX_MAC_CFG_GIGA_MODE BIT(17)
80#define VSC73XX_MAC_CFG_RX_EN BIT(16)
81#define VSC73XX_MAC_CFG_VLAN_DBLAWR BIT(15)
82#define VSC73XX_MAC_CFG_VLAN_AWR BIT(14)
83#define VSC73XX_MAC_CFG_100_BASE_T BIT(13) /* Not in manual */
84#define VSC73XX_MAC_CFG_TX_IPG_MASK GENMASK(10, 6)
85#define VSC73XX_MAC_CFG_TX_IPG_OFFSET 6
86#define VSC73XX_MAC_CFG_TX_IPG_1000M (6 << VSC73XX_MAC_CFG_TX_IPG_OFFSET)
87#define VSC73XX_MAC_CFG_TX_IPG_100_10M (17 << VSC73XX_MAC_CFG_TX_IPG_OFFSET)
88#define VSC73XX_MAC_CFG_MAC_RX_RST BIT(5)
89#define VSC73XX_MAC_CFG_MAC_TX_RST BIT(4)
90#define VSC73XX_MAC_CFG_CLK_SEL_MASK GENMASK(2, 0)
91#define VSC73XX_MAC_CFG_CLK_SEL_OFFSET 0
92#define VSC73XX_MAC_CFG_CLK_SEL_1000M 1
93#define VSC73XX_MAC_CFG_CLK_SEL_100M 2
94#define VSC73XX_MAC_CFG_CLK_SEL_10M 3
95#define VSC73XX_MAC_CFG_CLK_SEL_EXT 4
96
97#define VSC73XX_MAC_CFG_1000M_F_PHY (VSC73XX_MAC_CFG_FDX | \
98 VSC73XX_MAC_CFG_GIGA_MODE | \
99 VSC73XX_MAC_CFG_TX_IPG_1000M | \
100 VSC73XX_MAC_CFG_CLK_SEL_EXT)
101#define VSC73XX_MAC_CFG_100_10M_F_PHY (VSC73XX_MAC_CFG_FDX | \
102 VSC73XX_MAC_CFG_TX_IPG_100_10M | \
103 VSC73XX_MAC_CFG_CLK_SEL_EXT)
104#define VSC73XX_MAC_CFG_100_10M_H_PHY (VSC73XX_MAC_CFG_TX_IPG_100_10M | \
105 VSC73XX_MAC_CFG_CLK_SEL_EXT)
106#define VSC73XX_MAC_CFG_1000M_F_RGMII (VSC73XX_MAC_CFG_FDX | \
107 VSC73XX_MAC_CFG_GIGA_MODE | \
108 VSC73XX_MAC_CFG_TX_IPG_1000M | \
109 VSC73XX_MAC_CFG_CLK_SEL_1000M)
110#define VSC73XX_MAC_CFG_RESET (VSC73XX_MAC_CFG_PORT_RST | \
111 VSC73XX_MAC_CFG_MAC_RX_RST | \
112 VSC73XX_MAC_CFG_MAC_TX_RST)
113
114/* Flow control register bits */
115#define VSC73XX_FCCONF_ZERO_PAUSE_EN BIT(17)
116#define VSC73XX_FCCONF_FLOW_CTRL_OBEY BIT(16)
117#define VSC73XX_FCCONF_PAUSE_VAL_MASK GENMASK(15, 0)
118
119/* ADVPORTM advanced port setup register bits */
120#define VSC73XX_ADVPORTM_IFG_PPM BIT(7)
121#define VSC73XX_ADVPORTM_EXC_COL_CONT BIT(6)
122#define VSC73XX_ADVPORTM_EXT_PORT BIT(5)
123#define VSC73XX_ADVPORTM_INV_GTX BIT(4)
124#define VSC73XX_ADVPORTM_ENA_GTX BIT(3)
125#define VSC73XX_ADVPORTM_DDR_MODE BIT(2)
126#define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1)
127#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0)
128
129/* CAT_DROP categorizer frame dropping register bits */
130#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6)
131#define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4)
132#define VSC73XX_CAT_DROP_FWD_PAUSE_ENA BIT(3)
133#define VSC73XX_CAT_DROP_UNTAGGED_ENA BIT(2)
134#define VSC73XX_CAT_DROP_TAGGED_ENA BIT(1)
135#define VSC73XX_CAT_DROP_NULL_MAC_ENA BIT(0)
136
137#define VSC73XX_Q_MISC_CONF_EXTENT_MEM BIT(31)
138#define VSC73XX_Q_MISC_CONF_EARLY_TX_MASK GENMASK(4, 1)
139#define VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1)
140#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0)
141
142/* Frame analyzer block 2 registers */
143#define VSC73XX_STORMLIMIT 0x02
144#define VSC73XX_ADVLEARN 0x03
145#define VSC73XX_IFLODMSK 0x04
146#define VSC73XX_VLANMASK 0x05
147#define VSC73XX_MACHDATA 0x06
148#define VSC73XX_MACLDATA 0x07
149#define VSC73XX_ANMOVED 0x08
150#define VSC73XX_ANAGEFIL 0x09
151#define VSC73XX_ANEVENTS 0x0a
152#define VSC73XX_ANCNTMASK 0x0b
153#define VSC73XX_ANCNTVAL 0x0c
154#define VSC73XX_LEARNMASK 0x0d
155#define VSC73XX_UFLODMASK 0x0e
156#define VSC73XX_MFLODMASK 0x0f
157#define VSC73XX_RECVMASK 0x10
158#define VSC73XX_AGGRCTRL 0x20
159#define VSC73XX_AGGRMSKS 0x30 /* Until 0x3f */
160#define VSC73XX_DSTMASKS 0x40 /* Until 0x7f */
161#define VSC73XX_SRCMASKS 0x80 /* Until 0x87 */
162#define VSC73XX_CAPENAB 0xa0
163#define VSC73XX_MACACCESS 0xb0
164#define VSC73XX_IPMCACCESS 0xb1
165#define VSC73XX_MACTINDX 0xc0
166#define VSC73XX_VLANACCESS 0xd0
167#define VSC73XX_VLANTIDX 0xe0
168#define VSC73XX_AGENCTRL 0xf0
169#define VSC73XX_CAPRST 0xff
170
171#define VSC73XX_MACACCESS_CPU_COPY BIT(14)
172#define VSC73XX_MACACCESS_FWD_KILL BIT(13)
173#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12)
174#define VSC73XX_MACACCESS_AGED_FLAG BIT(11)
175#define VSC73XX_MACACCESS_VALID BIT(10)
176#define VSC73XX_MACACCESS_LOCKED BIT(9)
177#define VSC73XX_MACACCESS_DEST_IDX_MASK GENMASK(8, 3)
178#define VSC73XX_MACACCESS_CMD_MASK GENMASK(2, 0)
179#define VSC73XX_MACACCESS_CMD_IDLE 0
180#define VSC73XX_MACACCESS_CMD_LEARN 1
181#define VSC73XX_MACACCESS_CMD_FORGET 2
182#define VSC73XX_MACACCESS_CMD_AGE_TABLE 3
183#define VSC73XX_MACACCESS_CMD_FLUSH_TABLE 4
184#define VSC73XX_MACACCESS_CMD_CLEAR_TABLE 5
185#define VSC73XX_MACACCESS_CMD_READ_ENTRY 6
186#define VSC73XX_MACACCESS_CMD_WRITE_ENTRY 7
187
188#define VSC73XX_VLANACCESS_LEARN_DISABLED BIT(30)
189#define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29)
190#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28)
191#define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2)
192#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0)
193#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0
194#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1
195#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2
196#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3
197
198/* MII block 3 registers */
199#define VSC73XX_MII_STAT 0x0
200#define VSC73XX_MII_CMD 0x1
201#define VSC73XX_MII_DATA 0x2
202
203/* Arbiter block 5 registers */
204#define VSC73XX_ARBEMPTY 0x0c
205#define VSC73XX_ARBDISC 0x0e
206#define VSC73XX_SBACKWDROP 0x12
207#define VSC73XX_DBACKWDROP 0x13
208#define VSC73XX_ARBBURSTPROB 0x15
209
210/* System block 7 registers */
211#define VSC73XX_ICPU_SIPAD 0x01
212#define VSC73XX_GMIIDELAY 0x05
213#define VSC73XX_ICPU_CTRL 0x10
214#define VSC73XX_ICPU_ADDR 0x11
215#define VSC73XX_ICPU_SRAM 0x12
216#define VSC73XX_HWSEM 0x13
217#define VSC73XX_GLORESET 0x14
218#define VSC73XX_ICPU_MBOX_VAL 0x15
219#define VSC73XX_ICPU_MBOX_SET 0x16
220#define VSC73XX_ICPU_MBOX_CLR 0x17
221#define VSC73XX_CHIPID 0x18
222#define VSC73XX_GPIO 0x34
223
224#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE 0
225#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS 1
226#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS 2
227#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS 3
228
229#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE (0 << 4)
230#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS (1 << 4)
231#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS (2 << 4)
232#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS (3 << 4)
233
234#define VSC73XX_ICPU_CTRL_WATCHDOG_RST BIT(31)
235#define VSC73XX_ICPU_CTRL_CLK_DIV_MASK GENMASK(12, 8)
236#define VSC73XX_ICPU_CTRL_SRST_HOLD BIT(7)
237#define VSC73XX_ICPU_CTRL_ICPU_PI_EN BIT(6)
238#define VSC73XX_ICPU_CTRL_BOOT_EN BIT(3)
239#define VSC73XX_ICPU_CTRL_EXT_ACC_EN BIT(2)
240#define VSC73XX_ICPU_CTRL_CLK_EN BIT(1)
241#define VSC73XX_ICPU_CTRL_SRST BIT(0)
242
243#define VSC73XX_CHIPID_ID_SHIFT 12
244#define VSC73XX_CHIPID_ID_MASK 0xffff
245#define VSC73XX_CHIPID_REV_SHIFT 28
246#define VSC73XX_CHIPID_REV_MASK 0xf
247#define VSC73XX_CHIPID_ID_7385 0x7385
248#define VSC73XX_CHIPID_ID_7388 0x7388
249#define VSC73XX_CHIPID_ID_7395 0x7395
250#define VSC73XX_CHIPID_ID_7398 0x7398
251
252#define VSC73XX_GLORESET_STROBE BIT(4)
253#define VSC73XX_GLORESET_ICPU_LOCK BIT(3)
254#define VSC73XX_GLORESET_MEM_LOCK BIT(2)
255#define VSC73XX_GLORESET_PHY_RESET BIT(1)
256#define VSC73XX_GLORESET_MASTER_RESET BIT(0)
257
258#define VSC73XX_CMD_MODE_READ 0
259#define VSC73XX_CMD_MODE_WRITE 1
260#define VSC73XX_CMD_MODE_SHIFT 4
261#define VSC73XX_CMD_BLOCK_SHIFT 5
262#define VSC73XX_CMD_BLOCK_MASK 0x7
263#define VSC73XX_CMD_SUBBLOCK_MASK 0xf
264
265#define VSC7385_CLOCK_DELAY ((3 << 4) | 3)
266#define VSC7385_CLOCK_DELAY_MASK ((3 << 4) | 3)
267
268#define VSC73XX_ICPU_CTRL_STOP (VSC73XX_ICPU_CTRL_SRST_HOLD | \
269 VSC73XX_ICPU_CTRL_BOOT_EN | \
270 VSC73XX_ICPU_CTRL_EXT_ACC_EN)
271
272#define VSC73XX_ICPU_CTRL_START (VSC73XX_ICPU_CTRL_CLK_DIV | \
273 VSC73XX_ICPU_CTRL_BOOT_EN | \
274 VSC73XX_ICPU_CTRL_CLK_EN | \
275 VSC73XX_ICPU_CTRL_SRST)
276
/**
 * struct vsc73xx - VSC73xx state container
 * @dev: device used for logging and resource lookup
 * @reset: chip reset GPIO line; driven to 1 to assert reset
 * @spi: SPI device used for all register traffic
 * @ds: the DSA switch instance backed by this state
 * @gc: GPIO chip (not used in this part of the file - presumably exposes
 *      the switch GPIO lines; confirm against the gpiochip registration)
 * @chipid: chip ID read from the CHIPID register (0x7385/0x7388/0x7395/0x7398)
 * @addr: MAC address programmed into the per-port flow control registers
 * @lock: serializes SPI transactions
 */
struct vsc73xx {
	struct device		*dev;
	struct gpio_desc	*reset;
	struct spi_device	*spi;
	struct dsa_switch	*ds;
	struct gpio_chip	gc;
	u16			chipid;
	u8			addr[ETH_ALEN];
	struct mutex		lock; /* Protects SPI traffic */
};
290
291#define IS_7385(a) ((a)->chipid == VSC73XX_CHIPID_ID_7385)
292#define IS_7388(a) ((a)->chipid == VSC73XX_CHIPID_ID_7388)
293#define IS_7395(a) ((a)->chipid == VSC73XX_CHIPID_ID_7395)
294#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398)
295#define IS_739X(a) (IS_7395(a) || IS_7398(a))
296
/* One hardware statistics counter as exposed through ethtool: the
 * counter index in the MAC block and its human-readable name.
 */
struct vsc73xx_counter {
	u8 counter;		/* hardware counter index */
	const char *name;	/* ethtool string for this counter */
};
301
302/* Counters are named according to the MIB standards where applicable.
303 * Some counters are custom, non-standard. The standard counters are
304 * named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex
305 * 30A Counters.
306 */
/* Counters are named according to the MIB standards where applicable.
 * Some counters are custom, non-standard. The standard counters are
 * named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex
 * 30A Counters.
 */
static const struct vsc73xx_counter vsc73xx_rx_counters[] = {
	{ 0, "RxEtherStatsPkts" },
	{ 1, "RxBroadcast+MulticastPkts" }, /* non-standard counter */
	{ 2, "RxTotalErrorPackets" }, /* non-standard counter */
	{ 3, "RxEtherStatsBroadcastPkts" },
	{ 4, "RxEtherStatsMulticastPkts" },
	{ 5, "RxEtherStatsPkts64Octets" },
	{ 6, "RxEtherStatsPkts65to127Octets" },
	{ 7, "RxEtherStatsPkts128to255Octets" },
	{ 8, "RxEtherStatsPkts256to511Octets" },
	{ 9, "RxEtherStatsPkts512to1023Octets" },
	{ 10, "RxEtherStatsPkts1024to1518Octets" },
	{ 11, "RxJumboFrames" }, /* non-standard counter */
	/* NOTE(review): "Transmitted" in an RX table looks like a copy/paste
	 * from the TX list - verify the name against the datasheet.
	 */
	{ 12, "RxaPauseMACControlFramesTransmitted" },
	{ 13, "RxFIFODrops" }, /* non-standard counter */
	{ 14, "RxBackwardDrops" }, /* non-standard counter */
	{ 15, "RxClassifierDrops" }, /* non-standard counter */
	{ 16, "RxEtherStatsCRCAlignErrors" },
	{ 17, "RxEtherStatsUndersizePkts" },
	{ 18, "RxEtherStatsOversizePkts" },
	{ 19, "RxEtherStatsFragments" },
	{ 20, "RxEtherStatsJabbers" },
	{ 21, "RxaMACControlFramesReceived" },
	/* 22-24 are undefined */
	{ 25, "RxaFramesReceivedOK" },
	{ 26, "RxQoSClass0" }, /* non-standard counter */
	{ 27, "RxQoSClass1" }, /* non-standard counter */
	{ 28, "RxQoSClass2" }, /* non-standard counter */
	{ 29, "RxQoSClass3" }, /* non-standard counter */
};
337
/* TX statistics counters; same naming conventions as the RX table above. */
static const struct vsc73xx_counter vsc73xx_tx_counters[] = {
	{ 0, "TxEtherStatsPkts" },
	{ 1, "TxBroadcast+MulticastPkts" }, /* non-standard counter */
	{ 2, "TxTotalErrorPackets" }, /* non-standard counter */
	{ 3, "TxEtherStatsBroadcastPkts" },
	{ 4, "TxEtherStatsMulticastPkts" },
	{ 5, "TxEtherStatsPkts64Octets" },
	{ 6, "TxEtherStatsPkts65to127Octets" },
	{ 7, "TxEtherStatsPkts128to255Octets" },
	{ 8, "TxEtherStatsPkts256to511Octets" },
	{ 9, "TxEtherStatsPkts512to1023Octets" },
	{ 10, "TxEtherStatsPkts1024to1518Octets" },
	{ 11, "TxJumboFrames" }, /* non-standard counter */
	{ 12, "TxaPauseMACControlFramesTransmitted" },
	{ 13, "TxFIFODrops" }, /* non-standard counter */
	{ 14, "TxDrops" }, /* non-standard counter */
	{ 15, "TxEtherStatsCollisions" },
	{ 16, "TxEtherStatsCRCAlignErrors" },
	{ 17, "TxEtherStatsUndersizePkts" },
	{ 18, "TxEtherStatsOversizePkts" },
	{ 19, "TxEtherStatsFragments" },
	{ 20, "TxEtherStatsJabbers" },
	/* 21-24 are undefined */
	/* NOTE(review): "ReceivedOK" in a TX table - presumably should read
	 * "TxaFramesTransmittedOK"; verify against the datasheet before
	 * renaming, as the string is visible to userspace via ethtool.
	 */
	{ 25, "TxaFramesReceivedOK" },
	{ 26, "TxQoSClass0" }, /* non-standard counter */
	{ 27, "TxQoSClass1" }, /* non-standard counter */
	{ 28, "TxQoSClass2" }, /* non-standard counter */
	{ 29, "TxQoSClass3" }, /* non-standard counter */
};
367
368static int vsc73xx_is_addr_valid(u8 block, u8 subblock)
369{
370 switch (block) {
371 case VSC73XX_BLOCK_MAC:
372 switch (subblock) {
373 case 0 ... 4:
374 case 6:
375 return 1;
376 }
377 break;
378
379 case VSC73XX_BLOCK_ANALYZER:
380 case VSC73XX_BLOCK_SYSTEM:
381 switch (subblock) {
382 case 0:
383 return 1;
384 }
385 break;
386
387 case VSC73XX_BLOCK_MII:
388 case VSC73XX_BLOCK_CAPTURE:
389 case VSC73XX_BLOCK_ARBITER:
390 switch (subblock) {
391 case 0 ... 1:
392 return 1;
393 }
394 break;
395 }
396
397 return 0;
398}
399
400static u8 vsc73xx_make_addr(u8 mode, u8 block, u8 subblock)
401{
402 u8 ret;
403
404 ret = (block & VSC73XX_CMD_BLOCK_MASK) << VSC73XX_CMD_BLOCK_SHIFT;
405 ret |= (mode & 1) << VSC73XX_CMD_MODE_SHIFT;
406 ret |= subblock & VSC73XX_CMD_SUBBLOCK_MASK;
407
408 return ret;
409}
410
/* vsc73xx_read() - read a 32-bit switch register over SPI
 * @vsc: state container
 * @block: register block (VSC73XX_BLOCK_*)
 * @subblock: subblock within the block (e.g. port number for the MAC block)
 * @reg: register offset within the block
 * @val: filled in with the register value on success
 *
 * Returns 0 on success, -EINVAL for an invalid block/subblock pair, or a
 * negative error code from the SPI core.
 */
static int vsc73xx_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
			u32 *val)
{
	struct spi_transfer t[2];
	struct spi_message m;
	u8 cmd[4];
	u8 buf[4];
	int ret;

	if (!vsc73xx_is_addr_valid(block, subblock))
		return -EINVAL;

	spi_message_init(&m);

	memset(&t, 0, sizeof(t));

	/* First transfer: command/address word plus two padding bytes */
	t[0].tx_buf = cmd;
	t[0].len = sizeof(cmd);
	spi_message_add_tail(&t[0], &m);

	/* Second transfer: clock in the four data bytes */
	t[1].rx_buf = buf;
	t[1].len = sizeof(buf);
	spi_message_add_tail(&t[1], &m);

	cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_READ, block, subblock);
	cmd[1] = reg;
	/* cmd[2]/cmd[3]: zero padding clocked out before the data comes
	 * back - presumably turnaround time for the chip; confirm against
	 * the datasheet (the write path below uses only a 2-byte command).
	 */
	cmd[2] = 0;
	cmd[3] = 0;

	/* Serialize against concurrent register accesses */
	mutex_lock(&vsc->lock);
	ret = spi_sync(vsc->spi, &m);
	mutex_unlock(&vsc->lock);

	if (ret)
		return ret;

	/* Registers are big-endian on the wire */
	*val = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];

	return 0;
}
451
/* vsc73xx_write() - write a 32-bit switch register over SPI
 * @vsc: state container
 * @block: register block (VSC73XX_BLOCK_*)
 * @subblock: subblock within the block (e.g. port number for the MAC block)
 * @reg: register offset within the block
 * @val: value to write
 *
 * Returns 0 on success, -EINVAL for an invalid block/subblock pair, or a
 * negative error code from the SPI core.
 */
static int vsc73xx_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
			 u32 val)
{
	struct spi_transfer t[2];
	struct spi_message m;
	u8 cmd[2];
	u8 buf[4];
	int ret;

	if (!vsc73xx_is_addr_valid(block, subblock))
		return -EINVAL;

	spi_message_init(&m);

	memset(&t, 0, sizeof(t));

	/* First transfer: the two-byte command/address word */
	t[0].tx_buf = cmd;
	t[0].len = sizeof(cmd);
	spi_message_add_tail(&t[0], &m);

	/* Second transfer: the four data bytes */
	t[1].tx_buf = buf;
	t[1].len = sizeof(buf);
	spi_message_add_tail(&t[1], &m);

	cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_WRITE, block, subblock);
	cmd[1] = reg;

	/* Registers are big-endian on the wire */
	buf[0] = (val >> 24) & 0xff;
	buf[1] = (val >> 16) & 0xff;
	buf[2] = (val >> 8) & 0xff;
	buf[3] = val & 0xff;

	/* Serialize against concurrent register accesses */
	mutex_lock(&vsc->lock);
	ret = spi_sync(vsc->spi, &m);
	mutex_unlock(&vsc->lock);

	return ret;
}
490
491static int vsc73xx_update_bits(struct vsc73xx *vsc, u8 block, u8 subblock,
492 u8 reg, u32 mask, u32 val)
493{
494 u32 tmp, orig;
495 int ret;
496
497 /* Same read-modify-write algorithm as e.g. regmap */
498 ret = vsc73xx_read(vsc, block, subblock, reg, &orig);
499 if (ret)
500 return ret;
501 tmp = orig & ~mask;
502 tmp |= val & mask;
503 return vsc73xx_write(vsc, block, subblock, reg, tmp);
504}
505
506static int vsc73xx_detect(struct vsc73xx *vsc)
507{
508 bool icpu_si_boot_en;
509 bool icpu_pi_en;
510 u32 val;
511 u32 rev;
512 int ret;
513 u32 id;
514
515 ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
516 VSC73XX_ICPU_MBOX_VAL, &val);
517 if (ret) {
518 dev_err(vsc->dev, "unable to read mailbox (%d)\n", ret);
519 return ret;
520 }
521
522 if (val == 0xffffffff) {
523 dev_info(vsc->dev, "chip seems dead, assert reset\n");
524 gpiod_set_value_cansleep(vsc->reset, 1);
525 /* Reset pulse should be 20ns minimum, according to datasheet
526 * table 245, so 10us should be fine
527 */
528 usleep_range(10, 100);
529 gpiod_set_value_cansleep(vsc->reset, 0);
530 /* Wait 20ms according to datasheet table 245 */
531 msleep(20);
532
533 ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
534 VSC73XX_ICPU_MBOX_VAL, &val);
535 if (val == 0xffffffff) {
536 dev_err(vsc->dev, "seems not to help, giving up\n");
537 return -ENODEV;
538 }
539 }
540
541 ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
542 VSC73XX_CHIPID, &val);
543 if (ret) {
544 dev_err(vsc->dev, "unable to read chip id (%d)\n", ret);
545 return ret;
546 }
547
548 id = (val >> VSC73XX_CHIPID_ID_SHIFT) &
549 VSC73XX_CHIPID_ID_MASK;
550 switch (id) {
551 case VSC73XX_CHIPID_ID_7385:
552 case VSC73XX_CHIPID_ID_7388:
553 case VSC73XX_CHIPID_ID_7395:
554 case VSC73XX_CHIPID_ID_7398:
555 break;
556 default:
557 dev_err(vsc->dev, "unsupported chip, id=%04x\n", id);
558 return -ENODEV;
559 }
560
561 vsc->chipid = id;
562 rev = (val >> VSC73XX_CHIPID_REV_SHIFT) &
563 VSC73XX_CHIPID_REV_MASK;
564 dev_info(vsc->dev, "VSC%04X (rev: %d) switch found\n", id, rev);
565
566 ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
567 VSC73XX_ICPU_CTRL, &val);
568 if (ret) {
569 dev_err(vsc->dev, "unable to read iCPU control\n");
570 return ret;
571 }
572
573 /* The iCPU can always be used but can boot in different ways.
574 * If it is initially disabled and has no external memory,
575 * we are in control and can do whatever we like, else we
576 * are probably in trouble (we need some way to communicate
577 * with the running firmware) so we bail out for now.
578 */
579 icpu_pi_en = !!(val & VSC73XX_ICPU_CTRL_ICPU_PI_EN);
580 icpu_si_boot_en = !!(val & VSC73XX_ICPU_CTRL_BOOT_EN);
581 if (icpu_si_boot_en && icpu_pi_en) {
582 dev_err(vsc->dev,
583 "iCPU enabled boots from SI, has external memory\n");
584 dev_err(vsc->dev, "no idea how to deal with this\n");
585 return -ENODEV;
586 }
587 if (icpu_si_boot_en && !icpu_pi_en) {
588 dev_err(vsc->dev,
589 "iCPU enabled boots from SI, no external memory\n");
590 dev_err(vsc->dev, "no idea how to deal with this\n");
591 return -ENODEV;
592 }
593 if (!icpu_si_boot_en && icpu_pi_en) {
594 dev_err(vsc->dev,
595 "iCPU enabled, boots from PI external memory\n");
596 dev_err(vsc->dev, "no idea how to deal with this\n");
597 return -ENODEV;
598 }
599 /* !icpu_si_boot_en && !cpu_pi_en */
600 dev_info(vsc->dev, "iCPU disabled, no external memory\n");
601
602 return 0;
603}
604
/* DSA callback: read a PHY register through the switch's internal MII
 * controller. Returns the 16-bit register value, or a negative errno.
 */
static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
	struct vsc73xx *vsc = ds->priv;
	u32 cmd;
	u32 val;
	int ret;

	/* Setting bit 26 means "read"; PHY address goes in bits 25:21 and
	 * the register number in bits 20:16 of the MII command register.
	 */
	cmd = BIT(26) | (phy << 21) | (regnum << 16);
	ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
	if (ret)
		return ret;
	/* Give the MII controller time to complete the transaction */
	msleep(2);
	ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
	if (ret)
		return ret;
	/* Bit 16 in the data register flags a failed read */
	if (val & BIT(16)) {
		dev_err(vsc->dev, "reading reg %02x from phy%d failed\n",
			regnum, phy);
		return -EIO;
	}
	/* The register value occupies the low 16 bits */
	val &= 0xFFFFU;

	dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n",
		regnum, phy, val);

	return val;
}
633
634static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
635 u16 val)
636{
637 struct vsc73xx *vsc = ds->priv;
638 u32 cmd;
639 int ret;
640
641 /* It was found through tedious experiments that this router
642 * chip really hates to have it's PHYs reset. They
643 * never recover if that happens: autonegotiation stops
644 * working after a reset. Just filter out this command.
645 * (Resetting the whole chip is OK.)
646 */
647 if (regnum == 0 && (val & BIT(15))) {
648 dev_info(vsc->dev, "reset PHY - disallowed\n");
649 return 0;
650 }
651
652 cmd = (phy << 21) | (regnum << 16);
653 ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
654 if (ret)
655 return ret;
656
657 dev_dbg(vsc->dev, "write %04x to reg %02x in phy%d\n",
658 val, regnum, phy);
659 return 0;
660}
661
/* DSA callback: report the tagging protocol used towards the CPU port. */
static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
						      int port)
{
	/* The switch internally uses a 8 byte header with length,
	 * source port, tag, LPA and priority. This is supposedly
	 * only accessible when operating the switch using the internal
	 * CPU or with an external CPU mapping the device in, but not
	 * when operating the switch over SPI and putting frames in/out
	 * on port 6 (the CPU port). So far we must assume that we
	 * cannot access the tag. (See "Internal frame header" section
	 * 3.9.1 in the manual.)
	 */
	return DSA_TAG_PROTO_NONE;
}
676
677static int vsc73xx_setup(struct dsa_switch *ds)
678{
679 struct vsc73xx *vsc = ds->priv;
680 int i;
681
682 dev_info(vsc->dev, "set up the switch\n");
683
684 /* Issue RESET */
685 vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
686 VSC73XX_GLORESET_MASTER_RESET);
687 usleep_range(125, 200);
688
689 /* Initialize memory, initialize RAM bank 0..15 except 6 and 7
690 * This sequence appears in the
691 * VSC7385 SparX-G5 datasheet section 6.6.1
692 * VSC7395 SparX-G5e datasheet section 6.6.1
693 * "initialization sequence".
694 * No explanation is given to the 0x1010400 magic number.
695 */
696 for (i = 0; i <= 15; i++) {
697 if (i != 6 && i != 7) {
698 vsc73xx_write(vsc, VSC73XX_BLOCK_MEMINIT,
699 2,
700 0, 0x1010400 + i);
701 mdelay(1);
702 }
703 }
704 mdelay(30);
705
706 /* Clear MAC table */
707 vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
708 VSC73XX_MACACCESS,
709 VSC73XX_MACACCESS_CMD_CLEAR_TABLE);
710
711 /* Clear VLAN table */
712 vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
713 VSC73XX_VLANACCESS,
714 VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE);
715
716 msleep(40);
717
718 /* Use 20KiB buffers on all ports on VSC7395
719 * The VSC7385 has 16KiB buffers and that is the
720 * default if we don't set this up explicitly.
721 * Port "31" is "all ports".
722 */
723 if (IS_739X(vsc))
724 vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 0x1f,
725 VSC73XX_Q_MISC_CONF,
726 VSC73XX_Q_MISC_CONF_EXTENT_MEM);
727
728 /* Put all ports into reset until enabled */
729 for (i = 0; i < 7; i++) {
730 if (i == 5)
731 continue;
732 vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 4,
733 VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
734 }
735
736 /* MII delay, set both GTX and RX delay to 2 ns */
737 vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
738 VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
739 VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
740 /* Enable reception of frames on all ports */
741 vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK,
742 0x5f);
743 /* IP multicast flood mask (table 144) */
744 vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK,
745 0xff);
746
747 mdelay(50);
748
749 /* Release reset from the internal PHYs */
750 vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
751 VSC73XX_GLORESET_PHY_RESET);
752
753 udelay(4);
754
755 return 0;
756}
757
/* vsc73xx_init_port() - bring one port's MAC to a sane default state
 * @vsc: state container
 * @port: port number (subblock in the MAC block); CPU_PORT gets RGMII
 *        clocking, other ports the internal-PHY configuration
 *
 * Resets the port MAC, then programs speed/duplex defaults, max frame
 * length, flow control, queue configuration, the flow-control MAC
 * address and categorizer dropping behavior.
 */
static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
{
	u32 val;

	/* MAC configure, first reset the port and then write defaults */
	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
		      port,
		      VSC73XX_MAC_CFG,
		      VSC73XX_MAC_CFG_RESET);

	/* Take up the port in 1Gbit mode by default, this will be
	 * augmented after auto-negotiation on the PHY-facing
	 * ports.
	 */
	if (port == CPU_PORT)
		val = VSC73XX_MAC_CFG_1000M_F_RGMII;
	else
		val = VSC73XX_MAC_CFG_1000M_F_PHY;

	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
		      port,
		      VSC73XX_MAC_CFG,
		      val |
		      VSC73XX_MAC_CFG_TX_EN |
		      VSC73XX_MAC_CFG_RX_EN);

	/* Max length, we can do up to 9.6 KiB, so allow that.
	 * According to application not "VSC7398 Jumbo Frames" setting
	 * up the MTU to 9.6 KB does not affect the performance on standard
	 * frames, so just enable it. It is clear from the application note
	 * that "9.6 kilobytes" == 9600 bytes.
	 */
	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
		      port,
		      VSC73XX_MAXLEN, 9600);

	/* Flow control for the CPU port:
	 * Use a zero delay pause frame when pause condition is left
	 * Obey pause control frames
	 */
	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
		      port,
		      VSC73XX_FCCONF,
		      VSC73XX_FCCONF_ZERO_PAUSE_EN |
		      VSC73XX_FCCONF_FLOW_CTRL_OBEY);

	/* Issue pause control frames on PHY facing ports.
	 * Allow early initiation of MAC transmission if the amount
	 * of egress data is below 512 bytes on CPU port.
	 * FIXME: enable 20KiB buffers?
	 */
	if (port == CPU_PORT)
		val = VSC73XX_Q_MISC_CONF_EARLY_TX_512;
	else
		val = VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE;
	val |= VSC73XX_Q_MISC_CONF_EXTENT_MEM;
	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
		      port,
		      VSC73XX_Q_MISC_CONF,
		      val);

	/* Flow control MAC: a MAC address used in flow control frames.
	 * FCMACHI holds the high three bytes (addr[5..3]), FCMACLO the
	 * low three (addr[2..0]).
	 */
	val = (vsc->addr[5] << 16) | (vsc->addr[4] << 8) | (vsc->addr[3]);
	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
		      port,
		      VSC73XX_FCMACHI,
		      val);
	val = (vsc->addr[2] << 16) | (vsc->addr[1] << 8) | (vsc->addr[0]);
	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
		      port,
		      VSC73XX_FCMACLO,
		      val);

	/* Tell the categorizer to forward pause frames, not control
	 * frame. Do not drop anything.
	 */
	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
		      port,
		      VSC73XX_CAT_DROP,
		      VSC73XX_CAT_DROP_FWD_PAUSE_ENA);

	/* Clear all counters.
	 * NOTE(review): only the first RX counter register is written -
	 * presumably this write clears the whole bank; confirm against
	 * the datasheet.
	 */
	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
		      port, VSC73XX_C_RX0, 0);
}
843
/* vsc73xx_adjust_enable_port() - finalize a port after link-up
 * @vsc: state container
 * @port: port number
 * @phydev: attached PHY device (currently unused here; speed/duplex is
 *          already folded into @initval by the caller)
 * @initval: MAC_CFG base value selected for the negotiated link mode
 *
 * Resets the port MAC, loads a random seed (used by the MAC's backoff
 * logic - see SEED_LOAD), re-enables flow control, and finally takes the
 * port out of reset with TX/RX enabled.
 */
static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
				       int port, struct phy_device *phydev,
				       u32 initval)
{
	u32 val = initval;
	u8 seed;

	/* Reset this port FIXME: break out subroutine */
	val |= VSC73XX_MAC_CFG_RESET;
	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);

	/* Seed the port randomness with randomness */
	get_random_bytes(&seed, 1);
	val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
	val |= VSC73XX_MAC_CFG_SEED_LOAD;
	val |= VSC73XX_MAC_CFG_WEXC_DIS;
	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);

	/* Flow control for the PHY facing ports:
	 * Use a zero delay pause frame when pause condition is left
	 * Obey pause control frames
	 * When generating pause frames, use 0xff as pause value
	 */
	vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF,
		      VSC73XX_FCCONF_ZERO_PAUSE_EN |
		      VSC73XX_FCCONF_FLOW_CTRL_OBEY |
		      0xff);

	/* Disallow backward dropping of frames from this port */
	vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
			    VSC73XX_SBACKWDROP, BIT(port), 0);

	/* Enable TX, RX, deassert reset, stop loading seed */
	vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
			    VSC73XX_MAC_CFG,
			    VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD |
			    VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN,
			    VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN);
}
883
/* DSA callback: react to PHY link changes on @port.
 *
 * On link-down: disable RX, discard and drain the port's queued
 * traffic via the arbiter, hold the MAC in reset and remove the port
 * from the forwarding (receive) mask. On link-up: program the MAC for
 * the negotiated speed/duplex via vsc73xx_adjust_enable_port() and add
 * the port back into the forwarding mask. The CPU port additionally
 * gets late initialization and RGMII clocking setup here, since it has
 * no PHY to trigger earlier configuration.
 */
static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
				struct phy_device *phydev)
{
	struct vsc73xx *vsc = ds->priv;
	u32 val;

	/* Special handling of the CPU-facing port */
	if (port == CPU_PORT) {
		/* Other ports are already initialized but not this one */
		vsc73xx_init_port(vsc, CPU_PORT);
		/* Select the external port for this interface (EXT_PORT)
		 * Enable the GMII GTX external clock
		 * Use double data rate (DDR mode)
		 */
		vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
			      CPU_PORT,
			      VSC73XX_ADVPORTM,
			      VSC73XX_ADVPORTM_EXT_PORT |
			      VSC73XX_ADVPORTM_ENA_GTX |
			      VSC73XX_ADVPORTM_DDR_MODE);
	}

	/* This is the MAC configuration that always need to happen
	 * after a PHY or the CPU port comes up or down.
	 */
	if (!phydev->link) {
		/* Bounded wait for the arbiter queue to drain */
		int maxloop = 10;

		dev_dbg(vsc->dev, "port %d: went down\n",
			port);

		/* Disable RX on this port */
		vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
				    VSC73XX_MAC_CFG,
				    VSC73XX_MAC_CFG_RX_EN, 0);

		/* Discard packets */
		vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
				    VSC73XX_ARBDISC, BIT(port), BIT(port));

		/* Wait until queue is empty */
		vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
			     VSC73XX_ARBEMPTY, &val);
		while (!(val & BIT(port))) {
			msleep(1);
			vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
				     VSC73XX_ARBEMPTY, &val);
			if (--maxloop == 0) {
				dev_err(vsc->dev,
					"timeout waiting for block arbiter\n");
				/* Continue anyway */
				break;
			}
		}

		/* Put this port into reset */
		vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
			      VSC73XX_MAC_CFG_RESET);

		/* Accept packets again */
		vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
				    VSC73XX_ARBDISC, BIT(port), 0);

		/* Allow backward dropping of frames from this port */
		vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
				    VSC73XX_SBACKWDROP, BIT(port), BIT(port));

		/* Receive mask (disable forwarding) */
		vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
				    VSC73XX_RECVMASK, BIT(port), 0);

		return;
	}

	/* Figure out what speed was negotiated */
	if (phydev->speed == SPEED_1000) {
		dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n",
			port);

		/* Set up default for internal port or external RGMII */
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
			val = VSC73XX_MAC_CFG_1000M_F_RGMII;
		else
			val = VSC73XX_MAC_CFG_1000M_F_PHY;
		vsc73xx_adjust_enable_port(vsc, port, phydev, val);
	} else if (phydev->speed == SPEED_100) {
		if (phydev->duplex == DUPLEX_FULL) {
			val = VSC73XX_MAC_CFG_100_10M_F_PHY;
			dev_dbg(vsc->dev,
				"port %d: 100 Mbit full duplex mode\n",
				port);
		} else {
			val = VSC73XX_MAC_CFG_100_10M_H_PHY;
			dev_dbg(vsc->dev,
				"port %d: 100 Mbit half duplex mode\n",
				port);
		}
		vsc73xx_adjust_enable_port(vsc, port, phydev, val);
	} else if (phydev->speed == SPEED_10) {
		if (phydev->duplex == DUPLEX_FULL) {
			val = VSC73XX_MAC_CFG_100_10M_F_PHY;
			dev_dbg(vsc->dev,
				"port %d: 10 Mbit full duplex mode\n",
				port);
		} else {
			val = VSC73XX_MAC_CFG_100_10M_H_PHY;
			dev_dbg(vsc->dev,
				"port %d: 10 Mbit half duplex mode\n",
				port);
		}
		vsc73xx_adjust_enable_port(vsc, port, phydev, val);
	} else {
		dev_err(vsc->dev,
			"could not adjust link: unknown speed\n");
	}

	/* Enable port (forwarding) in the receive mask */
	vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
			    VSC73XX_RECVMASK, BIT(port), BIT(port));
}
1004
1005static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
1006 struct phy_device *phy)
1007{
1008 struct vsc73xx *vsc = ds->priv;
1009
1010 dev_info(vsc->dev, "enable port %d\n", port);
1011 vsc73xx_init_port(vsc, port);
1012
1013 return 0;
1014}
1015
1016static void vsc73xx_port_disable(struct dsa_switch *ds, int port,
1017 struct phy_device *phy)
1018{
1019 struct vsc73xx *vsc = ds->priv;
1020
1021 /* Just put the port into reset */
1022 vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
1023 VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
1024}
1025
1026static const struct vsc73xx_counter *
1027vsc73xx_find_counter(struct vsc73xx *vsc,
1028 u8 counter,
1029 bool tx)
1030{
1031 const struct vsc73xx_counter *cnts;
1032 int num_cnts;
1033 int i;
1034
1035 if (tx) {
1036 cnts = vsc73xx_tx_counters;
1037 num_cnts = ARRAY_SIZE(vsc73xx_tx_counters);
1038 } else {
1039 cnts = vsc73xx_rx_counters;
1040 num_cnts = ARRAY_SIZE(vsc73xx_rx_counters);
1041 }
1042
1043 for (i = 0; i < num_cnts; i++) {
1044 const struct vsc73xx_counter *cnt;
1045
1046 cnt = &cnts[i];
1047 if (cnt->counter == counter)
1048 return cnt;
1049 }
1050
1051 return NULL;
1052}
1053
1054static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset,
1055 uint8_t *data)
1056{
1057 const struct vsc73xx_counter *cnt;
1058 struct vsc73xx *vsc = ds->priv;
1059 u8 indices[6];
1060 int i, j;
1061 u32 val;
1062 int ret;
1063
1064 if (stringset != ETH_SS_STATS)
1065 return;
1066
1067 ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port,
1068 VSC73XX_C_CFG, &val);
1069 if (ret)
1070 return;
1071
1072 indices[0] = (val & 0x1f); /* RX counter 0 */
1073 indices[1] = ((val >> 5) & 0x1f); /* RX counter 1 */
1074 indices[2] = ((val >> 10) & 0x1f); /* RX counter 2 */
1075 indices[3] = ((val >> 16) & 0x1f); /* TX counter 0 */
1076 indices[4] = ((val >> 21) & 0x1f); /* TX counter 1 */
1077 indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */
1078
1079 /* The first counters is the RX octets */
1080 j = 0;
1081 strncpy(data + j * ETH_GSTRING_LEN,
1082 "RxEtherStatsOctets", ETH_GSTRING_LEN);
1083 j++;
1084
1085 /* Each port supports recording 3 RX counters and 3 TX counters,
1086 * figure out what counters we use in this set-up and return the
1087 * names of them. The hardware default counters will be number of
1088 * packets on RX/TX, combined broadcast+multicast packets RX/TX and
1089 * total error packets RX/TX.
1090 */
1091 for (i = 0; i < 3; i++) {
1092 cnt = vsc73xx_find_counter(vsc, indices[i], false);
1093 if (cnt)
1094 strncpy(data + j * ETH_GSTRING_LEN,
1095 cnt->name, ETH_GSTRING_LEN);
1096 j++;
1097 }
1098
1099 /* TX stats begins with the number of TX octets */
1100 strncpy(data + j * ETH_GSTRING_LEN,
1101 "TxEtherStatsOctets", ETH_GSTRING_LEN);
1102 j++;
1103
1104 for (i = 3; i < 6; i++) {
1105 cnt = vsc73xx_find_counter(vsc, indices[i], true);
1106 if (cnt)
1107 strncpy(data + j * ETH_GSTRING_LEN,
1108 cnt->name, ETH_GSTRING_LEN);
1109 j++;
1110 }
1111}
1112
1113static int vsc73xx_get_sset_count(struct dsa_switch *ds, int port, int sset)
1114{
1115 /* We only support SS_STATS */
1116 if (sset != ETH_SS_STATS)
1117 return 0;
1118 /* RX and TX packets, then 3 RX counters, 3 TX counters */
1119 return 8;
1120}
1121
1122static void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port,
1123 uint64_t *data)
1124{
1125 struct vsc73xx *vsc = ds->priv;
1126 u8 regs[] = {
1127 VSC73XX_RXOCT,
1128 VSC73XX_C_RX0,
1129 VSC73XX_C_RX1,
1130 VSC73XX_C_RX2,
1131 VSC73XX_TXOCT,
1132 VSC73XX_C_TX0,
1133 VSC73XX_C_TX1,
1134 VSC73XX_C_TX2,
1135 };
1136 u32 val;
1137 int ret;
1138 int i;
1139
1140 for (i = 0; i < ARRAY_SIZE(regs); i++) {
1141 ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port,
1142 regs[i], &val);
1143 if (ret) {
1144 dev_err(vsc->dev, "error reading counter %d\n", i);
1145 return;
1146 }
1147 data[i] = val;
1148 }
1149}
1150
/* DSA switch operations implemented by this driver */
static const struct dsa_switch_ops vsc73xx_ds_ops = {
	.get_tag_protocol = vsc73xx_get_tag_protocol,
	.setup = vsc73xx_setup,
	.phy_read = vsc73xx_phy_read,
	.phy_write = vsc73xx_phy_write,
	.adjust_link = vsc73xx_adjust_link,
	.get_strings = vsc73xx_get_strings,
	.get_ethtool_stats = vsc73xx_get_ethtool_stats,
	.get_sset_count = vsc73xx_get_sset_count,
	.port_enable = vsc73xx_port_enable,
	.port_disable = vsc73xx_port_disable,
};
1163
1164static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
1165{
1166 struct vsc73xx *vsc = gpiochip_get_data(chip);
1167 u32 val;
1168 int ret;
1169
1170 ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
1171 VSC73XX_GPIO, &val);
1172 if (ret)
1173 return ret;
1174
1175 return !!(val & BIT(offset));
1176}
1177
1178static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
1179 int val)
1180{
1181 struct vsc73xx *vsc = gpiochip_get_data(chip);
1182 u32 tmp = val ? BIT(offset) : 0;
1183
1184 vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
1185 VSC73XX_GPIO, BIT(offset), tmp);
1186}
1187
1188static int vsc73xx_gpio_direction_output(struct gpio_chip *chip,
1189 unsigned int offset, int val)
1190{
1191 struct vsc73xx *vsc = gpiochip_get_data(chip);
1192 u32 tmp = val ? BIT(offset) : 0;
1193
1194 return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
1195 VSC73XX_GPIO, BIT(offset + 4) | BIT(offset),
1196 BIT(offset + 4) | tmp);
1197}
1198
1199static int vsc73xx_gpio_direction_input(struct gpio_chip *chip,
1200 unsigned int offset)
1201{
1202 struct vsc73xx *vsc = gpiochip_get_data(chip);
1203
1204 return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
1205 VSC73XX_GPIO, BIT(offset + 4),
1206 0);
1207}
1208
1209static int vsc73xx_gpio_get_direction(struct gpio_chip *chip,
1210 unsigned int offset)
1211{
1212 struct vsc73xx *vsc = gpiochip_get_data(chip);
1213 u32 val;
1214 int ret;
1215
1216 ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
1217 VSC73XX_GPIO, &val);
1218 if (ret)
1219 return ret;
1220
1221 return !(val & BIT(offset + 4));
1222}
1223
1224static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
1225{
1226 int ret;
1227
1228 vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
1229 vsc->chipid);
1230 vsc->gc.ngpio = 4;
1231 vsc->gc.owner = THIS_MODULE;
1232 vsc->gc.parent = vsc->dev;
1233 vsc->gc.of_node = vsc->dev->of_node;
1234 vsc->gc.base = -1;
1235 vsc->gc.get = vsc73xx_gpio_get;
1236 vsc->gc.set = vsc73xx_gpio_set;
1237 vsc->gc.direction_input = vsc73xx_gpio_direction_input;
1238 vsc->gc.direction_output = vsc73xx_gpio_direction_output;
1239 vsc->gc.get_direction = vsc73xx_gpio_get_direction;
1240 vsc->gc.can_sleep = true;
1241 ret = devm_gpiochip_add_data(vsc->dev, &vsc->gc, vsc);
1242 if (ret) {
1243 dev_err(vsc->dev, "unable to register GPIO chip\n");
1244 return ret;
1245 }
1246 return 0;
1247}
1248
/* SPI probe: take the chip out of reset, set up the SPI link, detect
 * the chip, then register the DSA switch and the GPIO chip.
 * Returns 0 on success or a negative errno.
 */
static int vsc73xx_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct vsc73xx *vsc;
	int ret;

	vsc = devm_kzalloc(dev, sizeof(*vsc), GFP_KERNEL);
	if (!vsc)
		return -ENOMEM;

	spi_set_drvdata(spi, vsc);
	vsc->spi = spi_dev_get(spi);
	vsc->dev = dev;
	/* Serializes SPI register access */
	mutex_init(&vsc->lock);

	/* Release reset, if any */
	vsc->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(vsc->reset)) {
		dev_err(dev, "failed to get RESET GPIO\n");
		return PTR_ERR(vsc->reset);
	}
	if (vsc->reset)
		/* Wait 20ms according to datasheet table 245 */
		msleep(20);

	spi->mode = SPI_MODE_0;
	spi->bits_per_word = 8;
	ret = spi_setup(spi);
	if (ret < 0) {
		dev_err(dev, "spi setup failed.\n");
		return ret;
	}

	ret = vsc73xx_detect(vsc);
	if (ret) {
		dev_err(dev, "no chip found (%d)\n", ret);
		return -ENODEV;
	}

	/* Random MAC used as source address for switch control frames */
	eth_random_addr(vsc->addr);
	dev_info(vsc->dev,
		 "MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n",
		 vsc->addr[0], vsc->addr[1], vsc->addr[2],
		 vsc->addr[3], vsc->addr[4], vsc->addr[5]);

	/* The VSC7395 switch chips have 5+1 ports which means 5
	 * ordinary ports and a sixth CPU port facing the processor
	 * with an RGMII interface. These ports are numbered 0..4
	 * and 6, so they leave a "hole" in the port map for port 5,
	 * which is invalid.
	 *
	 * The VSC7398 has 8 ports, port 7 is again the CPU port.
	 *
	 * We allocate 8 ports and avoid access to the nonexistent
	 * ports.
	 */
	vsc->ds = dsa_switch_alloc(dev, 8);
	if (!vsc->ds)
		return -ENOMEM;
	vsc->ds->priv = vsc;

	vsc->ds->ops = &vsc73xx_ds_ops;
	ret = dsa_register_switch(vsc->ds);
	if (ret) {
		dev_err(dev, "unable to register switch (%d)\n", ret);
		return ret;
	}

	ret = vsc73xx_gpio_probe(vsc);
	if (ret) {
		/* Roll back the switch registration on GPIO failure */
		dsa_unregister_switch(vsc->ds);
		return ret;
	}

	return 0;
}
1325
1326static int vsc73xx_remove(struct spi_device *spi)
1327{
1328 struct vsc73xx *vsc = spi_get_drvdata(spi);
1329
1330 dsa_unregister_switch(vsc->ds);
1331 gpiod_set_value(vsc->reset, 1);
1332
1333 return 0;
1334}
1335
1336static const struct of_device_id vsc73xx_of_match[] = {
1337 {
1338 .compatible = "vitesse,vsc7385",
1339 },
1340 {
1341 .compatible = "vitesse,vsc7388",
1342 },
1343 {
1344 .compatible = "vitesse,vsc7395",
1345 },
1346 {
1347 .compatible = "vitesse,vsc7398",
1348 },
1349 { },
1350};
1351MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
1352
/* SPI driver glue and module metadata */
static struct spi_driver vsc73xx_driver = {
	.probe = vsc73xx_probe,
	.remove = vsc73xx_remove,
	.driver = {
		.name = "vsc73xx",
		.of_match_table = vsc73xx_of_match,
	},
};
module_spi_driver(vsc73xx_driver);

MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 176861bd2252..5bc168314ea2 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -765,8 +765,9 @@ static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
765 struct net_device *dev); 765 struct net_device *dev);
766static int vortex_rx(struct net_device *dev); 766static int vortex_rx(struct net_device *dev);
767static int boomerang_rx(struct net_device *dev); 767static int boomerang_rx(struct net_device *dev);
768static irqreturn_t vortex_interrupt(int irq, void *dev_id); 768static irqreturn_t vortex_boomerang_interrupt(int irq, void *dev_id);
769static irqreturn_t boomerang_interrupt(int irq, void *dev_id); 769static irqreturn_t _vortex_interrupt(int irq, struct net_device *dev);
770static irqreturn_t _boomerang_interrupt(int irq, struct net_device *dev);
770static int vortex_close(struct net_device *dev); 771static int vortex_close(struct net_device *dev);
771static void dump_tx_ring(struct net_device *dev); 772static void dump_tx_ring(struct net_device *dev);
772static void update_stats(void __iomem *ioaddr, struct net_device *dev); 773static void update_stats(void __iomem *ioaddr, struct net_device *dev);
@@ -838,11 +839,7 @@ MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
838#ifdef CONFIG_NET_POLL_CONTROLLER 839#ifdef CONFIG_NET_POLL_CONTROLLER
839static void poll_vortex(struct net_device *dev) 840static void poll_vortex(struct net_device *dev)
840{ 841{
841 struct vortex_private *vp = netdev_priv(dev); 842 vortex_boomerang_interrupt(dev->irq, dev);
842 unsigned long flags;
843 local_irq_save(flags);
844 (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
845 local_irq_restore(flags);
846} 843}
847#endif 844#endif
848 845
@@ -1728,8 +1725,7 @@ vortex_open(struct net_device *dev)
1728 dma_addr_t dma; 1725 dma_addr_t dma;
1729 1726
1730 /* Use the now-standard shared IRQ implementation. */ 1727 /* Use the now-standard shared IRQ implementation. */
1731 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? 1728 if ((retval = request_irq(dev->irq, vortex_boomerang_interrupt, IRQF_SHARED, dev->name, dev))) {
1732 boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
1733 pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq); 1729 pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
1734 goto err; 1730 goto err;
1735 } 1731 }
@@ -1904,18 +1900,7 @@ static void vortex_tx_timeout(struct net_device *dev)
1904 pr_err("%s: Interrupt posted but not delivered --" 1900 pr_err("%s: Interrupt posted but not delivered --"
1905 " IRQ blocked by another device?\n", dev->name); 1901 " IRQ blocked by another device?\n", dev->name);
1906 /* Bad idea here.. but we might as well handle a few events. */ 1902 /* Bad idea here.. but we might as well handle a few events. */
1907 { 1903 vortex_boomerang_interrupt(dev->irq, dev);
1908 /*
1909 * Block interrupts because vortex_interrupt does a bare spin_lock()
1910 */
1911 unsigned long flags;
1912 local_irq_save(flags);
1913 if (vp->full_bus_master_tx)
1914 boomerang_interrupt(dev->irq, dev);
1915 else
1916 vortex_interrupt(dev->irq, dev);
1917 local_irq_restore(flags);
1918 }
1919 } 1904 }
1920 1905
1921 if (vortex_debug > 0) 1906 if (vortex_debug > 0)
@@ -2266,9 +2251,8 @@ out_dma_err:
2266 */ 2251 */
2267 2252
2268static irqreturn_t 2253static irqreturn_t
2269vortex_interrupt(int irq, void *dev_id) 2254_vortex_interrupt(int irq, struct net_device *dev)
2270{ 2255{
2271 struct net_device *dev = dev_id;
2272 struct vortex_private *vp = netdev_priv(dev); 2256 struct vortex_private *vp = netdev_priv(dev);
2273 void __iomem *ioaddr; 2257 void __iomem *ioaddr;
2274 int status; 2258 int status;
@@ -2277,7 +2261,6 @@ vortex_interrupt(int irq, void *dev_id)
2277 unsigned int bytes_compl = 0, pkts_compl = 0; 2261 unsigned int bytes_compl = 0, pkts_compl = 0;
2278 2262
2279 ioaddr = vp->ioaddr; 2263 ioaddr = vp->ioaddr;
2280 spin_lock(&vp->lock);
2281 2264
2282 status = ioread16(ioaddr + EL3_STATUS); 2265 status = ioread16(ioaddr + EL3_STATUS);
2283 2266
@@ -2375,7 +2358,6 @@ vortex_interrupt(int irq, void *dev_id)
2375 pr_debug("%s: exiting interrupt, status %4.4x.\n", 2358 pr_debug("%s: exiting interrupt, status %4.4x.\n",
2376 dev->name, status); 2359 dev->name, status);
2377handler_exit: 2360handler_exit:
2378 spin_unlock(&vp->lock);
2379 return IRQ_RETVAL(handled); 2361 return IRQ_RETVAL(handled);
2380} 2362}
2381 2363
@@ -2385,9 +2367,8 @@ handler_exit:
2385 */ 2367 */
2386 2368
2387static irqreturn_t 2369static irqreturn_t
2388boomerang_interrupt(int irq, void *dev_id) 2370_boomerang_interrupt(int irq, struct net_device *dev)
2389{ 2371{
2390 struct net_device *dev = dev_id;
2391 struct vortex_private *vp = netdev_priv(dev); 2372 struct vortex_private *vp = netdev_priv(dev);
2392 void __iomem *ioaddr; 2373 void __iomem *ioaddr;
2393 int status; 2374 int status;
@@ -2397,12 +2378,6 @@ boomerang_interrupt(int irq, void *dev_id)
2397 2378
2398 ioaddr = vp->ioaddr; 2379 ioaddr = vp->ioaddr;
2399 2380
2400
2401 /*
2402 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
2403 * and boomerang_start_xmit
2404 */
2405 spin_lock(&vp->lock);
2406 vp->handling_irq = 1; 2381 vp->handling_irq = 1;
2407 2382
2408 status = ioread16(ioaddr + EL3_STATUS); 2383 status = ioread16(ioaddr + EL3_STATUS);
@@ -2521,10 +2496,29 @@ boomerang_interrupt(int irq, void *dev_id)
2521 dev->name, status); 2496 dev->name, status);
2522handler_exit: 2497handler_exit:
2523 vp->handling_irq = 0; 2498 vp->handling_irq = 0;
2524 spin_unlock(&vp->lock);
2525 return IRQ_RETVAL(handled); 2499 return IRQ_RETVAL(handled);
2526} 2500}
2527 2501
2502static irqreturn_t
2503vortex_boomerang_interrupt(int irq, void *dev_id)
2504{
2505 struct net_device *dev = dev_id;
2506 struct vortex_private *vp = netdev_priv(dev);
2507 unsigned long flags;
2508 irqreturn_t ret;
2509
2510 spin_lock_irqsave(&vp->lock, flags);
2511
2512 if (vp->full_bus_master_rx)
2513 ret = _boomerang_interrupt(dev->irq, dev);
2514 else
2515 ret = _vortex_interrupt(dev->irq, dev);
2516
2517 spin_unlock_irqrestore(&vp->lock, flags);
2518
2519 return ret;
2520}
2521
2528static int vortex_rx(struct net_device *dev) 2522static int vortex_rx(struct net_device *dev)
2529{ 2523{
2530 struct vortex_private *vp = netdev_priv(dev); 2524 struct vortex_private *vp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 5b7658bcf020..5c3ef9fc8207 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -32,7 +32,7 @@ config EL3
32 32
33config 3C515 33config 3C515
34 tristate "3c515 ISA \"Fast EtherLink\"" 34 tristate "3c515 ISA \"Fast EtherLink\""
35 depends on ISA && ISA_DMA_API 35 depends on ISA && ISA_DMA_API && !PPC32
36 ---help--- 36 ---help---
37 If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet 37 If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
38 network card, say Y here. 38 network card, say Y here.
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 9fee7c83ef9f..f2f0264c58ba 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -29,8 +29,8 @@ config PCMCIA_AXNET
29 called axnet_cs. If unsure, say N. 29 called axnet_cs. If unsure, say N.
30 30
31config AX88796 31config AX88796
32 tristate "ASIX AX88796 NE2000 clone support" 32 tristate "ASIX AX88796 NE2000 clone support" if !ZORRO
33 depends on (ARM || MIPS || SUPERH) 33 depends on (ARM || MIPS || SUPERH || ZORRO || COMPILE_TEST)
34 select CRC32 34 select CRC32
35 select PHYLIB 35 select PHYLIB
36 select MDIO_BITBANG 36 select MDIO_BITBANG
@@ -45,6 +45,19 @@ config AX88796_93CX6
45 ---help--- 45 ---help---
46 Select this if your platform comes with an external 93CX6 eeprom. 46 Select this if your platform comes with an external 93CX6 eeprom.
47 47
48config XSURF100
49 tristate "Amiga XSurf 100 AX88796/NE2000 clone support"
50 depends on ZORRO
51 select AX88796
52 select ASIX_PHY
53 help
54 This driver is for the Individual Computers X-Surf 100 Ethernet
55 card (based on the Asix AX88796 chip). If you have such a card,
56 say Y. Otherwise, say N.
57
58 To compile this driver as a module, choose M here: the module
59 will be called xsurf100.
60
48config HYDRA 61config HYDRA
49 tristate "Hydra support" 62 tristate "Hydra support"
50 depends on ZORRO 63 depends on ZORRO
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index 1d650e66cc6e..85c83c566ec6 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -16,4 +16,5 @@ obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
16obj-$(CONFIG_STNIC) += stnic.o 8390.o 16obj-$(CONFIG_STNIC) += stnic.o 8390.o
17obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o 17obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
18obj-$(CONFIG_WD80x3) += wd.o 8390.o 18obj-$(CONFIG_WD80x3) += wd.o 8390.o
19obj-$(CONFIG_XSURF100) += xsurf100.o
19obj-$(CONFIG_ZORRO8390) += zorro8390.o 20obj-$(CONFIG_ZORRO8390) += zorro8390.o
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index da61cf3cb3a9..3dcc61821ed5 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -163,6 +163,21 @@ static void ax_reset_8390(struct net_device *dev)
163 ei_outb(ENISR_RESET, addr + EN0_ISR); /* Ack intr. */ 163 ei_outb(ENISR_RESET, addr + EN0_ISR); /* Ack intr. */
164} 164}
165 165
166/* Wrapper for __ei_interrupt for platforms that have a platform-specific
167 * way to find out whether the interrupt request might be caused by
168 * the ax88796 chip.
169 */
170static irqreturn_t ax_ei_interrupt_filtered(int irq, void *dev_id)
171{
172 struct net_device *dev = dev_id;
173 struct ax_device *ax = to_ax_dev(dev);
174 struct platform_device *pdev = to_platform_device(dev->dev.parent);
175
176 if (!ax->plat->check_irq(pdev))
177 return IRQ_NONE;
178
179 return ax_ei_interrupt(irq, dev_id);
180}
166 181
167static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, 182static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
168 int ring_page) 183 int ring_page)
@@ -362,9 +377,7 @@ static int ax_mii_probe(struct net_device *dev)
362 return ret; 377 return ret;
363 } 378 }
364 379
365 /* mask with MAC supported features */ 380 phy_set_max_speed(phy_dev, SPEED_100);
366 phy_dev->supported &= PHY_BASIC_FEATURES;
367 phy_dev->advertising = phy_dev->supported;
368 381
369 netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", 382 netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
370 phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq); 383 phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq);
@@ -387,6 +400,90 @@ static void ax_phy_switch(struct net_device *dev, int on)
387 ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17)); 400 ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
388} 401}
389 402
403static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
404{
405 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
406
407 if (level)
408 ax->reg_memr |= AX_MEMR_MDC;
409 else
410 ax->reg_memr &= ~AX_MEMR_MDC;
411
412 ei_outb(ax->reg_memr, ax->addr_memr);
413}
414
415static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
416{
417 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
418
419 if (output)
420 ax->reg_memr &= ~AX_MEMR_MDIR;
421 else
422 ax->reg_memr |= AX_MEMR_MDIR;
423
424 ei_outb(ax->reg_memr, ax->addr_memr);
425}
426
427static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
428{
429 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
430
431 if (value)
432 ax->reg_memr |= AX_MEMR_MDO;
433 else
434 ax->reg_memr &= ~AX_MEMR_MDO;
435
436 ei_outb(ax->reg_memr, ax->addr_memr);
437}
438
439static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
440{
441 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
442 int reg_memr = ei_inb(ax->addr_memr);
443
444 return reg_memr & AX_MEMR_MDI ? 1 : 0;
445}
446
447static const struct mdiobb_ops bb_ops = {
448 .owner = THIS_MODULE,
449 .set_mdc = ax_bb_mdc,
450 .set_mdio_dir = ax_bb_dir,
451 .set_mdio_data = ax_bb_set_data,
452 .get_mdio_data = ax_bb_get_data,
453};
454
455static int ax_mii_init(struct net_device *dev)
456{
457 struct platform_device *pdev = to_platform_device(dev->dev.parent);
458 struct ei_device *ei_local = netdev_priv(dev);
459 struct ax_device *ax = to_ax_dev(dev);
460 int err;
461
462 ax->bb_ctrl.ops = &bb_ops;
463 ax->addr_memr = ei_local->mem + AX_MEMR;
464 ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
465 if (!ax->mii_bus) {
466 err = -ENOMEM;
467 goto out;
468 }
469
470 ax->mii_bus->name = "ax88796_mii_bus";
471 ax->mii_bus->parent = dev->dev.parent;
472 snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
473 pdev->name, pdev->id);
474
475 err = mdiobus_register(ax->mii_bus);
476 if (err)
477 goto out_free_mdio_bitbang;
478
479 return 0;
480
481 out_free_mdio_bitbang:
482 free_mdio_bitbang(ax->mii_bus);
483 out:
484 return err;
485}
486
390static int ax_open(struct net_device *dev) 487static int ax_open(struct net_device *dev)
391{ 488{
392 struct ax_device *ax = to_ax_dev(dev); 489 struct ax_device *ax = to_ax_dev(dev);
@@ -394,8 +491,16 @@ static int ax_open(struct net_device *dev)
394 491
395 netdev_dbg(dev, "open\n"); 492 netdev_dbg(dev, "open\n");
396 493
397 ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, 494 ret = ax_mii_init(dev);
398 dev->name, dev); 495 if (ret)
496 goto failed_mii;
497
498 if (ax->plat->check_irq)
499 ret = request_irq(dev->irq, ax_ei_interrupt_filtered,
500 ax->irqflags, dev->name, dev);
501 else
502 ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
503 dev->name, dev);
399 if (ret) 504 if (ret)
400 goto failed_request_irq; 505 goto failed_request_irq;
401 506
@@ -421,6 +526,10 @@ static int ax_open(struct net_device *dev)
421 ax_phy_switch(dev, 0); 526 ax_phy_switch(dev, 0);
422 free_irq(dev->irq, dev); 527 free_irq(dev->irq, dev);
423 failed_request_irq: 528 failed_request_irq:
529 /* unregister mdiobus */
530 mdiobus_unregister(ax->mii_bus);
531 free_mdio_bitbang(ax->mii_bus);
532 failed_mii:
424 return ret; 533 return ret;
425} 534}
426 535
@@ -440,6 +549,9 @@ static int ax_close(struct net_device *dev)
440 phy_disconnect(dev->phydev); 549 phy_disconnect(dev->phydev);
441 550
442 free_irq(dev->irq, dev); 551 free_irq(dev->irq, dev);
552
553 mdiobus_unregister(ax->mii_bus);
554 free_mdio_bitbang(ax->mii_bus);
443 return 0; 555 return 0;
444} 556}
445 557
@@ -539,92 +651,8 @@ static const struct net_device_ops ax_netdev_ops = {
539#endif 651#endif
540}; 652};
541 653
542static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
543{
544 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
545
546 if (level)
547 ax->reg_memr |= AX_MEMR_MDC;
548 else
549 ax->reg_memr &= ~AX_MEMR_MDC;
550
551 ei_outb(ax->reg_memr, ax->addr_memr);
552}
553
554static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
555{
556 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
557
558 if (output)
559 ax->reg_memr &= ~AX_MEMR_MDIR;
560 else
561 ax->reg_memr |= AX_MEMR_MDIR;
562
563 ei_outb(ax->reg_memr, ax->addr_memr);
564}
565
566static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
567{
568 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
569
570 if (value)
571 ax->reg_memr |= AX_MEMR_MDO;
572 else
573 ax->reg_memr &= ~AX_MEMR_MDO;
574
575 ei_outb(ax->reg_memr, ax->addr_memr);
576}
577
578static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
579{
580 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
581 int reg_memr = ei_inb(ax->addr_memr);
582
583 return reg_memr & AX_MEMR_MDI ? 1 : 0;
584}
585
586static const struct mdiobb_ops bb_ops = {
587 .owner = THIS_MODULE,
588 .set_mdc = ax_bb_mdc,
589 .set_mdio_dir = ax_bb_dir,
590 .set_mdio_data = ax_bb_set_data,
591 .get_mdio_data = ax_bb_get_data,
592};
593
594/* setup code */ 654/* setup code */
595 655
596static int ax_mii_init(struct net_device *dev)
597{
598 struct platform_device *pdev = to_platform_device(dev->dev.parent);
599 struct ei_device *ei_local = netdev_priv(dev);
600 struct ax_device *ax = to_ax_dev(dev);
601 int err;
602
603 ax->bb_ctrl.ops = &bb_ops;
604 ax->addr_memr = ei_local->mem + AX_MEMR;
605 ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
606 if (!ax->mii_bus) {
607 err = -ENOMEM;
608 goto out;
609 }
610
611 ax->mii_bus->name = "ax88796_mii_bus";
612 ax->mii_bus->parent = dev->dev.parent;
613 snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
614 pdev->name, pdev->id);
615
616 err = mdiobus_register(ax->mii_bus);
617 if (err)
618 goto out_free_mdio_bitbang;
619
620 return 0;
621
622 out_free_mdio_bitbang:
623 free_mdio_bitbang(ax->mii_bus);
624 out:
625 return err;
626}
627
628static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) 656static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
629{ 657{
630 void __iomem *ioaddr = ei_local->mem; 658 void __iomem *ioaddr = ei_local->mem;
@@ -669,10 +697,16 @@ static int ax_init_dev(struct net_device *dev)
669 if (ax->plat->flags & AXFLG_HAS_EEPROM) { 697 if (ax->plat->flags & AXFLG_HAS_EEPROM) {
670 unsigned char SA_prom[32]; 698 unsigned char SA_prom[32];
671 699
700 ei_outb(6, ioaddr + EN0_RCNTLO);
701 ei_outb(0, ioaddr + EN0_RCNTHI);
702 ei_outb(0, ioaddr + EN0_RSARLO);
703 ei_outb(0, ioaddr + EN0_RSARHI);
704 ei_outb(E8390_RREAD + E8390_START, ioaddr + NE_CMD);
672 for (i = 0; i < sizeof(SA_prom); i += 2) { 705 for (i = 0; i < sizeof(SA_prom); i += 2) {
673 SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT); 706 SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
674 SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT); 707 SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
675 } 708 }
709 ei_outb(ENISR_RDC, ioaddr + EN0_ISR); /* Ack intr. */
676 710
677 if (ax->plat->wordlength == 2) 711 if (ax->plat->wordlength == 2)
678 for (i = 0; i < 16; i++) 712 for (i = 0; i < 16; i++)
@@ -741,18 +775,20 @@ static int ax_init_dev(struct net_device *dev)
741#endif 775#endif
742 776
743 ei_local->reset_8390 = &ax_reset_8390; 777 ei_local->reset_8390 = &ax_reset_8390;
744 ei_local->block_input = &ax_block_input; 778 if (ax->plat->block_input)
745 ei_local->block_output = &ax_block_output; 779 ei_local->block_input = ax->plat->block_input;
780 else
781 ei_local->block_input = &ax_block_input;
782 if (ax->plat->block_output)
783 ei_local->block_output = ax->plat->block_output;
784 else
785 ei_local->block_output = &ax_block_output;
746 ei_local->get_8390_hdr = &ax_get_8390_hdr; 786 ei_local->get_8390_hdr = &ax_get_8390_hdr;
747 ei_local->priv = 0; 787 ei_local->priv = 0;
748 788
749 dev->netdev_ops = &ax_netdev_ops; 789 dev->netdev_ops = &ax_netdev_ops;
750 dev->ethtool_ops = &ax_ethtool_ops; 790 dev->ethtool_ops = &ax_ethtool_ops;
751 791
752 ret = ax_mii_init(dev);
753 if (ret)
754 goto err_out;
755
756 ax_NS8390_init(dev, 0); 792 ax_NS8390_init(dev, 0);
757 793
758 ret = register_netdev(dev); 794 ret = register_netdev(dev);
@@ -777,7 +813,6 @@ static int ax_remove(struct platform_device *pdev)
777 struct resource *mem; 813 struct resource *mem;
778 814
779 unregister_netdev(dev); 815 unregister_netdev(dev);
780 free_irq(dev->irq, dev);
781 816
782 iounmap(ei_local->mem); 817 iounmap(ei_local->mem);
783 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 818 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -789,6 +824,7 @@ static int ax_remove(struct platform_device *pdev)
789 release_mem_region(mem->start, resource_size(mem)); 824 release_mem_region(mem->start, resource_size(mem));
790 } 825 }
791 826
827 platform_set_drvdata(pdev, NULL);
792 free_netdev(dev); 828 free_netdev(dev);
793 829
794 return 0; 830 return 0;
@@ -835,6 +871,9 @@ static int ax_probe(struct platform_device *pdev)
835 dev->irq = irq->start; 871 dev->irq = irq->start;
836 ax->irqflags = irq->flags & IRQF_TRIGGER_MASK; 872 ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
837 873
874 if (irq->flags & IORESOURCE_IRQ_SHAREABLE)
875 ax->irqflags |= IRQF_SHARED;
876
838 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 877 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
839 if (!mem) { 878 if (!mem) {
840 dev_err(&pdev->dev, "no MEM specified\n"); 879 dev_err(&pdev->dev, "no MEM specified\n");
@@ -919,6 +958,7 @@ static int ax_probe(struct platform_device *pdev)
919 release_mem_region(mem->start, mem_size); 958 release_mem_region(mem->start, mem_size);
920 959
921 exit_mem: 960 exit_mem:
961 platform_set_drvdata(pdev, NULL);
922 free_netdev(dev); 962 free_netdev(dev);
923 963
924 return ret; 964 return ret;
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index d422a124cd7c..0b6bbf63f7ca 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -610,6 +610,7 @@ static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
610 switch (cmd) { 610 switch (cmd) {
611 case SIOCGMIIPHY: 611 case SIOCGMIIPHY:
612 data->phy_id = info->phy_id; 612 data->phy_id = info->phy_id;
613 /* Fall through */
613 case SIOCGMIIREG: /* Read MII PHY register. */ 614 case SIOCGMIIREG: /* Read MII PHY register. */
614 data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); 615 data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
615 return 0; 616 return 0;
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 32e9627e3880..77191a281866 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -564,26 +564,29 @@ static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
564 sizeof(info->bus_info)); 564 sizeof(info->bus_info));
565} 565}
566 566
567static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 567static int etherh_get_link_ksettings(struct net_device *dev,
568 struct ethtool_link_ksettings *cmd)
568{ 569{
569 cmd->supported = etherh_priv(dev)->supported; 570 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
570 ethtool_cmd_speed_set(cmd, SPEED_10); 571 etherh_priv(dev)->supported);
571 cmd->duplex = DUPLEX_HALF; 572 cmd->base.speed = SPEED_10;
572 cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC; 573 cmd->base.duplex = DUPLEX_HALF;
573 cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ? 574 cmd->base.port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC;
574 AUTONEG_ENABLE : AUTONEG_DISABLE); 575 cmd->base.autoneg = (dev->flags & IFF_AUTOMEDIA ? AUTONEG_ENABLE :
576 AUTONEG_DISABLE);
575 return 0; 577 return 0;
576} 578}
577 579
578static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 580static int etherh_set_link_ksettings(struct net_device *dev,
581 const struct ethtool_link_ksettings *cmd)
579{ 582{
580 switch (cmd->autoneg) { 583 switch (cmd->base.autoneg) {
581 case AUTONEG_ENABLE: 584 case AUTONEG_ENABLE:
582 dev->flags |= IFF_AUTOMEDIA; 585 dev->flags |= IFF_AUTOMEDIA;
583 break; 586 break;
584 587
585 case AUTONEG_DISABLE: 588 case AUTONEG_DISABLE:
586 switch (cmd->port) { 589 switch (cmd->base.port) {
587 case PORT_TP: 590 case PORT_TP:
588 dev->if_port = IF_PORT_10BASET; 591 dev->if_port = IF_PORT_10BASET;
589 break; 592 break;
@@ -622,12 +625,12 @@ static void etherh_set_msglevel(struct net_device *dev, u32 v)
622} 625}
623 626
624static const struct ethtool_ops etherh_ethtool_ops = { 627static const struct ethtool_ops etherh_ethtool_ops = {
625 .get_settings = etherh_get_settings, 628 .get_drvinfo = etherh_get_drvinfo,
626 .set_settings = etherh_set_settings, 629 .get_ts_info = ethtool_op_get_ts_info,
627 .get_drvinfo = etherh_get_drvinfo, 630 .get_msglevel = etherh_get_msglevel,
628 .get_ts_info = ethtool_op_get_ts_info, 631 .set_msglevel = etherh_set_msglevel,
629 .get_msglevel = etherh_get_msglevel, 632 .get_link_ksettings = etherh_get_link_ksettings,
630 .set_msglevel = etherh_set_msglevel, 633 .set_link_ksettings = etherh_set_link_ksettings,
631}; 634};
632 635
633static const struct net_device_ops etherh_netdev_ops = { 636static const struct net_device_ops etherh_netdev_ops = {
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index b6d735bf8011..342ae08ec3c2 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -153,9 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
153static void dayna_block_output(struct net_device *dev, int count, 153static void dayna_block_output(struct net_device *dev, int count,
154 const unsigned char *buf, int start_page); 154 const unsigned char *buf, int start_page);
155 155
156#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
157#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
158
159#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c)) 156#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
160 157
161/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ 158/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
@@ -239,7 +236,7 @@ static enum mac8390_access mac8390_testio(unsigned long membase)
239 unsigned long outdata = 0xA5A0B5B0; 236 unsigned long outdata = 0xA5A0B5B0;
240 unsigned long indata = 0x00000000; 237 unsigned long indata = 0x00000000;
241 /* Try writing 32 bits */ 238 /* Try writing 32 bits */
242 memcpy_toio(membase, &outdata, 4); 239 memcpy_toio((void __iomem *)membase, &outdata, 4);
243 /* Now compare them */ 240 /* Now compare them */
244 if (memcmp_withio(&outdata, membase, 4) == 0) 241 if (memcmp_withio(&outdata, membase, 4) == 0)
245 return ACCESS_32; 242 return ACCESS_32;
@@ -711,7 +708,7 @@ static void sane_get_8390_hdr(struct net_device *dev,
711 struct e8390_pkt_hdr *hdr, int ring_page) 708 struct e8390_pkt_hdr *hdr, int ring_page)
712{ 709{
713 unsigned long hdr_start = (ring_page - WD_START_PG)<<8; 710 unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
714 memcpy_fromio(hdr, dev->mem_start + hdr_start, 4); 711 memcpy_fromio(hdr, (void __iomem *)dev->mem_start + hdr_start, 4);
715 /* Fix endianness */ 712 /* Fix endianness */
716 hdr->count = swab16(hdr->count); 713 hdr->count = swab16(hdr->count);
717} 714}
@@ -725,13 +722,16 @@ static void sane_block_input(struct net_device *dev, int count,
725 if (xfer_start + count > ei_status.rmem_end) { 722 if (xfer_start + count > ei_status.rmem_end) {
726 /* We must wrap the input move. */ 723 /* We must wrap the input move. */
727 int semi_count = ei_status.rmem_end - xfer_start; 724 int semi_count = ei_status.rmem_end - xfer_start;
728 memcpy_fromio(skb->data, dev->mem_start + xfer_base, 725 memcpy_fromio(skb->data,
726 (void __iomem *)dev->mem_start + xfer_base,
729 semi_count); 727 semi_count);
730 count -= semi_count; 728 count -= semi_count;
731 memcpy_fromio(skb->data + semi_count, ei_status.rmem_start, 729 memcpy_fromio(skb->data + semi_count,
732 count); 730 (void __iomem *)ei_status.rmem_start, count);
733 } else { 731 } else {
734 memcpy_fromio(skb->data, dev->mem_start + xfer_base, count); 732 memcpy_fromio(skb->data,
733 (void __iomem *)dev->mem_start + xfer_base,
734 count);
735 } 735 }
736} 736}
737 737
@@ -740,7 +740,7 @@ static void sane_block_output(struct net_device *dev, int count,
740{ 740{
741 long shmem = (start_page - WD_START_PG)<<8; 741 long shmem = (start_page - WD_START_PG)<<8;
742 742
743 memcpy_toio(dev->mem_start + shmem, buf, count); 743 memcpy_toio((void __iomem *)dev->mem_start + shmem, buf, count);
744} 744}
745 745
746/* dayna block input/output */ 746/* dayna block input/output */
diff --git a/drivers/net/ethernet/8390/xsurf100.c b/drivers/net/ethernet/8390/xsurf100.c
new file mode 100644
index 000000000000..e2c963821ffe
--- /dev/null
+++ b/drivers/net/ethernet/8390/xsurf100.c
@@ -0,0 +1,382 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/module.h>
3#include <linux/netdevice.h>
4#include <linux/platform_device.h>
5#include <linux/zorro.h>
6#include <net/ax88796.h>
7#include <asm/amigaints.h>
8
9#define ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100 \
10 ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x64, 0)
11
12#define XS100_IRQSTATUS_BASE 0x40
13#define XS100_8390_BASE 0x800
14
15/* Longword-access area. Translated to 2 16-bit access cycles by the
16 * X-Surf 100 FPGA
17 */
18#define XS100_8390_DATA32_BASE 0x8000
19#define XS100_8390_DATA32_SIZE 0x2000
20/* Sub-Areas for fast data register access; addresses relative to area begin */
21#define XS100_8390_DATA_READ32_BASE 0x0880
22#define XS100_8390_DATA_WRITE32_BASE 0x0C80
23#define XS100_8390_DATA_AREA_SIZE 0x80
24
25#define __NS8390_init ax_NS8390_init
26
27/* force unsigned long back to 'void __iomem *' */
28#define ax_convert_addr(_a) ((void __force __iomem *)(_a))
29
30#define ei_inb(_a) z_readb(ax_convert_addr(_a))
31#define ei_outb(_v, _a) z_writeb(_v, ax_convert_addr(_a))
32
33#define ei_inw(_a) z_readw(ax_convert_addr(_a))
34#define ei_outw(_v, _a) z_writew(_v, ax_convert_addr(_a))
35
36#define ei_inb_p(_a) ei_inb(_a)
37#define ei_outb_p(_v, _a) ei_outb(_v, _a)
38
39/* define EI_SHIFT() to take into account our register offsets */
40#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
41
42/* Ensure we have our RCR base value */
43#define AX88796_PLATFORM
44
45static unsigned char version[] =
46 "ax88796.c: Copyright 2005,2007 Simtec Electronics\n";
47
48#include "lib8390.c"
49
50/* from ne.c */
51#define NE_CMD EI_SHIFT(0x00)
52#define NE_RESET EI_SHIFT(0x1f)
53#define NE_DATAPORT EI_SHIFT(0x10)
54
55struct xsurf100_ax_plat_data {
56 struct ax_plat_data ax;
57 void __iomem *base_regs;
58 void __iomem *data_area;
59};
60
61static int is_xsurf100_network_irq(struct platform_device *pdev)
62{
63 struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);
64
65 return (readw(xs100->base_regs + XS100_IRQSTATUS_BASE) & 0xaaaa) != 0;
66}
67
68/* These functions guarantee that the iomem is accessed with 32 bit
69 * cycles only. z_memcpy_fromio / z_memcpy_toio don't
70 */
71static void z_memcpy_fromio32(void *dst, const void __iomem *src, size_t bytes)
72{
73 while (bytes > 32) {
74 asm __volatile__
75 ("movem.l (%0)+,%%d0-%%d7\n"
76 "movem.l %%d0-%%d7,(%1)\n"
77 "adda.l #32,%1" : "=a"(src), "=a"(dst)
78 : "0"(src), "1"(dst) : "d0", "d1", "d2", "d3", "d4",
79 "d5", "d6", "d7", "memory");
80 bytes -= 32;
81 }
82 while (bytes) {
83 *(uint32_t *)dst = z_readl(src);
84 src += 4;
85 dst += 4;
86 bytes -= 4;
87 }
88}
89
90static void z_memcpy_toio32(void __iomem *dst, const void *src, size_t bytes)
91{
92 while (bytes) {
93 z_writel(*(const uint32_t *)src, dst);
94 src += 4;
95 dst += 4;
96 bytes -= 4;
97 }
98}
99
100static void xs100_write(struct net_device *dev, const void *src,
101 unsigned int count)
102{
103 struct ei_device *ei_local = netdev_priv(dev);
104 struct platform_device *pdev = to_platform_device(dev->dev.parent);
105 struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);
106
107 /* copy whole blocks */
108 while (count > XS100_8390_DATA_AREA_SIZE) {
109 z_memcpy_toio32(xs100->data_area +
110 XS100_8390_DATA_WRITE32_BASE, src,
111 XS100_8390_DATA_AREA_SIZE);
112 src += XS100_8390_DATA_AREA_SIZE;
113 count -= XS100_8390_DATA_AREA_SIZE;
114 }
115 /* copy whole dwords */
116 z_memcpy_toio32(xs100->data_area + XS100_8390_DATA_WRITE32_BASE,
117 src, count & ~3);
118 src += count & ~3;
119 if (count & 2) {
120 ei_outw(*(uint16_t *)src, ei_local->mem + NE_DATAPORT);
121 src += 2;
122 }
123 if (count & 1)
124 ei_outb(*(uint8_t *)src, ei_local->mem + NE_DATAPORT);
125}
126
127static void xs100_read(struct net_device *dev, void *dst, unsigned int count)
128{
129 struct ei_device *ei_local = netdev_priv(dev);
130 struct platform_device *pdev = to_platform_device(dev->dev.parent);
131 struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);
132
133 /* copy whole blocks */
134 while (count > XS100_8390_DATA_AREA_SIZE) {
135 z_memcpy_fromio32(dst, xs100->data_area +
136 XS100_8390_DATA_READ32_BASE,
137 XS100_8390_DATA_AREA_SIZE);
138 dst += XS100_8390_DATA_AREA_SIZE;
139 count -= XS100_8390_DATA_AREA_SIZE;
140 }
141 /* copy whole dwords */
142 z_memcpy_fromio32(dst, xs100->data_area + XS100_8390_DATA_READ32_BASE,
143 count & ~3);
144 dst += count & ~3;
145 if (count & 2) {
146 *(uint16_t *)dst = ei_inw(ei_local->mem + NE_DATAPORT);
147 dst += 2;
148 }
149 if (count & 1)
150 *(uint8_t *)dst = ei_inb(ei_local->mem + NE_DATAPORT);
151}
152
153/* Block input and output, similar to the Crynwr packet driver. If
154 * you are porting to a new ethercard, look at the packet driver
155 * source for hints. The NEx000 doesn't share the on-board packet
156 * memory -- you have to put the packet out through the "remote DMA"
157 * dataport using ei_outb.
158 */
159static void xs100_block_input(struct net_device *dev, int count,
160 struct sk_buff *skb, int ring_offset)
161{
162 struct ei_device *ei_local = netdev_priv(dev);
163 void __iomem *nic_base = ei_local->mem;
164 char *buf = skb->data;
165
166 if (ei_local->dmaing) {
167 netdev_err(dev,
168 "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
169 __func__,
170 ei_local->dmaing, ei_local->irqlock);
171 return;
172 }
173
174 ei_local->dmaing |= 0x01;
175
176 ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
177 ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
178 ei_outb(count >> 8, nic_base + EN0_RCNTHI);
179 ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
180 ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
181 ei_outb(E8390_RREAD + E8390_START, nic_base + NE_CMD);
182
183 xs100_read(dev, buf, count);
184
185 ei_local->dmaing &= ~1;
186}
187
188static void xs100_block_output(struct net_device *dev, int count,
189 const unsigned char *buf, const int start_page)
190{
191 struct ei_device *ei_local = netdev_priv(dev);
192 void __iomem *nic_base = ei_local->mem;
193 unsigned long dma_start;
194
195 /* Round the count up for word writes. Do we need to do this?
196 * What effect will an odd byte count have on the 8390? I
197 * should check someday.
198 */
199 if (ei_local->word16 && (count & 0x01))
200 count++;
201
202 /* This *shouldn't* happen. If it does, it's the last thing
203 * you'll see
204 */
205 if (ei_local->dmaing) {
206 netdev_err(dev,
207 "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
208 __func__,
209 ei_local->dmaing, ei_local->irqlock);
210 return;
211 }
212
213 ei_local->dmaing |= 0x01;
214 /* We should already be in page 0, but to be safe... */
215 ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, nic_base + NE_CMD);
216
217 ei_outb(ENISR_RDC, nic_base + EN0_ISR);
218
219 /* Now the normal output. */
220 ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
221 ei_outb(count >> 8, nic_base + EN0_RCNTHI);
222 ei_outb(0x00, nic_base + EN0_RSARLO);
223 ei_outb(start_page, nic_base + EN0_RSARHI);
224
225 ei_outb(E8390_RWRITE + E8390_START, nic_base + NE_CMD);
226
227 xs100_write(dev, buf, count);
228
229 dma_start = jiffies;
230
231 while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
232 if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
233 netdev_warn(dev, "timeout waiting for Tx RDC.\n");
234 ei_local->reset_8390(dev);
235 ax_NS8390_init(dev, 1);
236 break;
237 }
238 }
239
240 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
241 ei_local->dmaing &= ~0x01;
242}
243
244static int xsurf100_probe(struct zorro_dev *zdev,
245 const struct zorro_device_id *ent)
246{
247 struct platform_device *pdev;
248 struct xsurf100_ax_plat_data ax88796_data;
249 struct resource res[2] = {
250 DEFINE_RES_NAMED(IRQ_AMIGA_PORTS, 1, NULL,
251 IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE),
252 DEFINE_RES_MEM(zdev->resource.start + XS100_8390_BASE,
253 4 * 0x20)
254 };
255 int reg;
256 /* This table is referenced in the device structure, so it must
257 * outlive the scope of xsurf100_probe.
258 */
259 static u32 reg_offsets[32];
260 int ret = 0;
261
262 /* X-Surf 100 control and 32 bit ring buffer data access areas.
263 * These resources are not used by the ax88796 driver, so must
264 * be requested here and passed via platform data.
265 */
266
267 if (!request_mem_region(zdev->resource.start, 0x100, zdev->name)) {
268 dev_err(&zdev->dev, "cannot reserve X-Surf 100 control registers\n");
269 return -ENXIO;
270 }
271
272 if (!request_mem_region(zdev->resource.start +
273 XS100_8390_DATA32_BASE,
274 XS100_8390_DATA32_SIZE,
275 "X-Surf 100 32-bit data access")) {
276 dev_err(&zdev->dev, "cannot reserve 32-bit area\n");
277 ret = -ENXIO;
278 goto exit_req;
279 }
280
281 for (reg = 0; reg < 0x20; reg++)
282 reg_offsets[reg] = 4 * reg;
283
284 memset(&ax88796_data, 0, sizeof(ax88796_data));
285 ax88796_data.ax.flags = AXFLG_HAS_EEPROM;
286 ax88796_data.ax.wordlength = 2;
287 ax88796_data.ax.dcr_val = 0x48;
288 ax88796_data.ax.rcr_val = 0x40;
289 ax88796_data.ax.reg_offsets = reg_offsets;
290 ax88796_data.ax.check_irq = is_xsurf100_network_irq;
291 ax88796_data.base_regs = ioremap(zdev->resource.start, 0x100);
292
293 /* error handling for ioremap regs */
294 if (!ax88796_data.base_regs) {
295 dev_err(&zdev->dev, "Cannot ioremap area %pR (registers)\n",
296 &zdev->resource);
297
298 ret = -ENXIO;
299 goto exit_req2;
300 }
301
302 ax88796_data.data_area = ioremap(zdev->resource.start +
303 XS100_8390_DATA32_BASE, XS100_8390_DATA32_SIZE);
304
305 /* error handling for ioremap data */
306 if (!ax88796_data.data_area) {
307 dev_err(&zdev->dev,
308 "Cannot ioremap area %pR offset %x (32-bit access)\n",
309 &zdev->resource, XS100_8390_DATA32_BASE);
310
311 ret = -ENXIO;
312 goto exit_mem;
313 }
314
315 ax88796_data.ax.block_output = xs100_block_output;
316 ax88796_data.ax.block_input = xs100_block_input;
317
318 pdev = platform_device_register_resndata(&zdev->dev, "ax88796",
319 zdev->slotaddr, res, 2,
320 &ax88796_data,
321 sizeof(ax88796_data));
322
323 if (IS_ERR(pdev)) {
324 dev_err(&zdev->dev, "cannot register platform device\n");
325 ret = -ENXIO;
326 goto exit_mem2;
327 }
328
329 zorro_set_drvdata(zdev, pdev);
330
331 if (!ret)
332 return 0;
333
334 exit_mem2:
335 iounmap(ax88796_data.data_area);
336
337 exit_mem:
338 iounmap(ax88796_data.base_regs);
339
340 exit_req2:
341 release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE,
342 XS100_8390_DATA32_SIZE);
343
344 exit_req:
345 release_mem_region(zdev->resource.start, 0x100);
346
347 return ret;
348}
349
350static void xsurf100_remove(struct zorro_dev *zdev)
351{
352 struct platform_device *pdev = zorro_get_drvdata(zdev);
353 struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);
354
355 platform_device_unregister(pdev);
356
357 iounmap(xs100->base_regs);
358 release_mem_region(zdev->resource.start, 0x100);
359 iounmap(xs100->data_area);
360 release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE,
361 XS100_8390_DATA32_SIZE);
362}
363
364static const struct zorro_device_id xsurf100_zorro_tbl[] = {
365 { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100, },
366 { 0 }
367};
368
369MODULE_DEVICE_TABLE(zorro, xsurf100_zorro_tbl);
370
371static struct zorro_driver xsurf100_driver = {
372 .name = "xsurf100",
373 .id_table = xsurf100_zorro_tbl,
374 .probe = xsurf100_probe,
375 .remove = xsurf100_remove,
376};
377
378module_driver(xsurf100_driver, zorro_register_driver, zorro_unregister_driver);
379
380MODULE_DESCRIPTION("X-Surf 100 driver");
381MODULE_AUTHOR("Michael Karcher <kernel@mkarcher.dialup.fu-berlin.de>");
382MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 603a5704dab8..885e00d17807 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -33,9 +33,9 @@ source "drivers/net/ethernet/aquantia/Kconfig"
33source "drivers/net/ethernet/arc/Kconfig" 33source "drivers/net/ethernet/arc/Kconfig"
34source "drivers/net/ethernet/atheros/Kconfig" 34source "drivers/net/ethernet/atheros/Kconfig"
35source "drivers/net/ethernet/aurora/Kconfig" 35source "drivers/net/ethernet/aurora/Kconfig"
36source "drivers/net/ethernet/cadence/Kconfig"
37source "drivers/net/ethernet/broadcom/Kconfig" 36source "drivers/net/ethernet/broadcom/Kconfig"
38source "drivers/net/ethernet/brocade/Kconfig" 37source "drivers/net/ethernet/brocade/Kconfig"
38source "drivers/net/ethernet/cadence/Kconfig"
39source "drivers/net/ethernet/calxeda/Kconfig" 39source "drivers/net/ethernet/calxeda/Kconfig"
40source "drivers/net/ethernet/cavium/Kconfig" 40source "drivers/net/ethernet/cavium/Kconfig"
41source "drivers/net/ethernet/chelsio/Kconfig" 41source "drivers/net/ethernet/chelsio/Kconfig"
@@ -72,16 +72,15 @@ source "drivers/net/ethernet/dec/Kconfig"
72source "drivers/net/ethernet/dlink/Kconfig" 72source "drivers/net/ethernet/dlink/Kconfig"
73source "drivers/net/ethernet/emulex/Kconfig" 73source "drivers/net/ethernet/emulex/Kconfig"
74source "drivers/net/ethernet/ezchip/Kconfig" 74source "drivers/net/ethernet/ezchip/Kconfig"
75source "drivers/net/ethernet/neterion/Kconfig"
76source "drivers/net/ethernet/faraday/Kconfig" 75source "drivers/net/ethernet/faraday/Kconfig"
77source "drivers/net/ethernet/freescale/Kconfig" 76source "drivers/net/ethernet/freescale/Kconfig"
78source "drivers/net/ethernet/fujitsu/Kconfig" 77source "drivers/net/ethernet/fujitsu/Kconfig"
79source "drivers/net/ethernet/hisilicon/Kconfig" 78source "drivers/net/ethernet/hisilicon/Kconfig"
80source "drivers/net/ethernet/hp/Kconfig" 79source "drivers/net/ethernet/hp/Kconfig"
81source "drivers/net/ethernet/huawei/Kconfig" 80source "drivers/net/ethernet/huawei/Kconfig"
81source "drivers/net/ethernet/i825xx/Kconfig"
82source "drivers/net/ethernet/ibm/Kconfig" 82source "drivers/net/ethernet/ibm/Kconfig"
83source "drivers/net/ethernet/intel/Kconfig" 83source "drivers/net/ethernet/intel/Kconfig"
84source "drivers/net/ethernet/i825xx/Kconfig"
85source "drivers/net/ethernet/xscale/Kconfig" 84source "drivers/net/ethernet/xscale/Kconfig"
86 85
87config JME 86config JME
@@ -109,12 +108,20 @@ config LANTIQ_ETOP
109 ---help--- 108 ---help---
110 Support for the MII0 inside the Lantiq SoC 109 Support for the MII0 inside the Lantiq SoC
111 110
111config LANTIQ_XRX200
112 tristate "Lantiq / Intel xRX200 PMAC network driver"
113 depends on SOC_TYPE_XWAY
114 ---help---
115 Support for the PMAC of the Gigabit switch (GSWIP) inside the
116 Lantiq / Intel VRX200 VDSL SoC
117
112source "drivers/net/ethernet/marvell/Kconfig" 118source "drivers/net/ethernet/marvell/Kconfig"
113source "drivers/net/ethernet/mediatek/Kconfig" 119source "drivers/net/ethernet/mediatek/Kconfig"
114source "drivers/net/ethernet/mellanox/Kconfig" 120source "drivers/net/ethernet/mellanox/Kconfig"
115source "drivers/net/ethernet/micrel/Kconfig" 121source "drivers/net/ethernet/micrel/Kconfig"
116source "drivers/net/ethernet/microchip/Kconfig" 122source "drivers/net/ethernet/microchip/Kconfig"
117source "drivers/net/ethernet/moxa/Kconfig" 123source "drivers/net/ethernet/moxa/Kconfig"
124source "drivers/net/ethernet/mscc/Kconfig"
118source "drivers/net/ethernet/myricom/Kconfig" 125source "drivers/net/ethernet/myricom/Kconfig"
119 126
120config FEALNX 127config FEALNX
@@ -127,6 +134,7 @@ config FEALNX
127 cards. <http://www.myson.com.tw/> 134 cards. <http://www.myson.com.tw/>
128 135
129source "drivers/net/ethernet/natsemi/Kconfig" 136source "drivers/net/ethernet/natsemi/Kconfig"
137source "drivers/net/ethernet/neterion/Kconfig"
130source "drivers/net/ethernet/netronome/Kconfig" 138source "drivers/net/ethernet/netronome/Kconfig"
131source "drivers/net/ethernet/ni/Kconfig" 139source "drivers/net/ethernet/ni/Kconfig"
132source "drivers/net/ethernet/8390/Kconfig" 140source "drivers/net/ethernet/8390/Kconfig"
@@ -160,20 +168,21 @@ source "drivers/net/ethernet/packetengines/Kconfig"
160source "drivers/net/ethernet/pasemi/Kconfig" 168source "drivers/net/ethernet/pasemi/Kconfig"
161source "drivers/net/ethernet/qlogic/Kconfig" 169source "drivers/net/ethernet/qlogic/Kconfig"
162source "drivers/net/ethernet/qualcomm/Kconfig" 170source "drivers/net/ethernet/qualcomm/Kconfig"
171source "drivers/net/ethernet/rdc/Kconfig"
163source "drivers/net/ethernet/realtek/Kconfig" 172source "drivers/net/ethernet/realtek/Kconfig"
164source "drivers/net/ethernet/renesas/Kconfig" 173source "drivers/net/ethernet/renesas/Kconfig"
165source "drivers/net/ethernet/rdc/Kconfig"
166source "drivers/net/ethernet/rocker/Kconfig" 174source "drivers/net/ethernet/rocker/Kconfig"
167source "drivers/net/ethernet/samsung/Kconfig" 175source "drivers/net/ethernet/samsung/Kconfig"
168source "drivers/net/ethernet/seeq/Kconfig" 176source "drivers/net/ethernet/seeq/Kconfig"
169source "drivers/net/ethernet/silan/Kconfig"
170source "drivers/net/ethernet/sis/Kconfig"
171source "drivers/net/ethernet/sfc/Kconfig" 177source "drivers/net/ethernet/sfc/Kconfig"
172source "drivers/net/ethernet/sgi/Kconfig" 178source "drivers/net/ethernet/sgi/Kconfig"
179source "drivers/net/ethernet/silan/Kconfig"
180source "drivers/net/ethernet/sis/Kconfig"
173source "drivers/net/ethernet/smsc/Kconfig" 181source "drivers/net/ethernet/smsc/Kconfig"
174source "drivers/net/ethernet/socionext/Kconfig" 182source "drivers/net/ethernet/socionext/Kconfig"
175source "drivers/net/ethernet/stmicro/Kconfig" 183source "drivers/net/ethernet/stmicro/Kconfig"
176source "drivers/net/ethernet/sun/Kconfig" 184source "drivers/net/ethernet/sun/Kconfig"
185source "drivers/net/ethernet/synopsys/Kconfig"
177source "drivers/net/ethernet/tehuti/Kconfig" 186source "drivers/net/ethernet/tehuti/Kconfig"
178source "drivers/net/ethernet/ti/Kconfig" 187source "drivers/net/ethernet/ti/Kconfig"
179source "drivers/net/ethernet/toshiba/Kconfig" 188source "drivers/net/ethernet/toshiba/Kconfig"
@@ -182,6 +191,5 @@ source "drivers/net/ethernet/via/Kconfig"
182source "drivers/net/ethernet/wiznet/Kconfig" 191source "drivers/net/ethernet/wiznet/Kconfig"
183source "drivers/net/ethernet/xilinx/Kconfig" 192source "drivers/net/ethernet/xilinx/Kconfig"
184source "drivers/net/ethernet/xircom/Kconfig" 193source "drivers/net/ethernet/xircom/Kconfig"
185source "drivers/net/ethernet/synopsys/Kconfig"
186 194
187endif # ETHERNET 195endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 2bfd2eea50bf..7b5bf9682066 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
20obj-$(CONFIG_NET_VENDOR_ARC) += arc/ 20obj-$(CONFIG_NET_VENDOR_ARC) += arc/
21obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ 21obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
22obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ 22obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
23obj-$(CONFIG_NET_CADENCE) += cadence/ 23obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
24obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ 24obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
25obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ 25obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
26obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ 26obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
@@ -36,7 +36,6 @@ obj-$(CONFIG_NET_VENDOR_DEC) += dec/
36obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/ 36obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
37obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/ 37obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
38obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/ 38obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
39obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
40obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/ 39obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
41obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/ 40obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
42obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/ 41obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
@@ -50,15 +49,18 @@ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
50obj-$(CONFIG_JME) += jme.o 49obj-$(CONFIG_JME) += jme.o
51obj-$(CONFIG_KORINA) += korina.o 50obj-$(CONFIG_KORINA) += korina.o
52obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o 51obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
52obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o
53obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ 53obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
54obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ 54obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/
55obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ 55obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
56obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ 56obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
57obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ 57obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
58obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
58obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ 59obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
59obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ 60obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
60obj-$(CONFIG_FEALNX) += fealnx.o 61obj-$(CONFIG_FEALNX) += fealnx.o
61obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ 62obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
63obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
62obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/ 64obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
63obj-$(CONFIG_NET_VENDOR_NI) += ni/ 65obj-$(CONFIG_NET_VENDOR_NI) += ni/
64obj-$(CONFIG_NET_NETX) += netx-eth.o 66obj-$(CONFIG_NET_NETX) += netx-eth.o
@@ -67,7 +69,7 @@ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
67obj-$(CONFIG_LPC_ENET) += nxp/ 69obj-$(CONFIG_LPC_ENET) += nxp/
68obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/ 70obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
69obj-$(CONFIG_ETHOC) += ethoc.o 71obj-$(CONFIG_ETHOC) += ethoc.o
70obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/ 72obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/
71obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ 73obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
72obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ 74obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
73obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ 75obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
@@ -79,8 +81,7 @@ obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
79obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ 81obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
80obj-$(CONFIG_NET_VENDOR_SILAN) += silan/ 82obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
81obj-$(CONFIG_NET_VENDOR_SIS) += sis/ 83obj-$(CONFIG_NET_VENDOR_SIS) += sis/
82obj-$(CONFIG_SFC) += sfc/ 84obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/
83obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
84obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ 85obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
85obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ 86obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
86obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/ 87obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 3872ab96b80a..097467f44b0d 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -802,7 +802,7 @@ static int starfire_init_one(struct pci_dev *pdev,
802 int mii_status; 802 int mii_status;
803 for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) { 803 for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
804 mdio_write(dev, phy, MII_BMCR, BMCR_RESET); 804 mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
805 mdelay(100); 805 msleep(100);
806 boguscnt = 1000; 806 boguscnt = 1000;
807 while (--boguscnt > 0) 807 while (--boguscnt > 0)
808 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0) 808 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 4309be3724ad..7c9348a26cbb 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1279,9 +1279,9 @@ static int greth_mdio_probe(struct net_device *dev)
1279 } 1279 }
1280 1280
1281 if (greth->gbit_mac) 1281 if (greth->gbit_mac)
1282 phy->supported &= PHY_GBIT_FEATURES; 1282 phy_set_max_speed(phy, SPEED_1000);
1283 else 1283 else
1284 phy->supported &= PHY_BASIC_FEATURES; 1284 phy_set_max_speed(phy, SPEED_100);
1285 1285
1286 phy->advertising = phy->supported; 1286 phy->advertising = phy->supported;
1287 1287
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 48220b6c600d..ea34bcb868b5 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3258,19 +3258,11 @@ static int et131x_mii_probe(struct net_device *netdev)
3258 return PTR_ERR(phydev); 3258 return PTR_ERR(phydev);
3259 } 3259 }
3260 3260
3261 phydev->supported &= (SUPPORTED_10baseT_Half | 3261 phy_set_max_speed(phydev, SPEED_100);
3262 SUPPORTED_10baseT_Full |
3263 SUPPORTED_100baseT_Half |
3264 SUPPORTED_100baseT_Full |
3265 SUPPORTED_Autoneg |
3266 SUPPORTED_MII |
3267 SUPPORTED_TP);
3268 3262
3269 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) 3263 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
3270 phydev->supported |= SUPPORTED_1000baseT_Half | 3264 phy_set_max_speed(phydev, SPEED_1000);
3271 SUPPORTED_1000baseT_Full;
3272 3265
3273 phydev->advertising = phydev->supported;
3274 phydev->autoneg = AUTONEG_ENABLE; 3266 phydev->autoneg = AUTONEG_ENABLE;
3275 3267
3276 phy_attached_info(phydev); 3268 phy_attached_info(phydev);
diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
index d0c388cfd52f..3add305d34b4 100644
--- a/drivers/net/ethernet/alacritech/slic.h
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -8,7 +8,6 @@
8#include <linux/spinlock_types.h> 8#include <linux/spinlock_types.h>
9#include <linux/dma-mapping.h> 9#include <linux/dma-mapping.h>
10#include <linux/pci.h> 10#include <linux/pci.h>
11#include <linux/netdevice.h>
12#include <linux/list.h> 11#include <linux/list.h>
13#include <linux/u64_stats_sync.h> 12#include <linux/u64_stats_sync.h>
14 13
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 3143de45baaa..e1acafa82214 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -172,8 +172,7 @@ static int emac_mdio_probe(struct net_device *dev)
172 } 172 }
173 173
174 /* mask with MAC supported features */ 174 /* mask with MAC supported features */
175 phydev->supported &= PHY_BASIC_FEATURES; 175 phy_set_max_speed(phydev, SPEED_100);
176 phydev->advertising = phydev->supported;
177 176
178 db->link = 0; 177 db->link = 0;
179 db->speed = 0; 178 db->speed = 0;
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 8f71b79b4949..4f11f98347ed 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -551,6 +551,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
551 ap->name); 551 ap->name);
552 break; 552 break;
553 } 553 }
554 /* Fall through */
554 case PCI_VENDOR_ID_SGI: 555 case PCI_VENDOR_ID_SGI:
555 printk(KERN_INFO "%s: SGI AceNIC ", ap->name); 556 printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
556 break; 557 break;
@@ -1933,7 +1934,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
1933 while (idx != rxretprd) { 1934 while (idx != rxretprd) {
1934 struct ring_info *rip; 1935 struct ring_info *rip;
1935 struct sk_buff *skb; 1936 struct sk_buff *skb;
1936 struct rx_desc *rxdesc, *retdesc; 1937 struct rx_desc *retdesc;
1937 u32 skbidx; 1938 u32 skbidx;
1938 int bd_flags, desc_type, mapsize; 1939 int bd_flags, desc_type, mapsize;
1939 u16 csum; 1940 u16 csum;
@@ -1959,19 +1960,16 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
1959 case 0: 1960 case 0:
1960 rip = &ap->skb->rx_std_skbuff[skbidx]; 1961 rip = &ap->skb->rx_std_skbuff[skbidx];
1961 mapsize = ACE_STD_BUFSIZE; 1962 mapsize = ACE_STD_BUFSIZE;
1962 rxdesc = &ap->rx_std_ring[skbidx];
1963 std_count++; 1963 std_count++;
1964 break; 1964 break;
1965 case BD_FLG_JUMBO: 1965 case BD_FLG_JUMBO:
1966 rip = &ap->skb->rx_jumbo_skbuff[skbidx]; 1966 rip = &ap->skb->rx_jumbo_skbuff[skbidx];
1967 mapsize = ACE_JUMBO_BUFSIZE; 1967 mapsize = ACE_JUMBO_BUFSIZE;
1968 rxdesc = &ap->rx_jumbo_ring[skbidx];
1969 atomic_dec(&ap->cur_jumbo_bufs); 1968 atomic_dec(&ap->cur_jumbo_bufs);
1970 break; 1969 break;
1971 case BD_FLG_MINI: 1970 case BD_FLG_MINI:
1972 rip = &ap->skb->rx_mini_skbuff[skbidx]; 1971 rip = &ap->skb->rx_mini_skbuff[skbidx];
1973 mapsize = ACE_MINI_BUFSIZE; 1972 mapsize = ACE_MINI_BUFSIZE;
1974 rxdesc = &ap->rx_mini_ring[skbidx];
1975 mini_count++; 1973 mini_count++;
1976 break; 1974 break;
1977 default: 1975 default:
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index baca8f704a45..02921d877c08 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -835,13 +835,10 @@ static int init_phy(struct net_device *dev)
835 } 835 }
836 836
837 /* Stop Advertising 1000BASE Capability if interface is not GMII 837 /* Stop Advertising 1000BASE Capability if interface is not GMII
838 * Note: Checkpatch throws CHECKs for the camel case defines below,
839 * it's ok to ignore.
840 */ 838 */
841 if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) || 839 if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
842 (priv->phy_iface == PHY_INTERFACE_MODE_RMII)) 840 (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
843 phydev->advertising &= ~(SUPPORTED_1000baseT_Half | 841 phy_set_max_speed(phydev, SPEED_100);
844 SUPPORTED_1000baseT_Full);
845 842
846 /* Broken HW is sometimes missing the pull-up resistor on the 843 /* Broken HW is sometimes missing the pull-up resistor on the
847 * MDIO line, which results in reads to non-existent devices returning 844 * MDIO line, which results in reads to non-existent devices returning
diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
index 99b30353541a..9e87d7b8360f 100644
--- a/drivers/net/ethernet/amazon/Kconfig
+++ b/drivers/net/ethernet/amazon/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_AMAZON
17 17
18config ENA_ETHERNET 18config ENA_ETHERNET
19 tristate "Elastic Network Adapter (ENA) support" 19 tristate "Elastic Network Adapter (ENA) support"
20 depends on (PCI_MSI && X86) 20 depends on PCI_MSI && !CPU_BIG_ENDIAN
21 ---help--- 21 ---help---
22 This driver supports Elastic Network Adapter (ENA)" 22 This driver supports Elastic Network Adapter (ENA)"
23 23
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 4532e574ebcd..9f80b73f90b1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -32,115 +32,81 @@
32#ifndef _ENA_ADMIN_H_ 32#ifndef _ENA_ADMIN_H_
33#define _ENA_ADMIN_H_ 33#define _ENA_ADMIN_H_
34 34
35enum ena_admin_aq_opcode {
36 ENA_ADMIN_CREATE_SQ = 1,
37
38 ENA_ADMIN_DESTROY_SQ = 2,
39
40 ENA_ADMIN_CREATE_CQ = 3,
41
42 ENA_ADMIN_DESTROY_CQ = 4,
43 35
44 ENA_ADMIN_GET_FEATURE = 8, 36enum ena_admin_aq_opcode {
45 37 ENA_ADMIN_CREATE_SQ = 1,
46 ENA_ADMIN_SET_FEATURE = 9, 38 ENA_ADMIN_DESTROY_SQ = 2,
47 39 ENA_ADMIN_CREATE_CQ = 3,
48 ENA_ADMIN_GET_STATS = 11, 40 ENA_ADMIN_DESTROY_CQ = 4,
41 ENA_ADMIN_GET_FEATURE = 8,
42 ENA_ADMIN_SET_FEATURE = 9,
43 ENA_ADMIN_GET_STATS = 11,
49}; 44};
50 45
51enum ena_admin_aq_completion_status { 46enum ena_admin_aq_completion_status {
52 ENA_ADMIN_SUCCESS = 0, 47 ENA_ADMIN_SUCCESS = 0,
53 48 ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
54 ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, 49 ENA_ADMIN_BAD_OPCODE = 2,
55 50 ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
56 ENA_ADMIN_BAD_OPCODE = 2, 51 ENA_ADMIN_MALFORMED_REQUEST = 4,
57
58 ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
59
60 ENA_ADMIN_MALFORMED_REQUEST = 4,
61
62 /* Additional status is provided in ACQ entry extended_status */ 52 /* Additional status is provided in ACQ entry extended_status */
63 ENA_ADMIN_ILLEGAL_PARAMETER = 5, 53 ENA_ADMIN_ILLEGAL_PARAMETER = 5,
64 54 ENA_ADMIN_UNKNOWN_ERROR = 6,
65 ENA_ADMIN_UNKNOWN_ERROR = 6, 55 ENA_ADMIN_RESOURCE_BUSY = 7,
66}; 56};
67 57
68enum ena_admin_aq_feature_id { 58enum ena_admin_aq_feature_id {
69 ENA_ADMIN_DEVICE_ATTRIBUTES = 1, 59 ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
70 60 ENA_ADMIN_MAX_QUEUES_NUM = 2,
71 ENA_ADMIN_MAX_QUEUES_NUM = 2, 61 ENA_ADMIN_HW_HINTS = 3,
72 62 ENA_ADMIN_LLQ = 4,
73 ENA_ADMIN_HW_HINTS = 3, 63 ENA_ADMIN_RSS_HASH_FUNCTION = 10,
74 64 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
75 ENA_ADMIN_RSS_HASH_FUNCTION = 10, 65 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
76 66 ENA_ADMIN_MTU = 14,
77 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, 67 ENA_ADMIN_RSS_HASH_INPUT = 18,
78 68 ENA_ADMIN_INTERRUPT_MODERATION = 20,
79 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, 69 ENA_ADMIN_AENQ_CONFIG = 26,
80 70 ENA_ADMIN_LINK_CONFIG = 27,
81 ENA_ADMIN_MTU = 14, 71 ENA_ADMIN_HOST_ATTR_CONFIG = 28,
82 72 ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
83 ENA_ADMIN_RSS_HASH_INPUT = 18,
84
85 ENA_ADMIN_INTERRUPT_MODERATION = 20,
86
87 ENA_ADMIN_AENQ_CONFIG = 26,
88
89 ENA_ADMIN_LINK_CONFIG = 27,
90
91 ENA_ADMIN_HOST_ATTR_CONFIG = 28,
92
93 ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
94}; 73};
95 74
96enum ena_admin_placement_policy_type { 75enum ena_admin_placement_policy_type {
97 /* descriptors and headers are in host memory */ 76 /* descriptors and headers are in host memory */
98 ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, 77 ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
99
100 /* descriptors and headers are in device memory (a.k.a Low Latency 78 /* descriptors and headers are in device memory (a.k.a Low Latency
101 * Queue) 79 * Queue)
102 */ 80 */
103 ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, 81 ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
104}; 82};
105 83
106enum ena_admin_link_types { 84enum ena_admin_link_types {
107 ENA_ADMIN_LINK_SPEED_1G = 0x1, 85 ENA_ADMIN_LINK_SPEED_1G = 0x1,
108 86 ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
109 ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, 87 ENA_ADMIN_LINK_SPEED_5G = 0x4,
110 88 ENA_ADMIN_LINK_SPEED_10G = 0x8,
111 ENA_ADMIN_LINK_SPEED_5G = 0x4, 89 ENA_ADMIN_LINK_SPEED_25G = 0x10,
112 90 ENA_ADMIN_LINK_SPEED_40G = 0x20,
113 ENA_ADMIN_LINK_SPEED_10G = 0x8, 91 ENA_ADMIN_LINK_SPEED_50G = 0x40,
114 92 ENA_ADMIN_LINK_SPEED_100G = 0x80,
115 ENA_ADMIN_LINK_SPEED_25G = 0x10, 93 ENA_ADMIN_LINK_SPEED_200G = 0x100,
116 94 ENA_ADMIN_LINK_SPEED_400G = 0x200,
117 ENA_ADMIN_LINK_SPEED_40G = 0x20,
118
119 ENA_ADMIN_LINK_SPEED_50G = 0x40,
120
121 ENA_ADMIN_LINK_SPEED_100G = 0x80,
122
123 ENA_ADMIN_LINK_SPEED_200G = 0x100,
124
125 ENA_ADMIN_LINK_SPEED_400G = 0x200,
126}; 95};
127 96
128enum ena_admin_completion_policy_type { 97enum ena_admin_completion_policy_type {
129 /* completion queue entry for each sq descriptor */ 98 /* completion queue entry for each sq descriptor */
130 ENA_ADMIN_COMPLETION_POLICY_DESC = 0, 99 ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
131
132 /* completion queue entry upon request in sq descriptor */ 100 /* completion queue entry upon request in sq descriptor */
133 ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, 101 ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
134
135 /* current queue head pointer is updated in OS memory upon sq 102 /* current queue head pointer is updated in OS memory upon sq
136 * descriptor request 103 * descriptor request
137 */ 104 */
138 ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, 105 ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
139
140 /* current queue head pointer is updated in OS memory for each sq 106 /* current queue head pointer is updated in OS memory for each sq
141 * descriptor 107 * descriptor
142 */ 108 */
143 ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, 109 ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
144}; 110};
145 111
146/* basic stats return ena_admin_basic_stats while extanded stats return a 112/* basic stats return ena_admin_basic_stats while extanded stats return a
@@ -148,15 +114,13 @@ enum ena_admin_completion_policy_type {
148 * device id 114 * device id
149 */ 115 */
150enum ena_admin_get_stats_type { 116enum ena_admin_get_stats_type {
151 ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, 117 ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
152 118 ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
153 ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
154}; 119};
155 120
156enum ena_admin_get_stats_scope { 121enum ena_admin_get_stats_scope {
157 ENA_ADMIN_SPECIFIC_QUEUE = 0, 122 ENA_ADMIN_SPECIFIC_QUEUE = 0,
158 123 ENA_ADMIN_ETH_TRAFFIC = 1,
159 ENA_ADMIN_ETH_TRAFFIC = 1,
160}; 124};
161 125
162struct ena_admin_aq_common_desc { 126struct ena_admin_aq_common_desc {
@@ -227,7 +191,9 @@ struct ena_admin_acq_common_desc {
227 191
228 u16 extended_status; 192 u16 extended_status;
229 193
230 /* serves as a hint what AQ entries can be revoked */ 194 /* indicates to the driver which AQ entry has been consumed by the
195 * device and could be reused
196 */
231 u16 sq_head_indx; 197 u16 sq_head_indx;
232}; 198};
233 199
@@ -296,9 +262,8 @@ struct ena_admin_aq_create_sq_cmd {
296}; 262};
297 263
298enum ena_admin_sq_direction { 264enum ena_admin_sq_direction {
299 ENA_ADMIN_SQ_DIRECTION_TX = 1, 265 ENA_ADMIN_SQ_DIRECTION_TX = 1,
300 266 ENA_ADMIN_SQ_DIRECTION_RX = 2,
301 ENA_ADMIN_SQ_DIRECTION_RX = 2,
302}; 267};
303 268
304struct ena_admin_acq_create_sq_resp_desc { 269struct ena_admin_acq_create_sq_resp_desc {
@@ -483,8 +448,85 @@ struct ena_admin_device_attr_feature_desc {
483 u32 max_mtu; 448 u32 max_mtu;
484}; 449};
485 450
451enum ena_admin_llq_header_location {
452 /* header is in descriptor list */
453 ENA_ADMIN_INLINE_HEADER = 1,
454 /* header in a separate ring, implies 16B descriptor list entry */
455 ENA_ADMIN_HEADER_RING = 2,
456};
457
458enum ena_admin_llq_ring_entry_size {
459 ENA_ADMIN_LIST_ENTRY_SIZE_128B = 1,
460 ENA_ADMIN_LIST_ENTRY_SIZE_192B = 2,
461 ENA_ADMIN_LIST_ENTRY_SIZE_256B = 4,
462};
463
464enum ena_admin_llq_num_descs_before_header {
465 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0 = 0,
466 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1 = 1,
467 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 = 2,
468 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4 = 4,
469 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8 = 8,
470};
471
472/* packet descriptor list entry always starts with one or more descriptors,
473 * followed by a header. The rest of the descriptors are located in the
474 * beginning of the subsequent entry. Stride refers to how the rest of the
475 * descriptors are placed. This field is relevant only for inline header
476 * mode
477 */
478enum ena_admin_llq_stride_ctrl {
479 ENA_ADMIN_SINGLE_DESC_PER_ENTRY = 1,
480 ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2,
481};
482
483struct ena_admin_feature_llq_desc {
484 u32 max_llq_num;
485
486 u32 max_llq_depth;
487
488 /* specify the header locations the device supports. bitfield of
489 * enum ena_admin_llq_header_location.
490 */
491 u16 header_location_ctrl_supported;
492
493 /* the header location the driver selected to use. */
494 u16 header_location_ctrl_enabled;
495
496 /* if inline header is specified - this is the size of descriptor
497 * list entry. If header in a separate ring is specified - this is
498 * the size of header ring entry. bitfield of enum
499 * ena_admin_llq_ring_entry_size. specify the entry sizes the device
500 * supports
501 */
502 u16 entry_size_ctrl_supported;
503
504 /* the entry size the driver selected to use. */
505 u16 entry_size_ctrl_enabled;
506
507 /* valid only if inline header is specified. First entry associated
508 * with the packet includes descriptors and header. Rest of the
509 * entries occupied by descriptors. This parameter defines the max
510 * number of descriptors precedding the header in the first entry.
511 * The field is bitfield of enum
512 * ena_admin_llq_num_descs_before_header and specify the values the
513 * device supports
514 */
515 u16 desc_num_before_header_supported;
516
517 /* the desire field the driver selected to use */
518 u16 desc_num_before_header_enabled;
519
520 /* valid only if inline was chosen. bitfield of enum
521 * ena_admin_llq_stride_ctrl
522 */
523 u16 descriptors_stride_ctrl_supported;
524
525 /* the stride control the driver selected to use */
526 u16 descriptors_stride_ctrl_enabled;
527};
528
486struct ena_admin_queue_feature_desc { 529struct ena_admin_queue_feature_desc {
487 /* including LLQs */
488 u32 max_sq_num; 530 u32 max_sq_num;
489 531
490 u32 max_sq_depth; 532 u32 max_sq_depth;
@@ -493,9 +535,9 @@ struct ena_admin_queue_feature_desc {
493 535
494 u32 max_cq_depth; 536 u32 max_cq_depth;
495 537
496 u32 max_llq_num; 538 u32 max_legacy_llq_num;
497 539
498 u32 max_llq_depth; 540 u32 max_legacy_llq_depth;
499 541
500 u32 max_header_size; 542 u32 max_header_size;
501 543
@@ -583,9 +625,8 @@ struct ena_admin_feature_offload_desc {
583}; 625};
584 626
585enum ena_admin_hash_functions { 627enum ena_admin_hash_functions {
586 ENA_ADMIN_TOEPLITZ = 1, 628 ENA_ADMIN_TOEPLITZ = 1,
587 629 ENA_ADMIN_CRC32 = 2,
588 ENA_ADMIN_CRC32 = 2,
589}; 630};
590 631
591struct ena_admin_feature_rss_flow_hash_control { 632struct ena_admin_feature_rss_flow_hash_control {
@@ -611,50 +652,35 @@ struct ena_admin_feature_rss_flow_hash_function {
611 652
612/* RSS flow hash protocols */ 653/* RSS flow hash protocols */
613enum ena_admin_flow_hash_proto { 654enum ena_admin_flow_hash_proto {
614 ENA_ADMIN_RSS_TCP4 = 0, 655 ENA_ADMIN_RSS_TCP4 = 0,
615 656 ENA_ADMIN_RSS_UDP4 = 1,
616 ENA_ADMIN_RSS_UDP4 = 1, 657 ENA_ADMIN_RSS_TCP6 = 2,
617 658 ENA_ADMIN_RSS_UDP6 = 3,
618 ENA_ADMIN_RSS_TCP6 = 2, 659 ENA_ADMIN_RSS_IP4 = 4,
619 660 ENA_ADMIN_RSS_IP6 = 5,
620 ENA_ADMIN_RSS_UDP6 = 3, 661 ENA_ADMIN_RSS_IP4_FRAG = 6,
621 662 ENA_ADMIN_RSS_NOT_IP = 7,
622 ENA_ADMIN_RSS_IP4 = 4,
623
624 ENA_ADMIN_RSS_IP6 = 5,
625
626 ENA_ADMIN_RSS_IP4_FRAG = 6,
627
628 ENA_ADMIN_RSS_NOT_IP = 7,
629
630 /* TCPv6 with extension header */ 663 /* TCPv6 with extension header */
631 ENA_ADMIN_RSS_TCP6_EX = 8, 664 ENA_ADMIN_RSS_TCP6_EX = 8,
632
633 /* IPv6 with extension header */ 665 /* IPv6 with extension header */
634 ENA_ADMIN_RSS_IP6_EX = 9, 666 ENA_ADMIN_RSS_IP6_EX = 9,
635 667 ENA_ADMIN_RSS_PROTO_NUM = 16,
636 ENA_ADMIN_RSS_PROTO_NUM = 16,
637}; 668};
638 669
639/* RSS flow hash fields */ 670/* RSS flow hash fields */
640enum ena_admin_flow_hash_fields { 671enum ena_admin_flow_hash_fields {
641 /* Ethernet Dest Addr */ 672 /* Ethernet Dest Addr */
642 ENA_ADMIN_RSS_L2_DA = BIT(0), 673 ENA_ADMIN_RSS_L2_DA = BIT(0),
643
644 /* Ethernet Src Addr */ 674 /* Ethernet Src Addr */
645 ENA_ADMIN_RSS_L2_SA = BIT(1), 675 ENA_ADMIN_RSS_L2_SA = BIT(1),
646
647 /* ipv4/6 Dest Addr */ 676 /* ipv4/6 Dest Addr */
648 ENA_ADMIN_RSS_L3_DA = BIT(2), 677 ENA_ADMIN_RSS_L3_DA = BIT(2),
649
650 /* ipv4/6 Src Addr */ 678 /* ipv4/6 Src Addr */
651 ENA_ADMIN_RSS_L3_SA = BIT(3), 679 ENA_ADMIN_RSS_L3_SA = BIT(3),
652
653 /* tcp/udp Dest Port */ 680 /* tcp/udp Dest Port */
654 ENA_ADMIN_RSS_L4_DP = BIT(4), 681 ENA_ADMIN_RSS_L4_DP = BIT(4),
655
656 /* tcp/udp Src Port */ 682 /* tcp/udp Src Port */
657 ENA_ADMIN_RSS_L4_SP = BIT(5), 683 ENA_ADMIN_RSS_L4_SP = BIT(5),
658}; 684};
659 685
660struct ena_admin_proto_input { 686struct ena_admin_proto_input {
@@ -693,15 +719,13 @@ struct ena_admin_feature_rss_flow_hash_input {
693}; 719};
694 720
695enum ena_admin_os_type { 721enum ena_admin_os_type {
696 ENA_ADMIN_OS_LINUX = 1, 722 ENA_ADMIN_OS_LINUX = 1,
697 723 ENA_ADMIN_OS_WIN = 2,
698 ENA_ADMIN_OS_WIN = 2, 724 ENA_ADMIN_OS_DPDK = 3,
699 725 ENA_ADMIN_OS_FREEBSD = 4,
700 ENA_ADMIN_OS_DPDK = 3, 726 ENA_ADMIN_OS_IPXE = 5,
701 727 ENA_ADMIN_OS_ESXI = 6,
702 ENA_ADMIN_OS_FREEBSD = 4, 728 ENA_ADMIN_OS_GROUPS_NUM = 6,
703
704 ENA_ADMIN_OS_IPXE = 5,
705}; 729};
706 730
707struct ena_admin_host_info { 731struct ena_admin_host_info {
@@ -723,11 +747,27 @@ struct ena_admin_host_info {
723 /* 7:0 : major 747 /* 7:0 : major
724 * 15:8 : minor 748 * 15:8 : minor
725 * 23:16 : sub_minor 749 * 23:16 : sub_minor
750 * 31:24 : module_type
726 */ 751 */
727 u32 driver_version; 752 u32 driver_version;
728 753
729 /* features bitmap */ 754 /* features bitmap */
730 u32 supported_network_features[4]; 755 u32 supported_network_features[2];
756
757 /* ENA spec version of driver */
758 u16 ena_spec_version;
759
760 /* ENA device's Bus, Device and Function
761 * 2:0 : function
762 * 7:3 : device
763 * 15:8 : bus
764 */
765 u16 bdf;
766
767 /* Number of CPUs */
768 u16 num_cpus;
769
770 u16 reserved;
731}; 771};
732 772
733struct ena_admin_rss_ind_table_entry { 773struct ena_admin_rss_ind_table_entry {
@@ -800,6 +840,8 @@ struct ena_admin_get_feat_resp {
800 840
801 struct ena_admin_device_attr_feature_desc dev_attr; 841 struct ena_admin_device_attr_feature_desc dev_attr;
802 842
843 struct ena_admin_feature_llq_desc llq;
844
803 struct ena_admin_queue_feature_desc max_queue; 845 struct ena_admin_queue_feature_desc max_queue;
804 846
805 struct ena_admin_feature_aenq_desc aenq; 847 struct ena_admin_feature_aenq_desc aenq;
@@ -847,6 +889,9 @@ struct ena_admin_set_feat_cmd {
847 889
848 /* rss indirection table */ 890 /* rss indirection table */
849 struct ena_admin_feature_rss_ind_table ind_table; 891 struct ena_admin_feature_rss_ind_table ind_table;
892
893 /* LLQ configuration */
894 struct ena_admin_feature_llq_desc llq;
850 } u; 895 } u;
851}; 896};
852 897
@@ -875,25 +920,18 @@ struct ena_admin_aenq_common_desc {
875 920
876/* asynchronous event notification groups */ 921/* asynchronous event notification groups */
877enum ena_admin_aenq_group { 922enum ena_admin_aenq_group {
878 ENA_ADMIN_LINK_CHANGE = 0, 923 ENA_ADMIN_LINK_CHANGE = 0,
879 924 ENA_ADMIN_FATAL_ERROR = 1,
880 ENA_ADMIN_FATAL_ERROR = 1, 925 ENA_ADMIN_WARNING = 2,
881 926 ENA_ADMIN_NOTIFICATION = 3,
882 ENA_ADMIN_WARNING = 2, 927 ENA_ADMIN_KEEP_ALIVE = 4,
883 928 ENA_ADMIN_AENQ_GROUPS_NUM = 5,
884 ENA_ADMIN_NOTIFICATION = 3,
885
886 ENA_ADMIN_KEEP_ALIVE = 4,
887
888 ENA_ADMIN_AENQ_GROUPS_NUM = 5,
889}; 929};
890 930
891enum ena_admin_aenq_notification_syndrom { 931enum ena_admin_aenq_notification_syndrom {
892 ENA_ADMIN_SUSPEND = 0, 932 ENA_ADMIN_SUSPEND = 0,
893 933 ENA_ADMIN_RESUME = 1,
894 ENA_ADMIN_RESUME = 1, 934 ENA_ADMIN_UPDATE_HINTS = 2,
895
896 ENA_ADMIN_UPDATE_HINTS = 2,
897}; 935};
898 936
899struct ena_admin_aenq_entry { 937struct ena_admin_aenq_entry {
@@ -928,27 +966,27 @@ struct ena_admin_ena_mmio_req_read_less_resp {
928}; 966};
929 967
930/* aq_common_desc */ 968/* aq_common_desc */
931#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) 969#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
932#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) 970#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
933#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 971#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
934#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) 972#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
935#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 973#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
936#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) 974#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
937 975
938/* sq */ 976/* sq */
939#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 977#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
940#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) 978#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
941 979
942/* acq_common_desc */ 980/* acq_common_desc */
943#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) 981#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
944#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) 982#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
945 983
946/* aq_create_sq_cmd */ 984/* aq_create_sq_cmd */
947#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 985#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
948#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) 986#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
949#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) 987#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
950#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 988#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
951#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) 989#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
952#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0) 990#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
953 991
954/* aq_create_cq_cmd */ 992/* aq_create_cq_cmd */
@@ -957,12 +995,12 @@ struct ena_admin_ena_mmio_req_read_less_resp {
957#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) 995#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
958 996
959/* get_set_feature_common_desc */ 997/* get_set_feature_common_desc */
960#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) 998#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
961 999
962/* get_feature_link_desc */ 1000/* get_feature_link_desc */
963#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) 1001#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
964#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 1002#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
965#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) 1003#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
966 1004
967/* feature_offload_desc */ 1005/* feature_offload_desc */
968#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0) 1006#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
@@ -974,19 +1012,19 @@ struct ena_admin_ena_mmio_req_read_less_resp {
974#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3) 1012#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
975#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4 1013#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
976#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4) 1014#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
977#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 1015#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
978#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) 1016#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
979#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 1017#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
980#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) 1018#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
981#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 1019#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
982#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) 1020#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
983#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0) 1021#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
984#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1 1022#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
985#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1) 1023#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
986#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2 1024#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
987#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2) 1025#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
988#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 1026#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
989#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) 1027#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
990 1028
991/* feature_rss_flow_hash_function */ 1029/* feature_rss_flow_hash_function */
992#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0) 1030#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
@@ -994,25 +1032,32 @@ struct ena_admin_ena_mmio_req_read_less_resp {
994 1032
995/* feature_rss_flow_hash_input */ 1033/* feature_rss_flow_hash_input */
996#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1 1034#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
997#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) 1035#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
998#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2 1036#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
999#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) 1037#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
1000#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1 1038#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
1001#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1) 1039#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
1002#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2 1040#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
1003#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2) 1041#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
1004 1042
1005/* host_info */ 1043/* host_info */
1006#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) 1044#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
1007#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 1045#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
1008#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) 1046#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
1009#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 1047#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
1010#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) 1048#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
1049#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24
1050#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24)
1051#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0)
1052#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3
1053#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
1054#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8
1055#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
1011 1056
1012/* aenq_common_desc */ 1057/* aenq_common_desc */
1013#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) 1058#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
1014 1059
1015/* aenq_link_change_desc */ 1060/* aenq_link_change_desc */
1016#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) 1061#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
1017 1062
1018#endif /*_ENA_ADMIN_H_ */ 1063#endif /*_ENA_ADMIN_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 1b9d3130af4d..420cede41ca4 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -41,9 +41,6 @@
41#define ENA_ASYNC_QUEUE_DEPTH 16 41#define ENA_ASYNC_QUEUE_DEPTH 16
42#define ENA_ADMIN_QUEUE_DEPTH 32 42#define ENA_ADMIN_QUEUE_DEPTH 32
43 43
44#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
45 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
46 | (ENA_COMMON_SPEC_VERSION_MINOR))
47 44
48#define ENA_CTRL_MAJOR 0 45#define ENA_CTRL_MAJOR 0
49#define ENA_CTRL_MINOR 0 46#define ENA_CTRL_MINOR 0
@@ -61,6 +58,8 @@
61 58
62#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF 59#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
63 60
61#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
62
64#define ENA_REGS_ADMIN_INTR_MASK 1 63#define ENA_REGS_ADMIN_INTR_MASK 1
65 64
66#define ENA_POLL_MS 5 65#define ENA_POLL_MS 5
@@ -236,7 +235,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
236 tail_masked = admin_queue->sq.tail & queue_size_mask; 235 tail_masked = admin_queue->sq.tail & queue_size_mask;
237 236
238 /* In case of queue FULL */ 237 /* In case of queue FULL */
239 cnt = atomic_read(&admin_queue->outstanding_cmds); 238 cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
240 if (cnt >= admin_queue->q_depth) { 239 if (cnt >= admin_queue->q_depth) {
241 pr_debug("admin queue is full.\n"); 240 pr_debug("admin queue is full.\n");
242 admin_queue->stats.out_of_space++; 241 admin_queue->stats.out_of_space++;
@@ -305,7 +304,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue
305 struct ena_admin_acq_entry *comp, 304 struct ena_admin_acq_entry *comp,
306 size_t comp_size_in_bytes) 305 size_t comp_size_in_bytes)
307{ 306{
308 unsigned long flags; 307 unsigned long flags = 0;
309 struct ena_comp_ctx *comp_ctx; 308 struct ena_comp_ctx *comp_ctx;
310 309
311 spin_lock_irqsave(&admin_queue->q_lock, flags); 310 spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -333,6 +332,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
333 332
334 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); 333 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
335 334
335 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
336 io_sq->desc_entry_size = 336 io_sq->desc_entry_size =
337 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 337 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
338 sizeof(struct ena_eth_io_tx_desc) : 338 sizeof(struct ena_eth_io_tx_desc) :
@@ -354,21 +354,48 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
354 &io_sq->desc_addr.phys_addr, 354 &io_sq->desc_addr.phys_addr,
355 GFP_KERNEL); 355 GFP_KERNEL);
356 } 356 }
357 } else { 357
358 if (!io_sq->desc_addr.virt_addr) {
359 pr_err("memory allocation failed");
360 return -ENOMEM;
361 }
362 }
363
364 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
365 /* Allocate bounce buffers */
366 io_sq->bounce_buf_ctrl.buffer_size =
367 ena_dev->llq_info.desc_list_entry_size;
368 io_sq->bounce_buf_ctrl.buffers_num =
369 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
370 io_sq->bounce_buf_ctrl.next_to_use = 0;
371
372 size = io_sq->bounce_buf_ctrl.buffer_size *
373 io_sq->bounce_buf_ctrl.buffers_num;
374
358 dev_node = dev_to_node(ena_dev->dmadev); 375 dev_node = dev_to_node(ena_dev->dmadev);
359 set_dev_node(ena_dev->dmadev, ctx->numa_node); 376 set_dev_node(ena_dev->dmadev, ctx->numa_node);
360 io_sq->desc_addr.virt_addr = 377 io_sq->bounce_buf_ctrl.base_buffer =
361 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); 378 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
362 set_dev_node(ena_dev->dmadev, dev_node); 379 set_dev_node(ena_dev->dmadev, dev_node);
363 if (!io_sq->desc_addr.virt_addr) { 380 if (!io_sq->bounce_buf_ctrl.base_buffer)
364 io_sq->desc_addr.virt_addr = 381 io_sq->bounce_buf_ctrl.base_buffer =
365 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); 382 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
383
384 if (!io_sq->bounce_buf_ctrl.base_buffer) {
385 pr_err("bounce buffer memory allocation failed");
386 return -ENOMEM;
366 } 387 }
367 }
368 388
369 if (!io_sq->desc_addr.virt_addr) { 389 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
370 pr_err("memory allocation failed"); 390 sizeof(io_sq->llq_info));
371 return -ENOMEM; 391
392 /* Initiate the first bounce buffer */
393 io_sq->llq_buf_ctrl.curr_bounce_buf =
394 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
395 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
396 0x0, io_sq->llq_info.desc_list_entry_size);
397 io_sq->llq_buf_ctrl.descs_left_in_line =
398 io_sq->llq_info.descs_num_before_header;
372 } 399 }
373 400
374 io_sq->tail = 0; 401 io_sq->tail = 0;
@@ -458,12 +485,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
458 cqe = &admin_queue->cq.entries[head_masked]; 485 cqe = &admin_queue->cq.entries[head_masked];
459 486
460 /* Go over all the completions */ 487 /* Go over all the completions */
461 while ((cqe->acq_common_descriptor.flags & 488 while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
462 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { 489 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
463 /* Do not read the rest of the completion entry before the 490 /* Do not read the rest of the completion entry before the
464 * phase bit was validated 491 * phase bit was validated
465 */ 492 */
466 rmb(); 493 dma_rmb();
467 ena_com_handle_single_admin_completion(admin_queue, cqe); 494 ena_com_handle_single_admin_completion(admin_queue, cqe);
468 495
469 head_masked++; 496 head_masked++;
@@ -510,7 +537,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
510static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, 537static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
511 struct ena_com_admin_queue *admin_queue) 538 struct ena_com_admin_queue *admin_queue)
512{ 539{
513 unsigned long flags, timeout; 540 unsigned long flags = 0;
541 unsigned long timeout;
514 int ret; 542 int ret;
515 543
516 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout); 544 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
@@ -556,10 +584,160 @@ err:
556 return ret; 584 return ret;
557} 585}
558 586
587/**
588 * Set the LLQ configurations of the firmware
589 *
590 * The driver provides only the enabled feature values to the device,
591 * which in turn, checks if they are supported.
592 */
593static int ena_com_set_llq(struct ena_com_dev *ena_dev)
594{
595 struct ena_com_admin_queue *admin_queue;
596 struct ena_admin_set_feat_cmd cmd;
597 struct ena_admin_set_feat_resp resp;
598 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
599 int ret;
600
601 memset(&cmd, 0x0, sizeof(cmd));
602 admin_queue = &ena_dev->admin_queue;
603
604 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
605 cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
606
607 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
608 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
609 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
610 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
611
612 ret = ena_com_execute_admin_command(admin_queue,
613 (struct ena_admin_aq_entry *)&cmd,
614 sizeof(cmd),
615 (struct ena_admin_acq_entry *)&resp,
616 sizeof(resp));
617
618 if (unlikely(ret))
619 pr_err("Failed to set LLQ configurations: %d\n", ret);
620
621 return ret;
622}
623
624static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
625 struct ena_admin_feature_llq_desc *llq_features,
626 struct ena_llq_configurations *llq_default_cfg)
627{
628 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
629 u16 supported_feat;
630 int rc;
631
632 memset(llq_info, 0, sizeof(*llq_info));
633
634 supported_feat = llq_features->header_location_ctrl_supported;
635
636 if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
637 llq_info->header_location_ctrl =
638 llq_default_cfg->llq_header_location;
639 } else {
640 pr_err("Invalid header location control, supported: 0x%x\n",
641 supported_feat);
642 return -EINVAL;
643 }
644
645 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
646 supported_feat = llq_features->descriptors_stride_ctrl_supported;
647 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
648 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
649 } else {
650 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
651 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
652 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
653 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
654 } else {
655 pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
656 supported_feat);
657 return -EINVAL;
658 }
659
660 pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
661 llq_default_cfg->llq_stride_ctrl, supported_feat,
662 llq_info->desc_stride_ctrl);
663 }
664 } else {
665 llq_info->desc_stride_ctrl = 0;
666 }
667
668 supported_feat = llq_features->entry_size_ctrl_supported;
669 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
670 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
671 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
672 } else {
673 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
674 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
675 llq_info->desc_list_entry_size = 128;
676 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
677 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
678 llq_info->desc_list_entry_size = 192;
679 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
680 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
681 llq_info->desc_list_entry_size = 256;
682 } else {
683 pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
684 supported_feat);
685 return -EINVAL;
686 }
687
688 pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
689 llq_default_cfg->llq_ring_entry_size, supported_feat,
690 llq_info->desc_list_entry_size);
691 }
692 if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
693 /* The desc list entry size should be whole multiply of 8
694 * This requirement comes from __iowrite64_copy()
695 */
696 pr_err("illegal entry size %d\n",
697 llq_info->desc_list_entry_size);
698 return -EINVAL;
699 }
700
701 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
702 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
703 sizeof(struct ena_eth_io_tx_desc);
704 else
705 llq_info->descs_per_entry = 1;
706
707 supported_feat = llq_features->desc_num_before_header_supported;
708 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
709 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
710 } else {
711 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
712 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
713 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
714 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
715 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
716 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
717 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
718 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
719 } else {
720 pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
721 supported_feat);
722 return -EINVAL;
723 }
724
725 pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
726 llq_default_cfg->llq_num_decs_before_header,
727 supported_feat, llq_info->descs_num_before_header);
728 }
729
730 rc = ena_com_set_llq(ena_dev);
731 if (rc)
732 pr_err("Cannot set LLQ configuration: %d\n", rc);
733
734 return 0;
735}
736
559static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, 737static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
560 struct ena_com_admin_queue *admin_queue) 738 struct ena_com_admin_queue *admin_queue)
561{ 739{
562 unsigned long flags; 740 unsigned long flags = 0;
563 int ret; 741 int ret;
564 742
565 wait_for_completion_timeout(&comp_ctx->wait_event, 743 wait_for_completion_timeout(&comp_ctx->wait_event,
@@ -605,7 +783,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
605 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = 783 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
606 mmio_read->read_resp; 784 mmio_read->read_resp;
607 u32 mmio_read_reg, ret, i; 785 u32 mmio_read_reg, ret, i;
608 unsigned long flags; 786 unsigned long flags = 0;
609 u32 timeout = mmio_read->reg_read_to; 787 u32 timeout = mmio_read->reg_read_to;
610 788
611 might_sleep(); 789 might_sleep();
@@ -626,17 +804,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
626 mmio_read_reg |= mmio_read->seq_num & 804 mmio_read_reg |= mmio_read->seq_num &
627 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; 805 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
628 806
629 /* make sure read_resp->req_id get updated before the hw can write 807 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
630 * there
631 */
632 wmb();
633
634 writel_relaxed(mmio_read_reg,
635 ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
636 808
637 mmiowb();
638 for (i = 0; i < timeout; i++) { 809 for (i = 0; i < timeout; i++) {
639 if (read_resp->req_id == mmio_read->seq_num) 810 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
640 break; 811 break;
641 812
642 udelay(1); 813 udelay(1);
@@ -734,15 +905,17 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
734 if (io_sq->desc_addr.virt_addr) { 905 if (io_sq->desc_addr.virt_addr) {
735 size = io_sq->desc_entry_size * io_sq->q_depth; 906 size = io_sq->desc_entry_size * io_sq->q_depth;
736 907
737 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 908 dma_free_coherent(ena_dev->dmadev, size,
738 dma_free_coherent(ena_dev->dmadev, size, 909 io_sq->desc_addr.virt_addr,
739 io_sq->desc_addr.virt_addr, 910 io_sq->desc_addr.phys_addr);
740 io_sq->desc_addr.phys_addr);
741 else
742 devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
743 911
744 io_sq->desc_addr.virt_addr = NULL; 912 io_sq->desc_addr.virt_addr = NULL;
745 } 913 }
914
915 if (io_sq->bounce_buf_ctrl.base_buffer) {
916 devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
917 io_sq->bounce_buf_ctrl.base_buffer = NULL;
918 }
746} 919}
747 920
748static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, 921static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
@@ -1254,7 +1427,7 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1254void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) 1427void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1255{ 1428{
1256 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1429 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1257 unsigned long flags; 1430 unsigned long flags = 0;
1258 1431
1259 spin_lock_irqsave(&admin_queue->q_lock, flags); 1432 spin_lock_irqsave(&admin_queue->q_lock, flags);
1260 while (atomic_read(&admin_queue->outstanding_cmds) != 0) { 1433 while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
@@ -1298,7 +1471,7 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1298void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) 1471void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1299{ 1472{
1300 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1473 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1301 unsigned long flags; 1474 unsigned long flags = 0;
1302 1475
1303 spin_lock_irqsave(&admin_queue->q_lock, flags); 1476 spin_lock_irqsave(&admin_queue->q_lock, flags);
1304 ena_dev->admin_queue.running_state = state; 1477 ena_dev->admin_queue.running_state = state;
@@ -1332,7 +1505,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1332 } 1505 }
1333 1506
1334 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { 1507 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1335 pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n", 1508 pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1336 get_resp.u.aenq.supported_groups, groups_flag); 1509 get_resp.u.aenq.supported_groups, groups_flag);
1337 return -EOPNOTSUPP; 1510 return -EOPNOTSUPP;
1338 } 1511 }
@@ -1406,11 +1579,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
1406 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, 1579 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1407 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); 1580 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1408 1581
1409 if (ver < MIN_ENA_VER) {
1410 pr_err("ENA version is lower than the minimal version the driver supports\n");
1411 return -1;
1412 }
1413
1414 pr_info("ena controller version: %d.%d.%d implementation version %d\n", 1582 pr_info("ena controller version: %d.%d.%d implementation version %d\n",
1415 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> 1583 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1416 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, 1584 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
@@ -1485,7 +1653,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1485 sizeof(*mmio_read->read_resp), 1653 sizeof(*mmio_read->read_resp),
1486 &mmio_read->read_resp_dma_addr, GFP_KERNEL); 1654 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1487 if (unlikely(!mmio_read->read_resp)) 1655 if (unlikely(!mmio_read->read_resp))
1488 return -ENOMEM; 1656 goto err;
1489 1657
1490 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 1658 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1491 1659
@@ -1494,6 +1662,10 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1494 mmio_read->readless_supported = true; 1662 mmio_read->readless_supported = true;
1495 1663
1496 return 0; 1664 return 0;
1665
1666err:
1667
1668 return -ENOMEM;
1497} 1669}
1498 1670
1499void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) 1671void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
@@ -1529,8 +1701,7 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1529} 1701}
1530 1702
1531int ena_com_admin_init(struct ena_com_dev *ena_dev, 1703int ena_com_admin_init(struct ena_com_dev *ena_dev,
1532 struct ena_aenq_handlers *aenq_handlers, 1704 struct ena_aenq_handlers *aenq_handlers)
1533 bool init_spinlock)
1534{ 1705{
1535 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1706 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1536 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; 1707 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
@@ -1556,8 +1727,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
1556 1727
1557 atomic_set(&admin_queue->outstanding_cmds, 0); 1728 atomic_set(&admin_queue->outstanding_cmds, 0);
1558 1729
1559 if (init_spinlock) 1730 spin_lock_init(&admin_queue->q_lock);
1560 spin_lock_init(&admin_queue->q_lock);
1561 1731
1562 ret = ena_com_init_comp_ctxt(admin_queue); 1732 ret = ena_com_init_comp_ctxt(admin_queue);
1563 if (ret) 1733 if (ret)
@@ -1754,6 +1924,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1754 else 1924 else
1755 return rc; 1925 return rc;
1756 1926
1927 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
1928 if (!rc)
1929 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
1930 sizeof(get_resp.u.llq));
1931 else if (rc == -EOPNOTSUPP)
1932 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
1933 else
1934 return rc;
1935
1757 return 0; 1936 return 0;
1758} 1937}
1759 1938
@@ -1785,6 +1964,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1785 struct ena_admin_aenq_entry *aenq_e; 1964 struct ena_admin_aenq_entry *aenq_e;
1786 struct ena_admin_aenq_common_desc *aenq_common; 1965 struct ena_admin_aenq_common_desc *aenq_common;
1787 struct ena_com_aenq *aenq = &dev->aenq; 1966 struct ena_com_aenq *aenq = &dev->aenq;
1967 unsigned long long timestamp;
1788 ena_aenq_handler handler_cb; 1968 ena_aenq_handler handler_cb;
1789 u16 masked_head, processed = 0; 1969 u16 masked_head, processed = 0;
1790 u8 phase; 1970 u8 phase;
@@ -1795,12 +1975,18 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1795 aenq_common = &aenq_e->aenq_common_desc; 1975 aenq_common = &aenq_e->aenq_common_desc;
1796 1976
1797 /* Go over all the events */ 1977 /* Go over all the events */
1798 while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == 1978 while ((READ_ONCE(aenq_common->flags) &
1799 phase) { 1979 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
1980 /* Make sure the phase bit (ownership) is as expected before
1981 * reading the rest of the descriptor.
1982 */
1983 dma_rmb();
1984
1985 timestamp =
1986 (unsigned long long)aenq_common->timestamp_low |
1987 ((unsigned long long)aenq_common->timestamp_high << 32);
1800 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", 1988 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
1801 aenq_common->group, aenq_common->syndrom, 1989 aenq_common->group, aenq_common->syndrom, timestamp);
1802 (u64)aenq_common->timestamp_low +
1803 ((u64)aenq_common->timestamp_high << 32));
1804 1990
1805 /* Handle specific event*/ 1991 /* Handle specific event*/
1806 handler_cb = ena_com_get_specific_aenq_cb(dev, 1992 handler_cb = ena_com_get_specific_aenq_cb(dev,
@@ -2442,6 +2628,10 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2442 if (unlikely(!host_attr->host_info)) 2628 if (unlikely(!host_attr->host_info))
2443 return -ENOMEM; 2629 return -ENOMEM;
2444 2630
2631 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2632 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2633 (ENA_COMMON_SPEC_VERSION_MINOR));
2634
2445 return 0; 2635 return 0;
2446} 2636}
2447 2637
@@ -2713,3 +2903,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
2713 intr_moder_tbl[level].pkts_per_interval; 2903 intr_moder_tbl[level].pkts_per_interval;
2714 entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval; 2904 entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
2715} 2905}
2906
2907int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2908 struct ena_admin_feature_llq_desc *llq_features,
2909 struct ena_llq_configurations *llq_default_cfg)
2910{
2911 int rc;
2912 int size;
2913
2914 if (!llq_features->max_llq_num) {
2915 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2916 return 0;
2917 }
2918
2919 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2920 if (rc)
2921 return rc;
2922
2923 /* Validate the descriptor is not too big */
2924 size = ena_dev->tx_max_header_size;
2925 size += ena_dev->llq_info.descs_num_before_header *
2926 sizeof(struct ena_eth_io_tx_desc);
2927
2928 if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
2929 pr_err("the size of the LLQ entry is smaller than needed\n");
2930 return -EINVAL;
2931 }
2932
2933 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2934
2935 return 0;
2936}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 7b784f8a06a6..078d6f2b4f39 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -37,6 +37,8 @@
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
39#include <linux/gfp.h> 39#include <linux/gfp.h>
40#include <linux/io.h>
41#include <linux/prefetch.h>
40#include <linux/sched.h> 42#include <linux/sched.h>
41#include <linux/sizes.h> 43#include <linux/sizes.h>
42#include <linux/spinlock.h> 44#include <linux/spinlock.h>
@@ -108,6 +110,14 @@ enum ena_intr_moder_level {
108 ENA_INTR_MAX_NUM_OF_LEVELS, 110 ENA_INTR_MAX_NUM_OF_LEVELS,
109}; 111};
110 112
113struct ena_llq_configurations {
114 enum ena_admin_llq_header_location llq_header_location;
115 enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
116 enum ena_admin_llq_stride_ctrl llq_stride_ctrl;
117 enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
118 u16 llq_ring_entry_size_value;
119};
120
111struct ena_intr_moder_entry { 121struct ena_intr_moder_entry {
112 unsigned int intr_moder_interval; 122 unsigned int intr_moder_interval;
113 unsigned int pkts_per_interval; 123 unsigned int pkts_per_interval;
@@ -142,6 +152,15 @@ struct ena_com_tx_meta {
142 u16 l4_hdr_len; /* In words */ 152 u16 l4_hdr_len; /* In words */
143}; 153};
144 154
155struct ena_com_llq_info {
156 u16 header_location_ctrl;
157 u16 desc_stride_ctrl;
158 u16 desc_list_entry_size_ctrl;
159 u16 desc_list_entry_size;
160 u16 descs_num_before_header;
161 u16 descs_per_entry;
162};
163
145struct ena_com_io_cq { 164struct ena_com_io_cq {
146 struct ena_com_io_desc_addr cdesc_addr; 165 struct ena_com_io_desc_addr cdesc_addr;
147 166
@@ -179,6 +198,20 @@ struct ena_com_io_cq {
179 198
180} ____cacheline_aligned; 199} ____cacheline_aligned;
181 200
201struct ena_com_io_bounce_buffer_control {
202 u8 *base_buffer;
203 u16 next_to_use;
204 u16 buffer_size;
205 u16 buffers_num; /* Must be a power of 2 */
206};
207
208/* This struct is to keep tracking the current location of the next llq entry */
209struct ena_com_llq_pkt_ctrl {
210 u8 *curr_bounce_buf;
211 u16 idx;
212 u16 descs_left_in_line;
213};
214
182struct ena_com_io_sq { 215struct ena_com_io_sq {
183 struct ena_com_io_desc_addr desc_addr; 216 struct ena_com_io_desc_addr desc_addr;
184 217
@@ -190,6 +223,9 @@ struct ena_com_io_sq {
190 223
191 u32 msix_vector; 224 u32 msix_vector;
192 struct ena_com_tx_meta cached_tx_meta; 225 struct ena_com_tx_meta cached_tx_meta;
226 struct ena_com_llq_info llq_info;
227 struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
228 struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
193 229
194 u16 q_depth; 230 u16 q_depth;
195 u16 qid; 231 u16 qid;
@@ -197,6 +233,7 @@ struct ena_com_io_sq {
197 u16 idx; 233 u16 idx;
198 u16 tail; 234 u16 tail;
199 u16 next_to_comp; 235 u16 next_to_comp;
236 u16 llq_last_copy_tail;
200 u32 tx_max_header_size; 237 u32 tx_max_header_size;
201 u8 phase; 238 u8 phase;
202 u8 desc_entry_size; 239 u8 desc_entry_size;
@@ -334,6 +371,8 @@ struct ena_com_dev {
334 u16 intr_delay_resolution; 371 u16 intr_delay_resolution;
335 u32 intr_moder_tx_interval; 372 u32 intr_moder_tx_interval;
336 struct ena_intr_moder_entry *intr_moder_tbl; 373 struct ena_intr_moder_entry *intr_moder_tbl;
374
375 struct ena_com_llq_info llq_info;
337}; 376};
338 377
339struct ena_com_dev_get_features_ctx { 378struct ena_com_dev_get_features_ctx {
@@ -342,6 +381,7 @@ struct ena_com_dev_get_features_ctx {
342 struct ena_admin_feature_aenq_desc aenq; 381 struct ena_admin_feature_aenq_desc aenq;
343 struct ena_admin_feature_offload_desc offload; 382 struct ena_admin_feature_offload_desc offload;
344 struct ena_admin_ena_hw_hints hw_hints; 383 struct ena_admin_ena_hw_hints hw_hints;
384 struct ena_admin_feature_llq_desc llq;
345}; 385};
346 386
347struct ena_com_create_io_ctx { 387struct ena_com_create_io_ctx {
@@ -397,8 +437,6 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
397/* ena_com_admin_init - Init the admin and the async queues 437/* ena_com_admin_init - Init the admin and the async queues
398 * @ena_dev: ENA communication layer struct 438 * @ena_dev: ENA communication layer struct
399 * @aenq_handlers: Those handlers to be called upon event. 439 * @aenq_handlers: Those handlers to be called upon event.
400 * @init_spinlock: Indicate if this method should init the admin spinlock or
401 * the spinlock was init before (for example, in a case of FLR).
402 * 440 *
403 * Initialize the admin submission and completion queues. 441 * Initialize the admin submission and completion queues.
404 * Initialize the asynchronous events notification queues. 442 * Initialize the asynchronous events notification queues.
@@ -406,8 +444,7 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
406 * @return - 0 on success, negative value on failure. 444 * @return - 0 on success, negative value on failure.
407 */ 445 */
408int ena_com_admin_init(struct ena_com_dev *ena_dev, 446int ena_com_admin_init(struct ena_com_dev *ena_dev,
409 struct ena_aenq_handlers *aenq_handlers, 447 struct ena_aenq_handlers *aenq_handlers);
410 bool init_spinlock);
411 448
412/* ena_com_admin_destroy - Destroy the admin and the async events queues. 449/* ena_com_admin_destroy - Destroy the admin and the async events queues.
413 * @ena_dev: ENA communication layer struct 450 * @ena_dev: ENA communication layer struct
@@ -935,6 +972,16 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
935 enum ena_intr_moder_level level, 972 enum ena_intr_moder_level level,
936 struct ena_intr_moder_entry *entry); 973 struct ena_intr_moder_entry *entry);
937 974
975/* ena_com_config_dev_mode - Configure the placement policy of the device.
976 * @ena_dev: ENA communication layer struct
977 * @llq_features: LLQ feature descriptor, retrieve via
978 * ena_com_get_dev_attr_feat.
979 * @ena_llq_config: The default driver LLQ parameters configurations
980 */
981int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
982 struct ena_admin_feature_llq_desc *llq_features,
983 struct ena_llq_configurations *llq_default_config);
984
938static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) 985static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
939{ 986{
940 return ena_dev->adaptive_coalescing; 987 return ena_dev->adaptive_coalescing;
@@ -1044,4 +1091,21 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
1044 intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; 1091 intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
1045} 1092}
1046 1093
1094static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
1095{
1096 u16 size, buffers_num;
1097 u8 *buf;
1098
1099 size = bounce_buf_ctrl->buffer_size;
1100 buffers_num = bounce_buf_ctrl->buffers_num;
1101
1102 buf = bounce_buf_ctrl->base_buffer +
1103 (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
1104
1105 prefetchw(bounce_buf_ctrl->base_buffer +
1106 (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
1107
1108 return buf;
1109}
1110
1047#endif /* !(ENA_COM) */ 1111#endif /* !(ENA_COM) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
index bb8d73676eab..23beb7e7ed7b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -32,8 +32,8 @@
32#ifndef _ENA_COMMON_H_ 32#ifndef _ENA_COMMON_H_
33#define _ENA_COMMON_H_ 33#define _ENA_COMMON_H_
34 34
35#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */ 35#define ENA_COMMON_SPEC_VERSION_MAJOR 2
36#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */ 36#define ENA_COMMON_SPEC_VERSION_MINOR 0
37 37
38/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */ 38/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
39struct ena_common_mem_addr { 39struct ena_common_mem_addr {
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index ea149c134e15..f6c2d3855be8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -51,19 +51,15 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
51 if (desc_phase != expected_phase) 51 if (desc_phase != expected_phase)
52 return NULL; 52 return NULL;
53 53
54 return cdesc; 54 /* Make sure we read the rest of the descriptor after the phase bit
55} 55 * has been read
56 56 */
57static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) 57 dma_rmb();
58{
59 io_cq->head++;
60 58
61 /* Switch phase bit in case of wrap around */ 59 return cdesc;
62 if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
63 io_cq->phase ^= 1;
64} 60}
65 61
66static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) 62static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
67{ 63{
68 u16 tail_masked; 64 u16 tail_masked;
69 u32 offset; 65 u32 offset;
@@ -75,45 +71,159 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
75 return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset); 71 return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
76} 72}
77 73
78static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq) 74static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
75 u8 *bounce_buffer)
79{ 76{
80 u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); 77 struct ena_com_llq_info *llq_info = &io_sq->llq_info;
81 u32 offset = tail_masked * io_sq->desc_entry_size;
82 78
83 /* In case this queue isn't a LLQ */ 79 u16 dst_tail_mask;
84 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 80 u32 dst_offset;
85 return;
86 81
87 memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset, 82 dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
88 io_sq->desc_addr.virt_addr + offset, 83 dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
89 io_sq->desc_entry_size); 84
90} 85 /* Make sure everything was written into the bounce buffer before
86 * writing the bounce buffer to the device
87 */
88 wmb();
89
90 /* The line is completed. Copy it to dev */
91 __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
92 bounce_buffer, (llq_info->desc_list_entry_size) / 8);
91 93
92static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
93{
94 io_sq->tail++; 94 io_sq->tail++;
95 95
96 /* Switch phase bit in case of wrap around */ 96 /* Switch phase bit in case of wrap around */
97 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) 97 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
98 io_sq->phase ^= 1; 98 io_sq->phase ^= 1;
99
100 return 0;
99} 101}
100 102
101static inline int ena_com_write_header(struct ena_com_io_sq *io_sq, 103static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
102 u8 *head_src, u16 header_len) 104 u8 *header_src,
105 u16 header_len)
103{ 106{
104 u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); 107 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
105 u8 __iomem *dev_head_addr = 108 struct ena_com_llq_info *llq_info = &io_sq->llq_info;
106 io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size); 109 u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
110 u16 header_offset;
107 111
108 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 112 if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
109 return 0; 113 return 0;
110 114
111 if (unlikely(!io_sq->header_addr)) { 115 header_offset =
112 pr_err("Push buffer header ptr is NULL\n"); 116 llq_info->descs_num_before_header * io_sq->desc_entry_size;
113 return -EINVAL; 117
118 if (unlikely((header_offset + header_len) >
119 llq_info->desc_list_entry_size)) {
120 pr_err("trying to write header larger than llq entry can accommodate\n");
121 return -EFAULT;
122 }
123
124 if (unlikely(!bounce_buffer)) {
125 pr_err("bounce buffer is NULL\n");
126 return -EFAULT;
127 }
128
129 memcpy(bounce_buffer + header_offset, header_src, header_len);
130
131 return 0;
132}
133
134static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
135{
136 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
137 u8 *bounce_buffer;
138 void *sq_desc;
139
140 bounce_buffer = pkt_ctrl->curr_bounce_buf;
141
142 if (unlikely(!bounce_buffer)) {
143 pr_err("bounce buffer is NULL\n");
144 return NULL;
114 } 145 }
115 146
116 memcpy_toio(dev_head_addr, head_src, header_len); 147 sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
148 pkt_ctrl->idx++;
149 pkt_ctrl->descs_left_in_line--;
150
151 return sq_desc;
152}
153
154static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
155{
156 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
157 struct ena_com_llq_info *llq_info = &io_sq->llq_info;
158 int rc;
159
160 if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
161 return 0;
162
163 /* bounce buffer was used, so write it and get a new one */
164 if (pkt_ctrl->idx) {
165 rc = ena_com_write_bounce_buffer_to_dev(io_sq,
166 pkt_ctrl->curr_bounce_buf);
167 if (unlikely(rc))
168 return rc;
169
170 pkt_ctrl->curr_bounce_buf =
171 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
172 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
173 0x0, llq_info->desc_list_entry_size);
174 }
175
176 pkt_ctrl->idx = 0;
177 pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
178 return 0;
179}
180
181static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
182{
183 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
184 return get_sq_desc_llq(io_sq);
185
186 return get_sq_desc_regular_queue(io_sq);
187}
188
189static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
190{
191 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
192 struct ena_com_llq_info *llq_info = &io_sq->llq_info;
193 int rc;
194
195 if (!pkt_ctrl->descs_left_in_line) {
196 rc = ena_com_write_bounce_buffer_to_dev(io_sq,
197 pkt_ctrl->curr_bounce_buf);
198 if (unlikely(rc))
199 return rc;
200
201 pkt_ctrl->curr_bounce_buf =
202 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
203 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
204 0x0, llq_info->desc_list_entry_size);
205
206 pkt_ctrl->idx = 0;
207 if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
208 pkt_ctrl->descs_left_in_line = 1;
209 else
210 pkt_ctrl->descs_left_in_line =
211 llq_info->desc_list_entry_size / io_sq->desc_entry_size;
212 }
213
214 return 0;
215}
216
217static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
218{
219 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
220 return ena_com_sq_update_llq_tail(io_sq);
221
222 io_sq->tail++;
223
224 /* Switch phase bit in case of wrap around */
225 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
226 io_sq->phase ^= 1;
117 227
118 return 0; 228 return 0;
119} 229}
@@ -181,8 +291,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
181 return false; 291 return false;
182} 292}
183 293
184static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, 294static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
185 struct ena_com_tx_ctx *ena_tx_ctx) 295 struct ena_com_tx_ctx *ena_tx_ctx)
186{ 296{
187 struct ena_eth_io_tx_meta_desc *meta_desc = NULL; 297 struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
188 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 298 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -227,8 +337,7 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i
227 memcpy(&io_sq->cached_tx_meta, ena_meta, 337 memcpy(&io_sq->cached_tx_meta, ena_meta,
228 sizeof(struct ena_com_tx_meta)); 338 sizeof(struct ena_com_tx_meta));
229 339
230 ena_com_copy_curr_sq_desc_to_dev(io_sq); 340 return ena_com_sq_update_tail(io_sq);
231 ena_com_sq_update_tail(io_sq);
232} 341}
233 342
234static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, 343static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
@@ -240,11 +349,14 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
240 (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> 349 (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
241 ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; 350 ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
242 ena_rx_ctx->l3_csum_err = 351 ena_rx_ctx->l3_csum_err =
243 (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> 352 !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
244 ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; 353 ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
245 ena_rx_ctx->l4_csum_err = 354 ena_rx_ctx->l4_csum_err =
246 (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> 355 !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
247 ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; 356 ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
357 ena_rx_ctx->l4_csum_checked =
358 !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
359 ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
248 ena_rx_ctx->hash = cdesc->hash; 360 ena_rx_ctx->hash = cdesc->hash;
249 ena_rx_ctx->frag = 361 ena_rx_ctx->frag =
250 (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> 362 (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
@@ -266,18 +378,19 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
266{ 378{
267 struct ena_eth_io_tx_desc *desc = NULL; 379 struct ena_eth_io_tx_desc *desc = NULL;
268 struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs; 380 struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
269 void *push_header = ena_tx_ctx->push_header; 381 void *buffer_to_push = ena_tx_ctx->push_header;
270 u16 header_len = ena_tx_ctx->header_len; 382 u16 header_len = ena_tx_ctx->header_len;
271 u16 num_bufs = ena_tx_ctx->num_bufs; 383 u16 num_bufs = ena_tx_ctx->num_bufs;
272 int total_desc, i, rc; 384 u16 start_tail = io_sq->tail;
385 int i, rc;
273 bool have_meta; 386 bool have_meta;
274 u64 addr_hi; 387 u64 addr_hi;
275 388
276 WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type"); 389 WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
277 390
278 /* num_bufs +1 for potential meta desc */ 391 /* num_bufs +1 for potential meta desc */
279 if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) { 392 if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
280 pr_err("Not enough space in the tx queue\n"); 393 pr_debug("Not enough space in the tx queue\n");
281 return -ENOMEM; 394 return -ENOMEM;
282 } 395 }
283 396
@@ -287,23 +400,32 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
287 return -EINVAL; 400 return -EINVAL;
288 } 401 }
289 402
290 /* start with pushing the header (if needed) */ 403 if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
291 rc = ena_com_write_header(io_sq, push_header, header_len); 404 !buffer_to_push))
405 return -EINVAL;
406
407 rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
292 if (unlikely(rc)) 408 if (unlikely(rc))
293 return rc; 409 return rc;
294 410
295 have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq, 411 have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
296 ena_tx_ctx); 412 ena_tx_ctx);
297 if (have_meta) 413 if (have_meta) {
298 ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx); 414 rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
415 if (unlikely(rc))
416 return rc;
417 }
299 418
300 /* If the caller doesn't want send packets */ 419 /* If the caller doesn't want to send packets */
301 if (unlikely(!num_bufs && !header_len)) { 420 if (unlikely(!num_bufs && !header_len)) {
302 *nb_hw_desc = have_meta ? 0 : 1; 421 rc = ena_com_close_bounce_buffer(io_sq);
303 return 0; 422 *nb_hw_desc = io_sq->tail - start_tail;
423 return rc;
304 } 424 }
305 425
306 desc = get_sq_desc(io_sq); 426 desc = get_sq_desc(io_sq);
427 if (unlikely(!desc))
428 return -EFAULT;
307 memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); 429 memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
308 430
309 /* Set first desc when we don't have meta descriptor */ 431 /* Set first desc when we don't have meta descriptor */
@@ -355,10 +477,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
355 for (i = 0; i < num_bufs; i++) { 477 for (i = 0; i < num_bufs; i++) {
356 /* The first desc share the same desc as the header */ 478 /* The first desc share the same desc as the header */
357 if (likely(i != 0)) { 479 if (likely(i != 0)) {
358 ena_com_copy_curr_sq_desc_to_dev(io_sq); 480 rc = ena_com_sq_update_tail(io_sq);
359 ena_com_sq_update_tail(io_sq); 481 if (unlikely(rc))
482 return rc;
360 483
361 desc = get_sq_desc(io_sq); 484 desc = get_sq_desc(io_sq);
485 if (unlikely(!desc))
486 return -EFAULT;
487
362 memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); 488 memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
363 489
364 desc->len_ctrl |= (io_sq->phase << 490 desc->len_ctrl |= (io_sq->phase <<
@@ -381,15 +507,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
381 /* set the last desc indicator */ 507 /* set the last desc indicator */
382 desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK; 508 desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
383 509
384 ena_com_copy_curr_sq_desc_to_dev(io_sq); 510 rc = ena_com_sq_update_tail(io_sq);
385 511 if (unlikely(rc))
386 ena_com_sq_update_tail(io_sq); 512 return rc;
387 513
388 total_desc = max_t(u16, num_bufs, 1); 514 rc = ena_com_close_bounce_buffer(io_sq);
389 total_desc += have_meta ? 1 : 0;
390 515
391 *nb_hw_desc = total_desc; 516 *nb_hw_desc = io_sq->tail - start_tail;
392 return 0; 517 return rc;
393} 518}
394 519
395int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, 520int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
@@ -448,15 +573,18 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
448 573
449 WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); 574 WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
450 575
451 if (unlikely(ena_com_sq_empty_space(io_sq) == 0)) 576 if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
452 return -ENOSPC; 577 return -ENOSPC;
453 578
454 desc = get_sq_desc(io_sq); 579 desc = get_sq_desc(io_sq);
580 if (unlikely(!desc))
581 return -EFAULT;
582
455 memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc)); 583 memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
456 584
457 desc->length = ena_buf->len; 585 desc->length = ena_buf->len;
458 586
459 desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK; 587 desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
460 desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK; 588 desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
461 desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK; 589 desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
462 desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; 590 desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
@@ -467,42 +595,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
467 desc->buff_addr_hi = 595 desc->buff_addr_hi =
468 ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); 596 ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
469 597
470 ena_com_sq_update_tail(io_sq); 598 return ena_com_sq_update_tail(io_sq);
471
472 return 0;
473}
474
475int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
476{
477 u8 expected_phase, cdesc_phase;
478 struct ena_eth_io_tx_cdesc *cdesc;
479 u16 masked_head;
480
481 masked_head = io_cq->head & (io_cq->q_depth - 1);
482 expected_phase = io_cq->phase;
483
484 cdesc = (struct ena_eth_io_tx_cdesc *)
485 ((uintptr_t)io_cq->cdesc_addr.virt_addr +
486 (masked_head * io_cq->cdesc_entry_size_in_bytes));
487
488 /* When the current completion descriptor phase isn't the same as the
489 * expected, it mean that the device still didn't update
490 * this completion.
491 */
492 cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
493 if (cdesc_phase != expected_phase)
494 return -EAGAIN;
495
496 if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
497 pr_err("Invalid req id %d\n", cdesc->req_id);
498 return -EINVAL;
499 }
500
501 ena_com_cq_inc_head(io_cq);
502
503 *req_id = READ_ONCE(cdesc->req_id);
504
505 return 0;
506} 599}
507 600
508bool ena_com_cq_empty(struct ena_com_io_cq *io_cq) 601bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 6fdc753d9483..340d02b64ca6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -67,6 +67,7 @@ struct ena_com_rx_ctx {
67 enum ena_eth_io_l4_proto_index l4_proto; 67 enum ena_eth_io_l4_proto_index l4_proto;
68 bool l3_csum_err; 68 bool l3_csum_err;
69 bool l4_csum_err; 69 bool l4_csum_err;
70 u8 l4_csum_checked;
70 /* fragmented packet */ 71 /* fragmented packet */
71 bool frag; 72 bool frag;
72 u32 hash; 73 u32 hash;
@@ -86,8 +87,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
86 struct ena_com_buf *ena_buf, 87 struct ena_com_buf *ena_buf,
87 u16 req_id); 88 u16 req_id);
88 89
89int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
90
91bool ena_com_cq_empty(struct ena_com_io_cq *io_cq); 90bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
92 91
93static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, 92static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
@@ -96,7 +95,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
96 writel(intr_reg->intr_control, io_cq->unmask_reg); 95 writel(intr_reg->intr_control, io_cq->unmask_reg);
97} 96}
98 97
99static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) 98static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
100{ 99{
101 u16 tail, next_to_comp, cnt; 100 u16 tail, next_to_comp, cnt;
102 101
@@ -107,20 +106,33 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
107 return io_sq->q_depth - 1 - cnt; 106 return io_sq->q_depth - 1 - cnt;
108} 107}
109 108
110static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq, 109/* Check if the submission queue has enough space to hold required_buffers */
111 bool relaxed) 110static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
111 u16 required_buffers)
112{ 112{
113 u16 tail; 113 int temp;
114 114
115 tail = io_sq->tail; 115 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
116 return ena_com_free_desc(io_sq) >= required_buffers;
117
118 /* This calculation doesn't need to be 100% accurate. So to reduce
119 * the calculation overhead just Subtract 2 lines from the free descs
120 * (one for the header line and one to compensate the devision
121 * down calculation.
122 */
123 temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
124
125 return ena_com_free_desc(io_sq) > temp;
126}
127
128static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
129{
130 u16 tail = io_sq->tail;
116 131
117 pr_debug("write submission queue doorbell for queue: %d tail: %d\n", 132 pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
118 io_sq->qid, tail); 133 io_sq->qid, tail);
119 134
120 if (relaxed) 135 writel(tail, io_sq->db_addr);
121 writel_relaxed(tail, io_sq->db_addr);
122 else
123 writel(tail, io_sq->db_addr);
124 136
125 return 0; 137 return 0;
126} 138}
@@ -163,4 +175,48 @@ static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
163 io_sq->next_to_comp += elem; 175 io_sq->next_to_comp += elem;
164} 176}
165 177
178static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
179{
180 io_cq->head++;
181
182 /* Switch phase bit in case of wrap around */
183 if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
184 io_cq->phase ^= 1;
185}
186
187static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
188 u16 *req_id)
189{
190 u8 expected_phase, cdesc_phase;
191 struct ena_eth_io_tx_cdesc *cdesc;
192 u16 masked_head;
193
194 masked_head = io_cq->head & (io_cq->q_depth - 1);
195 expected_phase = io_cq->phase;
196
197 cdesc = (struct ena_eth_io_tx_cdesc *)
198 ((uintptr_t)io_cq->cdesc_addr.virt_addr +
199 (masked_head * io_cq->cdesc_entry_size_in_bytes));
200
201 /* When the current completion descriptor phase isn't the same as the
202 * expected, it mean that the device still didn't update
203 * this completion.
204 */
205 cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
206 if (cdesc_phase != expected_phase)
207 return -EAGAIN;
208
209 dma_rmb();
210
211 *req_id = READ_ONCE(cdesc->req_id);
212 if (unlikely(*req_id >= io_cq->q_depth)) {
213 pr_err("Invalid req id %d\n", cdesc->req_id);
214 return -EINVAL;
215 }
216
217 ena_com_cq_inc_head(io_cq);
218
219 return 0;
220}
221
166#endif /* ENA_ETH_COM_H_ */ 222#endif /* ENA_ETH_COM_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
index f320c58793a5..00e0f056a741 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
@@ -33,25 +33,18 @@
33#define _ENA_ETH_IO_H_ 33#define _ENA_ETH_IO_H_
34 34
35enum ena_eth_io_l3_proto_index { 35enum ena_eth_io_l3_proto_index {
36 ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, 36 ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
37 37 ENA_ETH_IO_L3_PROTO_IPV4 = 8,
38 ENA_ETH_IO_L3_PROTO_IPV4 = 8, 38 ENA_ETH_IO_L3_PROTO_IPV6 = 11,
39 39 ENA_ETH_IO_L3_PROTO_FCOE = 21,
40 ENA_ETH_IO_L3_PROTO_IPV6 = 11, 40 ENA_ETH_IO_L3_PROTO_ROCE = 22,
41
42 ENA_ETH_IO_L3_PROTO_FCOE = 21,
43
44 ENA_ETH_IO_L3_PROTO_ROCE = 22,
45}; 41};
46 42
47enum ena_eth_io_l4_proto_index { 43enum ena_eth_io_l4_proto_index {
48 ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, 44 ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
49 45 ENA_ETH_IO_L4_PROTO_TCP = 12,
50 ENA_ETH_IO_L4_PROTO_TCP = 12, 46 ENA_ETH_IO_L4_PROTO_UDP = 13,
51 47 ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
52 ENA_ETH_IO_L4_PROTO_UDP = 13,
53
54 ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
55}; 48};
56 49
57struct ena_eth_io_tx_desc { 50struct ena_eth_io_tx_desc {
@@ -242,9 +235,13 @@ struct ena_eth_io_rx_cdesc_base {
242 * checksum error detected, or, the controller didn't 235 * checksum error detected, or, the controller didn't
243 * validate the checksum. This bit is valid only when 236 * validate the checksum. This bit is valid only when
244 * l4_proto_idx indicates TCP/UDP packet, and, 237 * l4_proto_idx indicates TCP/UDP packet, and,
245 * ipv4_frag is not set 238 * ipv4_frag is not set. This bit is valid only when
239 * l4_csum_checked below is set.
246 * 15 : ipv4_frag - Indicates IPv4 fragmented packet 240 * 15 : ipv4_frag - Indicates IPv4 fragmented packet
247 * 23:16 : reserved16 241 * 16 : l4_csum_checked - L4 checksum was verified
242 * (could be OK or error), when cleared the status of
243 * checksum is unknown
244 * 23:17 : reserved17 - MBZ
248 * 24 : phase 245 * 24 : phase
249 * 25 : l3_csum2 - second checksum engine result 246 * 25 : l3_csum2 - second checksum engine result
250 * 26 : first - Indicates first descriptor in 247 * 26 : first - Indicates first descriptor in
@@ -303,114 +300,116 @@ struct ena_eth_io_numa_node_cfg_reg {
303}; 300};
304 301
305/* tx_desc */ 302/* tx_desc */
306#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) 303#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
307#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 304#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
308#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) 305#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
309#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 306#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
310#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) 307#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
311#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 308#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
312#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) 309#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
313#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 310#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
314#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) 311#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
315#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 312#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
316#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) 313#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
317#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 314#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
318#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) 315#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
319#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) 316#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
320#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 317#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
321#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) 318#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
322#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 319#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
323#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) 320#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
324#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 321#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
325#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) 322#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
326#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 323#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
327#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) 324#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
328#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 325#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
329#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) 326#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
330#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 327#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
331#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) 328#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
332#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 329#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
333#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) 330#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
334#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 331#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
335#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) 332#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
336#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) 333#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
337#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 334#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
338#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) 335#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
339 336
340/* tx_meta_desc */ 337/* tx_meta_desc */
341#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) 338#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
342#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 339#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
343#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) 340#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
344#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 341#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
345#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) 342#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
346#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 343#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
347#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) 344#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
348#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 345#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
349#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) 346#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
350#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 347#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
351#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) 348#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
352#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 349#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
353#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) 350#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
354#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 351#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
355#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) 352#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
356#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 353#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
357#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) 354#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
358#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 355#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
359#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) 356#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
360#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) 357#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
361#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) 358#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
362#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 359#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
363#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) 360#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
364#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 361#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
365#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) 362#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
366#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 363#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
367#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) 364#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
368 365
369/* tx_cdesc */ 366/* tx_cdesc */
370#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) 367#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
371 368
372/* rx_desc */ 369/* rx_desc */
373#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) 370#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
374#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 371#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
375#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) 372#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
376#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 373#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
377#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) 374#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
378#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 375#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
379#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) 376#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
380 377
381/* rx_cdesc_base */ 378/* rx_cdesc_base */
382#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) 379#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
383#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 380#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
384#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) 381#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
385#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 382#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
386#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) 383#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
387#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 384#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
388#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) 385#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
389#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 386#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
390#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) 387#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
391#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 388#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
392#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) 389#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
393#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 390#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16
394#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) 391#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16)
395#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 392#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
396#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) 393#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
397#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 394#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
398#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) 395#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
399#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 396#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
400#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) 397#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
401#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 398#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
402#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) 399#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
400#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
401#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
403 402
404/* intr_reg */ 403/* intr_reg */
405#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) 404#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
406#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 405#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
407#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) 406#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
408#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 407#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
409#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) 408#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
410 409
411/* numa_node_cfg_reg */ 410/* numa_node_cfg_reg */
412#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) 411#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
413#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 412#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
414#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) 413#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
415 414
416#endif /*_ENA_ETH_IO_H_ */ 415#endif /*_ENA_ETH_IO_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 060cb18fa659..f3a5a384e6e8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
81 ENA_STAT_TX_ENTRY(doorbells), 81 ENA_STAT_TX_ENTRY(doorbells),
82 ENA_STAT_TX_ENTRY(prepare_ctx_err), 82 ENA_STAT_TX_ENTRY(prepare_ctx_err),
83 ENA_STAT_TX_ENTRY(bad_req_id), 83 ENA_STAT_TX_ENTRY(bad_req_id),
84 ENA_STAT_TX_ENTRY(llq_buffer_copy),
84 ENA_STAT_TX_ENTRY(missed_tx), 85 ENA_STAT_TX_ENTRY(missed_tx),
85}; 86};
86 87
@@ -96,6 +97,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
96 ENA_STAT_RX_ENTRY(rx_copybreak_pkt), 97 ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
97 ENA_STAT_RX_ENTRY(bad_req_id), 98 ENA_STAT_RX_ENTRY(bad_req_id),
98 ENA_STAT_RX_ENTRY(empty_rx_ring), 99 ENA_STAT_RX_ENTRY(empty_rx_ring),
100 ENA_STAT_RX_ENTRY(csum_unchecked),
99}; 101};
100 102
101static const struct ena_stats ena_stats_ena_com_strings[] = { 103static const struct ena_stats ena_stats_ena_com_strings[] = {
@@ -838,8 +840,8 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
838 return; 840 return;
839 } 841 }
840 842
841 strings_buf = devm_kzalloc(&adapter->pdev->dev, 843 strings_buf = devm_kcalloc(&adapter->pdev->dev,
842 strings_num * ETH_GSTRING_LEN, 844 ETH_GSTRING_LEN, strings_num,
843 GFP_ATOMIC); 845 GFP_ATOMIC);
844 if (!strings_buf) { 846 if (!strings_buf) {
845 netif_err(adapter, drv, netdev, 847 netif_err(adapter, drv, netdev,
@@ -847,8 +849,8 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
847 return; 849 return;
848 } 850 }
849 851
850 data_buf = devm_kzalloc(&adapter->pdev->dev, 852 data_buf = devm_kcalloc(&adapter->pdev->dev,
851 strings_num * sizeof(u64), 853 strings_num, sizeof(u64),
852 GFP_ATOMIC); 854 GFP_ATOMIC);
853 if (!data_buf) { 855 if (!data_buf) {
854 netif_err(adapter, drv, netdev, 856 netif_err(adapter, drv, netdev,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a822e70c2af3..a70bb1bb90e7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -39,7 +39,6 @@
39#include <linux/if_vlan.h> 39#include <linux/if_vlan.h>
40#include <linux/kernel.h> 40#include <linux/kernel.h>
41#include <linux/module.h> 41#include <linux/module.h>
42#include <linux/moduleparam.h>
43#include <linux/numa.h> 42#include <linux/numa.h>
44#include <linux/pci.h> 43#include <linux/pci.h>
45#include <linux/utsname.h> 44#include <linux/utsname.h>
@@ -76,7 +75,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
76 75
77static int ena_rss_init_default(struct ena_adapter *adapter); 76static int ena_rss_init_default(struct ena_adapter *adapter);
78static void check_for_admin_com_state(struct ena_adapter *adapter); 77static void check_for_admin_com_state(struct ena_adapter *adapter);
79static void ena_destroy_device(struct ena_adapter *adapter); 78static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
80static int ena_restore_device(struct ena_adapter *adapter); 79static int ena_restore_device(struct ena_adapter *adapter);
81 80
82static void ena_tx_timeout(struct net_device *dev) 81static void ena_tx_timeout(struct net_device *dev)
@@ -238,6 +237,17 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
238 } 237 }
239 } 238 }
240 239
240 size = tx_ring->tx_max_header_size;
241 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
242 if (!tx_ring->push_buf_intermediate_buf) {
243 tx_ring->push_buf_intermediate_buf = vzalloc(size);
244 if (!tx_ring->push_buf_intermediate_buf) {
245 vfree(tx_ring->tx_buffer_info);
246 vfree(tx_ring->free_tx_ids);
247 return -ENOMEM;
248 }
249 }
250
241 /* Req id ring for TX out of order completions */ 251 /* Req id ring for TX out of order completions */
242 for (i = 0; i < tx_ring->ring_size; i++) 252 for (i = 0; i < tx_ring->ring_size; i++)
243 tx_ring->free_tx_ids[i] = i; 253 tx_ring->free_tx_ids[i] = i;
@@ -266,6 +276,9 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
266 276
267 vfree(tx_ring->free_tx_ids); 277 vfree(tx_ring->free_tx_ids);
268 tx_ring->free_tx_ids = NULL; 278 tx_ring->free_tx_ids = NULL;
279
280 vfree(tx_ring->push_buf_intermediate_buf);
281 tx_ring->push_buf_intermediate_buf = NULL;
269} 282}
270 283
271/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues 284/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
@@ -461,7 +474,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
461 return -ENOMEM; 474 return -ENOMEM;
462 } 475 }
463 476
464 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, 477 dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
465 DMA_FROM_DEVICE); 478 DMA_FROM_DEVICE);
466 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { 479 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
467 u64_stats_update_begin(&rx_ring->syncp); 480 u64_stats_update_begin(&rx_ring->syncp);
@@ -478,7 +491,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
478 rx_info->page_offset = 0; 491 rx_info->page_offset = 0;
479 ena_buf = &rx_info->ena_buf; 492 ena_buf = &rx_info->ena_buf;
480 ena_buf->paddr = dma; 493 ena_buf->paddr = dma;
481 ena_buf->len = PAGE_SIZE; 494 ena_buf->len = ENA_PAGE_SIZE;
482 495
483 return 0; 496 return 0;
484} 497}
@@ -495,7 +508,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
495 return; 508 return;
496 } 509 }
497 510
498 dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE, 511 dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
499 DMA_FROM_DEVICE); 512 DMA_FROM_DEVICE);
500 513
501 __free_page(page); 514 __free_page(page);
@@ -551,14 +564,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
551 rx_ring->qid, i, num); 564 rx_ring->qid, i, num);
552 } 565 }
553 566
554 if (likely(i)) { 567 /* ena_com_write_sq_doorbell issues a wmb() */
555 /* Add memory barrier to make sure the desc were written before 568 if (likely(i))
556 * issue a doorbell 569 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
557 */
558 wmb();
559 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
560 mmiowb();
561 }
562 570
563 rx_ring->next_to_use = next_to_use; 571 rx_ring->next_to_use = next_to_use;
564 572
@@ -608,6 +616,36 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
608 ena_free_rx_bufs(adapter, i); 616 ena_free_rx_bufs(adapter, i);
609} 617}
610 618
619static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring,
620 struct ena_tx_buffer *tx_info)
621{
622 struct ena_com_buf *ena_buf;
623 u32 cnt;
624 int i;
625
626 ena_buf = tx_info->bufs;
627 cnt = tx_info->num_of_bufs;
628
629 if (unlikely(!cnt))
630 return;
631
632 if (tx_info->map_linear_data) {
633 dma_unmap_single(tx_ring->dev,
634 dma_unmap_addr(ena_buf, paddr),
635 dma_unmap_len(ena_buf, len),
636 DMA_TO_DEVICE);
637 ena_buf++;
638 cnt--;
639 }
640
641 /* unmap remaining mapped pages */
642 for (i = 0; i < cnt; i++) {
643 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
644 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
645 ena_buf++;
646 }
647}
648
611/* ena_free_tx_bufs - Free Tx Buffers per Queue 649/* ena_free_tx_bufs - Free Tx Buffers per Queue
612 * @tx_ring: TX ring for which buffers be freed 650 * @tx_ring: TX ring for which buffers be freed
613 */ 651 */
@@ -618,9 +656,6 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
618 656
619 for (i = 0; i < tx_ring->ring_size; i++) { 657 for (i = 0; i < tx_ring->ring_size; i++) {
620 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; 658 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
621 struct ena_com_buf *ena_buf;
622 int nr_frags;
623 int j;
624 659
625 if (!tx_info->skb) 660 if (!tx_info->skb)
626 continue; 661 continue;
@@ -636,21 +671,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
636 tx_ring->qid, i); 671 tx_ring->qid, i);
637 } 672 }
638 673
639 ena_buf = tx_info->bufs; 674 ena_unmap_tx_skb(tx_ring, tx_info);
640 dma_unmap_single(tx_ring->dev,
641 ena_buf->paddr,
642 ena_buf->len,
643 DMA_TO_DEVICE);
644
645 /* unmap remaining mapped pages */
646 nr_frags = tx_info->num_of_bufs - 1;
647 for (j = 0; j < nr_frags; j++) {
648 ena_buf++;
649 dma_unmap_page(tx_ring->dev,
650 ena_buf->paddr,
651 ena_buf->len,
652 DMA_TO_DEVICE);
653 }
654 675
655 dev_kfree_skb_any(tx_info->skb); 676 dev_kfree_skb_any(tx_info->skb);
656 } 677 }
@@ -741,8 +762,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
741 while (tx_pkts < budget) { 762 while (tx_pkts < budget) {
742 struct ena_tx_buffer *tx_info; 763 struct ena_tx_buffer *tx_info;
743 struct sk_buff *skb; 764 struct sk_buff *skb;
744 struct ena_com_buf *ena_buf;
745 int i, nr_frags;
746 765
747 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, 766 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
748 &req_id); 767 &req_id);
@@ -762,24 +781,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
762 tx_info->skb = NULL; 781 tx_info->skb = NULL;
763 tx_info->last_jiffies = 0; 782 tx_info->last_jiffies = 0;
764 783
765 if (likely(tx_info->num_of_bufs != 0)) { 784 ena_unmap_tx_skb(tx_ring, tx_info);
766 ena_buf = tx_info->bufs;
767
768 dma_unmap_single(tx_ring->dev,
769 dma_unmap_addr(ena_buf, paddr),
770 dma_unmap_len(ena_buf, len),
771 DMA_TO_DEVICE);
772
773 /* unmap remaining mapped pages */
774 nr_frags = tx_info->num_of_bufs - 1;
775 for (i = 0; i < nr_frags; i++) {
776 ena_buf++;
777 dma_unmap_page(tx_ring->dev,
778 dma_unmap_addr(ena_buf, paddr),
779 dma_unmap_len(ena_buf, len),
780 DMA_TO_DEVICE);
781 }
782 }
783 785
784 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, 786 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
785 "tx_poll: q %d skb %p completed\n", tx_ring->qid, 787 "tx_poll: q %d skb %p completed\n", tx_ring->qid,
@@ -810,12 +812,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
810 */ 812 */
811 smp_mb(); 813 smp_mb();
812 814
813 above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > 815 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
814 ENA_TX_WAKEUP_THRESH; 816 ENA_TX_WAKEUP_THRESH);
815 if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) { 817 if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
816 __netif_tx_lock(txq, smp_processor_id()); 818 __netif_tx_lock(txq, smp_processor_id());
817 above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > 819 above_thresh =
818 ENA_TX_WAKEUP_THRESH; 820 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
821 ENA_TX_WAKEUP_THRESH);
819 if (netif_tx_queue_stopped(txq) && above_thresh) { 822 if (netif_tx_queue_stopped(txq) && above_thresh) {
820 netif_tx_wake_queue(txq); 823 netif_tx_wake_queue(txq);
821 u64_stats_update_begin(&tx_ring->syncp); 824 u64_stats_update_begin(&tx_ring->syncp);
@@ -916,10 +919,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
916 do { 919 do {
917 dma_unmap_page(rx_ring->dev, 920 dma_unmap_page(rx_ring->dev,
918 dma_unmap_addr(&rx_info->ena_buf, paddr), 921 dma_unmap_addr(&rx_info->ena_buf, paddr),
919 PAGE_SIZE, DMA_FROM_DEVICE); 922 ENA_PAGE_SIZE, DMA_FROM_DEVICE);
920 923
921 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, 924 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
922 rx_info->page_offset, len, PAGE_SIZE); 925 rx_info->page_offset, len, ENA_PAGE_SIZE);
923 926
924 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 927 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
925 "rx skb updated. len %d. data_len %d\n", 928 "rx skb updated. len %d. data_len %d\n",
@@ -991,8 +994,19 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
991 return; 994 return;
992 } 995 }
993 996
994 skb->ip_summed = CHECKSUM_UNNECESSARY; 997 if (likely(ena_rx_ctx->l4_csum_checked)) {
998 skb->ip_summed = CHECKSUM_UNNECESSARY;
999 } else {
1000 u64_stats_update_begin(&rx_ring->syncp);
1001 rx_ring->rx_stats.csum_unchecked++;
1002 u64_stats_update_end(&rx_ring->syncp);
1003 skb->ip_summed = CHECKSUM_NONE;
1004 }
1005 } else {
1006 skb->ip_summed = CHECKSUM_NONE;
1007 return;
995 } 1008 }
1009
996} 1010}
997 1011
998static void ena_set_rx_hash(struct ena_ring *rx_ring, 1012static void ena_set_rx_hash(struct ena_ring *rx_ring,
@@ -1107,8 +1121,10 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
1107 1121
1108 rx_ring->next_to_clean = next_to_clean; 1122 rx_ring->next_to_clean = next_to_clean;
1109 1123
1110 refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq); 1124 refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
1111 refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER; 1125 refill_threshold =
1126 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
1127 ENA_RX_REFILL_THRESH_PACKET);
1112 1128
1113 /* Optimization, try to batch new rx buffers */ 1129 /* Optimization, try to batch new rx buffers */
1114 if (refill_required > refill_threshold) { 1130 if (refill_required > refill_threshold) {
@@ -1305,7 +1321,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
1305 1321
1306 /* Reserved the max msix vectors we might need */ 1322 /* Reserved the max msix vectors we might need */
1307 msix_vecs = ENA_MAX_MSIX_VEC(num_queues); 1323 msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
1308
1309 netif_dbg(adapter, probe, adapter->netdev, 1324 netif_dbg(adapter, probe, adapter->netdev,
1310 "trying to enable MSI-X, vectors %d\n", msix_vecs); 1325 "trying to enable MSI-X, vectors %d\n", msix_vecs);
1311 1326
@@ -1580,8 +1595,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
1580 if (rc) 1595 if (rc)
1581 return rc; 1596 return rc;
1582 1597
1583 ena_init_napi(adapter);
1584
1585 ena_change_mtu(adapter->netdev, adapter->netdev->mtu); 1598 ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
1586 1599
1587 ena_refill_all_rx_bufs(adapter); 1600 ena_refill_all_rx_bufs(adapter);
@@ -1598,7 +1611,7 @@ static int ena_up_complete(struct ena_adapter *adapter)
1598 1611
1599static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) 1612static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
1600{ 1613{
1601 struct ena_com_create_io_ctx ctx = { 0 }; 1614 struct ena_com_create_io_ctx ctx;
1602 struct ena_com_dev *ena_dev; 1615 struct ena_com_dev *ena_dev;
1603 struct ena_ring *tx_ring; 1616 struct ena_ring *tx_ring;
1604 u32 msix_vector; 1617 u32 msix_vector;
@@ -1611,6 +1624,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
1611 msix_vector = ENA_IO_IRQ_IDX(qid); 1624 msix_vector = ENA_IO_IRQ_IDX(qid);
1612 ena_qid = ENA_IO_TXQ_IDX(qid); 1625 ena_qid = ENA_IO_TXQ_IDX(qid);
1613 1626
1627 memset(&ctx, 0x0, sizeof(ctx));
1628
1614 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1629 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1615 ctx.qid = ena_qid; 1630 ctx.qid = ena_qid;
1616 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1631 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
@@ -1664,7 +1679,7 @@ create_err:
1664static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) 1679static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
1665{ 1680{
1666 struct ena_com_dev *ena_dev; 1681 struct ena_com_dev *ena_dev;
1667 struct ena_com_create_io_ctx ctx = { 0 }; 1682 struct ena_com_create_io_ctx ctx;
1668 struct ena_ring *rx_ring; 1683 struct ena_ring *rx_ring;
1669 u32 msix_vector; 1684 u32 msix_vector;
1670 u16 ena_qid; 1685 u16 ena_qid;
@@ -1676,6 +1691,8 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
1676 msix_vector = ENA_IO_IRQ_IDX(qid); 1691 msix_vector = ENA_IO_IRQ_IDX(qid);
1677 ena_qid = ENA_IO_RXQ_IDX(qid); 1692 ena_qid = ENA_IO_RXQ_IDX(qid);
1678 1693
1694 memset(&ctx, 0x0, sizeof(ctx));
1695
1679 ctx.qid = ena_qid; 1696 ctx.qid = ena_qid;
1680 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1697 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1681 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1698 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -1735,6 +1752,13 @@ static int ena_up(struct ena_adapter *adapter)
1735 1752
1736 ena_setup_io_intr(adapter); 1753 ena_setup_io_intr(adapter);
1737 1754
1755 /* napi poll functions should be initialized before running
1756 * request_irq(), to handle a rare condition where there is a pending
1757 * interrupt, causing the ISR to fire immediately while the poll
1758 * function wasn't set yet, causing a null dereference
1759 */
1760 ena_init_napi(adapter);
1761
1738 rc = ena_request_io_irq(adapter); 1762 rc = ena_request_io_irq(adapter);
1739 if (rc) 1763 if (rc)
1740 goto err_req_irq; 1764 goto err_req_irq;
@@ -1824,6 +1848,8 @@ static void ena_down(struct ena_adapter *adapter)
1824 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 1848 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
1825 if (rc) 1849 if (rc)
1826 dev_err(&adapter->pdev->dev, "Device reset failed\n"); 1850 dev_err(&adapter->pdev->dev, "Device reset failed\n");
1851 /* stop submitting admin commands on a device that was reset */
1852 ena_com_set_admin_running_state(adapter->ena_dev, false);
1827 } 1853 }
1828 1854
1829 ena_destroy_all_io_queues(adapter); 1855 ena_destroy_all_io_queues(adapter);
@@ -1890,6 +1916,9 @@ static int ena_close(struct net_device *netdev)
1890 1916
1891 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); 1917 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
1892 1918
1919 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
1920 return 0;
1921
1893 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 1922 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
1894 ena_down(adapter); 1923 ena_down(adapter);
1895 1924
@@ -1900,7 +1929,7 @@ static int ena_close(struct net_device *netdev)
1900 "Destroy failure, restarting device\n"); 1929 "Destroy failure, restarting device\n");
1901 ena_dump_stats_to_dmesg(adapter); 1930 ena_dump_stats_to_dmesg(adapter);
1902 /* rtnl lock already obtained in dev_ioctl() layer */ 1931 /* rtnl lock already obtained in dev_ioctl() layer */
1903 ena_destroy_device(adapter); 1932 ena_destroy_device(adapter, false);
1904 ena_restore_device(adapter); 1933 ena_restore_device(adapter);
1905 } 1934 }
1906 1935
@@ -1986,73 +2015,70 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
1986 return rc; 2015 return rc;
1987} 2016}
1988 2017
1989/* Called with netif_tx_lock. */ 2018static int ena_tx_map_skb(struct ena_ring *tx_ring,
1990static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) 2019 struct ena_tx_buffer *tx_info,
2020 struct sk_buff *skb,
2021 void **push_hdr,
2022 u16 *header_len)
1991{ 2023{
1992 struct ena_adapter *adapter = netdev_priv(dev); 2024 struct ena_adapter *adapter = tx_ring->adapter;
1993 struct ena_tx_buffer *tx_info;
1994 struct ena_com_tx_ctx ena_tx_ctx;
1995 struct ena_ring *tx_ring;
1996 struct netdev_queue *txq;
1997 struct ena_com_buf *ena_buf; 2025 struct ena_com_buf *ena_buf;
1998 void *push_hdr;
1999 u32 len, last_frag;
2000 u16 next_to_use;
2001 u16 req_id;
2002 u16 push_len;
2003 u16 header_len;
2004 dma_addr_t dma; 2026 dma_addr_t dma;
2005 int qid, rc, nb_hw_desc; 2027 u32 skb_head_len, frag_len, last_frag;
2006 int i = -1; 2028 u16 push_len = 0;
2007 2029 u16 delta = 0;
2008 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); 2030 int i = 0;
2009 /* Determine which tx ring we will be placed on */
2010 qid = skb_get_queue_mapping(skb);
2011 tx_ring = &adapter->tx_ring[qid];
2012 txq = netdev_get_tx_queue(dev, qid);
2013
2014 rc = ena_check_and_linearize_skb(tx_ring, skb);
2015 if (unlikely(rc))
2016 goto error_drop_packet;
2017
2018 skb_tx_timestamp(skb);
2019 len = skb_headlen(skb);
2020 2031
2021 next_to_use = tx_ring->next_to_use; 2032 skb_head_len = skb_headlen(skb);
2022 req_id = tx_ring->free_tx_ids[next_to_use];
2023 tx_info = &tx_ring->tx_buffer_info[req_id];
2024 tx_info->num_of_bufs = 0;
2025
2026 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2027 ena_buf = tx_info->bufs;
2028 tx_info->skb = skb; 2033 tx_info->skb = skb;
2034 ena_buf = tx_info->bufs;
2029 2035
2030 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2036 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2031 /* prepared the push buffer */ 2037 /* When the device is LLQ mode, the driver will copy
2032 push_len = min_t(u32, len, tx_ring->tx_max_header_size); 2038 * the header into the device memory space.
2033 header_len = push_len; 2039 * the ena_com layer assume the header is in a linear
2034 push_hdr = skb->data; 2040 * memory space.
2041 * This assumption might be wrong since part of the header
2042 * can be in the fragmented buffers.
2043 * Use skb_header_pointer to make sure the header is in a
2044 * linear memory space.
2045 */
2046
2047 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2048 *push_hdr = skb_header_pointer(skb, 0, push_len,
2049 tx_ring->push_buf_intermediate_buf);
2050 *header_len = push_len;
2051 if (unlikely(skb->data != *push_hdr)) {
2052 u64_stats_update_begin(&tx_ring->syncp);
2053 tx_ring->tx_stats.llq_buffer_copy++;
2054 u64_stats_update_end(&tx_ring->syncp);
2055
2056 delta = push_len - skb_head_len;
2057 }
2035 } else { 2058 } else {
2036 push_len = 0; 2059 *push_hdr = NULL;
2037 header_len = min_t(u32, len, tx_ring->tx_max_header_size); 2060 *header_len = min_t(u32, skb_head_len,
2038 push_hdr = NULL; 2061 tx_ring->tx_max_header_size);
2039 } 2062 }
2040 2063
2041 netif_dbg(adapter, tx_queued, dev, 2064 netif_dbg(adapter, tx_queued, adapter->netdev,
2042 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, 2065 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2043 push_hdr, push_len); 2066 *push_hdr, push_len);
2044 2067
2045 if (len > push_len) { 2068 if (skb_head_len > push_len) {
2046 dma = dma_map_single(tx_ring->dev, skb->data + push_len, 2069 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2047 len - push_len, DMA_TO_DEVICE); 2070 skb_head_len - push_len, DMA_TO_DEVICE);
2048 if (dma_mapping_error(tx_ring->dev, dma)) 2071 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2049 goto error_report_dma_error; 2072 goto error_report_dma_error;
2050 2073
2051 ena_buf->paddr = dma; 2074 ena_buf->paddr = dma;
2052 ena_buf->len = len - push_len; 2075 ena_buf->len = skb_head_len - push_len;
2053 2076
2054 ena_buf++; 2077 ena_buf++;
2055 tx_info->num_of_bufs++; 2078 tx_info->num_of_bufs++;
2079 tx_info->map_linear_data = 1;
2080 } else {
2081 tx_info->map_linear_data = 0;
2056 } 2082 }
2057 2083
2058 last_frag = skb_shinfo(skb)->nr_frags; 2084 last_frag = skb_shinfo(skb)->nr_frags;
@@ -2060,18 +2086,75 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2060 for (i = 0; i < last_frag; i++) { 2086 for (i = 0; i < last_frag; i++) {
2061 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2087 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2062 2088
2063 len = skb_frag_size(frag); 2089 frag_len = skb_frag_size(frag);
2064 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, 2090
2065 DMA_TO_DEVICE); 2091 if (unlikely(delta >= frag_len)) {
2066 if (dma_mapping_error(tx_ring->dev, dma)) 2092 delta -= frag_len;
2093 continue;
2094 }
2095
2096 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2097 frag_len - delta, DMA_TO_DEVICE);
2098 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2067 goto error_report_dma_error; 2099 goto error_report_dma_error;
2068 2100
2069 ena_buf->paddr = dma; 2101 ena_buf->paddr = dma;
2070 ena_buf->len = len; 2102 ena_buf->len = frag_len - delta;
2071 ena_buf++; 2103 ena_buf++;
2104 tx_info->num_of_bufs++;
2105 delta = 0;
2072 } 2106 }
2073 2107
2074 tx_info->num_of_bufs += last_frag; 2108 return 0;
2109
2110error_report_dma_error:
2111 u64_stats_update_begin(&tx_ring->syncp);
2112 tx_ring->tx_stats.dma_mapping_err++;
2113 u64_stats_update_end(&tx_ring->syncp);
2114 netdev_warn(adapter->netdev, "failed to map skb\n");
2115
2116 tx_info->skb = NULL;
2117
2118 tx_info->num_of_bufs += i;
2119 ena_unmap_tx_skb(tx_ring, tx_info);
2120
2121 return -EINVAL;
2122}
2123
2124/* Called with netif_tx_lock. */
2125static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2126{
2127 struct ena_adapter *adapter = netdev_priv(dev);
2128 struct ena_tx_buffer *tx_info;
2129 struct ena_com_tx_ctx ena_tx_ctx;
2130 struct ena_ring *tx_ring;
2131 struct netdev_queue *txq;
2132 void *push_hdr;
2133 u16 next_to_use, req_id, header_len;
2134 int qid, rc, nb_hw_desc;
2135
2136 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2137 /* Determine which tx ring we will be placed on */
2138 qid = skb_get_queue_mapping(skb);
2139 tx_ring = &adapter->tx_ring[qid];
2140 txq = netdev_get_tx_queue(dev, qid);
2141
2142 rc = ena_check_and_linearize_skb(tx_ring, skb);
2143 if (unlikely(rc))
2144 goto error_drop_packet;
2145
2146 skb_tx_timestamp(skb);
2147
2148 next_to_use = tx_ring->next_to_use;
2149 req_id = tx_ring->free_tx_ids[next_to_use];
2150 tx_info = &tx_ring->tx_buffer_info[req_id];
2151 tx_info->num_of_bufs = 0;
2152
2153 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2154
2155 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
2156 if (unlikely(rc))
2157 goto error_drop_packet;
2075 2158
2076 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); 2159 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2077 ena_tx_ctx.ena_bufs = tx_info->bufs; 2160 ena_tx_ctx.ena_bufs = tx_info->bufs;
@@ -2087,14 +2170,22 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2087 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, 2170 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
2088 &nb_hw_desc); 2171 &nb_hw_desc);
2089 2172
2173 /* ena_com_prepare_tx() can't fail due to overflow of tx queue,
2174 * since the number of free descriptors in the queue is checked
2175 * after sending the previous packet. In case there isn't enough
2176 * space in the queue for the next packet, it is stopped
2177 * until there is again enough available space in the queue.
2178 * All other failure reasons of ena_com_prepare_tx() are fatal
2179 * and therefore require a device reset.
2180 */
2090 if (unlikely(rc)) { 2181 if (unlikely(rc)) {
2091 netif_err(adapter, tx_queued, dev, 2182 netif_err(adapter, tx_queued, dev,
2092 "failed to prepare tx bufs\n"); 2183 "failed to prepare tx bufs\n");
2093 u64_stats_update_begin(&tx_ring->syncp); 2184 u64_stats_update_begin(&tx_ring->syncp);
2094 tx_ring->tx_stats.queue_stop++;
2095 tx_ring->tx_stats.prepare_ctx_err++; 2185 tx_ring->tx_stats.prepare_ctx_err++;
2096 u64_stats_update_end(&tx_ring->syncp); 2186 u64_stats_update_end(&tx_ring->syncp);
2097 netif_tx_stop_queue(txq); 2187 adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
2188 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2098 goto error_unmap_dma; 2189 goto error_unmap_dma;
2099 } 2190 }
2100 2191
@@ -2112,18 +2203,12 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2112 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, 2203 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2113 tx_ring->ring_size); 2204 tx_ring->ring_size);
2114 2205
2115 /* This WMB is aimed to:
2116 * 1 - perform smp barrier before reading next_to_completion
2117 * 2 - make sure the desc were written before trigger DB
2118 */
2119 wmb();
2120
2121 /* stop the queue when no more space available, the packet can have up 2206 /* stop the queue when no more space available, the packet can have up
2122 * to sgl_size + 2. one for the meta descriptor and one for header 2207 * to sgl_size + 2. one for the meta descriptor and one for header
2123 * (if the header is larger than tx_max_header_size). 2208 * (if the header is larger than tx_max_header_size).
2124 */ 2209 */
2125 if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) < 2210 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2126 (tx_ring->sgl_size + 2))) { 2211 tx_ring->sgl_size + 2))) {
2127 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", 2212 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
2128 __func__, qid); 2213 __func__, qid);
2129 2214
@@ -2136,13 +2221,14 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2136 * stop the queue but meanwhile clean_tx_irq updates 2221 * stop the queue but meanwhile clean_tx_irq updates
2137 * next_to_completion and terminates. 2222 * next_to_completion and terminates.
2138 * The queue will remain stopped forever. 2223 * The queue will remain stopped forever.
2139 * To solve this issue this function perform rmb, check 2224 * To solve this issue add a mb() to make sure that
2140 * the wakeup condition and wake up the queue if needed. 2225 * netif_tx_stop_queue() write is vissible before checking if
2226 * there is additional space in the queue.
2141 */ 2227 */
2142 smp_rmb(); 2228 smp_mb();
2143 2229
2144 if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) 2230 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2145 > ENA_TX_WAKEUP_THRESH) { 2231 ENA_TX_WAKEUP_THRESH)) {
2146 netif_tx_wake_queue(txq); 2232 netif_tx_wake_queue(txq);
2147 u64_stats_update_begin(&tx_ring->syncp); 2233 u64_stats_update_begin(&tx_ring->syncp);
2148 tx_ring->tx_stats.queue_wakeup++; 2234 tx_ring->tx_stats.queue_wakeup++;
@@ -2151,8 +2237,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2151 } 2237 }
2152 2238
2153 if (netif_xmit_stopped(txq) || !skb->xmit_more) { 2239 if (netif_xmit_stopped(txq) || !skb->xmit_more) {
2154 /* trigger the dma engine */ 2240 /* trigger the dma engine. ena_com_write_sq_doorbell()
2155 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false); 2241 * has a mb
2242 */
2243 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2156 u64_stats_update_begin(&tx_ring->syncp); 2244 u64_stats_update_begin(&tx_ring->syncp);
2157 tx_ring->tx_stats.doorbells++; 2245 tx_ring->tx_stats.doorbells++;
2158 u64_stats_update_end(&tx_ring->syncp); 2246 u64_stats_update_end(&tx_ring->syncp);
@@ -2160,60 +2248,18 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2160 2248
2161 return NETDEV_TX_OK; 2249 return NETDEV_TX_OK;
2162 2250
2163error_report_dma_error:
2164 u64_stats_update_begin(&tx_ring->syncp);
2165 tx_ring->tx_stats.dma_mapping_err++;
2166 u64_stats_update_end(&tx_ring->syncp);
2167 netdev_warn(adapter->netdev, "failed to map skb\n");
2168
2169 tx_info->skb = NULL;
2170
2171error_unmap_dma: 2251error_unmap_dma:
2172 if (i >= 0) { 2252 ena_unmap_tx_skb(tx_ring, tx_info);
2173 /* save value of frag that failed */ 2253 tx_info->skb = NULL;
2174 last_frag = i;
2175
2176 /* start back at beginning and unmap skb */
2177 tx_info->skb = NULL;
2178 ena_buf = tx_info->bufs;
2179 dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
2180 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
2181
2182 /* unmap remaining mapped pages */
2183 for (i = 0; i < last_frag; i++) {
2184 ena_buf++;
2185 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
2186 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
2187 }
2188 }
2189 2254
2190error_drop_packet: 2255error_drop_packet:
2191
2192 dev_kfree_skb(skb); 2256 dev_kfree_skb(skb);
2193 return NETDEV_TX_OK; 2257 return NETDEV_TX_OK;
2194} 2258}
2195 2259
2196#ifdef CONFIG_NET_POLL_CONTROLLER
2197static void ena_netpoll(struct net_device *netdev)
2198{
2199 struct ena_adapter *adapter = netdev_priv(netdev);
2200 int i;
2201
2202 /* Dont schedule NAPI if the driver is in the middle of reset
2203 * or netdev is down.
2204 */
2205
2206 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
2207 test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2208 return;
2209
2210 for (i = 0; i < adapter->num_queues; i++)
2211 napi_schedule(&adapter->ena_napi[i].napi);
2212}
2213#endif /* CONFIG_NET_POLL_CONTROLLER */
2214
2215static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, 2260static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
2216 void *accel_priv, select_queue_fallback_t fallback) 2261 struct net_device *sb_dev,
2262 select_queue_fallback_t fallback)
2217{ 2263{
2218 u16 qid; 2264 u16 qid;
2219 /* we suspect that this is good for in--kernel network services that 2265 /* we suspect that this is good for in--kernel network services that
@@ -2223,12 +2269,13 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
2223 if (skb_rx_queue_recorded(skb)) 2269 if (skb_rx_queue_recorded(skb))
2224 qid = skb_get_rx_queue(skb); 2270 qid = skb_get_rx_queue(skb);
2225 else 2271 else
2226 qid = fallback(dev, skb); 2272 qid = fallback(dev, skb, NULL);
2227 2273
2228 return qid; 2274 return qid;
2229} 2275}
2230 2276
2231static void ena_config_host_info(struct ena_com_dev *ena_dev) 2277static void ena_config_host_info(struct ena_com_dev *ena_dev,
2278 struct pci_dev *pdev)
2232{ 2279{
2233 struct ena_admin_host_info *host_info; 2280 struct ena_admin_host_info *host_info;
2234 int rc; 2281 int rc;
@@ -2242,6 +2289,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
2242 2289
2243 host_info = ena_dev->host_attr.host_info; 2290 host_info = ena_dev->host_attr.host_info;
2244 2291
2292 host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
2245 host_info->os_type = ENA_ADMIN_OS_LINUX; 2293 host_info->os_type = ENA_ADMIN_OS_LINUX;
2246 host_info->kernel_ver = LINUX_VERSION_CODE; 2294 host_info->kernel_ver = LINUX_VERSION_CODE;
2247 strncpy(host_info->kernel_ver_str, utsname()->version, 2295 strncpy(host_info->kernel_ver_str, utsname()->version,
@@ -2252,7 +2300,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
2252 host_info->driver_version = 2300 host_info->driver_version =
2253 (DRV_MODULE_VER_MAJOR) | 2301 (DRV_MODULE_VER_MAJOR) |
2254 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 2302 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2255 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 2303 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
2304 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
2305 host_info->num_cpus = num_online_cpus();
2256 2306
2257 rc = ena_com_set_host_attributes(ena_dev); 2307 rc = ena_com_set_host_attributes(ena_dev);
2258 if (rc) { 2308 if (rc) {
@@ -2376,9 +2426,6 @@ static const struct net_device_ops ena_netdev_ops = {
2376 .ndo_change_mtu = ena_change_mtu, 2426 .ndo_change_mtu = ena_change_mtu,
2377 .ndo_set_mac_address = NULL, 2427 .ndo_set_mac_address = NULL,
2378 .ndo_validate_addr = eth_validate_addr, 2428 .ndo_validate_addr = eth_validate_addr,
2379#ifdef CONFIG_NET_POLL_CONTROLLER
2380 .ndo_poll_controller = ena_netpoll,
2381#endif /* CONFIG_NET_POLL_CONTROLLER */
2382}; 2429};
2383 2430
2384static int ena_device_validate_params(struct ena_adapter *adapter, 2431static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -2466,7 +2513,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
2466 } 2513 }
2467 2514
2468 /* ENA admin level init */ 2515 /* ENA admin level init */
2469 rc = ena_com_admin_init(ena_dev, &aenq_handlers, true); 2516 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
2470 if (rc) { 2517 if (rc) {
2471 dev_err(dev, 2518 dev_err(dev,
2472 "Can not initialize ena admin queue with device\n"); 2519 "Can not initialize ena admin queue with device\n");
@@ -2479,7 +2526,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
2479 */ 2526 */
2480 ena_com_set_admin_polling_mode(ena_dev, true); 2527 ena_com_set_admin_polling_mode(ena_dev, true);
2481 2528
2482 ena_config_host_info(ena_dev); 2529 ena_config_host_info(ena_dev, pdev);
2483 2530
2484 /* Get Device Attributes*/ 2531 /* Get Device Attributes*/
2485 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 2532 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
@@ -2549,28 +2596,29 @@ err_disable_msix:
2549 return rc; 2596 return rc;
2550} 2597}
2551 2598
2552static void ena_destroy_device(struct ena_adapter *adapter) 2599static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
2553{ 2600{
2554 struct net_device *netdev = adapter->netdev; 2601 struct net_device *netdev = adapter->netdev;
2555 struct ena_com_dev *ena_dev = adapter->ena_dev; 2602 struct ena_com_dev *ena_dev = adapter->ena_dev;
2556 bool dev_up; 2603 bool dev_up;
2557 2604
2605 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2606 return;
2607
2558 netif_carrier_off(netdev); 2608 netif_carrier_off(netdev);
2559 2609
2560 del_timer_sync(&adapter->timer_service); 2610 del_timer_sync(&adapter->timer_service);
2561 2611
2562 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); 2612 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2563 adapter->dev_up_before_reset = dev_up; 2613 adapter->dev_up_before_reset = dev_up;
2564 2614 if (!graceful)
2565 ena_com_set_admin_running_state(ena_dev, false); 2615 ena_com_set_admin_running_state(ena_dev, false);
2566 2616
2567 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2617 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2568 ena_down(adapter); 2618 ena_down(adapter);
2569 2619
2570 /* Before releasing the ENA resources, a device reset is required. 2620 /* Stop the device from sending AENQ events (in case reset flag is set
2571 * (to prevent the device from accessing them). 2621 * and device is up, ena_down() already reset the device.
2572 * In case the reset flag is set and the device is up, ena_down()
2573 * already perform the reset, so it can be skipped.
2574 */ 2622 */
2575 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) 2623 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
2576 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); 2624 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
@@ -2590,6 +2638,7 @@ static void ena_destroy_device(struct ena_adapter *adapter)
2590 adapter->reset_reason = ENA_REGS_RESET_NORMAL; 2638 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
2591 2639
2592 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2640 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2641 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2593} 2642}
2594 2643
2595static int ena_restore_device(struct ena_adapter *adapter) 2644static int ena_restore_device(struct ena_adapter *adapter)
@@ -2634,15 +2683,22 @@ static int ena_restore_device(struct ena_adapter *adapter)
2634 } 2683 }
2635 } 2684 }
2636 2685
2686 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2637 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 2687 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2638 dev_err(&pdev->dev, "Device reset completed successfully\n"); 2688 dev_err(&pdev->dev,
2689 "Device reset completed successfully, Driver info: %s\n",
2690 version);
2639 2691
2640 return rc; 2692 return rc;
2641err_disable_msix: 2693err_disable_msix:
2642 ena_free_mgmnt_irq(adapter); 2694 ena_free_mgmnt_irq(adapter);
2643 ena_disable_msix(adapter); 2695 ena_disable_msix(adapter);
2644err_device_destroy: 2696err_device_destroy:
2697 ena_com_abort_admin_commands(ena_dev);
2698 ena_com_wait_for_abort_completion(ena_dev);
2645 ena_com_admin_destroy(ena_dev); 2699 ena_com_admin_destroy(ena_dev);
2700 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
2701 ena_com_mmio_reg_read_request_destroy(ena_dev);
2646err: 2702err:
2647 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2703 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2648 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); 2704 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -2664,7 +2720,7 @@ static void ena_fw_reset_device(struct work_struct *work)
2664 return; 2720 return;
2665 } 2721 }
2666 rtnl_lock(); 2722 rtnl_lock();
2667 ena_destroy_device(adapter); 2723 ena_destroy_device(adapter, false);
2668 ena_restore_device(adapter); 2724 ena_restore_device(adapter);
2669 rtnl_unlock(); 2725 rtnl_unlock();
2670} 2726}
@@ -2824,7 +2880,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
2824 rx_ring = &adapter->rx_ring[i]; 2880 rx_ring = &adapter->rx_ring[i];
2825 2881
2826 refill_required = 2882 refill_required =
2827 ena_com_sq_empty_space(rx_ring->ena_com_io_sq); 2883 ena_com_free_desc(rx_ring->ena_com_io_sq);
2828 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 2884 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
2829 rx_ring->empty_rx_queue++; 2885 rx_ring->empty_rx_queue++;
2830 2886
@@ -2969,20 +3025,10 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
2969 int io_sq_num, io_queue_num; 3025 int io_sq_num, io_queue_num;
2970 3026
2971 /* In case of LLQ use the llq number in the get feature cmd */ 3027 /* In case of LLQ use the llq number in the get feature cmd */
2972 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 3028 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2973 io_sq_num = get_feat_ctx->max_queues.max_llq_num; 3029 io_sq_num = get_feat_ctx->llq.max_llq_num;
2974 3030 else
2975 if (io_sq_num == 0) {
2976 dev_err(&pdev->dev,
2977 "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");
2978
2979 ena_dev->tx_mem_queue_type =
2980 ENA_ADMIN_PLACEMENT_POLICY_HOST;
2981 io_sq_num = get_feat_ctx->max_queues.max_sq_num;
2982 }
2983 } else {
2984 io_sq_num = get_feat_ctx->max_queues.max_sq_num; 3031 io_sq_num = get_feat_ctx->max_queues.max_sq_num;
2985 }
2986 3032
2987 io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); 3033 io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
2988 io_queue_num = min_t(int, io_queue_num, io_sq_num); 3034 io_queue_num = min_t(int, io_queue_num, io_sq_num);
@@ -2998,18 +3044,52 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
2998 return io_queue_num; 3044 return io_queue_num;
2999} 3045}
3000 3046
3001static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev, 3047static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3002 struct ena_com_dev_get_features_ctx *get_feat_ctx) 3048 struct ena_com_dev *ena_dev,
3049 struct ena_admin_feature_llq_desc *llq,
3050 struct ena_llq_configurations *llq_default_configurations)
3003{ 3051{
3004 bool has_mem_bar; 3052 bool has_mem_bar;
3053 int rc;
3054 u32 llq_feature_mask;
3055
3056 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3057 if (!(ena_dev->supported_features & llq_feature_mask)) {
3058 dev_err(&pdev->dev,
3059 "LLQ is not supported Fallback to host mode policy.\n");
3060 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3061 return 0;
3062 }
3005 3063
3006 has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR); 3064 has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
3007 3065
3008 /* Enable push mode if device supports LLQ */ 3066 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3009 if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0)) 3067 if (unlikely(rc)) {
3010 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; 3068 dev_err(&pdev->dev,
3011 else 3069 "Failed to configure the device mode. Fallback to host mode policy.\n");
3070 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3071 return 0;
3072 }
3073
3074 /* Nothing to config, exit */
3075 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
3076 return 0;
3077
3078 if (!has_mem_bar) {
3079 dev_err(&pdev->dev,
3080 "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
3012 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 3081 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3082 return 0;
3083 }
3084
3085 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3086 pci_resource_start(pdev, ENA_MEM_BAR),
3087 pci_resource_len(pdev, ENA_MEM_BAR));
3088
3089 if (!ena_dev->mem_bar)
3090 return -EFAULT;
3091
3092 return 0;
3013} 3093}
3014 3094
3015static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, 3095static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
@@ -3122,18 +3202,20 @@ err_rss_init:
3122 3202
3123static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) 3203static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3124{ 3204{
3125 int release_bars; 3205 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3126
3127 if (ena_dev->mem_bar)
3128 devm_iounmap(&pdev->dev, ena_dev->mem_bar);
3129 3206
3130 if (ena_dev->reg_bar)
3131 devm_iounmap(&pdev->dev, ena_dev->reg_bar);
3132
3133 release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3134 pci_release_selected_regions(pdev, release_bars); 3207 pci_release_selected_regions(pdev, release_bars);
3135} 3208}
3136 3209
3210static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
3211{
3212 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
3213 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
3214 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
3215 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
3216 llq_config->llq_ring_entry_size_value = 128;
3217}
3218
3137static int ena_calc_queue_size(struct pci_dev *pdev, 3219static int ena_calc_queue_size(struct pci_dev *pdev,
3138 struct ena_com_dev *ena_dev, 3220 struct ena_com_dev *ena_dev,
3139 u16 *max_tx_sgl_size, 3221 u16 *max_tx_sgl_size,
@@ -3149,7 +3231,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev,
3149 3231
3150 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 3232 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3151 queue_size = min_t(u32, queue_size, 3233 queue_size = min_t(u32, queue_size,
3152 get_feat_ctx->max_queues.max_llq_depth); 3234 get_feat_ctx->llq.max_llq_depth);
3153 3235
3154 queue_size = rounddown_pow_of_two(queue_size); 3236 queue_size = rounddown_pow_of_two(queue_size);
3155 3237
@@ -3182,7 +3264,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3182 static int version_printed; 3264 static int version_printed;
3183 struct net_device *netdev; 3265 struct net_device *netdev;
3184 struct ena_adapter *adapter; 3266 struct ena_adapter *adapter;
3267 struct ena_llq_configurations llq_config;
3185 struct ena_com_dev *ena_dev = NULL; 3268 struct ena_com_dev *ena_dev = NULL;
3269 char *queue_type_str;
3186 static int adapters_found; 3270 static int adapters_found;
3187 int io_queue_num, bars, rc; 3271 int io_queue_num, bars, rc;
3188 int queue_size; 3272 int queue_size;
@@ -3236,16 +3320,13 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3236 goto err_free_region; 3320 goto err_free_region;
3237 } 3321 }
3238 3322
3239 ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); 3323 set_default_llq_configurations(&llq_config);
3240 3324
3241 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 3325 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
3242 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, 3326 &llq_config);
3243 pci_resource_start(pdev, ENA_MEM_BAR), 3327 if (rc) {
3244 pci_resource_len(pdev, ENA_MEM_BAR)); 3328 dev_err(&pdev->dev, "ena device init failed\n");
3245 if (!ena_dev->mem_bar) { 3329 goto err_device_destroy;
3246 rc = -EFAULT;
3247 goto err_device_destroy;
3248 }
3249 } 3330 }
3250 3331
3251 /* initial Tx interrupt delay, Assumes 1 usec granularity. 3332 /* initial Tx interrupt delay, Assumes 1 usec granularity.
@@ -3260,8 +3341,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3260 goto err_device_destroy; 3341 goto err_device_destroy;
3261 } 3342 }
3262 3343
3263 dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n", 3344 dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n",
3264 io_queue_num, queue_size); 3345 io_queue_num, queue_size,
3346 (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
3347 "ENABLED" : "DISABLED");
3265 3348
3266 /* dev zeroed in init_etherdev */ 3349 /* dev zeroed in init_etherdev */
3267 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num); 3350 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
@@ -3351,9 +3434,15 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3351 timer_setup(&adapter->timer_service, ena_timer_service, 0); 3434 timer_setup(&adapter->timer_service, ena_timer_service, 0);
3352 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 3435 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3353 3436
3354 dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n", 3437 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
3438 queue_type_str = "Regular";
3439 else
3440 queue_type_str = "Low Latency";
3441
3442 dev_info(&pdev->dev,
3443 "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
3355 DEVICE_NAME, (long)pci_resource_start(pdev, 0), 3444 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
3356 netdev->dev_addr, io_queue_num); 3445 netdev->dev_addr, io_queue_num, queue_type_str);
3357 3446
3358 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 3447 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3359 3448
@@ -3366,6 +3455,8 @@ err_rss:
3366 ena_com_rss_destroy(ena_dev); 3455 ena_com_rss_destroy(ena_dev);
3367err_free_msix: 3456err_free_msix:
3368 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR); 3457 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
3458 /* stop submitting admin commands on a device that was reset */
3459 ena_com_set_admin_running_state(ena_dev, false);
3369 ena_free_mgmnt_irq(adapter); 3460 ena_free_mgmnt_irq(adapter);
3370 ena_disable_msix(adapter); 3461 ena_disable_msix(adapter);
3371err_worker_destroy: 3462err_worker_destroy:
@@ -3386,32 +3477,6 @@ err_disable_device:
3386} 3477}
3387 3478
3388/*****************************************************************************/ 3479/*****************************************************************************/
3389static int ena_sriov_configure(struct pci_dev *dev, int numvfs)
3390{
3391 int rc;
3392
3393 if (numvfs > 0) {
3394 rc = pci_enable_sriov(dev, numvfs);
3395 if (rc != 0) {
3396 dev_err(&dev->dev,
3397 "pci_enable_sriov failed to enable: %d vfs with the error: %d\n",
3398 numvfs, rc);
3399 return rc;
3400 }
3401
3402 return numvfs;
3403 }
3404
3405 if (numvfs == 0) {
3406 pci_disable_sriov(dev);
3407 return 0;
3408 }
3409
3410 return -EINVAL;
3411}
3412
3413/*****************************************************************************/
3414/*****************************************************************************/
3415 3480
3416/* ena_remove - Device Removal Routine 3481/* ena_remove - Device Removal Routine
3417 * @pdev: PCI device information struct 3482 * @pdev: PCI device information struct
@@ -3434,30 +3499,18 @@ static void ena_remove(struct pci_dev *pdev)
3434 netdev->rx_cpu_rmap = NULL; 3499 netdev->rx_cpu_rmap = NULL;
3435 } 3500 }
3436#endif /* CONFIG_RFS_ACCEL */ 3501#endif /* CONFIG_RFS_ACCEL */
3437
3438 unregister_netdev(netdev);
3439 del_timer_sync(&adapter->timer_service); 3502 del_timer_sync(&adapter->timer_service);
3440 3503
3441 cancel_work_sync(&adapter->reset_task); 3504 cancel_work_sync(&adapter->reset_task);
3442 3505
3443 /* Reset the device only if the device is running. */ 3506 rtnl_lock();
3444 if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) 3507 ena_destroy_device(adapter, true);
3445 ena_com_dev_reset(ena_dev, adapter->reset_reason); 3508 rtnl_unlock();
3446
3447 ena_free_mgmnt_irq(adapter);
3448 3509
3449 ena_disable_msix(adapter); 3510 unregister_netdev(netdev);
3450 3511
3451 free_netdev(netdev); 3512 free_netdev(netdev);
3452 3513
3453 ena_com_mmio_reg_read_request_destroy(ena_dev);
3454
3455 ena_com_abort_admin_commands(ena_dev);
3456
3457 ena_com_wait_for_abort_completion(ena_dev);
3458
3459 ena_com_admin_destroy(ena_dev);
3460
3461 ena_com_rss_destroy(ena_dev); 3514 ena_com_rss_destroy(ena_dev);
3462 3515
3463 ena_com_delete_debug_area(ena_dev); 3516 ena_com_delete_debug_area(ena_dev);
@@ -3492,7 +3545,7 @@ static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
3492 "ignoring device reset request as the device is being suspended\n"); 3545 "ignoring device reset request as the device is being suspended\n");
3493 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3546 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3494 } 3547 }
3495 ena_destroy_device(adapter); 3548 ena_destroy_device(adapter, true);
3496 rtnl_unlock(); 3549 rtnl_unlock();
3497 return 0; 3550 return 0;
3498} 3551}
@@ -3526,7 +3579,7 @@ static struct pci_driver ena_pci_driver = {
3526 .suspend = ena_suspend, 3579 .suspend = ena_suspend,
3527 .resume = ena_resume, 3580 .resume = ena_resume,
3528#endif 3581#endif
3529 .sriov_configure = ena_sriov_configure, 3582 .sriov_configure = pci_sriov_configure_simple,
3530}; 3583};
3531 3584
3532static int __init ena_init(void) 3585static int __init ena_init(void)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index f1972b5ab650..dc8b6173d8d8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -43,9 +43,9 @@
43#include "ena_com.h" 43#include "ena_com.h"
44#include "ena_eth_com.h" 44#include "ena_eth_com.h"
45 45
46#define DRV_MODULE_VER_MAJOR 1 46#define DRV_MODULE_VER_MAJOR 2
47#define DRV_MODULE_VER_MINOR 5 47#define DRV_MODULE_VER_MINOR 0
48#define DRV_MODULE_VER_SUBMINOR 0 48#define DRV_MODULE_VER_SUBMINOR 2
49 49
50#define DRV_MODULE_NAME "ena" 50#define DRV_MODULE_NAME "ena"
51#ifndef DRV_MODULE_VERSION 51#ifndef DRV_MODULE_VERSION
@@ -61,6 +61,17 @@
61#define ENA_ADMIN_MSIX_VEC 1 61#define ENA_ADMIN_MSIX_VEC 1
62#define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues)) 62#define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues))
63 63
64/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
65 * driver passes 0.
66 * Since the max packet size the ENA handles is ~9kB limit the buffer length to
67 * 16kB.
68 */
69#if PAGE_SIZE > SZ_16K
70#define ENA_PAGE_SIZE SZ_16K
71#else
72#define ENA_PAGE_SIZE PAGE_SIZE
73#endif
74
64#define ENA_MIN_MSIX_VEC 2 75#define ENA_MIN_MSIX_VEC 2
65 76
66#define ENA_REG_BAR 0 77#define ENA_REG_BAR 0
@@ -70,7 +81,7 @@
70#define ENA_DEFAULT_RING_SIZE (1024) 81#define ENA_DEFAULT_RING_SIZE (1024)
71 82
72#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2) 83#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
73#define ENA_DEFAULT_RX_COPYBREAK (128 - NET_IP_ALIGN) 84#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)
74 85
75/* limit the buffer size to 600 bytes to handle MTU changes from very 86/* limit the buffer size to 600 bytes to handle MTU changes from very
76 * small to very large, in which case the number of buffers per packet 87 * small to very large, in which case the number of buffers per packet
@@ -95,10 +106,11 @@
95 */ 106 */
96#define ENA_TX_POLL_BUDGET_DIVIDER 4 107#define ENA_TX_POLL_BUDGET_DIVIDER 4
97 108
98/* Refill Rx queue when number of available descriptors is below 109/* Refill Rx queue when number of required descriptors is above
99 * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER 110 * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER or ENA_RX_REFILL_THRESH_PACKET
100 */ 111 */
101#define ENA_RX_REFILL_THRESH_DIVIDER 8 112#define ENA_RX_REFILL_THRESH_DIVIDER 8
113#define ENA_RX_REFILL_THRESH_PACKET 256
102 114
103/* Number of queues to check for missing queues per timer service */ 115/* Number of queues to check for missing queues per timer service */
104#define ENA_MONITORED_TX_QUEUES 4 116#define ENA_MONITORED_TX_QUEUES 4
@@ -151,6 +163,9 @@ struct ena_tx_buffer {
151 /* num of buffers used by this skb */ 163 /* num of buffers used by this skb */
152 u32 num_of_bufs; 164 u32 num_of_bufs;
153 165
166 /* Indicate if bufs[0] map the linear data of the skb. */
167 u8 map_linear_data;
168
154 /* Used for detect missing tx packets to limit the number of prints */ 169 /* Used for detect missing tx packets to limit the number of prints */
155 u32 print_once; 170 u32 print_once;
156 /* Save the last jiffies to detect missing tx packets 171 /* Save the last jiffies to detect missing tx packets
@@ -186,6 +201,7 @@ struct ena_stats_tx {
186 u64 tx_poll; 201 u64 tx_poll;
187 u64 doorbells; 202 u64 doorbells;
188 u64 bad_req_id; 203 u64 bad_req_id;
204 u64 llq_buffer_copy;
189 u64 missed_tx; 205 u64 missed_tx;
190}; 206};
191 207
@@ -201,6 +217,7 @@ struct ena_stats_rx {
201 u64 rx_copybreak_pkt; 217 u64 rx_copybreak_pkt;
202 u64 bad_req_id; 218 u64 bad_req_id;
203 u64 empty_rx_ring; 219 u64 empty_rx_ring;
220 u64 csum_unchecked;
204}; 221};
205 222
206struct ena_ring { 223struct ena_ring {
@@ -257,6 +274,8 @@ struct ena_ring {
257 struct ena_stats_tx tx_stats; 274 struct ena_stats_tx tx_stats;
258 struct ena_stats_rx rx_stats; 275 struct ena_stats_rx rx_stats;
259 }; 276 };
277
278 u8 *push_buf_intermediate_buf;
260 int empty_rx_queue; 279 int empty_rx_queue;
261} ____cacheline_aligned; 280} ____cacheline_aligned;
262 281
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 48ca97fbe7bc..04fcafcc059c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -33,137 +33,125 @@
33#define _ENA_REGS_H_ 33#define _ENA_REGS_H_
34 34
35enum ena_regs_reset_reason_types { 35enum ena_regs_reset_reason_types {
36 ENA_REGS_RESET_NORMAL = 0, 36 ENA_REGS_RESET_NORMAL = 0,
37 37 ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
38 ENA_REGS_RESET_KEEP_ALIVE_TO = 1, 38 ENA_REGS_RESET_ADMIN_TO = 2,
39 39 ENA_REGS_RESET_MISS_TX_CMPL = 3,
40 ENA_REGS_RESET_ADMIN_TO = 2, 40 ENA_REGS_RESET_INV_RX_REQ_ID = 4,
41 41 ENA_REGS_RESET_INV_TX_REQ_ID = 5,
42 ENA_REGS_RESET_MISS_TX_CMPL = 3, 42 ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
43 43 ENA_REGS_RESET_INIT_ERR = 7,
44 ENA_REGS_RESET_INV_RX_REQ_ID = 4, 44 ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
45 45 ENA_REGS_RESET_OS_TRIGGER = 9,
46 ENA_REGS_RESET_INV_TX_REQ_ID = 5, 46 ENA_REGS_RESET_OS_NETDEV_WD = 10,
47 47 ENA_REGS_RESET_SHUTDOWN = 11,
48 ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6, 48 ENA_REGS_RESET_USER_TRIGGER = 12,
49 49 ENA_REGS_RESET_GENERIC = 13,
50 ENA_REGS_RESET_INIT_ERR = 7, 50 ENA_REGS_RESET_MISS_INTERRUPT = 14,
51
52 ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
53
54 ENA_REGS_RESET_OS_TRIGGER = 9,
55
56 ENA_REGS_RESET_OS_NETDEV_WD = 10,
57
58 ENA_REGS_RESET_SHUTDOWN = 11,
59
60 ENA_REGS_RESET_USER_TRIGGER = 12,
61
62 ENA_REGS_RESET_GENERIC = 13,
63
64 ENA_REGS_RESET_MISS_INTERRUPT = 14,
65}; 51};
66 52
67/* ena_registers offsets */ 53/* ena_registers offsets */
68#define ENA_REGS_VERSION_OFF 0x0 54
69#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 55/* 0 base */
70#define ENA_REGS_CAPS_OFF 0x8 56#define ENA_REGS_VERSION_OFF 0x0
71#define ENA_REGS_CAPS_EXT_OFF 0xc 57#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
72#define ENA_REGS_AQ_BASE_LO_OFF 0x10 58#define ENA_REGS_CAPS_OFF 0x8
73#define ENA_REGS_AQ_BASE_HI_OFF 0x14 59#define ENA_REGS_CAPS_EXT_OFF 0xc
74#define ENA_REGS_AQ_CAPS_OFF 0x18 60#define ENA_REGS_AQ_BASE_LO_OFF 0x10
75#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 61#define ENA_REGS_AQ_BASE_HI_OFF 0x14
76#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 62#define ENA_REGS_AQ_CAPS_OFF 0x18
77#define ENA_REGS_ACQ_CAPS_OFF 0x28 63#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
78#define ENA_REGS_AQ_DB_OFF 0x2c 64#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
79#define ENA_REGS_ACQ_TAIL_OFF 0x30 65#define ENA_REGS_ACQ_CAPS_OFF 0x28
80#define ENA_REGS_AENQ_CAPS_OFF 0x34 66#define ENA_REGS_AQ_DB_OFF 0x2c
81#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 67#define ENA_REGS_ACQ_TAIL_OFF 0x30
82#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c 68#define ENA_REGS_AENQ_CAPS_OFF 0x34
83#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 69#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
84#define ENA_REGS_AENQ_TAIL_OFF 0x44 70#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
85#define ENA_REGS_INTR_MASK_OFF 0x4c 71#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
86#define ENA_REGS_DEV_CTL_OFF 0x54 72#define ENA_REGS_AENQ_TAIL_OFF 0x44
87#define ENA_REGS_DEV_STS_OFF 0x58 73#define ENA_REGS_INTR_MASK_OFF 0x4c
88#define ENA_REGS_MMIO_REG_READ_OFF 0x5c 74#define ENA_REGS_DEV_CTL_OFF 0x54
89#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 75#define ENA_REGS_DEV_STS_OFF 0x58
90#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 76#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
91#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 77#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
78#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
79#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
92 80
93/* version register */ 81/* version register */
94#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff 82#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
95#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 83#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
96#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 84#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
97 85
98/* controller_version register */ 86/* controller_version register */
99#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff 87#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
100#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 88#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
101#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 89#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
102#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 90#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
103#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 91#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
104#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 92#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
105#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 93#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
106 94
107/* caps register */ 95/* caps register */
108#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 96#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
109#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 97#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
110#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e 98#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
111#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 99#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
112#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 100#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
113#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16 101#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
114#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000 102#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
115 103
116/* aq_caps register */ 104/* aq_caps register */
117#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff 105#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
118#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 106#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
119#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 107#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
120 108
121/* acq_caps register */ 109/* acq_caps register */
122#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff 110#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
123#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 111#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
124#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 112#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
125 113
126/* aenq_caps register */ 114/* aenq_caps register */
127#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff 115#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
128#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 116#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
129#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 117#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
130 118
131/* dev_ctl register */ 119/* dev_ctl register */
132#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 120#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
133#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 121#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
134#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 122#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
135#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 123#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
136#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 124#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
137#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 125#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
138#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 126#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
139#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28 127#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
140#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000 128#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
141 129
142/* dev_sts register */ 130/* dev_sts register */
143#define ENA_REGS_DEV_STS_READY_MASK 0x1 131#define ENA_REGS_DEV_STS_READY_MASK 0x1
144#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 132#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
145#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 133#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
146#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 134#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
147#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 135#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
148#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 136#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
149#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 137#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
150#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 138#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
151#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 139#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
152#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 140#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
153#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 141#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
154#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 142#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
155#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 143#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
156#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 144#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
157#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 145#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
158 146
159/* mmio_reg_read register */ 147/* mmio_reg_read register */
160#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff 148#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
161#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 149#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
162#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 150#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
163 151
164/* rss_ind_entry_update register */ 152/* rss_ind_entry_update register */
165#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff 153#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
166#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 154#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
167#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 155#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
168 156
169#endif /*_ENA_REGS_H_ */ 157#endif /*_ENA_REGS_H_ */
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index d5c15e8bb3de..9e5cf5583c87 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -44,7 +44,7 @@ config AMD8111_ETH
44 44
45config LANCE 45config LANCE
46 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" 46 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
47 depends on ISA && ISA_DMA_API && !ARM 47 depends on ISA && ISA_DMA_API && !ARM && !PPC32
48 ---help--- 48 ---help---
49 If you have a network (Ethernet) card of this type, say Y here. 49 If you have a network (Ethernet) card of this type, say Y here.
50 Some LinkSys cards are of this type. 50 Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
138 138
139config NI65 139config NI65
140 tristate "NI6510 support" 140 tristate "NI6510 support"
141 depends on ISA && ISA_DMA_API && !ARM 141 depends on ISA && ISA_DMA_API && !ARM && !PPC32
142 ---help--- 142 ---help---
143 If you have a network (Ethernet) card of this type, say Y here. 143 If you have a network (Ethernet) card of this type, say Y here.
144 144
@@ -173,7 +173,7 @@ config SUNLANCE
173 173
174config AMD_XGBE 174config AMD_XGBE
175 tristate "AMD 10GbE Ethernet driver" 175 tristate "AMD 10GbE Ethernet driver"
176 depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM && HAS_DMA 176 depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
177 depends on X86 || ARM64 || COMPILE_TEST 177 depends on X86 || ARM64 || COMPILE_TEST
178 select BITREVERSE 178 select BITREVERSE
179 select CRC32 179 select CRC32
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 01d132c02ff9..265039c57023 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -440,7 +440,7 @@ static void am79c961_timeout(struct net_device *dev)
440/* 440/*
441 * Transmit a packet 441 * Transmit a packet
442 */ 442 */
443static int 443static netdev_tx_t
444am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev) 444am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
445{ 445{
446 struct dev_priv *priv = netdev_priv(dev); 446 struct dev_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index c99e3e845ac0..a90080f12e67 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1074,16 +1074,12 @@ static int amd8111e_calc_coalesce(struct net_device *dev)
1074 amd8111e_set_coalesce(dev,TX_INTR_COAL); 1074 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1075 coal_conf->tx_coal_type = MEDIUM_COALESCE; 1075 coal_conf->tx_coal_type = MEDIUM_COALESCE;
1076 } 1076 }
1077 1077 } else if (tx_pkt_size >= 1024) {
1078 } 1078 if (coal_conf->tx_coal_type != HIGH_COALESCE) {
1079 else if(tx_pkt_size >= 1024){ 1079 coal_conf->tx_timeout = 4;
1080 if (tx_pkt_size >= 1024){ 1080 coal_conf->tx_event_count = 8;
1081 if(coal_conf->tx_coal_type != HIGH_COALESCE){ 1081 amd8111e_set_coalesce(dev, TX_INTR_COAL);
1082 coal_conf->tx_timeout = 4; 1082 coal_conf->tx_coal_type = HIGH_COALESCE;
1083 coal_conf->tx_event_count = 8;
1084 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1085 coal_conf->tx_coal_type = HIGH_COALESCE;
1086 }
1087 } 1083 }
1088 } 1084 }
1089 } 1085 }
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index c5b81268c284..d3d44e07afbc 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -339,7 +339,8 @@ static unsigned long lance_probe1( struct net_device *dev, struct lance_addr
339 *init_rec ); 339 *init_rec );
340static int lance_open( struct net_device *dev ); 340static int lance_open( struct net_device *dev );
341static void lance_init_ring( struct net_device *dev ); 341static void lance_init_ring( struct net_device *dev );
342static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); 342static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
343 struct net_device *dev);
343static irqreturn_t lance_interrupt( int irq, void *dev_id ); 344static irqreturn_t lance_interrupt( int irq, void *dev_id );
344static int lance_rx( struct net_device *dev ); 345static int lance_rx( struct net_device *dev );
345static int lance_close( struct net_device *dev ); 346static int lance_close( struct net_device *dev );
@@ -769,7 +770,8 @@ static void lance_tx_timeout (struct net_device *dev)
769 770
770/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ 771/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
771 772
772static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) 773static netdev_tx_t
774lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
773{ 775{
774 struct lance_private *lp = netdev_priv(dev); 776 struct lance_private *lp = netdev_priv(dev);
775 struct lance_ioreg *IO = lp->iobase; 777 struct lance_ioreg *IO = lp->iobase;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 73ca8879ada7..7c1eb304c27e 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -564,17 +564,7 @@ static int au1000_mii_probe(struct net_device *dev)
564 return PTR_ERR(phydev); 564 return PTR_ERR(phydev);
565 } 565 }
566 566
567 /* mask with MAC supported features */ 567 phy_set_max_speed(phydev, SPEED_100);
568 phydev->supported &= (SUPPORTED_10baseT_Half
569 | SUPPORTED_10baseT_Full
570 | SUPPORTED_100baseT_Half
571 | SUPPORTED_100baseT_Full
572 | SUPPORTED_Autoneg
573 /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
574 | SUPPORTED_MII
575 | SUPPORTED_TP);
576
577 phydev->advertising = phydev->supported;
578 568
579 aup->old_link = 0; 569 aup->old_link = 0;
580 aup->old_speed = 0; 570 aup->old_speed = 0;
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 116997a8b593..9f23703dd509 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -894,7 +894,7 @@ static void lance_tx_timeout(struct net_device *dev)
894 netif_wake_queue(dev); 894 netif_wake_queue(dev);
895} 895}
896 896
897static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) 897static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
898{ 898{
899 struct lance_private *lp = netdev_priv(dev); 899 struct lance_private *lp = netdev_priv(dev);
900 volatile struct lance_regs *ll = lp->ll; 900 volatile struct lance_regs *ll = lp->ll;
@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
1031 int i, ret; 1031 int i, ret;
1032 unsigned long esar_base; 1032 unsigned long esar_base;
1033 unsigned char *esar; 1033 unsigned char *esar;
1034 const char *desc;
1034 1035
1035 if (dec_lance_debug && version_printed++ == 0) 1036 if (dec_lance_debug && version_printed++ == 0)
1036 printk(version); 1037 printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
1216 */ 1217 */
1217 switch (type) { 1218 switch (type) {
1218 case ASIC_LANCE: 1219 case ASIC_LANCE:
1219 printk("%s: IOASIC onboard LANCE", name); 1220 desc = "IOASIC onboard LANCE";
1220 break; 1221 break;
1221 case PMAD_LANCE: 1222 case PMAD_LANCE:
1222 printk("%s: PMAD-AA", name); 1223 desc = "PMAD-AA";
1223 break; 1224 break;
1224 case PMAX_LANCE: 1225 case PMAX_LANCE:
1225 printk("%s: PMAX onboard LANCE", name); 1226 desc = "PMAX onboard LANCE";
1226 break; 1227 break;
1227 } 1228 }
1228 for (i = 0; i < 6; i++) 1229 for (i = 0; i < 6; i++)
1229 dev->dev_addr[i] = esar[i * 4]; 1230 dev->dev_addr[i] = esar[i * 4];
1230 1231
1231 printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); 1232 printk("%s: %s, addr = %pM, irq = %d\n",
1233 name, desc, dev->dev_addr, dev->irq);
1232 1234
1233 dev->netdev_ops = &lance_netdev_ops; 1235 dev->netdev_ops = &lance_netdev_ops;
1234 dev->watchdog_timeo = 5*HZ; 1236 dev->watchdog_timeo = 5*HZ;
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 12a6a93d221b..b56d84c7df46 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -551,13 +551,13 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
551 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); 551 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
552 dev->ml_priv = lp; 552 dev->ml_priv = lp;
553 lp->name = chipname; 553 lp->name = chipname;
554 lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE, 554 lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ,
555 GFP_DMA | GFP_KERNEL); 555 GFP_DMA | GFP_KERNEL);
556 if (!lp->rx_buffs) 556 if (!lp->rx_buffs)
557 goto out_lp; 557 goto out_lp;
558 if (lance_need_isa_bounce_buffers) { 558 if (lance_need_isa_bounce_buffers) {
559 lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE, 559 lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
560 GFP_DMA | GFP_KERNEL); 560 GFP_DMA | GFP_KERNEL);
561 if (!lp->tx_bounce_buffs) 561 if (!lp->tx_bounce_buffs)
562 goto out_rx; 562 goto out_rx;
563 } else 563 } else
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index e248d1ab3e47..8931ce6bab7b 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -435,10 +435,8 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
435 } 435 }
436 if(cards[i].vendor_id) { 436 if(cards[i].vendor_id) {
437 for(j=0;j<3;j++) 437 for(j=0;j<3;j++)
438 if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) { 438 if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
439 release_region(ioaddr, cards[i].total_size); 439 release_region(ioaddr, cards[i].total_size);
440 continue;
441 }
442 } 440 }
443 break; 441 break;
444 } 442 }
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index be198cc0b10c..f5ad12c10934 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -2036,22 +2036,22 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
2036 } 2036 }
2037 2037
2038 lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), 2038 lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
2039 GFP_ATOMIC); 2039 GFP_KERNEL);
2040 if (!lp->tx_dma_addr) 2040 if (!lp->tx_dma_addr)
2041 return -ENOMEM; 2041 return -ENOMEM;
2042 2042
2043 lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), 2043 lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
2044 GFP_ATOMIC); 2044 GFP_KERNEL);
2045 if (!lp->rx_dma_addr) 2045 if (!lp->rx_dma_addr)
2046 return -ENOMEM; 2046 return -ENOMEM;
2047 2047
2048 lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), 2048 lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
2049 GFP_ATOMIC); 2049 GFP_KERNEL);
2050 if (!lp->tx_skbuff) 2050 if (!lp->tx_skbuff)
2051 return -ENOMEM; 2051 return -ENOMEM;
2052 2052
2053 lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), 2053 lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
2054 GFP_ATOMIC); 2054 GFP_KERNEL);
2055 if (!lp->rx_skbuff) 2055 if (!lp->rx_skbuff)
2056 return -ENOMEM; 2056 return -ENOMEM;
2057 2057
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 77b1db267730..da7e3d4f4166 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -236,7 +236,8 @@ struct lance_private {
236static int lance_probe( struct net_device *dev); 236static int lance_probe( struct net_device *dev);
237static int lance_open( struct net_device *dev ); 237static int lance_open( struct net_device *dev );
238static void lance_init_ring( struct net_device *dev ); 238static void lance_init_ring( struct net_device *dev );
239static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); 239static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
240 struct net_device *dev);
240static irqreturn_t lance_interrupt( int irq, void *dev_id); 241static irqreturn_t lance_interrupt( int irq, void *dev_id);
241static int lance_rx( struct net_device *dev ); 242static int lance_rx( struct net_device *dev );
242static int lance_close( struct net_device *dev ); 243static int lance_close( struct net_device *dev );
@@ -511,7 +512,8 @@ static void lance_init_ring( struct net_device *dev )
511} 512}
512 513
513 514
514static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) 515static netdev_tx_t
516lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
515{ 517{
516 struct lance_private *lp = netdev_priv(dev); 518 struct lance_private *lp = netdev_priv(dev);
517 int entry, len; 519 int entry, len;
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index cdd7a611479b..9d4899826823 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1106,7 +1106,7 @@ static void lance_tx_timeout(struct net_device *dev)
1106 netif_wake_queue(dev); 1106 netif_wake_queue(dev);
1107} 1107}
1108 1108
1109static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) 1109static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
1110{ 1110{
1111 struct lance_private *lp = netdev_priv(dev); 1111 struct lance_private *lp = netdev_priv(dev);
1112 int entry, skblen, len; 1112 int entry, skblen, len;
@@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
1419 1419
1420 prop = of_get_property(nd, "tpe-link-test?", NULL); 1420 prop = of_get_property(nd, "tpe-link-test?", NULL);
1421 if (!prop) 1421 if (!prop)
1422 goto no_link_test; 1422 goto node_put;
1423 1423
1424 if (strcmp(prop, "true")) { 1424 if (strcmp(prop, "true")) {
1425 printk(KERN_NOTICE "SunLance: warning: overriding option " 1425 printk(KERN_NOTICE "SunLance: warning: overriding option "
@@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
1428 "to ecd@skynet.be\n"); 1428 "to ecd@skynet.be\n");
1429 auxio_set_lte(AUXIO_LTE_ON); 1429 auxio_set_lte(AUXIO_LTE_ON);
1430 } 1430 }
1431node_put:
1432 of_node_put(nd);
1431no_link_test: 1433no_link_test:
1432 lp->auto_select = 1; 1434 lp->auto_select = 1;
1433 lp->tpe = 0; 1435 lp->tpe = 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index cc1e4f820e64..533094233659 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -289,7 +289,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
289 struct page *pages = NULL; 289 struct page *pages = NULL;
290 dma_addr_t pages_dma; 290 dma_addr_t pages_dma;
291 gfp_t gfp; 291 gfp_t gfp;
292 int order, ret; 292 int order;
293 293
294again: 294again:
295 order = alloc_order; 295 order = alloc_order;
@@ -316,10 +316,9 @@ again:
316 /* Map the pages */ 316 /* Map the pages */
317 pages_dma = dma_map_page(pdata->dev, pages, 0, 317 pages_dma = dma_map_page(pdata->dev, pages, 0,
318 PAGE_SIZE << order, DMA_FROM_DEVICE); 318 PAGE_SIZE << order, DMA_FROM_DEVICE);
319 ret = dma_mapping_error(pdata->dev, pages_dma); 319 if (dma_mapping_error(pdata->dev, pages_dma)) {
320 if (ret) {
321 put_page(pages); 320 put_page(pages);
322 return ret; 321 return -ENOMEM;
323 } 322 }
324 323
325 pa->pages = pages; 324 pa->pages = pages;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index e107e180e2c8..1e929a1e4ca7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -119,6 +119,7 @@
119#include <linux/clk.h> 119#include <linux/clk.h>
120#include <linux/bitrev.h> 120#include <linux/bitrev.h>
121#include <linux/crc32.h> 121#include <linux/crc32.h>
122#include <linux/crc32poly.h>
122 123
123#include "xgbe.h" 124#include "xgbe.h"
124#include "xgbe-common.h" 125#include "xgbe-common.h"
@@ -887,7 +888,6 @@ static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
887 888
888static u32 xgbe_vid_crc32_le(__le16 vid_le) 889static u32 xgbe_vid_crc32_le(__le16 vid_le)
889{ 890{
890 u32 poly = 0xedb88320; /* CRCPOLY_LE */
891 u32 crc = ~0; 891 u32 crc = ~0;
892 u32 temp = 0; 892 u32 temp = 0;
893 unsigned char *data = (unsigned char *)&vid_le; 893 unsigned char *data = (unsigned char *)&vid_le;
@@ -904,7 +904,7 @@ static u32 xgbe_vid_crc32_le(__le16 vid_le)
904 data_byte >>= 1; 904 data_byte >>= 1;
905 905
906 if (temp) 906 if (temp)
907 crc ^= poly; 907 crc ^= CRC32_POLY_LE;
908 } 908 }
909 909
910 return crc; 910 return crc;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 7c204f05b418..0cc911f928b1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -119,7 +119,6 @@
119#include <linux/tcp.h> 119#include <linux/tcp.h>
120#include <linux/if_vlan.h> 120#include <linux/if_vlan.h>
121#include <linux/interrupt.h> 121#include <linux/interrupt.h>
122#include <net/busy_poll.h>
123#include <linux/clk.h> 122#include <linux/clk.h>
124#include <linux/if_ether.h> 123#include <linux/if_ether.h>
125#include <linux/net_tstamp.h> 124#include <linux/net_tstamp.h>
@@ -1312,14 +1311,83 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
1312 return 0; 1311 return 0;
1313} 1312}
1314 1313
1314static void xgbe_free_memory(struct xgbe_prv_data *pdata)
1315{
1316 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1317
1318 /* Free the ring descriptors and buffers */
1319 desc_if->free_ring_resources(pdata);
1320
1321 /* Free the channel and ring structures */
1322 xgbe_free_channels(pdata);
1323}
1324
1325static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
1326{
1327 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1328 struct net_device *netdev = pdata->netdev;
1329 int ret;
1330
1331 if (pdata->new_tx_ring_count) {
1332 pdata->tx_ring_count = pdata->new_tx_ring_count;
1333 pdata->tx_q_count = pdata->tx_ring_count;
1334 pdata->new_tx_ring_count = 0;
1335 }
1336
1337 if (pdata->new_rx_ring_count) {
1338 pdata->rx_ring_count = pdata->new_rx_ring_count;
1339 pdata->new_rx_ring_count = 0;
1340 }
1341
1342 /* Calculate the Rx buffer size before allocating rings */
1343 pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
1344
1345 /* Allocate the channel and ring structures */
1346 ret = xgbe_alloc_channels(pdata);
1347 if (ret)
1348 return ret;
1349
1350 /* Allocate the ring descriptors and buffers */
1351 ret = desc_if->alloc_ring_resources(pdata);
1352 if (ret)
1353 goto err_channels;
1354
1355 /* Initialize the service and Tx timers */
1356 xgbe_init_timers(pdata);
1357
1358 return 0;
1359
1360err_channels:
1361 xgbe_free_memory(pdata);
1362
1363 return ret;
1364}
1365
1315static int xgbe_start(struct xgbe_prv_data *pdata) 1366static int xgbe_start(struct xgbe_prv_data *pdata)
1316{ 1367{
1317 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1368 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1318 struct xgbe_phy_if *phy_if = &pdata->phy_if; 1369 struct xgbe_phy_if *phy_if = &pdata->phy_if;
1319 struct net_device *netdev = pdata->netdev; 1370 struct net_device *netdev = pdata->netdev;
1371 unsigned int i;
1320 int ret; 1372 int ret;
1321 1373
1322 DBGPR("-->xgbe_start\n"); 1374 /* Set the number of queues */
1375 ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
1376 if (ret) {
1377 netdev_err(netdev, "error setting real tx queue count\n");
1378 return ret;
1379 }
1380
1381 ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
1382 if (ret) {
1383 netdev_err(netdev, "error setting real rx queue count\n");
1384 return ret;
1385 }
1386
1387 /* Set RSS lookup table data for programming */
1388 for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
1389 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
1390 i % pdata->rx_ring_count);
1323 1391
1324 ret = hw_if->init(pdata); 1392 ret = hw_if->init(pdata);
1325 if (ret) 1393 if (ret)
@@ -1347,8 +1415,6 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
1347 1415
1348 clear_bit(XGBE_STOPPED, &pdata->dev_state); 1416 clear_bit(XGBE_STOPPED, &pdata->dev_state);
1349 1417
1350 DBGPR("<--xgbe_start\n");
1351
1352 return 0; 1418 return 0;
1353 1419
1354err_irqs: 1420err_irqs:
@@ -1426,10 +1492,22 @@ static void xgbe_stopdev(struct work_struct *work)
1426 netdev_alert(pdata->netdev, "device stopped\n"); 1492 netdev_alert(pdata->netdev, "device stopped\n");
1427} 1493}
1428 1494
1429static void xgbe_restart_dev(struct xgbe_prv_data *pdata) 1495void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
1430{ 1496{
1431 DBGPR("-->xgbe_restart_dev\n"); 1497 /* If not running, "restart" will happen on open */
1498 if (!netif_running(pdata->netdev))
1499 return;
1500
1501 xgbe_stop(pdata);
1432 1502
1503 xgbe_free_memory(pdata);
1504 xgbe_alloc_memory(pdata);
1505
1506 xgbe_start(pdata);
1507}
1508
1509void xgbe_restart_dev(struct xgbe_prv_data *pdata)
1510{
1433 /* If not running, "restart" will happen on open */ 1511 /* If not running, "restart" will happen on open */
1434 if (!netif_running(pdata->netdev)) 1512 if (!netif_running(pdata->netdev))
1435 return; 1513 return;
@@ -1440,8 +1518,6 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
1440 xgbe_free_rx_data(pdata); 1518 xgbe_free_rx_data(pdata);
1441 1519
1442 xgbe_start(pdata); 1520 xgbe_start(pdata);
1443
1444 DBGPR("<--xgbe_restart_dev\n");
1445} 1521}
1446 1522
1447static void xgbe_restart(struct work_struct *work) 1523static void xgbe_restart(struct work_struct *work)
@@ -1827,11 +1903,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1827static int xgbe_open(struct net_device *netdev) 1903static int xgbe_open(struct net_device *netdev)
1828{ 1904{
1829 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1905 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1830 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1831 int ret; 1906 int ret;
1832 1907
1833 DBGPR("-->xgbe_open\n");
1834
1835 /* Create the various names based on netdev name */ 1908 /* Create the various names based on netdev name */
1836 snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs", 1909 snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
1837 netdev_name(netdev)); 1910 netdev_name(netdev));
@@ -1876,43 +1949,25 @@ static int xgbe_open(struct net_device *netdev)
1876 goto err_sysclk; 1949 goto err_sysclk;
1877 } 1950 }
1878 1951
1879 /* Calculate the Rx buffer size before allocating rings */
1880 ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
1881 if (ret < 0)
1882 goto err_ptpclk;
1883 pdata->rx_buf_size = ret;
1884
1885 /* Allocate the channel and ring structures */
1886 ret = xgbe_alloc_channels(pdata);
1887 if (ret)
1888 goto err_ptpclk;
1889
1890 /* Allocate the ring descriptors and buffers */
1891 ret = desc_if->alloc_ring_resources(pdata);
1892 if (ret)
1893 goto err_channels;
1894
1895 INIT_WORK(&pdata->service_work, xgbe_service); 1952 INIT_WORK(&pdata->service_work, xgbe_service);
1896 INIT_WORK(&pdata->restart_work, xgbe_restart); 1953 INIT_WORK(&pdata->restart_work, xgbe_restart);
1897 INIT_WORK(&pdata->stopdev_work, xgbe_stopdev); 1954 INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
1898 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); 1955 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
1899 xgbe_init_timers(pdata); 1956
1957 ret = xgbe_alloc_memory(pdata);
1958 if (ret)
1959 goto err_ptpclk;
1900 1960
1901 ret = xgbe_start(pdata); 1961 ret = xgbe_start(pdata);
1902 if (ret) 1962 if (ret)
1903 goto err_rings; 1963 goto err_mem;
1904 1964
1905 clear_bit(XGBE_DOWN, &pdata->dev_state); 1965 clear_bit(XGBE_DOWN, &pdata->dev_state);
1906 1966
1907 DBGPR("<--xgbe_open\n");
1908
1909 return 0; 1967 return 0;
1910 1968
1911err_rings: 1969err_mem:
1912 desc_if->free_ring_resources(pdata); 1970 xgbe_free_memory(pdata);
1913
1914err_channels:
1915 xgbe_free_channels(pdata);
1916 1971
1917err_ptpclk: 1972err_ptpclk:
1918 clk_disable_unprepare(pdata->ptpclk); 1973 clk_disable_unprepare(pdata->ptpclk);
@@ -1932,18 +1987,11 @@ err_dev_wq:
1932static int xgbe_close(struct net_device *netdev) 1987static int xgbe_close(struct net_device *netdev)
1933{ 1988{
1934 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1989 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1935 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1936
1937 DBGPR("-->xgbe_close\n");
1938 1990
1939 /* Stop the device */ 1991 /* Stop the device */
1940 xgbe_stop(pdata); 1992 xgbe_stop(pdata);
1941 1993
1942 /* Free the ring descriptors and buffers */ 1994 xgbe_free_memory(pdata);
1943 desc_if->free_ring_resources(pdata);
1944
1945 /* Free the channel and ring structures */
1946 xgbe_free_channels(pdata);
1947 1995
1948 /* Disable the clocks */ 1996 /* Disable the clocks */
1949 clk_disable_unprepare(pdata->ptpclk); 1997 clk_disable_unprepare(pdata->ptpclk);
@@ -1957,12 +2005,10 @@ static int xgbe_close(struct net_device *netdev)
1957 2005
1958 set_bit(XGBE_DOWN, &pdata->dev_state); 2006 set_bit(XGBE_DOWN, &pdata->dev_state);
1959 2007
1960 DBGPR("<--xgbe_close\n");
1961
1962 return 0; 2008 return 0;
1963} 2009}
1964 2010
1965static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) 2011static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1966{ 2012{
1967 struct xgbe_prv_data *pdata = netdev_priv(netdev); 2013 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1968 struct xgbe_hw_if *hw_if = &pdata->hw_if; 2014 struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -1971,7 +2017,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1971 struct xgbe_ring *ring; 2017 struct xgbe_ring *ring;
1972 struct xgbe_packet_data *packet; 2018 struct xgbe_packet_data *packet;
1973 struct netdev_queue *txq; 2019 struct netdev_queue *txq;
1974 int ret; 2020 netdev_tx_t ret;
1975 2021
1976 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); 2022 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
1977 2023
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index ff397bb25042..a880f10e3e70 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -626,6 +626,217 @@ static int xgbe_get_ts_info(struct net_device *netdev,
626 return 0; 626 return 0;
627} 627}
628 628
629static int xgbe_get_module_info(struct net_device *netdev,
630 struct ethtool_modinfo *modinfo)
631{
632 struct xgbe_prv_data *pdata = netdev_priv(netdev);
633
634 return pdata->phy_if.module_info(pdata, modinfo);
635}
636
637static int xgbe_get_module_eeprom(struct net_device *netdev,
638 struct ethtool_eeprom *eeprom, u8 *data)
639{
640 struct xgbe_prv_data *pdata = netdev_priv(netdev);
641
642 return pdata->phy_if.module_eeprom(pdata, eeprom, data);
643}
644
645static void xgbe_get_ringparam(struct net_device *netdev,
646 struct ethtool_ringparam *ringparam)
647{
648 struct xgbe_prv_data *pdata = netdev_priv(netdev);
649
650 ringparam->rx_max_pending = XGBE_RX_DESC_CNT_MAX;
651 ringparam->tx_max_pending = XGBE_TX_DESC_CNT_MAX;
652 ringparam->rx_pending = pdata->rx_desc_count;
653 ringparam->tx_pending = pdata->tx_desc_count;
654}
655
656static int xgbe_set_ringparam(struct net_device *netdev,
657 struct ethtool_ringparam *ringparam)
658{
659 struct xgbe_prv_data *pdata = netdev_priv(netdev);
660 unsigned int rx, tx;
661
662 if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) {
663 netdev_err(netdev, "unsupported ring parameter\n");
664 return -EINVAL;
665 }
666
667 if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) ||
668 (ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) {
669 netdev_err(netdev,
670 "rx ring parameter must be between %u and %u\n",
671 XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX);
672 return -EINVAL;
673 }
674
675 if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) ||
676 (ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) {
677 netdev_err(netdev,
678 "tx ring parameter must be between %u and %u\n",
679 XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX);
680 return -EINVAL;
681 }
682
683 rx = __rounddown_pow_of_two(ringparam->rx_pending);
684 if (rx != ringparam->rx_pending)
685 netdev_notice(netdev,
686 "rx ring parameter rounded to power of two: %u\n",
687 rx);
688
689 tx = __rounddown_pow_of_two(ringparam->tx_pending);
690 if (tx != ringparam->tx_pending)
691 netdev_notice(netdev,
692 "tx ring parameter rounded to power of two: %u\n",
693 tx);
694
695 if ((rx == pdata->rx_desc_count) &&
696 (tx == pdata->tx_desc_count))
697 goto out;
698
699 pdata->rx_desc_count = rx;
700 pdata->tx_desc_count = tx;
701
702 xgbe_restart_dev(pdata);
703
704out:
705 return 0;
706}
707
708static void xgbe_get_channels(struct net_device *netdev,
709 struct ethtool_channels *channels)
710{
711 struct xgbe_prv_data *pdata = netdev_priv(netdev);
712 unsigned int rx, tx, combined;
713
714 /* Calculate maximums allowed:
715 * - Take into account the number of available IRQs
716 * - Do not take into account the number of online CPUs so that
717 * the user can over-subscribe if desired
718 * - Tx is additionally limited by the number of hardware queues
719 */
720 rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
721 rx = min(rx, pdata->channel_irq_count);
722 tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
723 tx = min(tx, pdata->channel_irq_count);
724 tx = min(tx, pdata->tx_max_q_count);
725
726 combined = min(rx, tx);
727
728 channels->max_combined = combined;
729 channels->max_rx = rx ? rx - 1 : 0;
730 channels->max_tx = tx ? tx - 1 : 0;
731
732 /* Get current settings based on device state */
733 rx = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
734 tx = pdata->new_tx_ring_count ? : pdata->tx_ring_count;
735
736 combined = min(rx, tx);
737 rx -= combined;
738 tx -= combined;
739
740 channels->combined_count = combined;
741 channels->rx_count = rx;
742 channels->tx_count = tx;
743}
744
745static void xgbe_print_set_channels_input(struct net_device *netdev,
746 struct ethtool_channels *channels)
747{
748 netdev_err(netdev, "channel inputs: combined=%u, rx-only=%u, tx-only=%u\n",
749 channels->combined_count, channels->rx_count,
750 channels->tx_count);
751}
752
753static int xgbe_set_channels(struct net_device *netdev,
754 struct ethtool_channels *channels)
755{
756 struct xgbe_prv_data *pdata = netdev_priv(netdev);
757 unsigned int rx, rx_curr, tx, tx_curr, combined;
758
759 /* Calculate maximums allowed:
760 * - Take into account the number of available IRQs
761 * - Do not take into account the number of online CPUs so that
762 * the user can over-subscribe if desired
763 * - Tx is additionally limited by the number of hardware queues
764 */
765 rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
766 rx = min(rx, pdata->channel_irq_count);
767 tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
768 tx = min(tx, pdata->tx_max_q_count);
769 tx = min(tx, pdata->channel_irq_count);
770
771 combined = min(rx, tx);
772
773 /* Should not be setting other count */
774 if (channels->other_count) {
775 netdev_err(netdev,
776 "other channel count must be zero\n");
777 return -EINVAL;
778 }
779
780 /* Require at least one Combined (Rx and Tx) channel */
781 if (!channels->combined_count) {
782 netdev_err(netdev,
783 "at least one combined Rx/Tx channel is required\n");
784 xgbe_print_set_channels_input(netdev, channels);
785 return -EINVAL;
786 }
787
788 /* Check combined channels */
789 if (channels->combined_count > combined) {
790 netdev_err(netdev,
791 "combined channel count cannot exceed %u\n",
792 combined);
793 xgbe_print_set_channels_input(netdev, channels);
794 return -EINVAL;
795 }
796
797 /* Can have some Rx-only or Tx-only channels, but not both */
798 if (channels->rx_count && channels->tx_count) {
799 netdev_err(netdev,
800 "cannot specify both Rx-only and Tx-only channels\n");
801 xgbe_print_set_channels_input(netdev, channels);
802 return -EINVAL;
803 }
804
805 /* Check that we don't exceed the maximum number of channels */
806 if ((channels->combined_count + channels->rx_count) > rx) {
807 netdev_err(netdev,
808 "total Rx channels (%u) requested exceeds maximum available (%u)\n",
809 channels->combined_count + channels->rx_count, rx);
810 xgbe_print_set_channels_input(netdev, channels);
811 return -EINVAL;
812 }
813
814 if ((channels->combined_count + channels->tx_count) > tx) {
815 netdev_err(netdev,
816 "total Tx channels (%u) requested exceeds maximum available (%u)\n",
817 channels->combined_count + channels->tx_count, tx);
818 xgbe_print_set_channels_input(netdev, channels);
819 return -EINVAL;
820 }
821
822 rx = channels->combined_count + channels->rx_count;
823 tx = channels->combined_count + channels->tx_count;
824
825 rx_curr = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
826 tx_curr = pdata->new_tx_ring_count ? : pdata->tx_ring_count;
827
828 if ((rx == rx_curr) && (tx == tx_curr))
829 goto out;
830
831 pdata->new_rx_ring_count = rx;
832 pdata->new_tx_ring_count = tx;
833
834 xgbe_full_restart_dev(pdata);
835
836out:
837 return 0;
838}
839
629static const struct ethtool_ops xgbe_ethtool_ops = { 840static const struct ethtool_ops xgbe_ethtool_ops = {
630 .get_drvinfo = xgbe_get_drvinfo, 841 .get_drvinfo = xgbe_get_drvinfo,
631 .get_msglevel = xgbe_get_msglevel, 842 .get_msglevel = xgbe_get_msglevel,
@@ -646,6 +857,12 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
646 .get_ts_info = xgbe_get_ts_info, 857 .get_ts_info = xgbe_get_ts_info,
647 .get_link_ksettings = xgbe_get_link_ksettings, 858 .get_link_ksettings = xgbe_get_link_ksettings,
648 .set_link_ksettings = xgbe_set_link_ksettings, 859 .set_link_ksettings = xgbe_set_link_ksettings,
860 .get_module_info = xgbe_get_module_info,
861 .get_module_eeprom = xgbe_get_module_eeprom,
862 .get_ringparam = xgbe_get_ringparam,
863 .set_ringparam = xgbe_set_ringparam,
864 .get_channels = xgbe_get_channels,
865 .set_channels = xgbe_set_channels,
649}; 866};
650 867
651const struct ethtool_ops *xgbe_get_ethtool_ops(void) 868const struct ethtool_ops *xgbe_get_ethtool_ops(void)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 441d0973957b..b41f23679a08 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -265,7 +265,6 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
265{ 265{
266 struct net_device *netdev = pdata->netdev; 266 struct net_device *netdev = pdata->netdev;
267 struct device *dev = pdata->dev; 267 struct device *dev = pdata->dev;
268 unsigned int i;
269 int ret; 268 int ret;
270 269
271 netdev->irq = pdata->dev_irq; 270 netdev->irq = pdata->dev_irq;
@@ -324,26 +323,9 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
324 pdata->tx_ring_count, pdata->rx_ring_count); 323 pdata->tx_ring_count, pdata->rx_ring_count);
325 } 324 }
326 325
327 /* Set the number of queues */ 326 /* Initialize RSS hash key */
328 ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
329 if (ret) {
330 dev_err(dev, "error setting real tx queue count\n");
331 return ret;
332 }
333
334 ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
335 if (ret) {
336 dev_err(dev, "error setting real rx queue count\n");
337 return ret;
338 }
339
340 /* Initialize RSS hash key and lookup table */
341 netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key)); 327 netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
342 328
343 for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
344 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
345 i % pdata->rx_ring_count);
346
347 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); 329 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
348 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); 330 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
349 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); 331 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 1b45cd73a258..8a3a60bb2688 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -126,6 +126,24 @@
126#include "xgbe.h" 126#include "xgbe.h"
127#include "xgbe-common.h" 127#include "xgbe-common.h"
128 128
129static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata,
130 struct ethtool_eeprom *eeprom, u8 *data)
131{
132 if (!pdata->phy_if.phy_impl.module_eeprom)
133 return -ENXIO;
134
135 return pdata->phy_if.phy_impl.module_eeprom(pdata, eeprom, data);
136}
137
138static int xgbe_phy_module_info(struct xgbe_prv_data *pdata,
139 struct ethtool_modinfo *modinfo)
140{
141 if (!pdata->phy_if.phy_impl.module_info)
142 return -ENXIO;
143
144 return pdata->phy_if.phy_impl.module_info(pdata, modinfo);
145}
146
129static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata) 147static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata)
130{ 148{
131 int reg; 149 int reg;
@@ -198,31 +216,8 @@ static void xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata)
198 xgbe_an37_clear_interrupts(pdata); 216 xgbe_an37_clear_interrupts(pdata);
199} 217}
200 218
201static void xgbe_an73_enable_kr_training(struct xgbe_prv_data *pdata)
202{
203 unsigned int reg;
204
205 reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
206
207 reg |= XGBE_KR_TRAINING_ENABLE;
208 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
209}
210
211static void xgbe_an73_disable_kr_training(struct xgbe_prv_data *pdata)
212{
213 unsigned int reg;
214
215 reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
216
217 reg &= ~XGBE_KR_TRAINING_ENABLE;
218 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
219}
220
221static void xgbe_kr_mode(struct xgbe_prv_data *pdata) 219static void xgbe_kr_mode(struct xgbe_prv_data *pdata)
222{ 220{
223 /* Enable KR training */
224 xgbe_an73_enable_kr_training(pdata);
225
226 /* Set MAC to 10G speed */ 221 /* Set MAC to 10G speed */
227 pdata->hw_if.set_speed(pdata, SPEED_10000); 222 pdata->hw_if.set_speed(pdata, SPEED_10000);
228 223
@@ -232,9 +227,6 @@ static void xgbe_kr_mode(struct xgbe_prv_data *pdata)
232 227
233static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata) 228static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
234{ 229{
235 /* Disable KR training */
236 xgbe_an73_disable_kr_training(pdata);
237
238 /* Set MAC to 2.5G speed */ 230 /* Set MAC to 2.5G speed */
239 pdata->hw_if.set_speed(pdata, SPEED_2500); 231 pdata->hw_if.set_speed(pdata, SPEED_2500);
240 232
@@ -244,9 +236,6 @@ static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
244 236
245static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata) 237static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata)
246{ 238{
247 /* Disable KR training */
248 xgbe_an73_disable_kr_training(pdata);
249
250 /* Set MAC to 1G speed */ 239 /* Set MAC to 1G speed */
251 pdata->hw_if.set_speed(pdata, SPEED_1000); 240 pdata->hw_if.set_speed(pdata, SPEED_1000);
252 241
@@ -260,9 +249,6 @@ static void xgbe_sfi_mode(struct xgbe_prv_data *pdata)
260 if (pdata->kr_redrv) 249 if (pdata->kr_redrv)
261 return xgbe_kr_mode(pdata); 250 return xgbe_kr_mode(pdata);
262 251
263 /* Disable KR training */
264 xgbe_an73_disable_kr_training(pdata);
265
266 /* Set MAC to 10G speed */ 252 /* Set MAC to 10G speed */
267 pdata->hw_if.set_speed(pdata, SPEED_10000); 253 pdata->hw_if.set_speed(pdata, SPEED_10000);
268 254
@@ -272,9 +258,6 @@ static void xgbe_sfi_mode(struct xgbe_prv_data *pdata)
272 258
273static void xgbe_x_mode(struct xgbe_prv_data *pdata) 259static void xgbe_x_mode(struct xgbe_prv_data *pdata)
274{ 260{
275 /* Disable KR training */
276 xgbe_an73_disable_kr_training(pdata);
277
278 /* Set MAC to 1G speed */ 261 /* Set MAC to 1G speed */
279 pdata->hw_if.set_speed(pdata, SPEED_1000); 262 pdata->hw_if.set_speed(pdata, SPEED_1000);
280 263
@@ -284,9 +267,6 @@ static void xgbe_x_mode(struct xgbe_prv_data *pdata)
284 267
285static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata) 268static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
286{ 269{
287 /* Disable KR training */
288 xgbe_an73_disable_kr_training(pdata);
289
290 /* Set MAC to 1G speed */ 270 /* Set MAC to 1G speed */
291 pdata->hw_if.set_speed(pdata, SPEED_1000); 271 pdata->hw_if.set_speed(pdata, SPEED_1000);
292 272
@@ -296,9 +276,6 @@ static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
296 276
297static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata) 277static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata)
298{ 278{
299 /* Disable KR training */
300 xgbe_an73_disable_kr_training(pdata);
301
302 /* Set MAC to 1G speed */ 279 /* Set MAC to 1G speed */
303 pdata->hw_if.set_speed(pdata, SPEED_1000); 280 pdata->hw_if.set_speed(pdata, SPEED_1000);
304 281
@@ -354,13 +331,15 @@ static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
354 xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); 331 xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
355} 332}
356 333
357static void xgbe_set_mode(struct xgbe_prv_data *pdata, 334static bool xgbe_set_mode(struct xgbe_prv_data *pdata,
358 enum xgbe_mode mode) 335 enum xgbe_mode mode)
359{ 336{
360 if (mode == xgbe_cur_mode(pdata)) 337 if (mode == xgbe_cur_mode(pdata))
361 return; 338 return false;
362 339
363 xgbe_change_mode(pdata, mode); 340 xgbe_change_mode(pdata, mode);
341
342 return true;
364} 343}
365 344
366static bool xgbe_use_mode(struct xgbe_prv_data *pdata, 345static bool xgbe_use_mode(struct xgbe_prv_data *pdata,
@@ -407,6 +386,12 @@ static void xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable,
407{ 386{
408 unsigned int reg; 387 unsigned int reg;
409 388
389 /* Disable KR training for now */
390 reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
391 reg &= ~XGBE_KR_TRAINING_ENABLE;
392 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
393
394 /* Update AN settings */
410 reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); 395 reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
411 reg &= ~MDIO_AN_CTRL1_ENABLE; 396 reg &= ~MDIO_AN_CTRL1_ENABLE;
412 397
@@ -504,21 +489,19 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
504 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg); 489 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
505 490
506 /* Start KR training */ 491 /* Start KR training */
507 reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); 492 if (pdata->phy_if.phy_impl.kr_training_pre)
508 if (reg & XGBE_KR_TRAINING_ENABLE) { 493 pdata->phy_if.phy_impl.kr_training_pre(pdata);
509 if (pdata->phy_if.phy_impl.kr_training_pre)
510 pdata->phy_if.phy_impl.kr_training_pre(pdata);
511 494
512 reg |= XGBE_KR_TRAINING_START; 495 reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
513 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, 496 reg |= XGBE_KR_TRAINING_ENABLE;
514 reg); 497 reg |= XGBE_KR_TRAINING_START;
498 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
515 499
516 netif_dbg(pdata, link, pdata->netdev, 500 netif_dbg(pdata, link, pdata->netdev,
517 "KR training initiated\n"); 501 "KR training initiated\n");
518 502
519 if (pdata->phy_if.phy_impl.kr_training_post) 503 if (pdata->phy_if.phy_impl.kr_training_post)
520 pdata->phy_if.phy_impl.kr_training_post(pdata); 504 pdata->phy_if.phy_impl.kr_training_post(pdata);
521 }
522 505
523 return XGBE_AN_PAGE_RECEIVED; 506 return XGBE_AN_PAGE_RECEIVED;
524} 507}
@@ -1128,14 +1111,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
1128 1111
1129 if (pdata->tx_pause != pdata->phy.tx_pause) { 1112 if (pdata->tx_pause != pdata->phy.tx_pause) {
1130 new_state = 1; 1113 new_state = 1;
1131 pdata->hw_if.config_tx_flow_control(pdata);
1132 pdata->tx_pause = pdata->phy.tx_pause; 1114 pdata->tx_pause = pdata->phy.tx_pause;
1115 pdata->hw_if.config_tx_flow_control(pdata);
1133 } 1116 }
1134 1117
1135 if (pdata->rx_pause != pdata->phy.rx_pause) { 1118 if (pdata->rx_pause != pdata->phy.rx_pause) {
1136 new_state = 1; 1119 new_state = 1;
1137 pdata->hw_if.config_rx_flow_control(pdata);
1138 pdata->rx_pause = pdata->phy.rx_pause; 1120 pdata->rx_pause = pdata->phy.rx_pause;
1121 pdata->hw_if.config_rx_flow_control(pdata);
1139 } 1122 }
1140 1123
1141 /* Speed support */ 1124 /* Speed support */
@@ -1197,21 +1180,23 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
1197 return 0; 1180 return 0;
1198} 1181}
1199 1182
1200static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) 1183static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode)
1201{ 1184{
1202 int ret; 1185 int ret;
1203 1186
1187 mutex_lock(&pdata->an_mutex);
1188
1204 set_bit(XGBE_LINK_INIT, &pdata->dev_state); 1189 set_bit(XGBE_LINK_INIT, &pdata->dev_state);
1205 pdata->link_check = jiffies; 1190 pdata->link_check = jiffies;
1206 1191
1207 ret = pdata->phy_if.phy_impl.an_config(pdata); 1192 ret = pdata->phy_if.phy_impl.an_config(pdata);
1208 if (ret) 1193 if (ret)
1209 return ret; 1194 goto out;
1210 1195
1211 if (pdata->phy.autoneg != AUTONEG_ENABLE) { 1196 if (pdata->phy.autoneg != AUTONEG_ENABLE) {
1212 ret = xgbe_phy_config_fixed(pdata); 1197 ret = xgbe_phy_config_fixed(pdata);
1213 if (ret || !pdata->kr_redrv) 1198 if (ret || !pdata->kr_redrv)
1214 return ret; 1199 goto out;
1215 1200
1216 netif_dbg(pdata, link, pdata->netdev, "AN redriver support\n"); 1201 netif_dbg(pdata, link, pdata->netdev, "AN redriver support\n");
1217 } else { 1202 } else {
@@ -1221,24 +1206,27 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
1221 /* Disable auto-negotiation interrupt */ 1206 /* Disable auto-negotiation interrupt */
1222 disable_irq(pdata->an_irq); 1207 disable_irq(pdata->an_irq);
1223 1208
1224 /* Start auto-negotiation in a supported mode */ 1209 if (set_mode) {
1225 if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { 1210 /* Start auto-negotiation in a supported mode */
1226 xgbe_set_mode(pdata, XGBE_MODE_KR); 1211 if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
1227 } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { 1212 xgbe_set_mode(pdata, XGBE_MODE_KR);
1228 xgbe_set_mode(pdata, XGBE_MODE_KX_2500); 1213 } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
1229 } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { 1214 xgbe_set_mode(pdata, XGBE_MODE_KX_2500);
1230 xgbe_set_mode(pdata, XGBE_MODE_KX_1000); 1215 } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
1231 } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { 1216 xgbe_set_mode(pdata, XGBE_MODE_KX_1000);
1232 xgbe_set_mode(pdata, XGBE_MODE_SFI); 1217 } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
1233 } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { 1218 xgbe_set_mode(pdata, XGBE_MODE_SFI);
1234 xgbe_set_mode(pdata, XGBE_MODE_X); 1219 } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
1235 } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { 1220 xgbe_set_mode(pdata, XGBE_MODE_X);
1236 xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000); 1221 } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
1237 } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { 1222 xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
1238 xgbe_set_mode(pdata, XGBE_MODE_SGMII_100); 1223 } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
1239 } else { 1224 xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
1240 enable_irq(pdata->an_irq); 1225 } else {
1241 return -EINVAL; 1226 enable_irq(pdata->an_irq);
1227 ret = -EINVAL;
1228 goto out;
1229 }
1242 } 1230 }
1243 1231
1244 /* Disable and stop any in progress auto-negotiation */ 1232 /* Disable and stop any in progress auto-negotiation */
@@ -1258,16 +1246,7 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
1258 xgbe_an_init(pdata); 1246 xgbe_an_init(pdata);
1259 xgbe_an_restart(pdata); 1247 xgbe_an_restart(pdata);
1260 1248
1261 return 0; 1249out:
1262}
1263
1264static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
1265{
1266 int ret;
1267
1268 mutex_lock(&pdata->an_mutex);
1269
1270 ret = __xgbe_phy_config_aneg(pdata);
1271 if (ret) 1250 if (ret)
1272 set_bit(XGBE_LINK_ERR, &pdata->dev_state); 1251 set_bit(XGBE_LINK_ERR, &pdata->dev_state);
1273 else 1252 else
@@ -1278,6 +1257,16 @@ static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
1278 return ret; 1257 return ret;
1279} 1258}
1280 1259
1260static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
1261{
1262 return __xgbe_phy_config_aneg(pdata, true);
1263}
1264
1265static int xgbe_phy_reconfig_aneg(struct xgbe_prv_data *pdata)
1266{
1267 return __xgbe_phy_config_aneg(pdata, false);
1268}
1269
1281static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata) 1270static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
1282{ 1271{
1283 return (pdata->an_result == XGBE_AN_COMPLETE); 1272 return (pdata->an_result == XGBE_AN_COMPLETE);
@@ -1334,7 +1323,8 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
1334 1323
1335 pdata->phy.duplex = DUPLEX_FULL; 1324 pdata->phy.duplex = DUPLEX_FULL;
1336 1325
1337 xgbe_set_mode(pdata, mode); 1326 if (xgbe_set_mode(pdata, mode) && pdata->an_again)
1327 xgbe_phy_reconfig_aneg(pdata);
1338} 1328}
1339 1329
1340static void xgbe_phy_status(struct xgbe_prv_data *pdata) 1330static void xgbe_phy_status(struct xgbe_prv_data *pdata)
@@ -1639,4 +1629,7 @@ void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
1639 phy_if->phy_valid_speed = xgbe_phy_valid_speed; 1629 phy_if->phy_valid_speed = xgbe_phy_valid_speed;
1640 1630
1641 phy_if->an_isr = xgbe_an_combined_isr; 1631 phy_if->an_isr = xgbe_an_combined_isr;
1632
1633 phy_if->module_info = xgbe_phy_module_info;
1634 phy_if->module_eeprom = xgbe_phy_module_eeprom;
1642} 1635}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 82d1f416ee2a..7b86240ecd5f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -335,16 +335,33 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
335 pdata->awcr = XGBE_DMA_PCI_AWCR; 335 pdata->awcr = XGBE_DMA_PCI_AWCR;
336 pdata->awarcr = XGBE_DMA_PCI_AWARCR; 336 pdata->awarcr = XGBE_DMA_PCI_AWARCR;
337 337
338 /* Read the port property registers */
339 pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0);
340 pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1);
341 pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2);
342 pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3);
343 pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4);
344 if (netif_msg_probe(pdata)) {
345 dev_dbg(dev, "port property 0 = %#010x\n", pdata->pp0);
346 dev_dbg(dev, "port property 1 = %#010x\n", pdata->pp1);
347 dev_dbg(dev, "port property 2 = %#010x\n", pdata->pp2);
348 dev_dbg(dev, "port property 3 = %#010x\n", pdata->pp3);
349 dev_dbg(dev, "port property 4 = %#010x\n", pdata->pp4);
350 }
351
338 /* Set the maximum channels and queues */ 352 /* Set the maximum channels and queues */
339 reg = XP_IOREAD(pdata, XP_PROP_1); 353 pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
340 pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA); 354 MAX_TX_DMA);
341 pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA); 355 pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
342 pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES); 356 MAX_RX_DMA);
343 pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES); 357 pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
358 MAX_TX_QUEUES);
359 pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
360 MAX_RX_QUEUES);
344 if (netif_msg_probe(pdata)) { 361 if (netif_msg_probe(pdata)) {
345 dev_dbg(dev, "max tx/rx channel count = %u/%u\n", 362 dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
346 pdata->tx_max_channel_count, 363 pdata->tx_max_channel_count,
347 pdata->tx_max_channel_count); 364 pdata->rx_max_channel_count);
348 dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n", 365 dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n",
349 pdata->tx_max_q_count, pdata->rx_max_q_count); 366 pdata->tx_max_q_count, pdata->rx_max_q_count);
350 } 367 }
@@ -353,12 +370,13 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
353 xgbe_set_counts(pdata); 370 xgbe_set_counts(pdata);
354 371
355 /* Set the maximum fifo amounts */ 372 /* Set the maximum fifo amounts */
356 reg = XP_IOREAD(pdata, XP_PROP_2); 373 pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
357 pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE); 374 TX_FIFO_SIZE);
358 pdata->tx_max_fifo_size *= 16384; 375 pdata->tx_max_fifo_size *= 16384;
359 pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size, 376 pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
360 pdata->vdata->tx_max_fifo_size); 377 pdata->vdata->tx_max_fifo_size);
361 pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE); 378 pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
379 RX_FIFO_SIZE);
362 pdata->rx_max_fifo_size *= 16384; 380 pdata->rx_max_fifo_size *= 16384;
363 pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size, 381 pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
364 pdata->vdata->rx_max_fifo_size); 382 pdata->vdata->rx_max_fifo_size);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index aac884314000..151bdb629e8a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -119,6 +119,7 @@
119#include <linux/kmod.h> 119#include <linux/kmod.h>
120#include <linux/mdio.h> 120#include <linux/mdio.h>
121#include <linux/phy.h> 121#include <linux/phy.h>
122#include <linux/ethtool.h>
122 123
123#include "xgbe.h" 124#include "xgbe.h"
124#include "xgbe-common.h" 125#include "xgbe-common.h"
@@ -270,6 +271,15 @@ struct xgbe_sfp_eeprom {
270 u8 vendor[32]; 271 u8 vendor[32];
271}; 272};
272 273
274#define XGBE_SFP_DIAGS_SUPPORTED(_x) \
275 ((_x)->extd[XGBE_SFP_EXTD_SFF_8472] && \
276 !((_x)->extd[XGBE_SFP_EXTD_DIAG] & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
277
278#define XGBE_SFP_EEPROM_BASE_LEN 256
279#define XGBE_SFP_EEPROM_DIAG_LEN 256
280#define XGBE_SFP_EEPROM_MAX (XGBE_SFP_EEPROM_BASE_LEN + \
281 XGBE_SFP_EEPROM_DIAG_LEN)
282
273#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE " 283#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
274#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 " 284#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
275 285
@@ -327,8 +337,6 @@ struct xgbe_phy_data {
327 337
328 unsigned int mdio_addr; 338 unsigned int mdio_addr;
329 339
330 unsigned int comm_owned;
331
332 /* SFP Support */ 340 /* SFP Support */
333 enum xgbe_sfp_comm sfp_comm; 341 enum xgbe_sfp_comm sfp_comm;
334 unsigned int sfp_mux_address; 342 unsigned int sfp_mux_address;
@@ -345,7 +353,6 @@ struct xgbe_phy_data {
345 unsigned int sfp_rx_los; 353 unsigned int sfp_rx_los;
346 unsigned int sfp_tx_fault; 354 unsigned int sfp_tx_fault;
347 unsigned int sfp_mod_absent; 355 unsigned int sfp_mod_absent;
348 unsigned int sfp_diags;
349 unsigned int sfp_changed; 356 unsigned int sfp_changed;
350 unsigned int sfp_phy_avail; 357 unsigned int sfp_phy_avail;
351 unsigned int sfp_cable_len; 358 unsigned int sfp_cable_len;
@@ -382,12 +389,6 @@ static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
382static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata, 389static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
383 struct xgbe_i2c_op *i2c_op) 390 struct xgbe_i2c_op *i2c_op)
384{ 391{
385 struct xgbe_phy_data *phy_data = pdata->phy_data;
386
387 /* Be sure we own the bus */
388 if (WARN_ON(!phy_data->comm_owned))
389 return -EIO;
390
391 return pdata->i2c_if.i2c_xfer(pdata, i2c_op); 392 return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
392} 393}
393 394
@@ -549,10 +550,6 @@ static int xgbe_phy_sfp_get_mux(struct xgbe_prv_data *pdata)
549 550
550static void xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata) 551static void xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata)
551{ 552{
552 struct xgbe_phy_data *phy_data = pdata->phy_data;
553
554 phy_data->comm_owned = 0;
555
556 mutex_unlock(&xgbe_phy_comm_lock); 553 mutex_unlock(&xgbe_phy_comm_lock);
557} 554}
558 555
@@ -562,9 +559,6 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
562 unsigned long timeout; 559 unsigned long timeout;
563 unsigned int mutex_id; 560 unsigned int mutex_id;
564 561
565 if (phy_data->comm_owned)
566 return 0;
567
568 /* The I2C and MDIO/GPIO bus is multiplexed between multiple devices, 562 /* The I2C and MDIO/GPIO bus is multiplexed between multiple devices,
569 * the driver needs to take the software mutex and then the hardware 563 * the driver needs to take the software mutex and then the hardware
570 * mutexes before being able to use the busses. 564 * mutexes before being able to use the busses.
@@ -593,7 +587,6 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
593 XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id); 587 XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
594 XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id); 588 XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);
595 589
596 phy_data->comm_owned = 1;
597 return 0; 590 return 0;
598 } 591 }
599 592
@@ -867,6 +860,9 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
867 struct xgbe_phy_data *phy_data = pdata->phy_data; 860 struct xgbe_phy_data *phy_data = pdata->phy_data;
868 unsigned int phy_id = phy_data->phydev->phy_id; 861 unsigned int phy_id = phy_data->phydev->phy_id;
869 862
863 if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
864 return false;
865
870 if ((phy_id & 0xfffffff0) != 0x01ff0cc0) 866 if ((phy_id & 0xfffffff0) != 0x01ff0cc0)
871 return false; 867 return false;
872 868
@@ -882,9 +878,10 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
882 phy_write(phy_data->phydev, 0x04, 0x0d01); 878 phy_write(phy_data->phydev, 0x04, 0x0d01);
883 phy_write(phy_data->phydev, 0x00, 0x9140); 879 phy_write(phy_data->phydev, 0x00, 0x9140);
884 880
885 phy_data->phydev->supported = PHY_GBIT_FEATURES; 881 phy_data->phydev->supported = PHY_10BT_FEATURES |
886 phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 882 PHY_100BT_FEATURES |
887 phy_data->phydev->advertising = phy_data->phydev->supported; 883 PHY_1000BT_FEATURES;
884 phy_support_asym_pause(phy_data->phydev);
888 885
889 netif_dbg(pdata, drv, pdata->netdev, 886 netif_dbg(pdata, drv, pdata->netdev,
890 "Finisar PHY quirk in place\n"); 887 "Finisar PHY quirk in place\n");
@@ -892,8 +889,84 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
892 return true; 889 return true;
893} 890}
894 891
892static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
893{
894 struct xgbe_phy_data *phy_data = pdata->phy_data;
895 struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
896 unsigned int phy_id = phy_data->phydev->phy_id;
897 int reg;
898
899 if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
900 return false;
901
902 if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
903 XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN))
904 return false;
905
906 /* For Bel-Fuse, use the extra AN flag */
907 pdata->an_again = 1;
908
909 if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
910 XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN))
911 return false;
912
913 if ((phy_id & 0xfffffff0) != 0x03625d10)
914 return false;
915
916 /* Disable RGMII mode */
917 phy_write(phy_data->phydev, 0x18, 0x7007);
918 reg = phy_read(phy_data->phydev, 0x18);
919 phy_write(phy_data->phydev, 0x18, reg & ~0x0080);
920
921 /* Enable fiber register bank */
922 phy_write(phy_data->phydev, 0x1c, 0x7c00);
923 reg = phy_read(phy_data->phydev, 0x1c);
924 reg &= 0x03ff;
925 reg &= ~0x0001;
926 phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0001);
927
928 /* Power down SerDes */
929 reg = phy_read(phy_data->phydev, 0x00);
930 phy_write(phy_data->phydev, 0x00, reg | 0x00800);
931
932 /* Configure SGMII-to-Copper mode */
933 phy_write(phy_data->phydev, 0x1c, 0x7c00);
934 reg = phy_read(phy_data->phydev, 0x1c);
935 reg &= 0x03ff;
936 reg &= ~0x0006;
937 phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0004);
938
939 /* Power up SerDes */
940 reg = phy_read(phy_data->phydev, 0x00);
941 phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
942
943 /* Enable copper register bank */
944 phy_write(phy_data->phydev, 0x1c, 0x7c00);
945 reg = phy_read(phy_data->phydev, 0x1c);
946 reg &= 0x03ff;
947 reg &= ~0x0001;
948 phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg);
949
950 /* Power up SerDes */
951 reg = phy_read(phy_data->phydev, 0x00);
952 phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
953
954 phy_data->phydev->supported = (PHY_10BT_FEATURES |
955 PHY_100BT_FEATURES |
956 PHY_1000BT_FEATURES);
957 phy_support_asym_pause(phy_data->phydev);
958
959 netif_dbg(pdata, drv, pdata->netdev,
960 "BelFuse PHY quirk in place\n");
961
962 return true;
963}
964
895static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata) 965static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata)
896{ 966{
967 if (xgbe_phy_belfuse_phy_quirks(pdata))
968 return;
969
897 if (xgbe_phy_finisar_phy_quirks(pdata)) 970 if (xgbe_phy_finisar_phy_quirks(pdata))
898 return; 971 return;
899} 972}
@@ -910,6 +983,9 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
910 if (phy_data->phydev) 983 if (phy_data->phydev)
911 return 0; 984 return 0;
912 985
986 /* Clear the extra AN flag */
987 pdata->an_again = 0;
988
913 /* Check for the use of an external PHY */ 989 /* Check for the use of an external PHY */
914 if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE) 990 if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE)
915 return 0; 991 return 0;
@@ -1034,37 +1110,6 @@ static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
1034 return false; 1110 return false;
1035} 1111}
1036 1112
1037static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
1038{
1039 struct xgbe_phy_data *phy_data = pdata->phy_data;
1040 struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
1041
1042 if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
1043 XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN))
1044 return false;
1045
1046 if (!memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
1047 XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) {
1048 phy_data->sfp_base = XGBE_SFP_BASE_1000_SX;
1049 phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
1050 phy_data->sfp_speed = XGBE_SFP_SPEED_1000;
1051 if (phy_data->sfp_changed)
1052 netif_dbg(pdata, drv, pdata->netdev,
1053 "Bel-Fuse SFP quirk in place\n");
1054 return true;
1055 }
1056
1057 return false;
1058}
1059
1060static bool xgbe_phy_sfp_parse_quirks(struct xgbe_prv_data *pdata)
1061{
1062 if (xgbe_phy_belfuse_parse_quirks(pdata))
1063 return true;
1064
1065 return false;
1066}
1067
1068static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) 1113static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
1069{ 1114{
1070 struct xgbe_phy_data *phy_data = pdata->phy_data; 1115 struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -1083,9 +1128,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
1083 phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); 1128 phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
1084 phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); 1129 phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
1085 1130
1086 if (xgbe_phy_sfp_parse_quirks(pdata))
1087 return;
1088
1089 /* Assume ACTIVE cable unless told it is PASSIVE */ 1131 /* Assume ACTIVE cable unless told it is PASSIVE */
1090 if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) { 1132 if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
1091 phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE; 1133 phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
@@ -1227,13 +1269,6 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
1227 1269
1228 memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom)); 1270 memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));
1229 1271
1230 if (sfp_eeprom.extd[XGBE_SFP_EXTD_SFF_8472]) {
1231 u8 diag_type = sfp_eeprom.extd[XGBE_SFP_EXTD_DIAG];
1232
1233 if (!(diag_type & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
1234 phy_data->sfp_diags = 1;
1235 }
1236
1237 xgbe_phy_free_phy_device(pdata); 1272 xgbe_phy_free_phy_device(pdata);
1238 } else { 1273 } else {
1239 phy_data->sfp_changed = 0; 1274 phy_data->sfp_changed = 0;
@@ -1283,7 +1318,6 @@ static void xgbe_phy_sfp_reset(struct xgbe_phy_data *phy_data)
1283 phy_data->sfp_rx_los = 0; 1318 phy_data->sfp_rx_los = 0;
1284 phy_data->sfp_tx_fault = 0; 1319 phy_data->sfp_tx_fault = 0;
1285 phy_data->sfp_mod_absent = 1; 1320 phy_data->sfp_mod_absent = 1;
1286 phy_data->sfp_diags = 0;
1287 phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN; 1321 phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN;
1288 phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN; 1322 phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN;
1289 phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN; 1323 phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN;
@@ -1326,6 +1360,130 @@ put:
1326 xgbe_phy_put_comm_ownership(pdata); 1360 xgbe_phy_put_comm_ownership(pdata);
1327} 1361}
1328 1362
1363static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata,
1364 struct ethtool_eeprom *eeprom, u8 *data)
1365{
1366 struct xgbe_phy_data *phy_data = pdata->phy_data;
1367 u8 eeprom_addr, eeprom_data[XGBE_SFP_EEPROM_MAX];
1368 struct xgbe_sfp_eeprom *sfp_eeprom;
1369 unsigned int i, j, rem;
1370 int ret;
1371
1372 rem = eeprom->len;
1373
1374 if (!eeprom->len) {
1375 ret = -EINVAL;
1376 goto done;
1377 }
1378
1379 if ((eeprom->offset + eeprom->len) > XGBE_SFP_EEPROM_MAX) {
1380 ret = -EINVAL;
1381 goto done;
1382 }
1383
1384 if (phy_data->port_mode != XGBE_PORT_MODE_SFP) {
1385 ret = -ENXIO;
1386 goto done;
1387 }
1388
1389 if (!netif_running(pdata->netdev)) {
1390 ret = -EIO;
1391 goto done;
1392 }
1393
1394 if (phy_data->sfp_mod_absent) {
1395 ret = -EIO;
1396 goto done;
1397 }
1398
1399 ret = xgbe_phy_get_comm_ownership(pdata);
1400 if (ret) {
1401 ret = -EIO;
1402 goto done;
1403 }
1404
1405 ret = xgbe_phy_sfp_get_mux(pdata);
1406 if (ret) {
1407 netdev_err(pdata->netdev, "I2C error setting SFP MUX\n");
1408 ret = -EIO;
1409 goto put_own;
1410 }
1411
1412 /* Read the SFP serial ID eeprom */
1413 eeprom_addr = 0;
1414 ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS,
1415 &eeprom_addr, sizeof(eeprom_addr),
1416 eeprom_data, XGBE_SFP_EEPROM_BASE_LEN);
1417 if (ret) {
1418 netdev_err(pdata->netdev,
1419 "I2C error reading SFP EEPROM\n");
1420 ret = -EIO;
1421 goto put_mux;
1422 }
1423
1424 sfp_eeprom = (struct xgbe_sfp_eeprom *)eeprom_data;
1425
1426 if (XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) {
1427 /* Read the SFP diagnostic eeprom */
1428 eeprom_addr = 0;
1429 ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_DIAG_INFO_ADDRESS,
1430 &eeprom_addr, sizeof(eeprom_addr),
1431 eeprom_data + XGBE_SFP_EEPROM_BASE_LEN,
1432 XGBE_SFP_EEPROM_DIAG_LEN);
1433 if (ret) {
1434 netdev_err(pdata->netdev,
1435 "I2C error reading SFP DIAGS\n");
1436 ret = -EIO;
1437 goto put_mux;
1438 }
1439 }
1440
1441 for (i = 0, j = eeprom->offset; i < eeprom->len; i++, j++) {
1442 if ((j >= XGBE_SFP_EEPROM_BASE_LEN) &&
1443 !XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom))
1444 break;
1445
1446 data[i] = eeprom_data[j];
1447 rem--;
1448 }
1449
1450put_mux:
1451 xgbe_phy_sfp_put_mux(pdata);
1452
1453put_own:
1454 xgbe_phy_put_comm_ownership(pdata);
1455
1456done:
1457 eeprom->len -= rem;
1458
1459 return ret;
1460}
1461
1462static int xgbe_phy_module_info(struct xgbe_prv_data *pdata,
1463 struct ethtool_modinfo *modinfo)
1464{
1465 struct xgbe_phy_data *phy_data = pdata->phy_data;
1466
1467 if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
1468 return -ENXIO;
1469
1470 if (!netif_running(pdata->netdev))
1471 return -EIO;
1472
1473 if (phy_data->sfp_mod_absent)
1474 return -EIO;
1475
1476 if (XGBE_SFP_DIAGS_SUPPORTED(&phy_data->sfp_eeprom)) {
1477 modinfo->type = ETH_MODULE_SFF_8472;
1478 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1479 } else {
1480 modinfo->type = ETH_MODULE_SFF_8079;
1481 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
1482 }
1483
1484 return 0;
1485}
1486
1329static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) 1487static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
1330{ 1488{
1331 struct ethtool_link_ksettings *lks = &pdata->phy.lks; 1489 struct ethtool_link_ksettings *lks = &pdata->phy.lks;
@@ -1339,10 +1497,7 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
1339 if (!phy_data->phydev) 1497 if (!phy_data->phydev)
1340 return; 1498 return;
1341 1499
1342 if (phy_data->phydev->advertising & ADVERTISED_Pause) 1500 lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising);
1343 lcl_adv |= ADVERTISE_PAUSE_CAP;
1344 if (phy_data->phydev->advertising & ADVERTISED_Asym_Pause)
1345 lcl_adv |= ADVERTISE_PAUSE_ASYM;
1346 1501
1347 if (phy_data->phydev->pause) { 1502 if (phy_data->phydev->pause) {
1348 XGBE_SET_LP_ADV(lks, Pause); 1503 XGBE_SET_LP_ADV(lks, Pause);
@@ -1611,6 +1766,10 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
1611 XGBE_CLR_ADV(dlks, 1000baseKX_Full); 1766 XGBE_CLR_ADV(dlks, 1000baseKX_Full);
1612 XGBE_CLR_ADV(dlks, 10000baseKR_Full); 1767 XGBE_CLR_ADV(dlks, 10000baseKR_Full);
1613 1768
1769 /* Advertise FEC support is present */
1770 if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
1771 XGBE_SET_ADV(dlks, 10000baseR_FEC);
1772
1614 switch (phy_data->port_mode) { 1773 switch (phy_data->port_mode) {
1615 case XGBE_PORT_MODE_BACKPLANE: 1774 case XGBE_PORT_MODE_BACKPLANE:
1616 XGBE_SET_ADV(dlks, 10000baseKR_Full); 1775 XGBE_SET_ADV(dlks, 10000baseKR_Full);
@@ -2421,22 +2580,21 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
2421static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata) 2580static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata)
2422{ 2581{
2423 struct xgbe_phy_data *phy_data = pdata->phy_data; 2582 struct xgbe_phy_data *phy_data = pdata->phy_data;
2424 unsigned int reg;
2425
2426 reg = XP_IOREAD(pdata, XP_PROP_3);
2427 2583
2428 phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 + 2584 phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 +
2429 XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR); 2585 XP_GET_BITS(pdata->pp3, XP_PROP_3,
2586 GPIO_ADDR);
2430 2587
2431 phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK); 2588 phy_data->sfp_gpio_mask = XP_GET_BITS(pdata->pp3, XP_PROP_3,
2589 GPIO_MASK);
2432 2590
2433 phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3, 2591 phy_data->sfp_gpio_rx_los = XP_GET_BITS(pdata->pp3, XP_PROP_3,
2434 GPIO_RX_LOS); 2592 GPIO_RX_LOS);
2435 phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3, 2593 phy_data->sfp_gpio_tx_fault = XP_GET_BITS(pdata->pp3, XP_PROP_3,
2436 GPIO_TX_FAULT); 2594 GPIO_TX_FAULT);
2437 phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3, 2595 phy_data->sfp_gpio_mod_absent = XP_GET_BITS(pdata->pp3, XP_PROP_3,
2438 GPIO_MOD_ABS); 2596 GPIO_MOD_ABS);
2439 phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3, 2597 phy_data->sfp_gpio_rate_select = XP_GET_BITS(pdata->pp3, XP_PROP_3,
2440 GPIO_RATE_SELECT); 2598 GPIO_RATE_SELECT);
2441 2599
2442 if (netif_msg_probe(pdata)) { 2600 if (netif_msg_probe(pdata)) {
@@ -2458,18 +2616,17 @@ static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata)
2458static void xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata) 2616static void xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata)
2459{ 2617{
2460 struct xgbe_phy_data *phy_data = pdata->phy_data; 2618 struct xgbe_phy_data *phy_data = pdata->phy_data;
2461 unsigned int reg, mux_addr_hi, mux_addr_lo; 2619 unsigned int mux_addr_hi, mux_addr_lo;
2462 2620
2463 reg = XP_IOREAD(pdata, XP_PROP_4); 2621 mux_addr_hi = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_HI);
2464 2622 mux_addr_lo = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_LO);
2465 mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI);
2466 mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO);
2467 if (mux_addr_lo == XGBE_SFP_DIRECT) 2623 if (mux_addr_lo == XGBE_SFP_DIRECT)
2468 return; 2624 return;
2469 2625
2470 phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545; 2626 phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545;
2471 phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo; 2627 phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo;
2472 phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN); 2628 phy_data->sfp_mux_channel = XP_GET_BITS(pdata->pp4, XP_PROP_4,
2629 MUX_CHAN);
2473 2630
2474 if (netif_msg_probe(pdata)) { 2631 if (netif_msg_probe(pdata)) {
2475 dev_dbg(pdata->dev, "SFP: mux_address=%#x\n", 2632 dev_dbg(pdata->dev, "SFP: mux_address=%#x\n",
@@ -2592,13 +2749,11 @@ static bool xgbe_phy_redrv_error(struct xgbe_phy_data *phy_data)
2592static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata) 2749static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
2593{ 2750{
2594 struct xgbe_phy_data *phy_data = pdata->phy_data; 2751 struct xgbe_phy_data *phy_data = pdata->phy_data;
2595 unsigned int reg;
2596 2752
2597 if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO) 2753 if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO)
2598 return 0; 2754 return 0;
2599 2755
2600 reg = XP_IOREAD(pdata, XP_PROP_3); 2756 phy_data->mdio_reset = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET);
2601 phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET);
2602 switch (phy_data->mdio_reset) { 2757 switch (phy_data->mdio_reset) {
2603 case XGBE_MDIO_RESET_NONE: 2758 case XGBE_MDIO_RESET_NONE:
2604 case XGBE_MDIO_RESET_I2C_GPIO: 2759 case XGBE_MDIO_RESET_I2C_GPIO:
@@ -2612,12 +2767,12 @@ static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
2612 2767
2613 if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) { 2768 if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) {
2614 phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 + 2769 phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 +
2615 XP_GET_BITS(reg, XP_PROP_3, 2770 XP_GET_BITS(pdata->pp3, XP_PROP_3,
2616 MDIO_RESET_I2C_ADDR); 2771 MDIO_RESET_I2C_ADDR);
2617 phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, 2772 phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3,
2618 MDIO_RESET_I2C_GPIO); 2773 MDIO_RESET_I2C_GPIO);
2619 } else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) { 2774 } else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) {
2620 phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, 2775 phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3,
2621 MDIO_RESET_INT_GPIO); 2776 MDIO_RESET_INT_GPIO);
2622 } 2777 }
2623 2778
@@ -2707,12 +2862,9 @@ static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata)
2707 2862
2708static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) 2863static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
2709{ 2864{
2710 unsigned int reg; 2865 if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS))
2711
2712 reg = XP_IOREAD(pdata, XP_PROP_0);
2713 if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS))
2714 return false; 2866 return false;
2715 if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE)) 2867 if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE))
2716 return false; 2868 return false;
2717 2869
2718 return true; 2870 return true;
@@ -2921,7 +3073,6 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
2921 struct ethtool_link_ksettings *lks = &pdata->phy.lks; 3073 struct ethtool_link_ksettings *lks = &pdata->phy.lks;
2922 struct xgbe_phy_data *phy_data; 3074 struct xgbe_phy_data *phy_data;
2923 struct mii_bus *mii; 3075 struct mii_bus *mii;
2924 unsigned int reg;
2925 int ret; 3076 int ret;
2926 3077
2927 /* Check if enabled */ 3078 /* Check if enabled */
@@ -2940,12 +3091,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
2940 return -ENOMEM; 3091 return -ENOMEM;
2941 pdata->phy_data = phy_data; 3092 pdata->phy_data = phy_data;
2942 3093
2943 reg = XP_IOREAD(pdata, XP_PROP_0); 3094 phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE);
2944 phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE); 3095 phy_data->port_id = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_ID);
2945 phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID); 3096 phy_data->port_speeds = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS);
2946 phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS); 3097 phy_data->conn_type = XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE);
2947 phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE); 3098 phy_data->mdio_addr = XP_GET_BITS(pdata->pp0, XP_PROP_0, MDIO_ADDR);
2948 phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR);
2949 if (netif_msg_probe(pdata)) { 3099 if (netif_msg_probe(pdata)) {
2950 dev_dbg(pdata->dev, "port mode=%u\n", phy_data->port_mode); 3100 dev_dbg(pdata->dev, "port mode=%u\n", phy_data->port_mode);
2951 dev_dbg(pdata->dev, "port id=%u\n", phy_data->port_id); 3101 dev_dbg(pdata->dev, "port id=%u\n", phy_data->port_id);
@@ -2954,12 +3104,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
2954 dev_dbg(pdata->dev, "mdio addr=%u\n", phy_data->mdio_addr); 3104 dev_dbg(pdata->dev, "mdio addr=%u\n", phy_data->mdio_addr);
2955 } 3105 }
2956 3106
2957 reg = XP_IOREAD(pdata, XP_PROP_4); 3107 phy_data->redrv = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_PRESENT);
2958 phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT); 3108 phy_data->redrv_if = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_IF);
2959 phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF); 3109 phy_data->redrv_addr = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_ADDR);
2960 phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR); 3110 phy_data->redrv_lane = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_LANE);
2961 phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE); 3111 phy_data->redrv_model = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_MODEL);
2962 phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL);
2963 if (phy_data->redrv && netif_msg_probe(pdata)) { 3112 if (phy_data->redrv && netif_msg_probe(pdata)) {
2964 dev_dbg(pdata->dev, "redrv present\n"); 3113 dev_dbg(pdata->dev, "redrv present\n");
2965 dev_dbg(pdata->dev, "redrv i/f=%u\n", phy_data->redrv_if); 3114 dev_dbg(pdata->dev, "redrv i/f=%u\n", phy_data->redrv_if);
@@ -3231,4 +3380,7 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
3231 3380
3232 phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; 3381 phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
3233 phy_impl->kr_training_post = xgbe_phy_kr_training_post; 3382 phy_impl->kr_training_post = xgbe_phy_kr_training_post;
3383
3384 phy_impl->module_info = xgbe_phy_module_info;
3385 phy_impl->module_eeprom = xgbe_phy_module_eeprom;
3234} 3386}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 95d4b56448c6..47bcbcf58048 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -144,6 +144,11 @@
144#define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1) 144#define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1)
145#define XGBE_RX_DESC_CNT 512 145#define XGBE_RX_DESC_CNT 512
146 146
147#define XGBE_TX_DESC_CNT_MIN 64
148#define XGBE_TX_DESC_CNT_MAX 4096
149#define XGBE_RX_DESC_CNT_MIN 64
150#define XGBE_RX_DESC_CNT_MAX 4096
151
147#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) 152#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
148 153
149/* Descriptors required for maximum contiguous TSO/GSO packet */ 154/* Descriptors required for maximum contiguous TSO/GSO packet */
@@ -835,6 +840,7 @@ struct xgbe_hw_if {
835 * Optional routines: 840 * Optional routines:
836 * an_pre, an_post 841 * an_pre, an_post
837 * kr_training_pre, kr_training_post 842 * kr_training_pre, kr_training_post
843 * module_info, module_eeprom
838 */ 844 */
839struct xgbe_phy_impl_if { 845struct xgbe_phy_impl_if {
840 /* Perform Setup/teardown actions */ 846 /* Perform Setup/teardown actions */
@@ -883,6 +889,12 @@ struct xgbe_phy_impl_if {
883 /* Pre/Post KR training enablement support */ 889 /* Pre/Post KR training enablement support */
884 void (*kr_training_pre)(struct xgbe_prv_data *); 890 void (*kr_training_pre)(struct xgbe_prv_data *);
885 void (*kr_training_post)(struct xgbe_prv_data *); 891 void (*kr_training_post)(struct xgbe_prv_data *);
892
893 /* SFP module related info */
894 int (*module_info)(struct xgbe_prv_data *pdata,
895 struct ethtool_modinfo *modinfo);
896 int (*module_eeprom)(struct xgbe_prv_data *pdata,
897 struct ethtool_eeprom *eeprom, u8 *data);
886}; 898};
887 899
888struct xgbe_phy_if { 900struct xgbe_phy_if {
@@ -905,6 +917,12 @@ struct xgbe_phy_if {
905 /* For single interrupt support */ 917 /* For single interrupt support */
906 irqreturn_t (*an_isr)(struct xgbe_prv_data *); 918 irqreturn_t (*an_isr)(struct xgbe_prv_data *);
907 919
920 /* For ethtool PHY support */
921 int (*module_info)(struct xgbe_prv_data *pdata,
922 struct ethtool_modinfo *modinfo);
923 int (*module_eeprom)(struct xgbe_prv_data *pdata,
924 struct ethtool_eeprom *eeprom, u8 *data);
925
908 /* PHY implementation specific services */ 926 /* PHY implementation specific services */
909 struct xgbe_phy_impl_if phy_impl; 927 struct xgbe_phy_impl_if phy_impl;
910}; 928};
@@ -1027,6 +1045,13 @@ struct xgbe_prv_data {
1027 void __iomem *xprop_regs; /* XGBE property registers */ 1045 void __iomem *xprop_regs; /* XGBE property registers */
1028 void __iomem *xi2c_regs; /* XGBE I2C CSRs */ 1046 void __iomem *xi2c_regs; /* XGBE I2C CSRs */
1029 1047
1048 /* Port property registers */
1049 unsigned int pp0;
1050 unsigned int pp1;
1051 unsigned int pp2;
1052 unsigned int pp3;
1053 unsigned int pp4;
1054
1030 /* Overall device lock */ 1055 /* Overall device lock */
1031 spinlock_t lock; 1056 spinlock_t lock;
1032 1057
@@ -1097,6 +1122,9 @@ struct xgbe_prv_data {
1097 unsigned int rx_ring_count; 1122 unsigned int rx_ring_count;
1098 unsigned int rx_desc_count; 1123 unsigned int rx_desc_count;
1099 1124
1125 unsigned int new_tx_ring_count;
1126 unsigned int new_rx_ring_count;
1127
1100 unsigned int tx_max_q_count; 1128 unsigned int tx_max_q_count;
1101 unsigned int rx_max_q_count; 1129 unsigned int rx_max_q_count;
1102 unsigned int tx_q_count; 1130 unsigned int tx_q_count;
@@ -1233,6 +1261,7 @@ struct xgbe_prv_data {
1233 enum xgbe_rx kr_state; 1261 enum xgbe_rx kr_state;
1234 enum xgbe_rx kx_state; 1262 enum xgbe_rx kx_state;
1235 struct work_struct an_work; 1263 struct work_struct an_work;
1264 unsigned int an_again;
1236 unsigned int an_supported; 1265 unsigned int an_supported;
1237 unsigned int parallel_detect; 1266 unsigned int parallel_detect;
1238 unsigned int fec_ability; 1267 unsigned int fec_ability;
@@ -1310,6 +1339,8 @@ int xgbe_powerup(struct net_device *, unsigned int);
1310int xgbe_powerdown(struct net_device *, unsigned int); 1339int xgbe_powerdown(struct net_device *, unsigned int);
1311void xgbe_init_rx_coalesce(struct xgbe_prv_data *); 1340void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
1312void xgbe_init_tx_coalesce(struct xgbe_prv_data *); 1341void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
1342void xgbe_restart_dev(struct xgbe_prv_data *pdata);
1343void xgbe_full_restart_dev(struct xgbe_prv_data *pdata);
1313 1344
1314#ifdef CONFIG_DEBUG_FS 1345#ifdef CONFIG_DEBUG_FS
1315void xgbe_debugfs_init(struct xgbe_prv_data *); 1346void xgbe_debugfs_init(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig
index 1205861b6318..eedd3f3dd22e 100644
--- a/drivers/net/ethernet/apm/xgene-v2/Kconfig
+++ b/drivers/net/ethernet/apm/xgene-v2/Kconfig
@@ -1,6 +1,5 @@
1config NET_XGENE_V2 1config NET_XGENE_V2
2 tristate "APM X-Gene SoC Ethernet-v2 Driver" 2 tristate "APM X-Gene SoC Ethernet-v2 Driver"
3 depends on HAS_DMA
4 depends on ARCH_XGENE || COMPILE_TEST 3 depends on ARCH_XGENE || COMPILE_TEST
5 help 4 help
6 This is the Ethernet driver for the on-chip ethernet interface 5 This is the Ethernet driver for the on-chip ethernet interface
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index afccb033177b..e4e33c900b57 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -1,6 +1,5 @@
1config NET_XGENE 1config NET_XGENE
2 tristate "APM X-Gene SoC Ethernet Driver" 2 tristate "APM X-Gene SoC Ethernet Driver"
3 depends on HAS_DMA
4 depends on ARCH_XGENE || COMPILE_TEST 3 depends on ARCH_XGENE || COMPILE_TEST
5 select PHYLIB 4 select PHYLIB
6 select MDIO_XGENE 5 select MDIO_XGENE
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 4f50f11718f4..78dd09b5beeb 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -306,45 +306,25 @@ static int xgene_set_pauseparam(struct net_device *ndev,
306{ 306{
307 struct xgene_enet_pdata *pdata = netdev_priv(ndev); 307 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
308 struct phy_device *phydev = ndev->phydev; 308 struct phy_device *phydev = ndev->phydev;
309 u32 oldadv, newadv;
310 309
311 if (phy_interface_mode_is_rgmii(pdata->phy_mode) || 310 if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
312 pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { 311 pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
313 if (!phydev) 312 if (!phydev)
314 return -EINVAL; 313 return -EINVAL;
315 314
316 if (!(phydev->supported & SUPPORTED_Pause) || 315 if (!phy_validate_pause(phydev, pp))
317 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
318 pp->rx_pause != pp->tx_pause))
319 return -EINVAL; 316 return -EINVAL;
320 317
321 pdata->pause_autoneg = pp->autoneg; 318 pdata->pause_autoneg = pp->autoneg;
322 pdata->tx_pause = pp->tx_pause; 319 pdata->tx_pause = pp->tx_pause;
323 pdata->rx_pause = pp->rx_pause; 320 pdata->rx_pause = pp->rx_pause;
324 321
325 oldadv = phydev->advertising; 322 phy_set_asym_pause(phydev, pp->rx_pause, pp->tx_pause);
326 newadv = oldadv & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
327 323
328 if (pp->rx_pause) 324 if (!pp->autoneg) {
329 newadv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; 325 pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
330 326 pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
331 if (pp->tx_pause)
332 newadv ^= ADVERTISED_Asym_Pause;
333
334 if (oldadv ^ newadv) {
335 phydev->advertising = newadv;
336
337 if (phydev->autoneg)
338 return phy_start_aneg(phydev);
339
340 if (!pp->autoneg) {
341 pdata->mac_ops->flowctl_tx(pdata,
342 pdata->tx_pause);
343 pdata->mac_ops->flowctl_rx(pdata,
344 pdata->rx_pause);
345 }
346 } 327 }
347
348 } else { 328 } else {
349 if (pp->autoneg) 329 if (pp->autoneg)
350 return -EINVAL; 330 return -EINVAL;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 3188f553da35..e3560311711a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -836,19 +836,19 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
836#ifdef CONFIG_ACPI 836#ifdef CONFIG_ACPI
837static struct acpi_device *acpi_phy_find_device(struct device *dev) 837static struct acpi_device *acpi_phy_find_device(struct device *dev)
838{ 838{
839 struct acpi_reference_args args; 839 struct fwnode_reference_args args;
840 struct fwnode_handle *fw_node; 840 struct fwnode_handle *fw_node;
841 int status; 841 int status;
842 842
843 fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev)); 843 fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
844 status = acpi_node_get_property_reference(fw_node, "phy-handle", 0, 844 status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
845 &args); 845 &args);
846 if (ACPI_FAILURE(status)) { 846 if (ACPI_FAILURE(status) || !is_acpi_device_node(args.fwnode)) {
847 dev_dbg(dev, "No matching phy in ACPI table\n"); 847 dev_dbg(dev, "No matching phy in ACPI table\n");
848 return NULL; 848 return NULL;
849 } 849 }
850 850
851 return args.adev; 851 return to_acpi_device_node(args.fwnode);
852} 852}
853#endif 853#endif
854 854
@@ -895,12 +895,10 @@ int xgene_enet_phy_connect(struct net_device *ndev)
895 } 895 }
896 896
897 pdata->phy_speed = SPEED_UNKNOWN; 897 pdata->phy_speed = SPEED_UNKNOWN;
898 phy_dev->supported &= ~SUPPORTED_10baseT_Half & 898 phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
899 ~SUPPORTED_100baseT_Half & 899 phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
900 ~SUPPORTED_1000baseT_Half; 900 phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
901 phy_dev->supported |= SUPPORTED_Pause | 901 phy_support_asym_pause(phy_dev);
902 SUPPORTED_Asym_Pause;
903 phy_dev->advertising = phy_dev->supported;
904 902
905 return 0; 903 return 0;
906} 904}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3b889efddf78..50dd6bf176d0 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -29,9 +29,6 @@
29#define RES_RING_CSR 1 29#define RES_RING_CSR 1
30#define RES_RING_CMD 2 30#define RES_RING_CMD 2
31 31
32static const struct of_device_id xgene_enet_of_match[];
33static const struct acpi_device_id xgene_enet_acpi_match[];
34
35static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) 32static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
36{ 33{
37 struct xgene_enet_raw_desc16 *raw_desc; 34 struct xgene_enet_raw_desc16 *raw_desc;
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 5a655d289dd5..6a8e2567f2bd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/crc32.h> 21#include <linux/crc32.h>
22#include <linux/crc32poly.h>
22#include <linux/bitrev.h> 23#include <linux/bitrev.h>
23#include <linux/ethtool.h> 24#include <linux/ethtool.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
@@ -37,11 +38,6 @@
37#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1)))) 38#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
38#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1))) 39#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
39 40
40/*
41 * CRC polynomial - used in working out multicast filter bits.
42 */
43#define ENET_CRCPOLY 0x04c11db7
44
45/* switch to use multicast code lifted from sunhme driver */ 41/* switch to use multicast code lifted from sunhme driver */
46#define SUNHME_MULTICAST 42#define SUNHME_MULTICAST
47 43
@@ -158,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
158static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); 154static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
159static void bmac_set_timeout(struct net_device *dev); 155static void bmac_set_timeout(struct net_device *dev);
160static void bmac_tx_timeout(struct timer_list *t); 156static void bmac_tx_timeout(struct timer_list *t);
161static int bmac_output(struct sk_buff *skb, struct net_device *dev); 157static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
162static void bmac_start(struct net_device *dev); 158static void bmac_start(struct net_device *dev);
163 159
164#define DBDMA_SET(x) ( ((x) | (x) << 16) ) 160#define DBDMA_SET(x) ( ((x) | (x) << 16) )
@@ -838,7 +834,7 @@ crc416(unsigned int curval, unsigned short nxtval)
838 next = next >> 1; 834 next = next >> 1;
839 835
840 /* do the XOR */ 836 /* do the XOR */
841 if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY; 837 if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
842 } 838 }
843 return cur; 839 return cur;
844} 840}
@@ -1460,7 +1456,7 @@ bmac_start(struct net_device *dev)
1460 spin_unlock_irqrestore(&bp->lock, flags); 1456 spin_unlock_irqrestore(&bp->lock, flags);
1461} 1457}
1462 1458
1463static int 1459static netdev_tx_t
1464bmac_output(struct sk_buff *skb, struct net_device *dev) 1460bmac_output(struct sk_buff *skb, struct net_device *dev)
1465{ 1461{
1466 struct bmac_data *bp = netdev_priv(dev); 1462 struct bmac_data *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 0b5429d76bcf..68b9ee489489 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -78,7 +78,7 @@ struct mace_data {
78 78
79static int mace_open(struct net_device *dev); 79static int mace_open(struct net_device *dev);
80static int mace_close(struct net_device *dev); 80static int mace_close(struct net_device *dev);
81static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); 81static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
82static void mace_set_multicast(struct net_device *dev); 82static void mace_set_multicast(struct net_device *dev);
83static void mace_reset(struct net_device *dev); 83static void mace_reset(struct net_device *dev);
84static int mace_set_address(struct net_device *dev, void *addr); 84static int mace_set_address(struct net_device *dev, void *addr);
@@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev)
525 mp->timeout_active = 1; 525 mp->timeout_active = 1;
526} 526}
527 527
528static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) 528static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
529{ 529{
530 struct mace_data *mp = netdev_priv(dev); 530 struct mace_data *mp = netdev_priv(dev);
531 volatile struct dbdma_regs __iomem *td = mp->tx_dma; 531 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 137cbb470af2..376f2c2613e7 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -89,7 +89,7 @@ struct mace_frame {
89 89
90static int mace_open(struct net_device *dev); 90static int mace_open(struct net_device *dev);
91static int mace_close(struct net_device *dev); 91static int mace_close(struct net_device *dev);
92static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); 92static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
93static void mace_set_multicast(struct net_device *dev); 93static void mace_set_multicast(struct net_device *dev);
94static int mace_set_address(struct net_device *dev, void *addr); 94static int mace_set_address(struct net_device *dev, void *addr);
95static void mace_reset(struct net_device *dev); 95static void mace_reset(struct net_device *dev);
@@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev)
444 * Transmit a frame 444 * Transmit a frame
445 */ 445 */
446 446
447static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) 447static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
448{ 448{
449 struct mace_data *mp = netdev_priv(dev); 449 struct mace_data *mp = netdev_priv(dev);
450 unsigned long flags; 450 unsigned long flags;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index fc7383106946..91eb8910b1c9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -63,8 +63,6 @@
63 63
64#define AQ_CFG_NAPI_WEIGHT 64U 64#define AQ_CFG_NAPI_WEIGHT 64U
65 65
66#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
67
68/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ 66/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
69 67
70#define AQ_NIC_FC_OFF 0U 68#define AQ_NIC_FC_OFF 0U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index d52b088ff8f0..becb578211ed 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -57,4 +57,9 @@
57#define AQ_NIC_RATE_1G BIT(4) 57#define AQ_NIC_RATE_1G BIT(4)
58#define AQ_NIC_RATE_100M BIT(5) 58#define AQ_NIC_RATE_100M BIT(5)
59 59
60#define AQ_NIC_RATE_EEE_10G BIT(6)
61#define AQ_NIC_RATE_EEE_5G BIT(7)
62#define AQ_NIC_RATE_EEE_2GS BIT(8)
63#define AQ_NIC_RATE_EEE_1G BIT(9)
64
60#endif /* AQ_COMMON_H */ 65#endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index f2d8063a2cef..99ef1daaa4d8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -11,6 +11,7 @@
11 11
12#include "aq_ethtool.h" 12#include "aq_ethtool.h"
13#include "aq_nic.h" 13#include "aq_nic.h"
14#include "aq_vec.h"
14 15
15static void aq_ethtool_get_regs(struct net_device *ndev, 16static void aq_ethtool_get_regs(struct net_device *ndev,
16 struct ethtool_regs *regs, void *p) 17 struct ethtool_regs *regs, void *p)
@@ -97,8 +98,8 @@ static void aq_ethtool_stats(struct net_device *ndev,
97 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); 98 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
98 99
99 memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) + 100 memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
100 ARRAY_SIZE(aq_ethtool_queue_stat_names) * 101 ARRAY_SIZE(aq_ethtool_queue_stat_names) *
101 cfg->vecs) * sizeof(u64)); 102 cfg->vecs) * sizeof(u64));
102 aq_nic_get_stats(aq_nic, data); 103 aq_nic_get_stats(aq_nic, data);
103} 104}
104 105
@@ -284,6 +285,222 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
284 return aq_nic_update_interrupt_moderation_settings(aq_nic); 285 return aq_nic_update_interrupt_moderation_settings(aq_nic);
285} 286}
286 287
288static void aq_ethtool_get_wol(struct net_device *ndev,
289 struct ethtool_wolinfo *wol)
290{
291 struct aq_nic_s *aq_nic = netdev_priv(ndev);
292 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
293
294 wol->supported = WAKE_MAGIC;
295 wol->wolopts = 0;
296
297 if (cfg->wol)
298 wol->wolopts |= WAKE_MAGIC;
299}
300
301static int aq_ethtool_set_wol(struct net_device *ndev,
302 struct ethtool_wolinfo *wol)
303{
304 struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
305 struct aq_nic_s *aq_nic = netdev_priv(ndev);
306 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
307 int err = 0;
308
309 if (wol->wolopts & WAKE_MAGIC)
310 cfg->wol |= AQ_NIC_WOL_ENABLED;
311 else
312 cfg->wol &= ~AQ_NIC_WOL_ENABLED;
313 err = device_set_wakeup_enable(&pdev->dev, wol->wolopts);
314
315 return err;
316}
317
318static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed)
319{
320 u32 rate = 0;
321
322 if (speed & AQ_NIC_RATE_EEE_10G)
323 rate |= SUPPORTED_10000baseT_Full;
324
325 if (speed & AQ_NIC_RATE_EEE_2GS)
326 rate |= SUPPORTED_2500baseX_Full;
327
328 if (speed & AQ_NIC_RATE_EEE_1G)
329 rate |= SUPPORTED_1000baseT_Full;
330
331 return rate;
332}
333
334static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
335{
336 struct aq_nic_s *aq_nic = netdev_priv(ndev);
337 u32 rate, supported_rates;
338 int err = 0;
339
340 if (!aq_nic->aq_fw_ops->get_eee_rate)
341 return -EOPNOTSUPP;
342
343 err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate,
344 &supported_rates);
345 if (err < 0)
346 return err;
347
348 eee->supported = eee_mask_to_ethtool_mask(supported_rates);
349
350 if (aq_nic->aq_nic_cfg.eee_speeds)
351 eee->advertised = eee->supported;
352
353 eee->lp_advertised = eee_mask_to_ethtool_mask(rate);
354
355 eee->eee_enabled = !!eee->advertised;
356
357 eee->tx_lpi_enabled = eee->eee_enabled;
358 if (eee->advertised & eee->lp_advertised)
359 eee->eee_active = true;
360
361 return 0;
362}
363
364static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
365{
366 struct aq_nic_s *aq_nic = netdev_priv(ndev);
367 u32 rate, supported_rates;
368 struct aq_nic_cfg_s *cfg;
369 int err = 0;
370
371 cfg = aq_nic_get_cfg(aq_nic);
372
373 if (unlikely(!aq_nic->aq_fw_ops->get_eee_rate ||
374 !aq_nic->aq_fw_ops->set_eee_rate))
375 return -EOPNOTSUPP;
376
377 err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate,
378 &supported_rates);
379 if (err < 0)
380 return err;
381
382 if (eee->eee_enabled) {
383 rate = supported_rates;
384 cfg->eee_speeds = rate;
385 } else {
386 rate = 0;
387 cfg->eee_speeds = 0;
388 }
389
390 return aq_nic->aq_fw_ops->set_eee_rate(aq_nic->aq_hw, rate);
391}
392
393static int aq_ethtool_nway_reset(struct net_device *ndev)
394{
395 struct aq_nic_s *aq_nic = netdev_priv(ndev);
396
397 if (unlikely(!aq_nic->aq_fw_ops->renegotiate))
398 return -EOPNOTSUPP;
399
400 if (netif_running(ndev))
401 return aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw);
402
403 return 0;
404}
405
406static void aq_ethtool_get_pauseparam(struct net_device *ndev,
407 struct ethtool_pauseparam *pause)
408{
409 struct aq_nic_s *aq_nic = netdev_priv(ndev);
410 u32 fc = aq_nic->aq_nic_cfg.flow_control;
411
412 pause->autoneg = 0;
413
414 pause->rx_pause = !!(fc & AQ_NIC_FC_RX);
415 pause->tx_pause = !!(fc & AQ_NIC_FC_TX);
416
417}
418
419static int aq_ethtool_set_pauseparam(struct net_device *ndev,
420 struct ethtool_pauseparam *pause)
421{
422 struct aq_nic_s *aq_nic = netdev_priv(ndev);
423 int err = 0;
424
425 if (!aq_nic->aq_fw_ops->set_flow_control)
426 return -EOPNOTSUPP;
427
428 if (pause->autoneg == AUTONEG_ENABLE)
429 return -EOPNOTSUPP;
430
431 if (pause->rx_pause)
432 aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX;
433 else
434 aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX;
435
436 if (pause->tx_pause)
437 aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX;
438 else
439 aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX;
440
441 err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
442
443 return err;
444}
445
446static void aq_get_ringparam(struct net_device *ndev,
447 struct ethtool_ringparam *ring)
448{
449 struct aq_nic_s *aq_nic = netdev_priv(ndev);
450 struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
451
452 ring->rx_pending = aq_nic_cfg->rxds;
453 ring->tx_pending = aq_nic_cfg->txds;
454
455 ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max;
456 ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max;
457}
458
459static int aq_set_ringparam(struct net_device *ndev,
460 struct ethtool_ringparam *ring)
461{
462 int err = 0;
463 bool ndev_running = false;
464 struct aq_nic_s *aq_nic = netdev_priv(ndev);
465 struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
466 const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps;
467
468 if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
469 err = -EOPNOTSUPP;
470 goto err_exit;
471 }
472
473 if (netif_running(ndev)) {
474 ndev_running = true;
475 dev_close(ndev);
476 }
477
478 aq_nic_free_vectors(aq_nic);
479
480 aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
481 aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max);
482 aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE);
483
484 aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
485 aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max);
486 aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE);
487
488 for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs;
489 aq_nic->aq_vecs++) {
490 aq_nic->aq_vec[aq_nic->aq_vecs] =
491 aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg);
492 if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
493 err = -ENOMEM;
494 goto err_exit;
495 }
496 }
497 if (ndev_running)
498 err = dev_open(ndev);
499
500err_exit:
501 return err;
502}
503
287const struct ethtool_ops aq_ethtool_ops = { 504const struct ethtool_ops aq_ethtool_ops = {
288 .get_link = aq_ethtool_get_link, 505 .get_link = aq_ethtool_get_link,
289 .get_regs_len = aq_ethtool_get_regs_len, 506 .get_regs_len = aq_ethtool_get_regs_len,
@@ -291,6 +508,15 @@ const struct ethtool_ops aq_ethtool_ops = {
291 .get_drvinfo = aq_ethtool_get_drvinfo, 508 .get_drvinfo = aq_ethtool_get_drvinfo,
292 .get_strings = aq_ethtool_get_strings, 509 .get_strings = aq_ethtool_get_strings,
293 .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, 510 .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
511 .get_wol = aq_ethtool_get_wol,
512 .set_wol = aq_ethtool_set_wol,
513 .nway_reset = aq_ethtool_nway_reset,
514 .get_ringparam = aq_get_ringparam,
515 .set_ringparam = aq_set_ringparam,
516 .get_eee = aq_ethtool_get_eee,
517 .set_eee = aq_ethtool_set_eee,
518 .get_pauseparam = aq_ethtool_get_pauseparam,
519 .set_pauseparam = aq_ethtool_set_pauseparam,
294 .get_rxfh_key_size = aq_ethtool_get_rss_key_size, 520 .get_rxfh_key_size = aq_ethtool_get_rss_key_size,
295 .get_rxfh = aq_ethtool_get_rss, 521 .get_rxfh = aq_ethtool_get_rss,
296 .get_rxnfc = aq_ethtool_get_rxnfc, 522 .get_rxnfc = aq_ethtool_get_rxnfc,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index a2d416b24ffc..a1e70da358ca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -24,8 +24,10 @@ struct aq_hw_caps_s {
24 u64 link_speed_msk; 24 u64 link_speed_msk;
25 unsigned int hw_priv_flags; 25 unsigned int hw_priv_flags;
26 u32 media_type; 26 u32 media_type;
27 u32 rxds; 27 u32 rxds_max;
28 u32 txds; 28 u32 txds_max;
29 u32 rxds_min;
30 u32 txds_min;
29 u32 txhwb_alignment; 31 u32 txhwb_alignment;
30 u32 irq_mask; 32 u32 irq_mask;
31 u32 vecs; 33 u32 vecs;
@@ -98,6 +100,11 @@ struct aq_stats_s {
98#define AQ_HW_MEDIA_TYPE_TP 1U 100#define AQ_HW_MEDIA_TYPE_TP 1U
99#define AQ_HW_MEDIA_TYPE_FIBRE 2U 101#define AQ_HW_MEDIA_TYPE_FIBRE 2U
100 102
103#define AQ_HW_TXD_MULTIPLE 8U
104#define AQ_HW_RXD_MULTIPLE 8U
105
106#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
107
101struct aq_hw_s { 108struct aq_hw_s {
102 atomic_t flags; 109 atomic_t flags;
103 u8 rbl_enabled:1; 110 u8 rbl_enabled:1;
@@ -105,7 +112,7 @@ struct aq_hw_s {
105 const struct aq_fw_ops *aq_fw_ops; 112 const struct aq_fw_ops *aq_fw_ops;
106 void __iomem *mmio; 113 void __iomem *mmio;
107 struct aq_hw_link_status_s aq_link_status; 114 struct aq_hw_link_status_s aq_link_status;
108 struct hw_aq_atl_utils_mbox mbox; 115 struct hw_atl_utils_mbox mbox;
109 struct hw_atl_stats_s last_stats; 116 struct hw_atl_stats_s last_stats;
110 struct aq_stats_s curr_stats; 117 struct aq_stats_s curr_stats;
111 u64 speed; 118 u64 speed;
@@ -117,7 +124,7 @@ struct aq_hw_s {
117 u32 mbox_addr; 124 u32 mbox_addr;
118 u32 rpc_addr; 125 u32 rpc_addr;
119 u32 rpc_tid; 126 u32 rpc_tid;
120 struct hw_aq_atl_utils_fw_rpc rpc; 127 struct hw_atl_utils_fw_rpc rpc;
121}; 128};
122 129
123struct aq_ring_s; 130struct aq_ring_s;
@@ -177,7 +184,7 @@ struct aq_hw_ops {
177 unsigned int packet_filter); 184 unsigned int packet_filter);
178 185
179 int (*hw_multicast_list_set)(struct aq_hw_s *self, 186 int (*hw_multicast_list_set)(struct aq_hw_s *self,
180 u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX] 187 u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
181 [ETH_ALEN], 188 [ETH_ALEN],
182 u32 count); 189 u32 count);
183 190
@@ -197,25 +204,43 @@ struct aq_hw_ops {
197 204
198 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); 205 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
199 206
200 int (*hw_deinit)(struct aq_hw_s *self); 207 int (*hw_set_offload)(struct aq_hw_s *self,
208 struct aq_nic_cfg_s *aq_nic_cfg);
201 209
202 int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state); 210 int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
203}; 211};
204 212
205struct aq_fw_ops { 213struct aq_fw_ops {
206 int (*init)(struct aq_hw_s *self); 214 int (*init)(struct aq_hw_s *self);
207 215
216 int (*deinit)(struct aq_hw_s *self);
217
208 int (*reset)(struct aq_hw_s *self); 218 int (*reset)(struct aq_hw_s *self);
209 219
220 int (*renegotiate)(struct aq_hw_s *self);
221
210 int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac); 222 int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
211 223
212 int (*set_link_speed)(struct aq_hw_s *self, u32 speed); 224 int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
213 225
214 int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); 226 int (*set_state)(struct aq_hw_s *self,
227 enum hal_atl_utils_fw_state_e state);
215 228
216 int (*update_link_status)(struct aq_hw_s *self); 229 int (*update_link_status)(struct aq_hw_s *self);
217 230
218 int (*update_stats)(struct aq_hw_s *self); 231 int (*update_stats)(struct aq_hw_s *self);
232
233 u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
234
235 int (*set_flow_control)(struct aq_hw_s *self);
236
237 int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
238 u8 *mac);
239
240 int (*set_eee_rate)(struct aq_hw_s *self, u32 speed);
241
242 int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
243 u32 *supported_rates);
219}; 244};
220 245
221#endif /* AQ_HW_H */ 246#endif /* AQ_HW_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index ba5fe8c4125d..7c07eef275eb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -99,8 +99,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
99 struct aq_nic_s *aq_nic = netdev_priv(ndev); 99 struct aq_nic_s *aq_nic = netdev_priv(ndev);
100 struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic); 100 struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
101 bool is_lro = false; 101 bool is_lro = false;
102 int err = 0;
103
104 aq_cfg->features = features;
102 105
103 if (aq_cfg->hw_features & NETIF_F_LRO) { 106 if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
104 is_lro = features & NETIF_F_LRO; 107 is_lro = features & NETIF_F_LRO;
105 108
106 if (aq_cfg->is_lro != is_lro) { 109 if (aq_cfg->is_lro != is_lro) {
@@ -112,8 +115,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
112 } 115 }
113 } 116 }
114 } 117 }
118 if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM)
119 err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
120 aq_cfg);
115 121
116 return 0; 122 return err;
117} 123}
118 124
119static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr) 125static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
@@ -135,17 +141,10 @@ err_exit:
135static void aq_ndev_set_multicast_settings(struct net_device *ndev) 141static void aq_ndev_set_multicast_settings(struct net_device *ndev)
136{ 142{
137 struct aq_nic_s *aq_nic = netdev_priv(ndev); 143 struct aq_nic_s *aq_nic = netdev_priv(ndev);
138 int err = 0;
139 144
140 err = aq_nic_set_packet_filter(aq_nic, ndev->flags); 145 aq_nic_set_packet_filter(aq_nic, ndev->flags);
141 if (err < 0)
142 return;
143 146
144 if (netdev_mc_count(ndev)) { 147 aq_nic_set_multicast_list(aq_nic, ndev);
145 err = aq_nic_set_multicast_list(aq_nic, ndev);
146 if (err < 0)
147 return;
148 }
149} 148}
150 149
151static const struct net_device_ops aq_ndev_ops = { 150static const struct net_device_ops aq_ndev_ops = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 1a1a6380c128..7abdc0952425 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -89,8 +89,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
89 aq_nic_rss_init(self, cfg->num_rss_queues); 89 aq_nic_rss_init(self, cfg->num_rss_queues);
90 90
91 /*descriptors */ 91 /*descriptors */
92 cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF); 92 cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
93 cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF); 93 cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
94 94
95 /*rss rings */ 95 /*rss rings */
96 cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF); 96 cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
@@ -118,12 +118,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
118 } 118 }
119 119
120 cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk; 120 cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
121 cfg->hw_features = cfg->aq_hw_caps->hw_features; 121 cfg->features = cfg->aq_hw_caps->hw_features;
122} 122}
123 123
124static int aq_nic_update_link_status(struct aq_nic_s *self) 124static int aq_nic_update_link_status(struct aq_nic_s *self)
125{ 125{
126 int err = self->aq_fw_ops->update_link_status(self->aq_hw); 126 int err = self->aq_fw_ops->update_link_status(self->aq_hw);
127 u32 fc = 0;
127 128
128 if (err) 129 if (err)
129 return err; 130 return err;
@@ -133,6 +134,15 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
133 AQ_CFG_DRV_NAME, self->link_status.mbps, 134 AQ_CFG_DRV_NAME, self->link_status.mbps,
134 self->aq_hw->aq_link_status.mbps); 135 self->aq_hw->aq_link_status.mbps);
135 aq_nic_update_interrupt_moderation_settings(self); 136 aq_nic_update_interrupt_moderation_settings(self);
137
138 /* Driver has to update flow control settings on RX block
139 * on any link event.
140 * We should query FW whether it negotiated FC.
141 */
142 if (self->aq_fw_ops->get_flow_control)
143 self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
144 if (self->aq_hw_ops->hw_set_fc)
145 self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
136 } 146 }
137 147
138 self->link_status = self->aq_hw->aq_link_status; 148 self->link_status = self->aq_hw->aq_link_status;
@@ -189,7 +199,7 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
189 aq_vec_isr(i, (void *)aq_vec); 199 aq_vec_isr(i, (void *)aq_vec);
190 200
191 mod_timer(&self->polling_timer, jiffies + 201 mod_timer(&self->polling_timer, jiffies +
192 AQ_CFG_POLLING_TIMER_INTERVAL); 202 AQ_CFG_POLLING_TIMER_INTERVAL);
193} 203}
194 204
195int aq_nic_ndev_register(struct aq_nic_s *self) 205int aq_nic_ndev_register(struct aq_nic_s *self)
@@ -301,13 +311,13 @@ int aq_nic_start(struct aq_nic_s *self)
301 unsigned int i = 0U; 311 unsigned int i = 0U;
302 312
303 err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, 313 err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
304 self->mc_list.ar, 314 self->mc_list.ar,
305 self->mc_list.count); 315 self->mc_list.count);
306 if (err < 0) 316 if (err < 0)
307 goto err_exit; 317 goto err_exit;
308 318
309 err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, 319 err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
310 self->packet_filter); 320 self->packet_filter);
311 if (err < 0) 321 if (err < 0)
312 goto err_exit; 322 goto err_exit;
313 323
@@ -327,7 +337,7 @@ int aq_nic_start(struct aq_nic_s *self)
327 goto err_exit; 337 goto err_exit;
328 timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0); 338 timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
329 mod_timer(&self->service_timer, jiffies + 339 mod_timer(&self->service_timer, jiffies +
330 AQ_CFG_SERVICE_TIMER_INTERVAL); 340 AQ_CFG_SERVICE_TIMER_INTERVAL);
331 341
332 if (self->aq_nic_cfg.is_polling) { 342 if (self->aq_nic_cfg.is_polling) {
333 timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0); 343 timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
@@ -344,7 +354,7 @@ int aq_nic_start(struct aq_nic_s *self)
344 } 354 }
345 355
346 err = self->aq_hw_ops->hw_irq_enable(self->aq_hw, 356 err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
347 AQ_CFG_IRQ_MASK); 357 AQ_CFG_IRQ_MASK);
348 if (err < 0) 358 if (err < 0)
349 goto err_exit; 359 goto err_exit;
350 } 360 }
@@ -563,34 +573,41 @@ err_exit:
563 573
564int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) 574int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
565{ 575{
576 unsigned int packet_filter = self->packet_filter;
566 struct netdev_hw_addr *ha = NULL; 577 struct netdev_hw_addr *ha = NULL;
567 unsigned int i = 0U; 578 unsigned int i = 0U;
568 579
569 self->mc_list.count = 0U; 580 self->mc_list.count = 0;
570 581 if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
571 netdev_for_each_mc_addr(ha, ndev) { 582 packet_filter |= IFF_PROMISC;
572 ether_addr_copy(self->mc_list.ar[i++], ha->addr); 583 } else {
573 ++self->mc_list.count; 584 netdev_for_each_uc_addr(ha, ndev) {
585 ether_addr_copy(self->mc_list.ar[i++], ha->addr);
574 586
575 if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) 587 if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
576 break; 588 break;
589 }
577 } 590 }
578 591
579 if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) { 592 if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
580 /* Number of filters is too big: atlantic does not support this. 593 packet_filter |= IFF_ALLMULTI;
581 * Force all multi filter to support this.
582 * With this we disable all UC filters and setup "all pass"
583 * multicast mask
584 */
585 self->packet_filter |= IFF_ALLMULTI;
586 self->aq_nic_cfg.mc_list_count = 0;
587 return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
588 self->packet_filter);
589 } else { 594 } else {
590 return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, 595 netdev_for_each_mc_addr(ha, ndev) {
591 self->mc_list.ar, 596 ether_addr_copy(self->mc_list.ar[i++], ha->addr);
592 self->mc_list.count); 597
598 if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
599 break;
600 }
601 }
602
603 if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
604 packet_filter |= IFF_MULTICAST;
605 self->mc_list.count = i;
606 self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
607 self->mc_list.ar,
608 self->mc_list.count);
593 } 609 }
610 return aq_nic_set_packet_filter(self, packet_filter);
594} 611}
595 612
596int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) 613int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
@@ -761,10 +778,16 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
761 ethtool_link_ksettings_add_link_mode(cmd, advertising, 778 ethtool_link_ksettings_add_link_mode(cmd, advertising,
762 100baseT_Full); 779 100baseT_Full);
763 780
764 if (self->aq_nic_cfg.flow_control) 781 if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
765 ethtool_link_ksettings_add_link_mode(cmd, advertising, 782 ethtool_link_ksettings_add_link_mode(cmd, advertising,
766 Pause); 783 Pause);
767 784
785 /* Asym is when either RX or TX, but not both */
786 if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
787 !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
788 ethtool_link_ksettings_add_link_mode(cmd, advertising,
789 Asym_Pause);
790
768 if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE) 791 if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
769 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); 792 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
770 else 793 else
@@ -878,11 +901,13 @@ void aq_nic_deinit(struct aq_nic_s *self)
878 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) 901 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
879 aq_vec_deinit(aq_vec); 902 aq_vec_deinit(aq_vec);
880 903
881 if (self->power_state == AQ_HW_POWER_STATE_D0) { 904 self->aq_fw_ops->deinit(self->aq_hw);
882 (void)self->aq_hw_ops->hw_deinit(self->aq_hw); 905
883 } else { 906 if (self->power_state != AQ_HW_POWER_STATE_D0 ||
884 (void)self->aq_hw_ops->hw_set_power(self->aq_hw, 907 self->aq_hw->aq_nic_cfg->wol) {
885 self->power_state); 908 self->aq_fw_ops->set_power(self->aq_hw,
909 self->power_state,
910 self->ndev->dev_addr);
886 } 911 }
887 912
888err_exit:; 913err_exit:;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index faa533a0ec47..44ec47a3d60a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -23,7 +23,7 @@ struct aq_vec_s;
23 23
24struct aq_nic_cfg_s { 24struct aq_nic_cfg_s {
25 const struct aq_hw_caps_s *aq_hw_caps; 25 const struct aq_hw_caps_s *aq_hw_caps;
26 u64 hw_features; 26 u64 features;
27 u32 rxds; /* rx ring size, descriptors # */ 27 u32 rxds; /* rx ring size, descriptors # */
28 u32 txds; /* tx ring size, descriptors # */ 28 u32 txds; /* tx ring size, descriptors # */
29 u32 vecs; /* vecs==allocated irqs */ 29 u32 vecs; /* vecs==allocated irqs */
@@ -36,6 +36,7 @@ struct aq_nic_cfg_s {
36 u32 flow_control; 36 u32 flow_control;
37 u32 link_speed_msk; 37 u32 link_speed_msk;
38 u32 vlan_id; 38 u32 vlan_id;
39 u32 wol;
39 u16 is_mc_list_enabled; 40 u16 is_mc_list_enabled;
40 u16 mc_list_count; 41 u16 mc_list_count;
41 bool is_autoneg; 42 bool is_autoneg;
@@ -44,6 +45,7 @@ struct aq_nic_cfg_s {
44 bool is_lro; 45 bool is_lro;
45 u8 tcs; 46 u8 tcs;
46 struct aq_rss_parameters aq_rss; 47 struct aq_rss_parameters aq_rss;
48 u32 eee_speeds;
47}; 49};
48 50
49#define AQ_NIC_FLAG_STARTED 0x00000004U 51#define AQ_NIC_FLAG_STARTED 0x00000004U
@@ -54,6 +56,8 @@ struct aq_nic_cfg_s {
54#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U 56#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
55#define AQ_NIC_FLAG_ERR_HW 0x80000000U 57#define AQ_NIC_FLAG_ERR_HW 0x80000000U
56 58
59#define AQ_NIC_WOL_ENABLED BIT(0)
60
57#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \ 61#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
58 ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_)) 62 ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
59 63
@@ -75,7 +79,7 @@ struct aq_nic_s {
75 struct aq_hw_link_status_s link_status; 79 struct aq_hw_link_status_s link_status;
76 struct { 80 struct {
77 u32 count; 81 u32 count;
78 u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN]; 82 u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
79 } mc_list; 83 } mc_list;
80 84
81 struct pci_dev *pdev; 85 struct pci_dev *pdev;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index a50e08bb4748..1d5d6b8df855 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -84,7 +84,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
84 const struct aq_hw_ops **ops, 84 const struct aq_hw_ops **ops,
85 const struct aq_hw_caps_s **caps) 85 const struct aq_hw_caps_s **caps)
86{ 86{
87 int i = 0; 87 int i;
88 88
89 if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA) 89 if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
90 return -EINVAL; 90 return -EINVAL;
@@ -107,7 +107,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
107 107
108int aq_pci_func_init(struct pci_dev *pdev) 108int aq_pci_func_init(struct pci_dev *pdev)
109{ 109{
110 int err = 0; 110 int err;
111 111
112 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 112 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
113 if (!err) { 113 if (!err) {
@@ -141,7 +141,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
141 char *name, void *aq_vec, cpumask_t *affinity_mask) 141 char *name, void *aq_vec, cpumask_t *affinity_mask)
142{ 142{
143 struct pci_dev *pdev = self->pdev; 143 struct pci_dev *pdev = self->pdev;
144 int err = 0; 144 int err;
145 145
146 if (pdev->msix_enabled || pdev->msi_enabled) 146 if (pdev->msix_enabled || pdev->msi_enabled)
147 err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0, 147 err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
@@ -164,7 +164,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
164void aq_pci_func_free_irqs(struct aq_nic_s *self) 164void aq_pci_func_free_irqs(struct aq_nic_s *self)
165{ 165{
166 struct pci_dev *pdev = self->pdev; 166 struct pci_dev *pdev = self->pdev;
167 unsigned int i = 0U; 167 unsigned int i;
168 168
169 for (i = 32U; i--;) { 169 for (i = 32U; i--;) {
170 if (!((1U << i) & self->msix_entry_mask)) 170 if (!((1U << i) & self->msix_entry_mask))
@@ -194,8 +194,8 @@ static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
194static int aq_pci_probe(struct pci_dev *pdev, 194static int aq_pci_probe(struct pci_dev *pdev,
195 const struct pci_device_id *pci_id) 195 const struct pci_device_id *pci_id)
196{ 196{
197 struct aq_nic_s *self = NULL; 197 struct aq_nic_s *self;
198 int err = 0; 198 int err;
199 struct net_device *ndev; 199 struct net_device *ndev;
200 resource_size_t mmio_pa; 200 resource_size_t mmio_pa;
201 u32 bar; 201 u32 bar;
@@ -267,14 +267,13 @@ static int aq_pci_probe(struct pci_dev *pdev,
267 numvecs = min(numvecs, num_online_cpus()); 267 numvecs = min(numvecs, num_online_cpus());
268 /*enable interrupts */ 268 /*enable interrupts */
269#if !AQ_CFG_FORCE_LEGACY_INT 269#if !AQ_CFG_FORCE_LEGACY_INT
270 numvecs = pci_alloc_irq_vectors(self->pdev, 1, numvecs, 270 err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
271 PCI_IRQ_MSIX | PCI_IRQ_MSI | 271 PCI_IRQ_MSIX | PCI_IRQ_MSI |
272 PCI_IRQ_LEGACY); 272 PCI_IRQ_LEGACY);
273 273
274 if (numvecs < 0) { 274 if (err < 0)
275 err = numvecs;
276 goto err_hwinit; 275 goto err_hwinit;
277 } 276 numvecs = err;
278#endif 277#endif
279 self->irqvecs = numvecs; 278 self->irqvecs = numvecs;
280 279
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index b5f1f62e8e25..74550ccc7a20 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -29,8 +29,8 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
29 goto err_exit; 29 goto err_exit;
30 } 30 }
31 self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), 31 self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
32 self->size * self->dx_size, 32 self->size * self->dx_size,
33 &self->dx_ring_pa, GFP_KERNEL); 33 &self->dx_ring_pa, GFP_KERNEL);
34 if (!self->dx_ring) { 34 if (!self->dx_ring) {
35 err = -ENOMEM; 35 err = -ENOMEM;
36 goto err_exit; 36 goto err_exit;
@@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
172 return !!budget; 172 return !!budget;
173} 173}
174 174
175static void aq_rx_checksum(struct aq_ring_s *self,
176 struct aq_ring_buff_s *buff,
177 struct sk_buff *skb)
178{
179 if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
180 return;
181
182 if (unlikely(buff->is_cso_err)) {
183 ++self->stats.rx.errors;
184 skb->ip_summed = CHECKSUM_NONE;
185 return;
186 }
187 if (buff->is_ip_cso) {
188 __skb_incr_checksum_unnecessary(skb);
189 if (buff->is_udp_cso || buff->is_tcp_cso)
190 __skb_incr_checksum_unnecessary(skb);
191 } else {
192 skb->ip_summed = CHECKSUM_NONE;
193 }
194}
195
175#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) 196#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
176int aq_ring_rx_clean(struct aq_ring_s *self, 197int aq_ring_rx_clean(struct aq_ring_s *self,
177 struct napi_struct *napi, 198 struct napi_struct *napi,
@@ -225,9 +246,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
225 } 246 }
226 247
227 /* for single fragment packets use build_skb() */ 248 /* for single fragment packets use build_skb() */
228 if (buff->is_eop) { 249 if (buff->is_eop &&
250 buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
229 skb = build_skb(page_address(buff->page), 251 skb = build_skb(page_address(buff->page),
230 buff->len + AQ_SKB_ALIGN); 252 AQ_CFG_RX_FRAME_MAX);
231 if (unlikely(!skb)) { 253 if (unlikely(!skb)) {
232 err = -ENOMEM; 254 err = -ENOMEM;
233 goto err_exit; 255 goto err_exit;
@@ -247,34 +269,27 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
247 buff->len - ETH_HLEN, 269 buff->len - ETH_HLEN,
248 SKB_TRUESIZE(buff->len - ETH_HLEN)); 270 SKB_TRUESIZE(buff->len - ETH_HLEN));
249 271
250 for (i = 1U, next_ = buff->next, 272 if (!buff->is_eop) {
251 buff_ = &self->buff_ring[next_]; true; 273 for (i = 1U, next_ = buff->next,
252 next_ = buff_->next, 274 buff_ = &self->buff_ring[next_];
253 buff_ = &self->buff_ring[next_], ++i) { 275 true; next_ = buff_->next,
254 skb_add_rx_frag(skb, i, buff_->page, 0, 276 buff_ = &self->buff_ring[next_], ++i) {
255 buff_->len, 277 skb_add_rx_frag(skb, i,
256 SKB_TRUESIZE(buff->len - 278 buff_->page, 0,
257 ETH_HLEN)); 279 buff_->len,
258 buff_->is_cleaned = 1; 280 SKB_TRUESIZE(buff->len -
259 281 ETH_HLEN));
260 if (buff_->is_eop) 282 buff_->is_cleaned = 1;
261 break; 283
284 if (buff_->is_eop)
285 break;
286 }
262 } 287 }
263 } 288 }
264 289
265 skb->protocol = eth_type_trans(skb, ndev); 290 skb->protocol = eth_type_trans(skb, ndev);
266 if (unlikely(buff->is_cso_err)) { 291
267 ++self->stats.rx.errors; 292 aq_rx_checksum(self, buff, skb);
268 skb->ip_summed = CHECKSUM_NONE;
269 } else {
270 if (buff->is_ip_cso) {
271 __skb_incr_checksum_unnecessary(skb);
272 if (buff->is_udp_cso || buff->is_tcp_cso)
273 __skb_incr_checksum_unnecessary(skb);
274 } else {
275 skb->ip_summed = CHECKSUM_NONE;
276 }
277 }
278 293
279 skb_set_hash(skb, buff->rss_hash, 294 skb_set_hash(skb, buff->rss_hash,
280 buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : 295 buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 67e2f9fb9402..2469ed4d86b9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -19,65 +19,67 @@
19#include "hw_atl_a0_internal.h" 19#include "hw_atl_a0_internal.h"
20 20
21#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \ 21#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
22 .is_64_dma = true, \ 22 .is_64_dma = true, \
23 .msix_irqs = 4U, \ 23 .msix_irqs = 4U, \
24 .irq_mask = ~0U, \ 24 .irq_mask = ~0U, \
25 .vecs = HW_ATL_A0_RSS_MAX, \ 25 .vecs = HW_ATL_A0_RSS_MAX, \
26 .tcs = HW_ATL_A0_TC_MAX, \ 26 .tcs = HW_ATL_A0_TC_MAX, \
27 .rxd_alignment = 1U, \ 27 .rxd_alignment = 1U, \
28 .rxd_size = HW_ATL_A0_RXD_SIZE, \ 28 .rxd_size = HW_ATL_A0_RXD_SIZE, \
29 .rxds = 248U, \ 29 .rxds_max = HW_ATL_A0_MAX_RXD, \
30 .txd_alignment = 1U, \ 30 .rxds_min = HW_ATL_A0_MIN_RXD, \
31 .txd_size = HW_ATL_A0_TXD_SIZE, \ 31 .txd_alignment = 1U, \
32 .txds = 8U * 1024U, \ 32 .txd_size = HW_ATL_A0_TXD_SIZE, \
33 .txhwb_alignment = 4096U, \ 33 .txds_max = HW_ATL_A0_MAX_TXD, \
34 .tx_rings = HW_ATL_A0_TX_RINGS, \ 34 .txds_min = HW_ATL_A0_MIN_RXD, \
35 .rx_rings = HW_ATL_A0_RX_RINGS, \ 35 .txhwb_alignment = 4096U, \
36 .hw_features = NETIF_F_HW_CSUM | \ 36 .tx_rings = HW_ATL_A0_TX_RINGS, \
37 NETIF_F_RXHASH | \ 37 .rx_rings = HW_ATL_A0_RX_RINGS, \
38 NETIF_F_RXCSUM | \ 38 .hw_features = NETIF_F_HW_CSUM | \
39 NETIF_F_SG | \ 39 NETIF_F_RXHASH | \
40 NETIF_F_TSO, \ 40 NETIF_F_RXCSUM | \
41 NETIF_F_SG | \
42 NETIF_F_TSO, \
41 .hw_priv_flags = IFF_UNICAST_FLT, \ 43 .hw_priv_flags = IFF_UNICAST_FLT, \
42 .flow_control = true, \ 44 .flow_control = true, \
43 .mtu = HW_ATL_A0_MTU_JUMBO, \ 45 .mtu = HW_ATL_A0_MTU_JUMBO, \
44 .mac_regs_count = 88, \ 46 .mac_regs_count = 88, \
45 .hw_alive_check_addr = 0x10U 47 .hw_alive_check_addr = 0x10U
46 48
47const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = { 49const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
48 DEFAULT_A0_BOARD_BASIC_CAPABILITIES, 50 DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
49 .media_type = AQ_HW_MEDIA_TYPE_FIBRE, 51 .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
50 .link_speed_msk = HW_ATL_A0_RATE_5G | 52 .link_speed_msk = AQ_NIC_RATE_5G |
51 HW_ATL_A0_RATE_2G5 | 53 AQ_NIC_RATE_2GS |
52 HW_ATL_A0_RATE_1G | 54 AQ_NIC_RATE_1G |
53 HW_ATL_A0_RATE_100M, 55 AQ_NIC_RATE_100M,
54}; 56};
55 57
56const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = { 58const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
57 DEFAULT_A0_BOARD_BASIC_CAPABILITIES, 59 DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
58 .media_type = AQ_HW_MEDIA_TYPE_TP, 60 .media_type = AQ_HW_MEDIA_TYPE_TP,
59 .link_speed_msk = HW_ATL_A0_RATE_10G | 61 .link_speed_msk = AQ_NIC_RATE_10G |
60 HW_ATL_A0_RATE_5G | 62 AQ_NIC_RATE_5G |
61 HW_ATL_A0_RATE_2G5 | 63 AQ_NIC_RATE_2GS |
62 HW_ATL_A0_RATE_1G | 64 AQ_NIC_RATE_1G |
63 HW_ATL_A0_RATE_100M, 65 AQ_NIC_RATE_100M,
64}; 66};
65 67
66const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = { 68const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
67 DEFAULT_A0_BOARD_BASIC_CAPABILITIES, 69 DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
68 .media_type = AQ_HW_MEDIA_TYPE_TP, 70 .media_type = AQ_HW_MEDIA_TYPE_TP,
69 .link_speed_msk = HW_ATL_A0_RATE_5G | 71 .link_speed_msk = AQ_NIC_RATE_5G |
70 HW_ATL_A0_RATE_2G5 | 72 AQ_NIC_RATE_2GS |
71 HW_ATL_A0_RATE_1G | 73 AQ_NIC_RATE_1G |
72 HW_ATL_A0_RATE_100M, 74 AQ_NIC_RATE_100M,
73}; 75};
74 76
75const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = { 77const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
76 DEFAULT_A0_BOARD_BASIC_CAPABILITIES, 78 DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
77 .media_type = AQ_HW_MEDIA_TYPE_TP, 79 .media_type = AQ_HW_MEDIA_TYPE_TP,
78 .link_speed_msk = HW_ATL_A0_RATE_2G5 | 80 .link_speed_msk = AQ_NIC_RATE_2GS |
79 HW_ATL_A0_RATE_1G | 81 AQ_NIC_RATE_1G |
80 HW_ATL_A0_RATE_100M, 82 AQ_NIC_RATE_100M,
81}; 83};
82 84
83static int hw_atl_a0_hw_reset(struct aq_hw_s *self) 85static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
@@ -282,7 +284,7 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
282 284
283 /* RSS Ring selection */ 285 /* RSS Ring selection */
284 hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ? 286 hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
285 0xB3333333U : 0x00000000U); 287 0xB3333333U : 0x00000000U);
286 288
287 /* Multicast filters */ 289 /* Multicast filters */
288 for (i = HW_ATL_A0_MAC_MAX; i--;) { 290 for (i = HW_ATL_A0_MAC_MAX; i--;) {
@@ -323,7 +325,7 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
323 } 325 }
324 h = (mac_addr[0] << 8) | (mac_addr[1]); 326 h = (mac_addr[0] << 8) | (mac_addr[1]);
325 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 327 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
326 (mac_addr[4] << 8) | mac_addr[5]; 328 (mac_addr[4] << 8) | mac_addr[5];
327 329
328 hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC); 330 hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
329 hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC); 331 hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
@@ -517,7 +519,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
517 519
518 hw_atl_rdm_rx_desc_data_buff_size_set(self, 520 hw_atl_rdm_rx_desc_data_buff_size_set(self,
519 AQ_CFG_RX_FRAME_MAX / 1024U, 521 AQ_CFG_RX_FRAME_MAX / 1024U,
520 aq_ring->idx); 522 aq_ring->idx);
521 523
522 hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); 524 hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
523 hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); 525 hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
@@ -756,7 +758,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
756 hw_atl_rpfl2_uc_flr_en_set(self, 758 hw_atl_rpfl2_uc_flr_en_set(self,
757 (self->aq_nic_cfg->is_mc_list_enabled && 759 (self->aq_nic_cfg->is_mc_list_enabled &&
758 (i <= self->aq_nic_cfg->mc_list_count)) ? 760 (i <= self->aq_nic_cfg->mc_list_count)) ?
759 1U : 0U, i); 761 1U : 0U, i);
760 762
761 return aq_hw_err_from_flags(self); 763 return aq_hw_err_from_flags(self);
762} 764}
@@ -765,7 +767,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
765 767
766static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, 768static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
767 u8 ar_mac 769 u8 ar_mac
768 [AQ_CFG_MULTICAST_ADDRESS_MAX] 770 [AQ_HW_MULTICAST_ADDRESS_MAX]
769 [ETH_ALEN], 771 [ETH_ALEN],
770 u32 count) 772 u32 count)
771{ 773{
@@ -875,8 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
875const struct aq_hw_ops hw_atl_ops_a0 = { 877const struct aq_hw_ops hw_atl_ops_a0 = {
876 .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, 878 .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
877 .hw_init = hw_atl_a0_hw_init, 879 .hw_init = hw_atl_a0_hw_init,
878 .hw_deinit = hw_atl_utils_hw_deinit,
879 .hw_set_power = hw_atl_utils_hw_set_power,
880 .hw_reset = hw_atl_a0_hw_reset, 880 .hw_reset = hw_atl_a0_hw_reset,
881 .hw_start = hw_atl_a0_hw_start, 881 .hw_start = hw_atl_a0_hw_start,
882 .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start, 882 .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 1d8855558d74..a021dc431ef7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -62,12 +62,6 @@
62#define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU 62#define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU
63#define HW_ATL_A0_MPI_SPEED_SHIFT 16U 63#define HW_ATL_A0_MPI_SPEED_SHIFT 16U
64 64
65#define HW_ATL_A0_RATE_10G BIT(0)
66#define HW_ATL_A0_RATE_5G BIT(1)
67#define HW_ATL_A0_RATE_2G5 BIT(3)
68#define HW_ATL_A0_RATE_1G BIT(4)
69#define HW_ATL_A0_RATE_100M BIT(5)
70
71#define HW_ATL_A0_TXBUF_MAX 160U 65#define HW_ATL_A0_TXBUF_MAX 160U
72#define HW_ATL_A0_RXBUF_MAX 320U 66#define HW_ATL_A0_RXBUF_MAX 320U
73 67
@@ -88,4 +82,12 @@
88 82
89#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U 83#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
90 84
85#define HW_ATL_A0_MIN_RXD \
86 (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
87#define HW_ATL_A0_MIN_TXD \
88 (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
89
90#define HW_ATL_A0_MAX_RXD 8184U
91#define HW_ATL_A0_MAX_TXD 8184U
92
91#endif /* HW_ATL_A0_INTERNAL_H */ 93#endif /* HW_ATL_A0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 819f6bcf9b4e..a7e853fa43c2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -20,67 +20,69 @@
20#include "hw_atl_llh_internal.h" 20#include "hw_atl_llh_internal.h"
21 21
22#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \ 22#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
23 .is_64_dma = true, \ 23 .is_64_dma = true, \
24 .msix_irqs = 4U, \ 24 .msix_irqs = 4U, \
25 .irq_mask = ~0U, \ 25 .irq_mask = ~0U, \
26 .vecs = HW_ATL_B0_RSS_MAX, \ 26 .vecs = HW_ATL_B0_RSS_MAX, \
27 .tcs = HW_ATL_B0_TC_MAX, \ 27 .tcs = HW_ATL_B0_TC_MAX, \
28 .rxd_alignment = 1U, \ 28 .rxd_alignment = 1U, \
29 .rxd_size = HW_ATL_B0_RXD_SIZE, \ 29 .rxd_size = HW_ATL_B0_RXD_SIZE, \
30 .rxds = 4U * 1024U, \ 30 .rxds_max = HW_ATL_B0_MAX_RXD, \
31 .txd_alignment = 1U, \ 31 .rxds_min = HW_ATL_B0_MIN_RXD, \
32 .txd_size = HW_ATL_B0_TXD_SIZE, \ 32 .txd_alignment = 1U, \
33 .txds = 8U * 1024U, \ 33 .txd_size = HW_ATL_B0_TXD_SIZE, \
34 .txhwb_alignment = 4096U, \ 34 .txds_max = HW_ATL_B0_MAX_TXD, \
35 .tx_rings = HW_ATL_B0_TX_RINGS, \ 35 .txds_min = HW_ATL_B0_MIN_TXD, \
36 .rx_rings = HW_ATL_B0_RX_RINGS, \ 36 .txhwb_alignment = 4096U, \
37 .hw_features = NETIF_F_HW_CSUM | \ 37 .tx_rings = HW_ATL_B0_TX_RINGS, \
38 NETIF_F_RXCSUM | \ 38 .rx_rings = HW_ATL_B0_RX_RINGS, \
39 NETIF_F_RXHASH | \ 39 .hw_features = NETIF_F_HW_CSUM | \
40 NETIF_F_SG | \ 40 NETIF_F_RXCSUM | \
41 NETIF_F_TSO | \ 41 NETIF_F_RXHASH | \
42 NETIF_F_LRO, \ 42 NETIF_F_SG | \
43 .hw_priv_flags = IFF_UNICAST_FLT, \ 43 NETIF_F_TSO | \
44 .flow_control = true, \ 44 NETIF_F_LRO, \
45 .mtu = HW_ATL_B0_MTU_JUMBO, \ 45 .hw_priv_flags = IFF_UNICAST_FLT, \
46 .mac_regs_count = 88, \ 46 .flow_control = true, \
47 .mtu = HW_ATL_B0_MTU_JUMBO, \
48 .mac_regs_count = 88, \
47 .hw_alive_check_addr = 0x10U 49 .hw_alive_check_addr = 0x10U
48 50
49const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = { 51const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
50 DEFAULT_B0_BOARD_BASIC_CAPABILITIES, 52 DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
51 .media_type = AQ_HW_MEDIA_TYPE_FIBRE, 53 .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
52 .link_speed_msk = HW_ATL_B0_RATE_10G | 54 .link_speed_msk = AQ_NIC_RATE_10G |
53 HW_ATL_B0_RATE_5G | 55 AQ_NIC_RATE_5G |
54 HW_ATL_B0_RATE_2G5 | 56 AQ_NIC_RATE_2GS |
55 HW_ATL_B0_RATE_1G | 57 AQ_NIC_RATE_1G |
56 HW_ATL_B0_RATE_100M, 58 AQ_NIC_RATE_100M,
57}; 59};
58 60
59const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = { 61const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
60 DEFAULT_B0_BOARD_BASIC_CAPABILITIES, 62 DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
61 .media_type = AQ_HW_MEDIA_TYPE_TP, 63 .media_type = AQ_HW_MEDIA_TYPE_TP,
62 .link_speed_msk = HW_ATL_B0_RATE_10G | 64 .link_speed_msk = AQ_NIC_RATE_10G |
63 HW_ATL_B0_RATE_5G | 65 AQ_NIC_RATE_5G |
64 HW_ATL_B0_RATE_2G5 | 66 AQ_NIC_RATE_2GS |
65 HW_ATL_B0_RATE_1G | 67 AQ_NIC_RATE_1G |
66 HW_ATL_B0_RATE_100M, 68 AQ_NIC_RATE_100M,
67}; 69};
68 70
69const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = { 71const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
70 DEFAULT_B0_BOARD_BASIC_CAPABILITIES, 72 DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
71 .media_type = AQ_HW_MEDIA_TYPE_TP, 73 .media_type = AQ_HW_MEDIA_TYPE_TP,
72 .link_speed_msk = HW_ATL_B0_RATE_5G | 74 .link_speed_msk = AQ_NIC_RATE_5G |
73 HW_ATL_B0_RATE_2G5 | 75 AQ_NIC_RATE_2GS |
74 HW_ATL_B0_RATE_1G | 76 AQ_NIC_RATE_1G |
75 HW_ATL_B0_RATE_100M, 77 AQ_NIC_RATE_100M,
76}; 78};
77 79
78const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = { 80const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
79 DEFAULT_B0_BOARD_BASIC_CAPABILITIES, 81 DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
80 .media_type = AQ_HW_MEDIA_TYPE_TP, 82 .media_type = AQ_HW_MEDIA_TYPE_TP,
81 .link_speed_msk = HW_ATL_B0_RATE_2G5 | 83 .link_speed_msk = AQ_NIC_RATE_2GS |
82 HW_ATL_B0_RATE_1G | 84 AQ_NIC_RATE_1G |
83 HW_ATL_B0_RATE_100M, 85 AQ_NIC_RATE_100M,
84}; 86};
85 87
86static int hw_atl_b0_hw_reset(struct aq_hw_s *self) 88static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
@@ -98,12 +100,17 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
98 return err; 100 return err;
99} 101}
100 102
103static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
104{
105 hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
106 return 0;
107}
108
101static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) 109static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
102{ 110{
103 u32 tc = 0U; 111 u32 tc = 0U;
104 u32 buff_size = 0U; 112 u32 buff_size = 0U;
105 unsigned int i_priority = 0U; 113 unsigned int i_priority = 0U;
106 bool is_rx_flow_control = false;
107 114
108 /* TPS Descriptor rate init */ 115 /* TPS Descriptor rate init */
109 hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); 116 hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -136,7 +143,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
136 143
137 /* QoS Rx buf size per TC */ 144 /* QoS Rx buf size per TC */
138 tc = 0; 145 tc = 0;
139 is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
140 buff_size = HW_ATL_B0_RXBUF_MAX; 146 buff_size = HW_ATL_B0_RXBUF_MAX;
141 147
142 hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); 148 hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -148,7 +154,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
148 (buff_size * 154 (buff_size *
149 (1024U / 32U) * 50U) / 155 (1024U / 32U) * 50U) /
150 100U, tc); 156 100U, tc);
151 hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc); 157
158 hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
152 159
153 /* QoS 802.1p priority -> TC mapping */ 160 /* QoS 802.1p priority -> TC mapping */
154 for (i_priority = 8U; i_priority--;) 161 for (i_priority = 8U; i_priority--;)
@@ -227,8 +234,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
227 hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1); 234 hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
228 235
229 /* RX checksums offloads*/ 236 /* RX checksums offloads*/
230 hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1); 237 hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
231 hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1); 238 NETIF_F_RXCSUM));
239 hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
240 NETIF_F_RXCSUM));
232 241
233 /* LSO offloads*/ 242 /* LSO offloads*/
234 hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); 243 hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
@@ -653,9 +662,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
653 struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) 662 struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
654 &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE]; 663 &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
655 664
656 unsigned int is_err = 1U;
657 unsigned int is_rx_check_sum_enabled = 0U; 665 unsigned int is_rx_check_sum_enabled = 0U;
658 unsigned int pkt_type = 0U; 666 unsigned int pkt_type = 0U;
667 u8 rx_stat = 0U;
659 668
660 if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */ 669 if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
661 break; 670 break;
@@ -663,35 +672,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
663 672
664 buff = &ring->buff_ring[ring->hw_head]; 673 buff = &ring->buff_ring[ring->hw_head];
665 674
666 is_err = (0x0000003CU & rxd_wb->status); 675 rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
667 676
668 is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); 677 is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;
669 is_err &= ~0x20U; /* exclude validity bit */
670 678
671 pkt_type = 0xFFU & (rxd_wb->type >> 4); 679 pkt_type = 0xFFU & (rxd_wb->type >> 4);
672 680
673 if (is_rx_check_sum_enabled) { 681 if (is_rx_check_sum_enabled & BIT(0) &&
674 if (0x0U == (pkt_type & 0x3U)) 682 (0x0U == (pkt_type & 0x3U)))
675 buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U; 683 buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
676 684
685 if (is_rx_check_sum_enabled & BIT(1)) {
677 if (0x4U == (pkt_type & 0x1CU)) 686 if (0x4U == (pkt_type & 0x1CU))
678 buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; 687 buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
688 !!(rx_stat & BIT(3));
679 else if (0x0U == (pkt_type & 0x1CU)) 689 else if (0x0U == (pkt_type & 0x1CU))
680 buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U; 690 buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
681 691 !!(rx_stat & BIT(3));
682 /* Checksum offload workaround for small packets */ 692 }
683 if (rxd_wb->pkt_len <= 60) { 693 buff->is_cso_err = !!(rx_stat & 0x6);
684 buff->is_ip_cso = 0U; 694 /* Checksum offload workaround for small packets */
685 buff->is_cso_err = 0U; 695 if (unlikely(rxd_wb->pkt_len <= 60)) {
686 } 696 buff->is_ip_cso = 0U;
697 buff->is_cso_err = 0U;
687 } 698 }
688
689 is_err &= ~0x18U;
690 699
691 dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); 700 dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
692 701
693 if (is_err || rxd_wb->type & 0x1000U) { 702 if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
694 /* status error or DMA error */ 703 /* MAC error or DMA error */
695 buff->is_error = 1U; 704 buff->is_error = 1U;
696 } else { 705 } else {
697 if (self->aq_nic_cfg->is_rss) { 706 if (self->aq_nic_cfg->is_rss) {
@@ -762,7 +771,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
762 771
763 hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); 772 hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
764 hw_atl_rpfl2multicast_flr_en_set(self, 773 hw_atl_rpfl2multicast_flr_en_set(self,
765 IS_FILTER_ENABLED(IFF_MULTICAST), 0); 774 IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
766 775
767 hw_atl_rpfl2_accept_all_mc_packets_set(self, 776 hw_atl_rpfl2_accept_all_mc_packets_set(self,
768 IS_FILTER_ENABLED(IFF_ALLMULTI)); 777 IS_FILTER_ENABLED(IFF_ALLMULTI));
@@ -784,7 +793,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
784 793
785static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, 794static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
786 u8 ar_mac 795 u8 ar_mac
787 [AQ_CFG_MULTICAST_ADDRESS_MAX] 796 [AQ_HW_MULTICAST_ADDRESS_MAX]
788 [ETH_ALEN], 797 [ETH_ALEN],
789 u32 count) 798 u32 count)
790{ 799{
@@ -812,7 +821,7 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
812 821
813 hw_atl_rpfl2_uc_flr_en_set(self, 822 hw_atl_rpfl2_uc_flr_en_set(self,
814 (self->aq_nic_cfg->is_mc_list_enabled), 823 (self->aq_nic_cfg->is_mc_list_enabled),
815 HW_ATL_B0_MAC_MIN + i); 824 HW_ATL_B0_MAC_MIN + i);
816 } 825 }
817 826
818 err = aq_hw_err_from_flags(self); 827 err = aq_hw_err_from_flags(self);
@@ -913,6 +922,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
913static int hw_atl_b0_hw_stop(struct aq_hw_s *self) 922static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
914{ 923{
915 hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); 924 hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
925
926 /* Invalidate Descriptor Cache to prevent writing to the cached
927 * descriptors and to the data pointer of those descriptors
928 */
929 hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
930
916 return aq_hw_err_from_flags(self); 931 return aq_hw_err_from_flags(self);
917} 932}
918 933
@@ -933,8 +948,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
933const struct aq_hw_ops hw_atl_ops_b0 = { 948const struct aq_hw_ops hw_atl_ops_b0 = {
934 .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, 949 .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
935 .hw_init = hw_atl_b0_hw_init, 950 .hw_init = hw_atl_b0_hw_init,
936 .hw_deinit = hw_atl_utils_hw_deinit,
937 .hw_set_power = hw_atl_utils_hw_set_power,
938 .hw_reset = hw_atl_b0_hw_reset, 951 .hw_reset = hw_atl_b0_hw_reset,
939 .hw_start = hw_atl_b0_hw_start, 952 .hw_start = hw_atl_b0_hw_start,
940 .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start, 953 .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
@@ -963,4 +976,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
963 .hw_get_regs = hw_atl_utils_hw_get_regs, 976 .hw_get_regs = hw_atl_utils_hw_get_regs,
964 .hw_get_hw_stats = hw_atl_utils_get_hw_stats, 977 .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
965 .hw_get_fw_version = hw_atl_utils_get_fw_version, 978 .hw_get_fw_version = hw_atl_utils_get_fw_version,
979 .hw_set_offload = hw_atl_b0_hw_offload_set,
980 .hw_set_fc = hw_atl_b0_set_fc,
966}; 981};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 405d1455c222..b318eefd36ae 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -67,12 +67,6 @@
67#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU 67#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU
68#define HW_ATL_B0_MPI_SPEED_SHIFT 16U 68#define HW_ATL_B0_MPI_SPEED_SHIFT 16U
69 69
70#define HW_ATL_B0_RATE_10G BIT(0)
71#define HW_ATL_B0_RATE_5G BIT(1)
72#define HW_ATL_B0_RATE_2G5 BIT(3)
73#define HW_ATL_B0_RATE_1G BIT(4)
74#define HW_ATL_B0_RATE_100M BIT(5)
75
76#define HW_ATL_B0_TXBUF_MAX 160U 70#define HW_ATL_B0_TXBUF_MAX 160U
77#define HW_ATL_B0_RXBUF_MAX 320U 71#define HW_ATL_B0_RXBUF_MAX 320U
78 72
@@ -142,6 +136,14 @@
142#define HW_ATL_INTR_MODER_MAX 0x1FF 136#define HW_ATL_INTR_MODER_MAX 0x1FF
143#define HW_ATL_INTR_MODER_MIN 0xFF 137#define HW_ATL_INTR_MODER_MIN 0xFF
144 138
139#define HW_ATL_B0_MIN_RXD \
140 (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
141#define HW_ATL_B0_MIN_TXD \
142 (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
143
144#define HW_ATL_B0_MAX_RXD 8184U
145#define HW_ATL_B0_MAX_TXD 8184U
146
145/* HW layer capabilities */ 147/* HW layer capabilities */
146 148
147#endif /* HW_ATL_B0_INTERNAL_H */ 149#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 10ba035dadb1..5502ec5f0f69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
619 HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode); 619 HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
620} 620}
621 621
622void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
623{
624 aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
625