author	Karicheri, Muralidharan <m-karicheri2@ti.com>	2015-01-15 19:12:50 -0500
committer	David S. Miller <davem@davemloft.net>	2015-01-19 15:07:39 -0500
commit	84640e27f23041d474c31d3362c3e2185ad68ec2 (patch)
tree	c4aa2c47bc7151fb22853084a22f0a5d4bd6e94e
parent	44eefcdfb9a35f7fd73e1896aefb6292124046d2 (diff)
net: netcp: Add Keystone NetCP core ethernet driver
The network coprocessor (NetCP) is a hardware accelerator available in Keystone SoCs that processes Ethernet packets. NetCP consists of the following hardware components:

1. A Gigabit Ethernet (GbE) subsystem with an Ethernet switch sub-module to send and receive packets.
2. A Packet Accelerator (PA) module to perform packet classification operations such as header matching, and packet modification operations such as checksum generation.
3. A Security Accelerator (SA) capable of performing IPsec operations on ingress/egress packets.
4. An optional 10 Gigabit Ethernet subsystem (XGbE) which includes a 3-port Ethernet switch sub-module capable of 10Gb/s and 1Gb/s rates per Ethernet port.
5. A Packet DMA and Queue Management Subsystem (QMSS) to enqueue and dequeue packets and to DMA the packets between memory and the NetCP hardware components described above.

The NetCP core driver makes use of the Keystone Navigator driver API to allocate a DMA channel for the Ethernet device and to handle packet queueing and dequeuing; please refer to the APIs in include/linux/soc/ti/knav_dma.h and drivers/soc/ti/knav_qmss.h for details.

The NetCP driver consists of the NetCP core driver and, at a minimum, the Gigabit Ethernet (GBE) module (1) driver, which implements the network device function. The other modules (2, 3) can optionally be added to provide the supported hardware acceleration functions. This initial version includes the NetCP core driver and the GBE driver module. Please refer to Documentation/devicetree/bindings/net/keystone-netcp.txt for the design of the driver.

Cc: David Miller <davem@davemloft.net>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Santosh Shilimkar <santosh.shilimkar@kernel.org>
Cc: Pawel Moll <pawel.moll@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Ian Campbell <ijc+devicetree@hellion.org.uk>
Cc: Kumar Gala <galak@codeaurora.org>
Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
Signed-off-by: Wingman Kwok <w-kwok2@ti.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
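As an illustration of the module interface (not part of this patch): a hardware module such as the GBE driver plugs into the core through the struct netcp_module operations declared in netcp.h below. A minimal sketch, assuming hypothetical gbe_probe()/gbe_attach() callbacks:

	static struct netcp_module gbe_module = {
		.name		= "netcp-gbe",
		.owner		= THIS_MODULE,
		.primary	= true,
		.probe		= gbe_probe,
		.attach		= gbe_attach,
	};

	static int __init keystone_gbe_init(void)
	{
		/* The core probes the module against every registered
		 * NetCP device and then attaches it to each interface.
		 */
		return netcp_register_module(&gbe_module);
	}
	module_init(keystone_gbe_init);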
-rw-r--r--	MAINTAINERS	7
-rw-r--r--	drivers/net/ethernet/ti/Kconfig	11
-rw-r--r--	drivers/net/ethernet/ti/Makefile	3
-rw-r--r--	drivers/net/ethernet/ti/netcp.h	229
-rw-r--r--	drivers/net/ethernet/ti/netcp_core.c	2141
5 files changed, 2391 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 9b91d9f0257e..e1ff4ce5bcab 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9609,6 +9609,13 @@ F: drivers/power/lp8788-charger.c
9609F: drivers/regulator/lp8788-*.c
9610F: include/linux/mfd/lp8788*.h
9611
9612TI NETCP ETHERNET DRIVER
9613M: Wingman Kwok <w-kwok2@ti.com>
9614M: Murali Karicheri <m-karicheri2@ti.com>
9615L: netdev@vger.kernel.org
9616S: Maintained
9617F: drivers/net/ethernet/ti/netcp*
9618
9619TI TWL4030 SERIES SOC CODEC DRIVER
9620M: Peter Ujfalusi <peter.ujfalusi@ti.com>
9621L: alsa-devel@alsa-project.org (moderated for non-subscribers)
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 605dd909bcc3..e11bcfa69f52 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -73,12 +73,23 @@ config TI_CPSW
73config TI_CPTS
74 boolean "TI Common Platform Time Sync (CPTS) Support"
75 depends on TI_CPSW
76 depends on TI_CPSW || TI_KEYSTONE_NET
77 select PTP_1588_CLOCK
78 ---help---
79 This driver supports the Common Platform Time Sync unit of
80 the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
81 and Layer 2 packets, and the driver offers a PTP Hardware Clock.
82
83config TI_KEYSTONE_NETCP
84 tristate "TI Keystone NETCP Ethernet subsystem Support"
85 depends on OF
86 depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
87 ---help---
88 This driver supports TI's Keystone NETCP Ethernet subsystem.
89
90 To compile this driver as a module, choose M here: the module
91 will be called keystone_netcp.
92
93config TLAN
94 tristate "TI ThunderLAN support"
95 depends on (PCI || EISA)
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 9cfaab8152be..4e8a8e41f69f 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -10,3 +10,6 @@ obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
10obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
11obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
12ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
13
14obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
15keystone_netcp-y := netcp_core.o
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
new file mode 100644
index 000000000000..906e9bc412f5
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -0,0 +1,229 @@
1/*
2 * NetCP driver local header
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * Sandeep Paulraj <s-paulraj@ti.com>
7 * Cyril Chemparathy <cyril@ti.com>
8 * Santosh Shilimkar <santosh.shilimkar@ti.com>
9 * Wingman Kwok <w-kwok2@ti.com>
10 * Murali Karicheri <m-karicheri2@ti.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation version 2.
15 *
16 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
17 * kind, whether express or implied; without even the implied warranty
18 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
21#ifndef __NETCP_H__
22#define __NETCP_H__
23
24#include <linux/netdevice.h>
25#include <linux/soc/ti/knav_dma.h>
26
27/* Maximum Ethernet frame size supported by Keystone switch */
28#define NETCP_MAX_FRAME_SIZE 9504
29
30#define SGMII_LINK_MAC_MAC_AUTONEG 0
31#define SGMII_LINK_MAC_PHY 1
32#define SGMII_LINK_MAC_MAC_FORCED 2
33#define SGMII_LINK_MAC_FIBER 3
34#define SGMII_LINK_MAC_PHY_NO_MDIO 4
35#define XGMII_LINK_MAC_PHY 10
36#define XGMII_LINK_MAC_MAC_FORCED 11
37
38struct netcp_device;
39
40struct netcp_tx_pipe {
41 struct netcp_device *netcp_device;
42 void *dma_queue;
43 unsigned int dma_queue_id;
44 u8 dma_psflags;
45 void *dma_channel;
46 const char *dma_chan_name;
47};
48
49#define ADDR_NEW BIT(0)
50#define ADDR_VALID BIT(1)
51
52enum netcp_addr_type {
53 ADDR_ANY,
54 ADDR_DEV,
55 ADDR_UCAST,
56 ADDR_MCAST,
57 ADDR_BCAST
58};
59
60struct netcp_addr {
61 struct netcp_intf *netcp;
62 unsigned char addr[ETH_ALEN];
63 enum netcp_addr_type type;
64 unsigned int flags;
65 struct list_head node;
66};
67
68struct netcp_intf {
69 struct device *dev;
70 struct device *ndev_dev;
71 struct net_device *ndev;
72 bool big_endian;
73 unsigned int tx_compl_qid;
74 void *tx_pool;
75 struct list_head txhook_list_head;
76 unsigned int tx_pause_threshold;
77 void *tx_compl_q;
78
79 unsigned int tx_resume_threshold;
80 void *rx_queue;
81 void *rx_pool;
82 struct list_head rxhook_list_head;
83 unsigned int rx_queue_id;
84 void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
85 u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
86 struct napi_struct rx_napi;
87 struct napi_struct tx_napi;
88
89 void *rx_channel;
90 const char *dma_chan_name;
91 u32 rx_pool_size;
92 u32 rx_pool_region_id;
93 u32 tx_pool_size;
94 u32 tx_pool_region_id;
95 struct list_head module_head;
96 struct list_head interface_list;
97 struct list_head addr_list;
98 bool netdev_registered;
99 bool primary_module_attached;
100
101 /* Lock used for protecting Rx/Tx hook list management */
102 spinlock_t lock;
103 struct netcp_device *netcp_device;
104 struct device_node *node_interface;
105
106 /* DMA configuration data */
107 u32 msg_enable;
108 u32 rx_queue_depths[KNAV_DMA_FDQ_PER_CHAN];
109};
110
111#define NETCP_PSDATA_LEN KNAV_DMA_NUM_PS_WORDS
112struct netcp_packet {
113 struct sk_buff *skb;
114 u32 *epib;
115 u32 *psdata;
116 unsigned int psdata_len;
117 struct netcp_intf *netcp;
118 struct netcp_tx_pipe *tx_pipe;
119 bool rxtstamp_complete;
120 void *ts_context;
121
122 int (*txtstamp_complete)(void *ctx, struct netcp_packet *pkt);
123};
124
125static inline u32 *netcp_push_psdata(struct netcp_packet *p_info,
126 unsigned int bytes)
127{
128 u32 *buf;
129 unsigned int words;
130
131 if ((bytes & 0x03) != 0)
132 return NULL;
133 words = bytes >> 2;
134
135 if ((p_info->psdata_len + words) > NETCP_PSDATA_LEN)
136 return NULL;
137
138 p_info->psdata_len += words;
139 buf = &p_info->psdata[NETCP_PSDATA_LEN - p_info->psdata_len];
140 return buf;
141}
142
143static inline int netcp_align_psdata(struct netcp_packet *p_info,
144 unsigned int byte_align)
145{
146 int padding;
147
148 switch (byte_align) {
149 case 0:
150 padding = -EINVAL;
151 break;
152 case 1:
153 case 2:
154 case 4:
155 padding = 0;
156 break;
157 case 8:
158 padding = (p_info->psdata_len << 2) % 8;
159 break;
160 case 16:
161 padding = (p_info->psdata_len << 2) % 16;
162 break;
163 default:
164 padding = (p_info->psdata_len << 2) % byte_align;
165 break;
166 }
167 return padding;
168}
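/*
 * Usage sketch (illustrative, not part of this patch; names are
 * hypothetical): a module that wants two protocol-specific words in
 * the descriptor, 8-byte aligned, would pad first and then push:
 *
 *	int pad = netcp_align_psdata(p_info, 8);
 *	u32 *psdata;
 *
 *	if (pad < 0)
 *		return pad;
 *	psdata = netcp_push_psdata(p_info, pad + 2 * sizeof(u32));
 *	if (!psdata)
 *		return -ENOBUFS;
 */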
169
170struct netcp_module {
171 const char *name;
172 struct module *owner;
173 bool primary;
174
175 /* probe/remove: called once per NETCP instance */
176 int (*probe)(struct netcp_device *netcp_device,
177 struct device *device, struct device_node *node,
178 void **inst_priv);
179 int (*remove)(struct netcp_device *netcp_device, void *inst_priv);
180
181 /* attach/release: called once per network interface */
182 int (*attach)(void *inst_priv, struct net_device *ndev,
183 struct device_node *node, void **intf_priv);
184 int (*release)(void *intf_priv);
185 int (*open)(void *intf_priv, struct net_device *ndev);
186 int (*close)(void *intf_priv, struct net_device *ndev);
187 int (*add_addr)(void *intf_priv, struct netcp_addr *naddr);
188 int (*del_addr)(void *intf_priv, struct netcp_addr *naddr);
189 int (*add_vid)(void *intf_priv, int vid);
190 int (*del_vid)(void *intf_priv, int vid);
191 int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd);
192
193 /* used internally */
194 struct list_head module_list;
195 struct list_head interface_list;
196};
197
198int netcp_register_module(struct netcp_module *module);
199void netcp_unregister_module(struct netcp_module *module);
200void *netcp_module_get_intf_data(struct netcp_module *module,
201 struct netcp_intf *intf);
202
203int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
204 struct netcp_device *netcp_device,
205 const char *dma_chan_name, unsigned int dma_queue_id);
206int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe);
207int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe);
208
209typedef int netcp_hook_rtn(int order, void *data, struct netcp_packet *packet);
210int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
211 netcp_hook_rtn *hook_rtn, void *hook_data);
212int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
213 netcp_hook_rtn *hook_rtn, void *hook_data);
214int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
215 netcp_hook_rtn *hook_rtn, void *hook_data);
216int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
217 netcp_hook_rtn *hook_rtn, void *hook_data);
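/*
 * Illustrative TX hook (not part of this patch; gbe_intf and
 * GBE_TXHOOK_ORDER are hypothetical): a module claims outgoing
 * packets by setting p_info->tx_pipe from its hook, which
 * netcp_tx_submit_skb() in netcp_core.c then uses to push the
 * descriptor:
 *
 *	static int gbe_txhook(int order, void *data,
 *			      struct netcp_packet *p_info)
 *	{
 *		struct gbe_intf *gbe_intf = data;
 *
 *		p_info->tx_pipe = &gbe_intf->tx_pipe;
 *		return 0;
 *	}
 *
 * registered from the module's open() callback with:
 *
 *	netcp_register_txhook(netcp_priv, GBE_TXHOOK_ORDER,
 *			      gbe_txhook, gbe_intf);
 */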
218void *netcp_device_find_module(struct netcp_device *netcp_device,
219 const char *name);
220
221/* SGMII functions */
222int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
223int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
224int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
225
226/* XGBE SERDES init functions */
227int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs);
228
229#endif /* __NETCP_H__ */
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
new file mode 100644
index 000000000000..ba3002ec710a
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -0,0 +1,2141 @@
1/*
2 * Keystone NetCP Core driver
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * Sandeep Paulraj <s-paulraj@ti.com>
7 * Cyril Chemparathy <cyril@ti.com>
8 * Santosh Shilimkar <santosh.shilimkar@ti.com>
9 * Murali Karicheri <m-karicheri2@ti.com>
10 * Wingman Kwok <w-kwok2@ti.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation version 2.
15 *
16 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
17 * kind, whether express or implied; without even the implied warranty
18 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
21
22#include <linux/io.h>
23#include <linux/module.h>
24#include <linux/of_net.h>
25#include <linux/of_address.h>
26#include <linux/if_vlan.h>
27#include <linux/pm_runtime.h>
28#include <linux/platform_device.h>
29#include <linux/soc/ti/knav_qmss.h>
30#include <linux/soc/ti/knav_dma.h>
31
32#include "netcp.h"
33
34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
35#define NETCP_NAPI_WEIGHT 64
36#define NETCP_TX_TIMEOUT (5 * HZ)
37#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
38#define NETCP_MAX_MCAST_ADDR 16
39
40#define NETCP_EFUSE_REG_INDEX 0
41
42#define NETCP_MOD_PROBE_SKIPPED 1
43#define NETCP_MOD_PROBE_FAILED 2
44
45#define NETCP_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
46 NETIF_MSG_DRV | NETIF_MSG_LINK | \
47 NETIF_MSG_IFUP | NETIF_MSG_INTR | \
48 NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
49 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
50 NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
51 NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
52 NETIF_MSG_RX_STATUS)
53
54#define knav_queue_get_id(q) knav_queue_device_control(q, \
55 KNAV_QUEUE_GET_ID, (unsigned long)NULL)
56
57#define knav_queue_enable_notify(q) knav_queue_device_control(q, \
58 KNAV_QUEUE_ENABLE_NOTIFY, \
59 (unsigned long)NULL)
60
61#define knav_queue_disable_notify(q) knav_queue_device_control(q, \
62 KNAV_QUEUE_DISABLE_NOTIFY, \
63 (unsigned long)NULL)
64
65#define knav_queue_get_count(q) knav_queue_device_control(q, \
66 KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
67
68#define for_each_netcp_module(module) \
69 list_for_each_entry(module, &netcp_modules, module_list)
70
71#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
72 list_for_each_entry(inst_modpriv, \
73 &((netcp_device)->modpriv_head), inst_list)
74
75#define for_each_module(netcp, intf_modpriv) \
76 list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
77
78/* Module management structures */
79struct netcp_device {
80 struct list_head device_list;
81 struct list_head interface_head;
82 struct list_head modpriv_head;
83 struct device *device;
84};
85
86struct netcp_inst_modpriv {
87 struct netcp_device *netcp_device;
88 struct netcp_module *netcp_module;
89 struct list_head inst_list;
90 void *module_priv;
91};
92
93struct netcp_intf_modpriv {
94 struct netcp_intf *netcp_priv;
95 struct netcp_module *netcp_module;
96 struct list_head intf_list;
97 void *module_priv;
98};
99
100static LIST_HEAD(netcp_devices);
101static LIST_HEAD(netcp_modules);
102static DEFINE_MUTEX(netcp_modules_lock);
103
104static int netcp_debug_level = -1;
105module_param(netcp_debug_level, int, 0);
106MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
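/*
 * Illustrative usage (assuming the core feeds this parameter through
 * the standard netif_msg_init() helper, as is conventional): loading
 * with "modprobe keystone_netcp netcp_debug_level=4" would enable the
 * lowest four NETIF_MSG_* classes, while the default of -1 selects
 * the driver's built-in mask.
 */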
107
108/* Helper functions - Get/Set */
109static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc,
110 struct knav_dma_desc *desc)
111{
112 *buff_len = desc->buff_len;
113 *buff = desc->buff;
114 *ndesc = desc->next_desc;
115}
116
117static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
118{
119 *pad0 = desc->pad[0];
120 *pad1 = desc->pad[1];
121}
122
123static void get_org_pkt_info(u32 *buff, u32 *buff_len,
124 struct knav_dma_desc *desc)
125{
126 *buff = desc->orig_buff;
127 *buff_len = desc->orig_len;
128}
129
130static void get_words(u32 *words, int num_words, u32 *desc)
131{
132 int i;
133
134 for (i = 0; i < num_words; i++)
135 words[i] = desc[i];
136}
137
138static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc,
139 struct knav_dma_desc *desc)
140{
141 desc->buff_len = buff_len;
142 desc->buff = buff;
143 desc->next_desc = ndesc;
144}
145
146static void set_desc_info(u32 desc_info, u32 pkt_info,
147 struct knav_dma_desc *desc)
148{
149 desc->desc_info = desc_info;
150 desc->packet_info = pkt_info;
151}
152
153static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
154{
155 desc->pad[0] = pad0;
156 desc->pad[1] = pad1;
157}
158
159static void set_org_pkt_info(u32 buff, u32 buff_len,
160 struct knav_dma_desc *desc)
161{
162 desc->orig_buff = buff;
163 desc->orig_len = buff_len;
164}
165
166static void set_words(u32 *words, int num_words, u32 *desc)
167{
168 int i;
169
170 for (i = 0; i < num_words; i++)
171 desc[i] = words[i];
172}
173
174/* Read the e-fuse value as two 32-bit words so the result is endian independent */
175static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
176{
177 unsigned int addr0, addr1;
178
179 addr1 = readl(efuse_mac + 4);
180 addr0 = readl(efuse_mac);
181
182 x[0] = (addr1 & 0x0000ff00) >> 8;
183 x[1] = addr1 & 0x000000ff;
184 x[2] = (addr0 & 0xff000000) >> 24;
185 x[3] = (addr0 & 0x00ff0000) >> 16;
186 x[4] = (addr0 & 0x0000ff00) >> 8;
187 x[5] = addr0 & 0x000000ff;
188
189 return 0;
190}
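/*
 * Worked example (following the bit extraction above): for the MAC
 * address 00:17:ea:b4:32:10, the e-fuse reads back
 * addr0 = 0xeab43210 and addr1 = 0x00000017.
 */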
191
192static const char *netcp_node_name(struct device_node *node)
193{
194 const char *name;
195
196 if (of_property_read_string(node, "label", &name) < 0)
197 name = node->name;
198 if (!name)
199 name = "unknown";
200 return name;
201}
202
203/* Module management routines */
204static int netcp_register_interface(struct netcp_intf *netcp)
205{
206 int ret;
207
208 ret = register_netdev(netcp->ndev);
209 if (!ret)
210 netcp->netdev_registered = true;
211 return ret;
212}
213
214static int netcp_module_probe(struct netcp_device *netcp_device,
215 struct netcp_module *module)
216{
217 struct device *dev = netcp_device->device;
218 struct device_node *devices, *interface, *node = dev->of_node;
219 struct device_node *child;
220 struct netcp_inst_modpriv *inst_modpriv;
221 struct netcp_intf *netcp_intf;
222 struct netcp_module *tmp;
223 bool primary_module_registered = false;
224 int ret;
225
226 /* Find this module in the sub-tree for this device */
227 devices = of_get_child_by_name(node, "netcp-devices");
228 if (!devices) {
229 dev_err(dev, "could not find netcp-devices node\n");
230 return NETCP_MOD_PROBE_SKIPPED;
231 }
232
233 for_each_available_child_of_node(devices, child) {
234 const char *name = netcp_node_name(child);
235
236 if (!strcasecmp(module->name, name))
237 break;
238 }
239
240 of_node_put(devices);
241 /* If module not used for this device, skip it */
242 if (!child) {
243 dev_warn(dev, "module(%s) not used for device\n", module->name);
244 return NETCP_MOD_PROBE_SKIPPED;
245 }
246
247 inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
248 if (!inst_modpriv) {
249 of_node_put(child);
250 return -ENOMEM;
251 }
252
253 inst_modpriv->netcp_device = netcp_device;
254 inst_modpriv->netcp_module = module;
255 list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);
256
257 ret = module->probe(netcp_device, dev, child,
258 &inst_modpriv->module_priv);
259 of_node_put(child);
260 if (ret) {
261 dev_err(dev, "Probe of module(%s) failed with %d\n",
262 module->name, ret);
263 list_del(&inst_modpriv->inst_list);
264 devm_kfree(dev, inst_modpriv);
265 return NETCP_MOD_PROBE_FAILED;
266 }
267
268 /* Attach modules only if the primary module is probed */
269 for_each_netcp_module(tmp) {
270 if (tmp->primary)
271 primary_module_registered = true;
272 }
273
274 if (!primary_module_registered)
275 return 0;
276
277 /* Attach module to interfaces */
278 list_for_each_entry(netcp_intf, &netcp_device->interface_head,
279 interface_list) {
280 struct netcp_intf_modpriv *intf_modpriv;
281
282 /* If interface not registered then register now */
283 if (!netcp_intf->netdev_registered)
284 ret = netcp_register_interface(netcp_intf);
285
286 if (ret)
287 return -ENODEV;
288
289 intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
290 GFP_KERNEL);
291 if (!intf_modpriv)
292 return -ENOMEM;
293
294 interface = of_parse_phandle(netcp_intf->node_interface,
295 module->name, 0);
296
297 intf_modpriv->netcp_priv = netcp_intf;
298 intf_modpriv->netcp_module = module;
299 list_add_tail(&intf_modpriv->intf_list,
300 &netcp_intf->module_head);
301
302 ret = module->attach(inst_modpriv->module_priv,
303 netcp_intf->ndev, interface,
304 &intf_modpriv->module_priv);
305 of_node_put(interface);
306 if (ret) {
307 dev_dbg(dev, "Attach of module %s declined with %d\n",
308 module->name, ret);
309 list_del(&intf_modpriv->intf_list);
310 devm_kfree(dev, intf_modpriv);
311 continue;
312 }
313 }
314 return 0;
315}
316
317int netcp_register_module(struct netcp_module *module)
318{
319 struct netcp_device *netcp_device;
320 struct netcp_module *tmp;
321 int ret;
322
323 if (!module->name) {
324 WARN(1, "error registering netcp module: no name\n");
325 return -EINVAL;
326 }
327
328 if (!module->probe) {
329 WARN(1, "error registering netcp module: no probe\n");
330 return -EINVAL;
331 }
332
333 mutex_lock(&netcp_modules_lock);
334
335 for_each_netcp_module(tmp) {
336 if (!strcasecmp(tmp->name, module->name)) {
337 mutex_unlock(&netcp_modules_lock);
338 return -EEXIST;
339 }
340 }
341 list_add_tail(&module->module_list, &netcp_modules);
342
343 list_for_each_entry(netcp_device, &netcp_devices, device_list) {
344 ret = netcp_module_probe(netcp_device, module);
345 if (ret < 0)
346 goto fail;
347 }
348
349 mutex_unlock(&netcp_modules_lock);
350 return 0;
351
352fail:
353 mutex_unlock(&netcp_modules_lock);
354 netcp_unregister_module(module);
355 return ret;
356}
357
358static void netcp_release_module(struct netcp_device *netcp_device,
359 struct netcp_module *module)
360{
361 struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
362 struct netcp_intf *netcp_intf, *netcp_tmp;
363 struct device *dev = netcp_device->device;
364
365 /* Release the module from each interface */
366 list_for_each_entry_safe(netcp_intf, netcp_tmp,
367 &netcp_device->interface_head,
368 interface_list) {
369 struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;
370
371 list_for_each_entry_safe(intf_modpriv, intf_tmp,
372 &netcp_intf->module_head,
373 intf_list) {
374 if (intf_modpriv->netcp_module == module) {
375 module->release(intf_modpriv->module_priv);
376 list_del(&intf_modpriv->intf_list);
377 devm_kfree(dev, intf_modpriv);
378 break;
379 }
380 }
381 }
382
383 /* Remove the module from each instance */
384 list_for_each_entry_safe(inst_modpriv, inst_tmp,
385 &netcp_device->modpriv_head, inst_list) {
386 if (inst_modpriv->netcp_module == module) {
387 module->remove(netcp_device,
388 inst_modpriv->module_priv);
389 list_del(&inst_modpriv->inst_list);
390 devm_kfree(dev, inst_modpriv);
391 break;
392 }
393 }
394}
395
396void netcp_unregister_module(struct netcp_module *module)
397{
398 struct netcp_device *netcp_device;
399 struct netcp_module *module_tmp;
400
401 mutex_lock(&netcp_modules_lock);
402
403 list_for_each_entry(netcp_device, &netcp_devices, device_list) {
404 netcp_release_module(netcp_device, module);
405 }
406
407 /* Remove the module from the module list */
408 for_each_netcp_module(module_tmp) {
409 if (module == module_tmp) {
410 list_del(&module->module_list);
411 break;
412 }
413 }
414
415 mutex_unlock(&netcp_modules_lock);
416}
417
418void *netcp_module_get_intf_data(struct netcp_module *module,
419 struct netcp_intf *intf)
420{
421 struct netcp_intf_modpriv *intf_modpriv;
422
423 list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
424 if (intf_modpriv->netcp_module == module)
425 return intf_modpriv->module_priv;
426 return NULL;
427}
428
429/* Module TX and RX Hook management */
430struct netcp_hook_list {
431 struct list_head list;
432 netcp_hook_rtn *hook_rtn;
433 void *hook_data;
434 int order;
435};
436
437int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
438 netcp_hook_rtn *hook_rtn, void *hook_data)
439{
440 struct netcp_hook_list *entry;
441 struct netcp_hook_list *next;
442 unsigned long flags;
443
444 entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
445 if (!entry)
446 return -ENOMEM;
447
448 entry->hook_rtn = hook_rtn;
449 entry->hook_data = hook_data;
450 entry->order = order;
451
452 spin_lock_irqsave(&netcp_priv->lock, flags);
453 list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
454 if (next->order > order)
455 break;
456 }
457 __list_add(&entry->list, next->list.prev, &next->list);
458 spin_unlock_irqrestore(&netcp_priv->lock, flags);
459
460 return 0;
461}
462
463int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
464 netcp_hook_rtn *hook_rtn, void *hook_data)
465{
466 struct netcp_hook_list *next, *n;
467 unsigned long flags;
468
469 spin_lock_irqsave(&netcp_priv->lock, flags);
470 list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
471 if ((next->order == order) &&
472 (next->hook_rtn == hook_rtn) &&
473 (next->hook_data == hook_data)) {
474 list_del(&next->list);
475 spin_unlock_irqrestore(&netcp_priv->lock, flags);
476 devm_kfree(netcp_priv->dev, next);
477 return 0;
478 }
479 }
480 spin_unlock_irqrestore(&netcp_priv->lock, flags);
481 return -ENOENT;
482}
483
484int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
485 netcp_hook_rtn *hook_rtn, void *hook_data)
486{
487 struct netcp_hook_list *entry;
488 struct netcp_hook_list *next;
489 unsigned long flags;
490
491 entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
492 if (!entry)
493 return -ENOMEM;
494
495 entry->hook_rtn = hook_rtn;
496 entry->hook_data = hook_data;
497 entry->order = order;
498
499 spin_lock_irqsave(&netcp_priv->lock, flags);
500 list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
501 if (next->order > order)
502 break;
503 }
504 __list_add(&entry->list, next->list.prev, &next->list);
505 spin_unlock_irqrestore(&netcp_priv->lock, flags);
506
507 return 0;
508}
509
510int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
511 netcp_hook_rtn *hook_rtn, void *hook_data)
512{
513 struct netcp_hook_list *next, *n;
514 unsigned long flags;
515
516 spin_lock_irqsave(&netcp_priv->lock, flags);
517 list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
518 if ((next->order == order) &&
519 (next->hook_rtn == hook_rtn) &&
520 (next->hook_data == hook_data)) {
521 list_del(&next->list);
522 spin_unlock_irqrestore(&netcp_priv->lock, flags);
523 devm_kfree(netcp_priv->dev, next);
524 return 0;
525 }
526 }
527 spin_unlock_irqrestore(&netcp_priv->lock, flags);
528
529 return -ENOENT;
530}
531
532static void netcp_frag_free(bool is_frag, void *ptr)
533{
534 if (is_frag)
535 put_page(virt_to_head_page(ptr));
536 else
537 kfree(ptr);
538}
539
540static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
541 struct knav_dma_desc *desc)
542{
543 struct knav_dma_desc *ndesc;
544 dma_addr_t dma_desc, dma_buf;
545 unsigned int buf_len, dma_sz = sizeof(*ndesc);
546 void *buf_ptr;
547 u32 tmp;
548
549 get_words(&dma_desc, 1, &desc->next_desc);
550
551 while (dma_desc) {
552 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
553 if (unlikely(!ndesc)) {
554 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
555 break;
556 }
557 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
558 get_pad_info((u32 *)&buf_ptr, &tmp, ndesc);
559 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
560 __free_page(buf_ptr);
561 knav_pool_desc_put(netcp->rx_pool, ndesc);
562 }
563
564 get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
565 if (buf_ptr)
566 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
567 knav_pool_desc_put(netcp->rx_pool, desc);
568}
569
570static void netcp_empty_rx_queue(struct netcp_intf *netcp)
571{
572 struct knav_dma_desc *desc;
573 unsigned int dma_sz;
574 dma_addr_t dma;
575
576 for (; ;) {
577 dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
578 if (!dma)
579 break;
580
581 desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
582 if (unlikely(!desc)) {
583 dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
584 __func__);
585 netcp->ndev->stats.rx_errors++;
586 continue;
587 }
588 netcp_free_rx_desc_chain(netcp, desc);
589 netcp->ndev->stats.rx_dropped++;
590 }
591}
592
593static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
594{
595 unsigned int dma_sz, buf_len, org_buf_len;
596 struct knav_dma_desc *desc, *ndesc;
597 unsigned int pkt_sz = 0, accum_sz;
598 struct netcp_hook_list *rx_hook;
599 dma_addr_t dma_desc, dma_buff;
600 struct netcp_packet p_info;
601 struct sk_buff *skb;
602 void *org_buf_ptr;
603 u32 tmp;
604
605 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
606 if (!dma_desc)
607 return -1;
608
609 desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
610 if (unlikely(!desc)) {
611 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
612 return 0;
613 }
614
615 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
616 get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);
617
618 if (unlikely(!org_buf_ptr)) {
619 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
620 goto free_desc;
621 }
622
623 pkt_sz = desc->desc_info & KNAV_DMA_DESC_PKT_LEN_MASK;
624 accum_sz = buf_len;
625 dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);
626
627 /* Build a new sk_buff for the primary buffer */
628 skb = build_skb(org_buf_ptr, org_buf_len);
629 if (unlikely(!skb)) {
630 dev_err(netcp->ndev_dev, "build_skb() failed\n");
631 goto free_desc;
632 }
633
634 /* update data, tail and len */
635 skb_reserve(skb, NETCP_SOP_OFFSET);
636 __skb_put(skb, buf_len);
637
638 /* Fill in the page fragment list */
639 while (dma_desc) {
640 struct page *page;
641
642 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
643 if (unlikely(!ndesc)) {
644 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
645 goto free_desc;
646 }
647
648 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
649 get_pad_info((u32 *)&page, &tmp, ndesc);
650
651 if (likely(dma_buff && buf_len && page)) {
652 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
653 DMA_FROM_DEVICE);
654 } else {
655 dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n",
656 (void *)dma_buff, buf_len, page);
657 goto free_desc;
658 }
659
660 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
661 offset_in_page(dma_buff), buf_len, PAGE_SIZE);
662 accum_sz += buf_len;
663
664 /* Free the descriptor */
665 knav_pool_desc_put(netcp->rx_pool, ndesc);
666 }
667
668 /* Free the primary descriptor */
669 knav_pool_desc_put(netcp->rx_pool, desc);
670
671 /* check for packet len and warn */
672 if (unlikely(pkt_sz != accum_sz))
673 dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
674 pkt_sz, accum_sz);
675
676 /* Remove ethernet FCS from the packet */
677 __pskb_trim(skb, skb->len - ETH_FCS_LEN);
678
679 /* Call each of the RX hooks */
680 p_info.skb = skb;
681 p_info.rxtstamp_complete = false;
682 list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
683 int ret;
684
685 ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
686 &p_info);
687 if (unlikely(ret)) {
688 dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
689 rx_hook->order, ret);
690 netcp->ndev->stats.rx_errors++;
691 dev_kfree_skb(skb);
692 return 0;
693 }
694 }
695
696 netcp->ndev->last_rx = jiffies;
697 netcp->ndev->stats.rx_packets++;
698 netcp->ndev->stats.rx_bytes += skb->len;
699
700 /* push skb up the stack */
701 skb->protocol = eth_type_trans(skb, netcp->ndev);
702 netif_receive_skb(skb);
703 return 0;
704
705free_desc:
706 netcp_free_rx_desc_chain(netcp, desc);
707 netcp->ndev->stats.rx_errors++;
708 return 0;
709}
710
711static int netcp_process_rx_packets(struct netcp_intf *netcp,
712 unsigned int budget)
713{
714 int i;
715
716 for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
717 ;
718 return i;
719}
720
721/* Release descriptors and attached buffers from Rx FDQ */
722static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
723{
724 struct knav_dma_desc *desc;
725 unsigned int buf_len, dma_sz;
726 dma_addr_t dma;
727 void *buf_ptr;
728 u32 tmp;
729
730 /* Pop descriptors from the FDQ and free their attached buffers */
731 while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
732 desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
733 if (unlikely(!desc)) {
734 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
735 continue;
736 }
737
738 get_org_pkt_info(&dma, &buf_len, desc);
739 get_pad_info((u32 *)&buf_ptr, &tmp, desc);
740
741 if (unlikely(!dma)) {
742 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
743 knav_pool_desc_put(netcp->rx_pool, desc);
744 continue;
745 }
746
747 if (unlikely(!buf_ptr)) {
748 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
749 knav_pool_desc_put(netcp->rx_pool, desc);
750 continue;
751 }
752
753 if (fdq == 0) {
754 dma_unmap_single(netcp->dev, dma, buf_len,
755 DMA_FROM_DEVICE);
756 netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
757 } else {
758 dma_unmap_page(netcp->dev, dma, buf_len,
759 DMA_FROM_DEVICE);
760 __free_page(buf_ptr);
761 }
762
763 knav_pool_desc_put(netcp->rx_pool, desc);
764 }
765}
766
767static void netcp_rxpool_free(struct netcp_intf *netcp)
768{
769 int i;
770
771 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
772 !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
773 netcp_free_rx_buf(netcp, i);
774
775 if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
776 dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
777 netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));
778
779 knav_pool_destroy(netcp->rx_pool);
780 netcp->rx_pool = NULL;
781}
782
783static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
784{
785 struct knav_dma_desc *hwdesc;
786 unsigned int buf_len, dma_sz;
787 u32 desc_info, pkt_info;
788 struct page *page;
789 dma_addr_t dma;
790 void *bufptr;
791 u32 pad[2];
792
793 /* Allocate descriptor */
794 hwdesc = knav_pool_desc_get(netcp->rx_pool);
795 if (IS_ERR_OR_NULL(hwdesc)) {
796 dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
797 return;
798 }
799
800 if (likely(fdq == 0)) {
801 unsigned int primary_buf_len;
802 /* Allocate a primary receive queue entry */
803 buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
804 primary_buf_len = SKB_DATA_ALIGN(buf_len) +
805 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
806
807 if (primary_buf_len <= PAGE_SIZE) {
808 bufptr = netdev_alloc_frag(primary_buf_len);
809 pad[1] = primary_buf_len;
810 } else {
811 bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
812 GFP_DMA32 | __GFP_COLD);
813 pad[1] = 0;
814 }
815
816 if (unlikely(!bufptr)) {
817 dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
818 goto fail;
819 }
820 dma = dma_map_single(netcp->dev, bufptr, buf_len,
821 DMA_TO_DEVICE);
822 pad[0] = (u32)bufptr;
823
824 } else {
825 /* Allocate a secondary receive queue entry */
826 page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
827 if (unlikely(!page)) {
828 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
829 goto fail;
830 }
831 buf_len = PAGE_SIZE;
832 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
833 pad[0] = (u32)page;
834 pad[1] = 0;
835 }
836
837 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
838 desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
839 pkt_info = KNAV_DMA_DESC_HAS_EPIB;
840 pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
841 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
842 KNAV_DMA_DESC_RETQ_SHIFT;
843 set_org_pkt_info(dma, buf_len, hwdesc);
844 set_pad_info(pad[0], pad[1], hwdesc);
845 set_desc_info(desc_info, pkt_info, hwdesc);
846
847 /* Push to FDQs */
848 knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
849 &dma_sz);
850 knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
851 return;
852
853fail:
854 knav_pool_desc_put(netcp->rx_pool, hwdesc);
855}
856
857/* Refill Rx FDQ with descriptors & attached buffers */
858static void netcp_rxpool_refill(struct netcp_intf *netcp)
859{
860 u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
861 int i;
862
863 /* Calculate the FDQ deficit and refill */
864 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
865 fdq_deficit[i] = netcp->rx_queue_depths[i] -
866 knav_queue_get_count(netcp->rx_fdq[i]);
867
868 while (fdq_deficit[i]--)
869 netcp_allocate_rx_buf(netcp, i);
870 } /* end for fdqs */
871}
872
873/* NAPI poll */
874static int netcp_rx_poll(struct napi_struct *napi, int budget)
875{
876 struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
877 rx_napi);
878 unsigned int packets;
879
880 packets = netcp_process_rx_packets(netcp, budget);
881
882 if (packets < budget) {
883 napi_complete(&netcp->rx_napi);
884 knav_queue_enable_notify(netcp->rx_queue);
885 }
886
887 netcp_rxpool_refill(netcp);
888 return packets;
889}
890
891static void netcp_rx_notify(void *arg)
892{
893 struct netcp_intf *netcp = arg;
894
895 knav_queue_disable_notify(netcp->rx_queue);
896 napi_schedule(&netcp->rx_napi);
897}
898
899static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
900 struct knav_dma_desc *desc,
901 unsigned int desc_sz)
902{
903 struct knav_dma_desc *ndesc = desc;
904 dma_addr_t dma_desc, dma_buf;
905 unsigned int buf_len;
906
907 while (ndesc) {
908 get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);
909
910 if (dma_buf && buf_len)
911 dma_unmap_single(netcp->dev, dma_buf, buf_len,
912 DMA_TO_DEVICE);
913 else
914 dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
915 (void *)dma_buf, buf_len);
916
917 knav_pool_desc_put(netcp->tx_pool, ndesc);
918 ndesc = NULL;
919 if (dma_desc) {
920 ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
921 desc_sz);
922 if (!ndesc)
923 dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
924 }
925 }
926}
927
928static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
929 unsigned int budget)
930{
931 struct knav_dma_desc *desc;
932 struct sk_buff *skb;
933 unsigned int dma_sz;
934 dma_addr_t dma;
935 int pkts = 0;
936 u32 tmp;
937
938 while (budget--) {
939 dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
940 if (!dma)
941 break;
942 desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
943 if (unlikely(!desc)) {
944 dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
945 netcp->ndev->stats.tx_errors++;
946 continue;
947 }
948
949 get_pad_info((u32 *)&skb, &tmp, desc);
950 netcp_free_tx_desc_chain(netcp, desc, dma_sz);
951 if (!skb) {
952 dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
953 netcp->ndev->stats.tx_errors++;
954 continue;
955 }
956
957 if (netif_subqueue_stopped(netcp->ndev, skb) &&
958 netif_running(netcp->ndev) &&
959 (knav_pool_count(netcp->tx_pool) >
960 netcp->tx_resume_threshold)) {
961 u16 subqueue = skb_get_queue_mapping(skb);
962
963 netif_wake_subqueue(netcp->ndev, subqueue);
964 }
965
966 netcp->ndev->stats.tx_packets++;
967 netcp->ndev->stats.tx_bytes += skb->len;
968 dev_kfree_skb(skb);
969 pkts++;
970 }
971 return pkts;
972}
973
974static int netcp_tx_poll(struct napi_struct *napi, int budget)
975{
976 int packets;
977 struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
978 tx_napi);
979
980 packets = netcp_process_tx_compl_packets(netcp, budget);
981 if (packets < budget) {
982 napi_complete(&netcp->tx_napi);
983 knav_queue_enable_notify(netcp->tx_compl_q);
984 }
985
986 return packets;
987}
988
989static void netcp_tx_notify(void *arg)
990{
991 struct netcp_intf *netcp = arg;
992
993 knav_queue_disable_notify(netcp->tx_compl_q);
994 napi_schedule(&netcp->tx_napi);
995}
996
997static struct knav_dma_desc*
998netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
999{
1000 struct knav_dma_desc *desc, *ndesc, *pdesc;
1001 unsigned int pkt_len = skb_headlen(skb);
1002 struct device *dev = netcp->dev;
1003 dma_addr_t dma_addr;
1004 unsigned int dma_sz;
1005 int i;
1006
1007 /* Map the linear buffer */
1008 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
1009 if (unlikely(!dma_addr)) {
1010 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
1011 return NULL;
1012 }
1013
1014 desc = knav_pool_desc_get(netcp->tx_pool);
1015 if (unlikely(IS_ERR_OR_NULL(desc))) {
1016 dev_err(netcp->ndev_dev, "out of TX desc\n");
1017 dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
1018 return NULL;
1019 }
1020
1021 set_pkt_info(dma_addr, pkt_len, 0, desc);
1022 if (skb_is_nonlinear(skb)) {
1023 prefetchw(skb_shinfo(skb));
1024 } else {
1025 desc->next_desc = 0;
1026 goto upd_pkt_len;
1027 }
1028
1029 pdesc = desc;
1030
1031 /* Handle the case where skb is fragmented in pages */
1032 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1033 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1034 struct page *page = skb_frag_page(frag);
1035 u32 page_offset = frag->page_offset;
1036 u32 buf_len = skb_frag_size(frag);
1037 dma_addr_t desc_dma;
1038 u32 pkt_info;
1039
1040 dma_addr = dma_map_page(dev, page, page_offset, buf_len,
1041 DMA_TO_DEVICE);
1042 if (unlikely(!dma_addr)) {
1043 dev_err(netcp->ndev_dev, "Failed to map skb page\n");
1044 goto free_descs;
1045 }
1046
1047 ndesc = knav_pool_desc_get(netcp->tx_pool);
1048 if (unlikely(IS_ERR_OR_NULL(ndesc))) {
1049 dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
1050 dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
1051 goto free_descs;
1052 }
1053
1054 desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
1055 (void *)ndesc);
1056 pkt_info =
1057 (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
1058 KNAV_DMA_DESC_RETQ_SHIFT;
1059 set_pkt_info(dma_addr, buf_len, 0, ndesc);
1060 set_words(&desc_dma, 1, &pdesc->next_desc);
1061 pkt_len += buf_len;
1062 if (pdesc != desc)
1063 knav_pool_desc_map(netcp->tx_pool, pdesc,
1064 sizeof(*pdesc), &desc_dma, &dma_sz);
1065 pdesc = ndesc;
1066 }
1067 if (pdesc != desc)
1068 knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
1069 &dma_addr, &dma_sz);
1070
1071 /* frag list based linkage is not supported for now. */
1072 if (skb_shinfo(skb)->frag_list) {
1073 dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
1074 goto free_descs;
1075 }
1076
1077upd_pkt_len:
1078 WARN_ON(pkt_len != skb->len);
1079
1080 pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
1081 set_words(&pkt_len, 1, &desc->desc_info);
1082 return desc;
1083
1084free_descs:
1085 netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
1086 return NULL;
1087}
1088
1089static int netcp_tx_submit_skb(struct netcp_intf *netcp,
1090 struct sk_buff *skb,
1091 struct knav_dma_desc *desc)
1092{
1093 struct netcp_tx_pipe *tx_pipe = NULL;
1094 struct netcp_hook_list *tx_hook;
1095 struct netcp_packet p_info;
1096 u32 packet_info = 0;
1097 unsigned int dma_sz;
1098 dma_addr_t dma;
1099 int ret = 0;
1100
1101 p_info.netcp = netcp;
1102 p_info.skb = skb;
1103 p_info.tx_pipe = NULL;
1104 p_info.psdata_len = 0;
1105 p_info.ts_context = NULL;
1106 p_info.txtstamp_complete = NULL;
1107 p_info.epib = desc->epib;
1108 p_info.psdata = desc->psdata;
1109 memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32));
1110
1111 /* Find out where to inject the packet for transmission */
1112 list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
1113 ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
1114 &p_info);
1115 if (unlikely(ret != 0)) {
1116 dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
1117 tx_hook->order, ret);
1118 ret = (ret < 0) ? ret : NETDEV_TX_OK;
1119 goto out;
1120 }
1121 }
1122
1123 /* Make sure some TX hook claimed the packet */
1124 tx_pipe = p_info.tx_pipe;
1125 if (!tx_pipe) {
1126 dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
1127 ret = -ENXIO;
1128 goto out;
1129 }
1130
1131 /* update descriptor */
1132 if (p_info.psdata_len) {
1133 u32 *psdata = p_info.psdata;
1134
1135 memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
1136 p_info.psdata_len);
1137 set_words(psdata, p_info.psdata_len, psdata);
1138 packet_info |=
1139 (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
1140 KNAV_DMA_DESC_PSLEN_SHIFT;
1141 }
1142
1143 packet_info |= KNAV_DMA_DESC_HAS_EPIB |
1144 ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
1145 KNAV_DMA_DESC_RETQ_SHIFT) |
1146 ((tx_pipe->dma_psflags & KNAV_DMA_DESC_PSFLAG_MASK) <<
1147 KNAV_DMA_DESC_PSFLAG_SHIFT);
1148
1149 set_words(&packet_info, 1, &desc->packet_info);
1150 set_words((u32 *)&skb, 1, &desc->pad[0]);
1151
1152 /* submit packet descriptor */
1153 ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
1154 &dma_sz);
1155 if (unlikely(ret)) {
1156 dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
1157 ret = -ENOMEM;
1158 goto out;
1159 }
1160 skb_tx_timestamp(skb);
1161 knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);
1162
1163out:
1164 return ret;
1165}
1166
1167/* Submit the packet */
1168static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1169{
1170 struct netcp_intf *netcp = netdev_priv(ndev);
1171 int subqueue = skb_get_queue_mapping(skb);
1172 struct knav_dma_desc *desc;
1173 int desc_count, ret = 0;
1174
1175 if (unlikely(skb->len <= 0)) {
1176 dev_kfree_skb(skb);
1177 return NETDEV_TX_OK;
1178 }
1179
1180 if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
1181 ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
1182 if (ret < 0) {
1183 /* If we get here, the skb has already been dropped */
1184 dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
1185 ret);
1186 ndev->stats.tx_dropped++;
1187 return ret;
1188 }
1189 skb->len = NETCP_MIN_PACKET_SIZE;
1190 }
1191
1192 desc = netcp_tx_map_skb(skb, netcp);
1193 if (unlikely(!desc)) {
1194 netif_stop_subqueue(ndev, subqueue);
1195 ret = -ENOBUFS;
1196 goto drop;
1197 }
1198
1199 ret = netcp_tx_submit_skb(netcp, skb, desc);
1200 if (ret)
1201 goto drop;
1202
1203 ndev->trans_start = jiffies;
1204
1205 /* Check Tx pool count & stop subqueue if needed */
1206 desc_count = knav_pool_count(netcp->tx_pool);
1207 if (desc_count < netcp->tx_pause_threshold) {
1208 dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
1209 netif_stop_subqueue(ndev, subqueue);
1210 }
1211 return NETDEV_TX_OK;
1212
1213drop:
1214 ndev->stats.tx_dropped++;
1215 if (desc)
1216 netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
1217 dev_kfree_skb(skb);
1218 return ret;
1219}
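/*
 * Illustrative flow-control numbers (the actual values come from
 * configuration parsed elsewhere in this driver; these are
 * assumptions): with tx_pause_threshold = 32 and
 * tx_resume_threshold = 64, netcp_ndo_start_xmit() stops the
 * subqueue once fewer than 32 free Tx descriptors remain in the
 * pool, and netcp_process_tx_compl_packets() wakes it again once
 * completions bring the free count back above 64.
 */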
1220
1221int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
1222{
1223 if (tx_pipe->dma_channel) {
1224 knav_dma_close_channel(tx_pipe->dma_channel);
1225 tx_pipe->dma_channel = NULL;
1226 }
1227 return 0;
1228}
1229
1230int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
1231{
1232 struct device *dev = tx_pipe->netcp_device->device;
1233 struct knav_dma_cfg config;
1234 int ret = 0;
1235 u8 name[16];
1236
1237 memset(&config, 0, sizeof(config));
1238 config.direction = DMA_MEM_TO_DEV;
1239 config.u.tx.filt_einfo = false;
1240 config.u.tx.filt_pswords = false;
1241 config.u.tx.priority = DMA_PRIO_MED_L;
1242
1243 tx_pipe->dma_channel = knav_dma_open_channel(dev,
1244 tx_pipe->dma_chan_name, &config);
1245 if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) {
1246 dev_err(dev, "failed opening tx chan(%s)\n", tx_pipe->dma_chan_name);
1247 ret = -ENODEV;
1248 goto err;
1249 }
1250
1251 snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
1252 tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
1253 KNAV_QUEUE_SHARED);
1254 if (IS_ERR(tx_pipe->dma_queue)) {
1255 ret = PTR_ERR(tx_pipe->dma_queue);
1256 dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
1257 name, ret);
1258 goto err;
1259 }
1260
1261 dev_dbg(dev, "opened tx pipe %s\n", name);
1262 return 0;
1263
1264err:
1265 if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
1266 knav_dma_close_channel(tx_pipe->dma_channel);
1267 tx_pipe->dma_channel = NULL;
1268 return ret;
1269}
1270
1271int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
1272 struct netcp_device *netcp_device,
1273 const char *dma_chan_name, unsigned int dma_queue_id)
1274{
1275 memset(tx_pipe, 0, sizeof(*tx_pipe));
1276 tx_pipe->netcp_device = netcp_device;
1277 tx_pipe->dma_chan_name = dma_chan_name;
1278 tx_pipe->dma_queue_id = dma_queue_id;
1279 return 0;
1280}
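/*
 * Illustrative call sequence (not part of this patch; the channel
 * name "nettx", the queue id and the priv layout are hypothetical):
 * an Ethernet module typically initializes its Tx pipe at attach
 * time and opens it when the interface is brought up:
 *
 *	netcp_txpipe_init(&priv->tx_pipe, netcp_device, "nettx",
 *			  tx_queue_id);
 *	...
 *	ret = netcp_txpipe_open(&priv->tx_pipe);
 *	if (ret)
 *		return ret;
 */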
1281
1282static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
1283 const u8 *addr,
1284 enum netcp_addr_type type)
1285{
1286 struct netcp_addr *naddr;
1287
1288 list_for_each_entry(naddr, &netcp->addr_list, node) {
1289 if (naddr->type != type)
1290 continue;
1291 if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
1292 continue;
1293 return naddr;
1294 }
1295
1296 return NULL;
1297}
1298
1299static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
1300 const u8 *addr,
1301 enum netcp_addr_type type)
1302{
1303 struct netcp_addr *naddr;
1304
1305 naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
1306 if (!naddr)
1307 return NULL;
1308
1309 naddr->type = type;
1310 naddr->flags = 0;
1311 naddr->netcp = netcp;
1312 if (addr)
1313 ether_addr_copy(naddr->addr, addr);
1314 else
1315 memset(naddr->addr, 0, ETH_ALEN);
1316 list_add_tail(&naddr->node, &netcp->addr_list);
1317
1318 return naddr;
1319}
1320
1321static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
1322{
1323 list_del(&naddr->node);
1324 devm_kfree(netcp->dev, naddr);
1325}
1326
1327static void netcp_addr_clear_mark(struct netcp_intf *netcp)
1328{
1329 struct netcp_addr *naddr;
1330
1331 list_for_each_entry(naddr, &netcp->addr_list, node)
1332 naddr->flags = 0;
1333}
1334
1335static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
1336 enum netcp_addr_type type)
1337{
1338 struct netcp_addr *naddr;
1339
1340 naddr = netcp_addr_find(netcp, addr, type);
1341 if (naddr) {
1342 naddr->flags |= ADDR_VALID;
1343 return;
1344 }
1345
1346 naddr = netcp_addr_add(netcp, addr, type);
1347 if (!WARN_ON(!naddr))
1348 naddr->flags |= ADDR_NEW;
1349}
1350
1351static void netcp_addr_sweep_del(struct netcp_intf *netcp)
1352{
1353 struct netcp_addr *naddr, *tmp;
1354 struct netcp_intf_modpriv *priv;
1355 struct netcp_module *module;
1356 int error;
1357
1358 list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
1359 if (naddr->flags & (ADDR_VALID | ADDR_NEW))
1360 continue;
1361 dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
1362 naddr->addr, naddr->type);
1363 mutex_lock(&netcp_modules_lock);
1364 for_each_module(netcp, priv) {
1365 module = priv->netcp_module;
1366 if (!module->del_addr)
1367 continue;
1368 error = module->del_addr(priv->module_priv,
1369 naddr);
1370 WARN_ON(error);
1371 }
1372 mutex_unlock(&netcp_modules_lock);
1373 netcp_addr_del(netcp, naddr);
1374 }
1375}
1376
1377static void netcp_addr_sweep_add(struct netcp_intf *netcp)
1378{
1379 struct netcp_addr *naddr, *tmp;
1380 struct netcp_intf_modpriv *priv;
1381 struct netcp_module *module;
1382 int error;
1383
1384 list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
1385 if (!(naddr->flags & ADDR_NEW))
1386 continue;
1387 dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
1388 naddr->addr, naddr->type);
1389 mutex_lock(&netcp_modules_lock);
1390 for_each_module(netcp, priv) {
1391 module = priv->netcp_module;
1392 if (!module->add_addr)
1393 continue;
1394 error = module->add_addr(priv->module_priv, naddr);
1395 WARN_ON(error);
1396 }
1397 mutex_unlock(&netcp_modules_lock);
1398 }
1399}
1400
1401static void netcp_set_rx_mode(struct net_device *ndev)
1402{
1403 struct netcp_intf *netcp = netdev_priv(ndev);
1404 struct netdev_hw_addr *ndev_addr;
1405 bool promisc;
1406
1407 promisc = (ndev->flags & IFF_PROMISC ||
1408 ndev->flags & IFF_ALLMULTI ||
1409 netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
1410
1411 /* first clear all marks */
1412 netcp_addr_clear_mark(netcp);
1413
1414 /* next add new entries, mark existing ones */
1415 netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
1416 for_each_dev_addr(ndev, ndev_addr)
1417 netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
1418 netdev_for_each_uc_addr(ndev_addr, ndev)
1419 netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
1420 netdev_for_each_mc_addr(ndev_addr, ndev)
1421 netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);
1422
1423 if (promisc)
1424 netcp_addr_add_mark(netcp, NULL, ADDR_ANY);
1425
1426 /* finally sweep and callout into modules */
1427 netcp_addr_sweep_del(netcp);
1428 netcp_addr_sweep_add(netcp);
1429}
1430
1431static void netcp_free_navigator_resources(struct netcp_intf *netcp)
1432{
1433 int i;
1434
1435 if (netcp->rx_channel) {
1436 knav_dma_close_channel(netcp->rx_channel);
1437 netcp->rx_channel = NULL;
1438 }
1439
1440 if (!IS_ERR_OR_NULL(netcp->rx_pool))
1441 netcp_rxpool_free(netcp);
1442
1443 if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
1444 knav_queue_close(netcp->rx_queue);
1445 netcp->rx_queue = NULL;
1446 }
1447
1448 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
1449 !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
1450 knav_queue_close(netcp->rx_fdq[i]);
1451 netcp->rx_fdq[i] = NULL;
1452 }
1453
1454 if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
1455 knav_queue_close(netcp->tx_compl_q);
1456 netcp->tx_compl_q = NULL;
1457 }
1458
1459 if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
1460 knav_pool_destroy(netcp->tx_pool);
1461 netcp->tx_pool = NULL;
1462 }
1463}
1464
static int netcp_setup_navigator_resources(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	u32 last_fdq = 0;
	char name[16];
	int ret;
	int i;

	/* Create Rx/Tx descriptor pools */
	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
					  netcp->rx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
		/* PTR_ERR() of NULL is 0, so substitute a real error code */
		ret = PTR_ERR(netcp->rx_pool) ?: -ENOMEM;
		goto fail;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
					  netcp->tx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
		ret = PTR_ERR(netcp->tx_pool) ?: -ENOMEM;
		goto fail;
	}

	/* open Tx completion queue */
	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
	if (IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		ret = PTR_ERR(netcp->tx_compl_q) ?: -ENODEV;
		goto fail;
	}
	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);

	/* Set notification for Tx completion */
	notify_cfg.fn = netcp_tx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->tx_compl_q,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->tx_compl_q);

	/* open Rx completion queue */
	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
	if (IS_ERR_OR_NULL(netcp->rx_queue)) {
		ret = PTR_ERR(netcp->rx_queue) ?: -ENODEV;
		goto fail;
	}
	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);

	/* Set notification for Rx completion */
	notify_cfg.fn = netcp_rx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->rx_queue,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->rx_queue);

	/* open Rx FDQs */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
			ret = PTR_ERR(netcp->rx_fdq[i]) ?: -ENODEV;
			goto fail;
		}
	}

	memset(&config, 0, sizeof(config));
	config.direction = DMA_DEV_TO_MEM;
	config.u.rx.einfo_present = true;
	config.u.rx.psinfo_present = true;
	config.u.rx.err_mode = DMA_DROP;
	config.u.rx.desc_type = DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop = false;
	config.u.rx.sop_offset = NETCP_SOP_OFFSET;
	config.u.rx.dst_q = netcp->rx_queue_id;
	config.u.rx.thresh = DMA_THRESH_NONE;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
		if (netcp->rx_fdq[i])
			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
					netcp->dma_chan_name, &config);
	if (IS_ERR_OR_NULL(netcp->rx_channel)) {
		dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
			netcp->dma_chan_name);
		ret = PTR_ERR(netcp->rx_channel) ?: -ENODEV;
		goto fail;
	}

	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
	return 0;

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Open the device */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}
	mutex_unlock(&netcp_modules_lock);

	netcp_rxpool_refill(netcp);
	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}
	mutex_unlock(&netcp_modules_lock);

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Close the device */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}
	mutex_unlock(&netcp_modules_lock);

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}

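/* Fan the ioctl out to every registered module. A module returning 0
 * marks the request as handled (the scan still visits the remaining
 * modules); -EOPNOTSUPP means "not mine" and the scan continues; any
 * other error aborts the scan and is returned. If no module claims the
 * request, the result is -EOPNOTSUPP.
 */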
static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	mutex_unlock(&netcp_modules_lock);
	return (ret == 0) ? 0 : err;
}

static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	/* MTU < 68 is an error for IPv4 traffic */
	if ((new_mtu < 68) ||
	    (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) {
		dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu);
		return -EINVAL;
	}

	ndev->mtu = new_mtu;
	return 0;
}

static void netcp_ndo_tx_timeout(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	unsigned int descs = knav_pool_count(netcp->tx_pool);

	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
	ndev->trans_start = jiffies;
	netif_tx_wake_all_queues(ndev);
}

static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if ((module->add_vid) && (vid != 0)) {
			err = module->add_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	mutex_unlock(&netcp_modules_lock);
	return err;
}

static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->del_vid) {
			err = module->del_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	mutex_unlock(&netcp_modules_lock);
	return err;
}

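/* NetCP interfaces are allocated with a single Tx queue (see
 * alloc_etherdev_mqs() in netcp_create_interface()), so every skb is
 * steered to hardware queue 0.
 */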
static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
			      void *accel_priv,
			      select_queue_fallback_t fallback)
{
	return 0;
}

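/* Map each requested traffic class to its own Tx queue, one queue per
 * class; num_tc == 0 clears the mapping again. Must run under the RTNL
 * lock, which ASSERT_RTNL() checks below.
 */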
static int netcp_setup_tc(struct net_device *dev, u8 num_tc)
{
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < num_tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (num_tc) {
		netdev_set_num_tc(dev, num_tc);
		for (i = 0; i < num_tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}

static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_do_ioctl		= netcp_ndo_ioctl,
	.ndo_change_mtu		= netcp_ndo_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= netcp_select_queue,
	.ndo_setup_tc		= netcp_setup_tc,
};

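/* Parse one child of the "netcp-interfaces" device-tree node and create
 * the corresponding network device. An illustrative interface node (the
 * property names match what is parsed below; the values are examples
 * only, not a recommendation) might look like:
 *
 *	interface-0 {
 *		rx-channel = "netrx0";
 *		rx-pool = <1024 12>;		(pool size, region id)
 *		tx-pool = <1024 12>;
 *		rx-queue-depth = <128 128 0 0>;	(one cell per FDQ)
 *		rx-buffer-size = <1518 4096 0 0>;
 *		rx-queue = <8704>;
 *		tx-completion-queue = <8706>;
 *	};
 */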
static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	const void *mac_addr;
	u8 efuse_mac_addr[ETH_ALEN];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap_nocache(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse);
		if (is_valid_ether_addr(efuse_mac_addr))
			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
		else
			random_ether_addr(ndev->dev_addr);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		mac_addr = of_get_mac_address(node_interface);
		if (mac_addr)
			ether_addr_copy(ndev->dev_addr, mac_addr);
		else
			random_ether_addr(ndev->dev_addr);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
					 netcp->rx_buffer_sizes,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret) {
		dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
		netcp->rx_buffer_sizes[0] = 1536;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be at least %ld\n",
			MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
	netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);

	/* Register the network device */
	ndev->dev_id = 0;
	ndev->watchdog_timeo = NETCP_TX_TIMEOUT;
	ndev->netdev_ops = &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}

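/* Tear one interface down again: give every attached module its release()
 * callback, drop the module bookkeeping, then unregister and free the
 * net_device.
 */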
static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
		kfree(intf_modpriv);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	netif_napi_del(&netcp->rx_napi);
	free_netdev(ndev);
}

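/* Probe flow: power the NetCP instance up via runtime PM, create one
 * net_device for every child of the "netcp-interfaces" node, then probe
 * any NetCP modules that registered before this device appeared.
 */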
static int netcp_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device_node *child, *interfaces;
	struct netcp_device *netcp_device;
	struct device *dev = &pdev->dev;
	struct netcp_module *module;
	int ret;

	if (!node) {
		dev_err(dev, "could not find device info\n");
		return -ENODEV;
	}

	/* Allocate a new NETCP device instance */
	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
	if (!netcp_device)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable NETCP power-domain\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Initialize the NETCP device instance */
	INIT_LIST_HEAD(&netcp_device->interface_head);
	INIT_LIST_HEAD(&netcp_device->modpriv_head);
	netcp_device->device = dev;
	platform_set_drvdata(pdev, netcp_device);

	/* create interfaces */
	interfaces = of_get_child_by_name(node, "netcp-interfaces");
	if (!interfaces) {
		dev_err(dev, "could not find netcp-interfaces node\n");
		ret = -ENODEV;
		goto probe_quit;
	}

	for_each_available_child_of_node(interfaces, child) {
		ret = netcp_create_interface(netcp_device, child);
		if (ret) {
			dev_err(dev, "could not create interface(%s)\n",
				child->name);
			goto probe_quit_interface;
		}
	}

	/* Add the device instance to the list */
	list_add_tail(&netcp_device->device_list, &netcp_devices);

	/* Probe & attach any modules already registered */
	mutex_lock(&netcp_modules_lock);
	for_each_netcp_module(module) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			dev_err(dev, "module(%s) probe failed\n", module->name);
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

probe_quit_interface:
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

probe_quit:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
		kfree(inst_modpriv);
	}
	WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
	     pdev->name);

	devm_kfree(&pdev->dev, netcp_device);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.owner		= THIS_MODULE,
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");