author     Linus Torvalds <torvalds@linux-foundation.org>   2011-05-20 16:43:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-05-20 16:43:21 -0400
commit     06f4e926d256d902dd9a53dcb400fd74974ce087 (patch)
tree       0b438b67f5f0eff6fd617bc497a9dace6164a488 /net/caif/caif_dev.c
parent     8e7bfcbab3825d1b404d615cb1b54f44ff81f981 (diff)
parent     d93515611bbc70c2fe4db232e5feb448ed8e4cc9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1446 commits)
  macvlan: fix panic if lowerdev in a bond
  tg3: Add braces around 5906 workaround.
  tg3: Fix NETIF_F_LOOPBACK error
  macvlan: remove one synchronize_rcu() call
  networking: NET_CLS_ROUTE4 depends on INET
  irda: Fix error propagation in ircomm_lmp_connect_response()
  irda: Kill set but unused variable 'bytes' in irlan_check_command_param()
  irda: Kill set but unused variable 'clen' in ircomm_connect_indication()
  rxrpc: Fix set but unused variable 'usage' in rxrpc_get_transport()
  be2net: Kill set but unused variable 'req' in lancer_fw_download()
  irda: Kill set but unused vars 'saddr' and 'daddr' in irlan_provider_connect_indication()
  atl1c: atl1c_resume() is only used when CONFIG_PM_SLEEP is defined.
  rxrpc: Fix set but unused variable 'usage' in rxrpc_get_peer().
  rxrpc: Kill set but unused variable 'local' in rxrpc_UDP_error_handler()
  rxrpc: Kill set but unused variable 'sp' in rxrpc_process_connection()
  rxrpc: Kill set but unused variable 'sp' in rxrpc_rotate_tx_window()
  pkt_sched: Kill set but unused variable 'protocol' in tc_classify()
  isdn: capi: Use pr_debug() instead of ifdefs.
  tg3: Update version to 3.119
  tg3: Apply rx_discards fix to 5719/5720
  ...

Fix up trivial conflicts in arch/x86/Kconfig and net/mac80211/agg-tx.c as per Davem.
Diffstat (limited to 'net/caif/caif_dev.c')
-rw-r--r--   net/caif/caif_dev.c   387
1 file changed, 197 insertions(+), 190 deletions(-)
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index a42a408306e4..366ca0fb7a29 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -12,49 +12,51 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
 
 #include <linux/version.h>
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/if_arp.h>
 #include <linux/net.h>
 #include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/mutex.h>
 #include <net/netns/generic.h>
 #include <net/net_namespace.h>
 #include <net/pkt_sched.h>
 #include <net/caif/caif_device.h>
-#include <net/caif/caif_dev.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
 
 MODULE_LICENSE("GPL");
-#define TIMEOUT (HZ*5)
 
 /* Used for local tracking of the CAIF net devices */
 struct caif_device_entry {
         struct cflayer layer;
         struct list_head list;
-        atomic_t in_use;
-        atomic_t state;
-        u16 phyid;
         struct net_device *netdev;
-        wait_queue_head_t event;
+        int __percpu *pcpu_refcnt;
 };
 
 struct caif_device_entry_list {
         struct list_head list;
         /* Protects simulanous deletes in list */
-        spinlock_t lock;
+        struct mutex lock;
 };
 
 struct caif_net {
+        struct cfcnfg *cfg;
         struct caif_device_entry_list caifdevs;
 };
 
 static int caif_net_id;
-static struct cfcnfg *cfg;
+
+struct cfcnfg *get_cfcnfg(struct net *net)
+{
+        struct caif_net *caifn;
+        BUG_ON(!net);
+        caifn = net_generic(net, caif_net_id);
+        BUG_ON(!caifn);
+        return caifn->cfg;
+}
+EXPORT_SYMBOL(get_cfcnfg);
 
 static struct caif_device_entry_list *caif_device_list(struct net *net)
 {
@@ -65,19 +67,39 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
         return &caifn->caifdevs;
 }
 
+static void caifd_put(struct caif_device_entry *e)
+{
+        irqsafe_cpu_dec(*e->pcpu_refcnt);
+}
+
+static void caifd_hold(struct caif_device_entry *e)
+{
+        irqsafe_cpu_inc(*e->pcpu_refcnt);
+}
+
+static int caifd_refcnt_read(struct caif_device_entry *e)
+{
+        int i, refcnt = 0;
+        for_each_possible_cpu(i)
+                refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
+        return refcnt;
+}
+
 /* Allocate new CAIF device. */
 static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
 {
         struct caif_device_entry_list *caifdevs;
         struct caif_device_entry *caifd;
+
         caifdevs = caif_device_list(dev_net(dev));
         BUG_ON(!caifdevs);
+
         caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
         if (!caifd)
                 return NULL;
+        caifd->pcpu_refcnt = alloc_percpu(int);
         caifd->netdev = dev;
-        list_add(&caifd->list, &caifdevs->list);
-        init_waitqueue_head(&caifd->event);
+        dev_hold(dev);
         return caifd;
 }
 
@@ -87,98 +109,60 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
                 caif_device_list(dev_net(dev));
         struct caif_device_entry *caifd;
         BUG_ON(!caifdevs);
-        list_for_each_entry(caifd, &caifdevs->list, list) {
+        list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
                 if (caifd->netdev == dev)
                         return caifd;
         }
         return NULL;
 }
 
-static void caif_device_destroy(struct net_device *dev)
-{
-        struct caif_device_entry_list *caifdevs =
-                caif_device_list(dev_net(dev));
-        struct caif_device_entry *caifd;
-        ASSERT_RTNL();
-        if (dev->type != ARPHRD_CAIF)
-                return;
-
-        spin_lock_bh(&caifdevs->lock);
-        caifd = caif_get(dev);
-        if (caifd == NULL) {
-                spin_unlock_bh(&caifdevs->lock);
-                return;
-        }
-
-        list_del(&caifd->list);
-        spin_unlock_bh(&caifdevs->lock);
-
-        kfree(caifd);
-}
-
 static int transmit(struct cflayer *layer, struct cfpkt *pkt)
 {
+        int err;
         struct caif_device_entry *caifd =
                 container_of(layer, struct caif_device_entry, layer);
-        struct sk_buff *skb, *skb2;
-        int ret = -EINVAL;
+        struct sk_buff *skb;
+
         skb = cfpkt_tonative(pkt);
         skb->dev = caifd->netdev;
-        /*
-         * Don't allow SKB to be destroyed upon error, but signal resend
-         * notification to clients. We can't rely on the return value as
-         * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes don't.
-         */
-        if (netif_queue_stopped(caifd->netdev))
-                return -EAGAIN;
-        skb2 = skb_get(skb);
-
-        ret = dev_queue_xmit(skb2);
-
-        if (!ret)
-                kfree_skb(skb);
-        else
-                return -EAGAIN;
 
-        return 0;
-}
+        err = dev_queue_xmit(skb);
+        if (err > 0)
+                err = -EIO;
 
-static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
-{
-        struct caif_device_entry *caifd;
-        struct caif_dev_common *caifdev;
-        caifd = container_of(layr, struct caif_device_entry, layer);
-        caifdev = netdev_priv(caifd->netdev);
-        if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
-                atomic_set(&caifd->in_use, 1);
-                wake_up_interruptible(&caifd->event);
-
-        } else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
-                atomic_set(&caifd->in_use, 0);
-                wake_up_interruptible(&caifd->event);
-        }
-        return 0;
+        return err;
 }
 
 /*
- * Stuff received packets to associated sockets.
+ * Stuff received packets into the CAIF stack.
  * On error, returns non-zero and releases the skb.
  */
 static int receive(struct sk_buff *skb, struct net_device *dev,
                    struct packet_type *pkttype, struct net_device *orig_dev)
 {
-        struct net *net;
         struct cfpkt *pkt;
         struct caif_device_entry *caifd;
-        net = dev_net(dev);
+
         pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
+
+        rcu_read_lock();
         caifd = caif_get(dev);
-        if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
-                return NET_RX_DROP;
 
-        if (caifd->layer.up->receive(caifd->layer.up, pkt))
+        if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
+            !netif_oper_up(caifd->netdev)) {
+                rcu_read_unlock();
+                kfree_skb(skb);
                 return NET_RX_DROP;
+        }
+
+        /* Hold reference to netdevice while using CAIF stack */
+        caifd_hold(caifd);
+        rcu_read_unlock();
+
+        caifd->layer.up->receive(caifd->layer.up, pkt);
 
+        /* Release reference to stack upwards */
+        caifd_put(caifd);
         return 0;
 }
 
@@ -189,15 +173,25 @@ static struct packet_type caif_packet_type __read_mostly = {
 
 static void dev_flowctrl(struct net_device *dev, int on)
 {
-        struct caif_device_entry *caifd = caif_get(dev);
-        if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+        struct caif_device_entry *caifd;
+
+        rcu_read_lock();
+
+        caifd = caif_get(dev);
+        if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
+                rcu_read_unlock();
                 return;
+        }
+
+        caifd_hold(caifd);
+        rcu_read_unlock();
 
         caifd->layer.up->ctrlcmd(caifd->layer.up,
                                  on ?
                                  _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
                                  _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                                  caifd->layer.id);
+        caifd_put(caifd);
 }
 
 /* notify Caif of device events */
@@ -208,37 +202,28 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
         struct caif_device_entry *caifd = NULL;
         struct caif_dev_common *caifdev;
         enum cfcnfg_phy_preference pref;
-        int res = -EINVAL;
         enum cfcnfg_phy_type phy_type;
+        struct cfcnfg *cfg;
+        struct caif_device_entry_list *caifdevs =
+                caif_device_list(dev_net(dev));
 
         if (dev->type != ARPHRD_CAIF)
                 return 0;
 
+        cfg = get_cfcnfg(dev_net(dev));
+        if (cfg == NULL)
+                return 0;
+
         switch (what) {
         case NETDEV_REGISTER:
-                netdev_info(dev, "register\n");
                 caifd = caif_device_alloc(dev);
-                if (caifd == NULL)
-                        break;
+                if (!caifd)
+                        return 0;
+
                 caifdev = netdev_priv(dev);
                 caifdev->flowctrl = dev_flowctrl;
-                atomic_set(&caifd->state, what);
-                res = 0;
-                break;
 
-        case NETDEV_UP:
-                netdev_info(dev, "up\n");
-                caifd = caif_get(dev);
-                if (caifd == NULL)
-                        break;
-                caifdev = netdev_priv(dev);
-                if (atomic_read(&caifd->state) == NETDEV_UP) {
-                        netdev_info(dev, "already up\n");
-                        break;
-                }
-                atomic_set(&caifd->state, what);
                 caifd->layer.transmit = transmit;
-                caifd->layer.modemcmd = modemcmd;
 
                 if (caifdev->use_frag)
                         phy_type = CFPHYTYPE_FRAG;
@@ -256,62 +241,94 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
                         pref = CFPHYPREF_HIGH_BW;
                         break;
                 }
-                dev_hold(dev);
-                cfcnfg_add_phy_layer(get_caif_conf(),
+                strncpy(caifd->layer.name, dev->name,
+                        sizeof(caifd->layer.name) - 1);
+                caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+
+                mutex_lock(&caifdevs->lock);
+                list_add_rcu(&caifd->list, &caifdevs->list);
+
+                cfcnfg_add_phy_layer(cfg,
                                      phy_type,
                                      dev,
                                      &caifd->layer,
-                                     &caifd->phyid,
                                      pref,
                                      caifdev->use_fcs,
                                      caifdev->use_stx);
-                strncpy(caifd->layer.name, dev->name,
-                        sizeof(caifd->layer.name) - 1);
-                caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+                mutex_unlock(&caifdevs->lock);
                 break;
 
-        case NETDEV_GOING_DOWN:
+        case NETDEV_UP:
+                rcu_read_lock();
+
                 caifd = caif_get(dev);
-                if (caifd == NULL)
+                if (caifd == NULL) {
+                        rcu_read_unlock();
                         break;
-                netdev_info(dev, "going down\n");
+                }
 
-                if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
-                    atomic_read(&caifd->state) == NETDEV_DOWN)
-                        break;
+                cfcnfg_set_phy_state(cfg, &caifd->layer, true);
+                rcu_read_unlock();
 
-                atomic_set(&caifd->state, what);
-                if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
-                        return -EINVAL;
-                caifd->layer.up->ctrlcmd(caifd->layer.up,
-                                         _CAIF_CTRLCMD_PHYIF_DOWN_IND,
-                                         caifd->layer.id);
-                might_sleep();
-                res = wait_event_interruptible_timeout(caifd->event,
-                                atomic_read(&caifd->in_use) == 0,
-                                TIMEOUT);
                 break;
 
         case NETDEV_DOWN:
+                rcu_read_lock();
+
                 caifd = caif_get(dev);
-                if (caifd == NULL)
-                        break;
-                netdev_info(dev, "down\n");
-                if (atomic_read(&caifd->in_use))
-                        netdev_warn(dev,
-                                    "Unregistering an active CAIF device\n");
-                cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
-                dev_put(dev);
-                atomic_set(&caifd->state, what);
+                if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
+                        rcu_read_unlock();
+                        return -EINVAL;
+                }
+
+                cfcnfg_set_phy_state(cfg, &caifd->layer, false);
+                caifd_hold(caifd);
+                rcu_read_unlock();
+
+                caifd->layer.up->ctrlcmd(caifd->layer.up,
+                                         _CAIF_CTRLCMD_PHYIF_DOWN_IND,
+                                         caifd->layer.id);
+                caifd_put(caifd);
                 break;
 
         case NETDEV_UNREGISTER:
+                mutex_lock(&caifdevs->lock);
+
                 caifd = caif_get(dev);
-                if (caifd == NULL)
+                if (caifd == NULL) {
+                        mutex_unlock(&caifdevs->lock);
+                        break;
+                }
+                list_del_rcu(&caifd->list);
+
+                /*
+                 * NETDEV_UNREGISTER is called repeatedly until all reference
+                 * counts for the net-device are released. If references to
+                 * caifd is taken, simply ignore NETDEV_UNREGISTER and wait for
+                 * the next call to NETDEV_UNREGISTER.
+                 *
+                 * If any packets are in flight down the CAIF Stack,
+                 * cfcnfg_del_phy_layer will return nonzero.
+                 * If no packets are in flight, the CAIF Stack associated
+                 * with the net-device un-registering is freed.
+                 */
+
+                if (caifd_refcnt_read(caifd) != 0 ||
+                    cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {
+
+                        pr_info("Wait for device inuse\n");
+                        /* Enrole device if CAIF Stack is still in use */
+                        list_add_rcu(&caifd->list, &caifdevs->list);
+                        mutex_unlock(&caifdevs->lock);
                         break;
-                netdev_info(dev, "unregister\n");
-                atomic_set(&caifd->state, what);
-                caif_device_destroy(dev);
+                }
+
+                synchronize_rcu();
+                dev_put(caifd->netdev);
+                free_percpu(caifd->pcpu_refcnt);
+                kfree(caifd);
+
+                mutex_unlock(&caifdevs->lock);
                 break;
         }
         return 0;
@@ -322,61 +339,60 @@ static struct notifier_block caif_device_notifier = {
         .priority = 0,
 };
 
-
-struct cfcnfg *get_caif_conf(void)
-{
-        return cfg;
-}
-EXPORT_SYMBOL(get_caif_conf);
-
-int caif_connect_client(struct caif_connect_request *conn_req,
-                        struct cflayer *client_layer, int *ifindex,
-                        int *headroom, int *tailroom)
-{
-        struct cfctrl_link_param param;
-        int ret;
-        ret = connect_req_to_link_param(get_caif_conf(), conn_req, &param);
-        if (ret)
-                return ret;
-        /* Hook up the adaptation layer. */
-        return cfcnfg_add_adaptation_layer(get_caif_conf(), &param,
-                                        client_layer, ifindex,
-                                        headroom, tailroom);
-}
-EXPORT_SYMBOL(caif_connect_client);
-
-int caif_disconnect_client(struct cflayer *adap_layer)
-{
-        return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer);
-}
-EXPORT_SYMBOL(caif_disconnect_client);
-
-void caif_release_client(struct cflayer *adap_layer)
-{
-        cfcnfg_release_adap_layer(adap_layer);
-}
-EXPORT_SYMBOL(caif_release_client);
-
 /* Per-namespace Caif devices handling */
 static int caif_init_net(struct net *net)
 {
         struct caif_net *caifn = net_generic(net, caif_net_id);
+        BUG_ON(!caifn);
         INIT_LIST_HEAD(&caifn->caifdevs.list);
-        spin_lock_init(&caifn->caifdevs.lock);
+        mutex_init(&caifn->caifdevs.lock);
+
+        caifn->cfg = cfcnfg_create();
+        if (!caifn->cfg) {
+                pr_warn("can't create cfcnfg\n");
+                return -ENOMEM;
+        }
+
         return 0;
 }
 
 static void caif_exit_net(struct net *net)
 {
-        struct net_device *dev;
-        int res;
+        struct caif_device_entry *caifd, *tmp;
+        struct caif_device_entry_list *caifdevs =
+                caif_device_list(net);
+        struct cfcnfg *cfg;
+
         rtnl_lock();
-        for_each_netdev(net, dev) {
-                if (dev->type != ARPHRD_CAIF)
-                        continue;
-                res = dev_close(dev);
-                caif_device_destroy(dev);
+        mutex_lock(&caifdevs->lock);
+
+        cfg = get_cfcnfg(net);
+        if (cfg == NULL) {
+                mutex_unlock(&caifdevs->lock);
+                return;
         }
+
+        list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
+                int i = 0;
+                list_del_rcu(&caifd->list);
+                cfcnfg_set_phy_state(cfg, &caifd->layer, false);
+
+                while (i < 10 &&
+                        (caifd_refcnt_read(caifd) != 0 ||
+                        cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {
+
+                        pr_info("Wait for device inuse\n");
+                        msleep(250);
+                        i++;
+                }
+                synchronize_rcu();
+                dev_put(caifd->netdev);
+                free_percpu(caifd->pcpu_refcnt);
+                kfree(caifd);
+        }
+        cfcnfg_remove(cfg);
+
+        mutex_unlock(&caifdevs->lock);
         rtnl_unlock();
 }
 
@@ -391,32 +407,23 @@ static struct pernet_operations caif_net_ops = {
 static int __init caif_device_init(void)
 {
         int result;
-        cfg = cfcnfg_create();
-        if (!cfg) {
-                pr_warn("can't create cfcnfg\n");
-                goto err_cfcnfg_create_failed;
-        }
+
         result = register_pernet_device(&caif_net_ops);
 
-        if (result) {
-                kfree(cfg);
-                cfg = NULL;
+        if (result)
                 return result;
-        }
-        dev_add_pack(&caif_packet_type);
+
         register_netdevice_notifier(&caif_device_notifier);
+        dev_add_pack(&caif_packet_type);
 
         return result;
-err_cfcnfg_create_failed:
-        return -ENODEV;
 }
 
 static void __exit caif_device_exit(void)
 {
-        dev_remove_pack(&caif_packet_type);
         unregister_pernet_device(&caif_net_ops);
         unregister_netdevice_notifier(&caif_device_notifier);
-        cfcnfg_remove(cfg);
+        dev_remove_pack(&caif_packet_type);
 }
 
 module_init(caif_device_init);