-rw-r--r--   include/net/caif/cfcnfg.h |  10
-rw-r--r--   net/caif/caif_dev.c       | 277
2 files changed, 169 insertions, 118 deletions
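The heart of the change shows up in the caif_dev.c hunks below: the wait-queue and atomic in_use/state fields of struct caif_device_entry are replaced by a per-CPU reference count (caifd_hold()/caifd_put()/caifd_refcnt_read()), list traversal moves to RCU, and NETDEV_UNREGISTER only frees the entry once the summed count has drained to zero. As a rough, self-contained userspace sketch of that counting pattern (an assumed simplification, not kernel code: array slots stand in for CPUs, and C11 atomics stand in for irqsafe_cpu_inc()/irqsafe_cpu_dec()):

#include <stdatomic.h>
#include <stdio.h>

#define NSLOTS 4	/* stand-in for the number of possible CPUs */

struct entry {
	atomic_int refcnt[NSLOTS];	/* stand-in for the int __percpu *pcpu_refcnt */
};

/* like caifd_hold(): bump only this slot's counter */
static void entry_hold(struct entry *e, int slot)
{
	atomic_fetch_add(&e->refcnt[slot], 1);
}

/* like caifd_put(): drop only this slot's counter */
static void entry_put(struct entry *e, int slot)
{
	atomic_fetch_sub(&e->refcnt[slot], 1);
}

/* like caifd_refcnt_read(): sum every slot on the slow teardown path */
static int entry_refcnt_read(struct entry *e)
{
	int i, sum = 0;

	for (i = 0; i < NSLOTS; i++)
		sum += atomic_load(&e->refcnt[i]);
	return sum;
}

int main(void)
{
	struct entry e = { { 0 } };

	entry_hold(&e, 0);	/* packet in flight on "CPU" 0 */
	printf("refcnt while packet in flight: %d\n", entry_refcnt_read(&e));
	entry_put(&e, 0);	/* packet done */
	printf("refcnt at teardown:            %d\n", entry_refcnt_read(&e));
	return 0;
}

The point of splitting the counter is that hold/put on the hot receive path never touch a shared cache line; only the rare teardown path pays for summing all slots.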
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index f33d36341132..e0a1eb5d7eaf 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -145,4 +145,14 @@ struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
  * @ifi: ifindex obtained from socket.c bindtodevice.
  */
 int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi);
+
+/**
+ * cfcnfg_set_phy_state() - Set the state of the physical interface device.
+ * @cnfg:	Configuration object
+ * @phy_layer:	Physical Layer representation
+ * @up:		State of device
+ */
+int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
+			 bool up);
+
 #endif /* CFCNFG_H_ */
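cfcnfg_set_phy_state() declared above is the new entry point that caif_dev.c uses below to flip a registered PHY layer up or down from the NETDEV_UP and NETDEV_DOWN notifier cases. A minimal sketch of a caller, mirroring the patch and reusing its own names (cfg, caif_get(), caifd and dev are the patch's symbols; the fragment itself is illustrative, not part of the commit):

	rcu_read_lock();
	caifd = caif_get(dev);		/* find the CAIF entry for this netdev */
	if (caifd)
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);	/* PHY is now up */
	rcu_read_unlock();
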
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 75e00d59eb49..6d1d86be187b 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -12,14 +12,11 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
 
 #include <linux/version.h>
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/if_arp.h>
 #include <linux/net.h>
 #include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/mutex.h>
 #include <net/netns/generic.h>
 #include <net/net_namespace.h>
 #include <net/pkt_sched.h>
@@ -30,23 +27,19 @@
 #include <net/caif/cfcnfg.h>
 
 MODULE_LICENSE("GPL");
-#define TIMEOUT (HZ*5)
 
 /* Used for local tracking of the CAIF net devices */
 struct caif_device_entry {
 	struct cflayer layer;
 	struct list_head list;
-	atomic_t in_use;
-	atomic_t state;
-	u16 phyid;
 	struct net_device *netdev;
-	wait_queue_head_t event;
+	int __percpu *pcpu_refcnt;
 };
 
 struct caif_device_entry_list {
 	struct list_head list;
 	/* Protects simulanous deletes in list */
-	spinlock_t lock;
+	struct mutex lock;
 };
 
 struct caif_net {
@@ -65,19 +58,39 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
 	return &caifn->caifdevs;
 }
 
+static void caifd_put(struct caif_device_entry *e)
+{
+	irqsafe_cpu_dec(*e->pcpu_refcnt);
+}
+
+static void caifd_hold(struct caif_device_entry *e)
+{
+	irqsafe_cpu_inc(*e->pcpu_refcnt);
+}
+
+static int caifd_refcnt_read(struct caif_device_entry *e)
+{
+	int i, refcnt = 0;
+	for_each_possible_cpu(i)
+		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
+	return refcnt;
+}
+
 /* Allocate new CAIF device. */
 static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
 {
 	struct caif_device_entry_list *caifdevs;
 	struct caif_device_entry *caifd;
+
 	caifdevs = caif_device_list(dev_net(dev));
 	BUG_ON(!caifdevs);
+
 	caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
 	if (!caifd)
 		return NULL;
+	caifd->pcpu_refcnt = alloc_percpu(int);
 	caifd->netdev = dev;
-	list_add(&caifd->list, &caifdevs->list);
-	init_waitqueue_head(&caifd->event);
+	dev_hold(dev);
 	return caifd;
 }
 
@@ -87,35 +100,13 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
 	    caif_device_list(dev_net(dev));
 	struct caif_device_entry *caifd;
 	BUG_ON(!caifdevs);
-	list_for_each_entry(caifd, &caifdevs->list, list) {
+	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
 		if (caifd->netdev == dev)
 			return caifd;
 	}
 	return NULL;
 }
 
-static void caif_device_destroy(struct net_device *dev)
-{
-	struct caif_device_entry_list *caifdevs =
-	    caif_device_list(dev_net(dev));
-	struct caif_device_entry *caifd;
-	ASSERT_RTNL();
-	if (dev->type != ARPHRD_CAIF)
-		return;
-
-	spin_lock_bh(&caifdevs->lock);
-	caifd = caif_get(dev);
-	if (caifd == NULL) {
-		spin_unlock_bh(&caifdevs->lock);
-		return;
-	}
-
-	list_del(&caifd->list);
-	spin_unlock_bh(&caifdevs->lock);
-
-	kfree(caifd);
-}
-
 static int transmit(struct cflayer *layer, struct cfpkt *pkt)
 {
 	struct caif_device_entry *caifd =
@@ -130,23 +121,8 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
 	return 0;
 }
 
-static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
-{
-	struct caif_device_entry *caifd;
-	caifd = container_of(layr, struct caif_device_entry, layer);
-	if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
-		atomic_set(&caifd->in_use, 1);
-		wake_up_interruptible(&caifd->event);
-
-	} else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
-		atomic_set(&caifd->in_use, 0);
-		wake_up_interruptible(&caifd->event);
-	}
-	return 0;
-}
-
 /*
- * Stuff received packets to associated sockets.
+ * Stuff received packets into the CAIF stack.
  * On error, returns non-zero and releases the skb.
  */
 static int receive(struct sk_buff *skb, struct net_device *dev,
@@ -154,14 +130,27 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
 {
 	struct cfpkt *pkt;
 	struct caif_device_entry *caifd;
+
 	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
+
+	rcu_read_lock();
 	caifd = caif_get(dev);
-	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
-		return NET_RX_DROP;
 
-	if (caifd->layer.up->receive(caifd->layer.up, pkt))
+	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
+			!netif_oper_up(caifd->netdev)) {
+		rcu_read_unlock();
+		kfree_skb(skb);
 		return NET_RX_DROP;
+	}
+
+	/* Hold reference to netdevice while using CAIF stack */
+	caifd_hold(caifd);
+	rcu_read_unlock();
 
+	caifd->layer.up->receive(caifd->layer.up, pkt);
+
+	/* Release reference to stack upwards */
+	caifd_put(caifd);
 	return 0;
 }
 
@@ -172,15 +161,25 @@ static struct packet_type caif_packet_type __read_mostly = {
 
 static void dev_flowctrl(struct net_device *dev, int on)
 {
-	struct caif_device_entry *caifd = caif_get(dev);
-	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+	struct caif_device_entry *caifd;
+
+	rcu_read_lock();
+
+	caifd = caif_get(dev);
+	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
+		rcu_read_unlock();
 		return;
+	}
+
+	caifd_hold(caifd);
+	rcu_read_unlock();
 
 	caifd->layer.up->ctrlcmd(caifd->layer.up,
 				 on ?
 				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
 				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
 				 caifd->layer.id);
+	caifd_put(caifd);
 }
 
 /* notify Caif of device events */
@@ -192,34 +191,22 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 	struct caif_dev_common *caifdev;
 	enum cfcnfg_phy_preference pref;
 	enum cfcnfg_phy_type phy_type;
+	struct caif_device_entry_list *caifdevs =
+		caif_device_list(dev_net(dev));
 
 	if (dev->type != ARPHRD_CAIF)
 		return 0;
 
 	switch (what) {
 	case NETDEV_REGISTER:
-		netdev_info(dev, "register\n");
 		caifd = caif_device_alloc(dev);
-		if (caifd == NULL)
-			break;
+		if (!caifd)
+			return 0;
+
 		caifdev = netdev_priv(dev);
 		caifdev->flowctrl = dev_flowctrl;
-		atomic_set(&caifd->state, what);
-		break;
 
-	case NETDEV_UP:
-		netdev_info(dev, "up\n");
-		caifd = caif_get(dev);
-		if (caifd == NULL)
-			break;
-		caifdev = netdev_priv(dev);
-		if (atomic_read(&caifd->state) == NETDEV_UP) {
-			netdev_info(dev, "already up\n");
-			break;
-		}
-		atomic_set(&caifd->state, what);
 		caifd->layer.transmit = transmit;
-		caifd->layer.modemcmd = modemcmd;
 
 		if (caifdev->use_frag)
 			phy_type = CFPHYTYPE_FRAG;
@@ -237,62 +224,95 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 			pref = CFPHYPREF_HIGH_BW;
 			break;
 		}
-		dev_hold(dev);
+		strncpy(caifd->layer.name, dev->name,
+			sizeof(caifd->layer.name) - 1);
+		caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+
+		mutex_lock(&caifdevs->lock);
+		list_add_rcu(&caifd->list, &caifdevs->list);
+
 		cfcnfg_add_phy_layer(cfg,
 				     phy_type,
 				     dev,
 				     &caifd->layer,
-				     &caifd->phyid,
+				     0,
 				     pref,
 				     caifdev->use_fcs,
 				     caifdev->use_stx);
-		strncpy(caifd->layer.name, dev->name,
-			sizeof(caifd->layer.name) - 1);
-		caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+		mutex_unlock(&caifdevs->lock);
 		break;
 
-	case NETDEV_GOING_DOWN:
+	case NETDEV_UP:
+		rcu_read_lock();
+
 		caifd = caif_get(dev);
-		if (caifd == NULL)
+		if (caifd == NULL) {
+			rcu_read_unlock();
 			break;
-		netdev_info(dev, "going down\n");
+		}
 
-		if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
-			atomic_read(&caifd->state) == NETDEV_DOWN)
-			break;
+		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
+		rcu_read_unlock();
 
-		atomic_set(&caifd->state, what);
-		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
-			return -EINVAL;
-		caifd->layer.up->ctrlcmd(caifd->layer.up,
-					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
-					 caifd->layer.id);
-		might_sleep();
-		wait_event_interruptible_timeout(caifd->event,
-					atomic_read(&caifd->in_use) == 0,
-					TIMEOUT);
 		break;
 
 	case NETDEV_DOWN:
+		rcu_read_lock();
+
 		caifd = caif_get(dev);
-		if (caifd == NULL)
-			break;
-		netdev_info(dev, "down\n");
-		if (atomic_read(&caifd->in_use))
-			netdev_warn(dev,
-				    "Unregistering an active CAIF device\n");
-		cfcnfg_del_phy_layer(cfg, &caifd->layer);
-		dev_put(dev);
-		atomic_set(&caifd->state, what);
+		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+
+		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
+		caifd_hold(caifd);
+		rcu_read_unlock();
+
+		caifd->layer.up->ctrlcmd(caifd->layer.up,
+					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
+					 caifd->layer.id);
+		caifd_put(caifd);
 		break;
 
 	case NETDEV_UNREGISTER:
+		mutex_lock(&caifdevs->lock);
+
 		caifd = caif_get(dev);
-		if (caifd == NULL)
+		if (caifd == NULL) {
+			mutex_unlock(&caifdevs->lock);
 			break;
-		netdev_info(dev, "unregister\n");
-		atomic_set(&caifd->state, what);
-		caif_device_destroy(dev);
+		}
+		list_del_rcu(&caifd->list);
+
+		/*
+		 * NETDEV_UNREGISTER is called repeatedly until all reference
+		 * counts for the net-device are released. If references to
+		 * caifd is taken, simply ignore NETDEV_UNREGISTER and wait for
+		 * the next call to NETDEV_UNREGISTER.
+		 *
+		 * If any packets are in flight down the CAIF Stack,
+		 * cfcnfg_del_phy_layer will return nonzero.
+		 * If no packets are in flight, the CAIF Stack associated
+		 * with the net-device un-registering is freed.
+		 */
+
+		if (caifd_refcnt_read(caifd) != 0 ||
+			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {
+
+			pr_info("Wait for device inuse\n");
+			/* Enrole device if CAIF Stack is still in use */
+			list_add_rcu(&caifd->list, &caifdevs->list);
+			mutex_unlock(&caifdevs->lock);
+			break;
+		}
+
+		synchronize_rcu();
+		dev_put(caifd->netdev);
+		free_percpu(caifd->pcpu_refcnt);
+		kfree(caifd);
+
+		mutex_unlock(&caifdevs->lock);
 		break;
 	}
 	return 0;
@@ -304,8 +324,8 @@ static struct notifier_block caif_device_notifier = {
 };
 
 int caif_connect_client(struct caif_connect_request *conn_req,
-		   struct cflayer *client_layer, int *ifindex,
-		   int *headroom, int *tailroom)
+			struct cflayer *client_layer, int *ifindex,
+			int *headroom, int *tailroom)
 {
 	struct cfctrl_link_param param;
 	int ret;
@@ -315,8 +335,8 @@ int caif_connect_client(struct caif_connect_request *conn_req,
 		return ret;
 	/* Hook up the adaptation layer. */
 	return cfcnfg_add_adaptation_layer(cfg, &param,
-				client_layer, ifindex,
-				headroom, tailroom);
+					client_layer, ifindex,
+					headroom, tailroom);
 }
 EXPORT_SYMBOL(caif_connect_client);
 
@@ -331,20 +351,40 @@ static int caif_init_net(struct net *net)
 {
 	struct caif_net *caifn = net_generic(net, caif_net_id);
 	INIT_LIST_HEAD(&caifn->caifdevs.list);
-	spin_lock_init(&caifn->caifdevs.lock);
+	mutex_init(&caifn->caifdevs.lock);
 	return 0;
 }
 
 static void caif_exit_net(struct net *net)
 {
-	struct net_device *dev;
+	struct caif_device_entry *caifd, *tmp;
+	struct caif_device_entry_list *caifdevs =
+		caif_device_list(net);
+
 	rtnl_lock();
-	for_each_netdev(net, dev) {
-		if (dev->type != ARPHRD_CAIF)
-			continue;
-		dev_close(dev);
-		caif_device_destroy(dev);
+	mutex_lock(&caifdevs->lock);
+
+	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
+		int i = 0;
+		list_del_rcu(&caifd->list);
+		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
+
+		while (i < 10 &&
+			(caifd_refcnt_read(caifd) != 0 ||
+			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {
+
+			pr_info("Wait for device inuse\n");
+			msleep(250);
+			i++;
+		}
+		synchronize_rcu();
+		dev_put(caifd->netdev);
+		free_percpu(caifd->pcpu_refcnt);
+		kfree(caifd);
 	}
+
+
+	mutex_unlock(&caifdevs->lock);
 	rtnl_unlock();
 }
 
@@ -359,6 +399,7 @@ static struct pernet_operations caif_net_ops = {
 static int __init caif_device_init(void)
 {
 	int result;
+
 	cfg = cfcnfg_create();
 	if (!cfg) {
 		pr_warn("can't create cfcnfg\n");