about summary refs log tree commit diff stats
path: root/net/caif/cfcnfg.c
diff options
context:
space:
mode:
author: sjur.brandeland@stericsson.com <sjur.brandeland@stericsson.com> 2011-05-12 22:44:01 -0400
committer: David S. Miller <davem@davemloft.net> 2011-05-15 17:45:54 -0400
commitf36214408470ecf6a052e76b72d05b2328b60fcf (patch)
treeadc3df40fdf0ff782392a881c2d80ba16d237c6b /net/caif/cfcnfg.c
parentbd30ce4bc0b7dc859c1d1cba7ad87e08642418b0 (diff)
caif: Use RCU and lists in cfcnfg.c for managing caif link layers
RCU lists are used for handling the link layers instead of array. When generating CAIF phy-id, ifindex is used as base. Legal range is 1-6. Introduced set_phy_state() for managing CAIF Link layer state. Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/caif/cfcnfg.c')
-rw-r--r--  net/caif/cfcnfg.c  373
1 file changed, 213 insertions(+), 160 deletions(-)
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 25c0b198e285..7892cc084e27 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -10,6 +10,7 @@
10#include <linux/stddef.h> 10#include <linux/stddef.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/module.h>
13#include <net/caif/caif_layer.h> 14#include <net/caif/caif_layer.h>
14#include <net/caif/cfpkt.h> 15#include <net/caif/cfpkt.h>
15#include <net/caif/cfcnfg.h> 16#include <net/caif/cfcnfg.h>
@@ -18,11 +19,7 @@
18#include <net/caif/cffrml.h> 19#include <net/caif/cffrml.h>
19#include <net/caif/cfserl.h> 20#include <net/caif/cfserl.h>
20#include <net/caif/cfsrvl.h> 21#include <net/caif/cfsrvl.h>
21 22#include <net/caif/caif_dev.h>
22#include <linux/module.h>
23#include <asm/atomic.h>
24
25#define MAX_PHY_LAYERS 7
26 23
27#define container_obj(layr) container_of(layr, struct cfcnfg, layer) 24#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
28 25
@@ -30,6 +27,9 @@
30 * to manage physical interfaces 27 * to manage physical interfaces
31 */ 28 */
32struct cfcnfg_phyinfo { 29struct cfcnfg_phyinfo {
30 struct list_head node;
31 bool up;
32
33 /* Pointer to the layer below the MUX (framing layer) */ 33 /* Pointer to the layer below the MUX (framing layer) */
34 struct cflayer *frm_layer; 34 struct cflayer *frm_layer;
35 /* Pointer to the lowest actual physical layer */ 35 /* Pointer to the lowest actual physical layer */
@@ -39,9 +39,6 @@ struct cfcnfg_phyinfo {
39 /* Preference of the physical in interface */ 39 /* Preference of the physical in interface */
40 enum cfcnfg_phy_preference pref; 40 enum cfcnfg_phy_preference pref;
41 41
42 /* Reference count, number of channels using the device */
43 int phy_ref_count;
44
45 /* Information about the physical device */ 42 /* Information about the physical device */
46 struct dev_info dev_info; 43 struct dev_info dev_info;
47 44
@@ -59,8 +56,8 @@ struct cfcnfg {
59 struct cflayer layer; 56 struct cflayer layer;
60 struct cflayer *ctrl; 57 struct cflayer *ctrl;
61 struct cflayer *mux; 58 struct cflayer *mux;
62 u8 last_phyid; 59 struct list_head phys;
63 struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS]; 60 struct mutex lock;
64}; 61};
65 62
66static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, 63static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
@@ -76,6 +73,9 @@ struct cfcnfg *cfcnfg_create(void)
76{ 73{
77 struct cfcnfg *this; 74 struct cfcnfg *this;
78 struct cfctrl_rsp *resp; 75 struct cfctrl_rsp *resp;
76
77 might_sleep();
78
79 /* Initiate this layer */ 79 /* Initiate this layer */
80 this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC); 80 this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
81 if (!this) { 81 if (!this) {
@@ -99,15 +99,19 @@ struct cfcnfg *cfcnfg_create(void)
99 resp->radioset_rsp = cfctrl_resp_func; 99 resp->radioset_rsp = cfctrl_resp_func;
100 resp->linksetup_rsp = cfcnfg_linkup_rsp; 100 resp->linksetup_rsp = cfcnfg_linkup_rsp;
101 resp->reject_rsp = cfcnfg_reject_rsp; 101 resp->reject_rsp = cfcnfg_reject_rsp;
102 102 INIT_LIST_HEAD(&this->phys);
103 this->last_phyid = 1;
104 103
105 cfmuxl_set_uplayer(this->mux, this->ctrl, 0); 104 cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
106 layer_set_dn(this->ctrl, this->mux); 105 layer_set_dn(this->ctrl, this->mux);
107 layer_set_up(this->ctrl, this); 106 layer_set_up(this->ctrl, this);
107 mutex_init(&this->lock);
108
108 return this; 109 return this;
109out_of_mem: 110out_of_mem:
110 pr_warn("Out of memory\n"); 111 pr_warn("Out of memory\n");
112
113 synchronize_rcu();
114
111 kfree(this->mux); 115 kfree(this->mux);
112 kfree(this->ctrl); 116 kfree(this->ctrl);
113 kfree(this); 117 kfree(this);
@@ -117,7 +121,10 @@ EXPORT_SYMBOL(cfcnfg_create);
117 121
118void cfcnfg_remove(struct cfcnfg *cfg) 122void cfcnfg_remove(struct cfcnfg *cfg)
119{ 123{
124 might_sleep();
120 if (cfg) { 125 if (cfg) {
126 synchronize_rcu();
127
121 kfree(cfg->mux); 128 kfree(cfg->mux);
122 kfree(cfg->ctrl); 129 kfree(cfg->ctrl);
123 kfree(cfg); 130 kfree(cfg);
@@ -128,6 +135,17 @@ static void cfctrl_resp_func(void)
128{ 135{
129} 136}
130 137
138static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg,
139 u8 phyid)
140{
141 struct cfcnfg_phyinfo *phy;
142
143 list_for_each_entry_rcu(phy, &cnfg->phys, node)
144 if (phy->id == phyid)
145 return phy;
146 return NULL;
147}
148
131static void cfctrl_enum_resp(void) 149static void cfctrl_enum_resp(void)
132{ 150{
133} 151}
@@ -135,106 +153,65 @@ static void cfctrl_enum_resp(void)
135struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg, 153struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
136 enum cfcnfg_phy_preference phy_pref) 154 enum cfcnfg_phy_preference phy_pref)
137{ 155{
138 u16 i;
139
140 /* Try to match with specified preference */ 156 /* Try to match with specified preference */
141 for (i = 1; i < MAX_PHY_LAYERS; i++) { 157 struct cfcnfg_phyinfo *phy;
142 if (cnfg->phy_layers[i].id == i && 158
143 cnfg->phy_layers[i].pref == phy_pref && 159 list_for_each_entry_rcu(phy, &cnfg->phys, node) {
144 cnfg->phy_layers[i].frm_layer != NULL) { 160 if (phy->up && phy->pref == phy_pref &&
145 caif_assert(cnfg->phy_layers != NULL); 161 phy->frm_layer != NULL)
146 caif_assert(cnfg->phy_layers[i].id == i); 162
147 return &cnfg->phy_layers[i].dev_info; 163 return &phy->dev_info;
148 }
149 }
150 /* Otherwise just return something */
151 for (i = 1; i < MAX_PHY_LAYERS; i++) {
152 if (cnfg->phy_layers[i].id == i) {
153 caif_assert(cnfg->phy_layers != NULL);
154 caif_assert(cnfg->phy_layers[i].id == i);
155 return &cnfg->phy_layers[i].dev_info;
156 }
157 } 164 }
158 165
159 return NULL; 166 /* Otherwise just return something */
160} 167 list_for_each_entry_rcu(phy, &cnfg->phys, node)
168 if (phy->up)
169 return &phy->dev_info;
161 170
162static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
163 u8 phyid)
164{
165 int i;
166 /* Try to match with specified preference */
167 for (i = 0; i < MAX_PHY_LAYERS; i++)
168 if (cnfg->phy_layers[i].frm_layer != NULL &&
169 cnfg->phy_layers[i].id == phyid)
170 return &cnfg->phy_layers[i];
171 return NULL; 171 return NULL;
172} 172}
173 173
174
175int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi) 174int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
176{ 175{
177 int i; 176 struct cfcnfg_phyinfo *phy;
178 for (i = 0; i < MAX_PHY_LAYERS; i++) 177
179 if (cnfg->phy_layers[i].frm_layer != NULL && 178 list_for_each_entry_rcu(phy, &cnfg->phys, node)
180 cnfg->phy_layers[i].ifindex == ifi) 179 if (phy->ifindex == ifi && phy->up)
181 return i; 180 return phy->id;
182 return -ENODEV; 181 return -ENODEV;
183} 182}
184 183
185int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) 184int cfcnfg_disconn_adapt_layer(struct cfcnfg *cfg, struct cflayer *adap_layer)
186{ 185{
187 u8 channel_id = 0; 186 u8 channel_id = 0;
188 int ret = 0; 187 int ret = 0;
189 struct cflayer *servl = NULL; 188 struct cflayer *servl = NULL;
190 struct cfcnfg_phyinfo *phyinfo = NULL;
191 u8 phyid = 0;
192 189
193 caif_assert(adap_layer != NULL); 190 caif_assert(adap_layer != NULL);
191
194 channel_id = adap_layer->id; 192 channel_id = adap_layer->id;
195 if (adap_layer->dn == NULL || channel_id == 0) { 193 if (adap_layer->dn == NULL || channel_id == 0) {
196 pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n"); 194 pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
197 ret = -ENOTCONN; 195 ret = -ENOTCONN;
198 goto end; 196 goto end;
199 } 197 }
200 servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id); 198
199 servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
201 if (servl == NULL) { 200 if (servl == NULL) {
202 pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)", 201 pr_err("PROTOCOL ERROR - "
203 channel_id); 202 "Error removing service_layer Channel_Id(%d)",
203 channel_id);
204 ret = -EINVAL; 204 ret = -EINVAL;
205 goto end; 205 goto end;
206 } 206 }
207 layer_set_up(servl, NULL); 207
208 ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer); 208 ret = cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
209 if (ret) 209
210 goto end;
211 caif_assert(channel_id == servl->id);
212 if (adap_layer->dn != NULL) {
213 phyid = cfsrvl_getphyid(adap_layer->dn);
214
215 phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
216 if (phyinfo == NULL) {
217 pr_warn("No interface to send disconnect to\n");
218 ret = -ENODEV;
219 goto end;
220 }
221 if (phyinfo->id != phyid ||
222 phyinfo->phy_layer->id != phyid ||
223 phyinfo->frm_layer->id != phyid) {
224 pr_err("Inconsistency in phy registration\n");
225 ret = -EINVAL;
226 goto end;
227 }
228 }
229 if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
230 phyinfo->phy_layer != NULL &&
231 phyinfo->phy_layer->modemcmd != NULL) {
232 phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
233 _CAIF_MODEMCMD_PHYIF_USELESS);
234 }
235end: 210end:
236 cfsrvl_put(servl); 211 cfctrl_cancel_req(cfg->ctrl, adap_layer);
237 cfctrl_cancel_req(cnfg->ctrl, adap_layer); 212
213 /* Do RCU sync before initiating cleanup */
214 synchronize_rcu();
238 if (adap_layer->ctrlcmd != NULL) 215 if (adap_layer->ctrlcmd != NULL)
239 adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0); 216 adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
240 return ret; 217 return ret;
@@ -269,39 +246,56 @@ int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
269 int *proto_tail) 246 int *proto_tail)
270{ 247{
271 struct cflayer *frml; 248 struct cflayer *frml;
249 struct cfcnfg_phyinfo *phy;
250 int err;
251
252 rcu_read_lock();
253 phy = cfcnfg_get_phyinfo_rcu(cnfg, param->phyid);
254 if (!phy) {
255 err = -ENODEV;
256 goto unlock;
257 }
258 err = -EINVAL;
259
272 if (adap_layer == NULL) { 260 if (adap_layer == NULL) {
273 pr_err("adap_layer is zero\n"); 261 pr_err("adap_layer is zero\n");
274 return -EINVAL; 262 goto unlock;
275 } 263 }
276 if (adap_layer->receive == NULL) { 264 if (adap_layer->receive == NULL) {
277 pr_err("adap_layer->receive is NULL\n"); 265 pr_err("adap_layer->receive is NULL\n");
278 return -EINVAL; 266 goto unlock;
279 } 267 }
280 if (adap_layer->ctrlcmd == NULL) { 268 if (adap_layer->ctrlcmd == NULL) {
281 pr_err("adap_layer->ctrlcmd == NULL\n"); 269 pr_err("adap_layer->ctrlcmd == NULL\n");
282 return -EINVAL; 270 goto unlock;
283 } 271 }
284 frml = cnfg->phy_layers[param->phyid].frm_layer; 272
273 err = -ENODEV;
274 frml = phy->frm_layer;
285 if (frml == NULL) { 275 if (frml == NULL) {
286 pr_err("Specified PHY type does not exist!\n"); 276 pr_err("Specified PHY type does not exist!\n");
287 return -ENODEV; 277 goto unlock;
288 } 278 }
289 caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id); 279 caif_assert(param->phyid == phy->id);
290 caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id == 280 caif_assert(phy->frm_layer->id ==
291 param->phyid); 281 param->phyid);
292 caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id == 282 caif_assert(phy->phy_layer->id ==
293 param->phyid); 283 param->phyid);
294 284
295 *ifindex = cnfg->phy_layers[param->phyid].ifindex; 285 *ifindex = phy->ifindex;
286 *proto_tail = 2;
296 *proto_head = 287 *proto_head =
297 protohead[param->linktype]+ 288 protohead[param->linktype] + (phy->use_stx ? 1 : 0);
298 (cnfg->phy_layers[param->phyid].use_stx ? 1 : 0);
299 289
300 *proto_tail = 2; 290 rcu_read_unlock();
301 291
302 /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */ 292 /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
303 cfctrl_enum_req(cnfg->ctrl, param->phyid); 293 cfctrl_enum_req(cnfg->ctrl, param->phyid);
304 return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer); 294 return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
295
296unlock:
297 rcu_read_unlock();
298 return err;
305} 299}
306EXPORT_SYMBOL(cfcnfg_add_adaptation_layer); 300EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);
307 301
@@ -315,32 +309,37 @@ static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
315 309
316static void 310static void
317cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, 311cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
318 u8 phyid, struct cflayer *adapt_layer) 312 u8 phyid, struct cflayer *adapt_layer)
319{ 313{
320 struct cfcnfg *cnfg = container_obj(layer); 314 struct cfcnfg *cnfg = container_obj(layer);
321 struct cflayer *servicel = NULL; 315 struct cflayer *servicel = NULL;
322 struct cfcnfg_phyinfo *phyinfo; 316 struct cfcnfg_phyinfo *phyinfo;
323 struct net_device *netdev; 317 struct net_device *netdev;
324 318
319 rcu_read_lock();
320
325 if (adapt_layer == NULL) { 321 if (adapt_layer == NULL) {
326 pr_debug("link setup response but no client exist, send linkdown back\n"); 322 pr_debug("link setup response but no client exist,"
323 "send linkdown back\n");
327 cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL); 324 cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
328 return; 325 goto unlock;
329 } 326 }
330 327
331 caif_assert(cnfg != NULL); 328 caif_assert(cnfg != NULL);
332 caif_assert(phyid != 0); 329 caif_assert(phyid != 0);
333 phyinfo = &cnfg->phy_layers[phyid]; 330
331 phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
332 if (phyinfo == NULL) {
333 pr_err("ERROR: Link Layer Device dissapeared"
334 "while connecting\n");
335 goto unlock;
336 }
337
338 caif_assert(phyinfo != NULL);
334 caif_assert(phyinfo->id == phyid); 339 caif_assert(phyinfo->id == phyid);
335 caif_assert(phyinfo->phy_layer != NULL); 340 caif_assert(phyinfo->phy_layer != NULL);
336 caif_assert(phyinfo->phy_layer->id == phyid); 341 caif_assert(phyinfo->phy_layer->id == phyid);
337 342
338 phyinfo->phy_ref_count++;
339 if (phyinfo->phy_ref_count == 1 &&
340 phyinfo->phy_layer->modemcmd != NULL) {
341 phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
342 _CAIF_MODEMCMD_PHYIF_USEFULL);
343 }
344 adapt_layer->id = channel_id; 343 adapt_layer->id = channel_id;
345 344
346 switch (serv) { 345 switch (serv) {
@@ -348,7 +347,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
348 servicel = cfvei_create(channel_id, &phyinfo->dev_info); 347 servicel = cfvei_create(channel_id, &phyinfo->dev_info);
349 break; 348 break;
350 case CFCTRL_SRV_DATAGRAM: 349 case CFCTRL_SRV_DATAGRAM:
351 servicel = cfdgml_create(channel_id, &phyinfo->dev_info); 350 servicel = cfdgml_create(channel_id,
351 &phyinfo->dev_info);
352 break; 352 break;
353 case CFCTRL_SRV_RFM: 353 case CFCTRL_SRV_RFM:
354 netdev = phyinfo->dev_info.dev; 354 netdev = phyinfo->dev_info.dev;
@@ -365,94 +365,93 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
365 servicel = cfdbgl_create(channel_id, &phyinfo->dev_info); 365 servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
366 break; 366 break;
367 default: 367 default:
368 pr_err("Protocol error. Link setup response - unknown channel type\n"); 368 pr_err("Protocol error. Link setup response "
369 return; 369 "- unknown channel type\n");
370 goto unlock;
370 } 371 }
371 if (!servicel) { 372 if (!servicel) {
372 pr_warn("Out of memory\n"); 373 pr_warn("Out of memory\n");
373 return; 374 goto unlock;
374 } 375 }
375 layer_set_dn(servicel, cnfg->mux); 376 layer_set_dn(servicel, cnfg->mux);
376 cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id); 377 cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
377 layer_set_up(servicel, adapt_layer); 378 layer_set_up(servicel, adapt_layer);
378 layer_set_dn(adapt_layer, servicel); 379 layer_set_dn(adapt_layer, servicel);
379 cfsrvl_get(servicel); 380
381 rcu_read_unlock();
382
380 servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0); 383 servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
384 return;
385unlock:
386 rcu_read_unlock();
381} 387}
382 388
383void 389void
384cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, 390cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
385 struct net_device *dev, struct cflayer *phy_layer, 391 struct net_device *dev, struct cflayer *phy_layer,
386 u16 *phyid, enum cfcnfg_phy_preference pref, 392 u16 *phy_id, enum cfcnfg_phy_preference pref,
387 bool fcs, bool stx) 393 bool fcs, bool stx)
388{ 394{
389 struct cflayer *frml; 395 struct cflayer *frml;
390 struct cflayer *phy_driver = NULL; 396 struct cflayer *phy_driver = NULL;
397 struct cfcnfg_phyinfo *phyinfo;
391 int i; 398 int i;
399 u8 phyid;
392 400
401 mutex_lock(&cnfg->lock);
393 402
394 if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) { 403 /* CAIF protocol allow maximum 6 link-layers */
395 *phyid = cnfg->last_phyid; 404 for (i = 0; i < 7; i++) {
396 405 phyid = (dev->ifindex + i) & 0x7;
397 /* range: * 1..(MAX_PHY_LAYERS-1) */ 406 if (phyid == 0)
398 cnfg->last_phyid = 407 continue;
399 (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1; 408 if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL)
400 } else { 409 goto got_phyid;
401 *phyid = 0;
402 for (i = 1; i < MAX_PHY_LAYERS; i++) {
403 if (cnfg->phy_layers[i].frm_layer == NULL) {
404 *phyid = i;
405 break;
406 }
407 }
408 }
409 if (*phyid == 0) {
410 pr_err("No Available PHY ID\n");
411 return;
412 } 410 }
411 pr_warn("Too many CAIF Link Layers (max 6)\n");
412 goto out;
413
414got_phyid:
415 phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
413 416
414 switch (phy_type) { 417 switch (phy_type) {
415 case CFPHYTYPE_FRAG: 418 case CFPHYTYPE_FRAG:
416 phy_driver = 419 phy_driver =
417 cfserl_create(CFPHYTYPE_FRAG, *phyid, stx); 420 cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
418 if (!phy_driver) { 421 if (!phy_driver) {
419 pr_warn("Out of memory\n"); 422 pr_warn("Out of memory\n");
420 return; 423 goto out;
421 } 424 }
422
423 break; 425 break;
424 case CFPHYTYPE_CAIF: 426 case CFPHYTYPE_CAIF:
425 phy_driver = NULL; 427 phy_driver = NULL;
426 break; 428 break;
427 default: 429 default:
428 pr_err("%d\n", phy_type); 430 goto out;
429 return;
430 break;
431 } 431 }
432 432 phy_layer->id = phyid;
433 phy_layer->id = *phyid; 433 phyinfo->pref = pref;
434 cnfg->phy_layers[*phyid].pref = pref; 434 phyinfo->id = phyid;
435 cnfg->phy_layers[*phyid].id = *phyid; 435 phyinfo->dev_info.id = phyid;
436 cnfg->phy_layers[*phyid].dev_info.id = *phyid; 436 phyinfo->dev_info.dev = dev;
437 cnfg->phy_layers[*phyid].dev_info.dev = dev; 437 phyinfo->phy_layer = phy_layer;
438 cnfg->phy_layers[*phyid].phy_layer = phy_layer; 438 phyinfo->ifindex = dev->ifindex;
439 cnfg->phy_layers[*phyid].phy_ref_count = 0; 439 phyinfo->use_stx = stx;
440 cnfg->phy_layers[*phyid].ifindex = dev->ifindex; 440 phyinfo->use_fcs = fcs;
441 cnfg->phy_layers[*phyid].use_stx = stx;
442 cnfg->phy_layers[*phyid].use_fcs = fcs;
443 441
444 phy_layer->type = phy_type; 442 phy_layer->type = phy_type;
445 frml = cffrml_create(*phyid, fcs); 443 frml = cffrml_create(phyid, fcs);
444
446 if (!frml) { 445 if (!frml) {
447 pr_warn("Out of memory\n"); 446 pr_warn("Out of memory\n");
448 return; 447 kfree(phyinfo);
448 goto out;
449 } 449 }
450 cnfg->phy_layers[*phyid].frm_layer = frml; 450 phyinfo->frm_layer = frml;
451 cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid);
452 layer_set_up(frml, cnfg->mux); 451 layer_set_up(frml, cnfg->mux);
453 452
454 if (phy_driver != NULL) { 453 if (phy_driver != NULL) {
455 phy_driver->id = *phyid; 454 phy_driver->id = phyid;
456 layer_set_dn(frml, phy_driver); 455 layer_set_dn(frml, phy_driver);
457 layer_set_up(phy_driver, frml); 456 layer_set_up(phy_driver, frml);
458 layer_set_dn(phy_driver, phy_layer); 457 layer_set_dn(phy_driver, phy_layer);
@@ -461,33 +460,87 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
461 layer_set_dn(frml, phy_layer); 460 layer_set_dn(frml, phy_layer);
462 layer_set_up(phy_layer, frml); 461 layer_set_up(phy_layer, frml);
463 } 462 }
463
464 list_add_rcu(&phyinfo->node, &cnfg->phys);
465out:
466 mutex_unlock(&cnfg->lock);
464} 467}
465EXPORT_SYMBOL(cfcnfg_add_phy_layer); 468EXPORT_SYMBOL(cfcnfg_add_phy_layer);
466 469
470int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
471 bool up)
472{
473 struct cfcnfg_phyinfo *phyinfo;
474
475 rcu_read_lock();
476 phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phy_layer->id);
477 if (phyinfo == NULL) {
478 rcu_read_unlock();
479 return -ENODEV;
480 }
481
482 if (phyinfo->up == up) {
483 rcu_read_unlock();
484 return 0;
485 }
486 phyinfo->up = up;
487
488 if (up) {
489 cffrml_hold(phyinfo->frm_layer);
490 cfmuxl_set_dnlayer(cnfg->mux, phyinfo->frm_layer,
491 phy_layer->id);
492 } else {
493 cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
494 cffrml_put(phyinfo->frm_layer);
495 }
496
497 rcu_read_unlock();
498 return 0;
499}
500EXPORT_SYMBOL(cfcnfg_set_phy_state);
501
467int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer) 502int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
468{ 503{
469 struct cflayer *frml, *frml_dn; 504 struct cflayer *frml, *frml_dn;
470 u16 phyid; 505 u16 phyid;
506 struct cfcnfg_phyinfo *phyinfo;
507
508 might_sleep();
509
510 mutex_lock(&cnfg->lock);
511
471 phyid = phy_layer->id; 512 phyid = phy_layer->id;
472 caif_assert(phyid == cnfg->phy_layers[phyid].id); 513 phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
473 caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer); 514
515 if (phyinfo == NULL)
516 return 0;
517 caif_assert(phyid == phyinfo->id);
518 caif_assert(phy_layer == phyinfo->phy_layer);
474 caif_assert(phy_layer->id == phyid); 519 caif_assert(phy_layer->id == phyid);
475 caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid); 520 caif_assert(phyinfo->frm_layer->id == phyid);
521
522 list_del_rcu(&phyinfo->node);
523 synchronize_rcu();
476 524
477 memset(&cnfg->phy_layers[phy_layer->id], 0, 525 frml = phyinfo->frm_layer;
478 sizeof(struct cfcnfg_phyinfo));
479 frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
480 frml_dn = frml->dn; 526 frml_dn = frml->dn;
481 cffrml_set_uplayer(frml, NULL); 527 cffrml_set_uplayer(frml, NULL);
482 cffrml_set_dnlayer(frml, NULL); 528 cffrml_set_dnlayer(frml, NULL);
483 kfree(frml);
484
485 if (phy_layer != frml_dn) { 529 if (phy_layer != frml_dn) {
486 layer_set_up(frml_dn, NULL); 530 layer_set_up(frml_dn, NULL);
487 layer_set_dn(frml_dn, NULL); 531 layer_set_dn(frml_dn, NULL);
488 kfree(frml_dn);
489 } 532 }
490 layer_set_up(phy_layer, NULL); 533 layer_set_up(phy_layer, NULL);
534
535
536
537 if (phyinfo->phy_layer != frml_dn)
538 kfree(frml_dn);
539
540 kfree(frml);
541 kfree(phyinfo);
542 mutex_unlock(&cnfg->lock);
543
491 return 0; 544 return 0;
492} 545}
493EXPORT_SYMBOL(cfcnfg_del_phy_layer); 546EXPORT_SYMBOL(cfcnfg_del_phy_layer);