path: root/net/caif/cfmuxl.c
author	sjur.brandeland@stericsson.com <sjur.brandeland@stericsson.com>	2011-05-12 22:43:59 -0400
committer	David S. Miller <davem@davemloft.net>	2011-05-15 17:45:54 -0400
commit	0b1e9738deb30f4c35c0add43a52dcd0608b227e (patch)
tree	b629d3d0db7fead50ab4256f4ace6d29e7aa980c /net/caif/cfmuxl.c
parent	1b1cb1f78a5e9d54c13e176020c3e8ded5d081ce (diff)
caif: Use rcu_read_lock in CAIF mux layer.
Replace spin_lock with rcu_read_lock when accessing the lists of layers and the layer caches. While packets are in flight, rcu_read_lock should not be held; instead, ref-counters are used in combination with RCU.

Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
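The locking pattern this introduces can be seen in condensed form below (a sketch distilled from the receive path in this patch, with error handling trimmed; not a verbatim excerpt):

	rcu_read_lock();
	up = get_up(muxl, id);		/* lockless lookup in the RCU-protected list/cache */
	if (up == NULL) {
		rcu_read_unlock();
		return 0;
	}
	cfsrvl_get(up);			/* pin the layer with a ref-count ... */
	rcu_read_unlock();		/* ... so the RCU read section can end before */
	ret = up->receive(up, pkt);	/* the packet goes in flight */
	cfsrvl_put(up);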
Diffstat (limited to 'net/caif/cfmuxl.c')
-rw-r--r--	net/caif/cfmuxl.c	119
1 file changed, 75 insertions(+), 44 deletions(-)
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index fc2497468571..2a56df7e0a4b 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -9,6 +9,7 @@
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/rculist.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfmuxl.h>
 #include <net/caif/cfsrvl.h>
@@ -64,31 +65,31 @@ struct cflayer *cfmuxl_create(void)
 int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
 {
 	struct cfmuxl *muxl = container_obj(layr);
-	spin_lock(&muxl->receive_lock);
-	cfsrvl_get(up);
-	list_add(&up->node, &muxl->srvl_list);
-	spin_unlock(&muxl->receive_lock);
+
+	spin_lock_bh(&muxl->receive_lock);
+	list_add_rcu(&up->node, &muxl->srvl_list);
+	spin_unlock_bh(&muxl->receive_lock);
 	return 0;
 }
 
 int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
 {
 	struct cfmuxl *muxl = (struct cfmuxl *) layr;
-	spin_lock(&muxl->transmit_lock);
-	list_add(&dn->node, &muxl->frml_list);
-	spin_unlock(&muxl->transmit_lock);
+
+	spin_lock_bh(&muxl->transmit_lock);
+	list_add_rcu(&dn->node, &muxl->frml_list);
+	spin_unlock_bh(&muxl->transmit_lock);
 	return 0;
 }
 
 static struct cflayer *get_from_id(struct list_head *list, u16 id)
 {
-	struct list_head *node;
-	struct cflayer *layer;
-	list_for_each(node, list) {
-		layer = list_entry(node, struct cflayer, node);
-		if (layer->id == id)
-			return layer;
+	struct cflayer *lyr;
+	list_for_each_entry_rcu(lyr, list, node) {
+		if (lyr->id == id)
+			return lyr;
 	}
+
 	return NULL;
 }
 
@@ -96,41 +97,45 @@ struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
 {
 	struct cfmuxl *muxl = container_obj(layr);
 	struct cflayer *dn;
-	spin_lock(&muxl->transmit_lock);
-	memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache));
+	int idx = phyid % DN_CACHE_SIZE;
+
+	spin_lock_bh(&muxl->transmit_lock);
+	rcu_assign_pointer(muxl->dn_cache[idx], NULL);
 	dn = get_from_id(&muxl->frml_list, phyid);
-	if (dn == NULL) {
-		spin_unlock(&muxl->transmit_lock);
-		return NULL;
-	}
-	list_del(&dn->node);
+	if (dn == NULL)
+		goto out;
+
+	list_del_rcu(&dn->node);
 	caif_assert(dn != NULL);
-	spin_unlock(&muxl->transmit_lock);
+out:
+	spin_unlock_bh(&muxl->transmit_lock);
 	return dn;
 }
 
-/* Invariant: lock is taken */
 static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
 {
 	struct cflayer *up;
 	int idx = id % UP_CACHE_SIZE;
-	up = muxl->up_cache[idx];
+	up = rcu_dereference(muxl->up_cache[idx]);
 	if (up == NULL || up->id != id) {
+		spin_lock_bh(&muxl->receive_lock);
 		up = get_from_id(&muxl->srvl_list, id);
-		muxl->up_cache[idx] = up;
+		rcu_assign_pointer(muxl->up_cache[idx], up);
+		spin_unlock_bh(&muxl->receive_lock);
 	}
 	return up;
 }
 
-/* Invariant: lock is taken */
 static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
 {
 	struct cflayer *dn;
 	int idx = dev_info->id % DN_CACHE_SIZE;
-	dn = muxl->dn_cache[idx];
+	dn = rcu_dereference(muxl->dn_cache[idx]);
 	if (dn == NULL || dn->id != dev_info->id) {
+		spin_lock_bh(&muxl->transmit_lock);
 		dn = get_from_id(&muxl->frml_list, dev_info->id);
-		muxl->dn_cache[idx] = dn;
+		rcu_assign_pointer(muxl->dn_cache[idx], dn);
+		spin_unlock_bh(&muxl->transmit_lock);
 	}
 	return dn;
 }
@@ -139,15 +144,17 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
 {
 	struct cflayer *up;
 	struct cfmuxl *muxl = container_obj(layr);
-	spin_lock(&muxl->receive_lock);
-	up = get_up(muxl, id);
+	int idx = id % UP_CACHE_SIZE;
+
+	spin_lock_bh(&muxl->receive_lock);
+	up = get_from_id(&muxl->srvl_list, id);
 	if (up == NULL)
 		goto out;
-	memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
-	list_del(&up->node);
-	cfsrvl_put(up);
+
+	rcu_assign_pointer(muxl->up_cache[idx], NULL);
+	list_del_rcu(&up->node);
 out:
-	spin_unlock(&muxl->receive_lock);
+	spin_unlock_bh(&muxl->receive_lock);
 	return up;
 }
 
@@ -162,22 +169,28 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
 		cfpkt_destroy(pkt);
 		return -EPROTO;
 	}
-
-	spin_lock(&muxl->receive_lock);
+	rcu_read_lock();
 	up = get_up(muxl, id);
-	spin_unlock(&muxl->receive_lock);
+
 	if (up == NULL) {
-		pr_info("Received data on unknown link ID = %d (0x%x) up == NULL",
-			id, id);
+		pr_debug("Received data on unknown link ID = %d (0x%x)"
+			" up == NULL", id, id);
 		cfpkt_destroy(pkt);
 		/*
 		 * Don't return ERROR, since modem misbehaves and sends out
 		 * flow on before linksetup response.
 		 */
+
+		rcu_read_unlock();
 		return /* CFGLU_EPROT; */ 0;
 	}
+
+	/* We can't hold rcu_lock during receive, so take a ref count instead */
 	cfsrvl_get(up);
+	rcu_read_unlock();
+
 	ret = up->receive(up, pkt);
+
 	cfsrvl_put(up);
 	return ret;
 }
@@ -185,31 +198,49 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
 static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
 	struct cfmuxl *muxl = container_obj(layr);
+	int err;
 	u8 linkid;
 	struct cflayer *dn;
 	struct caif_payload_info *info = cfpkt_info(pkt);
 	BUG_ON(!info);
+
+	rcu_read_lock();
+
 	dn = get_dn(muxl, info->dev_info);
 	if (dn == NULL) {
-		pr_warn("Send data on unknown phy ID = %d (0x%x)\n",
+		pr_debug("Send data on unknown phy ID = %d (0x%x)\n",
 			info->dev_info->id, info->dev_info->id);
+		rcu_read_unlock();
+		cfpkt_destroy(pkt);
 		return -ENOTCONN;
 	}
+
 	info->hdr_len += 1;
 	linkid = info->channel_id;
 	cfpkt_add_head(pkt, &linkid, 1);
-	return dn->transmit(dn, pkt);
+
+	/* We can't hold rcu_lock during receive, so take a ref count instead */
+	cffrml_hold(dn);
+
+	rcu_read_unlock();
+
+	err = dn->transmit(dn, pkt);
+
+	cffrml_put(dn);
+	return err;
 }
 
 static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 	int phyid)
 {
 	struct cfmuxl *muxl = container_obj(layr);
-	struct list_head *node, *next;
 	struct cflayer *layer;
-	list_for_each_safe(node, next, &muxl->srvl_list) {
-		layer = list_entry(node, struct cflayer, node);
-		if (cfsrvl_phyid_match(layer, phyid))
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
+		if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd)
+			/* NOTE: ctrlcmd is not allowed to block */
 			layer->ctrlcmd(layer, ctrl, phyid);
 	}
+	rcu_read_unlock();
 }