author      Sjur Braendeland <sjur.brandeland@stericsson.com>    2010-05-20 22:16:08 -0400
committer   David S. Miller <davem@davemloft.net>                2010-05-24 02:57:41 -0400
commit      7aecf4944f2c05aafb73b4820e469c74b4ec8517 (patch)
tree        b3237c96401a0f35c76798c1b04b19eef129a6da /net/caif/cfctrl.c
parent      9e4b816bc31962ebbb8784d602acd5fa25a08ad8 (diff)
caif: Bugfix - use standard Linux lists
Discovered a bug when running a high number of parallel connect requests.
Replace the buggy home-brewed list with linux/list.h.
Signed-off-by: Sjur Braendeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/caif/cfctrl.c')
-rw-r--r--   net/caif/cfctrl.c   92
1 file changed, 26 insertions, 66 deletions
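The fix is the standard <linux/list.h> pattern: embed a struct list_head in each request, keep a list head plus a spinlock in the owning structure, and use list_for_each_entry_safe() wherever entries may be deleted during traversal. The sketch below is illustrative only: the structure layouts are trimmed stand-ins for the real definitions in cfctrl.h, and the example_* helpers are hypothetical names rather than functions from the patch.

```c
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

/* Simplified stand-ins for the cfctrl structures; the real definitions
 * carry more fields. Only the list plumbing is shown. */
struct cfctrl_request_info {
	int sequence_no;
	struct list_head list;		/* links the request into cfctrl->list */
};

struct cfctrl {
	struct list_head list;		/* head of the pending-request list */
	spinlock_t info_list_lock;
};

static void example_init(struct cfctrl *ctrl)
{
	spin_lock_init(&ctrl->info_list_lock);
	INIT_LIST_HEAD(&ctrl->list);	/* empty list: head points to itself */
}

static void example_insert(struct cfctrl *ctrl, struct cfctrl_request_info *req)
{
	spin_lock(&ctrl->info_list_lock);
	list_add_tail(&req->list, &ctrl->list);	/* append in FIFO order */
	spin_unlock(&ctrl->info_list_lock);
}

static void example_cancel_all(struct cfctrl *ctrl)
{
	struct cfctrl_request_info *p, *tmp;

	spin_lock(&ctrl->info_list_lock);
	/* The _safe variant caches the next entry before the body runs,
	 * so deleting and freeing the current entry is safe. */
	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
		list_del(&p->list);
		kfree(p);
	}
	spin_unlock(&ctrl->info_list_lock);
}
```

That lookahead in the _safe iterator is what lets cfctrl_remove_req() and cfctrl_cancel_req() in the diff below call list_del() (and kfree()) on the current entry without corrupting the traversal.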
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 0ffe1e1ce901..fcfda98a5e6d 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -44,13 +44,14 @@ struct cflayer *cfctrl_create(void)
 	dev_info.id = 0xff;
 	memset(this, 0, sizeof(*this));
 	cfsrvl_init(&this->serv, 0, &dev_info);
-	spin_lock_init(&this->info_list_lock);
 	atomic_set(&this->req_seq_no, 1);
 	atomic_set(&this->rsp_seq_no, 1);
 	this->serv.layer.receive = cfctrl_recv;
 	sprintf(this->serv.layer.name, "ctrl");
 	this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
 	spin_lock_init(&this->loop_linkid_lock);
+	spin_lock_init(&this->info_list_lock);
+	INIT_LIST_HEAD(&this->list);
 	this->loop_linkid = 1;
 	return &this->serv.layer;
 }
@@ -112,20 +113,10 @@ bool cfctrl_req_eq(struct cfctrl_request_info *r1,
 void cfctrl_insert_req(struct cfctrl *ctrl,
 		       struct cfctrl_request_info *req)
 {
-	struct cfctrl_request_info *p;
 	spin_lock(&ctrl->info_list_lock);
-	req->next = NULL;
 	atomic_inc(&ctrl->req_seq_no);
 	req->sequence_no = atomic_read(&ctrl->req_seq_no);
-	if (ctrl->first_req == NULL) {
-		ctrl->first_req = req;
-		spin_unlock(&ctrl->info_list_lock);
-		return;
-	}
-	p = ctrl->first_req;
-	while (p->next != NULL)
-		p = p->next;
-	p->next = req;
+	list_add_tail(&req->list, &ctrl->list);
 	spin_unlock(&ctrl->info_list_lock);
 }
 
@@ -133,46 +124,28 @@ void cfctrl_insert_req(struct cfctrl *ctrl,
 struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
 					      struct cfctrl_request_info *req)
 {
-	struct cfctrl_request_info *p;
-	struct cfctrl_request_info *ret;
+	struct cfctrl_request_info *p, *tmp, *first;
 
 	spin_lock(&ctrl->info_list_lock);
-	if (ctrl->first_req == NULL) {
-		spin_unlock(&ctrl->info_list_lock);
-		return NULL;
-	}
-
-	if (cfctrl_req_eq(req, ctrl->first_req)) {
-		ret = ctrl->first_req;
-		caif_assert(ctrl->first_req);
-		atomic_set(&ctrl->rsp_seq_no,
-			   ctrl->first_req->sequence_no);
-		ctrl->first_req = ctrl->first_req->next;
-		spin_unlock(&ctrl->info_list_lock);
-		return ret;
-	}
+	first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list);
 
-	p = ctrl->first_req;
-
-	while (p->next != NULL) {
-		if (cfctrl_req_eq(req, p->next)) {
-			pr_warning("CAIF: %s(): Requests are not "
-				   "received in order\n",
-				   __func__);
-			ret = p->next;
-			atomic_set(&ctrl->rsp_seq_no,
-				   p->next->sequence_no);
-			p->next = p->next->next;
-			spin_unlock(&ctrl->info_list_lock);
-			return ret;
+	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
+		if (cfctrl_req_eq(req, p)) {
+			if (p != first)
+				pr_warning("CAIF: %s(): Requests are not "
+					   "received in order\n",
+					   __func__);
+
+			atomic_set(&ctrl->rsp_seq_no,
+				   p->sequence_no);
+			list_del(&p->list);
+			goto out;
 		}
-		p = p->next;
 	}
+	p = NULL;
+out:
 	spin_unlock(&ctrl->info_list_lock);
-
-	pr_warning("CAIF: %s(): Request does not match\n",
-		   __func__);
-	return NULL;
+	return p;
 }
 
 struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
@@ -388,31 +361,18 @@ void cfctrl_getstartreason_req(struct cflayer *layer)
 
 void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
 {
-	struct cfctrl_request_info *p, *req;
+	struct cfctrl_request_info *p, *tmp;
 	struct cfctrl *ctrl = container_obj(layr);
 	spin_lock(&ctrl->info_list_lock);
+	pr_warning("CAIF: %s(): enter\n", __func__);
 
-	if (ctrl->first_req == NULL) {
-		spin_unlock(&ctrl->info_list_lock);
-		return;
-	}
-
-	if (ctrl->first_req->client_layer == adap_layer) {
-
-		req = ctrl->first_req;
-		ctrl->first_req = ctrl->first_req->next;
-		kfree(req);
-	}
-
-	p = ctrl->first_req;
-	while (p != NULL && p->next != NULL) {
-		if (p->next->client_layer == adap_layer) {
-
-			req = p->next;
-			p->next = p->next->next;
-			kfree(p->next);
+	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
+		if (p->client_layer == adap_layer) {
+			pr_warning("CAIF: %s(): cancel req :%d\n", __func__,
+				   p->sequence_no);
+			list_del(&p->list);
+			kfree(p);
 		}
-		p = p->next;
 	}
 
 	spin_unlock(&ctrl->info_list_lock);
@@ -634,7 +594,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 	case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
 	case CAIF_CTRLCMD_FLOW_OFF_IND:
 		spin_lock(&this->info_list_lock);
-		if (this->first_req != NULL) {
+		if (!list_empty(&this->list)) {
 			pr_debug("CAIF: %s(): Received flow off in "
 				 "control layer", __func__);
 		}