author     Linus Torvalds <torvalds@g5.osdl.org>    2005-09-26 21:33:26 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-09-26 21:33:26 -0400
commit     5c1f4cac6ff75a4a602bae960a054ed3df7e9765 (patch)
tree       31b0b05a41345e9dbf802a309ddf21eb506e8550 /drivers
parent     c6a519d2aac024d8ca5658bddd78af474b274e4b (diff)
parent     56e9b263242ca80a70abd8831343b268315c27dc (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/connector/cn_queue.c   |  32
-rw-r--r--  drivers/connector/connector.c  |  74
2 files changed, 57 insertions, 49 deletions
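
For orientation, the hunks below rework how the connector stores a registered callback: the caller no longer allocates a struct cn_callback; cn_queue_add_callback() now takes the name, cb_id and callback function directly and copies them into the cn_callback_entry. A minimal sketch of a client module using the public entry point cn_add_callback() (prototype visible in the last hunk) is shown here; the idx/val pair and the "cn_example" name are made-up placeholders, not values from this commit, and cn_del_callback() is the assumed unregister counterpart from connector.h:

```c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/connector.h>

/* Placeholder identifier; a real client would use an idx/val pair
 * reserved in connector.h. */
static struct cb_id example_id = { .idx = 0x123, .val = 0x456 };

/* Invoked via cn_queue_wrapper(); data is the struct cn_msg that
 * cn_call_callback() stored in callback_priv. */
static void example_callback(void *data)
{
	struct cn_msg *msg = data;

	printk(KERN_INFO "cn_example: seq=%u ack=%u len=%u\n",
	       msg->seq, msg->ack, msg->len);
}

static int __init example_init(void)
{
	/* The core now copies the name and id itself; no caller-side
	 * struct cn_callback allocation is required. */
	return cn_add_callback(&example_id, "cn_example", example_callback);
}

static void __exit example_exit(void)
{
	cn_del_callback(&example_id);	/* assumed unregister counterpart */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```
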
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 966632182e2d..9f2f00d82917 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,16 +31,19 @@
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-static void cn_queue_wrapper(void *data)
+void cn_queue_wrapper(void *data)
 {
-	struct cn_callback_entry *cbq = data;
+	struct cn_callback_data *d = data;
 
-	cbq->cb->callback(cbq->cb->priv);
-	cbq->destruct_data(cbq->ddata);
-	cbq->ddata = NULL;
+	d->callback(d->callback_priv);
+
+	d->destruct_data(d->ddata);
+	d->ddata = NULL;
+
+	kfree(d->free);
 }
 
-static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callback *cb)
+static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struct cb_id *id, void (*callback)(void *))
 {
 	struct cn_callback_entry *cbq;
 
@@ -50,8 +53,11 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callbac
 		return NULL;
 	}
 
-	cbq->cb = cb;
-	INIT_WORK(&cbq->work, &cn_queue_wrapper, cbq);
+	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
+	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
+	cbq->data.callback = callback;
+
+	INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
 	return cbq;
 }
 
@@ -68,12 +74,12 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
 	return ((i1->idx == i2->idx) && (i1->val == i2->val));
 }
 
-int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
+int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *))
 {
 	struct cn_callback_entry *cbq, *__cbq;
 	int found = 0;
 
-	cbq = cn_queue_alloc_callback_entry(cb);
+	cbq = cn_queue_alloc_callback_entry(name, id, callback);
 	if (!cbq)
 		return -ENOMEM;
 
@@ -82,7 +88,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
 
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
-		if (cn_cb_equal(&__cbq->cb->id, &cb->id)) {
+		if (cn_cb_equal(&__cbq->id.id, id)) {
 			found = 1;
 			break;
 		}
@@ -99,7 +105,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
 
 	cbq->nls = dev->nls;
 	cbq->seq = 0;
-	cbq->group = cbq->cb->id.idx;
+	cbq->group = cbq->id.id.idx;
 
 	return 0;
 }
@@ -111,7 +117,7 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
 
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
-		if (cn_cb_equal(&cbq->cb->id, id)) {
+		if (cn_cb_equal(&cbq->id.id, id)) {
 			list_del(&cbq->callback_entry);
 			found = 1;
 			break;
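
The d->callback, d->callback_priv and d->free fields used in cn_queue_wrapper() above belong to struct cn_callback_data, which this change embeds in each cn_callback_entry and, in the overflow path in connector.c below, appends to a dynamically allocated work_struct. Its authoritative definition lives in include/linux/connector.h, not in these hunks; the sketch below is inferred from the field accesses, so the exact layout is an assumption:

```c
/* Inferred from the accesses in cn_queue.c and connector.c in this diff;
 * the real definition is in include/linux/connector.h. */
struct cn_callback_data {
	void (*destruct_data)(void *);	/* frees ddata after delivery */
	void *ddata;

	void *callback_priv;		/* the struct cn_msg being delivered */
	void (*callback)(void *);

	void *free;			/* non-NULL when the work item was
					 * kzalloc()'ed on the fly; kfree()'d
					 * by cn_queue_wrapper() */
};
```
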
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index aaf6d468a8b9..bb0b3a8de14b 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -84,7 +84,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask)
 	spin_lock_bh(&dev->cbdev->queue_lock);
 	list_for_each_entry(__cbq, &dev->cbdev->queue_list,
 			callback_entry) {
-		if (cn_cb_equal(&__cbq->cb->id, &msg->id)) {
+		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
 			found = 1;
 			group = __cbq->group;
 		}
@@ -127,42 +127,56 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
 {
 	struct cn_callback_entry *__cbq;
 	struct cn_dev *dev = &cdev;
-	int found = 0;
+	int err = -ENODEV;
 
 	spin_lock_bh(&dev->cbdev->queue_lock);
 	list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
-		if (cn_cb_equal(&__cbq->cb->id, &msg->id)) {
-			/*
-			 * Let's scream if there is some magic and the
-			 * data will arrive asynchronously here.
-			 * [i.e. netlink messages will be queued].
-			 * After the first warning I will fix it
-			 * quickly, but now I think it is
-			 * impossible. --zbr (2004_04_27).
-			 */
+		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
 			if (likely(!test_bit(0, &__cbq->work.pending) &&
-					__cbq->ddata == NULL)) {
-				__cbq->cb->priv = msg;
+					__cbq->data.ddata == NULL)) {
+				__cbq->data.callback_priv = msg;
 
-				__cbq->ddata = data;
-				__cbq->destruct_data = destruct_data;
+				__cbq->data.ddata = data;
+				__cbq->data.destruct_data = destruct_data;
 
 				if (queue_work(dev->cbdev->cn_queue,
 						&__cbq->work))
-					found = 1;
+					err = 0;
 			} else {
-				printk("%s: cbq->data=%p, "
-					"work->pending=%08lx.\n",
-					__func__, __cbq->ddata,
-					__cbq->work.pending);
-				WARN_ON(1);
+				struct work_struct *w;
+				struct cn_callback_data *d;
+
+				w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
+				if (w) {
+					d = (struct cn_callback_data *)(w+1);
+
+					d->callback_priv = msg;
+					d->callback = __cbq->data.callback;
+					d->ddata = data;
+					d->destruct_data = destruct_data;
+					d->free = w;
+
+					INIT_LIST_HEAD(&w->entry);
+					w->pending = 0;
+					w->func = &cn_queue_wrapper;
+					w->data = d;
+					init_timer(&w->timer);
+
+					if (queue_work(dev->cbdev->cn_queue, w))
+						err = 0;
+					else {
+						kfree(w);
+						err = -EINVAL;
+					}
+				} else
+					err = -ENOMEM;
 			}
 			break;
 		}
 	}
 	spin_unlock_bh(&dev->cbdev->queue_lock);
 
-	return found ? 0 : -ENODEV;
+	return err;
 }
 
 /*
@@ -291,22 +305,10 @@ int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *))
 {
 	int err;
 	struct cn_dev *dev = &cdev;
-	struct cn_callback *cb;
-
-	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
-	if (!cb)
-		return -ENOMEM;
-
-	scnprintf(cb->name, sizeof(cb->name), "%s", name);
 
-	memcpy(&cb->id, id, sizeof(cb->id));
-	cb->callback = callback;
-
-	err = cn_queue_add_callback(dev->cbdev, cb);
-	if (err) {
-		kfree(cb);
+	err = cn_queue_add_callback(dev->cbdev, name, id, callback);
+	if (err)
 		return err;
-	}
 
 	cn_notify(id, 0);
 