author		Patrick McHardy <kaber@trash.net>	2011-03-28 04:39:36 -0400
committer	David S. Miller <davem@davemloft.net>	2011-03-30 20:14:33 -0400
commit		04f482faf50535229a5a5c8d629cf963899f857c (patch)
tree		698d5c8e22e68f9a621c03972556e3a73c525465 /drivers/connector
parent		e2666f84958adb3a034b98e99699b55705117e01 (diff)
connector: convert to synchronous netlink message processing
Commits 01a16b21 (netlink: kill eff_cap from struct netlink_skb_parms)
and c53fa1ed (netlink: kill loginuid/sessionid/sid members from struct
netlink_skb_parms) removed some members from struct netlink_skb_parms
that depend on the current context; as a result, all netlink users are now
required to do synchronous message processing.
Connector, however, queues received messages and processes them in a work
queue, which is no longer valid. This patch converts connector to
synchronous message processing by invoking the registered callback handler
directly from the netlink receive function.
To avoid invoking the callback with connector locks held, a reference
count is added to struct cn_callback_entry; the reference is taken when a
matching callback entry is found on the device's queue_list and released
after the callback handler has been invoked.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Acked-by: Evgeniy Polyakov <zbr@ioremap.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
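For readers unfamiliar with the pattern described in the message above, here is a minimal userspace sketch of the same refcounted lookup-and-dispatch scheme. It is plain C11 with pthreads and stdatomic rather than kernel code, and all names (struct entry, dispatch(), entry_release()) are illustrative only; the kernel version in the diff below uses atomic_t, spin_lock_bh() and list_for_each_entry() instead.

/* Illustrative only: userspace analogue of taking a reference under the
 * queue lock, invoking the callback without the lock held, and freeing
 * the entry once the last reference is dropped. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int id;
	atomic_int refcnt;		/* one reference is owned by the list */
	void (*callback)(int id);
	struct entry *next;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *queue_head;

static void entry_release(struct entry *e)
{
	/* Free only when the last reference goes away. */
	if (atomic_fetch_sub(&e->refcnt, 1) == 1)
		free(e);
}

static void dispatch(int id)
{
	struct entry *e, *found = NULL;

	pthread_mutex_lock(&queue_lock);
	for (e = queue_head; e; e = e->next) {
		if (e->id == id) {
			atomic_fetch_add(&e->refcnt, 1);	/* pin the entry */
			found = e;
			break;
		}
	}
	pthread_mutex_unlock(&queue_lock);

	if (found) {
		found->callback(id);	/* callback runs without the lock held */
		entry_release(found);
	}
}

static void hello(int id)
{
	printf("callback for id %d\n", id);
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	e->id = 1;
	atomic_init(&e->refcnt, 1);
	e->callback = hello;
	queue_head = e;

	dispatch(1);
	return 0;
}

The point, as in the patch, is that only the lookup needs the lock; the callback itself runs after the lock is dropped, and the extra reference keeps the entry alive even if it is unregistered concurrently.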
Diffstat (limited to 'drivers/connector')
-rw-r--r--	drivers/connector/cn_queue.c	| 58
-rw-r--r--	drivers/connector/connector.c	| 47
2 files changed, 29 insertions, 76 deletions
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 55653aba6735..c42c9d517790 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,24 +31,9 @@
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-void cn_queue_wrapper(struct work_struct *work)
-{
-	struct cn_callback_entry *cbq =
-		container_of(work, struct cn_callback_entry, work);
-	struct cn_callback_data *d = &cbq->data;
-	struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
-	struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);
-
-	d->callback(msg, nsp);
-
-	kfree_skb(d->skb);
-	d->skb = NULL;
-
-	kfree(d->free);
-}
-
 static struct cn_callback_entry *
-cn_queue_alloc_callback_entry(const char *name, struct cb_id *id,
+cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
+			      struct cb_id *id,
 			      void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
 {
 	struct cn_callback_entry *cbq;
@@ -59,17 +44,23 @@ cn_queue_alloc_callback_entry(const char *name, struct cb_id *id,
 		return NULL;
 	}
 
+	atomic_set(&cbq->refcnt, 1);
+
+	atomic_inc(&dev->refcnt);
+	cbq->pdev = dev;
+
 	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
 	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
-	cbq->data.callback = callback;
-
-	INIT_WORK(&cbq->work, &cn_queue_wrapper);
+	cbq->callback = callback;
 	return cbq;
 }
 
-static void cn_queue_free_callback(struct cn_callback_entry *cbq)
+void cn_queue_release_callback(struct cn_callback_entry *cbq)
 {
-	flush_workqueue(cbq->pdev->cn_queue);
+	if (!atomic_dec_and_test(&cbq->refcnt))
+		return;
+
+	atomic_dec(&cbq->pdev->refcnt);
 	kfree(cbq);
 }
 
@@ -85,13 +76,10 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
 	struct cn_callback_entry *cbq, *__cbq;
 	int found = 0;
 
-	cbq = cn_queue_alloc_callback_entry(name, id, callback);
+	cbq = cn_queue_alloc_callback_entry(dev, name, id, callback);
 	if (!cbq)
 		return -ENOMEM;
 
-	atomic_inc(&dev->refcnt);
-	cbq->pdev = dev;
-
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
 		if (cn_cb_equal(&__cbq->id.id, id)) {
@@ -104,8 +92,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
 	spin_unlock_bh(&dev->queue_lock);
 
 	if (found) {
-		cn_queue_free_callback(cbq);
-		atomic_dec(&dev->refcnt);
+		cn_queue_release_callback(cbq);
 		return -EINVAL;
 	}
 
@@ -130,10 +117,8 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
 	}
 	spin_unlock_bh(&dev->queue_lock);
 
-	if (found) {
-		cn_queue_free_callback(cbq);
-		atomic_dec(&dev->refcnt);
-	}
+	if (found)
+		cn_queue_release_callback(cbq);
 }
 
 struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
@@ -151,12 +136,6 @@ struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
 
 	dev->nls = nls;
 
-	dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
-	if (!dev->cn_queue) {
-		kfree(dev);
-		return NULL;
-	}
-
 	return dev;
 }
 
@@ -164,9 +143,6 @@ void cn_queue_free_dev(struct cn_queue_dev *dev)
 {
 	struct cn_callback_entry *cbq, *n;
 
-	flush_workqueue(dev->cn_queue);
-	destroy_workqueue(dev->cn_queue);
-
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
 		list_del(&cbq->callback_entry);
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index f7554de3be5e..d77005849af8 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -122,51 +122,28 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
  */
 static int cn_call_callback(struct sk_buff *skb)
 {
-	struct cn_callback_entry *__cbq, *__new_cbq;
+	struct cn_callback_entry *i, *cbq = NULL;
 	struct cn_dev *dev = &cdev;
 	struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
+	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
 	int err = -ENODEV;
 
 	spin_lock_bh(&dev->cbdev->queue_lock);
-	list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
-		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
-			if (likely(!work_pending(&__cbq->work) &&
-				   __cbq->data.skb == NULL)) {
-				__cbq->data.skb = skb;
-
-				if (queue_work(dev->cbdev->cn_queue,
-					       &__cbq->work))
-					err = 0;
-				else
-					err = -EINVAL;
-			} else {
-				struct cn_callback_data *d;
-
-				err = -ENOMEM;
-				__new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
-				if (__new_cbq) {
-					d = &__new_cbq->data;
-					d->skb = skb;
-					d->callback = __cbq->data.callback;
-					d->free = __new_cbq;
-
-					INIT_WORK(&__new_cbq->work,
-						  &cn_queue_wrapper);
-
-					if (queue_work(dev->cbdev->cn_queue,
-						       &__new_cbq->work))
-						err = 0;
-					else {
-						kfree(__new_cbq);
-						err = -EINVAL;
-					}
-				}
-			}
+	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
+		if (cn_cb_equal(&i->id.id, &msg->id)) {
+			atomic_inc(&i->refcnt);
+			cbq = i;
 			break;
 		}
 	}
 	spin_unlock_bh(&dev->cbdev->queue_lock);
 
+	if (cbq != NULL) {
+		cbq->callback(msg, nsp);
+		kfree_skb(skb);
+		cn_queue_release_callback(cbq);
+	}
+
 	return err;
 }
 