Diffstat (limited to 'drivers/connector/cn_queue.c')
-rw-r--r-- | drivers/connector/cn_queue.c | 58 | +++++++++++++++++-----------------------------------------
1 file changed, 17 insertions(+), 41 deletions(-)
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 55653aba6735..c42c9d517790 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,24 +31,9 @@
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-void cn_queue_wrapper(struct work_struct *work)
-{
-	struct cn_callback_entry *cbq =
-		container_of(work, struct cn_callback_entry, work);
-	struct cn_callback_data *d = &cbq->data;
-	struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
-	struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);
-
-	d->callback(msg, nsp);
-
-	kfree_skb(d->skb);
-	d->skb = NULL;
-
-	kfree(d->free);
-}
-
 static struct cn_callback_entry *
-cn_queue_alloc_callback_entry(const char *name, struct cb_id *id,
+cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
+			      struct cb_id *id,
 			      void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
 {
 	struct cn_callback_entry *cbq;
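
The deleted cn_queue_wrapper() is a textbook use of container_of(): the workqueue hands the callback only the embedded work_struct, and the wrapper recovers the enclosing cn_callback_entry from it. For reference, a minimal userspace sketch of the same idiom; my_entry, my_work and handle_work are illustrative names, not kernel API:

/*
 * Sketch of the container_of pattern the deleted cn_queue_wrapper()
 * relied on: recover the enclosing structure from a pointer to one of
 * its members.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_work {
	int pending;
};

struct my_entry {
	char name[16];
	struct my_work work;	/* embedded member, like cbq->work */
};

static void handle_work(struct my_work *w)
{
	struct my_entry *e = container_of(w, struct my_entry, work);

	printf("work for entry '%s'\n", e->name);
}

int main(void)
{
	struct my_entry e = { .name = "cn_test" };

	handle_work(&e.work);	/* only the member pointer is passed */
	return 0;
}
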
@@ -59,17 +44,23 @@ cn_queue_alloc_callback_entry(const char *name, struct cb_id *id,
 		return NULL;
 	}
 
+	atomic_set(&cbq->refcnt, 1);
+
+	atomic_inc(&dev->refcnt);
+	cbq->pdev = dev;
+
 	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
 	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
-	cbq->data.callback = callback;
-
-	INIT_WORK(&cbq->work, &cn_queue_wrapper);
+	cbq->callback = callback;
 	return cbq;
 }
 
-static void cn_queue_free_callback(struct cn_callback_entry *cbq)
+void cn_queue_release_callback(struct cn_callback_entry *cbq)
 {
-	flush_workqueue(cbq->pdev->cn_queue);
+	if (!atomic_dec_and_test(&cbq->refcnt))
+		return;
+
+	atomic_dec(&cbq->pdev->refcnt);
 	kfree(cbq);
 }
 
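
This hunk introduces the per-entry reference count that replaces the free-after-flush scheme: cn_queue_release_callback() frees the entry only when atomic_dec_and_test() drops the count to zero, and the entry pins its parent device through dev->refcnt. A userspace sketch of the same release pattern, assuming C11 <stdatomic.h> in place of the kernel's atomic_t (entry_alloc/entry_get/entry_put are illustrative names):

/*
 * Last-reference-frees pattern, as adopted by
 * cn_queue_release_callback().
 */
#include <stdatomic.h>
#include <stdlib.h>

struct entry {
	atomic_int refcnt;
};

static struct entry *entry_alloc(void)
{
	struct entry *e = malloc(sizeof(*e));

	if (e)
		atomic_init(&e->refcnt, 1);	/* creator holds one ref */
	return e;
}

static void entry_get(struct entry *e)
{
	atomic_fetch_add(&e->refcnt, 1);
}

static void entry_put(struct entry *e)
{
	/* fetch_sub returns the old value; 1 means we were the last user */
	if (atomic_fetch_sub(&e->refcnt, 1) != 1)
		return;
	free(e);
}

int main(void)
{
	struct entry *e = entry_alloc();

	if (!e)
		return 1;
	entry_get(e);	/* second user takes a reference */
	entry_put(e);	/* drops to 1, object survives */
	entry_put(e);	/* drops to 0, freed here */
	return 0;
}

atomic_fetch_sub() returns the old value, so comparing it with 1 is the userspace equivalent of the kernel's atomic_dec_and_test().
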
@@ -85,13 +76,10 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
 	struct cn_callback_entry *cbq, *__cbq;
 	int found = 0;
 
-	cbq = cn_queue_alloc_callback_entry(name, id, callback);
+	cbq = cn_queue_alloc_callback_entry(dev, name, id, callback);
 	if (!cbq)
 		return -ENOMEM;
 
-	atomic_inc(&dev->refcnt);
-	cbq->pdev = dev;
-
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
 		if (cn_cb_equal(&__cbq->id.id, id)) {
@@ -104,8 +92,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
 	spin_unlock_bh(&dev->queue_lock);
 
 	if (found) {
-		cn_queue_free_callback(cbq);
-		atomic_dec(&dev->refcnt);
+		cn_queue_release_callback(cbq);
 		return -EINVAL;
 	}
 
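
With the refcount in place, the duplicate-registration error path collapses to a single cn_queue_release_callback(), which also drops the device reference taken at allocation time. The check itself is unchanged: scan the list under dev->queue_lock and bail out with -EINVAL if the id is already present. A stand-alone sketch of that check, with a pthread mutex standing in for the spinlock and cb_id reduced to two integers:

/*
 * Duplicate-id check under a lock, as in cn_queue_add_callback().
 * queue/add_entry/cb_equal are illustrative names.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cb_id { unsigned int idx, val; };

struct entry {
	struct cb_id id;
	struct entry *next;
};

static struct entry first = { { 1, 1 }, NULL };
static struct entry *queue = &first;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static bool cb_equal(const struct cb_id *a, const struct cb_id *b)
{
	return a->idx == b->idx && a->val == b->val;
}

/* returns -1 (EINVAL-style) when the id is already registered */
static int add_entry(struct entry *e)
{
	struct entry *it;
	int found = 0;

	pthread_mutex_lock(&queue_lock);
	for (it = queue; it; it = it->next)
		if (cb_equal(&it->id, &e->id)) {
			found = 1;
			break;
		}
	if (!found) {
		e->next = queue;
		queue = e;
	}
	pthread_mutex_unlock(&queue_lock);

	return found ? -1 : 0;
}

int main(void)
{
	struct entry dup = { { 1, 1 }, NULL };

	printf("add duplicate: %d\n", add_entry(&dup));	/* prints -1 */
	return 0;
}
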
@@ -130,10 +117,8 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
 	}
 	spin_unlock_bh(&dev->queue_lock);
 
-	if (found) {
-		cn_queue_free_callback(cbq);
-		atomic_dec(&dev->refcnt);
-	}
+	if (found)
+		cn_queue_release_callback(cbq);
 }
 
 struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
@@ -151,12 +136,6 @@ struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
 
 	dev->nls = nls;
 
-	dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
-	if (!dev->cn_queue) {
-		kfree(dev);
-		return NULL;
-	}
-
 	return dev;
 }
 
@@ -164,9 +143,6 @@ void cn_queue_free_dev(struct cn_queue_dev *dev)
 {
 	struct cn_callback_entry *cbq, *n;
 
-	flush_workqueue(dev->cn_queue);
-	destroy_workqueue(dev->cn_queue);
-
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
 		list_del(&cbq->callback_entry);
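
The last two hunks remove the per-device workqueue entirely: cn_queue_alloc_dev() no longer needs the alloc_ordered_workqueue() error path, and cn_queue_free_dev() keeps only the locked list teardown. That teardown uses list_for_each_entry_safe(), the delete-while-iterating form that caches the next node before the current one is unlinked. A plain-C sketch of the same idea, with a hand-rolled singly linked list standing in for <linux/list.h>:

/*
 * Delete-while-iterating: save ->next before freeing the current
 * node, as list_for_each_entry_safe() does via its second cursor.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *n, *next;
	int i;

	/* build a short list */
	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}

	/* "safe" walk: grab ->next before freeing the current node */
	for (n = head; n; n = next) {
		next = n->next;
		printf("removing node %d\n", n->id);
		free(n);
	}
	return 0;
}
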