author     Evgeniy Polyakov <johnpol@2ka.mipt.ru>   2005-09-26 18:06:50 -0400
committer  David S. Miller <davem@davemloft.net>    2005-09-26 18:06:50 -0400
commit     acd042bb2de50d4e6fb969281a00cc8b8b71e46d (patch)
tree       c696f1c0bdbc6eabcb9c13d395abb73f0d08e129 /drivers/connector/cn_queue.c
parent     b9d717a7b413f227ebb2d61d9c118335f7292137 (diff)
[CONNECTOR]: async connector mode.
If the input message rate from userspace is too high, do not drop messages,
but try to deliver them by allocating a work queue entry per message.
A failed allocation there acts as a form of congestion control.
It also removes the WARN_ON on this condition, which scared people.
Signed-off-by: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
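The idea is easiest to see in isolation: each incoming message gets its own dynamically allocated work item, and when that allocation fails under load the error is reported back instead of the message being dropped with a warning. The sketch below is not the connector code itself (the sending side of this patch lives outside cn_queue.c); it is only a minimal illustration of the pattern, written against the 2.6.14-era three-argument INIT_WORK() that the diff below also uses. The names async_msg_work, async_msg_worker and async_deliver are hypothetical.

#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

struct async_msg_work {			/* hypothetical per-message wrapper */
	struct work_struct work;
	void (*callback)(void *);
	void *priv;
};

/* old-style (pre-2.6.20) work handler: receives the void * passed to INIT_WORK() */
static void async_msg_worker(void *data)
{
	struct async_msg_work *w = data;

	w->callback(w->priv);
	kfree(w);			/* the work item frees itself, like kfree(d->free) below */
}

static int async_deliver(struct workqueue_struct *wq,
			 void (*callback)(void *), void *priv)
{
	struct async_msg_work *w;

	w = kmalloc(sizeof(*w), GFP_ATOMIC);
	if (!w)
		return -ENOMEM;		/* a failed allocation doubles as congestion control */

	w->callback = callback;
	w->priv = priv;
	INIT_WORK(&w->work, async_msg_worker, w);
	queue_work(wq, &w->work);
	return 0;
}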
Diffstat (limited to 'drivers/connector/cn_queue.c')
-rw-r--r--   drivers/connector/cn_queue.c | 32
1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 966632182e2d..9f2f00d82917 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,16 +31,19 @@
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-static void cn_queue_wrapper(void *data)
+void cn_queue_wrapper(void *data)
 {
-	struct cn_callback_entry *cbq = data;
+	struct cn_callback_data *d = data;
 
-	cbq->cb->callback(cbq->cb->priv);
-	cbq->destruct_data(cbq->ddata);
-	cbq->ddata = NULL;
+	d->callback(d->callback_priv);
+
+	d->destruct_data(d->ddata);
+	d->ddata = NULL;
+
+	kfree(d->free);
 }
 
-static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callback *cb)
+static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struct cb_id *id, void (*callback)(void *))
 {
 	struct cn_callback_entry *cbq;
 
@@ -50,8 +53,11 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callbac
 		return NULL;
 	}
 
-	cbq->cb = cb;
-	INIT_WORK(&cbq->work, &cn_queue_wrapper, cbq);
+	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
+	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
+	cbq->data.callback = callback;
+
+	INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
 	return cbq;
 }
 
@@ -68,12 +74,12 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
 	return ((i1->idx == i2->idx) && (i1->val == i2->val));
 }
 
-int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
+int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *))
 {
 	struct cn_callback_entry *cbq, *__cbq;
 	int found = 0;
 
-	cbq = cn_queue_alloc_callback_entry(cb);
+	cbq = cn_queue_alloc_callback_entry(name, id, callback);
 	if (!cbq)
 		return -ENOMEM;
 
@@ -82,7 +88,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
 
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
-		if (cn_cb_equal(&__cbq->cb->id, &cb->id)) {
+		if (cn_cb_equal(&__cbq->id.id, id)) {
 			found = 1;
 			break;
 		}
@@ -99,7 +105,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
 
 	cbq->nls = dev->nls;
 	cbq->seq = 0;
-	cbq->group = cbq->cb->id.idx;
+	cbq->group = cbq->id.id.idx;
 
 	return 0;
 }
@@ -111,7 +117,7 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
 
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
-		if (cn_cb_equal(&cbq->cb->id, id)) {
+		if (cn_cb_equal(&cbq->id.id, id)) {
 			list_del(&cbq->callback_entry);
 			found = 1;
 			break;
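For reference, a caller-side sketch against the new cn_queue_add_callback() signature introduced above: the name, the cb_id and the callback are now passed as separate arguments instead of a struct cn_callback, and the entry copies them into cbq->id and cbq->data.callback. The cn_queue_dev pointer, the cb_id values and the function names below are hypothetical; most users would register through the higher-level connector API rather than calling the cn_queue layer directly.

#include <linux/kernel.h>
#include <linux/connector.h>

static void my_cn_callback(void *data)
{
	struct cn_msg *msg = data;	/* the connector hands the received cn_msg to the callback */

	printk(KERN_INFO "connector: idx %u, val %u, len %u\n",
	       msg->id.idx, msg->id.val, msg->len);
}

static int my_register(struct cn_queue_dev *dev)	/* dev assumed to be set up elsewhere */
{
	struct cb_id id = { .idx = 0x123, .val = 0x456 };	/* hypothetical ids */

	/*
	 * name, id and callback are passed separately now; the entry copies
	 * them into cbq->id and cbq->data.callback as shown in the hunks above.
	 */
	return cn_queue_add_callback(dev, "my_cn_client", &id, my_cn_callback);
}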