author     Andy Walls <awalls@radix.net>                  2009-04-13 22:08:00 -0400
committer  Mauro Carvalho Chehab <mchehab@redhat.com>     2009-06-16 17:20:44 -0400
commit     40c5520f55924ba87090d0d93222baad74202559 (patch)
tree       731576eb5bb3c37df60c8cc78adbfc4d18bc0dd4 /drivers/media/video/cx18/cx18-queue.c
parent     5f0a3cfcfd315d87de8f80af49b114daf7137823 (diff)
V4L/DVB (11618): cx18: Convert per stream mutex locks to per queue spin locks
To avoid sleeping while providing buffers to user space and while handling
incoming buffers from the capture unit, convert the per-stream mutex that
protected the queues into three spin locks. There is now one spin lock per
queue, which increases concurrency when moving buffers around.
Also simplify the queue manipulations and the handling of incoming data
buffers from the capture unit.
Signed-off-by: Andy Walls <awalls@radix.net>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
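
As an aside for readers unfamiliar with the pattern: the change described above gives each queue its own spin lock instead of sharing one per-stream mutex, so buffers can be moved on one queue without blocking on, or sleeping for, another. The sketch below is a minimal, self-contained userspace analogy of that idea, not the cx18 driver code; the buf, buf_queue, and queue_* names and the POSIX pthread_spin_* calls are stand-ins chosen only for this example.

/*
 * Illustrative userspace analogy of per-queue spin locks: each queue
 * carries its own lock, so enqueue/dequeue on different queues can
 * proceed concurrently and without sleeping. This is not the cx18 code.
 */
#include <pthread.h>
#include <stdio.h>

struct buf {
        int id;
        struct buf *next;
};

struct buf_queue {
        pthread_spinlock_t lock;   /* one lock per queue, like q->lock */
        struct buf *head;
        struct buf *tail;
        int buffers;
};

static void queue_init(struct buf_queue *q)
{
        pthread_spin_init(&q->lock, PTHREAD_PROCESS_PRIVATE);
        q->head = q->tail = NULL;
        q->buffers = 0;
}

/* Append a buffer (FIFO) while holding only this queue's lock. */
static void queue_enqueue(struct buf_queue *q, struct buf *b)
{
        b->next = NULL;
        pthread_spin_lock(&q->lock);
        if (q->tail)
                q->tail->next = b;
        else
                q->head = b;
        q->tail = b;
        q->buffers++;
        pthread_spin_unlock(&q->lock);
}

/* Pop the oldest buffer, or return NULL if the queue is empty. */
static struct buf *queue_dequeue(struct buf_queue *q)
{
        struct buf *b;

        pthread_spin_lock(&q->lock);
        b = q->head;
        if (b) {
                q->head = b->next;
                if (!q->head)
                        q->tail = NULL;
                q->buffers--;
        }
        pthread_spin_unlock(&q->lock);
        return b;
}

int main(void)
{
        struct buf_queue q_free, q_full;
        struct buf b = { .id = 1 };
        struct buf *got;

        queue_init(&q_free);
        queue_init(&q_full);

        queue_enqueue(&q_free, &b);          /* producer side */
        got = queue_dequeue(&q_free);        /* consumer side */
        if (got)
                queue_enqueue(&q_full, got); /* hand off to another queue */

        printf("q_free: %d buffers, q_full: %d buffers\n",
               q_free.buffers, q_full.buffers);
        return 0;
}

The design point mirrors the commit: a spin lock never sleeps, so the enqueue and dequeue paths stay usable from contexts that must not block, and contention is limited to the single queue actually being touched.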
Diffstat (limited to 'drivers/media/video/cx18/cx18-queue.c')
-rw-r--r--  drivers/media/video/cx18/cx18-queue.c | 83
1 file changed, 50 insertions(+), 33 deletions(-)
diff --git a/drivers/media/video/cx18/cx18-queue.c b/drivers/media/video/cx18/cx18-queue.c
index 3046b8e74345..693a745b0858 100644
--- a/drivers/media/video/cx18/cx18-queue.c
+++ b/drivers/media/video/cx18/cx18-queue.c
@@ -53,13 +53,13 @@ struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
                 buf->skipped = 0;
         }
 
-        mutex_lock(&s->qlock);
-
         /* q_busy is restricted to a max buffer count imposed by firmware */
         if (q == &s->q_busy &&
             atomic_read(&q->buffers) >= CX18_MAX_FW_MDLS_PER_STREAM)
                 q = &s->q_free;
 
+        spin_lock(&q->lock);
+
         if (to_front)
                 list_add(&buf->list, &q->list); /* LIFO */
         else
@@ -67,7 +67,7 @@ struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
         q->bytesused += buf->bytesused - buf->readpos;
         atomic_inc(&q->buffers);
 
-        mutex_unlock(&s->qlock);
+        spin_unlock(&q->lock);
         return q;
 }
 
@@ -75,7 +75,7 @@ struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
 {
         struct cx18_buffer *buf = NULL;
 
-        mutex_lock(&s->qlock);
+        spin_lock(&q->lock);
         if (!list_empty(&q->list)) {
                 buf = list_first_entry(&q->list, struct cx18_buffer, list);
                 list_del_init(&buf->list);
@@ -83,7 +83,7 @@ struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
                 buf->skipped = 0;
                 atomic_dec(&q->buffers);
         }
-        mutex_unlock(&s->qlock);
+        spin_unlock(&q->lock);
         return buf;
 }
 
@@ -94,9 +94,23 @@ struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
         struct cx18_buffer *buf;
         struct cx18_buffer *tmp;
         struct cx18_buffer *ret = NULL;
+        LIST_HEAD(sweep_up);
 
-        mutex_lock(&s->qlock);
+        /*
+         * We don't have to acquire multiple q locks here, because we are
+         * serialized by the single threaded work handler.
+         * Buffers from the firmware will thus remain in order as
+         * they are moved from q_busy to q_full or to the dvb ring buffer.
+         */
+        spin_lock(&s->q_busy.lock);
         list_for_each_entry_safe(buf, tmp, &s->q_busy.list, list) {
+                /*
+                 * We should find what the firmware told us is done,
+                 * right at the front of the queue. If we don't, we likely have
+                 * missed a buffer done message from the firmware.
+                 * Once we skip a buffer repeatedly, relative to the size of
+                 * q_busy, we have high confidence we've missed it.
+                 */
                 if (buf->id != id) {
                         buf->skipped++;
                         if (buf->skipped >= atomic_read(&s->q_busy.buffers)-1) {
@@ -105,38 +119,41 @@ struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
                                         "times - it must have dropped out of "
                                         "rotation\n", s->name, buf->id,
                                         buf->skipped);
-                                /* move it to q_free */
-                                list_move_tail(&buf->list, &s->q_free.list);
-                                buf->bytesused = buf->readpos = buf->b_flags =
-                                        buf->skipped = 0;
+                                /* Sweep it up to put it back into rotation */
+                                list_move_tail(&buf->list, &sweep_up);
                                 atomic_dec(&s->q_busy.buffers);
-                                atomic_inc(&s->q_free.buffers);
                         }
                         continue;
                 }
-
-                buf->bytesused = bytesused;
-                /* Sync the buffer before we release the qlock */
-                cx18_buf_sync_for_cpu(s, buf);
-                if (s->type == CX18_ENC_STREAM_TYPE_TS) {
-                        /*
-                         * TS doesn't use q_full. As we pull the buffer off of
-                         * the queue here, the caller will have to put it back.
-                         */
-                        list_del_init(&buf->list);
-                } else {
-                        /* Move buffer from q_busy to q_full */
-                        list_move_tail(&buf->list, &s->q_full.list);
-                        set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
-                        s->q_full.bytesused += buf->bytesused;
-                        atomic_inc(&s->q_full.buffers);
-                }
+                /*
+                 * We pull the desired buffer off of the queue here. Something
+                 * will have to put it back on a queue later.
+                 */
+                list_del_init(&buf->list);
                 atomic_dec(&s->q_busy.buffers);
-
                 ret = buf;
                 break;
         }
-        mutex_unlock(&s->qlock);
+        spin_unlock(&s->q_busy.lock);
+
+        /*
+         * We found the buffer for which we were looking. Get it ready for
+         * the caller to put on q_full or in the dvb ring buffer.
+         */
+        if (ret != NULL) {
+                ret->bytesused = bytesused;
+                ret->skipped = 0;
+                /* readpos and b_flags were 0'ed when the buf went on q_busy */
+                cx18_buf_sync_for_cpu(s, ret);
+                if (s->type != CX18_ENC_STREAM_TYPE_TS)
+                        set_bit(CX18_F_B_NEED_BUF_SWAP, &ret->b_flags);
+        }
+
+        /* Put any buffers the firmware is ignoring back into normal rotation */
+        list_for_each_entry_safe(buf, tmp, &sweep_up, list) {
+                list_del_init(&buf->list);
+                cx18_enqueue(s, buf, &s->q_free);
+        }
         return ret;
 }
 
@@ -148,7 +165,7 @@ static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
         if (q == &s->q_free)
                 return;
 
-        mutex_lock(&s->qlock);
+        spin_lock(&q->lock);
         while (!list_empty(&q->list)) {
                 buf = list_first_entry(&q->list, struct cx18_buffer, list);
                 list_move_tail(&buf->list, &s->q_free.list);
@@ -156,7 +173,7 @@ static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
                 atomic_inc(&s->q_free.buffers);
         }
         cx18_queue_init(q);
-        mutex_unlock(&s->qlock);
+        spin_unlock(&q->lock);
 }
 
 void cx18_flush_queues(struct cx18_stream *s)