Diffstat (limited to 'arch/mips/kernel/rtlx.c')
-rw-r--r--  arch/mips/kernel/rtlx.c | 517
1 file changed, 362 insertions(+), 155 deletions(-)
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 986a9cf23067..6179805af9f0 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -21,45 +21,44 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/init.h>
+#include <asm/uaccess.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/elf.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/moduleloader.h>
 #include <linux/interrupt.h>
-#include <linux/irq.h>
 #include <linux/poll.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
-
 #include <asm/mipsmtregs.h>
-#include <asm/bitops.h>
+#include <asm/cacheflush.h>
+#include <asm/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/vpe.h>
 #include <asm/rtlx.h>
-#include <asm/uaccess.h>
 
 #define RTLX_TARG_VPE 1
 
 static struct rtlx_info *rtlx;
 static int major;
 static char module_name[] = "rtlx";
-static struct irqaction irq;
-static int irq_num;
-
-static inline int spacefree(int read, int write, int size)
-{
-	if (read == write) {
-		/*
-		 * never fill the buffer completely, so indexes are always
-		 * equal if empty and only empty, or !equal if data available
-		 */
-		return size - 1;
-	}
-
-	return ((read + size - write) % size) - 1;
-}
 
 static struct chan_waitqueues {
 	wait_queue_head_t rt_queue;
 	wait_queue_head_t lx_queue;
+	int in_open;
 } channel_wqs[RTLX_CHANNELS];
 
+static struct irqaction irq;
+static int irq_num;
+static struct vpe_notifications notify;
+static int sp_stopping = 0;
+
 extern void *vpe_get_shared(int index);
 
 static void rtlx_dispatch(struct pt_regs *regs)
@@ -67,174 +66,298 @@ static void rtlx_dispatch(struct pt_regs *regs)
 	do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ, regs);
 }
 
+
+/* Interrupt handler may be called before rtlx_init has otherwise had
+   a chance to run.
+*/
 static irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
 	int i;
 
 	for (i = 0; i < RTLX_CHANNELS; i++) {
-		struct rtlx_channel *chan = &rtlx->channel[i];
-
-		if (chan->lx_read != chan->lx_write)
-			wake_up_interruptible(&channel_wqs[i].lx_queue);
+		wake_up(&channel_wqs[i].lx_queue);
+		wake_up(&channel_wqs[i].rt_queue);
 	}
 
 	return IRQ_HANDLED;
 }
 
-/* call when we have the address of the shared structure from the SP side. */
-static int rtlx_init(struct rtlx_info *rtlxi)
+static __attribute_used__ void dump_rtlx(void)
 {
 	int i;
 
-	if (rtlxi->id != RTLX_ID) {
-		printk(KERN_WARNING "no valid RTLX id at 0x%p\n", rtlxi);
-		return -ENOEXEC;
-	}
+	printk("id 0x%lx state %d\n", rtlx->id, rtlx->state);
 
-	/* initialise the wait queues */
 	for (i = 0; i < RTLX_CHANNELS; i++) {
-		init_waitqueue_head(&channel_wqs[i].rt_queue);
-		init_waitqueue_head(&channel_wqs[i].lx_queue);
-	}
+		struct rtlx_channel *chan = &rtlx->channel[i];
 
-	/* set up for interrupt handling */
-	memset(&irq, 0, sizeof(struct irqaction));
+		printk(" rt_state %d lx_state %d buffer_size %d\n",
+		       chan->rt_state, chan->lx_state, chan->buffer_size);
 
-	if (cpu_has_vint)
-		set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
+		printk(" rt_read %d rt_write %d\n",
+		       chan->rt_read, chan->rt_write);
 
-	irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ;
-	irq.handler = rtlx_interrupt;
-	irq.flags = SA_INTERRUPT;
-	irq.name = "RTLX";
-	irq.dev_id = rtlx;
-	setup_irq(irq_num, &irq);
+		printk(" lx_read %d lx_write %d\n",
+		       chan->lx_read, chan->lx_write);
+
+		printk(" rt_buffer <%s>\n", chan->rt_buffer);
+		printk(" lx_buffer <%s>\n", chan->lx_buffer);
+	}
+}
+
+/* call when we have the address of the shared structure from the SP side. */
+static int rtlx_init(struct rtlx_info *rtlxi)
+{
+	if (rtlxi->id != RTLX_ID) {
+		printk(KERN_ERR "no valid RTLX id at 0x%p 0x%x\n", rtlxi, rtlxi->id);
+		return -ENOEXEC;
+	}
 
 	rtlx = rtlxi;
 
 	return 0;
 }
 
-/* only allow one open process at a time to open each channel */
-static int rtlx_open(struct inode *inode, struct file *filp)
+/* notifications */
+static void starting(int vpe)
 {
-	int minor, ret;
+	int i;
+	sp_stopping = 0;
+
+	/* force a reload of rtlx */
+	rtlx=NULL;
+
+	/* wake up any sleeping rtlx_open's */
+	for (i = 0; i < RTLX_CHANNELS; i++)
+		wake_up_interruptible(&channel_wqs[i].lx_queue);
+}
+
+static void stopping(int vpe)
+{
+	int i;
+
+	sp_stopping = 1;
+	for (i = 0; i < RTLX_CHANNELS; i++)
+		wake_up_interruptible(&channel_wqs[i].lx_queue);
+}
+
+
+int rtlx_open(int index, int can_sleep)
+{
+	int ret;
 	struct rtlx_channel *chan;
+	volatile struct rtlx_info **p;
 
-	/* assume only 1 device at the mo. */
-	minor = MINOR(inode->i_rdev);
+	if (index >= RTLX_CHANNELS) {
+		printk(KERN_DEBUG "rtlx_open index out of range\n");
+		return -ENOSYS;
+	}
+
+	if (channel_wqs[index].in_open) {
+		printk(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
+		return -EBUSY;
+	}
+
+	channel_wqs[index].in_open++;
 
 	if (rtlx == NULL) {
-		struct rtlx_info **p;
 		if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
-			printk(KERN_ERR "vpe_get_shared is NULL. "
-			       "Has an SP program been loaded?\n");
-			return -EFAULT;
+			if (can_sleep) {
+				DECLARE_WAITQUEUE(wait, current);
+
+				/* go to sleep */
+				add_wait_queue(&channel_wqs[index].lx_queue, &wait);
+
+				set_current_state(TASK_INTERRUPTIBLE);
+				while ((p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
+					schedule();
+					set_current_state(TASK_INTERRUPTIBLE);
+				}
+
+				set_current_state(TASK_RUNNING);
+				remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
+
+				/* back running */
+			} else {
+				printk( KERN_DEBUG "No SP program loaded, and device "
+					"opened with O_NONBLOCK\n");
+				channel_wqs[index].in_open = 0;
+				return -ENOSYS;
+			}
 		}
 
 		if (*p == NULL) {
-			printk(KERN_ERR "vpe_shared %p %p\n", p, *p);
-			return -EFAULT;
+			if (can_sleep) {
+				DECLARE_WAITQUEUE(wait, current);
+
+				/* go to sleep */
+				add_wait_queue(&channel_wqs[index].lx_queue, &wait);
+
+				set_current_state(TASK_INTERRUPTIBLE);
+				while (*p == NULL) {
+					schedule();
+
+					/* reset task state to interruptable otherwise
+					   we'll whizz round here like a very fast loopy
+					   thing. schedule() appears to return with state
+					   set to TASK_RUNNING.
+
+					   If the loaded SP program, for whatever reason,
+					   doesn't set up the shared structure *p will never
+					   become true. So whoever connected to either /dev/rt?
+					   or if it was kspd, will then take up rather a lot of
+					   processor cycles.
+					*/
+
+					set_current_state(TASK_INTERRUPTIBLE);
+				}
+
+				set_current_state(TASK_RUNNING);
+				remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
+
+				/* back running */
+			}
+			else {
+				printk(" *vpe_get_shared is NULL. "
+				       "Has an SP program been loaded?\n");
+				channel_wqs[index].in_open = 0;
+				return -ENOSYS;
+			}
+		}
+
+		if ((unsigned int)*p < KSEG0) {
+			printk(KERN_WARNING "vpe_get_shared returned an invalid pointer "
+			       "maybe an error code %d\n", (int)*p);
+			channel_wqs[index].in_open = 0;
+			return -ENOSYS;
 		}
 
-		if ((ret = rtlx_init(*p)) < 0)
-			return ret;
+		if ((ret = rtlx_init(*p)) < 0) {
+			channel_wqs[index].in_open = 0;
+			return ret;
+		}
 	}
 
-	chan = &rtlx->channel[minor];
+	chan = &rtlx->channel[index];
 
-	if (test_and_set_bit(RTLX_STATE_OPENED, &chan->lx_state))
-		return -EBUSY;
+	if (chan->lx_state == RTLX_STATE_OPENED) {
+		channel_wqs[index].in_open = 0;
+		return -EBUSY;
+	}
 
+	chan->lx_state = RTLX_STATE_OPENED;
+	channel_wqs[index].in_open = 0;
 	return 0;
 }
 
-static int rtlx_release(struct inode *inode, struct file *filp)
+int rtlx_release(int index)
 {
-	int minor = MINOR(inode->i_rdev);
-
-	clear_bit(RTLX_STATE_OPENED, &rtlx->channel[minor].lx_state);
-	smp_mb__after_clear_bit();
-
+	rtlx->channel[index].lx_state = RTLX_STATE_UNUSED;
 	return 0;
 }
 
-static unsigned int rtlx_poll(struct file *file, poll_table * wait)
+unsigned int rtlx_read_poll(int index, int can_sleep)
 {
-	int minor;
-	unsigned int mask = 0;
-	struct rtlx_channel *chan;
+	struct rtlx_channel *chan;
 
-	minor = MINOR(file->f_dentry->d_inode->i_rdev);
-	chan = &rtlx->channel[minor];
+	if (rtlx == NULL)
+		return 0;
 
-	poll_wait(file, &channel_wqs[minor].rt_queue, wait);
-	poll_wait(file, &channel_wqs[minor].lx_queue, wait);
+	chan = &rtlx->channel[index];
 
 	/* data available to read? */
-	if (chan->lx_read != chan->lx_write)
-		mask |= POLLIN | POLLRDNORM;
+	if (chan->lx_read == chan->lx_write) {
+		if (can_sleep) {
+			DECLARE_WAITQUEUE(wait, current);
 
-	/* space to write */
-	if (spacefree(chan->rt_read, chan->rt_write, chan->buffer_size))
-		mask |= POLLOUT | POLLWRNORM;
+			/* go to sleep */
+			add_wait_queue(&channel_wqs[index].lx_queue, &wait);
 
-	return mask;
+			set_current_state(TASK_INTERRUPTIBLE);
+			while (chan->lx_read == chan->lx_write) {
+				schedule();
+
+				set_current_state(TASK_INTERRUPTIBLE);
+
+				if (sp_stopping) {
+					set_current_state(TASK_RUNNING);
+					remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
+					return 0;
+				}
+			}
+
+			set_current_state(TASK_RUNNING);
+			remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
+
+			/* back running */
+		}
+		else
+			return 0;
+	}
+
+	return (chan->lx_write + chan->buffer_size - chan->lx_read)
+	       % chan->buffer_size;
 }
 
-static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count,
-			 loff_t * ppos)
+static inline int write_spacefree(int read, int write, int size)
 {
-	unsigned long failed;
-	size_t fl = 0L;
-	int minor;
-	struct rtlx_channel *lx;
-	DECLARE_WAITQUEUE(wait, current);
+	if (read == write) {
+		/*
+		 * Never fill the buffer completely, so indexes are always
+		 * equal if empty and only empty, or !equal if data available
+		 */
+		return size - 1;
+	}
 
-	minor = MINOR(file->f_dentry->d_inode->i_rdev);
-	lx = &rtlx->channel[minor];
+	return ((read + size - write) % size) - 1;
+}
 
-	/* data available? */
-	if (lx->lx_write == lx->lx_read) {
-		if (file->f_flags & O_NONBLOCK)
-			return 0;	/* -EAGAIN makes cat whinge */
+unsigned int rtlx_write_poll(int index)
+{
+	struct rtlx_channel *chan = &rtlx->channel[index];
+	return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size);
+}
 
-		/* go to sleep */
-		add_wait_queue(&channel_wqs[minor].lx_queue, &wait);
-		set_current_state(TASK_INTERRUPTIBLE);
+static inline void copy_to(void *dst, void *src, size_t count, int user)
+{
+	if (user)
+		copy_to_user(dst, src, count);
+	else
+		memcpy(dst, src, count);
+}
 
-		while (lx->lx_write == lx->lx_read)
-			schedule();
+static inline void copy_from(void *dst, void *src, size_t count, int user)
+{
+	if (user)
+		copy_from_user(dst, src, count);
+	else
+		memcpy(dst, src, count);
+}
 
-		set_current_state(TASK_RUNNING);
-		remove_wait_queue(&channel_wqs[minor].lx_queue, &wait);
+ssize_t rtlx_read(int index, void *buff, size_t count, int user)
+{
+	size_t fl = 0L;
+	struct rtlx_channel *lx;
 
-		/* back running */
-	}
+	if (rtlx == NULL)
+		return -ENOSYS;
+
+	lx = &rtlx->channel[index];
 
 	/* find out how much in total */
 	count = min(count,
-		    (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size);
+		    (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read)
+		    % lx->buffer_size);
 
 	/* then how much from the read pointer onwards */
-	fl = min(count, (size_t)lx->buffer_size - lx->lx_read);
+	fl = min( count, (size_t)lx->buffer_size - lx->lx_read);
 
-	failed = copy_to_user (buffer, &lx->lx_buffer[lx->lx_read], fl);
-	if (failed) {
-		count = fl - failed;
-		goto out;
-	}
+	copy_to(buff, &lx->lx_buffer[lx->lx_read], fl, user);
 
 	/* and if there is anything left at the beginning of the buffer */
-	if (count - fl) {
-		failed = copy_to_user (buffer + fl, lx->lx_buffer, count - fl);
-		if (failed) {
-			count -= failed;
-			goto out;
-		}
-	}
+	if ( count - fl )
+		copy_to (buff + fl, lx->lx_buffer, count - fl, user);
 
-out:
 	/* update the index */
 	lx->lx_read += count;
 	lx->lx_read %= lx->buffer_size;
@@ -242,20 +365,101 @@ out:
 	return count;
 }
 
-static ssize_t rtlx_write(struct file *file, const char __user * buffer,
+ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
+{
+	struct rtlx_channel *rt;
+	size_t fl;
+
+	if (rtlx == NULL)
+		return(-ENOSYS);
+
+	rt = &rtlx->channel[index];
+
+	/* total number of bytes to copy */
+	count = min(count,
+		    (size_t)write_spacefree(rt->rt_read, rt->rt_write,
+					    rt->buffer_size));
+
+	/* first bit from write pointer to the end of the buffer, or count */
+	fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
+
+	copy_from (&rt->rt_buffer[rt->rt_write], buffer, fl, user);
+
+	/* if there's any left copy to the beginning of the buffer */
+	if( count - fl )
+		copy_from (rt->rt_buffer, buffer + fl, count - fl, user);
+
+	rt->rt_write += count;
+	rt->rt_write %= rt->buffer_size;
+
+	return(count);
+}
+
+
+static int file_open(struct inode *inode, struct file *filp)
+{
+	int minor = MINOR(inode->i_rdev);
+
+	return rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
+}
+
+static int file_release(struct inode *inode, struct file *filp)
+{
+	int minor;
+	minor = MINOR(inode->i_rdev);
+
+	return rtlx_release(minor);
+}
+
+static unsigned int file_poll(struct file *file, poll_table * wait)
+{
+	int minor;
+	unsigned int mask = 0;
+
+	minor = MINOR(file->f_dentry->d_inode->i_rdev);
+
+	poll_wait(file, &channel_wqs[minor].rt_queue, wait);
+	poll_wait(file, &channel_wqs[minor].lx_queue, wait);
+
+	if (rtlx == NULL)
+		return 0;
+
+	/* data available to read? */
+	if (rtlx_read_poll(minor, 0))
+		mask |= POLLIN | POLLRDNORM;
+
+	/* space to write */
+	if (rtlx_write_poll(minor))
+		mask |= POLLOUT | POLLWRNORM;
+
+	return mask;
+}
+
+static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
+			 loff_t * ppos)
+{
+	int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+
+	/* data available? */
+	if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) {
+		return 0;	// -EAGAIN makes cat whinge
+	}
+
+	return rtlx_read(minor, buffer, count, 1);
+}
+
+static ssize_t file_write(struct file *file, const char __user * buffer,
 			  size_t count, loff_t * ppos)
 {
-	unsigned long failed;
 	int minor;
 	struct rtlx_channel *rt;
-	size_t fl;
 	DECLARE_WAITQUEUE(wait, current);
 
 	minor = MINOR(file->f_dentry->d_inode->i_rdev);
 	rt = &rtlx->channel[minor];
 
 	/* any space left... */
-	if (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) {
+	if (!rtlx_write_poll(minor)) {
 
 		if (file->f_flags & O_NONBLOCK)
 			return -EAGAIN;
@@ -263,61 +467,64 @@ static ssize_t rtlx_write(struct file *file, const char __user * buffer,
 		add_wait_queue(&channel_wqs[minor].rt_queue, &wait);
 		set_current_state(TASK_INTERRUPTIBLE);
 
-		while (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size))
+		while (!rtlx_write_poll(minor))
 			schedule();
 
 		set_current_state(TASK_RUNNING);
 		remove_wait_queue(&channel_wqs[minor].rt_queue, &wait);
 	}
 
-	/* total number of bytes to copy */
-	count = min(count, (size_t)spacefree(rt->rt_read, rt->rt_write, rt->buffer_size) );
-
-	/* first bit from write pointer to the end of the buffer, or count */
-	fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
-
-	failed = copy_from_user(&rt->rt_buffer[rt->rt_write], buffer, fl);
-	if (failed) {
-		count = fl - failed;
-		goto out;
-	}
-
-	/* if there's any left copy to the beginning of the buffer */
-	if (count - fl) {
-		failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
-		if (failed) {
-			count -= failed;
-			goto out;
-		}
-	}
-
-out:
-	rt->rt_write += count;
-	rt->rt_write %= rt->buffer_size;
-
-	return count;
+	return rtlx_write(minor, (void *)buffer, count, 1);
 }
 
 static struct file_operations rtlx_fops = {
 	.owner = THIS_MODULE,
-	.open = rtlx_open,
-	.release = rtlx_release,
-	.write = rtlx_write,
-	.read = rtlx_read,
-	.poll = rtlx_poll
+	.open = file_open,
+	.release = file_release,
+	.write = file_write,
+	.read = file_read,
+	.poll = file_poll
 };
 
+static struct irqaction rtlx_irq = {
+	.handler = rtlx_interrupt,
+	.flags = SA_INTERRUPT,
+	.name = "RTLX",
+};
+
+static int rtlx_irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ;
+
 static char register_chrdev_failed[] __initdata =
 	KERN_ERR "rtlx_module_init: unable to register device\n";
 
-static int __init rtlx_module_init(void)
+static int rtlx_module_init(void)
 {
+	int i;
+
 	major = register_chrdev(0, module_name, &rtlx_fops);
 	if (major < 0) {
 		printk(register_chrdev_failed);
 		return major;
 	}
 
+	/* initialise the wait queues */
+	for (i = 0; i < RTLX_CHANNELS; i++) {
+		init_waitqueue_head(&channel_wqs[i].rt_queue);
+		init_waitqueue_head(&channel_wqs[i].lx_queue);
+		channel_wqs[i].in_open = 0;
+	}
+
+	/* set up notifiers */
+	notify.start = starting;
+	notify.stop = stopping;
+	vpe_notify(RTLX_TARG_VPE, &notify);
+
+	if (cpu_has_vint)
+		set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
+
+	rtlx_irq.dev_id = rtlx;
+	setup_irq(rtlx_irq_num, &rtlx_irq);
+
 	return 0;
 }
 
@@ -330,5 +537,5 @@ module_init(rtlx_module_init);
 module_exit(rtlx_module_exit);
 
 MODULE_DESCRIPTION("MIPS RTLX");
-MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc.");
+MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
 MODULE_LICENSE("GPL");
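
The ring-buffer arithmetic this patch introduces (write_spacefree() and the length calculations in rtlx_read_poll() and rtlx_read()) sacrifices one slot so that read == write can only ever mean "empty": writable space is ((read + size - write) % size) - 1 and readable data is (write + size - read) % size, with no separate fill counter needed. The standalone userspace sketch below exercises the same index math; the helper names ring_free and ring_avail are illustrative only and are not part of the kernel code.

#include <stdio.h>

/*
 * Illustration of the circular-buffer index arithmetic used above.
 * The buffer is never filled completely, so at most size - 1 bytes
 * can be in flight and read == write unambiguously means "empty".
 */

/* bytes that may still be written without overtaking the reader */
static int ring_free(int read, int write, int size)
{
	if (read == write)
		return size - 1;	/* empty: everything but one slot */

	return ((read + size - write) % size) - 1;
}

/* bytes currently available to the reader */
static int ring_avail(int read, int write, int size)
{
	return (write + size - read) % size;
}

int main(void)
{
	const int size = 8;
	int read = 0, write = 0;

	printf("empty: free=%d avail=%d\n",
	       ring_free(read, write, size), ring_avail(read, write, size));
	/* prints free=7 avail=0 */

	write = 5;	/* producer stored 5 bytes */
	printf("5 in:  free=%d avail=%d\n",
	       ring_free(read, write, size), ring_avail(read, write, size));
	/* prints free=2 avail=5 */

	read = 3;	/* consumer drained 3 bytes */
	printf("3 out: free=%d avail=%d\n",
	       ring_free(read, write, size), ring_avail(read, write, size));
	/* prints free=5 avail=2 */

	return 0;
}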