path: root/drivers/ieee1394
Diffstat (limited to 'drivers/ieee1394')
-rw-r--r--  drivers/ieee1394/amdtp.c  1297
-rw-r--r--  drivers/ieee1394/amdtp.h    84
-rw-r--r--  drivers/ieee1394/cmp.c     311
-rw-r--r--  drivers/ieee1394/cmp.h      31
4 files changed, 0 insertions, 1723 deletions
diff --git a/drivers/ieee1394/amdtp.c b/drivers/ieee1394/amdtp.c
deleted file mode 100644
index 17390d762cf7..000000000000
--- a/drivers/ieee1394/amdtp.c
+++ /dev/null
@@ -1,1297 +0,0 @@
1/* -*- c-basic-offset: 8 -*-
2 *
3 * amdtp.c - Audio and Music Data Transmission Protocol Driver
4 * Copyright (C) 2001 Kristian Høgsberg
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/* OVERVIEW
22 * --------
23 *
24 * The AMDTP driver is designed to expose the IEEE1394 bus as a
25 * regular OSS soundcard, i.e. you can link /dev/dsp to /dev/amdtp and
26 * then your favourite MP3 player, game or whatever sound program will
27 * output to an IEEE1394 isochronous channel. The signal destination
28 * could be a set of IEEE1394 loudspeakers (if and when such things
29 * become available) or an amplifier with IEEE1394 input (like the
 30 * Sony STR-LSA1). The driver only handles the actual streaming; some
 31 * connection management is also required for this to actually work.
32 * That is outside the scope of this driver, and furthermore it is not
33 * really standardized yet.
34 *
 35 * The Audio and Music Data Transmission Protocol is available at
36 *
37 * http://www.1394ta.org/Download/Technology/Specifications/2001/AM20Final-jf2.pdf
38 *
39 *
40 * TODO
41 * ----
42 *
43 * - We should be able to change input sample format between LE/BE, as
44 * we already shift the bytes around when we construct the iso
45 * packets.
46 *
47 * - Fix DMA stop after bus reset!
48 *
49 * - Clean up iso context handling in ohci1394.
50 *
51 *
52 * MAYBE TODO
53 * ----------
54 *
55 * - Receive data for local playback or recording. Playback requires
56 * soft syncing with the sound card.
57 *
58 * - Signal processing, i.e. receive packets, do some processing, and
59 * transmit them again using the same packet structure and timestamps
60 * offset by processing time.
61 *
62 * - Maybe make an ALSA interface, that is, create a file_ops
63 * implementation that recognizes ALSA ioctls and uses defaults for
64 * things that can't be controlled through ALSA (iso channel).
65 *
66 * Changes:
67 *
68 * - Audit copy_from_user in amdtp_write.
69 * Daniele Bellucci <bellucda@tiscali.it>
70 *
71 */
72
73#include <linux/module.h>
74#include <linux/list.h>
75#include <linux/sched.h>
76#include <linux/types.h>
77#include <linux/fs.h>
78#include <linux/ioctl.h>
79#include <linux/wait.h>
80#include <linux/pci.h>
81#include <linux/interrupt.h>
82#include <linux/poll.h>
83#include <linux/compat.h>
84#include <linux/cdev.h>
85#include <asm/uaccess.h>
86#include <asm/atomic.h>
87
88#include "hosts.h"
89#include "highlevel.h"
90#include "ieee1394.h"
91#include "ieee1394_core.h"
92#include "ohci1394.h"
93
94#include "amdtp.h"
95#include "cmp.h"
96
97#define FMT_AMDTP 0x10
98#define FDF_AM824 0x00
99#define FDF_SFC_32KHZ 0x00
100#define FDF_SFC_44K1HZ 0x01
101#define FDF_SFC_48KHZ 0x02
102#define FDF_SFC_88K2HZ 0x03
103#define FDF_SFC_96KHZ 0x04
104#define FDF_SFC_176K4HZ 0x05
105#define FDF_SFC_192KHZ 0x06
106
107struct descriptor_block {
108 struct output_more_immediate {
109 u32 control;
110 u32 pad0;
111 u32 skip;
112 u32 pad1;
113 u32 header[4];
114 } header_desc;
115
116 struct output_last {
117 u32 control;
118 u32 data_address;
119 u32 branch;
120 u32 status;
121 } payload_desc;
122};
123
124struct packet {
125 struct descriptor_block *db;
126 dma_addr_t db_bus;
127 struct iso_packet *payload;
128 dma_addr_t payload_bus;
129};
130
131#include <asm/byteorder.h>
132
133#if defined __BIG_ENDIAN_BITFIELD
134
135struct iso_packet {
136 /* First quadlet */
137 unsigned int dbs : 8;
138 unsigned int eoh0 : 2;
139 unsigned int sid : 6;
140
141 unsigned int dbc : 8;
142 unsigned int fn : 2;
143 unsigned int qpc : 3;
144 unsigned int sph : 1;
145 unsigned int reserved : 2;
146
147 /* Second quadlet */
148 unsigned int fdf : 8;
149 unsigned int eoh1 : 2;
150 unsigned int fmt : 6;
151
152 unsigned int syt : 16;
153
154 quadlet_t data[0];
155};
156
157#elif defined __LITTLE_ENDIAN_BITFIELD
158
159struct iso_packet {
160 /* First quadlet */
161 unsigned int sid : 6;
162 unsigned int eoh0 : 2;
163 unsigned int dbs : 8;
164
165 unsigned int reserved : 2;
166 unsigned int sph : 1;
167 unsigned int qpc : 3;
168 unsigned int fn : 2;
169 unsigned int dbc : 8;
170
171 /* Second quadlet */
172 unsigned int fmt : 6;
173 unsigned int eoh1 : 2;
174 unsigned int fdf : 8;
175
176 unsigned int syt : 16;
177
178 quadlet_t data[0];
179};
180
181#else
182
183#error Unknown bitfield type
184
185#endif
186
187struct fraction {
188 int integer;
189 int numerator;
190 int denominator;
191};
192
193#define PACKET_LIST_SIZE 256
194#define MAX_PACKET_LISTS 4
195
196struct packet_list {
197 struct list_head link;
198 int last_cycle_count;
199 struct packet packets[PACKET_LIST_SIZE];
200};
201
202#define BUFFER_SIZE 128
203
204/* This implements a circular buffer for incoming samples. */
205
206struct buffer {
207 size_t head, tail, length, size;
208 unsigned char data[0];
209};
210
211struct stream {
212 int iso_channel;
213 int format;
214 int rate;
215 int dimension;
216 int fdf;
217 int mode;
218 int sample_format;
219 struct cmp_pcr *opcr;
220
221 /* Input samples are copied here. */
222 struct buffer *input;
223
224 /* ISO Packer state */
225 unsigned char dbc;
226 struct packet_list *current_packet_list;
227 int current_packet;
228 struct fraction ready_samples, samples_per_cycle;
229
230 /* We use these to generate control bits when we are packing
231 * iec958 data.
232 */
233 int iec958_frame_count;
234 int iec958_rate_code;
235
236 /* The cycle_count and cycle_offset fields are used for the
237 * synchronization timestamps (syt) in the cip header. They
238 * are incremented by at least a cycle every time we put a
239 * time stamp in a packet. As we don't time stamp all
 240 * packets, cycle_count isn't updated in every cycle, and
241 * sometimes it's incremented by 2. Thus, we have
242 * cycle_count2, which is simply incremented by one with each
243 * packet, so we can compare it to the transmission time
244 * written back in the dma programs.
245 */
246 atomic_t cycle_count, cycle_count2;
247 struct fraction cycle_offset, ticks_per_syt_offset;
248 int syt_interval;
249 int stale_count;
250
 251 /* These fields control the sample output to the DMA engine.
252 * The dma_packet_lists list holds packet lists currently
253 * queued for dma; the head of the list is currently being
254 * processed. The last program in a packet list generates an
255 * interrupt, which removes the head from dma_packet_lists and
256 * puts it back on the free list.
257 */
258 struct list_head dma_packet_lists;
259 struct list_head free_packet_lists;
260 wait_queue_head_t packet_list_wait;
261 spinlock_t packet_list_lock;
262 struct ohci1394_iso_tasklet iso_tasklet;
263 struct pci_pool *descriptor_pool, *packet_pool;
264
265 /* Streams at a host controller are chained through this field. */
266 struct list_head link;
267 struct amdtp_host *host;
268};
269
270struct amdtp_host {
271 struct hpsb_host *host;
272 struct ti_ohci *ohci;
273 struct list_head stream_list;
274 spinlock_t stream_list_lock;
275};
276
277static struct hpsb_highlevel amdtp_highlevel;
278
279
280/* FIXME: This doesn't belong here... */
281
282#define OHCI1394_CONTEXT_CYCLE_MATCH 0x80000000
283#define OHCI1394_CONTEXT_RUN 0x00008000
284#define OHCI1394_CONTEXT_WAKE 0x00001000
285#define OHCI1394_CONTEXT_DEAD 0x00000800
286#define OHCI1394_CONTEXT_ACTIVE 0x00000400
287
288static void ohci1394_start_it_ctx(struct ti_ohci *ohci, int ctx,
289 dma_addr_t first_cmd, int z, int cycle_match)
290{
291 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << ctx);
292 reg_write(ohci, OHCI1394_IsoXmitCommandPtr + ctx * 16, first_cmd | z);
293 reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16, ~0);
294 wmb();
295 reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
296 OHCI1394_CONTEXT_CYCLE_MATCH | (cycle_match << 16) |
297 OHCI1394_CONTEXT_RUN);
298}
299
300static void ohci1394_wake_it_ctx(struct ti_ohci *ohci, int ctx)
301{
302 reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
303 OHCI1394_CONTEXT_WAKE);
304}
305
306static void ohci1394_stop_it_ctx(struct ti_ohci *ohci, int ctx, int synchronous)
307{
308 u32 control;
309 int wait;
310
311 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << ctx);
312 reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16,
313 OHCI1394_CONTEXT_RUN);
314 wmb();
315
316 if (synchronous) {
317 for (wait = 0; wait < 5; wait++) {
318 control = reg_read(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16);
319 if ((control & OHCI1394_CONTEXT_ACTIVE) == 0)
320 break;
321
322 schedule_timeout_interruptible(1);
323 }
324 }
325}
326
 327/* Note: we can test if free_packet_lists is empty without acquiring
 328 * the packet_list_lock. The interrupt handler only adds to the free
 329 * list, so there is no race condition between testing the list non-empty
330 * and acquiring the lock.
331 */
332
333static struct packet_list *stream_get_free_packet_list(struct stream *s)
334{
335 struct packet_list *pl;
336 unsigned long flags;
337
338 if (list_empty(&s->free_packet_lists))
339 return NULL;
340
341 spin_lock_irqsave(&s->packet_list_lock, flags);
342 pl = list_entry(s->free_packet_lists.next, struct packet_list, link);
343 list_del(&pl->link);
344 spin_unlock_irqrestore(&s->packet_list_lock, flags);
345
346 return pl;
347}
348
349static void stream_start_dma(struct stream *s, struct packet_list *pl)
350{
351 u32 syt_cycle, cycle_count, start_cycle;
352
353 cycle_count = reg_read(s->host->ohci,
354 OHCI1394_IsochronousCycleTimer) >> 12;
355 syt_cycle = (pl->last_cycle_count - PACKET_LIST_SIZE + 1) & 0x0f;
356
357 /* We program the DMA controller to start transmission at
358 * least 17 cycles from now - this happens when the lower four
 359 * bits of cycle_count are 0x0f and syt_cycle is 0; in this
360 * case the start cycle is cycle_count - 15 + 32. */
361 start_cycle = (cycle_count & ~0x0f) + 32 + syt_cycle;
362 if ((start_cycle & 0x1fff) >= 8000)
363 start_cycle = start_cycle - 8000 + 0x2000;
364
365 ohci1394_start_it_ctx(s->host->ohci, s->iso_tasklet.context,
366 pl->packets[0].db_bus, 3,
367 start_cycle & 0x7fff);
368}
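As a quick numeric check of the cycle-match arithmetic above, here is a stand-alone user-space sketch (not part of the driver; the cycle timer value and syt_cycle are made-up inputs) that repeats the same computation:

#include <stdio.h>

int main(void)
{
	unsigned int cycle_count = 0x12f;	/* assumed current cycle timer >> 12 */
	unsigned int syt_cycle = 3;		/* low 4 bits of the list's time stamp */
	unsigned int start_cycle;

	start_cycle = (cycle_count & ~0x0fu) + 32 + syt_cycle;
	if ((start_cycle & 0x1fff) >= 8000)	/* cycle field wraps at 8000; carry into seconds */
		start_cycle = start_cycle - 8000 + 0x2000;

	/* Prints 0x143, i.e. 20 cycles after 0x12f; the scheme never
	 * schedules the start fewer than 17 cycles ahead. */
	printf("start at cycle 0x%x\n", start_cycle & 0x7fff);
	return 0;
}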
369
370static void stream_put_dma_packet_list(struct stream *s,
371 struct packet_list *pl)
372{
373 unsigned long flags;
374 struct packet_list *prev;
375
376 /* Remember the cycle_count used for timestamping the last packet. */
377 pl->last_cycle_count = atomic_read(&s->cycle_count2) - 1;
378 pl->packets[PACKET_LIST_SIZE - 1].db->payload_desc.branch = 0;
379
380 spin_lock_irqsave(&s->packet_list_lock, flags);
381 list_add_tail(&pl->link, &s->dma_packet_lists);
382 spin_unlock_irqrestore(&s->packet_list_lock, flags);
383
384 prev = list_entry(pl->link.prev, struct packet_list, link);
385 if (pl->link.prev != &s->dma_packet_lists) {
386 struct packet *last = &prev->packets[PACKET_LIST_SIZE - 1];
387 last->db->payload_desc.branch = pl->packets[0].db_bus | 3;
388 last->db->header_desc.skip = pl->packets[0].db_bus | 3;
389 ohci1394_wake_it_ctx(s->host->ohci, s->iso_tasklet.context);
390 }
391 else
392 stream_start_dma(s, pl);
393}
394
395static void stream_shift_packet_lists(unsigned long l)
396{
397 struct stream *s = (struct stream *) l;
398 struct packet_list *pl;
399 struct packet *last;
400 int diff;
401
402 if (list_empty(&s->dma_packet_lists)) {
403 HPSB_ERR("empty dma_packet_lists in %s", __FUNCTION__);
404 return;
405 }
406
407 /* Now that we know the list is non-empty, we can get the head
408 * of the list without locking, because the process context
409 * only adds to the tail.
410 */
411 pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
412 last = &pl->packets[PACKET_LIST_SIZE - 1];
413
414 /* This is weird... if we stop dma processing in the middle of
415 * a packet list, the dma context immediately generates an
416 * interrupt if we enable it again later. This only happens
417 * when amdtp_release is interrupted while waiting for dma to
418 * complete, though. Anyway, we detect this by seeing that
419 * the status of the dma descriptor that we expected an
420 * interrupt from is still 0.
421 */
422 if (last->db->payload_desc.status == 0) {
423 HPSB_INFO("weird interrupt...");
424 return;
425 }
426
427 /* If the last descriptor block does not specify a branch
428 * address, we have a sample underflow.
429 */
430 if (last->db->payload_desc.branch == 0)
431 HPSB_INFO("FIXME: sample underflow...");
432
433 /* Here we check when (which cycle) the last packet was sent
434 * and compare it to what the iso packer was using at the
435 * time. If there is a mismatch, we adjust the cycle count in
436 * the iso packer. However, there are still up to
437 * MAX_PACKET_LISTS packet lists queued with bad time stamps,
438 * so we disable time stamp monitoring for the next
439 * MAX_PACKET_LISTS packet lists.
440 */
441 diff = (last->db->payload_desc.status - pl->last_cycle_count) & 0xf;
442 if (diff > 0 && s->stale_count == 0) {
443 atomic_add(diff, &s->cycle_count);
444 atomic_add(diff, &s->cycle_count2);
445 s->stale_count = MAX_PACKET_LISTS;
446 }
447
448 if (s->stale_count > 0)
449 s->stale_count--;
450
451 /* Finally, we move the packet list that was just processed
452 * back to the free list, and notify any waiters.
453 */
454 spin_lock(&s->packet_list_lock);
455 list_del(&pl->link);
456 list_add_tail(&pl->link, &s->free_packet_lists);
457 spin_unlock(&s->packet_list_lock);
458
459 wake_up_interruptible(&s->packet_list_wait);
460}
461
462static struct packet *stream_current_packet(struct stream *s)
463{
464 if (s->current_packet_list == NULL &&
465 (s->current_packet_list = stream_get_free_packet_list(s)) == NULL)
466 return NULL;
467
468 return &s->current_packet_list->packets[s->current_packet];
469}
470
471static void stream_queue_packet(struct stream *s)
472{
473 s->current_packet++;
474 if (s->current_packet == PACKET_LIST_SIZE) {
475 stream_put_dma_packet_list(s, s->current_packet_list);
476 s->current_packet_list = NULL;
477 s->current_packet = 0;
478 }
479}
480
481/* Integer fractional math. When we transmit a 44k1Hz signal we must
482 * send 5 41/80 samples per isochronous cycle, as these occur 8000
483 * times a second. Of course, we must send an integral number of
484 * samples in a packet, so we use the integer math to alternate
485 * between sending 5 and 6 samples per packet.
486 */
487
488static void fraction_init(struct fraction *f, int numerator, int denominator)
489{
490 f->integer = numerator / denominator;
491 f->numerator = numerator % denominator;
492 f->denominator = denominator;
493}
494
495static __inline__ void fraction_add(struct fraction *dst,
496 struct fraction *src1,
497 struct fraction *src2)
498{
499 /* assert: src1->denominator == src2->denominator */
500
501 int sum, denom;
502
503 /* We use these two local variables to allow gcc to optimize
504 * the division and the modulo into only one division. */
505
506 sum = src1->numerator + src2->numerator;
507 denom = src1->denominator;
508 dst->integer = src1->integer + src2->integer + sum / denom;
509 dst->numerator = sum % denom;
510 dst->denominator = denom;
511}
512
513static __inline__ void fraction_sub_int(struct fraction *dst,
514 struct fraction *src, int integer)
515{
516 dst->integer = src->integer - integer;
517 dst->numerator = src->numerator;
518 dst->denominator = src->denominator;
519}
520
521static __inline__ int fraction_floor(struct fraction *frac)
522{
523 return frac->integer;
524}
525
526static __inline__ int fraction_ceil(struct fraction *frac)
527{
528 return frac->integer + (frac->numerator > 0 ? 1 : 0);
529}
530
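To make the 5 41/80 arithmetic above concrete, here is a stand-alone user-space sketch (not driver code; main() and the totals it prints are illustrative) that re-runs the same fraction bookkeeping for a 44100 Hz stream and counts how often 5 or 6 samples would be sent over 80 cycles:

#include <stdio.h>

struct fraction {
	int integer;
	int numerator;
	int denominator;
};

static void fraction_init(struct fraction *f, int numerator, int denominator)
{
	f->integer = numerator / denominator;
	f->numerator = numerator % denominator;
	f->denominator = denominator;
}

static void fraction_add(struct fraction *dst, struct fraction *src1,
			 struct fraction *src2)
{
	int sum = src1->numerator + src2->numerator;
	int denom = src1->denominator;

	dst->integer = src1->integer + src2->integer + sum / denom;
	dst->numerator = sum % denom;
	dst->denominator = denom;
}

int main(void)
{
	struct fraction samples_per_cycle, ready_samples, next;
	int cycle, fives = 0, sixes = 0;

	fraction_init(&samples_per_cycle, 44100, 8000);	/* 5 41/80 */
	fraction_init(&ready_samples, 0, 8000);

	for (cycle = 0; cycle < 80; cycle++) {
		int nevents;

		fraction_add(&next, &ready_samples, &samples_per_cycle);
		nevents = next.integer;			/* fraction_floor() */
		next.integer -= nevents;		/* fraction_sub_int() */
		ready_samples = next;

		if (nevents == 6)
			sixes++;
		else
			fives++;
	}

	/* Prints 41 cycles of 6 and 39 cycles of 5, i.e. 441 samples
	 * in 80 cycles, exactly 44100 samples per second. */
	printf("%d cycles of 6 samples, %d cycles of 5 samples\n", sixes, fives);
	return 0;
}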
531static void packet_initialize(struct packet *p, struct packet *next)
532{
533 /* Here we initialize the dma descriptor block for
534 * transferring one iso packet. We use two descriptors per
 535 * packet: an OUTPUT_MORE_IMMEDIATE descriptor for the
536 * IEEE1394 iso packet header and an OUTPUT_LAST descriptor
537 * for the payload.
538 */
539
540 p->db->header_desc.control =
541 DMA_CTL_OUTPUT_MORE | DMA_CTL_IMMEDIATE | 8;
542
543 if (next) {
544 p->db->payload_desc.control =
545 DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH;
546 p->db->payload_desc.branch = next->db_bus | 3;
547 p->db->header_desc.skip = next->db_bus | 3;
548 }
549 else {
550 p->db->payload_desc.control =
551 DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH |
552 DMA_CTL_UPDATE | DMA_CTL_IRQ;
553 p->db->payload_desc.branch = 0;
554 p->db->header_desc.skip = 0;
555 }
556 p->db->payload_desc.data_address = p->payload_bus;
557 p->db->payload_desc.status = 0;
558}
559
560static struct packet_list *packet_list_alloc(struct stream *s)
561{
562 int i;
563 struct packet_list *pl;
564 struct packet *next;
565
566 pl = kmalloc(sizeof *pl, SLAB_KERNEL);
567 if (pl == NULL)
568 return NULL;
569
570 for (i = 0; i < PACKET_LIST_SIZE; i++) {
571 struct packet *p = &pl->packets[i];
572 p->db = pci_pool_alloc(s->descriptor_pool, SLAB_KERNEL,
573 &p->db_bus);
574 p->payload = pci_pool_alloc(s->packet_pool, SLAB_KERNEL,
575 &p->payload_bus);
576 }
577
578 for (i = 0; i < PACKET_LIST_SIZE; i++) {
579 if (i < PACKET_LIST_SIZE - 1)
580 next = &pl->packets[i + 1];
581 else
582 next = NULL;
583 packet_initialize(&pl->packets[i], next);
584 }
585
586 return pl;
587}
588
589static void packet_list_free(struct packet_list *pl, struct stream *s)
590{
591 int i;
592
593 for (i = 0; i < PACKET_LIST_SIZE; i++) {
594 struct packet *p = &pl->packets[i];
595 pci_pool_free(s->descriptor_pool, p->db, p->db_bus);
596 pci_pool_free(s->packet_pool, p->payload, p->payload_bus);
597 }
598 kfree(pl);
599}
600
601static struct buffer *buffer_alloc(int size)
602{
603 struct buffer *b;
604
605 b = kmalloc(sizeof *b + size, SLAB_KERNEL);
606 if (b == NULL)
607 return NULL;
608 b->head = 0;
609 b->tail = 0;
610 b->length = 0;
611 b->size = size;
612
613 return b;
614}
615
616static unsigned char *buffer_get_bytes(struct buffer *buffer, int size)
617{
618 unsigned char *p;
619
620 if (buffer->head + size > buffer->size)
621 BUG();
622
623 p = &buffer->data[buffer->head];
624 buffer->head += size;
625 if (buffer->head == buffer->size)
626 buffer->head = 0;
627 buffer->length -= size;
628
629 return p;
630}
631
632static unsigned char *buffer_put_bytes(struct buffer *buffer,
633 size_t max, size_t *actual)
634{
635 size_t length;
636 unsigned char *p;
637
638 p = &buffer->data[buffer->tail];
639 length = min(buffer->size - buffer->length, max);
640 if (buffer->tail + length < buffer->size) {
641 *actual = length;
642 buffer->tail += length;
643 }
644 else {
645 *actual = buffer->size - buffer->tail;
646 buffer->tail = 0;
647 }
648
649 buffer->length += *actual;
650 return p;
651}
652
653static u32 get_iec958_header_bits(struct stream *s, int sub_frame, u32 sample)
654{
655 int csi, parity, shift;
656 int block_start;
657 u32 bits;
658
659 switch (s->iec958_frame_count) {
660 case 1:
661 csi = s->format == AMDTP_FORMAT_IEC958_AC3;
662 break;
663 case 2:
664 case 9:
665 csi = 1;
666 break;
667 case 24 ... 27:
668 csi = (s->iec958_rate_code >> (27 - s->iec958_frame_count)) & 0x01;
669 break;
670 default:
671 csi = 0;
672 break;
673 }
674
675 block_start = (s->iec958_frame_count == 0 && sub_frame == 0);
676
677 /* The parity bit is the xor of the sample bits and the
678 * channel status info bit. */
679 for (shift = 16, parity = sample ^ csi; shift > 0; shift >>= 1)
680 parity ^= (parity >> shift);
681
682 bits = (block_start << 5) | /* Block start bit */
683 ((sub_frame == 0) << 4) | /* Subframe bit */
684 ((parity & 1) << 3) | /* Parity bit */
685 (csi << 2); /* Channel status info bit */
686
687 return bits;
688}
689
690static u32 get_header_bits(struct stream *s, int sub_frame, u32 sample)
691{
692 switch (s->format) {
693 case AMDTP_FORMAT_IEC958_PCM:
694 case AMDTP_FORMAT_IEC958_AC3:
695 return get_iec958_header_bits(s, sub_frame, sample);
696
697 case AMDTP_FORMAT_RAW:
698 return 0x40;
699
700 default:
701 return 0;
702 }
703}
704
705static void fill_payload_le16(struct stream *s, quadlet_t *data, int nevents)
706{
707 quadlet_t *event, sample, bits;
708 unsigned char *p;
709 int i, j;
710
711 for (i = 0, event = data; i < nevents; i++) {
712
713 for (j = 0; j < s->dimension; j++) {
714 p = buffer_get_bytes(s->input, 2);
715 sample = (p[1] << 16) | (p[0] << 8);
716 bits = get_header_bits(s, j, sample);
717 event[j] = cpu_to_be32((bits << 24) | sample);
718 }
719
720 event += s->dimension;
721 if (++s->iec958_frame_count == 192)
722 s->iec958_frame_count = 0;
723 }
724}
725
726static void fill_packet(struct stream *s, struct packet *packet, int nevents)
727{
728 int syt_index, syt, size;
729 u32 control;
730
731 size = (nevents * s->dimension + 2) * sizeof(quadlet_t);
732
733 /* Update DMA descriptors */
734 packet->db->payload_desc.status = 0;
735 control = packet->db->payload_desc.control & 0xffff0000;
736 packet->db->payload_desc.control = control | size;
737
738 /* Fill IEEE1394 headers */
739 packet->db->header_desc.header[0] =
740 (IEEE1394_SPEED_100 << 16) | (0x01 << 14) |
741 (s->iso_channel << 8) | (TCODE_ISO_DATA << 4);
742 packet->db->header_desc.header[1] = size << 16;
743
744 /* Calculate synchronization timestamp (syt). First we
745 * determine syt_index, that is, the index in the packet of
746 * the sample for which the timestamp is valid. */
747 syt_index = (s->syt_interval - s->dbc) & (s->syt_interval - 1);
748 if (syt_index < nevents) {
749 syt = ((atomic_read(&s->cycle_count) << 12) |
750 s->cycle_offset.integer) & 0xffff;
751 fraction_add(&s->cycle_offset,
752 &s->cycle_offset, &s->ticks_per_syt_offset);
753
754 /* This next addition should be modulo 8000 (0x1f40),
755 * but we only use the lower 4 bits of cycle_count, so
756 * we don't need the modulo. */
757 atomic_add(s->cycle_offset.integer / 3072, &s->cycle_count);
758 s->cycle_offset.integer %= 3072;
759 }
760 else
761 syt = 0xffff;
762
763 atomic_inc(&s->cycle_count2);
764
765 /* Fill cip header */
766 packet->payload->eoh0 = 0;
767 packet->payload->sid = s->host->host->node_id & 0x3f;
768 packet->payload->dbs = s->dimension;
769 packet->payload->fn = 0;
770 packet->payload->qpc = 0;
771 packet->payload->sph = 0;
772 packet->payload->reserved = 0;
773 packet->payload->dbc = s->dbc;
774 packet->payload->eoh1 = 2;
775 packet->payload->fmt = FMT_AMDTP;
776 packet->payload->fdf = s->fdf;
777 packet->payload->syt = cpu_to_be16(syt);
778
779 switch (s->sample_format) {
780 case AMDTP_INPUT_LE16:
781 fill_payload_le16(s, packet->payload->data, nevents);
782 break;
783 }
784
785 s->dbc += nevents;
786}
787
788static void stream_flush(struct stream *s)
789{
790 struct packet *p;
791 int nevents;
792 struct fraction next;
793
794 /* The AMDTP specifies two transmission modes: blocking and
795 * non-blocking. In blocking mode you always transfer
796 * syt_interval or zero samples, whereas in non-blocking mode
797 * you send as many samples as you have available at transfer
798 * time.
799 *
800 * The fraction samples_per_cycle specifies the number of
801 * samples that become available per cycle. We add this to
802 * the fraction ready_samples, which specifies the number of
803 * leftover samples from the previous transmission. The sum,
804 * stored in the fraction next, specifies the number of
805 * samples available for transmission, and from this we
806 * determine the number of samples to actually transmit.
807 */
808
809 while (1) {
810 fraction_add(&next, &s->ready_samples, &s->samples_per_cycle);
811 if (s->mode == AMDTP_MODE_BLOCKING) {
812 if (fraction_floor(&next) >= s->syt_interval)
813 nevents = s->syt_interval;
814 else
815 nevents = 0;
816 }
817 else
818 nevents = fraction_floor(&next);
819
820 p = stream_current_packet(s);
821 if (s->input->length < nevents * s->dimension * 2 || p == NULL)
822 break;
823
824 fill_packet(s, p, nevents);
825 stream_queue_packet(s);
826
827 /* Now that we have successfully queued the packet for
828 * transmission, we update the fraction ready_samples. */
829 fraction_sub_int(&s->ready_samples, &next, nevents);
830 }
831}
832
833static int stream_alloc_packet_lists(struct stream *s)
834{
835 int max_nevents, max_packet_size, i;
836
837 if (s->mode == AMDTP_MODE_BLOCKING)
838 max_nevents = s->syt_interval;
839 else
840 max_nevents = fraction_ceil(&s->samples_per_cycle);
841
842 max_packet_size = max_nevents * s->dimension * 4 + 8;
843 s->packet_pool = pci_pool_create("packet pool", s->host->ohci->dev,
844 max_packet_size, 0, 0);
845
846 if (s->packet_pool == NULL)
847 return -1;
848
849 INIT_LIST_HEAD(&s->free_packet_lists);
850 INIT_LIST_HEAD(&s->dma_packet_lists);
851 for (i = 0; i < MAX_PACKET_LISTS; i++) {
852 struct packet_list *pl = packet_list_alloc(s);
853 if (pl == NULL)
854 break;
855 list_add_tail(&pl->link, &s->free_packet_lists);
856 }
857
858 return i < MAX_PACKET_LISTS ? -1 : 0;
859}
860
861static void stream_free_packet_lists(struct stream *s)
862{
863 struct packet_list *packet_l, *packet_l_next;
864
865 if (s->current_packet_list != NULL)
866 packet_list_free(s->current_packet_list, s);
867 list_for_each_entry_safe(packet_l, packet_l_next, &s->dma_packet_lists, link)
868 packet_list_free(packet_l, s);
869 list_for_each_entry_safe(packet_l, packet_l_next, &s->free_packet_lists, link)
870 packet_list_free(packet_l, s);
871 if (s->packet_pool != NULL)
872 pci_pool_destroy(s->packet_pool);
873
874 s->current_packet_list = NULL;
875 INIT_LIST_HEAD(&s->free_packet_lists);
876 INIT_LIST_HEAD(&s->dma_packet_lists);
877 s->packet_pool = NULL;
878}
879
880static void plug_update(struct cmp_pcr *plug, void *data)
881{
882 struct stream *s = data;
883
884 HPSB_INFO("plug update: p2p_count=%d, channel=%d",
885 plug->p2p_count, plug->channel);
886 s->iso_channel = plug->channel;
887 if (plug->p2p_count > 0) {
888 struct packet_list *pl;
889
890 pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
891 stream_start_dma(s, pl);
892 }
893 else {
894 ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 0);
895 }
896}
897
898static int stream_configure(struct stream *s, int cmd, struct amdtp_ioctl *cfg)
899{
900 const int transfer_delay = 9000;
901
902 if (cfg->format <= AMDTP_FORMAT_IEC958_AC3)
903 s->format = cfg->format;
904 else
905 return -EINVAL;
906
907 switch (cfg->rate) {
908 case 32000:
909 s->syt_interval = 8;
910 s->fdf = FDF_SFC_32KHZ;
911 s->iec958_rate_code = 0x0c;
912 break;
913 case 44100:
914 s->syt_interval = 8;
915 s->fdf = FDF_SFC_44K1HZ;
916 s->iec958_rate_code = 0x00;
917 break;
918 case 48000:
919 s->syt_interval = 8;
920 s->fdf = FDF_SFC_48KHZ;
921 s->iec958_rate_code = 0x04;
922 break;
923 case 88200:
924 s->syt_interval = 16;
925 s->fdf = FDF_SFC_88K2HZ;
926 s->iec958_rate_code = 0x00;
927 break;
928 case 96000:
929 s->syt_interval = 16;
930 s->fdf = FDF_SFC_96KHZ;
931 s->iec958_rate_code = 0x00;
932 break;
933 case 176400:
934 s->syt_interval = 32;
935 s->fdf = FDF_SFC_176K4HZ;
936 s->iec958_rate_code = 0x00;
937 break;
938 case 192000:
939 s->syt_interval = 32;
940 s->fdf = FDF_SFC_192KHZ;
941 s->iec958_rate_code = 0x00;
942 break;
943
944 default:
945 return -EINVAL;
946 }
947
948 s->rate = cfg->rate;
949 fraction_init(&s->samples_per_cycle, s->rate, 8000);
950 fraction_init(&s->ready_samples, 0, 8000);
951
952 /* The ticks_per_syt_offset is initialized to the number of
953 * ticks between syt_interval events. The number of ticks per
954 * second is 24.576e6, so the number of ticks between
955 * syt_interval events is 24.576e6 * syt_interval / rate.
956 */
957 fraction_init(&s->ticks_per_syt_offset,
958 24576000 * s->syt_interval, s->rate);
959 fraction_init(&s->cycle_offset, (transfer_delay % 3072) * s->rate, s->rate);
960 atomic_set(&s->cycle_count, transfer_delay / 3072);
961 atomic_set(&s->cycle_count2, 0);
962
963 s->mode = cfg->mode;
964 s->sample_format = AMDTP_INPUT_LE16;
965
966 /* When using the AM824 raw subformat we can stream signals of
967 * any dimension. The IEC958 subformat, however, only
968 * supports 2 channels.
969 */
970 if (s->format == AMDTP_FORMAT_RAW || cfg->dimension == 2)
971 s->dimension = cfg->dimension;
972 else
973 return -EINVAL;
974
975 if (s->opcr != NULL) {
976 cmp_unregister_opcr(s->host->host, s->opcr);
977 s->opcr = NULL;
978 }
979
980 switch(cmd) {
981 case AMDTP_IOC_PLUG:
982 s->opcr = cmp_register_opcr(s->host->host, cfg->u.plug,
983 /*payload*/ 12, plug_update, s);
984 if (s->opcr == NULL)
985 return -EINVAL;
986 s->iso_channel = s->opcr->channel;
987 break;
988
989 case AMDTP_IOC_CHANNEL:
990 if (cfg->u.channel >= 0 && cfg->u.channel < 64)
991 s->iso_channel = cfg->u.channel;
992 else
993 return -EINVAL;
994 break;
995 }
996
997 /* The ioctl settings were all valid, so we realloc the packet
998 * lists to make sure the packet size is big enough.
999 */
1000 if (s->packet_pool != NULL)
1001 stream_free_packet_lists(s);
1002
1003 if (stream_alloc_packet_lists(s) < 0) {
1004 stream_free_packet_lists(s);
1005 return -ENOMEM;
1006 }
1007
1008 return 0;
1009}
1010
1011static struct stream *stream_alloc(struct amdtp_host *host)
1012{
1013 struct stream *s;
1014 unsigned long flags;
1015
1016 s = kmalloc(sizeof(struct stream), SLAB_KERNEL);
1017 if (s == NULL)
1018 return NULL;
1019
1020 memset(s, 0, sizeof(struct stream));
1021 s->host = host;
1022
1023 s->input = buffer_alloc(BUFFER_SIZE);
1024 if (s->input == NULL) {
1025 kfree(s);
1026 return NULL;
1027 }
1028
1029 s->descriptor_pool = pci_pool_create("descriptor pool", host->ohci->dev,
1030 sizeof(struct descriptor_block),
1031 16, 0);
1032
1033 if (s->descriptor_pool == NULL) {
1034 kfree(s->input);
1035 kfree(s);
1036 return NULL;
1037 }
1038
1039 INIT_LIST_HEAD(&s->free_packet_lists);
1040 INIT_LIST_HEAD(&s->dma_packet_lists);
1041
1042 init_waitqueue_head(&s->packet_list_wait);
1043 spin_lock_init(&s->packet_list_lock);
1044
1045 ohci1394_init_iso_tasklet(&s->iso_tasklet, OHCI_ISO_TRANSMIT,
1046 stream_shift_packet_lists,
1047 (unsigned long) s);
1048
1049 if (ohci1394_register_iso_tasklet(host->ohci, &s->iso_tasklet) < 0) {
1050 pci_pool_destroy(s->descriptor_pool);
1051 kfree(s->input);
1052 kfree(s);
1053 return NULL;
1054 }
1055
1056 spin_lock_irqsave(&host->stream_list_lock, flags);
1057 list_add_tail(&s->link, &host->stream_list);
1058 spin_unlock_irqrestore(&host->stream_list_lock, flags);
1059
1060 return s;
1061}
1062
1063static void stream_free(struct stream *s)
1064{
1065 unsigned long flags;
1066
1067 /* Stop the DMA. We wait for the dma packet list to become
1068 * empty and let the dma controller run out of programs. This
1069 * seems to be more reliable than stopping it directly, since
1070 * that sometimes generates an it transmit interrupt if we
1071 * later re-enable the context.
1072 */
1073 wait_event_interruptible(s->packet_list_wait,
1074 list_empty(&s->dma_packet_lists));
1075
1076 ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 1);
1077 ohci1394_unregister_iso_tasklet(s->host->ohci, &s->iso_tasklet);
1078
1079 if (s->opcr != NULL)
1080 cmp_unregister_opcr(s->host->host, s->opcr);
1081
1082 spin_lock_irqsave(&s->host->stream_list_lock, flags);
1083 list_del(&s->link);
1084 spin_unlock_irqrestore(&s->host->stream_list_lock, flags);
1085
1086 kfree(s->input);
1087
1088 stream_free_packet_lists(s);
1089 pci_pool_destroy(s->descriptor_pool);
1090
1091 kfree(s);
1092}
1093
1094/* File operations */
1095
1096static ssize_t amdtp_write(struct file *file, const char __user *buffer, size_t count,
1097 loff_t *offset_is_ignored)
1098{
1099 struct stream *s = file->private_data;
1100 unsigned char *p;
1101 int i;
1102 size_t length;
1103
1104 if (s->packet_pool == NULL)
1105 return -EBADFD;
1106
1107 /* Fill the circular buffer from the input buffer and call the
1108 * iso packer when the buffer is full. The iso packer may
1109 * leave bytes in the buffer for two reasons: either the
 1110 * remaining bytes weren't enough to build a new packet, or
1111 * there were no free packet lists. In the first case we
1112 * re-fill the buffer and call the iso packer again or return
1113 * if we used all the data from userspace. In the second
1114 * case, the wait_event_interruptible will block until the irq
1115 * handler frees a packet list.
1116 */
1117
1118 for (i = 0; i < count; i += length) {
1119 p = buffer_put_bytes(s->input, count - i, &length);
1120 if (copy_from_user(p, buffer + i, length))
1121 return -EFAULT;
1122 if (s->input->length < s->input->size)
1123 continue;
1124
1125 stream_flush(s);
1126
1127 if (s->current_packet_list != NULL)
1128 continue;
1129
1130 if (file->f_flags & O_NONBLOCK)
1131 return i + length > 0 ? i + length : -EAGAIN;
1132
1133 if (wait_event_interruptible(s->packet_list_wait,
1134 !list_empty(&s->free_packet_lists)))
1135 return -EINTR;
1136 }
1137
1138 return count;
1139}
1140
1141static long amdtp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1142{
1143 struct stream *s = file->private_data;
1144 struct amdtp_ioctl cfg;
1145 int err;
1146 lock_kernel();
1147 switch(cmd)
1148 {
1149 case AMDTP_IOC_PLUG:
1150 case AMDTP_IOC_CHANNEL:
1151 if (copy_from_user(&cfg, (struct amdtp_ioctl __user *) arg, sizeof cfg))
1152 err = -EFAULT;
1153 else
1154 err = stream_configure(s, cmd, &cfg);
1155 break;
1156
1157 default:
1158 err = -EINVAL;
1159 break;
1160 }
1161 unlock_kernel();
1162 return err;
1163}
1164
1165static unsigned int amdtp_poll(struct file *file, poll_table *pt)
1166{
1167 struct stream *s = file->private_data;
1168
1169 poll_wait(file, &s->packet_list_wait, pt);
1170
1171 if (!list_empty(&s->free_packet_lists))
1172 return POLLOUT | POLLWRNORM;
1173 else
1174 return 0;
1175}
1176
1177static int amdtp_open(struct inode *inode, struct file *file)
1178{
1179 struct amdtp_host *host;
1180 int i = ieee1394_file_to_instance(file);
1181
1182 host = hpsb_get_hostinfo_bykey(&amdtp_highlevel, i);
1183 if (host == NULL)
1184 return -ENODEV;
1185
1186 file->private_data = stream_alloc(host);
1187 if (file->private_data == NULL)
1188 return -ENOMEM;
1189
1190 return 0;
1191}
1192
1193static int amdtp_release(struct inode *inode, struct file *file)
1194{
1195 struct stream *s = file->private_data;
1196
1197 stream_free(s);
1198
1199 return 0;
1200}
1201
1202static struct cdev amdtp_cdev;
1203static struct file_operations amdtp_fops =
1204{
1205 .owner = THIS_MODULE,
1206 .write = amdtp_write,
1207 .poll = amdtp_poll,
1208 .unlocked_ioctl = amdtp_ioctl,
1209 .compat_ioctl = amdtp_ioctl, /* All amdtp ioctls are compatible */
1210 .open = amdtp_open,
1211 .release = amdtp_release
1212};
1213
1214/* IEEE1394 Subsystem functions */
1215
1216static void amdtp_add_host(struct hpsb_host *host)
1217{
1218 struct amdtp_host *ah;
1219 int minor;
1220
1221 if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME) != 0)
1222 return;
1223
1224 ah = hpsb_create_hostinfo(&amdtp_highlevel, host, sizeof(*ah));
1225 if (!ah) {
 1226 HPSB_ERR("amdtp: Unable to alloc hostinfo");
1227 return;
1228 }
1229
1230 ah->host = host;
1231 ah->ohci = host->hostdata;
1232
1233 hpsb_set_hostinfo_key(&amdtp_highlevel, host, ah->host->id);
1234
1235 minor = IEEE1394_MINOR_BLOCK_AMDTP * 16 + ah->host->id;
1236
1237 INIT_LIST_HEAD(&ah->stream_list);
1238 spin_lock_init(&ah->stream_list_lock);
1239
1240 devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, minor),
1241 S_IFCHR|S_IRUSR|S_IWUSR, "amdtp/%d", ah->host->id);
1242}
1243
1244static void amdtp_remove_host(struct hpsb_host *host)
1245{
1246 struct amdtp_host *ah = hpsb_get_hostinfo(&amdtp_highlevel, host);
1247
1248 if (ah)
1249 devfs_remove("amdtp/%d", ah->host->id);
1250
1251 return;
1252}
1253
1254static struct hpsb_highlevel amdtp_highlevel = {
1255 .name = "amdtp",
1256 .add_host = amdtp_add_host,
1257 .remove_host = amdtp_remove_host,
1258};
1259
1260/* Module interface */
1261
1262MODULE_AUTHOR("Kristian Hogsberg <hogsberg@users.sf.net>");
1263MODULE_DESCRIPTION("Driver for Audio & Music Data Transmission Protocol "
1264 "on OHCI boards.");
1265MODULE_SUPPORTED_DEVICE("amdtp");
1266MODULE_LICENSE("GPL");
1267
1268static int __init amdtp_init_module (void)
1269{
1270 cdev_init(&amdtp_cdev, &amdtp_fops);
1271 amdtp_cdev.owner = THIS_MODULE;
1272 kobject_set_name(&amdtp_cdev.kobj, "amdtp");
1273 if (cdev_add(&amdtp_cdev, IEEE1394_AMDTP_DEV, 16)) {
1274 HPSB_ERR("amdtp: unable to add char device");
1275 return -EIO;
1276 }
1277
1278 devfs_mk_dir("amdtp");
1279
1280 hpsb_register_highlevel(&amdtp_highlevel);
1281
1282 HPSB_INFO("Loaded AMDTP driver");
1283
1284 return 0;
1285}
1286
1287static void __exit amdtp_exit_module (void)
1288{
1289 hpsb_unregister_highlevel(&amdtp_highlevel);
1290 devfs_remove("amdtp");
1291 cdev_del(&amdtp_cdev);
1292
1293 HPSB_INFO("Unloaded AMDTP driver");
1294}
1295
1296module_init(amdtp_init_module);
1297module_exit(amdtp_exit_module);
diff --git a/drivers/ieee1394/amdtp.h b/drivers/ieee1394/amdtp.h
deleted file mode 100644
index 531f28e3ab50..000000000000
--- a/drivers/ieee1394/amdtp.h
+++ /dev/null
@@ -1,84 +0,0 @@
1/* -*- c-basic-offset: 8 -*- */
2
3#ifndef __AMDTP_H
4#define __AMDTP_H
5
6#include <asm/types.h>
7#include "ieee1394-ioctl.h"
8
9/* The userspace interface for the Audio & Music Data Transmission
10 * Protocol driver is really simple. First, open /dev/amdtp, use the
11 * ioctl to configure format, rate, dimension and either plug or
12 * channel, then start writing samples.
13 *
14 * The formats supported by the driver are listed below.
15 * AMDTP_FORMAT_RAW corresponds to the AM824 raw format, which can
16 * carry any number of channels, so use this if you're streaming
17 * multichannel audio. The AMDTP_FORMAT_IEC958_PCM corresponds to the
 18 * AM824 IEC958 encapsulation without the IEC958 data bit set; using
19 * AMDTP_FORMAT_IEC958_AC3 will transmit the samples with the data bit
20 * set, suitable for transmitting compressed AC-3 audio.
21 *
22 * The rate field specifies the transmission rate; supported values
23 * are 32000, 44100, 48000, 88200, 96000, 176400 and 192000.
24 *
25 * The dimension field specifies the dimension of the signal, that is,
26 * the number of audio channels. Only AMDTP_FORMAT_RAW supports
27 * settings greater than 2.
28 *
29 * The mode field specifies which transmission mode to use. The AMDTP
30 * specifies two different transmission modes: blocking and
 31 * non-blocking. The blocking transmission mode always sends a fixed
32 * number of samples, typically 8, 16 or 32. To exactly match the
33 * transmission rate, the driver alternates between sending empty and
 34 * non-empty packets. In non-blocking mode, the driver transmits
 35 * packets as small as possible. For example, for a transmission rate of
36 * 44100Hz, the driver should send 5 41/80 samples in every cycle, but
37 * this is not possible so instead the driver alternates between
38 * sending 5 and 6 samples.
39 *
40 * The last thing to specify is either the isochronous channel to use
41 * or the output plug to connect to. If you know what channel the
42 * destination device will listen on, you can specify the channel
43 * directly and use the AMDTP_IOC_CHANNEL ioctl. However, if the
44 * destination device chooses the channel and uses the IEC61883-1 plug
45 * mechanism, you can specify an output plug to connect to. The
46 * driver will pick up the channel number from the plug once the
47 * destination device locks the output plug control register. In this
48 * case set the plug field and use the AMDTP_IOC_PLUG ioctl.
49 *
50 * Having configured the interface, the driver now accepts writes of
51 * regular 16 bit signed little endian samples, with the channels
52 * interleaved. For example, 4 channels would look like:
53 *
54 * | sample 0 | sample 1 ...
55 * | ch. 0 | ch. 1 | ch. 2 | ch. 3 | ch. 0 | ...
56 * | lsb | msb | lsb | msb | lsb | msb | lsb | msb | lsb | msb | ...
57 *
58 */
59
60enum {
61 AMDTP_FORMAT_RAW,
62 AMDTP_FORMAT_IEC958_PCM,
63 AMDTP_FORMAT_IEC958_AC3
64};
65
66enum {
67 AMDTP_MODE_BLOCKING,
68 AMDTP_MODE_NON_BLOCKING,
69};
70
71enum {
72 AMDTP_INPUT_LE16,
73 AMDTP_INPUT_BE16,
74};
75
76struct amdtp_ioctl {
77 __u32 format;
78 __u32 rate;
79 __u32 dimension;
80 __u32 mode;
81 union { __u32 channel; __u32 plug; } u;
82};
83
84#endif /* __AMDTP_H */
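As a usage illustration of the interface documented in this header, here is a hypothetical user-space sketch (assumptions: this header is on the include path, /dev/amdtp exists as described above, and iso channel 5 is free on the bus). It configures a 2-channel 44.1 kHz IEC958 PCM stream on a fixed channel and writes about one second of interleaved 16-bit little-endian samples:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "amdtp.h"		/* struct amdtp_ioctl, AMDTP_IOC_CHANNEL */

int main(void)
{
	struct amdtp_ioctl cfg;
	int16_t silence[2 * 44100 / 100];	/* 10 ms of 2-channel silence */
	int fd, i;

	fd = open("/dev/amdtp", O_WRONLY);
	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof cfg);
	cfg.format = AMDTP_FORMAT_IEC958_PCM;
	cfg.rate = 44100;
	cfg.dimension = 2;
	cfg.mode = AMDTP_MODE_NON_BLOCKING;
	cfg.u.channel = 5;			/* assumed free iso channel */
	if (ioctl(fd, AMDTP_IOC_CHANNEL, &cfg) < 0)
		return 1;

	memset(silence, 0, sizeof silence);
	for (i = 0; i < 100; i++)		/* roughly one second of audio */
		if (write(fd, silence, sizeof silence) < 0)
			break;

	close(fd);
	return 0;
}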
diff --git a/drivers/ieee1394/cmp.c b/drivers/ieee1394/cmp.c
deleted file mode 100644
index 69aed26e83a1..000000000000
--- a/drivers/ieee1394/cmp.c
+++ /dev/null
@@ -1,311 +0,0 @@
1/* -*- c-basic-offset: 8 -*-
2 *
3 * cmp.c - Connection Management Procedures
4 * Copyright (C) 2001 Kristian Høgsberg
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/* TODO
22 * ----
23 *
24 * - Implement IEC61883-1 output plugs and connection management.
25 * This should probably be part of the general subsystem, as it could
26 * be shared with dv1394.
27 *
28 * - Add IEC61883 unit directory when loading this module. This
29 * requires a run-time changeable config rom.
30 */
31
32#include <linux/module.h>
33#include <linux/list.h>
34#include <linux/sched.h>
35#include <linux/types.h>
36#include <linux/wait.h>
37#include <linux/interrupt.h>
38
39#include "hosts.h"
40#include "highlevel.h"
41#include "ieee1394.h"
42#include "ieee1394_core.h"
43#include "cmp.h"
44
45struct plug {
46 union {
47 struct cmp_pcr pcr;
48 quadlet_t quadlet;
49 } u;
50 void (*update)(struct cmp_pcr *plug, void *data);
51 void *data;
52};
53
54struct cmp_host {
55 struct hpsb_host *host;
56
57 union {
58 struct cmp_mpr ompr;
59 quadlet_t ompr_quadlet;
60 } u;
61 struct plug opcr[2];
62
63 union {
64 struct cmp_mpr impr;
65 quadlet_t impr_quadlet;
66 } v;
67 struct plug ipcr[2];
68};
69
70enum {
71 CMP_P2P_CONNECTION,
72 CMP_BC_CONNECTION
73};
74
75#define CSR_PCR_MAP 0x900
76#define CSR_PCR_MAP_END 0x9fc
77
78static struct hpsb_highlevel cmp_highlevel;
79
80static void cmp_add_host(struct hpsb_host *host);
81static void cmp_host_reset(struct hpsb_host *host);
82static int pcr_read(struct hpsb_host *host, int nodeid, quadlet_t *buf,
83 u64 addr, size_t length, u16 flags);
84static int pcr_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
85 u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 flags);
86
87static struct hpsb_highlevel cmp_highlevel = {
88 .name = "cmp",
89 .add_host = cmp_add_host,
90 .host_reset = cmp_host_reset,
91};
92
93static struct hpsb_address_ops pcr_ops = {
94 .read = pcr_read,
95 .lock = pcr_lock,
96};
97
98
99struct cmp_pcr *
100cmp_register_opcr(struct hpsb_host *host, int opcr_number, int payload,
101 void (*update)(struct cmp_pcr *pcr, void *data),
102 void *data)
103{
104 struct cmp_host *ch;
105 struct plug *plug;
106
107 ch = hpsb_get_hostinfo(&cmp_highlevel, host);
108
109 if (opcr_number >= ch->u.ompr.nplugs ||
110 ch->opcr[opcr_number].update != NULL)
111 return NULL;
112
113 plug = &ch->opcr[opcr_number];
114 plug->u.pcr.online = 1;
115 plug->u.pcr.bcast_count = 0;
116 plug->u.pcr.p2p_count = 0;
117 plug->u.pcr.overhead = 0;
118 plug->u.pcr.payload = payload;
119 plug->update = update;
120 plug->data = data;
121
122 return &plug->u.pcr;
123}
124
125void cmp_unregister_opcr(struct hpsb_host *host, struct cmp_pcr *opcr)
126{
127 struct cmp_host *ch;
128 struct plug *plug;
129
130 ch = hpsb_get_hostinfo(&cmp_highlevel, host);
131 plug = (struct plug *)opcr;
132 if (plug - ch->opcr >= ch->u.ompr.nplugs) BUG();
133
134 plug->u.pcr.online = 0;
135 plug->update = NULL;
136}
137
138static void reset_plugs(struct cmp_host *ch)
139{
140 int i;
141
142 ch->u.ompr.non_persistent_ext = 0xff;
143 for (i = 0; i < ch->u.ompr.nplugs; i++) {
144 ch->opcr[i].u.pcr.bcast_count = 0;
145 ch->opcr[i].u.pcr.p2p_count = 0;
146 ch->opcr[i].u.pcr.overhead = 0;
147 }
148}
149
150static void cmp_add_host(struct hpsb_host *host)
151{
152 struct cmp_host *ch = hpsb_create_hostinfo(&cmp_highlevel, host, sizeof (*ch));
153
154 if (ch == NULL) {
155 HPSB_ERR("Failed to allocate cmp_host");
156 return;
157 }
158
159 hpsb_register_addrspace(&cmp_highlevel, host, &pcr_ops,
160 CSR_REGISTER_BASE + CSR_PCR_MAP,
161 CSR_REGISTER_BASE + CSR_PCR_MAP_END);
162
163 ch->host = host;
164 ch->u.ompr.rate = IEEE1394_SPEED_100;
165 ch->u.ompr.bcast_channel_base = 63;
166 ch->u.ompr.nplugs = 2;
167
168 reset_plugs(ch);
169}
170
171static void cmp_host_reset(struct hpsb_host *host)
172{
173 struct cmp_host *ch;
174
175 ch = hpsb_get_hostinfo(&cmp_highlevel, host);
176 if (ch == NULL) {
177 HPSB_ERR("cmp: Tried to reset unknown host");
178 return;
179 }
180
181 reset_plugs(ch);
182}
183
184static int pcr_read(struct hpsb_host *host, int nodeid, quadlet_t *buf,
185 u64 addr, size_t length, u16 flags)
186{
187 int csraddr = addr - CSR_REGISTER_BASE;
188 int plug;
189 struct cmp_host *ch;
190
191 if (length != 4)
192 return RCODE_TYPE_ERROR;
193
194 ch = hpsb_get_hostinfo(&cmp_highlevel, host);
195 if (csraddr == 0x900) {
196 *buf = cpu_to_be32(ch->u.ompr_quadlet);
197 return RCODE_COMPLETE;
198 }
199 else if (csraddr < 0x904 + ch->u.ompr.nplugs * 4) {
200 plug = (csraddr - 0x904) / 4;
201 *buf = cpu_to_be32(ch->opcr[plug].u.quadlet);
202 return RCODE_COMPLETE;
203 }
204 else if (csraddr < 0x980) {
205 return RCODE_ADDRESS_ERROR;
206 }
207 else if (csraddr == 0x980) {
208 *buf = cpu_to_be32(ch->v.impr_quadlet);
209 return RCODE_COMPLETE;
210 }
211 else if (csraddr < 0x984 + ch->v.impr.nplugs * 4) {
212 plug = (csraddr - 0x984) / 4;
213 *buf = cpu_to_be32(ch->ipcr[plug].u.quadlet);
214 return RCODE_COMPLETE;
215 }
216 else
217 return RCODE_ADDRESS_ERROR;
218}
219
220static int pcr_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
221 u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 flags)
222{
223 int csraddr = addr - CSR_REGISTER_BASE;
224 int plug;
225 struct cmp_host *ch;
226
227 ch = hpsb_get_hostinfo(&cmp_highlevel, host);
228
229 if (extcode != EXTCODE_COMPARE_SWAP)
230 return RCODE_TYPE_ERROR;
231
232 if (csraddr == 0x900) {
233 /* FIXME: Ignore writes to bits 30-31 and 0-7 */
234 *store = cpu_to_be32(ch->u.ompr_quadlet);
235 if (arg == cpu_to_be32(ch->u.ompr_quadlet))
236 ch->u.ompr_quadlet = be32_to_cpu(data);
237
238 return RCODE_COMPLETE;
239 }
240 if (csraddr < 0x904 + ch->u.ompr.nplugs * 4) {
241 plug = (csraddr - 0x904) / 4;
242 *store = cpu_to_be32(ch->opcr[plug].u.quadlet);
243
244 if (arg == *store)
245 ch->opcr[plug].u.quadlet = be32_to_cpu(data);
246
247 if (be32_to_cpu(*store) != ch->opcr[plug].u.quadlet &&
248 ch->opcr[plug].update != NULL)
249 ch->opcr[plug].update(&ch->opcr[plug].u.pcr,
250 ch->opcr[plug].data);
251
252 return RCODE_COMPLETE;
253 }
254 else if (csraddr < 0x980) {
255 return RCODE_ADDRESS_ERROR;
256 }
257 else if (csraddr == 0x980) {
258 /* FIXME: Ignore writes to bits 24-31 and 0-7 */
 259 *store = cpu_to_be32(ch->v.impr_quadlet);
 260 if (arg == cpu_to_be32(ch->v.impr_quadlet))
 261 ch->v.impr_quadlet = be32_to_cpu(data);
262
263 return RCODE_COMPLETE;
264 }
265 else if (csraddr < 0x984 + ch->v.impr.nplugs * 4) {
266 plug = (csraddr - 0x984) / 4;
267 *store = cpu_to_be32(ch->ipcr[plug].u.quadlet);
268
269 if (arg == *store)
270 ch->ipcr[plug].u.quadlet = be32_to_cpu(data);
271
272 if (be32_to_cpu(*store) != ch->ipcr[plug].u.quadlet &&
273 ch->ipcr[plug].update != NULL)
274 ch->ipcr[plug].update(&ch->ipcr[plug].u.pcr,
275 ch->ipcr[plug].data);
276
277 return RCODE_COMPLETE;
278 }
279 else
280 return RCODE_ADDRESS_ERROR;
281}
282
283
284/* Module interface */
285
286MODULE_AUTHOR("Kristian Hogsberg <hogsberg@users.sf.net>");
287MODULE_DESCRIPTION("Connection Management Procedures (CMP)");
288MODULE_SUPPORTED_DEVICE("cmp");
289MODULE_LICENSE("GPL");
290
291EXPORT_SYMBOL(cmp_register_opcr);
292EXPORT_SYMBOL(cmp_unregister_opcr);
293
294static int __init cmp_init_module (void)
295{
296 hpsb_register_highlevel (&cmp_highlevel);
297
298 HPSB_INFO("Loaded CMP driver");
299
300 return 0;
301}
302
303static void __exit cmp_exit_module (void)
304{
305 hpsb_unregister_highlevel(&cmp_highlevel);
306
307 HPSB_INFO("Unloaded CMP driver");
308}
309
310module_init(cmp_init_module);
311module_exit(cmp_exit_module);
diff --git a/drivers/ieee1394/cmp.h b/drivers/ieee1394/cmp.h
deleted file mode 100644
index f9288bfcd494..000000000000
--- a/drivers/ieee1394/cmp.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef __CMP_H
2#define __CMP_H
3
4struct cmp_mpr {
5 u32 nplugs:5;
6 u32 reserved:3;
7 u32 persistent_ext:8;
8 u32 non_persistent_ext:8;
9 u32 bcast_channel_base:6;
10 u32 rate:2;
11} __attribute__((packed));
12
13struct cmp_pcr {
14 u32 payload:10;
15 u32 overhead:4;
16 u32 speed:2;
17 u32 channel:6;
18 u32 reserved:2;
19 u32 p2p_count:6;
20 u32 bcast_count:1;
21 u32 online:1;
22} __attribute__((packed));
23
24struct cmp_pcr *cmp_register_opcr(struct hpsb_host *host, int plug,
25 int payload,
26 void (*update)(struct cmp_pcr *plug,
27 void *data),
28 void *data);
29void cmp_unregister_opcr(struct hpsb_host *host, struct cmp_pcr *plug);
30
31#endif /* __CMP_H */