author    Stefan Richter <stefanr@s5r6.in-berlin.de>  2010-10-09 18:12:20 -0400
committer Stefan Richter <stefanr@s5r6.in-berlin.de>  2010-10-11 08:48:03 -0400
commit    66fa12c571d35e3cd62574c65f1785a460105397 (patch)
tree      b4f8de3d5ca827d2b134ed628628a7bff46967ca /drivers/ieee1394
parent    1ef5b816c0eaf84f91106cfc0893069c49e86113 (diff)
ieee1394: remove the old IEEE 1394 driver stack
The drivers

 - ohci1394 (controller driver)
 - ieee1394 (core)
 - dv1394, raw1394, video1394 (userspace ABI)
 - eth1394, sbp2 (protocol drivers)

are replaced by

 - firewire-ohci (controller driver)
 - firewire-core (core and userspace ABI)
 - firewire-net, firewire-sbp2 (protocol drivers)

which are more featureful, better performing, and more secure than the older drivers, all with a smaller and more modern code base.

The driver firedtv in drivers/media/dvb/firewire/ contains backends to both ieee1394 and firewire-core. Its ieee1394 backend code can be removed in an independent commit; firedtv as-is builds and works fine without ieee1394.

The driver pcilynx (an incomplete controller driver) is deleted without replacement since PCILynx cards are extremely rare. Owners of these cards use them with the stand-alone bus sniffer driver nosy instead.

The drivers nosy and init_ohci1394_dma, which do not interact with either of the two IEEE 1394 stacks, are not affected by the ieee1394 subsystem removal.

There are still some issues with the newer firewire subsystem compared to the older one:

 - The rare and quirky controllers ALi M52xx, Apple UniNorth v1, and NVIDIA NForce2 are even less well supported by firewire-ohci than by ohci1394. I am looking into the M52xx issue.
 - The experimental firewire-net is reportedly less stable than its experimental cousin eth1394.
 - Audio playback of a certain group of audio devices (ones based on DICE chipset with EAP, supported by prerelease FFADO code) does not work yet. This issue is still under investigation.
 - There were some ieee1394-based out-of-the-mainline drivers. Of them, only lisight, an audio driver for iSight webcams, seems still useful. Work is underway to reimplement it on top of firewire-core.

All these remaining issues are minor; they should not stand in the way of the overall better user experience of IEEE 1394 on Linux, together with a reduction in support efforts and maintenance burden. The coexistence of two IEEE 1394 kernel driver stacks in the mainline since 2.6.22 shall end now, as announced earlier this year.

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Diffstat (limited to 'drivers/ieee1394')
-rw-r--r--  drivers/ieee1394/Kconfig                   |  182
-rw-r--r--  drivers/ieee1394/Makefile                  |   16
-rw-r--r--  drivers/ieee1394/config_roms.c             |  194
-rw-r--r--  drivers/ieee1394/config_roms.h             |   19
-rw-r--r--  drivers/ieee1394/csr.c                     |  843
-rw-r--r--  drivers/ieee1394/csr.h                     |   99
-rw-r--r--  drivers/ieee1394/csr1212.c                 | 1467
-rw-r--r--  drivers/ieee1394/csr1212.h                 |  383
-rw-r--r--  drivers/ieee1394/dma.c                     |  289
-rw-r--r--  drivers/ieee1394/dma.h                     |   89
-rw-r--r--  drivers/ieee1394/dv1394-private.h          |  587
-rw-r--r--  drivers/ieee1394/dv1394.c                  | 2584
-rw-r--r--  drivers/ieee1394/dv1394.h                  |  305
-rw-r--r--  drivers/ieee1394/eth1394.c                 | 1736
-rw-r--r--  drivers/ieee1394/eth1394.h                 |  234
-rw-r--r--  drivers/ieee1394/highlevel.c               |  691
-rw-r--r--  drivers/ieee1394/highlevel.h               |  141
-rw-r--r--  drivers/ieee1394/hosts.c                   |  249
-rw-r--r--  drivers/ieee1394/hosts.h                   |  201
-rw-r--r--  drivers/ieee1394/ieee1394-ioctl.h          |  106
-rw-r--r--  drivers/ieee1394/ieee1394.h                |  220
-rw-r--r--  drivers/ieee1394/ieee1394_core.c           | 1380
-rw-r--r--  drivers/ieee1394/ieee1394_core.h           |  172
-rw-r--r--  drivers/ieee1394/ieee1394_hotplug.h        |   19
-rw-r--r--  drivers/ieee1394/ieee1394_transactions.c   |  595
-rw-r--r--  drivers/ieee1394/ieee1394_transactions.h   |   40
-rw-r--r--  drivers/ieee1394/ieee1394_types.h          |   69
-rw-r--r--  drivers/ieee1394/iso.c                     |  568
-rw-r--r--  drivers/ieee1394/iso.h                     |  195
-rw-r--r--  drivers/ieee1394/nodemgr.c                 | 1901
-rw-r--r--  drivers/ieee1394/nodemgr.h                 |  186
-rw-r--r--  drivers/ieee1394/ohci1394.c                | 3590
-rw-r--r--  drivers/ieee1394/ohci1394.h                |  453
-rw-r--r--  drivers/ieee1394/pcilynx.c                 | 1554
-rw-r--r--  drivers/ieee1394/pcilynx.h                 |  468
-rw-r--r--  drivers/ieee1394/raw1394-private.h         |   81
-rw-r--r--  drivers/ieee1394/raw1394.c                 | 3096
-rw-r--r--  drivers/ieee1394/raw1394.h                 |  191
-rw-r--r--  drivers/ieee1394/sbp2.c                    | 2138
-rw-r--r--  drivers/ieee1394/sbp2.h                    |  346
-rw-r--r--  drivers/ieee1394/video1394.c               | 1528
-rw-r--r--  drivers/ieee1394/video1394.h               |   67
42 files changed, 0 insertions, 29272 deletions
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
deleted file mode 100644
index e02096cf7d95..000000000000
--- a/drivers/ieee1394/Kconfig
+++ /dev/null
@@ -1,182 +0,0 @@
1config IEEE1394
2 tristate "Legacy alternative FireWire driver stack"
3 depends on PCI || BROKEN
4 help
5 IEEE 1394 describes a high performance serial bus, which is also
6 known as FireWire(tm) or i.Link(tm) and is used for connecting all
7 sorts of devices (most notably digital video cameras) to your
8 computer.
9
10 If you have FireWire hardware and want to use it, say Y here. This
11 is the core support only, you will also need to select a driver for
12 your IEEE 1394 adapter.
13
14 To compile this driver as a module, say M here: the module will be
15 called ieee1394.
16
17 NOTE:
18 ieee1394 is superseded by the newer firewire-core driver. See
19 http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
20 further information on how to switch to the new FireWire drivers.
21
22config IEEE1394_OHCI1394
23 tristate "OHCI-1394 controllers"
24 depends on PCI && IEEE1394
25 help
26 Enable this driver if you have an IEEE 1394 controller based on the
27 OHCI-1394 specification. The current driver is only tested with OHCI
28 chipsets made by Texas Instruments and NEC. Most third-party vendors
29 use one of these chipsets. It should work with any OHCI-1394
30 compliant card, however.
31
32 To compile this driver as a module, say M here: the module will be
33 called ohci1394.
34
35 NOTE:
36 ohci1394 is superseded by the newer firewire-ohci driver. See
37 http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
38 further information on how to switch to the new FireWire drivers.
39
40 If you want to install firewire-ohci and ohci1394 together, you
41 should configure them only as modules and blacklist the driver(s)
42 which you don't want to have auto-loaded. Add either
43
44 blacklist ohci1394
45 blacklist video1394
46 blacklist dv1394
47 or
48 blacklist firewire-ohci
49
50 to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
51 depending on your distribution.
52
53comment "PCILynx controller requires I2C"
54 depends on IEEE1394 && I2C=n
55
56config IEEE1394_PCILYNX
57 tristate "PCILynx controller"
58 depends on PCI && IEEE1394 && I2C
59 select I2C_ALGOBIT
60 help
61 Say Y here if you have an IEEE-1394 controller with the Texas
62 Instruments PCILynx chip. Note: this driver is written for revision
63 2 of this chip and may not work with revision 0.
64
65 To compile this driver as a module, say M here: the module will be
66 called pcilynx.
67
68 Only some old and now very rare PCI and CardBus cards and
69 PowerMacs G3 B&W contain the PCILynx controller. Therefore
70 almost everybody can say N here.
71
72comment "SBP-2 support (for storage devices) requires SCSI"
73 depends on IEEE1394 && SCSI=n
74
75config IEEE1394_SBP2
76 tristate "Storage devices (SBP-2 protocol)"
77 depends on IEEE1394 && SCSI
78 help
79 This option enables you to use SBP-2 devices connected to an IEEE
80 1394 bus. SBP-2 devices include storage devices like harddisks and
81 DVD drives, also some other FireWire devices like scanners.
82
83 You should also enable support for disks, CD-ROMs, etc. in the SCSI
84 configuration section.
85
86 To compile this driver as a module, say M here: the module will be
87 called sbp2.
88
89 NOTE:
90 sbp2 is superseded by the newer firewire-sbp2 driver. See
91 http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
92 further information on how to switch to the new FireWire drivers.
93
94config IEEE1394_SBP2_PHYS_DMA
95 bool "Enable replacement for physical DMA in SBP2"
96 depends on IEEE1394_SBP2 && VIRT_TO_BUS && EXPERIMENTAL
97 help
98 This builds sbp2 for use with non-OHCI host adapters which do not
99 support physical DMA or for when ohci1394 is run with phys_dma=0.
100 Physical DMA is data movement without assistance of the drivers'
101 interrupt handlers. This option includes the interrupt handlers
102 that are required in absence of this hardware feature.
103
104 This option is buggy and currently broken on some architectures.
105 If unsure, say N.
106
107config IEEE1394_ETH1394_ROM_ENTRY
108 depends on IEEE1394
109 bool
110 default n
111
112config IEEE1394_ETH1394
113 tristate "IP networking over 1394 (experimental)"
114 depends on IEEE1394 && EXPERIMENTAL && INET
115 select IEEE1394_ETH1394_ROM_ENTRY
116 help
117 This driver implements a functional majority of RFC 2734: IPv4 over
118 1394. It will provide IP connectivity with implementations of RFC
119 2734 found on other operating systems. It will not communicate with
120 older versions of this driver found in stock kernels prior to 2.6.3.
121 This driver is still considered experimental. It does not yet support
122 MCAP, therefore multicast support is significantly limited.
123
124 The module is called eth1394 although it does not emulate Ethernet.
125
126 NOTE:
127 eth1394 is superseded by the newer firewire-net driver. See
128 http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
129 further information on how to switch to the new FireWire drivers.
130
131config IEEE1394_RAWIO
132 tristate "raw1394 userspace interface"
133 depends on IEEE1394
134 help
135 This option adds support for the raw1394 device file which enables
136 direct communication of user programs with IEEE 1394 devices
137 (isochronous and asynchronous). Almost all application programs
138 which access FireWire require this option.
139
140 To compile this driver as a module, say M here: the module will be
141 called raw1394.
142
143 NOTE:
144 raw1394 is superseded by the newer firewire-core driver. See
145 http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
146 further information on how to switch to the new FireWire drivers.
147
148config IEEE1394_VIDEO1394
149 tristate "video1394 userspace interface"
150 depends on IEEE1394 && IEEE1394_OHCI1394
151 help
152 This option adds support for the video1394 device files which enable
153 isochronous communication of user programs with IEEE 1394 devices,
154 especially video capture or export. This interface is used by all
155 libdc1394 based programs and by several other programs, in addition to
156 the raw1394 interface. It is generally not required for DV capture.
157
158 To compile this driver as a module, say M here: the module will be
159 called video1394.
160
161 NOTE:
162 video1394 is superseded by the newer firewire-core driver. See
163 http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
164 further information on how to switch to the new FireWire drivers.
165
166config IEEE1394_DV1394
167 tristate "dv1394 userspace interface (deprecated)"
168 depends on IEEE1394 && IEEE1394_OHCI1394
169 help
170 The dv1394 driver is unsupported and may be removed from Linux in a
171 future release. Its functionality is now provided by either
172 raw1394 or firewire-core together with libraries such as libiec61883.
173
174config IEEE1394_VERBOSEDEBUG
175 bool "Excessive debugging output"
176 depends on IEEE1394
177 help
178 If you say Y here, you will get very verbose debugging logs from the
179 ieee1394 drivers, including sent and received packet headers. This
180 will quickly result in large amounts of data sent to the system log.
181
182 Say Y if you really need the debugging output. Everyone else says N.
diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile
deleted file mode 100644
index 427b86b87760..000000000000
--- a/drivers/ieee1394/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
1#
2# Makefile for the Linux IEEE 1394 implementation
3#
4
5ieee1394-objs := ieee1394_core.o ieee1394_transactions.o hosts.o \
6 highlevel.o csr.o nodemgr.o dma.o iso.o \
7 csr1212.o config_roms.o
8
9obj-$(CONFIG_IEEE1394) += ieee1394.o
10obj-$(CONFIG_IEEE1394_PCILYNX) += pcilynx.o
11obj-$(CONFIG_IEEE1394_OHCI1394) += ohci1394.o
12obj-$(CONFIG_IEEE1394_VIDEO1394) += video1394.o
13obj-$(CONFIG_IEEE1394_RAWIO) += raw1394.o
14obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
15obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
16obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
diff --git a/drivers/ieee1394/config_roms.c b/drivers/ieee1394/config_roms.c
deleted file mode 100644
index 1b981207fa76..000000000000
--- a/drivers/ieee1394/config_roms.c
+++ /dev/null
@@ -1,194 +0,0 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * ConfigROM entries
5 *
6 * Copyright (C) 2004 Ben Collins
7 *
8 * This code is licensed under the GPL. See the file COPYING in the root
9 * directory of the kernel sources for details.
10 */
11
12#include <linux/types.h>
13
14#include "csr1212.h"
15#include "ieee1394.h"
16#include "ieee1394_types.h"
17#include "hosts.h"
18#include "ieee1394_core.h"
19#include "highlevel.h"
20#include "csr.h"
21#include "config_roms.h"
22
23struct hpsb_config_rom_entry {
24 const char *name;
25
26 /* Base initialization, called at module load */
27 int (*init)(void);
28
29 /* Cleanup called at module exit */
30 void (*cleanup)(void);
31
32 /* The flag added to host->config_roms */
33 unsigned int flag;
34};
35
36/* The default host entry. This must succeed. */
37int hpsb_default_host_entry(struct hpsb_host *host)
38{
39 struct csr1212_keyval *root;
40 struct csr1212_keyval *vend_id = NULL;
41 struct csr1212_keyval *text = NULL;
42 char csr_name[128];
43 int ret;
44
45 sprintf(csr_name, "Linux - %s", host->driver->name);
46 root = host->csr.rom->root_kv;
47
48 vend_id = csr1212_new_immediate(CSR1212_KV_ID_VENDOR, host->csr.guid_hi >> 8);
49 text = csr1212_new_string_descriptor_leaf(csr_name);
50
51 if (!vend_id || !text) {
52 if (vend_id)
53 csr1212_release_keyval(vend_id);
54 if (text)
55 csr1212_release_keyval(text);
56 csr1212_destroy_csr(host->csr.rom);
57 return -ENOMEM;
58 }
59
60 csr1212_associate_keyval(vend_id, text);
61 csr1212_release_keyval(text);
62 ret = csr1212_attach_keyval_to_directory(root, vend_id);
63 csr1212_release_keyval(vend_id);
64 if (ret != CSR1212_SUCCESS) {
65 csr1212_destroy_csr(host->csr.rom);
66 return -ENOMEM;
67 }
68
69 host->update_config_rom = 1;
70
71 return 0;
72}
73
74
75#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
76#include "eth1394.h"
77
78static struct csr1212_keyval *ip1394_ud;
79
80static int config_rom_ip1394_init(void)
81{
82 struct csr1212_keyval *spec_id = NULL;
83 struct csr1212_keyval *spec_desc = NULL;
84 struct csr1212_keyval *ver = NULL;
85 struct csr1212_keyval *ver_desc = NULL;
86 int ret = -ENOMEM;
87
88 ip1394_ud = csr1212_new_directory(CSR1212_KV_ID_UNIT);
89
90 spec_id = csr1212_new_immediate(CSR1212_KV_ID_SPECIFIER_ID,
91 ETHER1394_GASP_SPECIFIER_ID);
92 spec_desc = csr1212_new_string_descriptor_leaf("IANA");
93 ver = csr1212_new_immediate(CSR1212_KV_ID_VERSION,
94 ETHER1394_GASP_VERSION);
95 ver_desc = csr1212_new_string_descriptor_leaf("IPv4");
96
97 if (!ip1394_ud || !spec_id || !spec_desc || !ver || !ver_desc)
98 goto ip1394_fail;
99
100 csr1212_associate_keyval(spec_id, spec_desc);
101 csr1212_associate_keyval(ver, ver_desc);
102 if (csr1212_attach_keyval_to_directory(ip1394_ud, spec_id)
103 == CSR1212_SUCCESS &&
104 csr1212_attach_keyval_to_directory(ip1394_ud, ver)
105 == CSR1212_SUCCESS)
106 ret = 0;
107
108ip1394_fail:
109 if (ret && ip1394_ud) {
110 csr1212_release_keyval(ip1394_ud);
111 ip1394_ud = NULL;
112 }
113
114 if (spec_id)
115 csr1212_release_keyval(spec_id);
116 if (spec_desc)
117 csr1212_release_keyval(spec_desc);
118 if (ver)
119 csr1212_release_keyval(ver);
120 if (ver_desc)
121 csr1212_release_keyval(ver_desc);
122
123 return ret;
124}
125
126static void config_rom_ip1394_cleanup(void)
127{
128 if (ip1394_ud) {
129 csr1212_release_keyval(ip1394_ud);
130 ip1394_ud = NULL;
131 }
132}
133
134int hpsb_config_rom_ip1394_add(struct hpsb_host *host)
135{
136 if (!ip1394_ud)
137 return -ENODEV;
138
139 if (csr1212_attach_keyval_to_directory(host->csr.rom->root_kv,
140 ip1394_ud) != CSR1212_SUCCESS)
141 return -ENOMEM;
142
143 host->config_roms |= HPSB_CONFIG_ROM_ENTRY_IP1394;
144 host->update_config_rom = 1;
145 return 0;
146}
147EXPORT_SYMBOL_GPL(hpsb_config_rom_ip1394_add);
148
149void hpsb_config_rom_ip1394_remove(struct hpsb_host *host)
150{
151 csr1212_detach_keyval_from_directory(host->csr.rom->root_kv, ip1394_ud);
152 host->config_roms &= ~HPSB_CONFIG_ROM_ENTRY_IP1394;
153 host->update_config_rom = 1;
154}
155EXPORT_SYMBOL_GPL(hpsb_config_rom_ip1394_remove);
156
157static struct hpsb_config_rom_entry ip1394_entry = {
158 .name = "ip1394",
159 .init = config_rom_ip1394_init,
160 .cleanup = config_rom_ip1394_cleanup,
161 .flag = HPSB_CONFIG_ROM_ENTRY_IP1394,
162};
163
164#endif /* CONFIG_IEEE1394_ETH1394_ROM_ENTRY */
165
166static struct hpsb_config_rom_entry *const config_rom_entries[] = {
167#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
168 &ip1394_entry,
169#endif
170};
171
172/* Initialize all config roms */
173int hpsb_init_config_roms(void)
174{
175 int i, error = 0;
176
177 for (i = 0; i < ARRAY_SIZE(config_rom_entries); i++)
178 if (config_rom_entries[i]->init()) {
179 HPSB_ERR("Failed to initialize config rom entry `%s'",
180 config_rom_entries[i]->name);
181 error = -1;
182 }
183
184 return error;
185}
186
187/* Cleanup all config roms */
188void hpsb_cleanup_config_roms(void)
189{
190 int i;
191
192 for (i = 0; i < ARRAY_SIZE(config_rom_entries); i++)
193 config_rom_entries[i]->cleanup();
194}
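
Note on the csr1212 keyval lifetime rules used above: csr1212_attach_keyval_to_directory() takes its own reference on the keyval (via csr1212_keep_keyval(), see csr1212.c below), which is why hpsb_default_host_entry() and config_rom_ip1394_init() drop their local references right after attaching. A minimal user-space sketch of this ownership pattern; kv_new(), kv_get(), and kv_put() are hypothetical stand-ins for the csr1212 refcounting calls:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct csr1212_keyval's refcount. */
struct kv {
	int refcnt;
	const char *name;
};

static struct kv *kv_new(const char *name)
{
	struct kv *k = malloc(sizeof(*k));
	if (!k)
		return NULL;
	k->refcnt = 1;		/* the creator holds the initial reference */
	k->name = name;
	return k;
}

static void kv_get(struct kv *k) { k->refcnt++; }

static void kv_put(struct kv *k)
{
	if (--k->refcnt == 0) {
		printf("freeing %s\n", k->name);
		free(k);
	}
}

/* Like csr1212_attach_keyval_to_directory(): the container takes
 * its own reference on the attached keyval. */
static void dir_attach(struct kv *k) { kv_get(k); }
static void dir_detach(struct kv *k) { kv_put(k); }

int main(void)
{
	struct kv *vend_id = kv_new("vendor-id");

	dir_attach(vend_id);	/* the directory now owns a reference */
	kv_put(vend_id);	/* drop the creator's reference ...    */
	dir_detach(vend_id);	/* ... object is freed on the last put */
	return 0;
}

The directory's reference keeps the keyval alive, so the creator can (and must) release its own reference immediately, exactly as the attach-then-release sequences in config_roms.c do.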
diff --git a/drivers/ieee1394/config_roms.h b/drivers/ieee1394/config_roms.h
deleted file mode 100644
index 1f5cd1f16c44..000000000000
--- a/drivers/ieee1394/config_roms.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef _IEEE1394_CONFIG_ROMS_H
2#define _IEEE1394_CONFIG_ROMS_H
3
4struct hpsb_host;
5
6int hpsb_default_host_entry(struct hpsb_host *host);
7int hpsb_init_config_roms(void);
8void hpsb_cleanup_config_roms(void);
9
10/* List of flags to check if a host contains a certain extra config rom
11 * entry. Available in the host->config_roms member. */
12#define HPSB_CONFIG_ROM_ENTRY_IP1394 0x00000001
13
14#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
15int hpsb_config_rom_ip1394_add(struct hpsb_host *host);
16void hpsb_config_rom_ip1394_remove(struct hpsb_host *host);
17#endif
18
19#endif /* _IEEE1394_CONFIG_ROMS_H */
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
deleted file mode 100644
index d696f69ebce5..000000000000
--- a/drivers/ieee1394/csr.c
+++ /dev/null
@@ -1,843 +0,0 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * CSR implementation, iso/bus manager implementation.
5 *
6 * Copyright (C) 1999 Andreas E. Bombe
7 * 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
8 *
9 * This code is licensed under the GPL. See the file COPYING in the root
10 * directory of the kernel sources for details.
11 *
12 *
13 * Contributions:
14 *
15 * Manfred Weihs <weihs@ict.tuwien.ac.at>
16 * configuration ROM manipulation
17 *
18 */
19
20#include <linux/jiffies.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/param.h>
25#include <linux/spinlock.h>
26#include <linux/string.h>
27
28#include "csr1212.h"
29#include "ieee1394_types.h"
30#include "hosts.h"
31#include "ieee1394.h"
32#include "highlevel.h"
33#include "ieee1394_core.h"
34
35/* Module Parameters */
36/* this module parameter can be used to disable mapping of the FCP registers */
37
38static int fcp = 1;
39module_param(fcp, int, 0444);
40MODULE_PARM_DESC(fcp, "Map FCP registers (default = 1, disable = 0).");
41
42static struct csr1212_keyval *node_cap = NULL;
43
44static void add_host(struct hpsb_host *host);
45static void remove_host(struct hpsb_host *host);
46static void host_reset(struct hpsb_host *host);
47static int read_maps(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
48 u64 addr, size_t length, u16 fl);
49static int write_fcp(struct hpsb_host *host, int nodeid, int dest,
50 quadlet_t *data, u64 addr, size_t length, u16 flags);
51static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
52 u64 addr, size_t length, u16 flags);
53static int write_regs(struct hpsb_host *host, int nodeid, int destid,
54 quadlet_t *data, u64 addr, size_t length, u16 flags);
55static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
56 u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 fl);
57static int lock64_regs(struct hpsb_host *host, int nodeid, octlet_t * store,
58 u64 addr, octlet_t data, octlet_t arg, int extcode, u16 fl);
59static int read_config_rom(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
60 u64 addr, size_t length, u16 fl);
61static u64 allocate_addr_range(u64 size, u32 alignment, void *__host);
62static void release_addr_range(u64 addr, void *__host);
63
64static struct hpsb_highlevel csr_highlevel = {
65 .name = "standard registers",
66 .add_host = add_host,
67 .remove_host = remove_host,
68 .host_reset = host_reset,
69};
70
71static const struct hpsb_address_ops map_ops = {
72 .read = read_maps,
73};
74
75static const struct hpsb_address_ops fcp_ops = {
76 .write = write_fcp,
77};
78
79static const struct hpsb_address_ops reg_ops = {
80 .read = read_regs,
81 .write = write_regs,
82 .lock = lock_regs,
83 .lock64 = lock64_regs,
84};
85
86static const struct hpsb_address_ops config_rom_ops = {
87 .read = read_config_rom,
88};
89
90struct csr1212_bus_ops csr_bus_ops = {
91 .allocate_addr_range = allocate_addr_range,
92 .release_addr = release_addr_range,
93};
94
95
96static u16 csr_crc16(unsigned *data, int length)
97{
98 int check=0, i;
99 int shift, sum, next=0;
100
101 for (i = length; i; i--) {
102 for (next = check, shift = 28; shift >= 0; shift -= 4 ) {
103 sum = ((next >> 12) ^ (be32_to_cpu(*data) >> shift)) & 0xf;
104 next = (next << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
105 }
106 check = next & 0xffff;
107 data++;
108 }
109
110 return check;
111}
112
113static void host_reset(struct hpsb_host *host)
114{
115 host->csr.state &= 0x300;
116
117 host->csr.bus_manager_id = 0x3f;
118 host->csr.bandwidth_available = 4915;
119 host->csr.channels_available_hi = 0xfffffffe; /* pre-alloc ch 31 per 1394a-2000 */
120 host->csr.channels_available_lo = ~0;
121 host->csr.broadcast_channel = 0x80000000 | 31;
122
123 if (host->is_irm) {
124 if (host->driver->hw_csr_reg) {
125 host->driver->hw_csr_reg(host, 2, 0xfffffffe, ~0);
126 }
127 }
128
129 host->csr.node_ids = host->node_id << 16;
130
131 if (!host->is_root) {
132 /* clear cmstr bit */
133 host->csr.state &= ~0x100;
134 }
135
136 be32_add_cpu(&host->csr.topology_map[1], 1);
137 host->csr.topology_map[2] = cpu_to_be32(host->node_count << 16
138 | host->selfid_count);
139 host->csr.topology_map[0] =
140 cpu_to_be32((host->selfid_count + 2) << 16
141 | csr_crc16(host->csr.topology_map + 1,
142 host->selfid_count + 2));
143
144 be32_add_cpu(&host->csr.speed_map[1], 1);
145 host->csr.speed_map[0] = cpu_to_be32(0x3f1 << 16
146 | csr_crc16(host->csr.speed_map+1,
147 0x3f1));
148}
149
150/*
151 * HI == seconds (bits 0:2)
152 * LO == fractions of a second in units of 125usec (bits 19:31)
153 *
154 * Convert SPLIT_TIMEOUT to jiffies.
155 * The default and minimum as per 1394a-2000 clause 8.3.2.2.6 is 100ms.
156 */
157static inline void calculate_expire(struct csr_control *csr)
158{
159 unsigned int usecs = (csr->split_timeout_hi & 7) * 1000000 +
160 (csr->split_timeout_lo >> 19) * 125;
161
162 csr->expire = usecs_to_jiffies(usecs > 100000 ? usecs : 100000);
163 HPSB_VERBOSE("CSR: setting expire to %lu, HZ=%u", csr->expire, HZ);
164}
165
166
167static void add_host(struct hpsb_host *host)
168{
169 struct csr1212_keyval *root;
170 quadlet_t bus_info[CSR_BUS_INFO_SIZE];
171
172 hpsb_register_addrspace(&csr_highlevel, host, &reg_ops,
173 CSR_REGISTER_BASE,
174 CSR_REGISTER_BASE + CSR_CONFIG_ROM);
175 hpsb_register_addrspace(&csr_highlevel, host, &config_rom_ops,
176 CSR_REGISTER_BASE + CSR_CONFIG_ROM,
177 CSR_REGISTER_BASE + CSR_CONFIG_ROM_END);
178 if (fcp) {
179 hpsb_register_addrspace(&csr_highlevel, host, &fcp_ops,
180 CSR_REGISTER_BASE + CSR_FCP_COMMAND,
181 CSR_REGISTER_BASE + CSR_FCP_END);
182 }
183 hpsb_register_addrspace(&csr_highlevel, host, &map_ops,
184 CSR_REGISTER_BASE + CSR_TOPOLOGY_MAP,
185 CSR_REGISTER_BASE + CSR_TOPOLOGY_MAP_END);
186 hpsb_register_addrspace(&csr_highlevel, host, &map_ops,
187 CSR_REGISTER_BASE + CSR_SPEED_MAP,
188 CSR_REGISTER_BASE + CSR_SPEED_MAP_END);
189
190 spin_lock_init(&host->csr.lock);
191
192 host->csr.state = 0;
193 host->csr.node_ids = 0;
194 host->csr.split_timeout_hi = 0;
195 host->csr.split_timeout_lo = 800 << 19;
196 calculate_expire(&host->csr);
197 host->csr.cycle_time = 0;
198 host->csr.bus_time = 0;
199 host->csr.bus_manager_id = 0x3f;
200 host->csr.bandwidth_available = 4915;
201 host->csr.channels_available_hi = 0xfffffffe; /* pre-alloc ch 31 per 1394a-2000 */
202 host->csr.channels_available_lo = ~0;
203 host->csr.broadcast_channel = 0x80000000 | 31;
204
205 if (host->is_irm) {
206 if (host->driver->hw_csr_reg) {
207 host->driver->hw_csr_reg(host, 2, 0xfffffffe, ~0);
208 }
209 }
210
211 if (host->csr.max_rec >= 9)
212 host->csr.max_rom = 2;
213 else if (host->csr.max_rec >= 5)
214 host->csr.max_rom = 1;
215 else
216 host->csr.max_rom = 0;
217
218 host->csr.generation = 2;
219
220 bus_info[1] = IEEE1394_BUSID_MAGIC;
221 bus_info[2] = cpu_to_be32((hpsb_disable_irm ? 0 : 1 << CSR_IRMC_SHIFT) |
222 (1 << CSR_CMC_SHIFT) |
223 (1 << CSR_ISC_SHIFT) |
224 (0 << CSR_BMC_SHIFT) |
225 (0 << CSR_PMC_SHIFT) |
226 (host->csr.cyc_clk_acc << CSR_CYC_CLK_ACC_SHIFT) |
227 (host->csr.max_rec << CSR_MAX_REC_SHIFT) |
228 (host->csr.max_rom << CSR_MAX_ROM_SHIFT) |
229 (host->csr.generation << CSR_GENERATION_SHIFT) |
230 host->csr.lnk_spd);
231
232 bus_info[3] = cpu_to_be32(host->csr.guid_hi);
233 bus_info[4] = cpu_to_be32(host->csr.guid_lo);
234
235 /* The hardware copy of the bus info block will be set later when a
236 * bus reset is issued. */
237
238 csr1212_init_local_csr(host->csr.rom, bus_info, host->csr.max_rom);
239
240 root = host->csr.rom->root_kv;
241
242 if(csr1212_attach_keyval_to_directory(root, node_cap) != CSR1212_SUCCESS) {
243 HPSB_ERR("Failed to attach Node Capabilities to root directory");
244 }
245
246 host->update_config_rom = 1;
247}
248
249static void remove_host(struct hpsb_host *host)
250{
251 quadlet_t bus_info[CSR_BUS_INFO_SIZE];
252
253 bus_info[1] = IEEE1394_BUSID_MAGIC;
254 bus_info[2] = cpu_to_be32((0 << CSR_IRMC_SHIFT) |
255 (0 << CSR_CMC_SHIFT) |
256 (0 << CSR_ISC_SHIFT) |
257 (0 << CSR_BMC_SHIFT) |
258 (0 << CSR_PMC_SHIFT) |
259 (host->csr.cyc_clk_acc << CSR_CYC_CLK_ACC_SHIFT) |
260 (host->csr.max_rec << CSR_MAX_REC_SHIFT) |
261 (0 << CSR_MAX_ROM_SHIFT) |
262 (0 << CSR_GENERATION_SHIFT) |
263 host->csr.lnk_spd);
264
265 bus_info[3] = cpu_to_be32(host->csr.guid_hi);
266 bus_info[4] = cpu_to_be32(host->csr.guid_lo);
267
268 csr1212_detach_keyval_from_directory(host->csr.rom->root_kv, node_cap);
269
270 csr1212_init_local_csr(host->csr.rom, bus_info, 0);
271 host->update_config_rom = 1;
272}
273
274
275int hpsb_update_config_rom(struct hpsb_host *host, const quadlet_t *new_rom,
276 size_t buffersize, unsigned char rom_version)
277{
278 unsigned long flags;
279 int ret;
280
281 HPSB_NOTICE("hpsb_update_config_rom() is deprecated");
282
283 spin_lock_irqsave(&host->csr.lock, flags);
284 if (rom_version != host->csr.generation)
285 ret = -1;
286 else if (buffersize > host->csr.rom->cache_head->size)
287 ret = -2;
288 else {
289 /* Just overwrite the generated ConfigROM image with new data,
290 * it can be regenerated later. */
291 memcpy(host->csr.rom->cache_head->data, new_rom, buffersize);
292 host->csr.rom->cache_head->len = buffersize;
293
294 if (host->driver->set_hw_config_rom)
295 host->driver->set_hw_config_rom(host, host->csr.rom->bus_info_data);
296 /* Increment the generation number to keep some sort of sync
297 * with the newer ConfigROM manipulation method. */
298 host->csr.generation++;
299 if (host->csr.generation > 0xf || host->csr.generation < 2)
300 host->csr.generation = 2;
301 ret=0;
302 }
303 spin_unlock_irqrestore(&host->csr.lock, flags);
304 return ret;
305}
306
307
308/* Read topology / speed maps and configuration ROM */
309static int read_maps(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
310 u64 addr, size_t length, u16 fl)
311{
312 unsigned long flags;
313 int csraddr = addr - CSR_REGISTER_BASE;
314 const char *src;
315
316 spin_lock_irqsave(&host->csr.lock, flags);
317
318 if (csraddr < CSR_SPEED_MAP) {
319 src = ((char *)host->csr.topology_map) + csraddr
320 - CSR_TOPOLOGY_MAP;
321 } else {
322 src = ((char *)host->csr.speed_map) + csraddr - CSR_SPEED_MAP;
323 }
324
325 memcpy(buffer, src, length);
326 spin_unlock_irqrestore(&host->csr.lock, flags);
327 return RCODE_COMPLETE;
328}
329
330
331#define out if (--length == 0) break
332
333static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
334 u64 addr, size_t length, u16 flags)
335{
336 int csraddr = addr - CSR_REGISTER_BASE;
337 int oldcycle;
338 quadlet_t ret;
339
340 if ((csraddr | length) & 0x3)
341 return RCODE_TYPE_ERROR;
342
343 length /= 4;
344
345 switch (csraddr) {
346 case CSR_STATE_CLEAR:
347 *(buf++) = cpu_to_be32(host->csr.state);
348 out;
349 case CSR_STATE_SET:
350 *(buf++) = cpu_to_be32(host->csr.state);
351 out;
352 case CSR_NODE_IDS:
353 *(buf++) = cpu_to_be32(host->csr.node_ids);
354 out;
355
356 case CSR_RESET_START:
357 return RCODE_TYPE_ERROR;
358
359 /* address gap - handled by default below */
360
361 case CSR_SPLIT_TIMEOUT_HI:
362 *(buf++) = cpu_to_be32(host->csr.split_timeout_hi);
363 out;
364 case CSR_SPLIT_TIMEOUT_LO:
365 *(buf++) = cpu_to_be32(host->csr.split_timeout_lo);
366 out;
367
368 /* address gap */
369 return RCODE_ADDRESS_ERROR;
370
371 case CSR_CYCLE_TIME:
372 oldcycle = host->csr.cycle_time;
373 host->csr.cycle_time =
374 host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
375
376 if (oldcycle > host->csr.cycle_time) {
377 /* cycle time wrapped around */
378 host->csr.bus_time += 1 << 7;
379 }
380 *(buf++) = cpu_to_be32(host->csr.cycle_time);
381 out;
382 case CSR_BUS_TIME:
383 oldcycle = host->csr.cycle_time;
384 host->csr.cycle_time =
385 host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
386
387 if (oldcycle > host->csr.cycle_time) {
388 /* cycle time wrapped around */
389 host->csr.bus_time += (1 << 7);
390 }
391 *(buf++) = cpu_to_be32(host->csr.bus_time
392 | (host->csr.cycle_time >> 25));
393 out;
394
395 /* address gap */
396 return RCODE_ADDRESS_ERROR;
397
398 case CSR_BUSY_TIMEOUT:
399 /* not yet implemented */
400 return RCODE_ADDRESS_ERROR;
401
402 case CSR_BUS_MANAGER_ID:
403 if (host->driver->hw_csr_reg)
404 ret = host->driver->hw_csr_reg(host, 0, 0, 0);
405 else
406 ret = host->csr.bus_manager_id;
407
408 *(buf++) = cpu_to_be32(ret);
409 out;
410 case CSR_BANDWIDTH_AVAILABLE:
411 if (host->driver->hw_csr_reg)
412 ret = host->driver->hw_csr_reg(host, 1, 0, 0);
413 else
414 ret = host->csr.bandwidth_available;
415
416 *(buf++) = cpu_to_be32(ret);
417 out;
418 case CSR_CHANNELS_AVAILABLE_HI:
419 if (host->driver->hw_csr_reg)
420 ret = host->driver->hw_csr_reg(host, 2, 0, 0);
421 else
422 ret = host->csr.channels_available_hi;
423
424 *(buf++) = cpu_to_be32(ret);
425 out;
426 case CSR_CHANNELS_AVAILABLE_LO:
427 if (host->driver->hw_csr_reg)
428 ret = host->driver->hw_csr_reg(host, 3, 0, 0);
429 else
430 ret = host->csr.channels_available_lo;
431
432 *(buf++) = cpu_to_be32(ret);
433 out;
434
435 case CSR_BROADCAST_CHANNEL:
436 *(buf++) = cpu_to_be32(host->csr.broadcast_channel);
437 out;
438
439 /* address gap to end - fall through to default */
440 default:
441 return RCODE_ADDRESS_ERROR;
442 }
443
444 return RCODE_COMPLETE;
445}
446
447static int write_regs(struct hpsb_host *host, int nodeid, int destid,
448 quadlet_t *data, u64 addr, size_t length, u16 flags)
449{
450 int csraddr = addr - CSR_REGISTER_BASE;
451
452 if ((csraddr | length) & 0x3)
453 return RCODE_TYPE_ERROR;
454
455 length /= 4;
456
457 switch (csraddr) {
458 case CSR_STATE_CLEAR:
459 /* FIXME FIXME FIXME */
460 printk("doh, someone wants to mess with state clear\n");
461 out;
462 case CSR_STATE_SET:
463 printk("doh, someone wants to mess with state set\n");
464 out;
465
466 case CSR_NODE_IDS:
467 host->csr.node_ids &= NODE_MASK << 16;
468 host->csr.node_ids |= be32_to_cpu(*(data++)) & (BUS_MASK << 16);
469 host->node_id = host->csr.node_ids >> 16;
470 host->driver->devctl(host, SET_BUS_ID, host->node_id >> 6);
471 out;
472
473 case CSR_RESET_START:
474 /* FIXME - perform command reset */
475 out;
476
477 /* address gap */
478 return RCODE_ADDRESS_ERROR;
479
480 case CSR_SPLIT_TIMEOUT_HI:
481 host->csr.split_timeout_hi =
482 be32_to_cpu(*(data++)) & 0x00000007;
483 calculate_expire(&host->csr);
484 out;
485 case CSR_SPLIT_TIMEOUT_LO:
486 host->csr.split_timeout_lo =
487 be32_to_cpu(*(data++)) & 0xfff80000;
488 calculate_expire(&host->csr);
489 out;
490
491 /* address gap */
492 return RCODE_ADDRESS_ERROR;
493
494 case CSR_CYCLE_TIME:
495 /* should only be set by cycle start packet, automatically */
496 host->csr.cycle_time = be32_to_cpu(*data);
497 host->driver->devctl(host, SET_CYCLE_COUNTER,
498 be32_to_cpu(*(data++)));
499 out;
500 case CSR_BUS_TIME:
501 host->csr.bus_time = be32_to_cpu(*(data++)) & 0xffffff80;
502 out;
503
504 /* address gap */
505 return RCODE_ADDRESS_ERROR;
506
507 case CSR_BUSY_TIMEOUT:
508 /* not yet implemented */
509 return RCODE_ADDRESS_ERROR;
510
511 case CSR_BUS_MANAGER_ID:
512 case CSR_BANDWIDTH_AVAILABLE:
513 case CSR_CHANNELS_AVAILABLE_HI:
514 case CSR_CHANNELS_AVAILABLE_LO:
515 /* these are not writable, only lockable */
516 return RCODE_TYPE_ERROR;
517
518 case CSR_BROADCAST_CHANNEL:
519 /* only the valid bit can be written */
520 host->csr.broadcast_channel = (host->csr.broadcast_channel & ~0x40000000)
521 | (be32_to_cpu(*data) & 0x40000000);
522 out;
523
524 /* address gap to end - fall through */
525 default:
526 return RCODE_ADDRESS_ERROR;
527 }
528
529 return RCODE_COMPLETE;
530}
531
532#undef out
533
534
535static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
536 u64 addr, quadlet_t data, quadlet_t arg, int extcode, u16 fl)
537{
538 int csraddr = addr - CSR_REGISTER_BASE;
539 unsigned long flags;
540 quadlet_t *regptr = NULL;
541
542 if (csraddr & 0x3)
543 return RCODE_TYPE_ERROR;
544
545 if (csraddr < CSR_BUS_MANAGER_ID || csraddr > CSR_CHANNELS_AVAILABLE_LO
546 || extcode != EXTCODE_COMPARE_SWAP)
547 goto unsupported_lockreq;
548
549 data = be32_to_cpu(data);
550 arg = be32_to_cpu(arg);
551
552 /* Is somebody releasing the broadcast_channel on us? */
553 if (csraddr == CSR_CHANNELS_AVAILABLE_HI && (data & 0x1)) {
554 /* Note: this may not be the right way to handle
555 * the problem, so we should look into the proper way
556 * eventually. */
557 HPSB_WARN("Node [" NODE_BUS_FMT "] wants to release "
558 "broadcast channel 31. Ignoring.",
559 NODE_BUS_ARGS(host, nodeid));
560
561 data &= ~0x1; /* keep broadcast channel allocated */
562 }
563
564 if (host->driver->hw_csr_reg) {
565 quadlet_t old;
566
567 old = host->driver->
568 hw_csr_reg(host, (csraddr - CSR_BUS_MANAGER_ID) >> 2,
569 data, arg);
570
571 *store = cpu_to_be32(old);
572 return RCODE_COMPLETE;
573 }
574
575 spin_lock_irqsave(&host->csr.lock, flags);
576
577 switch (csraddr) {
578 case CSR_BUS_MANAGER_ID:
579 regptr = &host->csr.bus_manager_id;
580 *store = cpu_to_be32(*regptr);
581 if (*regptr == arg)
582 *regptr = data;
583 break;
584
585 case CSR_BANDWIDTH_AVAILABLE:
586 {
587 quadlet_t bandwidth;
588 quadlet_t old;
589 quadlet_t new;
590
591 regptr = &host->csr.bandwidth_available;
592 old = *regptr;
593
594 /* bandwidth available algorithm adapted from IEEE 1394a-2000 spec */
595 if (arg > 0x1fff) {
596 *store = cpu_to_be32(old); /* change nothing */
597 break;
598 }
599 data &= 0x1fff;
600 if (arg >= data) {
601 /* allocate bandwidth */
602 bandwidth = arg - data;
603 if (old >= bandwidth) {
604 new = old - bandwidth;
605 *store = cpu_to_be32(arg);
606 *regptr = new;
607 } else {
608 *store = cpu_to_be32(old);
609 }
610 } else {
611 /* deallocate bandwidth */
612 bandwidth = data - arg;
613 if (old + bandwidth < 0x2000) {
614 new = old + bandwidth;
615 *store = cpu_to_be32(arg);
616 *regptr = new;
617 } else {
618 *store = cpu_to_be32(old);
619 }
620 }
621 break;
622 }
623
624 case CSR_CHANNELS_AVAILABLE_HI:
625 {
626 /* Lock algorithm for CHANNELS_AVAILABLE as recommended by 1394a-2000 */
627 quadlet_t affected_channels = arg ^ data;
628
629 regptr = &host->csr.channels_available_hi;
630
631 if ((arg & affected_channels) == (*regptr & affected_channels)) {
632 *regptr ^= affected_channels;
633 *store = cpu_to_be32(arg);
634 } else {
635 *store = cpu_to_be32(*regptr);
636 }
637
638 break;
639 }
640
641 case CSR_CHANNELS_AVAILABLE_LO:
642 {
643 /* Lock algorithm for CHANNELS_AVAILABLE as recommended by 1394a-2000 */
644 quadlet_t affected_channels = arg ^ data;
645
646 regptr = &host->csr.channels_available_lo;
647
648 if ((arg & affected_channels) == (*regptr & affected_channels)) {
649 *regptr ^= affected_channels;
650 *store = cpu_to_be32(arg);
651 } else {
652 *store = cpu_to_be32(*regptr);
653 }
654 break;
655 }
656 }
657
658 spin_unlock_irqrestore(&host->csr.lock, flags);
659
660 return RCODE_COMPLETE;
661
662 unsupported_lockreq:
663 switch (csraddr) {
664 case CSR_STATE_CLEAR:
665 case CSR_STATE_SET:
666 case CSR_RESET_START:
667 case CSR_NODE_IDS:
668 case CSR_SPLIT_TIMEOUT_HI:
669 case CSR_SPLIT_TIMEOUT_LO:
670 case CSR_CYCLE_TIME:
671 case CSR_BUS_TIME:
672 case CSR_BROADCAST_CHANNEL:
673 return RCODE_TYPE_ERROR;
674
675 case CSR_BUSY_TIMEOUT:
676 /* not yet implemented - fall through */
677 default:
678 return RCODE_ADDRESS_ERROR;
679 }
680}
681
682static int lock64_regs(struct hpsb_host *host, int nodeid, octlet_t * store,
683 u64 addr, octlet_t data, octlet_t arg, int extcode, u16 fl)
684{
685 int csraddr = addr - CSR_REGISTER_BASE;
686 unsigned long flags;
687
688 data = be64_to_cpu(data);
689 arg = be64_to_cpu(arg);
690
691 if (csraddr & 0x3)
692 return RCODE_TYPE_ERROR;
693
694 if (csraddr != CSR_CHANNELS_AVAILABLE
695 || extcode != EXTCODE_COMPARE_SWAP)
696 goto unsupported_lock64req;
697
698 /* Is somebody releasing the broadcast_channel on us? */
699 if (csraddr == CSR_CHANNELS_AVAILABLE_HI && (data & 0x100000000ULL)) {
700 /* Note: this may not be the right way to handle
701 * the problem, so we should look into the proper way
702 * eventually. */
703 HPSB_WARN("Node [" NODE_BUS_FMT "] wants to release "
704 "broadcast channel 31. Ignoring.",
705 NODE_BUS_ARGS(host, nodeid));
706
707 data &= ~0x100000000ULL; /* keep broadcast channel allocated */
708 }
709
710 if (host->driver->hw_csr_reg) {
711 quadlet_t data_hi, data_lo;
712 quadlet_t arg_hi, arg_lo;
713 quadlet_t old_hi, old_lo;
714
715 data_hi = data >> 32;
716 data_lo = data & 0xFFFFFFFF;
717 arg_hi = arg >> 32;
718 arg_lo = arg & 0xFFFFFFFF;
719
720 old_hi = host->driver->hw_csr_reg(host, (csraddr - CSR_BUS_MANAGER_ID) >> 2,
721 data_hi, arg_hi);
722
723 old_lo = host->driver->hw_csr_reg(host, ((csraddr + 4) - CSR_BUS_MANAGER_ID) >> 2,
724 data_lo, arg_lo);
725
726 *store = cpu_to_be64(((octlet_t)old_hi << 32) | old_lo);
727 } else {
728 octlet_t old;
729 octlet_t affected_channels = arg ^ data;
730
731 spin_lock_irqsave(&host->csr.lock, flags);
732
733 old = ((octlet_t)host->csr.channels_available_hi << 32) | host->csr.channels_available_lo;
734
735 if ((arg & affected_channels) == (old & affected_channels)) {
736 host->csr.channels_available_hi ^= (affected_channels >> 32);
737 host->csr.channels_available_lo ^= (affected_channels & 0xffffffff);
738 *store = cpu_to_be64(arg);
739 } else {
740 *store = cpu_to_be64(old);
741 }
742
743 spin_unlock_irqrestore(&host->csr.lock, flags);
744 }
745
746 /* Is somebody erroneously releasing the broadcast_channel on us? */
747 if (host->csr.channels_available_hi & 0x1)
748 host->csr.channels_available_hi &= ~0x1;
749
750 return RCODE_COMPLETE;
751
752 unsupported_lock64req:
753 switch (csraddr) {
754 case CSR_STATE_CLEAR:
755 case CSR_STATE_SET:
756 case CSR_RESET_START:
757 case CSR_NODE_IDS:
758 case CSR_SPLIT_TIMEOUT_HI:
759 case CSR_SPLIT_TIMEOUT_LO:
760 case CSR_CYCLE_TIME:
761 case CSR_BUS_TIME:
762 case CSR_BUS_MANAGER_ID:
763 case CSR_BROADCAST_CHANNEL:
764 case CSR_BUSY_TIMEOUT:
765 case CSR_BANDWIDTH_AVAILABLE:
766 return RCODE_TYPE_ERROR;
767
768 default:
769 return RCODE_ADDRESS_ERROR;
770 }
771}
772
773static int write_fcp(struct hpsb_host *host, int nodeid, int dest,
774 quadlet_t *data, u64 addr, size_t length, u16 flags)
775{
776 int csraddr = addr - CSR_REGISTER_BASE;
777
778 if (length > 512)
779 return RCODE_TYPE_ERROR;
780
781 switch (csraddr) {
782 case CSR_FCP_COMMAND:
783 highlevel_fcp_request(host, nodeid, 0, (u8 *)data, length);
784 break;
785 case CSR_FCP_RESPONSE:
786 highlevel_fcp_request(host, nodeid, 1, (u8 *)data, length);
787 break;
788 default:
789 return RCODE_TYPE_ERROR;
790 }
791
792 return RCODE_COMPLETE;
793}
794
795static int read_config_rom(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
796 u64 addr, size_t length, u16 fl)
797{
798 u32 offset = addr - CSR1212_REGISTER_SPACE_BASE;
799
800 if (csr1212_read(host->csr.rom, offset, buffer, length) == CSR1212_SUCCESS)
801 return RCODE_COMPLETE;
802 else
803 return RCODE_ADDRESS_ERROR;
804}
805
806static u64 allocate_addr_range(u64 size, u32 alignment, void *__host)
807{
808 struct hpsb_host *host = (struct hpsb_host*)__host;
809
810 return hpsb_allocate_and_register_addrspace(&csr_highlevel,
811 host,
812 &config_rom_ops,
813 size, alignment,
814 CSR1212_UNITS_SPACE_BASE,
815 CSR1212_UNITS_SPACE_END);
816}
817
818static void release_addr_range(u64 addr, void *__host)
819{
820 struct hpsb_host *host = (struct hpsb_host*)__host;
821 hpsb_unregister_addrspace(&csr_highlevel, host, addr);
822}
823
824
825int init_csr(void)
826{
827 node_cap = csr1212_new_immediate(CSR1212_KV_ID_NODE_CAPABILITIES, 0x0083c0);
828 if (!node_cap) {
829 HPSB_ERR("Failed to allocate memory for Node Capabilities ConfigROM entry!");
830 return -ENOMEM;
831 }
832
833 hpsb_register_highlevel(&csr_highlevel);
834
835 return 0;
836}
837
838void cleanup_csr(void)
839{
840 if (node_cap)
841 csr1212_release_keyval(node_cap);
842 hpsb_unregister_highlevel(&csr_highlevel);
843}
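
Note: csr_crc16() above and csr1212_crc16() in csr1212.c below both implement the 16-bit serial-bus CRC of IEEE 1212, which host_reset() uses to seal the topology_map and speed_map headers. A standalone sketch of the same nibble-at-a-time computation, operating on host-order quadlets (the kernel code byte-swaps with be32_to_cpu() first); the sample input values are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* IEEE 1212 serial-bus CRC-16, processed four bits at a time,
 * as in csr_crc16() above. Input quadlets here are host-order. */
static uint16_t bus_crc16(const uint32_t *data, int length)
{
	uint32_t crc = 0, sum;
	int shift;

	for (; length; length--, data++) {
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (*data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
		}
		crc &= 0xffff;	/* keep 16 bits after each quadlet */
	}
	return crc;
}

int main(void)
{
	/* e.g. a (made-up) two-quadlet topology map body */
	uint32_t map[] = { 0x00030003, 0x807f80ff };

	printf("crc = 0x%04x\n", bus_crc16(map, 2));
	return 0;
}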
diff --git a/drivers/ieee1394/csr.h b/drivers/ieee1394/csr.h
deleted file mode 100644
index 90fb3f2192c3..000000000000
--- a/drivers/ieee1394/csr.h
+++ /dev/null
@@ -1,99 +0,0 @@
1#ifndef _IEEE1394_CSR_H
2#define _IEEE1394_CSR_H
3
4#include <linux/spinlock_types.h>
5
6#include "csr1212.h"
7#include "ieee1394_types.h"
8
9#define CSR_REGISTER_BASE 0xfffff0000000ULL
10
11/* register offsets relative to CSR_REGISTER_BASE */
12#define CSR_STATE_CLEAR 0x0
13#define CSR_STATE_SET 0x4
14#define CSR_NODE_IDS 0x8
15#define CSR_RESET_START 0xc
16#define CSR_SPLIT_TIMEOUT_HI 0x18
17#define CSR_SPLIT_TIMEOUT_LO 0x1c
18#define CSR_CYCLE_TIME 0x200
19#define CSR_BUS_TIME 0x204
20#define CSR_BUSY_TIMEOUT 0x210
21#define CSR_BUS_MANAGER_ID 0x21c
22#define CSR_BANDWIDTH_AVAILABLE 0x220
23#define CSR_CHANNELS_AVAILABLE 0x224
24#define CSR_CHANNELS_AVAILABLE_HI 0x224
25#define CSR_CHANNELS_AVAILABLE_LO 0x228
26#define CSR_BROADCAST_CHANNEL 0x234
27#define CSR_CONFIG_ROM 0x400
28#define CSR_CONFIG_ROM_END 0x800
29#define CSR_FCP_COMMAND 0xB00
30#define CSR_FCP_RESPONSE 0xD00
31#define CSR_FCP_END 0xF00
32#define CSR_TOPOLOGY_MAP 0x1000
33#define CSR_TOPOLOGY_MAP_END 0x1400
34#define CSR_SPEED_MAP 0x2000
35#define CSR_SPEED_MAP_END 0x3000
36
37/* IEEE 1394 bus specific Configuration ROM Key IDs */
38#define IEEE1394_KV_ID_POWER_REQUIREMENTS (0x30)
39
40/* IEEE 1394 Bus Information Block specifics */
41#define CSR_BUS_INFO_SIZE (5 * sizeof(quadlet_t))
42
43#define CSR_IRMC_SHIFT 31
44#define CSR_CMC_SHIFT 30
45#define CSR_ISC_SHIFT 29
46#define CSR_BMC_SHIFT 28
47#define CSR_PMC_SHIFT 27
48#define CSR_CYC_CLK_ACC_SHIFT 16
49#define CSR_MAX_REC_SHIFT 12
50#define CSR_MAX_ROM_SHIFT 8
51#define CSR_GENERATION_SHIFT 4
52
53static inline void csr_set_bus_info_generation(struct csr1212_csr *csr, u8 gen)
54{
55 csr->bus_info_data[2] &= ~cpu_to_be32(0xf << CSR_GENERATION_SHIFT);
56 csr->bus_info_data[2] |= cpu_to_be32((u32)gen << CSR_GENERATION_SHIFT);
57}
58
59struct csr_control {
60 spinlock_t lock;
61
62 quadlet_t state;
63 quadlet_t node_ids;
64 quadlet_t split_timeout_hi, split_timeout_lo;
65 unsigned long expire; /* Calculated from split_timeout */
66 quadlet_t cycle_time;
67 quadlet_t bus_time;
68 quadlet_t bus_manager_id;
69 quadlet_t bandwidth_available;
70 quadlet_t channels_available_hi, channels_available_lo;
71 quadlet_t broadcast_channel;
72
73 /* Bus Info */
74 quadlet_t guid_hi, guid_lo;
75 u8 cyc_clk_acc;
76 u8 max_rec;
77 u8 max_rom;
78 u8 generation; /* Only use values between 0x2 and 0xf */
79 u8 lnk_spd;
80
81 unsigned long gen_timestamp[16];
82
83 struct csr1212_csr *rom;
84
85 quadlet_t topology_map[256];
86 quadlet_t speed_map[1024];
87};
88
89extern struct csr1212_bus_ops csr_bus_ops;
90
91int init_csr(void);
92void cleanup_csr(void);
93
94/* hpsb_update_config_rom() is deprecated */
95struct hpsb_host;
96int hpsb_update_config_rom(struct hpsb_host *host, const quadlet_t *new_rom,
97 size_t size, unsigned char rom_version);
98
99#endif /* _IEEE1394_CSR_H */
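
Note: the SPLIT_TIMEOUT registers declared above are converted to a transaction deadline by calculate_expire() in csr.c: bits 0:2 of the HI register carry seconds, bits 19:31 of the LO register carry units of 125 microseconds, and the result is clamped to the 100 ms minimum of IEEE 1394a-2000 clause 8.3.2.2.6. A minimal sketch of that arithmetic, with a hypothetical user-space stand-in for the kernel's usecs_to_jiffies() at an assumed HZ of 1000:

#include <stdio.h>

#define HZ 1000	/* assumed tick rate for this sketch */

/* Hypothetical user-space stand-in for the kernel helper. */
static unsigned long usecs_to_jiffies(unsigned int usecs)
{
	return (usecs + (1000000 / HZ) - 1) / (1000000 / HZ);
}

/* Same arithmetic as calculate_expire() in csr.c. */
static unsigned long split_timeout_expire(unsigned int hi, unsigned int lo)
{
	unsigned int usecs = (hi & 7) * 1000000 + (lo >> 19) * 125;

	return usecs_to_jiffies(usecs > 100000 ? usecs : 100000);
}

int main(void)
{
	/* add_host() initializes split_timeout_lo to 800 << 19,
	 * i.e. 800 * 125 us = 100 ms -> 100 jiffies at HZ=1000 */
	printf("expire = %lu jiffies\n",
	       split_timeout_expire(0, 800u << 19));
	return 0;
}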
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
deleted file mode 100644
index e76cac64c533..000000000000
--- a/drivers/ieee1394/csr1212.c
+++ /dev/null
@@ -1,1467 +0,0 @@
1/*
2 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
3 *
4 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
5 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30
31/* TODO List:
32 * - Verify interface consistency: i.e., public functions that take a size
33 * parameter expect size to be in bytes.
34 */
35
36#include <linux/errno.h>
37#include <linux/kernel.h>
38#include <linux/kmemcheck.h>
39#include <linux/string.h>
40#include <asm/bug.h>
41#include <asm/byteorder.h>
42
43#include "csr1212.h"
44
45
46/* Permitted key type for each key id */
47#define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
48#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
49#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
50#define __L (1 << CSR1212_KV_TYPE_LEAF)
51static const u8 csr1212_key_id_type_map[0x30] = {
52 __C, /* used by Apple iSight */
53 __D | __L, /* Descriptor */
54 __I | __D | __L, /* Bus_Dependent_Info */
55 __I | __D | __L, /* Vendor */
56 __I, /* Hardware_Version */
57 0, 0, /* Reserved */
58 __D | __L | __I, /* Module */
59 __I, 0, 0, 0, /* used by Apple iSight, Reserved */
60 __I, /* Node_Capabilities */
61 __L, /* EUI_64 */
62 0, 0, 0, /* Reserved */
63 __D, /* Unit */
64 __I, /* Specifier_ID */
65 __I, /* Version */
66 __I | __C | __D | __L, /* Dependent_Info */
67 __L, /* Unit_Location */
68 0, /* Reserved */
69 __I, /* Model */
70 __D, /* Instance */
71 __L, /* Keyword */
72 __D, /* Feature */
73 __L, /* Extended_ROM */
74 __I, /* Extended_Key_Specifier_ID */
75 __I, /* Extended_Key */
76 __I | __C | __D | __L, /* Extended_Data */
77 __L, /* Modifiable_Descriptor */
78 __I, /* Directory_ID */
79 __I, /* Revision */
80};
81#undef __I
82#undef __C
83#undef __D
84#undef __L
85
86
87#define quads_to_bytes(_q) ((_q) * sizeof(u32))
88#define bytes_to_quads(_b) DIV_ROUND_UP(_b, sizeof(u32))
89
90static void free_keyval(struct csr1212_keyval *kv)
91{
92 if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
93 (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
94 CSR1212_FREE(kv->value.leaf.data);
95
96 CSR1212_FREE(kv);
97}
98
99static u16 csr1212_crc16(const u32 *buffer, size_t length)
100{
101 int shift;
102 u32 data;
103 u16 sum, crc = 0;
104
105 for (; length; length--) {
106 data = be32_to_cpu(*buffer);
107 buffer++;
108 for (shift = 28; shift >= 0; shift -= 4 ) {
109 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
110 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
111 }
112 crc &= 0xffff;
113 }
114
115 return cpu_to_be16(crc);
116}
117
118/* Microsoft computes the CRC with the bytes in reverse order. */
119static u16 csr1212_msft_crc16(const u32 *buffer, size_t length)
120{
121 int shift;
122 u32 data;
123 u16 sum, crc = 0;
124
125 for (; length; length--) {
126 data = le32_to_cpu(*buffer);
127 buffer++;
128 for (shift = 28; shift >= 0; shift -= 4 ) {
129 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
130 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
131 }
132 crc &= 0xffff;
133 }
134
135 return cpu_to_be16(crc);
136}
137
138static struct csr1212_dentry *
139csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
140{
141 struct csr1212_dentry *pos;
142
143 for (pos = dir->value.directory.dentries_head;
144 pos != NULL; pos = pos->next)
145 if (pos->kv == kv)
146 return pos;
147 return NULL;
148}
149
150static struct csr1212_keyval *
151csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset)
152{
153 struct csr1212_keyval *kv;
154
155 for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next)
156 if (kv->offset == offset)
157 return kv;
158 return NULL;
159}
160
161
162/* Creation Routines */
163
164struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
165 size_t bus_info_size, void *private)
166{
167 struct csr1212_csr *csr;
168
169 csr = CSR1212_MALLOC(sizeof(*csr));
170 if (!csr)
171 return NULL;
172
173 csr->cache_head =
174 csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
175 CSR1212_CONFIG_ROM_SPACE_SIZE);
176 if (!csr->cache_head) {
177 CSR1212_FREE(csr);
178 return NULL;
179 }
180
181 /* The keyval key id is not used for the root node, but a valid key id
182 * that can be used for a directory needs to be passed to
183 * csr1212_new_directory(). */
184 csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
185 if (!csr->root_kv) {
186 CSR1212_FREE(csr->cache_head);
187 CSR1212_FREE(csr);
188 return NULL;
189 }
190
191 csr->bus_info_data = csr->cache_head->data;
192 csr->bus_info_len = bus_info_size;
193 csr->crc_len = bus_info_size;
194 csr->ops = ops;
195 csr->private = private;
196 csr->cache_tail = csr->cache_head;
197
198 return csr;
199}
200
201void csr1212_init_local_csr(struct csr1212_csr *csr,
202 const u32 *bus_info_data, int max_rom)
203{
204 static const int mr_map[] = { 4, 64, 1024, 0 };
205
206 BUG_ON(max_rom & ~0x3);
207 csr->max_rom = mr_map[max_rom];
208 memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
209}
210
211static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
212{
213 struct csr1212_keyval *kv;
214
215 if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
216 return NULL;
217
218 kv = CSR1212_MALLOC(sizeof(*kv));
219 if (!kv)
220 return NULL;
221
222 atomic_set(&kv->refcnt, 1);
223 kv->key.type = type;
224 kv->key.id = key;
225 kv->associate = NULL;
226 kv->next = NULL;
227 kv->prev = NULL;
228 kv->offset = 0;
229 kv->valid = 0;
230 return kv;
231}
232
233struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value)
234{
235 struct csr1212_keyval *kv;
236
237 kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
238 if (!kv)
239 return NULL;
240
241 kv->value.immediate = value;
242 kv->valid = 1;
243 return kv;
244}
245
246static struct csr1212_keyval *
247csr1212_new_leaf(u8 key, const void *data, size_t data_len)
248{
249 struct csr1212_keyval *kv;
250
251 kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
252 if (!kv)
253 return NULL;
254
255 if (data_len > 0) {
256 kv->value.leaf.data = CSR1212_MALLOC(data_len);
257 if (!kv->value.leaf.data) {
258 CSR1212_FREE(kv);
259 return NULL;
260 }
261
262 if (data)
263 memcpy(kv->value.leaf.data, data, data_len);
264 } else {
265 kv->value.leaf.data = NULL;
266 }
267
268 kv->value.leaf.len = bytes_to_quads(data_len);
269 kv->offset = 0;
270 kv->valid = 1;
271
272 return kv;
273}
274
275static struct csr1212_keyval *
276csr1212_new_csr_offset(u8 key, u32 csr_offset)
277{
278 struct csr1212_keyval *kv;
279
280 kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
281 if (!kv)
282 return NULL;
283
284 kv->value.csr_offset = csr_offset;
285
286 kv->offset = 0;
287 kv->valid = 1;
288 return kv;
289}
290
291struct csr1212_keyval *csr1212_new_directory(u8 key)
292{
293 struct csr1212_keyval *kv;
294
295 kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
296 if (!kv)
297 return NULL;
298
299 kv->value.directory.len = 0;
300 kv->offset = 0;
301 kv->value.directory.dentries_head = NULL;
302 kv->value.directory.dentries_tail = NULL;
303 kv->valid = 1;
304 return kv;
305}
306
307void csr1212_associate_keyval(struct csr1212_keyval *kv,
308 struct csr1212_keyval *associate)
309{
310 BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
311 (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
312 associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
313 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
314 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
315 associate->key.id < 0x30) ||
316 (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
317 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) ||
318 (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
319 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) ||
320 (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
321 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) ||
322 (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
323 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY));
324
325 if (kv->associate)
326 csr1212_release_keyval(kv->associate);
327
328 csr1212_keep_keyval(associate);
329 kv->associate = associate;
330}
331
332static int __csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
333 struct csr1212_keyval *kv,
334 bool keep_keyval)
335{
336 struct csr1212_dentry *dentry;
337
338 BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY);
339
340 dentry = CSR1212_MALLOC(sizeof(*dentry));
341 if (!dentry)
342 return -ENOMEM;
343
344 if (keep_keyval)
345 csr1212_keep_keyval(kv);
346 dentry->kv = kv;
347
348 dentry->next = NULL;
349 dentry->prev = dir->value.directory.dentries_tail;
350
351 if (!dir->value.directory.dentries_head)
352 dir->value.directory.dentries_head = dentry;
353
354 if (dir->value.directory.dentries_tail)
355 dir->value.directory.dentries_tail->next = dentry;
356 dir->value.directory.dentries_tail = dentry;
357
358 return CSR1212_SUCCESS;
359}
360
361int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
362 struct csr1212_keyval *kv)
363{
364 return __csr1212_attach_keyval_to_directory(dir, kv, true);
365}
366
367#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
368 (&((kv)->value.leaf.data[1]))
369
370#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
371 ((kv)->value.leaf.data[0] = \
372 cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
373 ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
374#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
375 ((kv)->value.leaf.data[0] = \
376 cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
377 CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
378 ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
379
380static struct csr1212_keyval *
381csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
382 const void *data, size_t data_len)
383{
384 struct csr1212_keyval *kv;
385
386 kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
387 data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
388 if (!kv)
389 return NULL;
390
391 kmemcheck_annotate_variable(kv->value.leaf.data[0]);
392 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
393 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
394
395 if (data)
396 memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
397
398 return kv;
399}
400
401/* Check if string conforms to minimal ASCII as per IEEE 1212 clause 7.4 */
402static int csr1212_check_minimal_ascii(const char *s)
403{
404 static const char minimal_ascii_table[] = {
405 /* 1 2 4 8 16 32 64 128 */
406 128, /* --, --, --, --, --, --, --, 07, */
407 4 + 16 + 32, /* --, --, 0a, --, 0C, 0D, --, --, */
408 0, /* --, --, --, --, --, --, --, --, */
409 0, /* --, --, --, --, --, --, --, --, */
410 255 - 8 - 16, /* 20, 21, 22, --, --, 25, 26, 27, */
411 255, /* 28, 29, 2a, 2b, 2c, 2d, 2e, 2f, */
412 255, /* 30, 31, 32, 33, 34, 35, 36, 37, */
413 255, /* 38, 39, 3a, 3b, 3c, 3d, 3e, 3f, */
414 255, /* 40, 41, 42, 43, 44, 45, 46, 47, */
415 255, /* 48, 49, 4a, 4b, 4c, 4d, 4e, 4f, */
416 255, /* 50, 51, 52, 53, 54, 55, 56, 57, */
417 1 + 2 + 4 + 128, /* 58, 59, 5a, --, --, --, --, 5f, */
418 255 - 1, /* --, 61, 62, 63, 64, 65, 66, 67, */
419 255, /* 68, 69, 6a, 6b, 6c, 6d, 6e, 6f, */
420 255, /* 70, 71, 72, 73, 74, 75, 76, 77, */
421 1 + 2 + 4, /* 78, 79, 7a, --, --, --, --, --, */
422 };
423 int i, j;
424
425 for (; *s; s++) {
426 i = *s >> 3; /* i = *s / 8; */
427		j = 1 << (*s & 7);	/*  j = 1 << (*s % 8);	*/
428
429 if (i >= ARRAY_SIZE(minimal_ascii_table) ||
430 !(minimal_ascii_table[i] & j))
431 return -EINVAL;
432 }
433 return 0;
434}
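
/* A standalone sketch of the bitmap lookup above: each of the sixteen
 * table bytes covers eight consecutive character codes, so character c
 * is permitted iff bit (c % 8) of byte (c / 8) is set.  For example,
 * 'A' (0x41) indexes byte 8 with mask 0x02, and minimal_ascii_table[8]
 * is 255, so 'A' passes: */

#include <stdio.h>

int main(void)
{
	unsigned char c = 'A';		/* 0x41 */
	int i = c >> 3;			/* byte index: 0x41 / 8 = 8 */
	int j = 1 << (c & 7);		/* bit mask: 1 << (0x41 % 8) = 0x02 */

	printf("byte %d, mask 0x%02x\n", i, j);
	return 0;
}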
435
436/* IEEE 1212 clause 7.5.4.1 textual descriptors (English, minimal ASCII) */
437struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
438{
439 struct csr1212_keyval *kv;
440 u32 *text;
441 size_t str_len, quads;
442
443 if (!s || !*s || csr1212_check_minimal_ascii(s))
444 return NULL;
445
446 str_len = strlen(s);
447 quads = bytes_to_quads(str_len);
448 kv = csr1212_new_descriptor_leaf(0, 0, NULL, quads_to_bytes(quads) +
449 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
450 if (!kv)
451 return NULL;
452
453 kv->value.leaf.data[1] = 0; /* width, character_set, language */
454 text = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
455 text[quads - 1] = 0; /* padding */
456 memcpy(text, s, str_len);
457
458 return kv;
459}
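
/* Sketch of typical use, after the style of the in-tree callers: create
 * an immediate entry, hang an English name off it via association, and
 * attach it to the root directory.  The model ID value here is made up. */

static int add_model_entry(struct csr1212_csr *csr)
{
	struct csr1212_keyval *model, *name;
	int ret = -ENOMEM;

	model = csr1212_new_immediate(CSR1212_KV_ID_MODEL, 0x123456);
	name = csr1212_new_string_descriptor_leaf("Sample Device");
	if (model && name) {
		csr1212_associate_keyval(model, name);
		ret = csr1212_attach_keyval_to_directory(csr->root_kv, model);
	}
	/* the directory and the association now hold their own references */
	if (name)
		csr1212_release_keyval(name);
	if (model)
		csr1212_release_keyval(model);
	return ret;
}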
460
461
462/* Destruction Routines */
463
464void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
465 struct csr1212_keyval *kv)
466{
467 struct csr1212_dentry *dentry;
468
469 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
470 return;
471
472 dentry = csr1212_find_keyval(dir, kv);
473
474 if (!dentry)
475 return;
476
477 if (dentry->prev)
478 dentry->prev->next = dentry->next;
479 if (dentry->next)
480 dentry->next->prev = dentry->prev;
481 if (dir->value.directory.dentries_head == dentry)
482 dir->value.directory.dentries_head = dentry->next;
483 if (dir->value.directory.dentries_tail == dentry)
484 dir->value.directory.dentries_tail = dentry->prev;
485
486 CSR1212_FREE(dentry);
487
488 csr1212_release_keyval(kv);
489}
490
491/* This function is used to free the memory taken by a keyval. If the given
492 * keyval is a directory type, then any keyvals contained in that directory
 493 * will be destroyed as well if no one holds a reference on them. By means of
494 * list manipulation, this routine will descend a directory structure in a
495 * non-recursive manner. */
496void csr1212_release_keyval(struct csr1212_keyval *kv)
497{
498 struct csr1212_keyval *k, *a;
499 struct csr1212_dentry dentry;
500 struct csr1212_dentry *head, *tail;
501
502 if (!atomic_dec_and_test(&kv->refcnt))
503 return;
504
505 dentry.kv = kv;
506 dentry.next = NULL;
507 dentry.prev = NULL;
508
509 head = &dentry;
510 tail = head;
511
512 while (head) {
513 k = head->kv;
514
515 while (k) {
516 /* must not dec_and_test kv->refcnt again */
517 if (k != kv && !atomic_dec_and_test(&k->refcnt))
518 break;
519
520 a = k->associate;
521
522 if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
523 /* If the current entry is a directory, move all
524 * the entries to the destruction list. */
525 if (k->value.directory.dentries_head) {
526 tail->next =
527 k->value.directory.dentries_head;
528 k->value.directory.dentries_head->prev =
529 tail;
530 tail = k->value.directory.dentries_tail;
531 }
532 }
533 free_keyval(k);
534 k = a;
535 }
536
537 head = head->next;
538 if (head) {
539 if (head->prev && head->prev != &dentry)
540 CSR1212_FREE(head->prev);
541 head->prev = NULL;
542 } else if (tail != &dentry) {
543 CSR1212_FREE(tail);
544 }
545 }
546}
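
/* The splice-into-worklist idea in miniature, on a toy tree whose
 * siblings are already chained through ->next (a sketch, independent of
 * the csr1212 types and their reference counting): */

#include <stdlib.h>

struct node {
	struct node *next;			/* sibling / work-list link */
	struct node *children_head, *children_tail;
};

static void free_tree(struct node *root)
{
	struct node *head = root, *tail = root, *n;

	root->next = NULL;
	while (head) {
		n = head;
		if (n->children_head) {	/* splice children onto the tail */
			tail->next = n->children_head;
			tail = n->children_tail;
		}
		head = n->next;
		free(n);
	}
}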
547
548void csr1212_destroy_csr(struct csr1212_csr *csr)
549{
550 struct csr1212_csr_rom_cache *c, *oc;
551 struct csr1212_cache_region *cr, *ocr;
552
553 csr1212_release_keyval(csr->root_kv);
554
555 c = csr->cache_head;
556 while (c) {
557 oc = c;
558 cr = c->filled_head;
559 while (cr) {
560 ocr = cr;
561 cr = cr->next;
562 CSR1212_FREE(ocr);
563 }
564 c = c->next;
565 CSR1212_FREE(oc);
566 }
567
568 CSR1212_FREE(csr);
569}
570
571
572/* CSR Image Creation */
573
574static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
575{
576 struct csr1212_csr_rom_cache *cache;
577 u64 csr_addr;
578
579 BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range ||
580 !csr->ops->release_addr || csr->max_rom < 1);
581
582 /* ROM size must be a multiple of csr->max_rom */
583 romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
584
585 csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom,
586 csr->private);
587 if (csr_addr == CSR1212_INVALID_ADDR_SPACE)
588 return -ENOMEM;
589
590 if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
591 /* Invalid address returned from allocate_addr_range(). */
592 csr->ops->release_addr(csr_addr, csr->private);
593 return -ENOMEM;
594 }
595
596 cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE,
597 romsize);
598 if (!cache) {
599 csr->ops->release_addr(csr_addr, csr->private);
600 return -ENOMEM;
601 }
602
603 cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF,
604 CSR1212_KV_ID_EXTENDED_ROM);
605 if (!cache->ext_rom) {
606 csr->ops->release_addr(csr_addr, csr->private);
607 CSR1212_FREE(cache);
608 return -ENOMEM;
609 }
610
611 if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) !=
612 CSR1212_SUCCESS) {
613 csr1212_release_keyval(cache->ext_rom);
614 csr->ops->release_addr(csr_addr, csr->private);
615 CSR1212_FREE(cache);
616 return -ENOMEM;
617 }
618 cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
619 cache->ext_rom->value.leaf.len = -1;
620 cache->ext_rom->value.leaf.data = cache->data;
621
622 /* Add cache to tail of cache list */
623 cache->prev = csr->cache_tail;
624 csr->cache_tail->next = cache;
625 csr->cache_tail = cache;
626 return CSR1212_SUCCESS;
627}
628
629static void csr1212_remove_cache(struct csr1212_csr *csr,
630 struct csr1212_csr_rom_cache *cache)
631{
632 if (csr->cache_head == cache)
633 csr->cache_head = cache->next;
634 if (csr->cache_tail == cache)
635 csr->cache_tail = cache->prev;
636
637 if (cache->prev)
638 cache->prev->next = cache->next;
639 if (cache->next)
640 cache->next->prev = cache->prev;
641
642 if (cache->ext_rom) {
643 csr1212_detach_keyval_from_directory(csr->root_kv,
644 cache->ext_rom);
645 csr1212_release_keyval(cache->ext_rom);
646 }
647
648 CSR1212_FREE(cache);
649}
650
651static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
652 struct csr1212_keyval **layout_tail)
653{
654 struct csr1212_dentry *dentry;
655 struct csr1212_keyval *dkv;
656 struct csr1212_keyval *last_extkey_spec = NULL;
657 struct csr1212_keyval *last_extkey = NULL;
658 int num_entries = 0;
659
660 for (dentry = dir->value.directory.dentries_head; dentry;
661 dentry = dentry->next) {
662 for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
663 /* Special Case: Extended Key Specifier_ID */
664 if (dkv->key.id ==
665 CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
666 if (last_extkey_spec == NULL)
667 last_extkey_spec = dkv;
668 else if (dkv->value.immediate !=
669 last_extkey_spec->value.immediate)
670 last_extkey_spec = dkv;
671 else
672 continue;
673 /* Special Case: Extended Key */
674 } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
675 if (last_extkey == NULL)
676 last_extkey = dkv;
677 else if (dkv->value.immediate !=
678 last_extkey->value.immediate)
679 last_extkey = dkv;
680 else
681 continue;
682 }
683
684 num_entries += 1;
685
686 switch (dkv->key.type) {
687 default:
688 case CSR1212_KV_TYPE_IMMEDIATE:
689 case CSR1212_KV_TYPE_CSR_OFFSET:
690 break;
691 case CSR1212_KV_TYPE_LEAF:
692 case CSR1212_KV_TYPE_DIRECTORY:
693 /* Remove from list */
694 if (dkv->prev && (dkv->prev->next == dkv))
695 dkv->prev->next = dkv->next;
696 if (dkv->next && (dkv->next->prev == dkv))
697 dkv->next->prev = dkv->prev;
 698				/* if (dkv == *layout_tail)
 699				 *	*layout_tail = dkv->prev; */
700
701 /* Special case: Extended ROM leafs */
702 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
703 dkv->value.leaf.len = -1;
704 /* Don't add Extended ROM leafs in the
705 * layout list, they are handled
706 * differently. */
707 break;
708 }
709
710 /* Add to tail of list */
711 dkv->next = NULL;
712 dkv->prev = *layout_tail;
713 (*layout_tail)->next = dkv;
714 *layout_tail = dkv;
715 break;
716 }
717 }
718 }
719 return num_entries;
720}
721
722static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
723{
724 struct csr1212_keyval *ltail = kv;
725 size_t agg_size = 0;
726
727 while (kv) {
728 switch (kv->key.type) {
729 case CSR1212_KV_TYPE_LEAF:
730 /* Add 1 quadlet for crc/len field */
731 agg_size += kv->value.leaf.len + 1;
732 break;
733
734 case CSR1212_KV_TYPE_DIRECTORY:
735 kv->value.directory.len =
736 csr1212_generate_layout_subdir(kv, &ltail);
737 /* Add 1 quadlet for crc/len field */
738 agg_size += kv->value.directory.len + 1;
739 break;
740 }
741 kv = kv->next;
742 }
743 return quads_to_bytes(agg_size);
744}
745
746static struct csr1212_keyval *
747csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
748 struct csr1212_keyval *start_kv, int start_pos)
749{
750 struct csr1212_keyval *kv = start_kv;
751 struct csr1212_keyval *okv = start_kv;
752 int pos = start_pos;
753 int kv_len = 0, okv_len = 0;
754
755 cache->layout_head = kv;
756
757 while (kv && pos < cache->size) {
758 /* Special case: Extended ROM leafs */
759 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
760 kv->offset = cache->offset + pos;
761
762 switch (kv->key.type) {
763 case CSR1212_KV_TYPE_LEAF:
764 kv_len = kv->value.leaf.len;
765 break;
766
767 case CSR1212_KV_TYPE_DIRECTORY:
768 kv_len = kv->value.directory.len;
769 break;
770
771 default:
772 /* Should never get here */
773 WARN_ON(1);
774 break;
775 }
776
777 pos += quads_to_bytes(kv_len + 1);
778
779 if (pos <= cache->size) {
780 okv = kv;
781 okv_len = kv_len;
782 kv = kv->next;
783 }
784 }
785
786 cache->layout_tail = okv;
787 cache->len = okv->offset - cache->offset + quads_to_bytes(okv_len + 1);
788
789 return kv;
790}
791
792#define CSR1212_KV_KEY_SHIFT 24
793#define CSR1212_KV_KEY_TYPE_SHIFT 6
794#define CSR1212_KV_KEY_ID_MASK 0x3f
795#define CSR1212_KV_KEY_TYPE_MASK 0x3 /* after shift */
796
797static void
798csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer)
799{
800 struct csr1212_dentry *dentry;
801 struct csr1212_keyval *last_extkey_spec = NULL;
802 struct csr1212_keyval *last_extkey = NULL;
803 int index = 0;
804
805 for (dentry = dir->value.directory.dentries_head;
806 dentry;
807 dentry = dentry->next) {
808 struct csr1212_keyval *a;
809
810 for (a = dentry->kv; a; a = a->associate) {
811 u32 value = 0;
812
813 /* Special Case: Extended Key Specifier_ID */
814 if (a->key.id ==
815 CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
816 if (last_extkey_spec == NULL)
817 last_extkey_spec = a;
818 else if (a->value.immediate !=
819 last_extkey_spec->value.immediate)
820 last_extkey_spec = a;
821 else
822 continue;
823
824 /* Special Case: Extended Key */
825 } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
826 if (last_extkey == NULL)
827 last_extkey = a;
828 else if (a->value.immediate !=
829 last_extkey->value.immediate)
830 last_extkey = a;
831 else
832 continue;
833 }
834
835 switch (a->key.type) {
836 case CSR1212_KV_TYPE_IMMEDIATE:
837 value = a->value.immediate;
838 break;
839 case CSR1212_KV_TYPE_CSR_OFFSET:
840 value = a->value.csr_offset;
841 break;
842 case CSR1212_KV_TYPE_LEAF:
843 value = a->offset;
844 value -= dir->offset + quads_to_bytes(1+index);
845 value = bytes_to_quads(value);
846 break;
847 case CSR1212_KV_TYPE_DIRECTORY:
848 value = a->offset;
849 value -= dir->offset + quads_to_bytes(1+index);
850 value = bytes_to_quads(value);
851 break;
852 default:
853 /* Should never get here */
854 WARN_ON(1);
855 break;
856 }
857
858 value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) <<
859 CSR1212_KV_KEY_SHIFT;
860 value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
861 (CSR1212_KV_KEY_SHIFT +
862 CSR1212_KV_KEY_TYPE_SHIFT);
863 data_buffer[index] = cpu_to_be32(value);
864 index++;
865 }
866 }
867}
868
869struct csr1212_keyval_img {
870 u16 length;
871 u16 crc;
872
873 /* Must be last */
874 u32 data[0]; /* older gcc can't handle [] which is standard */
875};
876
877static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
878{
879 struct csr1212_keyval *kv, *nkv;
880 struct csr1212_keyval_img *kvi;
881
882 for (kv = cache->layout_head;
883 kv != cache->layout_tail->next;
884 kv = nkv) {
885 kvi = (struct csr1212_keyval_img *)(cache->data +
886 bytes_to_quads(kv->offset - cache->offset));
887 switch (kv->key.type) {
888 default:
889 case CSR1212_KV_TYPE_IMMEDIATE:
890 case CSR1212_KV_TYPE_CSR_OFFSET:
891 /* Should never get here */
892 WARN_ON(1);
893 break;
894
895 case CSR1212_KV_TYPE_LEAF:
896 /* Don't copy over Extended ROM areas, they are
897 * already filled out! */
898 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
899 memcpy(kvi->data, kv->value.leaf.data,
900 quads_to_bytes(kv->value.leaf.len));
901
902 kvi->length = cpu_to_be16(kv->value.leaf.len);
903 kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
904 break;
905
906 case CSR1212_KV_TYPE_DIRECTORY:
907 csr1212_generate_tree_subdir(kv, kvi->data);
908
909 kvi->length = cpu_to_be16(kv->value.directory.len);
910 kvi->crc = csr1212_crc16(kvi->data,
911 kv->value.directory.len);
912 break;
913 }
914
915 nkv = kv->next;
916 if (kv->prev)
917 kv->prev->next = NULL;
918 if (kv->next)
919 kv->next->prev = NULL;
920 kv->prev = NULL;
921 kv->next = NULL;
922 }
923}
924
925/* This size is arbitrarily chosen.
 926 * The struct overhead is subtracted for more economical allocations. */
927#define CSR1212_EXTENDED_ROM_SIZE (2048 - sizeof(struct csr1212_csr_rom_cache))
928
929int csr1212_generate_csr_image(struct csr1212_csr *csr)
930{
931 struct csr1212_bus_info_block_img *bi;
932 struct csr1212_csr_rom_cache *cache;
933 struct csr1212_keyval *kv;
934 size_t agg_size;
935 int ret;
936 int init_offset;
937
938 BUG_ON(!csr);
939
940 cache = csr->cache_head;
941
942 bi = (struct csr1212_bus_info_block_img*)cache->data;
943
944 bi->length = bytes_to_quads(csr->bus_info_len) - 1;
945 bi->crc_length = bi->length;
946 bi->crc = csr1212_crc16(bi->data, bi->crc_length);
947
948 csr->root_kv->next = NULL;
949 csr->root_kv->prev = NULL;
950
951 agg_size = csr1212_generate_layout_order(csr->root_kv);
952
953 init_offset = csr->bus_info_len;
954
955 for (kv = csr->root_kv, cache = csr->cache_head;
956 kv;
957 cache = cache->next) {
958 if (!cache) {
959 /* Estimate approximate number of additional cache
960 * regions needed (it assumes that the cache holding
961 * the first 1K Config ROM space always exists). */
962 int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
963 (2 * sizeof(u32))) + 1;
964
965 /* Add additional cache regions, extras will be
966 * removed later */
967 for (; est_c; est_c--) {
968 ret = csr1212_append_new_cache(csr,
969 CSR1212_EXTENDED_ROM_SIZE);
970 if (ret != CSR1212_SUCCESS)
971 return ret;
972 }
973 /* Need to re-layout for additional cache regions */
974 agg_size = csr1212_generate_layout_order(csr->root_kv);
975 kv = csr->root_kv;
976 cache = csr->cache_head;
977 init_offset = csr->bus_info_len;
978 }
979 kv = csr1212_generate_positions(cache, kv, init_offset);
980 agg_size -= cache->len;
981 init_offset = sizeof(u32);
982 }
983
984 /* Remove unused, excess cache regions */
985 while (cache) {
986 struct csr1212_csr_rom_cache *oc = cache;
987
988 cache = cache->next;
989 csr1212_remove_cache(csr, oc);
990 }
991
992 /* Go through the list backward so that when done, the correct CRC
993 * will be calculated for the Extended ROM areas. */
994 for (cache = csr->cache_tail; cache; cache = cache->prev) {
995 /* Only Extended ROM caches should have this set. */
996 if (cache->ext_rom) {
997 int leaf_size;
998
999 /* Make sure the Extended ROM leaf is a multiple of
1000 * max_rom in size. */
1001 BUG_ON(csr->max_rom < 1);
1002 leaf_size = (cache->len + (csr->max_rom - 1)) &
1003 ~(csr->max_rom - 1);
1004
1005 /* Zero out the unused ROM region */
1006 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1007 leaf_size - cache->len);
1008
1009 /* Subtract leaf header */
1010 leaf_size -= sizeof(u32);
1011
1012 /* Update the Extended ROM leaf length */
1013 cache->ext_rom->value.leaf.len =
1014 bytes_to_quads(leaf_size);
1015 } else {
1016 /* Zero out the unused ROM region */
1017 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1018 cache->size - cache->len);
1019 }
1020
1021 /* Copy the data into the cache buffer */
1022 csr1212_fill_cache(cache);
1023
1024 if (cache != csr->cache_head) {
1025 /* Set the length and CRC of the extended ROM. */
1026 struct csr1212_keyval_img *kvi =
1027 (struct csr1212_keyval_img*)cache->data;
1028 u16 len = bytes_to_quads(cache->len) - 1;
1029
1030 kvi->length = cpu_to_be16(len);
1031 kvi->crc = csr1212_crc16(kvi->data, len);
1032 }
1033 }
1034
1035 return CSR1212_SUCCESS;
1036}
1037
1038int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len)
1039{
1040 struct csr1212_csr_rom_cache *cache;
1041
1042 for (cache = csr->cache_head; cache; cache = cache->next)
1043 if (offset >= cache->offset &&
1044 (offset + len) <= (cache->offset + cache->size)) {
1045 memcpy(buffer, &cache->data[
1046 bytes_to_quads(offset - cache->offset)],
1047 len);
1048 return CSR1212_SUCCESS;
1049 }
1050
1051 return -ENOENT;
1052}
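
/* Sketch of the image path end to end on the local node: lay out and
 * fill the caches, then copy the finished image out via csr1212_read().
 * rom_quads, buf and image_len are hypothetical caller state. */

static int publish_rom(struct csr1212_csr *csr, const u32 *rom_quads,
		       void *buf, u32 image_len)
{
	int ret;

	csr1212_init_local_csr(csr, rom_quads, 4 /* max_rom */);
	ret = csr1212_generate_csr_image(csr);
	if (ret != CSR1212_SUCCESS)
		return ret;

	/* csr1212_read() offsets are relative to 0xffff f000 0000 */
	return csr1212_read(csr, CSR1212_CONFIG_ROM_SPACE_OFFSET,
			    buf, image_len);
}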
1053
1054/*
1055 * Apparently there are many different wrong implementations of the CRC
1056 * algorithm. We don't fail, we just warn... approximately once per GUID.
1057 */
1058static void
1059csr1212_check_crc(const u32 *buffer, size_t length, u16 crc, __be32 *guid)
1060{
1061 static u64 last_bad_eui64;
1062 u64 eui64 = ((u64)be32_to_cpu(guid[0]) << 32) | be32_to_cpu(guid[1]);
1063
1064 if (csr1212_crc16(buffer, length) == crc ||
1065 csr1212_msft_crc16(buffer, length) == crc ||
1066 eui64 == last_bad_eui64)
1067 return;
1068
1069 printk(KERN_DEBUG "ieee1394: config ROM CRC error\n");
1070 last_bad_eui64 = eui64;
1071}
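
/* For reference, the serial CRC-16 that IEEE 1212 defines for directory
 * and leaf entries, processed four bits at a time.  This sketch works on
 * host-order quadlets and returns a host-order sum; the driver's own
 * csr1212_crc16() additionally handles the big-endian wire format. */

static u16 crc16_1212(const u32 *buffer, size_t length)
{
	u32 crc = 0;

	for (; length; length--) {
		u32 data = *buffer++;
		int shift;

		for (shift = 28; shift >= 0; shift -= 4) {
			u32 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;

			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
		}
		crc &= 0xffff;
	}
	return crc;
}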
1072
1073/* Parse a chunk of data as a Config ROM */
1074
1075static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1076{
1077 struct csr1212_bus_info_block_img *bi;
1078 struct csr1212_cache_region *cr;
1079 int i;
1080 int ret;
1081
1082 for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
1083 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1084 &csr->cache_head->data[bytes_to_quads(i)],
1085 csr->private);
1086 if (ret != CSR1212_SUCCESS)
1087 return ret;
1088
1089 /* check ROM header's info_length */
1090 if (i == 0 &&
1091 be32_to_cpu(csr->cache_head->data[0]) >> 24 !=
1092 bytes_to_quads(csr->bus_info_len) - 1)
1093 return -EINVAL;
1094 }
1095
1096 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
1097 csr->crc_len = quads_to_bytes(bi->crc_length);
1098
1099 /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that
1100 * is not always the case, so read the rest of the crc area 1 quadlet at
1101 * a time. */
1102 for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
1103 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1104 &csr->cache_head->data[bytes_to_quads(i)],
1105 csr->private);
1106 if (ret != CSR1212_SUCCESS)
1107 return ret;
1108 }
1109
1110 csr1212_check_crc(bi->data, bi->crc_length, bi->crc,
1111 &csr->bus_info_data[3]);
1112
1113 cr = CSR1212_MALLOC(sizeof(*cr));
1114 if (!cr)
1115 return -ENOMEM;
1116
1117 cr->next = NULL;
1118 cr->prev = NULL;
1119 cr->offset_start = 0;
1120 cr->offset_end = csr->crc_len + 4;
1121
1122 csr->cache_head->filled_head = cr;
1123 csr->cache_head->filled_tail = cr;
1124
1125 return CSR1212_SUCCESS;
1126}
1127
1128#define CSR1212_KV_KEY(q) (be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT)
1129#define CSR1212_KV_KEY_TYPE(q) (CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
1130#define CSR1212_KV_KEY_ID(q) (CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
1131#define CSR1212_KV_VAL_MASK 0xffffff
1132#define CSR1212_KV_VAL(q) (be32_to_cpu(q) & CSR1212_KV_VAL_MASK)
1133
1134static int
1135csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
1136{
1137 int ret = CSR1212_SUCCESS;
1138 struct csr1212_keyval *k = NULL;
1139 u32 offset;
1140 bool keep_keyval = true;
1141
1142 switch (CSR1212_KV_KEY_TYPE(ki)) {
1143 case CSR1212_KV_TYPE_IMMEDIATE:
1144 k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
1145 CSR1212_KV_VAL(ki));
1146 if (!k) {
1147 ret = -ENOMEM;
1148 goto out;
1149 }
1150 /* Don't keep local reference when parsing. */
1151 keep_keyval = false;
1152 break;
1153
1154 case CSR1212_KV_TYPE_CSR_OFFSET:
1155 k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
1156 CSR1212_KV_VAL(ki));
1157 if (!k) {
1158 ret = -ENOMEM;
1159 goto out;
1160 }
1161 /* Don't keep local reference when parsing. */
1162 keep_keyval = false;
1163 break;
1164
1165 default:
1166 /* Compute the offset from 0xffff f000 0000. */
1167 offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
1168 if (offset == kv_pos) {
1169 /* Uh-oh. Can't have a relative offset of 0 for Leaves
1170 * or Directories. The Config ROM image is most likely
1171 * messed up, so we'll just abort here. */
1172 ret = -EIO;
1173 goto out;
1174 }
1175
1176 k = csr1212_find_keyval_offset(dir, offset);
1177
1178 if (k)
1179 break; /* Found it. */
1180
1181 if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY)
1182 k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
1183 else
1184 k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
1185
1186 if (!k) {
1187 ret = -ENOMEM;
1188 goto out;
1189 }
1190 /* Don't keep local reference when parsing. */
1191 keep_keyval = false;
1192 /* Contents not read yet so it's not valid. */
1193 k->valid = 0;
1194 k->offset = offset;
1195
1196 k->prev = dir;
1197 k->next = dir->next;
1198 dir->next->prev = k;
1199 dir->next = k;
1200 }
1201 ret = __csr1212_attach_keyval_to_directory(dir, k, keep_keyval);
1202out:
1203 if (ret != CSR1212_SUCCESS && k != NULL)
1204 free_keyval(k);
1205 return ret;
1206}
1207
1208int csr1212_parse_keyval(struct csr1212_keyval *kv,
1209 struct csr1212_csr_rom_cache *cache)
1210{
1211 struct csr1212_keyval_img *kvi;
1212 int i;
1213 int ret = CSR1212_SUCCESS;
1214 int kvi_len;
1215
1216 kvi = (struct csr1212_keyval_img*)
1217 &cache->data[bytes_to_quads(kv->offset - cache->offset)];
1218 kvi_len = be16_to_cpu(kvi->length);
1219
1220 /* GUID is wrong in here in case of extended ROM. We don't care. */
1221 csr1212_check_crc(kvi->data, kvi_len, kvi->crc, &cache->data[3]);
1222
1223 switch (kv->key.type) {
1224 case CSR1212_KV_TYPE_DIRECTORY:
1225 for (i = 0; i < kvi_len; i++) {
1226 u32 ki = kvi->data[i];
1227
1228 /* Some devices put null entries in their unit
1229 * directories. If we come across such an entry,
1230 * then skip it. */
1231 if (ki == 0x0)
1232 continue;
1233 ret = csr1212_parse_dir_entry(kv, ki,
1234 kv->offset + quads_to_bytes(i + 1));
1235 }
1236 kv->value.directory.len = kvi_len;
1237 break;
1238
1239 case CSR1212_KV_TYPE_LEAF:
1240 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
1241 size_t size = quads_to_bytes(kvi_len);
1242
1243 kv->value.leaf.data = CSR1212_MALLOC(size);
1244 if (!kv->value.leaf.data) {
1245 ret = -ENOMEM;
1246 goto out;
1247 }
1248
1249 kv->value.leaf.len = kvi_len;
1250 memcpy(kv->value.leaf.data, kvi->data, size);
1251 }
1252 break;
1253 }
1254
1255 kv->valid = 1;
1256out:
1257 return ret;
1258}
1259
1260static int
1261csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1262{
1263 struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
1264 struct csr1212_keyval_img *kvi = NULL;
1265 struct csr1212_csr_rom_cache *cache;
1266 int cache_index;
1267 u64 addr;
1268 u32 *cache_ptr;
1269 u16 kv_len = 0;
1270
1271 BUG_ON(!csr || !kv || csr->max_rom < 1);
1272
1273	/* First find which cache the data should be in (or where it will go
1274	 * if it has not been read yet). */
1275 for (cache = csr->cache_head; cache; cache = cache->next)
1276 if (kv->offset >= cache->offset &&
1277 kv->offset < (cache->offset + cache->size))
1278 break;
1279
1280 if (!cache) {
1281 u32 q, cache_size;
1282
1283 /* Only create a new cache for Extended ROM leaves. */
1284 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1285 return -EINVAL;
1286
1287 if (csr->ops->bus_read(csr,
1288 CSR1212_REGISTER_SPACE_BASE + kv->offset,
1289 &q, csr->private))
1290 return -EIO;
1291
1292 kv->value.leaf.len = be32_to_cpu(q) >> 16;
1293
1294 cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
1295 (csr->max_rom - 1)) & ~(csr->max_rom - 1);
1296
1297 cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
1298 if (!cache)
1299 return -ENOMEM;
1300
1301 kv->value.leaf.data = &cache->data[1];
1302 csr->cache_tail->next = cache;
1303 cache->prev = csr->cache_tail;
1304 cache->next = NULL;
1305 csr->cache_tail = cache;
1306 cache->filled_head =
1307 CSR1212_MALLOC(sizeof(*cache->filled_head));
1308 if (!cache->filled_head)
1309 return -ENOMEM;
1310
1311 cache->filled_head->offset_start = 0;
1312 cache->filled_head->offset_end = sizeof(u32);
1313 cache->filled_tail = cache->filled_head;
1314 cache->filled_head->next = NULL;
1315 cache->filled_head->prev = NULL;
1316 cache->data[0] = q;
1317
1318 /* Don't read the entire extended ROM now. Pieces of it will
1319 * be read when entries inside it are read. */
1320 return csr1212_parse_keyval(kv, cache);
1321 }
1322
1323 cache_index = kv->offset - cache->offset;
1324
1325	/* Now search the read portions of the cache to see if the data is there. */
1326 for (cr = cache->filled_head; cr; cr = cr->next) {
1327 if (cache_index < cr->offset_start) {
1328 newcr = CSR1212_MALLOC(sizeof(*newcr));
1329 if (!newcr)
1330 return -ENOMEM;
1331
1332 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1333 newcr->offset_end = newcr->offset_start;
1334 newcr->next = cr;
1335 newcr->prev = cr->prev;
1336 cr->prev = newcr;
1337 cr = newcr;
1338 break;
1339 } else if ((cache_index >= cr->offset_start) &&
1340 (cache_index < cr->offset_end)) {
1341 kvi = (struct csr1212_keyval_img*)
1342 (&cache->data[bytes_to_quads(cache_index)]);
1343 kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
1344 break;
1345 } else if (cache_index == cr->offset_end) {
1346 break;
1347 }
1348 }
1349
1350 if (!cr) {
1351 cr = cache->filled_tail;
1352 newcr = CSR1212_MALLOC(sizeof(*newcr));
1353 if (!newcr)
1354 return -ENOMEM;
1355
1356 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1357 newcr->offset_end = newcr->offset_start;
1358 newcr->prev = cr;
1359 newcr->next = cr->next;
1360 cr->next = newcr;
1361 cr = newcr;
1362 cache->filled_tail = newcr;
1363 }
1364
1365	while (!kvi || cr->offset_end < cache_index + kv_len) {
1366 cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
1367 ~(csr->max_rom - 1))];
1368
1369 addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
1370 cr->offset_end) & ~(csr->max_rom - 1);
1371
1372 if (csr->ops->bus_read(csr, addr, cache_ptr, csr->private))
1373 return -EIO;
1374
1375 cr->offset_end += csr->max_rom - (cr->offset_end &
1376 (csr->max_rom - 1));
1377
1378 if (!kvi && (cr->offset_end > cache_index)) {
1379 kvi = (struct csr1212_keyval_img*)
1380 (&cache->data[bytes_to_quads(cache_index)]);
1381 kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
1382 }
1383
1384 if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
1385 /* The Leaf or Directory claims its length extends
1386 * beyond the ConfigROM image region and thus beyond the
1387 * end of our cache region. Therefore, we abort now
1388 * rather than seg faulting later. */
1389 return -EIO;
1390 }
1391
1392 ncr = cr->next;
1393
1394 if (ncr && (cr->offset_end >= ncr->offset_start)) {
1395 /* consolidate region entries */
1396 ncr->offset_start = cr->offset_start;
1397
1398 if (cr->prev)
1399 cr->prev->next = cr->next;
1400 ncr->prev = cr->prev;
1401 if (cache->filled_head == cr)
1402 cache->filled_head = ncr;
1403 CSR1212_FREE(cr);
1404 cr = ncr;
1405 }
1406 }
1407
1408 return csr1212_parse_keyval(kv, cache);
1409}
1410
1411struct csr1212_keyval *
1412csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1413{
1414 if (!kv)
1415 return NULL;
1416 if (!kv->valid)
1417 if (csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
1418 return NULL;
1419 return kv;
1420}
1421
1422int csr1212_parse_csr(struct csr1212_csr *csr)
1423{
1424 struct csr1212_dentry *dentry;
1425 int ret;
1426
1427 BUG_ON(!csr || !csr->ops || !csr->ops->bus_read);
1428
1429 ret = csr1212_parse_bus_info_block(csr);
1430 if (ret != CSR1212_SUCCESS)
1431 return ret;
1432
1433 /*
1434 * There has been a buggy firmware with bus_info_block.max_rom > 0
1435 * spotted which actually only supported quadlet read requests to the
1436 * config ROM. Therefore read everything quadlet by quadlet regardless
1437 * of what the bus info block says.
1438 */
1439 csr->max_rom = 4;
1440
1441 csr->cache_head->layout_head = csr->root_kv;
1442 csr->cache_head->layout_tail = csr->root_kv;
1443
1444 csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
1445 csr->bus_info_len;
1446
1447 csr->root_kv->valid = 0;
1448 csr->root_kv->next = csr->root_kv;
1449 csr->root_kv->prev = csr->root_kv;
1450 ret = csr1212_read_keyval(csr, csr->root_kv);
1451 if (ret != CSR1212_SUCCESS)
1452 return ret;
1453
1454 /* Scan through the Root directory finding all extended ROM regions
1455 * and make cache regions for them */
1456 for (dentry = csr->root_kv->value.directory.dentries_head;
1457 dentry; dentry = dentry->next) {
1458 if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
1459 !dentry->kv->valid) {
1460 ret = csr1212_read_keyval(csr, dentry->kv);
1461 if (ret != CSR1212_SUCCESS)
1462 return ret;
1463 }
1464 }
1465
1466 return CSR1212_SUCCESS;
1467}
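
/* Sketch of the parse path as a remote-node driver uses it; my_ops and
 * my_bus_read() are placeholders, and the 5-quadlet bus info block
 * length matches IEEE 1394: */

static struct csr1212_bus_ops my_ops = {
	.bus_read = my_bus_read,	/* hypothetical quadlet-read backend */
};

static int scan_node(void *host)
{
	struct csr1212_csr *csr;
	int ret;

	csr = csr1212_create_csr(&my_ops, 5 * sizeof(u32), host);
	if (!csr)
		return -ENOMEM;

	ret = csr1212_parse_csr(csr);
	/* on success, walk csr->root_kv with csr1212_for_each_dir_entry() */
	csr1212_destroy_csr(csr);
	return ret;
}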
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
deleted file mode 100644
index a892d922dbc9..000000000000
--- a/drivers/ieee1394/csr1212.h
+++ /dev/null
@@ -1,383 +0,0 @@
1/*
2 * csr1212.h -- IEEE 1212 Control and Status Register support for Linux
3 *
4 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
5 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#ifndef __CSR1212_H__
31#define __CSR1212_H__
32
33#include <linux/types.h>
34#include <linux/slab.h>
35#include <asm/atomic.h>
36
37#define CSR1212_MALLOC(size) kmalloc((size), GFP_KERNEL)
38#define CSR1212_FREE(ptr) kfree(ptr)
39
40#define CSR1212_SUCCESS (0)
41
42
43/* CSR 1212 key types */
44#define CSR1212_KV_TYPE_IMMEDIATE 0
45#define CSR1212_KV_TYPE_CSR_OFFSET 1
46#define CSR1212_KV_TYPE_LEAF 2
47#define CSR1212_KV_TYPE_DIRECTORY 3
48
49
50/* CSR 1212 key ids */
51#define CSR1212_KV_ID_DESCRIPTOR 0x01
52#define CSR1212_KV_ID_BUS_DEPENDENT_INFO 0x02
53#define CSR1212_KV_ID_VENDOR 0x03
54#define CSR1212_KV_ID_HARDWARE_VERSION 0x04
55#define CSR1212_KV_ID_MODULE 0x07
56#define CSR1212_KV_ID_NODE_CAPABILITIES 0x0C
57#define CSR1212_KV_ID_EUI_64 0x0D
58#define CSR1212_KV_ID_UNIT 0x11
59#define CSR1212_KV_ID_SPECIFIER_ID 0x12
60#define CSR1212_KV_ID_VERSION 0x13
61#define CSR1212_KV_ID_DEPENDENT_INFO 0x14
62#define CSR1212_KV_ID_UNIT_LOCATION 0x15
63#define CSR1212_KV_ID_MODEL 0x17
64#define CSR1212_KV_ID_INSTANCE 0x18
65#define CSR1212_KV_ID_KEYWORD 0x19
66#define CSR1212_KV_ID_FEATURE 0x1A
67#define CSR1212_KV_ID_EXTENDED_ROM 0x1B
68#define CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID 0x1C
69#define CSR1212_KV_ID_EXTENDED_KEY 0x1D
70#define CSR1212_KV_ID_EXTENDED_DATA 0x1E
71#define CSR1212_KV_ID_MODIFIABLE_DESCRIPTOR 0x1F
72#define CSR1212_KV_ID_DIRECTORY_ID 0x20
73#define CSR1212_KV_ID_REVISION 0x21
74
75
76/* IEEE 1212 Address space map */
77#define CSR1212_ALL_SPACE_BASE (0x000000000000ULL)
78#define CSR1212_ALL_SPACE_SIZE (1ULL << 48)
79#define CSR1212_ALL_SPACE_END (CSR1212_ALL_SPACE_BASE + CSR1212_ALL_SPACE_SIZE)
80
81#define CSR1212_MEMORY_SPACE_BASE (0x000000000000ULL)
82#define CSR1212_MEMORY_SPACE_SIZE ((256ULL * (1ULL << 40)) - (512ULL * (1ULL << 20)))
83#define CSR1212_MEMORY_SPACE_END (CSR1212_MEMORY_SPACE_BASE + CSR1212_MEMORY_SPACE_SIZE)
84
85#define CSR1212_PRIVATE_SPACE_BASE (0xffffe0000000ULL)
86#define CSR1212_PRIVATE_SPACE_SIZE (256ULL * (1ULL << 20))
87#define CSR1212_PRIVATE_SPACE_END (CSR1212_PRIVATE_SPACE_BASE + CSR1212_PRIVATE_SPACE_SIZE)
88
89#define CSR1212_REGISTER_SPACE_BASE (0xfffff0000000ULL)
90#define CSR1212_REGISTER_SPACE_SIZE (256ULL * (1ULL << 20))
91#define CSR1212_REGISTER_SPACE_END (CSR1212_REGISTER_SPACE_BASE + CSR1212_REGISTER_SPACE_SIZE)
92
93#define CSR1212_CSR_ARCH_REG_SPACE_BASE (0xfffff0000000ULL)
94#define CSR1212_CSR_ARCH_REG_SPACE_SIZE (512)
95#define CSR1212_CSR_ARCH_REG_SPACE_END (CSR1212_CSR_ARCH_REG_SPACE_BASE + CSR1212_CSR_ARCH_REG_SPACE_SIZE)
96#define CSR1212_CSR_ARCH_REG_SPACE_OFFSET (CSR1212_CSR_ARCH_REG_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
97
98#define CSR1212_CSR_BUS_DEP_REG_SPACE_BASE (0xfffff0000200ULL)
99#define CSR1212_CSR_BUS_DEP_REG_SPACE_SIZE (512)
100#define CSR1212_CSR_BUS_DEP_REG_SPACE_END (CSR1212_CSR_BUS_DEP_REG_SPACE_BASE + CSR1212_CSR_BUS_DEP_REG_SPACE_SIZE)
101#define CSR1212_CSR_BUS_DEP_REG_SPACE_OFFSET (CSR1212_CSR_BUS_DEP_REG_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
102
103#define CSR1212_CONFIG_ROM_SPACE_BASE (0xfffff0000400ULL)
104#define CSR1212_CONFIG_ROM_SPACE_SIZE (1024)
105#define CSR1212_CONFIG_ROM_SPACE_END (CSR1212_CONFIG_ROM_SPACE_BASE + CSR1212_CONFIG_ROM_SPACE_SIZE)
106#define CSR1212_CONFIG_ROM_SPACE_OFFSET (CSR1212_CONFIG_ROM_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
107
108#define CSR1212_UNITS_SPACE_BASE (0xfffff0000800ULL)
109#define CSR1212_UNITS_SPACE_SIZE ((256ULL * (1ULL << 20)) - 2048)
110#define CSR1212_UNITS_SPACE_END (CSR1212_UNITS_SPACE_BASE + CSR1212_UNITS_SPACE_SIZE)
111#define CSR1212_UNITS_SPACE_OFFSET (CSR1212_UNITS_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
112
113#define CSR1212_INVALID_ADDR_SPACE -1
114
115
116/* Config ROM image structures */
117struct csr1212_bus_info_block_img {
118 u8 length;
119 u8 crc_length;
120 u16 crc;
121
122 /* Must be last */
123 u32 data[0]; /* older gcc can't handle [] which is standard */
124};
125
126struct csr1212_leaf {
127 int len;
128 u32 *data;
129};
130
131struct csr1212_dentry {
132 struct csr1212_dentry *next, *prev;
133 struct csr1212_keyval *kv;
134};
135
136struct csr1212_directory {
137 int len;
138 struct csr1212_dentry *dentries_head, *dentries_tail;
139};
140
141struct csr1212_keyval {
142 struct {
143 u8 type;
144 u8 id;
145 } key;
146 union {
147 u32 immediate;
148 u32 csr_offset;
149 struct csr1212_leaf leaf;
150 struct csr1212_directory directory;
151 } value;
152 struct csr1212_keyval *associate;
153 atomic_t refcnt;
154
155 /* used in generating and/or parsing CSR image */
156 struct csr1212_keyval *next, *prev; /* flat list of CSR elements */
157 u32 offset; /* position in CSR from 0xffff f000 0000 */
158 u8 valid; /* flag indicating keyval has valid data*/
159};
160
161
162struct csr1212_cache_region {
163 struct csr1212_cache_region *next, *prev;
164 u32 offset_start; /* inclusive */
165 u32 offset_end; /* exclusive */
166};
167
168struct csr1212_csr_rom_cache {
169 struct csr1212_csr_rom_cache *next, *prev;
170 struct csr1212_cache_region *filled_head, *filled_tail;
171 struct csr1212_keyval *layout_head, *layout_tail;
172 size_t size;
173 u32 offset;
174 struct csr1212_keyval *ext_rom;
175 size_t len;
176
177 /* Must be last */
178 u32 data[0]; /* older gcc can't handle [] which is standard */
179};
180
181struct csr1212_csr {
182 size_t bus_info_len; /* bus info block length in bytes */
183 size_t crc_len; /* crc length in bytes */
184 __be32 *bus_info_data; /* bus info data incl bus name and EUI */
185
186 void *private; /* private, bus specific data */
187 struct csr1212_bus_ops *ops;
188
189 struct csr1212_keyval *root_kv;
190
191 int max_rom; /* max bytes readable in Config ROM region */
192
193 /* Items below used for image parsing and generation */
194 struct csr1212_csr_rom_cache *cache_head, *cache_tail;
195};
196
197struct csr1212_bus_ops {
198 /* This function is used by csr1212 to read additional information
199 * from remote nodes when parsing a Config ROM (i.e., read Config ROM
 200	 * entries located in the Units Space).  Must return 0 on success;
 201	 * anything else indicates an error. */
202 int (*bus_read) (struct csr1212_csr *csr, u64 addr,
203 void *buffer, void *private);
204
205 /* This function is used by csr1212 to allocate a region in units space
206 * in the event that Config ROM entries don't all fit in the predefined
 207	 * 1K region.  The void *private parameter is the private member of struct
208 * csr1212_csr. */
209 u64 (*allocate_addr_range) (u64 size, u32 alignment, void *private);
210
211 /* This function is used by csr1212 to release a region in units space
212 * that is no longer needed. */
213 void (*release_addr) (u64 addr, void *private);
214};
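
/* A minimal bus_read() backend sketch: since csr1212_parse_csr() forces
 * csr->max_rom to 4, reads are always single quadlets.  struct my_host
 * and my_read_quadlet() are hypothetical helpers of the host driver. */

static int my_bus_read(struct csr1212_csr *csr, u64 addr,
		       void *buffer, void *private)
{
	struct my_host *host = private;

	return my_read_quadlet(host, addr, buffer) ? -EIO : CSR1212_SUCCESS;
}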
215
216
217/* Descriptor Leaf manipulation macros */
218#define CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT 24
219#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK 0xffffff
220#define CSR1212_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u32))
221
222#define CSR1212_DESCRIPTOR_LEAF_TYPE(kv) \
223 (be32_to_cpu((kv)->value.leaf.data[0]) >> \
224 CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)
225#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) \
226 (be32_to_cpu((kv)->value.leaf.data[0]) & \
227 CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)
228
229
230/* Text Descriptor Leaf manipulation macros */
231#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT 28
232#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK 0xf /* after shift */
233#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT 16
234#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK 0xfff /* after shift */
235#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff
236#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u32))
237
238#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) \
239 (be32_to_cpu((kv)->value.leaf.data[1]) >> \
240 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT)
241#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) \
242 ((be32_to_cpu((kv)->value.leaf.data[1]) >> \
243 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT) & \
244 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK)
245#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) \
246 (be32_to_cpu((kv)->value.leaf.data[1]) & \
247 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)
248#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv) \
249 (&((kv)->value.leaf.data[2]))
250
251
 252/* The following two functions are for creating new Configuration ROM trees. The
253 * first function is used for both creating local trees and parsing remote
254 * trees. The second function adds pertinent information to local Configuration
255 * ROM trees - namely data for the bus information block. */
256extern struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
257 size_t bus_info_size,
258 void *private);
259extern void csr1212_init_local_csr(struct csr1212_csr *csr,
260 const u32 *bus_info_data, int max_rom);
261
262
263/* Destroy a Configuration ROM tree and release all memory taken by the tree. */
264extern void csr1212_destroy_csr(struct csr1212_csr *csr);
265
266
 267/* The following set of functions is for creating new keyvals for placement in
268 * a Configuration ROM tree. Code that creates new keyvals with these functions
269 * must release those keyvals with csr1212_release_keyval() when they are no
270 * longer needed. */
271extern struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value);
272extern struct csr1212_keyval *csr1212_new_directory(u8 key);
273extern struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s);
274
275
276/* The following function manages association between keyvals. Typically,
277 * Descriptor Leaves and Directories will be associated with another keyval and
 278 * it is desirable for the Descriptor keyval to be placed immediately after the
279 * keyval that it is associated with.
280 * Take care with subsequent ROM modifications: There is no function to remove
281 * previously specified associations.
282 */
283extern void csr1212_associate_keyval(struct csr1212_keyval *kv,
284 struct csr1212_keyval *associate);
285
286
287/* The following functions manage the association of a keyval and directories.
288 * A keyval may be attached to more than one directory. */
289extern int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
290 struct csr1212_keyval *kv);
291extern void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
292 struct csr1212_keyval *kv);
293
294
295/* Creates a complete Configuration ROM image in the list of caches available
296 * via csr->cache_head. */
297extern int csr1212_generate_csr_image(struct csr1212_csr *csr);
298
299
 300/* This is a convenience function for reading a block of data out of one of the
301 * caches in the csr->cache_head list. */
302extern int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer,
303 u32 len);
304
305
306/* The following functions are in place for parsing Configuration ROM images.
 307 * csr1212_parse_keyval() is used should there be a need to parse a single
 308 * Configuration ROM keyval directly. */
309extern int csr1212_parse_keyval(struct csr1212_keyval *kv,
310 struct csr1212_csr_rom_cache *cache);
311extern int csr1212_parse_csr(struct csr1212_csr *csr);
312
313
314/* This function allocates a new cache which may be used for either parsing or
315 * generating sub-sets of Configuration ROM images. */
316static inline struct csr1212_csr_rom_cache *
317csr1212_rom_cache_malloc(u32 offset, size_t size)
318{
319 struct csr1212_csr_rom_cache *cache;
320
321 cache = CSR1212_MALLOC(sizeof(*cache) + size);
322 if (!cache)
323 return NULL;
324
325 cache->next = NULL;
326 cache->prev = NULL;
327 cache->filled_head = NULL;
328 cache->filled_tail = NULL;
329 cache->layout_head = NULL;
330 cache->layout_tail = NULL;
331 cache->offset = offset;
332 cache->size = size;
333 cache->ext_rom = NULL;
334
335 return cache;
336}
337
338
 339/* This function ensures that a keyval contains valid data when referencing a
 340 * keyval created by parsing a Configuration ROM, reading it in if necessary. */
341extern struct csr1212_keyval *
342csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv);
343
344
345/* This function increments the reference count for a keyval should there be a
346 * need for code to retain a keyval that has been parsed. */
347static inline void csr1212_keep_keyval(struct csr1212_keyval *kv)
348{
349 atomic_inc(&kv->refcnt);
350 smp_mb__after_atomic_inc();
351}
352
353
354/* This function decrements a keyval's reference count and will destroy the
355 * keyval when there are no more users of the keyval. This should be called by
356 * any code that calls csr1212_keep_keyval() or any of the keyval creation
357 * routines csr1212_new_*(). */
358extern void csr1212_release_keyval(struct csr1212_keyval *kv);
359
360
361/*
362 * This macro allows for looping over the keyval entries in a directory and it
363 * ensures that keyvals from remote ConfigROMs are parsed properly.
364 *
365 * struct csr1212_csr *_csr points to the CSR associated with dir.
366 * struct csr1212_keyval *_kv points to the current keyval (loop index).
367 * struct csr1212_keyval *_dir points to the directory to be looped.
368 * struct csr1212_dentry *_pos is used internally for indexing.
369 *
 370 * _kv will be NULL upon exit of the loop.
371 */
372#define csr1212_for_each_dir_entry(_csr, _kv, _dir, _pos) \
373 for (csr1212_get_keyval((_csr), (_dir)), \
374 _pos = (_dir)->value.directory.dentries_head, \
375 _kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : NULL;\
376 (_kv) && (_pos); \
377 (_kv->associate == NULL) ? \
378 ((_pos = _pos->next), (_kv = (_pos) ? \
379 csr1212_get_keyval((_csr), _pos->kv) : \
380 NULL)) : \
381 (_kv = csr1212_get_keyval((_csr), _kv->associate)))
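
/* Example: pick the vendor ID out of a parsed root directory.  The
 * associate chasing inside the macro is what lets _kv visit descriptor
 * keyvals that hang off the directory's own entries. */

static u32 find_vendor_id(struct csr1212_csr *csr)
{
	struct csr1212_dentry *pos;
	struct csr1212_keyval *kv;
	u32 vendor_id = 0;

	csr1212_for_each_dir_entry(csr, kv, csr->root_kv, pos)
		if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE &&
		    kv->key.id == CSR1212_KV_ID_VENDOR)
			vendor_id = kv->value.immediate;
	return vendor_id;
}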
382
383#endif /* __CSR1212_H__ */
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
deleted file mode 100644
index d178699b194a..000000000000
--- a/drivers/ieee1394/dma.c
+++ /dev/null
@@ -1,289 +0,0 @@
1/*
2 * DMA region bookkeeping routines
3 *
4 * Copyright (C) 2002 Maas Digital LLC
5 *
6 * This code is licensed under the GPL. See the file COPYING in the root
7 * directory of the kernel sources for details.
8 */
9
10#include <linux/mm.h>
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/vmalloc.h>
14#include <linux/scatterlist.h>
15
16#include "dma.h"
17
18/* dma_prog_region */
19
20void dma_prog_region_init(struct dma_prog_region *prog)
21{
22 prog->kvirt = NULL;
23 prog->dev = NULL;
24 prog->n_pages = 0;
25 prog->bus_addr = 0;
26}
27
28int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
29 struct pci_dev *dev)
30{
31 /* round up to page size */
32 n_bytes = PAGE_ALIGN(n_bytes);
33
34 prog->n_pages = n_bytes >> PAGE_SHIFT;
35
36 prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
37 if (!prog->kvirt) {
38 printk(KERN_ERR
39 "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
40 dma_prog_region_free(prog);
41 return -ENOMEM;
42 }
43
44 prog->dev = dev;
45
46 return 0;
47}
48
49void dma_prog_region_free(struct dma_prog_region *prog)
50{
51 if (prog->kvirt) {
52 pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
53 prog->kvirt, prog->bus_addr);
54 }
55
56 prog->kvirt = NULL;
57 prog->dev = NULL;
58 prog->n_pages = 0;
59 prog->bus_addr = 0;
60}
61
62/* dma_region */
63
64/**
65 * dma_region_init - clear out all fields but do not allocate anything
66 */
67void dma_region_init(struct dma_region *dma)
68{
69 dma->kvirt = NULL;
70 dma->dev = NULL;
71 dma->n_pages = 0;
72 dma->n_dma_pages = 0;
73 dma->sglist = NULL;
74}
75
76/**
77 * dma_region_alloc - allocate the buffer and map it to the IOMMU
78 */
79int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
80 struct pci_dev *dev, int direction)
81{
82 unsigned int i;
83
84 /* round up to page size */
85 n_bytes = PAGE_ALIGN(n_bytes);
86
87 dma->n_pages = n_bytes >> PAGE_SHIFT;
88
89 dma->kvirt = vmalloc_32(n_bytes);
90 if (!dma->kvirt) {
91 printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
92 goto err;
93 }
94
  95	/* Clear the RAM out, no junk to the user */
96 memset(dma->kvirt, 0, n_bytes);
97
98 /* allocate scatter/gather list */
99 dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
100 if (!dma->sglist) {
101 printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
102 goto err;
103 }
104
105 sg_init_table(dma->sglist, dma->n_pages);
106
107 /* fill scatter/gather list with pages */
108 for (i = 0; i < dma->n_pages; i++) {
109 unsigned long va =
110 (unsigned long)dma->kvirt + (i << PAGE_SHIFT);
111
112 sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va),
113 PAGE_SIZE, 0);
114 }
115
116 /* map sglist to the IOMMU */
117 dma->n_dma_pages =
118 pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
119
120 if (dma->n_dma_pages == 0) {
121 printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
122 goto err;
123 }
124
125 dma->dev = dev;
126 dma->direction = direction;
127
128 return 0;
129
130 err:
131 dma_region_free(dma);
132 return -ENOMEM;
133}
134
135/**
136 * dma_region_free - unmap and free the buffer
137 */
138void dma_region_free(struct dma_region *dma)
139{
140 if (dma->n_dma_pages) {
141 pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
142 dma->direction);
143 dma->n_dma_pages = 0;
144 dma->dev = NULL;
145 }
146
147 vfree(dma->sglist);
148 dma->sglist = NULL;
149
150 vfree(dma->kvirt);
151 dma->kvirt = NULL;
152 dma->n_pages = 0;
153}
154
155/* find the scatterlist index and remaining offset corresponding to a
156 given offset from the beginning of the buffer */
157static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
158 unsigned int start, unsigned long *rem)
159{
160 int i;
161 unsigned long off = offset;
162
163 for (i = start; i < dma->n_dma_pages; i++) {
164 if (off < sg_dma_len(&dma->sglist[i])) {
165 *rem = off;
166 break;
167 }
168
169 off -= sg_dma_len(&dma->sglist[i]);
170 }
171
172 BUG_ON(i >= dma->n_dma_pages);
173
174 return i;
175}
176
177/**
178 * dma_region_offset_to_bus - get bus address of an offset within a DMA region
179 *
180 * Returns the DMA bus address of the byte with the given @offset relative to
181 * the beginning of the @dma.
182 */
183dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
184 unsigned long offset)
185{
186 unsigned long rem = 0;
187
188 struct scatterlist *sg =
189 &dma->sglist[dma_region_find(dma, offset, 0, &rem)];
190 return sg_dma_address(sg) + rem;
191}
192
193/**
194 * dma_region_sync_for_cpu - sync the CPU's view of the buffer
195 */
196void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
197 unsigned long len)
198{
199 int first, last;
200 unsigned long rem = 0;
201
202 if (!len)
203 len = 1;
204
205 first = dma_region_find(dma, offset, 0, &rem);
206 last = dma_region_find(dma, rem + len - 1, first, &rem);
207
208 pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
209 dma->direction);
210}
211
212/**
 213 * dma_region_sync_for_device - sync the I/O bus's view of the buffer
214 */
215void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
216 unsigned long len)
217{
218 int first, last;
219 unsigned long rem = 0;
220
221 if (!len)
222 len = 1;
223
224 first = dma_region_find(dma, offset, 0, &rem);
225 last = dma_region_find(dma, rem + len - 1, first, &rem);
226
227 pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
228 last - first + 1, dma->direction);
229}
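
/* The usual streaming discipline around the two sync helpers, sketched
 * for a region the device writes into; struct my_ctx with its embedded
 * struct dma_region is hypothetical caller state: */

static void copy_frame(struct my_ctx *ctx, void *dest,
		       unsigned long off, unsigned long len)
{
	/* the device has DMAed data in; make it visible to the CPU */
	dma_region_sync_for_cpu(&ctx->dma, off, len);
	memcpy(dest, ctx->dma.kvirt + off, len);
	/* hand the buffer back before the device writes again */
	dma_region_sync_for_device(&ctx->dma, off, len);
}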
230
231#ifdef CONFIG_MMU
232
233static int dma_region_pagefault(struct vm_area_struct *vma,
234 struct vm_fault *vmf)
235{
236 struct dma_region *dma = (struct dma_region *)vma->vm_private_data;
237
238 if (!dma->kvirt)
239 return VM_FAULT_SIGBUS;
240
241 if (vmf->pgoff >= dma->n_pages)
242 return VM_FAULT_SIGBUS;
243
244 vmf->page = vmalloc_to_page(dma->kvirt + (vmf->pgoff << PAGE_SHIFT));
245 get_page(vmf->page);
246 return 0;
247}
248
249static const struct vm_operations_struct dma_region_vm_ops = {
250 .fault = dma_region_pagefault,
251};
252
253/**
254 * dma_region_mmap - map the buffer into a user space process
255 */
256int dma_region_mmap(struct dma_region *dma, struct file *file,
257 struct vm_area_struct *vma)
258{
259 unsigned long size;
260
261 if (!dma->kvirt)
262 return -EINVAL;
263
 264	/* mapping must start at the region's beginning (XXX: we could allow pgoff) */
265 if (vma->vm_pgoff != 0)
266 return -EINVAL;
267
268 /* check the length */
269 size = vma->vm_end - vma->vm_start;
270 if (size > (dma->n_pages << PAGE_SHIFT))
271 return -EINVAL;
272
273 vma->vm_ops = &dma_region_vm_ops;
274 vma->vm_private_data = dma;
275 vma->vm_file = file;
276 vma->vm_flags |= VM_RESERVED | VM_ALWAYSDUMP;
277
278 return 0;
279}
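
/* How a character-device driver wires this up (dv1394 and video1394
 * followed this pattern); struct my_ctx is a hypothetical per-file
 * context holding the region: */

static int my_device_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_ctx *ctx = file->private_data;

	return dma_region_mmap(&ctx->dma, file, vma);
}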
280
281#else /* CONFIG_MMU */
282
283int dma_region_mmap(struct dma_region *dma, struct file *file,
284 struct vm_area_struct *vma)
285{
286 return -EINVAL;
287}
288
289#endif /* CONFIG_MMU */
diff --git a/drivers/ieee1394/dma.h b/drivers/ieee1394/dma.h
deleted file mode 100644
index 467373cab8e5..000000000000
--- a/drivers/ieee1394/dma.h
+++ /dev/null
@@ -1,89 +0,0 @@
1/*
2 * DMA region bookkeeping routines
3 *
4 * Copyright (C) 2002 Maas Digital LLC
5 *
6 * This code is licensed under the GPL. See the file COPYING in the root
7 * directory of the kernel sources for details.
8 */
9
10#ifndef IEEE1394_DMA_H
11#define IEEE1394_DMA_H
12
13#include <asm/types.h>
14
15struct file;
16struct pci_dev;
17struct scatterlist;
18struct vm_area_struct;
19
20/**
21 * struct dma_prog_region - small contiguous DMA buffer
22 * @kvirt: kernel virtual address
23 * @dev: PCI device
24 * @n_pages: number of kernel pages
25 * @bus_addr: base bus address
26 *
27 * a small, physically contiguous DMA buffer with random-access, synchronous
28 * usage characteristics
29 */
30struct dma_prog_region {
31 unsigned char *kvirt;
32 struct pci_dev *dev;
33 unsigned int n_pages;
34 dma_addr_t bus_addr;
35};
36
37/* clear out all fields but do not allocate any memory */
38void dma_prog_region_init(struct dma_prog_region *prog);
39int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
40 struct pci_dev *dev);
41void dma_prog_region_free(struct dma_prog_region *prog);
42
43static inline dma_addr_t dma_prog_region_offset_to_bus(
44 struct dma_prog_region *prog, unsigned long offset)
45{
46 return prog->bus_addr + offset;
47}
48
49/**
50 * struct dma_region - large non-contiguous DMA buffer
  51 * @kvirt: kernel virtual address
52 * @dev: PCI device
53 * @n_pages: number of kernel pages
54 * @n_dma_pages: number of IOMMU pages
55 * @sglist: IOMMU mapping
56 * @direction: PCI_DMA_TODEVICE, etc.
57 *
58 * a large, non-physically-contiguous DMA buffer with streaming, asynchronous
59 * usage characteristics
60 */
61struct dma_region {
62 unsigned char *kvirt;
63 struct pci_dev *dev;
64 unsigned int n_pages;
65 unsigned int n_dma_pages;
66 struct scatterlist *sglist;
67 int direction;
68};
69
70void dma_region_init(struct dma_region *dma);
71int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
72 struct pci_dev *dev, int direction);
73void dma_region_free(struct dma_region *dma);
74void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
75 unsigned long len);
76void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
77 unsigned long len);
78int dma_region_mmap(struct dma_region *dma, struct file *file,
79 struct vm_area_struct *vma);
80dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
81 unsigned long offset);
82
83/**
84 * dma_region_i - macro to index into a DMA region (or dma_prog_region)
85 */
86#define dma_region_i(_dma, _type, _index) \
87 ( ((_type*) ((_dma)->kvirt)) + (_index) )
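
/* e.g., view the region as an array of fixed-size elements; struct frame
 * is a hypothetical element type:
 *
 *	struct frame *f = dma_region_i(&ctx->dma, struct frame, frame_index);
 */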
88
89#endif /* IEEE1394_DMA_H */
diff --git a/drivers/ieee1394/dv1394-private.h b/drivers/ieee1394/dv1394-private.h
deleted file mode 100644
index 18b92cbf4a9f..000000000000
--- a/drivers/ieee1394/dv1394-private.h
+++ /dev/null
@@ -1,587 +0,0 @@
1/*
2 * dv1394-private.h - DV input/output over IEEE 1394 on OHCI chips
3 * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
4 * receive by Dan Dennedy <dan@dennedy.org>
5 *
6 * based on:
7 * video1394.h - driver for OHCI 1394 boards
8 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
9 * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software Foundation,
23 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */
25
26#ifndef _DV_1394_PRIVATE_H
27#define _DV_1394_PRIVATE_H
28
29#include "ieee1394.h"
30#include "ohci1394.h"
31#include "dma.h"
32
33/* data structures private to the dv1394 driver */
34/* none of this is exposed to user-space */
35
36
37/*
38 the 8-byte CIP (Common Isochronous Packet) header that precedes
39 each packet of DV data.
40
41 See the IEC 61883 standard.
42*/
43
44struct CIP_header { unsigned char b[8]; };
45
46static inline void fill_cip_header(struct CIP_header *cip,
47 unsigned char source_node_id,
48 unsigned long counter,
49 enum pal_or_ntsc format,
50 unsigned long timestamp)
51{
52 cip->b[0] = source_node_id;
53 cip->b[1] = 0x78; /* packet size in quadlets (480/4) - even for empty packets! */
54 cip->b[2] = 0x00;
55 cip->b[3] = counter;
56
57 cip->b[4] = 0x80; /* const */
58
59 switch(format) {
60 case DV1394_PAL:
61 cip->b[5] = 0x80;
62 break;
63 case DV1394_NTSC:
64 cip->b[5] = 0x00;
65 break;
66 }
67
68 cip->b[6] = timestamp >> 8;
69 cip->b[7] = timestamp & 0xFF;
70}
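
For a concrete sense of the SYT bytes (a worked example, not from the original file): the transmit path later assigns timestamp = (ts_cyc & 0xF) << 12, so a cycle count of 0x7 gives 0x7000, which fill_cip_header() splits across the last two header bytes:

	unsigned long timestamp = (0x7 & 0xF) << 12;  /* = 0x7000 */
	cip->b[6] = timestamp >> 8;                   /* 0x70 */
	cip->b[7] = timestamp & 0xFF;                 /* 0x00 */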
71
72
73
74/*
75 DMA commands used to program the OHCI's DMA engine
76
77 See the Texas Instruments OHCI 1394 chipset documentation.
78*/
79
80struct output_more_immediate { __le32 q[8]; };
81struct output_more { __le32 q[4]; };
82struct output_last { __le32 q[4]; };
83struct input_more { __le32 q[4]; };
84struct input_last { __le32 q[4]; };
85
86/* outputs */
87
88static inline void fill_output_more_immediate(struct output_more_immediate *omi,
89 unsigned char tag,
90 unsigned char channel,
91 unsigned char sync_tag,
92 unsigned int payload_size)
93{
94 omi->q[0] = cpu_to_le32(0x02000000 | 8); /* OUTPUT_MORE_IMMEDIATE; 8 is the size of the IT header */
95 omi->q[1] = cpu_to_le32(0);
96 omi->q[2] = cpu_to_le32(0);
97 omi->q[3] = cpu_to_le32(0);
98
99 /* IT packet header */
100 omi->q[4] = cpu_to_le32( (0x0 << 16) /* IEEE1394_SPEED_100 */
101 | (tag << 14)
102 | (channel << 8)
103 | (TCODE_ISO_DATA << 4)
104 | (sync_tag) );
105
106 /* reserved field; mimic behavior of my Sony DSR-40 */
107 omi->q[5] = cpu_to_le32((payload_size << 16) | (0x7F << 8) | 0xA0);
108
109 omi->q[6] = cpu_to_le32(0);
110 omi->q[7] = cpu_to_le32(0);
111}
112
113static inline void fill_output_more(struct output_more *om,
114 unsigned int data_size,
115 unsigned long data_phys_addr)
116{
117 om->q[0] = cpu_to_le32(data_size);
118 om->q[1] = cpu_to_le32(data_phys_addr);
119 om->q[2] = cpu_to_le32(0);
120 om->q[3] = cpu_to_le32(0);
121}
122
123static inline void fill_output_last(struct output_last *ol,
124 int want_timestamp,
125 int want_interrupt,
126 unsigned int data_size,
127 unsigned long data_phys_addr)
128{
129 u32 temp = 0;
130 temp |= 1 << 28; /* OUTPUT_LAST */
131
132 if (want_timestamp) /* controller will update timestamp at DMA time */
133 temp |= 1 << 27;
134
135 if (want_interrupt)
136 temp |= 3 << 20;
137
138 temp |= 3 << 18; /* must take branch */
139 temp |= data_size;
140
141 ol->q[0] = cpu_to_le32(temp);
142 ol->q[1] = cpu_to_le32(data_phys_addr);
143 ol->q[2] = cpu_to_le32(0);
144 ol->q[3] = cpu_to_le32(0);
145}
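
As an illustration of the control word assembled above (computed by hand, not taken from the source): for an "interesting" full packet whose 480-byte DV payload fits on one page, fill_output_last() is called with want_timestamp = 1 and want_interrupt = 1, giving

	u32 temp = (1 << 28)    /* OUTPUT_LAST */
	         | (1 << 27)    /* timestamp at DMA time */
	         | (3 << 20)    /* interrupt */
	         | (3 << 18)    /* must take branch */
	         | 480;         /* data size */
	/* temp == 0x183C01E0 */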
146
147/* inputs */
148
149static inline void fill_input_more(struct input_more *im,
150 int want_interrupt,
151 unsigned int data_size,
152 unsigned long data_phys_addr)
153{
154 u32 temp = 2 << 28; /* INPUT_MORE */
155 temp |= 8 << 24; /* s = 1, update xferStatus and resCount */
156 if (want_interrupt)
157 temp |= 0 << 20; /* interrupts, i=0 in packet-per-buffer mode */
158 temp |= 0x0 << 16; /* disable branch to address for packet-per-buffer mode */
159 /* disable wait on sync field, not used in DV :-( */
160 temp |= data_size;
161
162 im->q[0] = cpu_to_le32(temp);
163 im->q[1] = cpu_to_le32(data_phys_addr);
164	im->q[2] = cpu_to_le32(0); /* branchAddress and Z not used in packet-per-buffer mode */
165	im->q[3] = cpu_to_le32(0); /* xferStatus & resCount; resCount must be initialized to data_size */
166}
167
168static inline void fill_input_last(struct input_last *il,
169 int want_interrupt,
170 unsigned int data_size,
171 unsigned long data_phys_addr)
172{
173 u32 temp = 3 << 28; /* INPUT_LAST */
174 temp |= 8 << 24; /* s = 1, update xferStatus and resCount */
175 if (want_interrupt)
176 temp |= 3 << 20; /* enable interrupts */
177 temp |= 0xC << 16; /* enable branch to address */
178 /* disable wait on sync field, not used in DV :-( */
179 temp |= data_size;
180
181 il->q[0] = cpu_to_le32(temp);
182 il->q[1] = cpu_to_le32(data_phys_addr);
183 il->q[2] = cpu_to_le32(1); /* branchAddress (filled in later) and Z = 1 descriptor in next block */
184	il->q[3] = cpu_to_le32(data_size); /* xferStatus & resCount, resCount must be initialized to data_size */
185}
186
187
188
189/*
190 A "DMA descriptor block" consists of several contiguous DMA commands.
191 struct DMA_descriptor_block encapsulates all of the commands necessary
192 to send one packet of DV data.
193
194 There are three different types of these blocks:
195
196 1) command to send an empty packet (CIP header only, no DV data):
197
198 OUTPUT_MORE-Immediate <-- contains the iso header in-line
199 OUTPUT_LAST <-- points to the CIP header
200
201 2) command to send a full packet when the DV data payload does NOT
202 cross a page boundary:
203
204 OUTPUT_MORE-Immediate <-- contains the iso header in-line
205 OUTPUT_MORE <-- points to the CIP header
206 OUTPUT_LAST <-- points to entire DV data payload
207
208 3) command to send a full packet when the DV payload DOES cross
209 a page boundary:
210
211 OUTPUT_MORE-Immediate <-- contains the iso header in-line
212 OUTPUT_MORE <-- points to the CIP header
213 OUTPUT_MORE <-- points to first part of DV data payload
214 OUTPUT_LAST <-- points to second part of DV data payload
215
216 This struct describes all three block types using unions.
217
218 !!! It is vital that an even number of these descriptor blocks fit on one
219 page of memory, since a block cannot cross a page boundary !!!
220
221 */
222
223struct DMA_descriptor_block {
224
225 union {
226 struct {
227 /* iso header, common to all output block types */
228 struct output_more_immediate omi;
229
230 union {
231 /* empty packet */
232 struct {
233 struct output_last ol; /* CIP header */
234 } empty;
235
236 /* full packet */
237 struct {
238 struct output_more om; /* CIP header */
239
240 union {
241 /* payload does not cross page boundary */
242 struct {
243 struct output_last ol; /* data payload */
244 } nocross;
245
246 /* payload crosses page boundary */
247 struct {
248 struct output_more om; /* data payload */
249 struct output_last ol; /* data payload */
250 } cross;
251 } u;
252
253 } full;
254 } u;
255 } out;
256
257 struct {
258 struct input_last il;
259 } in;
260
261 } u;
262
263 /* ensure that PAGE_SIZE % sizeof(struct DMA_descriptor_block) == 0
264 by padding out to 128 bytes */
265 u32 __pad__[12];
266};
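
Since the padding makes sizeof(struct DMA_descriptor_block) exactly 128 bytes (32 for the immediate descriptor, up to 48 for the remaining descriptors, 48 of padding), blocks pack evenly into any power-of-two page size. A build-time assertion of the invariant (an illustrative addition; the original relied on the manual padding alone) could be placed in an init function:

	BUILD_BUG_ON(PAGE_SIZE % sizeof(struct DMA_descriptor_block) != 0);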
267
268
269/* struct frame contains all data associated with one frame in the
270   ringbuffer. These are allocated when the DMA context is initialized
271   in do_dv1394_init(). They are re-used after the card finishes
272   transmitting the frame. */
273
274struct video_card; /* forward declaration */
275
276struct frame {
277
278 /* points to the struct video_card that owns this frame */
279 struct video_card *video;
280
281 /* index of this frame in video_card->frames[] */
282 unsigned int frame_num;
283
284 /* FRAME_CLEAR - DMA program not set up, waiting for data
285 FRAME_READY - DMA program written, ready to transmit
286
287 Changes to these should be locked against the interrupt
288 */
289 enum {
290 FRAME_CLEAR = 0,
291 FRAME_READY
292 } state;
293
294 /* whether this frame has been DMA'ed already; used only from
295 the IRQ handler to determine whether the frame can be reset */
296 int done;
297
298
299 /* kernel virtual pointer to the start of this frame's data in
300 the user ringbuffer. Use only for CPU access; to get the DMA
301    bus address you must go through the video->dv_buf mapping */
302 unsigned long data;
303
304 /* Max # of packets per frame */
305#define MAX_PACKETS 500
306
307
308 /* a PAGE_SIZE memory pool for allocating CIP headers
309 !header_pool must be aligned to PAGE_SIZE! */
310 struct CIP_header *header_pool;
311 dma_addr_t header_pool_dma;
312
313
314 /* a physically contiguous memory pool for allocating DMA
315 descriptor blocks; usually around 64KB in size
316 !descriptor_pool must be aligned to PAGE_SIZE! */
317 struct DMA_descriptor_block *descriptor_pool;
318 dma_addr_t descriptor_pool_dma;
319 unsigned long descriptor_pool_size;
320
321
322 /* # of packets allocated for this frame */
323 unsigned int n_packets;
324
325
326 /* below are several pointers (kernel virtual addresses, not
327 DMA bus addresses) to parts of the DMA program. These are
328 set each time the DMA program is written in
329 frame_prepare(). They are used later on, e.g. from the
330 interrupt handler, to check the status of the frame */
331
332 /* points to status/timestamp field of first DMA packet */
333 /* (we'll check it later to monitor timestamp accuracy) */
334 __le32 *frame_begin_timestamp;
335
336 /* the timestamp we assigned to the first packet in the frame */
337 u32 assigned_timestamp;
338
339 /* pointer to the first packet's CIP header (where the timestamp goes) */
340 struct CIP_header *cip_syt1;
341
342 /* pointer to the second packet's CIP header
343 (only set if the first packet was empty) */
344 struct CIP_header *cip_syt2;
345
346 /* in order to figure out what caused an interrupt,
347 store pointers to the status fields of the two packets
348 that can cause interrupts. We'll check these from the
349 interrupt handler.
350 */
351 __le32 *mid_frame_timestamp;
352 __le32 *frame_end_timestamp;
353
354 /* branch address field of final packet. This is effectively
355 the "tail" in the chain of DMA descriptor blocks.
356 We will fill it with the address of the first DMA descriptor
357 block in the subsequent frame, once it is ready.
358 */
359 __le32 *frame_end_branch;
360
361 /* the number of descriptors in the first descriptor block
362 of the frame. Needed to start DMA */
363 int first_n_descriptors;
364};
365
366
367struct packet {
368 __le16 timestamp;
369 u16 invalid;
370 u16 iso_header;
371 __le16 data_length;
372 u32 cip_h1;
373 u32 cip_h2;
374 unsigned char data[480];
375 unsigned char padding[16]; /* force struct size =512 for page alignment */
376};
377
378
379/* allocate/free a frame */
380static struct frame* frame_new(unsigned int frame_num, struct video_card *video);
381static void frame_delete(struct frame *f);
382
383/* reset f so that it can be used again */
384static void frame_reset(struct frame *f);
385
386/* struct video_card contains all data associated with one instance
387 of the dv1394 driver
388*/
389enum modes {
390 MODE_RECEIVE,
391 MODE_TRANSMIT
392};
393
394struct video_card {
395
396 /* ohci card to which this instance corresponds */
397 struct ti_ohci *ohci;
398
399 /* OHCI card id; the link between the VFS inode and a specific video_card
400 (essentially the device minor number) */
401 int id;
402
403 /* entry in dv1394_cards */
404 struct list_head list;
405
406 /* OHCI card IT DMA context number, -1 if not in use */
407 int ohci_it_ctx;
408 struct ohci1394_iso_tasklet it_tasklet;
409
410 /* register offsets for current IT DMA context, 0 if not in use */
411 u32 ohci_IsoXmitContextControlSet;
412 u32 ohci_IsoXmitContextControlClear;
413 u32 ohci_IsoXmitCommandPtr;
414
415 /* OHCI card IR DMA context number, -1 if not in use */
416 struct ohci1394_iso_tasklet ir_tasklet;
417 int ohci_ir_ctx;
418
419 /* register offsets for current IR DMA context, 0 if not in use */
420 u32 ohci_IsoRcvContextControlSet;
421 u32 ohci_IsoRcvContextControlClear;
422 u32 ohci_IsoRcvCommandPtr;
423 u32 ohci_IsoRcvContextMatch;
424
425
426 /* CONCURRENCY CONTROL */
427
428 /* there are THREE levels of locking associated with video_card. */
429
430 /*
431 1) the 'open' flag - this prevents more than one process from
432 opening the device. (the driver currently assumes only one opener).
433	   This is a plain flag word (an unsigned long); use test_and_set_bit()
434	   (on bit zero) for atomicity.
435 */
436 unsigned long open;
437
438 /*
439 2) the spinlock - this provides mutual exclusion between the interrupt
440 handler and process-context operations. Generally you must take the
441 spinlock under the following conditions:
442 1) DMA (and hence the interrupt handler) may be running
443 AND
444 2) you need to operate on the video_card, especially active_frame
445
446 It is OK to play with video_card without taking the spinlock if
447 you are certain that DMA is not running. Even if DMA is running,
448 it is OK to *read* active_frame with the lock, then drop it
449 immediately. This is safe because the interrupt handler will never
450 advance active_frame onto a frame that is not READY (and the spinlock
451 must be held while marking a frame READY).
452
453 spinlock is also used to protect ohci_it_ctx and ohci_ir_ctx,
454 which can be accessed from both process and interrupt context
455 */
456 spinlock_t spinlock;
457
458 /* flag to prevent spurious interrupts (which OHCI seems to
459 generate a lot :) from accessing the struct */
460 int dma_running;
461
462 /*
463 3) the sleeping mutex 'mtx' - this is used from process context only,
464 to serialize various operations on the video_card. Even though only one
465 open() is allowed, we still need to prevent multiple threads of execution
466 from entering calls like read, write, ioctl, etc.
467
468 I honestly can't think of a good reason to use dv1394 from several threads
469 at once, but we need to serialize anyway to prevent oopses =).
470
471 NOTE: if you need both spinlock and mtx, take mtx first to avoid deadlock!
472 */
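	/*
	   Illustrative nesting for a path that needs both locks (an added
	   sketch, not part of the original comment):

	       mutex_lock(&video->mtx);
	       spin_lock_irqsave(&video->spinlock, flags);
	       ... operate on active_frame etc. ...
	       spin_unlock_irqrestore(&video->spinlock, flags);
	       mutex_unlock(&video->mtx);
	*/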
473 struct mutex mtx;
474
475 /* people waiting for buffer space, please form a line here... */
476 wait_queue_head_t waitq;
477
478 /* support asynchronous I/O signals (SIGIO) */
479 struct fasync_struct *fasync;
480
481 /* the large, non-contiguous (rvmalloc()) ringbuffer for DV
482 data, exposed to user-space via mmap() */
483 unsigned long dv_buf_size;
484 struct dma_region dv_buf;
485
486 /* next byte in the ringbuffer that a write() call will fill */
487 size_t write_off;
488
489 struct frame *frames[DV1394_MAX_FRAMES];
490
491 /* n_frames also serves as an indicator that this struct video_card is
492 initialized and ready to run DMA buffers */
493
494 int n_frames;
495
496 /* this is the frame that is currently "owned" by the OHCI DMA controller
497 (set to -1 iff DMA is not running)
498
499 ! must lock against the interrupt handler when accessing it !
500
501 RULES:
502
503 Only the interrupt handler may change active_frame if DMA
504 is running; if not, process may change it
505
506 If the next frame is READY, the interrupt handler will advance
507 active_frame when the current frame is finished.
508
509 If the next frame is CLEAR, the interrupt handler will re-transmit
510 the current frame, and the dropped_frames counter will be incremented.
511
512 The interrupt handler will NEVER advance active_frame to a
513 frame that is not READY.
514 */
515 int active_frame;
516 int first_run;
517
518 /* the same locking rules apply to these three fields also: */
519
520	/* altered ONLY from process context. Must check frames[first_clear_frame]->state;
521	   if it's READY, that means the ringbuffer is full of READY frames;
522	   if it's CLEAR, that means one or more ringbuffer frames are CLEAR */
523 unsigned int first_clear_frame;
524
525 /* altered both by process and interrupt */
526 unsigned int n_clear_frames;
527
528 /* only altered by the interrupt */
529 unsigned int dropped_frames;
530
531
532
533 /* the CIP accumulator and continuity counter are properties
534 of the DMA stream as a whole (not a single frame), so they
535 are stored here in the video_card */
536
537 unsigned long cip_accum;
538 unsigned long cip_n, cip_d;
539 unsigned int syt_offset;
540 unsigned int continuity_counter;
541
542 enum pal_or_ntsc pal_or_ntsc;
543
544 /* redundant, but simplifies the code somewhat */
545 unsigned int frame_size; /* in bytes */
546
547 /* the isochronous channel to use, -1 if video card is inactive */
548 int channel;
549
550
551 /* physically contiguous packet ringbuffer for receive */
552 struct dma_region packet_buf;
553 unsigned long packet_buf_size;
554
555 unsigned int current_packet;
556 int first_frame; /* received first start frame marker? */
557 enum modes mode;
558};
559
560/*
561 if the video_card is not initialized, then the ONLY fields that are valid are:
562 ohci
563 open
564 n_frames
565*/
566
567static inline int video_card_initialized(struct video_card *v)
568{
569 return v->n_frames > 0;
570}
571
572static int do_dv1394_init(struct video_card *video, struct dv1394_init *init);
573static int do_dv1394_init_default(struct video_card *video);
574static void do_dv1394_shutdown(struct video_card *video, int free_user_buf);
575
576
577/* NTSC empty packet rate accurate to within 0.01%,
578 calibrated against a Sony DSR-40 DVCAM deck */
579
580#define CIP_N_NTSC 68000000
581#define CIP_D_NTSC 1068000000
582
583#define CIP_N_PAL 1
584#define CIP_D_PAL 16
585
586#endif /* _DV_1394_PRIVATE_H */
587
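
A quick sanity check on those constants (worked out here, not stated in the original): with the accumulator logic in frame_prepare(), a fraction cip_n/cip_d of all packets is sent empty. For NTSC that is 68000000 / 1068000000, roughly 0.0637, i.e. about one empty packet per 15.7 transmitted; for PAL it is exactly 1/16.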
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
deleted file mode 100644
index c5a031b79d03..000000000000
--- a/drivers/ieee1394/dv1394.c
+++ /dev/null
@@ -1,2584 +0,0 @@
1/*
2 * dv1394.c - DV input/output over IEEE 1394 on OHCI chips
3 * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
4 * receive by Dan Dennedy <dan@dennedy.org>
5 *
6 * based on:
7 * video1394.c - video driver for OHCI 1394 boards
8 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software Foundation,
22 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 */
24
25/*
26 OVERVIEW
27
28 I designed dv1394 as a "pipe" that you can use to shoot DV onto a
29 FireWire bus. In transmission mode, dv1394 does the following:
30
31 1. accepts contiguous frames of DV data from user-space, via write()
32 or mmap() (see dv1394.h for the complete API)
33 2. wraps IEC 61883 packets around the DV data, inserting
34 empty synchronization packets as necessary
35 3. assigns accurate SYT timestamps to the outgoing packets
36 4. shoots them out using the OHCI card's IT DMA engine
37
38 Thanks to Dan Dennedy, we now have a receive mode that does the following:
39
40 1. accepts raw IEC 61883 packets from the OHCI card
41 2. re-assembles the DV data payloads into contiguous frames,
42 discarding empty packets
43 3. sends the DV data to user-space via read() or mmap()
44*/
45
46/*
47 TODO:
48
49 - tunable frame-drop behavior: either loop last frame, or halt transmission
50
51 - use a scatter/gather buffer for DMA programs (f->descriptor_pool)
52 so that we don't rely on allocating 64KB of contiguous kernel memory
53 via pci_alloc_consistent()
54
55 DONE:
56 - during reception, better handling of dropped frames and continuity errors
57 - during reception, prevent DMA from bypassing the irq tasklets
58 - reduce irq rate during reception (1/250 packets).
59 - add many more internal buffers during reception with scatter/gather dma.
60 - add dbc (continuity) checking on receive, increment status.dropped_frames
61 if not continuous.
62 - restart IT DMA after a bus reset
63 - safely obtain and release ISO Tx channels in cooperation with OHCI driver
64 - map received DIF blocks to their proper location in DV frame (ensure
65 recovery if dropped packet)
66 - handle bus resets gracefully (OHCI card seems to take care of this itself(!))
67 - do not allow resizing the user_buf once allocated; eliminate nuke_buffer_mappings
68 - eliminated #ifdef DV1394_DEBUG_LEVEL by inventing macros debug_printk and irq_printk
69 - added wmb() and mb() to places where PCI read/write ordering needs to be enforced
70 - set video->id correctly
71 - store video_cards in an array indexed by OHCI card ID, rather than a list
72 - implement DMA context allocation to cooperate with other users of the OHCI
73 - fix all XXX showstoppers
74 - disable IR/IT DMA interrupts on shutdown
75 - flush pci writes to the card by issuing a read
76 - character device dispatching
77 - switch over to the new kernel DMA API (pci_map_*()) (* needs testing on platforms with IOMMU!)
78 - keep all video_cards in a list (for open() via chardev), set file->private_data = video
79 - dv1394_poll should indicate POLLIN when receiving buffers are available
80 - add proc fs interface to set cip_n, cip_d, syt_offset, and video signal
81 - expose xmit and recv as separate devices (not exclusive)
82 - expose NTSC and PAL as separate devices (can be overridden)
83
84*/
85
86#include <linux/kernel.h>
87#include <linux/list.h>
88#include <linux/slab.h>
89#include <linux/interrupt.h>
90#include <linux/wait.h>
91#include <linux/errno.h>
92#include <linux/module.h>
93#include <linux/init.h>
94#include <linux/pci.h>
95#include <linux/fs.h>
96#include <linux/poll.h>
97#include <linux/mutex.h>
98#include <linux/bitops.h>
99#include <asm/byteorder.h>
100#include <asm/atomic.h>
101#include <asm/io.h>
102#include <asm/uaccess.h>
103#include <linux/delay.h>
104#include <asm/pgtable.h>
105#include <asm/page.h>
106#include <linux/sched.h>
107#include <linux/types.h>
108#include <linux/vmalloc.h>
109#include <linux/string.h>
110#include <linux/compat.h>
111#include <linux/cdev.h>
112
113#include "dv1394.h"
114#include "dv1394-private.h"
115#include "highlevel.h"
116#include "hosts.h"
117#include "ieee1394.h"
118#include "ieee1394_core.h"
119#include "ieee1394_hotplug.h"
120#include "ieee1394_types.h"
121#include "nodemgr.h"
122#include "ohci1394.h"
123
124/* DEBUG LEVELS:
125 0 - no debugging messages
126 1 - some debugging messages, but none during DMA frame transmission
127 2 - lots of messages, including during DMA frame transmission
128 (will cause underflows if your machine is too slow!)
129*/
130
131#define DV1394_DEBUG_LEVEL 0
132
133/* for debugging use ONLY: allow more than one open() of the device */
134/* #define DV1394_ALLOW_MORE_THAN_ONE_OPEN 1 */
135
136#if DV1394_DEBUG_LEVEL >= 2
137#define irq_printk( args... ) printk( args )
138#else
139#define irq_printk( args... ) do {} while (0)
140#endif
141
142#if DV1394_DEBUG_LEVEL >= 1
143#define debug_printk( args... ) printk( args)
144#else
145#define debug_printk( args... ) do {} while (0)
146#endif
147
148/* issue a dummy PCI read to force the preceding write
149 to be posted to the PCI bus immediately */
150
151static inline void flush_pci_write(struct ti_ohci *ohci)
152{
153 mb();
154 reg_read(ohci, OHCI1394_IsochronousCycleTimer);
155}
156
157static void it_tasklet_func(unsigned long data);
158static void ir_tasklet_func(unsigned long data);
159
160#ifdef CONFIG_COMPAT
161static long dv1394_compat_ioctl(struct file *file, unsigned int cmd,
162 unsigned long arg);
163#endif
164
165/* GLOBAL DATA */
166
167/* list of all video_cards */
168static LIST_HEAD(dv1394_cards);
169static DEFINE_SPINLOCK(dv1394_cards_lock);
170
171/* translate from a struct file* to the corresponding struct video_card* */
172
173static inline struct video_card* file_to_video_card(struct file *file)
174{
175 return file->private_data;
176}
177
178/*** FRAME METHODS *********************************************************/
179
180static void frame_reset(struct frame *f)
181{
182 f->state = FRAME_CLEAR;
183 f->done = 0;
184 f->n_packets = 0;
185 f->frame_begin_timestamp = NULL;
186 f->assigned_timestamp = 0;
187 f->cip_syt1 = NULL;
188 f->cip_syt2 = NULL;
189 f->mid_frame_timestamp = NULL;
190 f->frame_end_timestamp = NULL;
191 f->frame_end_branch = NULL;
192}
193
194static struct frame* frame_new(unsigned int frame_num, struct video_card *video)
195{
196 struct frame *f = kmalloc(sizeof(*f), GFP_KERNEL);
197 if (!f)
198 return NULL;
199
200 f->video = video;
201 f->frame_num = frame_num;
202
203 f->header_pool = pci_alloc_consistent(f->video->ohci->dev, PAGE_SIZE, &f->header_pool_dma);
204 if (!f->header_pool) {
205 printk(KERN_ERR "dv1394: failed to allocate CIP header pool\n");
206 kfree(f);
207 return NULL;
208 }
209
210 debug_printk("dv1394: frame_new: allocated CIP header pool at virt 0x%08lx (contig) dma 0x%08lx size %ld\n",
211 (unsigned long) f->header_pool, (unsigned long) f->header_pool_dma, PAGE_SIZE);
212
213 f->descriptor_pool_size = MAX_PACKETS * sizeof(struct DMA_descriptor_block);
214 /* make it an even # of pages */
215 f->descriptor_pool_size += PAGE_SIZE - (f->descriptor_pool_size%PAGE_SIZE);
216
217 f->descriptor_pool = pci_alloc_consistent(f->video->ohci->dev,
218 f->descriptor_pool_size,
219 &f->descriptor_pool_dma);
220 if (!f->descriptor_pool) {
221 pci_free_consistent(f->video->ohci->dev, PAGE_SIZE, f->header_pool, f->header_pool_dma);
222 kfree(f);
223 return NULL;
224 }
225
226 debug_printk("dv1394: frame_new: allocated DMA program memory at virt 0x%08lx (contig) dma 0x%08lx size %ld\n",
227 (unsigned long) f->descriptor_pool, (unsigned long) f->descriptor_pool_dma, f->descriptor_pool_size);
228
229 f->data = 0;
230 frame_reset(f);
231
232 return f;
233}
234
235static void frame_delete(struct frame *f)
236{
237 pci_free_consistent(f->video->ohci->dev, PAGE_SIZE, f->header_pool, f->header_pool_dma);
238 pci_free_consistent(f->video->ohci->dev, f->descriptor_pool_size, f->descriptor_pool, f->descriptor_pool_dma);
239 kfree(f);
240}
241
242
243
244
245/*
246 frame_prepare() - build the DMA program for transmitting
247
248 Frame_prepare() must be called OUTSIDE the video->spinlock.
249 However, frame_prepare() must still be serialized, so
250 it should be called WITH the video->mtx taken.
251 */
252
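A minimal sketch of that calling convention (hypothetical caller, not from the original file):

	mutex_lock(&video->mtx);           /* serialize process-context callers */
	frame_prepare(video, frame_num);   /* takes video->spinlock internally */
	mutex_unlock(&video->mtx);
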
253static void frame_prepare(struct video_card *video, unsigned int this_frame)
254{
255 struct frame *f = video->frames[this_frame];
256 int last_frame;
257
258 struct DMA_descriptor_block *block;
259 dma_addr_t block_dma;
260 struct CIP_header *cip;
261 dma_addr_t cip_dma;
262
263 unsigned int n_descriptors, full_packets, packets_per_frame, payload_size;
264
265 /* these flags denote packets that need special attention */
266 int empty_packet, first_packet, last_packet, mid_packet;
267
268 __le32 *branch_address, *last_branch_address = NULL;
269 unsigned long data_p;
270 int first_packet_empty = 0;
271 u32 cycleTimer, ct_sec, ct_cyc, ct_off;
272 unsigned long irq_flags;
273
274 irq_printk("frame_prepare( %d ) ---------------------\n", this_frame);
275
276 full_packets = 0;
277
278
279
280 if (video->pal_or_ntsc == DV1394_PAL)
281 packets_per_frame = DV1394_PAL_PACKETS_PER_FRAME;
282 else
283 packets_per_frame = DV1394_NTSC_PACKETS_PER_FRAME;
284
285 while ( full_packets < packets_per_frame ) {
286 empty_packet = first_packet = last_packet = mid_packet = 0;
287
288 data_p = f->data + full_packets * 480;
289
290 /************************************************/
291 /* allocate a descriptor block and a CIP header */
292 /************************************************/
293
294 /* note: these should NOT cross a page boundary (DMA restriction) */
295
296 if (f->n_packets >= MAX_PACKETS) {
297 printk(KERN_ERR "dv1394: FATAL ERROR: max packet count exceeded\n");
298 return;
299 }
300
301 /* the block surely won't cross a page boundary,
302 since an even number of descriptor_blocks fit on a page */
303 block = &(f->descriptor_pool[f->n_packets]);
304
305 /* DMA address of the block = offset of block relative
306 to the kernel base address of the descriptor pool
307 + DMA base address of the descriptor pool */
308 block_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
309
310
311 /* the whole CIP pool fits on one page, so no worries about boundaries */
312 if ( ((unsigned long) &(f->header_pool[f->n_packets]) - (unsigned long) f->header_pool)
313 > PAGE_SIZE) {
314 printk(KERN_ERR "dv1394: FATAL ERROR: no room to allocate CIP header\n");
315 return;
316 }
317
318 cip = &(f->header_pool[f->n_packets]);
319
320 /* DMA address of the CIP header = offset of cip
321 relative to kernel base address of the header pool
322 + DMA base address of the header pool */
323 cip_dma = (unsigned long) cip % PAGE_SIZE + f->header_pool_dma;
324
325 /* is this an empty packet? */
326
327 if (video->cip_accum > (video->cip_d - video->cip_n)) {
328 empty_packet = 1;
329 payload_size = 8;
330 video->cip_accum -= (video->cip_d - video->cip_n);
331 } else {
332 payload_size = 488;
333 video->cip_accum += video->cip_n;
334 }
335
336 /* there are three important packets each frame:
337
338 the first packet in the frame - we ask the card to record the timestamp when
339 this packet is actually sent, so we can monitor
340 how accurate our timestamps are. Also, the first
341 packet serves as a semaphore to let us know that
342 it's OK to free the *previous* frame's DMA buffer
343
344 the last packet in the frame - this packet is used to detect buffer underflows.
345 if this is the last ready frame, the last DMA block
346 will have a branch back to the beginning of the frame
347 (so that the card will re-send the frame on underflow).
348 if this branch gets taken, we know that at least one
349 frame has been dropped. When the next frame is ready,
350 the branch is pointed to its first packet, and the
351 semaphore is disabled.
352
353 a "mid" packet slightly before the end of the frame - this packet should trigger
354 an interrupt so we can go and assign a timestamp to the first packet
355 in the next frame. We don't use the very last packet in the frame
356 for this purpose, because that would leave very little time to set
357 the timestamp before DMA starts on the next frame.
358 */
359
360 if (f->n_packets == 0) {
361 first_packet = 1;
362 } else if ( full_packets == (packets_per_frame-1) ) {
363 last_packet = 1;
364 } else if (f->n_packets == packets_per_frame) {
365 mid_packet = 1;
366 }
367
368
369 /********************/
370 /* setup CIP header */
371 /********************/
372
373 /* the timestamp will be written later from the
374 mid-frame interrupt handler. For now we just
375 store the address of the CIP header(s) that
376 need a timestamp. */
377
378 /* first packet in the frame needs a timestamp */
379 if (first_packet) {
380 f->cip_syt1 = cip;
381 if (empty_packet)
382 first_packet_empty = 1;
383
384 } else if (first_packet_empty && (f->n_packets == 1) ) {
385 /* if the first packet was empty, the second
386 packet's CIP header also needs a timestamp */
387 f->cip_syt2 = cip;
388 }
389
390 fill_cip_header(cip,
391 /* the node ID number of the OHCI card */
392 reg_read(video->ohci, OHCI1394_NodeID) & 0x3F,
393 video->continuity_counter,
394 video->pal_or_ntsc,
395 0xFFFF /* the timestamp is filled in later */);
396
397 /* advance counter, only for full packets */
398 if ( ! empty_packet )
399 video->continuity_counter++;
400
401 /******************************/
402 /* setup DMA descriptor block */
403 /******************************/
404
405 /* first descriptor - OUTPUT_MORE_IMMEDIATE, for the controller's IT header */
406 fill_output_more_immediate( &(block->u.out.omi), 1, video->channel, 0, payload_size);
407
408 if (empty_packet) {
409 /* second descriptor - OUTPUT_LAST for CIP header */
410 fill_output_last( &(block->u.out.u.empty.ol),
411
412 /* want completion status on all interesting packets */
413 (first_packet || mid_packet || last_packet) ? 1 : 0,
414
415 /* want interrupts on all interesting packets */
416 (first_packet || mid_packet || last_packet) ? 1 : 0,
417
418 sizeof(struct CIP_header), /* data size */
419 cip_dma);
420
421 if (first_packet)
422 f->frame_begin_timestamp = &(block->u.out.u.empty.ol.q[3]);
423 else if (mid_packet)
424 f->mid_frame_timestamp = &(block->u.out.u.empty.ol.q[3]);
425 else if (last_packet) {
426 f->frame_end_timestamp = &(block->u.out.u.empty.ol.q[3]);
427 f->frame_end_branch = &(block->u.out.u.empty.ol.q[2]);
428 }
429
430 branch_address = &(block->u.out.u.empty.ol.q[2]);
431 n_descriptors = 3;
432 if (first_packet)
433 f->first_n_descriptors = n_descriptors;
434
435 } else { /* full packet */
436
437 /* second descriptor - OUTPUT_MORE for CIP header */
438 fill_output_more( &(block->u.out.u.full.om),
439 sizeof(struct CIP_header), /* data size */
440 cip_dma);
441
442
443 /* third (and possibly fourth) descriptor - for DV data */
444 /* the 480-byte payload can cross a page boundary; if so,
445 we need to split it into two DMA descriptors */
446
447 /* does the 480-byte data payload cross a page boundary? */
448 if ( (PAGE_SIZE- ((unsigned long)data_p % PAGE_SIZE) ) < 480 ) {
449
450 /* page boundary crossed */
451
452 fill_output_more( &(block->u.out.u.full.u.cross.om),
453 /* data size - how much of data_p fits on the first page */
454 PAGE_SIZE - (data_p % PAGE_SIZE),
455
456 /* DMA address of data_p */
457 dma_region_offset_to_bus(&video->dv_buf,
458 data_p - (unsigned long) video->dv_buf.kvirt));
459
460 fill_output_last( &(block->u.out.u.full.u.cross.ol),
461
462 /* want completion status on all interesting packets */
463 (first_packet || mid_packet || last_packet) ? 1 : 0,
464
465 /* want interrupt on all interesting packets */
466 (first_packet || mid_packet || last_packet) ? 1 : 0,
467
468 /* data size - remaining portion of data_p */
469 480 - (PAGE_SIZE - (data_p % PAGE_SIZE)),
470
471 /* DMA address of data_p + PAGE_SIZE - (data_p % PAGE_SIZE) */
472 dma_region_offset_to_bus(&video->dv_buf,
473 data_p + PAGE_SIZE - (data_p % PAGE_SIZE) - (unsigned long) video->dv_buf.kvirt));
474
475 if (first_packet)
476 f->frame_begin_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
477 else if (mid_packet)
478 f->mid_frame_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
479 else if (last_packet) {
480 f->frame_end_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
481 f->frame_end_branch = &(block->u.out.u.full.u.cross.ol.q[2]);
482 }
483
484 branch_address = &(block->u.out.u.full.u.cross.ol.q[2]);
485
486 n_descriptors = 5;
487 if (first_packet)
488 f->first_n_descriptors = n_descriptors;
489
490 full_packets++;
491
492 } else {
493 /* fits on one page */
494
495 fill_output_last( &(block->u.out.u.full.u.nocross.ol),
496
497 /* want completion status on all interesting packets */
498 (first_packet || mid_packet || last_packet) ? 1 : 0,
499
500 /* want interrupt on all interesting packets */
501 (first_packet || mid_packet || last_packet) ? 1 : 0,
502
503 480, /* data size (480 bytes of DV data) */
504
505
506 /* DMA address of data_p */
507 dma_region_offset_to_bus(&video->dv_buf,
508 data_p - (unsigned long) video->dv_buf.kvirt));
509
510 if (first_packet)
511 f->frame_begin_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
512 else if (mid_packet)
513 f->mid_frame_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
514 else if (last_packet) {
515 f->frame_end_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
516 f->frame_end_branch = &(block->u.out.u.full.u.nocross.ol.q[2]);
517 }
518
519 branch_address = &(block->u.out.u.full.u.nocross.ol.q[2]);
520
521 n_descriptors = 4;
522 if (first_packet)
523 f->first_n_descriptors = n_descriptors;
524
525 full_packets++;
526 }
527 }
528
529 /* link this descriptor block into the DMA program by filling in
530 the branch address of the previous block */
531
532 /* note: we are not linked into the active DMA chain yet */
533
534 if (last_branch_address) {
535 *(last_branch_address) = cpu_to_le32(block_dma | n_descriptors);
536 }
537
538 last_branch_address = branch_address;
539
540
541 f->n_packets++;
542
543 }
544
545 /* when we first assemble a new frame, set the final branch
546 to loop back up to the top */
547 *(f->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | f->first_n_descriptors);
548
549 /* make the latest version of this frame visible to the PCI card */
550 dma_region_sync_for_device(&video->dv_buf, f->data - (unsigned long) video->dv_buf.kvirt, video->frame_size);
551
552 /* lock against DMA interrupt */
553 spin_lock_irqsave(&video->spinlock, irq_flags);
554
555 f->state = FRAME_READY;
556
557 video->n_clear_frames--;
558
559 last_frame = video->first_clear_frame - 1;
560 if (last_frame == -1)
561 last_frame = video->n_frames-1;
562
563 video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
564
565 irq_printk(" frame %d prepared, active_frame = %d, n_clear_frames = %d, first_clear_frame = %d\n last=%d\n",
566 this_frame, video->active_frame, video->n_clear_frames, video->first_clear_frame, last_frame);
567
568 irq_printk(" begin_ts %08lx mid_ts %08lx end_ts %08lx end_br %08lx\n",
569 (unsigned long) f->frame_begin_timestamp,
570 (unsigned long) f->mid_frame_timestamp,
571 (unsigned long) f->frame_end_timestamp,
572 (unsigned long) f->frame_end_branch);
573
574 if (video->active_frame != -1) {
575
576 /* if DMA is already active, we are almost done */
577 /* just link us onto the active DMA chain */
578 if (video->frames[last_frame]->frame_end_branch) {
579 u32 temp;
580
581 /* point the previous frame's tail to this frame's head */
582 *(video->frames[last_frame]->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | f->first_n_descriptors);
583
584 /* this write MUST precede the next one, or we could silently drop frames */
585 wmb();
586
587 /* disable the want_status semaphore on the last packet */
588 temp = le32_to_cpu(*(video->frames[last_frame]->frame_end_branch - 2));
589 temp &= 0xF7CFFFFF;
590 *(video->frames[last_frame]->frame_end_branch - 2) = cpu_to_le32(temp);
591
592 /* flush these writes to memory ASAP */
593 flush_pci_write(video->ohci);
594
595 /* NOTE:
596 ideally the writes should be "atomic": if
597 the OHCI card reads the want_status flag in
598 between them, we'll falsely report a
599 dropped frame. Hopefully this window is too
600 small to really matter, and the consequence
601 is rather harmless. */
602
603
604 irq_printk(" new frame %d linked onto DMA chain\n", this_frame);
605
606 } else {
607 printk(KERN_ERR "dv1394: last frame not ready???\n");
608 }
609
610 } else {
611
612 u32 transmit_sec, transmit_cyc;
613 u32 ts_cyc;
614
615 /* DMA is stopped, so this is the very first frame */
616 video->active_frame = this_frame;
617
618 /* set CommandPtr to address and size of first descriptor block */
619 reg_write(video->ohci, video->ohci_IsoXmitCommandPtr,
620 video->frames[video->active_frame]->descriptor_pool_dma |
621 f->first_n_descriptors);
622
623 /* assign a timestamp based on the current cycle time...
624 We'll tell the card to begin DMA 100 cycles from now,
625 and assign a timestamp 103 cycles from now */
626
627 cycleTimer = reg_read(video->ohci, OHCI1394_IsochronousCycleTimer);
628
629 ct_sec = cycleTimer >> 25;
630 ct_cyc = (cycleTimer >> 12) & 0x1FFF;
631 ct_off = cycleTimer & 0xFFF;
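		/* Illustrative decomposition (added example, not in the original):
		   the register packs 7 bits of seconds, 13 bits of cycle (0..7999)
		   and 12 bits of offset, so e.g. cycleTimer = 0x04001A2B gives
		   ct_sec = 2, ct_cyc = 1, ct_off = 0xA2B. */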
632
633 transmit_sec = ct_sec;
634 transmit_cyc = ct_cyc + 100;
635
636 transmit_sec += transmit_cyc/8000;
637 transmit_cyc %= 8000;
638
639 ts_cyc = transmit_cyc + 3;
640 ts_cyc %= 8000;
641
642 f->assigned_timestamp = (ts_cyc&0xF) << 12;
643
644 /* now actually write the timestamp into the appropriate CIP headers */
645 if (f->cip_syt1) {
646 f->cip_syt1->b[6] = f->assigned_timestamp >> 8;
647 f->cip_syt1->b[7] = f->assigned_timestamp & 0xFF;
648 }
649 if (f->cip_syt2) {
650 f->cip_syt2->b[6] = f->assigned_timestamp >> 8;
651 f->cip_syt2->b[7] = f->assigned_timestamp & 0xFF;
652 }
653
654 /* --- start DMA --- */
655
656 /* clear all bits in ContextControl register */
657
658 reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, 0xFFFFFFFF);
659 wmb();
660
661 /* the OHCI card has the ability to start ISO transmission on a
662 particular cycle (start-on-cycle). This way we can ensure that
663 the first DV frame will have an accurate timestamp.
664
665 However, start-on-cycle only appears to work if the OHCI card
666 is cycle master! Since the consequences of messing up the first
667 timestamp are minimal*, just disable start-on-cycle for now.
668
669	   * my DV deck drops the first few frames before it "locks in";
670 so the first frame having an incorrect timestamp is inconsequential.
671 */
672
673#if 0
674 reg_write(video->ohci, video->ohci_IsoXmitContextControlSet,
675 (1 << 31) /* enable start-on-cycle */
676 | ( (transmit_sec & 0x3) << 29)
677 | (transmit_cyc << 16));
678 wmb();
679#endif
680
681 video->dma_running = 1;
682
683 /* set the 'run' bit */
684 reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, 0x8000);
685 flush_pci_write(video->ohci);
686
687 /* --- DMA should be running now --- */
688
689 debug_printk(" Cycle = %4u ContextControl = %08x CmdPtr = %08x\n",
690 (reg_read(video->ohci, OHCI1394_IsochronousCycleTimer) >> 12) & 0x1FFF,
691 reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
692 reg_read(video->ohci, video->ohci_IsoXmitCommandPtr));
693
694 debug_printk(" DMA start - current cycle %4u, transmit cycle %4u (%2u), assigning ts cycle %2u\n",
695 ct_cyc, transmit_cyc, transmit_cyc & 0xF, ts_cyc & 0xF);
696
697#if DV1394_DEBUG_LEVEL >= 2
698 {
699 /* check if DMA is really running */
700 int i = 0;
701 while (i < 20) {
702 mb();
703 mdelay(1);
704 if (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) {
705 printk("DMA ACTIVE after %d msec\n", i);
706 break;
707 }
708 i++;
709 }
710
711 printk("set = %08x, cmdPtr = %08x\n",
712 reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
713 reg_read(video->ohci, video->ohci_IsoXmitCommandPtr)
714 );
715
716 if ( ! (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) ) {
717 printk("DMA did NOT go active after 20ms, event = %x\n",
718 reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & 0x1F);
719 } else
720 printk("DMA is RUNNING!\n");
721 }
722#endif
723
724 }
725
726
727 spin_unlock_irqrestore(&video->spinlock, irq_flags);
728}
729
730
731
732/*** RECEIVE FUNCTIONS *****************************************************/
733
734/*
735 frame method put_packet
736
737 map and copy the packet data to its location in the frame
738 based upon DIF section and sequence
739*/
740
741static inline void
742frame_put_packet(struct frame *f, struct packet *p)
743{
744 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
745 int dif_sequence = p->data[1] >> 4; /* dif sequence number is in bits 4 - 7 */
746 int dif_block = p->data[2];
747
748 /* sanity check */
749 if (dif_sequence > 11 || dif_block > 149) return;
750
751 switch (section_type) {
752 case 0: /* 1 Header block */
753 memcpy( (void *) f->data + dif_sequence * 150 * 80, p->data, 480);
754 break;
755
756 case 1: /* 2 Subcode blocks */
757 memcpy( (void *) f->data + dif_sequence * 150 * 80 + (1 + dif_block) * 80, p->data, 480);
758 break;
759
760 case 2: /* 3 VAUX blocks */
761 memcpy( (void *) f->data + dif_sequence * 150 * 80 + (3 + dif_block) * 80, p->data, 480);
762 break;
763
764 case 3: /* 9 Audio blocks interleaved with video */
765 memcpy( (void *) f->data + dif_sequence * 150 * 80 + (6 + dif_block * 16) * 80, p->data, 480);
766 break;
767
768 case 4: /* 135 Video blocks interleaved with audio */
769 memcpy( (void *) f->data + dif_sequence * 150 * 80 + (7 + (dif_block / 15) + dif_block) * 80, p->data, 480);
770 break;
771
772	default: /* we cannot handle any other data */
773 break;
774 }
775}
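
To make the mapping concrete (a worked example, not in the original): each DIF sequence occupies 150 blocks of 80 bytes. A video block (section type 4) with dif_sequence = 0 and dif_block = 15 is therefore stored (7 + 15/15 + 15) * 80 = 1840 bytes into the frame; the integer-divide term accounts for the audio block interleaved after every 15 video blocks.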
776
777
778static void start_dma_receive(struct video_card *video)
779{
780 if (video->first_run == 1) {
781 video->first_run = 0;
782
783 /* start DMA once all of the frames are READY */
784 video->n_clear_frames = 0;
785 video->first_clear_frame = -1;
786 video->current_packet = 0;
787 video->active_frame = 0;
788
789 /* reset iso recv control register */
790 reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, 0xFFFFFFFF);
791 wmb();
792
793 /* clear bufferFill, set isochHeader and speed (0=100) */
794 reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, 0x40000000);
795
796 /* match on all tags, listen on channel */
797 reg_write(video->ohci, video->ohci_IsoRcvContextMatch, 0xf0000000 | video->channel);
798
799		/* address of first descriptor block + Z=1 */
800 reg_write(video->ohci, video->ohci_IsoRcvCommandPtr,
801 video->frames[0]->descriptor_pool_dma | 1); /* Z=1 */
802 wmb();
803
804 video->dma_running = 1;
805
806 /* run */
807 reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, 0x8000);
808 flush_pci_write(video->ohci);
809
810 debug_printk("dv1394: DMA started\n");
811
812#if DV1394_DEBUG_LEVEL >= 2
813 {
814 int i;
815
816 for (i = 0; i < 1000; ++i) {
817 mdelay(1);
818 if (reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) {
819 printk("DMA ACTIVE after %d msec\n", i);
820 break;
821 }
822 }
823 if ( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 11) ) {
824 printk("DEAD, event = %x\n",
825 reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & 0x1F);
826 } else
827 printk("RUNNING!\n");
828 }
829#endif
830 } else if ( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 11) ) {
831 debug_printk("DEAD, event = %x\n",
832 reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & 0x1F);
833
834 /* wake */
835 reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
836 }
837}
838
839
840/*
841 receive_packets() - build the DMA program for receiving
842*/
843
844static void receive_packets(struct video_card *video)
845{
846 struct DMA_descriptor_block *block = NULL;
847 dma_addr_t block_dma = 0;
848 struct packet *data = NULL;
849 dma_addr_t data_dma = 0;
850 __le32 *last_branch_address = NULL;
851 unsigned long irq_flags;
852 int want_interrupt = 0;
853 struct frame *f = NULL;
854 int i, j;
855
856 spin_lock_irqsave(&video->spinlock, irq_flags);
857
858 for (j = 0; j < video->n_frames; j++) {
859
860 /* connect frames */
861 if (j > 0 && f != NULL && f->frame_end_branch != NULL)
862 *(f->frame_end_branch) = cpu_to_le32(video->frames[j]->descriptor_pool_dma | 1); /* set Z=1 */
863
864 f = video->frames[j];
865
866 for (i = 0; i < MAX_PACKETS; i++) {
867 /* locate a descriptor block and packet from the buffer */
868 block = &(f->descriptor_pool[i]);
869 block_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
870
871 data = ((struct packet*)video->packet_buf.kvirt) + f->frame_num * MAX_PACKETS + i;
872 data_dma = dma_region_offset_to_bus( &video->packet_buf,
873 ((unsigned long) data - (unsigned long) video->packet_buf.kvirt) );
874
875 /* setup DMA descriptor block */
876 want_interrupt = ((i % (MAX_PACKETS/2)) == 0 || i == (MAX_PACKETS-1));
877 fill_input_last( &(block->u.in.il), want_interrupt, 512, data_dma);
878
879 /* link descriptors */
880 last_branch_address = f->frame_end_branch;
881
882 if (last_branch_address != NULL)
883 *(last_branch_address) = cpu_to_le32(block_dma | 1); /* set Z=1 */
884
885 f->frame_end_branch = &(block->u.in.il.q[2]);
886 }
887
888 } /* next j */
889
890 spin_unlock_irqrestore(&video->spinlock, irq_flags);
891
892}
893
894
895
896/*** MANAGEMENT FUNCTIONS **************************************************/
897
898static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
899{
900 unsigned long flags, new_buf_size;
901 int i;
902 u64 chan_mask;
903 int retval = -EINVAL;
904
905 debug_printk("dv1394: initialising %d\n", video->id);
906 if (init->api_version != DV1394_API_VERSION)
907 return -EINVAL;
908
909 /* first sanitize all the parameters */
910 if ( (init->n_frames < 2) || (init->n_frames > DV1394_MAX_FRAMES) )
911 return -EINVAL;
912
913 if ( (init->format != DV1394_NTSC) && (init->format != DV1394_PAL) )
914 return -EINVAL;
915
916 if ( (init->syt_offset == 0) || (init->syt_offset > 50) )
917 /* default SYT offset is 3 cycles */
918 init->syt_offset = 3;
919
920 if (init->channel > 63)
921 init->channel = 63;
922
923 chan_mask = (u64)1 << init->channel;
924
925 /* calculate what size DMA buffer is needed */
926 if (init->format == DV1394_NTSC)
927 new_buf_size = DV1394_NTSC_FRAME_SIZE * init->n_frames;
928 else
929 new_buf_size = DV1394_PAL_FRAME_SIZE * init->n_frames;
930
931 /* round up to PAGE_SIZE */
932 if (new_buf_size % PAGE_SIZE) new_buf_size += PAGE_SIZE - (new_buf_size % PAGE_SIZE);
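	/* equivalently (illustrative): new_buf_size = PAGE_ALIGN(new_buf_size); */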
933
934 /* don't allow the user to allocate the DMA buffer more than once */
935 if (video->dv_buf.kvirt && video->dv_buf_size != new_buf_size) {
936 printk("dv1394: re-sizing the DMA buffer is not allowed\n");
937 return -EINVAL;
938 }
939
940 /* shutdown the card if it's currently active */
941 /* (the card should not be reset if the parameters are screwy) */
942
943 do_dv1394_shutdown(video, 0);
944
945 /* try to claim the ISO channel */
946 spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
947 if (video->ohci->ISO_channel_usage & chan_mask) {
948 spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
949 retval = -EBUSY;
950 goto err;
951 }
952 video->ohci->ISO_channel_usage |= chan_mask;
953 spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
954
955 video->channel = init->channel;
956
957 /* initialize misc. fields of video */
958 video->n_frames = init->n_frames;
959 video->pal_or_ntsc = init->format;
960
961 video->cip_accum = 0;
962 video->continuity_counter = 0;
963
964 video->active_frame = -1;
965 video->first_clear_frame = 0;
966 video->n_clear_frames = video->n_frames;
967 video->dropped_frames = 0;
968
969 video->write_off = 0;
970
971 video->first_run = 1;
972 video->current_packet = -1;
973 video->first_frame = 0;
974
975 if (video->pal_or_ntsc == DV1394_NTSC) {
976 video->cip_n = init->cip_n != 0 ? init->cip_n : CIP_N_NTSC;
977 video->cip_d = init->cip_d != 0 ? init->cip_d : CIP_D_NTSC;
978 video->frame_size = DV1394_NTSC_FRAME_SIZE;
979 } else {
980 video->cip_n = init->cip_n != 0 ? init->cip_n : CIP_N_PAL;
981 video->cip_d = init->cip_d != 0 ? init->cip_d : CIP_D_PAL;
982 video->frame_size = DV1394_PAL_FRAME_SIZE;
983 }
984
985 video->syt_offset = init->syt_offset;
986
987 /* find and claim DMA contexts on the OHCI card */
988
989 if (video->ohci_it_ctx == -1) {
990 ohci1394_init_iso_tasklet(&video->it_tasklet, OHCI_ISO_TRANSMIT,
991 it_tasklet_func, (unsigned long) video);
992
993 if (ohci1394_register_iso_tasklet(video->ohci, &video->it_tasklet) < 0) {
994 printk(KERN_ERR "dv1394: could not find an available IT DMA context\n");
995 retval = -EBUSY;
996 goto err;
997 }
998
999 video->ohci_it_ctx = video->it_tasklet.context;
1000 debug_printk("dv1394: claimed IT DMA context %d\n", video->ohci_it_ctx);
1001 }
1002
1003 if (video->ohci_ir_ctx == -1) {
1004 ohci1394_init_iso_tasklet(&video->ir_tasklet, OHCI_ISO_RECEIVE,
1005 ir_tasklet_func, (unsigned long) video);
1006
1007 if (ohci1394_register_iso_tasklet(video->ohci, &video->ir_tasklet) < 0) {
1008 printk(KERN_ERR "dv1394: could not find an available IR DMA context\n");
1009 retval = -EBUSY;
1010 goto err;
1011 }
1012 video->ohci_ir_ctx = video->ir_tasklet.context;
1013 debug_printk("dv1394: claimed IR DMA context %d\n", video->ohci_ir_ctx);
1014 }
1015
1016 /* allocate struct frames */
1017 for (i = 0; i < init->n_frames; i++) {
1018 video->frames[i] = frame_new(i, video);
1019
1020 if (!video->frames[i]) {
1021 printk(KERN_ERR "dv1394: Cannot allocate frame structs\n");
1022 retval = -ENOMEM;
1023 goto err;
1024 }
1025 }
1026
1027 if (!video->dv_buf.kvirt) {
1028 /* allocate the ringbuffer */
1029 retval = dma_region_alloc(&video->dv_buf, new_buf_size, video->ohci->dev, PCI_DMA_TODEVICE);
1030 if (retval)
1031 goto err;
1032
1033 video->dv_buf_size = new_buf_size;
1034
1035 debug_printk("dv1394: Allocated %d frame buffers, total %u pages (%u DMA pages), %lu bytes\n",
1036 video->n_frames, video->dv_buf.n_pages,
1037 video->dv_buf.n_dma_pages, video->dv_buf_size);
1038 }
1039
1040 /* set up the frame->data pointers */
1041 for (i = 0; i < video->n_frames; i++)
1042 video->frames[i]->data = (unsigned long) video->dv_buf.kvirt + i * video->frame_size;
1043
1044 if (!video->packet_buf.kvirt) {
1045 /* allocate packet buffer */
1046 video->packet_buf_size = sizeof(struct packet) * video->n_frames * MAX_PACKETS;
1047 if (video->packet_buf_size % PAGE_SIZE)
1048 video->packet_buf_size += PAGE_SIZE - (video->packet_buf_size % PAGE_SIZE);
1049
1050 retval = dma_region_alloc(&video->packet_buf, video->packet_buf_size,
1051 video->ohci->dev, PCI_DMA_FROMDEVICE);
1052 if (retval)
1053 goto err;
1054
1055 debug_printk("dv1394: Allocated %d packets in buffer, total %u pages (%u DMA pages), %lu bytes\n",
1056 video->n_frames*MAX_PACKETS, video->packet_buf.n_pages,
1057 video->packet_buf.n_dma_pages, video->packet_buf_size);
1058 }
1059
1060 /* set up register offsets for IT context */
1061 /* IT DMA context registers are spaced 16 bytes apart */
1062 video->ohci_IsoXmitContextControlSet = OHCI1394_IsoXmitContextControlSet+16*video->ohci_it_ctx;
1063 video->ohci_IsoXmitContextControlClear = OHCI1394_IsoXmitContextControlClear+16*video->ohci_it_ctx;
1064 video->ohci_IsoXmitCommandPtr = OHCI1394_IsoXmitCommandPtr+16*video->ohci_it_ctx;
1065
1066 /* enable interrupts for IT context */
1067 reg_write(video->ohci, OHCI1394_IsoXmitIntMaskSet, (1 << video->ohci_it_ctx));
1068 debug_printk("dv1394: interrupts enabled for IT context %d\n", video->ohci_it_ctx);
1069
1070 /* set up register offsets for IR context */
1071 /* IR DMA context registers are spaced 32 bytes apart */
1072 video->ohci_IsoRcvContextControlSet = OHCI1394_IsoRcvContextControlSet+32*video->ohci_ir_ctx;
1073 video->ohci_IsoRcvContextControlClear = OHCI1394_IsoRcvContextControlClear+32*video->ohci_ir_ctx;
1074 video->ohci_IsoRcvCommandPtr = OHCI1394_IsoRcvCommandPtr+32*video->ohci_ir_ctx;
1075 video->ohci_IsoRcvContextMatch = OHCI1394_IsoRcvContextMatch+32*video->ohci_ir_ctx;
1076
1077 /* enable interrupts for IR context */
1078 reg_write(video->ohci, OHCI1394_IsoRecvIntMaskSet, (1 << video->ohci_ir_ctx) );
1079 debug_printk("dv1394: interrupts enabled for IR context %d\n", video->ohci_ir_ctx);
1080
1081 return 0;
1082
1083err:
1084 do_dv1394_shutdown(video, 1);
1085 return retval;
1086}
1087
1088/* if the user doesn't bother to call ioctl(INIT) before starting
1089 mmap() or read()/write(), just give him some default values */
1090
1091static int do_dv1394_init_default(struct video_card *video)
1092{
1093 struct dv1394_init init;
1094
1095 init.api_version = DV1394_API_VERSION;
1096 init.n_frames = DV1394_MAX_FRAMES / 4;
1097 init.channel = video->channel;
1098 init.format = video->pal_or_ntsc;
1099 init.cip_n = video->cip_n;
1100 init.cip_d = video->cip_d;
1101 init.syt_offset = video->syt_offset;
1102
1103 return do_dv1394_init(video, &init);
1104}
1105
1106/* do NOT call from interrupt context */
1107static void stop_dma(struct video_card *video)
1108{
1109 unsigned long flags;
1110 int i;
1111
1112 /* no interrupts */
1113 spin_lock_irqsave(&video->spinlock, flags);
1114
1115 video->dma_running = 0;
1116
1117 if ( (video->ohci_it_ctx == -1) && (video->ohci_ir_ctx == -1) )
1118 goto out;
1119
1120 /* stop DMA if in progress */
1121 if ( (video->active_frame != -1) ||
1122 (reg_read(video->ohci, video->ohci_IsoXmitContextControlClear) & (1 << 10)) ||
1123 (reg_read(video->ohci, video->ohci_IsoRcvContextControlClear) & (1 << 10)) ) {
1124
1125 /* clear the .run bits */
1126 reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, (1 << 15));
1127 reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, (1 << 15));
1128 flush_pci_write(video->ohci);
1129
1130 video->active_frame = -1;
1131 video->first_run = 1;
1132
1133 /* wait until DMA really stops */
1134 i = 0;
1135 while (i < 1000) {
1136
1137 /* wait 0.1 millisecond */
1138 udelay(100);
1139
1140 if ( (reg_read(video->ohci, video->ohci_IsoXmitContextControlClear) & (1 << 10)) ||
1141 (reg_read(video->ohci, video->ohci_IsoRcvContextControlClear) & (1 << 10)) ) {
1142 /* still active */
1143 debug_printk("dv1394: stop_dma: DMA not stopped yet\n" );
1144 mb();
1145 } else {
1146 debug_printk("dv1394: stop_dma: DMA stopped safely after %d ms\n", i/10);
1147 break;
1148 }
1149
1150 i++;
1151 }
1152
1153 if (i == 1000) {
1154 printk(KERN_ERR "dv1394: stop_dma: DMA still going after %d ms!\n", i/10);
1155 }
1156 }
1157 else
1158 debug_printk("dv1394: stop_dma: already stopped.\n");
1159
1160out:
1161 spin_unlock_irqrestore(&video->spinlock, flags);
1162}
1163
1164
1165
1166static void do_dv1394_shutdown(struct video_card *video, int free_dv_buf)
1167{
1168 int i;
1169
1170 debug_printk("dv1394: shutdown...\n");
1171
1172 /* stop DMA if in progress */
1173 stop_dma(video);
1174
1175 /* release the DMA contexts */
1176 if (video->ohci_it_ctx != -1) {
1177 video->ohci_IsoXmitContextControlSet = 0;
1178 video->ohci_IsoXmitContextControlClear = 0;
1179 video->ohci_IsoXmitCommandPtr = 0;
1180
1181 /* disable interrupts for IT context */
1182 reg_write(video->ohci, OHCI1394_IsoXmitIntMaskClear, (1 << video->ohci_it_ctx));
1183
1184 /* remove tasklet */
1185 ohci1394_unregister_iso_tasklet(video->ohci, &video->it_tasklet);
1186 debug_printk("dv1394: IT context %d released\n", video->ohci_it_ctx);
1187 video->ohci_it_ctx = -1;
1188 }
1189
1190 if (video->ohci_ir_ctx != -1) {
1191 video->ohci_IsoRcvContextControlSet = 0;
1192 video->ohci_IsoRcvContextControlClear = 0;
1193 video->ohci_IsoRcvCommandPtr = 0;
1194 video->ohci_IsoRcvContextMatch = 0;
1195
1196 /* disable interrupts for IR context */
1197 reg_write(video->ohci, OHCI1394_IsoRecvIntMaskClear, (1 << video->ohci_ir_ctx));
1198
1199 /* remove tasklet */
1200 ohci1394_unregister_iso_tasklet(video->ohci, &video->ir_tasklet);
1201 debug_printk("dv1394: IR context %d released\n", video->ohci_ir_ctx);
1202 video->ohci_ir_ctx = -1;
1203 }
1204
1205 /* release the ISO channel */
1206 if (video->channel != -1) {
1207 u64 chan_mask;
1208 unsigned long flags;
1209
1210 chan_mask = (u64)1 << video->channel;
1211
1212 spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
1213 video->ohci->ISO_channel_usage &= ~(chan_mask);
1214 spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
1215
1216 video->channel = -1;
1217 }
1218
1219 /* free the frame structs */
1220 for (i = 0; i < DV1394_MAX_FRAMES; i++) {
1221 if (video->frames[i])
1222 frame_delete(video->frames[i]);
1223 video->frames[i] = NULL;
1224 }
1225
1226 video->n_frames = 0;
1227
1228 /* we can't free the DMA buffer unless it is guaranteed that
1229 no more user-space mappings exist */
1230
1231 if (free_dv_buf) {
1232 dma_region_free(&video->dv_buf);
1233 video->dv_buf_size = 0;
1234 }
1235
1236 /* free packet buffer */
1237 dma_region_free(&video->packet_buf);
1238 video->packet_buf_size = 0;
1239
1240 debug_printk("dv1394: shutdown OK\n");
1241}
1242
1243/*
1244 **********************************
1245 *** MMAP() THEORY OF OPERATION ***
1246 **********************************
1247
1248 The ringbuffer cannot be re-allocated or freed while
1249 a user program maintains a mapping of it. (note that a mapping
1250 can persist even after the device fd is closed!)
1251
1252 So, only let the user process allocate the DMA buffer once.
1253 To resize or deallocate it, you must close the device file
1254 and open it again.
1255
1256 Previously Dan M. hacked out a scheme that allowed the DMA
1257 buffer to change by forcefully unmapping it from the user's
1258 address space. It was prone to error because it's very hard to
1259 track all the places the buffer could have been mapped (we
1260 would have had to walk the vma list of every process in the
1261 system to be sure we found all the mappings!). Instead, we
1262 force the user to choose one buffer size and stick with
1263 it. This small sacrifice is worth the huge reduction in
1264 error-prone code in dv1394.
1265*/
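/*
   Illustration only: a minimal user-space sketch of the allocate-once
   rule described above. The device path, the four-frame count, and the
   missing error handling are assumptions for brevity, not requirements
   of the driver.

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include "dv1394.h"

	int fd = open("/dev/dv1394", O_RDWR);
	struct dv1394_init init = {
		.api_version = DV1394_API_VERSION,
		.channel     = 63,
		.n_frames    = 4,
		.format      = DV1394_NTSC,
	};
	size_t len = 4 * DV1394_NTSC_FRAME_SIZE;
	unsigned char *ring;

	ioctl(fd, DV1394_IOC_INIT, &init);
	ring = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	... fill frames, DV1394_IOC_SUBMIT_FRAMES, poll() ...

	munmap(ring, len);
	close(fd);

   Every mapping must be unmapped before the buffer can be freed; to get
   a different buffer size, close the file and re-open it, since a second
   DV1394_IOC_INIT with a different n_frames or format will fail.
*/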
1266
1267static int dv1394_mmap(struct file *file, struct vm_area_struct *vma)
1268{
1269 struct video_card *video = file_to_video_card(file);
1270 int retval = -EINVAL;
1271
1272 /*
1273 * We cannot use the blocking variant mutex_lock here because .mmap
1274 * is called with mmap_sem held, while .ioctl, .read, .write acquire
1275 * video->mtx and subsequently call copy_to/from_user which will
1276 * grab mmap_sem in case of a page fault.
1277 */
1278 if (!mutex_trylock(&video->mtx))
1279 return -EAGAIN;
1280
1281 if ( ! video_card_initialized(video) ) {
1282 retval = do_dv1394_init_default(video);
1283 if (retval)
1284 goto out;
1285 }
1286
1287 retval = dma_region_mmap(&video->dv_buf, file, vma);
1288out:
1289 mutex_unlock(&video->mtx);
1290 return retval;
1291}
1292
1293/*** DEVICE FILE INTERFACE *************************************************/
1294
1295/* no need to serialize, multiple threads OK */
1296static unsigned int dv1394_poll(struct file *file, struct poll_table_struct *wait)
1297{
1298 struct video_card *video = file_to_video_card(file);
1299 unsigned int mask = 0;
1300 unsigned long flags;
1301
1302 poll_wait(file, &video->waitq, wait);
1303
1304 spin_lock_irqsave(&video->spinlock, flags);
1305 if ( video->n_frames == 0 ) {
1306
1307 } else if ( video->active_frame == -1 ) {
1308 /* nothing going on */
1309 mask |= POLLOUT;
1310 } else {
1311 /* any clear/ready buffers? */
1312 if (video->n_clear_frames >0)
1313 mask |= POLLOUT | POLLIN;
1314 }
1315 spin_unlock_irqrestore(&video->spinlock, flags);
1316
1317 return mask;
1318}
1319
1320static int dv1394_fasync(int fd, struct file *file, int on)
1321{
1322 /* I just copied this code verbatim from Alan Cox's mouse driver example
1323 (Documentation/DocBook/) */
1324
1325 struct video_card *video = file_to_video_card(file);
1326
1327 return fasync_helper(fd, file, on, &video->fasync);
1328}
1329
1330static ssize_t dv1394_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
1331{
1332 struct video_card *video = file_to_video_card(file);
1333 DECLARE_WAITQUEUE(wait, current);
1334 ssize_t ret;
1335 size_t cnt;
1336 unsigned long flags;
1337 int target_frame;
1338
1339 /* serialize this to prevent multi-threaded mayhem */
1340 if (file->f_flags & O_NONBLOCK) {
1341 if (!mutex_trylock(&video->mtx))
1342 return -EAGAIN;
1343 } else {
1344 if (mutex_lock_interruptible(&video->mtx))
1345 return -ERESTARTSYS;
1346 }
1347
1348 if ( !video_card_initialized(video) ) {
1349 ret = do_dv1394_init_default(video);
1350 if (ret) {
1351 mutex_unlock(&video->mtx);
1352 return ret;
1353 }
1354 }
1355
1356 ret = 0;
1357 add_wait_queue(&video->waitq, &wait);
1358
1359 while (count > 0) {
1360
1361 /* must set TASK_INTERRUPTIBLE *before* checking for free
1362 buffers; otherwise we could miss a wakeup if the interrupt
1363 fires between the check and the schedule() */
1364
1365 set_current_state(TASK_INTERRUPTIBLE);
1366
1367 spin_lock_irqsave(&video->spinlock, flags);
1368
1369 target_frame = video->first_clear_frame;
1370
1371 spin_unlock_irqrestore(&video->spinlock, flags);
1372
1373 if (video->frames[target_frame]->state == FRAME_CLEAR) {
1374
1375 /* how much room is left in the target frame buffer */
1376 cnt = video->frame_size - (video->write_off - target_frame * video->frame_size);
1377
1378 } else {
1379 /* buffer is already used */
1380 cnt = 0;
1381 }
1382
1383 if (cnt > count)
1384 cnt = count;
1385
1386 if (cnt <= 0) {
1387 /* no room left, gotta wait */
1388 if (file->f_flags & O_NONBLOCK) {
1389 if (!ret)
1390 ret = -EAGAIN;
1391 break;
1392 }
1393 if (signal_pending(current)) {
1394 if (!ret)
1395 ret = -ERESTARTSYS;
1396 break;
1397 }
1398
1399 schedule();
1400
1401 continue; /* start over from 'while(count > 0)...' */
1402 }
1403
1404 if (copy_from_user(video->dv_buf.kvirt + video->write_off, buffer, cnt)) {
1405 if (!ret)
1406 ret = -EFAULT;
1407 break;
1408 }
1409
1410 video->write_off = (video->write_off + cnt) % (video->n_frames * video->frame_size);
1411
1412 count -= cnt;
1413 buffer += cnt;
1414 ret += cnt;
1415
1416 if (video->write_off == video->frame_size * ((target_frame + 1) % video->n_frames))
1417 frame_prepare(video, target_frame);
1418 }
1419
1420 remove_wait_queue(&video->waitq, &wait);
1421 set_current_state(TASK_RUNNING);
1422 mutex_unlock(&video->mtx);
1423 return ret;
1424}
1425
1426
1427static ssize_t dv1394_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
1428{
1429 struct video_card *video = file_to_video_card(file);
1430 DECLARE_WAITQUEUE(wait, current);
1431 ssize_t ret;
1432 size_t cnt;
1433 unsigned long flags;
1434 int target_frame;
1435
1436 /* serialize this to prevent multi-threaded mayhem */
1437 if (file->f_flags & O_NONBLOCK) {
1438 if (!mutex_trylock(&video->mtx))
1439 return -EAGAIN;
1440 } else {
1441 if (mutex_lock_interruptible(&video->mtx))
1442 return -ERESTARTSYS;
1443 }
1444
1445 if ( !video_card_initialized(video) ) {
1446 ret = do_dv1394_init_default(video);
1447 if (ret) {
1448 mutex_unlock(&video->mtx);
1449 return ret;
1450 }
1451 video->continuity_counter = -1;
1452
1453 receive_packets(video);
1454
1455 start_dma_receive(video);
1456 }
1457
1458 ret = 0;
1459 add_wait_queue(&video->waitq, &wait);
1460
1461 while (count > 0) {
1462
1463 /* must set TASK_INTERRUPTIBLE *before* checking for free
1464 buffers; otherwise we could miss a wakeup if the interrupt
1465 fires between the check and the schedule() */
1466
1467 set_current_state(TASK_INTERRUPTIBLE);
1468
1469 spin_lock_irqsave(&video->spinlock, flags);
1470
1471 target_frame = video->first_clear_frame;
1472
1473 spin_unlock_irqrestore(&video->spinlock, flags);
1474
1475 if (target_frame >= 0 &&
1476 video->n_clear_frames > 0 &&
1477 video->frames[target_frame]->state == FRAME_CLEAR) {
1478
1479			/* how much data is left in the target frame buffer */
1480 cnt = video->frame_size - (video->write_off - target_frame * video->frame_size);
1481
1482 } else {
1483			/* no received frame is ready to copy out yet */
1484 cnt = 0;
1485 }
1486
1487 if (cnt > count)
1488 cnt = count;
1489
1490 if (cnt <= 0) {
1491 /* no room left, gotta wait */
1492 if (file->f_flags & O_NONBLOCK) {
1493 if (!ret)
1494 ret = -EAGAIN;
1495 break;
1496 }
1497 if (signal_pending(current)) {
1498 if (!ret)
1499 ret = -ERESTARTSYS;
1500 break;
1501 }
1502
1503 schedule();
1504
1505 continue; /* start over from 'while(count > 0)...' */
1506 }
1507
1508 if (copy_to_user(buffer, video->dv_buf.kvirt + video->write_off, cnt)) {
1509 if (!ret)
1510 ret = -EFAULT;
1511 break;
1512 }
1513
1514 video->write_off = (video->write_off + cnt) % (video->n_frames * video->frame_size);
1515
1516 count -= cnt;
1517 buffer += cnt;
1518 ret += cnt;
1519
1520 if (video->write_off == video->frame_size * ((target_frame + 1) % video->n_frames)) {
1521 spin_lock_irqsave(&video->spinlock, flags);
1522 video->n_clear_frames--;
1523 video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
1524 spin_unlock_irqrestore(&video->spinlock, flags);
1525 }
1526 }
1527
1528 remove_wait_queue(&video->waitq, &wait);
1529 set_current_state(TASK_RUNNING);
1530 mutex_unlock(&video->mtx);
1531 return ret;
1532}
1533
1534
1535/*** DEVICE IOCTL INTERFACE ************************************************/
1536
1537static long dv1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1538{
1539 struct video_card *video = file_to_video_card(file);
1540 unsigned long flags;
1541 int ret = -EINVAL;
1542 void __user *argp = (void __user *)arg;
1543
1544 DECLARE_WAITQUEUE(wait, current);
1545
1546 /* serialize this to prevent multi-threaded mayhem */
1547 if (file->f_flags & O_NONBLOCK) {
1548 if (!mutex_trylock(&video->mtx))
1549 return -EAGAIN;
1550 } else {
1551 if (mutex_lock_interruptible(&video->mtx))
1552 return -ERESTARTSYS;
1553 }
1554
1555 switch(cmd)
1556 {
1557 case DV1394_IOC_SUBMIT_FRAMES: {
1558 unsigned int n_submit;
1559
1560 if ( !video_card_initialized(video) ) {
1561 ret = do_dv1394_init_default(video);
1562 if (ret)
1563 goto out;
1564 }
1565
1566 n_submit = (unsigned int) arg;
1567
1568 if (n_submit > video->n_frames) {
1569 ret = -EINVAL;
1570 goto out;
1571 }
1572
1573 while (n_submit > 0) {
1574
1575 add_wait_queue(&video->waitq, &wait);
1576 set_current_state(TASK_INTERRUPTIBLE);
1577
1578 spin_lock_irqsave(&video->spinlock, flags);
1579
1580 /* wait until video->first_clear_frame is really CLEAR */
1581 while (video->frames[video->first_clear_frame]->state != FRAME_CLEAR) {
1582
1583 spin_unlock_irqrestore(&video->spinlock, flags);
1584
1585 if (signal_pending(current)) {
1586 remove_wait_queue(&video->waitq, &wait);
1587 set_current_state(TASK_RUNNING);
1588 ret = -EINTR;
1589 goto out;
1590 }
1591
1592 schedule();
1593 set_current_state(TASK_INTERRUPTIBLE);
1594
1595 spin_lock_irqsave(&video->spinlock, flags);
1596 }
1597 spin_unlock_irqrestore(&video->spinlock, flags);
1598
1599 remove_wait_queue(&video->waitq, &wait);
1600 set_current_state(TASK_RUNNING);
1601
1602 frame_prepare(video, video->first_clear_frame);
1603
1604 n_submit--;
1605 }
1606
1607 ret = 0;
1608 break;
1609 }
1610
1611 case DV1394_IOC_WAIT_FRAMES: {
1612 unsigned int n_wait;
1613
1614 if ( !video_card_initialized(video) ) {
1615 ret = -EINVAL;
1616 goto out;
1617 }
1618
1619 n_wait = (unsigned int) arg;
1620
1621 /* since we re-run the last frame on underflow, we will
1622 never actually have n_frames clear frames; at most only
1623 n_frames - 1 */
1624
1625 if (n_wait > (video->n_frames-1) ) {
1626 ret = -EINVAL;
1627 goto out;
1628 }
1629
1630 add_wait_queue(&video->waitq, &wait);
1631 set_current_state(TASK_INTERRUPTIBLE);
1632
1633 spin_lock_irqsave(&video->spinlock, flags);
1634
1635 while (video->n_clear_frames < n_wait) {
1636
1637 spin_unlock_irqrestore(&video->spinlock, flags);
1638
1639 if (signal_pending(current)) {
1640 remove_wait_queue(&video->waitq, &wait);
1641 set_current_state(TASK_RUNNING);
1642 ret = -EINTR;
1643 goto out;
1644 }
1645
1646 schedule();
1647 set_current_state(TASK_INTERRUPTIBLE);
1648
1649 spin_lock_irqsave(&video->spinlock, flags);
1650 }
1651
1652 spin_unlock_irqrestore(&video->spinlock, flags);
1653
1654 remove_wait_queue(&video->waitq, &wait);
1655 set_current_state(TASK_RUNNING);
1656 ret = 0;
1657 break;
1658 }
1659
1660 case DV1394_IOC_RECEIVE_FRAMES: {
1661 unsigned int n_recv;
1662
1663 if ( !video_card_initialized(video) ) {
1664 ret = -EINVAL;
1665 goto out;
1666 }
1667
1668 n_recv = (unsigned int) arg;
1669
1670 /* at least one frame must be active */
1671 if (n_recv > (video->n_frames-1) ) {
1672 ret = -EINVAL;
1673 goto out;
1674 }
1675
1676 spin_lock_irqsave(&video->spinlock, flags);
1677
1678 /* release the clear frames */
1679 video->n_clear_frames -= n_recv;
1680
1681 /* advance the clear frame cursor */
1682 video->first_clear_frame = (video->first_clear_frame + n_recv) % video->n_frames;
1683
1684 /* reset dropped_frames */
1685 video->dropped_frames = 0;
1686
1687 spin_unlock_irqrestore(&video->spinlock, flags);
1688
1689 ret = 0;
1690 break;
1691 }
1692
1693 case DV1394_IOC_START_RECEIVE: {
1694 if ( !video_card_initialized(video) ) {
1695 ret = do_dv1394_init_default(video);
1696 if (ret)
1697 goto out;
1698 }
1699
1700 video->continuity_counter = -1;
1701
1702 receive_packets(video);
1703
1704 start_dma_receive(video);
1705
1706 ret = 0;
1707 break;
1708 }
1709
1710 case DV1394_IOC_INIT: {
1711 struct dv1394_init init;
1712 if (!argp) {
1713 ret = do_dv1394_init_default(video);
1714 } else {
1715 if (copy_from_user(&init, argp, sizeof(init))) {
1716 ret = -EFAULT;
1717 goto out;
1718 }
1719 ret = do_dv1394_init(video, &init);
1720 }
1721 break;
1722 }
1723
1724 case DV1394_IOC_SHUTDOWN:
1725 do_dv1394_shutdown(video, 0);
1726 ret = 0;
1727 break;
1728
1729
1730 case DV1394_IOC_GET_STATUS: {
1731 struct dv1394_status status;
1732
1733 if ( !video_card_initialized(video) ) {
1734 ret = -EINVAL;
1735 goto out;
1736 }
1737
1738 status.init.api_version = DV1394_API_VERSION;
1739 status.init.channel = video->channel;
1740 status.init.n_frames = video->n_frames;
1741 status.init.format = video->pal_or_ntsc;
1742 status.init.cip_n = video->cip_n;
1743 status.init.cip_d = video->cip_d;
1744 status.init.syt_offset = video->syt_offset;
1745
1746 status.first_clear_frame = video->first_clear_frame;
1747
1748 /* the rest of the fields need to be locked against the interrupt */
1749 spin_lock_irqsave(&video->spinlock, flags);
1750
1751 status.active_frame = video->active_frame;
1752 status.n_clear_frames = video->n_clear_frames;
1753
1754 status.dropped_frames = video->dropped_frames;
1755
1756 /* reset dropped_frames */
1757 video->dropped_frames = 0;
1758
1759 spin_unlock_irqrestore(&video->spinlock, flags);
1760
1761 if (copy_to_user(argp, &status, sizeof(status))) {
1762 ret = -EFAULT;
1763 goto out;
1764 }
1765
1766 ret = 0;
1767 break;
1768 }
1769
1770 default:
1771 break;
1772 }
1773
1774 out:
1775 mutex_unlock(&video->mtx);
1776 return ret;
1777}
1778
1779/*** DEVICE FILE INTERFACE CONTINUED ***************************************/
1780
1781static int dv1394_open(struct inode *inode, struct file *file)
1782{
1783 struct video_card *video = NULL;
1784
1785 if (file->private_data) {
1786 video = file->private_data;
1787
1788 } else {
1789 /* look up the card by ID */
1790 unsigned long flags;
1791 int idx = ieee1394_file_to_instance(file);
1792
1793 spin_lock_irqsave(&dv1394_cards_lock, flags);
1794 if (!list_empty(&dv1394_cards)) {
1795 struct video_card *p;
1796 list_for_each_entry(p, &dv1394_cards, list) {
1797 if ((p->id) == idx) {
1798 video = p;
1799 break;
1800 }
1801 }
1802 }
1803 spin_unlock_irqrestore(&dv1394_cards_lock, flags);
1804
1805 if (!video) {
1806			debug_printk("dv1394: OHCI card %d not found\n", idx);
1807 return -ENODEV;
1808 }
1809
1810 file->private_data = (void*) video;
1811 }
1812
1813#ifndef DV1394_ALLOW_MORE_THAN_ONE_OPEN
1814
1815 if ( test_and_set_bit(0, &video->open) ) {
1816 /* video is already open by someone else */
1817 return -EBUSY;
1818 }
1819
1820#endif
1821
1822 printk(KERN_INFO "%s: NOTE, the dv1394 interface is unsupported "
1823 "and will not be available in the new firewire driver stack. "
1824 "Try libraw1394 based programs instead.\n", current->comm);
1825
1826 return nonseekable_open(inode, file);
1827}
1828
1829
1830static int dv1394_release(struct inode *inode, struct file *file)
1831{
1832 struct video_card *video = file_to_video_card(file);
1833
1834 /* OK to free the DMA buffer, no more mappings can exist */
1835 do_dv1394_shutdown(video, 1);
1836
1837 /* give someone else a turn */
1838 clear_bit(0, &video->open);
1839
1840 return 0;
1841}
1842
1843
1844/*** DEVICE DRIVER HANDLERS ************************************************/
1845
1846static void it_tasklet_func(unsigned long data)
1847{
1848 int wake = 0;
1849 struct video_card *video = (struct video_card*) data;
1850
1851 spin_lock(&video->spinlock);
1852
1853 if (!video->dma_running)
1854 goto out;
1855
1856 irq_printk("ContextControl = %08x, CommandPtr = %08x\n",
1857 reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
1858 reg_read(video->ohci, video->ohci_IsoXmitCommandPtr)
1859 );
1860
1861
1862 if ( (video->ohci_it_ctx != -1) &&
1863 (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) ) {
1864
1865 struct frame *f;
1866 unsigned int frame, i;
1867
1868
1869 if (video->active_frame == -1)
1870 frame = 0;
1871 else
1872 frame = video->active_frame;
1873
1874 /* check all the DMA-able frames */
1875 for (i = 0; i < video->n_frames; i++, frame = (frame+1) % video->n_frames) {
1876
1877 irq_printk("IRQ checking frame %d...", frame);
1878 f = video->frames[frame];
1879 if (f->state != FRAME_READY) {
1880 irq_printk("clear, skipping\n");
1881 /* we don't own this frame */
1882 continue;
1883 }
1884
1885 irq_printk("DMA\n");
1886
1887 /* check the frame begin semaphore to see if we can free the previous frame */
1888 if ( *(f->frame_begin_timestamp) ) {
1889 int prev_frame;
1890 struct frame *prev_f;
1891
1892
1893
1894 /* don't reset, need this later *(f->frame_begin_timestamp) = 0; */
1895 irq_printk(" BEGIN\n");
1896
1897 prev_frame = frame - 1;
1898 if (prev_frame == -1)
1899 prev_frame += video->n_frames;
1900 prev_f = video->frames[prev_frame];
1901
1902 /* make sure we can actually garbage collect
1903 this frame */
1904 if ( (prev_f->state == FRAME_READY) &&
1905 prev_f->done && (!f->done) )
1906 {
1907 frame_reset(prev_f);
1908 video->n_clear_frames++;
1909 wake = 1;
1910 video->active_frame = frame;
1911
1912 irq_printk(" BEGIN - freeing previous frame %d, new active frame is %d\n", prev_frame, frame);
1913 } else {
1914 irq_printk(" BEGIN - can't free yet\n");
1915 }
1916
1917 f->done = 1;
1918 }
1919
1920
1921 /* see if we need to set the timestamp for the next frame */
1922 if ( *(f->mid_frame_timestamp) ) {
1923 struct frame *next_frame;
1924 u32 begin_ts, ts_cyc, ts_off;
1925
1926 *(f->mid_frame_timestamp) = 0;
1927
1928 begin_ts = le32_to_cpu(*(f->frame_begin_timestamp));
1929
1930 irq_printk(" MIDDLE - first packet was sent at cycle %4u (%2u), assigned timestamp was (%2u) %4u\n",
1931 begin_ts & 0x1FFF, begin_ts & 0xF,
1932 f->assigned_timestamp >> 12, f->assigned_timestamp & 0xFFF);
1933
1934 /* prepare next frame and assign timestamp */
1935 next_frame = video->frames[ (frame+1) % video->n_frames ];
1936
1937 if (next_frame->state == FRAME_READY) {
1938 irq_printk(" MIDDLE - next frame is ready, good\n");
1939 } else {
1940 debug_printk("dv1394: Underflow! At least one frame has been dropped.\n");
1941 next_frame = f;
1942 }
1943
1944 /* set the timestamp to the timestamp of the last frame sent,
1945 plus the length of the last frame sent, plus the syt latency */
1946 ts_cyc = begin_ts & 0xF;
1947 /* advance one frame, plus syt latency (typically 2-3) */
1948 ts_cyc += f->n_packets + video->syt_offset ;
1949
1950 ts_off = 0;
1951
1952 ts_cyc += ts_off/3072;
1953 ts_off %= 3072;
1954
1955 next_frame->assigned_timestamp = ((ts_cyc&0xF) << 12) + ts_off;
1956 if (next_frame->cip_syt1) {
1957 next_frame->cip_syt1->b[6] = next_frame->assigned_timestamp >> 8;
1958 next_frame->cip_syt1->b[7] = next_frame->assigned_timestamp & 0xFF;
1959 }
1960 if (next_frame->cip_syt2) {
1961 next_frame->cip_syt2->b[6] = next_frame->assigned_timestamp >> 8;
1962 next_frame->cip_syt2->b[7] = next_frame->assigned_timestamp & 0xFF;
1963 }
1964
1965 }
1966
1967 /* see if the frame looped */
1968 if ( *(f->frame_end_timestamp) ) {
1969
1970 *(f->frame_end_timestamp) = 0;
1971
1972 debug_printk(" END - the frame looped at least once\n");
1973
1974 video->dropped_frames++;
1975 }
1976
1977 } /* for (each frame) */
1978 }
1979
1980 if (wake) {
1981 kill_fasync(&video->fasync, SIGIO, POLL_OUT);
1982
1983 /* wake readers/writers/ioctl'ers */
1984 wake_up_interruptible(&video->waitq);
1985 }
1986
1987out:
1988 spin_unlock(&video->spinlock);
1989}
1990
1991static void ir_tasklet_func(unsigned long data)
1992{
1993 int wake = 0;
1994 struct video_card *video = (struct video_card*) data;
1995
1996 spin_lock(&video->spinlock);
1997
1998 if (!video->dma_running)
1999 goto out;
2000
2001 if ( (video->ohci_ir_ctx != -1) &&
2002 (reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) ) {
2003
2004 int sof=0; /* start-of-frame flag */
2005 struct frame *f;
2006 u16 packet_length;
2007 int i, dbc=0;
2008 struct DMA_descriptor_block *block = NULL;
2009 u16 xferstatus;
2010
2011 int next_i, prev_i;
2012 struct DMA_descriptor_block *next = NULL;
2013 dma_addr_t next_dma = 0;
2014 struct DMA_descriptor_block *prev = NULL;
2015
2016 /* loop over all descriptors in all frames */
2017 for (i = 0; i < video->n_frames*MAX_PACKETS; i++) {
2018 struct packet *p = dma_region_i(&video->packet_buf, struct packet, video->current_packet);
2019
2020 /* make sure we are seeing the latest changes to p */
2021 dma_region_sync_for_cpu(&video->packet_buf,
2022 (unsigned long) p - (unsigned long) video->packet_buf.kvirt,
2023 sizeof(struct packet));
2024
2025 packet_length = le16_to_cpu(p->data_length);
2026
2027 /* get the descriptor based on packet_buffer cursor */
2028 f = video->frames[video->current_packet / MAX_PACKETS];
2029 block = &(f->descriptor_pool[video->current_packet % MAX_PACKETS]);
2030 xferstatus = le32_to_cpu(block->u.in.il.q[3]) >> 16;
2031 xferstatus &= 0x1F;
2032 irq_printk("ir_tasklet_func: xferStatus/resCount [%d] = 0x%08x\n", i, le32_to_cpu(block->u.in.il.q[3]) );
2033
2034 /* get the current frame */
2035 f = video->frames[video->active_frame];
2036
2037 /* exclude empty packet */
2038 if (packet_length > 8 && xferstatus == 0x11) {
2039 /* check for start of frame */
2040 /* DRD> Changed to check section type ([0]>>5==0)
2041 and dif sequence ([1]>>4==0) */
2042 sof = ( (p->data[0] >> 5) == 0 && (p->data[1] >> 4) == 0);
2043
2044 dbc = (int) (p->cip_h1 >> 24);
2045 if ( video->continuity_counter != -1 && dbc > ((video->continuity_counter + 1) % 256) )
2046 {
2047 printk(KERN_WARNING "dv1394: discontinuity detected, dropping all frames\n" );
2048 video->dropped_frames += video->n_clear_frames + 1;
2049 video->first_frame = 0;
2050 video->n_clear_frames = 0;
2051 video->first_clear_frame = -1;
2052 }
2053 video->continuity_counter = dbc;
2054
2055 if (!video->first_frame) {
2056 if (sof) {
2057 video->first_frame = 1;
2058 }
2059
2060 } else if (sof) {
2061 /* close current frame */
2062 frame_reset(f); /* f->state = STATE_CLEAR */
2063 video->n_clear_frames++;
2064 if (video->n_clear_frames > video->n_frames) {
2065 video->dropped_frames++;
2066 printk(KERN_WARNING "dv1394: dropped a frame during reception\n" );
2067 video->n_clear_frames = video->n_frames-1;
2068 video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
2069 }
2070 if (video->first_clear_frame == -1)
2071 video->first_clear_frame = video->active_frame;
2072
2073 /* get the next frame */
2074 video->active_frame = (video->active_frame + 1) % video->n_frames;
2075 f = video->frames[video->active_frame];
2076 irq_printk(" frame received, active_frame = %d, n_clear_frames = %d, first_clear_frame = %d\n",
2077 video->active_frame, video->n_clear_frames, video->first_clear_frame);
2078 }
2079 if (video->first_frame) {
2080 if (sof) {
2081 /* open next frame */
2082 f->state = FRAME_READY;
2083 }
2084
2085 /* copy to buffer */
2086 if (f->n_packets > (video->frame_size / 480)) {
2087				printk(KERN_ERR "dv1394: frame buffer overflow during receive\n");
2088 }
2089
2090 frame_put_packet(f, p);
2091
2092 } /* first_frame */
2093 }
2094
2095 /* stop, end of ready packets */
2096 else if (xferstatus == 0) {
2097 break;
2098 }
2099
2100 /* reset xferStatus & resCount */
2101 block->u.in.il.q[3] = cpu_to_le32(512);
2102
2103 /* terminate dma chain at this (next) packet */
2104 next_i = video->current_packet;
2105 f = video->frames[next_i / MAX_PACKETS];
2106 next = &(f->descriptor_pool[next_i % MAX_PACKETS]);
2107 next_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
2108 next->u.in.il.q[0] |= cpu_to_le32(3 << 20); /* enable interrupt */
2109 next->u.in.il.q[2] = cpu_to_le32(0); /* disable branch */
2110
2111 /* link previous to next */
2112 prev_i = (next_i == 0) ? (MAX_PACKETS * video->n_frames - 1) : (next_i - 1);
2113 f = video->frames[prev_i / MAX_PACKETS];
2114 prev = &(f->descriptor_pool[prev_i % MAX_PACKETS]);
2115 if (prev_i % (MAX_PACKETS/2)) {
2116 prev->u.in.il.q[0] &= ~cpu_to_le32(3 << 20); /* no interrupt */
2117 } else {
2118 prev->u.in.il.q[0] |= cpu_to_le32(3 << 20); /* enable interrupt */
2119 }
2120 prev->u.in.il.q[2] = cpu_to_le32(next_dma | 1); /* set Z=1 */
2121 wmb();
2122
2123 /* wake up DMA in case it fell asleep */
2124 reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
2125
2126 /* advance packet_buffer cursor */
2127 video->current_packet = (video->current_packet + 1) % (MAX_PACKETS * video->n_frames);
2128
2129 } /* for all packets */
2130
2131		wake = 1; /* wake unconditionally; a spurious wakeup is harmless */
2132
2133 } /* receive interrupt */
2134
2135 if (wake) {
2136 kill_fasync(&video->fasync, SIGIO, POLL_IN);
2137
2138 /* wake readers/writers/ioctl'ers */
2139 wake_up_interruptible(&video->waitq);
2140 }
2141
2142out:
2143 spin_unlock(&video->spinlock);
2144}
2145
2146static struct cdev dv1394_cdev;
2147static const struct file_operations dv1394_fops=
2148{
2149 .owner = THIS_MODULE,
2150 .poll = dv1394_poll,
2151 .unlocked_ioctl = dv1394_ioctl,
2152#ifdef CONFIG_COMPAT
2153 .compat_ioctl = dv1394_compat_ioctl,
2154#endif
2155 .mmap = dv1394_mmap,
2156 .open = dv1394_open,
2157 .write = dv1394_write,
2158 .read = dv1394_read,
2159 .release = dv1394_release,
2160 .fasync = dv1394_fasync,
2161 .llseek = no_llseek,
2162};
2163
2164
2165/*** HOTPLUG STUFF **********************************************************/
2166/*
2167 * Export information about protocols/devices supported by this driver.
2168 */
2169#ifdef MODULE
2170static const struct ieee1394_device_id dv1394_id_table[] = {
2171 {
2172 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2173 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
2174 .version = AVC_SW_VERSION_ENTRY & 0xffffff
2175 },
2176 { }
2177};
2178
2179MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
2180#endif /* MODULE */
2181
2182static struct hpsb_protocol_driver dv1394_driver = {
2183 .name = "dv1394",
2184};
2185
2186
2187/*** IEEE1394 HPSB CALLBACKS ***********************************************/
2188
2189static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes mode)
2190{
2191 struct video_card *video;
2192 unsigned long flags;
2193 int i;
2194
2195 video = kzalloc(sizeof(*video), GFP_KERNEL);
2196 if (!video) {
2197 printk(KERN_ERR "dv1394: cannot allocate video_card\n");
2198 return -1;
2199 }
2200
2201 video->ohci = ohci;
2202 /* lower 2 bits of id indicate which of four "plugs"
2203 per host */
2204 video->id = ohci->host->id << 2;
2205 if (format == DV1394_NTSC)
2206 video->id |= mode;
2207 else
2208 video->id |= 2 + mode;
2209
2210 video->ohci_it_ctx = -1;
2211 video->ohci_ir_ctx = -1;
2212
2213 video->ohci_IsoXmitContextControlSet = 0;
2214 video->ohci_IsoXmitContextControlClear = 0;
2215 video->ohci_IsoXmitCommandPtr = 0;
2216
2217 video->ohci_IsoRcvContextControlSet = 0;
2218 video->ohci_IsoRcvContextControlClear = 0;
2219 video->ohci_IsoRcvCommandPtr = 0;
2220 video->ohci_IsoRcvContextMatch = 0;
2221
2222 video->n_frames = 0; /* flag that video is not initialized */
2223 video->channel = 63; /* default to broadcast channel */
2224 video->active_frame = -1;
2225
2226 /* initialize the following */
2227 video->pal_or_ntsc = format;
2228 video->cip_n = 0; /* 0 = use builtin default */
2229 video->cip_d = 0;
2230 video->syt_offset = 0;
2231 video->mode = mode;
2232
2233 for (i = 0; i < DV1394_MAX_FRAMES; i++)
2234 video->frames[i] = NULL;
2235
2236 dma_region_init(&video->dv_buf);
2237 video->dv_buf_size = 0;
2238 dma_region_init(&video->packet_buf);
2239 video->packet_buf_size = 0;
2240
2241 clear_bit(0, &video->open);
2242 spin_lock_init(&video->spinlock);
2243 video->dma_running = 0;
2244 mutex_init(&video->mtx);
2245 init_waitqueue_head(&video->waitq);
2246 video->fasync = NULL;
2247
2248 spin_lock_irqsave(&dv1394_cards_lock, flags);
2249 INIT_LIST_HEAD(&video->list);
2250 list_add_tail(&video->list, &dv1394_cards);
2251 spin_unlock_irqrestore(&dv1394_cards_lock, flags);
2252
2253 debug_printk("dv1394: dv1394_init() OK on ID %d\n", video->id);
2254 return 0;
2255}
2256
2257static void dv1394_remove_host(struct hpsb_host *host)
2258{
2259 struct video_card *video, *tmp_video;
2260 unsigned long flags;
2261 int found_ohci_card = 0;
2262
2263 do {
2264 video = NULL;
2265 spin_lock_irqsave(&dv1394_cards_lock, flags);
2266 list_for_each_entry(tmp_video, &dv1394_cards, list) {
2267 if ((tmp_video->id >> 2) == host->id) {
2268 list_del(&tmp_video->list);
2269 video = tmp_video;
2270 found_ohci_card = 1;
2271 break;
2272 }
2273 }
2274 spin_unlock_irqrestore(&dv1394_cards_lock, flags);
2275
2276 if (video) {
2277 do_dv1394_shutdown(video, 1);
2278 kfree(video);
2279 }
2280 } while (video);
2281
2282 if (found_ohci_card)
2283 device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR,
2284 IEEE1394_MINOR_BLOCK_DV1394 * 16 + (host->id << 2)));
2285}
2286
2287static void dv1394_add_host(struct hpsb_host *host)
2288{
2289 struct ti_ohci *ohci;
2290 int id = host->id;
2291
2292 /* We only work with the OHCI-1394 driver */
2293 if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
2294 return;
2295
2296 ohci = (struct ti_ohci *)host->hostdata;
2297
2298 device_create(hpsb_protocol_class, NULL,
2299 MKDEV(IEEE1394_MAJOR,
2300 IEEE1394_MINOR_BLOCK_DV1394 * 16 + (id<<2)),
2301 NULL, "dv1394-%d", id);
2302
2303 dv1394_init(ohci, DV1394_NTSC, MODE_RECEIVE);
2304 dv1394_init(ohci, DV1394_NTSC, MODE_TRANSMIT);
2305 dv1394_init(ohci, DV1394_PAL, MODE_RECEIVE);
2306 dv1394_init(ohci, DV1394_PAL, MODE_TRANSMIT);
2307}
2308
2309
2310/* Bus reset handler. In the event of a bus reset, we may need to
2311 re-start the DMA contexts - otherwise the user program would
2312 end up waiting forever.
2313*/
2314
2315static void dv1394_host_reset(struct hpsb_host *host)
2316{
2317 struct video_card *video = NULL, *tmp_vid;
2318 unsigned long flags;
2319
2320 /* We only work with the OHCI-1394 driver */
2321 if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
2322 return;
2323
2324 /* find the corresponding video_cards */
2325 spin_lock_irqsave(&dv1394_cards_lock, flags);
2326 list_for_each_entry(tmp_vid, &dv1394_cards, list) {
2327 if ((tmp_vid->id >> 2) == host->id) {
2328 video = tmp_vid;
2329 break;
2330 }
2331 }
2332 spin_unlock_irqrestore(&dv1394_cards_lock, flags);
2333
2334 if (!video)
2335 return;
2336
2337
2338 spin_lock_irqsave(&video->spinlock, flags);
2339
2340 if (!video->dma_running)
2341 goto out;
2342
2343 /* check IT context */
2344 if (video->ohci_it_ctx != -1) {
2345 u32 ctx;
2346
2347 ctx = reg_read(video->ohci, video->ohci_IsoXmitContextControlSet);
2348
2349 /* if (RUN but not ACTIVE) */
2350 if ( (ctx & (1<<15)) &&
2351 !(ctx & (1<<10)) ) {
2352
2353 debug_printk("dv1394: IT context stopped due to bus reset; waking it up\n");
2354
2355 /* to be safe, assume a frame has been dropped. User-space programs
2356 should handle this condition like an underflow. */
2357 video->dropped_frames++;
2358
2359 /* for some reason you must clear, then re-set the RUN bit to restart DMA */
2360
2361 /* clear RUN */
2362 reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, (1 << 15));
2363 flush_pci_write(video->ohci);
2364
2365 /* set RUN */
2366 reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, (1 << 15));
2367 flush_pci_write(video->ohci);
2368
2369 /* set the WAKE bit (just in case; this isn't strictly necessary) */
2370 reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, (1 << 12));
2371 flush_pci_write(video->ohci);
2372
2373 irq_printk("dv1394: AFTER IT restart ctx 0x%08x ptr 0x%08x\n",
2374 reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
2375 reg_read(video->ohci, video->ohci_IsoXmitCommandPtr));
2376 }
2377 }
2378
2379 /* check IR context */
2380 if (video->ohci_ir_ctx != -1) {
2381 u32 ctx;
2382
2383 ctx = reg_read(video->ohci, video->ohci_IsoRcvContextControlSet);
2384
2385 /* if (RUN but not ACTIVE) */
2386 if ( (ctx & (1<<15)) &&
2387 !(ctx & (1<<10)) ) {
2388
2389 debug_printk("dv1394: IR context stopped due to bus reset; waking it up\n");
2390
2391 /* to be safe, assume a frame has been dropped. User-space programs
2392 should handle this condition like an overflow. */
2393 video->dropped_frames++;
2394
2395 /* for some reason you must clear, then re-set the RUN bit to restart DMA */
2396 /* XXX this doesn't work for me, I can't get IR DMA to restart :[ */
2397
2398 /* clear RUN */
2399 reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, (1 << 15));
2400 flush_pci_write(video->ohci);
2401
2402 /* set RUN */
2403 reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 15));
2404 flush_pci_write(video->ohci);
2405
2406 /* set the WAKE bit (just in case; this isn't strictly necessary) */
2407 reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
2408 flush_pci_write(video->ohci);
2409
2410 irq_printk("dv1394: AFTER IR restart ctx 0x%08x ptr 0x%08x\n",
2411 reg_read(video->ohci, video->ohci_IsoRcvContextControlSet),
2412 reg_read(video->ohci, video->ohci_IsoRcvCommandPtr));
2413 }
2414 }
2415
2416out:
2417 spin_unlock_irqrestore(&video->spinlock, flags);
2418
2419 /* wake readers/writers/ioctl'ers */
2420 wake_up_interruptible(&video->waitq);
2421}
2422
2423static struct hpsb_highlevel dv1394_highlevel = {
2424 .name = "dv1394",
2425 .add_host = dv1394_add_host,
2426 .remove_host = dv1394_remove_host,
2427 .host_reset = dv1394_host_reset,
2428};
2429
2430#ifdef CONFIG_COMPAT
2431
2432#define DV1394_IOC32_INIT _IOW('#', 0x06, struct dv1394_init32)
2433#define DV1394_IOC32_GET_STATUS _IOR('#', 0x0c, struct dv1394_status32)
2434
2435struct dv1394_init32 {
2436 u32 api_version;
2437 u32 channel;
2438 u32 n_frames;
2439 u32 format;
2440 u32 cip_n;
2441 u32 cip_d;
2442 u32 syt_offset;
2443};
2444
2445struct dv1394_status32 {
2446 struct dv1394_init32 init;
2447 s32 active_frame;
2448 u32 first_clear_frame;
2449 u32 n_clear_frames;
2450 u32 dropped_frames;
2451};
2452
2453/* RED-PEN: this should use compat_alloc_user_space() instead */
2454
2455static int handle_dv1394_init(struct file *file, unsigned int cmd, unsigned long arg)
2456{
2457 struct dv1394_init32 dv32;
2458 struct dv1394_init dv;
2459 mm_segment_t old_fs;
2460 int ret;
2461
2462 if (file->f_op->unlocked_ioctl != dv1394_ioctl)
2463 return -EFAULT;
2464
2465 if (copy_from_user(&dv32, (void __user *)arg, sizeof(dv32)))
2466 return -EFAULT;
2467
2468 dv.api_version = dv32.api_version;
2469 dv.channel = dv32.channel;
2470 dv.n_frames = dv32.n_frames;
2471 dv.format = dv32.format;
2472 dv.cip_n = (unsigned long)dv32.cip_n;
2473 dv.cip_d = (unsigned long)dv32.cip_d;
2474 dv.syt_offset = dv32.syt_offset;
2475
2476 old_fs = get_fs();
2477 set_fs(KERNEL_DS);
2478 ret = dv1394_ioctl(file, DV1394_IOC_INIT, (unsigned long)&dv);
2479 set_fs(old_fs);
2480
2481 return ret;
2482}
2483
2484static int handle_dv1394_get_status(struct file *file, unsigned int cmd, unsigned long arg)
2485{
2486 struct dv1394_status32 dv32;
2487 struct dv1394_status dv;
2488 mm_segment_t old_fs;
2489 int ret;
2490
2491 if (file->f_op->unlocked_ioctl != dv1394_ioctl)
2492 return -EFAULT;
2493
2494 old_fs = get_fs();
2495 set_fs(KERNEL_DS);
2496 ret = dv1394_ioctl(file, DV1394_IOC_GET_STATUS, (unsigned long)&dv);
2497 set_fs(old_fs);
2498
2499 if (!ret) {
2500 dv32.init.api_version = dv.init.api_version;
2501 dv32.init.channel = dv.init.channel;
2502 dv32.init.n_frames = dv.init.n_frames;
2503 dv32.init.format = dv.init.format;
2504 dv32.init.cip_n = (u32)dv.init.cip_n;
2505 dv32.init.cip_d = (u32)dv.init.cip_d;
2506 dv32.init.syt_offset = dv.init.syt_offset;
2507 dv32.active_frame = dv.active_frame;
2508 dv32.first_clear_frame = dv.first_clear_frame;
2509 dv32.n_clear_frames = dv.n_clear_frames;
2510 dv32.dropped_frames = dv.dropped_frames;
2511
2512 if (copy_to_user((struct dv1394_status32 __user *)arg, &dv32, sizeof(dv32)))
2513 ret = -EFAULT;
2514 }
2515
2516 return ret;
2517}
2518
2519
2520
2521static long dv1394_compat_ioctl(struct file *file, unsigned int cmd,
2522 unsigned long arg)
2523{
2524 switch (cmd) {
2525 case DV1394_IOC_SHUTDOWN:
2526 case DV1394_IOC_SUBMIT_FRAMES:
2527 case DV1394_IOC_WAIT_FRAMES:
2528 case DV1394_IOC_RECEIVE_FRAMES:
2529 case DV1394_IOC_START_RECEIVE:
2530 return dv1394_ioctl(file, cmd, arg);
2531
2532 case DV1394_IOC32_INIT:
2533 return handle_dv1394_init(file, cmd, arg);
2534 case DV1394_IOC32_GET_STATUS:
2535 return handle_dv1394_get_status(file, cmd, arg);
2536 default:
2537 return -ENOIOCTLCMD;
2538 }
2539}
2540
2541#endif /* CONFIG_COMPAT */
2542
2543
2544/*** KERNEL MODULE HANDLERS ************************************************/
2545
2546MODULE_AUTHOR("Dan Maas <dmaas@dcine.com>, Dan Dennedy <dan@dennedy.org>");
2547MODULE_DESCRIPTION("driver for DV input/output on OHCI boards");
2548MODULE_SUPPORTED_DEVICE("dv1394");
2549MODULE_LICENSE("GPL");
2550
2551static void __exit dv1394_exit_module(void)
2552{
2553 hpsb_unregister_protocol(&dv1394_driver);
2554 hpsb_unregister_highlevel(&dv1394_highlevel);
2555 cdev_del(&dv1394_cdev);
2556}
2557
2558static int __init dv1394_init_module(void)
2559{
2560 int ret;
2561
2562 cdev_init(&dv1394_cdev, &dv1394_fops);
2563 dv1394_cdev.owner = THIS_MODULE;
2564 ret = cdev_add(&dv1394_cdev, IEEE1394_DV1394_DEV, 16);
2565 if (ret) {
2566 printk(KERN_ERR "dv1394: unable to register character device\n");
2567 return ret;
2568 }
2569
2570 hpsb_register_highlevel(&dv1394_highlevel);
2571
2572 ret = hpsb_register_protocol(&dv1394_driver);
2573 if (ret) {
2574 printk(KERN_ERR "dv1394: failed to register protocol\n");
2575 hpsb_unregister_highlevel(&dv1394_highlevel);
2576 cdev_del(&dv1394_cdev);
2577 return ret;
2578 }
2579
2580 return 0;
2581}
2582
2583module_init(dv1394_init_module);
2584module_exit(dv1394_exit_module);
diff --git a/drivers/ieee1394/dv1394.h b/drivers/ieee1394/dv1394.h
deleted file mode 100644
index 5807f5289810..000000000000
--- a/drivers/ieee1394/dv1394.h
+++ /dev/null
@@ -1,305 +0,0 @@
1/*
2 * dv1394.h - DV input/output over IEEE 1394 on OHCI chips
3 * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
4 * receive by Dan Dennedy <dan@dennedy.org>
5 *
6 * based on:
7 * video1394.h - driver for OHCI 1394 boards
8 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
9 * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software Foundation,
23 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */
25
26#ifndef _DV_1394_H
27#define _DV_1394_H
28
29/* This is the public user-space interface. Try not to break it. */
30
31#define DV1394_API_VERSION 0x20011127
32
33/* ********************
34 ** **
35 ** DV1394 API **
36 ** **
37 ********************
38
39 There are two methods of operating the DV1394 DV output device.
40
41 1)
42
43 The simplest is an interface based on write(): simply write
44 full DV frames of data to the device, and they will be transmitted
45 as quickly as possible. The FD may be set for non-blocking I/O,
46 in which case you can use select() or poll() to wait for output
47 buffer space.
48
49 To set the DV output parameters (e.g. whether you want NTSC or PAL
50 video), use the DV1394_INIT ioctl, passing in the parameters you
51 want in a struct dv1394_init.
52
53 Example 1:
54 To play a raw .DV file: cat foo.DV > /dev/dv1394
55 (cat will use write() internally)
56
57 Example 2:
58 static struct dv1394_init init = {
  59	   63,           (broadcast channel)
60 4, (four-frame ringbuffer)
61 DV1394_NTSC, (send NTSC video)
62 0, 0 (default empty packet rate)
63 }
64
65 ioctl(fd, DV1394_INIT, &init);
66
67 while (1) {
68 read( <a raw DV file>, buf, DV1394_NTSC_FRAME_SIZE );
69 write( <the dv1394 FD>, buf, DV1394_NTSC_FRAME_SIZE );
70 }
71
72 2)
73
74 For more control over buffering, and to avoid unnecessary copies
  75   of the DV data, you can use the more sophisticated mmap() interface.
76 First, call the DV1394_INIT ioctl to specify your parameters,
77 including the number of frames in the ringbuffer. Then, calling mmap()
78 on the dv1394 device will give you direct access to the ringbuffer
79 from which the DV card reads your frame data.
80
81 The ringbuffer is simply one large, contiguous region of memory
82 containing two or more frames of packed DV data. Each frame of DV data
83 is 120000 bytes (NTSC) or 144000 bytes (PAL).
84
85 Fill one or more frames in the ringbuffer, then use the DV1394_SUBMIT_FRAMES
86 ioctl to begin I/O. You can use either the DV1394_WAIT_FRAMES ioctl
87 or select()/poll() to wait until the frames are transmitted. Next, you'll
88 need to call the DV1394_GET_STATUS ioctl to determine which ringbuffer
89 frames are clear (ready to be filled with new DV data). Finally, use
90 DV1394_SUBMIT_FRAMES again to send the new data to the DV output.
91
92
93 Example: here is what a four-frame ringbuffer might look like
94 during DV transmission:
95
96
97 frame 0 frame 1 frame 2 frame 3
98
99 *--------------------------------------*
100 | CLEAR | DV data | DV data | CLEAR |
101 *--------------------------------------*
102 <ACTIVE>
103
104 transmission goes in this direction --->>>
105
106
107 The DV hardware is currently transmitting the data in frame 1.
108 Once frame 1 is finished, it will automatically transmit frame 2.
109 (if frame 2 finishes before frame 3 is submitted, the device
110 will continue to transmit frame 2, and will increase the dropped_frames
111 counter each time it repeats the transmission).
112
113
114 If you called DV1394_GET_STATUS at this instant, you would
115 receive the following values:
116
117 n_frames = 4
118 active_frame = 1
119 first_clear_frame = 3
120 n_clear_frames = 2
121
122 At this point, you should write new DV data into frame 3 and optionally
123 frame 0. Then call DV1394_SUBMIT_FRAMES to inform the device that
124 it may transmit the new frames.
125
126 ERROR HANDLING
127
128 An error (buffer underflow/overflow or a break in the DV stream due
129 to a 1394 bus reset) can be detected by checking the dropped_frames
130 field of struct dv1394_status (obtained through the
131 DV1394_GET_STATUS ioctl).
132
133 The best way to recover from such an error is to re-initialize
134 dv1394, either by using the DV1394_INIT ioctl call, or closing the
135 file descriptor and opening it again. (note that you must unmap all
136 ringbuffer mappings when closing the file descriptor, or else
137 dv1394 will still be considered 'in use').
138
139 MAIN LOOP
140
141 For maximum efficiency and robustness against bus errors, you are
142 advised to model the main loop of your application after the
143 following pseudo-code example:
144
145 (checks of system call return values omitted for brevity; always
146 check return values in your code!)
147
148 while ( frames left ) {
149
150 struct pollfd *pfd = ...;
151
152 pfd->fd = dv1394_fd;
153 pfd->revents = 0;
154 pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
155
156 (add other sources of I/O here)
157
158 poll(pfd, 1, -1); (or select(); add a timeout if you want)
159
160 if (pfd->revents) {
161 struct dv1394_status status;
162
163 ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
164
165 if (status.dropped_frames > 0) {
166 reset_dv1394();
167 } else {
168 for (int i = 0; i < status.n_clear_frames; i++) {
169 copy_DV_frame();
170 }
171 }
172 }
173 }
174
175 where copy_DV_frame() reads or writes on the dv1394 file descriptor
176 (read/write mode) or copies data to/from the mmap ringbuffer and
177 then calls ioctl(DV1394_SUBMIT_FRAMES) to notify dv1394 that new
 178   frames are available (mmap mode).
179
180 reset_dv1394() is called in the event of a buffer
181 underflow/overflow or a halt in the DV stream (e.g. due to a 1394
182 bus reset). To guarantee recovery from the error, this function
183 should close the dv1394 file descriptor (and munmap() all
184 ringbuffer mappings, if you are using them), then re-open the
185 dv1394 device (and re-map the ringbuffer).
186
187*/
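/*
   ADDENDUM (not part of the original text above): the receive direction,
   sketched from the driver's ioctl handlers; error checks omitted for
   brevity. After DV1394_IOC_INIT on a receive device you can either
   read() whole frames directly, or drive the mmap ringbuffer like so:

	struct dv1394_status status;

	ioctl(fd, DV1394_IOC_START_RECEIVE, 0);
	for (;;) {
		ioctl(fd, DV1394_IOC_WAIT_FRAMES, 1);
		ioctl(fd, DV1394_IOC_GET_STATUS, &status);

		... copy status.n_clear_frames frames out of the
		    ringbuffer, starting at status.first_clear_frame ...

		ioctl(fd, DV1394_IOC_RECEIVE_FRAMES, status.n_clear_frames);
	}
*/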
188
189
190/* maximum number of frames in the ringbuffer */
191#define DV1394_MAX_FRAMES 32
192
193/* number of *full* isochronous packets per DV frame */
194#define DV1394_NTSC_PACKETS_PER_FRAME 250
195#define DV1394_PAL_PACKETS_PER_FRAME 300
196
197/* size of one frame's worth of DV data, in bytes */
198#define DV1394_NTSC_FRAME_SIZE (480 * DV1394_NTSC_PACKETS_PER_FRAME)
199#define DV1394_PAL_FRAME_SIZE (480 * DV1394_PAL_PACKETS_PER_FRAME)
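/* worked out: NTSC = 250 packets * 480 bytes = 120000 bytes per frame,
   PAL = 300 packets * 480 bytes = 144000 bytes per frame */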
200
201
202/* ioctl() commands */
203#include "ieee1394-ioctl.h"
204
205
206enum pal_or_ntsc {
207 DV1394_NTSC = 0,
208 DV1394_PAL
209};
210
211
212
213
214/* this is the argument to DV1394_INIT */
215struct dv1394_init {
216 /* DV1394_API_VERSION */
217 unsigned int api_version;
218
219 /* isochronous transmission channel to use */
220 unsigned int channel;
221
222 /* number of frames in the ringbuffer. Must be at least 2
223 and at most DV1394_MAX_FRAMES. */
224 unsigned int n_frames;
225
226 /* send/receive PAL or NTSC video format */
227 enum pal_or_ntsc format;
228
229 /* the following are used only for transmission */
230
231 /* set these to zero unless you want a
232 non-default empty packet rate (see below) */
233 unsigned long cip_n;
234 unsigned long cip_d;
235
236 /* set this to zero unless you want a
237 non-default SYT cycle offset (default = 3 cycles) */
238 unsigned int syt_offset;
239};
240
241/* NOTE: you may only allocate the DV frame ringbuffer once each time
242 you open the dv1394 device. DV1394_INIT will fail if you call it a
243 second time with different 'n_frames' or 'format' arguments (which
244 would imply a different size for the ringbuffer). If you need a
245 different buffer size, simply close and re-open the device, then
246 initialize it with your new settings. */
247
248/* Q: What are cip_n and cip_d? */
249
250/*
251 A: DV video streams do not utilize 100% of the potential bandwidth offered
252 by IEEE 1394 (FireWire). To achieve the correct rate of data transmission,
253 DV devices must periodically insert empty packets into the 1394 data stream.
254 Typically there is one empty packet per 14-16 data-carrying packets.
255
256 Some DV devices will accept a wide range of empty packet rates, while others
257 require a precise rate. If the dv1394 driver produces empty packets at
258 a rate that your device does not accept, you may see ugly patterns on the
259 DV output, or even no output at all.
260
261 The default empty packet insertion rate seems to work for many people; if
262 your DV output is stable, you can simply ignore this discussion. However,
263 we have exposed the empty packet rate as a parameter to support devices that
264 do not work with the default rate.
265
266 The decision to insert an empty packet is made with a numerator/denominator
267 algorithm. Empty packets are produced at an average rate of CIP_N / CIP_D.
268 You can alter the empty packet rate by passing non-zero values for cip_n
269 and cip_d to the INIT ioctl.
270
271 */
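/*
   For intuition, one way such a numerator/denominator rate can be
   implemented (an illustrative sketch, not necessarily the exact code
   the driver uses). The helper returns 1 when an empty packet should be
   inserted; calling it once per packet slot yields empty packets at an
   average rate of CIP_N / CIP_D:

	static unsigned int acc;

	static int should_insert_empty_packet(unsigned int cip_n,
					      unsigned int cip_d)
	{
		acc += cip_n;
		if (acc >= cip_d) {
			acc -= cip_d;
			return 1;
		}
		return 0;
	}
 */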
272
273
274
275struct dv1394_status {
276 /* this embedded init struct returns the current dv1394
277 parameters in use */
278 struct dv1394_init init;
279
280 /* the ringbuffer frame that is currently being
281 displayed. (-1 if the device is not transmitting anything) */
282 int active_frame;
283
284 /* index of the first buffer (ahead of active_frame) that
285 is ready to be filled with data */
286 unsigned int first_clear_frame;
287
 288	/* how many buffers, including first_clear_frame, are
 289	   ready to be filled with data */
290 unsigned int n_clear_frames;
291
292 /* how many times the DV stream has underflowed, overflowed,
293 or otherwise encountered an error, since the previous call
294 to DV1394_GET_STATUS */
295 unsigned int dropped_frames;
296
297 /* N.B. The dropped_frames counter is only a lower bound on the actual
298 number of dropped frames, with the special case that if dropped_frames
299 is zero, then it is guaranteed that NO frames have been dropped
300 since the last call to DV1394_GET_STATUS.
301 */
302};
303
304
305#endif /* _DV_1394_H */
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
deleted file mode 100644
index bc289e367e30..000000000000
--- a/drivers/ieee1394/eth1394.c
+++ /dev/null
@@ -1,1736 +0,0 @@
1/*
2 * eth1394.c -- IPv4 driver for Linux IEEE-1394 Subsystem
3 *
4 * Copyright (C) 2001-2003 Ben Collins <bcollins@debian.org>
5 * 2000 Bonin Franck <boninf@free.fr>
6 * 2003 Steve Kinneberg <kinnebergsteve@acmsystems.com>
7 *
8 * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software Foundation,
22 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 */
24
25/*
26 * This driver intends to support RFC 2734, which describes a method for
27 * transporting IPv4 datagrams over IEEE-1394 serial busses.
28 *
29 * TODO:
30 * RFC 2734 related:
 31 * - Add MCAP. Limited multicast support exists only for 224.0.0.1 and 224.0.0.2.
32 *
33 * Non-RFC 2734 related:
34 * - Handle fragmented skb's coming from the networking layer.
35 * - Move generic GASP reception to core 1394 code
36 * - Convert kmalloc/kfree for link fragments to use kmem_cache_* instead
37 * - Stability improvements
38 * - Performance enhancements
39 * - Consider garbage collecting old partial datagrams after X amount of time
40 */
41
42#include <linux/module.h>
43
44#include <linux/kernel.h>
45#include <linux/slab.h>
46#include <linux/errno.h>
47#include <linux/types.h>
48#include <linux/delay.h>
49#include <linux/init.h>
50#include <linux/workqueue.h>
51
52#include <linux/netdevice.h>
53#include <linux/inetdevice.h>
54#include <linux/if_arp.h>
55#include <linux/if_ether.h>
56#include <linux/ip.h>
57#include <linux/in.h>
58#include <linux/tcp.h>
59#include <linux/skbuff.h>
60#include <linux/bitops.h>
61#include <linux/ethtool.h>
62#include <asm/uaccess.h>
63#include <asm/delay.h>
64#include <asm/unaligned.h>
65#include <net/arp.h>
66
67#include "config_roms.h"
68#include "csr1212.h"
69#include "eth1394.h"
70#include "highlevel.h"
71#include "ieee1394.h"
72#include "ieee1394_core.h"
73#include "ieee1394_hotplug.h"
74#include "ieee1394_transactions.h"
75#include "ieee1394_types.h"
76#include "iso.h"
77#include "nodemgr.h"
78
79#define ETH1394_PRINT_G(level, fmt, args...) \
80 printk(level "%s: " fmt, driver_name, ## args)
81
82#define ETH1394_PRINT(level, dev_name, fmt, args...) \
83 printk(level "%s: %s: " fmt, driver_name, dev_name, ## args)
84
85struct fragment_info {
86 struct list_head list;
87 int offset;
88 int len;
89};
90
91struct partial_datagram {
92 struct list_head list;
93 u16 dgl;
94 u16 dg_size;
95 __be16 ether_type;
96 struct sk_buff *skb;
97 char *pbuf;
98 struct list_head frag_info;
99};
100
101struct pdg_list {
102 struct list_head list; /* partial datagram list per node */
103 unsigned int sz; /* partial datagram list size per node */
104 spinlock_t lock; /* partial datagram lock */
105};
106
107struct eth1394_host_info {
108 struct hpsb_host *host;
109 struct net_device *dev;
110};
111
112struct eth1394_node_ref {
113 struct unit_directory *ud;
114 struct list_head list;
115};
116
117struct eth1394_node_info {
118 u16 maxpayload; /* max payload */
119 u8 sspd; /* max speed */
120 u64 fifo; /* FIFO address */
121 struct pdg_list pdg; /* partial RX datagram lists */
122 int dgl; /* outgoing datagram label */
123};
124
125static const char driver_name[] = "eth1394";
126
127static struct kmem_cache *packet_task_cache;
128
129static struct hpsb_highlevel eth1394_highlevel;
130
131/* Use common.lf to determine header len */
132static const int hdr_type_len[] = {
133 sizeof(struct eth1394_uf_hdr),
134 sizeof(struct eth1394_ff_hdr),
135 sizeof(struct eth1394_sf_hdr),
136 sizeof(struct eth1394_sf_hdr)
137};
138
139static const u16 eth1394_speedto_maxpayload[] = {
140/* S100, S200, S400, S800, S1600, S3200 */
141 512, 1024, 2048, 4096, 4096, 4096
142};
143
144MODULE_AUTHOR("Ben Collins (bcollins@debian.org)");
145MODULE_DESCRIPTION("IEEE 1394 IPv4 Driver (IPv4-over-1394 as per RFC 2734)");
146MODULE_LICENSE("GPL");
147
148/*
149 * The max_partial_datagrams parameter is the maximum number of fragmented
150 * datagrams per node that eth1394 will keep in memory. Providing an upper
151 * bound allows us to limit the amount of memory that partial datagrams
152 * consume in the event that some partial datagrams are never completed.
153 */
154static int max_partial_datagrams = 25;
155module_param(max_partial_datagrams, int, S_IRUGO | S_IWUSR);
156MODULE_PARM_DESC(max_partial_datagrams,
157 "Maximum number of partially received fragmented datagrams "
158 "(default = 25).");
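
/*
 * Usage note (illustrative, not from the original source): since the
 * parameter is registered with S_IWUSR, it can be raised at load time,
 * e.g. "modprobe eth1394 max_partial_datagrams=50", or adjusted later
 * through /sys/module/eth1394/parameters/max_partial_datagrams.
 */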
159
160
161static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
162 unsigned short type, const void *daddr,
163 const void *saddr, unsigned len);
164static int ether1394_rebuild_header(struct sk_buff *skb);
165static int ether1394_header_parse(const struct sk_buff *skb,
166 unsigned char *haddr);
167static int ether1394_header_cache(const struct neighbour *neigh,
168 struct hh_cache *hh);
169static void ether1394_header_cache_update(struct hh_cache *hh,
170 const struct net_device *dev,
171 const unsigned char *haddr);
172static netdev_tx_t ether1394_tx(struct sk_buff *skb,
173 struct net_device *dev);
174static void ether1394_iso(struct hpsb_iso *iso);
175
176static const struct ethtool_ops ethtool_ops;
177
178static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
179 quadlet_t *data, u64 addr, size_t len, u16 flags);
180static void ether1394_add_host(struct hpsb_host *host);
181static void ether1394_remove_host(struct hpsb_host *host);
182static void ether1394_host_reset(struct hpsb_host *host);
183
184/* Function for incoming 1394 packets */
185static const struct hpsb_address_ops addr_ops = {
186 .write = ether1394_write,
187};
188
189/* Ieee1394 highlevel driver functions */
190static struct hpsb_highlevel eth1394_highlevel = {
191 .name = driver_name,
192 .add_host = ether1394_add_host,
193 .remove_host = ether1394_remove_host,
194 .host_reset = ether1394_host_reset,
195};
196
197static int ether1394_recv_init(struct eth1394_priv *priv)
198{
199 unsigned int iso_buf_size;
200
201 /* FIXME: rawiso limits us to PAGE_SIZE */
202 iso_buf_size = min((unsigned int)PAGE_SIZE,
203 2 * (1U << (priv->host->csr.max_rec + 1)));
204
205 priv->iso = hpsb_iso_recv_init(priv->host,
206 ETHER1394_GASP_BUFFERS * iso_buf_size,
207 ETHER1394_GASP_BUFFERS,
208 priv->broadcast_channel,
209 HPSB_ISO_DMA_PACKET_PER_BUFFER,
210 1, ether1394_iso);
211 if (priv->iso == NULL) {
212 ETH1394_PRINT_G(KERN_ERR, "Failed to allocate IR context\n");
213 priv->bc_state = ETHER1394_BC_ERROR;
214 return -EAGAIN;
215 }
216
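	/* The (1 << 3) tag mask selects isochronous tag 3, the tag that GASP
	 * packets (and thus RFC 2734 broadcast traffic) are required to use. */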
217 if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0)
218 priv->bc_state = ETHER1394_BC_STOPPED;
219 else
220 priv->bc_state = ETHER1394_BC_RUNNING;
221 return 0;
222}
223
224/* This is called after an "ifup" */
225static int ether1394_open(struct net_device *dev)
226{
227 struct eth1394_priv *priv = netdev_priv(dev);
228 int ret;
229
230 if (priv->bc_state == ETHER1394_BC_ERROR) {
231 ret = ether1394_recv_init(priv);
232 if (ret)
233 return ret;
234 }
235 netif_start_queue(dev);
236 return 0;
237}
238
239/* This is called after an "ifdown" */
240static int ether1394_stop(struct net_device *dev)
241{
242 /* flush priv->wake */
243 flush_scheduled_work();
244
245 netif_stop_queue(dev);
246 return 0;
247}
248
249/* FIXME: What to do if we time out? A host reset is probably in order,
250 * so that's what we do. Should we increment the stat counters too? */
251static void ether1394_tx_timeout(struct net_device *dev)
252{
253 struct hpsb_host *host =
254 ((struct eth1394_priv *)netdev_priv(dev))->host;
255
256 ETH1394_PRINT(KERN_ERR, dev->name, "Timeout, resetting host\n");
257 ether1394_host_reset(host);
258}
259
260static inline int ether1394_max_mtu(struct hpsb_host* host)
261{
262 return (1 << (host->csr.max_rec + 1))
263 - sizeof(union eth1394_hdr) - ETHER1394_GASP_OVERHEAD;
264}
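
/*
 * Worked example (illustrative numbers, not from the original source): with
 * max_rec = 10, the async payload limit is 1 << (10 + 1) = 2048 octets;
 * subtracting the 8-byte encapsulation header union and the 8-byte GASP
 * overhead leaves a maximum MTU of 2032 octets.
 */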
265
266static int ether1394_change_mtu(struct net_device *dev, int new_mtu)
267{
268 int max_mtu;
269
270 if (new_mtu < 68)
271 return -EINVAL;
272
273 max_mtu = ether1394_max_mtu(
274 ((struct eth1394_priv *)netdev_priv(dev))->host);
275 if (new_mtu > max_mtu) {
276 ETH1394_PRINT(KERN_INFO, dev->name,
277 "Local node constrains MTU to %d\n", max_mtu);
278 return -ERANGE;
279 }
280
281 dev->mtu = new_mtu;
282 return 0;
283}
284
285static void purge_partial_datagram(struct list_head *old)
286{
287 struct partial_datagram *pd;
288 struct list_head *lh, *n;
289 struct fragment_info *fi;
290
291 pd = list_entry(old, struct partial_datagram, list);
292
293 list_for_each_safe(lh, n, &pd->frag_info) {
294 fi = list_entry(lh, struct fragment_info, list);
295 list_del(lh);
296 kfree(fi);
297 }
298 list_del(old);
299 kfree_skb(pd->skb);
300 kfree(pd);
301}
302
303/******************************************
304 * 1394 bus activity functions
305 ******************************************/
306
307static struct eth1394_node_ref *eth1394_find_node(struct list_head *inl,
308 struct unit_directory *ud)
309{
310 struct eth1394_node_ref *node;
311
312 list_for_each_entry(node, inl, list)
313 if (node->ud == ud)
314 return node;
315
316 return NULL;
317}
318
319static struct eth1394_node_ref *eth1394_find_node_guid(struct list_head *inl,
320 u64 guid)
321{
322 struct eth1394_node_ref *node;
323
324 list_for_each_entry(node, inl, list)
325 if (node->ud->ne->guid == guid)
326 return node;
327
328 return NULL;
329}
330
331static struct eth1394_node_ref *eth1394_find_node_nodeid(struct list_head *inl,
332 nodeid_t nodeid)
333{
334 struct eth1394_node_ref *node;
335
336 list_for_each_entry(node, inl, list)
337 if (node->ud->ne->nodeid == nodeid)
338 return node;
339
340 return NULL;
341}
342
343static int eth1394_new_node(struct eth1394_host_info *hi,
344 struct unit_directory *ud)
345{
346 struct eth1394_priv *priv;
347 struct eth1394_node_ref *new_node;
348 struct eth1394_node_info *node_info;
349
350 new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
351 if (!new_node)
352 return -ENOMEM;
353
354 node_info = kmalloc(sizeof(*node_info), GFP_KERNEL);
355 if (!node_info) {
356 kfree(new_node);
357 return -ENOMEM;
358 }
359
360 spin_lock_init(&node_info->pdg.lock);
361 INIT_LIST_HEAD(&node_info->pdg.list);
362 node_info->pdg.sz = 0;
363 node_info->fifo = CSR1212_INVALID_ADDR_SPACE;
364
365 dev_set_drvdata(&ud->device, node_info);
366 new_node->ud = ud;
367
368 priv = netdev_priv(hi->dev);
369 list_add_tail(&new_node->list, &priv->ip_node_list);
370 return 0;
371}
372
373static int eth1394_probe(struct device *dev)
374{
375 struct unit_directory *ud;
376 struct eth1394_host_info *hi;
377
378 ud = container_of(dev, struct unit_directory, device);
379 hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
380 if (!hi)
381 return -ENOENT;
382
383 return eth1394_new_node(hi, ud);
384}
385
386static int eth1394_remove(struct device *dev)
387{
388 struct unit_directory *ud;
389 struct eth1394_host_info *hi;
390 struct eth1394_priv *priv;
391 struct eth1394_node_ref *old_node;
392 struct eth1394_node_info *node_info;
393 struct list_head *lh, *n;
394 unsigned long flags;
395
396 ud = container_of(dev, struct unit_directory, device);
397 hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
398 if (!hi)
399 return -ENOENT;
400
401 priv = netdev_priv(hi->dev);
402
403 old_node = eth1394_find_node(&priv->ip_node_list, ud);
404 if (!old_node)
405 return 0;
406
407 list_del(&old_node->list);
408 kfree(old_node);
409
410 node_info = dev_get_drvdata(&ud->device);
411
412 spin_lock_irqsave(&node_info->pdg.lock, flags);
413 /* The partial datagram list should be empty, but we'll just
414 * make sure anyway... */
415 list_for_each_safe(lh, n, &node_info->pdg.list)
416 purge_partial_datagram(lh);
417 spin_unlock_irqrestore(&node_info->pdg.lock, flags);
418
419 kfree(node_info);
420 dev_set_drvdata(&ud->device, NULL);
421 return 0;
422}
423
424static int eth1394_update(struct unit_directory *ud)
425{
426 struct eth1394_host_info *hi;
427 struct eth1394_priv *priv;
428 struct eth1394_node_ref *node;
429
430 hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
431 if (!hi)
432 return -ENOENT;
433
434 priv = netdev_priv(hi->dev);
435 node = eth1394_find_node(&priv->ip_node_list, ud);
436 if (node)
437 return 0;
438
439 return eth1394_new_node(hi, ud);
440}
441
442static const struct ieee1394_device_id eth1394_id_table[] = {
443 {
444 .match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
445 IEEE1394_MATCH_VERSION),
446 .specifier_id = ETHER1394_GASP_SPECIFIER_ID,
447 .version = ETHER1394_GASP_VERSION,
448 },
449 {}
450};
451
452MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table);
453
454static struct hpsb_protocol_driver eth1394_proto_driver = {
455 .name = driver_name,
456 .id_table = eth1394_id_table,
457 .update = eth1394_update,
458 .driver = {
459 .probe = eth1394_probe,
460 .remove = eth1394_remove,
461 },
462};
463
464static void ether1394_reset_priv(struct net_device *dev, int set_mtu)
465{
466 unsigned long flags;
467 int i;
468 struct eth1394_priv *priv = netdev_priv(dev);
469 struct hpsb_host *host = priv->host;
470 u64 guid = get_unaligned((u64 *)&(host->csr.rom->bus_info_data[3]));
471 int max_speed = IEEE1394_SPEED_MAX;
472
473 spin_lock_irqsave(&priv->lock, flags);
474
475 memset(priv->ud_list, 0, sizeof(priv->ud_list));
476 priv->bc_maxpayload = 512;
477
478 /* Determine speed limit */
479 /* FIXME: This is broken for nodes with link speed < PHY speed,
480 * and it is suboptimal for S200B...S800B hardware.
481 * The result of nodemgr's speed probe should be used somehow. */
482 for (i = 0; i < host->node_count; i++) {
483 /* take care of S100B...S400B PHY ports */
484 if (host->speed[i] == SELFID_SPEED_UNKNOWN) {
485 max_speed = IEEE1394_SPEED_100;
486 break;
487 }
488 if (max_speed > host->speed[i])
489 max_speed = host->speed[i];
490 }
491 priv->bc_sspd = max_speed;
492
493 if (set_mtu) {
494 /* Use the RFC 2734 default 1500 octets or the maximum payload
495 * as initial MTU */
496 dev->mtu = min(1500, ether1394_max_mtu(host));
497
498 /* Set our hardware address while we're at it */
499 memcpy(dev->dev_addr, &guid, sizeof(u64));
500 memset(dev->broadcast, 0xff, sizeof(u64));
501 }
502
503 spin_unlock_irqrestore(&priv->lock, flags);
504}
505
506static const struct header_ops ether1394_header_ops = {
507 .create = ether1394_header,
508 .rebuild = ether1394_rebuild_header,
509 .cache = ether1394_header_cache,
510 .cache_update = ether1394_header_cache_update,
511 .parse = ether1394_header_parse,
512};
513
514static const struct net_device_ops ether1394_netdev_ops = {
515 .ndo_open = ether1394_open,
516 .ndo_stop = ether1394_stop,
517 .ndo_start_xmit = ether1394_tx,
518 .ndo_tx_timeout = ether1394_tx_timeout,
519 .ndo_change_mtu = ether1394_change_mtu,
520};
521
522static void ether1394_init_dev(struct net_device *dev)
523{
525 dev->header_ops = &ether1394_header_ops;
526 dev->netdev_ops = &ether1394_netdev_ops;
527
528 SET_ETHTOOL_OPS(dev, &ethtool_ops);
529
530 dev->watchdog_timeo = ETHER1394_TIMEOUT;
531 dev->flags = IFF_BROADCAST | IFF_MULTICAST;
532 dev->features = NETIF_F_HIGHDMA;
533 dev->addr_len = ETH1394_ALEN;
534 dev->hard_header_len = ETH1394_HLEN;
535 dev->type = ARPHRD_IEEE1394;
536
537 /* FIXME: This value was copied from ether_setup(). Is it too much? */
538 dev->tx_queue_len = 1000;
539}
540
541/*
542 * Wake the queue up after commonly encountered transmit failure conditions are
543 * hopefully over. Currently only tlabel exhaustion is accounted for.
544 */
545static void ether1394_wake_queue(struct work_struct *work)
546{
547 struct eth1394_priv *priv;
548 struct hpsb_packet *packet;
549
550 priv = container_of(work, struct eth1394_priv, wake);
551 packet = hpsb_alloc_packet(0);
552
553 /* This is really bad, but unjam the queue anyway. */
554 if (!packet)
555 goto out;
556
557 packet->host = priv->host;
558 packet->node_id = priv->wake_node;
559 /*
560 * A transaction label is all we really want. If we get one, it almost
561 * always means we can get a lot more because the ieee1394 core recycled
562 * a whole batch of tlabels, at last.
563 */
564 if (hpsb_get_tlabel(packet) == 0)
565 hpsb_free_tlabel(packet);
566
567 hpsb_free_packet(packet);
568out:
569 netif_wake_queue(priv->wake_dev);
570}
571
572/*
573 * This function is called every time a card is found; generally this happens
574 * when the module is loaded. This is where we add all of our ethernet
575 * devices, one for each host.
576 */
577static void ether1394_add_host(struct hpsb_host *host)
578{
579 struct eth1394_host_info *hi = NULL;
580 struct net_device *dev = NULL;
581 struct eth1394_priv *priv;
582 u64 fifo_addr;
583
584 if (hpsb_config_rom_ip1394_add(host) != 0) {
585 ETH1394_PRINT_G(KERN_ERR, "Can't add IP-over-1394 ROM entry\n");
586 return;
587 }
588
589 fifo_addr = hpsb_allocate_and_register_addrspace(
590 &eth1394_highlevel, host, &addr_ops,
591 ETHER1394_REGION_ADDR_LEN, ETHER1394_REGION_ADDR_LEN,
592 CSR1212_INVALID_ADDR_SPACE, CSR1212_INVALID_ADDR_SPACE);
593 if (fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
594 ETH1394_PRINT_G(KERN_ERR, "Cannot register CSR space\n");
595 hpsb_config_rom_ip1394_remove(host);
596 return;
597 }
598
599 dev = alloc_netdev(sizeof(*priv), "eth%d", ether1394_init_dev);
600 if (dev == NULL) {
601 ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
602 goto out;
603 }
604
605 SET_NETDEV_DEV(dev, &host->device);
606
607 priv = netdev_priv(dev);
608 INIT_LIST_HEAD(&priv->ip_node_list);
609 spin_lock_init(&priv->lock);
610 priv->host = host;
611 priv->local_fifo = fifo_addr;
612 INIT_WORK(&priv->wake, ether1394_wake_queue);
613 priv->wake_dev = dev;
614
615 hi = hpsb_create_hostinfo(&eth1394_highlevel, host, sizeof(*hi));
616 if (hi == NULL) {
617 ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
618 goto out;
619 }
620
621 ether1394_reset_priv(dev, 1);
622
623 if (register_netdev(dev)) {
624 ETH1394_PRINT_G(KERN_ERR, "Cannot register the driver\n");
625 goto out;
626 }
627
628 ETH1394_PRINT(KERN_INFO, dev->name, "IPv4 over IEEE 1394 (fw-host%d)\n",
629 host->id);
630
631 hi->host = host;
632 hi->dev = dev;
633
634 /* Ignore validity in hopes that it will be set in the future. It'll
635 * be checked when the eth device is opened. */
636 priv->broadcast_channel = host->csr.broadcast_channel & 0x3f;
637
638 ether1394_recv_init(priv);
639 return;
640out:
641 if (dev)
642 free_netdev(dev);
643 if (hi)
644 hpsb_destroy_hostinfo(&eth1394_highlevel, host);
645 hpsb_unregister_addrspace(&eth1394_highlevel, host, fifo_addr);
646 hpsb_config_rom_ip1394_remove(host);
647}
648
649/* Remove a card from our list */
650static void ether1394_remove_host(struct hpsb_host *host)
651{
652 struct eth1394_host_info *hi;
653 struct eth1394_priv *priv;
654
655 hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
656 if (!hi)
657 return;
658 priv = netdev_priv(hi->dev);
659 hpsb_unregister_addrspace(&eth1394_highlevel, host, priv->local_fifo);
660 hpsb_config_rom_ip1394_remove(host);
661 if (priv->iso)
662 hpsb_iso_shutdown(priv->iso);
663 unregister_netdev(hi->dev);
664 free_netdev(hi->dev);
665}
666
667/* A bus reset happened */
668static void ether1394_host_reset(struct hpsb_host *host)
669{
670 struct eth1394_host_info *hi;
671 struct eth1394_priv *priv;
672 struct net_device *dev;
673 struct list_head *lh, *n;
674 struct eth1394_node_ref *node;
675 struct eth1394_node_info *node_info;
676 unsigned long flags;
677
678 hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
679
680 /* This can happen for hosts that we don't use */
681 if (!hi)
682 return;
683
684 dev = hi->dev;
685 priv = netdev_priv(dev);
686
687 /* Reset our private host data, but not our MTU */
688 netif_stop_queue(dev);
689 ether1394_reset_priv(dev, 0);
690
691 list_for_each_entry(node, &priv->ip_node_list, list) {
692 node_info = dev_get_drvdata(&node->ud->device);
693
694 spin_lock_irqsave(&node_info->pdg.lock, flags);
695
696 list_for_each_safe(lh, n, &node_info->pdg.list)
697 purge_partial_datagram(lh);
698
699 INIT_LIST_HEAD(&(node_info->pdg.list));
700 node_info->pdg.sz = 0;
701
702 spin_unlock_irqrestore(&node_info->pdg.lock, flags);
703 }
704
705 netif_wake_queue(dev);
706}
707
708/******************************************
709 * HW Header net device functions
710 ******************************************/
711/* These functions have been adapted from net/ethernet/eth.c */
712
713/* Create a fake MAC header for an arbitrary protocol layer.
714 * saddr=NULL means use the device source address;
715 * daddr=NULL means leave the destination address unset (e.g. unresolved ARP). */
716static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
717 unsigned short type, const void *daddr,
718 const void *saddr, unsigned len)
719{
720 struct eth1394hdr *eth =
721 (struct eth1394hdr *)skb_push(skb, ETH1394_HLEN);
722
723 eth->h_proto = htons(type);
724
725 if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
726 memset(eth->h_dest, 0, dev->addr_len);
727 return dev->hard_header_len;
728 }
729
730 if (daddr) {
731 memcpy(eth->h_dest, daddr, dev->addr_len);
732 return dev->hard_header_len;
733 }
734
735 return -dev->hard_header_len;
736}
737
738/* Rebuild the faked MAC header. This is called after an ARP
739 * (or in future other address resolution) has completed on this
740 * sk_buff. We now let ARP fill in the other fields.
741 *
742 * This routine CANNOT use cached dst->neigh!
743 * Really, it is used only when dst->neigh is wrong.
744 */
745static int ether1394_rebuild_header(struct sk_buff *skb)
746{
747 struct eth1394hdr *eth = (struct eth1394hdr *)skb->data;
748
749 if (eth->h_proto == htons(ETH_P_IP))
750 return arp_find((unsigned char *)&eth->h_dest, skb);
751
752 ETH1394_PRINT(KERN_DEBUG, skb->dev->name,
753 "unable to resolve type %04x addresses\n",
754 ntohs(eth->h_proto));
755 return 0;
756}
757
758static int ether1394_header_parse(const struct sk_buff *skb,
759 unsigned char *haddr)
760{
761 memcpy(haddr, skb->dev->dev_addr, ETH1394_ALEN);
762 return ETH1394_ALEN;
763}
764
765static int ether1394_header_cache(const struct neighbour *neigh,
766 struct hh_cache *hh)
767{
768 __be16 type = hh->hh_type;
769 struct net_device *dev = neigh->dev;
770 struct eth1394hdr *eth =
771 (struct eth1394hdr *)((u8 *)hh->hh_data + 16 - ETH1394_HLEN);
772
773 if (type == htons(ETH_P_802_3))
774 return -1;
775
776 eth->h_proto = type;
777 memcpy(eth->h_dest, neigh->ha, dev->addr_len);
778
779 hh->hh_len = ETH1394_HLEN;
780 return 0;
781}
782
783/* Called by Address Resolution module to notify changes in address. */
784static void ether1394_header_cache_update(struct hh_cache *hh,
785 const struct net_device *dev,
786 const unsigned char * haddr)
787{
788 memcpy((u8 *)hh->hh_data + 16 - ETH1394_HLEN, haddr, dev->addr_len);
789}
790
791/******************************************
792 * Datagram reception code
793 ******************************************/
794
795/* Copied from net/ethernet/eth.c */
796static __be16 ether1394_type_trans(struct sk_buff *skb, struct net_device *dev)
797{
798 struct eth1394hdr *eth;
799 unsigned char *rawp;
800
801 skb_reset_mac_header(skb);
802 skb_pull(skb, ETH1394_HLEN);
803 eth = eth1394_hdr(skb);
804
805 if (*eth->h_dest & 1) {
806 if (memcmp(eth->h_dest, dev->broadcast, dev->addr_len) == 0)
807 skb->pkt_type = PACKET_BROADCAST;
808#if 0
809 else
810 skb->pkt_type = PACKET_MULTICAST;
811#endif
812 } else {
813 if (memcmp(eth->h_dest, dev->dev_addr, dev->addr_len))
814 skb->pkt_type = PACKET_OTHERHOST;
815 }
816
817 if (ntohs(eth->h_proto) >= 1536)
818 return eth->h_proto;
819
820 rawp = skb->data;
821
822 if (*(unsigned short *)rawp == 0xFFFF)
823 return htons(ETH_P_802_3);
824
825 return htons(ETH_P_802_2);
826}
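
/* As in eth.c: h_proto values >= 1536 are genuine ethertypes, a payload
 * beginning with 0xFFFF marks a raw 802.3 frame, and everything else is
 * treated as 802.2 LLC. */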
827
828/* Parse an encapsulated IP1394 header into an ethernet frame packet.
829 * We also perform ARP translation here, if need be. */
830static __be16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev,
831 nodeid_t srcid, nodeid_t destid,
832 __be16 ether_type)
833{
834 struct eth1394_priv *priv = netdev_priv(dev);
835 __be64 dest_hw;
836 __be16 ret = 0;
837
838 /* Setup our hw addresses. We use these to build the ethernet header. */
839 if (destid == (LOCAL_BUS | ALL_NODES))
840 dest_hw = ~cpu_to_be64(0); /* broadcast */
841 else
842 dest_hw = cpu_to_be64((u64)priv->host->csr.guid_hi << 32 |
843 priv->host->csr.guid_lo);
844
845 /* If this is an ARP packet, convert it. First, we want to make
846 * use of some of the fields, since they tell us a little bit
847 * about the sending machine. */
848 if (ether_type == htons(ETH_P_ARP)) {
849 struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
850 struct arphdr *arp = (struct arphdr *)skb->data;
851 unsigned char *arp_ptr = (unsigned char *)(arp + 1);
852 u64 fifo_addr = (u64)ntohs(arp1394->fifo_hi) << 32 |
853 ntohl(arp1394->fifo_lo);
854 u8 max_rec = min(priv->host->csr.max_rec,
855 (u8)(arp1394->max_rec));
856 int sspd = arp1394->sspd;
857 u16 maxpayload;
858 struct eth1394_node_ref *node;
859 struct eth1394_node_info *node_info;
860 __be64 guid;
861
862		/* Sanity check. Mac OS X seems to be sending us 131 in this
863		 * field (at least on my Panther G5). Not sure why. */
864 if (sspd > 5 || sspd < 0)
865 sspd = 0;
866
867 maxpayload = min(eth1394_speedto_maxpayload[sspd],
868 (u16)(1 << (max_rec + 1)));
869
870 guid = get_unaligned(&arp1394->s_uniq_id);
871 node = eth1394_find_node_guid(&priv->ip_node_list,
872 be64_to_cpu(guid));
873 if (!node)
874 return cpu_to_be16(0);
875
876 node_info = dev_get_drvdata(&node->ud->device);
877
878 /* Update our speed/payload/fifo_offset table */
879 node_info->maxpayload = maxpayload;
880 node_info->sspd = sspd;
881 node_info->fifo = fifo_addr;
882
883		/* Now that we're done with the 1394-specific fields, we
884		 * need to alter some of the data. All that has to be done
885		 * is that the sender IP address gets moved, the destination
886		 * hardware address gets stuffed in, and the hardware address
887		 * length gets set to 8.
888		 *
889		 * IMPORTANT: The code below overwrites 1394-specific data
890		 * needed above, so keep the munging of the data for the
891		 * higher-level IP stack last. */
892
893 arp->ar_hln = 8;
894 arp_ptr += arp->ar_hln; /* skip over sender unique id */
895 *(u32 *)arp_ptr = arp1394->sip; /* move sender IP addr */
896 arp_ptr += arp->ar_pln; /* skip over sender IP addr */
897
898 if (arp->ar_op == htons(ARPOP_REQUEST))
899 memset(arp_ptr, 0, sizeof(u64));
900 else
901 memcpy(arp_ptr, dev->dev_addr, sizeof(u64));
902 }
903
904 /* Now add the ethernet header. */
905 if (dev_hard_header(skb, dev, ntohs(ether_type), &dest_hw, NULL,
906 skb->len) >= 0)
907 ret = ether1394_type_trans(skb, dev);
908
909 return ret;
910}
911
912static int fragment_overlap(struct list_head *frag_list, int offset, int len)
913{
914 struct fragment_info *fi;
915 int end = offset + len;
916
917 list_for_each_entry(fi, frag_list, list)
918 if (offset < fi->offset + fi->len && end > fi->offset)
919 return 1;
920
921 return 0;
922}
923
924static struct list_head *find_partial_datagram(struct list_head *pdgl, int dgl)
925{
926 struct partial_datagram *pd;
927
928 list_for_each_entry(pd, pdgl, list)
929 if (pd->dgl == dgl)
930 return &pd->list;
931
932 return NULL;
933}
934
935/* Assumes that new fragment does not overlap any existing fragments */
936static int new_fragment(struct list_head *frag_info, int offset, int len)
937{
938 struct list_head *lh;
939 struct fragment_info *fi, *fi2, *new;
940
941 list_for_each(lh, frag_info) {
942 fi = list_entry(lh, struct fragment_info, list);
943 if (fi->offset + fi->len == offset) {
944 /* The new fragment can be tacked on to the end */
945 fi->len += len;
946 /* Did the new fragment plug a hole? */
947 fi2 = list_entry(lh->next, struct fragment_info, list);
948 if (fi->offset + fi->len == fi2->offset) {
949 /* glue fragments together */
950 fi->len += fi2->len;
951 list_del(lh->next);
952 kfree(fi2);
953 }
954 return 0;
955 } else if (offset + len == fi->offset) {
956 /* The new fragment can be tacked on to the beginning */
957 fi->offset = offset;
958 fi->len += len;
959 /* Did the new fragment plug a hole? */
960 fi2 = list_entry(lh->prev, struct fragment_info, list);
961 if (fi2->offset + fi2->len == fi->offset) {
962 /* glue fragments together */
963 fi2->len += fi->len;
964 list_del(lh);
965 kfree(fi);
966 }
967 return 0;
968 } else if (offset > fi->offset + fi->len) {
969 break;
970 } else if (offset + len < fi->offset) {
971 lh = lh->prev;
972 break;
973 }
974 }
975
976 new = kmalloc(sizeof(*new), GFP_ATOMIC);
977 if (!new)
978 return -ENOMEM;
979
980 new->offset = offset;
981 new->len = len;
982
983 list_add(&new->list, lh);
984 return 0;
985}
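
/*
 * Illustrative example (not part of the original source): given existing
 * fragments [0..511] and [1024..1535], a new fragment at offset 512 with
 * length 512 first extends [0..511] to [0..1023], then notices that this
 * now abuts [1024..1535] and glues the two entries into a single [0..1535]
 * record.
 */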
986
987static int new_partial_datagram(struct net_device *dev, struct list_head *pdgl,
988 int dgl, int dg_size, char *frag_buf,
989 int frag_off, int frag_len)
990{
991 struct partial_datagram *new;
992
993 new = kmalloc(sizeof(*new), GFP_ATOMIC);
994 if (!new)
995 return -ENOMEM;
996
997 INIT_LIST_HEAD(&new->frag_info);
998
999 if (new_fragment(&new->frag_info, frag_off, frag_len) < 0) {
1000 kfree(new);
1001 return -ENOMEM;
1002 }
1003
1004 new->dgl = dgl;
1005 new->dg_size = dg_size;
1006
1007 new->skb = dev_alloc_skb(dg_size + dev->hard_header_len + 15);
1008 if (!new->skb) {
1009 struct fragment_info *fi = list_entry(new->frag_info.next,
1010 struct fragment_info,
1011 list);
1012 kfree(fi);
1013 kfree(new);
1014 return -ENOMEM;
1015 }
1016
1017 skb_reserve(new->skb, (dev->hard_header_len + 15) & ~15);
1018 new->pbuf = skb_put(new->skb, dg_size);
1019 memcpy(new->pbuf + frag_off, frag_buf, frag_len);
1020
1021 list_add(&new->list, pdgl);
1022 return 0;
1023}
1024
1025static int update_partial_datagram(struct list_head *pdgl, struct list_head *lh,
1026 char *frag_buf, int frag_off, int frag_len)
1027{
1028 struct partial_datagram *pd =
1029 list_entry(lh, struct partial_datagram, list);
1030
1031 if (new_fragment(&pd->frag_info, frag_off, frag_len) < 0)
1032 return -ENOMEM;
1033
1034 memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
1035
1036	/* Move list entry to beginning of list so that oldest partial
1037	 * datagrams percolate to the end of the list */
1038 list_move(lh, pdgl);
1039 return 0;
1040}
1041
1042static int is_datagram_complete(struct list_head *lh, int dg_size)
1043{
1044 struct partial_datagram *pd;
1045 struct fragment_info *fi;
1046
1047 pd = list_entry(lh, struct partial_datagram, list);
1048 fi = list_entry(pd->frag_info.next, struct fragment_info, list);
1049
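	/* Adjoining fragments are merged on arrival, so a fully received
	 * datagram ends up represented by a single frag_info entry covering
	 * [0, dg_size); checking the first entry's length is sufficient. */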
1050 return (fi->len == dg_size);
1051}
1052
1053/* Packet reception. We convert the IP1394 encapsulation header to an
1054 * ethernet header, and fill it with some of our other fields. This is
1055 * an incoming packet from the 1394 bus. */
1056static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1057 char *buf, int len)
1058{
1059 struct sk_buff *skb;
1060 unsigned long flags;
1061 struct eth1394_priv *priv = netdev_priv(dev);
1062 union eth1394_hdr *hdr = (union eth1394_hdr *)buf;
1063 __be16 ether_type = cpu_to_be16(0); /* initialized to clear warning */
1064 int hdr_len;
1065 struct unit_directory *ud = priv->ud_list[NODEID_TO_NODE(srcid)];
1066 struct eth1394_node_info *node_info;
1067
1068 if (!ud) {
1069 struct eth1394_node_ref *node;
1070 node = eth1394_find_node_nodeid(&priv->ip_node_list, srcid);
1071 if (unlikely(!node)) {
1072 HPSB_PRINT(KERN_ERR, "ether1394 rx: sender nodeid "
1073 "lookup failure: " NODE_BUS_FMT,
1074 NODE_BUS_ARGS(priv->host, srcid));
1075 dev->stats.rx_dropped++;
1076 return -1;
1077 }
1078 ud = node->ud;
1079
1080 priv->ud_list[NODEID_TO_NODE(srcid)] = ud;
1081 }
1082
1083 node_info = dev_get_drvdata(&ud->device);
1084
1085 /* First, did we receive a fragmented or unfragmented datagram? */
1086 hdr->words.word1 = ntohs(hdr->words.word1);
1087
1088 hdr_len = hdr_type_len[hdr->common.lf];
1089
1090 if (hdr->common.lf == ETH1394_HDR_LF_UF) {
1091 /* An unfragmented datagram has been received by the ieee1394
1092 * bus. Build an skbuff around it so we can pass it to the
1093 * high level network layer. */
1094
1095 skb = dev_alloc_skb(len + dev->hard_header_len + 15);
1096 if (unlikely(!skb)) {
1097 ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
1098 dev->stats.rx_dropped++;
1099 return -1;
1100 }
1101 skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
1102 memcpy(skb_put(skb, len - hdr_len), buf + hdr_len,
1103 len - hdr_len);
1104 ether_type = hdr->uf.ether_type;
1105 } else {
1106 /* A datagram fragment has been received, now the fun begins. */
1107
1108 struct list_head *pdgl, *lh;
1109 struct partial_datagram *pd;
1110 int fg_off;
1111 int fg_len = len - hdr_len;
1112 int dg_size;
1113 int dgl;
1114 int retval;
1115 struct pdg_list *pdg = &(node_info->pdg);
1116
1117 hdr->words.word3 = ntohs(hdr->words.word3);
1118 /* The 4th header word is reserved so no need to do ntohs() */
1119
1120 if (hdr->common.lf == ETH1394_HDR_LF_FF) {
1121 ether_type = hdr->ff.ether_type;
1122 dgl = hdr->ff.dgl;
1123 dg_size = hdr->ff.dg_size + 1;
1124 fg_off = 0;
1125 } else {
1126 hdr->words.word2 = ntohs(hdr->words.word2);
1127 dgl = hdr->sf.dgl;
1128 dg_size = hdr->sf.dg_size + 1;
1129 fg_off = hdr->sf.fg_off;
1130 }
1131 spin_lock_irqsave(&pdg->lock, flags);
1132
1133 pdgl = &(pdg->list);
1134 lh = find_partial_datagram(pdgl, dgl);
1135
1136 if (lh == NULL) {
1137 while (pdg->sz >= max_partial_datagrams) {
1138 /* remove the oldest */
1139 purge_partial_datagram(pdgl->prev);
1140 pdg->sz--;
1141 }
1142
1143 retval = new_partial_datagram(dev, pdgl, dgl, dg_size,
1144 buf + hdr_len, fg_off,
1145 fg_len);
1146 if (retval < 0) {
1147 spin_unlock_irqrestore(&pdg->lock, flags);
1148 goto bad_proto;
1149 }
1150 pdg->sz++;
1151 lh = find_partial_datagram(pdgl, dgl);
1152 } else {
1153 pd = list_entry(lh, struct partial_datagram, list);
1154
1155 if (fragment_overlap(&pd->frag_info, fg_off, fg_len)) {
1156 /* Overlapping fragments, obliterate old
1157 * datagram and start new one. */
1158 purge_partial_datagram(lh);
1159 retval = new_partial_datagram(dev, pdgl, dgl,
1160 dg_size,
1161 buf + hdr_len,
1162 fg_off, fg_len);
1163 if (retval < 0) {
1164 pdg->sz--;
1165 spin_unlock_irqrestore(&pdg->lock, flags);
1166 goto bad_proto;
1167 }
1168 } else {
1169 retval = update_partial_datagram(pdgl, lh,
1170 buf + hdr_len,
1171 fg_off, fg_len);
1172 if (retval < 0) {
1173 /* Couldn't save off fragment anyway
1174 * so might as well obliterate the
1175 * datagram now. */
1176 purge_partial_datagram(lh);
1177 pdg->sz--;
1178 spin_unlock_irqrestore(&pdg->lock, flags);
1179 goto bad_proto;
1180 }
1181 } /* fragment overlap */
1182 } /* new datagram or add to existing one */
1183
1184 pd = list_entry(lh, struct partial_datagram, list);
1185
1186 if (hdr->common.lf == ETH1394_HDR_LF_FF)
1187 pd->ether_type = ether_type;
1188
1189 if (is_datagram_complete(lh, dg_size)) {
1190 ether_type = pd->ether_type;
1191 pdg->sz--;
1192 skb = skb_get(pd->skb);
1193 purge_partial_datagram(lh);
1194 spin_unlock_irqrestore(&pdg->lock, flags);
1195 } else {
1196 /* Datagram is not complete, we're done for the
1197 * moment. */
1198 spin_unlock_irqrestore(&pdg->lock, flags);
1199 return 0;
1200 }
1201	} /* unfragmented datagram or fragmented one */
1202
1203 /* Write metadata, and then pass to the receive level */
1204 skb->dev = dev;
1205 skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
1206
1207	/* Parse the encapsulation header. This actually does the job of
1208	 * converting to an ethernet frame header, as well as ARP
1209	 * conversion if needed. ARP conversion is easier in this
1210	 * direction, since we are using ethernet as our backend. */
1211 skb->protocol = ether1394_parse_encap(skb, dev, srcid, destid,
1212 ether_type);
1213
1214 spin_lock_irqsave(&priv->lock, flags);
1215
1216 if (!skb->protocol) {
1217 dev->stats.rx_errors++;
1218 dev->stats.rx_dropped++;
1219 dev_kfree_skb_any(skb);
1220 } else if (netif_rx(skb) == NET_RX_DROP) {
1221 dev->stats.rx_errors++;
1222 dev->stats.rx_dropped++;
1223 } else {
1224 dev->stats.rx_packets++;
1225 dev->stats.rx_bytes += skb->len;
1226 }
1227
1228 spin_unlock_irqrestore(&priv->lock, flags);
1229
1230bad_proto:
1231 if (netif_queue_stopped(dev))
1232 netif_wake_queue(dev);
1233
1234 return 0;
1235}
1236
1237static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
1238 quadlet_t *data, u64 addr, size_t len, u16 flags)
1239{
1240 struct eth1394_host_info *hi;
1241
1242 hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
1243 if (unlikely(!hi)) {
1244 ETH1394_PRINT_G(KERN_ERR, "No net device at fw-host%d\n",
1245 host->id);
1246 return RCODE_ADDRESS_ERROR;
1247 }
1248
1249 if (ether1394_data_handler(hi->dev, srcid, destid, (char*)data, len))
1250 return RCODE_ADDRESS_ERROR;
1251 else
1252 return RCODE_COMPLETE;
1253}
1254
1255static void ether1394_iso(struct hpsb_iso *iso)
1256{
1257 __be32 *data;
1258 char *buf;
1259 struct eth1394_host_info *hi;
1260 struct net_device *dev;
1261 unsigned int len;
1262 u32 specifier_id;
1263 u16 source_id;
1264 int i;
1265 int nready;
1266
1267 hi = hpsb_get_hostinfo(&eth1394_highlevel, iso->host);
1268 if (unlikely(!hi)) {
1269 ETH1394_PRINT_G(KERN_ERR, "No net device at fw-host%d\n",
1270 iso->host->id);
1271 return;
1272 }
1273
1274 dev = hi->dev;
1275
1276 nready = hpsb_iso_n_ready(iso);
1277 for (i = 0; i < nready; i++) {
1278 struct hpsb_iso_packet_info *info =
1279 &iso->infos[(iso->first_packet + i) % iso->buf_packets];
1280 data = (__be32 *)(iso->data_buf.kvirt + info->offset);
1281
1282 /* skip over GASP header */
1283 buf = (char *)data + 8;
1284 len = info->len - 8;
1285
1286 specifier_id = (be32_to_cpu(data[0]) & 0xffff) << 8 |
1287 (be32_to_cpu(data[1]) & 0xff000000) >> 24;
1288 source_id = be32_to_cpu(data[0]) >> 16;
1289
1290 if (info->channel != (iso->host->csr.broadcast_channel & 0x3f)
1291 || specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
1292 /* This packet is not for us */
1293 continue;
1294 }
1295 ether1394_data_handler(dev, source_id, LOCAL_BUS | ALL_NODES,
1296 buf, len);
1297 }
1298
1299 hpsb_iso_recv_release_packets(iso, i);
1301}
1302
1303/******************************************
1304 * Datagram transmission code
1305 ******************************************/
1306
1307/* Convert a standard ARP packet to 1394 ARP. The first 8 bytes (the entire
1308 * arphdr) are in the same format as the ip1394 header, so they overlap. The
1309 * rest needs to be munged a bit. The remainder of the arphdr is formatted
1310 * based on the hwaddr and ipaddr lengths, which are fixed for 1394, so the
1311 * layout is known in advance.
1312 *
1313 * Now that the EUI is used for the hardware address all we need to do to make
1314 * this work for 1394 is to insert 2 quadlets that contain max_rec size,
1315 * speed, and unicast FIFO address information between the sender_unique_id
1316 * and the IP addresses.
1317 */
1318static void ether1394_arp_to_1394arp(struct sk_buff *skb,
1319 struct net_device *dev)
1320{
1321 struct eth1394_priv *priv = netdev_priv(dev);
1322 struct arphdr *arp = (struct arphdr *)skb->data;
1323 unsigned char *arp_ptr = (unsigned char *)(arp + 1);
1324 struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
1325
1326 arp1394->hw_addr_len = 16;
1327 arp1394->sip = *(u32*)(arp_ptr + ETH1394_ALEN);
1328 arp1394->max_rec = priv->host->csr.max_rec;
1329 arp1394->sspd = priv->host->csr.lnk_spd;
1330 arp1394->fifo_hi = htons(priv->local_fifo >> 32);
1331	arp1394->fifo_lo = htonl(priv->local_fifo & 0xffffffff);
1332}
1333
1334/* We need to encapsulate the standard header with our own. We carry the
1335 * ethernet header's proto field over into our own header. */
1336static unsigned int ether1394_encapsulate_prep(unsigned int max_payload,
1337 __be16 proto,
1338 union eth1394_hdr *hdr,
1339 u16 dg_size, u16 dgl)
1340{
1341 unsigned int adj_max_payload =
1342 max_payload - hdr_type_len[ETH1394_HDR_LF_UF];
1343
1344 /* Does it all fit in one packet? */
1345 if (dg_size <= adj_max_payload) {
1346 hdr->uf.lf = ETH1394_HDR_LF_UF;
1347 hdr->uf.ether_type = proto;
1348 } else {
1349 hdr->ff.lf = ETH1394_HDR_LF_FF;
1350 hdr->ff.ether_type = proto;
1351 hdr->ff.dg_size = dg_size - 1;
1352 hdr->ff.dgl = dgl;
1353 adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_FF];
1354 }
1355 return DIV_ROUND_UP(dg_size, adj_max_payload);
1356}
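
/*
 * Worked example (illustrative numbers, not from the original source): with
 * max_payload = 1024, an unfragmented datagram may carry 1024 - 4 = 1020
 * octets. A 3000-octet datagram does not fit, so the first-fragment header
 * is used instead; each fragment then carries up to 1024 - 8 = 1016 octets,
 * giving DIV_ROUND_UP(3000, 1016) = 3 packets.
 */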
1357
1358static unsigned int ether1394_encapsulate(struct sk_buff *skb,
1359 unsigned int max_payload,
1360 union eth1394_hdr *hdr)
1361{
1362 union eth1394_hdr *bufhdr;
1363 int ftype = hdr->common.lf;
1364 int hdrsz = hdr_type_len[ftype];
1365 unsigned int adj_max_payload = max_payload - hdrsz;
1366
1367 switch (ftype) {
1368 case ETH1394_HDR_LF_UF:
1369 bufhdr = (union eth1394_hdr *)skb_push(skb, hdrsz);
1370 bufhdr->words.word1 = htons(hdr->words.word1);
1371 bufhdr->words.word2 = hdr->words.word2;
1372 break;
1373
1374 case ETH1394_HDR_LF_FF:
1375 bufhdr = (union eth1394_hdr *)skb_push(skb, hdrsz);
1376 bufhdr->words.word1 = htons(hdr->words.word1);
1377 bufhdr->words.word2 = hdr->words.word2;
1378 bufhdr->words.word3 = htons(hdr->words.word3);
1379 bufhdr->words.word4 = 0;
1380
1381 /* Set frag type here for future interior fragments */
1382 hdr->common.lf = ETH1394_HDR_LF_IF;
1383 hdr->sf.fg_off = 0;
1384 break;
1385
1386 default:
1387 hdr->sf.fg_off += adj_max_payload;
1388 bufhdr = (union eth1394_hdr *)skb_pull(skb, adj_max_payload);
1389 if (max_payload >= skb->len)
1390 hdr->common.lf = ETH1394_HDR_LF_LF;
1391 bufhdr->words.word1 = htons(hdr->words.word1);
1392 bufhdr->words.word2 = htons(hdr->words.word2);
1393 bufhdr->words.word3 = htons(hdr->words.word3);
1394 bufhdr->words.word4 = 0;
1395 }
1396 return min(max_payload, skb->len);
1397}
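
/*
 * An observation on the interior/last fragment case above (not in the
 * original comments): skb_pull() advances past payload that has already
 * been transmitted, and the 8-byte fragment header is then written over
 * the tail of the previous fragment's data. That region was already sent,
 * so overwriting it within the skb is safe and avoids copying.
 */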
1398
1399static struct hpsb_packet *ether1394_alloc_common_packet(struct hpsb_host *host)
1400{
1401 struct hpsb_packet *p;
1402
1403 p = hpsb_alloc_packet(0);
1404 if (p) {
1405 p->host = host;
1406 p->generation = get_hpsb_generation(host);
1407 p->type = hpsb_async;
1408 }
1409 return p;
1410}
1411
1412static int ether1394_prep_write_packet(struct hpsb_packet *p,
1413 struct hpsb_host *host, nodeid_t node,
1414 u64 addr, void *data, int tx_len)
1415{
1416 p->node_id = node;
1417
1418 if (hpsb_get_tlabel(p))
1419 return -EAGAIN;
1420
1421 p->tcode = TCODE_WRITEB;
1422 p->header_size = 16;
1423 p->expect_response = 1;
1424 p->header[0] =
1425 p->node_id << 16 | p->tlabel << 10 | 1 << 8 | TCODE_WRITEB << 4;
1426 p->header[1] = host->node_id << 16 | addr >> 32;
1427 p->header[2] = addr & 0xffffffff;
1428 p->header[3] = tx_len << 16;
1429 p->data_size = (tx_len + 3) & ~3;
1430 p->data = data;
1431
1432 return 0;
1433}
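
/*
 * For reference, the quadlets above follow the IEEE 1394 block write
 * request layout: header[0] carries destination_ID, tlabel, the retry code
 * (1 = retry_X) and tcode; header[1] carries source_ID and the top 16 bits
 * of the 48-bit destination offset; header[2] the low 32 offset bits; and
 * header[3] the data_length in its upper 16 bits.
 */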
1434
1435static void ether1394_prep_gasp_packet(struct hpsb_packet *p,
1436 struct eth1394_priv *priv,
1437 struct sk_buff *skb, int length)
1438{
1439 p->header_size = 4;
1440 p->tcode = TCODE_STREAM_DATA;
1441
1442 p->header[0] = length << 16 | 3 << 14 | priv->broadcast_channel << 8 |
1443 TCODE_STREAM_DATA << 4;
1444 p->data_size = length;
1445 p->data = (quadlet_t *)skb->data - 2;
1446 p->data[0] = cpu_to_be32(priv->host->node_id << 16 |
1447 ETHER1394_GASP_SPECIFIER_ID_HI);
1448 p->data[1] = cpu_to_be32(ETHER1394_GASP_SPECIFIER_ID_LO << 24 |
1449 ETHER1394_GASP_VERSION);
1450
1451 p->speed_code = priv->bc_sspd;
1452
1453 /* prevent hpsb_send_packet() from overriding our speed code */
1454 p->node_id = LOCAL_BUS | ALL_NODES;
1455}
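
/*
 * For reference: the two quadlets placed in p->data[0..1] form the GASP
 * header, i.e. source_ID plus the upper 16 bits of the IANA OUI 0x00005E,
 * then the low 8 bits of the OUI plus the RFC 2734 version. The 3 << 14 in
 * header[0] is the isochronous tag value 3 that GASP packets must carry.
 */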
1456
1457static void ether1394_free_packet(struct hpsb_packet *packet)
1458{
1459 if (packet->tcode != TCODE_STREAM_DATA)
1460 hpsb_free_tlabel(packet);
1461 hpsb_free_packet(packet);
1462}
1463
1464static void ether1394_complete_cb(void *__ptask);
1465
1466static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
1467{
1468 struct eth1394_priv *priv = ptask->priv;
1469 struct hpsb_packet *packet = NULL;
1470
1471 packet = ether1394_alloc_common_packet(priv->host);
1472 if (!packet)
1473 return -ENOMEM;
1474
1475 if (ptask->tx_type == ETH1394_GASP) {
1476 int length = tx_len + 2 * sizeof(quadlet_t);
1477
1478 ether1394_prep_gasp_packet(packet, priv, ptask->skb, length);
1479 } else if (ether1394_prep_write_packet(packet, priv->host,
1480 ptask->dest_node,
1481 ptask->addr, ptask->skb->data,
1482 tx_len)) {
1483 hpsb_free_packet(packet);
1484 return -EAGAIN;
1485 }
1486
1487 ptask->packet = packet;
1488 hpsb_set_packet_complete_task(ptask->packet, ether1394_complete_cb,
1489 ptask);
1490
1491 if (hpsb_send_packet(packet) < 0) {
1492 ether1394_free_packet(packet);
1493 return -EIO;
1494 }
1495
1496 return 0;
1497}
1498
1499/* Task function to be run when a datagram transmission is completed */
1500static void ether1394_dg_complete(struct packet_task *ptask, int fail)
1501{
1502 struct sk_buff *skb = ptask->skb;
1503 struct net_device *dev = skb->dev;
1504 struct eth1394_priv *priv = netdev_priv(dev);
1505 unsigned long flags;
1506
1507 /* Statistics */
1508 spin_lock_irqsave(&priv->lock, flags);
1509 if (fail) {
1510 dev->stats.tx_dropped++;
1511 dev->stats.tx_errors++;
1512 } else {
1513 dev->stats.tx_bytes += skb->len;
1514 dev->stats.tx_packets++;
1515 }
1516 spin_unlock_irqrestore(&priv->lock, flags);
1517
1518 dev_kfree_skb_any(skb);
1519 kmem_cache_free(packet_task_cache, ptask);
1520}
1521
1522/* Callback for when a packet has been sent and the status of that packet is
1523 * known */
1524static void ether1394_complete_cb(void *__ptask)
1525{
1526 struct packet_task *ptask = (struct packet_task *)__ptask;
1527 struct hpsb_packet *packet = ptask->packet;
1528 int fail = 0;
1529
1530 if (packet->tcode != TCODE_STREAM_DATA)
1531 fail = hpsb_packet_success(packet);
1532
1533 ether1394_free_packet(packet);
1534
1535 ptask->outstanding_pkts--;
1536 if (ptask->outstanding_pkts > 0 && !fail) {
1537 int tx_len, err;
1538
1539 /* Add the encapsulation header to the fragment */
1540 tx_len = ether1394_encapsulate(ptask->skb, ptask->max_payload,
1541 &ptask->hdr);
1542 err = ether1394_send_packet(ptask, tx_len);
1543 if (err) {
1544 if (err == -EAGAIN)
1545 ETH1394_PRINT_G(KERN_ERR, "Out of tlabels\n");
1546
1547 ether1394_dg_complete(ptask, 1);
1548 }
1549 } else {
1550 ether1394_dg_complete(ptask, fail);
1551 }
1552}
1553
1554/* Transmit a packet (called by kernel) */
1555static netdev_tx_t ether1394_tx(struct sk_buff *skb,
1556 struct net_device *dev)
1557{
1558 struct eth1394hdr hdr_buf;
1559 struct eth1394_priv *priv = netdev_priv(dev);
1560 __be16 proto;
1561 unsigned long flags;
1562 nodeid_t dest_node;
1563 eth1394_tx_type tx_type;
1564 unsigned int tx_len;
1565 unsigned int max_payload;
1566 u16 dg_size;
1567 u16 dgl;
1568 struct packet_task *ptask;
1569 struct eth1394_node_ref *node;
1570 struct eth1394_node_info *node_info = NULL;
1571
1572 ptask = kmem_cache_alloc(packet_task_cache, GFP_ATOMIC);
1573 if (ptask == NULL)
1574 goto fail;
1575
1576	/* XXX Ignore this for now. Noticed that when Mac OS X is the IRM,
1577	 * it does not set our validity bit. We need to compensate for
1578	 * that somewhere else, but not in eth1394. */
1579#if 0
1580 if ((priv->host->csr.broadcast_channel & 0xc0000000) != 0xc0000000)
1581 goto fail;
1582#endif
1583
1584 skb = skb_share_check(skb, GFP_ATOMIC);
1585 if (!skb)
1586 goto fail;
1587
1588 /* Get rid of the fake eth1394 header, but first make a copy.
1589 * We might need to rebuild the header on tx failure. */
1590 memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
1591 skb_pull(skb, ETH1394_HLEN);
1592
1593 proto = hdr_buf.h_proto;
1594 dg_size = skb->len;
1595
1596 /* Set the transmission type for the packet. ARP packets and IP
1597 * broadcast packets are sent via GASP. */
1598 if (memcmp(hdr_buf.h_dest, dev->broadcast, ETH1394_ALEN) == 0 ||
1599 proto == htons(ETH_P_ARP) ||
1600 (proto == htons(ETH_P_IP) &&
1601 IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
1602 tx_type = ETH1394_GASP;
1603 dest_node = LOCAL_BUS | ALL_NODES;
1604 max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD;
1605 BUG_ON(max_payload < 512 - ETHER1394_GASP_OVERHEAD);
1606 dgl = priv->bc_dgl;
1607 if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
1608 priv->bc_dgl++;
1609 } else {
1610 __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
1611
1612 node = eth1394_find_node_guid(&priv->ip_node_list,
1613 be64_to_cpu(guid));
1614 if (!node)
1615 goto fail;
1616
1617 node_info = dev_get_drvdata(&node->ud->device);
1618 if (node_info->fifo == CSR1212_INVALID_ADDR_SPACE)
1619 goto fail;
1620
1621 dest_node = node->ud->ne->nodeid;
1622 max_payload = node_info->maxpayload;
1623 BUG_ON(max_payload < 512 - ETHER1394_GASP_OVERHEAD);
1624
1625 dgl = node_info->dgl;
1626 if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
1627 node_info->dgl++;
1628 tx_type = ETH1394_WRREQ;
1629 }
1630
1631 /* If this is an ARP packet, convert it */
1632 if (proto == htons(ETH_P_ARP))
1633 ether1394_arp_to_1394arp(skb, dev);
1634
1635 ptask->hdr.words.word1 = 0;
1636 ptask->hdr.words.word2 = 0;
1637 ptask->hdr.words.word3 = 0;
1638 ptask->hdr.words.word4 = 0;
1639 ptask->skb = skb;
1640 ptask->priv = priv;
1641 ptask->tx_type = tx_type;
1642
1643 if (tx_type != ETH1394_GASP) {
1644 u64 addr;
1645
1646 spin_lock_irqsave(&priv->lock, flags);
1647 addr = node_info->fifo;
1648 spin_unlock_irqrestore(&priv->lock, flags);
1649
1650 ptask->addr = addr;
1651 ptask->dest_node = dest_node;
1652 }
1653
1655 ptask->max_payload = max_payload;
1656 ptask->outstanding_pkts = ether1394_encapsulate_prep(max_payload,
1657 proto, &ptask->hdr, dg_size, dgl);
1658
1659 /* Add the encapsulation header to the fragment */
1660 tx_len = ether1394_encapsulate(skb, max_payload, &ptask->hdr);
1661 dev->trans_start = jiffies;
1662 if (ether1394_send_packet(ptask, tx_len)) {
1663 if (dest_node == (LOCAL_BUS | ALL_NODES))
1664 goto fail;
1665
1666		/* At this point we want to restore the packet. When we return
1667		 * NETDEV_TX_BUSY, this routine will be entered again with the
1668		 * same skb, and we need it to look the same. So we pull 4 more
1669		 * bytes, then build the header again. */
1670 skb_pull(skb, 4);
1671 ether1394_header(skb, dev, ntohs(hdr_buf.h_proto),
1672 hdr_buf.h_dest, NULL, 0);
1673
1674 /* Most failures of ether1394_send_packet are recoverable. */
1675 netif_stop_queue(dev);
1676 priv->wake_node = dest_node;
1677 schedule_work(&priv->wake);
1678 kmem_cache_free(packet_task_cache, ptask);
1679 return NETDEV_TX_BUSY;
1680 }
1681
1682 return NETDEV_TX_OK;
1683fail:
1684 if (ptask)
1685 kmem_cache_free(packet_task_cache, ptask);
1686
1687 if (skb != NULL)
1688 dev_kfree_skb(skb);
1689
1690 spin_lock_irqsave(&priv->lock, flags);
1691 dev->stats.tx_dropped++;
1692 dev->stats.tx_errors++;
1693 spin_unlock_irqrestore(&priv->lock, flags);
1694
1695 return NETDEV_TX_OK;
1696}
1697
1698static void ether1394_get_drvinfo(struct net_device *dev,
1699 struct ethtool_drvinfo *info)
1700{
1701 strcpy(info->driver, driver_name);
1702 strcpy(info->bus_info, "ieee1394"); /* FIXME provide more detail? */
1703}
1704
1705static const struct ethtool_ops ethtool_ops = {
1706 .get_drvinfo = ether1394_get_drvinfo
1707};
1708
1709static int __init ether1394_init_module(void)
1710{
1711 int err;
1712
1713 packet_task_cache = kmem_cache_create("packet_task",
1714 sizeof(struct packet_task),
1715 0, 0, NULL);
1716 if (!packet_task_cache)
1717 return -ENOMEM;
1718
1719 hpsb_register_highlevel(&eth1394_highlevel);
1720 err = hpsb_register_protocol(&eth1394_proto_driver);
1721 if (err) {
1722 hpsb_unregister_highlevel(&eth1394_highlevel);
1723 kmem_cache_destroy(packet_task_cache);
1724 }
1725 return err;
1726}
1727
1728static void __exit ether1394_exit_module(void)
1729{
1730 hpsb_unregister_protocol(&eth1394_proto_driver);
1731 hpsb_unregister_highlevel(&eth1394_highlevel);
1732 kmem_cache_destroy(packet_task_cache);
1733}
1734
1735module_init(ether1394_init_module);
1736module_exit(ether1394_exit_module);
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
deleted file mode 100644
index d53bac47b86f..000000000000
--- a/drivers/ieee1394/eth1394.h
+++ /dev/null
@@ -1,234 +0,0 @@
1/*
2 * eth1394.h -- Ethernet driver for Linux IEEE-1394 Subsystem
3 *
4 * Copyright (C) 2000 Bonin Franck <boninf@free.fr>
5 * (C) 2001 Ben Collins <bcollins@debian.org>
6 *
7 * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#ifndef __ETH1394_H
25#define __ETH1394_H
26
27#include <linux/netdevice.h>
28#include <linux/skbuff.h>
29#include <asm/byteorder.h>
30
31#include "ieee1394.h"
32#include "ieee1394_types.h"
33
34/* Register for incoming packets. This is 4096 bytes, which supports up to
35 * S3200 (per Table 16-3 of IEEE 1394b-2002). */
36#define ETHER1394_REGION_ADDR_LEN 4096
37
38/* GASP identifier numbers for IPv4 over IEEE 1394 */
39#define ETHER1394_GASP_SPECIFIER_ID 0x00005E
40#define ETHER1394_GASP_SPECIFIER_ID_HI ((0x00005E >> 8) & 0xffff)
41#define ETHER1394_GASP_SPECIFIER_ID_LO (0x00005E & 0xff)
42#define ETHER1394_GASP_VERSION 1
43
44#define ETHER1394_GASP_OVERHEAD (2 * sizeof(quadlet_t)) /* for GASP header */
45
46#define ETHER1394_GASP_BUFFERS 16
47
48#define NODE_SET (ALL_NODES + 1) /* Node set == 64 */
49
50enum eth1394_bc_states { ETHER1394_BC_ERROR,
51 ETHER1394_BC_RUNNING,
52 ETHER1394_BC_STOPPED };
53
54
55/* Private structure for our ethernet driver */
56struct eth1394_priv {
57 struct hpsb_host *host; /* The card for this dev */
58 u16 bc_maxpayload; /* Max broadcast payload */
59 u8 bc_sspd; /* Max broadcast speed */
60 u64 local_fifo; /* Local FIFO Address */
61 spinlock_t lock; /* Private lock */
62 int broadcast_channel; /* Async stream Broadcast Channel */
63 enum eth1394_bc_states bc_state; /* broadcast channel state */
64 struct hpsb_iso *iso; /* Async stream recv handle */
65 int bc_dgl; /* Outgoing broadcast datagram label */
66 struct list_head ip_node_list; /* List of IP capable nodes */
67 struct unit_directory *ud_list[ALL_NODES]; /* Cached unit dir list */
68
69 struct work_struct wake; /* Wake up after xmit failure */
70 struct net_device *wake_dev; /* Stupid backlink for .wake */
71 nodeid_t wake_node; /* Destination of failed xmit */
72};
73
74
75/* Define a fake hardware header format for the networking core. Note that
76 * header size cannot exceed 16 bytes as that is the size of the header cache.
 77 * Also, we do not need the source address in the header, so we omit it and
 78 * keep the header under 16 bytes. */
79#define ETH1394_ALEN (8)
80#define ETH1394_HLEN (10)
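/* That is, an 8-byte EUI-64 destination plus a 2-byte ethertype, keeping the
 * fake header comfortably under the 16-byte hh_cache limit noted above. */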
81
82struct eth1394hdr {
83 unsigned char h_dest[ETH1394_ALEN]; /* destination eth1394 addr */
84 __be16 h_proto; /* packet type ID field */
85} __attribute__((packed));
86
87static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb)
88{
89 return (struct eth1394hdr *)skb_mac_header(skb);
90}
91
92typedef enum {ETH1394_GASP, ETH1394_WRREQ} eth1394_tx_type;
93
94/* IP1394 headers */
95
96/* Unfragmented */
97#if defined __BIG_ENDIAN_BITFIELD
98struct eth1394_uf_hdr {
99 u16 lf:2;
100 u16 res:14;
101 __be16 ether_type; /* Ethernet packet type */
102} __attribute__((packed));
103#elif defined __LITTLE_ENDIAN_BITFIELD
104struct eth1394_uf_hdr {
105 u16 res:14;
106 u16 lf:2;
107 __be16 ether_type;
108} __attribute__((packed));
109#else
110#error Unknown bit field type
111#endif
112
113/* First fragment */
114#if defined __BIG_ENDIAN_BITFIELD
115struct eth1394_ff_hdr {
116 u16 lf:2;
117 u16 res1:2;
118 u16 dg_size:12; /* Datagram size */
119 __be16 ether_type; /* Ethernet packet type */
120 u16 dgl; /* Datagram label */
121 u16 res2;
122} __attribute__((packed));
123#elif defined __LITTLE_ENDIAN_BITFIELD
124struct eth1394_ff_hdr {
125 u16 dg_size:12;
126 u16 res1:2;
127 u16 lf:2;
128 __be16 ether_type;
129 u16 dgl;
130 u16 res2;
131} __attribute__((packed));
132#else
133#error Unknown bit field type
134#endif
135
136/* XXX: Subsequent fragments, including last */
137#if defined __BIG_ENDIAN_BITFIELD
138struct eth1394_sf_hdr {
139 u16 lf:2;
140 u16 res1:2;
141 u16 dg_size:12; /* Datagram size */
142 u16 res2:4;
143 u16 fg_off:12; /* Fragment offset */
144 u16 dgl; /* Datagram label */
145 u16 res3;
146} __attribute__((packed));
147#elif defined __LITTLE_ENDIAN_BITFIELD
148struct eth1394_sf_hdr {
149 u16 dg_size:12;
150 u16 res1:2;
151 u16 lf:2;
152 u16 fg_off:12;
153 u16 res2:4;
154 u16 dgl;
155 u16 res3;
156} __attribute__((packed));
157#else
158#error Unknown bit field type
159#endif
160
161#if defined __BIG_ENDIAN_BITFIELD
162struct eth1394_common_hdr {
163 u16 lf:2;
164 u16 pad1:14;
165} __attribute__((packed));
166#elif defined __LITTLE_ENDIAN_BITFIELD
167struct eth1394_common_hdr {
168 u16 pad1:14;
169 u16 lf:2;
170} __attribute__((packed));
171#else
172#error Unknown bit field type
173#endif
174
175struct eth1394_hdr_words {
176 u16 word1;
177 u16 word2;
178 u16 word3;
179 u16 word4;
180};
181
182union eth1394_hdr {
183 struct eth1394_common_hdr common;
184 struct eth1394_uf_hdr uf;
185 struct eth1394_ff_hdr ff;
186 struct eth1394_sf_hdr sf;
187 struct eth1394_hdr_words words;
188};
189
190/* End of IP1394 headers */
191
192/* Fragment types */
193#define ETH1394_HDR_LF_UF 0 /* unfragmented */
194#define ETH1394_HDR_LF_FF 1 /* first fragment */
195#define ETH1394_HDR_LF_LF 2 /* last fragment */
196#define ETH1394_HDR_LF_IF 3 /* interior fragment */
197
198#define IP1394_HW_ADDR_LEN 16 /* As per RFC */
199
200/* Our arp packet (ARPHRD_IEEE1394) */
201struct eth1394_arp {
202 u16 hw_type; /* 0x0018 */
203 u16 proto_type; /* 0x0806 */
204 u8 hw_addr_len; /* 16 */
205 u8 ip_addr_len; /* 4 */
206 u16 opcode; /* ARP Opcode */
207 /* Above is exactly the same format as struct arphdr */
208
209 __be64 s_uniq_id; /* Sender's 64bit EUI */
210 u8 max_rec; /* Sender's max packet size */
211 u8 sspd; /* Sender's max speed */
212 __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */
213 __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
214 u32 sip; /* Sender's IP Address */
215 u32 tip; /* IP Address of requested hw addr */
216};
217
218/* Transmit watchdog timeout (dev->watchdog_timeo), in jiffies */
219#define ETHER1394_TIMEOUT 100000
220
221/* This is our task struct. It's used for the packet complete callback. */
222struct packet_task {
223 struct sk_buff *skb;
224 int outstanding_pkts;
225 eth1394_tx_type tx_type;
226 int max_payload;
227 struct hpsb_packet *packet;
228 struct eth1394_priv *priv;
229 union eth1394_hdr hdr;
230 u64 addr;
231 u16 dest_node;
232};
233
234#endif /* __ETH1394_H */
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
deleted file mode 100644
index 4bc443546e04..000000000000
--- a/drivers/ieee1394/highlevel.c
+++ /dev/null
@@ -1,691 +0,0 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * Copyright (C) 1999 Andreas E. Bombe
5 *
6 * This code is licensed under the GPL. See the file COPYING in the root
7 * directory of the kernel sources for details.
8 *
9 *
10 * Contributions:
11 *
12 * Christian Toegel <christian.toegel@gmx.at>
13 * unregister address space
14 *
15 * Manfred Weihs <weihs@ict.tuwien.ac.at>
16 * unregister address space
17 *
18 */
19
20#include <linux/slab.h>
21#include <linux/list.h>
22#include <linux/bitops.h>
23
24#include "ieee1394.h"
25#include "ieee1394_types.h"
26#include "hosts.h"
27#include "ieee1394_core.h"
28#include "highlevel.h"
29#include "nodemgr.h"
30
31
32struct hl_host_info {
33 struct list_head list;
34 struct hpsb_host *host;
35 size_t size;
36 unsigned long key;
37 void *data;
38};
39
40
41static LIST_HEAD(hl_drivers);
42static DECLARE_RWSEM(hl_drivers_sem);
43
44static LIST_HEAD(hl_irqs);
45static DEFINE_RWLOCK(hl_irqs_lock);
46
47static DEFINE_RWLOCK(addr_space_lock);
48
49
50static struct hl_host_info *hl_get_hostinfo(struct hpsb_highlevel *hl,
51 struct hpsb_host *host)
52{
53 struct hl_host_info *hi = NULL;
54
55 if (!hl || !host)
56 return NULL;
57
58 read_lock(&hl->host_info_lock);
59 list_for_each_entry(hi, &hl->host_info_list, list) {
60 if (hi->host == host) {
61 read_unlock(&hl->host_info_lock);
62 return hi;
63 }
64 }
65 read_unlock(&hl->host_info_lock);
66 return NULL;
67}
68
69/**
70 * hpsb_get_hostinfo - retrieve a hostinfo pointer bound to this driver/host
71 *
72 * Returns a per @host and @hl driver data structure that was previously stored
73 * by hpsb_create_hostinfo.
74 */
75void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
76{
77 struct hl_host_info *hi = hl_get_hostinfo(hl, host);
78
79 return hi ? hi->data : NULL;
80}
81
82/**
83 * hpsb_create_hostinfo - allocate a hostinfo pointer bound to this driver/host
84 *
 85 * Allocate a hostinfo pointer backed by memory with @data_size and bind it
 86 * to this @hl driver and @host. If @data_size is zero, then the return here is
87 * only valid for error checking.
88 */
89void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
90 size_t data_size)
91{
92 struct hl_host_info *hi;
93 void *data;
94 unsigned long flags;
95
96 hi = hl_get_hostinfo(hl, host);
97 if (hi) {
98 HPSB_ERR("%s called hpsb_create_hostinfo when hostinfo already"
99 " exists", hl->name);
100 return NULL;
101 }
102
103 hi = kzalloc(sizeof(*hi) + data_size, GFP_ATOMIC);
104 if (!hi)
105 return NULL;
106
107 if (data_size) {
108 data = hi->data = hi + 1;
109 hi->size = data_size;
110 } else
111 data = hi;
112
113 hi->host = host;
114
115 write_lock_irqsave(&hl->host_info_lock, flags);
116 list_add_tail(&hi->list, &hl->host_info_list);
117 write_unlock_irqrestore(&hl->host_info_lock, flags);
118
119 return data;
120}
121
122/**
123 * hpsb_set_hostinfo - set the hostinfo pointer to something useful
124 *
125 * Usually follows a call to hpsb_create_hostinfo, where the size is 0.
126 */
127int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
128 void *data)
129{
130 struct hl_host_info *hi;
131
132 hi = hl_get_hostinfo(hl, host);
133 if (hi) {
134 if (!hi->size && !hi->data) {
135 hi->data = data;
136 return 0;
137 } else
138 HPSB_ERR("%s called hpsb_set_hostinfo when hostinfo "
139 "already has data", hl->name);
140 } else
141 HPSB_ERR("%s called hpsb_set_hostinfo when no hostinfo exists",
142 hl->name);
143 return -EINVAL;
144}
145
146/**
147 * hpsb_destroy_hostinfo - free and remove a hostinfo pointer
148 *
149 * Free and remove the hostinfo pointer bound to this @hl driver and @host.
150 */
151void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
152{
153 struct hl_host_info *hi;
154
155 hi = hl_get_hostinfo(hl, host);
156 if (hi) {
157 unsigned long flags;
158 write_lock_irqsave(&hl->host_info_lock, flags);
159 list_del(&hi->list);
160 write_unlock_irqrestore(&hl->host_info_lock, flags);
161 kfree(hi);
162 }
163 return;
164}
165
166/**
167 * hpsb_set_hostinfo_key - set an alternate lookup key for a hostinfo
168 *
169 * Sets an alternate lookup key for the hostinfo bound to this @hl driver and
170 * @host.
171 */
172void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
173 unsigned long key)
174{
175 struct hl_host_info *hi;
176
177 hi = hl_get_hostinfo(hl, host);
178 if (hi)
179 hi->key = key;
180 return;
181}
182
183/**
184 * hpsb_get_hostinfo_bykey - retrieve a hostinfo pointer by its alternate key
185 */
186void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key)
187{
188 struct hl_host_info *hi;
189 void *data = NULL;
190
191 if (!hl)
192 return NULL;
193
194 read_lock(&hl->host_info_lock);
195 list_for_each_entry(hi, &hl->host_info_list, list) {
196 if (hi->key == key) {
197 data = hi->data;
198 break;
199 }
200 }
201 read_unlock(&hl->host_info_lock);
202 return data;
203}
204
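Taken together, the hostinfo calls above give a highlevel driver private per-host data. A minimal usage sketch with made-up names (my_highlevel is assumed to be registered elsewhere):

struct my_host_data {
	int packets_seen;
};

static struct hpsb_highlevel my_highlevel;

static void my_add_host(struct hpsb_host *host)
{
	struct my_host_data *md;

	md = hpsb_create_hostinfo(&my_highlevel, host, sizeof(*md));
	if (!md)
		return;
	md->packets_seen = 0;
	/* optional: allow lookups by host->id instead of the host pointer */
	hpsb_set_hostinfo_key(&my_highlevel, host, host->id);
}

static void my_remove_host(struct hpsb_host *host)
{
	hpsb_destroy_hostinfo(&my_highlevel, host);
}

Elsewhere, hpsb_get_hostinfo(&my_highlevel, host) or, once the key has been set, hpsb_get_hostinfo_bykey(&my_highlevel, host->id) returns the same per-host pointer.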
205static int highlevel_for_each_host_reg(struct hpsb_host *host, void *__data)
206{
207 struct hpsb_highlevel *hl = __data;
208
209 hl->add_host(host);
210
211 if (host->update_config_rom && hpsb_update_config_rom_image(host) < 0)
212 HPSB_ERR("Failed to generate Configuration ROM image for host "
213 "%s-%d", hl->name, host->id);
214 return 0;
215}
216
217/**
218 * hpsb_register_highlevel - register highlevel driver
219 *
220 * The name pointer in @hl has to stay valid at all times because the string is
221 * not copied.
222 */
223void hpsb_register_highlevel(struct hpsb_highlevel *hl)
224{
225 unsigned long flags;
226
227 hpsb_init_highlevel(hl);
228 INIT_LIST_HEAD(&hl->addr_list);
229
230 down_write(&hl_drivers_sem);
231 list_add_tail(&hl->hl_list, &hl_drivers);
232 up_write(&hl_drivers_sem);
233
234 write_lock_irqsave(&hl_irqs_lock, flags);
235 list_add_tail(&hl->irq_list, &hl_irqs);
236 write_unlock_irqrestore(&hl_irqs_lock, flags);
237
238 if (hl->add_host)
239 nodemgr_for_each_host(hl, highlevel_for_each_host_reg);
240 return;
241}
242
243static void __delete_addr(struct hpsb_address_serve *as)
244{
245 list_del(&as->host_list);
246 list_del(&as->hl_list);
247 kfree(as);
248}
249
250static void __unregister_host(struct hpsb_highlevel *hl, struct hpsb_host *host,
251 int update_cr)
252{
253 unsigned long flags;
254 struct list_head *lh, *next;
255 struct hpsb_address_serve *as;
256
257 /* First, let the highlevel driver unreg */
258 if (hl->remove_host)
259 hl->remove_host(host);
260
261 /* Remove any addresses that are matched for this highlevel driver
262 * and this particular host. */
263 write_lock_irqsave(&addr_space_lock, flags);
264 list_for_each_safe (lh, next, &hl->addr_list) {
265 as = list_entry(lh, struct hpsb_address_serve, hl_list);
266 if (as->host == host)
267 __delete_addr(as);
268 }
269 write_unlock_irqrestore(&addr_space_lock, flags);
270
271 /* Now update the config-rom to reflect anything removed by the
272 * highlevel driver. */
273 if (update_cr && host->update_config_rom &&
274 hpsb_update_config_rom_image(host) < 0)
275 HPSB_ERR("Failed to generate Configuration ROM image for host "
276 "%s-%d", hl->name, host->id);
277
278 /* Finally remove all the host info associated between these two. */
279 hpsb_destroy_hostinfo(hl, host);
280}
281
282static int highlevel_for_each_host_unreg(struct hpsb_host *host, void *__data)
283{
284 struct hpsb_highlevel *hl = __data;
285
286 __unregister_host(hl, host, 1);
287 return 0;
288}
289
290/**
291 * hpsb_unregister_highlevel - unregister highlevel driver
292 */
293void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
294{
295 unsigned long flags;
296
297 write_lock_irqsave(&hl_irqs_lock, flags);
298 list_del(&hl->irq_list);
299 write_unlock_irqrestore(&hl_irqs_lock, flags);
300
301 down_write(&hl_drivers_sem);
302 list_del(&hl->hl_list);
303 up_write(&hl_drivers_sem);
304
305 nodemgr_for_each_host(hl, highlevel_for_each_host_unreg);
306}
307
308/**
309 * hpsb_allocate_and_register_addrspace - allocate and register an address space
310 *
311 * @start and @end are 48 bit addresses and have to be quadlet aligned.
312 * They delimit the window in which a free region of @size bytes, aligned
313 * to @alignment, is searched; pass CSR1212_INVALID_ADDR_SPACE for both to
314 * use the default range above the controller's posted write area. This
315 * function can be called multiple times for a single hpsb_highlevel @hl.
316 *
317 * Returns the start address of the allocated region on success, or
318 * CSR1212_INVALID_ADDR_SPACE on failure. Address spaces can be unregistered
319 * with hpsb_unregister_addrspace; any that remain are freed with @hl.
320 */
321u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
322 struct hpsb_host *host,
323 const struct hpsb_address_ops *ops,
324 u64 size, u64 alignment,
325 u64 start, u64 end)
326{
327 struct hpsb_address_serve *as, *a1, *a2;
328 struct list_head *entry;
329 u64 retval = CSR1212_INVALID_ADDR_SPACE;
330 unsigned long flags;
331 u64 align_mask = ~(alignment - 1);
332
333 if ((alignment & 3) || (alignment > 0x800000000000ULL) ||
334 (hweight64(alignment) != 1)) {
335 HPSB_ERR("%s called with invalid alignment: 0x%016llx",
336 __func__, (unsigned long long)alignment);
337 return retval;
338 }
339
340 /* default range,
341 * avoids controller's posted write area (see OHCI 1.1 clause 1.5) */
342 if (start == CSR1212_INVALID_ADDR_SPACE &&
343 end == CSR1212_INVALID_ADDR_SPACE) {
344 start = host->middle_addr_space;
345 end = CSR1212_ALL_SPACE_END;
346 }
347
348 if (((start|end) & ~align_mask) || (start >= end) ||
349 (end > CSR1212_ALL_SPACE_END)) {
350 HPSB_ERR("%s called with invalid addresses "
351 "(start = %012Lx end = %012Lx)", __func__,
352 (unsigned long long)start,(unsigned long long)end);
353 return retval;
354 }
355
356 as = kmalloc(sizeof(*as), GFP_KERNEL);
357 if (!as)
358 return retval;
359
360 INIT_LIST_HEAD(&as->host_list);
361 INIT_LIST_HEAD(&as->hl_list);
362 as->op = ops;
363 as->host = host;
364
365 write_lock_irqsave(&addr_space_lock, flags);
366 list_for_each(entry, &host->addr_space) {
367 u64 a1sa, a1ea;
368 u64 a2sa, a2ea;
369
370 a1 = list_entry(entry, struct hpsb_address_serve, host_list);
371 a2 = list_entry(entry->next, struct hpsb_address_serve,
372 host_list);
373
374 a1sa = a1->start & align_mask;
375 a1ea = (a1->end + alignment - 1) & align_mask;
376 a2sa = a2->start & align_mask;
377 a2ea = (a2->end + alignment - 1) & align_mask;
378
379 if ((a2sa - a1ea >= size) && (a2sa - start >= size) &&
380 (a2sa > start)) {
381 as->start = max(start, a1ea);
382 as->end = as->start + size;
383 list_add(&as->host_list, entry);
384 list_add_tail(&as->hl_list, &hl->addr_list);
385 retval = as->start;
386 break;
387 }
388 }
389 write_unlock_irqrestore(&addr_space_lock, flags);
390
391 if (retval == CSR1212_INVALID_ADDR_SPACE)
392 kfree(as);
393 return retval;
394}
395
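For illustration, a caller that wants a 4 KiB region, 4 KiB aligned, anywhere in the default range could look like the following sketch; everything except the API call is made up:

/* returns the allocated base address or CSR1212_INVALID_ADDR_SPACE */
static u64 my_alloc_fifo(struct hpsb_highlevel *hl, struct hpsb_host *host,
			 const struct hpsb_address_ops *ops)
{
	return hpsb_allocate_and_register_addrspace(hl, host, ops,
			0x1000 /* size */, 0x1000 /* alignment */,
			CSR1212_INVALID_ADDR_SPACE,
			CSR1212_INVALID_ADDR_SPACE);
}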
396/**
397 * hpsb_register_addrspace - register a host address space
398 *
399 * @start and @end are 48 bit pointers and have to be quadlet aligned.
400 * @end points to the first address behind the handled addresses. This
401 * function can be called multiple times for a single hpsb_highlevel @hl to
402 * implement sparse register sets. The requested region must not overlap any
403 * previously allocated region, otherwise registering will fail.
404 *
405 * Returns 1 on success and 0 on failure. Address spaces can be
406 * unregistered with hpsb_unregister_addrspace. All remaining address spaces
407 * are automatically deallocated together with the hpsb_highlevel @hl.
408 */
409int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
410 const struct hpsb_address_ops *ops,
411 u64 start, u64 end)
412{
413 struct hpsb_address_serve *as;
414 struct list_head *lh;
415 int retval = 0;
416 unsigned long flags;
417
418 if (((start|end) & 3) || (start >= end) ||
419 (end > CSR1212_ALL_SPACE_END)) {
420 HPSB_ERR("%s called with invalid addresses", __func__);
421 return 0;
422 }
423
424 as = kmalloc(sizeof(*as), GFP_KERNEL);
425 if (!as)
426 return 0;
427
428 INIT_LIST_HEAD(&as->host_list);
429 INIT_LIST_HEAD(&as->hl_list);
430 as->op = ops;
431 as->start = start;
432 as->end = end;
433 as->host = host;
434
435 write_lock_irqsave(&addr_space_lock, flags);
436 list_for_each(lh, &host->addr_space) {
437 struct hpsb_address_serve *as_this =
438 list_entry(lh, struct hpsb_address_serve, host_list);
439 struct hpsb_address_serve *as_next =
440 list_entry(lh->next, struct hpsb_address_serve,
441 host_list);
442
443 if (as_this->end > as->start)
444 break;
445
446 if (as_next->start >= as->end) {
447 list_add(&as->host_list, lh);
448 list_add_tail(&as->hl_list, &hl->addr_list);
449 retval = 1;
450 break;
451 }
452 }
453 write_unlock_irqrestore(&addr_space_lock, flags);
454
455 if (retval == 0)
456 kfree(as);
457 return retval;
458}
459
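In contrast to the searching variant above, a fixed range is claimed as in this sketch; the addresses are illustrative and must be quadlet aligned and free of overlaps:

/* claim an 8 byte register block at a made-up fixed address */
static int my_claim_regs(struct hpsb_highlevel *hl, struct hpsb_host *host,
			 const struct hpsb_address_ops *ops)
{
	return hpsb_register_addrspace(hl, host, ops,
				       0xfffff0002000ULL, 0xfffff0002008ULL);
}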
460int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
461 u64 start)
462{
463 int retval = 0;
464 struct hpsb_address_serve *as;
465 struct list_head *lh, *next;
466 unsigned long flags;
467
468 write_lock_irqsave(&addr_space_lock, flags);
469 list_for_each_safe (lh, next, &hl->addr_list) {
470 as = list_entry(lh, struct hpsb_address_serve, hl_list);
471 if (as->start == start && as->host == host) {
472 __delete_addr(as);
473 retval = 1;
474 break;
475 }
476 }
477 write_unlock_irqrestore(&addr_space_lock, flags);
478 return retval;
479}
480
481static const struct hpsb_address_ops dummy_ops;
482
483/* dummy address spaces as lower and upper bounds of the host's address space list */
484static void init_hpsb_highlevel(struct hpsb_host *host)
485{
486 INIT_LIST_HEAD(&host->dummy_zero_addr.host_list);
487 INIT_LIST_HEAD(&host->dummy_zero_addr.hl_list);
488 INIT_LIST_HEAD(&host->dummy_max_addr.host_list);
489 INIT_LIST_HEAD(&host->dummy_max_addr.hl_list);
490
491 host->dummy_zero_addr.op = host->dummy_max_addr.op = &dummy_ops;
492
493 host->dummy_zero_addr.start = host->dummy_zero_addr.end = 0;
494 host->dummy_max_addr.start = host->dummy_max_addr.end = ((u64) 1) << 48;
495
496 list_add_tail(&host->dummy_zero_addr.host_list, &host->addr_space);
497 list_add_tail(&host->dummy_max_addr.host_list, &host->addr_space);
498}
499
500void highlevel_add_host(struct hpsb_host *host)
501{
502 struct hpsb_highlevel *hl;
503
504 init_hpsb_highlevel(host);
505
506 down_read(&hl_drivers_sem);
507 list_for_each_entry(hl, &hl_drivers, hl_list) {
508 if (hl->add_host)
509 hl->add_host(host);
510 }
511 up_read(&hl_drivers_sem);
512 if (host->update_config_rom && hpsb_update_config_rom_image(host) < 0)
513 HPSB_ERR("Failed to generate Configuration ROM image for host "
514 "%s-%d", hl->name, host->id);
515}
516
517void highlevel_remove_host(struct hpsb_host *host)
518{
519 struct hpsb_highlevel *hl;
520
521 down_read(&hl_drivers_sem);
522 list_for_each_entry(hl, &hl_drivers, hl_list)
523 __unregister_host(hl, host, 0);
524 up_read(&hl_drivers_sem);
525}
526
527void highlevel_host_reset(struct hpsb_host *host)
528{
529 unsigned long flags;
530 struct hpsb_highlevel *hl;
531
532 read_lock_irqsave(&hl_irqs_lock, flags);
533 list_for_each_entry(hl, &hl_irqs, irq_list) {
534 if (hl->host_reset)
535 hl->host_reset(host);
536 }
537 read_unlock_irqrestore(&hl_irqs_lock, flags);
538}
539
540void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
541 void *data, size_t length)
542{
543 unsigned long flags;
544 struct hpsb_highlevel *hl;
545 int cts = ((quadlet_t *)data)[0] >> 4;
546
547 read_lock_irqsave(&hl_irqs_lock, flags);
548 list_for_each_entry(hl, &hl_irqs, irq_list) {
549 if (hl->fcp_request)
550 hl->fcp_request(host, nodeid, direction, cts, data,
551 length);
552 }
553 read_unlock_irqrestore(&hl_irqs_lock, flags);
554}
555
556/*
557 * highlevel_read, highlevel_write, highlevel_lock, highlevel_lock64:
558 *
559 * These functions are called to handle transactions. They are called when a
560 * packet arrives. The flags argument contains the second word of the first
561 * header quadlet of the incoming packet (containing transaction label, retry
562 * code, transaction code and priority). These functions either return a
563 * response code or a negative number. In the former case a response will be
564 * generated. In the latter case, no response will be sent and the driver which
565 * handled the request will send the response itself.
566 */
567int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr,
568 unsigned int length, u16 flags)
569{
570 struct hpsb_address_serve *as;
571 unsigned int partlength;
572 int rcode = RCODE_ADDRESS_ERROR;
573
574 read_lock(&addr_space_lock);
575 list_for_each_entry(as, &host->addr_space, host_list) {
576 if (as->start > addr)
577 break;
578
579 if (as->end > addr) {
580 partlength = min(as->end - addr, (u64) length);
581
582 if (as->op->read)
583 rcode = as->op->read(host, nodeid, data,
584 addr, partlength, flags);
585 else
586 rcode = RCODE_TYPE_ERROR;
587
588 data += partlength;
589 length -= partlength;
590 addr += partlength;
591
592 if ((rcode != RCODE_COMPLETE) || !length)
593 break;
594 }
595 }
596 read_unlock(&addr_space_lock);
597
598 if (length && (rcode == RCODE_COMPLETE))
599 rcode = RCODE_ADDRESS_ERROR;
600 return rcode;
601}
602
603int highlevel_write(struct hpsb_host *host, int nodeid, int destid, void *data,
604 u64 addr, unsigned int length, u16 flags)
605{
606 struct hpsb_address_serve *as;
607 unsigned int partlength;
608 int rcode = RCODE_ADDRESS_ERROR;
609
610 read_lock(&addr_space_lock);
611 list_for_each_entry(as, &host->addr_space, host_list) {
612 if (as->start > addr)
613 break;
614
615 if (as->end > addr) {
616 partlength = min(as->end - addr, (u64) length);
617
618 if (as->op->write)
619 rcode = as->op->write(host, nodeid, destid,
620 data, addr, partlength,
621 flags);
622 else
623 rcode = RCODE_TYPE_ERROR;
624
625 data += partlength;
626 length -= partlength;
627 addr += partlength;
628
629 if ((rcode != RCODE_COMPLETE) || !length)
630 break;
631 }
632 }
633 read_unlock(&addr_space_lock);
634
635 if (length && (rcode == RCODE_COMPLETE))
636 rcode = RCODE_ADDRESS_ERROR;
637 return rcode;
638}
639
640int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
641 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
642 u16 flags)
643{
644 struct hpsb_address_serve *as;
645 int rcode = RCODE_ADDRESS_ERROR;
646
647 read_lock(&addr_space_lock);
648 list_for_each_entry(as, &host->addr_space, host_list) {
649 if (as->start > addr)
650 break;
651
652 if (as->end > addr) {
653 if (as->op->lock)
654 rcode = as->op->lock(host, nodeid, store, addr,
655 data, arg, ext_tcode,
656 flags);
657 else
658 rcode = RCODE_TYPE_ERROR;
659 break;
660 }
661 }
662 read_unlock(&addr_space_lock);
663 return rcode;
664}
665
666int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
667 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
668 u16 flags)
669{
670 struct hpsb_address_serve *as;
671 int rcode = RCODE_ADDRESS_ERROR;
672
673 read_lock(&addr_space_lock);
674
675 list_for_each_entry(as, &host->addr_space, host_list) {
676 if (as->start > addr)
677 break;
678
679 if (as->end > addr) {
680 if (as->op->lock64)
681 rcode = as->op->lock64(host, nodeid, store,
682 addr, data, arg,
683 ext_tcode, flags);
684 else
685 rcode = RCODE_TYPE_ERROR;
686 break;
687 }
688 }
689 read_unlock(&addr_space_lock);
690 return rcode;
691}
diff --git a/drivers/ieee1394/highlevel.h b/drivers/ieee1394/highlevel.h
deleted file mode 100644
index 9dba89fc60ad..000000000000
--- a/drivers/ieee1394/highlevel.h
+++ /dev/null
@@ -1,141 +0,0 @@
1#ifndef IEEE1394_HIGHLEVEL_H
2#define IEEE1394_HIGHLEVEL_H
3
4#include <linux/list.h>
5#include <linux/spinlock.h>
6#include <linux/types.h>
7
8struct module;
9
10#include "ieee1394_types.h"
11
12struct hpsb_host;
13
14/* internal to ieee1394 core */
15struct hpsb_address_serve {
16 struct list_head host_list; /* per host list */
17 struct list_head hl_list; /* hpsb_highlevel list */
18 const struct hpsb_address_ops *op;
19 struct hpsb_host *host;
20 u64 start; /* first address handled, quadlet aligned */
21 u64 end; /* first address behind, quadlet aligned */
22};
23
24/* Only the following structures are of interest to actual highlevel drivers. */
25
26struct hpsb_highlevel {
27 const char *name;
28
29 /* Any of the following pointers can legally be NULL. */
30
31 /* New host initialized. Will also be called during
32 * hpsb_register_highlevel for all hosts already installed. */
33 void (*add_host)(struct hpsb_host *host);
34
35 /* Host about to be removed. Will also be called during
36 * hpsb_unregister_highlevel once for each host. */
37 void (*remove_host)(struct hpsb_host *host);
38
39 /* Host experienced bus reset with possible configuration changes.
40 * Note that this one may occur during interrupt/bottom half handling.
41 * You cannot expect to be able to do ordinary hpsb_read calls. */
42 void (*host_reset)(struct hpsb_host *host);
43
44 /* A write request was received on either the FCP_COMMAND (direction =
45 * 0) or the FCP_RESPONSE (direction = 1) register. The cts arg
46 * contains the cts field (first byte of data). */
47 void (*fcp_request)(struct hpsb_host *host, int nodeid, int direction,
48 int cts, u8 *data, size_t length);
49
50 /* These are initialized by the subsystem when the
51 * hpsb_highlevel is registered. */
52 struct list_head hl_list;
53 struct list_head irq_list;
54 struct list_head addr_list;
55
56 struct list_head host_info_list;
57 rwlock_t host_info_lock;
58};
59
60struct hpsb_address_ops {
61 /*
62 * Null function pointers will make the respective operation complete
63 * with RCODE_TYPE_ERROR. Makes for easy to implement read-only
64 * registers (just leave everything but read NULL).
65 *
66 * All functions shall return appropriate IEEE 1394 rcodes.
67 */
68
69 /* These functions have to implement block reads for themselves.
70 *
71 * These functions either return a response code or a negative number.
72 * In the former case a response will be generated. In the latter case,
73 * no response will be sent and the driver which handled the request
74 * will send the response itself. */
75 int (*read)(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
76 u64 addr, size_t length, u16 flags);
77 int (*write)(struct hpsb_host *host, int nodeid, int destid,
78 quadlet_t *data, u64 addr, size_t length, u16 flags);
79
80 /* Lock transactions: write results of ext_tcode operation into
81 * *store. */
82 int (*lock)(struct hpsb_host *host, int nodeid, quadlet_t *store,
83 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
84 u16 flags);
85 int (*lock64)(struct hpsb_host *host, int nodeid, octlet_t *store,
86 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
87 u16 flags);
88};
89
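Putting the two structures together, a minimal, hypothetical highlevel driver that serves zero-filled reads could look like this; registering an address range for my_ops would typically happen in an add_host callback:

#include <linux/string.h>	/* memset */

static int my_read(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
		   u64 addr, size_t length, u16 flags)
{
	memset(buffer, 0, length);	/* serve zeros */
	return RCODE_COMPLETE;
}

static const struct hpsb_address_ops my_ops = {
	.read = my_read,	/* write/lock/lock64 left NULL -> RCODE_TYPE_ERROR */
};

static struct hpsb_highlevel my_hl = {
	.name = "my-driver",	/* must stay valid; the string is not copied */
};

static int __init my_init(void)
{
	hpsb_register_highlevel(&my_hl);
	return 0;
}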
90void highlevel_add_host(struct hpsb_host *host);
91void highlevel_remove_host(struct hpsb_host *host);
92void highlevel_host_reset(struct hpsb_host *host);
93int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr,
94 unsigned int length, u16 flags);
95int highlevel_write(struct hpsb_host *host, int nodeid, int destid, void *data,
96 u64 addr, unsigned int length, u16 flags);
97int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
98 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
99 u16 flags);
100int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
101 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
102 u16 flags);
103void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
104 void *data, size_t length);
105
106/**
107 * hpsb_init_highlevel - initialize a struct hpsb_highlevel
108 *
109 * This is only necessary if hpsb_get_hostinfo_bykey can be called
110 * before hpsb_register_highlevel.
111 */
112static inline void hpsb_init_highlevel(struct hpsb_highlevel *hl)
113{
114 rwlock_init(&hl->host_info_lock);
115 INIT_LIST_HEAD(&hl->host_info_list);
116}
117void hpsb_register_highlevel(struct hpsb_highlevel *hl);
118void hpsb_unregister_highlevel(struct hpsb_highlevel *hl);
119
120u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
121 struct hpsb_host *host,
122 const struct hpsb_address_ops *ops,
123 u64 size, u64 alignment,
124 u64 start, u64 end);
125int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
126 const struct hpsb_address_ops *ops,
127 u64 start, u64 end);
128int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
129 u64 start);
130
131void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
132void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
133 size_t data_size);
134void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
135void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
136 unsigned long key);
137void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key);
138int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
139 void *data);
140
141#endif /* IEEE1394_HIGHLEVEL_H */
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
deleted file mode 100644
index e947d8ffac85..000000000000
--- a/drivers/ieee1394/hosts.c
+++ /dev/null
@@ -1,249 +0,0 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * Low level (host adapter) management.
5 *
6 * Copyright (C) 1999 Andreas E. Bombe
7 * Copyright (C) 1999 Emanuel Pirker
8 *
9 * This code is licensed under the GPL. See the file COPYING in the root
10 * directory of the kernel sources for details.
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/list.h>
16#include <linux/init.h>
17#include <linux/slab.h>
18#include <linux/timer.h>
19#include <linux/jiffies.h>
20#include <linux/mutex.h>
21
22#include "csr1212.h"
23#include "ieee1394.h"
24#include "ieee1394_types.h"
25#include "hosts.h"
26#include "ieee1394_core.h"
27#include "highlevel.h"
28#include "nodemgr.h"
29#include "csr.h"
30#include "config_roms.h"
31
32
33static void delayed_reset_bus(struct work_struct *work)
34{
35 struct hpsb_host *host =
36 container_of(work, struct hpsb_host, delayed_reset.work);
37 u8 generation = host->csr.generation + 1;
38
39 /* The generation field rolls over to 2 rather than 0 per IEEE
40 * 1394a-2000. */
41 if (generation > 0xf || generation < 2)
42 generation = 2;
43
44 csr_set_bus_info_generation(host->csr.rom, generation);
45 if (csr1212_generate_csr_image(host->csr.rom) != CSR1212_SUCCESS) {
46 /* CSR image creation failed.
47 * Reset generation field and do not issue a bus reset. */
48 csr_set_bus_info_generation(host->csr.rom,
49 host->csr.generation);
50 return;
51 }
52
53 host->csr.generation = generation;
54
55 host->update_config_rom = 0;
56 if (host->driver->set_hw_config_rom)
57 host->driver->set_hw_config_rom(host,
58 host->csr.rom->bus_info_data);
59
60 host->csr.gen_timestamp[host->csr.generation] = jiffies;
61 hpsb_reset_bus(host, SHORT_RESET);
62}
63
64static int dummy_transmit_packet(struct hpsb_host *h, struct hpsb_packet *p)
65{
66 return 0;
67}
68
69static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg)
70{
71 return -1;
72}
73
74static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
75 unsigned long arg)
76{
77 return -1;
78}
79
80static struct hpsb_host_driver dummy_driver = {
81 .transmit_packet = dummy_transmit_packet,
82 .devctl = dummy_devctl,
83 .isoctl = dummy_isoctl
84};
85
86static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
87{
88 int *hostnum = __data;
89
90 if (host->id == *hostnum)
91 return 1;
92
93 return 0;
94}
95
96static DEFINE_MUTEX(host_num_alloc);
97
98/**
99 * hpsb_alloc_host - allocate a new host controller.
100 * @drv: the driver that will manage the host controller
101 * @extra: number of extra bytes to allocate for the driver
 * @dev: parent device of the new host controller
102 *
103 * Allocate a &hpsb_host and initialize the general subsystem specific
104 * fields. If the driver needs to store per host data, as drivers
105 * usually do, the amount of memory required can be specified by the
106 * @extra parameter. Once allocated, the driver should initialize the
107 * driver specific parts, enable the controller and make it available
108 * to the general subsystem using hpsb_add_host().
109 *
110 * Return Value: a pointer to the &hpsb_host if successful, %NULL if
111 * no memory was available.
112 */
113struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
114 struct device *dev)
115{
116 struct hpsb_host *h;
117 int i;
118 int hostnum = 0;
119
120 h = kzalloc(sizeof(*h) + extra, GFP_KERNEL);
121 if (!h)
122 return NULL;
123
124 h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h);
125 if (!h->csr.rom)
126 goto fail;
127
128 h->hostdata = h + 1;
129 h->driver = drv;
130
131 INIT_LIST_HEAD(&h->pending_packets);
132 INIT_LIST_HEAD(&h->addr_space);
133
134 for (i = 2; i < 16; i++)
135 h->csr.gen_timestamp[i] = jiffies - 60 * HZ;
136
137 atomic_set(&h->generation, 0);
138
139 INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus);
140
141 init_timer(&h->timeout);
142 h->timeout.data = (unsigned long) h;
143 h->timeout.function = abort_timedouts;
144 h->timeout_interval = HZ / 20; /* 50ms, half of minimum SPLIT_TIMEOUT */
145
146 h->topology_map = h->csr.topology_map + 3;
147 h->speed_map = (u8 *)(h->csr.speed_map + 2);
148
149 mutex_lock(&host_num_alloc);
150 while (nodemgr_for_each_host(&hostnum, alloc_hostnum_cb))
151 hostnum++;
152 mutex_unlock(&host_num_alloc);
153 h->id = hostnum;
154
155 memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device));
156 h->device.parent = dev;
157 set_dev_node(&h->device, dev_to_node(dev));
158 dev_set_name(&h->device, "fw-host%d", h->id);
159
160 h->host_dev.parent = &h->device;
161 h->host_dev.class = &hpsb_host_class;
162 dev_set_name(&h->host_dev, "fw-host%d", h->id);
163
164 if (device_register(&h->device))
165 goto fail;
166 if (device_register(&h->host_dev)) {
167 device_unregister(&h->device);
168 goto fail;
169 }
170 get_device(&h->device);
171
172 return h;
173
174fail:
175 kfree(h);
176 return NULL;
177}
178
179int hpsb_add_host(struct hpsb_host *host)
180{
181 if (hpsb_default_host_entry(host))
182 return -ENOMEM;
183
184 highlevel_add_host(host);
185 return 0;
186}
187
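A controller driver's probe path would use the two calls above roughly as in this sketch; my_driver and struct my_priv are placeholders and error unwinding is elided:

struct my_priv {
	int irq;	/* whatever per-host state the driver needs */
};

static struct hpsb_host_driver my_driver;	/* .transmit_packet etc. set elsewhere */

static int my_probe(struct device *dev)
{
	struct my_priv *priv;
	struct hpsb_host *host;

	host = hpsb_alloc_host(&my_driver, sizeof(*priv), dev);
	if (!host)
		return -ENOMEM;
	priv = host->hostdata;	/* the 'extra' bytes follow the host struct */

	/* ... bring up the hardware, request the IRQ ... */

	return hpsb_add_host(host);	/* 0 on success, -ENOMEM on failure */
}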
188void hpsb_resume_host(struct hpsb_host *host)
189{
190 if (host->driver->set_hw_config_rom)
191 host->driver->set_hw_config_rom(host,
192 host->csr.rom->bus_info_data);
193 host->driver->devctl(host, RESET_BUS, SHORT_RESET);
194}
195
196void hpsb_remove_host(struct hpsb_host *host)
197{
198 host->is_shutdown = 1;
199
200 cancel_delayed_work(&host->delayed_reset);
201 flush_scheduled_work();
202
203 host->driver = &dummy_driver;
204 highlevel_remove_host(host);
205
206 device_unregister(&host->host_dev);
207 device_unregister(&host->device);
208}
209
210/**
211 * hpsb_update_config_rom_image - updates configuration ROM image of a host
212 *
213 * Updates the configuration ROM image of a host and schedules the bus
214 * reset that makes the new image visible, honouring the rule that a
215 * generation number must not be reused within 60 seconds. If this host
216 * does not support config-rom-update, -%EINVAL is returned; 0 indicates success.
217 */
218int hpsb_update_config_rom_image(struct hpsb_host *host)
219{
220 unsigned long reset_delay;
221 int next_gen = host->csr.generation + 1;
222
223 if (!host->update_config_rom)
224 return -EINVAL;
225
226 if (next_gen > 0xf)
227 next_gen = 2;
228
229 /* Cancel the pending delayed reset; we're about to change the config
230 * rom and it would be a waste to do a bus reset twice. */
231 cancel_delayed_work(&host->delayed_reset);
232
233 /* IEEE 1394a-2000 prohibits using the same generation number
234 * twice in a 60 second period. */
235 if (time_before(jiffies, host->csr.gen_timestamp[next_gen] + 60 * HZ))
236 /* Wait 60 seconds from the last time this generation number was
237 * used. */
238 reset_delay =
239 (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies;
240 else
241 /* Wait 1 second in case some other code wants to change the
242 * Config ROM in the near future. */
243 reset_delay = HZ;
244
245 PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus);
246 schedule_delayed_work(&host->delayed_reset, reset_delay);
247
248 return 0;
249}
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
deleted file mode 100644
index 49c359022c54..000000000000
--- a/drivers/ieee1394/hosts.h
+++ /dev/null
@@ -1,201 +0,0 @@
1#ifndef _IEEE1394_HOSTS_H
2#define _IEEE1394_HOSTS_H
3
4#include <linux/device.h>
5#include <linux/list.h>
6#include <linux/timer.h>
7#include <linux/types.h>
8#include <linux/workqueue.h>
9#include <asm/atomic.h>
10
11struct pci_dev;
12struct module;
13
14#include "ieee1394_types.h"
15#include "csr.h"
16#include "highlevel.h"
17
18struct hpsb_packet;
19struct hpsb_iso;
20
21struct hpsb_host {
22 struct list_head host_list;
23
24 void *hostdata;
25
26 atomic_t generation;
27
28 struct list_head pending_packets;
29 struct timer_list timeout;
30 unsigned long timeout_interval;
31
32 int node_count; /* number of identified nodes on this bus */
33 int selfid_count; /* total number of SelfIDs received */
34 int nodes_active; /* number of nodes with active link layer */
35
36 nodeid_t node_id; /* node ID of this host */
37 nodeid_t irm_id; /* ID of this bus' isochronous resource manager */
38 nodeid_t busmgr_id; /* ID of this bus' bus manager */
39
40 /* this node's state */
41 unsigned in_bus_reset:1;
42 unsigned is_shutdown:1;
43 unsigned resume_packet_sent:1;
44
45 /* this node's duties on the bus */
46 unsigned is_root:1;
47 unsigned is_cycmst:1;
48 unsigned is_irm:1;
49 unsigned is_busmgr:1;
50
51 int reset_retries;
52 quadlet_t *topology_map;
53 u8 *speed_map;
54
55 int id;
56 struct hpsb_host_driver *driver;
57 struct pci_dev *pdev;
58 struct device device;
59 struct device host_dev;
60
61 struct delayed_work delayed_reset;
62 unsigned config_roms:31;
63 unsigned update_config_rom:1;
64
65 struct list_head addr_space;
66 u64 low_addr_space; /* upper bound of physical DMA area */
67 u64 middle_addr_space; /* upper bound of posted write area */
68
69 u8 speed[ALL_NODES]; /* speed between each node and local node */
70
71 /* per node tlabel allocation */
72 u8 next_tl[ALL_NODES];
73 struct { DECLARE_BITMAP(map, 64); } tl_pool[ALL_NODES];
74
75 struct csr_control csr;
76
77 struct hpsb_address_serve dummy_zero_addr;
78 struct hpsb_address_serve dummy_max_addr;
79};
80
81enum devctl_cmd {
82 /* Host is requested to reset its bus and cancel all outstanding async
83 * requests. If arg == 1, it shall also attempt to become root on the
84 * bus. Return void. */
85 RESET_BUS,
86
87 /* Arg is void, return value is the hardware cycle counter value. */
88 GET_CYCLE_COUNTER,
89
90 /* Set the hardware cycle counter to the value in arg, return void.
91 * FIXME - setting is probably not required. */
92 SET_CYCLE_COUNTER,
93
94 /* Configure hardware for new bus ID in arg, return void. */
95 SET_BUS_ID,
96
97 /* If arg true, start sending cycle start packets, stop if arg == 0.
98 * Return void. */
99 ACT_CYCLE_MASTER,
100
101 /* Cancel all outstanding async requests without resetting the bus.
102 * Return void. */
103 CANCEL_REQUESTS,
104};
105
106enum isoctl_cmd {
107 /* rawiso API - see iso.h for the meanings of these commands
108 * (they correspond exactly to the hpsb_iso_* API functions)
109 * INIT = allocate resources
110 * START = begin transmission/reception
111 * STOP = halt transmission/reception
112 * QUEUE/RELEASE = produce/consume packets
113 * SHUTDOWN = deallocate resources
114 */
115
116 XMIT_INIT,
117 XMIT_START,
118 XMIT_STOP,
119 XMIT_QUEUE,
120 XMIT_SHUTDOWN,
121
122 RECV_INIT,
123 RECV_LISTEN_CHANNEL, /* multi-channel only */
124 RECV_UNLISTEN_CHANNEL, /* multi-channel only */
125 RECV_SET_CHANNEL_MASK, /* multi-channel only; arg is a *u64 */
126 RECV_START,
127 RECV_STOP,
128 RECV_RELEASE,
129 RECV_SHUTDOWN,
130 RECV_FLUSH
131};
132
133enum reset_types {
134 /* 166 microsecond reset -- only type of reset available on
135 non-1394a capable controllers */
136 LONG_RESET,
137
138 /* Short (arbitrated) reset -- only available on 1394a capable
139 controllers */
140 SHORT_RESET,
141
142 /* Variants that set force_root before issuing the bus reset */
143 LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT,
144
145 /* Variants that clear force_root before issuing the bus reset */
146 LONG_RESET_NO_FORCE_ROOT, SHORT_RESET_NO_FORCE_ROOT
147};
148
149struct hpsb_host_driver {
150 struct module *owner;
151 const char *name;
152
153 /* The hardware driver may optionally support a function that is used
154 * to set the hardware ConfigROM if the hardware supports handling
155 * reads to the ConfigROM on its own. */
156 void (*set_hw_config_rom)(struct hpsb_host *host,
157 __be32 *config_rom);
158
159 /* This function shall implement packet transmission based on
160 * packet->type. It shall CRC both parts of the packet (unless
161 * packet->type == raw) and do byte-swapping as necessary or instruct
162 * the hardware to do so. It can return immediately after the packet
163 * was queued for sending. After sending, hpsb_sent_packet() has to be
164 * called. Return 0 on success, negative errno on failure.
165 * NOTE: The function must be callable in interrupt context.
166 */
167 int (*transmit_packet)(struct hpsb_host *host,
168 struct hpsb_packet *packet);
169
170 /* This function requests miscellaneous services from the driver, see
171 * above for command codes and expected actions. Return -1 for unknown
172 * command, though that should never happen.
173 */
174 int (*devctl)(struct hpsb_host *host, enum devctl_cmd command, int arg);
175
176 /* ISO transmission/reception functions. Return 0 on success, -1
177 * (or -EXXX errno code) on failure. If the low-level driver does not
178 * support the new ISO API, set isoctl to NULL.
179 */
180 int (*isoctl)(struct hpsb_iso *iso, enum isoctl_cmd command,
181 unsigned long arg);
182
183 /* This function mainly redirects local CSR reads/locks on the iso
184 * management registers (bus manager id, bandwidth available, channels
185 * available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus
186 * mgr, bwdth avail, ch avail hi, ch avail lo respectively (the same ids
187 * as OHCI uses). data and compare are the new data and expected data
188 * respectively, return value is the old value.
189 */
190 quadlet_t (*hw_csr_reg) (struct hpsb_host *host, int reg,
191 quadlet_t data, quadlet_t compare);
192};
193
194struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
195 struct device *dev);
196int hpsb_add_host(struct hpsb_host *host);
197void hpsb_resume_host(struct hpsb_host *host);
198void hpsb_remove_host(struct hpsb_host *host);
199int hpsb_update_config_rom_image(struct hpsb_host *host);
200
201#endif /* _IEEE1394_HOSTS_H */
diff --git a/drivers/ieee1394/ieee1394-ioctl.h b/drivers/ieee1394/ieee1394-ioctl.h
deleted file mode 100644
index 46878fef136c..000000000000
--- a/drivers/ieee1394/ieee1394-ioctl.h
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * Base file for all ieee1394 ioctls.
3 * Linux-1394 has allocated base '#' with a range of 0x00-0x3f.
4 */
5
6#ifndef __IEEE1394_IOCTL_H
7#define __IEEE1394_IOCTL_H
8
9#include <linux/ioctl.h>
10#include <linux/types.h>
11
12/* DV1394 Gets 10 */
13
14/* Get the driver ready to transmit video. Pass a struct dv1394_init* as
15 * the parameter (see below), or NULL to get default parameters */
16#define DV1394_IOC_INIT _IOW('#', 0x06, struct dv1394_init)
17
18/* Stop transmitting video and free the ringbuffer */
19#define DV1394_IOC_SHUTDOWN _IO ('#', 0x07)
20
21/* Submit N new frames to be transmitted, where the index of the first new
22 * frame is first_clear_buffer, and the index of the last new frame is
23 * (first_clear_buffer + N) % n_frames */
24#define DV1394_IOC_SUBMIT_FRAMES _IO ('#', 0x08)
25
26/* Block until N buffers are clear (pass N as the parameter). Because we
27 * re-transmit the last frame on underrun, there will be at most n_frames
28 * - 1 clear frames at any time */
29#define DV1394_IOC_WAIT_FRAMES _IO ('#', 0x09)
30
31/* Capture new frames that have been received, where the index of the
32 * first new frame is first_clear_buffer, and the index of the last new
33 * frame is (first_clear_buffer + N) % n_frames */
34#define DV1394_IOC_RECEIVE_FRAMES _IO ('#', 0x0a)
35
36/* Tell card to start receiving DMA */
37#define DV1394_IOC_START_RECEIVE _IO ('#', 0x0b)
38
39/* Pass a struct dv1394_status* as the parameter */
40#define DV1394_IOC_GET_STATUS _IOR('#', 0x0c, struct dv1394_status)
41
42
43/* Video1394 Gets 10 */
44
45#define VIDEO1394_IOC_LISTEN_CHANNEL \
46 _IOWR('#', 0x10, struct video1394_mmap)
47#define VIDEO1394_IOC_UNLISTEN_CHANNEL \
48 _IOW ('#', 0x11, int)
49#define VIDEO1394_IOC_LISTEN_QUEUE_BUFFER \
50 _IOW ('#', 0x12, struct video1394_wait)
51#define VIDEO1394_IOC_LISTEN_WAIT_BUFFER \
52 _IOWR('#', 0x13, struct video1394_wait)
53#define VIDEO1394_IOC_TALK_CHANNEL \
54 _IOWR('#', 0x14, struct video1394_mmap)
55#define VIDEO1394_IOC_UNTALK_CHANNEL \
56 _IOW ('#', 0x15, int)
57/*
58 * This one is broken: it really wanted
59 * "sizeof (struct video1394_wait) + sizeof (struct video1394_queue_variable)"
60 * but got just a "size_t"
61 */
62#define VIDEO1394_IOC_TALK_QUEUE_BUFFER \
63 _IOW ('#', 0x16, size_t)
64#define VIDEO1394_IOC_TALK_WAIT_BUFFER \
65 _IOW ('#', 0x17, struct video1394_wait)
66#define VIDEO1394_IOC_LISTEN_POLL_BUFFER \
67 _IOWR('#', 0x18, struct video1394_wait)
68
69
70/* Raw1394's ISO interface */
71#define RAW1394_IOC_ISO_XMIT_INIT \
72 _IOW ('#', 0x1a, struct raw1394_iso_status)
73#define RAW1394_IOC_ISO_RECV_INIT \
74 _IOWR('#', 0x1b, struct raw1394_iso_status)
75#define RAW1394_IOC_ISO_RECV_START \
76 _IOC (_IOC_WRITE, '#', 0x1c, sizeof(int) * 3)
77#define RAW1394_IOC_ISO_XMIT_START \
78 _IOC (_IOC_WRITE, '#', 0x1d, sizeof(int) * 2)
79#define RAW1394_IOC_ISO_XMIT_RECV_STOP \
80 _IO ('#', 0x1e)
81#define RAW1394_IOC_ISO_GET_STATUS \
82 _IOR ('#', 0x1f, struct raw1394_iso_status)
83#define RAW1394_IOC_ISO_SHUTDOWN \
84 _IO ('#', 0x20)
85#define RAW1394_IOC_ISO_QUEUE_ACTIVITY \
86 _IO ('#', 0x21)
87#define RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL \
88 _IOW ('#', 0x22, unsigned char)
89#define RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL \
90 _IOW ('#', 0x23, unsigned char)
91#define RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK \
92 _IOW ('#', 0x24, __u64)
93#define RAW1394_IOC_ISO_RECV_PACKETS \
94 _IOW ('#', 0x25, struct raw1394_iso_packets)
95#define RAW1394_IOC_ISO_RECV_RELEASE_PACKETS \
96 _IOW ('#', 0x26, unsigned int)
97#define RAW1394_IOC_ISO_XMIT_PACKETS \
98 _IOW ('#', 0x27, struct raw1394_iso_packets)
99#define RAW1394_IOC_ISO_XMIT_SYNC \
100 _IO ('#', 0x28)
101#define RAW1394_IOC_ISO_RECV_FLUSH \
102 _IO ('#', 0x29)
103#define RAW1394_IOC_GET_CYCLE_TIMER \
104 _IOR ('#', 0x30, struct raw1394_cycle_timer)
105
106#endif /* __IEEE1394_IOCTL_H */
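For context, user space consumed these numbers through plain ioctl(2) calls, as in this sketch; the device path handling and error treatment are illustrative:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
/* plus this header and dv1394.h for struct dv1394_status */

static int dv1394_get_status(const char *devpath, struct dv1394_status *st)
{
	int fd = open(devpath, O_RDWR);
	int err;

	if (fd < 0)
		return -1;
	err = ioctl(fd, DV1394_IOC_GET_STATUS, st);
	close(fd);
	return err;
}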
diff --git a/drivers/ieee1394/ieee1394.h b/drivers/ieee1394/ieee1394.h
deleted file mode 100644
index af320e2c5079..000000000000
--- a/drivers/ieee1394/ieee1394.h
+++ /dev/null
@@ -1,220 +0,0 @@
1/*
2 * Generic IEEE 1394 definitions
3 */
4
5#ifndef _IEEE1394_IEEE1394_H
6#define _IEEE1394_IEEE1394_H
7
8#define TCODE_WRITEQ 0x0
9#define TCODE_WRITEB 0x1
10#define TCODE_WRITE_RESPONSE 0x2
11#define TCODE_READQ 0x4
12#define TCODE_READB 0x5
13#define TCODE_READQ_RESPONSE 0x6
14#define TCODE_READB_RESPONSE 0x7
15#define TCODE_CYCLE_START 0x8
16#define TCODE_LOCK_REQUEST 0x9
17#define TCODE_ISO_DATA 0xa
18#define TCODE_STREAM_DATA 0xa
19#define TCODE_LOCK_RESPONSE 0xb
20
21#define RCODE_COMPLETE 0x0
22#define RCODE_CONFLICT_ERROR 0x4
23#define RCODE_DATA_ERROR 0x5
24#define RCODE_TYPE_ERROR 0x6
25#define RCODE_ADDRESS_ERROR 0x7
26
27#define EXTCODE_MASK_SWAP 0x1
28#define EXTCODE_COMPARE_SWAP 0x2
29#define EXTCODE_FETCH_ADD 0x3
30#define EXTCODE_LITTLE_ADD 0x4
31#define EXTCODE_BOUNDED_ADD 0x5
32#define EXTCODE_WRAP_ADD 0x6
33
34#define ACK_COMPLETE 0x1
35#define ACK_PENDING 0x2
36#define ACK_BUSY_X 0x4
37#define ACK_BUSY_A 0x5
38#define ACK_BUSY_B 0x6
39#define ACK_TARDY 0xb
40#define ACK_CONFLICT_ERROR 0xc
41#define ACK_DATA_ERROR 0xd
42#define ACK_TYPE_ERROR 0xe
43#define ACK_ADDRESS_ERROR 0xf
44
45/* Non-standard "ACK codes" for internal use */
46#define ACKX_NONE (-1)
47#define ACKX_SEND_ERROR (-2)
48#define ACKX_ABORTED (-3)
49#define ACKX_TIMEOUT (-4)
50
51#define IEEE1394_SPEED_100 0x00
52#define IEEE1394_SPEED_200 0x01
53#define IEEE1394_SPEED_400 0x02
54#define IEEE1394_SPEED_800 0x03
55#define IEEE1394_SPEED_1600 0x04
56#define IEEE1394_SPEED_3200 0x05
57#define IEEE1394_SPEED_MAX IEEE1394_SPEED_3200
58
59/* Maps speed values above to a string representation */
60extern const char *hpsb_speedto_str[];
61
62/* 1394a cable PHY packets */
63#define SELFID_PWRCL_NO_POWER 0x0
64#define SELFID_PWRCL_PROVIDE_15W 0x1
65#define SELFID_PWRCL_PROVIDE_30W 0x2
66#define SELFID_PWRCL_PROVIDE_45W 0x3
67#define SELFID_PWRCL_USE_1W 0x4
68#define SELFID_PWRCL_USE_3W 0x5
69#define SELFID_PWRCL_USE_6W 0x6
70#define SELFID_PWRCL_USE_10W 0x7
71
72#define SELFID_PORT_CHILD 0x3
73#define SELFID_PORT_PARENT 0x2
74#define SELFID_PORT_NCONN 0x1
75#define SELFID_PORT_NONE 0x0
76
77#define SELFID_SPEED_UNKNOWN 0x3 /* 1394b PHY */
78
79#define PHYPACKET_LINKON 0x40000000
80#define PHYPACKET_PHYCONFIG_R 0x00800000
81#define PHYPACKET_PHYCONFIG_T 0x00400000
82#define EXTPHYPACKET_TYPE_PING 0x00000000
83#define EXTPHYPACKET_TYPE_REMOTEACCESS_BASE 0x00040000
84#define EXTPHYPACKET_TYPE_REMOTEACCESS_PAGED 0x00140000
85#define EXTPHYPACKET_TYPE_REMOTEREPLY_BASE 0x000C0000
86#define EXTPHYPACKET_TYPE_REMOTEREPLY_PAGED 0x001C0000
87#define EXTPHYPACKET_TYPE_REMOTECOMMAND 0x00200000
88#define EXTPHYPACKET_TYPE_REMOTECONFIRMATION 0x00280000
89#define EXTPHYPACKET_TYPE_RESUME 0x003C0000
90
91#define EXTPHYPACKET_TYPEMASK 0xC0FC0000
92
93#define PHYPACKET_PORT_SHIFT 24
94#define PHYPACKET_GAPCOUNT_SHIFT 16
95
96/* 1394a PHY register map bitmasks */
97#define PHY_00_PHYSICAL_ID 0xFC
98#define PHY_00_R 0x02 /* Root */
99#define PHY_00_PS 0x01 /* Power Status*/
100#define PHY_01_RHB 0x80 /* Root Hold-Off */
101#define PHY_01_IBR 0x40 /* Initiate Bus Reset */
102#define PHY_01_GAP_COUNT 0x3F
103#define PHY_02_EXTENDED 0xE0 /* 0x7 for 1394a-compliant PHY */
104#define PHY_02_TOTAL_PORTS 0x1F
105#define PHY_03_MAX_SPEED 0xE0
106#define PHY_03_DELAY 0x0F
107#define PHY_04_LCTRL 0x80 /* Link Active Report Control */
108#define PHY_04_CONTENDER 0x40
109#define PHY_04_JITTER 0x38
110#define PHY_04_PWR_CLASS 0x07 /* Power Class */
111#define PHY_05_WATCHDOG 0x80
112#define PHY_05_ISBR 0x40 /* Initiate Short Bus Reset */
113#define PHY_05_LOOP 0x20 /* Loop Detect */
114#define PHY_05_PWR_FAIL 0x10 /* Cable Power Failure Detect */
115#define PHY_05_TIMEOUT 0x08 /* Arbitration State Machine Timeout */
116#define PHY_05_PORT_EVENT 0x04 /* Port Event Detect */
117#define PHY_05_ENAB_ACCEL 0x02 /* Enable Arbitration Acceleration */
118#define PHY_05_ENAB_MULTI 0x01 /* Ena. Multispeed Packet Concatenation */
119
120#include <asm/byteorder.h>
121
122/* '1' '3' '9' '4' in ASCII */
123#define IEEE1394_BUSID_MAGIC cpu_to_be32(0x31333934)
124
125#ifdef __BIG_ENDIAN_BITFIELD
126
127struct selfid {
128 u32 packet_identifier:2; /* always binary 10 */
129 u32 phy_id:6;
130 /* byte */
131 u32 extended:1; /* if true is struct ext_selfid */
132 u32 link_active:1;
133 u32 gap_count:6;
134 /* byte */
135 u32 speed:2;
136 u32 phy_delay:2;
137 u32 contender:1;
138 u32 power_class:3;
139 /* byte */
140 u32 port0:2;
141 u32 port1:2;
142 u32 port2:2;
143 u32 initiated_reset:1;
144 u32 more_packets:1;
145} __attribute__((packed));
146
147struct ext_selfid {
148 u32 packet_identifier:2; /* always binary 10 */
149 u32 phy_id:6;
150 /* byte */
151 u32 extended:1; /* if false is struct selfid */
152 u32 seq_nr:3;
153 u32 reserved:2;
154 u32 porta:2;
155 /* byte */
156 u32 portb:2;
157 u32 portc:2;
158 u32 portd:2;
159 u32 porte:2;
160 /* byte */
161 u32 portf:2;
162 u32 portg:2;
163 u32 porth:2;
164 u32 reserved2:1;
165 u32 more_packets:1;
166} __attribute__((packed));
167
168#elif defined __LITTLE_ENDIAN_BITFIELD /* __BIG_ENDIAN_BITFIELD */
169
170/*
171 * Note: these are meant to be the bit fields of a big-endian SelfID as seen
172 * on a little-endian machine, without byte swapping.
173 */
174
175struct selfid {
176 u32 phy_id:6;
177 u32 packet_identifier:2; /* always binary 10 */
178 /* byte */
179 u32 gap_count:6;
180 u32 link_active:1;
181 u32 extended:1; /* if true is struct ext_selfid */
182 /* byte */
183 u32 power_class:3;
184 u32 contender:1;
185 u32 phy_delay:2;
186 u32 speed:2;
187 /* byte */
188 u32 more_packets:1;
189 u32 initiated_reset:1;
190 u32 port2:2;
191 u32 port1:2;
192 u32 port0:2;
193} __attribute__((packed));
194
195struct ext_selfid {
196 u32 phy_id:6;
197 u32 packet_identifier:2; /* always binary 10 */
198 /* byte */
199 u32 porta:2;
200 u32 reserved:2;
201 u32 seq_nr:3;
202 u32 extended:1; /* if false is struct selfid */
203 /* byte */
204 u32 porte:2;
205 u32 portd:2;
206 u32 portc:2;
207 u32 portb:2;
208 /* byte */
209 u32 more_packets:1;
210 u32 reserved2:1;
211 u32 porth:2;
212 u32 portg:2;
213 u32 portf:2;
214} __attribute__((packed));
215
216#else
217#error What? PDP endian?
218#endif /* __BIG_ENDIAN_BITFIELD */
219
220#endif /* _IEEE1394_IEEE1394_H */
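To make the layout concrete, here is a sketch of decoding one raw SelfID, assuming the quadlet is stored as received in big-endian byte order, as in host->topology_map; thanks to the endian-specific definitions above it is overlaid without byte swapping:

#include <linux/string.h>	/* memcpy */

/* does this (non-extended) SelfID advertise an IRM contender? */
static int selfid_is_contender(u32 raw)
{
	struct selfid sid;

	memcpy(&sid, &raw, sizeof(sid));
	return !sid.extended && sid.link_active && sid.contender;
}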
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
deleted file mode 100644
index 872338003721..000000000000
--- a/drivers/ieee1394/ieee1394_core.c
+++ /dev/null
@@ -1,1380 +0,0 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * Core support: hpsb_packet management, packet handling and forwarding to
5 * highlevel or lowlevel code
6 *
7 * Copyright (C) 1999, 2000 Andreas E. Bombe
8 * 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
9 *
10 * This code is licensed under the GPL. See the file COPYING in the root
11 * directory of the kernel sources for details.
12 *
13 *
14 * Contributions:
15 *
16 * Manfred Weihs <weihs@ict.tuwien.ac.at>
17 * loopback functionality in hpsb_send_packet
18 * allow highlevel drivers to disable automatic response generation
19 * and to generate responses themselves (deferred)
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/list.h>
25#include <linux/string.h>
26#include <linux/init.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/bitops.h>
32#include <linux/kdev_t.h>
33#include <linux/freezer.h>
34#include <linux/suspend.h>
35#include <linux/kthread.h>
36#include <linux/preempt.h>
37#include <linux/time.h>
38
39#include <asm/system.h>
40#include <asm/byteorder.h>
41
42#include "ieee1394_types.h"
43#include "ieee1394.h"
44#include "hosts.h"
45#include "ieee1394_core.h"
46#include "highlevel.h"
47#include "ieee1394_transactions.h"
48#include "csr.h"
49#include "nodemgr.h"
50#include "dma.h"
51#include "iso.h"
52#include "config_roms.h"
53
54/*
55 * Disable the nodemgr detection and config rom reading functionality.
56 */
57static int disable_nodemgr;
58module_param(disable_nodemgr, int, 0444);
59MODULE_PARM_DESC(disable_nodemgr, "Disable nodemgr functionality.");
60
61/* Disable Isochronous Resource Manager functionality */
62int hpsb_disable_irm = 0;
63module_param_named(disable_irm, hpsb_disable_irm, bool, 0444);
64MODULE_PARM_DESC(disable_irm,
65 "Disable Isochronous Resource Manager functionality.");
66
67/* We are GPL, so treat us special */
68MODULE_LICENSE("GPL");
69
70/* Some globals used */
71const char *hpsb_speedto_str[] = { "S100", "S200", "S400", "S800", "S1600", "S3200" };
72struct class *hpsb_protocol_class;
73
74#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
75static void dump_packet(const char *text, quadlet_t *data, int size, int speed)
76{
77 int i;
78
79 size /= 4;
80 size = (size > 4 ? 4 : size);
81
82 printk(KERN_DEBUG "ieee1394: %s", text);
83 if (speed > -1 && speed < 6)
84 printk(" at %s", hpsb_speedto_str[speed]);
85 printk(":");
86 for (i = 0; i < size; i++)
87 printk(" %08x", data[i]);
88 printk("\n");
89}
90#else
91#define dump_packet(a,b,c,d) do {} while (0)
92#endif
93
94static void abort_requests(struct hpsb_host *host);
95static void queue_packet_complete(struct hpsb_packet *packet);
96
97
98/**
99 * hpsb_set_packet_complete_task - set task that runs when a packet completes
100 * @packet: the packet whose completion we want the task added to
101 * @routine: function to call
102 * @data: data (if any) to pass to the above function
103 *
104 * Set the task that runs when a packet completes. You cannot call this more
105 * than once on a single packet before it is sent.
106 *
107 * Typically, the complete @routine is responsible for calling hpsb_free_packet().
108 */
109void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
110 void (*routine)(void *), void *data)
111{
112 WARN_ON(packet->complete_routine != NULL);
113 packet->complete_routine = routine;
114 packet->complete_data = data;
115 return;
116}
117
118/**
119 * hpsb_alloc_packet - allocate new packet structure
120 * @data_size: size of the data block to be allocated, in bytes
121 *
122 * This function allocates, initializes and returns a new &struct hpsb_packet.
123 * It can be used in interrupt context. A header block is always included and
124 * initialized with zeros. Its size is big enough to contain all possible 1394
125 * headers. The data block is only allocated if @data_size is not zero.
126 *
127 * For packets for which responses will be received, @data_size has to be big
128 * enough to contain the response's data block, since no further allocation
129 * occurs at response matching time.
130 *
131 * The packet's generation value will be set to the current generation number
132 * for ease of use. Remember to overwrite it with your own recorded generation
133 * number if you can not be sure that your code will not race with a bus reset.
134 *
135 * Return value: A pointer to a &struct hpsb_packet or NULL on allocation
136 * failure.
137 */
138struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
139{
140 struct hpsb_packet *packet;
141
142 data_size = ((data_size + 3) & ~3);
143
144 packet = kzalloc(sizeof(*packet) + data_size, GFP_ATOMIC);
145 if (!packet)
146 return NULL;
147
148 packet->state = hpsb_unused;
149 packet->generation = -1;
150 INIT_LIST_HEAD(&packet->driver_list);
151 INIT_LIST_HEAD(&packet->queue);
152 atomic_set(&packet->refcnt, 1);
153
154 if (data_size) {
155 packet->data = packet->embedded_data;
156 packet->allocated_data_size = data_size;
157 }
158 return packet;
159}
160
161/**
162 * hpsb_free_packet - free packet and data associated with it
163 * @packet: packet to free (is NULL safe)
164 *
165 * Frees @packet->data only if it was allocated through hpsb_alloc_packet().
166 */
167void hpsb_free_packet(struct hpsb_packet *packet)
168{
169 if (packet && atomic_dec_and_test(&packet->refcnt)) {
170 BUG_ON(!list_empty(&packet->driver_list) ||
171 !list_empty(&packet->queue));
172 kfree(packet);
173 }
174}
175
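The allocation, completion and free calls above combine into the following life cycle sketch; the actual transmission via hpsb_send_packet() and its error handling are elided:

static void my_complete(void *data)
{
	struct hpsb_packet *packet = data;

	/* inspect the result here, then drop the reference */
	hpsb_free_packet(packet);
}

static struct hpsb_packet *my_prepare_packet(void)
{
	struct hpsb_packet *packet = hpsb_alloc_packet(8);	/* 8 byte data block */

	if (!packet)
		return NULL;
	hpsb_set_packet_complete_task(packet, my_complete, packet);
	return packet;
}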
176/**
177 * hpsb_reset_bus - initiate bus reset on the given host
178 * @host: host controller whose bus to reset
179 * @type: one of enum reset_types
180 *
181 * Returns 1 if bus reset already in progress, 0 otherwise.
182 */
183int hpsb_reset_bus(struct hpsb_host *host, int type)
184{
185 if (!host->in_bus_reset) {
186 host->driver->devctl(host, RESET_BUS, type);
187 return 0;
188 } else {
189 return 1;
190 }
191}
192
193/**
194 * hpsb_read_cycle_timer - read cycle timer register and system time
195 * @host: host whose isochronous cycle timer register is read
196 * @cycle_timer: address of bitfield to return the register contents
197 * @local_time: address to return the system time
198 *
199 * The format of @cycle_timer is described in OHCI 1.1 clause 5.13. The same
200 * format is returned by non-OHCI controllers. @local_time contains the
201 * system time in microseconds since the Epoch, read at the moment when the
202 * cycle timer was read.
203 *
204 * Return value: 0 for success or error number otherwise.
205 */
206int hpsb_read_cycle_timer(struct hpsb_host *host, u32 *cycle_timer,
207 u64 *local_time)
208{
209 int ctr;
210 struct timeval tv;
211 unsigned long flags;
212
213 if (!host || !cycle_timer || !local_time)
214 return -EINVAL;
215
216 preempt_disable();
217 local_irq_save(flags);
218
219 ctr = host->driver->devctl(host, GET_CYCLE_COUNTER, 0);
220 if (ctr)
221 do_gettimeofday(&tv);
222
223 local_irq_restore(flags);
224 preempt_enable();
225
226 if (!ctr)
227 return -EIO;
228 *cycle_timer = ctr;
229 *local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
230 return 0;
231}
232
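Since the register layout is only referenced by clause number above, a decoding sketch of the OHCI 1.1 clause 5.13 format, where bits 31..25 are cycleSeconds, bits 24..12 are cycleCount (8000 cycles per second) and bits 11..0 are cycleOffset (ticks of 24.576 MHz):

static void decode_cycle_timer(u32 ct, u32 *sec, u32 *count, u32 *offset)
{
	*sec    = ct >> 25;		/* cycleSeconds, 0..127 */
	*count  = (ct >> 12) & 0x1fff;	/* cycleCount, 0..7999 */
	*offset = ct & 0xfff;		/* cycleOffset, 0..3071 */
}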
233/**
234 * hpsb_bus_reset - notify a bus reset to the core
235 *
236 * For host driver module usage. Safe to use in interrupt context, although
237 * quite complex, so you may want to run it in the bottom half rather than the top half.
238 *
239 * Returns 1 if bus reset already in progress, 0 otherwise.
240 */
241int hpsb_bus_reset(struct hpsb_host *host)
242{
243 if (host->in_bus_reset) {
244 HPSB_NOTICE("%s called while bus reset already in progress",
245 __func__);
246 return 1;
247 }
248
249 abort_requests(host);
250 host->in_bus_reset = 1;
251 host->irm_id = -1;
252 host->is_irm = 0;
253 host->busmgr_id = -1;
254 host->is_busmgr = 0;
255 host->is_cycmst = 0;
256 host->node_count = 0;
257 host->selfid_count = 0;
258
259 return 0;
260}
261
262
263/*
264 * Verify host->selfid_count SelfIDs, set host->node_count and
265 * host->nodes_active. Returns 1 on success, zero if verification failed.
266 */
267static int check_selfids(struct hpsb_host *host)
268{
269 int nodeid = -1;
270 int rest_of_selfids = host->selfid_count;
271 struct selfid *sid = (struct selfid *)host->topology_map;
272 struct ext_selfid *esid;
273 int esid_seq = 23;
274
275 host->nodes_active = 0;
276
277 while (rest_of_selfids--) {
278 if (!sid->extended) {
279 nodeid++;
280 esid_seq = 0;
281
282 if (sid->phy_id != nodeid) {
283 HPSB_INFO("SelfIDs failed monotony check with "
284 "%d", sid->phy_id);
285 return 0;
286 }
287
288 if (sid->link_active) {
289 host->nodes_active++;
290 if (sid->contender)
291 host->irm_id = LOCAL_BUS | sid->phy_id;
292 }
293 } else {
294 esid = (struct ext_selfid *)sid;
295
296 if ((esid->phy_id != nodeid)
297 || (esid->seq_nr != esid_seq)) {
298 HPSB_INFO("SelfIDs failed monotony check with "
299 "%d/%d", esid->phy_id, esid->seq_nr);
300 return 0;
301 }
302 esid_seq++;
303 }
304 sid++;
305 }
306
307 esid = (struct ext_selfid *)(sid - 1);
308 while (esid->extended) {
309 if ((esid->porta == SELFID_PORT_PARENT) ||
310 (esid->portb == SELFID_PORT_PARENT) ||
311 (esid->portc == SELFID_PORT_PARENT) ||
312 (esid->portd == SELFID_PORT_PARENT) ||
313 (esid->porte == SELFID_PORT_PARENT) ||
314 (esid->portf == SELFID_PORT_PARENT) ||
315 (esid->portg == SELFID_PORT_PARENT) ||
316 (esid->porth == SELFID_PORT_PARENT)) {
317 HPSB_INFO("SelfIDs failed root check on "
318 "extended SelfID");
319 return 0;
320 }
321 esid--;
322 }
323
324 sid = (struct selfid *)esid;
325 if ((sid->port0 == SELFID_PORT_PARENT) ||
326 (sid->port1 == SELFID_PORT_PARENT) ||
327 (sid->port2 == SELFID_PORT_PARENT)) {
328 HPSB_INFO("SelfIDs failed root check");
329 return 0;
330 }
331
332 host->node_count = nodeid + 1;
333 return 1;
334}
335
336static void build_speed_map(struct hpsb_host *host, int nodecount)
337{
338 u8 cldcnt[nodecount];
339 u8 *map = host->speed_map;
340 u8 *speedcap = host->speed;
341 u8 local_link_speed = host->csr.lnk_spd;
342 struct selfid *sid;
343 struct ext_selfid *esid;
344 int i, j, n;
345
346 for (i = 0; i < (nodecount * 64); i += 64) {
347 for (j = 0; j < nodecount; j++) {
348 map[i+j] = IEEE1394_SPEED_MAX;
349 }
350 }
351
352 for (i = 0; i < nodecount; i++) {
353 cldcnt[i] = 0;
354 }
355
356 /* find direct children count and speed */
357 for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
358 n = nodecount - 1;
359 (void *)sid >= (void *)host->topology_map; sid--) {
360 if (sid->extended) {
361 esid = (struct ext_selfid *)sid;
362
363 if (esid->porta == SELFID_PORT_CHILD) cldcnt[n]++;
364 if (esid->portb == SELFID_PORT_CHILD) cldcnt[n]++;
365 if (esid->portc == SELFID_PORT_CHILD) cldcnt[n]++;
366 if (esid->portd == SELFID_PORT_CHILD) cldcnt[n]++;
367 if (esid->porte == SELFID_PORT_CHILD) cldcnt[n]++;
368 if (esid->portf == SELFID_PORT_CHILD) cldcnt[n]++;
369 if (esid->portg == SELFID_PORT_CHILD) cldcnt[n]++;
370 if (esid->porth == SELFID_PORT_CHILD) cldcnt[n]++;
371 } else {
372 if (sid->port0 == SELFID_PORT_CHILD) cldcnt[n]++;
373 if (sid->port1 == SELFID_PORT_CHILD) cldcnt[n]++;
374 if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;
375
376 speedcap[n] = sid->speed;
377 if (speedcap[n] > local_link_speed)
378 speedcap[n] = local_link_speed;
379 n--;
380 }
381 }
382
383 /* set self mapping */
384 for (i = 0; i < nodecount; i++) {
385 map[64*i + i] = speedcap[i];
386 }
387
388 /* fix up direct children count to total children count;
389 * also fix up speedcaps for sibling and parent communication */
390 for (i = 1; i < nodecount; i++) {
391 for (j = cldcnt[i], n = i - 1; j > 0; j--) {
392 cldcnt[i] += cldcnt[n];
393 speedcap[n] = min(speedcap[n], speedcap[i]);
394 n -= cldcnt[n] + 1;
395 }
396 }
397
398 for (n = 0; n < nodecount; n++) {
399 for (i = n - cldcnt[n]; i <= n; i++) {
400 for (j = 0; j < (n - cldcnt[n]); j++) {
401 map[j*64 + i] = map[i*64 + j] =
402 min(map[i*64 + j], speedcap[n]);
403 }
404 for (j = n + 1; j < nodecount; j++) {
405 map[j*64 + i] = map[i*64 + j] =
406 min(map[i*64 + j], speedcap[n]);
407 }
408 }
409 }
410
411 /* assume a maximum speed for 1394b PHYs, nodemgr will correct it */
412 if (local_link_speed > SELFID_SPEED_UNKNOWN)
413 for (i = 0; i < nodecount; i++)
414 if (speedcap[i] == SELFID_SPEED_UNKNOWN)
415 speedcap[i] = local_link_speed;
416}
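
/*
 * The map built above is a flattened 64 x 64 table of IEEE1394_SPEED_* codes:
 * the usable speed between node numbers a and b sits at index 64*a + b (and,
 * symmetrically, at 64*b + a). A hypothetical lookup helper, for illustration:
 */
static u8 speed_between(struct hpsb_host *host, int a, int b)
{
	return host->speed_map[64 * a + b];
}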
417
418
419/**
420 * hpsb_selfid_received - hand over received selfid packet to the core
421 *
422 * For host driver module usage. Safe to use in interrupt context.
423 *
 424 * The host driver should have done a successful complement check (the second
 425 * quadlet is the complement of the first) beforehand.
426 */
427void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
428{
429 if (host->in_bus_reset) {
430 HPSB_VERBOSE("Including SelfID 0x%x", sid);
431 host->topology_map[host->selfid_count++] = sid;
432 } else {
433 HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
434 sid, NODEID_TO_BUS(host->node_id));
435 }
436}
437
438/**
439 * hpsb_selfid_complete - notify completion of SelfID stage to the core
440 *
 441 * For host driver module usage. Safe to use in interrupt context, although it
 442 * is quite complex, so you may want to run it in the bottom half rather than the top half.
443 *
444 * Notify completion of SelfID stage to the core and report new physical ID
445 * and whether host is root now.
446 */
447void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
448{
449 if (!host->in_bus_reset)
450 HPSB_NOTICE("SelfID completion called outside of bus reset!");
451
452 host->node_id = LOCAL_BUS | phyid;
453 host->is_root = isroot;
454
455 if (!check_selfids(host)) {
456 if (host->reset_retries++ < 20) {
457 /* selfid stage did not complete without error */
458 HPSB_NOTICE("Error in SelfID stage, resetting");
459 host->in_bus_reset = 0;
460 /* this should work from ohci1394 now... */
461 hpsb_reset_bus(host, LONG_RESET);
462 return;
463 } else {
464 HPSB_NOTICE("Stopping out-of-control reset loop");
465 HPSB_NOTICE("Warning - topology map and speed map will not be valid");
466 host->reset_retries = 0;
467 }
468 } else {
469 host->reset_retries = 0;
470 build_speed_map(host, host->node_count);
471 }
472
 473 HPSB_VERBOSE("selfid_complete called with successful SelfID stage "
 474 "... irm_id: 0x%X node_id: 0x%X", host->irm_id, host->node_id);
475
476 /* irm_id is kept up to date by check_selfids() */
477 if (host->irm_id == host->node_id) {
478 host->is_irm = 1;
479 } else {
480 host->is_busmgr = 0;
481 host->is_irm = 0;
482 }
483
484 if (isroot) {
485 host->driver->devctl(host, ACT_CYCLE_MASTER, 1);
486 host->is_cycmst = 1;
487 }
488 atomic_inc(&host->generation);
489 host->in_bus_reset = 0;
490 highlevel_host_reset(host);
491}
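
/*
 * Sketch of how a controller driver's bus reset path feeds the functions
 * above (selfid_buf, selfid_len, phyid and isroot are assumed to come from
 * the hardware; real drivers split this across separate interrupt events):
 */
	hpsb_bus_reset(host);
	for (i = 0; i < selfid_len; i++)
		hpsb_selfid_received(host, selfid_buf[i]);
	hpsb_selfid_complete(host, phyid, isroot);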
492
493static DEFINE_SPINLOCK(pending_packets_lock);
494
495/**
496 * hpsb_packet_sent - notify core of sending a packet
497 *
498 * For host driver module usage. Safe to call from within a transmit packet
499 * routine.
500 *
501 * Notify core of sending a packet. Ackcode is the ack code returned for async
502 * transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
503 * for other cases (internal errors that don't justify a panic).
504 */
505void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
506 int ackcode)
507{
508 unsigned long flags;
509
510 spin_lock_irqsave(&pending_packets_lock, flags);
511
512 packet->ack_code = ackcode;
513
514 if (packet->no_waiter || packet->state == hpsb_complete) {
515 /* if packet->no_waiter, must not have a tlabel allocated */
516 spin_unlock_irqrestore(&pending_packets_lock, flags);
517 hpsb_free_packet(packet);
518 return;
519 }
520
521 atomic_dec(&packet->refcnt); /* drop HC's reference */
522 /* here the packet must be on the host->pending_packets queue */
523
524 if (ackcode != ACK_PENDING || !packet->expect_response) {
525 packet->state = hpsb_complete;
526 list_del_init(&packet->queue);
527 spin_unlock_irqrestore(&pending_packets_lock, flags);
528 queue_packet_complete(packet);
529 return;
530 }
531
532 packet->state = hpsb_pending;
533 packet->sendtime = jiffies;
534
535 spin_unlock_irqrestore(&pending_packets_lock, flags);
536
537 mod_timer(&host->timeout, jiffies + host->timeout_interval);
538}
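
/*
 * Sketch of the call site in a controller driver's transmit-complete handler
 * (ack_from_hw and hw_ack_ok are assumed hardware-supplied values):
 */
	hpsb_packet_sent(host, packet,
			 hw_ack_ok ? ack_from_hw : ACKX_SEND_ERROR);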
539
540/**
541 * hpsb_send_phy_config - transmit a PHY configuration packet on the bus
542 * @host: host that PHY config packet gets sent through
543 * @rootid: root whose force_root bit should get set (-1 = don't set force_root)
544 * @gapcnt: gap count value to set (-1 = don't set gap count)
545 *
546 * This function sends a PHY config packet on the bus through the specified
547 * host.
548 *
549 * Return value: 0 for success or negative error number otherwise.
550 */
551int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
552{
553 struct hpsb_packet *packet;
554 quadlet_t d = 0;
555 int retval = 0;
556
557 if (rootid >= ALL_NODES || rootid < -1 || gapcnt > 0x3f || gapcnt < -1 ||
558 (rootid == -1 && gapcnt == -1)) {
559 HPSB_DEBUG("Invalid Parameter: rootid = %d gapcnt = %d",
560 rootid, gapcnt);
561 return -EINVAL;
562 }
563
564 if (rootid != -1)
565 d |= PHYPACKET_PHYCONFIG_R | rootid << PHYPACKET_PORT_SHIFT;
566 if (gapcnt != -1)
567 d |= PHYPACKET_PHYCONFIG_T | gapcnt << PHYPACKET_GAPCOUNT_SHIFT;
568
569 packet = hpsb_make_phypacket(host, d);
570 if (!packet)
571 return -ENOMEM;
572
573 packet->generation = get_hpsb_generation(host);
574 retval = hpsb_send_packet_and_wait(packet);
575 hpsb_free_packet(packet);
576
577 return retval;
578}
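
/*
 * Example: broadcast a new gap count of 63 while leaving force_root alone
 * (error handling elided; any rootid/gapcnt combination other than -1/-1
 * is accepted):
 */
	error = hpsb_send_phy_config(host, -1, 63);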
579
580/**
581 * hpsb_send_packet - transmit a packet on the bus
582 * @packet: packet to send
583 *
584 * The packet is sent through the host specified in the packet->host field.
585 * Before sending, the packet's transmit speed is automatically determined
586 * using the local speed map when it is an async, non-broadcast packet.
587 *
588 * Possibilities for failure are that host is either not initialized, in bus
589 * reset, the packet's generation number doesn't match the current generation
590 * number or the host reports a transmit error.
591 *
592 * Return value: 0 on success, negative errno on failure.
593 */
594int hpsb_send_packet(struct hpsb_packet *packet)
595{
596 struct hpsb_host *host = packet->host;
597
598 if (host->is_shutdown)
599 return -EINVAL;
600 if (host->in_bus_reset ||
601 (packet->generation != get_hpsb_generation(host)))
602 return -EAGAIN;
603
604 packet->state = hpsb_queued;
605
606 /* This just seems silly to me */
607 WARN_ON(packet->no_waiter && packet->expect_response);
608
609 if (!packet->no_waiter || packet->expect_response) {
610 unsigned long flags;
611
612 atomic_inc(&packet->refcnt);
613 /* Set the initial "sendtime" to 10 seconds from now, to
614 prevent premature expiry. If a packet takes more than
615 10 seconds to hit the wire, we have bigger problems :) */
616 packet->sendtime = jiffies + 10 * HZ;
617 spin_lock_irqsave(&pending_packets_lock, flags);
618 list_add_tail(&packet->queue, &host->pending_packets);
619 spin_unlock_irqrestore(&pending_packets_lock, flags);
620 }
621
622 if (packet->node_id == host->node_id) {
623 /* it is a local request, so handle it locally */
624
625 quadlet_t *data;
626 size_t size = packet->data_size + packet->header_size;
627
628 data = kmalloc(size, GFP_ATOMIC);
629 if (!data) {
630 HPSB_ERR("unable to allocate memory for concatenating header and data");
631 return -ENOMEM;
632 }
633
634 memcpy(data, packet->header, packet->header_size);
635
636 if (packet->data_size)
637 memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);
638
639 dump_packet("send packet local", packet->header, packet->header_size, -1);
640
641 hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
642 hpsb_packet_received(host, data, size, 0);
643
644 kfree(data);
645
646 return 0;
647 }
648
649 if (packet->type == hpsb_async &&
650 NODEID_TO_NODE(packet->node_id) != ALL_NODES)
651 packet->speed_code =
652 host->speed[NODEID_TO_NODE(packet->node_id)];
653
654 dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);
655
656 return host->driver->transmit_packet(host, packet);
657}
658
659/* We could just use complete() directly as the packet complete
660 * callback, but this is more typesafe, in the sense that we get a
661 * compiler error if the prototype for complete() changes. */
662
663static void complete_packet(void *data)
664{
665 complete((struct completion *) data);
666}
667
668/**
669 * hpsb_send_packet_and_wait - enqueue packet, block until transaction completes
670 * @packet: packet to send
671 *
672 * Return value: 0 on success, negative errno on failure.
673 */
674int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
675{
676 struct completion done;
677 int retval;
678
679 init_completion(&done);
680 hpsb_set_packet_complete_task(packet, complete_packet, &done);
681 retval = hpsb_send_packet(packet);
682 if (retval == 0)
683 wait_for_completion(&done);
684
685 return retval;
686}
687
688static void send_packet_nocare(struct hpsb_packet *packet)
689{
690 if (hpsb_send_packet(packet) < 0) {
691 hpsb_free_packet(packet);
692 }
693}
694
695static size_t packet_size_to_data_size(size_t packet_size, size_t header_size,
696 size_t buffer_size, int tcode)
697{
698 size_t ret = packet_size <= header_size ? 0 : packet_size - header_size;
699
700 if (unlikely(ret > buffer_size))
701 ret = buffer_size;
702
703 if (unlikely(ret + header_size != packet_size))
704 HPSB_ERR("unexpected packet size %zd (tcode %d), bug?",
705 packet_size, tcode);
706 return ret;
707}
708
709static void handle_packet_response(struct hpsb_host *host, int tcode,
710 quadlet_t *data, size_t size)
711{
712 struct hpsb_packet *packet;
713 int tlabel = (data[0] >> 10) & 0x3f;
714 size_t header_size;
715 unsigned long flags;
716
717 spin_lock_irqsave(&pending_packets_lock, flags);
718
719 list_for_each_entry(packet, &host->pending_packets, queue)
720 if (packet->tlabel == tlabel &&
721 packet->node_id == (data[1] >> 16))
722 goto found;
723
724 spin_unlock_irqrestore(&pending_packets_lock, flags);
725 HPSB_DEBUG("unsolicited response packet received - %s",
726 "no tlabel match");
727 dump_packet("contents", data, 16, -1);
728 return;
729
730found:
731 switch (packet->tcode) {
732 case TCODE_WRITEQ:
733 case TCODE_WRITEB:
734 if (unlikely(tcode != TCODE_WRITE_RESPONSE))
735 break;
736 header_size = 12;
737 size = 0;
738 goto dequeue;
739
740 case TCODE_READQ:
741 if (unlikely(tcode != TCODE_READQ_RESPONSE))
742 break;
743 header_size = 16;
744 size = 0;
745 goto dequeue;
746
747 case TCODE_READB:
748 if (unlikely(tcode != TCODE_READB_RESPONSE))
749 break;
750 header_size = 16;
751 size = packet_size_to_data_size(size, header_size,
752 packet->allocated_data_size,
753 tcode);
754 goto dequeue;
755
756 case TCODE_LOCK_REQUEST:
757 if (unlikely(tcode != TCODE_LOCK_RESPONSE))
758 break;
759 header_size = 16;
760 size = packet_size_to_data_size(min(size, (size_t)(16 + 8)),
761 header_size,
762 packet->allocated_data_size,
763 tcode);
764 goto dequeue;
765 }
766
767 spin_unlock_irqrestore(&pending_packets_lock, flags);
768 HPSB_DEBUG("unsolicited response packet received - %s",
769 "tcode mismatch");
770 dump_packet("contents", data, 16, -1);
771 return;
772
773dequeue:
774 list_del_init(&packet->queue);
775 spin_unlock_irqrestore(&pending_packets_lock, flags);
776
777 if (packet->state == hpsb_queued) {
778 packet->sendtime = jiffies;
779 packet->ack_code = ACK_PENDING;
780 }
781 packet->state = hpsb_complete;
782
783 memcpy(packet->header, data, header_size);
784 if (size)
785 memcpy(packet->data, data + 4, size);
786
787 queue_packet_complete(packet);
788}
789
790
791static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
792 quadlet_t *data, size_t dsize)
793{
794 struct hpsb_packet *p;
795
796 p = hpsb_alloc_packet(dsize);
797 if (unlikely(p == NULL)) {
798 /* FIXME - send data_error response */
799 HPSB_ERR("out of memory, cannot send response packet");
800 return NULL;
801 }
802
803 p->type = hpsb_async;
804 p->state = hpsb_unused;
805 p->host = host;
806 p->node_id = data[1] >> 16;
807 p->tlabel = (data[0] >> 10) & 0x3f;
808 p->no_waiter = 1;
809
810 p->generation = get_hpsb_generation(host);
811
812 if (dsize % 4)
813 p->data[dsize / 4] = 0;
814
815 return p;
816}
817
818#define PREP_ASYNC_HEAD_RCODE(tc) \
819 packet->tcode = tc; \
820 packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
821 | (1 << 8) | (tc << 4); \
822 packet->header[1] = (packet->host->node_id << 16) | (rcode << 12); \
823 packet->header[2] = 0
824
825static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
826 quadlet_t data)
827{
828 PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE);
829 packet->header[3] = data;
830 packet->header_size = 16;
831 packet->data_size = 0;
832}
833
834static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
835 int length)
836{
837 if (rcode != RCODE_COMPLETE)
838 length = 0;
839
840 PREP_ASYNC_HEAD_RCODE(TCODE_READB_RESPONSE);
841 packet->header[3] = length << 16;
842 packet->header_size = 16;
843 packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
844}
845
846static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
847{
848 PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
849 packet->header_size = 12;
850 packet->data_size = 0;
851}
852
853static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
854 int length)
855{
856 if (rcode != RCODE_COMPLETE)
857 length = 0;
858
859 PREP_ASYNC_HEAD_RCODE(TCODE_LOCK_RESPONSE);
860 packet->header[3] = (length << 16) | extcode;
861 packet->header_size = 16;
862 packet->data_size = length;
863}
864
865static void handle_incoming_packet(struct hpsb_host *host, int tcode,
866 quadlet_t *data, size_t size,
867 int write_acked)
868{
869 struct hpsb_packet *packet;
870 int length, rcode, extcode;
871 quadlet_t buffer;
872 nodeid_t source = data[1] >> 16;
873 nodeid_t dest = data[0] >> 16;
874 u16 flags = (u16) data[0];
875 u64 addr;
876
877 /* FIXME?
878 * Out-of-bounds lengths are left for highlevel_read|write to cap. */
879
880 switch (tcode) {
881 case TCODE_WRITEQ:
882 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
883 rcode = highlevel_write(host, source, dest, data + 3,
884 addr, 4, flags);
885 goto handle_write_request;
886
887 case TCODE_WRITEB:
888 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
889 rcode = highlevel_write(host, source, dest, data + 4,
890 addr, data[3] >> 16, flags);
891handle_write_request:
892 if (rcode < 0 || write_acked ||
893 NODEID_TO_NODE(data[0] >> 16) == NODE_MASK)
894 return;
895 /* not a broadcast write, reply */
896 packet = create_reply_packet(host, data, 0);
897 if (packet) {
898 fill_async_write_resp(packet, rcode);
899 send_packet_nocare(packet);
900 }
901 return;
902
903 case TCODE_READQ:
904 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
905 rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
906 if (rcode < 0)
907 return;
908
909 packet = create_reply_packet(host, data, 0);
910 if (packet) {
911 fill_async_readquad_resp(packet, rcode, buffer);
912 send_packet_nocare(packet);
913 }
914 return;
915
916 case TCODE_READB:
917 length = data[3] >> 16;
918 packet = create_reply_packet(host, data, length);
919 if (!packet)
920 return;
921
922 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
923 rcode = highlevel_read(host, source, packet->data, addr,
924 length, flags);
925 if (rcode < 0) {
926 hpsb_free_packet(packet);
927 return;
928 }
929 fill_async_readblock_resp(packet, rcode, length);
930 send_packet_nocare(packet);
931 return;
932
933 case TCODE_LOCK_REQUEST:
934 length = data[3] >> 16;
935 extcode = data[3] & 0xffff;
936 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
937
938 packet = create_reply_packet(host, data, 8);
939 if (!packet)
940 return;
941
942 if (extcode == 0 || extcode >= 7) {
943 /* let switch default handle error */
944 length = 0;
945 }
946
947 switch (length) {
948 case 4:
949 rcode = highlevel_lock(host, source, packet->data, addr,
950 data[4], 0, extcode, flags);
951 fill_async_lock_resp(packet, rcode, extcode, 4);
952 break;
953 case 8:
954 if (extcode != EXTCODE_FETCH_ADD &&
955 extcode != EXTCODE_LITTLE_ADD) {
956 rcode = highlevel_lock(host, source,
957 packet->data, addr,
958 data[5], data[4],
959 extcode, flags);
960 fill_async_lock_resp(packet, rcode, extcode, 4);
961 } else {
962 rcode = highlevel_lock64(host, source,
963 (octlet_t *)packet->data, addr,
964 *(octlet_t *)(data + 4), 0ULL,
965 extcode, flags);
966 fill_async_lock_resp(packet, rcode, extcode, 8);
967 }
968 break;
969 case 16:
970 rcode = highlevel_lock64(host, source,
971 (octlet_t *)packet->data, addr,
972 *(octlet_t *)(data + 6),
973 *(octlet_t *)(data + 4),
974 extcode, flags);
975 fill_async_lock_resp(packet, rcode, extcode, 8);
976 break;
977 default:
978 rcode = RCODE_TYPE_ERROR;
979 fill_async_lock_resp(packet, rcode, extcode, 0);
980 }
981
982 if (rcode < 0)
983 hpsb_free_packet(packet);
984 else
985 send_packet_nocare(packet);
986 return;
987 }
988}
989
990/**
991 * hpsb_packet_received - hand over received packet to the core
992 *
993 * For host driver module usage.
994 *
 995 * The contents of @data are expected to be the full packet but with the CRCs
 996 * left out (the data block follows the header immediately), with the header
 997 * (i.e. the first four quadlets) in machine byte order and the data block in
 998 * big endian. The buffer pointed to by @data can be safely overwritten after this call.
999 *
1000 * If the packet is a write request, @write_acked is to be set to true if it was
1001 * ack_complete'd already, false otherwise. This argument is ignored for any
1002 * other packet type.
1003 */
1004void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
1005 int write_acked)
1006{
1007 int tcode;
1008
1009 if (unlikely(host->in_bus_reset)) {
1010 HPSB_DEBUG("received packet during reset; ignoring");
1011 return;
1012 }
1013
1014 dump_packet("received packet", data, size, -1);
1015
1016 tcode = (data[0] >> 4) & 0xf;
1017
1018 switch (tcode) {
1019 case TCODE_WRITE_RESPONSE:
1020 case TCODE_READQ_RESPONSE:
1021 case TCODE_READB_RESPONSE:
1022 case TCODE_LOCK_RESPONSE:
1023 handle_packet_response(host, tcode, data, size);
1024 break;
1025
1026 case TCODE_WRITEQ:
1027 case TCODE_WRITEB:
1028 case TCODE_READQ:
1029 case TCODE_READB:
1030 case TCODE_LOCK_REQUEST:
1031 handle_incoming_packet(host, tcode, data, size, write_acked);
1032 break;
1033
1034 case TCODE_CYCLE_START:
1035 /* simply ignore this packet if it is passed on */
1036 break;
1037
1038 default:
1039 HPSB_DEBUG("received packet with bogus transaction code %d",
1040 tcode);
1041 break;
1042 }
1043}
1044
1045static void abort_requests(struct hpsb_host *host)
1046{
1047 struct hpsb_packet *packet, *p;
1048 struct list_head tmp;
1049 unsigned long flags;
1050
1051 host->driver->devctl(host, CANCEL_REQUESTS, 0);
1052
1053 INIT_LIST_HEAD(&tmp);
1054 spin_lock_irqsave(&pending_packets_lock, flags);
1055 list_splice_init(&host->pending_packets, &tmp);
1056 spin_unlock_irqrestore(&pending_packets_lock, flags);
1057
1058 list_for_each_entry_safe(packet, p, &tmp, queue) {
1059 list_del_init(&packet->queue);
1060 packet->state = hpsb_complete;
1061 packet->ack_code = ACKX_ABORTED;
1062 queue_packet_complete(packet);
1063 }
1064}
1065
1066void abort_timedouts(unsigned long __opaque)
1067{
1068 struct hpsb_host *host = (struct hpsb_host *)__opaque;
1069 struct hpsb_packet *packet, *p;
1070 struct list_head tmp;
1071 unsigned long flags, expire, j;
1072
1073 spin_lock_irqsave(&host->csr.lock, flags);
1074 expire = host->csr.expire;
1075 spin_unlock_irqrestore(&host->csr.lock, flags);
1076
1077 j = jiffies;
1078 INIT_LIST_HEAD(&tmp);
1079 spin_lock_irqsave(&pending_packets_lock, flags);
1080
1081 list_for_each_entry_safe(packet, p, &host->pending_packets, queue) {
1082 if (time_before(packet->sendtime + expire, j))
1083 list_move_tail(&packet->queue, &tmp);
1084 else
1085 /* Since packets are added to the tail, the oldest
1086 * ones are first, always. When we get to one that
1087 * isn't timed out, the rest aren't either. */
1088 break;
1089 }
1090 if (!list_empty(&host->pending_packets))
1091 mod_timer(&host->timeout, j + host->timeout_interval);
1092
1093 spin_unlock_irqrestore(&pending_packets_lock, flags);
1094
1095 list_for_each_entry_safe(packet, p, &tmp, queue) {
1096 list_del_init(&packet->queue);
1097 packet->state = hpsb_complete;
1098 packet->ack_code = ACKX_TIMEOUT;
1099 queue_packet_complete(packet);
1100 }
1101}
1102
1103static struct task_struct *khpsbpkt_thread;
1104static LIST_HEAD(hpsbpkt_queue);
1105
1106static void queue_packet_complete(struct hpsb_packet *packet)
1107{
1108 unsigned long flags;
1109
1110 if (packet->no_waiter) {
1111 hpsb_free_packet(packet);
1112 return;
1113 }
1114 if (packet->complete_routine != NULL) {
1115 spin_lock_irqsave(&pending_packets_lock, flags);
1116 list_add_tail(&packet->queue, &hpsbpkt_queue);
1117 spin_unlock_irqrestore(&pending_packets_lock, flags);
1118 wake_up_process(khpsbpkt_thread);
1119 }
1120 return;
1121}
1122
1123/*
1124 * Kernel thread which handles packets that are completed. This way the
1125 * packet's "complete" function is asynchronously run in process context.
1126 * Only packets which have a "complete" function may be sent here.
1127 */
1128static int hpsbpkt_thread(void *__hi)
1129{
1130 struct hpsb_packet *packet, *p;
1131 struct list_head tmp;
1132 int may_schedule;
1133
1134 while (!kthread_should_stop()) {
1135
1136 INIT_LIST_HEAD(&tmp);
1137 spin_lock_irq(&pending_packets_lock);
1138 list_splice_init(&hpsbpkt_queue, &tmp);
1139 spin_unlock_irq(&pending_packets_lock);
1140
1141 list_for_each_entry_safe(packet, p, &tmp, queue) {
1142 list_del_init(&packet->queue);
1143 packet->complete_routine(packet->complete_data);
1144 }
1145
1146 set_current_state(TASK_INTERRUPTIBLE);
1147 spin_lock_irq(&pending_packets_lock);
1148 may_schedule = list_empty(&hpsbpkt_queue);
1149 spin_unlock_irq(&pending_packets_lock);
1150 if (may_schedule)
1151 schedule();
1152 __set_current_state(TASK_RUNNING);
1153 }
1154 return 0;
1155}
1156
1157static int __init ieee1394_init(void)
1158{
1159 int i, ret;
1160
1161 /* non-fatal error */
1162 if (hpsb_init_config_roms()) {
 1163 HPSB_ERR("Failed to initialize some config ROM entries.");
 1164 HPSB_ERR("Some features may not be available");
1165 }
1166
1167 khpsbpkt_thread = kthread_run(hpsbpkt_thread, NULL, "khpsbpkt");
1168 if (IS_ERR(khpsbpkt_thread)) {
 1169 HPSB_ERR("Failed to start hpsbpkt thread!");
1170 ret = PTR_ERR(khpsbpkt_thread);
1171 goto exit_cleanup_config_roms;
1172 }
1173
1174 if (register_chrdev_region(IEEE1394_CORE_DEV, 256, "ieee1394")) {
 1175 HPSB_ERR("unable to register character device major %d!", IEEE1394_MAJOR);
1176 ret = -ENODEV;
1177 goto exit_release_kernel_thread;
1178 }
1179
1180 ret = bus_register(&ieee1394_bus_type);
1181 if (ret < 0) {
1182 HPSB_INFO("bus register failed");
1183 goto release_chrdev;
1184 }
1185
1186 for (i = 0; fw_bus_attrs[i]; i++) {
1187 ret = bus_create_file(&ieee1394_bus_type, fw_bus_attrs[i]);
1188 if (ret < 0) {
1189 while (i >= 0) {
1190 bus_remove_file(&ieee1394_bus_type,
1191 fw_bus_attrs[i--]);
1192 }
1193 bus_unregister(&ieee1394_bus_type);
1194 goto release_chrdev;
1195 }
1196 }
1197
1198 ret = class_register(&hpsb_host_class);
1199 if (ret < 0)
1200 goto release_all_bus;
1201
1202 hpsb_protocol_class = class_create(THIS_MODULE, "ieee1394_protocol");
1203 if (IS_ERR(hpsb_protocol_class)) {
1204 ret = PTR_ERR(hpsb_protocol_class);
1205 goto release_class_host;
1206 }
1207
1208 ret = init_csr();
1209 if (ret) {
1210 HPSB_INFO("init csr failed");
1211 ret = -ENOMEM;
1212 goto release_class_protocol;
1213 }
1214
1215 if (disable_nodemgr) {
1216 HPSB_INFO("nodemgr and IRM functionality disabled");
1217 /* We shouldn't contend for IRM with nodemgr disabled, since
1218 nodemgr implements functionality required of ieee1394a-2000
1219 IRMs */
1220 hpsb_disable_irm = 1;
1221
1222 return 0;
1223 }
1224
1225 if (hpsb_disable_irm) {
1226 HPSB_INFO("IRM functionality disabled");
1227 }
1228
1229 ret = init_ieee1394_nodemgr();
1230 if (ret < 0) {
1231 HPSB_INFO("init nodemgr failed");
1232 goto cleanup_csr;
1233 }
1234
1235 return 0;
1236
1237cleanup_csr:
1238 cleanup_csr();
1239release_class_protocol:
1240 class_destroy(hpsb_protocol_class);
1241release_class_host:
1242 class_unregister(&hpsb_host_class);
1243release_all_bus:
1244 for (i = 0; fw_bus_attrs[i]; i++)
1245 bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
1246 bus_unregister(&ieee1394_bus_type);
1247release_chrdev:
1248 unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
1249exit_release_kernel_thread:
1250 kthread_stop(khpsbpkt_thread);
1251exit_cleanup_config_roms:
1252 hpsb_cleanup_config_roms();
1253 return ret;
1254}
1255
1256static void __exit ieee1394_cleanup(void)
1257{
1258 int i;
1259
1260 if (!disable_nodemgr)
1261 cleanup_ieee1394_nodemgr();
1262
1263 cleanup_csr();
1264
1265 class_destroy(hpsb_protocol_class);
1266 class_unregister(&hpsb_host_class);
1267 for (i = 0; fw_bus_attrs[i]; i++)
1268 bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
1269 bus_unregister(&ieee1394_bus_type);
1270
1271 kthread_stop(khpsbpkt_thread);
1272
1273 hpsb_cleanup_config_roms();
1274
1275 unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
1276}
1277
1278fs_initcall(ieee1394_init);
1279module_exit(ieee1394_cleanup);
1280
1281/* Exported symbols */
1282
1283/** hosts.c **/
1284EXPORT_SYMBOL(hpsb_alloc_host);
1285EXPORT_SYMBOL(hpsb_add_host);
1286EXPORT_SYMBOL(hpsb_resume_host);
1287EXPORT_SYMBOL(hpsb_remove_host);
1288EXPORT_SYMBOL(hpsb_update_config_rom_image);
1289
1290/** ieee1394_core.c **/
1291EXPORT_SYMBOL(hpsb_speedto_str);
1292EXPORT_SYMBOL(hpsb_protocol_class);
1293EXPORT_SYMBOL(hpsb_set_packet_complete_task);
1294EXPORT_SYMBOL(hpsb_alloc_packet);
1295EXPORT_SYMBOL(hpsb_free_packet);
1296EXPORT_SYMBOL(hpsb_send_packet);
1297EXPORT_SYMBOL(hpsb_reset_bus);
1298EXPORT_SYMBOL(hpsb_read_cycle_timer);
1299EXPORT_SYMBOL(hpsb_bus_reset);
1300EXPORT_SYMBOL(hpsb_selfid_received);
1301EXPORT_SYMBOL(hpsb_selfid_complete);
1302EXPORT_SYMBOL(hpsb_packet_sent);
1303EXPORT_SYMBOL(hpsb_packet_received);
1304EXPORT_SYMBOL_GPL(hpsb_disable_irm);
1305
1306/** ieee1394_transactions.c **/
1307EXPORT_SYMBOL(hpsb_get_tlabel);
1308EXPORT_SYMBOL(hpsb_free_tlabel);
1309EXPORT_SYMBOL(hpsb_make_readpacket);
1310EXPORT_SYMBOL(hpsb_make_writepacket);
1311EXPORT_SYMBOL(hpsb_make_streampacket);
1312EXPORT_SYMBOL(hpsb_make_lockpacket);
1313EXPORT_SYMBOL(hpsb_make_lock64packet);
1314EXPORT_SYMBOL(hpsb_make_phypacket);
1315EXPORT_SYMBOL(hpsb_read);
1316EXPORT_SYMBOL(hpsb_write);
1317EXPORT_SYMBOL(hpsb_lock);
1318EXPORT_SYMBOL(hpsb_packet_success);
1319
1320/** highlevel.c **/
1321EXPORT_SYMBOL(hpsb_register_highlevel);
1322EXPORT_SYMBOL(hpsb_unregister_highlevel);
1323EXPORT_SYMBOL(hpsb_register_addrspace);
1324EXPORT_SYMBOL(hpsb_unregister_addrspace);
1325EXPORT_SYMBOL(hpsb_allocate_and_register_addrspace);
1326EXPORT_SYMBOL(hpsb_get_hostinfo);
1327EXPORT_SYMBOL(hpsb_create_hostinfo);
1328EXPORT_SYMBOL(hpsb_destroy_hostinfo);
1329EXPORT_SYMBOL(hpsb_set_hostinfo_key);
1330EXPORT_SYMBOL(hpsb_get_hostinfo_bykey);
1331EXPORT_SYMBOL(hpsb_set_hostinfo);
1332
1333/** nodemgr.c **/
1334EXPORT_SYMBOL(hpsb_node_fill_packet);
1335EXPORT_SYMBOL(hpsb_node_write);
1336EXPORT_SYMBOL(__hpsb_register_protocol);
1337EXPORT_SYMBOL(hpsb_unregister_protocol);
1338
1339/** csr.c **/
1340EXPORT_SYMBOL(hpsb_update_config_rom);
1341
1342/** dma.c **/
1343EXPORT_SYMBOL(dma_prog_region_init);
1344EXPORT_SYMBOL(dma_prog_region_alloc);
1345EXPORT_SYMBOL(dma_prog_region_free);
1346EXPORT_SYMBOL(dma_region_init);
1347EXPORT_SYMBOL(dma_region_alloc);
1348EXPORT_SYMBOL(dma_region_free);
1349EXPORT_SYMBOL(dma_region_sync_for_cpu);
1350EXPORT_SYMBOL(dma_region_sync_for_device);
1351EXPORT_SYMBOL(dma_region_mmap);
1352EXPORT_SYMBOL(dma_region_offset_to_bus);
1353
1354/** iso.c **/
1355EXPORT_SYMBOL(hpsb_iso_xmit_init);
1356EXPORT_SYMBOL(hpsb_iso_recv_init);
1357EXPORT_SYMBOL(hpsb_iso_xmit_start);
1358EXPORT_SYMBOL(hpsb_iso_recv_start);
1359EXPORT_SYMBOL(hpsb_iso_recv_listen_channel);
1360EXPORT_SYMBOL(hpsb_iso_recv_unlisten_channel);
1361EXPORT_SYMBOL(hpsb_iso_recv_set_channel_mask);
1362EXPORT_SYMBOL(hpsb_iso_stop);
1363EXPORT_SYMBOL(hpsb_iso_shutdown);
1364EXPORT_SYMBOL(hpsb_iso_xmit_queue_packet);
1365EXPORT_SYMBOL(hpsb_iso_xmit_sync);
1366EXPORT_SYMBOL(hpsb_iso_recv_release_packets);
1367EXPORT_SYMBOL(hpsb_iso_n_ready);
1368EXPORT_SYMBOL(hpsb_iso_packet_sent);
1369EXPORT_SYMBOL(hpsb_iso_packet_received);
1370EXPORT_SYMBOL(hpsb_iso_wake);
1371EXPORT_SYMBOL(hpsb_iso_recv_flush);
1372
1373/** csr1212.c **/
1374EXPORT_SYMBOL(csr1212_attach_keyval_to_directory);
1375EXPORT_SYMBOL(csr1212_detach_keyval_from_directory);
1376EXPORT_SYMBOL(csr1212_get_keyval);
1377EXPORT_SYMBOL(csr1212_new_directory);
1378EXPORT_SYMBOL(csr1212_parse_keyval);
1379EXPORT_SYMBOL(csr1212_read);
1380EXPORT_SYMBOL(csr1212_release_keyval);
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
deleted file mode 100644
index 28b9f58bafd2..000000000000
--- a/drivers/ieee1394/ieee1394_core.h
+++ /dev/null
@@ -1,172 +0,0 @@
1#ifndef _IEEE1394_CORE_H
2#define _IEEE1394_CORE_H
3
4#include <linux/device.h>
5#include <linux/fs.h>
6#include <linux/list.h>
7#include <linux/types.h>
8#include <linux/cdev.h>
9#include <asm/atomic.h>
10
11#include "hosts.h"
12#include "ieee1394_types.h"
13
14struct hpsb_packet {
15 /* This struct is basically read-only for hosts with the exception of
16 * the data buffer contents and driver_list. */
17
18 /* This can be used for host driver internal linking.
19 *
20 * NOTE: This must be left in init state when the driver is done
21 * with it (e.g. by using list_del_init()), since the core does
22 * some sanity checks to make sure the packet is not on a
23 * driver_list when free'ing it. */
24 struct list_head driver_list;
25
26 nodeid_t node_id;
27
28 /* hpsb_raw = send as-is, do not CRC (but still byte-swap it) */
29 enum { hpsb_async, hpsb_raw } __attribute__((packed)) type;
30
31 /* Okay, this is core internal and a no care for hosts.
32 * queued = queued for sending
33 * pending = sent, waiting for response
34 * complete = processing completed, successful or not
35 */
36 enum {
37 hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete
38 } __attribute__((packed)) state;
39
40 /* These are core-internal. */
41 signed char tlabel;
42 signed char ack_code;
43 unsigned char tcode;
44
45 unsigned expect_response:1;
46 unsigned no_waiter:1;
47
48 /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */
49 unsigned speed_code:2;
50
51 struct hpsb_host *host;
52 unsigned int generation;
53
54 atomic_t refcnt;
55 struct list_head queue;
56
57 /* Function (and possible data to pass to it) to call when this
58 * packet is completed. */
59 void (*complete_routine)(void *);
60 void *complete_data;
61
62 /* Store jiffies for implementing bus timeouts. */
63 unsigned long sendtime;
64
65 /* Core-internal. */
66 size_t allocated_data_size; /* as allocated */
67
68 /* Sizes are in bytes. To be set by caller of hpsb_alloc_packet. */
69 size_t data_size; /* as filled in */
70 size_t header_size; /* as filled in, not counting the CRC */
71
72 /* Buffers */
73 quadlet_t *data; /* can be DMA-mapped */
74 quadlet_t header[5];
75 quadlet_t embedded_data[0]; /* keep as last member */
76};
77
78void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
79 void (*routine)(void *), void *data);
80static inline struct hpsb_packet *driver_packet(struct list_head *l)
81{
82 return list_entry(l, struct hpsb_packet, driver_list);
83}
84void abort_timedouts(unsigned long __opaque);
85struct hpsb_packet *hpsb_alloc_packet(size_t data_size);
86void hpsb_free_packet(struct hpsb_packet *packet);
87
88/**
89 * get_hpsb_generation - generation counter for the complete 1394 subsystem
90 *
91 * Generation gets incremented on every change in the subsystem (notably on bus
92 * resets). Use the functions, not the variable.
93 */
94static inline unsigned int get_hpsb_generation(struct hpsb_host *host)
95{
96 return atomic_read(&host->generation);
97}
98
99int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt);
100int hpsb_send_packet(struct hpsb_packet *packet);
101int hpsb_send_packet_and_wait(struct hpsb_packet *packet);
102int hpsb_reset_bus(struct hpsb_host *host, int type);
103int hpsb_read_cycle_timer(struct hpsb_host *host, u32 *cycle_timer,
104 u64 *local_time);
105
106int hpsb_bus_reset(struct hpsb_host *host);
107void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);
108void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot);
109void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
110 int ackcode);
111void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
112 int write_acked);
113
114/*
115 * CHARACTER DEVICE DISPATCHING
116 *
117 * All ieee1394 character device drivers share the same major number
118 * (major 171). The 256 minor numbers are allocated to the various
119 * task-specific interfaces (raw1394, video1394, dv1394, etc) in
120 * blocks of 16.
121 *
122 * The core ieee1394.o module allocates the device number region
123 * 171:0-255, the various drivers must then cdev_add() their cdev
124 * objects to handle their respective sub-regions.
125 *
126 * Minor device number block allocations:
127 *
128 * Block 0 ( 0- 15) raw1394
129 * Block 1 ( 16- 31) video1394
130 * Block 2 ( 32- 47) dv1394
131 *
132 * Blocks 3-14 free for future allocation
133 *
134 * Block 15 (240-255) reserved for drivers under development, etc.
135 */
136
137#define IEEE1394_MAJOR 171
138
139#define IEEE1394_MINOR_BLOCK_RAW1394 0
140#define IEEE1394_MINOR_BLOCK_VIDEO1394 1
141#define IEEE1394_MINOR_BLOCK_DV1394 2
142#define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15
143
144#define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0)
145#define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, \
146 IEEE1394_MINOR_BLOCK_RAW1394 * 16)
147#define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, \
148 IEEE1394_MINOR_BLOCK_VIDEO1394 * 16)
149#define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, \
150 IEEE1394_MINOR_BLOCK_DV1394 * 16)
151#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, \
152 IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)
153
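/*
 * For instance, a driver claiming one of the blocks above would register a
 * cdev over its 16 minors roughly like this (raw1394_cdev and raw1394_fops
 * are the driver's own objects; sketch only):
 */
	cdev_init(&raw1394_cdev, &raw1394_fops);
	if (cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 16))
		HPSB_ERR("raw1394: unable to add cdev");
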
154/**
155 * ieee1394_file_to_instance - get the index within a minor number block
156 */
157static inline unsigned char ieee1394_file_to_instance(struct file *file)
158{
159 int idx = cdev_index(file->f_path.dentry->d_inode);
160 if (idx < 0)
161 idx = 0;
162 return idx;
163}
164
165extern int hpsb_disable_irm;
166
167/* Our sysfs bus entry */
168extern struct bus_type ieee1394_bus_type;
169extern struct class hpsb_host_class;
170extern struct class *hpsb_protocol_class;
171
172#endif /* _IEEE1394_CORE_H */
diff --git a/drivers/ieee1394/ieee1394_hotplug.h b/drivers/ieee1394/ieee1394_hotplug.h
deleted file mode 100644
index dd5500ed8322..000000000000
--- a/drivers/ieee1394/ieee1394_hotplug.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef _IEEE1394_HOTPLUG_H
2#define _IEEE1394_HOTPLUG_H
3
4/* Unit spec id and sw version entry for some protocols */
5#define AVC_UNIT_SPEC_ID_ENTRY 0x0000A02D
6#define AVC_SW_VERSION_ENTRY 0x00010001
7#define CAMERA_UNIT_SPEC_ID_ENTRY 0x0000A02D
8#define CAMERA_SW_VERSION_ENTRY 0x00000100
9
10/* /include/linux/mod_devicetable.h defines:
11 * IEEE1394_MATCH_VENDOR_ID
12 * IEEE1394_MATCH_MODEL_ID
13 * IEEE1394_MATCH_SPECIFIER_ID
14 * IEEE1394_MATCH_VERSION
15 * struct ieee1394_device_id
16 */
17#include <linux/mod_devicetable.h>
18
19#endif /* _IEEE1394_HOTPLUG_H */
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
deleted file mode 100644
index 675b3135d5f1..000000000000
--- a/drivers/ieee1394/ieee1394_transactions.c
+++ /dev/null
@@ -1,595 +0,0 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * Transaction support.
5 *
6 * Copyright (C) 1999 Andreas E. Bombe
7 *
8 * This code is licensed under the GPL. See the file COPYING in the root
9 * directory of the kernel sources for details.
10 */
11
12#include <linux/bitops.h>
13#include <linux/compiler.h>
14#include <linux/hardirq.h>
15#include <linux/spinlock.h>
16#include <linux/string.h>
17#include <linux/sched.h> /* because linux/wait.h is broken if CONFIG_SMP=n */
18#include <linux/wait.h>
19
20#include <asm/bug.h>
21#include <asm/errno.h>
22#include <asm/system.h>
23
24#include "ieee1394.h"
25#include "ieee1394_types.h"
26#include "hosts.h"
27#include "ieee1394_core.h"
28#include "ieee1394_transactions.h"
29
30#define PREP_ASYNC_HEAD_ADDRESS(tc) \
31 packet->tcode = tc; \
32 packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
33 | (1 << 8) | (tc << 4); \
34 packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \
35 packet->header[2] = addr & 0xffffffff
36
37#ifndef HPSB_DEBUG_TLABELS
38static
39#endif
40DEFINE_SPINLOCK(hpsb_tlabel_lock);
41
42static DECLARE_WAIT_QUEUE_HEAD(tlabel_wq);
43
44static void fill_async_readquad(struct hpsb_packet *packet, u64 addr)
45{
46 PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ);
47 packet->header_size = 12;
48 packet->data_size = 0;
49 packet->expect_response = 1;
50}
51
52static void fill_async_readblock(struct hpsb_packet *packet, u64 addr,
53 int length)
54{
55 PREP_ASYNC_HEAD_ADDRESS(TCODE_READB);
56 packet->header[3] = length << 16;
57 packet->header_size = 16;
58 packet->data_size = 0;
59 packet->expect_response = 1;
60}
61
62static void fill_async_writequad(struct hpsb_packet *packet, u64 addr,
63 quadlet_t data)
64{
65 PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEQ);
66 packet->header[3] = data;
67 packet->header_size = 16;
68 packet->data_size = 0;
69 packet->expect_response = 1;
70}
71
72static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr,
73 int length)
74{
75 PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEB);
76 packet->header[3] = length << 16;
77 packet->header_size = 16;
78 packet->expect_response = 1;
79 packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
80}
81
82static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
83 int length)
84{
85 PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST);
86 packet->header[3] = (length << 16) | extcode;
87 packet->header_size = 16;
88 packet->data_size = length;
89 packet->expect_response = 1;
90}
91
92static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
93{
94 packet->header[0] = data;
95 packet->header[1] = ~data;
96 packet->header_size = 8;
97 packet->data_size = 0;
98 packet->expect_response = 0;
99 packet->type = hpsb_raw; /* No CRC added */
100 packet->speed_code = IEEE1394_SPEED_100; /* Force speed to be 100Mbps */
101}
102
103static void fill_async_stream_packet(struct hpsb_packet *packet, int length,
104 int channel, int tag, int sync)
105{
106 packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
107 | (TCODE_STREAM_DATA << 4) | sync;
108
109 packet->header_size = 4;
110 packet->data_size = length;
111 packet->type = hpsb_async;
112 packet->tcode = TCODE_ISO_DATA;
113}
114
115/* same as hpsb_get_tlabel, except that it returns immediately */
116static int hpsb_get_tlabel_atomic(struct hpsb_packet *packet)
117{
118 unsigned long flags, *tp;
119 u8 *next;
120 int tlabel, n = NODEID_TO_NODE(packet->node_id);
121
122 /* Broadcast transactions are complete once the request has been sent.
123 * Use the same transaction label for all broadcast transactions. */
124 if (unlikely(n == ALL_NODES)) {
125 packet->tlabel = 0;
126 return 0;
127 }
128 tp = packet->host->tl_pool[n].map;
129 next = &packet->host->next_tl[n];
130
131 spin_lock_irqsave(&hpsb_tlabel_lock, flags);
132 tlabel = find_next_zero_bit(tp, 64, *next);
133 if (tlabel > 63)
134 tlabel = find_first_zero_bit(tp, 64);
135 if (tlabel > 63) {
136 spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
137 return -EAGAIN;
138 }
139 __set_bit(tlabel, tp);
140 *next = (tlabel + 1) & 63;
141 spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
142
143 packet->tlabel = tlabel;
144 return 0;
145}
146
147/**
148 * hpsb_get_tlabel - allocate a transaction label
149 * @packet: the packet whose tlabel and tl_pool we set
150 *
151 * Every asynchronous transaction on the 1394 bus needs a transaction
152 * label to match the response to the request. This label has to be
153 * different from any other transaction label in an outstanding request to
154 * the same node to make matching possible without ambiguity.
155 *
156 * There are 64 different tlabels, so an allocated tlabel has to be freed
157 * with hpsb_free_tlabel() after the transaction is complete (unless it's
158 * reused again for the same target node).
159 *
160 * Return value: Zero on success, otherwise non-zero. A non-zero return
161 * generally means there are no available tlabels. If this is called out
162 * of interrupt or atomic context, then it will sleep until can return a
163 * tlabel or a signal is received.
164 */
165int hpsb_get_tlabel(struct hpsb_packet *packet)
166{
167 if (irqs_disabled() || in_atomic())
168 return hpsb_get_tlabel_atomic(packet);
169
170 /* NB: The macro wait_event_interruptible() is called with a condition
 171 * argument that has a side effect. This is only possible because the side
 172 * effect does not occur until the condition becomes true, and
173 * wait_event_interruptible() won't evaluate the condition again after
174 * that. */
175 return wait_event_interruptible(tlabel_wq,
176 !hpsb_get_tlabel_atomic(packet));
177}
178
179/**
180 * hpsb_free_tlabel - free an allocated transaction label
181 * @packet: packet whose tlabel and tl_pool needs to be cleared
182 *
183 * Frees the transaction label allocated with hpsb_get_tlabel(). The
184 * tlabel has to be freed after the transaction is complete (i.e. response
185 * was received for a split transaction or packet was sent for a unified
186 * transaction).
187 *
188 * A tlabel must not be freed twice.
189 */
190void hpsb_free_tlabel(struct hpsb_packet *packet)
191{
192 unsigned long flags, *tp;
193 int tlabel, n = NODEID_TO_NODE(packet->node_id);
194
195 if (unlikely(n == ALL_NODES))
196 return;
197 tp = packet->host->tl_pool[n].map;
198 tlabel = packet->tlabel;
199 BUG_ON(tlabel > 63 || tlabel < 0);
200
201 spin_lock_irqsave(&hpsb_tlabel_lock, flags);
202 BUG_ON(!__test_and_clear_bit(tlabel, tp));
203 spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
204
205 wake_up_interruptible(&tlabel_wq);
206}
207
208/**
209 * hpsb_packet_success - Make sense of the ack and reply codes
210 *
211 * Make sense of the ack and reply codes and return more convenient error codes:
 212 * 0 = success. -%EBUSY = node is busy, try again. -%EAGAIN = error which can
 213 * probably be resolved by a retry. -%EREMOTEIO = node suffers from an internal
 214 * error. -%EACCES = this transaction is not allowed on the requested address.
215 * -%EINVAL = invalid address at node.
216 */
217int hpsb_packet_success(struct hpsb_packet *packet)
218{
219 switch (packet->ack_code) {
220 case ACK_PENDING:
221 switch ((packet->header[1] >> 12) & 0xf) {
222 case RCODE_COMPLETE:
223 return 0;
224 case RCODE_CONFLICT_ERROR:
225 return -EAGAIN;
226 case RCODE_DATA_ERROR:
227 return -EREMOTEIO;
228 case RCODE_TYPE_ERROR:
229 return -EACCES;
230 case RCODE_ADDRESS_ERROR:
231 return -EINVAL;
232 default:
233 HPSB_ERR("received reserved rcode %d from node %d",
234 (packet->header[1] >> 12) & 0xf,
235 packet->node_id);
236 return -EAGAIN;
237 }
238
239 case ACK_BUSY_X:
240 case ACK_BUSY_A:
241 case ACK_BUSY_B:
242 return -EBUSY;
243
244 case ACK_TYPE_ERROR:
245 return -EACCES;
246
247 case ACK_COMPLETE:
248 if (packet->tcode == TCODE_WRITEQ
249 || packet->tcode == TCODE_WRITEB) {
250 return 0;
251 } else {
252 HPSB_ERR("impossible ack_complete from node %d "
253 "(tcode %d)", packet->node_id, packet->tcode);
254 return -EAGAIN;
255 }
256
257 case ACK_DATA_ERROR:
258 if (packet->tcode == TCODE_WRITEB
259 || packet->tcode == TCODE_LOCK_REQUEST) {
260 return -EAGAIN;
261 } else {
262 HPSB_ERR("impossible ack_data_error from node %d "
263 "(tcode %d)", packet->node_id, packet->tcode);
264 return -EAGAIN;
265 }
266
267 case ACK_ADDRESS_ERROR:
268 return -EINVAL;
269
270 case ACK_TARDY:
271 case ACK_CONFLICT_ERROR:
272 case ACKX_NONE:
273 case ACKX_SEND_ERROR:
274 case ACKX_ABORTED:
275 case ACKX_TIMEOUT:
276 /* error while sending */
277 return -EAGAIN;
278
279 default:
280 HPSB_ERR("got invalid ack %d from node %d (tcode %d)",
281 packet->ack_code, packet->node_id, packet->tcode);
282 return -EAGAIN;
283 }
284}
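
/*
 * A caller would typically map the -EBUSY/-EAGAIN results above into a
 * bounded retry of the whole transaction, e.g. (sketch; the retry policy is
 * the caller's choice):
 */
	for (attempt = 0; attempt < 3; attempt++) {
		ret = hpsb_read(host, node, generation, addr, &quad, 4);
		if (ret != -EAGAIN && ret != -EBUSY)
			break;
	}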
285
286struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
287 u64 addr, size_t length)
288{
289 struct hpsb_packet *packet;
290
291 if (length == 0)
292 return NULL;
293
294 packet = hpsb_alloc_packet(length);
295 if (!packet)
296 return NULL;
297
298 packet->host = host;
299 packet->node_id = node;
300
301 if (hpsb_get_tlabel(packet)) {
302 hpsb_free_packet(packet);
303 return NULL;
304 }
305
306 if (length == 4)
307 fill_async_readquad(packet, addr);
308 else
309 fill_async_readblock(packet, addr, length);
310
311 return packet;
312}
313
314struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host, nodeid_t node,
315 u64 addr, quadlet_t * buffer,
316 size_t length)
317{
318 struct hpsb_packet *packet;
319
320 if (length == 0)
321 return NULL;
322
323 packet = hpsb_alloc_packet(length);
324 if (!packet)
325 return NULL;
326
327 if (length % 4) { /* zero padding bytes */
328 packet->data[length >> 2] = 0;
329 }
330 packet->host = host;
331 packet->node_id = node;
332
333 if (hpsb_get_tlabel(packet)) {
334 hpsb_free_packet(packet);
335 return NULL;
336 }
337
338 if (length == 4) {
339 fill_async_writequad(packet, addr, buffer ? *buffer : 0);
340 } else {
341 fill_async_writeblock(packet, addr, length);
342 if (buffer)
343 memcpy(packet->data, buffer, length);
344 }
345
346 return packet;
347}
348
349struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 * buffer,
350 int length, int channel, int tag,
351 int sync)
352{
353 struct hpsb_packet *packet;
354
355 if (length == 0)
356 return NULL;
357
358 packet = hpsb_alloc_packet(length);
359 if (!packet)
360 return NULL;
361
362 if (length % 4) { /* zero padding bytes */
363 packet->data[length >> 2] = 0;
364 }
365 packet->host = host;
366
367 /* Because it is too difficult to determine all PHY speeds and link
368 * speeds here, we use S100... */
369 packet->speed_code = IEEE1394_SPEED_100;
370
371 /* ...and prevent hpsb_send_packet() from overriding it. */
372 packet->node_id = LOCAL_BUS | ALL_NODES;
373
374 if (hpsb_get_tlabel(packet)) {
375 hpsb_free_packet(packet);
376 return NULL;
377 }
378
379 fill_async_stream_packet(packet, length, channel, tag, sync);
380 if (buffer)
381 memcpy(packet->data, buffer, length);
382
383 return packet;
384}
385
386struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
387 u64 addr, int extcode,
388 quadlet_t * data, quadlet_t arg)
389{
390 struct hpsb_packet *p;
391 u32 length;
392
393 p = hpsb_alloc_packet(8);
394 if (!p)
395 return NULL;
396
397 p->host = host;
398 p->node_id = node;
399 if (hpsb_get_tlabel(p)) {
400 hpsb_free_packet(p);
401 return NULL;
402 }
403
404 switch (extcode) {
405 case EXTCODE_FETCH_ADD:
406 case EXTCODE_LITTLE_ADD:
407 length = 4;
408 if (data)
409 p->data[0] = *data;
410 break;
411 default:
412 length = 8;
413 if (data) {
414 p->data[0] = arg;
415 p->data[1] = *data;
416 }
417 break;
418 }
419 fill_async_lock(p, addr, extcode, length);
420
421 return p;
422}
423
424struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host,
425 nodeid_t node, u64 addr, int extcode,
426 octlet_t * data, octlet_t arg)
427{
428 struct hpsb_packet *p;
429 u32 length;
430
431 p = hpsb_alloc_packet(16);
432 if (!p)
433 return NULL;
434
435 p->host = host;
436 p->node_id = node;
437 if (hpsb_get_tlabel(p)) {
438 hpsb_free_packet(p);
439 return NULL;
440 }
441
442 switch (extcode) {
443 case EXTCODE_FETCH_ADD:
444 case EXTCODE_LITTLE_ADD:
445 length = 8;
446 if (data) {
447 p->data[0] = *data >> 32;
448 p->data[1] = *data & 0xffffffff;
449 }
450 break;
451 default:
452 length = 16;
453 if (data) {
454 p->data[0] = arg >> 32;
455 p->data[1] = arg & 0xffffffff;
456 p->data[2] = *data >> 32;
457 p->data[3] = *data & 0xffffffff;
458 }
459 break;
460 }
461 fill_async_lock(p, addr, extcode, length);
462
463 return p;
464}
465
466struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data)
467{
468 struct hpsb_packet *p;
469
470 p = hpsb_alloc_packet(0);
471 if (!p)
472 return NULL;
473
474 p->host = host;
475 fill_phy_packet(p, data);
476
477 return p;
478}
479
480/*
481 * FIXME - these functions should probably read from / write to user space to
482 * avoid in kernel buffers for user space callers
483 */
484
485/**
486 * hpsb_read - generic read function
487 *
 488 * Recognizes the local node ID and acts accordingly. Automatically uses a
 489 * quadlet read request if @length == 4 and a block read request otherwise.
490 * It does not yet support lengths that are not a multiple of 4.
491 *
 492 * You must explicitly specify the @generation for which the node ID is valid,
493 * to avoid sending packets to the wrong nodes when we race with a bus reset.
494 */
495int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
496 u64 addr, quadlet_t * buffer, size_t length)
497{
498 struct hpsb_packet *packet;
499 int retval = 0;
500
501 if (length == 0)
502 return -EINVAL;
503
504 packet = hpsb_make_readpacket(host, node, addr, length);
505
506 if (!packet) {
507 return -ENOMEM;
508 }
509
510 packet->generation = generation;
511 retval = hpsb_send_packet_and_wait(packet);
512 if (retval < 0)
513 goto hpsb_read_fail;
514
515 retval = hpsb_packet_success(packet);
516
517 if (retval == 0) {
518 if (length == 4) {
519 *buffer = packet->header[3];
520 } else {
521 memcpy(buffer, packet->data, length);
522 }
523 }
524
525 hpsb_read_fail:
526 hpsb_free_tlabel(packet);
527 hpsb_free_packet(packet);
528
529 return retval;
530}
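
/*
 * Example: read the first quadlet of a remote node's configuration ROM
 * (CSR_REGISTER_BASE and CSR_CONFIG_ROM are assumed to come from csr.h):
 */
	quadlet_t q;
	int ret = hpsb_read(host, node, get_hpsb_generation(host),
			    CSR_REGISTER_BASE + CSR_CONFIG_ROM, &q, 4);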
531
532/**
533 * hpsb_write - generic write function
534 *
 535 * Recognizes the local node ID and acts accordingly. Automatically uses a
 536 * quadlet write request if @length == 4 and a block write request
537 * otherwise. It does not yet support lengths that are not a multiple of 4.
538 *
 539 * You must explicitly specify the @generation for which the node ID is valid,
540 * to avoid sending packets to the wrong nodes when we race with a bus reset.
541 */
542int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
543 u64 addr, quadlet_t * buffer, size_t length)
544{
545 struct hpsb_packet *packet;
546 int retval;
547
548 if (length == 0)
549 return -EINVAL;
550
551 packet = hpsb_make_writepacket(host, node, addr, buffer, length);
552
553 if (!packet)
554 return -ENOMEM;
555
556 packet->generation = generation;
557 retval = hpsb_send_packet_and_wait(packet);
558 if (retval < 0)
559 goto hpsb_write_fail;
560
561 retval = hpsb_packet_success(packet);
562
563 hpsb_write_fail:
564 hpsb_free_tlabel(packet);
565 hpsb_free_packet(packet);
566
567 return retval;
568}
569
570int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
571 u64 addr, int extcode, quadlet_t *data, quadlet_t arg)
572{
573 struct hpsb_packet *packet;
574 int retval = 0;
575
576 packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
577 if (!packet)
578 return -ENOMEM;
579
580 packet->generation = generation;
581 retval = hpsb_send_packet_and_wait(packet);
582 if (retval < 0)
583 goto hpsb_lock_fail;
584
585 retval = hpsb_packet_success(packet);
586
587 if (retval == 0)
588 *data = packet->data[0];
589
590hpsb_lock_fail:
591 hpsb_free_tlabel(packet);
592 hpsb_free_packet(packet);
593
594 return retval;
595}
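
/*
 * Example: a compare-and-swap against a remote register, e.g. when contending
 * for an IRM resource (sketch; EXTCODE_COMPARE_SWAP comes from ieee1394.h).
 * On success *data carries the old register value, so the swap took effect
 * iff it equals the expected value:
 */
	quadlet_t lock_data = new_value;

	ret = hpsb_lock(host, node, get_hpsb_generation(host), addr,
			EXTCODE_COMPARE_SWAP, &lock_data, expected);
	if (ret == 0 && lock_data == expected)
		; /* register held 'expected' and now holds 'new_value' */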
diff --git a/drivers/ieee1394/ieee1394_transactions.h b/drivers/ieee1394/ieee1394_transactions.h
deleted file mode 100644
index 20b693be14b2..000000000000
--- a/drivers/ieee1394/ieee1394_transactions.h
+++ /dev/null
@@ -1,40 +0,0 @@
1#ifndef _IEEE1394_TRANSACTIONS_H
2#define _IEEE1394_TRANSACTIONS_H
3
4#include <linux/types.h>
5
6#include "ieee1394_types.h"
7
8struct hpsb_packet;
9struct hpsb_host;
10
11int hpsb_get_tlabel(struct hpsb_packet *packet);
12void hpsb_free_tlabel(struct hpsb_packet *packet);
13struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
14 u64 addr, size_t length);
15struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
16 u64 addr, int extcode, quadlet_t *data,
17 quadlet_t arg);
18struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host,
19 nodeid_t node, u64 addr, int extcode,
20 octlet_t *data, octlet_t arg);
21struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data);
22struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host,
23 nodeid_t node, u64 addr,
24 quadlet_t *buffer, size_t length);
25struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer,
26 int length, int channel, int tag,
27 int sync);
28int hpsb_packet_success(struct hpsb_packet *packet);
29int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
30 u64 addr, quadlet_t *buffer, size_t length);
31int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
32 u64 addr, quadlet_t *buffer, size_t length);
33int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
34 u64 addr, int extcode, quadlet_t *data, quadlet_t arg);
35
36#ifdef HPSB_DEBUG_TLABELS
37extern spinlock_t hpsb_tlabel_lock;
38#endif
39
40#endif /* _IEEE1394_TRANSACTIONS_H */
diff --git a/drivers/ieee1394/ieee1394_types.h b/drivers/ieee1394/ieee1394_types.h
deleted file mode 100644
index 9803aaa15be0..000000000000
--- a/drivers/ieee1394/ieee1394_types.h
+++ /dev/null
@@ -1,69 +0,0 @@
1#ifndef _IEEE1394_TYPES_H
2#define _IEEE1394_TYPES_H
3
4#include <linux/kernel.h>
5#include <linux/string.h>
6#include <linux/types.h>
7#include <asm/byteorder.h>
8
9typedef u32 quadlet_t;
10typedef u64 octlet_t;
11typedef u16 nodeid_t;
12
13typedef u8 byte_t;
14typedef u64 nodeaddr_t;
15typedef u16 arm_length_t;
16
17#define BUS_MASK 0xffc0
18#define BUS_SHIFT 6
19#define NODE_MASK 0x003f
20#define LOCAL_BUS 0xffc0
21#define ALL_NODES 0x003f
22
23#define NODEID_TO_BUS(nodeid) ((nodeid & BUS_MASK) >> BUS_SHIFT)
24#define NODEID_TO_NODE(nodeid) (nodeid & NODE_MASK)
25
26/* Can be used to consistently print a node/bus ID. */
27#define NODE_BUS_FMT "%d-%02d:%04d"
28#define NODE_BUS_ARGS(__host, __nodeid) \
29 __host->id, NODEID_TO_NODE(__nodeid), NODEID_TO_BUS(__nodeid)
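
/* Worked example of the packing above: a 16-bit node ID carries the
 * 10-bit bus number in its upper bits and the 6-bit physical ID in its
 * lower bits.  For nodeid 0xffc2 (a node on the local bus):
 *
 *   NODEID_TO_BUS(0xffc2)  = (0xffc2 & 0xffc0) >> 6 = 1023  (local bus)
 *   NODEID_TO_NODE(0xffc2) =  0xffc2 & 0x003f       = 2
 */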
30
31#define HPSB_PRINT(level, fmt, args...) \
32 printk(level "ieee1394: " fmt "\n" , ## args)
33
34#define HPSB_DEBUG(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args)
35#define HPSB_INFO(fmt, args...) HPSB_PRINT(KERN_INFO, fmt , ## args)
36#define HPSB_NOTICE(fmt, args...) HPSB_PRINT(KERN_NOTICE, fmt , ## args)
37#define HPSB_WARN(fmt, args...) HPSB_PRINT(KERN_WARNING, fmt , ## args)
38#define HPSB_ERR(fmt, args...) HPSB_PRINT(KERN_ERR, fmt , ## args)
39
40#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
41#define HPSB_VERBOSE(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args)
42#define HPSB_DEBUG_TLABELS
43#else
44#define HPSB_VERBOSE(fmt, args...) do {} while (0)
45#endif
46
47#ifdef __BIG_ENDIAN
48
49static inline void *memcpy_le32(u32 *dest, const u32 *__src, size_t count)
50{
51 void *tmp = dest;
52 u32 *src = (u32 *)__src;
53
54 count /= 4;
55 while (count--)
56 *dest++ = swab32p(src++);
57 return tmp;
58}
59
60#else
61
62static __inline__ void *memcpy_le32(u32 *dest, const u32 *src, size_t count)
63{
64 return memcpy(dest, src, count);
65}
66
67#endif /* __BIG_ENDIAN */
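
/* Worked example: a quadlet whose little-endian representation encodes
 * 0x12345678 reads as 0x78563412 through a native u32 load on a
 * big-endian CPU; memcpy_le32() swaps each quadlet while copying, so
 * the destination holds 0x12345678 again.  On little-endian hosts the
 * function degenerates to a plain memcpy(). */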
68
69#endif /* _IEEE1394_TYPES_H */
diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c
deleted file mode 100644
index 1cf6487b65ba..000000000000
--- a/drivers/ieee1394/iso.c
+++ /dev/null
@@ -1,568 +0,0 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * kernel ISO transmission/reception
5 *
6 * Copyright (C) 2002 Maas Digital LLC
7 *
8 * This code is licensed under the GPL. See the file COPYING in the root
9 * directory of the kernel sources for details.
10 */
11
12#include <linux/pci.h>
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <linux/slab.h>
16
17#include "hosts.h"
18#include "iso.h"
19
20/**
21 * hpsb_iso_stop - stop DMA
22 */
23void hpsb_iso_stop(struct hpsb_iso *iso)
24{
25 if (!(iso->flags & HPSB_ISO_DRIVER_STARTED))
26 return;
27
28 iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
29 XMIT_STOP : RECV_STOP, 0);
30 iso->flags &= ~HPSB_ISO_DRIVER_STARTED;
31}
32
33/**
34 * hpsb_iso_shutdown - deallocate buffer and DMA context
35 */
36void hpsb_iso_shutdown(struct hpsb_iso *iso)
37{
38 if (iso->flags & HPSB_ISO_DRIVER_INIT) {
39 hpsb_iso_stop(iso);
40 iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ?
41 XMIT_SHUTDOWN : RECV_SHUTDOWN, 0);
42 iso->flags &= ~HPSB_ISO_DRIVER_INIT;
43 }
44
45 dma_region_free(&iso->data_buf);
46 kfree(iso);
47}
48
49static struct hpsb_iso *hpsb_iso_common_init(struct hpsb_host *host,
50 enum hpsb_iso_type type,
51 unsigned int data_buf_size,
52 unsigned int buf_packets,
53 int channel, int dma_mode,
54 int irq_interval,
55 void (*callback) (struct hpsb_iso
56 *))
57{
58 struct hpsb_iso *iso;
59 int dma_direction;
60
61 /* make sure driver supports the ISO API */
62 if (!host->driver->isoctl) {
63 printk(KERN_INFO
64 "ieee1394: host driver '%s' does not support the rawiso API\n",
65 host->driver->name);
66 return NULL;
67 }
68
69 /* sanitize parameters */
70
71 if (buf_packets < 2)
72 buf_packets = 2;
73
74 if ((dma_mode < HPSB_ISO_DMA_DEFAULT)
75 || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER))
76 dma_mode = HPSB_ISO_DMA_DEFAULT;
77
78 if ((irq_interval < 0) || (irq_interval > buf_packets / 4))
79 irq_interval = buf_packets / 4;
 80	if (irq_interval == 0)	/* i.e. interrupt for every packet */
81 irq_interval = 1;
82
83 if (channel < -1 || channel >= 64)
84 return NULL;
85
86 /* channel = -1 is OK for multi-channel recv but not for xmit */
87 if (type == HPSB_ISO_XMIT && channel < 0)
88 return NULL;
89
90 /* allocate and write the struct hpsb_iso */
91
92 iso =
93 kmalloc(sizeof(*iso) +
94 buf_packets * sizeof(struct hpsb_iso_packet_info),
95 GFP_KERNEL);
96 if (!iso)
97 return NULL;
98
99 iso->infos = (struct hpsb_iso_packet_info *)(iso + 1);
100
101 iso->type = type;
102 iso->host = host;
103 iso->hostdata = NULL;
104 iso->callback = callback;
105 init_waitqueue_head(&iso->waitq);
106 iso->channel = channel;
107 iso->irq_interval = irq_interval;
108 iso->dma_mode = dma_mode;
109 dma_region_init(&iso->data_buf);
110 iso->buf_size = PAGE_ALIGN(data_buf_size);
111 iso->buf_packets = buf_packets;
112 iso->pkt_dma = 0;
113 iso->first_packet = 0;
114 spin_lock_init(&iso->lock);
115
116 if (iso->type == HPSB_ISO_XMIT) {
117 iso->n_ready_packets = iso->buf_packets;
118 dma_direction = PCI_DMA_TODEVICE;
119 } else {
120 iso->n_ready_packets = 0;
121 dma_direction = PCI_DMA_FROMDEVICE;
122 }
123
124 atomic_set(&iso->overflows, 0);
125 iso->bytes_discarded = 0;
126 iso->flags = 0;
127 iso->prebuffer = 0;
128
129 /* allocate the packet buffer */
130 if (dma_region_alloc
131 (&iso->data_buf, iso->buf_size, host->pdev, dma_direction))
132 goto err;
133
134 return iso;
135
136 err:
137 hpsb_iso_shutdown(iso);
138 return NULL;
139}
140
141/**
142 * hpsb_iso_n_ready - returns number of packets ready to send or receive
143 */
144int hpsb_iso_n_ready(struct hpsb_iso *iso)
145{
146 unsigned long flags;
147 int val;
148
149 spin_lock_irqsave(&iso->lock, flags);
150 val = iso->n_ready_packets;
151 spin_unlock_irqrestore(&iso->lock, flags);
152
153 return val;
154}
155
156/**
157 * hpsb_iso_xmit_init - allocate the buffer and DMA context
158 */
159struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host,
160 unsigned int data_buf_size,
161 unsigned int buf_packets,
162 int channel,
163 int speed,
164 int irq_interval,
165 void (*callback) (struct hpsb_iso *))
166{
167 struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT,
168 data_buf_size, buf_packets,
169 channel,
170 HPSB_ISO_DMA_DEFAULT,
171 irq_interval, callback);
172 if (!iso)
173 return NULL;
174
175 iso->speed = speed;
176
177 /* tell the driver to start working */
178 if (host->driver->isoctl(iso, XMIT_INIT, 0))
179 goto err;
180
181 iso->flags |= HPSB_ISO_DRIVER_INIT;
182 return iso;
183
184 err:
185 hpsb_iso_shutdown(iso);
186 return NULL;
187}
188
189/**
190 * hpsb_iso_recv_init - allocate the buffer and DMA context
191 *
 192 * Note: if channel == -1, multi-channel receive is enabled.
193 */
194struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host,
195 unsigned int data_buf_size,
196 unsigned int buf_packets,
197 int channel,
198 int dma_mode,
199 int irq_interval,
200 void (*callback) (struct hpsb_iso *))
201{
202 struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV,
203 data_buf_size, buf_packets,
204 channel, dma_mode,
205 irq_interval, callback);
206 if (!iso)
207 return NULL;
208
209 /* tell the driver to start working */
210 if (host->driver->isoctl(iso, RECV_INIT, 0))
211 goto err;
212
213 iso->flags |= HPSB_ISO_DRIVER_INIT;
214 return iso;
215
216 err:
217 hpsb_iso_shutdown(iso);
218 return NULL;
219}
220
221/**
222 * hpsb_iso_recv_listen_channel
223 *
224 * multi-channel only
225 */
226int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
227{
228 if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
229 return -EINVAL;
230 return iso->host->driver->isoctl(iso, RECV_LISTEN_CHANNEL, channel);
231}
232
233/**
234 * hpsb_iso_recv_unlisten_channel
235 *
236 * multi-channel only
237 */
238int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
239{
240 if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
241 return -EINVAL;
242 return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
243}
244
245/**
246 * hpsb_iso_recv_set_channel_mask
247 *
248 * multi-channel only
249 */
250int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
251{
252 if (iso->type != HPSB_ISO_RECV || iso->channel != -1)
253 return -EINVAL;
254 return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK,
255 (unsigned long)&mask);
256}
257
258/**
259 * hpsb_iso_recv_flush - check for arrival of new packets
260 *
261 * check for arrival of new packets immediately (even if irq_interval
262 * has not yet been reached)
263 */
264int hpsb_iso_recv_flush(struct hpsb_iso *iso)
265{
266 if (iso->type != HPSB_ISO_RECV)
267 return -EINVAL;
268 return iso->host->driver->isoctl(iso, RECV_FLUSH, 0);
269}
270
271static int do_iso_xmit_start(struct hpsb_iso *iso, int cycle)
272{
273 int retval = iso->host->driver->isoctl(iso, XMIT_START, cycle);
274 if (retval)
275 return retval;
276
277 iso->flags |= HPSB_ISO_DRIVER_STARTED;
278 return retval;
279}
280
281/**
282 * hpsb_iso_xmit_start - start DMA
283 */
284int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
285{
286 if (iso->type != HPSB_ISO_XMIT)
287 return -1;
288
289 if (iso->flags & HPSB_ISO_DRIVER_STARTED)
290 return 0;
291
292 if (cycle < -1)
293 cycle = -1;
294 else if (cycle >= 8000)
295 cycle %= 8000;
296
297 iso->xmit_cycle = cycle;
298
299 if (prebuffer < 0)
300 prebuffer = iso->buf_packets - 1;
301 else if (prebuffer == 0)
302 prebuffer = 1;
303
304 if (prebuffer >= iso->buf_packets)
305 prebuffer = iso->buf_packets - 1;
306
307 iso->prebuffer = prebuffer;
308
 309	/* remember the starting cycle; DMA will commence from
 310	   hpsb_iso_xmit_queue_packet() once enough packets have been buffered */
311 iso->start_cycle = cycle;
312
313 return 0;
314}
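
/*
 * A transmit-side usage sketch of the API above: one stream on channel 5
 * at S400, 64 packets of 1024 bytes in a 64 KiB buffer.  Filling the
 * payloads at the given buffer offsets is left out, and error handling
 * is abbreviated.
 */
static void example_xmit(struct hpsb_host *host)
{
	struct hpsb_iso *iso;
	int i;

	iso = hpsb_iso_xmit_init(host, 64 * 1024, 64, 5 /* channel */,
				 IEEE1394_SPEED_400, 16 /* irq_interval */,
				 NULL /* no callback */);
	if (!iso)
		return;

	/* start on any cycle; DMA kicks in once 32 packets are queued */
	if (hpsb_iso_xmit_start(iso, -1, 32) < 0)
		goto out;

	for (i = 0; i < 64; i++)
		/* payload for packet i was placed at offset i * 1024 */
		if (hpsb_iso_xmit_queue_packet(iso, i * 1024, 1024,
					       0 /* tag */, 0 /* sy */) < 0)
			break;

	hpsb_iso_xmit_sync(iso);	/* wait until the ring has drained */
out:
	hpsb_iso_stop(iso);
	hpsb_iso_shutdown(iso);
}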
315
316/**
317 * hpsb_iso_recv_start - start DMA
318 */
319int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
320{
321 int retval = 0;
322 int isoctl_args[3];
323
324 if (iso->type != HPSB_ISO_RECV)
325 return -1;
326
327 if (iso->flags & HPSB_ISO_DRIVER_STARTED)
328 return 0;
329
330 if (cycle < -1)
331 cycle = -1;
332 else if (cycle >= 8000)
333 cycle %= 8000;
334
335 isoctl_args[0] = cycle;
336
337 if (tag_mask < 0)
338 /* match all tags */
339 tag_mask = 0xF;
340 isoctl_args[1] = tag_mask;
341
342 isoctl_args[2] = sync;
343
344 retval =
345 iso->host->driver->isoctl(iso, RECV_START,
346 (unsigned long)&isoctl_args[0]);
347 if (retval)
348 return retval;
349
350 iso->flags |= HPSB_ISO_DRIVER_STARTED;
351 return retval;
352}
353
354/* check to make sure the user has not supplied bogus values of offset/len
355 * that would cause the kernel to access memory outside the buffer */
356static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
357 unsigned int offset, unsigned short len,
358 unsigned int *out_offset,
359 unsigned short *out_len)
360{
361 if (offset >= iso->buf_size)
362 return -EFAULT;
363
364 /* make sure the packet does not go beyond the end of the buffer */
365 if (offset + len > iso->buf_size)
366 return -EFAULT;
367
368 /* check for wrap-around */
369 if (offset + len < offset)
370 return -EFAULT;
371
 372	/* now we can trust 'offset' and 'len' */
373 *out_offset = offset;
374 *out_len = len;
375
376 return 0;
377}
378
379/**
380 * hpsb_iso_xmit_queue_packet - queue a packet for transmission.
381 *
382 * @offset is relative to the beginning of the DMA buffer, where the packet's
383 * data payload should already have been placed.
384 */
385int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
386 u8 tag, u8 sy)
387{
388 struct hpsb_iso_packet_info *info;
389 unsigned long flags;
390 int rv;
391
392 if (iso->type != HPSB_ISO_XMIT)
393 return -EINVAL;
394
395 /* is there space in the buffer? */
396 if (iso->n_ready_packets <= 0) {
397 return -EBUSY;
398 }
399
400 info = &iso->infos[iso->first_packet];
401
402 /* check for bogus offset/length */
403 if (hpsb_iso_check_offset_len
404 (iso, offset, len, &info->offset, &info->len))
405 return -EFAULT;
406
407 info->tag = tag;
408 info->sy = sy;
409
410 spin_lock_irqsave(&iso->lock, flags);
411
412 rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long)info);
413 if (rv)
414 goto out;
415
416 /* increment cursors */
417 iso->first_packet = (iso->first_packet + 1) % iso->buf_packets;
418 iso->xmit_cycle = (iso->xmit_cycle + 1) % 8000;
419 iso->n_ready_packets--;
420
421 if (iso->prebuffer != 0) {
422 iso->prebuffer--;
423 if (iso->prebuffer <= 0) {
424 iso->prebuffer = 0;
425 rv = do_iso_xmit_start(iso, iso->start_cycle);
426 }
427 }
428
429 out:
430 spin_unlock_irqrestore(&iso->lock, flags);
431 return rv;
432}
433
434/**
435 * hpsb_iso_xmit_sync - wait until all queued packets have been transmitted
436 */
437int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
438{
439 if (iso->type != HPSB_ISO_XMIT)
440 return -EINVAL;
441
442 return wait_event_interruptible(iso->waitq,
443 hpsb_iso_n_ready(iso) ==
444 iso->buf_packets);
445}
446
447/**
448 * hpsb_iso_packet_sent
449 *
450 * Available to low-level drivers.
451 *
452 * Call after a packet has been transmitted to the bus (interrupt context is
453 * OK). @cycle is the _exact_ cycle the packet was sent on. @error should be
454 * non-zero if some sort of error occurred when sending the packet.
455 */
456void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
457{
458 unsigned long flags;
459 spin_lock_irqsave(&iso->lock, flags);
460
461 /* predict the cycle of the next packet to be queued */
462
463 /* jump ahead by the number of packets that are already buffered */
464 cycle += iso->buf_packets - iso->n_ready_packets;
465 cycle %= 8000;
466
467 iso->xmit_cycle = cycle;
468 iso->n_ready_packets++;
469 iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;
470
471 if (iso->n_ready_packets == iso->buf_packets || error != 0) {
 472		/* the buffer has run empty (underflow), or the packet errored out */
473 atomic_inc(&iso->overflows);
474 }
475
476 spin_unlock_irqrestore(&iso->lock, flags);
477}
478
479/**
480 * hpsb_iso_packet_received
481 *
482 * Available to low-level drivers.
483 *
484 * Call after a packet has been received (interrupt context is OK).
485 */
486void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
487 u16 total_len, u16 cycle, u8 channel, u8 tag,
488 u8 sy)
489{
490 unsigned long flags;
491 spin_lock_irqsave(&iso->lock, flags);
492
493 if (iso->n_ready_packets == iso->buf_packets) {
494 /* overflow! */
495 atomic_inc(&iso->overflows);
496 /* Record size of this discarded packet */
497 iso->bytes_discarded += total_len;
498 } else {
499 struct hpsb_iso_packet_info *info = &iso->infos[iso->pkt_dma];
500 info->offset = offset;
501 info->len = len;
502 info->total_len = total_len;
503 info->cycle = cycle;
504 info->channel = channel;
505 info->tag = tag;
506 info->sy = sy;
507
508 iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;
509 iso->n_ready_packets++;
510 }
511
512 spin_unlock_irqrestore(&iso->lock, flags);
513}
514
515/**
516 * hpsb_iso_recv_release_packets - release packets, reuse buffer
517 *
518 * @n_packets have been read out of the buffer, re-use the buffer space
519 */
520int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
521{
522 unsigned long flags;
523 unsigned int i;
524 int rv = 0;
525
526 if (iso->type != HPSB_ISO_RECV)
527 return -1;
528
529 spin_lock_irqsave(&iso->lock, flags);
530 for (i = 0; i < n_packets; i++) {
531 rv = iso->host->driver->isoctl(iso, RECV_RELEASE,
532 (unsigned long)&iso->infos[iso->
533 first_packet]);
534 if (rv)
535 break;
536
537 iso->first_packet = (iso->first_packet + 1) % iso->buf_packets;
538 iso->n_ready_packets--;
539
540 /* release memory from packets discarded when queue was full */
541 if (iso->n_ready_packets == 0) { /* Release only after all prior packets handled */
542 if (iso->bytes_discarded != 0) {
543 struct hpsb_iso_packet_info inf;
544 inf.total_len = iso->bytes_discarded;
545 iso->host->driver->isoctl(iso, RECV_RELEASE,
546 (unsigned long)&inf);
547 iso->bytes_discarded = 0;
548 }
549 }
550 }
551 spin_unlock_irqrestore(&iso->lock, flags);
552 return rv;
553}
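
/*
 * A sketch of the matching receive-side consumer loop, assuming the
 * dma_region_i() accessor from dma.h; in real use raw1394 instead
 * exported the buffer and the descriptor ring to userspace.  Locking
 * against the interrupt-side producer is simplified here, and consume()
 * is a placeholder for application-specific payload handling.
 */
static void consume(const u8 *data, size_t len) { /* placeholder */ }

static void example_recv_drain(struct hpsb_iso *iso)
{
	int i, n = hpsb_iso_n_ready(iso);

	for (i = 0; i < n; i++) {
		struct hpsb_iso_packet_info *info =
			&iso->infos[(iso->first_packet + i) % iso->buf_packets];

		/* the payload sits at info->offset inside the DMA buffer */
		consume(dma_region_i(&iso->data_buf, u8, info->offset),
			info->len);
	}
	/* hand the consumed slots back to the DMA engine */
	hpsb_iso_recv_release_packets(iso, n);
}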
554
555/**
556 * hpsb_iso_wake
557 *
558 * Available to low-level drivers.
559 *
560 * Call to wake waiting processes after buffer space has opened up.
561 */
562void hpsb_iso_wake(struct hpsb_iso *iso)
563{
564 wake_up_interruptible(&iso->waitq);
565
566 if (iso->callback)
567 iso->callback(iso);
568}
diff --git a/drivers/ieee1394/iso.h b/drivers/ieee1394/iso.h
deleted file mode 100644
index c2089c093aa7..000000000000
--- a/drivers/ieee1394/iso.h
+++ /dev/null
@@ -1,195 +0,0 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * kernel ISO transmission/reception
5 *
6 * Copyright (C) 2002 Maas Digital LLC
7 *
8 * This code is licensed under the GPL. See the file COPYING in the root
9 * directory of the kernel sources for details.
10 */
11
12#ifndef IEEE1394_ISO_H
13#define IEEE1394_ISO_H
14
15#include <linux/spinlock_types.h>
16#include <linux/wait.h>
17#include <asm/atomic.h>
18#include <asm/types.h>
19
20#include "dma.h"
21
22struct hpsb_host;
23
24/* high-level ISO interface */
25
26/*
27 * This API sends and receives isochronous packets on a large,
28 * virtually-contiguous kernel memory buffer. The buffer may be mapped
29 * into a user-space process for zero-copy transmission and reception.
30 *
31 * There are no explicit boundaries between packets in the buffer. A
32 * packet may be transmitted or received at any location. However,
33 * low-level drivers may impose certain restrictions on alignment or
34 * size of packets. (e.g. in OHCI no packet may cross a page boundary,
35 * and packets should be quadlet-aligned)
36 */
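
/* For instance, with a 4096-byte page size the OHCI back-end would
 * reject a 2000-byte packet placed at buffer offset 3000 (it would end
 * at 5000 and cross the page boundary at 4096), while the same packet
 * placed at offset 4096 is both within one page and quadlet-aligned. */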
37
38/* Packet descriptor - the API maintains a ring buffer of these packet
39 * descriptors in kernel memory (hpsb_iso.infos[]). */
40struct hpsb_iso_packet_info {
41 /* offset of data payload relative to the first byte of the buffer */
42 __u32 offset;
43
44 /* length of the data payload, in bytes (not including the isochronous
45 * header) */
46 __u16 len;
47
48 /* (recv only) the cycle number (mod 8000) on which the packet was
49 * received */
50 __u16 cycle;
51
52 /* (recv only) channel on which the packet was received */
53 __u8 channel;
54
55 /* 2-bit 'tag' and 4-bit 'sy' fields of the isochronous header */
56 __u8 tag;
57 __u8 sy;
58
59 /* length in bytes of the packet including header/trailer.
 60	 * MUST be at structure end, since the first part of this structure is
 61	 * also defined in raw1394.h (i.e. struct raw1394_iso_packet_info),
 62	 * which is copied to userspace and accessed there through libraw1394. */
63 __u16 total_len;
64};
65
66enum hpsb_iso_type { HPSB_ISO_RECV = 0, HPSB_ISO_XMIT = 1 };
67
 68/* The DMA mode used when receiving iso data. Must be supported by the chip */
69enum raw1394_iso_dma_recv_mode {
70 HPSB_ISO_DMA_DEFAULT = -1,
71 HPSB_ISO_DMA_OLD_ABI = 0,
72 HPSB_ISO_DMA_BUFFERFILL = 1,
73 HPSB_ISO_DMA_PACKET_PER_BUFFER = 2
74};
75
76struct hpsb_iso {
77 enum hpsb_iso_type type;
78
79 /* pointer to low-level driver and its private data */
80 struct hpsb_host *host;
81 void *hostdata;
82
83 /* a function to be called (from interrupt context) after
84 * outgoing packets have been sent, or incoming packets have
85 * arrived */
86 void (*callback)(struct hpsb_iso*);
87
88 /* wait for buffer space */
89 wait_queue_head_t waitq;
90
91 int speed; /* IEEE1394_SPEED_100, 200, or 400 */
92 int channel; /* -1 if multichannel */
93 int dma_mode; /* dma receive mode */
94
95
96 /* greatest # of packets between interrupts - controls
97 * the maximum latency of the buffer */
98 int irq_interval;
99
100 /* the buffer for packet data payloads */
101 struct dma_region data_buf;
102
103 /* size of data_buf, in bytes (always a multiple of PAGE_SIZE) */
104 unsigned int buf_size;
105
106 /* # of packets in the ringbuffer */
107 unsigned int buf_packets;
108
109 /* protects packet cursors */
110 spinlock_t lock;
111
112 /* the index of the next packet that will be produced
113 or consumed by the user */
114 int first_packet;
115
116 /* the index of the next packet that will be transmitted
117 or received by the 1394 hardware */
118 int pkt_dma;
119
120 /* how many packets, starting at first_packet:
121 * (transmit) are ready to be filled with data
122 * (receive) contain received data */
123 int n_ready_packets;
124
125 /* how many times the buffer has overflowed or underflowed */
126 atomic_t overflows;
127 /* how many cycles were skipped for a given context */
128 atomic_t skips;
129
130 /* Current number of bytes lost in discarded packets */
131 int bytes_discarded;
132
133 /* private flags to track initialization progress */
134#define HPSB_ISO_DRIVER_INIT (1<<0)
135#define HPSB_ISO_DRIVER_STARTED (1<<1)
136 unsigned int flags;
137
138 /* # of packets left to prebuffer (xmit only) */
139 int prebuffer;
140
141 /* starting cycle for DMA (xmit only) */
142 int start_cycle;
143
144 /* cycle at which next packet will be transmitted,
145 * -1 if not known */
146 int xmit_cycle;
147
 148	/* ringbuffer of packet descriptors in regular kernel memory
 149	 * XXX Keep this last: the memory over-allocated after the struct
 150	 * is used as this array (see hpsb_iso_common_init()). */
151 struct hpsb_iso_packet_info *infos;
152};
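
/* Cursor arithmetic, worked through for buf_packets = 8 on the transmit
 * side: after init, first_packet = pkt_dma = 0 and n_ready_packets = 8
 * (all slots fillable).  Queuing three packets moves first_packet to 3
 * and n_ready_packets to 5; once the hardware sends one packet,
 * hpsb_iso_packet_sent() advances pkt_dma to 1 and n_ready_packets back
 * to 6.  n_ready_packets reaching buf_packets while the context is
 * still running therefore signals an underrun (counted in 'overflows'). */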
153
154/* functions available to high-level drivers (e.g. raw1394) */
155
156struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
157 unsigned int data_buf_size,
158 unsigned int buf_packets,
159 int channel,
160 int speed,
161 int irq_interval,
162 void (*callback)(struct hpsb_iso*));
163struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
164 unsigned int data_buf_size,
165 unsigned int buf_packets,
166 int channel,
167 int dma_mode,
168 int irq_interval,
169 void (*callback)(struct hpsb_iso*));
170int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel);
171int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel);
172int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask);
173int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle,
174 int prebuffer);
175int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle,
176 int tag_mask, int sync);
177void hpsb_iso_stop(struct hpsb_iso *iso);
178void hpsb_iso_shutdown(struct hpsb_iso *iso);
179int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
180 u8 tag, u8 sy);
181int hpsb_iso_xmit_sync(struct hpsb_iso *iso);
182int hpsb_iso_recv_release_packets(struct hpsb_iso *recv,
183 unsigned int n_packets);
184int hpsb_iso_recv_flush(struct hpsb_iso *iso);
185int hpsb_iso_n_ready(struct hpsb_iso *iso);
186
187/* the following are callbacks available to low-level drivers */
188
189void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error);
190void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
191 u16 total_len, u16 cycle, u8 channel, u8 tag,
192 u8 sy);
193void hpsb_iso_wake(struct hpsb_iso *iso);
194
195#endif /* IEEE1394_ISO_H */
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
deleted file mode 100644
index 18350213479e..000000000000
--- a/drivers/ieee1394/nodemgr.c
+++ /dev/null
@@ -1,1901 +0,0 @@
1/*
2 * Node information (ConfigROM) collection and management.
3 *
4 * Copyright (C) 2000 Andreas E. Bombe
5 * 2001-2003 Ben Collins <bcollins@debian.net>
6 *
7 * This code is licensed under the GPL. See the file COPYING in the root
8 * directory of the kernel sources for details.
9 */
10
11#include <linux/bitmap.h>
12#include <linux/kernel.h>
13#include <linux/kmemcheck.h>
14#include <linux/list.h>
15#include <linux/slab.h>
16#include <linux/delay.h>
17#include <linux/kthread.h>
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/mutex.h>
21#include <linux/freezer.h>
22#include <asm/atomic.h>
23
24#include "csr.h"
25#include "highlevel.h"
26#include "hosts.h"
27#include "ieee1394.h"
28#include "ieee1394_core.h"
29#include "ieee1394_hotplug.h"
30#include "ieee1394_types.h"
31#include "ieee1394_transactions.h"
32#include "nodemgr.h"
33
34static int ignore_drivers;
35module_param(ignore_drivers, int, S_IRUGO | S_IWUSR);
36MODULE_PARM_DESC(ignore_drivers, "Disable automatic probing for drivers.");
37
38struct nodemgr_csr_info {
39 struct hpsb_host *host;
40 nodeid_t nodeid;
41 unsigned int generation;
42
43 kmemcheck_bitfield_begin(flags);
44 unsigned int speed_unverified:1;
45 kmemcheck_bitfield_end(flags);
46};
47
48
49/*
50 * Correct the speed map entry. This is necessary
51 * - for nodes with link speed < phy speed,
52 * - for 1394b nodes with negotiated phy port speed < IEEE1394_SPEED_MAX.
53 * A possible speed is determined by trial and error, using quadlet reads.
54 */
55static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr,
56 quadlet_t *buffer)
57{
58 quadlet_t q;
59 u8 i, *speed, old_speed, good_speed;
60 int error;
61
62 speed = &(ci->host->speed[NODEID_TO_NODE(ci->nodeid)]);
63 old_speed = *speed;
64 good_speed = IEEE1394_SPEED_MAX + 1;
65
 66	/* Try every speed from S100 up to old_speed.
 67	 * If we probed downward instead, a needlessly low speed could be
 68	 * chosen if a retry succeeded for some unrelated reason, e.g.
 69	 * because the link just finished its initialization. */
70 for (i = IEEE1394_SPEED_100; i <= old_speed; i++) {
71 *speed = i;
72 error = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
73 &q, 4);
74 if (error)
75 break;
76 *buffer = q;
77 good_speed = i;
78 }
79 if (good_speed <= IEEE1394_SPEED_MAX) {
80 HPSB_DEBUG("Speed probe of node " NODE_BUS_FMT " yields %s",
81 NODE_BUS_ARGS(ci->host, ci->nodeid),
82 hpsb_speedto_str[good_speed]);
83 *speed = good_speed;
84 ci->speed_unverified = 0;
85 return 0;
86 }
87 *speed = old_speed;
88 return error;
89}
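
/* Worked example: for a 1394a node whose PHY advertised S400 but whose
 * link only manages S200, the probe reads succeed at S100 and S200 and
 * fail at S400, so good_speed ends up as IEEE1394_SPEED_200 and the
 * host's speed map entry is corrected accordingly. */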
90
91static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr,
92 void *buffer, void *__ci)
93{
94 struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci;
95 int i, error;
96
97 for (i = 1; ; i++) {
98 error = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
99 buffer, 4);
100 if (!error) {
101 ci->speed_unverified = 0;
102 break;
103 }
104 /* Give up after 3rd failure. */
105 if (i == 3)
106 break;
107
108 /* The ieee1394_core guessed the node's speed capability from
109 * the self ID. Check whether a lower speed works. */
110 if (ci->speed_unverified) {
111 error = nodemgr_check_speed(ci, addr, buffer);
112 if (!error)
113 break;
114 }
115 if (msleep_interruptible(334))
116 return -EINTR;
117 }
118 return error;
119}
120
121static struct csr1212_bus_ops nodemgr_csr_ops = {
122 .bus_read = nodemgr_bus_read,
123};
124
125
126/*
127 * Basically what we do here is start off retrieving the bus_info block.
 128 * From there we fill in some info about the node, verify it is of IEEE
 129 * 1394 type, and check that the CRC is OK. After that we start off with
130 * the root directory, and subdirectories. To do this, we retrieve the
131 * quadlet header for a directory, find out the length, and retrieve the
132 * complete directory entry (be it a leaf or a directory). We then process
133 * it and add the info to our structure for that particular node.
134 *
 135 * We verify CRCs along the way for each directory/block/leaf. The entire
136 * node structure is generic, and simply stores the information in a way
137 * that's easy to parse by the protocol interface.
138 */
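
/*
 * The directory walks in this file all use the csr1212_for_each_dir_entry()
 * iterator from csr1212.h.  A minimal sketch of the idiom, extracting just
 * an immediate vendor entry (nodemgr_process_root_directory() below does
 * the full job):
 */
static void example_read_vendor(struct node_entry *ne)
{
	struct csr1212_dentry *dentry;
	struct csr1212_keyval *kv;

	csr1212_for_each_dir_entry(ne->csr, kv, ne->csr->root_kv, dentry) {
		if (kv->key.id == CSR1212_KV_ID_VENDOR &&
		    kv->key.type == CSR1212_KV_TYPE_IMMEDIATE)
			ne->vendor_id = kv->value.immediate;
	}
}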
139
140/*
141 * The nodemgr relies heavily on the Driver Model for device callbacks and
142 * driver/device mappings. The old nodemgr used to handle all this itself,
 143 * but now the code is much simpler thanks to the LDM.
144 */
145
146struct host_info {
147 struct hpsb_host *host;
148 struct list_head list;
149 struct task_struct *thread;
150};
151
152static int nodemgr_bus_match(struct device * dev, struct device_driver * drv);
153static int nodemgr_uevent(struct device *dev, struct kobj_uevent_env *env);
154
155struct bus_type ieee1394_bus_type = {
156 .name = "ieee1394",
157 .match = nodemgr_bus_match,
158};
159
160static void host_cls_release(struct device *dev)
161{
162 put_device(&container_of((dev), struct hpsb_host, host_dev)->device);
163}
164
165struct class hpsb_host_class = {
166 .name = "ieee1394_host",
167 .dev_release = host_cls_release,
168};
169
170static void ne_cls_release(struct device *dev)
171{
172 put_device(&container_of((dev), struct node_entry, node_dev)->device);
173}
174
175static struct class nodemgr_ne_class = {
176 .name = "ieee1394_node",
177 .dev_release = ne_cls_release,
178};
179
180static void ud_cls_release(struct device *dev)
181{
182 put_device(&container_of((dev), struct unit_directory, unit_dev)->device);
183}
184
185/* The name here is only so that unit directory hotplug works with old
186 * style hotplug, which only ever did unit directories anyway.
187 */
188static struct class nodemgr_ud_class = {
189 .name = "ieee1394",
190 .dev_release = ud_cls_release,
191 .dev_uevent = nodemgr_uevent,
192};
193
194static struct hpsb_highlevel nodemgr_highlevel;
195
196
197static void nodemgr_release_ud(struct device *dev)
198{
199 struct unit_directory *ud = container_of(dev, struct unit_directory, device);
200
201 if (ud->vendor_name_kv)
202 csr1212_release_keyval(ud->vendor_name_kv);
203 if (ud->model_name_kv)
204 csr1212_release_keyval(ud->model_name_kv);
205
206 kfree(ud);
207}
208
209static void nodemgr_release_ne(struct device *dev)
210{
211 struct node_entry *ne = container_of(dev, struct node_entry, device);
212
213 if (ne->vendor_name_kv)
214 csr1212_release_keyval(ne->vendor_name_kv);
215
216 kfree(ne);
217}
218
219
220static void nodemgr_release_host(struct device *dev)
221{
222 struct hpsb_host *host = container_of(dev, struct hpsb_host, device);
223
224 csr1212_destroy_csr(host->csr.rom);
225
226 kfree(host);
227}
228
229static int nodemgr_ud_platform_data;
230
231static struct device nodemgr_dev_template_ud = {
232 .bus = &ieee1394_bus_type,
233 .release = nodemgr_release_ud,
234 .platform_data = &nodemgr_ud_platform_data,
235};
236
237static struct device nodemgr_dev_template_ne = {
238 .bus = &ieee1394_bus_type,
239 .release = nodemgr_release_ne,
240};
241
242/* This dummy driver prevents the host devices from being scanned. We have no
243 * useful drivers for them yet, and there would be a deadlock possible if the
244 * driver core scans the host device while the host's low-level driver (i.e.
245 * the host's parent device) is being removed. */
246static struct device_driver nodemgr_mid_layer_driver = {
247 .bus = &ieee1394_bus_type,
248 .name = "nodemgr",
249 .owner = THIS_MODULE,
250};
251
252struct device nodemgr_dev_template_host = {
253 .bus = &ieee1394_bus_type,
254 .release = nodemgr_release_host,
255};
256
257
258#define fw_attr(class, class_type, field, type, format_string) \
259static ssize_t fw_show_##class##_##field (struct device *dev, struct device_attribute *attr, char *buf)\
260{ \
261 class_type *class; \
262 class = container_of(dev, class_type, device); \
263 return sprintf(buf, format_string, (type)class->field); \
264} \
265static struct device_attribute dev_attr_##class##_##field = { \
266 .attr = {.name = __stringify(field), .mode = S_IRUGO }, \
267 .show = fw_show_##class##_##field, \
268};
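
/* Expansion example: fw_attr(ne, struct node_entry, nodeid, unsigned int,
 * "0x%04x\n") generates a show function that resolves the device back to
 * its struct node_entry and sprintf()s ne->nodeid, together with a
 * read-only device attribute - i.e. a sysfs file named "nodeid" in the
 * node's device directory. */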
269
270#define fw_attr_td(class, class_type, td_kv) \
271static ssize_t fw_show_##class##_##td_kv (struct device *dev, struct device_attribute *attr, char *buf)\
272{ \
273 int len; \
274 class_type *class = container_of(dev, class_type, device); \
275 len = (class->td_kv->value.leaf.len - 2) * sizeof(quadlet_t); \
276 memcpy(buf, \
277 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(class->td_kv), \
278 len); \
279 while (buf[len - 1] == '\0') \
280 len--; \
281 buf[len++] = '\n'; \
282 buf[len] = '\0'; \
283 return len; \
284} \
285static struct device_attribute dev_attr_##class##_##td_kv = { \
286 .attr = {.name = __stringify(td_kv), .mode = S_IRUGO }, \
287 .show = fw_show_##class##_##td_kv, \
288};
289
290
291#define fw_drv_attr(field, type, format_string) \
292static ssize_t fw_drv_show_##field (struct device_driver *drv, char *buf) \
293{ \
294 struct hpsb_protocol_driver *driver; \
295 driver = container_of(drv, struct hpsb_protocol_driver, driver); \
296 return sprintf(buf, format_string, (type)driver->field);\
297} \
298static struct driver_attribute driver_attr_drv_##field = { \
299 .attr = {.name = __stringify(field), .mode = S_IRUGO }, \
300 .show = fw_drv_show_##field, \
301};
302
303
304static ssize_t fw_show_ne_bus_options(struct device *dev, struct device_attribute *attr, char *buf)
305{
306 struct node_entry *ne = container_of(dev, struct node_entry, device);
307
308 return sprintf(buf, "IRMC(%d) CMC(%d) ISC(%d) BMC(%d) PMC(%d) GEN(%d) "
309 "LSPD(%d) MAX_REC(%d) MAX_ROM(%d) CYC_CLK_ACC(%d)\n",
310 ne->busopt.irmc,
311 ne->busopt.cmc, ne->busopt.isc, ne->busopt.bmc,
312 ne->busopt.pmc, ne->busopt.generation, ne->busopt.lnkspd,
313 ne->busopt.max_rec,
314 ne->busopt.max_rom,
315 ne->busopt.cyc_clk_acc);
316}
317static DEVICE_ATTR(bus_options,S_IRUGO,fw_show_ne_bus_options,NULL);
318
319
320#ifdef HPSB_DEBUG_TLABELS
321static ssize_t fw_show_ne_tlabels_free(struct device *dev,
322 struct device_attribute *attr, char *buf)
323{
324 struct node_entry *ne = container_of(dev, struct node_entry, device);
325 unsigned long flags;
326 unsigned long *tp = ne->host->tl_pool[NODEID_TO_NODE(ne->nodeid)].map;
327 int tf;
328
329 spin_lock_irqsave(&hpsb_tlabel_lock, flags);
330 tf = 64 - bitmap_weight(tp, 64);
331 spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
332
333 return sprintf(buf, "%d\n", tf);
334}
335static DEVICE_ATTR(tlabels_free,S_IRUGO,fw_show_ne_tlabels_free,NULL);
336
337
338static ssize_t fw_show_ne_tlabels_mask(struct device *dev,
339 struct device_attribute *attr, char *buf)
340{
341 struct node_entry *ne = container_of(dev, struct node_entry, device);
342 unsigned long flags;
343 unsigned long *tp = ne->host->tl_pool[NODEID_TO_NODE(ne->nodeid)].map;
344 u64 tm;
345
346 spin_lock_irqsave(&hpsb_tlabel_lock, flags);
347#if (BITS_PER_LONG <= 32)
348 tm = ((u64)tp[0] << 32) + tp[1];
349#else
350 tm = tp[0];
351#endif
352 spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
353
354 return sprintf(buf, "0x%016llx\n", (unsigned long long)tm);
355}
356static DEVICE_ATTR(tlabels_mask, S_IRUGO, fw_show_ne_tlabels_mask, NULL);
357#endif /* HPSB_DEBUG_TLABELS */
358
359
360static ssize_t fw_set_ignore_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
361{
362 struct unit_directory *ud = container_of(dev, struct unit_directory, device);
363 int state = simple_strtoul(buf, NULL, 10);
364
365 if (state == 1) {
366 ud->ignore_driver = 1;
367 device_release_driver(dev);
368 } else if (state == 0)
369 ud->ignore_driver = 0;
370
371 return count;
372}
373static ssize_t fw_get_ignore_driver(struct device *dev, struct device_attribute *attr, char *buf)
374{
375 struct unit_directory *ud = container_of(dev, struct unit_directory, device);
376
377 return sprintf(buf, "%d\n", ud->ignore_driver);
378}
379static DEVICE_ATTR(ignore_driver, S_IWUSR | S_IRUGO, fw_get_ignore_driver, fw_set_ignore_driver);
380
381
382static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf,
383 size_t count)
384{
385 int error = 0;
386
387 if (simple_strtoul(buf, NULL, 10) == 1)
388 error = bus_rescan_devices(&ieee1394_bus_type);
389 return error ? error : count;
390}
391static ssize_t fw_get_rescan(struct bus_type *bus, char *buf)
392{
393 return sprintf(buf, "You can force a rescan of the bus for "
394 "drivers by writing a 1 to this file\n");
395}
396static BUS_ATTR(rescan, S_IWUSR | S_IRUGO, fw_get_rescan, fw_set_rescan);
397
398
399static ssize_t fw_set_ignore_drivers(struct bus_type *bus, const char *buf, size_t count)
400{
401 int state = simple_strtoul(buf, NULL, 10);
402
403 if (state == 1)
404 ignore_drivers = 1;
405 else if (state == 0)
406 ignore_drivers = 0;
407
408 return count;
409}
410static ssize_t fw_get_ignore_drivers(struct bus_type *bus, char *buf)
411{
412 return sprintf(buf, "%d\n", ignore_drivers);
413}
414static BUS_ATTR(ignore_drivers, S_IWUSR | S_IRUGO, fw_get_ignore_drivers, fw_set_ignore_drivers);
415
416
417struct bus_attribute *const fw_bus_attrs[] = {
418 &bus_attr_rescan,
419 &bus_attr_ignore_drivers,
420 NULL
421};
422
423
424fw_attr(ne, struct node_entry, capabilities, unsigned int, "0x%06x\n")
425fw_attr(ne, struct node_entry, nodeid, unsigned int, "0x%04x\n")
426
427fw_attr(ne, struct node_entry, vendor_id, unsigned int, "0x%06x\n")
428fw_attr_td(ne, struct node_entry, vendor_name_kv)
429
430fw_attr(ne, struct node_entry, guid, unsigned long long, "0x%016Lx\n")
431fw_attr(ne, struct node_entry, guid_vendor_id, unsigned int, "0x%06x\n")
432fw_attr(ne, struct node_entry, in_limbo, int, "%d\n");
433
434static struct device_attribute *const fw_ne_attrs[] = {
435 &dev_attr_ne_guid,
436 &dev_attr_ne_guid_vendor_id,
437 &dev_attr_ne_capabilities,
438 &dev_attr_ne_vendor_id,
439 &dev_attr_ne_nodeid,
440 &dev_attr_bus_options,
441#ifdef HPSB_DEBUG_TLABELS
442 &dev_attr_tlabels_free,
443 &dev_attr_tlabels_mask,
444#endif
445};
446
447
448
449fw_attr(ud, struct unit_directory, address, unsigned long long, "0x%016Lx\n")
450fw_attr(ud, struct unit_directory, length, int, "%d\n")
 451/* These files are created only if the corresponding value was provided */
452fw_attr(ud, struct unit_directory, vendor_id, unsigned int, "0x%06x\n")
453fw_attr(ud, struct unit_directory, model_id, unsigned int, "0x%06x\n")
454fw_attr(ud, struct unit_directory, specifier_id, unsigned int, "0x%06x\n")
455fw_attr(ud, struct unit_directory, version, unsigned int, "0x%06x\n")
456fw_attr_td(ud, struct unit_directory, vendor_name_kv)
457fw_attr_td(ud, struct unit_directory, model_name_kv)
458
459static struct device_attribute *const fw_ud_attrs[] = {
460 &dev_attr_ud_address,
461 &dev_attr_ud_length,
462 &dev_attr_ignore_driver,
463};
464
465
466fw_attr(host, struct hpsb_host, node_count, int, "%d\n")
467fw_attr(host, struct hpsb_host, selfid_count, int, "%d\n")
468fw_attr(host, struct hpsb_host, nodes_active, int, "%d\n")
469fw_attr(host, struct hpsb_host, in_bus_reset, int, "%d\n")
470fw_attr(host, struct hpsb_host, is_root, int, "%d\n")
471fw_attr(host, struct hpsb_host, is_cycmst, int, "%d\n")
472fw_attr(host, struct hpsb_host, is_irm, int, "%d\n")
473fw_attr(host, struct hpsb_host, is_busmgr, int, "%d\n")
474
475static struct device_attribute *const fw_host_attrs[] = {
476 &dev_attr_host_node_count,
477 &dev_attr_host_selfid_count,
478 &dev_attr_host_nodes_active,
479 &dev_attr_host_in_bus_reset,
480 &dev_attr_host_is_root,
481 &dev_attr_host_is_cycmst,
482 &dev_attr_host_is_irm,
483 &dev_attr_host_is_busmgr,
484};
485
486
487static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
488{
489 struct hpsb_protocol_driver *driver;
490 const struct ieee1394_device_id *id;
491 int length = 0;
492 char *scratch = buf;
493
494 driver = container_of(drv, struct hpsb_protocol_driver, driver);
495 id = driver->id_table;
496 if (!id)
497 return 0;
498
499 for (; id->match_flags != 0; id++) {
 500		int need_comma = 0;
501
502 if (id->match_flags & IEEE1394_MATCH_VENDOR_ID) {
503 length += sprintf(scratch, "vendor_id=0x%06x", id->vendor_id);
504 scratch = buf + length;
 505			need_comma++;
506 }
507
508 if (id->match_flags & IEEE1394_MATCH_MODEL_ID) {
509 length += sprintf(scratch, "%smodel_id=0x%06x",
 510					  need_comma++ ? "," : "",
511 id->model_id);
512 scratch = buf + length;
513 }
514
515 if (id->match_flags & IEEE1394_MATCH_SPECIFIER_ID) {
516 length += sprintf(scratch, "%sspecifier_id=0x%06x",
 517					  need_comma++ ? "," : "",
518 id->specifier_id);
519 scratch = buf + length;
520 }
521
522 if (id->match_flags & IEEE1394_MATCH_VERSION) {
523 length += sprintf(scratch, "%sversion=0x%06x",
 524					  need_comma++ ? "," : "",
525 id->version);
526 scratch = buf + length;
527 }
528
 529		if (need_comma) {
530 *scratch++ = '\n';
531 length++;
532 }
533 }
534
535 return length;
536}
537static DRIVER_ATTR(device_ids,S_IRUGO,fw_show_drv_device_ids,NULL);
538
539
540fw_drv_attr(name, const char *, "%s\n")
541
542static struct driver_attribute *const fw_drv_attrs[] = {
543 &driver_attr_drv_name,
544 &driver_attr_device_ids,
545};
546
547
548static void nodemgr_create_drv_files(struct hpsb_protocol_driver *driver)
549{
550 struct device_driver *drv = &driver->driver;
551 int i;
552
553 for (i = 0; i < ARRAY_SIZE(fw_drv_attrs); i++)
554 if (driver_create_file(drv, fw_drv_attrs[i]))
555 goto fail;
556 return;
557fail:
558 HPSB_ERR("Failed to add sysfs attribute");
559}
560
561
562static void nodemgr_remove_drv_files(struct hpsb_protocol_driver *driver)
563{
564 struct device_driver *drv = &driver->driver;
565 int i;
566
567 for (i = 0; i < ARRAY_SIZE(fw_drv_attrs); i++)
568 driver_remove_file(drv, fw_drv_attrs[i]);
569}
570
571
572static void nodemgr_create_ne_dev_files(struct node_entry *ne)
573{
574 struct device *dev = &ne->device;
575 int i;
576
577 for (i = 0; i < ARRAY_SIZE(fw_ne_attrs); i++)
578 if (device_create_file(dev, fw_ne_attrs[i]))
579 goto fail;
580 return;
581fail:
582 HPSB_ERR("Failed to add sysfs attribute");
583}
584
585
586static void nodemgr_create_host_dev_files(struct hpsb_host *host)
587{
588 struct device *dev = &host->device;
589 int i;
590
591 for (i = 0; i < ARRAY_SIZE(fw_host_attrs); i++)
592 if (device_create_file(dev, fw_host_attrs[i]))
593 goto fail;
594 return;
595fail:
596 HPSB_ERR("Failed to add sysfs attribute");
597}
598
599
600static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host,
601 nodeid_t nodeid);
602
603static void nodemgr_update_host_dev_links(struct hpsb_host *host)
604{
605 struct device *dev = &host->device;
606 struct node_entry *ne;
607
608 sysfs_remove_link(&dev->kobj, "irm_id");
609 sysfs_remove_link(&dev->kobj, "busmgr_id");
610 sysfs_remove_link(&dev->kobj, "host_id");
611
612 if ((ne = find_entry_by_nodeid(host, host->irm_id)) &&
613 sysfs_create_link(&dev->kobj, &ne->device.kobj, "irm_id"))
614 goto fail;
615 if ((ne = find_entry_by_nodeid(host, host->busmgr_id)) &&
616 sysfs_create_link(&dev->kobj, &ne->device.kobj, "busmgr_id"))
617 goto fail;
618 if ((ne = find_entry_by_nodeid(host, host->node_id)) &&
619 sysfs_create_link(&dev->kobj, &ne->device.kobj, "host_id"))
620 goto fail;
621 return;
622fail:
623 HPSB_ERR("Failed to update sysfs attributes for host %d", host->id);
624}
625
626static void nodemgr_create_ud_dev_files(struct unit_directory *ud)
627{
628 struct device *dev = &ud->device;
629 int i;
630
631 for (i = 0; i < ARRAY_SIZE(fw_ud_attrs); i++)
632 if (device_create_file(dev, fw_ud_attrs[i]))
633 goto fail;
634 if (ud->flags & UNIT_DIRECTORY_SPECIFIER_ID)
635 if (device_create_file(dev, &dev_attr_ud_specifier_id))
636 goto fail;
637 if (ud->flags & UNIT_DIRECTORY_VERSION)
638 if (device_create_file(dev, &dev_attr_ud_version))
639 goto fail;
640 if (ud->flags & UNIT_DIRECTORY_VENDOR_ID) {
641 if (device_create_file(dev, &dev_attr_ud_vendor_id))
642 goto fail;
643 if (ud->vendor_name_kv &&
644 device_create_file(dev, &dev_attr_ud_vendor_name_kv))
645 goto fail;
646 }
647 if (ud->flags & UNIT_DIRECTORY_MODEL_ID) {
648 if (device_create_file(dev, &dev_attr_ud_model_id))
649 goto fail;
650 if (ud->model_name_kv &&
651 device_create_file(dev, &dev_attr_ud_model_name_kv))
652 goto fail;
653 }
654 return;
655fail:
656 HPSB_ERR("Failed to add sysfs attribute");
657}
658
659
660static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
661{
662 struct hpsb_protocol_driver *driver;
663 struct unit_directory *ud;
664 const struct ieee1394_device_id *id;
665
666 /* We only match unit directories */
667 if (dev->platform_data != &nodemgr_ud_platform_data)
668 return 0;
669
670 ud = container_of(dev, struct unit_directory, device);
671 if (ud->ne->in_limbo || ud->ignore_driver)
672 return 0;
673
674 /* We only match drivers of type hpsb_protocol_driver */
675 if (drv == &nodemgr_mid_layer_driver)
676 return 0;
677
678 driver = container_of(drv, struct hpsb_protocol_driver, driver);
679 id = driver->id_table;
680 if (!id)
681 return 0;
682
683 for (; id->match_flags != 0; id++) {
684 if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
685 id->vendor_id != ud->vendor_id)
686 continue;
687
688 if ((id->match_flags & IEEE1394_MATCH_MODEL_ID) &&
689 id->model_id != ud->model_id)
690 continue;
691
692 if ((id->match_flags & IEEE1394_MATCH_SPECIFIER_ID) &&
693 id->specifier_id != ud->specifier_id)
694 continue;
695
696 if ((id->match_flags & IEEE1394_MATCH_VERSION) &&
697 id->version != ud->version)
698 continue;
699
700 return 1;
701 }
702
703 return 0;
704}
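
/*
 * A protocol driver hooks into this matching through an ID table.  The
 * sketch below mirrors the criteria the sbp2 driver (removed elsewhere
 * in this commit) registered - matching on the unit's specifier_id and
 * version only:
 */
static const struct ieee1394_device_id example_id_table[] = {
	{
		.match_flags	= IEEE1394_MATCH_SPECIFIER_ID |
				  IEEE1394_MATCH_VERSION,
		.specifier_id	= 0x00609e,	/* NCITS/T10 (SBP-2) */
		.version	= 0x010483,	/* SBP-2 software version */
	},
	{ /* the table is terminated by match_flags == 0 */ }
};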
705
706
707static DEFINE_MUTEX(nodemgr_serialize_remove_uds);
708
709static int match_ne(struct device *dev, void *data)
710{
711 struct unit_directory *ud;
712 struct node_entry *ne = data;
713
714 ud = container_of(dev, struct unit_directory, unit_dev);
715 return ud->ne == ne;
716}
717
718static void nodemgr_remove_uds(struct node_entry *ne)
719{
720 struct device *dev;
721 struct unit_directory *ud;
722
 723	/* Use class_find_device() to iterate over the devices. Since this
 724	 * code may be called from contexts other than the knodemgrd threads,
 725	 * protect it with nodemgr_serialize_remove_uds.
726 */
727 mutex_lock(&nodemgr_serialize_remove_uds);
728 for (;;) {
729 dev = class_find_device(&nodemgr_ud_class, NULL, ne, match_ne);
730 if (!dev)
731 break;
732 ud = container_of(dev, struct unit_directory, unit_dev);
733 put_device(dev);
734 device_unregister(&ud->unit_dev);
735 device_unregister(&ud->device);
736 }
737 mutex_unlock(&nodemgr_serialize_remove_uds);
738}
739
740
741static void nodemgr_remove_ne(struct node_entry *ne)
742{
743 struct device *dev;
744
745 dev = get_device(&ne->device);
746 if (!dev)
747 return;
748
749 HPSB_DEBUG("Node removed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
750 NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
751 nodemgr_remove_uds(ne);
752
753 device_unregister(&ne->node_dev);
754 device_unregister(dev);
755
756 put_device(dev);
757}
758
759static int remove_host_dev(struct device *dev, void *data)
760{
761 if (dev->bus == &ieee1394_bus_type)
762 nodemgr_remove_ne(container_of(dev, struct node_entry,
763 device));
764 return 0;
765}
766
767static void nodemgr_remove_host_dev(struct device *dev)
768{
769 device_for_each_child(dev, NULL, remove_host_dev);
770 sysfs_remove_link(&dev->kobj, "irm_id");
771 sysfs_remove_link(&dev->kobj, "busmgr_id");
772 sysfs_remove_link(&dev->kobj, "host_id");
773}
774
775
776static void nodemgr_update_bus_options(struct node_entry *ne)
777{
778#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
779 static const u16 mr[] = { 4, 64, 1024, 0};
780#endif
781 quadlet_t busoptions = be32_to_cpu(ne->csr->bus_info_data[2]);
782
783 ne->busopt.irmc = (busoptions >> 31) & 1;
784 ne->busopt.cmc = (busoptions >> 30) & 1;
785 ne->busopt.isc = (busoptions >> 29) & 1;
786 ne->busopt.bmc = (busoptions >> 28) & 1;
787 ne->busopt.pmc = (busoptions >> 27) & 1;
788 ne->busopt.cyc_clk_acc = (busoptions >> 16) & 0xff;
789 ne->busopt.max_rec = 1 << (((busoptions >> 12) & 0xf) + 1);
790 ne->busopt.max_rom = (busoptions >> 8) & 0x3;
791 ne->busopt.generation = (busoptions >> 4) & 0xf;
792 ne->busopt.lnkspd = busoptions & 0x7;
793
794 HPSB_VERBOSE("NodeMgr: raw=0x%08x irmc=%d cmc=%d isc=%d bmc=%d pmc=%d "
795 "cyc_clk_acc=%d max_rec=%d max_rom=%d gen=%d lspd=%d",
796 busoptions, ne->busopt.irmc, ne->busopt.cmc,
797 ne->busopt.isc, ne->busopt.bmc, ne->busopt.pmc,
798 ne->busopt.cyc_clk_acc, ne->busopt.max_rec,
799 mr[ne->busopt.max_rom],
800 ne->busopt.generation, ne->busopt.lnkspd);
801}
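
/* Worked example: busoptions 0xf064a002 decodes to irmc=1 cmc=1 isc=1
 * bmc=1 pmc=0, cyc_clk_acc=100 ppm, max_rec=2048 bytes (field value 10),
 * max_rom=0, generation=0, lnkspd=2 (S400). */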
802
803
804static struct node_entry *nodemgr_create_node(octlet_t guid,
805 struct csr1212_csr *csr, struct hpsb_host *host,
806 nodeid_t nodeid, unsigned int generation)
807{
808 struct node_entry *ne;
809
810 ne = kzalloc(sizeof(*ne), GFP_KERNEL);
811 if (!ne)
812 goto fail_alloc;
813
814 ne->host = host;
815 ne->nodeid = nodeid;
816 ne->generation = generation;
817 ne->needs_probe = true;
818
819 ne->guid = guid;
820 ne->guid_vendor_id = (guid >> 40) & 0xffffff;
821 ne->csr = csr;
822
823 memcpy(&ne->device, &nodemgr_dev_template_ne,
824 sizeof(ne->device));
825 ne->device.parent = &host->device;
826 dev_set_name(&ne->device, "%016Lx", (unsigned long long)(ne->guid));
827
828 ne->node_dev.parent = &ne->device;
829 ne->node_dev.class = &nodemgr_ne_class;
830 dev_set_name(&ne->node_dev, "%016Lx", (unsigned long long)(ne->guid));
831
832 if (device_register(&ne->device))
833 goto fail_devreg;
834 if (device_register(&ne->node_dev))
835 goto fail_classdevreg;
836 get_device(&ne->device);
837
838 nodemgr_create_ne_dev_files(ne);
839
840 nodemgr_update_bus_options(ne);
841
842 HPSB_DEBUG("%s added: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
843 (host->node_id == nodeid) ? "Host" : "Node",
844 NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);
845
846 return ne;
847
848fail_classdevreg:
849 device_unregister(&ne->device);
850fail_devreg:
851 kfree(ne);
852fail_alloc:
853 HPSB_ERR("Failed to create node ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
854 NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);
855
856 return NULL;
857}
858
859static int match_ne_guid(struct device *dev, void *data)
860{
861 struct node_entry *ne;
862 u64 *guid = data;
863
864 ne = container_of(dev, struct node_entry, node_dev);
865 return ne->guid == *guid;
866}
867
868static struct node_entry *find_entry_by_guid(u64 guid)
869{
870 struct device *dev;
871 struct node_entry *ne;
872
873 dev = class_find_device(&nodemgr_ne_class, NULL, &guid, match_ne_guid);
874 if (!dev)
875 return NULL;
876 ne = container_of(dev, struct node_entry, node_dev);
877 put_device(dev);
878
879 return ne;
880}
881
882struct match_nodeid_parameter {
883 struct hpsb_host *host;
884 nodeid_t nodeid;
885};
886
887static int match_ne_nodeid(struct device *dev, void *data)
888{
889 int found = 0;
890 struct node_entry *ne;
891 struct match_nodeid_parameter *p = data;
892
893 if (!dev)
894 goto ret;
895 ne = container_of(dev, struct node_entry, node_dev);
896 if (ne->host == p->host && ne->nodeid == p->nodeid)
897 found = 1;
898ret:
899 return found;
900}
901
902static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host,
903 nodeid_t nodeid)
904{
905 struct device *dev;
906 struct node_entry *ne;
907 struct match_nodeid_parameter p;
908
909 p.host = host;
910 p.nodeid = nodeid;
911
912 dev = class_find_device(&nodemgr_ne_class, NULL, &p, match_ne_nodeid);
913 if (!dev)
914 return NULL;
915 ne = container_of(dev, struct node_entry, node_dev);
916 put_device(dev);
917
918 return ne;
919}
920
921
922static void nodemgr_register_device(struct node_entry *ne,
923 struct unit_directory *ud, struct device *parent)
924{
925 memcpy(&ud->device, &nodemgr_dev_template_ud,
926 sizeof(ud->device));
927
928 ud->device.parent = parent;
929
930 dev_set_name(&ud->device, "%s-%u", dev_name(&ne->device), ud->id);
931
932 ud->unit_dev.parent = &ud->device;
933 ud->unit_dev.class = &nodemgr_ud_class;
934 dev_set_name(&ud->unit_dev, "%s-%u", dev_name(&ne->device), ud->id);
935
936 if (device_register(&ud->device))
937 goto fail_devreg;
938 if (device_register(&ud->unit_dev))
939 goto fail_classdevreg;
940 get_device(&ud->device);
941
942 nodemgr_create_ud_dev_files(ud);
943
944 return;
945
946fail_classdevreg:
947 device_unregister(&ud->device);
948fail_devreg:
949 HPSB_ERR("Failed to create unit %s", dev_name(&ud->device));
950}
951
952
953/* This implementation currently only scans the config rom and its
954 * immediate unit directories looking for software_id and
955 * software_version entries, in order to get driver autoloading working. */
956static struct unit_directory *nodemgr_process_unit_directory
957 (struct node_entry *ne, struct csr1212_keyval *ud_kv,
958 unsigned int *id, struct unit_directory *parent)
959{
960 struct unit_directory *ud;
961 struct unit_directory *ud_child = NULL;
962 struct csr1212_dentry *dentry;
963 struct csr1212_keyval *kv;
964 u8 last_key_id = 0;
965
966 ud = kzalloc(sizeof(*ud), GFP_KERNEL);
967 if (!ud)
968 goto unit_directory_error;
969
970 ud->ne = ne;
971 ud->ignore_driver = ignore_drivers;
972 ud->address = ud_kv->offset + CSR1212_REGISTER_SPACE_BASE;
973 ud->directory_id = ud->address & 0xffffff;
974 ud->ud_kv = ud_kv;
975 ud->id = (*id)++;
976
977 /* inherit vendor_id from root directory if none exists in unit dir */
978 ud->vendor_id = ne->vendor_id;
979
980 csr1212_for_each_dir_entry(ne->csr, kv, ud_kv, dentry) {
981 switch (kv->key.id) {
982 case CSR1212_KV_ID_VENDOR:
983 if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
984 ud->vendor_id = kv->value.immediate;
985 ud->flags |= UNIT_DIRECTORY_VENDOR_ID;
986 }
987 break;
988
989 case CSR1212_KV_ID_MODEL:
990 ud->model_id = kv->value.immediate;
991 ud->flags |= UNIT_DIRECTORY_MODEL_ID;
992 break;
993
994 case CSR1212_KV_ID_SPECIFIER_ID:
995 ud->specifier_id = kv->value.immediate;
996 ud->flags |= UNIT_DIRECTORY_SPECIFIER_ID;
997 break;
998
999 case CSR1212_KV_ID_VERSION:
1000 ud->version = kv->value.immediate;
1001 ud->flags |= UNIT_DIRECTORY_VERSION;
1002 break;
1003
1004 case CSR1212_KV_ID_DESCRIPTOR:
1005 if (kv->key.type == CSR1212_KV_TYPE_LEAF &&
1006 CSR1212_DESCRIPTOR_LEAF_TYPE(kv) == 0 &&
1007 CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) == 0 &&
1008 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 &&
1009 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 &&
1010 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
1011 switch (last_key_id) {
1012 case CSR1212_KV_ID_VENDOR:
1013 csr1212_keep_keyval(kv);
1014 ud->vendor_name_kv = kv;
1015 break;
1016
1017 case CSR1212_KV_ID_MODEL:
1018 csr1212_keep_keyval(kv);
1019 ud->model_name_kv = kv;
1020 break;
1021
1022 }
1023 } /* else if (kv->key.type == CSR1212_KV_TYPE_DIRECTORY) ... */
1024 break;
1025
1026 case CSR1212_KV_ID_DEPENDENT_INFO:
1027 /* Logical Unit Number */
1028 if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
1029 if (ud->flags & UNIT_DIRECTORY_HAS_LUN) {
1030 ud_child = kmemdup(ud, sizeof(*ud_child), GFP_KERNEL);
1031 if (!ud_child)
1032 goto unit_directory_error;
1033 nodemgr_register_device(ne, ud_child, &ne->device);
1034 ud_child = NULL;
1035
1036 ud->id = (*id)++;
1037 }
1038 ud->lun = kv->value.immediate;
1039 ud->flags |= UNIT_DIRECTORY_HAS_LUN;
1040
1041 /* Logical Unit Directory */
1042 } else if (kv->key.type == CSR1212_KV_TYPE_DIRECTORY) {
1043 /* This should really be done in SBP2 as this is
1044 * doing SBP2 specific parsing.
1045 */
1046
1047 /* first register the parent unit */
1048 ud->flags |= UNIT_DIRECTORY_HAS_LUN_DIRECTORY;
1049 if (ud->device.bus != &ieee1394_bus_type)
1050 nodemgr_register_device(ne, ud, &ne->device);
1051
1052 /* process the child unit */
1053 ud_child = nodemgr_process_unit_directory(ne, kv, id, ud);
1054
1055 if (ud_child == NULL)
1056 break;
1057
1058 /* inherit unspecified values, the driver core picks it up */
1059 if ((ud->flags & UNIT_DIRECTORY_MODEL_ID) &&
1060 !(ud_child->flags & UNIT_DIRECTORY_MODEL_ID))
1061 {
1062 ud_child->flags |= UNIT_DIRECTORY_MODEL_ID;
1063 ud_child->model_id = ud->model_id;
1064 }
1065 if ((ud->flags & UNIT_DIRECTORY_SPECIFIER_ID) &&
1066 !(ud_child->flags & UNIT_DIRECTORY_SPECIFIER_ID))
1067 {
1068 ud_child->flags |= UNIT_DIRECTORY_SPECIFIER_ID;
1069 ud_child->specifier_id = ud->specifier_id;
1070 }
1071 if ((ud->flags & UNIT_DIRECTORY_VERSION) &&
1072 !(ud_child->flags & UNIT_DIRECTORY_VERSION))
1073 {
1074 ud_child->flags |= UNIT_DIRECTORY_VERSION;
1075 ud_child->version = ud->version;
1076 }
1077
1078 /* register the child unit */
1079 ud_child->flags |= UNIT_DIRECTORY_LUN_DIRECTORY;
1080 nodemgr_register_device(ne, ud_child, &ud->device);
1081 }
1082
1083 break;
1084
1085 case CSR1212_KV_ID_DIRECTORY_ID:
1086 ud->directory_id = kv->value.immediate;
1087 break;
1088
1089 default:
1090 break;
1091 }
1092 last_key_id = kv->key.id;
1093 }
1094
 1095	/* register only top-level units here; child units were registered above */
1096 if (!parent && ud->device.bus != &ieee1394_bus_type)
1097 nodemgr_register_device(ne, ud, &ne->device);
1098
1099 return ud;
1100
1101unit_directory_error:
1102 kfree(ud);
1103 return NULL;
1104}
1105
1106
1107static void nodemgr_process_root_directory(struct node_entry *ne)
1108{
1109 unsigned int ud_id = 0;
1110 struct csr1212_dentry *dentry;
1111 struct csr1212_keyval *kv, *vendor_name_kv = NULL;
1112 u8 last_key_id = 0;
1113
1114 ne->needs_probe = false;
1115
1116 csr1212_for_each_dir_entry(ne->csr, kv, ne->csr->root_kv, dentry) {
1117 switch (kv->key.id) {
1118 case CSR1212_KV_ID_VENDOR:
1119 ne->vendor_id = kv->value.immediate;
1120 break;
1121
1122 case CSR1212_KV_ID_NODE_CAPABILITIES:
1123 ne->capabilities = kv->value.immediate;
1124 break;
1125
1126 case CSR1212_KV_ID_UNIT:
1127 nodemgr_process_unit_directory(ne, kv, &ud_id, NULL);
1128 break;
1129
1130 case CSR1212_KV_ID_DESCRIPTOR:
1131 if (last_key_id == CSR1212_KV_ID_VENDOR) {
1132 if (kv->key.type == CSR1212_KV_TYPE_LEAF &&
1133 CSR1212_DESCRIPTOR_LEAF_TYPE(kv) == 0 &&
1134 CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) == 0 &&
1135 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 &&
1136 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 &&
1137 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
1138 csr1212_keep_keyval(kv);
1139 vendor_name_kv = kv;
1140 }
1141 }
1142 break;
1143 }
1144 last_key_id = kv->key.id;
1145 }
1146
1147 if (ne->vendor_name_kv) {
1148 kv = ne->vendor_name_kv;
1149 ne->vendor_name_kv = vendor_name_kv;
1150 csr1212_release_keyval(kv);
1151 } else if (vendor_name_kv) {
1152 ne->vendor_name_kv = vendor_name_kv;
1153 if (device_create_file(&ne->device,
1154 &dev_attr_ne_vendor_name_kv) != 0)
1155 HPSB_ERR("Failed to add sysfs attribute");
1156 }
1157}
1158
1159#ifdef CONFIG_HOTPLUG
1160
1161static int nodemgr_uevent(struct device *dev, struct kobj_uevent_env *env)
1162{
1163 struct unit_directory *ud;
1164 int retval = 0;
1165 /* ieee1394:venNmoNspNverN */
1166 char buf[8 + 1 + 3 + 8 + 2 + 8 + 2 + 8 + 3 + 8 + 1];
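	/* Sizing: "ieee1394" (8) + ":" (1) + "ven" (3) + 8 hex digits +
	 * "mo" (2) + 8 + "sp" (2) + 8 + "ver" (3) + 8 + '\0' (1) = 52 bytes,
	 * exactly matching the snprintf() format string used below. */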
1167
1168 if (!dev)
1169 return -ENODEV;
1170
1171 ud = container_of(dev, struct unit_directory, unit_dev);
1172
1173 if (ud->ne->in_limbo || ud->ignore_driver)
1174 return -ENODEV;
1175
1176#define PUT_ENVP(fmt,val) \
1177do { \
1178 retval = add_uevent_var(env, fmt, val); \
1179 if (retval) \
1180 return retval; \
1181} while (0)
1182
1183 PUT_ENVP("VENDOR_ID=%06x", ud->vendor_id);
1184 PUT_ENVP("MODEL_ID=%06x", ud->model_id);
1185 PUT_ENVP("GUID=%016Lx", (unsigned long long)ud->ne->guid);
1186 PUT_ENVP("SPECIFIER_ID=%06x", ud->specifier_id);
1187 PUT_ENVP("VERSION=%06x", ud->version);
1188 snprintf(buf, sizeof(buf), "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
1189 ud->vendor_id,
1190 ud->model_id,
1191 ud->specifier_id,
1192 ud->version);
1193 PUT_ENVP("MODALIAS=%s", buf);
1194
1195#undef PUT_ENVP
1196
1197 return 0;
1198}
1199
1200#else
1201
1202static int nodemgr_uevent(struct device *dev, struct kobj_uevent_env *env)
1203{
1204 return -ENODEV;
1205}
1206
1207#endif /* CONFIG_HOTPLUG */
1208
1209
1210int __hpsb_register_protocol(struct hpsb_protocol_driver *drv,
1211 struct module *owner)
1212{
1213 int error;
1214
1215 drv->driver.bus = &ieee1394_bus_type;
1216 drv->driver.owner = owner;
1217 drv->driver.name = drv->name;
1218
1219 /* This will cause a probe for devices */
1220 error = driver_register(&drv->driver);
1221 if (!error)
1222 nodemgr_create_drv_files(drv);
1223 return error;
1224}
1225
1226void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver)
1227{
1228 nodemgr_remove_drv_files(driver);
1229 /* This will subsequently disconnect all devices that our driver
1230 * is attached to. */
1231 driver_unregister(&driver->driver);
1232}
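/*
 * Illustrative sketch: how a protocol driver would use the registration
 * pair above. All names below are hypothetical; the specifier_id/version
 * values are the ones published for SBP-2, shown purely as an example.
 */
#if 0
static const struct ieee1394_device_id example_id_table[] = {
	{
		.match_flags	= IEEE1394_MATCH_SPECIFIER_ID |
				  IEEE1394_MATCH_VERSION,
		.specifier_id	= 0x0000609e,
		.version	= 0x00010483,
	},
	{}
};

static struct hpsb_protocol_driver example_driver = {
	.name		= "example-1394",
	.id_table	= example_id_table,
};

static int __init example_init(void)
{
	/* expands to __hpsb_register_protocol(&example_driver, THIS_MODULE) */
	return hpsb_register_protocol(&example_driver);
}

static void __exit example_exit(void)
{
	hpsb_unregister_protocol(&example_driver);
}
#endif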
1233
1234
1235/*
1236 * This function updates nodes that were present on the bus before the
1237 * reset and still are after the reset. The nodeid and the config rom
1238 * may have changed, and the drivers managing this device must be
1239 * informed that this device just went through a bus reset, to allow
1240 * them to take whatever actions are required.
1241 */
1242static void nodemgr_update_node(struct node_entry *ne, struct csr1212_csr *csr,
1243 nodeid_t nodeid, unsigned int generation)
1244{
1245 if (ne->nodeid != nodeid) {
1246 HPSB_DEBUG("Node changed: " NODE_BUS_FMT " -> " NODE_BUS_FMT,
1247 NODE_BUS_ARGS(ne->host, ne->nodeid),
1248 NODE_BUS_ARGS(ne->host, nodeid));
1249 ne->nodeid = nodeid;
1250 }
1251
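	/* The config ROM generation lives in bits 7:4 of bus info quadlet 2
	 * (an IEEE 1394a field); compare it with the cached value. */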
1252 if (ne->busopt.generation != ((be32_to_cpu(csr->bus_info_data[2]) >> 4) & 0xf)) {
1253 kfree(ne->csr->private);
1254 csr1212_destroy_csr(ne->csr);
1255 ne->csr = csr;
1256
1257 /* If the node's configrom generation has changed, we
1258 * unregister all the unit directories. */
1259 nodemgr_remove_uds(ne);
1260
1261 nodemgr_update_bus_options(ne);
1262
1263 /* Mark the node as new, so it gets re-probed */
1264 ne->needs_probe = true;
1265 } else {
1266 /* old cache is valid, so update its generation */
1267 struct nodemgr_csr_info *ci = ne->csr->private;
1268 ci->generation = generation;
1269 /* free the partially filled now unneeded new cache */
1270 kfree(csr->private);
1271 csr1212_destroy_csr(csr);
1272 }
1273
1274 /* Finally, mark the node current */
1275 smp_wmb();
1276 ne->generation = generation;
1277
1278 if (ne->in_limbo) {
1279 device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
1280 ne->in_limbo = false;
1281
1282 HPSB_DEBUG("Node reactivated: "
1283 "ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
1284 NODE_BUS_ARGS(ne->host, ne->nodeid),
1285 (unsigned long long)ne->guid);
1286 }
1287}
1288
1289static void nodemgr_node_scan_one(struct hpsb_host *host,
1290 nodeid_t nodeid, int generation)
1291{
1292 struct node_entry *ne;
1293 octlet_t guid;
1294 struct csr1212_csr *csr;
1295 struct nodemgr_csr_info *ci;
1296 u8 *speed;
1297
1298 ci = kmalloc(sizeof(*ci), GFP_KERNEL);
1299 kmemcheck_annotate_bitfield(ci, flags);
1300 if (!ci)
1301 return;
1302
1303 ci->host = host;
1304 ci->nodeid = nodeid;
1305 ci->generation = generation;
1306
1307 /* Prepare for speed probe which occurs when reading the ROM */
1308 speed = &(host->speed[NODEID_TO_NODE(nodeid)]);
1309 if (*speed > host->csr.lnk_spd)
1310 *speed = host->csr.lnk_spd;
1311 ci->speed_unverified = *speed > IEEE1394_SPEED_100;
1312
1313 /* We need to detect when the ConfigROM's generation has changed,
1314 * so that we update the node's info only when necessary. */
1315
1316 csr = csr1212_create_csr(&nodemgr_csr_ops, 5 * sizeof(quadlet_t), ci);
1317 if (!csr || csr1212_parse_csr(csr) != CSR1212_SUCCESS) {
1318 HPSB_ERR("Error parsing configrom for node " NODE_BUS_FMT,
1319 NODE_BUS_ARGS(host, nodeid));
1320 if (csr)
1321 csr1212_destroy_csr(csr);
1322 kfree(ci);
1323 return;
1324 }
1325
1326 if (csr->bus_info_data[1] != IEEE1394_BUSID_MAGIC) {
1327 /* This isn't a 1394 device, but we let it slide. There
1328 * was a report of a device with broken firmware which
1329 * reported '2394' instead of '1394', which is obviously a
1330 * mistake. One would hope that a non-1394 device never
1331 * gets connected to a FireWire bus. If someone does, we
1332 * shouldn't be held responsible, so we'll allow it with a
1333 * warning. */
1334 HPSB_WARN("Node " NODE_BUS_FMT " has invalid busID magic [0x%08x]",
1335 NODE_BUS_ARGS(host, nodeid), csr->bus_info_data[1]);
1336 }
1337
1338 guid = ((u64)be32_to_cpu(csr->bus_info_data[3]) << 32) | be32_to_cpu(csr->bus_info_data[4]);
1339 ne = find_entry_by_guid(guid);
1340
1341 if (ne && ne->host != host && ne->in_limbo) {
1342 /* Must have moved this device from one host to another */
1343 nodemgr_remove_ne(ne);
1344 ne = NULL;
1345 }
1346
1347 if (!ne)
1348 nodemgr_create_node(guid, csr, host, nodeid, generation);
1349 else
1350 nodemgr_update_node(ne, csr, nodeid, generation);
1351}
1352
1353
1354static void nodemgr_node_scan(struct hpsb_host *host, int generation)
1355{
1356 int count;
1357 struct selfid *sid = (struct selfid *)host->topology_map;
1358 nodeid_t nodeid = LOCAL_BUS;
1359
1360 /* Scan each node on the bus */
1361 for (count = host->selfid_count; count; count--, sid++) {
1362 if (sid->extended)
1363 continue;
1364
1365 if (!sid->link_active) {
1366 nodeid++;
1367 continue;
1368 }
1369 nodemgr_node_scan_one(host, nodeid++, generation);
1370 }
1371}
1372
1373static void nodemgr_pause_ne(struct node_entry *ne)
1374{
1375 HPSB_DEBUG("Node paused: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
1376 NODE_BUS_ARGS(ne->host, ne->nodeid),
1377 (unsigned long long)ne->guid);
1378
1379 ne->in_limbo = true;
1380 WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo));
1381}
1382
1383static int update_pdrv(struct device *dev, void *data)
1384{
1385 struct unit_directory *ud;
1386 struct device_driver *drv;
1387 struct hpsb_protocol_driver *pdrv;
1388 struct node_entry *ne = data;
1389 int error;
1390
1391 ud = container_of(dev, struct unit_directory, unit_dev);
1392 if (ud->ne == ne) {
1393 drv = get_driver(ud->device.driver);
1394 if (drv) {
1395 error = 0;
1396 pdrv = container_of(drv, struct hpsb_protocol_driver,
1397 driver);
1398 if (pdrv->update) {
1399 device_lock(&ud->device);
1400 error = pdrv->update(ud);
1401 device_unlock(&ud->device);
1402 }
1403 if (error)
1404 device_release_driver(&ud->device);
1405 put_driver(drv);
1406 }
1407 }
1408
1409 return 0;
1410}
1411
1412static void nodemgr_update_pdrv(struct node_entry *ne)
1413{
1414 class_for_each_device(&nodemgr_ud_class, NULL, ne, update_pdrv);
1415}
1416
1417/* Write the BROADCAST_CHANNEL as per IEEE1394a 8.3.2.3.11 and 8.4.2.3. This
1418 * seems like an optional service but in the end it is practically mandatory
1419 * as a consequence of these clauses.
1420 *
1421 * Note that we cannot do a broadcast write to all nodes at once because some
1422 * pre-1394a devices would hang. */
1423static void nodemgr_irm_write_bc(struct node_entry *ne, int generation)
1424{
1425 const u64 bc_addr = (CSR_REGISTER_BASE | CSR_BROADCAST_CHANNEL);
1426 quadlet_t bc_remote, bc_local;
1427 int error;
1428
1429 if (!ne->host->is_irm || ne->generation != generation ||
1430 ne->nodeid == ne->host->node_id)
1431 return;
1432
1433 bc_local = cpu_to_be32(ne->host->csr.broadcast_channel);
1434
1435 /* Check if the register is implemented and 1394a compliant. */
1436 error = hpsb_read(ne->host, ne->nodeid, generation, bc_addr, &bc_remote,
1437 sizeof(bc_remote));
1438 if (!error && bc_remote & cpu_to_be32(0x80000000) &&
1439 bc_remote != bc_local)
1440 hpsb_node_write(ne, bc_addr, &bc_local, sizeof(bc_local));
1441}
1442
1443
1444static void nodemgr_probe_ne(struct hpsb_host *host, struct node_entry *ne,
1445 int generation)
1446{
1447 struct device *dev;
1448
1449 if (ne->host != host || ne->in_limbo)
1450 return;
1451
1452 dev = get_device(&ne->device);
1453 if (!dev)
1454 return;
1455
1456 nodemgr_irm_write_bc(ne, generation);
1457
1458 /* If "needs_probe" is set, this is either a new or a changed node,
1459 * which we rescan completely. If the generation matches for an existing
1460 * (one that existed prior to the bus reset) we send update calls
1461 * down to the drivers. Otherwise, this is a dead node and we
1462 * suspend it. */
1463 if (ne->needs_probe)
1464 nodemgr_process_root_directory(ne);
1465 else if (ne->generation == generation)
1466 nodemgr_update_pdrv(ne);
1467 else
1468 nodemgr_pause_ne(ne);
1469
1470 put_device(dev);
1471}
1472
1473struct node_probe_parameter {
1474 struct hpsb_host *host;
1475 int generation;
1476 bool probe_now;
1477};
1478
1479static int node_probe(struct device *dev, void *data)
1480{
1481 struct node_probe_parameter *p = data;
1482 struct node_entry *ne;
1483
1484 if (p->generation != get_hpsb_generation(p->host))
1485 return -EAGAIN;
1486
1487 ne = container_of(dev, struct node_entry, node_dev);
1488 if (ne->needs_probe == p->probe_now)
1489 nodemgr_probe_ne(p->host, ne, p->generation);
1490 return 0;
1491}
1492
1493static int nodemgr_node_probe(struct hpsb_host *host, int generation)
1494{
1495 struct node_probe_parameter p;
1496
1497 p.host = host;
1498 p.generation = generation;
1499 /*
1500 * Do some processing of the nodes we've probed. This pulls them
1501 * into the sysfs layer if needed, and can result in processing of
1502 * unit-directories, or just updating the node and its
1503 * unit-directories.
1504 *
1505 * Run updates before probes. Usually, updates are time-critical
1506 * while probes are time-consuming.
1507 *
1508 * Meanwhile, another bus reset may have happened. In this case we
1509 * skip everything here and let the next bus scan handle it.
1510 * Otherwise we may prematurely remove nodes which are still there.
1511 */
1512 p.probe_now = false;
1513 if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0)
1514 return 0;
1515
1516 p.probe_now = true;
1517 if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0)
1518 return 0;
1519 /*
1520 * Now let's tell the bus to rescan our devices. This may seem
1521 * like overhead, but the driver-model core will only scan a
1522 * device for a driver when either the device is added, or when a
1523 * new driver is added. A bus reset is a good reason to rescan
1524 * devices that were there before. For example, an sbp2 device
1525 * may become available for login, if the host that held it was
1526 * just removed.
1527 */
1528 if (bus_rescan_devices(&ieee1394_bus_type) != 0)
1529 HPSB_DEBUG("bus_rescan_devices had an error");
1530
1531 return 1;
1532}
1533
1534static int remove_nodes_in_limbo(struct device *dev, void *data)
1535{
1536 struct node_entry *ne;
1537
1538 if (dev->bus != &ieee1394_bus_type)
1539 return 0;
1540
1541 ne = container_of(dev, struct node_entry, device);
1542 if (ne->in_limbo)
1543 nodemgr_remove_ne(ne);
1544
1545 return 0;
1546}
1547
1548static void nodemgr_remove_nodes_in_limbo(struct hpsb_host *host)
1549{
1550 device_for_each_child(&host->device, NULL, remove_nodes_in_limbo);
1551}
1552
1553static int nodemgr_send_resume_packet(struct hpsb_host *host)
1554{
1555 struct hpsb_packet *packet;
1556 int error = -ENOMEM;
1557
1558 packet = hpsb_make_phypacket(host,
1559 EXTPHYPACKET_TYPE_RESUME |
1560 NODEID_TO_NODE(host->node_id) << PHYPACKET_PORT_SHIFT);
1561 if (packet) {
1562 packet->no_waiter = 1;
1563 packet->generation = get_hpsb_generation(host);
1564 error = hpsb_send_packet(packet);
1565 }
1566 if (error)
1567 HPSB_WARN("fw-host%d: Failed to broadcast resume packet",
1568 host->id);
1569 return error;
1570}
1571
1572/* Perform a few high-level IRM responsibilities. */
1573static int nodemgr_do_irm_duties(struct hpsb_host *host, int cycles)
1574{
1575 quadlet_t bc;
1576
1577 /* if irm_id == -1 then there is no IRM on this bus */
1578 if (!host->is_irm || host->irm_id == (nodeid_t)-1)
1579 return 1;
1580
1581 /* We are a 1394a-2000 compliant IRM. Set the validity bit. */
1582 host->csr.broadcast_channel |= 0x40000000;
1583
1584 /* If there is no bus manager then we should set the root node's
1585 * force_root bit to promote bus stability per the 1394
1586 * spec. (8.4.2.6) */
1587 if (host->busmgr_id == 0xffff && host->node_count > 1)
1588 {
1589 u16 root_node = host->node_count - 1;
1590
1591 /* get cycle master capability flag from root node */
1592 if (host->is_cycmst ||
1593 (!hpsb_read(host, LOCAL_BUS | root_node, get_hpsb_generation(host),
1594 (CSR_REGISTER_BASE + CSR_CONFIG_ROM + 2 * sizeof(quadlet_t)),
1595 &bc, sizeof(quadlet_t)) &&
1596 be32_to_cpu(bc) & 1 << CSR_CMC_SHIFT))
1597 hpsb_send_phy_config(host, root_node, -1);
1598 else {
1599 HPSB_DEBUG("The root node is not cycle master capable; "
1600 "selecting a new root node and resetting...");
1601
1602 if (cycles >= 5) {
1603 /* Oh screw it! Just leave the bus as it is */
1604 HPSB_DEBUG("Stopping reset loop for IRM sanity");
1605 return 1;
1606 }
1607
1608 hpsb_send_phy_config(host, NODEID_TO_NODE(host->node_id), -1);
1609 hpsb_reset_bus(host, LONG_RESET_FORCE_ROOT);
1610
1611 return 0;
1612 }
1613 }
1614
1615 /* Some devices suspend their ports while being connected to an inactive
1616 * host adapter, i.e. if connected before the low-level driver is
1617 * loaded. They become visible either when physically unplugged and
1618 * replugged, or when receiving a resume packet. Send one once. */
1619 if (!host->resume_packet_sent && !nodemgr_send_resume_packet(host))
1620 host->resume_packet_sent = 1;
1621
1622 return 1;
1623}
1624
1625/* We need to ensure that if we are not the IRM, that the IRM node is capable of
1626 * everything we can do, otherwise issue a bus reset and try to become the IRM
1627 * ourselves. */
1628static int nodemgr_check_irm_capability(struct hpsb_host *host, int cycles)
1629{
1630 quadlet_t bc;
1631 int status;
1632
1633 if (hpsb_disable_irm || host->is_irm)
1634 return 1;
1635
1636 status = hpsb_read(host, LOCAL_BUS | (host->irm_id),
1637 get_hpsb_generation(host),
1638 (CSR_REGISTER_BASE | CSR_BROADCAST_CHANNEL),
1639 &bc, sizeof(quadlet_t));
1640
1641 if (status < 0 || !(be32_to_cpu(bc) & 0x80000000)) {
1642 /* The current irm node does not have a valid BROADCAST_CHANNEL
1643 * register and we do, so reset the bus with force_root set */
1644 HPSB_DEBUG("Current remote IRM is not 1394a-2000 compliant, resetting...");
1645
1646 if (cycles >= 5) {
1647 /* Oh screw it! Just leave the bus as it is */
1648 HPSB_DEBUG("Stopping reset loop for IRM sanity");
1649 return 1;
1650 }
1651
1652 hpsb_send_phy_config(host, NODEID_TO_NODE(host->node_id), -1);
1653 hpsb_reset_bus(host, LONG_RESET_FORCE_ROOT);
1654
1655 return 0;
1656 }
1657
1658 return 1;
1659}
1660
1661static int nodemgr_host_thread(void *data)
1662{
1663 struct hpsb_host *host = data;
1664 unsigned int g, generation = 0;
1665 int i, reset_cycles = 0;
1666
1667 set_freezable();
1668 /* Setup our device-model entries */
1669 nodemgr_create_host_dev_files(host);
1670
1671 for (;;) {
1672 /* Sleep until next bus reset */
1673 set_current_state(TASK_INTERRUPTIBLE);
1674 if (get_hpsb_generation(host) == generation &&
1675 !kthread_should_stop())
1676 schedule();
1677 __set_current_state(TASK_RUNNING);
1678
1679 /* Thread may have been woken up to freeze or to exit */
1680 if (try_to_freeze())
1681 continue;
1682 if (kthread_should_stop())
1683 goto exit;
1684
1685 /* Pause for 1/4 second in 1/16 second intervals,
1686 * to make sure things settle down. */
1687 g = get_hpsb_generation(host);
1688 for (i = 0; i < 4 ; i++) {
1689 msleep_interruptible(63);
1690 try_to_freeze();
1691 if (kthread_should_stop())
1692 goto exit;
1693
1694 /* Now get the generation in which the node ID's we collect
1695 * are valid. During the bus scan we will use this generation
1696 * for the read transactions, so that if another reset occurs
1697 * during the scan the transactions will fail instead of
1698 * returning bogus data. */
1699 generation = get_hpsb_generation(host);
1700
1701 /* If we get a reset before we are done waiting, then
1702 * start the waiting over again */
1703 if (generation != g)
1704 g = generation, i = 0;
1705 }
1706
1707 if (!nodemgr_check_irm_capability(host, reset_cycles) ||
1708 !nodemgr_do_irm_duties(host, reset_cycles)) {
1709 reset_cycles++;
1710 continue;
1711 }
1712 reset_cycles = 0;
1713
1714 /* Scan our nodes to get the bus options and create node
1715 * entries. This does not do the sysfs stuff, since that
1716 * would trigger uevents and such, which is a bad idea at
1717 * this point. */
1718 nodemgr_node_scan(host, generation);
1719
1720 /* This actually does the full probe, with sysfs
1721 * registration. */
1722 if (!nodemgr_node_probe(host, generation))
1723 continue;
1724
1725 /* Update some of our sysfs symlinks */
1726 nodemgr_update_host_dev_links(host);
1727
1728 /* Sleep 3 seconds */
1729 for (i = 3000/200; i; i--) {
1730 msleep_interruptible(200);
1731 try_to_freeze();
1732 if (kthread_should_stop())
1733 goto exit;
1734
1735 if (generation != get_hpsb_generation(host))
1736 break;
1737 }
1738 /* Remove nodes which are gone, unless a bus reset happened */
1739 if (!i)
1740 nodemgr_remove_nodes_in_limbo(host);
1741 }
1742exit:
1743 HPSB_VERBOSE("NodeMgr: Exiting thread");
1744 return 0;
1745}
1746
1747struct per_host_parameter {
1748 void *data;
1749 int (*cb)(struct hpsb_host *, void *);
1750};
1751
1752static int per_host(struct device *dev, void *data)
1753{
1754 struct hpsb_host *host;
1755 struct per_host_parameter *p = data;
1756
1757 host = container_of(dev, struct hpsb_host, host_dev);
1758 return p->cb(host, p->data);
1759}
1760
1761/**
1762 * nodemgr_for_each_host - call a function for each IEEE 1394 host
1763 * @data: an address to supply to the callback
1764 * @cb: function to call for each host
1765 *
1766 * Iterate the hosts, calling a given function with supplied data for each host.
1767 * If the callback fails on a host, i.e. if it returns a non-zero value, the
1768 * iteration is stopped.
1769 *
1770 * Return value: 0 on success, non-zero on failure (same as returned by last run
1771 * of the callback).
1772 */
1773int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *))
1774{
1775 struct per_host_parameter p;
1776
1777 p.cb = cb;
1778 p.data = data;
1779 return class_for_each_device(&hpsb_host_class, NULL, &p, per_host);
1780}
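/*
 * Illustrative sketch: iterating all hosts with nodemgr_for_each_host().
 * The callback below is a hypothetical example, not part of the driver.
 */
#if 0
static int count_host_cb(struct hpsb_host *host, void *data)
{
	(*(int *)data)++;
	return 0;	/* returning non-zero would stop the iteration */
}

static int count_hosts(void)
{
	int count = 0;

	nodemgr_for_each_host(&count, count_host_cb);
	return count;
}
#endif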
1781
1782/* The following two convenience functions use a struct node_entry
1783 * for addressing a node on the bus. They are intended for use by any
1784 * process context, not just the nodemgr thread, so we need to be a
1785 * little careful when reading out the node ID and generation. The
1786 * thing that can go wrong is that we get the node ID, then a bus
1787 * reset occurs, and then we read the generation. The node ID is
1788 * possibly invalid, but the generation is current, and we end up
1789 * sending a packet to the wrong node.
1790 *
1791 * The solution is to make sure we read the generation first, so that
1792 * if a reset occurs in the process, we end up with a stale generation
1793 * and the transactions will fail instead of silently using wrong node
1794 * ID's.
1795 */
1796
1797/**
1798 * hpsb_node_fill_packet - fill some destination information into a packet
1799 * @ne: destination node
1800 * @packet: packet to fill in
1801 *
1802 * This will fill in the given, pre-initialised hpsb_packet with the current
1803 * information from the node entry (host, node ID, bus generation number).
1804 */
1805void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet)
1806{
1807 packet->host = ne->host;
1808 packet->generation = ne->generation;
1809 smp_rmb();
1810 packet->node_id = ne->nodeid;
1811}
1812
1813int hpsb_node_write(struct node_entry *ne, u64 addr,
1814 quadlet_t *buffer, size_t length)
1815{
1816 unsigned int generation = ne->generation;
1817
1818 smp_rmb();
1819 return hpsb_write(ne->host, ne->nodeid, generation,
1820 addr, buffer, length);
1821}
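/*
 * The smp_rmb() in the two functions above pairs with the smp_wmb() in
 * nodemgr_update_node(): the updater writes ne->nodeid before
 * ne->generation, and the readers load ne->generation before ne->nodeid,
 * so a generation that is still current implies a node ID that is valid
 * for that generation.
 */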
1822
1823static void nodemgr_add_host(struct hpsb_host *host)
1824{
1825 struct host_info *hi;
1826
1827 hi = hpsb_create_hostinfo(&nodemgr_highlevel, host, sizeof(*hi));
1828 if (!hi) {
1829 HPSB_ERR("NodeMgr: out of memory in add host");
1830 return;
1831 }
1832 hi->host = host;
1833 hi->thread = kthread_run(nodemgr_host_thread, host, "knodemgrd_%d",
1834 host->id);
1835 if (IS_ERR(hi->thread)) {
1836 HPSB_ERR("NodeMgr: cannot start thread for host %d", host->id);
1837 hpsb_destroy_hostinfo(&nodemgr_highlevel, host);
1838 }
1839}
1840
1841static void nodemgr_host_reset(struct hpsb_host *host)
1842{
1843 struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host);
1844
1845 if (hi) {
1846 HPSB_VERBOSE("NodeMgr: Processing reset for host %d", host->id);
1847 wake_up_process(hi->thread);
1848 }
1849}
1850
1851static void nodemgr_remove_host(struct hpsb_host *host)
1852{
1853 struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host);
1854
1855 if (hi) {
1856 kthread_stop(hi->thread);
1857 nodemgr_remove_host_dev(&host->device);
1858 }
1859}
1860
1861static struct hpsb_highlevel nodemgr_highlevel = {
1862 .name = "Node manager",
1863 .add_host = nodemgr_add_host,
1864 .host_reset = nodemgr_host_reset,
1865 .remove_host = nodemgr_remove_host,
1866};
1867
1868int init_ieee1394_nodemgr(void)
1869{
1870 int error;
1871
1872 error = class_register(&nodemgr_ne_class);
1873 if (error)
1874 goto fail_ne;
1875 error = class_register(&nodemgr_ud_class);
1876 if (error)
1877 goto fail_ud;
1878 error = driver_register(&nodemgr_mid_layer_driver);
1879 if (error)
1880 goto fail_ml;
1881 /* This driver is not used if nodemgr is off (disable_nodemgr=1). */
1882 nodemgr_dev_template_host.driver = &nodemgr_mid_layer_driver;
1883
1884 hpsb_register_highlevel(&nodemgr_highlevel);
1885 return 0;
1886
1887fail_ml:
1888 class_unregister(&nodemgr_ud_class);
1889fail_ud:
1890 class_unregister(&nodemgr_ne_class);
1891fail_ne:
1892 return error;
1893}
1894
1895void cleanup_ieee1394_nodemgr(void)
1896{
1897 hpsb_unregister_highlevel(&nodemgr_highlevel);
1898 driver_unregister(&nodemgr_mid_layer_driver);
1899 class_unregister(&nodemgr_ud_class);
1900 class_unregister(&nodemgr_ne_class);
1901}
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
deleted file mode 100644
index 749b271d3107..000000000000
--- a/drivers/ieee1394/nodemgr.h
+++ /dev/null
@@ -1,186 +0,0 @@
1/*
2 * Copyright (C) 2000 Andreas E. Bombe
3 * 2001 Ben Collins <bcollins@debian.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software Foundation,
17 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#ifndef _IEEE1394_NODEMGR_H
21#define _IEEE1394_NODEMGR_H
22
23#include <linux/device.h>
24#include <asm/system.h>
25#include <asm/types.h>
26
27#include "ieee1394_core.h"
28#include "ieee1394_transactions.h"
29#include "ieee1394_types.h"
30
31struct csr1212_csr;
32struct csr1212_keyval;
33struct hpsb_host;
34struct ieee1394_device_id;
35
36/* This is the start of a Node entry structure. It should be a stable API
37 * for gathering info from the Node Manager about devices attached
38 * to the bus. */
39struct bus_options {
40 u8 irmc; /* Iso Resource Manager Capable */
41 u8 cmc; /* Cycle Master Capable */
42 u8 isc; /* Iso Capable */
43 u8 bmc; /* Bus Master Capable */
44 u8 pmc; /* Power Manager Capable (PNP spec) */
45 u8 cyc_clk_acc; /* Cycle clock accuracy */
46 u8 max_rom; /* Maximum block read supported in the CSR */
47 u8 generation; /* Incremented when configrom changes */
48 u8 lnkspd; /* Link speed */
49 u16 max_rec; /* Maximum packet size node can receive */
50};
51
52#define UNIT_DIRECTORY_VENDOR_ID 0x01
53#define UNIT_DIRECTORY_MODEL_ID 0x02
54#define UNIT_DIRECTORY_SPECIFIER_ID 0x04
55#define UNIT_DIRECTORY_VERSION 0x08
56#define UNIT_DIRECTORY_HAS_LUN_DIRECTORY 0x10
57#define UNIT_DIRECTORY_LUN_DIRECTORY 0x20
58#define UNIT_DIRECTORY_HAS_LUN 0x40
59
60/*
61 * A unit directory corresponds to a protocol supported by the
62 * node. If a node supports e.g. IP/1394 and AV/C, its config rom has a
63 * unit directory for each of these protocols.
64 */
65struct unit_directory {
66 struct node_entry *ne; /* The node which this directory belongs to */
67 octlet_t address; /* Address of the unit directory on the node */
68 u8 flags; /* Indicates which entries were read */
69
70 quadlet_t vendor_id;
71 struct csr1212_keyval *vendor_name_kv;
72
73 quadlet_t model_id;
74 struct csr1212_keyval *model_name_kv;
75 quadlet_t specifier_id;
76 quadlet_t version;
77 quadlet_t directory_id;
78
79 unsigned int id;
80
81 int ignore_driver;
82
83 int length; /* Number of quadlets */
84
85 struct device device;
86 struct device unit_dev;
87
88 struct csr1212_keyval *ud_kv;
89 u32 lun; /* logical unit number immediate value */
90};
91
92struct node_entry {
93 u64 guid; /* GUID of this node */
94 u32 guid_vendor_id; /* Top 24bits of guid */
95
96 struct hpsb_host *host; /* Host this node is attached to */
97 nodeid_t nodeid; /* NodeID */
98 struct bus_options busopt; /* Bus Options */
99 bool needs_probe;
100 unsigned int generation; /* Synced with hpsb generation */
101
102 /* The following is read from the config rom */
103 u32 vendor_id;
104 struct csr1212_keyval *vendor_name_kv;
105
106 u32 capabilities;
107
108 struct device device;
109 struct device node_dev;
110
111 /* Means this node is not attached anymore */
112 bool in_limbo;
113
114 struct csr1212_csr *csr;
115};
116
117struct hpsb_protocol_driver {
118 /* The name of the driver, e.g. SBP2 or IP1394 */
119 const char *name;
120
121 /*
122 * The device id table describing the protocols and/or devices
123 * supported by this driver. This is used by the nodemgr to
124 * decide if a driver could support a given node, but the
125 * probe function below can implement further protocol
126 * dependent or vendor dependent checking.
127 */
128 const struct ieee1394_device_id *id_table;
129
130 /*
131 * The update function is called when the node has just
132 * survived a bus reset, i.e. it is still present on the bus.
133 * However, it may be necessary to reestablish the connection
134 * or log in to the node again, depending on the protocol. If the
135 * update fails (returns non-zero), we unbind the driver from this
136 * device.
137 */
138 int (*update)(struct unit_directory *ud);
139
140 /* Our LDM structure */
141 struct device_driver driver;
142};
143
144int __hpsb_register_protocol(struct hpsb_protocol_driver *, struct module *);
145static inline int hpsb_register_protocol(struct hpsb_protocol_driver *driver)
146{
147 return __hpsb_register_protocol(driver, THIS_MODULE);
148}
149
150void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver);
151
152static inline int hpsb_node_entry_valid(struct node_entry *ne)
153{
154 return ne->generation == get_hpsb_generation(ne->host);
155}
156void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet);
157int hpsb_node_write(struct node_entry *ne, u64 addr,
158 quadlet_t *buffer, size_t length);
159static inline int hpsb_node_read(struct node_entry *ne, u64 addr,
160 quadlet_t *buffer, size_t length)
161{
162 unsigned int g = ne->generation;
163
164 smp_rmb();
165 return hpsb_read(ne->host, ne->nodeid, g, addr, buffer, length);
166}
167static inline int hpsb_node_lock(struct node_entry *ne, u64 addr, int extcode,
168 quadlet_t *buffer, quadlet_t arg)
169{
170 unsigned int g = ne->generation;
171
172 smp_rmb();
173 return hpsb_lock(ne->host, ne->nodeid, g, addr, extcode, buffer, arg);
174}
175int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *));
176
177int init_ieee1394_nodemgr(void);
178void cleanup_ieee1394_nodemgr(void);
179
180/* The template for a host device */
181extern struct device nodemgr_dev_template_host;
182
183/* Bus attributes we export */
184extern struct bus_attribute *const fw_bus_attrs[];
185
186#endif /* _IEEE1394_NODEMGR_H */
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
deleted file mode 100644
index 50815022cff1..000000000000
--- a/drivers/ieee1394/ohci1394.c
+++ /dev/null
@@ -1,3590 +0,0 @@
1/*
2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 * 2001 Ben Collins <bcollins@debian.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22/*
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
28 * . Iso Receive
29 * . DMA mmap for iso receive
30 * . Config ROM generation
31 *
32 * Things implemented, but still in test phase:
33 * . Iso Transmit
34 * . Async Stream Packets Transmit (Receive done via Iso interface)
35 *
36 * Things not implemented:
37 * . DMA error recovery
38 *
39 * Known bugs:
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41 * added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42 */
43
44/*
45 * Acknowledgments:
46 *
47 * Adam J Richter <adam@yggdrasil.com>
48 * . Use of pci_class to find device
49 *
50 * Emilie Chung <emilie.chung@axis.com>
51 * . Tip on Async Request Filter
52 *
53 * Pascal Drolet <pascal.drolet@informission.ca>
54 * . Various tips for optimization and functionalities
55 *
56 * Robert Ficklin <rficklin@westengineering.com>
57 * . Loop in irq_handler
58 *
59 * James Goodwin <jamesg@Filanet.com>
60 * . Various tips on initialization, self-id reception, etc.
61 *
62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63 * . Apple PowerBook detection
64 *
65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66 * . Reset the board properly before leaving + misc cleanups
67 *
68 * Leon van Stuivenberg <leonvs@iae.nl>
69 * . Bug fixes
70 *
71 * Ben Collins <bcollins@debian.org>
72 * . Working big-endian support
73 * . Updated to 2.4.x module scheme (PCI as well)
74 * . Config ROM generation
75 *
76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
77 * . Reworked code for initiating bus resets
78 * (long, short, with or without hold-off)
79 *
80 * Nandu Santhi <contactnandu@users.sourceforge.net>
81 * . Added support for nVidia nForce2 onboard Firewire chipset
82 *
83 */
84
85#include <linux/bitops.h>
86#include <linux/kernel.h>
87#include <linux/list.h>
88#include <linux/slab.h>
89#include <linux/interrupt.h>
90#include <linux/wait.h>
91#include <linux/errno.h>
92#include <linux/module.h>
93#include <linux/moduleparam.h>
94#include <linux/pci.h>
95#include <linux/fs.h>
96#include <linux/poll.h>
97#include <asm/byteorder.h>
98#include <asm/atomic.h>
99#include <asm/uaccess.h>
100#include <linux/delay.h>
101#include <linux/spinlock.h>
102
103#include <asm/pgtable.h>
104#include <asm/page.h>
105#include <asm/irq.h>
106#include <linux/types.h>
107#include <linux/vmalloc.h>
108#include <linux/init.h>
109
110#ifdef CONFIG_PPC_PMAC
111#include <asm/machdep.h>
112#include <asm/pmac_feature.h>
113#include <asm/prom.h>
114#include <asm/pci-bridge.h>
115#endif
116
117#include "csr1212.h"
118#include "ieee1394.h"
119#include "ieee1394_types.h"
120#include "hosts.h"
121#include "dma.h"
122#include "iso.h"
123#include "ieee1394_core.h"
124#include "highlevel.h"
125#include "ohci1394.h"
126
127#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
128#define OHCI1394_DEBUG
129#endif
130
131#ifdef DBGMSG
132#undef DBGMSG
133#endif
134
135#ifdef OHCI1394_DEBUG
136#define DBGMSG(fmt, args...) \
137printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
138#else
139#define DBGMSG(fmt, args...) do {} while (0)
140#endif
141
142/* print general (card independent) information */
143#define PRINT_G(level, fmt, args...) \
144printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
145
146/* print card specific information */
147#define PRINT(level, fmt, args...) \
148printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
149
150/* Module Parameters */
151static int phys_dma = 1;
152module_param(phys_dma, int, 0444);
153MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
154
155static void dma_trm_tasklet(unsigned long data);
156static void dma_trm_reset(struct dma_trm_ctx *d);
157
158static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
159 enum context_type type, int ctx, int num_desc,
160 int buf_size, int split_buf_size, int context_base);
161static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
162
163static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
164 enum context_type type, int ctx, int num_desc,
165 int context_base);
166
167static void ohci1394_pci_remove(struct pci_dev *pdev);
168
169#ifndef __LITTLE_ENDIAN
170static const size_t hdr_sizes[] = {
171 3, /* TCODE_WRITEQ */
172 4, /* TCODE_WRITEB */
173 3, /* TCODE_WRITE_RESPONSE */
174 0, /* reserved */
175 3, /* TCODE_READQ */
176 4, /* TCODE_READB */
177 3, /* TCODE_READQ_RESPONSE */
178 4, /* TCODE_READB_RESPONSE */
179 1, /* TCODE_CYCLE_START */
180 4, /* TCODE_LOCK_REQUEST */
181 2, /* TCODE_ISO_DATA */
182 4, /* TCODE_LOCK_RESPONSE */
183 /* rest is reserved or link-internal */
184};
185
186static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
187{
188 size_t size;
189
190 if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
191 return;
192
193 size = hdr_sizes[tcode];
194 while (size--)
195 data[size] = le32_to_cpu(data[size]);
196}
197#else
198#define header_le32_to_cpu(w,x) do {} while (0)
199#endif /* !LITTLE_ENDIAN */
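/*
 * Example: for TCODE_READQ (0x4), hdr_sizes[] yields 3, so data[0..2]
 * are byte-swapped in place on big-endian hosts; the no-op macro above
 * makes this free on little-endian builds.
 */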
200
201/***********************************
202 * IEEE-1394 functionality section *
203 ***********************************/
204
205static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
206{
207 int i;
208 unsigned long flags;
209 quadlet_t r;
210
211 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
212
213 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
214
215 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
216 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
217 break;
218
219 mdelay(1);
220 }
221
222 r = reg_read(ohci, OHCI1394_PhyControl);
223
224 if (i >= OHCI_LOOP_COUNT)
225 PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
226 r, r & 0x80000000, i);
227
228 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
229
230 return (r & 0x00ff0000) >> 16;
231}
232
233static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
234{
235 int i;
236 unsigned long flags;
237 u32 r = 0;
238
239 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
240
241 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
242
243 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
244 r = reg_read(ohci, OHCI1394_PhyControl);
245 if (!(r & 0x00004000))
246 break;
247
248 mdelay(1);
249 }
250
251 if (i == OHCI_LOOP_COUNT)
252 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
253 r, r & 0x00004000, i);
254
255 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
256
257 return;
258}
259
260/* Or's our value into the current value */
261static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
262{
263 u8 old;
264
265 old = get_phy_reg (ohci, addr);
266 old |= data;
267 set_phy_reg (ohci, addr, old);
268
269 return;
270}
271
272static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
273 int phyid, int isroot)
274{
275 quadlet_t *q = ohci->selfid_buf_cpu;
276 quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
277 size_t size;
278 quadlet_t q0, q1;
279
280 /* Check status of self-id reception */
281
282 if (ohci->selfid_swap)
283 q0 = le32_to_cpu(q[0]);
284 else
285 q0 = q[0];
286
287 if ((self_id_count & 0x80000000) ||
288 ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
289 PRINT(KERN_ERR,
290 "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
291 self_id_count, q0, ohci->self_id_errors);
292
293 /* Tip by James Goodwin <jamesg@Filanet.com>:
294 * We had an error, generate another bus reset in response. */
295 if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
296 set_phy_reg_mask (ohci, 1, 0x40);
297 ohci->self_id_errors++;
298 } else {
299 PRINT(KERN_ERR,
300 "Too many errors on SelfID reception, giving up!");
301 }
302 return;
303 }
304
305 /* SelfID Ok, reset error counter. */
306 ohci->self_id_errors = 0;
307
308 size = ((self_id_count & 0x00001FFC) >> 2) - 1;
309 q++;
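	/* selfIDSize (SelfIDCount bits 12:2) counts quadlets including the
	 * header quadlet just skipped; the rest are (q0, ~q0) pairs. */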
310
311 while (size > 0) {
312 if (ohci->selfid_swap) {
313 q0 = le32_to_cpu(q[0]);
314 q1 = le32_to_cpu(q[1]);
315 } else {
316 q0 = q[0];
317 q1 = q[1];
318 }
319
320 if (q0 == ~q1) {
321 DBGMSG ("SelfID packet 0x%x received", q0);
322 hpsb_selfid_received(host, cpu_to_be32(q0));
323 if (((q0 & 0x3f000000) >> 24) == phyid)
324 DBGMSG ("SelfID for this node is 0x%08x", q0);
325 } else {
326 PRINT(KERN_ERR,
327 "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
328 }
329 q += 2;
330 size -= 2;
331 }
332
333 DBGMSG("SelfID complete");
334
335 return;
336}
337
338static void ohci_soft_reset(struct ti_ohci *ohci) {
339 int i;
340
341 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
342
343 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
344 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
345 break;
346 mdelay(1);
347 }
348 DBGMSG ("Soft reset finished");
349}
350
351
352/* Generate the dma receive prgs and start the context */
353static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
354{
355 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
356 int i;
357
358 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
359
360 for (i=0; i<d->num_desc; i++) {
361 u32 c;
362
363 c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
364 if (generate_irq)
365 c |= DMA_CTL_IRQ;
366
367 d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);
368
369 /* End of descriptor list? */
370 if (i + 1 < d->num_desc) {
371 d->prg_cpu[i]->branchAddress =
372 cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
373 } else {
374 d->prg_cpu[i]->branchAddress =
375 cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
376 }
377
378 d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
379 d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
380 }
381
382 d->buf_ind = 0;
383 d->buf_offset = 0;
384
385 if (d->type == DMA_CTX_ISO) {
386 /* Clear contextControl */
387 reg_write(ohci, d->ctrlClear, 0xffffffff);
388
389 /* Set bufferFill, isochHeader, multichannel for IR context */
390 reg_write(ohci, d->ctrlSet, 0xd0000000);
391
392 /* Set the context match register to match on all tags */
393 reg_write(ohci, d->ctxtMatch, 0xf0000000);
394
395 /* Clear the multi channel mask high and low registers */
396 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
397 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
398
399 /* Set up isoRecvIntMask to generate interrupts */
400 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
401 }
402
403 /* Tell the controller where the first AR program is */
404 reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
405
406 /* Run context */
407 reg_write(ohci, d->ctrlSet, 0x00008000);
408
409 DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
410}
411
412/* Initialize the dma transmit context */
413static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
414{
415 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
416
417 /* Stop the context */
418 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
419
420 d->prg_ind = 0;
421 d->sent_ind = 0;
422 d->free_prgs = d->num_desc;
423 d->branchAddrPtr = NULL;
424 INIT_LIST_HEAD(&d->fifo_list);
425 INIT_LIST_HEAD(&d->pending_list);
426
427 if (d->type == DMA_CTX_ISO) {
428 /* enable interrupts */
429 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
430 }
431
432 DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
433}
434
435/* Count the number of available iso contexts */
436static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
437{
438 u32 tmp;
439
440 reg_write(ohci, reg, 0xffffffff);
441 tmp = reg_read(ohci, reg);
442
443 DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
444
445 /* Count the number of contexts */
446 return hweight32(tmp);
447}
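/*
 * Worked example: writing 0xffffffff to the mask-set register and
 * reading back 0x000000ff means contexts 0-7 are implemented, and
 * hweight32(0x000000ff) = 8.
 */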
448
449/* Global initialization */
450static void ohci_initialize(struct ti_ohci *ohci)
451{
452 quadlet_t buf;
453 int num_ports, i;
454
455 spin_lock_init(&ohci->phy_reg_lock);
456
457 /* Set some defaults for these undefined bus options */
458 buf = reg_read(ohci, OHCI1394_BusOptions);
459 buf |= 0x60000000; /* Enable CMC and ISC */
460 if (hpsb_disable_irm)
461 buf &= ~0x80000000;
462 else
463 buf |= 0x80000000; /* Enable IRMC */
464 buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
465 buf &= ~0x18000000; /* Disable PMC and BMC */
466 reg_write(ohci, OHCI1394_BusOptions, buf);
467
468 /* Set the bus number */
469 reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
470
471 /* Enable posted writes */
472 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);
473
474 /* Clear link control register */
475 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
476
477 /* Enable cycle timer and cycle master and set the IRM
478 * contender bit in our self ID packets if appropriate. */
479 reg_write(ohci, OHCI1394_LinkControlSet,
480 OHCI1394_LinkControl_CycleTimerEnable |
481 OHCI1394_LinkControl_CycleMaster);
482 i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
483 if (hpsb_disable_irm)
484 i &= ~PHY_04_CONTENDER;
485 else
486 i |= PHY_04_CONTENDER;
487 set_phy_reg(ohci, 4, i);
488
489 /* Set up self-id dma buffer */
490 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
491
492 /* enable self-id */
493 reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);
494
495 /* Set the Config ROM mapping register */
496 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
497
498 /* Now get our max packet size */
499 ohci->max_packet_size =
500 1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
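	/* max_rec is BusOptions bits 15:12; the maximum asynchronous
	 * payload is 2^(max_rec + 1) bytes, e.g. max_rec = 9 gives
	 * 1 << 10 = 1024 bytes. */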
501
502 /* Clear the interrupt mask */
503 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
504 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
505
506 /* Clear the interrupt mask */
507 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
508 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
509
510 /* Initialize AR dma */
511 initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
512 initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
513
514 /* Initialize AT dma */
515 initialize_dma_trm_ctx(&ohci->at_req_context);
516 initialize_dma_trm_ctx(&ohci->at_resp_context);
517
518 /* Accept AR requests from all nodes */
519 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
520
521 /* Set the address range of the physical response unit.
522 * Most controllers do not implement it as a writable register though.
523 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
524 * register content.
525 * To actually enable physical responses is the job of our interrupt
526 * handler which programs the physical request filter. */
527 reg_write(ohci, OHCI1394_PhyUpperBound,
528 OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);
529
530 DBGMSG("physUpperBoundOffset=%08x",
531 reg_read(ohci, OHCI1394_PhyUpperBound));
532
533 /* Specify AT retries */
534 reg_write(ohci, OHCI1394_ATRetries,
535 OHCI1394_MAX_AT_REQ_RETRIES |
536 (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
537 (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
538
539 /* We don't want hardware swapping */
540 reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
541
542 /* Enable interrupts */
543 reg_write(ohci, OHCI1394_IntMaskSet,
544 OHCI1394_unrecoverableError |
545 OHCI1394_masterIntEnable |
546 OHCI1394_busReset |
547 OHCI1394_selfIDComplete |
548 OHCI1394_RSPkt |
549 OHCI1394_RQPkt |
550 OHCI1394_respTxComplete |
551 OHCI1394_reqTxComplete |
552 OHCI1394_isochRx |
553 OHCI1394_isochTx |
554 OHCI1394_postedWriteErr |
555 OHCI1394_cycleTooLong |
556 OHCI1394_cycleInconsistent);
557
558 /* Enable link */
559 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
560
561 buf = reg_read(ohci, OHCI1394_Version);
562 PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d] "
563 "MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
564 ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
565 ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
566 (unsigned long long)pci_resource_start(ohci->dev, 0),
567 (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
568 ohci->max_packet_size,
569 ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);
570
571 /* Check all of our ports to make sure that if anything is
572 * connected, we enable that port. */
573 num_ports = get_phy_reg(ohci, 2) & 0xf;
574 for (i = 0; i < num_ports; i++) {
575 unsigned int status;
576
577 set_phy_reg(ohci, 7, i);
578 status = get_phy_reg(ohci, 8);
579
580 if (status & 0x20)
581 set_phy_reg(ohci, 8, status & ~1);
582 }
583
584 /* Serial EEPROM Sanity check. */
585 if ((ohci->max_packet_size < 512) ||
586 (ohci->max_packet_size > 4096)) {
587 /* Serial EEPROM contents are suspect, set a sane max packet
588 * size and print the raw contents for bug reports if verbose
589 * debug is enabled. */
590#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
591 int i;
592#endif
593
594 PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
595 "attempting to set max_packet_size to 512 bytes");
596 reg_write(ohci, OHCI1394_BusOptions,
597 (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
598 ohci->max_packet_size = 512;
599#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
600 PRINT(KERN_DEBUG, " EEPROM Present: %d",
601 (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
602 reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);
603
604 for (i = 0;
605 ((i < 1000) &&
606 (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
607 udelay(10);
608
609 for (i = 0; i < 0x20; i++) {
610 reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
611 PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
612 (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
613 }
614#endif
615 }
616}
617
618/*
619 * Insert a packet in the DMA fifo and generate the DMA prg
620 * FIXME: rewrite the program in order to accept packets crossing
621 * page boundaries.
622 * Also check that a single dma descriptor doesn't cross a
623 * page boundary.
624 */
625static void insert_packet(struct ti_ohci *ohci,
626 struct dma_trm_ctx *d, struct hpsb_packet *packet)
627{
628 u32 cycleTimer;
629 int idx = d->prg_ind;
630
631 DBGMSG("Inserting packet for node " NODE_BUS_FMT
632 ", tlabel=%d, tcode=0x%x, speed=%d",
633 NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
634 packet->tcode, packet->speed_code);
635
636 d->prg_cpu[idx]->begin.address = 0;
637 d->prg_cpu[idx]->begin.branchAddress = 0;
638
639 if (d->type == DMA_CTX_ASYNC_RESP) {
640 /*
641 * For response packets, we need to put a timeout value in
642 * the 16 lower bits of the status... let's try 1 sec timeout
643 */
644 cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
645 d->prg_cpu[idx]->begin.status = cpu_to_le32(
646 (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
647 ((cycleTimer&0x01fff000)>>12));
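		/* Timestamp layout: bits 15:13 = (seconds + 1) mod 8,
		 * bits 12:0 = the current cycle number, i.e. the same
		 * cycle one second from now. */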
648
649 DBGMSG("cycleTimer: %08x timeStamp: %08x",
650 cycleTimer, d->prg_cpu[idx]->begin.status);
651 } else
652 d->prg_cpu[idx]->begin.status = 0;
653
654 if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {
655
656 if (packet->type == hpsb_raw) {
657 d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
658 d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
659 d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
660 } else {
661 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
662 (packet->header[0] & 0xFFFF);
663
664 if (packet->tcode == TCODE_ISO_DATA) {
665 /* Sending an async stream packet */
666 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
667 } else {
668 /* Sending a normal async request or response */
669 d->prg_cpu[idx]->data[1] =
670 (packet->header[1] & 0xFFFF) |
671 (packet->header[0] & 0xFFFF0000);
672 d->prg_cpu[idx]->data[2] = packet->header[2];
673 d->prg_cpu[idx]->data[3] = packet->header[3];
674 }
675 header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
676 }
677
678 if (packet->data_size) { /* block transmit */
679 if (packet->tcode == TCODE_STREAM_DATA){
680 d->prg_cpu[idx]->begin.control =
681 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
682 DMA_CTL_IMMEDIATE | 0x8);
683 } else {
684 d->prg_cpu[idx]->begin.control =
685 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
686 DMA_CTL_IMMEDIATE | 0x10);
687 }
688 d->prg_cpu[idx]->end.control =
689 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
690 DMA_CTL_IRQ |
691 DMA_CTL_BRANCH |
692 packet->data_size);
693 /*
694 * Check that the packet data buffer
695 * does not cross a page boundary.
696 *
697 * XXX Fix this some day. eth1394 seems to trigger
698 * it, but ignoring it doesn't seem to cause a
699 * problem.
700 */
701#if 0
702 if (cross_bound((unsigned long)packet->data,
703 packet->data_size)>0) {
704 /* FIXME: do something about it */
705 PRINT(KERN_ERR,
706 "%s: packet data addr: %p size %Zd bytes "
707 "cross page boundary", __func__,
708 packet->data, packet->data_size);
709 }
710#endif
711 d->prg_cpu[idx]->end.address = cpu_to_le32(
712 pci_map_single(ohci->dev, packet->data,
713 packet->data_size,
714 PCI_DMA_TODEVICE));
715
716 d->prg_cpu[idx]->end.branchAddress = 0;
717 d->prg_cpu[idx]->end.status = 0;
718 if (d->branchAddrPtr)
719 *(d->branchAddrPtr) =
720 cpu_to_le32(d->prg_bus[idx] | 0x3);
721 d->branchAddrPtr =
722 &(d->prg_cpu[idx]->end.branchAddress);
723 } else { /* quadlet transmit */
724 if (packet->type == hpsb_raw)
725 d->prg_cpu[idx]->begin.control =
726 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
727 DMA_CTL_IMMEDIATE |
728 DMA_CTL_IRQ |
729 DMA_CTL_BRANCH |
730 (packet->header_size + 4));
731 else
732 d->prg_cpu[idx]->begin.control =
733 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
734 DMA_CTL_IMMEDIATE |
735 DMA_CTL_IRQ |
736 DMA_CTL_BRANCH |
737 packet->header_size);
738
739 if (d->branchAddrPtr)
740 *(d->branchAddrPtr) =
741 cpu_to_le32(d->prg_bus[idx] | 0x2);
742 d->branchAddrPtr =
743 &(d->prg_cpu[idx]->begin.branchAddress);
744 }
745
746 } else { /* iso packet */
747 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
748 (packet->header[0] & 0xFFFF);
749 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
750 header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
751
752 d->prg_cpu[idx]->begin.control =
753 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
754 DMA_CTL_IMMEDIATE | 0x8);
755 d->prg_cpu[idx]->end.control =
756 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
757 DMA_CTL_UPDATE |
758 DMA_CTL_IRQ |
759 DMA_CTL_BRANCH |
760 packet->data_size);
761 d->prg_cpu[idx]->end.address = cpu_to_le32(
762 pci_map_single(ohci->dev, packet->data,
763 packet->data_size, PCI_DMA_TODEVICE));
764
765 d->prg_cpu[idx]->end.branchAddress = 0;
766 d->prg_cpu[idx]->end.status = 0;
767 DBGMSG("Iso xmit context info: header[%08x %08x]\n"
768 " begin=%08x %08x %08x %08x\n"
769 " %08x %08x %08x %08x\n"
770 " end =%08x %08x %08x %08x",
771 d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
772 d->prg_cpu[idx]->begin.control,
773 d->prg_cpu[idx]->begin.address,
774 d->prg_cpu[idx]->begin.branchAddress,
775 d->prg_cpu[idx]->begin.status,
776 d->prg_cpu[idx]->data[0],
777 d->prg_cpu[idx]->data[1],
778 d->prg_cpu[idx]->data[2],
779 d->prg_cpu[idx]->data[3],
780 d->prg_cpu[idx]->end.control,
781 d->prg_cpu[idx]->end.address,
782 d->prg_cpu[idx]->end.branchAddress,
783 d->prg_cpu[idx]->end.status);
784 if (d->branchAddrPtr)
785 *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
786 d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
787 }
788 d->free_prgs--;
789
790 /* queue the packet in the appropriate context queue */
791 list_add_tail(&packet->driver_list, &d->fifo_list);
792 d->prg_ind = (d->prg_ind + 1) % d->num_desc;
793}
794
795/*
796 * This function fills the FIFO with any pending packets
797 * and runs or wakes up the DMA prg if necessary.
798 *
799 * The function MUST be called with the d->lock held.
800 */
801static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
802{
803 struct hpsb_packet *packet, *ptmp;
804 int idx = d->prg_ind;
805 int z = 0;
806
807 /* insert the packets into the dma fifo */
808 list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
809 if (!d->free_prgs)
810 break;
811
812 /* For the first packet only */
813 if (!z)
814 z = (packet->data_size) ? 3 : 2;
815
816 /* Insert the packet */
817 list_del_init(&packet->driver_list);
818 insert_packet(ohci, d, packet);
819 }
820
821 /* Nothing was inserted: either no free prgs or no pending packets */
822 if (z == 0)
823 return;
824
825 /* Is the context running? (It should be, unless this is
826 the first packet to be sent in this context.) */
827 if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
828 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
829
830 DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
831 reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);
832
833 /* Check that the node id is valid, and not 63 */
834 if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
835 PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
836 else
837 reg_write(ohci, d->ctrlSet, 0x8000);
838 } else {
839 /* Wake up the dma context if necessary */
840 if (!(reg_read(ohci, d->ctrlSet) & 0x400))
841 DBGMSG("Waking transmit DMA ctx=%d",d->ctx);
842
843 /* do this always, to avoid race condition */
844 reg_write(ohci, d->ctrlSet, 0x1000);
845 }
846
847 return;
848}
849
850/* Transmission of an async or iso packet */
851static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
852{
853 struct ti_ohci *ohci = host->hostdata;
854 struct dma_trm_ctx *d;
855 unsigned long flags;
856
857 if (packet->data_size > ohci->max_packet_size) {
858 PRINT(KERN_ERR,
859 "Transmit packet size %Zd is too big",
860 packet->data_size);
861 return -EOVERFLOW;
862 }
863
864 if (packet->type == hpsb_raw)
865 d = &ohci->at_req_context;
866 else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
867 d = &ohci->at_resp_context;
868 else
869 d = &ohci->at_req_context;
870
871 spin_lock_irqsave(&d->lock,flags);
872
873 list_add_tail(&packet->driver_list, &d->pending_list);
874
875 dma_trm_flush(ohci, d);
876
877 spin_unlock_irqrestore(&d->lock,flags);
878
879 return 0;
880}
881
882static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
883{
884 struct ti_ohci *ohci = host->hostdata;
885 int retval = 0, phy_reg;
886
887 switch (cmd) {
888 case RESET_BUS:
889 switch (arg) {
890 case SHORT_RESET:
891 phy_reg = get_phy_reg(ohci, 5);
892 phy_reg |= 0x40;
893 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
894 break;
895 case LONG_RESET:
896 phy_reg = get_phy_reg(ohci, 1);
897 phy_reg |= 0x40;
898 set_phy_reg(ohci, 1, phy_reg); /* set IBR */
899 break;
900 case SHORT_RESET_NO_FORCE_ROOT:
901 phy_reg = get_phy_reg(ohci, 1);
902 if (phy_reg & 0x80) {
903 phy_reg &= ~0x80;
904 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
905 }
906
907 phy_reg = get_phy_reg(ohci, 5);
908 phy_reg |= 0x40;
909 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
910 break;
911 case LONG_RESET_NO_FORCE_ROOT:
912 phy_reg = get_phy_reg(ohci, 1);
913 phy_reg &= ~0x80;
914 phy_reg |= 0x40;
915 set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
916 break;
917 case SHORT_RESET_FORCE_ROOT:
918 phy_reg = get_phy_reg(ohci, 1);
919 if (!(phy_reg & 0x80)) {
920 phy_reg |= 0x80;
921 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
922 }
923
924 phy_reg = get_phy_reg(ohci, 5);
925 phy_reg |= 0x40;
926 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
927 break;
928 case LONG_RESET_FORCE_ROOT:
929 phy_reg = get_phy_reg(ohci, 1);
930 phy_reg |= 0xc0;
931 set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
932 break;
933 default:
934 retval = -1;
935 }
936 break;
937
938 case GET_CYCLE_COUNTER:
939 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
940 break;
941
942 case SET_CYCLE_COUNTER:
943 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
944 break;
945
946 case SET_BUS_ID:
947		PRINT(KERN_ERR, "devctl command SET_BUS_ID not implemented");
948 break;
949
950 case ACT_CYCLE_MASTER:
951 if (arg) {
952 /* check if we are root and other nodes are present */
953 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
954 if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
955 /*
956 * enable cycleTimer, cycleMaster
957 */
958 DBGMSG("Cycle master enabled");
959 reg_write(ohci, OHCI1394_LinkControlSet,
960 OHCI1394_LinkControl_CycleTimerEnable |
961 OHCI1394_LinkControl_CycleMaster);
962 }
963 } else {
964 /* disable cycleTimer, cycleMaster, cycleSource */
965 reg_write(ohci, OHCI1394_LinkControlClear,
966 OHCI1394_LinkControl_CycleTimerEnable |
967 OHCI1394_LinkControl_CycleMaster |
968 OHCI1394_LinkControl_CycleSource);
969 }
970 break;
971
972 case CANCEL_REQUESTS:
973 DBGMSG("Cancel request received");
974 dma_trm_reset(&ohci->at_req_context);
975 dma_trm_reset(&ohci->at_resp_context);
976 break;
977
978 default:
979 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
980 cmd);
981 break;
982 }
983 return retval;
984}
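
/*
 * Summary of the 1394a PHY register bits driven in ohci_devctl() above:
 * register 1 holds RHB (0x80, root hold-off) and IBR (0x40, initiate
 * long bus reset); register 5 holds ISBR (0x40, initiate short,
 * arbitrated bus reset).  The *_FORCE_ROOT variants merely set or clear
 * RHB before initiating the reset.
 */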
985
986/***********************************
987 * rawiso ISO reception *
988 ***********************************/
989
990/*
991 We use either buffer-fill or packet-per-buffer DMA mode. The DMA
992 buffer is split into "blocks" (regions described by one DMA
993 descriptor). Each block must be one page or less in size, and
994 must not cross a page boundary.
995
996 There is one little wrinkle with buffer-fill mode: a packet that
997 starts in the final block may wrap around into the first block. But
998 the user API expects all packets to be contiguous. Our solution is
999 to keep the very last page of the DMA buffer in reserve - if a
1000 packet spans the gap, we copy its tail into this page.
1001*/
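
/*
   A worked example of the guard-page scheme, assuming PAGE_SIZE = 4096:
   with a 64 KiB buffer there are 15 data blocks plus the reserved page,
   so the DMA offset wraps at 15*4096 = 61440.  A 1000-byte packet whose
   payload starts at offset 61000 gets its final 560 bytes written at the
   start of block 0; the parser then copies that tail to offset 61440, so
   the packet appears contiguous in the user's mapping.
*/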
1002
1003struct ohci_iso_recv {
1004 struct ti_ohci *ohci;
1005
1006 struct ohci1394_iso_tasklet task;
1007 int task_active;
1008
1009 enum { BUFFER_FILL_MODE = 0,
1010 PACKET_PER_BUFFER_MODE = 1 } dma_mode;
1011
1012 /* memory and PCI mapping for the DMA descriptors */
1013 struct dma_prog_region prog;
1014 struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */
1015
1016 /* how many DMA blocks fit in the buffer */
1017 unsigned int nblocks;
1018
1019 /* stride of DMA blocks */
1020 unsigned int buf_stride;
1021
1022 /* number of blocks to batch between interrupts */
1023 int block_irq_interval;
1024
1025 /* block that DMA will finish next */
1026 int block_dma;
1027
1028 /* (buffer-fill only) block that the reader will release next */
1029 int block_reader;
1030
1031	/* (buffer-fill only) bytes of the buffer the reader has released;
1032	   always less than one full block */
1033 int released_bytes;
1034
1035 /* (buffer-fill only) buffer offset at which the next packet will appear */
1036 int dma_offset;
1037
1038 /* OHCI DMA context control registers */
1039 u32 ContextControlSet;
1040 u32 ContextControlClear;
1041 u32 CommandPtr;
1042 u32 ContextMatch;
1043};
1044
1045static void ohci_iso_recv_task(unsigned long data);
1046static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1047static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1048static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1049static void ohci_iso_recv_program(struct hpsb_iso *iso);
1050
1051static int ohci_iso_recv_init(struct hpsb_iso *iso)
1052{
1053 struct ti_ohci *ohci = iso->host->hostdata;
1054 struct ohci_iso_recv *recv;
1055 int ctx;
1056 int ret = -ENOMEM;
1057
1058 recv = kmalloc(sizeof(*recv), GFP_KERNEL);
1059 if (!recv)
1060 return -ENOMEM;
1061
1062 iso->hostdata = recv;
1063 recv->ohci = ohci;
1064 recv->task_active = 0;
1065 dma_prog_region_init(&recv->prog);
1066 recv->block = NULL;
1067
1068	/* use packet-per-buffer mode only if explicitly requested (or implied
1069	   by irq_interval == 1 under the old ABI); multichannel requires buffer-fill */
1070
1071 if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
1072 iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
1073 recv->dma_mode = PACKET_PER_BUFFER_MODE;
1074 } else {
1075 recv->dma_mode = BUFFER_FILL_MODE;
1076 }
1077
1078 /* set nblocks, buf_stride, block_irq_interval */
1079
1080 if (recv->dma_mode == BUFFER_FILL_MODE) {
1081 recv->buf_stride = PAGE_SIZE;
1082
1083 /* one block per page of data in the DMA buffer, minus the final guard page */
1084 recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1085 if (recv->nblocks < 3) {
1086 DBGMSG("ohci_iso_recv_init: DMA buffer too small");
1087 goto err;
1088 }
1089
1090 /* iso->irq_interval is in packets - translate that to blocks */
1091 if (iso->irq_interval == 1)
1092 recv->block_irq_interval = 1;
1093 else
1094 recv->block_irq_interval = iso->irq_interval *
1095 ((recv->nblocks+1)/iso->buf_packets);
1096 if (recv->block_irq_interval*4 > recv->nblocks)
1097 recv->block_irq_interval = recv->nblocks/4;
1098 if (recv->block_irq_interval < 1)
1099 recv->block_irq_interval = 1;
1100
1101 } else {
1102 int max_packet_size;
1103
1104 recv->nblocks = iso->buf_packets;
1105 recv->block_irq_interval = iso->irq_interval;
1106 if (recv->block_irq_interval * 4 > iso->buf_packets)
1107 recv->block_irq_interval = iso->buf_packets / 4;
1108 if (recv->block_irq_interval < 1)
1109 recv->block_irq_interval = 1;
1110
1111 /* choose a buffer stride */
1112 /* must be a power of 2, and <= PAGE_SIZE */
1113
1114 max_packet_size = iso->buf_size / iso->buf_packets;
1115
1116 for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1117 recv->buf_stride *= 2);
1118
1119 if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1120 recv->buf_stride > PAGE_SIZE) {
1121 /* this shouldn't happen, but anyway... */
1122 DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
1123 goto err;
1124 }
1125 }
1126
1127 recv->block_reader = 0;
1128 recv->released_bytes = 0;
1129 recv->block_dma = 0;
1130 recv->dma_offset = 0;
1131
1132 /* size of DMA program = one descriptor per block */
1133 if (dma_prog_region_alloc(&recv->prog,
1134 sizeof(struct dma_cmd) * recv->nblocks,
1135 recv->ohci->dev))
1136 goto err;
1137
1138 recv->block = (struct dma_cmd*) recv->prog.kvirt;
1139
1140 ohci1394_init_iso_tasklet(&recv->task,
1141 iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1142 OHCI_ISO_RECEIVE,
1143 ohci_iso_recv_task, (unsigned long) iso);
1144
1145 if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
1146 ret = -EBUSY;
1147 goto err;
1148 }
1149
1150 recv->task_active = 1;
1151
1152 /* recv context registers are spaced 32 bytes apart */
1153 ctx = recv->task.context;
1154 recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1155 recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1156 recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1157 recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1158
1159 if (iso->channel == -1) {
1160 /* clear multi-channel selection mask */
1161 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1162 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1163 }
1164
1165 /* write the DMA program */
1166 ohci_iso_recv_program(iso);
1167
1168 DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1169 " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1170 recv->dma_mode == BUFFER_FILL_MODE ?
1171 "buffer-fill" : "packet-per-buffer",
1172 iso->buf_size/PAGE_SIZE, iso->buf_size,
1173 recv->nblocks, recv->buf_stride, recv->block_irq_interval);
1174
1175 return 0;
1176
1177err:
1178 ohci_iso_recv_shutdown(iso);
1179 return ret;
1180}
1181
1182static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1183{
1184 struct ohci_iso_recv *recv = iso->hostdata;
1185
1186 /* disable interrupts */
1187 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1188
1189 /* halt DMA */
1190 ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1191}
1192
1193static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1194{
1195 struct ohci_iso_recv *recv = iso->hostdata;
1196
1197 if (recv->task_active) {
1198 ohci_iso_recv_stop(iso);
1199 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1200 recv->task_active = 0;
1201 }
1202
1203 dma_prog_region_free(&recv->prog);
1204 kfree(recv);
1205 iso->hostdata = NULL;
1206}
1207
1208/* set up a "gapped" ring buffer DMA program */
1209static void ohci_iso_recv_program(struct hpsb_iso *iso)
1210{
1211 struct ohci_iso_recv *recv = iso->hostdata;
1212 int blk;
1213
1214 /* address of 'branch' field in previous DMA descriptor */
1215 u32 *prev_branch = NULL;
1216
1217 for (blk = 0; blk < recv->nblocks; blk++) {
1218 u32 control;
1219
1220 /* the DMA descriptor */
1221 struct dma_cmd *cmd = &recv->block[blk];
1222
1223 /* offset of the DMA descriptor relative to the DMA prog buffer */
1224 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1225
1226 /* offset of this packet's data within the DMA buffer */
1227 unsigned long buf_offset = blk * recv->buf_stride;
1228
1229 if (recv->dma_mode == BUFFER_FILL_MODE) {
1230 control = 2 << 28; /* INPUT_MORE */
1231 } else {
1232 control = 3 << 28; /* INPUT_LAST */
1233 }
1234
1235 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1236
1237 /* interrupt on last block, and at intervals */
1238 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1239 control |= 3 << 20; /* want interrupt */
1240 }
1241
1242 control |= 3 << 18; /* enable branch to address */
1243 control |= recv->buf_stride;
1244
1245 cmd->control = cpu_to_le32(control);
1246 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1247 cmd->branchAddress = 0; /* filled in on next loop */
1248 cmd->status = cpu_to_le32(recv->buf_stride);
1249
1250 /* link the previous descriptor to this one */
1251 if (prev_branch) {
1252 *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1253 }
1254
1255 prev_branch = &cmd->branchAddress;
1256 }
1257
1258 /* the final descriptor's branch address and Z should be left at 0 */
1259}
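
/*
 * The "| 1" OR'ed into the branch addresses above is the Z value: every
 * block of this program is a single 16-byte descriptor, so each branch
 * announces exactly one descriptor to fetch next.  Leaving the final
 * branchAddress at 0 (Z = 0) makes the context stop there until the
 * reader releases blocks and ohci_iso_recv_release_block() re-links
 * the ring.
 */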
1260
1261/* listen or unlisten to a specific channel (multi-channel mode only) */
1262static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1263{
1264 struct ohci_iso_recv *recv = iso->hostdata;
1265 int reg, i;
1266
1267 if (channel < 32) {
1268 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1269 i = channel;
1270 } else {
1271 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1272 i = channel - 32;
1273 }
1274
1275 reg_write(recv->ohci, reg, (1 << i));
1276
1277 /* issue a dummy read to force all PCI writes to be posted immediately */
1278 mb();
1279 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1280}
1281
1282static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1283{
1284 struct ohci_iso_recv *recv = iso->hostdata;
1285 int i;
1286
1287 for (i = 0; i < 64; i++) {
1288 if (mask & (1ULL << i)) {
1289 if (i < 32)
1290 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1291 else
1292 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1293 } else {
1294 if (i < 32)
1295 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1296 else
1297 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1298 }
1299 }
1300
1301 /* issue a dummy read to force all PCI writes to be posted immediately */
1302 mb();
1303 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1304}
1305
1306static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
1307{
1308 struct ohci_iso_recv *recv = iso->hostdata;
1309 struct ti_ohci *ohci = recv->ohci;
1310 u32 command, contextMatch;
1311
1312 reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
1313 wmb();
1314
1315 /* always keep ISO headers */
1316 command = (1 << 30);
1317
1318 if (recv->dma_mode == BUFFER_FILL_MODE)
1319 command |= (1 << 31);
1320
1321 reg_write(recv->ohci, recv->ContextControlSet, command);
1322
1323 /* match on specified tags */
1324 contextMatch = tag_mask << 28;
1325
1326 if (iso->channel == -1) {
1327 /* enable multichannel reception */
1328 reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
1329 } else {
1330 /* listen on channel */
1331 contextMatch |= iso->channel;
1332 }
1333
1334 if (cycle != -1) {
1335 u32 seconds;
1336
1337 /* enable cycleMatch */
1338 reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
1339
1340 /* set starting cycle */
1341 cycle &= 0x1FFF;
1342
1343 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1344 just snarf them from the current time */
1345 seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1346
1347 /* advance one second to give some extra time for DMA to start */
1348 seconds += 1;
1349
1350 cycle |= (seconds & 3) << 13;
1351
1352 contextMatch |= cycle << 12;
1353 }
1354
1355 if (sync != -1) {
1356 /* set sync flag on first DMA descriptor */
1357 struct dma_cmd *cmd = &recv->block[recv->block_dma];
1358 cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
1359
1360 /* match sync field */
1361 contextMatch |= (sync&0xf)<<8;
1362 }
1363
1364 reg_write(recv->ohci, recv->ContextMatch, contextMatch);
1365
1366 /* address of first descriptor block */
1367 command = dma_prog_region_offset_to_bus(&recv->prog,
1368 recv->block_dma * sizeof(struct dma_cmd));
1369 command |= 1; /* Z=1 */
1370
1371 reg_write(recv->ohci, recv->CommandPtr, command);
1372
1373 /* enable interrupts */
1374 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
1375
1376 wmb();
1377
1378 /* run */
1379 reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
1380
1381 /* issue a dummy read of the cycle timer register to force
1382 all PCI writes to be posted immediately */
1383 mb();
1384 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1385
1386 /* check RUN */
1387 if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
1388 PRINT(KERN_ERR,
1389 "Error starting IR DMA (ContextControl 0x%08x)\n",
1390 reg_read(recv->ohci, recv->ContextControlSet));
1391 return -1;
1392 }
1393
1394 return 0;
1395}
1396
1397static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1398{
1399	/* re-use the DMA descriptor for the block
1400	   by linking the previous descriptor to it */
1401
1402 int next_i = block;
1403 int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1404
1405 struct dma_cmd *next = &recv->block[next_i];
1406 struct dma_cmd *prev = &recv->block[prev_i];
1407
1408 /* ignore out-of-range requests */
1409	if ((block < 0) || (block >= recv->nblocks))
1410 return;
1411
1412 /* 'next' becomes the new end of the DMA chain,
1413 so disable branch and enable interrupt */
1414 next->branchAddress = 0;
1415 next->control |= cpu_to_le32(3 << 20);
1416 next->status = cpu_to_le32(recv->buf_stride);
1417
1418 /* link prev to next */
1419 prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1420 sizeof(struct dma_cmd) * next_i)
1421 | 1); /* Z=1 */
1422
1423 /* disable interrupt on previous DMA descriptor, except at intervals */
1424 if ((prev_i % recv->block_irq_interval) == 0) {
1425 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1426 } else {
1427 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1428 }
1429 wmb();
1430
1431 /* wake up DMA in case it fell asleep */
1432 reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1433}
1434
1435static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1436 struct hpsb_iso_packet_info *info)
1437{
1438 /* release the memory where the packet was */
1439 recv->released_bytes += info->total_len;
1440
1441 /* have we released enough memory for one block? */
1442 while (recv->released_bytes > recv->buf_stride) {
1443 ohci_iso_recv_release_block(recv, recv->block_reader);
1444 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1445 recv->released_bytes -= recv->buf_stride;
1446 }
1447}
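
/*
 * Example of the accounting above, with buf_stride = 4096: releasing
 * three packets of total_len 1500 accumulates 4500 released bytes by the
 * third call, so one block is handed back to the DMA ring and
 * released_bytes drops to 404.
 */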
1448
1449static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1450{
1451 struct ohci_iso_recv *recv = iso->hostdata;
1452 if (recv->dma_mode == BUFFER_FILL_MODE) {
1453 ohci_iso_recv_bufferfill_release(recv, info);
1454 } else {
1455 ohci_iso_recv_release_block(recv, info - iso->infos);
1456 }
1457}
1458
1459/* parse all packets from blocks that have been fully received */
1460static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1461{
1462 int wake = 0;
1463 int runaway = 0;
1464 struct ti_ohci *ohci = recv->ohci;
1465
1466 while (1) {
1467 /* we expect the next parsable packet to begin at recv->dma_offset */
1468 /* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
1469
1470 unsigned int offset;
1471 unsigned short len, cycle, total_len;
1472 unsigned char channel, tag, sy;
1473
1474 unsigned char *p = iso->data_buf.kvirt;
1475
1476 unsigned int this_block = recv->dma_offset/recv->buf_stride;
1477
1478 /* don't loop indefinitely */
1479 if (runaway++ > 100000) {
1480 atomic_inc(&iso->overflows);
1481 PRINT(KERN_ERR,
1482 "IR DMA error - Runaway during buffer parsing!\n");
1483 break;
1484 }
1485
1486 /* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
1487 if (this_block == recv->block_dma)
1488 break;
1489
1490 wake = 1;
1491
1492 /* parse data length, tag, channel, and sy */
1493
1494 /* note: we keep our own local copies of 'len' and 'offset'
1495 so the user can't mess with them by poking in the mmap area */
1496
1497 len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
1498
1499 if (len > 4096) {
1500 PRINT(KERN_ERR,
1501 "IR DMA error - bogus 'len' value %u\n", len);
1502 }
1503
1504 channel = p[recv->dma_offset+1] & 0x3F;
1505 tag = p[recv->dma_offset+1] >> 6;
1506 sy = p[recv->dma_offset+0] & 0xF;
1507
1508 /* advance to data payload */
1509 recv->dma_offset += 4;
1510
1511 /* check for wrap-around */
1512 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1513 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1514 }
1515
1516 /* dma_offset now points to the first byte of the data payload */
1517 offset = recv->dma_offset;
1518
1519 /* advance to xferStatus/timeStamp */
1520 recv->dma_offset += len;
1521
1522 total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
1523 /* payload is padded to 4 bytes */
1524 if (len % 4) {
1525 recv->dma_offset += 4 - (len%4);
1526 total_len += 4 - (len%4);
1527 }
1528
1529 /* check for wrap-around */
1530 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1531 /* uh oh, the packet data wraps from the last
1532 to the first DMA block - make the packet
1533 contiguous by copying its "tail" into the
1534 guard page */
1535
1536 int guard_off = recv->buf_stride*recv->nblocks;
1537 int tail_len = len - (guard_off - offset);
1538
1539 if (tail_len > 0 && tail_len < recv->buf_stride) {
1540 memcpy(iso->data_buf.kvirt + guard_off,
1541 iso->data_buf.kvirt,
1542 tail_len);
1543 }
1544
1545 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1546 }
1547
1548 /* parse timestamp */
1549 cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
1550 cycle &= 0x1FFF;
1551
1552 /* advance to next packet */
1553 recv->dma_offset += 4;
1554
1555 /* check for wrap-around */
1556 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1557 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1558 }
1559
1560 hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
1561 }
1562
1563 if (wake)
1564 hpsb_iso_wake(iso);
1565}
1566
1567static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1568{
1569 int loop;
1570 struct ti_ohci *ohci = recv->ohci;
1571
1572 /* loop over all blocks */
1573 for (loop = 0; loop < recv->nblocks; loop++) {
1574
1575 /* check block_dma to see if it's done */
1576 struct dma_cmd *im = &recv->block[recv->block_dma];
1577
1578 /* check the DMA descriptor for new writes to xferStatus */
1579 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1580
1581 /* rescount is the number of bytes *remaining to be written* in the block */
1582 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1583
1584 unsigned char event = xferstatus & 0x1F;
1585
1586 if (!event) {
1587 /* nothing has happened to this block yet */
1588 break;
1589 }
1590
1591 if (event != 0x11) {
1592 atomic_inc(&iso->overflows);
1593 PRINT(KERN_ERR,
1594 "IR DMA error - OHCI error code 0x%02x\n", event);
1595 }
1596
1597 if (rescount != 0) {
1598 /* the card is still writing to this block;
1599 we can't touch it until it's done */
1600 break;
1601 }
1602
1603 /* OK, the block is finished... */
1604
1605 /* sync our view of the block */
1606 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1607
1608 /* reset the DMA descriptor */
1609		im->status = cpu_to_le32(recv->buf_stride);
1610
1611 /* advance block_dma */
1612 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1613
1614 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1615 atomic_inc(&iso->overflows);
1616 DBGMSG("ISO reception overflow - "
1617 "ran out of DMA blocks");
1618 }
1619 }
1620
1621 /* parse any packets that have arrived */
1622 ohci_iso_recv_bufferfill_parse(iso, recv);
1623}
1624
1625static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1626{
1627 int count;
1628 int wake = 0;
1629 struct ti_ohci *ohci = recv->ohci;
1630
1631 /* loop over the entire buffer */
1632 for (count = 0; count < recv->nblocks; count++) {
1633 u32 packet_len = 0;
1634
1635 /* pointer to the DMA descriptor */
1636 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1637
1638 /* check the DMA descriptor for new writes to xferStatus */
1639 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1640 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1641
1642 unsigned char event = xferstatus & 0x1F;
1643
1644 if (!event) {
1645 /* this packet hasn't come in yet; we are done for now */
1646 goto out;
1647 }
1648
1649 if (event == 0x11) {
1650 /* packet received successfully! */
1651
1652 /* rescount is the number of bytes *remaining* in the packet buffer,
1653 after the packet was written */
1654 packet_len = recv->buf_stride - rescount;
1655
1656 } else if (event == 0x02) {
1657 PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1658 } else if (event) {
1659 PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1660 }
1661
1662 /* sync our view of the buffer */
1663 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1664
1665 /* record the per-packet info */
1666 {
1667 /* iso header is 8 bytes ahead of the data payload */
1668 unsigned char *hdr;
1669
1670 unsigned int offset;
1671 unsigned short cycle;
1672 unsigned char channel, tag, sy;
1673
1674 offset = iso->pkt_dma * recv->buf_stride;
1675 hdr = iso->data_buf.kvirt + offset;
1676
1677 /* skip iso header */
1678 offset += 8;
1679 packet_len -= 8;
1680
1681 cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1682 channel = hdr[5] & 0x3F;
1683 tag = hdr[5] >> 6;
1684 sy = hdr[4] & 0xF;
1685
1686 hpsb_iso_packet_received(iso, offset, packet_len,
1687 recv->buf_stride, cycle, channel, tag, sy);
1688 }
1689
1690 /* reset the DMA descriptor */
1691		il->status = cpu_to_le32(recv->buf_stride);
1692
1693 wake = 1;
1694 recv->block_dma = iso->pkt_dma;
1695 }
1696
1697out:
1698 if (wake)
1699 hpsb_iso_wake(iso);
1700}
1701
1702static void ohci_iso_recv_task(unsigned long data)
1703{
1704 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1705 struct ohci_iso_recv *recv = iso->hostdata;
1706
1707 if (recv->dma_mode == BUFFER_FILL_MODE)
1708 ohci_iso_recv_bufferfill_task(iso, recv);
1709 else
1710 ohci_iso_recv_packetperbuf_task(iso, recv);
1711}
1712
1713/***********************************
1714 * rawiso ISO transmission *
1715 ***********************************/
1716
1717struct ohci_iso_xmit {
1718 struct ti_ohci *ohci;
1719 struct dma_prog_region prog;
1720 struct ohci1394_iso_tasklet task;
1721 int task_active;
1722 int last_cycle;
1723 atomic_t skips;
1724
1725 u32 ContextControlSet;
1726 u32 ContextControlClear;
1727 u32 CommandPtr;
1728};
1729
1730/* transmission DMA program:
1731 one OUTPUT_MORE_IMMEDIATE for the IT header
1732 one OUTPUT_LAST for the buffer data */
1733
1734struct iso_xmit_cmd {
1735 struct dma_cmd output_more_immediate;
1736 u8 iso_hdr[8];
1737 u32 unused[2];
1738 struct dma_cmd output_last;
1739};
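
/*
 * Each iso_xmit_cmd is 48 bytes, i.e. three 16-byte descriptor slots:
 * the OUTPUT_MORE_IMMEDIATE, its 16 bytes of immediate data (the 8-byte
 * iso header plus padding), and the OUTPUT_LAST.  That is why branches
 * into this program carry Z = 3 below.
 */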
1740
1741static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1742static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1743static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1744static void ohci_iso_xmit_task(unsigned long data);
1745
1746static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1747{
1748 struct ohci_iso_xmit *xmit;
1749 unsigned int prog_size;
1750 int ctx;
1751 int ret = -ENOMEM;
1752
1753 xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
1754 if (!xmit)
1755 return -ENOMEM;
1756
1757 iso->hostdata = xmit;
1758 xmit->ohci = iso->host->hostdata;
1759 xmit->task_active = 0;
1760 xmit->last_cycle = -1;
1761 atomic_set(&iso->skips, 0);
1762
1763 dma_prog_region_init(&xmit->prog);
1764
1765 prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1766
1767 if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1768 goto err;
1769
1770 ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1771 ohci_iso_xmit_task, (unsigned long) iso);
1772
1773 if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
1774 ret = -EBUSY;
1775 goto err;
1776 }
1777
1778 xmit->task_active = 1;
1779
1780 /* xmit context registers are spaced 16 bytes apart */
1781 ctx = xmit->task.context;
1782 xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1783 xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1784 xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1785
1786 return 0;
1787
1788err:
1789 ohci_iso_xmit_shutdown(iso);
1790 return ret;
1791}
1792
1793static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1794{
1795 struct ohci_iso_xmit *xmit = iso->hostdata;
1796 struct ti_ohci *ohci = xmit->ohci;
1797
1798 /* disable interrupts */
1799 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1800
1801 /* halt DMA */
1802 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1803 /* XXX the DMA context will lock up if you try to send too much data! */
1804 PRINT(KERN_ERR,
1805 "you probably exceeded the OHCI card's bandwidth limit - "
1806 "reload the module and reduce xmit bandwidth");
1807 }
1808}
1809
1810static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1811{
1812 struct ohci_iso_xmit *xmit = iso->hostdata;
1813
1814 if (xmit->task_active) {
1815 ohci_iso_xmit_stop(iso);
1816 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1817 xmit->task_active = 0;
1818 }
1819
1820 dma_prog_region_free(&xmit->prog);
1821 kfree(xmit);
1822 iso->hostdata = NULL;
1823}
1824
1825static void ohci_iso_xmit_task(unsigned long data)
1826{
1827 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1828 struct ohci_iso_xmit *xmit = iso->hostdata;
1829 struct ti_ohci *ohci = xmit->ohci;
1830 int wake = 0;
1831 int count;
1832
1833 /* check the whole buffer if necessary, starting at pkt_dma */
1834 for (count = 0; count < iso->buf_packets; count++) {
1835 int cycle;
1836
1837 /* DMA descriptor */
1838 struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
1839
1840 /* check for new writes to xferStatus */
1841 u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
1842 u8 event = xferstatus & 0x1F;
1843
1844 if (!event) {
1845 /* packet hasn't been sent yet; we are done for now */
1846 break;
1847 }
1848
1849 if (event != 0x11)
1850 PRINT(KERN_ERR,
1851 "IT DMA error - OHCI error code 0x%02x\n", event);
1852
1853 /* at least one packet went out, so wake up the writer */
1854 wake = 1;
1855
1856 /* parse cycle */
1857 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
1858
1859 if (xmit->last_cycle > -1) {
1860 int cycle_diff = cycle - xmit->last_cycle;
1861 int skip;
1862
1863 /* unwrap */
1864 if (cycle_diff < 0) {
1865 cycle_diff += 8000;
1866 if (cycle_diff < 0)
1867 PRINT(KERN_ERR, "bogus cycle diff %d\n",
1868 cycle_diff);
1869 }
1870
1871 skip = cycle_diff - 1;
1872 if (skip > 0) {
1873 DBGMSG("skipped %d cycles without packet loss", skip);
1874 atomic_add(skip, &iso->skips);
1875 }
1876 }
1877 xmit->last_cycle = cycle;
1878
1879 /* tell the subsystem the packet has gone out */
1880 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
1881
1882 /* reset the DMA descriptor for next time */
1883 cmd->output_last.status = 0;
1884 }
1885
1886 if (wake)
1887 hpsb_iso_wake(iso);
1888}
1889
1890static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1891{
1892 struct ohci_iso_xmit *xmit = iso->hostdata;
1893 struct ti_ohci *ohci = xmit->ohci;
1894
1895 int next_i, prev_i;
1896 struct iso_xmit_cmd *next, *prev;
1897
1898 unsigned int offset;
1899 unsigned short len;
1900 unsigned char tag, sy;
1901
1902 /* check that the packet doesn't cross a page boundary
1903 (we could allow this if we added OUTPUT_MORE descriptor support) */
1904 if (cross_bound(info->offset, info->len)) {
1905 PRINT(KERN_ERR,
1906 "rawiso xmit: packet %u crosses a page boundary",
1907 iso->first_packet);
1908 return -EINVAL;
1909 }
1910
1911 offset = info->offset;
1912 len = info->len;
1913 tag = info->tag;
1914 sy = info->sy;
1915
1916 /* sync up the card's view of the buffer */
1917 dma_region_sync_for_device(&iso->data_buf, offset, len);
1918
1919	/* append first_packet to the DMA chain
1920	   by linking the previous descriptor to it
1921	   (next will become the new end of the DMA chain) */
1922
1923 next_i = iso->first_packet;
1924 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
1925
1926 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
1927 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
1928
1929 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
1930 memset(next, 0, sizeof(struct iso_xmit_cmd));
1931 next->output_more_immediate.control = cpu_to_le32(0x02000008);
1932
1933 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
1934
1935 /* tcode = 0xA, and sy */
1936 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
1937
1938 /* tag and channel number */
1939 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
1940
1941 /* transmission speed */
1942 next->iso_hdr[2] = iso->speed & 0x7;
1943
1944 /* payload size */
1945 next->iso_hdr[6] = len & 0xFF;
1946 next->iso_hdr[7] = len >> 8;
1947
1948 /* set up the OUTPUT_LAST */
1949 next->output_last.control = cpu_to_le32(1 << 28);
1950 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
1951 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
1952 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
1953 next->output_last.control |= cpu_to_le32(len);
1954
1955 /* payload bus address */
1956 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
1957
1958 /* leave branchAddress at zero for now */
1959
1960 /* re-write the previous DMA descriptor to chain to this one */
1961
1962 /* set prev branch address to point to next (Z=3) */
1963 prev->output_last.branchAddress = cpu_to_le32(
1964 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
1965
1966 /*
1967 * Link the skip address to this descriptor itself. This causes a
1968 * context to skip a cycle whenever lost cycles or FIFO overruns occur,
1969	 * without dropping the data at that point. The application should then
1970 * decide whether this is an error condition or not. Some protocols
1971 * can deal with this by dropping some rate-matching padding packets.
1972 */
1973 next->output_more_immediate.branchAddress =
1974 prev->output_last.branchAddress;
1975
1976 /* disable interrupt, unless required by the IRQ interval */
1977 if (prev_i % iso->irq_interval) {
1978 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
1979 } else {
1980 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
1981 }
1982
1983 wmb();
1984
1985 /* wake DMA in case it is sleeping */
1986 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
1987
1988 /* issue a dummy read of the cycle timer to force all PCI
1989 writes to be posted immediately */
1990 mb();
1991 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
1992
1993 return 0;
1994}
1995
1996static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
1997{
1998 struct ohci_iso_xmit *xmit = iso->hostdata;
1999 struct ti_ohci *ohci = xmit->ohci;
2000
2001 /* clear out the control register */
2002 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2003 wmb();
2004
2005 /* address and length of first descriptor block (Z=3) */
2006 reg_write(xmit->ohci, xmit->CommandPtr,
2007 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2008
2009 /* cycle match */
2010 if (cycle != -1) {
2011 u32 start = cycle & 0x1FFF;
2012
2013 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2014 just snarf them from the current time */
2015 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2016
2017 /* advance one second to give some extra time for DMA to start */
2018 seconds += 1;
2019
2020 start |= (seconds & 3) << 13;
2021
2022 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2023 }
2024
2025 /* enable interrupts */
2026 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2027
2028 /* run */
2029 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2030 mb();
2031
2032 /* wait 100 usec to give the card time to go active */
2033 udelay(100);
2034
2035 /* check the RUN bit */
2036 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2037 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2038 reg_read(xmit->ohci, xmit->ContextControlSet));
2039 return -1;
2040 }
2041
2042 return 0;
2043}
2044
2045static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2046{
2047
2048 switch(cmd) {
2049 case XMIT_INIT:
2050 return ohci_iso_xmit_init(iso);
2051 case XMIT_START:
2052 return ohci_iso_xmit_start(iso, arg);
2053 case XMIT_STOP:
2054 ohci_iso_xmit_stop(iso);
2055 return 0;
2056 case XMIT_QUEUE:
2057 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2058 case XMIT_SHUTDOWN:
2059 ohci_iso_xmit_shutdown(iso);
2060 return 0;
2061
2062 case RECV_INIT:
2063 return ohci_iso_recv_init(iso);
2064 case RECV_START: {
2065 int *args = (int*) arg;
2066 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2067 }
2068 case RECV_STOP:
2069 ohci_iso_recv_stop(iso);
2070 return 0;
2071 case RECV_RELEASE:
2072 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2073 return 0;
2074 case RECV_FLUSH:
2075 ohci_iso_recv_task((unsigned long) iso);
2076 return 0;
2077 case RECV_SHUTDOWN:
2078 ohci_iso_recv_shutdown(iso);
2079 return 0;
2080 case RECV_LISTEN_CHANNEL:
2081 ohci_iso_recv_change_channel(iso, arg, 1);
2082 return 0;
2083 case RECV_UNLISTEN_CHANNEL:
2084 ohci_iso_recv_change_channel(iso, arg, 0);
2085 return 0;
2086 case RECV_SET_CHANNEL_MASK:
2087 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2088 return 0;
2089
2090 default:
2091 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2092 cmd);
2093 break;
2094 }
2095 return -EINVAL;
2096}
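
/*
 * Sketch of how the rawiso core is expected to drive this multiplexer
 * (illustrative only; the real callers live in the ieee1394 core):
 *
 *	int args[3] = { cycle, tag_mask, sync };
 *
 *	host->driver->isoctl(iso, RECV_INIT, 0);
 *	host->driver->isoctl(iso, RECV_START, (unsigned long) &args[0]);
 *	...
 *	host->driver->isoctl(iso, RECV_SHUTDOWN, 0);
 */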
2097
2098/***************************************
2099 * IEEE-1394 functionality section END *
2100 ***************************************/
2101
2102
2103/********************************************************
2104 * Global stuff (interrupt handler, init/shutdown code) *
2105 ********************************************************/
2106
2107static void dma_trm_reset(struct dma_trm_ctx *d)
2108{
2109 unsigned long flags;
2110 LIST_HEAD(packet_list);
2111 struct ti_ohci *ohci = d->ohci;
2112 struct hpsb_packet *packet, *ptmp;
2113
2114 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2115
2116 /* Lock the context, reset it and release it. Move the packets
2117 * that were pending in the context to packet_list and free
2118 * them after releasing the lock. */
2119
2120 spin_lock_irqsave(&d->lock, flags);
2121
2122 list_splice_init(&d->fifo_list, &packet_list);
2123 list_splice_init(&d->pending_list, &packet_list);
2124
2125 d->branchAddrPtr = NULL;
2126 d->sent_ind = d->prg_ind;
2127 d->free_prgs = d->num_desc;
2128
2129 spin_unlock_irqrestore(&d->lock, flags);
2130
2131 if (list_empty(&packet_list))
2132 return;
2133
2134 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2135
2136 /* Now process subsystem callbacks for the packets from this
2137 * context. */
2138 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2139 list_del_init(&packet->driver_list);
2140 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2141 }
2142}
2143
2144static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2145 quadlet_t rx_event,
2146 quadlet_t tx_event)
2147{
2148 struct ohci1394_iso_tasklet *t;
2149 unsigned long mask;
2150 unsigned long flags;
2151
2152 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2153
2154 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2155 mask = 1 << t->context;
2156
2157 if (t->type == OHCI_ISO_TRANSMIT) {
2158 if (tx_event & mask)
2159 tasklet_schedule(&t->tasklet);
2160 } else {
2161 /* OHCI_ISO_RECEIVE or OHCI_ISO_MULTICHANNEL_RECEIVE */
2162 if (rx_event & mask)
2163 tasklet_schedule(&t->tasklet);
2164 }
2165 }
2166
2167 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2168}
2169
2170static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
2171{
2172 quadlet_t event, node_id;
2173 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2174 struct hpsb_host *host = ohci->host;
2175 int phyid = -1, isroot = 0;
2176 unsigned long flags;
2177
2178 /* Read and clear the interrupt event register. Don't clear
2179 * the busReset event, though. This is done when we get the
2180 * selfIDComplete interrupt. */
2181 spin_lock_irqsave(&ohci->event_lock, flags);
2182 event = reg_read(ohci, OHCI1394_IntEventClear);
2183 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2184 spin_unlock_irqrestore(&ohci->event_lock, flags);
2185
2186 if (!event)
2187 return IRQ_NONE;
2188
2189	/* If event is ~(u32)0, the CardBus card was ejected. In this
2190	 * case we just return and clean up in the ohci1394_pci_remove
2191 * function. */
2192 if (event == ~(u32) 0) {
2193 DBGMSG("Device removed.");
2194 return IRQ_NONE;
2195 }
2196
2197 DBGMSG("IntEvent: %08x", event);
2198
2199 if (event & OHCI1394_unrecoverableError) {
2200 int ctx;
2201 PRINT(KERN_ERR, "Unrecoverable error!");
2202
2203 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2204 PRINT(KERN_ERR, "Async Req Tx Context died: "
2205 "ctrl[%08x] cmdptr[%08x]",
2206 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2207 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2208
2209 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2210 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2211 "ctrl[%08x] cmdptr[%08x]",
2212 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2213 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2214
2215 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2216 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2217 "ctrl[%08x] cmdptr[%08x]",
2218 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2219 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2220
2221 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2222 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2223 "ctrl[%08x] cmdptr[%08x]",
2224 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2225 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2226
2227 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2228 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2229 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2230 "ctrl[%08x] cmdptr[%08x]", ctx,
2231 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2232 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2233 }
2234
2235 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2236 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2237 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2238 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2239 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2240 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2241 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2242 }
2243
2244 event &= ~OHCI1394_unrecoverableError;
2245 }
2246 if (event & OHCI1394_postedWriteErr) {
2247 PRINT(KERN_ERR, "physical posted write error");
2248		/* no recovery strategy yet; it would have to involve protocol drivers */
2249 event &= ~OHCI1394_postedWriteErr;
2250 }
2251 if (event & OHCI1394_cycleTooLong) {
2252 if(printk_ratelimit())
2253 PRINT(KERN_WARNING, "isochronous cycle too long");
2254 else
2255 DBGMSG("OHCI1394_cycleTooLong");
2256 reg_write(ohci, OHCI1394_LinkControlSet,
2257 OHCI1394_LinkControl_CycleMaster);
2258 event &= ~OHCI1394_cycleTooLong;
2259 }
2260 if (event & OHCI1394_cycleInconsistent) {
2261 /* We subscribe to the cycleInconsistent event only to
2262 * clear the corresponding event bit... otherwise,
2263 * isochronous cycleMatch DMA won't work. */
2264 DBGMSG("OHCI1394_cycleInconsistent");
2265 event &= ~OHCI1394_cycleInconsistent;
2266 }
2267 if (event & OHCI1394_busReset) {
2268 /* The busReset event bit can't be cleared during the
2269 * selfID phase, so we disable busReset interrupts, to
2270 * avoid burying the cpu in interrupt requests. */
2271 spin_lock_irqsave(&ohci->event_lock, flags);
2272 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2273
2274 if (ohci->check_busreset) {
2275 int loop_count = 0;
2276
2277 udelay(10);
2278
2279 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2280 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2281
2282 spin_unlock_irqrestore(&ohci->event_lock, flags);
2283 udelay(10);
2284 spin_lock_irqsave(&ohci->event_lock, flags);
2285
2286 /* The loop counter check is to prevent the driver
2287 * from remaining in this state forever. For the
2288				 * initial bus reset, the loop would otherwise continue forever
2289				 * and hang the system until some device is manually plugged
2290				 * into or out of a port! The forced reset seems to solve
2291				 * this problem. This mainly affects nForce2. */
2292 if (loop_count > 10000) {
2293 ohci_devctl(host, RESET_BUS, LONG_RESET);
2294 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2295 loop_count = 0;
2296 }
2297
2298 loop_count++;
2299 }
2300 }
2301 spin_unlock_irqrestore(&ohci->event_lock, flags);
2302 if (!host->in_bus_reset) {
2303 DBGMSG("irq_handler: Bus reset requested");
2304
2305 /* Subsystem call */
2306 hpsb_bus_reset(ohci->host);
2307 }
2308 event &= ~OHCI1394_busReset;
2309 }
2310 if (event & OHCI1394_reqTxComplete) {
2311 struct dma_trm_ctx *d = &ohci->at_req_context;
2312 DBGMSG("Got reqTxComplete interrupt "
2313 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2314 if (reg_read(ohci, d->ctrlSet) & 0x800)
2315 ohci1394_stop_context(ohci, d->ctrlClear,
2316 "reqTxComplete");
2317 else
2318 dma_trm_tasklet((unsigned long)d);
2319			/* direct call instead of tasklet_schedule(&d->task) */
2320 event &= ~OHCI1394_reqTxComplete;
2321 }
2322 if (event & OHCI1394_respTxComplete) {
2323 struct dma_trm_ctx *d = &ohci->at_resp_context;
2324 DBGMSG("Got respTxComplete interrupt "
2325 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2326 if (reg_read(ohci, d->ctrlSet) & 0x800)
2327 ohci1394_stop_context(ohci, d->ctrlClear,
2328 "respTxComplete");
2329 else
2330 tasklet_schedule(&d->task);
2331 event &= ~OHCI1394_respTxComplete;
2332 }
2333 if (event & OHCI1394_RQPkt) {
2334 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2335 DBGMSG("Got RQPkt interrupt status=0x%08X",
2336 reg_read(ohci, d->ctrlSet));
2337 if (reg_read(ohci, d->ctrlSet) & 0x800)
2338 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2339 else
2340 tasklet_schedule(&d->task);
2341 event &= ~OHCI1394_RQPkt;
2342 }
2343 if (event & OHCI1394_RSPkt) {
2344 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2345 DBGMSG("Got RSPkt interrupt status=0x%08X",
2346 reg_read(ohci, d->ctrlSet));
2347 if (reg_read(ohci, d->ctrlSet) & 0x800)
2348 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2349 else
2350 tasklet_schedule(&d->task);
2351 event &= ~OHCI1394_RSPkt;
2352 }
2353 if (event & OHCI1394_isochRx) {
2354 quadlet_t rx_event;
2355
2356 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2357 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2358 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2359 event &= ~OHCI1394_isochRx;
2360 }
2361 if (event & OHCI1394_isochTx) {
2362 quadlet_t tx_event;
2363
2364 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2365 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2366 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2367 event &= ~OHCI1394_isochTx;
2368 }
2369 if (event & OHCI1394_selfIDComplete) {
2370 if (host->in_bus_reset) {
2371 node_id = reg_read(ohci, OHCI1394_NodeID);
2372
2373 if (!(node_id & 0x80000000)) {
2374 PRINT(KERN_ERR,
2375 "SelfID received, but NodeID invalid "
2376 "(probably new bus reset occurred): %08X",
2377 node_id);
2378 goto selfid_not_valid;
2379 }
2380
2381 phyid = node_id & 0x0000003f;
2382 isroot = (node_id & 0x40000000) != 0;
2383
2384 DBGMSG("SelfID interrupt received "
2385 "(phyid %d, %s)", phyid,
2386 (isroot ? "root" : "not root"));
2387
2388 handle_selfid(ohci, host, phyid, isroot);
2389
2390 /* Clear the bus reset event and re-enable the
2391 * busReset interrupt. */
2392 spin_lock_irqsave(&ohci->event_lock, flags);
2393 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2394 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2395 spin_unlock_irqrestore(&ohci->event_lock, flags);
2396
2397 /* Turn on phys dma reception.
2398 *
2399 * TODO: Enable some sort of filtering management.
2400 */
2401 if (phys_dma) {
2402 reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
2403 0xffffffff);
2404 reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
2405 0xffffffff);
2406 }
2407
2408 DBGMSG("PhyReqFilter=%08x%08x",
2409 reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
2410 reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
2411
2412 hpsb_selfid_complete(host, phyid, isroot);
2413 } else
2414 PRINT(KERN_ERR,
2415 "SelfID received outside of bus reset sequence");
2416
2417selfid_not_valid:
2418 event &= ~OHCI1394_selfIDComplete;
2419 }
2420
2421 /* Make sure we handle everything, just in case we accidentally
2422 * enabled an interrupt that we didn't write a handler for. */
2423 if (event)
2424 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2425 event);
2426
2427 return IRQ_HANDLED;
2428}
2429
2430/* Put the buffer back into the dma context */
2431static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2432{
2433 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2434 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2435
2436 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2437	d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
2438	idx = (idx + d->num_desc - 1) % d->num_desc;
2439	d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
2440
2441 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2442 * context program descriptors before it sees the wakeup bit set. */
2443 wmb();
2444
2445 /* wake up the dma context if necessary */
2446 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2447 PRINT(KERN_INFO,
2448 "Waking dma ctx=%d ... processing is probably too slow",
2449 d->ctx);
2450 }
2451
2452 /* do this always, to avoid race condition */
2453 reg_write(ohci, d->ctrlSet, 0x1000);
2454}
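
/*
 * The two mask operations above maintain the receive ring in place:
 * clearing the low nibble of the reinserted descriptor's branchAddress
 * (Z = 0) makes it the new end of the program, while OR'ing 1 into the
 * previous descriptor's branchAddress re-announces this block with
 * Z = 1 so the context can branch into it again.
 */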
2455
2456#define cond_le32_to_cpu(data, noswap) \
2457 (noswap ? data : le32_to_cpu(data))
2458
2459static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2460 -1, 0, -1, 0, -1, -1, 16, -1};
2461
2462/*
2463 * Determine the length of a packet in the buffer
2464 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2465 */
2466static inline int packet_length(struct dma_rcv_ctx *d, int idx,
2467 quadlet_t *buf_ptr, int offset,
2468 unsigned char tcode, int noswap)
2469{
2470 int length = -1;
2471
2472 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2473 length = TCODE_SIZE[tcode];
2474 if (length == 0) {
2475 if (offset + 12 >= d->buf_size) {
2476 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2477 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2478 } else {
2479 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2480 }
2481 length += 20;
2482 }
2483 } else if (d->type == DMA_CTX_ISO) {
2484 /* Assumption: buffer fill mode with header/trailer */
2485 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2486 }
2487
2488 if (length > 0 && length % 4)
2489 length += 4 - (length % 4);
2490
2491 return length;
2492}
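
/*
 * Example readings of TCODE_SIZE above: tcode 0 (quadlet write request)
 * is 12 bytes of header + 4 bytes of data + the 4-byte status trailer
 * appended by the controller = 20.  The block-form tcodes (1, 7, 9 and
 * 0xb) are listed as 0: their data_length is read from the upper half
 * of the fourth header quadlet, and 20 is added for the 16-byte header
 * plus trailer.
 */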
2493
2494/* Tasklet that processes dma receive buffers */
2495static void dma_rcv_tasklet (unsigned long data)
2496{
2497 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2498 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2499 unsigned int split_left, idx, offset, rescount;
2500 unsigned char tcode;
2501 int length, bytes_left, ack;
2502 unsigned long flags;
2503 quadlet_t *buf_ptr;
2504 char *split_ptr;
2505 char msg[256];
2506
2507 spin_lock_irqsave(&d->lock, flags);
2508
2509 idx = d->buf_ind;
2510 offset = d->buf_offset;
2511 buf_ptr = d->buf_cpu[idx] + offset/4;
2512
2513 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2514 bytes_left = d->buf_size - rescount - offset;
2515
2516 while (bytes_left > 0) {
2517 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2518
2519 /* packet_length() will return < 4 for an error */
2520 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2521
2522 if (length < 4) { /* something is wrong */
2523			snprintf(msg, sizeof(msg), "Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2524 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2525 d->ctx, length);
2526 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2527 spin_unlock_irqrestore(&d->lock, flags);
2528 return;
2529 }
2530
2531 /* The first case is where we have a packet that crosses
2532 * over more than one descriptor. The next case is where
2533 * it's all in the first descriptor. */
2534 if ((offset + length) > d->buf_size) {
2535 DBGMSG("Split packet rcv'd");
2536 if (length > d->split_buf_size) {
2537 ohci1394_stop_context(ohci, d->ctrlClear,
2538 "Split packet size exceeded");
2539 d->buf_ind = idx;
2540 d->buf_offset = offset;
2541 spin_unlock_irqrestore(&d->lock, flags);
2542 return;
2543 }
2544
2545 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2546 == d->buf_size) {
2547				/* The other part of the packet has not been
2548				 * written yet; this should never happen, but
2549				 * anyway we'll get it on the next call. */
2550 PRINT(KERN_INFO,
2551 "Got only half a packet!");
2552 d->buf_ind = idx;
2553 d->buf_offset = offset;
2554 spin_unlock_irqrestore(&d->lock, flags);
2555 return;
2556 }
2557
2558 split_left = length;
2559 split_ptr = (char *)d->spb;
2560 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2561 split_left -= d->buf_size-offset;
2562 split_ptr += d->buf_size-offset;
2563 insert_dma_buffer(d, idx);
2564 idx = (idx+1) % d->num_desc;
2565 buf_ptr = d->buf_cpu[idx];
2566 offset=0;
2567
2568 while (split_left >= d->buf_size) {
2569 memcpy(split_ptr,buf_ptr,d->buf_size);
2570 split_ptr += d->buf_size;
2571 split_left -= d->buf_size;
2572 insert_dma_buffer(d, idx);
2573 idx = (idx+1) % d->num_desc;
2574 buf_ptr = d->buf_cpu[idx];
2575 }
2576
2577 if (split_left > 0) {
2578 memcpy(split_ptr, buf_ptr, split_left);
2579 offset = split_left;
2580 buf_ptr += offset/4;
2581 }
2582 } else {
2583 DBGMSG("Single packet rcv'd");
2584 memcpy(d->spb, buf_ptr, length);
2585 offset += length;
2586 buf_ptr += length/4;
2587 if (offset==d->buf_size) {
2588 insert_dma_buffer(d, idx);
2589 idx = (idx+1) % d->num_desc;
2590 buf_ptr = d->buf_cpu[idx];
2591 offset=0;
2592 }
2593 }
2594
2595 /* We get one phy packet to the async descriptor for each
2596 * bus reset. We always ignore it. */
2597 if (tcode != OHCI1394_TCODE_PHY) {
2598 if (!ohci->no_swap_incoming)
2599 header_le32_to_cpu(d->spb, tcode);
2600 DBGMSG("Packet received from node"
2601 " %d ack=0x%02X spd=%d tcode=0x%X"
2602 " length=%d ctx=%d tlabel=%d",
2603 (d->spb[1]>>16)&0x3f,
2604 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2605 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2606 tcode, length, d->ctx,
2607 (d->spb[0]>>10)&0x3f);
2608
2609 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2610 == 0x11) ? 1 : 0;
2611
2612 hpsb_packet_received(ohci->host, d->spb,
2613 length-4, ack);
2614 }
2615#ifdef OHCI1394_DEBUG
2616 else
2617 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2618 d->ctx);
2619#endif
2620
2621 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2622
2623 bytes_left = d->buf_size - rescount - offset;
2624
2625 }
2626
2627 d->buf_ind = idx;
2628 d->buf_offset = offset;
2629
2630 spin_unlock_irqrestore(&d->lock, flags);
2631}
2632
2633/* Bottom half that processes sent packets */
2634static void dma_trm_tasklet (unsigned long data)
2635{
2636 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2637 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2638 struct hpsb_packet *packet, *ptmp;
2639 unsigned long flags;
2640 u32 status, ack;
2641 size_t datasize;
2642
2643 spin_lock_irqsave(&d->lock, flags);
2644
2645 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2646 datasize = packet->data_size;
2647 if (datasize && packet->type != hpsb_raw)
2648 status = le32_to_cpu(
2649 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2650 else
2651 status = le32_to_cpu(
2652 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2653
2654 if (status == 0)
2655 /* this packet hasn't been sent yet*/
2656 break;
2657
2658#ifdef OHCI1394_DEBUG
2659 if (datasize)
2660 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2661 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2662 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2663 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2664 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2665 status&0x1f, (status>>5)&0x3,
2666 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2667 d->ctx);
2668 else
2669 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2670 "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2671 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2672 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2673 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2674 status&0x1f, (status>>5)&0x3,
2675 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2676 d->ctx);
2677 else
2678 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2679 "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2680 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2681 >>16)&0x3f,
2682 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2683 >>4)&0xf,
2684 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2685 >>10)&0x3f,
2686 status&0x1f, (status>>5)&0x3,
2687 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2688 d->ctx);
2689#endif
2690
2691 if (status & 0x10) {
2692 ack = status & 0xf;
2693 } else {
2694 switch (status & 0x1f) {
2695 case EVT_NO_STATUS: /* that should never happen */
2696 case EVT_RESERVED_A: /* that should never happen */
2697 case EVT_LONG_PACKET: /* that should never happen */
2698 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2699 ack = ACKX_SEND_ERROR;
2700 break;
2701 case EVT_MISSING_ACK:
2702 ack = ACKX_TIMEOUT;
2703 break;
2704 case EVT_UNDERRUN:
2705 ack = ACKX_SEND_ERROR;
2706 break;
2707 case EVT_OVERRUN: /* that should never happen */
2708 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2709 ack = ACKX_SEND_ERROR;
2710 break;
2711 case EVT_DESCRIPTOR_READ:
2712 case EVT_DATA_READ:
2713 case EVT_DATA_WRITE:
2714 ack = ACKX_SEND_ERROR;
2715 break;
2716 case EVT_BUS_RESET: /* that should never happen */
2717 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2718 ack = ACKX_SEND_ERROR;
2719 break;
2720 case EVT_TIMEOUT:
2721 ack = ACKX_TIMEOUT;
2722 break;
2723 case EVT_TCODE_ERR:
2724 ack = ACKX_SEND_ERROR;
2725 break;
2726 case EVT_RESERVED_B: /* that should never happen */
2727 case EVT_RESERVED_C: /* that should never happen */
2728 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2729 ack = ACKX_SEND_ERROR;
2730 break;
2731 case EVT_UNKNOWN:
2732 case EVT_FLUSHED:
2733 ack = ACKX_SEND_ERROR;
2734 break;
2735 default:
2736 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2737 ack = ACKX_SEND_ERROR;
2738 BUG();
2739 }
2740 }
2741
2742 list_del_init(&packet->driver_list);
2743 hpsb_packet_sent(ohci->host, packet, ack);
2744
2745 if (datasize)
2746 pci_unmap_single(ohci->dev,
2747 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2748 datasize, PCI_DMA_TODEVICE);
2749
2750 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2751 d->free_prgs++;
2752 }
2753
2754 dma_trm_flush(ohci, d);
2755
2756 spin_unlock_irqrestore(&d->lock, flags);
2757}
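The ack derivation above follows the OHCI event encoding: status codes with bit 0x10 set carry a genuine IEEE 1394 ack in the low nibble, while the remaining codes are evt_* errors that get folded into the stack's ACKX_* pseudo-acks. Condensed to its essentials (the warnings and the BUG() on unhandled codes are omitted):

/* Same mapping as the switch above: timeouts become ACKX_TIMEOUT,
 * every other evt_* error becomes ACKX_SEND_ERROR. */
static int status_to_ack(u32 status)
{
	if (status & 0x10)		/* event encodes a real ack code */
		return status & 0xf;

	switch (status & 0x1f) {	/* otherwise an OHCI evt_* code */
	case EVT_MISSING_ACK:
	case EVT_TIMEOUT:
		return ACKX_TIMEOUT;
	default:
		return ACKX_SEND_ERROR;
	}
}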
2758
2759static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2760{
2761 int i;
2762 struct ti_ohci *ohci = d->ohci;
2763
2764 if (ohci == NULL)
2765 return;
2766
2767 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2768
2769 if (d->buf_cpu) {
2770 for (i=0; i<d->num_desc; i++)
2771 if (d->buf_cpu[i] && d->buf_bus[i])
2772 pci_free_consistent(
2773 ohci->dev, d->buf_size,
2774 d->buf_cpu[i], d->buf_bus[i]);
2775 kfree(d->buf_cpu);
2776 kfree(d->buf_bus);
2777 }
2778 if (d->prg_cpu) {
2779 for (i=0; i<d->num_desc; i++)
2780 if (d->prg_cpu[i] && d->prg_bus[i])
2781 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2782 d->prg_bus[i]);
2783 pci_pool_destroy(d->prg_pool);
2784 kfree(d->prg_cpu);
2785 kfree(d->prg_bus);
2786 }
2787 kfree(d->spb);
2788
2789 /* Mark this context as freed. */
2790 d->ohci = NULL;
2791}
2792
2793static int
2794alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2795 enum context_type type, int ctx, int num_desc,
2796 int buf_size, int split_buf_size, int context_base)
2797{
2798 int i, len;
2799 static int num_allocs;
2800 static char pool_name[20];
2801
2802 d->ohci = ohci;
2803 d->type = type;
2804 d->ctx = ctx;
2805
2806 d->num_desc = num_desc;
2807 d->buf_size = buf_size;
2808 d->split_buf_size = split_buf_size;
2809
2810 d->ctrlSet = 0;
2811 d->ctrlClear = 0;
2812 d->cmdPtr = 0;
2813
2814 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2815 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2816
2817 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2818 PRINT(KERN_ERR, "Failed to allocate %s", "DMA buffer");
2819 free_dma_rcv_ctx(d);
2820 return -ENOMEM;
2821 }
2822
2823 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2824 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2825
2826 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2827 PRINT(KERN_ERR, "Failed to allocate %s", "DMA prg");
2828 free_dma_rcv_ctx(d);
2829 return -ENOMEM;
2830 }
2831
2832 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2833
2834 if (d->spb == NULL) {
2835 PRINT(KERN_ERR, "Failed to allocate %s", "split buffer");
2836 free_dma_rcv_ctx(d);
2837 return -ENOMEM;
2838 }
2839
2840 len = sprintf(pool_name, "ohci1394_rcv_prg");
2841 sprintf(pool_name+len, "%d", num_allocs);
2842 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2843 sizeof(struct dma_cmd), 4, 0);
2844 if(d->prg_pool == NULL)
2845 {
2846 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2847 free_dma_rcv_ctx(d);
2848 return -ENOMEM;
2849 }
2850 num_allocs++;
2851
2852 for (i=0; i<d->num_desc; i++) {
2853 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2854 d->buf_size,
2855 d->buf_bus+i);
2856
2857 if (d->buf_cpu[i] != NULL) {
2858 memset(d->buf_cpu[i], 0, d->buf_size);
2859 } else {
2860 PRINT(KERN_ERR,
2861 "Failed to allocate %s", "DMA buffer");
2862 free_dma_rcv_ctx(d);
2863 return -ENOMEM;
2864 }
2865
2866 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2867
2868 if (d->prg_cpu[i] != NULL) {
2869 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2870 } else {
2871 PRINT(KERN_ERR,
2872 "Failed to allocate %s", "DMA prg");
2873 free_dma_rcv_ctx(d);
2874 return -ENOMEM;
2875 }
2876 }
2877
2878 spin_lock_init(&d->lock);
2879
2880 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2881 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2882 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2883
2884 tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long) d);
2885 return 0;
2886}
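Note the error-handling convention in alloc_dma_rcv_ctx(): all pointer arrays are kzalloc()ed up front and free_dma_rcv_ctx() skips NULL entries, so every failure path can bail out with a single call on the half-built context. The idiom stripped to a skeleton (names hypothetical):

#include <linux/slab.h>
#include <linux/errno.h>

struct ctx {
	void *a;
	void *b;
};

/* Safe on a partially built context: kfree(NULL) is a no-op. */
static void ctx_free(struct ctx *c)
{
	kfree(c->a);
	kfree(c->b);
}

static int ctx_alloc(struct ctx *c)
{
	c->a = kzalloc(64, GFP_KERNEL);
	c->b = kzalloc(64, GFP_KERNEL);
	if (!c->a || !c->b) {
		ctx_free(c);	/* frees whichever half succeeded */
		return -ENOMEM;
	}
	return 0;
}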
2887
2888static void free_dma_trm_ctx(struct dma_trm_ctx *d)
2889{
2890 int i;
2891 struct ti_ohci *ohci = d->ohci;
2892
2893 if (ohci == NULL)
2894 return;
2895
2896 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
2897
2898 if (d->prg_cpu) {
2899 for (i=0; i<d->num_desc; i++)
2900 if (d->prg_cpu[i] && d->prg_bus[i])
2901 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2902 d->prg_bus[i]);
2903 pci_pool_destroy(d->prg_pool);
2904 kfree(d->prg_cpu);
2905 kfree(d->prg_bus);
2906 }
2907
2908 /* Mark this context as freed. */
2909 d->ohci = NULL;
2910}
2911
2912static int
2913alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
2914 enum context_type type, int ctx, int num_desc,
2915 int context_base)
2916{
2917 int i, len;
2918 static char pool_name[20];
2919 static int num_allocs=0;
2920
2921 d->ohci = ohci;
2922 d->type = type;
2923 d->ctx = ctx;
2924 d->num_desc = num_desc;
2925 d->ctrlSet = 0;
2926 d->ctrlClear = 0;
2927 d->cmdPtr = 0;
2928
2929 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
2930 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
2931
2932 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2933 PRINT(KERN_ERR, "Failed to allocate %s", "AT DMA prg");
2934 free_dma_trm_ctx(d);
2935 return -ENOMEM;
2936 }
2937
2938 len = sprintf(pool_name, "ohci1394_trm_prg");
2939 sprintf(pool_name+len, "%d", num_allocs);
2940 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2941 sizeof(struct at_dma_prg), 4, 0);
2942 if (d->prg_pool == NULL) {
2943 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2944 free_dma_trm_ctx(d);
2945 return -ENOMEM;
2946 }
2947 num_allocs++;
2948
2949 for (i = 0; i < d->num_desc; i++) {
2950 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2951
2952 if (d->prg_cpu[i] != NULL) {
2953 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
2954 } else {
2955 PRINT(KERN_ERR,
2956 "Failed to allocate %s", "AT DMA prg");
2957 free_dma_trm_ctx(d);
2958 return -ENOMEM;
2959 }
2960 }
2961
2962 spin_lock_init(&d->lock);
2963
2964 /* initialize tasklet */
2965 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2966 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2967 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2968 tasklet_init(&d->task, dma_trm_tasklet, (unsigned long)d);
2969 return 0;
2970}
2971
2972static void ohci_set_hw_config_rom(struct hpsb_host *host, __be32 *config_rom)
2973{
2974 struct ti_ohci *ohci = host->hostdata;
2975
2976 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
2977 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
2978
2979 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
2980}
2981
2982
2983static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
2984 quadlet_t data, quadlet_t compare)
2985{
2986 struct ti_ohci *ohci = host->hostdata;
2987 int i;
2988
2989 reg_write(ohci, OHCI1394_CSRData, data);
2990 reg_write(ohci, OHCI1394_CSRCompareData, compare);
2991 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
2992
2993 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
2994 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
2995 break;
2996
2997 mdelay(1);
2998 }
2999
3000 return reg_read(ohci, OHCI1394_CSRData);
3001}
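ohci_hw_csr_reg() drives the controller's compare-and-swap unit for the serial-bus CSRs: the data and compare values are loaded, writing csrSel (reg & 0x3) to CSRControl kicks off the operation, bit 31 of CSRControl signals completion, and CSRData then holds the old value. As a hedged sketch, a caller claiming bus-manager status would look roughly like this (csrSel 0 selecting BUS_MANAGER_ID and 0x3f meaning "no manager yet" follow the OHCI 1.1 and CSR architecture descriptions; the helper is hypothetical):

/* Hypothetical caller: compare-and-swap our phy ID into BUS_MANAGER_ID. */
static int try_become_bus_manager(struct hpsb_host *host, quadlet_t our_phy_id)
{
	/* csrSel 0 selects BUS_MANAGER_ID; 0x3f means "no manager yet" */
	quadlet_t old = ohci_hw_csr_reg(host, 0, our_phy_id, 0x3f);

	return old == 0x3f;	/* the swap took iff the old value matched */
}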
3002
3003static struct hpsb_host_driver ohci1394_driver = {
3004 .owner = THIS_MODULE,
3005 .name = OHCI1394_DRIVER_NAME,
3006 .set_hw_config_rom = ohci_set_hw_config_rom,
3007 .transmit_packet = ohci_transmit,
3008 .devctl = ohci_devctl,
3009 .isoctl = ohci_isoctl,
3010 .hw_csr_reg = ohci_hw_csr_reg,
3011};
3012
3013/***********************************
3014 * PCI Driver Interface functions *
3015 ***********************************/
3016
3017#ifdef CONFIG_PPC_PMAC
3018static void ohci1394_pmac_on(struct pci_dev *dev)
3019{
3020 if (machine_is(powermac)) {
3021 struct device_node *ofn = pci_device_to_OF_node(dev);
3022
3023 if (ofn) {
3024 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3025 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3026 }
3027 }
3028}
3029
3030static void ohci1394_pmac_off(struct pci_dev *dev)
3031{
3032 if (machine_is(powermac)) {
3033 struct device_node *ofn = pci_device_to_OF_node(dev);
3034
3035 if (ofn) {
3036 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3037 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3038 }
3039 }
3040}
3041#else
3042#define ohci1394_pmac_on(dev)
3043#define ohci1394_pmac_off(dev)
3044#endif /* CONFIG_PPC_PMAC */
3045
3046static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3047 const struct pci_device_id *ent)
3048{
3049 struct hpsb_host *host;
3050 struct ti_ohci *ohci; /* shortcut to currently handled device */
3051 resource_size_t ohci_base;
3052 int err = -ENOMEM;
3053
3054 ohci1394_pmac_on(dev);
3055 if (pci_enable_device(dev)) {
3056 PRINT_G(KERN_ERR, "Failed to enable OHCI hardware");
3057 err = -ENXIO;
3058 goto err;
3059 }
3060 pci_set_master(dev);
3061
3062 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3063 if (!host) {
3064 PRINT_G(KERN_ERR, "Failed to allocate %s", "host structure");
3065 goto err;
3066 }
3067 ohci = host->hostdata;
3068 ohci->dev = dev;
3069 ohci->host = host;
3070 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3071 host->pdev = dev;
3072 pci_set_drvdata(dev, ohci);
3073
3074 /* We don't want hardware swapping */
3075 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3076
3077 /* Some oddball Apple controllers do not order the selfid
3078 * properly, so we make up for it here. */
3079#ifndef __LITTLE_ENDIAN
3080 /* XXX: Need a better way to check this. I'm wondering if we can
3081 * read the values of the OHCI1394_PCI_HCI_Control and the
3082 * noByteSwapData registers to see if they were not cleared to
3083 * zero. Should this work? Obviously it's not defined what these
3084 * registers will read when they aren't supported. Bleh! */
3085 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3086 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3087 ohci->no_swap_incoming = 1;
3088 ohci->selfid_swap = 0;
3089 } else
3090 ohci->selfid_swap = 1;
3091#endif
3092
3093
3094#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3095#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3096#endif
3097
3098 /* These chipsets require a bit of extra care when checking after
3099 * a busreset. */
3100 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3101 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3102 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3103 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3104 ohci->check_busreset = 1;
3105
3106 /* We hardwire the MMIO length, since some CardBus adaptors
3107 * fail to report the right length. Anyway, the OHCI spec
3108 * clearly says it's 2 KB, so this shouldn't be a problem. */
3109 ohci_base = pci_resource_start(dev, 0);
3110 if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3111 PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
3112 (unsigned long long)pci_resource_len(dev, 0));
3113
3114 if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
3115 OHCI1394_DRIVER_NAME)) {
3116 PRINT_G(KERN_ERR, "MMIO resource (0x%llx - 0x%llx) unavailable",
3117 (unsigned long long)ohci_base,
3118 (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
3119 goto err;
3120 }
3121 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3122
3123 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3124 if (ohci->registers == NULL) {
3125 PRINT_G(KERN_ERR, "Failed to remap registers");
3126 err = -ENXIO;
3127 goto err;
3128 }
3129 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3130 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3131
3132 /* csr_config rom allocation */
3133 ohci->csr_config_rom_cpu =
3134 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3135 &ohci->csr_config_rom_bus);
3136 if (ohci->csr_config_rom_cpu == NULL) {
3137 PRINT_G(KERN_ERR, "Failed to allocate %s", "buffer config rom");
3138 goto err;
3139 }
3140 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3141
3142 /* self-id dma buffer allocation */
3143 ohci->selfid_buf_cpu =
3144 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3145 &ohci->selfid_buf_bus);
3146 if (ohci->selfid_buf_cpu == NULL) {
3147 PRINT_G(KERN_ERR, "Failed to allocate %s", "self-ID buffer");
3148 goto err;
3149 }
3150 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3151
3152 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3153 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3154 "an 8 KB boundary... may cause problems on some CXD3222 chips",
3155 ohci->selfid_buf_cpu);
3156
3157 /* No self-id errors at startup */
3158 ohci->self_id_errors = 0;
3159
3160 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3161 /* AR DMA request context allocation */
3162 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3163 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3164 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3165 OHCI1394_AsReqRcvContextBase) < 0) {
3166 PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Req context");
3167 goto err;
3168 }
3169 /* AR DMA response context allocation */
3170 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3171 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3172 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3173 OHCI1394_AsRspRcvContextBase) < 0) {
3174 PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Resp context");
3175 goto err;
3176 }
3177 /* AT DMA request context */
3178 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3179 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3180 OHCI1394_AsReqTrContextBase) < 0) {
3181 PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Req context");
3182 goto err;
3183 }
3184 /* AT DMA response context */
3185 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3186 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3187 OHCI1394_AsRspTrContextBase) < 0) {
3188 PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Resp context");
3189 goto err;
3190 }
3191 /* Start off with a soft reset, to clear everything to a sane
3192 * state. */
3193 ohci_soft_reset(ohci);
3194
3195 /* Now enable LPS, which we need in order to start accessing
3196 * most of the registers. In fact, on some cards (ALI M5251),
3197 * accessing registers in the SClk domain without LPS enabled
3198 * will lock up the machine. */
3199 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3200
3201 /* Disable and clear interrupts */
3202 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3203 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3204
3205 /* Flush MMIO writes and wait to make sure we have full link enabled. */
3206 reg_read(ohci, OHCI1394_Version);
3207 msleep(50);
3208
3209 /* Determine the number of available IR and IT contexts. */
3210 ohci->nb_iso_rcv_ctx =
3211 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3212 ohci->nb_iso_xmit_ctx =
3213 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3214
3215 /* Set the usage bits for non-existent contexts so they can't
3216 * be allocated */
3217 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3218 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
3219
3220 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3221 spin_lock_init(&ohci->iso_tasklet_list_lock);
3222 ohci->ISO_channel_usage = 0;
3223 spin_lock_init(&ohci->IR_channel_lock);
3224
3225 spin_lock_init(&ohci->event_lock);
3226
3227 /*
3228 * interrupts are disabled, all right, but... due to IRQF_SHARED we
3229 * might get called anyway. We'll see no event, of course, but
3230 * we need to get to that "no event", so enough should be initialized
3231 * by that point.
3232 */
3233 err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3234 OHCI1394_DRIVER_NAME, ohci);
3235 if (err) {
3236 PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
3237 goto err;
3238 }
3239 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3240 ohci_initialize(ohci);
3241
3242 /* Set certain csr values */
3243 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3244 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3245 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3246 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3247 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3248
3249 if (phys_dma) {
3250 host->low_addr_space =
3251 (u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
3252 if (!host->low_addr_space)
3253 host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
3254 }
3255 host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
3256
3257 /* Tell the highlevel this host is ready */
3258 if (hpsb_add_host(host)) {
3259 PRINT_G(KERN_ERR, "Failed to register host with highlevel");
3260 goto err;
3261 }
3262 ohci->init_state = OHCI_INIT_DONE;
3263
3264 return 0;
3265err:
3266 ohci1394_pci_remove(dev);
3267 return err;
3268}
3269
3270static void ohci1394_pci_remove(struct pci_dev *dev)
3271{
3272 struct ti_ohci *ohci;
3273 struct device *device;
3274
3275 ohci = pci_get_drvdata(dev);
3276 if (!ohci)
3277 goto out;
3278
3279 device = get_device(&ohci->host->device);
3280
3281 switch (ohci->init_state) {
3282 case OHCI_INIT_DONE:
3283 hpsb_remove_host(ohci->host);
3284
3285 /* Clear out BUS Options */
3286 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3287 reg_write(ohci, OHCI1394_BusOptions,
3288 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3289 0x00ff0000);
3290 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3291
3292 case OHCI_INIT_HAVE_IRQ:
3293 /* Clear interrupt registers */
3294 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3295 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3296 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3297 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3298 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3299 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3300
3301 /* Disable IRM Contender */
3302 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3303
3304 /* Clear link control register */
3305 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3306
3307 /* Let all other nodes know to ignore us */
3308 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3309
3310 /* Soft reset before we start - this disables
3311 * interrupts and clears linkEnable and LPS. */
3312 ohci_soft_reset(ohci);
3313 free_irq(dev->irq, ohci);
3314
3315 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3316 /* ohci_soft_reset() already stops all DMA contexts, so we
3317 * don't need to stop them again here. */
3318 free_dma_rcv_ctx(&ohci->ar_req_context);
3319 free_dma_rcv_ctx(&ohci->ar_resp_context);
3320 free_dma_trm_ctx(&ohci->at_req_context);
3321 free_dma_trm_ctx(&ohci->at_resp_context);
3322
3323 case OHCI_INIT_HAVE_SELFID_BUFFER:
3324 pci_free_consistent(dev, OHCI1394_SI_DMA_BUF_SIZE,
3325 ohci->selfid_buf_cpu,
3326 ohci->selfid_buf_bus);
3327
3328 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3329 pci_free_consistent(dev, OHCI_CONFIG_ROM_LEN,
3330 ohci->csr_config_rom_cpu,
3331 ohci->csr_config_rom_bus);
3332
3333 case OHCI_INIT_HAVE_IOMAPPING:
3334 iounmap(ohci->registers);
3335
3336 case OHCI_INIT_HAVE_MEM_REGION:
3337 release_mem_region(pci_resource_start(dev, 0),
3338 OHCI1394_REGISTER_SIZE);
3339
3340 case OHCI_INIT_ALLOC_HOST:
3341 pci_set_drvdata(dev, NULL);
3342 }
3343
3344 if (device)
3345 put_device(device);
3346out:
3347 ohci1394_pmac_off(dev);
3348}
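ohci1394_pci_probe() and ohci1394_pci_remove() cooperate through ohci->init_state: probe bumps the state after each resource is acquired, and remove's switch intentionally has no break statements, so entering at any state falls through and releases everything acquired up to that point. The idiom reduced to a skeleton (states and helpers hypothetical):

enum init_state { ST_NONE, ST_HAVE_A, ST_HAVE_B };

static void release_a(void) { /* undo acquisition step A */ }
static void release_b(void) { /* undo acquisition step B */ }

static void teardown(enum init_state state)
{
	switch (state) {
	case ST_HAVE_B:
		release_b();
		/* fall through: having B implies A was acquired */
	case ST_HAVE_A:
		release_a();
		/* fall through */
	case ST_NONE:
		break;
	}
}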
3349
3350#ifdef CONFIG_PM
3351static int ohci1394_pci_suspend(struct pci_dev *dev, pm_message_t state)
3352{
3353 int err;
3354 struct ti_ohci *ohci = pci_get_drvdata(dev);
3355
3356 if (!ohci) {
3357 printk(KERN_ERR "%s: tried to suspend nonexistent host\n",
3358 OHCI1394_DRIVER_NAME);
3359 return -ENXIO;
3360 }
3361 DBGMSG("suspend called");
3362
3363 /* Clear the async DMA contexts and stop using the controller */
3364 hpsb_bus_reset(ohci->host);
3365
3366 /* See ohci1394_pci_remove() for comments on this sequence */
3367 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3368 reg_write(ohci, OHCI1394_BusOptions,
3369 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3370 0x00ff0000);
3371 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3372 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3373 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3374 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3375 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3376 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3377 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3378 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3379 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3380 ohci_soft_reset(ohci);
3381
3382 free_irq(dev->irq, ohci);
3383 err = pci_save_state(dev);
3384 if (err) {
3385 PRINT(KERN_ERR, "pci_save_state failed with %d", err);
3386 return err;
3387 }
3388 err = pci_set_power_state(dev, pci_choose_state(dev, state));
3389 if (err)
3390 DBGMSG("pci_set_power_state failed with %d", err);
3391 ohci1394_pmac_off(dev);
3392
3393 return 0;
3394}
3395
3396static int ohci1394_pci_resume(struct pci_dev *dev)
3397{
3398 int err;
3399 struct ti_ohci *ohci = pci_get_drvdata(dev);
3400
3401 if (!ohci) {
3402 printk(KERN_ERR "%s: tried to resume nonexistent host\n",
3403 OHCI1394_DRIVER_NAME);
3404 return -ENXIO;
3405 }
3406 DBGMSG("resume called");
3407
3408 ohci1394_pmac_on(dev);
3409 pci_set_power_state(dev, PCI_D0);
3410 pci_restore_state(dev);
3411 err = pci_enable_device(dev);
3412 if (err) {
3413 PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
3414 return err;
3415 }
3416
3417 /* See ohci1394_pci_probe() for comments on this sequence */
3418 ohci_soft_reset(ohci);
3419 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3420 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3421 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3422 reg_read(ohci, OHCI1394_Version);
3423 msleep(50);
3424
3425 err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3426 OHCI1394_DRIVER_NAME, ohci);
3427 if (err) {
3428 PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
3429 return err;
3430 }
3431
3432 ohci_initialize(ohci);
3433
3434 hpsb_resume_host(ohci->host);
3435 return 0;
3436}
3437#endif /* CONFIG_PM */
3438
3439static struct pci_device_id ohci1394_pci_tbl[] = {
3440 {
3441 .class = PCI_CLASS_SERIAL_FIREWIRE_OHCI,
3442 .class_mask = PCI_ANY_ID,
3443 .vendor = PCI_ANY_ID,
3444 .device = PCI_ANY_ID,
3445 .subvendor = PCI_ANY_ID,
3446 .subdevice = PCI_ANY_ID,
3447 },
3448 { 0, },
3449};
3450
3451MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3452
3453static struct pci_driver ohci1394_pci_driver = {
3454 .name = OHCI1394_DRIVER_NAME,
3455 .id_table = ohci1394_pci_tbl,
3456 .probe = ohci1394_pci_probe,
3457 .remove = ohci1394_pci_remove,
3458#ifdef CONFIG_PM
3459 .resume = ohci1394_pci_resume,
3460 .suspend = ohci1394_pci_suspend,
3461#endif
3462};
3463
3464/***********************************
3465 * OHCI1394 Video Interface *
3466 ***********************************/
3467
3468/* essentially the only purpose of this code is to allow another
3469 module to hook into ohci's interrupt handler */
3470
3471/* returns zero if successful, one if DMA context is locked up */
3472int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3473{
3474 int i=0;
3475
3476 /* stop the channel program if it's still running */
3477 reg_write(ohci, reg, 0x8000);
3478
3479 /* Wait until it effectively stops */
3480 while (reg_read(ohci, reg) & 0x400) {
3481 i++;
3482 if (i>5000) {
3483 PRINT(KERN_ERR,
3484 "Runaway loop while stopping context: %s...", msg ? msg : "");
3485 return 1;
3486 }
3487
3488 mb();
3489 udelay(10);
3490 }
3491 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3492 return 0;
3493}
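The constants in ohci1394_stop_context() follow the OHCI ContextControl layout: writing 0x8000 to a context's Clear register drops the run bit, and bit 0x400 (active) stays set until the DMA engine has actually retired. A hedged usage sketch for an isochronous receive context, using the per-context stride defined in ohci1394.h (the helper is hypothetical):

/* Stop isochronous receive context n before reprogramming it;
 * 32 * n is the per-context register stride from ohci1394.h. */
static int stop_ir_context(struct ti_ohci *ohci, int n)
{
	int reg = OHCI1394_IsoRcvContextBase + 32 * n
		  + OHCI1394_ContextControlClear;

	return ohci1394_stop_context(ohci, reg, "IR context");
}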
3494
3495void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3496 void (*func)(unsigned long), unsigned long data)
3497{
3498 tasklet_init(&tasklet->tasklet, func, data);
3499 tasklet->type = type;
3500 /* We init the tasklet->link field, so we can list_del() it
3501 * without worrying whether it was added to the list or not. */
3502 INIT_LIST_HEAD(&tasklet->link);
3503}
3504
3505int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3506 struct ohci1394_iso_tasklet *tasklet)
3507{
3508 unsigned long flags, *usage;
3509 int n, i, r = -EBUSY;
3510
3511 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3512 n = ohci->nb_iso_xmit_ctx;
3513 usage = &ohci->it_ctx_usage;
3514 }
3515 else {
3516 n = ohci->nb_iso_rcv_ctx;
3517 usage = &ohci->ir_ctx_usage;
3518
3519 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3520 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3521 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3522 return r;
3523 }
3524 }
3525 }
3526
3527 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3528
3529 for (i = 0; i < n; i++)
3530 if (!test_and_set_bit(i, usage)) {
3531 tasklet->context = i;
3532 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3533 r = 0;
3534 break;
3535 }
3536
3537 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3538
3539 return r;
3540}
3541
3542void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3543 struct ohci1394_iso_tasklet *tasklet)
3544{
3545 unsigned long flags;
3546
3547 tasklet_kill(&tasklet->tasklet);
3548
3549 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3550
3551 if (tasklet->type == OHCI_ISO_TRANSMIT)
3552 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3553 else {
3554 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3555
3556 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3557 clear_bit(0, &ohci->ir_multichannel_used);
3558 }
3559 }
3560
3561 list_del(&tasklet->link);
3562
3563 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3564}
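These four exports are the hook that lets dv1394 and video1394 share ohci1394's interrupt handler: a client initializes its tasklet, registers it to claim a free IR or IT context number (returned in tasklet->context), and unregisters it on teardown. A minimal client sketch, with a hypothetical handler:

static void my_ir_handler(unsigned long data)
{
	/* scheduled from ohci1394's isochronous interrupt dispatch */
}

static struct ohci1394_iso_tasklet my_tasklet;

static int attach(struct ti_ohci *ohci)
{
	ohci1394_init_iso_tasklet(&my_tasklet, OHCI_ISO_RECEIVE,
				  my_ir_handler, 0);
	if (ohci1394_register_iso_tasklet(ohci, &my_tasklet) < 0)
		return -EBUSY;	/* all IR contexts already in use */
	/* my_tasklet.context now holds the claimed IR context number */
	return 0;
}

static void detach(struct ti_ohci *ohci)
{
	ohci1394_unregister_iso_tasklet(ohci, &my_tasklet);
}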
3565
3566EXPORT_SYMBOL(ohci1394_stop_context);
3567EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3568EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3569EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3570
3571/***********************************
3572 * General module initialization *
3573 ***********************************/
3574
3575MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3576MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3577MODULE_LICENSE("GPL");
3578
3579static void __exit ohci1394_cleanup (void)
3580{
3581 pci_unregister_driver(&ohci1394_pci_driver);
3582}
3583
3584static int __init ohci1394_init(void)
3585{
3586 return pci_register_driver(&ohci1394_pci_driver);
3587}
3588
3589module_init(ohci1394_init);
3590module_exit(ohci1394_cleanup);
diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h
deleted file mode 100644
index 7fb8ab9780ae..000000000000
--- a/drivers/ieee1394/ohci1394.h
+++ /dev/null
@@ -1,453 +0,0 @@
1/*
2 * ohci1394.h - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#ifndef _OHCI1394_H
22#define _OHCI1394_H
23
24#include "ieee1394_types.h"
25#include <asm/io.h>
26
27#define OHCI1394_DRIVER_NAME "ohci1394"
28
29#define OHCI1394_MAX_AT_REQ_RETRIES 0xf
30#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
31#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
32#define OHCI1394_MAX_SELF_ID_ERRORS 16
33
34#define AR_REQ_NUM_DESC 4 /* number of AR req descriptors */
35#define AR_REQ_BUF_SIZE PAGE_SIZE /* size of AR req buffers */
36#define AR_REQ_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
37
38#define AR_RESP_NUM_DESC 4 /* number of AR resp descriptors */
39#define AR_RESP_BUF_SIZE PAGE_SIZE /* size of AR resp buffers */
40#define AR_RESP_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
41
42#define IR_NUM_DESC 16 /* number of IR descriptors */
43#define IR_BUF_SIZE PAGE_SIZE /* 4096 bytes/buffer */
44#define IR_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
45
46#define IT_NUM_DESC 16 /* number of IT descriptors */
47
48#define AT_REQ_NUM_DESC 32 /* number of AT req descriptors */
49#define AT_RESP_NUM_DESC 32 /* number of AT resp descriptors */
50
51#define OHCI_LOOP_COUNT 100 /* Number of loops for reg read waits */
52
53#define OHCI_CONFIG_ROM_LEN 1024 /* Length of the mapped configrom space */
54
55#define OHCI1394_SI_DMA_BUF_SIZE 8192 /* length of the selfid buffer */
56
57/* PCI configuration space addresses */
58#define OHCI1394_PCI_HCI_Control 0x40
59
60struct dma_cmd {
61 u32 control;
62 u32 address;
63 u32 branchAddress;
64 u32 status;
65};
66
67/*
68 * FIXME:
69 * It is important that a single at_dma_prg does not cross a page boundary
70 * The proper way to do it would be to do the check dynamically as the
71 * programs are inserted into the AT fifo.
72 */
73struct at_dma_prg {
74 struct dma_cmd begin;
75 quadlet_t data[4];
76 struct dma_cmd end;
77 quadlet_t pad[4]; /* FIXME: quick hack for memory alignment */
78};
79
80/* identify whether a DMA context is asynchronous or isochronous */
81enum context_type { DMA_CTX_ASYNC_REQ, DMA_CTX_ASYNC_RESP, DMA_CTX_ISO };
82
83/* DMA receive context */
84struct dma_rcv_ctx {
85 struct ti_ohci *ohci;
86 enum context_type type;
87 int ctx;
88 unsigned int num_desc;
89
90 unsigned int buf_size;
91 unsigned int split_buf_size;
92
93 /* dma block descriptors */
94 struct dma_cmd **prg_cpu;
95 dma_addr_t *prg_bus;
96 struct pci_pool *prg_pool;
97
98 /* dma buffers */
99 quadlet_t **buf_cpu;
100 dma_addr_t *buf_bus;
101
102 unsigned int buf_ind;
103 unsigned int buf_offset;
104 quadlet_t *spb;
105 spinlock_t lock;
106 struct tasklet_struct task;
107 int ctrlClear;
108 int ctrlSet;
109 int cmdPtr;
110 int ctxtMatch;
111};
112
113/* DMA transmit context */
114struct dma_trm_ctx {
115 struct ti_ohci *ohci;
116 enum context_type type;
117 int ctx;
118 unsigned int num_desc;
119
120 /* dma block descriptors */
121 struct at_dma_prg **prg_cpu;
122 dma_addr_t *prg_bus;
123 struct pci_pool *prg_pool;
124
125 unsigned int prg_ind;
126 unsigned int sent_ind;
127 int free_prgs;
128 quadlet_t *branchAddrPtr;
129
130 /* list of packets inserted in the AT FIFO */
131 struct list_head fifo_list;
132
133 /* list of pending packets to be inserted in the AT FIFO */
134 struct list_head pending_list;
135
136 spinlock_t lock;
137 struct tasklet_struct task;
138 int ctrlClear;
139 int ctrlSet;
140 int cmdPtr;
141};
142
143struct ohci1394_iso_tasklet {
144 struct tasklet_struct tasklet;
145 struct list_head link;
146 int context;
147 enum { OHCI_ISO_TRANSMIT, OHCI_ISO_RECEIVE,
148 OHCI_ISO_MULTICHANNEL_RECEIVE } type;
149};
150
151struct ti_ohci {
152 struct pci_dev *dev;
153
154 enum {
155 OHCI_INIT_ALLOC_HOST,
156 OHCI_INIT_HAVE_MEM_REGION,
157 OHCI_INIT_HAVE_IOMAPPING,
158 OHCI_INIT_HAVE_CONFIG_ROM_BUFFER,
159 OHCI_INIT_HAVE_SELFID_BUFFER,
160 OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE,
161 OHCI_INIT_HAVE_IRQ,
162 OHCI_INIT_DONE,
163 } init_state;
164
165 /* remapped memory spaces */
166 void __iomem *registers;
167
168 /* dma buffer for self-id packets */
169 quadlet_t *selfid_buf_cpu;
170 dma_addr_t selfid_buf_bus;
171
172 /* buffer for csr config rom */
173 quadlet_t *csr_config_rom_cpu;
174 dma_addr_t csr_config_rom_bus;
175 int csr_config_rom_length;
176
177 unsigned int max_packet_size;
178
179 /* async receive */
180 struct dma_rcv_ctx ar_resp_context;
181 struct dma_rcv_ctx ar_req_context;
182
183 /* async transmit */
184 struct dma_trm_ctx at_resp_context;
185 struct dma_trm_ctx at_req_context;
186
187 /* iso receive */
188 int nb_iso_rcv_ctx;
189 unsigned long ir_ctx_usage; /* use test_and_set_bit() for atomicity */
190 unsigned long ir_multichannel_used; /* ditto */
191 spinlock_t IR_channel_lock;
192
193 /* iso transmit */
194 int nb_iso_xmit_ctx;
195 unsigned long it_ctx_usage; /* use test_and_set_bit() for atomicity */
196
197 u64 ISO_channel_usage;
198
199 /* IEEE-1394 part follows */
200 struct hpsb_host *host;
201
202 int phyid, isroot;
203
204 spinlock_t phy_reg_lock;
205 spinlock_t event_lock;
206
207 int self_id_errors;
208
209 /* Tasklets for iso receive and transmit, used by video1394
210 * and dv1394 */
211 struct list_head iso_tasklet_list;
212 spinlock_t iso_tasklet_list_lock;
213
214 /* Swap the selfid buffer? */
215 unsigned int selfid_swap:1;
216 /* Some Apple chipsets seem to swap incoming headers for us */
217 unsigned int no_swap_incoming:1;
218
219 /* Force extra paranoia checking on bus-reset handling */
220 unsigned int check_busreset:1;
221};
222
223static inline int cross_bound(unsigned long addr, unsigned int size)
224{
225 if (size == 0)
226 return 0;
227
228 if (size > PAGE_SIZE)
229 return 1;
230
231 if (addr >> PAGE_SHIFT != (addr + size - 1) >> PAGE_SHIFT)
232 return 1;
233
234 return 0;
235}
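cross_bound() is the check the FIXME at struct at_dma_prg asks for: it returns nonzero when a buffer would straddle a page boundary. A few worked cases, assuming 4 KiB pages (PAGE_SHIFT = 12):

/* Assuming PAGE_SIZE 4096 (PAGE_SHIFT 12):
 *   cross_bound(0x0ff0, 0x20)   -> 1  (0x0ff0..0x100f spans two pages)
 *   cross_bound(0x0f00, 0x100)  -> 0  (0x0f00..0x0fff stays in one page)
 *   cross_bound(0x0000, 0x2000) -> 1  (size > PAGE_SIZE always crosses)
 *   cross_bound(0x0ff0, 0)      -> 0  (an empty range never crosses)
 */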
236
237/*
238 * Register read and write helper functions.
239 */
240static inline void reg_write(const struct ti_ohci *ohci, int offset, u32 data)
241{
242 writel(data, ohci->registers + offset);
243}
244
245static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
246{
247 return readl(ohci->registers + offset);
248}
249
250
251/* 2 KB of register space */
252#define OHCI1394_REGISTER_SIZE 0x800
253
254/* Offsets relative to context bases defined below */
255
256#define OHCI1394_ContextControlSet 0x000
257#define OHCI1394_ContextControlClear 0x004
258#define OHCI1394_ContextCommandPtr 0x00C
259
260/* register map */
261#define OHCI1394_Version 0x000
262#define OHCI1394_GUID_ROM 0x004
263#define OHCI1394_ATRetries 0x008
264#define OHCI1394_CSRData 0x00C
265#define OHCI1394_CSRCompareData 0x010
266#define OHCI1394_CSRControl 0x014
267#define OHCI1394_ConfigROMhdr 0x018
268#define OHCI1394_BusID 0x01C
269#define OHCI1394_BusOptions 0x020
270#define OHCI1394_GUIDHi 0x024
271#define OHCI1394_GUIDLo 0x028
272#define OHCI1394_ConfigROMmap 0x034
273#define OHCI1394_PostedWriteAddressLo 0x038
274#define OHCI1394_PostedWriteAddressHi 0x03C
275#define OHCI1394_VendorID 0x040
276#define OHCI1394_HCControlSet 0x050
277#define OHCI1394_HCControlClear 0x054
278#define OHCI1394_HCControl_noByteSwap 0x40000000
279#define OHCI1394_HCControl_programPhyEnable 0x00800000
280#define OHCI1394_HCControl_aPhyEnhanceEnable 0x00400000
281#define OHCI1394_HCControl_LPS 0x00080000
282#define OHCI1394_HCControl_postedWriteEnable 0x00040000
283#define OHCI1394_HCControl_linkEnable 0x00020000
284#define OHCI1394_HCControl_softReset 0x00010000
285#define OHCI1394_SelfIDBuffer 0x064
286#define OHCI1394_SelfIDCount 0x068
287#define OHCI1394_IRMultiChanMaskHiSet 0x070
288#define OHCI1394_IRMultiChanMaskHiClear 0x074
289#define OHCI1394_IRMultiChanMaskLoSet 0x078
290#define OHCI1394_IRMultiChanMaskLoClear 0x07C
291#define OHCI1394_IntEventSet 0x080
292#define OHCI1394_IntEventClear 0x084
293#define OHCI1394_IntMaskSet 0x088
294#define OHCI1394_IntMaskClear 0x08C
295#define OHCI1394_IsoXmitIntEventSet 0x090
296#define OHCI1394_IsoXmitIntEventClear 0x094
297#define OHCI1394_IsoXmitIntMaskSet 0x098
298#define OHCI1394_IsoXmitIntMaskClear 0x09C
299#define OHCI1394_IsoRecvIntEventSet 0x0A0
300#define OHCI1394_IsoRecvIntEventClear 0x0A4
301#define OHCI1394_IsoRecvIntMaskSet 0x0A8
302#define OHCI1394_IsoRecvIntMaskClear 0x0AC
303#define OHCI1394_InitialBandwidthAvailable 0x0B0
304#define OHCI1394_InitialChannelsAvailableHi 0x0B4
305#define OHCI1394_InitialChannelsAvailableLo 0x0B8
306#define OHCI1394_FairnessControl 0x0DC
307#define OHCI1394_LinkControlSet 0x0E0
308#define OHCI1394_LinkControlClear 0x0E4
309#define OHCI1394_LinkControl_RcvSelfID 0x00000200
310#define OHCI1394_LinkControl_RcvPhyPkt 0x00000400
311#define OHCI1394_LinkControl_CycleTimerEnable 0x00100000
312#define OHCI1394_LinkControl_CycleMaster 0x00200000
313#define OHCI1394_LinkControl_CycleSource 0x00400000
314#define OHCI1394_NodeID 0x0E8
315#define OHCI1394_PhyControl 0x0EC
316#define OHCI1394_IsochronousCycleTimer 0x0F0
317#define OHCI1394_AsReqFilterHiSet 0x100
318#define OHCI1394_AsReqFilterHiClear 0x104
319#define OHCI1394_AsReqFilterLoSet 0x108
320#define OHCI1394_AsReqFilterLoClear 0x10C
321#define OHCI1394_PhyReqFilterHiSet 0x110
322#define OHCI1394_PhyReqFilterHiClear 0x114
323#define OHCI1394_PhyReqFilterLoSet 0x118
324#define OHCI1394_PhyReqFilterLoClear 0x11C
325#define OHCI1394_PhyUpperBound 0x120
326
327#define OHCI1394_AsReqTrContextBase 0x180
328#define OHCI1394_AsReqTrContextControlSet 0x180
329#define OHCI1394_AsReqTrContextControlClear 0x184
330#define OHCI1394_AsReqTrCommandPtr 0x18C
331
332#define OHCI1394_AsRspTrContextBase 0x1A0
333#define OHCI1394_AsRspTrContextControlSet 0x1A0
334#define OHCI1394_AsRspTrContextControlClear 0x1A4
335#define OHCI1394_AsRspTrCommandPtr 0x1AC
336
337#define OHCI1394_AsReqRcvContextBase 0x1C0
338#define OHCI1394_AsReqRcvContextControlSet 0x1C0
339#define OHCI1394_AsReqRcvContextControlClear 0x1C4
340#define OHCI1394_AsReqRcvCommandPtr 0x1CC
341
342#define OHCI1394_AsRspRcvContextBase 0x1E0
343#define OHCI1394_AsRspRcvContextControlSet 0x1E0
344#define OHCI1394_AsRspRcvContextControlClear 0x1E4
345#define OHCI1394_AsRspRcvCommandPtr 0x1EC
346
347/* Isochronous transmit registers */
348/* Add (16 * n) for context n */
349#define OHCI1394_IsoXmitContextBase 0x200
350#define OHCI1394_IsoXmitContextControlSet 0x200
351#define OHCI1394_IsoXmitContextControlClear 0x204
352#define OHCI1394_IsoXmitCommandPtr 0x20C
353
354/* Isochronous receive registers */
355/* Add (32 * n) for context n */
356#define OHCI1394_IsoRcvContextBase 0x400
357#define OHCI1394_IsoRcvContextControlSet 0x400
358#define OHCI1394_IsoRcvContextControlClear 0x404
359#define OHCI1394_IsoRcvCommandPtr 0x40C
360#define OHCI1394_IsoRcvContextMatch 0x410
361
362/* Interrupts Mask/Events */
363
364#define OHCI1394_reqTxComplete 0x00000001
365#define OHCI1394_respTxComplete 0x00000002
366#define OHCI1394_ARRQ 0x00000004
367#define OHCI1394_ARRS 0x00000008
368#define OHCI1394_RQPkt 0x00000010
369#define OHCI1394_RSPkt 0x00000020
370#define OHCI1394_isochTx 0x00000040
371#define OHCI1394_isochRx 0x00000080
372#define OHCI1394_postedWriteErr 0x00000100
373#define OHCI1394_lockRespErr 0x00000200
374#define OHCI1394_selfIDComplete 0x00010000
375#define OHCI1394_busReset 0x00020000
376#define OHCI1394_phy 0x00080000
377#define OHCI1394_cycleSynch 0x00100000
378#define OHCI1394_cycle64Seconds 0x00200000
379#define OHCI1394_cycleLost 0x00400000
380#define OHCI1394_cycleInconsistent 0x00800000
381#define OHCI1394_unrecoverableError 0x01000000
382#define OHCI1394_cycleTooLong 0x02000000
383#define OHCI1394_phyRegRcvd 0x04000000
384#define OHCI1394_masterIntEnable 0x80000000
385
386/* DMA Control flags */
387#define DMA_CTL_OUTPUT_MORE 0x00000000
388#define DMA_CTL_OUTPUT_LAST 0x10000000
389#define DMA_CTL_INPUT_MORE 0x20000000
390#define DMA_CTL_INPUT_LAST 0x30000000
391#define DMA_CTL_UPDATE 0x08000000
392#define DMA_CTL_IMMEDIATE 0x02000000
393#define DMA_CTL_IRQ 0x00300000
394#define DMA_CTL_BRANCH 0x000c0000
395#define DMA_CTL_WAIT 0x00030000
396
397/* OHCI evt_* error types, table 3-2 of the OHCI 1.1 spec. */
398#define EVT_NO_STATUS 0x0 /* No event status */
399#define EVT_RESERVED_A 0x1 /* Reserved, not used !!! */
400#define EVT_LONG_PACKET 0x2 /* The recv data was longer than the buffer */
401#define EVT_MISSING_ACK 0x3 /* A subaction gap was detected before an ack
402 arrived, or recv'd ack had a parity error */
403#define EVT_UNDERRUN 0x4 /* Underrun on corresponding FIFO, packet
404 truncated */
405#define EVT_OVERRUN 0x5 /* A recv FIFO overflowed on reception of an ISO
406 packet */
407#define EVT_DESCRIPTOR_READ 0x6 /* An unrecoverable error occurred while the host was
408 reading a descriptor block */
409#define EVT_DATA_READ 0x7 /* An error occurred while the host controller was
410 attempting to read from host memory in the data
411 stage of descriptor processing */
412#define EVT_DATA_WRITE 0x8 /* An error occurred while the host controller was
413 attempting to write either during the data stage
414 of descriptor processing, or when processing a single
415 16-bit host memory write */
416#define EVT_BUS_RESET 0x9 /* Identifies a PHY packet in the recv buffer as
417 being a synthesized bus reset packet */
418#define EVT_TIMEOUT 0xa /* Indicates that the asynchronous transmit response
419 packet expired and was not transmitted, or that an
420 IT DMA context experienced a skip processing overflow */
421#define EVT_TCODE_ERR 0xb /* A bad tCode is associated with this packet.
422 The packet was flushed */
423#define EVT_RESERVED_B 0xc /* Reserved, not used !!! */
424#define EVT_RESERVED_C 0xd /* Reserved, not used !!! */
425#define EVT_UNKNOWN 0xe /* An error condition has occurred that cannot be
426 represented by any other event codes defined herein. */
427#define EVT_FLUSHED 0xf /* Sent by the link side of the output FIFO when asynchronous
428 packets are being flushed due to a bus reset. */
429
430#define OHCI1394_TCODE_PHY 0xE
431
432/* Node offset map (phys DMA area, posted write area).
433 * The value of OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED may be modified but must
434 * be lower than OHCI1394_MIDDLE_ADDRESS_SPACE.
435 * OHCI1394_PHYS_UPPER_BOUND_FIXED and OHCI1394_MIDDLE_ADDRESS_SPACE are
436 * constants given by the OHCI spec.
437 */
438#define OHCI1394_PHYS_UPPER_BOUND_FIXED 0x000100000000ULL /* 4 GB */
439#define OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED 0x010000000000ULL /* 1 TB */
440#define OHCI1394_MIDDLE_ADDRESS_SPACE 0xffff00000000ULL
441
442void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet,
443 int type,
444 void (*func)(unsigned long),
445 unsigned long data);
446int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
447 struct ohci1394_iso_tasklet *tasklet);
448void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
449 struct ohci1394_iso_tasklet *tasklet);
450int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg);
451struct ti_ohci *ohci1394_get_struct(int card_num);
452
453#endif
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
deleted file mode 100644
index bf47fee79808..000000000000
--- a/drivers/ieee1394/pcilynx.c
+++ /dev/null
@@ -1,1554 +0,0 @@
1/*
2 * pcilynx.c - Texas Instruments PCILynx driver
3 * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
4 * Stephan Linz <linz@mazet.de>
5 * Manfred Weihs <weihs@ict.tuwien.ac.at>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22/*
23 * Contributions:
24 *
25 * Manfred Weihs <weihs@ict.tuwien.ac.at>
26 * reading bus info block (containing GUID) from serial
27 * eeprom via i2c and storing it in config ROM
28 * Reworked code for initiating bus resets
29 * (long, short, with or without hold-off)
30 * Enhancements in async and iso send code
31 */
32
33#include <linux/kernel.h>
34#include <linux/slab.h>
35#include <linux/interrupt.h>
36#include <linux/wait.h>
37#include <linux/errno.h>
38#include <linux/module.h>
39#include <linux/moduleparam.h>
40#include <linux/init.h>
41#include <linux/pci.h>
42#include <linux/fs.h>
43#include <linux/poll.h>
44#include <linux/kdev_t.h>
45#include <linux/dma-mapping.h>
46#include <asm/byteorder.h>
47#include <asm/atomic.h>
48#include <asm/io.h>
49#include <asm/uaccess.h>
50#include <asm/irq.h>
51
52#include "csr1212.h"
53#include "ieee1394.h"
54#include "ieee1394_types.h"
55#include "hosts.h"
56#include "ieee1394_core.h"
57#include "highlevel.h"
58#include "pcilynx.h"
59
60#include <linux/i2c.h>
61#include <linux/i2c-algo-bit.h>
62
63/* print general (card independent) information */
64#define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
65/* print card specific information */
66#define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
67
68#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
69#define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
70#define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
71#else
72#define PRINT_GD(level, fmt, args...) do {} while (0)
73#define PRINTD(level, card, fmt, args...) do {} while (0)
74#endif
75
76
77/* Module Parameters */
78static int skip_eeprom;
79module_param(skip_eeprom, int, 0444);
80MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");
81
82
83static struct hpsb_host_driver lynx_driver;
84static unsigned int card_id;
85
86
87
88/*
89 * I2C stuff
90 */
91
92/* the i2c stuff was inspired by i2c-philips-par.c */
93
94static void bit_setscl(void *data, int state)
95{
96 if (state) {
97 ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
98 } else {
99 ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
100 }
101 reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
102}
103
104static void bit_setsda(void *data, int state)
105{
106 if (state) {
107 ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
108 } else {
109 ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
110 }
111 reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
112}
113
114static int bit_getscl(void *data)
115{
116 return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
117}
118
119static int bit_getsda(void *data)
120{
121 return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
122}
123
124static struct i2c_algo_bit_data bit_data = {
125 .setsda = bit_setsda,
126 .setscl = bit_setscl,
127 .getsda = bit_getsda,
128 .getscl = bit_getscl,
129 .udelay = 5,
130 .timeout = 100,
131};
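bit_data only provides the GPIO-level callbacks; it becomes usable once attached to an i2c_adapter whose algo_data points at it, after which i2c_bit_add_bus() plugs in the generic bit-banging algorithm. A rough sketch of that attachment, assuming the i2c-algo-bit API of this era (the adapter name and helper are hypothetical, and a real driver would use a per-card copy of bit_data rather than the shared template):

/* Hypothetical attachment sketch: wire bit_data into an adapter. */
static struct i2c_adapter lynx_i2c_adapter = {
	.owner     = THIS_MODULE,
	.name      = "PCILynx SEEPROM",
	.algo_data = &bit_data,
};

static int attach_i2c(struct ti_lynx *lynx)
{
	bit_data.data = lynx;	/* context passed to the set/get callbacks */
	return i2c_bit_add_bus(&lynx_i2c_adapter);
}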
132
133
134/*
135 * PCL handling functions.
136 */
137
138static pcl_t alloc_pcl(struct ti_lynx *lynx)
139{
140 u8 m;
141 int i, j;
142
143 spin_lock(&lynx->lock);
144 /* FIXME - use ffz() to make this readable */
145 for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
146 m = lynx->pcl_bmap[i];
147 for (j = 0; j < 8; j++) {
148 if (m & 1<<j) {
149 continue;
150 }
151 m |= 1<<j;
152 lynx->pcl_bmap[i] = m;
153 spin_unlock(&lynx->lock);
154 return 8 * i + j;
155 }
156 }
157 spin_unlock(&lynx->lock);
158
159 return -1;
160}
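The FIXME above asks for ffz(); a sketch of the same allocator using it. ffz() returns the index of the first zero bit of its argument, so the byte must be checked against 0xff first (otherwise ffz() on an all-ones byte would report bit 8):

/* Sketch of alloc_pcl() rewritten with ffz(), as the FIXME suggests. */
static pcl_t alloc_pcl_ffz(struct ti_lynx *lynx)
{
	int i;

	spin_lock(&lynx->lock);
	for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
		if (lynx->pcl_bmap[i] != 0xff) {
			int j = ffz(lynx->pcl_bmap[i]);

			lynx->pcl_bmap[i] |= 1 << j;
			spin_unlock(&lynx->lock);
			return 8 * i + j;
		}
	}
	spin_unlock(&lynx->lock);
	return -1;	/* all PCLs in use */
}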
161
162
163#if 0
164static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
165{
166 int off, bit;
167
168 off = pclid / 8;
169 bit = pclid % 8;
170
171 if (pclid < 0) {
172 return;
173 }
174
175 spin_lock(&lynx->lock);
176 if (lynx->pcl_bmap[off] & 1<<bit) {
177 lynx->pcl_bmap[off] &= ~(1<<bit);
178 } else {
179 PRINT(KERN_ERR, lynx->id,
180 "attempted to free unallocated PCL %d", pclid);
181 }
182 spin_unlock(&lynx->lock);
183}
184
185/* functions useful for debugging */
186static void pretty_print_pcl(const struct ti_pcl *pcl)
187{
188 int i;
189
190 printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
191 pcl->next, pcl->user_data, pcl->pcl_status,
192 pcl->remaining_transfer_count, pcl->next_data_buffer);
193
194 printk("PCL");
195 for (i=0; i<13; i++) {
196 printk(" c%x:%08x d%x:%08x",
197 i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
198 if (!(i & 0x3) && (i != 12)) printk("\nPCL");
199 }
200 printk("\n");
201}
202
203static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
204{
205 struct ti_pcl pcl;
206
207 get_pcl(lynx, pclid, &pcl);
208 pretty_print_pcl(&pcl);
209}
210#endif
211
212
213
214/***********************************
215 * IEEE-1394 functionality section *
216 ***********************************/
217
218
219static int get_phy_reg(struct ti_lynx *lynx, int addr)
220{
221 int retval;
222 int i = 0;
223
224 unsigned long flags;
225
226 if (addr > 15) {
227 PRINT(KERN_ERR, lynx->id,
228 "%s: PHY register address %d out of range",
229 __func__, addr);
230 return -1;
231 }
232
233 spin_lock_irqsave(&lynx->phy_reg_lock, flags);
234
235 reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
236 do {
237 retval = reg_read(lynx, LINK_PHY);
238
239 if (i > 10000) {
240 PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
241 __func__);
242 retval = -1;
243 break;
244 }
245 i++;
246 } while ((retval & 0xf00) != LINK_PHY_RADDR(addr));
247
248 reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
249 spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
250
251 if (retval != -1) {
252 return retval & 0xff;
253 } else {
254 return -1;
255 }
256}
257
258static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
259{
260 unsigned long flags;
261
262 if (addr > 15) {
263 PRINT(KERN_ERR, lynx->id,
264 "%s: PHY register address %d out of range", __func__, addr);
265 return -1;
266 }
267
268 if (val > 0xff) {
269 PRINT(KERN_ERR, lynx->id,
270 "%s: PHY register value %d out of range", __func__, val);
271 return -1;
272 }
273
274 spin_lock_irqsave(&lynx->phy_reg_lock, flags);
275
276 reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
277 | LINK_PHY_WDATA(val));
278
279 spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
280
281 return 0;
282}
283
284static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
285{
286 int reg;
287
288 if (page > 7) {
289 PRINT(KERN_ERR, lynx->id,
290 "%s: PHY page %d out of range", __func__, page);
291 return -1;
292 }
293
294 reg = get_phy_reg(lynx, 7);
295 if (reg != -1) {
296 reg &= 0x1f;
297 reg |= (page << 5);
298 set_phy_reg(lynx, 7, reg);
299 return 0;
300 } else {
301 return -1;
302 }
303}
304
305#if 0 /* not needed at this time */
306static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
307{
308 int reg;
309
310 if (port > 15) {
311 PRINT(KERN_ERR, lynx->id,
312 "%s: PHY port %d out of range", __func__, port);
313 return -1;
314 }
315
316 reg = get_phy_reg(lynx, 7);
317 if (reg != -1) {
318 reg &= 0xf0;
319 reg |= port;
320 set_phy_reg(lynx, 7, reg);
321 return 0;
322 } else {
323 return -1;
324 }
325}
326#endif
327
328static u32 get_phy_vendorid(struct ti_lynx *lynx)
329{
330 u32 pvid = 0;
331 sel_phy_reg_page(lynx, 1);
332 pvid |= (get_phy_reg(lynx, 10) << 16);
333 pvid |= (get_phy_reg(lynx, 11) << 8);
334 pvid |= get_phy_reg(lynx, 12);
335 PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
336 return pvid;
337}
338
339static u32 get_phy_productid(struct ti_lynx *lynx)
340{
341 u32 id = 0;
342 sel_phy_reg_page(lynx, 1);
343 id |= (get_phy_reg(lynx, 13) << 16);
344 id |= (get_phy_reg(lynx, 14) << 8);
345 id |= get_phy_reg(lynx, 15);
346 PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
347 return id;
348}
349
350static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
351 struct hpsb_host *host)
352{
353 quadlet_t lsid;
354 char phyreg[7];
355 int i;
356
357 phyreg[0] = lynx->phy_reg0;
358 for (i = 1; i < 7; i++) {
359 phyreg[i] = get_phy_reg(lynx, i);
360 }
361
362 /* FIXME? We assume a TSB21LV03A phy here. This code doesn't support
363 more than 3 ports on the PHY anyway. */
364
365 lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
366 lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
367 lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
368 if (!hpsb_disable_irm)
369 lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
370 /* lsid |= 1 << 11; *//* set contender (hack) */
371 lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */
372
373 for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
374 if (phyreg[3 + i] & 0x4) {
375 lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
376 << (6 - i*2);
377 } else {
378 lsid |= 1 << (6 - i*2);
379 }
380 }
381
382 cpu_to_be32s(&lsid);
383 PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
384 return lsid;
385}
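generate_own_selfid() builds the packet by hand because pre-1394a PHYs do not deliver their own self-ID into the receive buffer. The bit positions follow the IEEE 1394 self-ID packet format; a hedged decoding sketch for the fields packed above (values read before the final cpu_to_be32s() swap):

/* Decode the fields generate_own_selfid() packs. Bits 31:30 carry the
 * self-ID marker '10', bit 22 the link-active flag (both from the
 * 0x80400000 constant above). */
static void decode_own_selfid(quadlet_t lsid)
{
	unsigned int phy_id    = (lsid >> 24) & 0x3f; /* phyreg[0] bits 7:2 */
	unsigned int gap_count = (lsid >> 16) & 0x3f; /* phyreg[1] */
	unsigned int speed     = (lsid >> 14) & 0x03; /* phyreg[2] bits 7:6 */
	unsigned int contender = (lsid >> 11) & 0x01; /* phyreg[6] bit 0 */
	unsigned int initiated = (lsid >>  1) & 0x01; /* phyreg[6] bit 4 */

	/* port states sit in bits 7:2, two bits per port, p0 highest */
	printk(KERN_DEBUG "selfid: phy %u gap %u sp %u c %u i %u\n",
	       phy_id, gap_count, speed, contender, initiated);
}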
386
387static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
388{
389 quadlet_t *q = lynx->rcv_page;
390 int phyid, isroot, size;
391 quadlet_t lsid = 0;
392 int i;
393
394 if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;
395
396 size = lynx->selfid_size;
397 phyid = lynx->phy_reg0;
398
399 i = (size > 16 ? 16 : size) / 4 - 1;
400 while (i >= 0) {
401 cpu_to_be32s(&q[i]);
402 i--;
403 }
404
405 if (!lynx->phyic.reg_1394a) {
406 lsid = generate_own_selfid(lynx, host);
407 }
408
409 isroot = (phyid & 2) != 0;
410 phyid >>= 2;
411 PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
412 phyid, (isroot ? "root" : "not root"));
413 reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);
414
415 if (!lynx->phyic.reg_1394a && !size) {
416 hpsb_selfid_received(host, lsid);
417 }
418
419 while (size > 0) {
420 struct selfid *sid = (struct selfid *)q;
421
422 if (!lynx->phyic.reg_1394a && !sid->extended
423 && (sid->phy_id == (phyid + 1))) {
424 hpsb_selfid_received(host, lsid);
425 }
426
427 if (q[0] == ~q[1]) {
428 PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
429 q[0]);
430 hpsb_selfid_received(host, q[0]);
431 } else {
432 PRINT(KERN_INFO, lynx->id,
433 "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
434 }
435 q += 2;
436 size -= 8;
437 }
438
439 if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
440 hpsb_selfid_received(host, lsid);
441 }
442
443 hpsb_selfid_complete(host, phyid, isroot);
444
445 if (host->in_bus_reset) return; /* in bus reset again */
446
447 if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think we need this here
448 reg_set_bits(lynx, LINK_CONTROL,
449 LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
450 | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
451}
452
453
454
455/* This must be called with the respective queue_lock held. */
456static void send_next(struct ti_lynx *lynx, int what)
457{
458 struct ti_pcl pcl;
459 struct lynx_send_data *d;
460 struct hpsb_packet *packet;
461
462#if 0 /* has been removed from ieee1394 core */
463 d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
464#else
465 d = &lynx->async;
466#endif
467 if (!list_empty(&d->pcl_queue)) {
468 PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
469 BUG();
470 }
471
472 packet = driver_packet(d->queue.next);
473 list_move_tail(&packet->driver_list, &d->pcl_queue);
474
475 d->header_dma = pci_map_single(lynx->dev, packet->header,
476 packet->header_size, PCI_DMA_TODEVICE);
477 if (packet->data_size) {
478 d->data_dma = pci_map_single(lynx->dev, packet->data,
479 packet->data_size,
480 PCI_DMA_TODEVICE);
481 } else {
482 d->data_dma = 0;
483 }
484
485 pcl.next = PCL_NEXT_INVALID;
486 pcl.async_error_next = PCL_NEXT_INVALID;
487 pcl.pcl_status = 0;
488 pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
489#ifndef __BIG_ENDIAN
490 pcl.buffer[0].control |= PCL_BIGENDIAN;
491#endif
492 pcl.buffer[0].pointer = d->header_dma;
493 pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
494 pcl.buffer[1].pointer = d->data_dma;
495
496 switch (packet->type) {
497 case hpsb_async:
498 pcl.buffer[0].control |= PCL_CMD_XMT;
499 break;
500#if 0 /* has been removed from ieee1394 core */
501 case hpsb_iso:
502 pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
503 break;
504#endif
505 case hpsb_raw:
506 pcl.buffer[0].control |= PCL_CMD_UNFXMT;
507 break;
508 }
509
510 put_pcl(lynx, d->pcl, &pcl);
511 run_pcl(lynx, d->pcl_start, d->channel);
512}
513
514
515/* called from subsystem core */
516static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
517{
518 struct ti_lynx *lynx = host->hostdata;
519 struct lynx_send_data *d;
520 unsigned long flags;
521
522 if (packet->data_size >= 4096) {
523 PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%zd)",
524 packet->data_size);
525 return -EOVERFLOW;
526 }
527
528 switch (packet->type) {
529 case hpsb_async:
530 case hpsb_raw:
531 d = &lynx->async;
532 break;
533#if 0 /* has been removed from ieee1394 core */
534 case hpsb_iso:
535 d = &lynx->iso_send;
536 break;
537#endif
538 default:
539 PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
540 packet->type);
541 return -EINVAL;
542 }
543
544 if (packet->tcode == TCODE_WRITEQ
545 || packet->tcode == TCODE_READQ_RESPONSE) {
546 cpu_to_be32s(&packet->header[3]);
547 }
548
549 spin_lock_irqsave(&d->queue_lock, flags);
550
551 list_add_tail(&packet->driver_list, &d->queue);
552 if (list_empty(&d->pcl_queue))
553 send_next(lynx, packet->type);
554
555 spin_unlock_irqrestore(&d->queue_lock, flags);
556
557 return 0;
558}
559
560
561/* called from subsystem core */
562static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
563{
564 struct ti_lynx *lynx = host->hostdata;
565 int retval = 0;
566 struct hpsb_packet *packet;
567 LIST_HEAD(packet_list);
568 unsigned long flags;
569 int phy_reg;
570
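 /* PHY register bits used below: register 1 holds RHB (root hold-off bit,
  * 0x80) and IBR (initiate bus reset, 0x40); in the 1394a extended register
  * set, register 5 bit 0x40 is ISBR (initiate short bus reset). */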
571 switch (cmd) {
572 case RESET_BUS:
573 if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
574 retval = 0;
575 break;
576 }
577
578 switch (arg) {
579 case SHORT_RESET:
580 if (lynx->phyic.reg_1394a) {
581 phy_reg = get_phy_reg(lynx, 5);
582 if (phy_reg == -1) {
583 PRINT(KERN_ERR, lynx->id, "cannot reset bus because PHY register read failed");
584 retval = -1;
585 break;
586 }
587 phy_reg |= 0x40;
588
589 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");
590
591 lynx->selfid_size = -1;
592 lynx->phy_reg0 = -1;
593 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
594 break;
595 } else {
596 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset because of old PHY");
597 /* fall through to long bus reset */
598 }
599 case LONG_RESET:
600 phy_reg = get_phy_reg(lynx, 1);
601 if (phy_reg == -1) {
602 PRINT(KERN_ERR, lynx->id, "cannot reset bus because PHY register read failed");
603 retval = -1;
604 break;
605 }
606 phy_reg |= 0x40;
607
608 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");
609
610 lynx->selfid_size = -1;
611 lynx->phy_reg0 = -1;
612 set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
613 break;
614 case SHORT_RESET_NO_FORCE_ROOT:
615 if (lynx->phyic.reg_1394a) {
616 phy_reg = get_phy_reg(lynx, 1);
617 if (phy_reg == -1) {
618 PRINT(KERN_ERR, lynx->id, "cannot reset bus because PHY register read failed");
619 retval = -1;
620 break;
621 }
622 if (phy_reg & 0x80) {
623 phy_reg &= ~0x80;
624 set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
625 }
626
627 phy_reg = get_phy_reg(lynx, 5);
628 if (phy_reg == -1) {
629 PRINT(KERN_ERR, lynx->id, "cannot reset bus because PHY register read failed");
630 retval = -1;
631 break;
632 }
633 phy_reg |= 0x40;
634
635 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");
636
637 lynx->selfid_size = -1;
638 lynx->phy_reg0 = -1;
639 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
640 break;
641 } else {
642 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset because of old PHY");
643 /* fall through to long bus reset */
644 }
645 case LONG_RESET_NO_FORCE_ROOT:
646 phy_reg = get_phy_reg(lynx, 1);
647 if (phy_reg == -1) {
648 PRINT(KERN_ERR, lynx->id, "cannot reset bus because PHY register read failed");
649 retval = -1;
650 break;
651 }
652 phy_reg &= ~0x80;
653 phy_reg |= 0x40;
654
655 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");
656
657 lynx->selfid_size = -1;
658 lynx->phy_reg0 = -1;
659 set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
660 break;
661 case SHORT_RESET_FORCE_ROOT:
662 if (lynx->phyic.reg_1394a) {
663 phy_reg = get_phy_reg(lynx, 1);
664 if (phy_reg == -1) {
665 PRINT(KERN_ERR, lynx->id, "cannot reset bus because PHY register read failed");
666 retval = -1;
667 break;
668 }
669 if (!(phy_reg & 0x80)) {
670 phy_reg |= 0x80;
671 set_phy_reg(lynx, 1, phy_reg); /* set RHB */
672 }
673
674 phy_reg = get_phy_reg(lynx, 5);
675 if (phy_reg == -1) {
676 PRINT(KERN_ERR, lynx->id, "cannot reset bus because PHY register read failed");
677 retval = -1;
678 break;
679 }
680 phy_reg |= 0x40;
681
682 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");
683
684 lynx->selfid_size = -1;
685 lynx->phy_reg0 = -1;
686 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
687 break;
688 } else {
689 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset because of old PHY");
690 /* fall through to long bus reset */
691 }
692 case LONG_RESET_FORCE_ROOT:
693 phy_reg = get_phy_reg(lynx, 1);
694 if (phy_reg == -1) {
695 PRINT(KERN_ERR, lynx->id, "cannot reset bus because PHY register read failed");
696 retval = -1;
697 break;
698 }
699 phy_reg |= 0xc0;
700
701 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");
702
703 lynx->selfid_size = -1;
704 lynx->phy_reg0 = -1;
705 set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
706 break;
707 default:
708 PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
709 retval = -1;
710 }
711
712 break;
713
714 case GET_CYCLE_COUNTER:
715 retval = reg_read(lynx, CYCLE_TIMER);
716 break;
717
718 case SET_CYCLE_COUNTER:
719 reg_write(lynx, CYCLE_TIMER, arg);
720 break;
721
722 case SET_BUS_ID:
723 reg_write(lynx, LINK_ID,
724 (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
725 break;
726
727 case ACT_CYCLE_MASTER:
728 if (arg) {
729 reg_set_bits(lynx, LINK_CONTROL,
730 LINK_CONTROL_CYCMASTER);
731 } else {
732 reg_clear_bits(lynx, LINK_CONTROL,
733 LINK_CONTROL_CYCMASTER);
734 }
735 break;
736
737 case CANCEL_REQUESTS:
738 spin_lock_irqsave(&lynx->async.queue_lock, flags);
739
740 reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
741 list_splice_init(&lynx->async.queue, &packet_list);
742
743 if (list_empty(&lynx->async.pcl_queue)) {
744 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
745 PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
746 } else {
747 struct ti_pcl pcl;
748 u32 ack;
749
750 PRINT(KERN_INFO, lynx->id, "cancelling async packet that was already in PCL");
751
752 get_pcl(lynx, lynx->async.pcl, &pcl);
753
754 packet = driver_packet(lynx->async.pcl_queue.next);
755 list_del_init(&packet->driver_list);
756
757 pci_unmap_single(lynx->dev, lynx->async.header_dma,
758 packet->header_size, PCI_DMA_TODEVICE);
759 if (packet->data_size) {
760 pci_unmap_single(lynx->dev, lynx->async.data_dma,
761 packet->data_size, PCI_DMA_TODEVICE);
762 }
763
764 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
765
766 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
767 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
768 ack = (pcl.pcl_status >> 15) & 0xf;
769 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
770 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
771 } else {
772 ack = (pcl.pcl_status >> 15) & 0xf;
773 }
774 } else {
775 PRINT(KERN_INFO, lynx->id, "async packet was not completed");
776 ack = ACKX_ABORTED;
777 }
778 hpsb_packet_sent(host, packet, ack);
779 }
780
781 while (!list_empty(&packet_list)) {
782 packet = driver_packet(packet_list.next);
783 list_del_init(&packet->driver_list);
784 hpsb_packet_sent(host, packet, ACKX_ABORTED);
785 }
786
787 break;
788#if 0 /* has been removed from ieee1394 core */
789 case ISO_LISTEN_CHANNEL:
790 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
791
792 if (lynx->iso_rcv.chan_count++ == 0) {
793 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
794 DMA_WORD1_CMP_ENABLE_MASTER);
795 }
796
797 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
798 break;
799
800 case ISO_UNLISTEN_CHANNEL:
801 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
802
803 if (--lynx->iso_rcv.chan_count == 0) {
804 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
805 0);
806 }
807
808 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
809 break;
810#endif
811 default:
812 PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
813 retval = -1;
814 }
815
816 return retval;
817}
818
819
820/***************************************
821 * IEEE-1394 functionality section END *
822 ***************************************/
823
824
825/********************************************************
826 * Global stuff (interrupt handler, init/shutdown code) *
827 ********************************************************/
828
829
830static irqreturn_t lynx_irq_handler(int irq, void *dev_id)
831{
832 struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
833 struct hpsb_host *host = lynx->host;
834 u32 intmask;
835 u32 linkint;
836
837 linkint = reg_read(lynx, LINK_INT_STATUS);
838 intmask = reg_read(lynx, PCI_INT_STATUS);
839
840 if (!(intmask & PCI_INT_INT_PEND))
841 return IRQ_NONE;
842
843 PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
844 linkint);
845
846 reg_write(lynx, LINK_INT_STATUS, linkint);
847 reg_write(lynx, PCI_INT_STATUS, intmask);
848
849 if (intmask & PCI_INT_1394) {
850 if (linkint & LINK_INT_PHY_TIMEOUT) {
851 PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
852 }
853 if (linkint & LINK_INT_PHY_BUSRESET) {
854 PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
855 lynx->selfid_size = -1;
856 lynx->phy_reg0 = -1;
857 if (!host->in_bus_reset)
858 hpsb_bus_reset(host);
859 }
860 if (linkint & LINK_INT_PHY_REG_RCVD) {
861 u32 reg;
862
863 spin_lock(&lynx->phy_reg_lock);
864 reg = reg_read(lynx, LINK_PHY);
865 spin_unlock(&lynx->phy_reg_lock);
866
867 if (!host->in_bus_reset) {
868 PRINT(KERN_INFO, lynx->id,
869 "phy reg received without reset");
870 } else if (reg & 0xf00) {
871 PRINT(KERN_INFO, lynx->id,
872 "unsolicited phy reg %d received",
873 (reg >> 8) & 0xf);
874 } else {
875 lynx->phy_reg0 = reg & 0xff;
876 handle_selfid(lynx, host);
877 }
878 }
879 if (linkint & LINK_INT_ISO_STUCK) {
880 PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
881 }
882 if (linkint & LINK_INT_ASYNC_STUCK) {
883 PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
884 }
885 if (linkint & LINK_INT_SENT_REJECT) {
886 PRINT(KERN_INFO, lynx->id, "sent reject");
887 }
888 if (linkint & LINK_INT_TX_INVALID_TC) {
889 PRINT(KERN_INFO, lynx->id, "invalid transaction code");
890 }
891 if (linkint & LINK_INT_GRF_OVERFLOW) {
892 /* flush FIFO if overflow happens during reset */
893 if (host->in_bus_reset)
894 reg_write(lynx, FIFO_CONTROL,
895 FIFO_CONTROL_GRF_FLUSH);
896 PRINT(KERN_INFO, lynx->id, "GRF overflow");
897 }
898 if (linkint & LINK_INT_ITF_UNDERFLOW) {
899 PRINT(KERN_INFO, lynx->id, "ITF underflow");
900 }
901 if (linkint & LINK_INT_ATF_UNDERFLOW) {
902 PRINT(KERN_INFO, lynx->id, "ATF underflow");
903 }
904 }
905
906 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
907 PRINTD(KERN_DEBUG, lynx->id, "iso receive");
908
909 spin_lock(&lynx->iso_rcv.lock);
910
911 lynx->iso_rcv.stat[lynx->iso_rcv.next] =
912 reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
913
914 lynx->iso_rcv.used++;
915 lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
916
917 if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
918 || !lynx->iso_rcv.chan_count) {
919 PRINTD(KERN_DEBUG, lynx->id, "stopped");
920 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
921 }
922
923 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
924 CHANNEL_ISO_RCV);
925
926 spin_unlock(&lynx->iso_rcv.lock);
927
928 tasklet_schedule(&lynx->iso_rcv.tq);
929 }
930
931 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
932 PRINTD(KERN_DEBUG, lynx->id, "async sent");
933 spin_lock(&lynx->async.queue_lock);
934
935 if (list_empty(&lynx->async.pcl_queue)) {
936 spin_unlock(&lynx->async.queue_lock);
937 PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
938 } else {
939 struct ti_pcl pcl;
940 u32 ack;
941 struct hpsb_packet *packet;
942
943 get_pcl(lynx, lynx->async.pcl, &pcl);
944
945 packet = driver_packet(lynx->async.pcl_queue.next);
946 list_del_init(&packet->driver_list);
947
948 pci_unmap_single(lynx->dev, lynx->async.header_dma,
949 packet->header_size, PCI_DMA_TODEVICE);
950 if (packet->data_size) {
951 pci_unmap_single(lynx->dev, lynx->async.data_dma,
952 packet->data_size, PCI_DMA_TODEVICE);
953 }
954
955 if (!list_empty(&lynx->async.queue)) {
956 send_next(lynx, hpsb_async);
957 }
958
959 spin_unlock(&lynx->async.queue_lock);
960
961 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
962 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
963 ack = (pcl.pcl_status >> 15) & 0xf;
964 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
965 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
966 } else {
967 ack = (pcl.pcl_status >> 15) & 0xf;
968 }
969 } else {
970 PRINT(KERN_INFO, lynx->id, "async packet was not completed");
971 ack = ACKX_SEND_ERROR;
972 }
973 hpsb_packet_sent(host, packet, ack);
974 }
975 }
976
977 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
978 PRINTD(KERN_DEBUG, lynx->id, "iso sent");
979 spin_lock(&lynx->iso_send.queue_lock);
980
981 if (list_empty(&lynx->iso_send.pcl_queue)) {
982 spin_unlock(&lynx->iso_send.queue_lock);
983 PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
984 } else {
985 struct ti_pcl pcl;
986 u32 ack;
987 struct hpsb_packet *packet;
988
989 get_pcl(lynx, lynx->iso_send.pcl, &pcl);
990
991 packet = driver_packet(lynx->iso_send.pcl_queue.next);
992 list_del_init(&packet->driver_list);
993
994 pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
995 packet->header_size, PCI_DMA_TODEVICE);
996 if (packet->data_size) {
997 pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
998 packet->data_size, PCI_DMA_TODEVICE);
999 }
1000#if 0 /* has been removed from ieee1394 core */
1001 if (!list_empty(&lynx->iso_send.queue)) {
1002 send_next(lynx, hpsb_iso);
1003 }
1004#endif
1005 spin_unlock(&lynx->iso_send.queue_lock);
1006
1007 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
1008 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
1009 ack = (pcl.pcl_status >> 15) & 0xf;
1010 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
1011 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
1012 } else {
1013 ack = (pcl.pcl_status >> 15) & 0xf;
1014 }
1015 } else {
1016 PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
1017 ack = ACKX_SEND_ERROR;
1018 }
1019
1020 hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
1021 }
1022 }
1023
1024 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
1025 /* general receive DMA completed */
1026 int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
1027
1028 PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
1029 stat & 0x1fff);
1030
1031 if (stat & DMA_CHAN_STAT_SELFID) {
1032 lynx->selfid_size = stat & 0x1fff;
1033 handle_selfid(lynx, host);
1034 } else {
1035 quadlet_t *q_data = lynx->rcv_page;
1036 if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
1037 || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
1038 cpu_to_be32s(q_data + 3);
1039 }
1040 hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
1041 }
1042
1043 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
1044 }
1045
1046 return IRQ_HANDLED;
1047}
1048
1049
1050static void iso_rcv_bh(struct ti_lynx *lynx)
1051{
1052 unsigned int idx;
1053 quadlet_t *data;
1054 unsigned long flags;
1055
1056 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1057
1058 while (lynx->iso_rcv.used) {
1059 idx = lynx->iso_rcv.last;
1060 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
1061
1062 data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
1063 + (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;
1064
1065 if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
1066 PRINT(KERN_ERR, lynx->id,
1067 "iso length mismatch 0x%08x/0x%08x", *data,
1068 lynx->iso_rcv.stat[idx]);
1069 }
1070
1071 if (lynx->iso_rcv.stat[idx]
1072 & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
1073 PRINT(KERN_INFO, lynx->id,
1074 "iso receive error on %d to 0x%p", idx, data);
1075 } else {
1076 hpsb_packet_received(lynx->host, data,
1077 lynx->iso_rcv.stat[idx] & 0x1fff,
1078 0);
1079 }
1080
1081 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1082 lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
1083 lynx->iso_rcv.used--;
1084 }
1085
1086 if (lynx->iso_rcv.chan_count) {
1087 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
1088 DMA_WORD1_CMP_ENABLE_MASTER);
1089 }
1090 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
1091}
1092
1093
1094static void remove_card(struct pci_dev *dev)
1095{
1096 struct ti_lynx *lynx;
1097 struct device *lynx_dev;
1098 int i;
1099
1100 lynx = pci_get_drvdata(dev);
1101 if (!lynx) return;
1102 pci_set_drvdata(dev, NULL);
1103
1104 lynx_dev = get_device(&lynx->host->device);
1105
1106 switch (lynx->state) {
1107 case is_host:
1108 reg_write(lynx, PCI_INT_ENABLE, 0);
1109 hpsb_remove_host(lynx->host);
1110 case have_intr:
1111 reg_write(lynx, PCI_INT_ENABLE, 0);
1112 free_irq(lynx->dev->irq, lynx);
1113
1114 /* Disable IRM Contender and LCtrl */
1115 if (lynx->phyic.reg_1394a)
1116 set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));
1117
1118 /* Let all other nodes know to ignore us */
1119 lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
1120
1121 case have_iomappings:
1122 reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1123 /* Fix buggy cards with autoboot pin not tied low: */
1124 reg_write(lynx, DMA0_CHAN_CTRL, 0);
1125 iounmap(lynx->registers);
1126 iounmap(lynx->local_rom);
1127 iounmap(lynx->local_ram);
1128 iounmap(lynx->aux_port);
1129 case have_1394_buffers:
1130 for (i = 0; i < ISORCV_PAGES; i++) {
1131 if (lynx->iso_rcv.page[i]) {
1132 pci_free_consistent(lynx->dev, PAGE_SIZE,
1133 lynx->iso_rcv.page[i],
1134 lynx->iso_rcv.page_dma[i]);
1135 }
1136 }
1137 pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
1138 lynx->rcv_page_dma);
1139 case have_aux_buf:
1140 case have_pcl_mem:
1141 pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
1142 lynx->pcl_mem_dma);
1143 case clear:
1144 /* do nothing - already freed */
1145 ;
1146 }
1147
1148 tasklet_kill(&lynx->iso_rcv.tq);
1149
1150 if (lynx_dev)
1151 put_device(lynx_dev);
1152}
1153
1154
1155static int __devinit add_card(struct pci_dev *dev,
1156 const struct pci_device_id *devid_is_unused)
1157{
1158#define FAIL(fmt, args...) do { \
1159 PRINT_G(KERN_ERR, fmt , ## args); \
1160 remove_card(dev); \
1161 return error; \
1162 } while (0)
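/* FAIL relies on remove_card() to unwind a partially initialized card: the
 * switch over lynx->state there deliberately falls through, so whatever was
 * set up by the time of the failure is torn down in reverse order. */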
1163
1164 char irq_buf[16];
1165 struct hpsb_host *host;
1166 struct ti_lynx *lynx; /* shortcut to currently handled device */
1167 struct ti_pcl pcl;
1168 u32 *pcli;
1169 int i;
1170 int error;
1171
1172 error = -ENXIO;
1173
1174 if (pci_set_dma_mask(dev, DMA_BIT_MASK(32)))
1175 FAIL("32-bit DMA addressing not supported for PCILynx hardware");
1176 if (pci_enable_device(dev))
1177 FAIL("failed to enable PCILynx hardware");
1178 pci_set_master(dev);
1179
1180 error = -ENOMEM;
1181
1182 host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
1183 if (!host) FAIL("failed to allocate control structure memory");
1184
1185 lynx = host->hostdata;
1186 lynx->id = card_id++;
1187 lynx->dev = dev;
1188 lynx->state = clear;
1189 lynx->host = host;
1190 host->pdev = dev;
1191 pci_set_drvdata(dev, lynx);
1192
1193 spin_lock_init(&lynx->lock);
1194 spin_lock_init(&lynx->phy_reg_lock);
1195
1196 lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
1197 &lynx->pcl_mem_dma);
1198
1199 if (lynx->pcl_mem != NULL) {
1200 lynx->state = have_pcl_mem;
1201 PRINT(KERN_INFO, lynx->id,
1202 "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
1203 lynx->pcl_mem);
1204 } else {
1205 FAIL("failed to allocate PCL memory area");
1206 }
1207
1208 lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
1209 &lynx->rcv_page_dma);
1210 if (lynx->rcv_page == NULL) {
1211 FAIL("failed to allocate receive buffer");
1212 }
1213 lynx->state = have_1394_buffers;
1214
1215 for (i = 0; i < ISORCV_PAGES; i++) {
1216 lynx->iso_rcv.page[i] =
1217 pci_alloc_consistent(dev, PAGE_SIZE,
1218 &lynx->iso_rcv.page_dma[i]);
1219 if (lynx->iso_rcv.page[i] == NULL) {
1220 FAIL("failed to allocate iso receive buffers");
1221 }
1222 }
1223
1224 lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
1225 PCILYNX_MAX_REGISTER);
1226 lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
1227 lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
1228 lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
1229 PCILYNX_MAX_MEMORY);
1230 lynx->state = have_iomappings;
1231
1232 if (lynx->registers == NULL) {
1233 FAIL("failed to remap registers - card not accessible");
1234 }
1235
1236 reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1237 /* Fix buggy cards with autoboot pin not tied low: */
1238 reg_write(lynx, DMA0_CHAN_CTRL, 0);
1239
1240 sprintf (irq_buf, "%d", dev->irq);
1241
1242 if (!request_irq(dev->irq, lynx_irq_handler, IRQF_SHARED,
1243 PCILYNX_DRIVER_NAME, lynx)) {
1244 PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
1245 lynx->state = have_intr;
1246 } else {
1247 FAIL("failed to allocate shared interrupt %s", irq_buf);
1248 }
1249
1250 /* alloc_pcl return values are not checked; it is expected that the
1251 * provided PCL space is sufficient for the initial allocations */
1252 lynx->rcv_pcl = alloc_pcl(lynx);
1253 lynx->rcv_pcl_start = alloc_pcl(lynx);
1254 lynx->async.pcl = alloc_pcl(lynx);
1255 lynx->async.pcl_start = alloc_pcl(lynx);
1256 lynx->iso_send.pcl = alloc_pcl(lynx);
1257 lynx->iso_send.pcl_start = alloc_pcl(lynx);
1258
1259 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1260 lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
1261 }
1262 lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
1263
1264 /* all allocations successful - simple init stuff follows */
1265
1266 reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
1267
1268 tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
1269 (unsigned long)lynx);
1270
1271 spin_lock_init(&lynx->iso_rcv.lock);
1272
1273 spin_lock_init(&lynx->async.queue_lock);
1274 lynx->async.channel = CHANNEL_ASYNC_SEND;
1275 spin_lock_init(&lynx->iso_send.queue_lock);
1276 lynx->iso_send.channel = CHANNEL_ISO_SEND;
1277
1278 PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
1279 "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
1280 lynx->local_ram, lynx->aux_port);
1281
1282 /* now look for the PHY register set */
1283 if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
1284 lynx->phyic.reg_1394a = 1;
1285 PRINT(KERN_INFO, lynx->id,
1286 "found 1394a conform PHY (using extended register set)");
1287 lynx->phyic.vendor = get_phy_vendorid(lynx);
1288 lynx->phyic.product = get_phy_productid(lynx);
1289 } else {
1290 lynx->phyic.reg_1394a = 0;
1291 PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
1292 }
1293
1294 lynx->selfid_size = -1;
1295 lynx->phy_reg0 = -1;
1296
1297 INIT_LIST_HEAD(&lynx->async.queue);
1298 INIT_LIST_HEAD(&lynx->async.pcl_queue);
1299 INIT_LIST_HEAD(&lynx->iso_send.queue);
1300 INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);
1301
1302 pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
1303 put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
1304
1305 pcl.next = PCL_NEXT_INVALID;
1306 pcl.async_error_next = PCL_NEXT_INVALID;
1307
1308 pcl.buffer[0].control = PCL_CMD_RCV | 16;
1309#ifndef __BIG_ENDIAN
1310 pcl.buffer[0].control |= PCL_BIGENDIAN;
1311#endif
1312 pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
1313
1314 pcl.buffer[0].pointer = lynx->rcv_page_dma;
1315 pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
1316 put_pcl(lynx, lynx->rcv_pcl, &pcl);
1317
1318 pcl.next = pcl_bus(lynx, lynx->async.pcl);
1319 pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
1320 put_pcl(lynx, lynx->async.pcl_start, &pcl);
1321
1322 pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
1323 pcl.async_error_next = PCL_NEXT_INVALID;
1324 put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
1325
1326 pcl.next = PCL_NEXT_INVALID;
1327 pcl.async_error_next = PCL_NEXT_INVALID;
1328 pcl.buffer[0].control = PCL_CMD_RCV | 4;
1329#ifndef __BIG_ENDIAN
1330 pcl.buffer[0].control |= PCL_BIGENDIAN;
1331#endif
1332 pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
1333
1334 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1335 int page = i / ISORCV_PER_PAGE;
1336 int sec = i % ISORCV_PER_PAGE;
1337
1338 pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
1339 + sec * MAX_ISORCV_SIZE;
1340 pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
1341 put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
1342 }
1343
1344 pcli = (u32 *)&pcl;
1345 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1346 pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
1347 }
1348 put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
1349
1350 /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
1351 reg_write(lynx, FIFO_SIZES, 0x003030a0);
1352 /* 20 byte threshold before triggering PCI transfer */
1353 reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
1354 /* threshold on both send FIFOs before transmitting:
1355 FIFO size - cache line size - 1 */
1356 i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
1357 i = 0x30 - i - 1;
1358 reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
1359
1360 reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
1361
1362 reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
1363 | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
1364 | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
1365 | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
1366 | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
1367 | LINK_INT_ATF_UNDERFLOW);
1368
1369 reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
1370 reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
1371 reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
1372 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
1373 DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
1374 | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
1375 | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
1376
1377 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
1378
1379 reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
1380 reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
1381 reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
1382 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
1383
1384 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
1385
1386 reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
1387 | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
1388 | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
1389 | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);
1390
1391 if (!lynx->phyic.reg_1394a) {
1392 if (!hpsb_disable_irm) {
1393 /* attempt to enable contender bit -FIXME- would this
1394 * work elsewhere? */
1395 reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
1396 reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
1397 }
1398 } else {
1399 /* set the contender (if appropriate) and LCtrl bit in the
1400 * extended PHY register set. (Should check that PHY_02_EXTENDED
1401 * is set in register 2?)
1402 */
1403 i = get_phy_reg(lynx, 4);
1404 i |= PHY_04_LCTRL;
1405 if (hpsb_disable_irm)
1406 i &= ~PHY_04_CONTENDER;
1407 else
1408 i |= PHY_04_CONTENDER;
1409 if (i != -1) set_phy_reg(lynx, 4, i);
1410 }
1411
1412 if (!skip_eeprom)
1413 {
1414 /* needed for i2c communication with serial eeprom */
1415 struct i2c_adapter *i2c_ad;
1416 struct i2c_algo_bit_data i2c_adapter_data;
1417
1418 error = -ENOMEM;
1419 i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL);
1420 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
1421
1422 strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name));
1423 i2c_adapter_data = bit_data;
1424 i2c_ad->algo_data = &i2c_adapter_data;
1425 i2c_adapter_data.data = lynx;
1426 i2c_ad->dev.parent = &dev->dev;
1427
1428 PRINTD(KERN_DEBUG, lynx->id, "original eeprom control: %d",
1429 reg_read(lynx, SERIAL_EEPROM_CONTROL));
1430
1431 /* reset hardware to sane state */
1432 lynx->i2c_driven_state = 0x00000070;
1433 reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);
1434
1435 if (i2c_bit_add_bus(i2c_ad) < 0)
1436 {
1437 kfree(i2c_ad);
1438 error = -ENXIO;
1439 FAIL("unable to register i2c");
1440 }
1441 else
1442 {
1443 /* do i2c stuff */
1444 unsigned char i2c_cmd = 0x10;
1445 struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
1446 { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
1447 };
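 /* Standard serial EEPROM access (24Cxx-style, presumably): first
  * write the single byte 0x10 as the word address to device 0x50,
  * then read back 20 bytes, i.e. the five quadlets of the bus info
  * block. */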
1448
1449 /* we use i2c_transfer because we have no i2c_client
1450 at hand */
1451 if (i2c_transfer(i2c_ad, msg, 2) < 0) {
1452 PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
1453 } else {
1454 PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
1455 /* FIXME: probably we should rewrite the max_rec, max_ROM(1394a),
1456 * generation(1394a) and link_spd(1394a) fields and recalculate
1457 * the CRC */
1458
1459 for (i = 0; i < 5 ; i++)
1460 PRINTD(KERN_DEBUG, lynx->id, "Bus info block quadlet %i: %08x",
1461 i, be32_to_cpu(lynx->bus_info_block[i]));
1462
1463 /* check info_length, crc_length, and the 1394 magic number to verify that this is really a bus info block */
1464 if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
1465 (lynx->bus_info_block[1] == IEEE1394_BUSID_MAGIC))
1466 {
1467 PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from serial eeprom");
1468 } else {
1469 kfree(i2c_ad);
1470 error = -ENXIO;
1471 FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
1472 }
1473
1474 }
1475
1476 i2c_del_adapter(i2c_ad);
1477 kfree(i2c_ad);
1478 }
1479 }
1480
1481 host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
1482 host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
1483 host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
1484 host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
1485 if (!lynx->phyic.reg_1394a)
1486 host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
1487 else
1488 host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;
1489
1490 if (hpsb_add_host(host)) {
1491 error = -ENOMEM;
1492 FAIL("Failed to register host with highlevel");
1493 }
1494
1495 lynx->state = is_host;
1496
1497 return 0;
1498#undef FAIL
1499}
1500
1501
1502static struct pci_device_id pci_table[] = {
1503 {
1504 .vendor = PCI_VENDOR_ID_TI,
1505 .device = PCI_DEVICE_ID_TI_PCILYNX,
1506 .subvendor = PCI_ANY_ID,
1507 .subdevice = PCI_ANY_ID,
1508 },
1509 { } /* Terminating entry */
1510};
1511
1512static struct pci_driver lynx_pci_driver = {
1513 .name = PCILYNX_DRIVER_NAME,
1514 .id_table = pci_table,
1515 .probe = add_card,
1516 .remove = remove_card,
1517};
1518
1519static struct hpsb_host_driver lynx_driver = {
1520 .owner = THIS_MODULE,
1521 .name = PCILYNX_DRIVER_NAME,
1522 .set_hw_config_rom = NULL,
1523 .transmit_packet = lynx_transmit,
1524 .devctl = lynx_devctl,
1525 .isoctl = NULL,
1526};
1527
1528MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
1529MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
1530MODULE_LICENSE("GPL");
1531MODULE_SUPPORTED_DEVICE("pcilynx");
1532MODULE_DEVICE_TABLE(pci, pci_table);
1533
1534static int __init pcilynx_init(void)
1535{
1536 int ret;
1537
1538 ret = pci_register_driver(&lynx_pci_driver);
1539 if (ret < 0) {
1540 PRINT_G(KERN_ERR, "PCI module init failed");
1541 return ret;
1542 }
1543
1544 return 0;
1545}
1546
1547static void __exit pcilynx_cleanup(void)
1548{
1549 pci_unregister_driver(&lynx_pci_driver);
1550}
1551
1552
1553module_init(pcilynx_init);
1554module_exit(pcilynx_cleanup);
diff --git a/drivers/ieee1394/pcilynx.h b/drivers/ieee1394/pcilynx.h
deleted file mode 100644
index 693a169acea3..000000000000
--- a/drivers/ieee1394/pcilynx.h
+++ /dev/null
@@ -1,468 +0,0 @@
1#ifndef __PCILYNX_H__
2#define __PCILYNX_H__
3
4
5#define PCILYNX_DRIVER_NAME "pcilynx"
6#define PCILYNX_MAJOR 177
7
8#define PCILYNX_MINOR_AUX_START 0
9#define PCILYNX_MINOR_ROM_START 16
10#define PCILYNX_MINOR_RAM_START 32
11
12#define PCILYNX_MAX_REGISTER 0xfff
13#define PCILYNX_MAX_MEMORY 0xffff
14
15#define PCI_DEVICE_ID_TI_PCILYNX 0x8000
16#define MAX_PCILYNX_CARDS 4
17#define LOCALRAM_SIZE 4096
18
19#define NUM_ISORCV_PCL 4
20#define MAX_ISORCV_SIZE 2048
21#define ISORCV_PER_PAGE (PAGE_SIZE / MAX_ISORCV_SIZE)
22#define ISORCV_PAGES (NUM_ISORCV_PCL / ISORCV_PER_PAGE)
23
24#define CHANNEL_LOCALBUS 0
25#define CHANNEL_ASYNC_RCV 1
26#define CHANNEL_ISO_RCV 2
27#define CHANNEL_ASYNC_SEND 3
28#define CHANNEL_ISO_SEND 4
29
30#define PCILYNX_CONFIG_ROM_LENGTH 1024
31
32typedef int pcl_t;
33
34struct ti_lynx {
35 int id; /* sequential card number */
36
37 spinlock_t lock;
38
39 struct pci_dev *dev;
40
41 struct {
42 unsigned reg_1394a:1;
43 u32 vendor;
44 u32 product;
45 } phyic;
46
47 enum { clear, have_intr, have_aux_buf, have_pcl_mem,
48 have_1394_buffers, have_iomappings, is_host } state;
49
50 /* remapped memory spaces */
51 void __iomem *registers;
52 void __iomem *local_rom;
53 void __iomem *local_ram;
54 void __iomem *aux_port;
55 __be32 bus_info_block[5];
56
57 /*
58 * use local RAM of LOCALRAM_SIZE bytes for PCLs, which allows for
59 * LOCALRAM_SIZE / 128 PCLs (each sized 128 bytes);
60 * the following is an allocation bitmap
61 */
62 u8 pcl_bmap[LOCALRAM_SIZE / 1024];
63
64 /* point to PCLs memory area if needed */
65 void *pcl_mem;
66 dma_addr_t pcl_mem_dma;
67
68 /* PCLs for local mem / aux transfers */
69 pcl_t dmem_pcl;
70
71 /* IEEE-1394 part follows */
72 struct hpsb_host *host;
73
74 int phyid, isroot;
75 int selfid_size;
76 int phy_reg0;
77
78 spinlock_t phy_reg_lock;
79
80 pcl_t rcv_pcl_start, rcv_pcl;
81 void *rcv_page;
82 dma_addr_t rcv_page_dma;
83 int rcv_active;
84
85 struct lynx_send_data {
86 pcl_t pcl_start, pcl;
87 struct list_head queue;
88 struct list_head pcl_queue; /* this queue contains at most one packet */
89 spinlock_t queue_lock;
90 dma_addr_t header_dma, data_dma;
91 int channel;
92 } async, iso_send;
93
94 struct {
95 pcl_t pcl[NUM_ISORCV_PCL];
96 u32 stat[NUM_ISORCV_PCL];
97 void *page[ISORCV_PAGES];
98 dma_addr_t page_dma[ISORCV_PAGES];
99 pcl_t pcl_start;
100 int chan_count;
101 int next, last, used, running;
102 struct tasklet_struct tq;
103 spinlock_t lock;
104 } iso_rcv;
105
106 u32 i2c_driven_state; /* the state to which we currently drive the Serial EEPROM Control register */
107};
108
109/* the per-file data structure for mem space access */
110struct memdata {
111 struct ti_lynx *lynx;
112 int cid;
113 atomic_t aux_intr_last_seen;
114 /* enum values are the same as LBUS_ADDR_SEL_* values below */
115 enum { rom = 0x10000, aux = 0x20000, ram = 0 } type;
116};
117
118
119
120/*
121 * Register read and write helper functions.
122 */
123static inline void reg_write(const struct ti_lynx *lynx, int offset, u32 data)
124{
125 writel(data, lynx->registers + offset);
126}
127
128static inline u32 reg_read(const struct ti_lynx *lynx, int offset)
129{
130 return readl(lynx->registers + offset);
131}
132
133static inline void reg_set_bits(const struct ti_lynx *lynx, int offset,
134 u32 mask)
135{
136 reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
137}
138
139static inline void reg_clear_bits(const struct ti_lynx *lynx, int offset,
140 u32 mask)
141{
142 reg_write(lynx, offset, (reg_read(lynx, offset) & ~mask));
143}
144
145
146
147/* chip register definitions follow */
148
149#define PCI_LATENCY_CACHELINE 0x0c
150
151#define MISC_CONTROL 0x40
152#define MISC_CONTROL_SWRESET (1<<0)
153
154#define SERIAL_EEPROM_CONTROL 0x44
155
156#define PCI_INT_STATUS 0x48
157#define PCI_INT_ENABLE 0x4c
158/* status and enable have identical bit numbers */
159#define PCI_INT_INT_PEND (1<<31)
160#define PCI_INT_FORCED_INT (1<<30)
161#define PCI_INT_SLV_ADR_PERR (1<<28)
162#define PCI_INT_SLV_DAT_PERR (1<<27)
163#define PCI_INT_MST_DAT_PERR (1<<26)
164#define PCI_INT_MST_DEV_TIMEOUT (1<<25)
165#define PCI_INT_INTERNAL_SLV_TIMEOUT (1<<23)
166#define PCI_INT_AUX_TIMEOUT (1<<18)
167#define PCI_INT_AUX_INT (1<<17)
168#define PCI_INT_1394 (1<<16)
169#define PCI_INT_DMA4_PCL (1<<9)
170#define PCI_INT_DMA4_HLT (1<<8)
171#define PCI_INT_DMA3_PCL (1<<7)
172#define PCI_INT_DMA3_HLT (1<<6)
173#define PCI_INT_DMA2_PCL (1<<5)
174#define PCI_INT_DMA2_HLT (1<<4)
175#define PCI_INT_DMA1_PCL (1<<3)
176#define PCI_INT_DMA1_HLT (1<<2)
177#define PCI_INT_DMA0_PCL (1<<1)
178#define PCI_INT_DMA0_HLT (1<<0)
179/* all DMA interrupts combined: */
180#define PCI_INT_DMA_ALL 0x3ff
181
182#define PCI_INT_DMA_HLT(chan) (1 << (chan * 2))
183#define PCI_INT_DMA_PCL(chan) (1 << (chan * 2 + 1))
184
185#define LBUS_ADDR 0xb4
186#define LBUS_ADDR_SEL_RAM (0x0<<16)
187#define LBUS_ADDR_SEL_ROM (0x1<<16)
188#define LBUS_ADDR_SEL_AUX (0x2<<16)
189#define LBUS_ADDR_SEL_ZV (0x3<<16)
190
191#define GPIO_CTRL_A 0xb8
192#define GPIO_CTRL_B 0xbc
193#define GPIO_DATA_BASE 0xc0
194
195#define DMA_BREG(base, chan) (base + chan * 0x20)
196#define DMA_SREG(base, chan) (base + chan * 0x10)
197
198#define DMA0_PREV_PCL 0x100
199#define DMA1_PREV_PCL 0x120
200#define DMA2_PREV_PCL 0x140
201#define DMA3_PREV_PCL 0x160
202#define DMA4_PREV_PCL 0x180
203#define DMA_PREV_PCL(chan) (DMA_BREG(DMA0_PREV_PCL, chan))
204
205#define DMA0_CURRENT_PCL 0x104
206#define DMA1_CURRENT_PCL 0x124
207#define DMA2_CURRENT_PCL 0x144
208#define DMA3_CURRENT_PCL 0x164
209#define DMA4_CURRENT_PCL 0x184
210#define DMA_CURRENT_PCL(chan) (DMA_BREG(DMA0_CURRENT_PCL, chan))
211
212#define DMA0_CHAN_STAT 0x10c
213#define DMA1_CHAN_STAT 0x12c
214#define DMA2_CHAN_STAT 0x14c
215#define DMA3_CHAN_STAT 0x16c
216#define DMA4_CHAN_STAT 0x18c
217#define DMA_CHAN_STAT(chan) (DMA_BREG(DMA0_CHAN_STAT, chan))
218/* CHAN_STATUS registers share bits */
219#define DMA_CHAN_STAT_SELFID (1<<31)
220#define DMA_CHAN_STAT_ISOPKT (1<<30)
221#define DMA_CHAN_STAT_PCIERR (1<<29)
222#define DMA_CHAN_STAT_PKTERR (1<<28)
223#define DMA_CHAN_STAT_PKTCMPL (1<<27)
224#define DMA_CHAN_STAT_SPECIALACK (1<<14)
225
226
227#define DMA0_CHAN_CTRL 0x110
228#define DMA1_CHAN_CTRL 0x130
229#define DMA2_CHAN_CTRL 0x150
230#define DMA3_CHAN_CTRL 0x170
231#define DMA4_CHAN_CTRL 0x190
232#define DMA_CHAN_CTRL(chan) (DMA_BREG(DMA0_CHAN_CTRL, chan))
233/* CHAN_CTRL registers share bits */
234#define DMA_CHAN_CTRL_ENABLE (1<<31)
235#define DMA_CHAN_CTRL_BUSY (1<<30)
236#define DMA_CHAN_CTRL_LINK (1<<29)
237
238#define DMA0_READY 0x114
239#define DMA1_READY 0x134
240#define DMA2_READY 0x154
241#define DMA3_READY 0x174
242#define DMA4_READY 0x194
243#define DMA_READY(chan) (DMA_BREG(DMA0_READY, chan))
244
245#define DMA_GLOBAL_REGISTER 0x908
246
247#define FIFO_SIZES 0xa00
248
249#define FIFO_CONTROL 0xa10
250#define FIFO_CONTROL_GRF_FLUSH (1<<4)
251#define FIFO_CONTROL_ITF_FLUSH (1<<3)
252#define FIFO_CONTROL_ATF_FLUSH (1<<2)
253
254#define FIFO_XMIT_THRESHOLD 0xa14
255
256#define DMA0_WORD0_CMP_VALUE 0xb00
257#define DMA1_WORD0_CMP_VALUE 0xb10
258#define DMA2_WORD0_CMP_VALUE 0xb20
259#define DMA3_WORD0_CMP_VALUE 0xb30
260#define DMA4_WORD0_CMP_VALUE 0xb40
261#define DMA_WORD0_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD0_CMP_VALUE, chan))
262
263#define DMA0_WORD0_CMP_ENABLE 0xb04
264#define DMA1_WORD0_CMP_ENABLE 0xb14
265#define DMA2_WORD0_CMP_ENABLE 0xb24
266#define DMA3_WORD0_CMP_ENABLE 0xb34
267#define DMA4_WORD0_CMP_ENABLE 0xb44
268#define DMA_WORD0_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD0_CMP_ENABLE,chan))
269
270#define DMA0_WORD1_CMP_VALUE 0xb08
271#define DMA1_WORD1_CMP_VALUE 0xb18
272#define DMA2_WORD1_CMP_VALUE 0xb28
273#define DMA3_WORD1_CMP_VALUE 0xb38
274#define DMA4_WORD1_CMP_VALUE 0xb48
275#define DMA_WORD1_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD1_CMP_VALUE, chan))
276
277#define DMA0_WORD1_CMP_ENABLE 0xb0c
278#define DMA1_WORD1_CMP_ENABLE 0xb1c
279#define DMA2_WORD1_CMP_ENABLE 0xb2c
280#define DMA3_WORD1_CMP_ENABLE 0xb3c
281#define DMA4_WORD1_CMP_ENABLE 0xb4c
282#define DMA_WORD1_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD1_CMP_ENABLE,chan))
283/* word 1 compare enable flags */
284#define DMA_WORD1_CMP_MATCH_OTHERBUS (1<<15)
285#define DMA_WORD1_CMP_MATCH_BROADCAST (1<<14)
286#define DMA_WORD1_CMP_MATCH_BUS_BCAST (1<<13)
287#define DMA_WORD1_CMP_MATCH_LOCAL_NODE (1<<12)
288#define DMA_WORD1_CMP_MATCH_EXACT (1<<11)
289#define DMA_WORD1_CMP_ENABLE_SELF_ID (1<<10)
290#define DMA_WORD1_CMP_ENABLE_MASTER (1<<8)
291
292#define LINK_ID 0xf00
293#define LINK_ID_BUS(id) (id<<22)
294#define LINK_ID_NODE(id) (id<<16)
295
296#define LINK_CONTROL 0xf04
297#define LINK_CONTROL_BUSY (1<<29)
298#define LINK_CONTROL_TX_ISO_EN (1<<26)
299#define LINK_CONTROL_RX_ISO_EN (1<<25)
300#define LINK_CONTROL_TX_ASYNC_EN (1<<24)
301#define LINK_CONTROL_RX_ASYNC_EN (1<<23)
302#define LINK_CONTROL_RESET_TX (1<<21)
303#define LINK_CONTROL_RESET_RX (1<<20)
304#define LINK_CONTROL_CYCMASTER (1<<11)
305#define LINK_CONTROL_CYCSOURCE (1<<10)
306#define LINK_CONTROL_CYCTIMEREN (1<<9)
307#define LINK_CONTROL_RCV_CMP_VALID (1<<7)
308#define LINK_CONTROL_SNOOP_ENABLE (1<<6)
309
310#define CYCLE_TIMER 0xf08
311
312#define LINK_PHY 0xf0c
313#define LINK_PHY_READ (1<<31)
314#define LINK_PHY_WRITE (1<<30)
315#define LINK_PHY_ADDR(addr) (addr<<24)
316#define LINK_PHY_WDATA(data) (data<<16)
317#define LINK_PHY_RADDR(addr) (addr<<8)
318
319
320#define LINK_INT_STATUS 0xf14
321#define LINK_INT_ENABLE 0xf18
322/* status and enable have identical bit numbers */
323#define LINK_INT_LINK_INT (1<<31)
324#define LINK_INT_PHY_TIMEOUT (1<<30)
325#define LINK_INT_PHY_REG_RCVD (1<<29)
326#define LINK_INT_PHY_BUSRESET (1<<28)
327#define LINK_INT_TX_RDY (1<<26)
328#define LINK_INT_RX_DATA_RDY (1<<25)
329#define LINK_INT_ISO_STUCK (1<<20)
330#define LINK_INT_ASYNC_STUCK (1<<19)
331#define LINK_INT_SENT_REJECT (1<<17)
332#define LINK_INT_HDR_ERR (1<<16)
333#define LINK_INT_TX_INVALID_TC (1<<15)
334#define LINK_INT_CYC_SECOND (1<<11)
335#define LINK_INT_CYC_START (1<<10)
336#define LINK_INT_CYC_DONE (1<<9)
337#define LINK_INT_CYC_PENDING (1<<8)
338#define LINK_INT_CYC_LOST (1<<7)
339#define LINK_INT_CYC_ARB_FAILED (1<<6)
340#define LINK_INT_GRF_OVERFLOW (1<<5)
341#define LINK_INT_ITF_UNDERFLOW (1<<4)
342#define LINK_INT_ATF_UNDERFLOW (1<<3)
343#define LINK_INT_ISOARB_FAILED (1<<0)
344
345/* PHY specifics */
346#define PHY_VENDORID_TI 0x800028
347#define PHY_PRODUCTID_TSB41LV03 0x000000
348
349
350/* this is the physical layout of a PCL; its size is 128 bytes */
351struct ti_pcl {
352 u32 next;
353 u32 async_error_next;
354 u32 user_data;
355 u32 pcl_status;
356 u32 remaining_transfer_count;
357 u32 next_data_buffer;
358 struct {
359 u32 control;
360 u32 pointer;
361 } buffer[13] __attribute__ ((packed));
362} __attribute__ ((packed));
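/* A buffer descriptor's control word packs the command into bits 31-24
 * (PCL_CMD_*, defined below), flag bits such as PCL_LAST_BUFF (bit 18) and
 * PCL_BIGENDIAN (bit 16), and the transfer byte count in the low bits; for
 * transmit PCLs, send_next() additionally puts the speed code in bits 15-14. */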
363
364#include <linux/stddef.h>
365#define pcloffs(MEMBER) (offsetof(struct ti_pcl, MEMBER))
366
367
368static inline void put_pcl(const struct ti_lynx *lynx, pcl_t pclid,
369 const struct ti_pcl *pcl)
370{
371 memcpy_le32((u32 *)(lynx->pcl_mem + pclid * sizeof(struct ti_pcl)),
372 (u32 *)pcl, sizeof(struct ti_pcl));
373}
374
375static inline void get_pcl(const struct ti_lynx *lynx, pcl_t pclid,
376 struct ti_pcl *pcl)
377{
378 memcpy_le32((u32 *)pcl,
379 (u32 *)(lynx->pcl_mem + pclid * sizeof(struct ti_pcl)),
380 sizeof(struct ti_pcl));
381}
382
383static inline u32 pcl_bus(const struct ti_lynx *lynx, pcl_t pclid)
384{
385 return lynx->pcl_mem_dma + pclid * sizeof(struct ti_pcl);
386}
387
388
389#if defined (__BIG_ENDIAN)
390typedef struct ti_pcl pcltmp_t;
391
392static inline struct ti_pcl *edit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
393 pcltmp_t *tmp)
394{
395 get_pcl(lynx, pclid, tmp);
396 return tmp;
397}
398
399static inline void commit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
400 pcltmp_t *tmp)
401{
402 put_pcl(lynx, pclid, tmp);
403}
404
405#else
406typedef int pcltmp_t; /* just a dummy */
407
408static inline struct ti_pcl *edit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
409 pcltmp_t *tmp)
410{
411 return lynx->pcl_mem + pclid * sizeof(struct ti_pcl);
412}
413
414static inline void commit_pcl(const struct ti_lynx *lynx, pcl_t pclid,
415 pcltmp_t *tmp)
416{
417}
418#endif
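/* On big-endian hosts PCLs must be byte-swapped on their way to and from the
 * little-endian chip, so edit_pcl()/commit_pcl() copy through a temporary;
 * on little-endian hosts they collapse into direct access to pcl_mem. */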
419
420
421static inline void run_sub_pcl(const struct ti_lynx *lynx, pcl_t pclid, int idx,
422 int dmachan)
423{
424 reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20,
425 pcl_bus(lynx, pclid) + idx * 4);
426 reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
427 DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
428}
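/* run_sub_pcl() points the channel's CURRENT_PCL register at the idx-th
 * 32-bit word of the given PCL and sets ENABLE|LINK to start the engine.
 * add_card() apparently exploits this for iso receive: iso_rcv.pcl_start is
 * filled with an array of PCL bus addresses, so word idx acts as the "next"
 * pointer of a pseudo-PCL and chains directly to iso_rcv.pcl[idx]. */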
429
430static inline void run_pcl(const struct ti_lynx *lynx, pcl_t pclid, int dmachan)
431{
432 run_sub_pcl(lynx, pclid, 0, dmachan);
433}
434
435#define PCL_NEXT_INVALID (1<<0)
436
437/* transfer commands */
438#define PCL_CMD_RCV (0x1<<24)
439#define PCL_CMD_RCV_AND_UPDATE (0xa<<24)
440#define PCL_CMD_XMT (0x2<<24)
441#define PCL_CMD_UNFXMT (0xc<<24)
442#define PCL_CMD_PCI_TO_LBUS (0x8<<24)
443#define PCL_CMD_LBUS_TO_PCI (0x9<<24)
444
445/* aux commands */
446#define PCL_CMD_NOP (0x0<<24)
447#define PCL_CMD_LOAD (0x3<<24)
448#define PCL_CMD_STOREQ (0x4<<24)
449#define PCL_CMD_STORED (0xb<<24)
450#define PCL_CMD_STORE0 (0x5<<24)
451#define PCL_CMD_STORE1 (0x6<<24)
452#define PCL_CMD_COMPARE (0xe<<24)
453#define PCL_CMD_SWAP_COMPARE (0xf<<24)
454#define PCL_CMD_ADD (0xd<<24)
455#define PCL_CMD_BRANCH (0x7<<24)
456
457/* BRANCH condition codes */
458#define PCL_COND_DMARDY_SET (0x1<<20)
459#define PCL_COND_DMARDY_CLEAR (0x2<<20)
460
461#define PCL_GEN_INTR (1<<19)
462#define PCL_LAST_BUFF (1<<18)
463#define PCL_LAST_CMD (PCL_LAST_BUFF)
464#define PCL_WAITSTAT (1<<17)
465#define PCL_BIGENDIAN (1<<16)
466#define PCL_ISOMODE (1<<12)
467
468#endif
diff --git a/drivers/ieee1394/raw1394-private.h b/drivers/ieee1394/raw1394-private.h
deleted file mode 100644
index 7a225a405987..000000000000
--- a/drivers/ieee1394/raw1394-private.h
+++ /dev/null
@@ -1,81 +0,0 @@
1#ifndef IEEE1394_RAW1394_PRIVATE_H
2#define IEEE1394_RAW1394_PRIVATE_H
3
4/* header for definitions that are private to the raw1394 driver
5 and not visible to user-space */
6
7#define RAW1394_DEVICE_MAJOR 171
8#define RAW1394_DEVICE_NAME "raw1394"
9
10#define RAW1394_MAX_USER_CSR_DIRS 16
11
12struct iso_block_store {
13 atomic_t refcount;
14 size_t data_size;
15 quadlet_t data[0];
16};
17
18enum raw1394_iso_state { RAW1394_ISO_INACTIVE = 0,
19 RAW1394_ISO_RECV = 1,
20 RAW1394_ISO_XMIT = 2 };
21
22struct file_info {
23 struct list_head list;
24
25 struct mutex state_mutex;
26 enum { opened, initialized, connected } state;
27 unsigned int protocol_version;
28
29 struct hpsb_host *host;
30
31 struct list_head req_pending; /* protected by reqlists_lock */
32 struct list_head req_complete; /* protected by reqlists_lock */
33 spinlock_t reqlists_lock;
34 wait_queue_head_t wait_complete;
35
36 struct list_head addr_list; /* protected by host_info_lock */
37
38 u8 __user *fcp_buffer;
39
40 u8 notification; /* (busreset-notification) RAW1394_NOTIFY_OFF/ON */
41
42 /* new rawiso API */
43 enum raw1394_iso_state iso_state;
44 struct hpsb_iso *iso_handle;
45
46 /* User space's CSR1212 dynamic ConfigROM directories */
47 struct csr1212_keyval *csr1212_dirs[RAW1394_MAX_USER_CSR_DIRS];
48
49 /* Legacy ConfigROM update flag */
50 u8 cfgrom_upd;
51};
52
53struct arm_addr {
54 struct list_head addr_list; /* file_info list */
55 u64 start, end;
56 u64 arm_tag;
57 u8 access_rights;
58 u8 notification_options;
59 u8 client_transactions;
60 u64 recvb;
61 u16 rec_length;
62 u8 *addr_space_buffer; /* accessed by read/write/lock requests */
63};
64
65struct pending_request {
66 struct list_head list;
67 struct file_info *file_info;
68 struct hpsb_packet *packet;
69 struct iso_block_store *ibs;
70 quadlet_t *data;
71 int free_data;
72 struct raw1394_request req;
73};
74
75struct host_info {
76 struct list_head list;
77 struct hpsb_host *host;
78 struct list_head file_info_list; /* protected by host_info_lock */
79};
80
81#endif /* IEEE1394_RAW1394_PRIVATE_H */
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
deleted file mode 100644
index f3401427404c..000000000000
--- a/drivers/ieee1394/raw1394.c
+++ /dev/null
@@ -1,3096 +0,0 @@
1/*
2 * IEEE 1394 for Linux
3 *
4 * Raw interface to the bus
5 *
6 * Copyright (C) 1999, 2000 Andreas E. Bombe
7 * 2001, 2002 Manfred Weihs <weihs@ict.tuwien.ac.at>
8 * 2002 Christian Toegel <christian.toegel@gmx.at>
9 *
10 * This code is licensed under the GPL. See the file COPYING in the root
11 * directory of the kernel sources for details.
12 *
13 *
14 * Contributions:
15 *
16 * Manfred Weihs <weihs@ict.tuwien.ac.at>
17 * configuration ROM manipulation
18 * address range mapping
19 * adaptation for new (transparent) loopback mechanism
20 * sending of arbitrary async packets
21 * Christian Toegel <christian.toegel@gmx.at>
22 * address range mapping
23 * lock64 request
24 * transmit physical packet
25 * busreset notification control (switch on/off)
26 * busreset with selection of type (short/long)
27 * request_reply
28 */
29
30#include <linux/kernel.h>
31#include <linux/list.h>
32#include <linux/sched.h>
33#include <linux/string.h>
34#include <linux/slab.h>
35#include <linux/fs.h>
36#include <linux/poll.h>
37#include <linux/module.h>
38#include <linux/mutex.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/vmalloc.h>
42#include <linux/cdev.h>
43#include <asm/uaccess.h>
44#include <asm/atomic.h>
45#include <linux/compat.h>
46
47#include "csr1212.h"
48#include "highlevel.h"
49#include "hosts.h"
50#include "ieee1394.h"
51#include "ieee1394_core.h"
52#include "ieee1394_hotplug.h"
53#include "ieee1394_transactions.h"
54#include "ieee1394_types.h"
55#include "iso.h"
56#include "nodemgr.h"
57#include "raw1394.h"
58#include "raw1394-private.h"
59
60#define int2ptr(x) ((void __user *)(unsigned long)x)
61#define ptr2int(x) ((u64)(unsigned long)(void __user *)x)
62
63#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
64#define RAW1394_DEBUG
65#endif
66
67#ifdef RAW1394_DEBUG
68#define DBGMSG(fmt, args...) \
69printk(KERN_INFO "raw1394:" fmt "\n" , ## args)
70#else
71#define DBGMSG(fmt, args...) do {} while (0)
72#endif
73
74static LIST_HEAD(host_info_list);
75static int host_count;
76static DEFINE_SPINLOCK(host_info_lock);
77static atomic_t internal_generation = ATOMIC_INIT(0);
78
79static atomic_t iso_buffer_size;
80static const int iso_buffer_max = 4 * 1024 * 1024; /* 4 MB */
81
82static struct hpsb_highlevel raw1394_highlevel;
83
84static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
85 u64 addr, size_t length, u16 flags);
86static int arm_write(struct hpsb_host *host, int nodeid, int destid,
87 quadlet_t * data, u64 addr, size_t length, u16 flags);
88static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
89 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
90 u16 flags);
91static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
92 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
93 u16 flags);
94static const struct hpsb_address_ops arm_ops = {
95 .read = arm_read,
96 .write = arm_write,
97 .lock = arm_lock,
98 .lock64 = arm_lock64,
99};
100
101static void queue_complete_cb(struct pending_request *req);
102
103static struct pending_request *__alloc_pending_request(gfp_t flags)
104{
105 struct pending_request *req;
106
107 req = kzalloc(sizeof(*req), flags);
108 if (req)
109 INIT_LIST_HEAD(&req->list);
110
111 return req;
112}
113
114static inline struct pending_request *alloc_pending_request(void)
115{
116 return __alloc_pending_request(GFP_KERNEL);
117}
118
119static void free_pending_request(struct pending_request *req)
120{
121 if (req->ibs) {
122 if (atomic_dec_and_test(&req->ibs->refcount)) {
123 atomic_sub(req->ibs->data_size, &iso_buffer_size);
124 kfree(req->ibs);
125 }
126 } else if (req->free_data) {
127 kfree(req->data);
128 }
129 hpsb_free_packet(req->packet);
130 kfree(req);
131}
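/* iso_block_store buffers are shared: fcp_request() allocates one block per
 * incoming FCP frame and hands a refcounted reference to every listening
 * file_info, so the data is copied once regardless of the number of readers.
 * The global iso_buffer_size counter caps the total at iso_buffer_max. */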
132
133/* fi->reqlists_lock must be taken */
134static void __queue_complete_req(struct pending_request *req)
135{
136 struct file_info *fi = req->file_info;
137
138 list_move_tail(&req->list, &fi->req_complete);
139 wake_up(&fi->wait_complete);
140}
141
142static void queue_complete_req(struct pending_request *req)
143{
144 unsigned long flags;
145 struct file_info *fi = req->file_info;
146
147 spin_lock_irqsave(&fi->reqlists_lock, flags);
148 __queue_complete_req(req);
149 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
150}
151
152static void queue_complete_cb(struct pending_request *req)
153{
154 struct hpsb_packet *packet = req->packet;
155 int rcode = (packet->header[1] >> 12) & 0xf;
156
157 switch (packet->ack_code) {
158 case ACKX_NONE:
159 case ACKX_SEND_ERROR:
160 req->req.error = RAW1394_ERROR_SEND_ERROR;
161 break;
162 case ACKX_ABORTED:
163 req->req.error = RAW1394_ERROR_ABORTED;
164 break;
165 case ACKX_TIMEOUT:
166 req->req.error = RAW1394_ERROR_TIMEOUT;
167 break;
168 default:
169 req->req.error = (packet->ack_code << 16) | rcode;
170 break;
171 }
172
173 if (!((packet->ack_code == ACK_PENDING) && (rcode == RCODE_COMPLETE))) {
174 req->req.length = 0;
175 }
176
177 if ((req->req.type == RAW1394_REQ_ASYNC_READ) ||
178 (req->req.type == RAW1394_REQ_ASYNC_WRITE) ||
179 (req->req.type == RAW1394_REQ_ASYNC_STREAM) ||
180 (req->req.type == RAW1394_REQ_LOCK) ||
181 (req->req.type == RAW1394_REQ_LOCK64))
182 hpsb_free_tlabel(packet);
183
184 queue_complete_req(req);
185}
186
187static void add_host(struct hpsb_host *host)
188{
189 struct host_info *hi;
190 unsigned long flags;
191
192 hi = kmalloc(sizeof(*hi), GFP_KERNEL);
193
194 if (hi) {
195 INIT_LIST_HEAD(&hi->list);
196 hi->host = host;
197 INIT_LIST_HEAD(&hi->file_info_list);
198
199 spin_lock_irqsave(&host_info_lock, flags);
200 list_add_tail(&hi->list, &host_info_list);
201 host_count++;
202 spin_unlock_irqrestore(&host_info_lock, flags);
203 }
204
205 atomic_inc(&internal_generation);
206}
207
208static struct host_info *find_host_info(struct hpsb_host *host)
209{
210 struct host_info *hi;
211
212 list_for_each_entry(hi, &host_info_list, list)
213 if (hi->host == host)
214 return hi;
215
216 return NULL;
217}
218
219static void remove_host(struct hpsb_host *host)
220{
221 struct host_info *hi;
222 unsigned long flags;
223
224 spin_lock_irqsave(&host_info_lock, flags);
225 hi = find_host_info(host);
226
227 if (hi != NULL) {
228 list_del(&hi->list);
229 host_count--;
230 /*
231 FIXME: address ranges should be removed
232 and fileinfo states should be initialized
233 (including setting generation to
234 internal-generation ...)
235 */
236 }
237 spin_unlock_irqrestore(&host_info_lock, flags);
238
239 if (hi == NULL) {
240 printk(KERN_ERR "raw1394: attempt to remove unknown host "
241 "0x%p\n", host);
242 return;
243 }
244
245 kfree(hi);
246
247 atomic_inc(&internal_generation);
248}
249
250static void host_reset(struct hpsb_host *host)
251{
252 unsigned long flags;
253 struct host_info *hi;
254 struct file_info *fi;
255 struct pending_request *req;
256
257 spin_lock_irqsave(&host_info_lock, flags);
258 hi = find_host_info(host);
259
260 if (hi != NULL) {
261 list_for_each_entry(fi, &hi->file_info_list, list) {
262 if (fi->notification == RAW1394_NOTIFY_ON) {
263 req = __alloc_pending_request(GFP_ATOMIC);
264
265 if (req != NULL) {
266 req->file_info = fi;
267 req->req.type = RAW1394_REQ_BUS_RESET;
268 req->req.generation =
269 get_hpsb_generation(host);
270 req->req.misc = (host->node_id << 16)
271 | host->node_count;
272 if (fi->protocol_version > 3) {
273 req->req.misc |=
274 (NODEID_TO_NODE
275 (host->irm_id)
276 << 8);
277 }
278
279 queue_complete_req(req);
280 }
281 }
282 }
283 }
284 spin_unlock_irqrestore(&host_info_lock, flags);
285}
286
287static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
288 int cts, u8 * data, size_t length)
289{
290 unsigned long flags;
291 struct host_info *hi;
292 struct file_info *fi;
293 struct pending_request *req, *req_next;
294 struct iso_block_store *ibs = NULL;
295 LIST_HEAD(reqs);
296
297 if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
298 HPSB_INFO("dropped fcp request");
299 return;
300 }
301
302 spin_lock_irqsave(&host_info_lock, flags);
303 hi = find_host_info(host);
304
305 if (hi != NULL) {
306 list_for_each_entry(fi, &hi->file_info_list, list) {
307 if (!fi->fcp_buffer)
308 continue;
309
310 req = __alloc_pending_request(GFP_ATOMIC);
311 if (!req)
312 break;
313
314 if (!ibs) {
315 ibs = kmalloc(sizeof(*ibs) + length,
316 GFP_ATOMIC);
317 if (!ibs) {
318 kfree(req);
319 break;
320 }
321
322 atomic_add(length, &iso_buffer_size);
323 atomic_set(&ibs->refcount, 0);
324 ibs->data_size = length;
325 memcpy(ibs->data, data, length);
326 }
327
328 atomic_inc(&ibs->refcount);
329
330 req->file_info = fi;
331 req->ibs = ibs;
332 req->data = ibs->data;
333 req->req.type = RAW1394_REQ_FCP_REQUEST;
334 req->req.generation = get_hpsb_generation(host);
335 req->req.misc = nodeid | (direction << 16);
336 req->req.recvb = ptr2int(fi->fcp_buffer);
337 req->req.length = length;
338
339 list_add_tail(&req->list, &reqs);
340 }
341 }
342 spin_unlock_irqrestore(&host_info_lock, flags);
343
344 list_for_each_entry_safe(req, req_next, &reqs, list)
345 queue_complete_req(req);
346}
347
348#ifdef CONFIG_COMPAT
349struct compat_raw1394_req {
350 __u32 type;
351 __s32 error;
352 __u32 misc;
353
354 __u32 generation;
355 __u32 length;
356
357 __u64 address;
358
359 __u64 tag;
360
361 __u64 sendb;
362 __u64 recvb;
363}
364#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
365__attribute__((packed))
366#endif
367;
368
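/* Rewrite a 32-bit userland raw1394_request into the native layout in
 * a compat_alloc_user_space() area; returns an ERR_PTR-encoded user
 * pointer if the copy faults. */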
369static const char __user *raw1394_compat_write(const char __user *buf)
370{
371 struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
372 struct raw1394_request __user *r;
373
374 r = compat_alloc_user_space(sizeof(struct raw1394_request));
375
376#define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x))
377
378 if (copy_in_user(r, cr, sizeof(struct compat_raw1394_req)) ||
379 C(address) ||
380 C(tag) ||
381 C(sendb) ||
382 C(recvb))
383 return (__force const char __user *)ERR_PTR(-EFAULT);
384
385 return (const char __user *)r;
386}
387#undef C
388
389#define P(x) __put_user(r->x, &cr->x)
390
391static int
392raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
393{
394 struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
395
396 if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
397 P(type) ||
398 P(error) ||
399 P(misc) ||
400 P(generation) ||
401 P(length) ||
402 P(address) ||
403 P(tag) ||
404 P(sendb) ||
405 P(recvb))
406 return -EFAULT;
407
408 return sizeof(struct compat_raw1394_req);
409}
410#undef P
411
412#endif
413
414/* get next completed request (caller must hold fi->reqlists_lock) */
415static inline struct pending_request *__next_complete_req(struct file_info *fi)
416{
417 struct list_head *lh;
418 struct pending_request *req = NULL;
419
420 if (!list_empty(&fi->req_complete)) {
421 lh = fi->req_complete.next;
422 list_del(lh);
423 req = list_entry(lh, struct pending_request, list);
424 }
425 return req;
426}
427
428/* atomically get next completed request */
429static struct pending_request *next_complete_req(struct file_info *fi)
430{
431 unsigned long flags;
432 struct pending_request *req;
433
434 spin_lock_irqsave(&fi->reqlists_lock, flags);
435 req = __next_complete_req(fi);
436 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
437 return req;
438}
439
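/* read(2) entry point: hand the next completed request back to
 * userspace, blocking unless O_NONBLOCK is set; any bulk payload goes
 * to the receive buffer named in the original request, the request
 * header itself to the read() buffer (native or compat layout). */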
440static ssize_t raw1394_read(struct file *file, char __user * buffer,
441 size_t count, loff_t * offset_is_ignored)
442{
443 struct file_info *fi = file->private_data;
444 struct pending_request *req;
445 ssize_t ret;
446
447#ifdef CONFIG_COMPAT
448 if (count == sizeof(struct compat_raw1394_req)) {
449 /* ok */
450 } else
451#endif
452 if (count != sizeof(struct raw1394_request)) {
453 return -EINVAL;
454 }
455
456 if (!access_ok(VERIFY_WRITE, buffer, count)) {
457 return -EFAULT;
458 }
459
460 if (file->f_flags & O_NONBLOCK) {
461 if (!(req = next_complete_req(fi)))
462 return -EAGAIN;
463 } else {
464 /*
465 * NB: We call the macro wait_event_interruptible() with a
466 * condition argument that has a side effect. This is safe only
467 * because the side effect does not occur until the condition
468 * becomes true, and wait_event_interruptible() does not
469 * evaluate the condition again after that.
470 */
471 if (wait_event_interruptible(fi->wait_complete,
472 (req = next_complete_req(fi))))
473 return -ERESTARTSYS;
474 }
475
476 if (req->req.length) {
477 if (copy_to_user(int2ptr(req->req.recvb), req->data,
478 req->req.length)) {
479 req->req.error = RAW1394_ERROR_MEMFAULT;
480 }
481 }
482
483#ifdef CONFIG_COMPAT
484 if (count == sizeof(struct compat_raw1394_req) &&
485 sizeof(struct compat_raw1394_req) !=
486 sizeof(struct raw1394_request)) {
487 ret = raw1394_compat_read(buffer, &req->req);
488 } else
489#endif
490 {
491 if (copy_to_user(buffer, &req->req, sizeof(req->req))) {
492 ret = -EFAULT;
493 goto out;
494 }
495 ret = (ssize_t) sizeof(struct raw1394_request);
496 }
497 out:
498 free_pending_request(req);
499 return ret;
500}
501
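/* First request on a freshly opened file: only RAW1394_REQ_INITIALIZE
 * is accepted; it negotiates the kernel API version and advances the
 * client to the 'initialized' state. */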
502static int state_opened(struct file_info *fi, struct pending_request *req)
503{
504 if (req->req.type == RAW1394_REQ_INITIALIZE) {
505 switch (req->req.misc) {
506 case RAW1394_KERNELAPI_VERSION:
507 case 3:
508 fi->state = initialized;
509 fi->protocol_version = req->req.misc;
510 req->req.error = RAW1394_ERROR_NONE;
511 req->req.generation = atomic_read(&internal_generation);
512 break;
513
514 default:
515 req->req.error = RAW1394_ERROR_COMPAT;
516 req->req.misc = RAW1394_KERNELAPI_VERSION;
517 }
518 } else {
519 req->req.error = RAW1394_ERROR_STATE_ORDER;
520 }
521
522 req->req.length = 0;
523 queue_complete_req(req);
524 return 0;
525}
526
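/* Second handshake stage: the client may enumerate adapters
 * (RAW1394_REQ_LIST_CARDS) or bind to one (RAW1394_REQ_SET_CARD),
 * which pins the adapter's driver module and advances the client to
 * the 'connected' state. */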
527static int state_initialized(struct file_info *fi, struct pending_request *req)
528{
529 unsigned long flags;
530 struct host_info *hi;
531 struct raw1394_khost_list *khl;
532
533 if (req->req.generation != atomic_read(&internal_generation)) {
534 req->req.error = RAW1394_ERROR_GENERATION;
535 req->req.generation = atomic_read(&internal_generation);
536 req->req.length = 0;
537 queue_complete_req(req);
538 return 0;
539 }
540
541 switch (req->req.type) {
542 case RAW1394_REQ_LIST_CARDS:
543 spin_lock_irqsave(&host_info_lock, flags);
544 khl = kmalloc(sizeof(*khl) * host_count, GFP_ATOMIC);
545
546 if (khl) {
547 req->req.misc = host_count;
548 req->data = (quadlet_t *) khl;
549
550 list_for_each_entry(hi, &host_info_list, list) {
551 khl->nodes = hi->host->node_count;
552 strcpy(khl->name, hi->host->driver->name);
553 khl++;
554 }
555 }
556 spin_unlock_irqrestore(&host_info_lock, flags);
557
558 if (khl) {
559 req->req.error = RAW1394_ERROR_NONE;
560 req->req.length = min(req->req.length,
561 (u32) (sizeof
562 (struct raw1394_khost_list)
563 * req->req.misc));
564 req->free_data = 1;
565 } else {
566 return -ENOMEM;
567 }
568 break;
569
570 case RAW1394_REQ_SET_CARD:
571 spin_lock_irqsave(&host_info_lock, flags);
572 if (req->req.misc >= host_count) {
573 req->req.error = RAW1394_ERROR_INVALID_ARG;
574 goto out_set_card;
575 }
576 list_for_each_entry(hi, &host_info_list, list)
577 if (!req->req.misc--)
578 break;
579 get_device(&hi->host->device); /* FIXME handle failure case */
580 list_add_tail(&fi->list, &hi->file_info_list);
581
582 /* prevent unloading of the host's low-level driver */
583 if (!try_module_get(hi->host->driver->owner)) {
584 req->req.error = RAW1394_ERROR_ABORTED;
585 goto out_set_card;
586 }
587 WARN_ON(fi->host);
588 fi->host = hi->host;
589 fi->state = connected;
590
591 req->req.error = RAW1394_ERROR_NONE;
592 req->req.generation = get_hpsb_generation(fi->host);
593 req->req.misc = (fi->host->node_id << 16)
594 | fi->host->node_count;
595 if (fi->protocol_version > 3)
596 req->req.misc |= NODEID_TO_NODE(fi->host->irm_id) << 8;
597out_set_card:
598 spin_unlock_irqrestore(&host_info_lock, flags);
599
600 req->req.length = 0;
601 break;
602
603 default:
604 req->req.error = RAW1394_ERROR_STATE_ORDER;
605 req->req.length = 0;
606 break;
607 }
608
609 queue_complete_req(req);
610 return 0;
611}
612
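/* Switch FCP listening on or off: a nonzero req.misc registers the
 * userspace buffer given in req.recvb, zero removes it;
 * RAW1394_ERROR_ALREADY flags redundant transitions. */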
613static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
614{
615 if (req->req.misc) {
616 if (fi->fcp_buffer) {
617 req->req.error = RAW1394_ERROR_ALREADY;
618 } else {
619 fi->fcp_buffer = int2ptr(req->req.recvb);
620 }
621 } else {
622 if (!fi->fcp_buffer) {
623 req->req.error = RAW1394_ERROR_ALREADY;
624 } else {
625 fi->fcp_buffer = NULL;
626 }
627 }
628
629 req->req.length = 0;
630 queue_complete_req(req);
631}
632
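/* Build and send an asynchronous transaction (read, write, stream,
 * lock, lock64) for the client: validate the length, copy any outbound
 * payload from userspace, park the request on the pending list and
 * hand the packet to hpsb_send_packet(); queue_complete_cb() finishes
 * the request when the response or an error arrives. */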
633static int handle_async_request(struct file_info *fi,
634 struct pending_request *req, int node)
635{
636 unsigned long flags;
637 struct hpsb_packet *packet = NULL;
638 u64 addr = req->req.address & 0xffffffffffffULL;
639
640 switch (req->req.type) {
641 case RAW1394_REQ_ASYNC_READ:
642 DBGMSG("read_request called");
643 packet =
644 hpsb_make_readpacket(fi->host, node, addr, req->req.length);
645
646 if (!packet)
647 return -ENOMEM;
648
649 if (req->req.length == 4)
650 req->data = &packet->header[3];
651 else
652 req->data = packet->data;
653
654 break;
655
656 case RAW1394_REQ_ASYNC_WRITE:
657 DBGMSG("write_request called");
658
659 packet = hpsb_make_writepacket(fi->host, node, addr, NULL,
660 req->req.length);
661 if (!packet)
662 return -ENOMEM;
663
664 if (req->req.length == 4) {
665 if (copy_from_user
666 (&packet->header[3], int2ptr(req->req.sendb),
667 req->req.length))
668 req->req.error = RAW1394_ERROR_MEMFAULT;
669 } else {
670 if (copy_from_user
671 (packet->data, int2ptr(req->req.sendb),
672 req->req.length))
673 req->req.error = RAW1394_ERROR_MEMFAULT;
674 }
675
676 req->req.length = 0;
677 break;
678
679 case RAW1394_REQ_ASYNC_STREAM:
680 DBGMSG("stream_request called");
681
682 packet =
683 hpsb_make_streampacket(fi->host, NULL, req->req.length,
684 node & 0x3f /* channel */,
685 (req->req.misc >> 16) & 0x3,
686 req->req.misc & 0xf);
687 if (!packet)
688 return -ENOMEM;
689
690 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
691 req->req.length))
692 req->req.error = RAW1394_ERROR_MEMFAULT;
693
694 req->req.length = 0;
695 break;
696
697 case RAW1394_REQ_LOCK:
698 DBGMSG("lock_request called");
699 if ((req->req.misc == EXTCODE_FETCH_ADD)
700 || (req->req.misc == EXTCODE_LITTLE_ADD)) {
701 if (req->req.length != 4) {
702 req->req.error = RAW1394_ERROR_INVALID_ARG;
703 break;
704 }
705 } else {
706 if (req->req.length != 8) {
707 req->req.error = RAW1394_ERROR_INVALID_ARG;
708 break;
709 }
710 }
711
712 packet = hpsb_make_lockpacket(fi->host, node, addr,
713 req->req.misc, NULL, 0);
714 if (!packet)
715 return -ENOMEM;
716
717 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
718 req->req.length)) {
719 req->req.error = RAW1394_ERROR_MEMFAULT;
720 break;
721 }
722
723 req->data = packet->data;
724 req->req.length = 4;
725 break;
726
727 case RAW1394_REQ_LOCK64:
728 DBGMSG("lock64_request called");
729 if ((req->req.misc == EXTCODE_FETCH_ADD)
730 || (req->req.misc == EXTCODE_LITTLE_ADD)) {
731 if (req->req.length != 8) {
732 req->req.error = RAW1394_ERROR_INVALID_ARG;
733 break;
734 }
735 } else {
736 if (req->req.length != 16) {
737 req->req.error = RAW1394_ERROR_INVALID_ARG;
738 break;
739 }
740 }
741 packet = hpsb_make_lock64packet(fi->host, node, addr,
742 req->req.misc, NULL, 0);
743 if (!packet)
744 return -ENOMEM;
745
746 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
747 req->req.length)) {
748 req->req.error = RAW1394_ERROR_MEMFAULT;
749 break;
750 }
751
752 req->data = packet->data;
753 req->req.length = 8;
754 break;
755
756 default:
757 req->req.error = RAW1394_ERROR_STATE_ORDER;
758 }
759
760 req->packet = packet;
761
762 if (req->req.error) {
763 req->req.length = 0;
764 queue_complete_req(req);
765 return 0;
766 }
767
768 hpsb_set_packet_complete_task(packet,
769 (void (*)(void *))queue_complete_cb, req);
770
771 spin_lock_irqsave(&fi->reqlists_lock, flags);
772 list_add_tail(&req->list, &fi->req_pending);
773 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
774
775 packet->generation = req->req.generation;
776
777 if (hpsb_send_packet(packet) < 0) {
778 req->req.error = RAW1394_ERROR_SEND_ERROR;
779 req->req.length = 0;
780 hpsb_free_tlabel(packet);
781 queue_complete_req(req);
782 }
783 return 0;
784}
785
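/* Send a packet assembled entirely in userspace: header and payload
 * are copied verbatim, the node id, tcode and tlabel are parsed back
 * out of the first header quadlet, and completion is again reported
 * through queue_complete_cb(). */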
786static int handle_async_send(struct file_info *fi, struct pending_request *req)
787{
788 unsigned long flags;
789 struct hpsb_packet *packet;
790 int header_length = req->req.misc & 0xffff;
791 int expect_response = req->req.misc >> 16;
792 size_t data_size;
793
794 if (header_length > req->req.length || header_length < 12 ||
795 header_length > FIELD_SIZEOF(struct hpsb_packet, header)) {
796 req->req.error = RAW1394_ERROR_INVALID_ARG;
797 req->req.length = 0;
798 queue_complete_req(req);
799 return 0;
800 }
801
802 data_size = req->req.length - header_length;
803 packet = hpsb_alloc_packet(data_size);
804 req->packet = packet;
805 if (!packet)
806 return -ENOMEM;
807
808 if (copy_from_user(packet->header, int2ptr(req->req.sendb),
809 header_length)) {
810 req->req.error = RAW1394_ERROR_MEMFAULT;
811 req->req.length = 0;
812 queue_complete_req(req);
813 return 0;
814 }
815
816 if (copy_from_user
817 (packet->data, int2ptr(req->req.sendb) + header_length,
818 data_size)) {
819 req->req.error = RAW1394_ERROR_MEMFAULT;
820 req->req.length = 0;
821 queue_complete_req(req);
822 return 0;
823 }
824
825 packet->type = hpsb_async;
826 packet->node_id = packet->header[0] >> 16;
827 packet->tcode = (packet->header[0] >> 4) & 0xf;
828 packet->tlabel = (packet->header[0] >> 10) & 0x3f;
829 packet->host = fi->host;
830 packet->expect_response = expect_response;
831 packet->header_size = header_length;
832 packet->data_size = data_size;
833
834 req->req.length = 0;
835 hpsb_set_packet_complete_task(packet,
836 (void (*)(void *))queue_complete_cb, req);
837
838 spin_lock_irqsave(&fi->reqlists_lock, flags);
839 list_add_tail(&req->list, &fi->req_pending);
840 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
841
842 /* Update the generation of the packet just before sending. */
843 packet->generation = req->req.generation;
844
845 if (hpsb_send_packet(packet) < 0) {
846 req->req.error = RAW1394_ERROR_SEND_ERROR;
847 queue_complete_req(req);
848 }
849
850 return 0;
851}
852
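/* Address range mapping (ARM) read handler, run by the hpsb core when
 * another node reads from a registered range: serve the read from the
 * backing buffer if access rights permit, and, if the owner asked for
 * notification, queue an arm_request/arm_response record describing
 * the transaction. Uses GFP_ATOMIC since it may run in atomic context. */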
853static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
854 u64 addr, size_t length, u16 flags)
855{
856 unsigned long irqflags;
857 struct pending_request *req;
858 struct host_info *hi;
859 struct file_info *fi = NULL;
860 struct list_head *entry;
861 struct arm_addr *arm_addr = NULL;
862 struct arm_request *arm_req = NULL;
863 struct arm_response *arm_resp = NULL;
864 int found = 0, size = 0, rcode = -1;
865 struct arm_request_response *arm_req_resp = NULL;
866
867 DBGMSG("arm_read called by node: %X "
868 "addr: %4.4x %8.8x length: %Zu", nodeid,
869 (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
870 length);
871 spin_lock_irqsave(&host_info_lock, irqflags);
872 hi = find_host_info(host); /* search address-entry */
873 if (hi != NULL) {
874 list_for_each_entry(fi, &hi->file_info_list, list) {
875 entry = fi->addr_list.next;
876 while (entry != &(fi->addr_list)) {
877 arm_addr =
878 list_entry(entry, struct arm_addr,
879 addr_list);
880 if (((arm_addr->start) <= (addr))
881 && ((arm_addr->end) >= (addr + length))) {
882 found = 1;
883 break;
884 }
885 entry = entry->next;
886 }
887 if (found) {
888 break;
889 }
890 }
891 }
892 rcode = -1;
893 if (!found) {
894 printk(KERN_ERR "raw1394: arm_read FAILED addr_entry not found"
895 " -> rcode_address_error\n");
896 spin_unlock_irqrestore(&host_info_lock, irqflags);
897 return (RCODE_ADDRESS_ERROR);
898 } else {
899 DBGMSG("arm_read addr_entry FOUND");
900 }
901 if (arm_addr->rec_length < length) {
902 DBGMSG("arm_read block length too big -> rcode_data_error");
903 rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
904 }
905 if (rcode == -1) {
906 if (arm_addr->access_rights & ARM_READ) {
907 if (!(arm_addr->client_transactions & ARM_READ)) {
908 memcpy(buffer,
909 (arm_addr->addr_space_buffer) + (addr -
910 (arm_addr->
911 start)),
912 length);
913 DBGMSG("arm_read -> (rcode_complete)");
914 rcode = RCODE_COMPLETE;
915 }
916 } else {
917 rcode = RCODE_TYPE_ERROR; /* function not allowed */
918 DBGMSG("arm_read -> rcode_type_error (access denied)");
919 }
920 }
921 if (arm_addr->notification_options & ARM_READ) {
922 DBGMSG("arm_read -> entering notification-section");
923 req = __alloc_pending_request(GFP_ATOMIC);
924 if (!req) {
925 DBGMSG("arm_read -> rcode_conflict_error");
926 spin_unlock_irqrestore(&host_info_lock, irqflags);
927 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
928 The request may be retried */
929 }
930 if (rcode == RCODE_COMPLETE) {
931 size =
932 sizeof(struct arm_request) +
933 sizeof(struct arm_response) +
934 length * sizeof(byte_t) +
935 sizeof(struct arm_request_response);
936 } else {
937 size =
938 sizeof(struct arm_request) +
939 sizeof(struct arm_response) +
940 sizeof(struct arm_request_response);
941 }
942 req->data = kmalloc(size, GFP_ATOMIC);
943 if (!(req->data)) {
944 free_pending_request(req);
945 DBGMSG("arm_read -> rcode_conflict_error");
946 spin_unlock_irqrestore(&host_info_lock, irqflags);
947 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
948 The request may be retried */
949 }
950 req->free_data = 1;
951 req->file_info = fi;
952 req->req.type = RAW1394_REQ_ARM;
953 req->req.generation = get_hpsb_generation(host);
954 req->req.misc =
955 (((length << 16) & (0xFFFF0000)) | (ARM_READ & 0xFF));
956 req->req.tag = arm_addr->arm_tag;
957 req->req.recvb = arm_addr->recvb;
958 req->req.length = size;
959 arm_req_resp = (struct arm_request_response *)(req->data);
960 arm_req = (struct arm_request *)((byte_t *) (req->data) +
961 (sizeof
962 (struct
963 arm_request_response)));
964 arm_resp =
965 (struct arm_response *)((byte_t *) (arm_req) +
966 (sizeof(struct arm_request)));
967 arm_req->buffer = NULL;
968 arm_resp->buffer = NULL;
969 if (rcode == RCODE_COMPLETE) {
970 byte_t *buf =
971 (byte_t *) arm_resp + sizeof(struct arm_response);
972 memcpy(buf,
973 (arm_addr->addr_space_buffer) + (addr -
974 (arm_addr->
975 start)),
976 length);
977 arm_resp->buffer =
978 int2ptr((arm_addr->recvb) +
979 sizeof(struct arm_request_response) +
980 sizeof(struct arm_request) +
981 sizeof(struct arm_response));
982 }
983 arm_resp->buffer_length =
984 (rcode == RCODE_COMPLETE) ? length : 0;
985 arm_resp->response_code = rcode;
986 arm_req->buffer_length = 0;
987 arm_req->generation = req->req.generation;
988 arm_req->extended_transaction_code = 0;
989 arm_req->destination_offset = addr;
990 arm_req->source_nodeid = nodeid;
991 arm_req->destination_nodeid = host->node_id;
992 arm_req->tlabel = (flags >> 10) & 0x3f;
993 arm_req->tcode = (flags >> 4) & 0x0f;
994 arm_req_resp->request = int2ptr((arm_addr->recvb) +
995 sizeof(struct
996 arm_request_response));
997 arm_req_resp->response =
998 int2ptr((arm_addr->recvb) +
999 sizeof(struct arm_request_response) +
1000 sizeof(struct arm_request));
1001 queue_complete_req(req);
1002 }
1003 spin_unlock_irqrestore(&host_info_lock, irqflags);
1004 return (rcode);
1005}
1006
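/* ARM write handler: commit the sender's data to the backing buffer
 * if the range permits writes, then optionally queue a notification
 * record that carries the written payload to the owning client. */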
1007static int arm_write(struct hpsb_host *host, int nodeid, int destid,
1008 quadlet_t * data, u64 addr, size_t length, u16 flags)
1009{
1010 unsigned long irqflags;
1011 struct pending_request *req;
1012 struct host_info *hi;
1013 struct file_info *fi = NULL;
1014 struct list_head *entry;
1015 struct arm_addr *arm_addr = NULL;
1016 struct arm_request *arm_req = NULL;
1017 struct arm_response *arm_resp = NULL;
1018 int found = 0, size = 0, rcode = -1;
1019 struct arm_request_response *arm_req_resp = NULL;
1020
1021 DBGMSG("arm_write called by node: %X "
1022 "addr: %4.4x %8.8x length: %Zu", nodeid,
1023 (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
1024 length);
1025 spin_lock_irqsave(&host_info_lock, irqflags);
1026 hi = find_host_info(host); /* search address-entry */
1027 if (hi != NULL) {
1028 list_for_each_entry(fi, &hi->file_info_list, list) {
1029 entry = fi->addr_list.next;
1030 while (entry != &(fi->addr_list)) {
1031 arm_addr =
1032 list_entry(entry, struct arm_addr,
1033 addr_list);
1034 if (((arm_addr->start) <= (addr))
1035 && ((arm_addr->end) >= (addr + length))) {
1036 found = 1;
1037 break;
1038 }
1039 entry = entry->next;
1040 }
1041 if (found) {
1042 break;
1043 }
1044 }
1045 }
1046 rcode = -1;
1047 if (!found) {
1048 printk(KERN_ERR "raw1394: arm_write FAILED addr_entry not found"
1049 " -> rcode_address_error\n");
1050 spin_unlock_irqrestore(&host_info_lock, irqflags);
1051 return (RCODE_ADDRESS_ERROR);
1052 } else {
1053 DBGMSG("arm_write addr_entry FOUND");
1054 }
1055 if (arm_addr->rec_length < length) {
1056 DBGMSG("arm_write block length too big -> rcode_data_error");
1057 rcode = RCODE_DATA_ERROR; /* hardware error, data is unavailable */
1058 }
1059 if (rcode == -1) {
1060 if (arm_addr->access_rights & ARM_WRITE) {
1061 if (!(arm_addr->client_transactions & ARM_WRITE)) {
1062 memcpy((arm_addr->addr_space_buffer) +
1063 (addr - (arm_addr->start)), data,
1064 length);
1065 DBGMSG("arm_write -> (rcode_complete)");
1066 rcode = RCODE_COMPLETE;
1067 }
1068 } else {
1069 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1070 DBGMSG("arm_write -> rcode_type_error (access denied)");
1071 }
1072 }
1073 if (arm_addr->notification_options & ARM_WRITE) {
1074 DBGMSG("arm_write -> entering notification-section");
1075 req = __alloc_pending_request(GFP_ATOMIC);
1076 if (!req) {
1077 DBGMSG("arm_write -> rcode_conflict_error");
1078 spin_unlock_irqrestore(&host_info_lock, irqflags);
1079 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1080                                        The request may be retried */
1081 }
1082 size =
1083 sizeof(struct arm_request) + sizeof(struct arm_response) +
1084 (length) * sizeof(byte_t) +
1085 sizeof(struct arm_request_response);
1086 req->data = kmalloc(size, GFP_ATOMIC);
1087 if (!(req->data)) {
1088 free_pending_request(req);
1089 DBGMSG("arm_write -> rcode_conflict_error");
1090 spin_unlock_irqrestore(&host_info_lock, irqflags);
1091 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1092 The request may be retried */
1093 }
1094 req->free_data = 1;
1095 req->file_info = fi;
1096 req->req.type = RAW1394_REQ_ARM;
1097 req->req.generation = get_hpsb_generation(host);
1098 req->req.misc =
1099 (((length << 16) & (0xFFFF0000)) | (ARM_WRITE & 0xFF));
1100 req->req.tag = arm_addr->arm_tag;
1101 req->req.recvb = arm_addr->recvb;
1102 req->req.length = size;
1103 arm_req_resp = (struct arm_request_response *)(req->data);
1104 arm_req = (struct arm_request *)((byte_t *) (req->data) +
1105 (sizeof
1106 (struct
1107 arm_request_response)));
1108 arm_resp =
1109 (struct arm_response *)((byte_t *) (arm_req) +
1110 (sizeof(struct arm_request)));
1111 arm_resp->buffer = NULL;
1112 memcpy((byte_t *) arm_resp + sizeof(struct arm_response),
1113 data, length);
1114 arm_req->buffer = int2ptr((arm_addr->recvb) +
1115 sizeof(struct arm_request_response) +
1116 sizeof(struct arm_request) +
1117 sizeof(struct arm_response));
1118 arm_req->buffer_length = length;
1119 arm_req->generation = req->req.generation;
1120 arm_req->extended_transaction_code = 0;
1121 arm_req->destination_offset = addr;
1122 arm_req->source_nodeid = nodeid;
1123 arm_req->destination_nodeid = destid;
1124 arm_req->tlabel = (flags >> 10) & 0x3f;
1125 arm_req->tcode = (flags >> 4) & 0x0f;
1126 arm_resp->buffer_length = 0;
1127 arm_resp->response_code = rcode;
1128 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1129 sizeof(struct
1130 arm_request_response));
1131 arm_req_resp->response =
1132 int2ptr((arm_addr->recvb) +
1133 sizeof(struct arm_request_response) +
1134 sizeof(struct arm_request));
1135 queue_complete_req(req);
1136 }
1137 spin_unlock_irqrestore(&host_info_lock, irqflags);
1138 return (rcode);
1139}
1140
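/* ARM handler for 32-bit lock transactions: emulate the extended
 * tcodes (mask_swap, compare_swap, fetch_add, little_add, bounded_add,
 * wrap_add) on the backing buffer, return the old value through
 * *store, and optionally queue a notification record. */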
1141static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
1142 u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode,
1143 u16 flags)
1144{
1145 unsigned long irqflags;
1146 struct pending_request *req;
1147 struct host_info *hi;
1148 struct file_info *fi = NULL;
1149 struct list_head *entry;
1150 struct arm_addr *arm_addr = NULL;
1151 struct arm_request *arm_req = NULL;
1152 struct arm_response *arm_resp = NULL;
1153 int found = 0, size = 0, rcode = -1;
1154 quadlet_t old, new;
1155 struct arm_request_response *arm_req_resp = NULL;
1156
1157 if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
1158 ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
1159 DBGMSG("arm_lock called by node: %X "
1160 "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X",
1161 nodeid, (u16) ((addr >> 32) & 0xFFFF),
1162 (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
1163 be32_to_cpu(data));
1164 } else {
1165 DBGMSG("arm_lock called by node: %X "
1166 "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X arg: %8.8X",
1167 nodeid, (u16) ((addr >> 32) & 0xFFFF),
1168 (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF,
1169 be32_to_cpu(data), be32_to_cpu(arg));
1170 }
1171 spin_lock_irqsave(&host_info_lock, irqflags);
1172 hi = find_host_info(host); /* search address-entry */
1173 if (hi != NULL) {
1174 list_for_each_entry(fi, &hi->file_info_list, list) {
1175 entry = fi->addr_list.next;
1176 while (entry != &(fi->addr_list)) {
1177 arm_addr =
1178 list_entry(entry, struct arm_addr,
1179 addr_list);
1180 if (((arm_addr->start) <= (addr))
1181 && ((arm_addr->end) >=
1182 (addr + sizeof(*store)))) {
1183 found = 1;
1184 break;
1185 }
1186 entry = entry->next;
1187 }
1188 if (found) {
1189 break;
1190 }
1191 }
1192 }
1193 rcode = -1;
1194 if (!found) {
1195 printk(KERN_ERR "raw1394: arm_lock FAILED addr_entry not found"
1196 " -> rcode_address_error\n");
1197 spin_unlock_irqrestore(&host_info_lock, irqflags);
1198 return (RCODE_ADDRESS_ERROR);
1199 } else {
1200 DBGMSG("arm_lock addr_entry FOUND");
1201 }
1202 if (rcode == -1) {
1203 if (arm_addr->access_rights & ARM_LOCK) {
1204 if (!(arm_addr->client_transactions & ARM_LOCK)) {
1205 memcpy(&old,
1206 (arm_addr->addr_space_buffer) + (addr -
1207 (arm_addr->
1208 start)),
1209 sizeof(old));
1210 switch (ext_tcode) {
1211 case (EXTCODE_MASK_SWAP):
1212 new = data | (old & ~arg);
1213 break;
1214 case (EXTCODE_COMPARE_SWAP):
1215 if (old == arg) {
1216 new = data;
1217 } else {
1218 new = old;
1219 }
1220 break;
1221 case (EXTCODE_FETCH_ADD):
1222 new =
1223 cpu_to_be32(be32_to_cpu(data) +
1224 be32_to_cpu(old));
1225 break;
1226 case (EXTCODE_LITTLE_ADD):
1227 new =
1228 cpu_to_le32(le32_to_cpu(data) +
1229 le32_to_cpu(old));
1230 break;
1231 case (EXTCODE_BOUNDED_ADD):
1232 if (old != arg) {
1233 new =
1234 cpu_to_be32(be32_to_cpu
1235 (data) +
1236 be32_to_cpu
1237 (old));
1238 } else {
1239 new = old;
1240 }
1241 break;
1242 case (EXTCODE_WRAP_ADD):
1243 if (old != arg) {
1244 new =
1245 cpu_to_be32(be32_to_cpu
1246 (data) +
1247 be32_to_cpu
1248 (old));
1249 } else {
1250 new = data;
1251 }
1252 break;
1253 default:
1254 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1255 printk(KERN_ERR
1256 "raw1394: arm_lock FAILED "
1257 "ext_tcode not allowed -> rcode_type_error\n");
1258 break;
1259 } /* switch */
1260 if (rcode == -1) {
1261 DBGMSG("arm_lock -> (rcode_complete)");
1262 rcode = RCODE_COMPLETE;
1263 memcpy(store, &old, sizeof(*store));
1264 memcpy((arm_addr->addr_space_buffer) +
1265 (addr - (arm_addr->start)),
1266 &new, sizeof(*store));
1267 }
1268 }
1269 } else {
1270 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1271 DBGMSG("arm_lock -> rcode_type_error (access denied)");
1272 }
1273 }
1274 if (arm_addr->notification_options & ARM_LOCK) {
1275 byte_t *buf1, *buf2;
1276 DBGMSG("arm_lock -> entering notification-section");
1277 req = __alloc_pending_request(GFP_ATOMIC);
1278 if (!req) {
1279 DBGMSG("arm_lock -> rcode_conflict_error");
1280 spin_unlock_irqrestore(&host_info_lock, irqflags);
1281 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1282 The request may be retried */
1283 }
1284 size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response); /* maximum */
1285 req->data = kmalloc(size, GFP_ATOMIC);
1286 if (!(req->data)) {
1287 free_pending_request(req);
1288 DBGMSG("arm_lock -> rcode_conflict_error");
1289 spin_unlock_irqrestore(&host_info_lock, irqflags);
1290 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1291 The request may be retried */
1292 }
1293 req->free_data = 1;
1294 arm_req_resp = (struct arm_request_response *)(req->data);
1295 arm_req = (struct arm_request *)((byte_t *) (req->data) +
1296 (sizeof
1297 (struct
1298 arm_request_response)));
1299 arm_resp =
1300 (struct arm_response *)((byte_t *) (arm_req) +
1301 (sizeof(struct arm_request)));
1302 buf1 = (byte_t *) arm_resp + sizeof(struct arm_response);
1303 buf2 = buf1 + 2 * sizeof(*store);
1304 if ((ext_tcode == EXTCODE_FETCH_ADD) ||
1305 (ext_tcode == EXTCODE_LITTLE_ADD)) {
1306 arm_req->buffer_length = sizeof(*store);
1307 memcpy(buf1, &data, sizeof(*store));
1308
1309 } else {
1310 arm_req->buffer_length = 2 * sizeof(*store);
1311 memcpy(buf1, &arg, sizeof(*store));
1312 memcpy(buf1 + sizeof(*store), &data, sizeof(*store));
1313 }
1314 if (rcode == RCODE_COMPLETE) {
1315 arm_resp->buffer_length = sizeof(*store);
1316 memcpy(buf2, &old, sizeof(*store));
1317 } else {
1318 arm_resp->buffer_length = 0;
1319 }
1320 req->file_info = fi;
1321 req->req.type = RAW1394_REQ_ARM;
1322 req->req.generation = get_hpsb_generation(host);
1323 req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) |
1324 (ARM_LOCK & 0xFF));
1325 req->req.tag = arm_addr->arm_tag;
1326 req->req.recvb = arm_addr->recvb;
1327 req->req.length = size;
1328 arm_req->generation = req->req.generation;
1329 arm_req->extended_transaction_code = ext_tcode;
1330 arm_req->destination_offset = addr;
1331 arm_req->source_nodeid = nodeid;
1332 arm_req->destination_nodeid = host->node_id;
1333 arm_req->tlabel = (flags >> 10) & 0x3f;
1334 arm_req->tcode = (flags >> 4) & 0x0f;
1335 arm_resp->response_code = rcode;
1336 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1337 sizeof(struct
1338 arm_request_response));
1339 arm_req_resp->response =
1340 int2ptr((arm_addr->recvb) +
1341 sizeof(struct arm_request_response) +
1342 sizeof(struct arm_request));
1343 arm_req->buffer =
1344 int2ptr((arm_addr->recvb) +
1345 sizeof(struct arm_request_response) +
1346 sizeof(struct arm_request) +
1347 sizeof(struct arm_response));
1348 arm_resp->buffer =
1349 int2ptr((arm_addr->recvb) +
1350 sizeof(struct arm_request_response) +
1351 sizeof(struct arm_request) +
1352 sizeof(struct arm_response) + 2 * sizeof(*store));
1353 queue_complete_req(req);
1354 }
1355 spin_unlock_irqrestore(&host_info_lock, irqflags);
1356 return (rcode);
1357}
1358
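/* 64-bit (octlet) variant of arm_lock(): the same extended-tcode
 * emulation and notification logic, operating on octlet_t values. */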
1359static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
1360 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
1361 u16 flags)
1362{
1363 unsigned long irqflags;
1364 struct pending_request *req;
1365 struct host_info *hi;
1366 struct file_info *fi = NULL;
1367 struct list_head *entry;
1368 struct arm_addr *arm_addr = NULL;
1369 struct arm_request *arm_req = NULL;
1370 struct arm_response *arm_resp = NULL;
1371 int found = 0, size = 0, rcode = -1;
1372 octlet_t old, new;
1373 struct arm_request_response *arm_req_resp = NULL;
1374
1375 if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
1376 ((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
1377 DBGMSG("arm_lock64 called by node: %X "
1378 "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X ",
1379 nodeid, (u16) ((addr >> 32) & 0xFFFF),
1380 (u32) (addr & 0xFFFFFFFF),
1381 ext_tcode & 0xFF,
1382 (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
1383 (u32) (be64_to_cpu(data) & 0xFFFFFFFF));
1384 } else {
1385 DBGMSG("arm_lock64 called by node: %X "
1386 "addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X arg: "
1387 "%8.8X %8.8X ",
1388 nodeid, (u16) ((addr >> 32) & 0xFFFF),
1389 (u32) (addr & 0xFFFFFFFF),
1390 ext_tcode & 0xFF,
1391 (u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
1392 (u32) (be64_to_cpu(data) & 0xFFFFFFFF),
1393 (u32) ((be64_to_cpu(arg) >> 32) & 0xFFFFFFFF),
1394 (u32) (be64_to_cpu(arg) & 0xFFFFFFFF));
1395 }
1396 spin_lock_irqsave(&host_info_lock, irqflags);
1397 hi = find_host_info(host); /* search address entry in the host's file_info list */
1398 if (hi != NULL) {
1399 list_for_each_entry(fi, &hi->file_info_list, list) {
1400 entry = fi->addr_list.next;
1401 while (entry != &(fi->addr_list)) {
1402 arm_addr =
1403 list_entry(entry, struct arm_addr,
1404 addr_list);
1405 if (((arm_addr->start) <= (addr))
1406 && ((arm_addr->end) >=
1407 (addr + sizeof(*store)))) {
1408 found = 1;
1409 break;
1410 }
1411 entry = entry->next;
1412 }
1413 if (found) {
1414 break;
1415 }
1416 }
1417 }
1418 rcode = -1;
1419 if (!found) {
1420 printk(KERN_ERR
1421 "raw1394: arm_lock64 FAILED addr_entry not found"
1422 " -> rcode_address_error\n");
1423 spin_unlock_irqrestore(&host_info_lock, irqflags);
1424 return (RCODE_ADDRESS_ERROR);
1425 } else {
1426 DBGMSG("arm_lock64 addr_entry FOUND");
1427 }
1428 if (rcode == -1) {
1429 if (arm_addr->access_rights & ARM_LOCK) {
1430 if (!(arm_addr->client_transactions & ARM_LOCK)) {
1431 memcpy(&old,
1432 (arm_addr->addr_space_buffer) + (addr -
1433 (arm_addr->
1434 start)),
1435 sizeof(old));
1436 switch (ext_tcode) {
1437 case (EXTCODE_MASK_SWAP):
1438 new = data | (old & ~arg);
1439 break;
1440 case (EXTCODE_COMPARE_SWAP):
1441 if (old == arg) {
1442 new = data;
1443 } else {
1444 new = old;
1445 }
1446 break;
1447 case (EXTCODE_FETCH_ADD):
1448 new =
1449 cpu_to_be64(be64_to_cpu(data) +
1450 be64_to_cpu(old));
1451 break;
1452 case (EXTCODE_LITTLE_ADD):
1453 new =
1454 cpu_to_le64(le64_to_cpu(data) +
1455 le64_to_cpu(old));
1456 break;
1457 case (EXTCODE_BOUNDED_ADD):
1458 if (old != arg) {
1459 new =
1460 cpu_to_be64(be64_to_cpu
1461 (data) +
1462 be64_to_cpu
1463 (old));
1464 } else {
1465 new = old;
1466 }
1467 break;
1468 case (EXTCODE_WRAP_ADD):
1469 if (old != arg) {
1470 new =
1471 cpu_to_be64(be64_to_cpu
1472 (data) +
1473 be64_to_cpu
1474 (old));
1475 } else {
1476 new = data;
1477 }
1478 break;
1479 default:
1480 printk(KERN_ERR
1481 "raw1394: arm_lock64 FAILED "
1482 "ext_tcode not allowed -> rcode_type_error\n");
1483 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1484 break;
1485 } /* switch */
1486 if (rcode == -1) {
1487 DBGMSG
1488 ("arm_lock64 -> (rcode_complete)");
1489 rcode = RCODE_COMPLETE;
1490 memcpy(store, &old, sizeof(*store));
1491 memcpy((arm_addr->addr_space_buffer) +
1492 (addr - (arm_addr->start)),
1493 &new, sizeof(*store));
1494 }
1495 }
1496 } else {
1497 rcode = RCODE_TYPE_ERROR; /* function not allowed */
1498 DBGMSG
1499 ("arm_lock64 -> rcode_type_error (access denied)");
1500 }
1501 }
1502 if (arm_addr->notification_options & ARM_LOCK) {
1503 byte_t *buf1, *buf2;
1504 DBGMSG("arm_lock64 -> entering notification-section");
1505 req = __alloc_pending_request(GFP_ATOMIC);
1506 if (!req) {
1507 spin_unlock_irqrestore(&host_info_lock, irqflags);
1508 DBGMSG("arm_lock64 -> rcode_conflict_error");
1509 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1510 The request may be retried */
1511 }
1512 size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response); /* maximum */
1513 req->data = kmalloc(size, GFP_ATOMIC);
1514 if (!(req->data)) {
1515 free_pending_request(req);
1516 spin_unlock_irqrestore(&host_info_lock, irqflags);
1517 DBGMSG("arm_lock64 -> rcode_conflict_error");
1518 return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
1519 The request may be retried */
1520 }
1521 req->free_data = 1;
1522 arm_req_resp = (struct arm_request_response *)(req->data);
1523 arm_req = (struct arm_request *)((byte_t *) (req->data) +
1524 (sizeof
1525 (struct
1526 arm_request_response)));
1527 arm_resp =
1528 (struct arm_response *)((byte_t *) (arm_req) +
1529 (sizeof(struct arm_request)));
1530 buf1 = (byte_t *) arm_resp + sizeof(struct arm_response);
1531 buf2 = buf1 + 2 * sizeof(*store);
1532 if ((ext_tcode == EXTCODE_FETCH_ADD) ||
1533 (ext_tcode == EXTCODE_LITTLE_ADD)) {
1534 arm_req->buffer_length = sizeof(*store);
1535 memcpy(buf1, &data, sizeof(*store));
1536
1537 } else {
1538 arm_req->buffer_length = 2 * sizeof(*store);
1539 memcpy(buf1, &arg, sizeof(*store));
1540 memcpy(buf1 + sizeof(*store), &data, sizeof(*store));
1541 }
1542 if (rcode == RCODE_COMPLETE) {
1543 arm_resp->buffer_length = sizeof(*store);
1544 memcpy(buf2, &old, sizeof(*store));
1545 } else {
1546 arm_resp->buffer_length = 0;
1547 }
1548 req->file_info = fi;
1549 req->req.type = RAW1394_REQ_ARM;
1550 req->req.generation = get_hpsb_generation(host);
1551 req->req.misc = ((((sizeof(*store)) << 16) & (0xFFFF0000)) |
1552 (ARM_LOCK & 0xFF));
1553 req->req.tag = arm_addr->arm_tag;
1554 req->req.recvb = arm_addr->recvb;
1555 req->req.length = size;
1556 arm_req->generation = req->req.generation;
1557 arm_req->extended_transaction_code = ext_tcode;
1558 arm_req->destination_offset = addr;
1559 arm_req->source_nodeid = nodeid;
1560 arm_req->destination_nodeid = host->node_id;
1561 arm_req->tlabel = (flags >> 10) & 0x3f;
1562 arm_req->tcode = (flags >> 4) & 0x0f;
1563 arm_resp->response_code = rcode;
1564 arm_req_resp->request = int2ptr((arm_addr->recvb) +
1565 sizeof(struct
1566 arm_request_response));
1567 arm_req_resp->response =
1568 int2ptr((arm_addr->recvb) +
1569 sizeof(struct arm_request_response) +
1570 sizeof(struct arm_request));
1571 arm_req->buffer =
1572 int2ptr((arm_addr->recvb) +
1573 sizeof(struct arm_request_response) +
1574 sizeof(struct arm_request) +
1575 sizeof(struct arm_response));
1576 arm_resp->buffer =
1577 int2ptr((arm_addr->recvb) +
1578 sizeof(struct arm_request_response) +
1579 sizeof(struct arm_request) +
1580 sizeof(struct arm_response) + 2 * sizeof(*store));
1581 queue_complete_req(req);
1582 }
1583 spin_unlock_irqrestore(&host_info_lock, irqflags);
1584 return (rcode);
1585}
1586
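/* Register an ARM address range for this client: allocate and
 * initialize the vmalloc'ed backing buffer, refuse ranges this host
 * already owns (-EALREADY), and register the range with the hpsb core
 * unless an identical range is already registered via another host,
 * in which case only the local bookkeeping entry is added. */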
1587static int arm_register(struct file_info *fi, struct pending_request *req)
1588{
1589 int retval;
1590 struct arm_addr *addr;
1591 struct host_info *hi;
1592 struct file_info *fi_hlp = NULL;
1593 struct list_head *entry;
1594 struct arm_addr *arm_addr = NULL;
1595 int same_host, another_host;
1596 unsigned long flags;
1597
1598 DBGMSG("arm_register called "
1599 "addr(Offset): %8.8x %8.8x length: %u "
1600 "rights: %2.2X notify: %2.2X "
1601 "max_blk_len: %4.4X",
1602 (u32) ((req->req.address >> 32) & 0xFFFF),
1603 (u32) (req->req.address & 0xFFFFFFFF),
1604 req->req.length, ((req->req.misc >> 8) & 0xFF),
1605 (req->req.misc & 0xFF), ((req->req.misc >> 16) & 0xFFFF));
1606 /* check address range */
1607 if ((((req->req.address) & ~(0xFFFFFFFFFFFFULL)) != 0) ||
1608 (((req->req.address + req->req.length) & ~(0xFFFFFFFFFFFFULL)) !=
1609 0)) {
1610 req->req.length = 0;
1611 return (-EINVAL);
1612 }
1613 /* address list entry for this file_info */
1614 addr = kmalloc(sizeof(*addr), GFP_KERNEL);
1615 if (!addr) {
1616 req->req.length = 0;
1617 return (-ENOMEM);
1618 }
1619 /* allocation of addr_space_buffer */
1620 addr->addr_space_buffer = vmalloc(req->req.length);
1621 if (!(addr->addr_space_buffer)) {
1622 kfree(addr);
1623 req->req.length = 0;
1624 return (-ENOMEM);
1625 }
1626 /* initialization of addr_space_buffer */
1627 if ((req->req.sendb) == (unsigned long)NULL) {
1628 /* init: set 0 */
1629 memset(addr->addr_space_buffer, 0, req->req.length);
1630 } else {
1631 /* init: user -> kernel */
1632 if (copy_from_user
1633 (addr->addr_space_buffer, int2ptr(req->req.sendb),
1634 req->req.length)) {
1635 vfree(addr->addr_space_buffer);
1636 kfree(addr);
1637 return (-EFAULT);
1638 }
1639 }
1640 INIT_LIST_HEAD(&addr->addr_list);
1641 addr->arm_tag = req->req.tag;
1642 addr->start = req->req.address;
1643 addr->end = req->req.address + req->req.length;
1644 addr->access_rights = (u8) (req->req.misc & 0x0F);
1645 addr->notification_options = (u8) ((req->req.misc >> 4) & 0x0F);
1646 addr->client_transactions = (u8) ((req->req.misc >> 8) & 0x0F);
1647 addr->access_rights |= addr->client_transactions;
1648 addr->notification_options |= addr->client_transactions;
1649 addr->recvb = req->req.recvb;
1650 addr->rec_length = (u16) ((req->req.misc >> 16) & 0xFFFF);
1651
1652 spin_lock_irqsave(&host_info_lock, flags);
1653 hi = find_host_info(fi->host);
1654 same_host = 0;
1655 another_host = 0;
1656 /* does the same host already have an entry covering this address range? */
1657 list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
1658 entry = fi_hlp->addr_list.next;
1659 while (entry != &(fi_hlp->addr_list)) {
1660 arm_addr =
1661 list_entry(entry, struct arm_addr, addr_list);
1662 if ((arm_addr->start == addr->start)
1663 && (arm_addr->end == addr->end)) {
1664 DBGMSG("same host owns same "
1665 "address range -> EALREADY");
1666 same_host = 1;
1667 break;
1668 }
1669 entry = entry->next;
1670 }
1671 if (same_host) {
1672 break;
1673 }
1674 }
1675 if (same_host) {
1676 /* address range occupied by same host */
1677 spin_unlock_irqrestore(&host_info_lock, flags);
1678 vfree(addr->addr_space_buffer);
1679 kfree(addr);
1680 return (-EALREADY);
1681 }
1682 /* another host with a valid address entry covering the same address range */
1683 list_for_each_entry(hi, &host_info_list, list) {
1684 if (hi->host != fi->host) {
1685 list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
1686 entry = fi_hlp->addr_list.next;
1687 while (entry != &(fi_hlp->addr_list)) {
1688 arm_addr =
1689 list_entry(entry, struct arm_addr,
1690 addr_list);
1691 if ((arm_addr->start == addr->start)
1692 && (arm_addr->end == addr->end)) {
1693 DBGMSG
1694 ("another host owns same "
1695 "address range");
1696 another_host = 1;
1697 break;
1698 }
1699 entry = entry->next;
1700 }
1701 if (another_host) {
1702 break;
1703 }
1704 }
1705 }
1706 }
1707 spin_unlock_irqrestore(&host_info_lock, flags);
1708
1709 if (another_host) {
1710 DBGMSG("another host's entry is valid -> SUCCESS");
1711 if (copy_to_user(int2ptr(req->req.recvb),
1712 &addr->start, sizeof(u64))) {
1713 printk(KERN_ERR "raw1394: arm_register failed: "
1714 "cannot copy start address to user -> EFAULT\n");
1715 vfree(addr->addr_space_buffer);
1716 kfree(addr);
1717 return (-EFAULT);
1718 }
1719 free_pending_request(req); /* immediate success or fail */
1720 /* INSERT ENTRY */
1721 spin_lock_irqsave(&host_info_lock, flags);
1722 list_add_tail(&addr->addr_list, &fi->addr_list);
1723 spin_unlock_irqrestore(&host_info_lock, flags);
1724 return 0;
1725 }
1726 retval =
1727 hpsb_register_addrspace(&raw1394_highlevel, fi->host, &arm_ops,
1728 req->req.address,
1729 req->req.address + req->req.length);
1730 if (retval) {
1731 /* INSERT ENTRY */
1732 spin_lock_irqsave(&host_info_lock, flags);
1733 list_add_tail(&addr->addr_list, &fi->addr_list);
1734 spin_unlock_irqrestore(&host_info_lock, flags);
1735 } else {
1736 DBGMSG("arm_register failed: hpsb_register_addrspace returned %d\n", retval);
1737 vfree(addr->addr_space_buffer);
1738 kfree(addr);
1739 return (-EALREADY);
1740 }
1741 free_pending_request(req); /* immediate success or fail */
1742 return 0;
1743}
1744
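/* Tear down an ARM registration: look up the client's entry by start
 * address, drop the hpsb address space unless an identical range is
 * still registered via another host, and free the backing buffer. */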
1745static int arm_unregister(struct file_info *fi, struct pending_request *req)
1746{
1747 int found = 0;
1748 int retval = 0;
1749 struct list_head *entry;
1750 struct arm_addr *addr = NULL;
1751 struct host_info *hi;
1752 struct file_info *fi_hlp = NULL;
1753 struct arm_addr *arm_addr = NULL;
1754 int another_host;
1755 unsigned long flags;
1756
1757 DBGMSG("arm_unregister called addr(Offset): "
1758 "%8.8x %8.8x",
1759 (u32) ((req->req.address >> 32) & 0xFFFF),
1760 (u32) (req->req.address & 0xFFFFFFFF));
1761 spin_lock_irqsave(&host_info_lock, flags);
1762 /* get addr */
1763 entry = fi->addr_list.next;
1764 while (entry != &(fi->addr_list)) {
1765 addr = list_entry(entry, struct arm_addr, addr_list);
1766 if (addr->start == req->req.address) {
1767 found = 1;
1768 break;
1769 }
1770 entry = entry->next;
1771 }
1772 if (!found) {
1773 DBGMSG("arm_unregister addr not found");
1774 spin_unlock_irqrestore(&host_info_lock, flags);
1775 return (-EINVAL);
1776 }
1777 DBGMSG("arm_unregister addr found");
1778 another_host = 0;
1779 /* another host with a valid address entry covering
1780 the same address range */
1781 list_for_each_entry(hi, &host_info_list, list) {
1782 if (hi->host != fi->host) {
1783 list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
1784 entry = fi_hlp->addr_list.next;
1785 while (entry != &(fi_hlp->addr_list)) {
1786 arm_addr = list_entry(entry,
1787 struct arm_addr,
1788 addr_list);
1789 if (arm_addr->start == addr->start) {
1790 DBGMSG("another host owns "
1791 "same address range");
1792 another_host = 1;
1793 break;
1794 }
1795 entry = entry->next;
1796 }
1797 if (another_host) {
1798 break;
1799 }
1800 }
1801 }
1802 }
1803 if (another_host) {
1804 DBGMSG("delete entry from list -> success");
1805 list_del(&addr->addr_list);
1806 spin_unlock_irqrestore(&host_info_lock, flags);
1807 vfree(addr->addr_space_buffer);
1808 kfree(addr);
1809 free_pending_request(req); /* immediate success or fail */
1810 return 0;
1811 }
1812 retval =
1813 hpsb_unregister_addrspace(&raw1394_highlevel, fi->host,
1814 addr->start);
1815 if (!retval) {
1816 printk(KERN_ERR "raw1394: arm_unregister failed -> EINVAL\n");
1817 spin_unlock_irqrestore(&host_info_lock, flags);
1818 return (-EINVAL);
1819 }
1820 DBGMSG("delete entry from list -> success");
1821 list_del(&addr->addr_list);
1822 spin_unlock_irqrestore(&host_info_lock, flags);
1823 vfree(addr->addr_space_buffer);
1824 kfree(addr);
1825 free_pending_request(req); /* immediate success or fail */
1826 return 0;
1827}
1828
1829/* Copy data from ARM buffer(s) to user buffer. */
1830static int arm_get_buf(struct file_info *fi, struct pending_request *req)
1831{
1832 struct arm_addr *arm_addr = NULL;
1833 unsigned long flags;
1834 unsigned long offset;
1835
1836 struct list_head *entry;
1837
1838 DBGMSG("arm_get_buf "
1839 "addr(Offset): %04X %08X length: %u",
1840 (u32) ((req->req.address >> 32) & 0xFFFF),
1841 (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length);
1842
1843 spin_lock_irqsave(&host_info_lock, flags);
1844 entry = fi->addr_list.next;
1845 while (entry != &(fi->addr_list)) {
1846 arm_addr = list_entry(entry, struct arm_addr, addr_list);
1847 if ((arm_addr->start <= req->req.address) &&
1848 (arm_addr->end > req->req.address)) {
1849 if (req->req.address + req->req.length <= arm_addr->end) {
1850 offset = req->req.address - arm_addr->start;
1851 spin_unlock_irqrestore(&host_info_lock, flags);
1852
1853 DBGMSG
1854 ("arm_get_buf copy_to_user( %08X, %p, %u )",
1855 (u32) req->req.recvb,
1856 arm_addr->addr_space_buffer + offset,
1857 (u32) req->req.length);
1858 if (copy_to_user
1859 (int2ptr(req->req.recvb),
1860 arm_addr->addr_space_buffer + offset,
1861 req->req.length))
1862 return (-EFAULT);
1863
1864 /* We have to free the request, because we
1865 * queue no response, and therefore nobody
1866 * will free it. */
1867 free_pending_request(req);
1868 return 0;
1869 } else {
1870 DBGMSG("arm_get_buf request exceeded mapping");
1871 spin_unlock_irqrestore(&host_info_lock, flags);
1872 return (-EINVAL);
1873 }
1874 }
1875 entry = entry->next;
1876 }
1877 spin_unlock_irqrestore(&host_info_lock, flags);
1878 return (-EINVAL);
1879}
1880
1881/* Copy data from user buffer to ARM buffer(s). */
1882static int arm_set_buf(struct file_info *fi, struct pending_request *req)
1883{
1884 struct arm_addr *arm_addr = NULL;
1885 unsigned long flags;
1886 unsigned long offset;
1887
1888 struct list_head *entry;
1889
1890 DBGMSG("arm_set_buf "
1891 "addr(Offset): %04X %08X length: %u",
1892 (u32) ((req->req.address >> 32) & 0xFFFF),
1893 (u32) (req->req.address & 0xFFFFFFFF), (u32) req->req.length);
1894
1895 spin_lock_irqsave(&host_info_lock, flags);
1896 entry = fi->addr_list.next;
1897 while (entry != &(fi->addr_list)) {
1898 arm_addr = list_entry(entry, struct arm_addr, addr_list);
1899 if ((arm_addr->start <= req->req.address) &&
1900 (arm_addr->end > req->req.address)) {
1901 if (req->req.address + req->req.length <= arm_addr->end) {
1902 offset = req->req.address - arm_addr->start;
1903 spin_unlock_irqrestore(&host_info_lock, flags);
1904
1905 DBGMSG
1906 ("arm_set_buf copy_from_user( %p, %08X, %u )",
1907 arm_addr->addr_space_buffer + offset,
1908 (u32) req->req.sendb,
1909 (u32) req->req.length);
1910 if (copy_from_user
1911 (arm_addr->addr_space_buffer + offset,
1912 int2ptr(req->req.sendb),
1913 req->req.length))
1914 return (-EFAULT);
1915
1916 /* We have to free the request, because we
1917 * queue no response, and therefore nobody
1918 * will free it. */
1919 free_pending_request(req);
1920 return 0;
1921 } else {
1922 DBGMSG("arm_set_buf request exceeded mapping");
1923 spin_unlock_irqrestore(&host_info_lock, flags);
1924 return (-EINVAL);
1925 }
1926 }
1927 entry = entry->next;
1928 }
1929 spin_unlock_irqrestore(&host_info_lock, flags);
1930 return (-EINVAL);
1931}
1932
1933static int reset_notification(struct file_info *fi, struct pending_request *req)
1934{
1935 DBGMSG("reset_notification called - switch %s ",
1936 (req->req.misc == RAW1394_NOTIFY_OFF) ? "OFF" : "ON");
1937 if ((req->req.misc == RAW1394_NOTIFY_OFF) ||
1938 (req->req.misc == RAW1394_NOTIFY_ON)) {
1939 fi->notification = (u8) req->req.misc;
1940 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
1941 return 0;
1942 }
1943 /* error EINVAL (22) invalid argument */
1944 return (-EINVAL);
1945}
1946
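/* Transmit a raw PHY packet; note that the quadlet is passed by value
 * in req.sendb rather than through a user pointer. Completion is
 * reported asynchronously via queue_complete_cb(). */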
1947static int write_phypacket(struct file_info *fi, struct pending_request *req)
1948{
1949 struct hpsb_packet *packet = NULL;
1950 int retval = 0;
1951 quadlet_t data;
1952 unsigned long flags;
1953
1954 data = be32_to_cpu((u32) req->req.sendb);
1955 DBGMSG("write_phypacket called - quadlet 0x%8.8x ", data);
1956 packet = hpsb_make_phypacket(fi->host, data);
1957 if (!packet)
1958 return -ENOMEM;
1959 req->req.length = 0;
1960 req->packet = packet;
1961 hpsb_set_packet_complete_task(packet,
1962 (void (*)(void *))queue_complete_cb, req);
1963 spin_lock_irqsave(&fi->reqlists_lock, flags);
1964 list_add_tail(&req->list, &fi->req_pending);
1965 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
1966 packet->generation = req->req.generation;
1967 retval = hpsb_send_packet(packet);
1968 DBGMSG("write_phypacket send_packet called => retval: %d ", retval);
1969 if (retval < 0) {
1970 req->req.error = RAW1394_ERROR_SEND_ERROR;
1971 req->req.length = 0;
1972 queue_complete_req(req);
1973 }
1974 return 0;
1975}
1976
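/* Copy the host's local configuration ROM, its cache length, the CSR
 * generation and the csr1212 read status out to the four userspace
 * locations named in the request; completes synchronously. */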
1977static int get_config_rom(struct file_info *fi, struct pending_request *req)
1978{
1979 int ret = 0;
1980 quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
1981 int status;
1982
1983 if (!data)
1984 return -ENOMEM;
1985
1986 status =
1987 csr1212_read(fi->host->csr.rom, CSR1212_CONFIG_ROM_SPACE_OFFSET,
1988 data, req->req.length);
1989 if (copy_to_user(int2ptr(req->req.recvb), data, req->req.length))
1990 ret = -EFAULT;
1991 if (copy_to_user
1992 (int2ptr(req->req.tag), &fi->host->csr.rom->cache_head->len,
1993 sizeof(fi->host->csr.rom->cache_head->len)))
1994 ret = -EFAULT;
1995 if (copy_to_user(int2ptr(req->req.address), &fi->host->csr.generation,
1996 sizeof(fi->host->csr.generation)))
1997 ret = -EFAULT;
1998 if (copy_to_user(int2ptr(req->req.sendb), &status, sizeof(status)))
1999 ret = -EFAULT;
2000 kfree(data);
2001 if (ret >= 0) {
2002 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2003 }
2004 return ret;
2005}
2006
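/* Replace the host's config ROM with an image supplied by userspace,
 * report the hpsb_update_config_rom() status back through req.recvb,
 * and set the file's cfgrom_upd flag. */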
2007static int update_config_rom(struct file_info *fi, struct pending_request *req)
2008{
2009 int ret = 0;
2010 quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
2011 if (!data)
2012 return -ENOMEM;
2013 if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) {
2014 ret = -EFAULT;
2015 } else {
2016 int status = hpsb_update_config_rom(fi->host,
2017 data, req->req.length,
2018 (unsigned char)req->req.
2019 misc);
2020 if (copy_to_user
2021 (int2ptr(req->req.recvb), &status, sizeof(status)))
2022 ret = -EFAULT; /* copy_to_user failure is a fault, not OOM */
2023 }
2024 kfree(data);
2025 if (ret >= 0) {
2026 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2027 fi->cfgrom_upd = 1;
2028 }
2029 return ret;
2030}
2031
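/* Add, replace or delete one of the client's CSR1212 vendor
 * directories in the local config ROM: req.misc selects the slot (~0
 * allocates a new one, length 0 deletes), the directory contents are
 * parsed from the userspace image and attached to the ROM's root
 * directory, and the ROM image is regenerated. */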
2032static int modify_config_rom(struct file_info *fi, struct pending_request *req)
2033{
2034 struct csr1212_keyval *kv;
2035 struct csr1212_csr_rom_cache *cache;
2036 struct csr1212_dentry *dentry;
2037 u32 dr;
2038 int ret = 0;
2039
2040 if (req->req.misc == ~0) {
2041 if (req->req.length == 0)
2042 return -EINVAL;
2043
2044 /* Find an unused slot */
2045 for (dr = 0;
2046 dr < RAW1394_MAX_USER_CSR_DIRS && fi->csr1212_dirs[dr];
2047 dr++) ;
2048
2049 if (dr == RAW1394_MAX_USER_CSR_DIRS)
2050 return -ENOMEM;
2051
2052 fi->csr1212_dirs[dr] =
2053 csr1212_new_directory(CSR1212_KV_ID_VENDOR);
2054 if (!fi->csr1212_dirs[dr])
2055 return -ENOMEM;
2056 } else {
2057 dr = req->req.misc;
2058 if (!fi->csr1212_dirs[dr])
2059 return -EINVAL;
2060
2061 /* Delete old stuff */
2062 for (dentry =
2063 fi->csr1212_dirs[dr]->value.directory.dentries_head;
2064 dentry; dentry = dentry->next) {
2065 csr1212_detach_keyval_from_directory(fi->host->csr.rom->
2066 root_kv,
2067 dentry->kv);
2068 }
2069
2070 if (req->req.length == 0) {
2071 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2072 fi->csr1212_dirs[dr] = NULL;
2073
2074 hpsb_update_config_rom_image(fi->host);
2075 free_pending_request(req);
2076 return 0;
2077 }
2078 }
2079
2080 cache = csr1212_rom_cache_malloc(0, req->req.length);
2081 if (!cache) {
2082 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2083 fi->csr1212_dirs[dr] = NULL;
2084 return -ENOMEM;
2085 }
2086
2087 cache->filled_head = kmalloc(sizeof(*cache->filled_head), GFP_KERNEL);
2088 if (!cache->filled_head) {
2089 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2090 fi->csr1212_dirs[dr] = NULL;
2091 CSR1212_FREE(cache);
2092 return -ENOMEM;
2093 }
2094 cache->filled_tail = cache->filled_head;
2095
2096 if (copy_from_user(cache->data, int2ptr(req->req.sendb),
2097 req->req.length)) {
2098 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2099 fi->csr1212_dirs[dr] = NULL;
2100 ret = -EFAULT;
2101 } else {
2102 cache->len = req->req.length;
2103 cache->filled_head->offset_start = 0;
2104 cache->filled_head->offset_end = cache->size - 1;
2105
2106 cache->layout_head = cache->layout_tail = fi->csr1212_dirs[dr];
2107
2108 ret = CSR1212_SUCCESS;
2109 /* parse all the items */
2110 for (kv = cache->layout_head; ret == CSR1212_SUCCESS && kv;
2111 kv = kv->next) {
2112 ret = csr1212_parse_keyval(kv, cache);
2113 }
2114
2115 /* attach top level items to the root directory */
2116 for (dentry =
2117 fi->csr1212_dirs[dr]->value.directory.dentries_head;
2118 ret == CSR1212_SUCCESS && dentry; dentry = dentry->next) {
2119 ret =
2120 csr1212_attach_keyval_to_directory(fi->host->csr.
2121 rom->root_kv,
2122 dentry->kv);
2123 }
2124
2125 if (ret == CSR1212_SUCCESS) {
2126 ret = hpsb_update_config_rom_image(fi->host);
2127
2128 if (ret >= 0 && copy_to_user(int2ptr(req->req.recvb),
2129 &dr, sizeof(dr))) {
2130 ret = -EFAULT; /* copy_to_user failure is a fault, not OOM */
2131 }
2132 }
2133 }
2134 kfree(cache->filled_head);
2135 CSR1212_FREE(cache);
2136
2137 if (ret >= 0) {
2138 /* we have to free the request, because we queue no response,
2139 * and therefore nobody will free it */
2140 free_pending_request(req);
2141 return 0;
2142 } else {
2143 for (dentry =
2144 fi->csr1212_dirs[dr]->value.directory.dentries_head;
2145 dentry; dentry = dentry->next) {
2146 csr1212_detach_keyval_from_directory(fi->host->csr.rom->
2147 root_kv,
2148 dentry->kv);
2149 }
2150 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2151 fi->csr1212_dirs[dr] = NULL;
2152 return ret;
2153 }
2154}
2155
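/* Dispatcher for requests from fully connected clients: bookkeeping
 * request types are handled directly, everything that touches the bus
 * is first checked against the current bus generation, and the rest is
 * routed to the async send/request paths. */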
2156static int state_connected(struct file_info *fi, struct pending_request *req)
2157{
2158 int node = req->req.address >> 48;
2159
2160 req->req.error = RAW1394_ERROR_NONE;
2161
2162 switch (req->req.type) {
2163
2164 case RAW1394_REQ_ECHO:
2165 queue_complete_req(req);
2166 return 0;
2167
2168 case RAW1394_REQ_ARM_REGISTER:
2169 return arm_register(fi, req);
2170
2171 case RAW1394_REQ_ARM_UNREGISTER:
2172 return arm_unregister(fi, req);
2173
2174 case RAW1394_REQ_ARM_SET_BUF:
2175 return arm_set_buf(fi, req);
2176
2177 case RAW1394_REQ_ARM_GET_BUF:
2178 return arm_get_buf(fi, req);
2179
2180 case RAW1394_REQ_RESET_NOTIFY:
2181 return reset_notification(fi, req);
2182
2183 case RAW1394_REQ_ISO_SEND:
2184 case RAW1394_REQ_ISO_LISTEN:
2185 printk(KERN_DEBUG "raw1394: old iso ABI has been removed\n");
2186 req->req.error = RAW1394_ERROR_COMPAT;
2187 req->req.misc = RAW1394_KERNELAPI_VERSION;
2188 queue_complete_req(req);
2189 return 0;
2190
2191 case RAW1394_REQ_FCP_LISTEN:
2192 handle_fcp_listen(fi, req);
2193 return 0;
2194
2195 case RAW1394_REQ_RESET_BUS:
2196 if (req->req.misc == RAW1394_LONG_RESET) {
2197 DBGMSG("busreset called (type: LONG)");
2198 hpsb_reset_bus(fi->host, LONG_RESET);
2199 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2200 return 0;
2201 }
2202 if (req->req.misc == RAW1394_SHORT_RESET) {
2203 DBGMSG("busreset called (type: SHORT)");
2204 hpsb_reset_bus(fi->host, SHORT_RESET);
2205 free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
2206 return 0;
2207 }
2208 /* error EINVAL (22) invalid argument */
2209 return (-EINVAL);
2210 case RAW1394_REQ_GET_ROM:
2211 return get_config_rom(fi, req);
2212
2213 case RAW1394_REQ_UPDATE_ROM:
2214 return update_config_rom(fi, req);
2215
2216 case RAW1394_REQ_MODIFY_ROM:
2217 return modify_config_rom(fi, req);
2218 }
2219
2220 if (req->req.generation != get_hpsb_generation(fi->host)) {
2221 req->req.error = RAW1394_ERROR_GENERATION;
2222 req->req.generation = get_hpsb_generation(fi->host);
2223 req->req.length = 0;
2224 queue_complete_req(req);
2225 return 0;
2226 }
2227
2228 switch (req->req.type) {
2229 case RAW1394_REQ_PHYPACKET:
2230 return write_phypacket(fi, req);
2231 case RAW1394_REQ_ASYNC_SEND:
2232 return handle_async_send(fi, req);
2233 }
2234
2235 if (req->req.length == 0) {
2236 req->req.error = RAW1394_ERROR_INVALID_ARG;
2237 queue_complete_req(req);
2238 return 0;
2239 }
2240
2241 return handle_async_request(fi, req, node);
2242}
2243
2244static ssize_t raw1394_write(struct file *file, const char __user * buffer,
2245 size_t count, loff_t * offset_is_ignored)
2246{
2247 struct file_info *fi = file->private_data;
2248 struct pending_request *req;
2249 ssize_t retval = -EBADFD;
2250
2251#ifdef CONFIG_COMPAT
2252 if (count == sizeof(struct compat_raw1394_req) &&
2253 sizeof(struct compat_raw1394_req) !=
2254 sizeof(struct raw1394_request)) {
2255 buffer = raw1394_compat_write(buffer);
2256 if (IS_ERR((__force void *)buffer))
2257 return PTR_ERR((__force void *)buffer);
2258 } else
2259#endif
2260 if (count != sizeof(struct raw1394_request)) {
2261 return -EINVAL;
2262 }
2263
2264 req = alloc_pending_request();
2265 if (req == NULL) {
2266 return -ENOMEM;
2267 }
2268 req->file_info = fi;
2269
2270 if (copy_from_user(&req->req, buffer, sizeof(struct raw1394_request))) {
2271 free_pending_request(req);
2272 return -EFAULT;
2273 }
2274
2275 if (!mutex_trylock(&fi->state_mutex)) {
2276 free_pending_request(req);
2277 return -EAGAIN;
2278 }
2279
2280 switch (fi->state) {
2281 case opened:
2282 retval = state_opened(fi, req);
2283 break;
2284
2285 case initialized:
2286 retval = state_initialized(fi, req);
2287 break;
2288
2289 case connected:
2290 retval = state_connected(fi, req);
2291 break;
2292 }
2293
2294 mutex_unlock(&fi->state_mutex);
2295
2296 if (retval < 0) {
2297 free_pending_request(req);
2298 } else {
2299 BUG_ON(retval);
2300 retval = count;
2301 }
2302
2303 return retval;
2304}
2305
2306/* rawiso operations */
2307
2308/* check if any RAW1394_REQ_RAWISO_ACTIVITY event is already in the
2309  * completion queue (reqlists_lock must be held) */
2310static inline int __rawiso_event_in_queue(struct file_info *fi)
2311{
2312 struct pending_request *req;
2313
2314 list_for_each_entry(req, &fi->req_complete, list)
2315 if (req->req.type == RAW1394_REQ_RAWISO_ACTIVITY)
2316 return 1;
2317
2318 return 0;
2319}
2320
2321/* put a RAWISO_ACTIVITY event in the queue, if one isn't there already */
2322static void queue_rawiso_event(struct file_info *fi)
2323{
2324 unsigned long flags;
2325
2326 spin_lock_irqsave(&fi->reqlists_lock, flags);
2327
2328 /* only one ISO activity event may be in the queue */
2329 if (!__rawiso_event_in_queue(fi)) {
2330 struct pending_request *req =
2331 __alloc_pending_request(GFP_ATOMIC);
2332
2333 if (req) {
2334 req->file_info = fi;
2335 req->req.type = RAW1394_REQ_RAWISO_ACTIVITY;
2336 req->req.generation = get_hpsb_generation(fi->host);
2337 __queue_complete_req(req);
2338 } else {
2339 /* on allocation failure, signal an overflow */
2340 if (fi->iso_handle) {
2341 atomic_inc(&fi->iso_handle->overflows);
2342 }
2343 }
2344 }
2345 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
2346}
2347
2348static void rawiso_activity_cb(struct hpsb_iso *iso)
2349{
2350 unsigned long flags;
2351 struct host_info *hi;
2352 struct file_info *fi;
2353
2354 spin_lock_irqsave(&host_info_lock, flags);
2355 hi = find_host_info(iso->host);
2356
2357 if (hi != NULL) {
2358 list_for_each_entry(fi, &hi->file_info_list, list) {
2359 if (fi->iso_handle == iso)
2360 queue_rawiso_event(fi);
2361 }
2362 }
2363
2364 spin_unlock_irqrestore(&host_info_lock, flags);
2365}
2366
2367/* helper function - gather all the kernel iso status bits for returning to user-space */
2368static void raw1394_iso_fill_status(struct hpsb_iso *iso,
2369 struct raw1394_iso_status *stat)
2370{
2371 int overflows = atomic_read(&iso->overflows);
2372 int skips = atomic_read(&iso->skips);
2373
2374 stat->config.data_buf_size = iso->buf_size;
2375 stat->config.buf_packets = iso->buf_packets;
2376 stat->config.channel = iso->channel;
2377 stat->config.speed = iso->speed;
2378 stat->config.irq_interval = iso->irq_interval;
2379 stat->n_packets = hpsb_iso_n_ready(iso);
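	/* skips and overflows share one 32-bit status word below: skips in
	 * the high 16 bits, overflows in the low 16 bits */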
2380 stat->overflows = ((skips & 0xFFFF) << 16) | ((overflows & 0xFFFF));
2381 stat->xmit_cycle = iso->xmit_cycle;
2382}
2383
2384static int raw1394_iso_xmit_init(struct file_info *fi, void __user * uaddr)
2385{
2386 struct raw1394_iso_status stat;
2387
2388 if (!fi->host)
2389 return -EINVAL;
2390
2391 if (copy_from_user(&stat, uaddr, sizeof(stat)))
2392 return -EFAULT;
2393
2394 fi->iso_handle = hpsb_iso_xmit_init(fi->host,
2395 stat.config.data_buf_size,
2396 stat.config.buf_packets,
2397 stat.config.channel,
2398 stat.config.speed,
2399 stat.config.irq_interval,
2400 rawiso_activity_cb);
2401 if (!fi->iso_handle)
2402 return -ENOMEM;
2403
2404 fi->iso_state = RAW1394_ISO_XMIT;
2405
2406 raw1394_iso_fill_status(fi->iso_handle, &stat);
2407 if (copy_to_user(uaddr, &stat, sizeof(stat)))
2408 return -EFAULT;
2409
2410 /* queue an event to get things started */
2411 rawiso_activity_cb(fi->iso_handle);
2412
2413 return 0;
2414}
2415
2416static int raw1394_iso_recv_init(struct file_info *fi, void __user * uaddr)
2417{
2418 struct raw1394_iso_status stat;
2419
2420 if (!fi->host)
2421 return -EINVAL;
2422
2423 if (copy_from_user(&stat, uaddr, sizeof(stat)))
2424 return -EFAULT;
2425
2426 fi->iso_handle = hpsb_iso_recv_init(fi->host,
2427 stat.config.data_buf_size,
2428 stat.config.buf_packets,
2429 stat.config.channel,
2430 stat.config.dma_mode,
2431 stat.config.irq_interval,
2432 rawiso_activity_cb);
2433 if (!fi->iso_handle)
2434 return -ENOMEM;
2435
2436 fi->iso_state = RAW1394_ISO_RECV;
2437
2438 raw1394_iso_fill_status(fi->iso_handle, &stat);
2439 if (copy_to_user(uaddr, &stat, sizeof(stat)))
2440 return -EFAULT;
2441 return 0;
2442}
2443
2444static int raw1394_iso_get_status(struct file_info *fi, void __user * uaddr)
2445{
2446 struct raw1394_iso_status stat;
2447 struct hpsb_iso *iso = fi->iso_handle;
2448
2449 raw1394_iso_fill_status(fi->iso_handle, &stat);
2450 if (copy_to_user(uaddr, &stat, sizeof(stat)))
2451 return -EFAULT;
2452
2453 /* reset overflow counter */
2454 atomic_set(&iso->overflows, 0);
2455 /* reset skip counter */
2456 atomic_set(&iso->skips, 0);
2457
2458 return 0;
2459}
2460
2461/* copy N packet_infos out of the ringbuffer into user-supplied array */
2462static int raw1394_iso_recv_packets(struct file_info *fi, void __user * uaddr)
2463{
2464 struct raw1394_iso_packets upackets;
2465 unsigned int packet = fi->iso_handle->first_packet;
2466 int i;
2467
2468 if (copy_from_user(&upackets, uaddr, sizeof(upackets)))
2469 return -EFAULT;
2470
2471 if (upackets.n_packets > hpsb_iso_n_ready(fi->iso_handle))
2472 return -EINVAL;
2473
2474 /* ensure user-supplied buffer is accessible and big enough */
2475 if (!access_ok(VERIFY_WRITE, upackets.infos,
2476 upackets.n_packets *
2477 sizeof(struct raw1394_iso_packet_info)))
2478 return -EFAULT;
2479
2480 /* copy the packet_infos out */
2481 for (i = 0; i < upackets.n_packets; i++) {
2482 if (__copy_to_user(&upackets.infos[i],
2483 &fi->iso_handle->infos[packet],
2484 sizeof(struct raw1394_iso_packet_info)))
2485 return -EFAULT;
2486
2487 packet = (packet + 1) % fi->iso_handle->buf_packets;
2488 }
2489
2490 return 0;
2491}
2492
2493/* copy N packet_infos from user to ringbuffer, and queue them for transmission */
2494static int raw1394_iso_send_packets(struct file_info *fi, void __user * uaddr)
2495{
2496 struct raw1394_iso_packets upackets;
2497 int i, rv;
2498
2499 if (copy_from_user(&upackets, uaddr, sizeof(upackets)))
2500 return -EFAULT;
2501
2502 if (upackets.n_packets >= fi->iso_handle->buf_packets)
2503 return -EINVAL;
2504
2505 if (upackets.n_packets >= hpsb_iso_n_ready(fi->iso_handle))
2506 return -EAGAIN;
2507
2508 /* ensure user-supplied buffer is accessible and big enough */
2509 if (!access_ok(VERIFY_READ, upackets.infos,
2510 upackets.n_packets *
2511 sizeof(struct raw1394_iso_packet_info)))
2512 return -EFAULT;
2513
2514 /* copy the infos structs in and queue the packets */
2515 for (i = 0; i < upackets.n_packets; i++) {
2516 struct raw1394_iso_packet_info info;
2517
2518 if (__copy_from_user(&info, &upackets.infos[i],
2519 sizeof(struct raw1394_iso_packet_info)))
2520 return -EFAULT;
2521
2522 rv = hpsb_iso_xmit_queue_packet(fi->iso_handle, info.offset,
2523 info.len, info.tag, info.sy);
2524 if (rv)
2525 return rv;
2526 }
2527
2528 return 0;
2529}
2530
2531static void raw1394_iso_shutdown(struct file_info *fi)
2532{
2533 if (fi->iso_handle)
2534 hpsb_iso_shutdown(fi->iso_handle);
2535
2536 fi->iso_handle = NULL;
2537 fi->iso_state = RAW1394_ISO_INACTIVE;
2538}
2539
2540static int raw1394_read_cycle_timer(struct file_info *fi, void __user * uaddr)
2541{
2542 struct raw1394_cycle_timer ct;
2543 int err;
2544
2545 err = hpsb_read_cycle_timer(fi->host, &ct.cycle_timer, &ct.local_time);
2546 if (!err)
2547 if (copy_to_user(uaddr, &ct, sizeof(ct)))
2548 err = -EFAULT;
2549 return err;
2550}
2551
2552/* mmap the rawiso xmit/recv buffer */
2553static int raw1394_mmap(struct file *file, struct vm_area_struct *vma)
2554{
2555 struct file_info *fi = file->private_data;
2556 int ret;
2557
2558 if (!mutex_trylock(&fi->state_mutex))
2559 return -EAGAIN;
2560
2561 if (fi->iso_state == RAW1394_ISO_INACTIVE)
2562 ret = -EINVAL;
2563 else
2564 ret = dma_region_mmap(&fi->iso_handle->data_buf, file, vma);
2565
2566 mutex_unlock(&fi->state_mutex);
2567
2568 return ret;
2569}
2570
2571static long raw1394_ioctl_inactive(struct file_info *fi, unsigned int cmd,
2572 void __user *argp)
2573{
2574 switch (cmd) {
2575 case RAW1394_IOC_ISO_XMIT_INIT:
2576 return raw1394_iso_xmit_init(fi, argp);
2577 case RAW1394_IOC_ISO_RECV_INIT:
2578 return raw1394_iso_recv_init(fi, argp);
2579 default:
2580 return -EINVAL;
2581 }
2582}
2583
2584static long raw1394_ioctl_recv(struct file_info *fi, unsigned int cmd,
2585 unsigned long arg)
2586{
2587 void __user *argp = (void __user *)arg;
2588
2589 switch (cmd) {
2590 case RAW1394_IOC_ISO_RECV_START:{
2591 int args[3];
2592
2593 if (copy_from_user(&args[0], argp, sizeof(args)))
2594 return -EFAULT;
2595 return hpsb_iso_recv_start(fi->iso_handle,
2596 args[0], args[1], args[2]);
2597 }
2598 case RAW1394_IOC_ISO_XMIT_RECV_STOP:
2599 hpsb_iso_stop(fi->iso_handle);
2600 return 0;
2601 case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL:
2602 return hpsb_iso_recv_listen_channel(fi->iso_handle, arg);
2603 case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL:
2604 return hpsb_iso_recv_unlisten_channel(fi->iso_handle, arg);
2605 case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK:{
2606 u64 mask;
2607
2608 if (copy_from_user(&mask, argp, sizeof(mask)))
2609 return -EFAULT;
2610 return hpsb_iso_recv_set_channel_mask(fi->iso_handle,
2611 mask);
2612 }
2613 case RAW1394_IOC_ISO_GET_STATUS:
2614 return raw1394_iso_get_status(fi, argp);
2615 case RAW1394_IOC_ISO_RECV_PACKETS:
2616 return raw1394_iso_recv_packets(fi, argp);
2617 case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS:
2618 return hpsb_iso_recv_release_packets(fi->iso_handle, arg);
2619 case RAW1394_IOC_ISO_RECV_FLUSH:
2620 return hpsb_iso_recv_flush(fi->iso_handle);
2621 case RAW1394_IOC_ISO_SHUTDOWN:
2622 raw1394_iso_shutdown(fi);
2623 return 0;
2624 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2625 queue_rawiso_event(fi);
2626 return 0;
2627 default:
2628 return -EINVAL;
2629 }
2630}
2631
2632static long raw1394_ioctl_xmit(struct file_info *fi, unsigned int cmd,
2633 void __user *argp)
2634{
2635 switch (cmd) {
2636 case RAW1394_IOC_ISO_XMIT_START:{
2637 int args[2];
2638
2639 if (copy_from_user(&args[0], argp, sizeof(args)))
2640 return -EFAULT;
2641 return hpsb_iso_xmit_start(fi->iso_handle,
2642 args[0], args[1]);
2643 }
2644 case RAW1394_IOC_ISO_XMIT_SYNC:
2645 return hpsb_iso_xmit_sync(fi->iso_handle);
2646 case RAW1394_IOC_ISO_XMIT_RECV_STOP:
2647 hpsb_iso_stop(fi->iso_handle);
2648 return 0;
2649 case RAW1394_IOC_ISO_GET_STATUS:
2650 return raw1394_iso_get_status(fi, argp);
2651 case RAW1394_IOC_ISO_XMIT_PACKETS:
2652 return raw1394_iso_send_packets(fi, argp);
2653 case RAW1394_IOC_ISO_SHUTDOWN:
2654 raw1394_iso_shutdown(fi);
2655 return 0;
2656 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2657 queue_rawiso_event(fi);
2658 return 0;
2659 default:
2660 return -EINVAL;
2661 }
2662}
2663
2664 /* ioctl is only used for rawiso operations and the cycle timer */
2665static long raw1394_ioctl(struct file *file, unsigned int cmd,
2666 unsigned long arg)
2667{
2668 struct file_info *fi = file->private_data;
2669 void __user *argp = (void __user *)arg;
2670 long ret;
2671
2672 /* state-independent commands */
2673 	switch (cmd) {
2674 case RAW1394_IOC_GET_CYCLE_TIMER:
2675 return raw1394_read_cycle_timer(fi, argp);
2676 default:
2677 break;
2678 }
2679
2680 if (!mutex_trylock(&fi->state_mutex))
2681 return -EAGAIN;
2682
2683 switch (fi->iso_state) {
2684 case RAW1394_ISO_INACTIVE:
2685 ret = raw1394_ioctl_inactive(fi, cmd, argp);
2686 break;
2687 case RAW1394_ISO_RECV:
2688 ret = raw1394_ioctl_recv(fi, cmd, arg);
2689 break;
2690 case RAW1394_ISO_XMIT:
2691 ret = raw1394_ioctl_xmit(fi, cmd, argp);
2692 break;
2693 default:
2694 ret = -EINVAL;
2695 break;
2696 }
2697
2698 mutex_unlock(&fi->state_mutex);
2699
2700 return ret;
2701}
2702
2703#ifdef CONFIG_COMPAT
2704struct raw1394_iso_packets32 {
2705 __u32 n_packets;
2706 compat_uptr_t infos;
2707} __attribute__((packed));
2708
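/* The 32-bit x86 user ABI aligns __u64 on a 4 byte boundary, so on x86-64
 * and ia64 (which can run IA-32 binaries) this compat struct must be packed
 * to match the layout a 32-bit process sees; other 64-bit architectures
 * share their 32-bit counterparts' alignment and need no attribute. */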
2709struct raw1394_cycle_timer32 {
2710 __u32 cycle_timer;
2711 __u64 local_time;
2712}
2713#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
2714__attribute__((packed))
2715#endif
2716;
2717
2718#define RAW1394_IOC_ISO_RECV_PACKETS32 \
2719 _IOW ('#', 0x25, struct raw1394_iso_packets32)
2720#define RAW1394_IOC_ISO_XMIT_PACKETS32 \
2721 _IOW ('#', 0x27, struct raw1394_iso_packets32)
2722#define RAW1394_IOC_GET_CYCLE_TIMER32 \
2723 _IOR ('#', 0x30, struct raw1394_cycle_timer32)
2724
2725static long raw1394_iso_xmit_recv_packets32(struct file *file, unsigned int cmd,
2726 struct raw1394_iso_packets32 __user *arg)
2727{
2728 compat_uptr_t infos32;
2729 void __user *infos;
2730 long err = -EFAULT;
2731 struct raw1394_iso_packets __user *dst = compat_alloc_user_space(sizeof(struct raw1394_iso_packets));
2732
2733 if (!copy_in_user(&dst->n_packets, &arg->n_packets, sizeof arg->n_packets) &&
2734 !copy_from_user(&infos32, &arg->infos, sizeof infos32)) {
2735 infos = compat_ptr(infos32);
2736 if (!copy_to_user(&dst->infos, &infos, sizeof infos))
2737 err = raw1394_ioctl(file, cmd, (unsigned long)dst);
2738 }
2739 return err;
2740}
2741
2742static long raw1394_read_cycle_timer32(struct file_info *fi, void __user * uaddr)
2743{
2744 struct raw1394_cycle_timer32 ct;
2745 int err;
2746
2747 err = hpsb_read_cycle_timer(fi->host, &ct.cycle_timer, &ct.local_time);
2748 if (!err)
2749 if (copy_to_user(uaddr, &ct, sizeof(ct)))
2750 err = -EFAULT;
2751 return err;
2752}
2753
2754static long raw1394_compat_ioctl(struct file *file,
2755 unsigned int cmd, unsigned long arg)
2756{
2757 struct file_info *fi = file->private_data;
2758 void __user *argp = (void __user *)arg;
2759 long err;
2760
2761 switch (cmd) {
2762 	/* These requests have the same format as long as 'int' has the same size. */
2763 case RAW1394_IOC_ISO_RECV_INIT:
2764 case RAW1394_IOC_ISO_RECV_START:
2765 case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL:
2766 case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL:
2767 case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK:
2768 case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS:
2769 case RAW1394_IOC_ISO_RECV_FLUSH:
2770 case RAW1394_IOC_ISO_XMIT_RECV_STOP:
2771 case RAW1394_IOC_ISO_XMIT_INIT:
2772 case RAW1394_IOC_ISO_XMIT_START:
2773 case RAW1394_IOC_ISO_XMIT_SYNC:
2774 case RAW1394_IOC_ISO_GET_STATUS:
2775 case RAW1394_IOC_ISO_SHUTDOWN:
2776 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2777 err = raw1394_ioctl(file, cmd, arg);
2778 break;
2779 	/* These requests have a different format. */
2780 case RAW1394_IOC_ISO_RECV_PACKETS32:
2781 err = raw1394_iso_xmit_recv_packets32(file, RAW1394_IOC_ISO_RECV_PACKETS, argp);
2782 break;
2783 case RAW1394_IOC_ISO_XMIT_PACKETS32:
2784 err = raw1394_iso_xmit_recv_packets32(file, RAW1394_IOC_ISO_XMIT_PACKETS, argp);
2785 break;
2786 case RAW1394_IOC_GET_CYCLE_TIMER32:
2787 err = raw1394_read_cycle_timer32(fi, argp);
2788 break;
2789 default:
2790 err = -EINVAL;
2791 break;
2792 }
2793
2794 return err;
2795}
2796#endif
2797
2798static unsigned int raw1394_poll(struct file *file, poll_table * pt)
2799{
2800 struct file_info *fi = file->private_data;
2801 unsigned int mask = POLLOUT | POLLWRNORM;
2802 unsigned long flags;
2803
2804 poll_wait(file, &fi->wait_complete, pt);
2805
2806 spin_lock_irqsave(&fi->reqlists_lock, flags);
2807 if (!list_empty(&fi->req_complete)) {
2808 mask |= POLLIN | POLLRDNORM;
2809 }
2810 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
2811
2812 return mask;
2813}
2814
2815static int raw1394_open(struct inode *inode, struct file *file)
2816{
2817 struct file_info *fi;
2818
2819 fi = kzalloc(sizeof(*fi), GFP_KERNEL);
2820 if (!fi)
2821 return -ENOMEM;
2822
2823 fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */
2824
2825 INIT_LIST_HEAD(&fi->list);
2826 mutex_init(&fi->state_mutex);
2827 fi->state = opened;
2828 INIT_LIST_HEAD(&fi->req_pending);
2829 INIT_LIST_HEAD(&fi->req_complete);
2830 spin_lock_init(&fi->reqlists_lock);
2831 init_waitqueue_head(&fi->wait_complete);
2832 INIT_LIST_HEAD(&fi->addr_list);
2833
2834 file->private_data = fi;
2835
2836 return nonseekable_open(inode, file);
2837}
2838
2839static int raw1394_release(struct inode *inode, struct file *file)
2840{
2841 struct file_info *fi = file->private_data;
2842 struct list_head *lh;
2843 struct pending_request *req;
2844 int i, fail;
2845 int retval = 0;
2846 struct list_head *entry;
2847 struct arm_addr *addr = NULL;
2848 struct host_info *hi;
2849 struct file_info *fi_hlp = NULL;
2850 struct arm_addr *arm_addr = NULL;
2851 int another_host;
2852 int csr_mod = 0;
2853 unsigned long flags;
2854
2855 if (fi->iso_state != RAW1394_ISO_INACTIVE)
2856 raw1394_iso_shutdown(fi);
2857
2858 spin_lock_irqsave(&host_info_lock, flags);
2859
2860 fail = 0;
2861 /* set address-entries invalid */
2862
2863 while (!list_empty(&fi->addr_list)) {
2864 another_host = 0;
2865 lh = fi->addr_list.next;
2866 addr = list_entry(lh, struct arm_addr, addr_list);
2867 		/* another host with a valid address entry covering the
2868 		   same address range? */
2869 list_for_each_entry(hi, &host_info_list, list) {
2870 if (hi->host != fi->host) {
2871 list_for_each_entry(fi_hlp, &hi->file_info_list,
2872 list) {
2873 entry = fi_hlp->addr_list.next;
2874 while (entry != &(fi_hlp->addr_list)) {
2875 arm_addr = list_entry(entry, struct
2876 arm_addr,
2877 addr_list);
2878 if (arm_addr->start ==
2879 addr->start) {
2880 DBGMSG
2881 ("raw1394_release: "
2882 					     "another host owns the "
2883 					     "same address range");
2884 another_host = 1;
2885 break;
2886 }
2887 entry = entry->next;
2888 }
2889 if (another_host) {
2890 break;
2891 }
2892 }
2893 }
2894 }
2895 if (!another_host) {
2896 			DBGMSG("raw1394_release: call hpsb_unregister_addrspace");
2897 retval =
2898 hpsb_unregister_addrspace(&raw1394_highlevel,
2899 fi->host, addr->start);
2900 if (!retval) {
2901 ++fail;
2902 printk(KERN_ERR
2903 				       "raw1394_release: arm_unregister failed\n");
2904 }
2905 }
2906 DBGMSG("raw1394_release: delete addr_entry from list");
2907 list_del(&addr->addr_list);
2908 vfree(addr->addr_space_buffer);
2909 kfree(addr);
2910 } /* while */
2911 spin_unlock_irqrestore(&host_info_lock, flags);
2912 if (fail > 0) {
2913 		printk(KERN_ERR "raw1394: error(s) occurred during "
2914 		       "addr_list release\n");
2915 }
2916
2917 for (;;) {
2918 /* This locked section guarantees that neither
2919 * complete nor pending requests exist once i!=0 */
2920 spin_lock_irqsave(&fi->reqlists_lock, flags);
2921 while ((req = __next_complete_req(fi)))
2922 free_pending_request(req);
2923
2924 i = list_empty(&fi->req_pending);
2925 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
2926
2927 if (i)
2928 break;
2929 /*
2930 * Sleep until more requests can be freed.
2931 *
2932 		 * NB: We call the macro wait_event() with a condition argument
2933 		 * that has a side effect. This is only safe because the side
2934 		 * effect does not occur until the condition becomes true, and
2935 		 * wait_event() won't evaluate the condition again after that.
2936 */
2937 wait_event(fi->wait_complete, (req = next_complete_req(fi)));
2938 free_pending_request(req);
2939 }
2940
2941 /* Remove any sub-trees left by user space programs */
2942 for (i = 0; i < RAW1394_MAX_USER_CSR_DIRS; i++) {
2943 struct csr1212_dentry *dentry;
2944 if (!fi->csr1212_dirs[i])
2945 continue;
2946 for (dentry =
2947 fi->csr1212_dirs[i]->value.directory.dentries_head; dentry;
2948 dentry = dentry->next) {
2949 csr1212_detach_keyval_from_directory(fi->host->csr.rom->
2950 root_kv,
2951 dentry->kv);
2952 }
2953 csr1212_release_keyval(fi->csr1212_dirs[i]);
2954 fi->csr1212_dirs[i] = NULL;
2955 csr_mod = 1;
2956 }
2957
2958 if ((csr_mod || fi->cfgrom_upd)
2959 && hpsb_update_config_rom_image(fi->host) < 0)
2960 HPSB_ERR
2961 ("Failed to generate Configuration ROM image for host %d",
2962 fi->host->id);
2963
2964 if (fi->state == connected) {
2965 spin_lock_irqsave(&host_info_lock, flags);
2966 list_del(&fi->list);
2967 spin_unlock_irqrestore(&host_info_lock, flags);
2968
2969 put_device(&fi->host->device);
2970 }
2971
2972 spin_lock_irqsave(&host_info_lock, flags);
2973 if (fi->host)
2974 module_put(fi->host->driver->owner);
2975 spin_unlock_irqrestore(&host_info_lock, flags);
2976
2977 kfree(fi);
2978
2979 return 0;
2980}
2981
2982/*** HOTPLUG STUFF **********************************************************/
2983/*
2984 * Export information about protocols/devices supported by this driver.
2985 */
2986#ifdef MODULE
2987static const struct ieee1394_device_id raw1394_id_table[] = {
2988 {
2989 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2990 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
2991 .version = AVC_SW_VERSION_ENTRY & 0xffffff},
2992 {
2993 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2994 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
2995 .version = CAMERA_SW_VERSION_ENTRY & 0xffffff},
2996 {
2997 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2998 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
2999 .version = (CAMERA_SW_VERSION_ENTRY + 1) & 0xffffff},
3000 {
3001 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
3002 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
3003 .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff},
3004 {}
3005};
3006
3007MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
3008#endif /* MODULE */
3009
3010static struct hpsb_protocol_driver raw1394_driver = {
3011 .name = "raw1394",
3012};
3013
3014/******************************************************************************/
3015
3016static struct hpsb_highlevel raw1394_highlevel = {
3017 .name = RAW1394_DEVICE_NAME,
3018 .add_host = add_host,
3019 .remove_host = remove_host,
3020 .host_reset = host_reset,
3021 .fcp_request = fcp_request,
3022};
3023
3024static struct cdev raw1394_cdev;
3025static const struct file_operations raw1394_fops = {
3026 .owner = THIS_MODULE,
3027 .read = raw1394_read,
3028 .write = raw1394_write,
3029 .mmap = raw1394_mmap,
3030 .unlocked_ioctl = raw1394_ioctl,
3031#ifdef CONFIG_COMPAT
3032 .compat_ioctl = raw1394_compat_ioctl,
3033#endif
3034 .poll = raw1394_poll,
3035 .open = raw1394_open,
3036 .release = raw1394_release,
3037 .llseek = no_llseek,
3038};
3039
3040static int __init init_raw1394(void)
3041{
3042 int ret = 0;
3043
3044 hpsb_register_highlevel(&raw1394_highlevel);
3045
3046 if (IS_ERR
3047 (device_create(hpsb_protocol_class, NULL,
3048 MKDEV(IEEE1394_MAJOR,
3049 IEEE1394_MINOR_BLOCK_RAW1394 * 16),
3050 NULL, RAW1394_DEVICE_NAME))) {
3051 ret = -EFAULT;
3052 goto out_unreg;
3053 }
3054
3055 cdev_init(&raw1394_cdev, &raw1394_fops);
3056 raw1394_cdev.owner = THIS_MODULE;
3057 ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1);
3058 if (ret) {
3059 HPSB_ERR("raw1394 failed to register minor device block");
3060 goto out_dev;
3061 }
3062
3063 HPSB_INFO("raw1394: /dev/%s device initialized", RAW1394_DEVICE_NAME);
3064
3065 ret = hpsb_register_protocol(&raw1394_driver);
3066 if (ret) {
3067 HPSB_ERR("raw1394: failed to register protocol");
3068 cdev_del(&raw1394_cdev);
3069 goto out_dev;
3070 }
3071
3072 goto out;
3073
3074 out_dev:
3075 device_destroy(hpsb_protocol_class,
3076 MKDEV(IEEE1394_MAJOR,
3077 IEEE1394_MINOR_BLOCK_RAW1394 * 16));
3078 out_unreg:
3079 hpsb_unregister_highlevel(&raw1394_highlevel);
3080 out:
3081 return ret;
3082}
3083
3084static void __exit cleanup_raw1394(void)
3085{
3086 device_destroy(hpsb_protocol_class,
3087 MKDEV(IEEE1394_MAJOR,
3088 IEEE1394_MINOR_BLOCK_RAW1394 * 16));
3089 cdev_del(&raw1394_cdev);
3090 hpsb_unregister_highlevel(&raw1394_highlevel);
3091 hpsb_unregister_protocol(&raw1394_driver);
3092}
3093
3094module_init(init_raw1394);
3095module_exit(cleanup_raw1394);
3096MODULE_LICENSE("GPL");
diff --git a/drivers/ieee1394/raw1394.h b/drivers/ieee1394/raw1394.h
deleted file mode 100644
index 963ac20373d2..000000000000
--- a/drivers/ieee1394/raw1394.h
+++ /dev/null
@@ -1,191 +0,0 @@
1#ifndef IEEE1394_RAW1394_H
2#define IEEE1394_RAW1394_H
3
4/* header for the raw1394 API that is exported to user-space */
5
6#define RAW1394_KERNELAPI_VERSION 4
7
8/* state: opened */
9#define RAW1394_REQ_INITIALIZE 1
10
11/* state: initialized */
12#define RAW1394_REQ_LIST_CARDS 2
13#define RAW1394_REQ_SET_CARD 3
14
15/* state: connected */
16#define RAW1394_REQ_ASYNC_READ 100
17#define RAW1394_REQ_ASYNC_WRITE 101
18#define RAW1394_REQ_LOCK 102
19#define RAW1394_REQ_LOCK64 103
20#define RAW1394_REQ_ISO_SEND 104 /* removed ABI, now a no-op */
21#define RAW1394_REQ_ASYNC_SEND 105
22#define RAW1394_REQ_ASYNC_STREAM 106
23
24#define RAW1394_REQ_ISO_LISTEN 200 /* removed ABI, now a no-op */
25#define RAW1394_REQ_FCP_LISTEN 201
26#define RAW1394_REQ_RESET_BUS 202
27#define RAW1394_REQ_GET_ROM 203
28#define RAW1394_REQ_UPDATE_ROM 204
29#define RAW1394_REQ_ECHO 205
30#define RAW1394_REQ_MODIFY_ROM 206
31
32#define RAW1394_REQ_ARM_REGISTER 300
33#define RAW1394_REQ_ARM_UNREGISTER 301
34#define RAW1394_REQ_ARM_SET_BUF 302
35#define RAW1394_REQ_ARM_GET_BUF 303
36
37#define RAW1394_REQ_RESET_NOTIFY 400
38
39#define RAW1394_REQ_PHYPACKET 500
40
41/* kernel to user */
42#define RAW1394_REQ_BUS_RESET 10000
43#define RAW1394_REQ_ISO_RECEIVE 10001
44#define RAW1394_REQ_FCP_REQUEST 10002
45#define RAW1394_REQ_ARM 10003
46#define RAW1394_REQ_RAWISO_ACTIVITY 10004
47
48/* error codes */
49#define RAW1394_ERROR_NONE 0
50#define RAW1394_ERROR_COMPAT (-1001)
51#define RAW1394_ERROR_STATE_ORDER (-1002)
52#define RAW1394_ERROR_GENERATION (-1003)
53#define RAW1394_ERROR_INVALID_ARG (-1004)
54#define RAW1394_ERROR_MEMFAULT (-1005)
55#define RAW1394_ERROR_ALREADY (-1006)
56
57#define RAW1394_ERROR_EXCESSIVE (-1020)
58#define RAW1394_ERROR_UNTIDY_LEN (-1021)
59
60#define RAW1394_ERROR_SEND_ERROR (-1100)
61#define RAW1394_ERROR_ABORTED (-1101)
62#define RAW1394_ERROR_TIMEOUT (-1102)
63
64/* arm_codes */
65#define ARM_READ 1
66#define ARM_WRITE 2
67#define ARM_LOCK 4
68
69#define RAW1394_LONG_RESET 0
70#define RAW1394_SHORT_RESET 1
71
72/* busresetnotify ... */
73#define RAW1394_NOTIFY_OFF 0
74#define RAW1394_NOTIFY_ON 1
75
76#include <asm/types.h>
77
78struct raw1394_request {
79 __u32 type;
80 __s32 error;
81 __u32 misc;
82
83 __u32 generation;
84 __u32 length;
85
86 __u64 address;
87
88 __u64 tag;
89
90 __u64 sendb;
91 __u64 recvb;
92};
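/*
 * A minimal usage sketch (an assumption, modeled on libraw1394's
 * conventions and the state machine in raw1394.c): user space exchanges
 * fixed-size raw1394_request records with the character device.
 *
 *	int fd = open("/dev/raw1394", O_RDWR);
 *	struct raw1394_request req = {
 *		.type = RAW1394_REQ_INITIALIZE,
 *		.misc = RAW1394_KERNELAPI_VERSION,
 *	};
 *	write(fd, &req, sizeof(req));
 *	read(fd, &req, sizeof(req));	- the reply; check req.error
 */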
93
94struct raw1394_khost_list {
95 __u32 nodes;
96 __u8 name[32];
97};
98
99typedef struct arm_request {
100 __u16 destination_nodeid;
101 __u16 source_nodeid;
102 __u64 destination_offset;
103 __u8 tlabel;
104 __u8 tcode;
105 __u8 extended_transaction_code;
106 __u32 generation;
107 __u16 buffer_length;
108 __u8 __user *buffer;
109} *arm_request_t;
110
111typedef struct arm_response {
112 __s32 response_code;
113 __u16 buffer_length;
114 __u8 __user *buffer;
115} *arm_response_t;
116
117typedef struct arm_request_response {
118 struct arm_request __user *request;
119 struct arm_response __user *response;
120} *arm_request_response_t;
121
122/* rawiso API */
123#include "ieee1394-ioctl.h"
124
125/* per-packet metadata embedded in the ringbuffer */
126/* must be identical to hpsb_iso_packet_info in iso.h! */
127struct raw1394_iso_packet_info {
128 __u32 offset;
129 __u16 len;
130 __u16 cycle; /* recv only */
131 __u8 channel; /* recv only */
132 __u8 tag;
133 __u8 sy;
134};
135
136/* argument for RAW1394_ISO_RECV/XMIT_PACKETS ioctls */
137struct raw1394_iso_packets {
138 __u32 n_packets;
139 struct raw1394_iso_packet_info __user *infos;
140};
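/*
 * Sketch of draining received packets (derived from the ioctl handlers in
 * raw1394.c; the buffer sizes are made up): n_packets must not exceed the
 * number of ready packets, and released slots are reused by the kernel.
 *
 *	struct raw1394_iso_packet_info infos[16];
 *	struct raw1394_iso_packets pk = { .n_packets = 16, .infos = infos };
 *	ioctl(fd, RAW1394_IOC_ISO_RECV_PACKETS, &pk);
 *	ioctl(fd, RAW1394_IOC_ISO_RECV_RELEASE_PACKETS, 16);
 */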
141
142struct raw1394_iso_config {
143 /* size of packet data buffer, in bytes (will be rounded up to PAGE_SIZE) */
144 __u32 data_buf_size;
145
146 /* # of packets to buffer */
147 __u32 buf_packets;
148
149 /* iso channel (set to -1 for multi-channel recv) */
150 __s32 channel;
151
152 /* xmit only - iso transmission speed */
153 __u8 speed;
154
155 	/* DMA mode used when receiving iso data; must be supported by the chip */
156 __u8 dma_mode;
157
158 /* max. latency of buffer, in packets (-1 if you don't care) */
159 __s32 irq_interval;
160};
161
162/* argument to RAW1394_ISO_XMIT/RECV_INIT and RAW1394_ISO_GET_STATUS */
163struct raw1394_iso_status {
164 /* current settings */
165 struct raw1394_iso_config config;
166
167 /* number of packets waiting to be filled with data (ISO transmission)
168 or containing data received (ISO reception) */
169 __u32 n_packets;
170
171 	/* packet-drop counters, packed as two 16-bit halves: skip count in
172 	   the high 16 bits, buffer overflow/underflow count in the low 16
173 	   bits (a value of zero guarantees that no packets were dropped) */
174 __u32 overflows;
175
176 /* cycle number at which next packet will be transmitted;
177 -1 if not known */
178 __s16 xmit_cycle;
179};
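/*
 * Sketch of initializing reception (an assumption; the fd must already be
 * attached to a host adapter): the kernel writes the effective settings
 * back into the same struct, as raw1394_iso_recv_init() shows.
 *
 *	struct raw1394_iso_status st = {
 *		.config = {
 *			.data_buf_size	= 64 * 1024,
 *			.buf_packets	= 256,
 *			.channel	= 3,
 *			.irq_interval	= -1,
 *		},
 *	};
 *	ioctl(fd, RAW1394_IOC_ISO_RECV_INIT, &st);
 *
 * (.dma_mode is left at 0 here; its constants are defined in iso.h.)
 */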
180
181/* argument to RAW1394_IOC_GET_CYCLE_TIMER ioctl */
182struct raw1394_cycle_timer {
183 /* contents of Isochronous Cycle Timer register,
184 as in OHCI 1.1 clause 5.13 (also with non-OHCI hosts) */
185 __u32 cycle_timer;
186
187 /* local time in microseconds since Epoch,
188 simultaneously read with cycle timer */
189 __u64 local_time;
190};
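/*
 * Sketch (assuming an already-open fd): reading the cycle timer together
 * with the system clock in one call, e.g. for AV synchronization.
 *
 *	struct raw1394_cycle_timer ct;
 *	if (ioctl(fd, RAW1394_IOC_GET_CYCLE_TIMER, &ct) == 0)
 *		printf("cycle timer %08x at %llu us\n", ct.cycle_timer,
 *		       (unsigned long long)ct.local_time);
 */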
191#endif /* IEEE1394_RAW1394_H */
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
deleted file mode 100644
index d6e251a300ce..000000000000
--- a/drivers/ieee1394/sbp2.c
+++ /dev/null
@@ -1,2138 +0,0 @@
1/*
2 * sbp2.c - SBP-2 protocol driver for IEEE-1394
3 *
4 * Copyright (C) 2000 James Goodwin, Filanet Corporation (www.filanet.com)
5 * jamesg@filanet.com (JSG)
6 *
7 * Copyright (C) 2003 Ben Collins <bcollins@debian.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24/*
25 * Brief Description:
26 *
27 * This driver implements the Serial Bus Protocol 2 (SBP-2) over IEEE-1394
28 * under Linux. The SBP-2 driver is implemented as an IEEE-1394 high-level
29 * driver. It also registers as a SCSI lower-level driver in order to accept
30 * SCSI commands for transport using SBP-2.
31 *
32 * You may access any attached SBP-2 devices (usually storage devices) as
33 * regular SCSI devices, e.g. mount /dev/sda1, fdisk, mkfs, etc.
34 *
35 * See http://www.t10.org/drafts.htm#sbp2 for the final draft of the SBP-2
36 * specification and for where to purchase the official standard.
37 *
38 * TODO:
39 * - look into possible improvements of the SCSI error handlers
40 * - handle Unit_Characteristics.mgt_ORB_timeout and .ORB_size
41 * - handle Logical_Unit_Number.ordered
42 * - handle src == 1 in status blocks
43 * - reimplement the DMA mapping in absence of physical DMA so that
44 * bus_to_virt is no longer required
45 * - debug the handling of absent physical DMA
46 * - replace CONFIG_IEEE1394_SBP2_PHYS_DMA by automatic detection
47 * (this is easy but depends on the previous two TODO items)
48 * - make the parameter serialize_io configurable per device
49 * - move all requests to fetch agent registers into non-atomic context,
50 * replace all usages of sbp2util_node_write_no_wait by true transactions
51 * Grep for inline FIXME comments below.
52 */
53
54#include <linux/blkdev.h>
55#include <linux/compiler.h>
56#include <linux/delay.h>
57#include <linux/device.h>
58#include <linux/dma-mapping.h>
59#include <linux/init.h>
60#include <linux/kernel.h>
61#include <linux/list.h>
62#include <linux/mm.h>
63#include <linux/module.h>
64#include <linux/moduleparam.h>
65#include <linux/sched.h>
66#include <linux/slab.h>
67#include <linux/spinlock.h>
68#include <linux/stat.h>
69#include <linux/string.h>
70#include <linux/stringify.h>
71#include <linux/types.h>
72#include <linux/wait.h>
73#include <linux/workqueue.h>
74#include <linux/scatterlist.h>
75
76#include <asm/byteorder.h>
77#include <asm/errno.h>
78#include <asm/param.h>
79#include <asm/system.h>
80#include <asm/types.h>
81
82#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
83#include <asm/io.h> /* for bus_to_virt */
84#endif
85
86#include <scsi/scsi.h>
87#include <scsi/scsi_cmnd.h>
88#include <scsi/scsi_dbg.h>
89#include <scsi/scsi_device.h>
90#include <scsi/scsi_host.h>
91
92#include "csr1212.h"
93#include "highlevel.h"
94#include "hosts.h"
95#include "ieee1394.h"
96#include "ieee1394_core.h"
97#include "ieee1394_hotplug.h"
98#include "ieee1394_transactions.h"
99#include "ieee1394_types.h"
100#include "nodemgr.h"
101#include "sbp2.h"
102
103/*
104 * Module load parameter definitions
105 */
106
107/*
108 * Change max_speed on module load if you have a bad IEEE-1394
109 * controller that has trouble running 2KB packets at 400 Mb/s.
110 *
111 * NOTE: On certain OHCI parts I have seen short packets on async transmit
112 * (probably due to PCI latency/throughput issues with the part). You can
113 * bump down the speed if you are running into problems.
114 */
115static int sbp2_max_speed = IEEE1394_SPEED_MAX;
116module_param_named(max_speed, sbp2_max_speed, int, 0644);
117MODULE_PARM_DESC(max_speed, "Limit data transfer speed (5 <= 3200, "
118 "4 <= 1600, 3 <= 800, 2 <= 400, 1 <= 200, 0 = 100 Mb/s)");
119
120/*
121 * Set serialize_io to 0 or N to use dynamically appended lists of command ORBs.
122 * This is and always has been buggy in multiple subtle ways. See above TODOs.
123 */
124static int sbp2_serialize_io = 1;
125module_param_named(serialize_io, sbp2_serialize_io, bool, 0444);
126MODULE_PARM_DESC(serialize_io, "Serialize requests coming from SCSI drivers "
127 "(default = Y, faster but buggy = N)");
128
129/*
130 * Adjust max_sectors if you'd like to influence how many sectors each SCSI
131 * command can transfer at most. Please note that some older SBP-2 bridge
132 * chips are broken for transfers greater or equal to 128KB, therefore
133 * max_sectors used to be a safe 255 sectors for many years. We now have a
134 * default of 0 here which means that we let the SCSI stack choose a limit.
135 *
136 * The SBP2_WORKAROUND_128K_MAX_TRANS flag, if set either in the workarounds
137 * module parameter or in the sbp2_workarounds_table[], will override the
138 * value of max_sectors. We should use sbp2_workarounds_table[] to cover any
139 * bridge chip which becomes known to need the 255 sectors limit.
140 */
141static int sbp2_max_sectors;
142module_param_named(max_sectors, sbp2_max_sectors, int, 0444);
143MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported "
144 "(default = 0 = use SCSI stack's default)");
145
146/*
147 * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
148 * do an exclusive login, as it's generally unsafe to have two hosts
149 * talking to a single sbp2 device at the same time (filesystem coherency,
150 * etc.). If you're running an sbp2 device that supports multiple logins,
151 * and you're either running read-only filesystems or some sort of special
152 * filesystem supporting multiple hosts, e.g. OpenGFS, Oracle Cluster
153 * File System, or Lustre, then set exclusive_login to zero.
154 *
155 * So far only bridges from Oxford Semiconductor are known to support
156 * concurrent logins. Depending on firmware, four or two concurrent logins
157 * are possible on OXFW911 and newer Oxsemi bridges.
158 */
159static int sbp2_exclusive_login = 1;
160module_param_named(exclusive_login, sbp2_exclusive_login, bool, 0644);
161MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
162 "(default = Y, use N for concurrent initiators)");
163
164/*
165 * If any of the following workarounds is required for your device to work,
166 * please submit the kernel messages logged by sbp2 to the linux1394-devel
167 * mailing list.
168 *
169 * - 128kB max transfer
170 * Limit transfer size. Necessary for some old bridges.
171 *
172 * - 36 byte inquiry
173 * When scsi_mod probes the device, let the inquiry command look like that
174 * from MS Windows.
175 *
176 * - skip mode page 8
177 * Suppress sending of mode_sense for mode page 8 if the device pretends to
178 * support the SCSI Primary Block commands instead of Reduced Block Commands.
179 *
180 * - fix capacity
181 * Tell sd_mod to correct the last sector number reported by read_capacity.
182 * Avoids access beyond actual disk limits on devices with an off-by-one bug.
183 * Don't use this with devices which don't have this bug.
184 *
185 * - delay inquiry
186 * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
187 *
188 * - power condition
189 * Set the power condition field in the START STOP UNIT commands sent by
190 * sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
191 * Some disks need this to spin down or to resume properly.
192 *
193 * - override internal blacklist
194 * Instead of adding to the built-in blacklist, use only the workarounds
195 * specified in the module load parameter.
196 * Useful if a blacklist entry interfered with a non-broken device.
197 */
198static int sbp2_default_workarounds;
199module_param_named(workarounds, sbp2_default_workarounds, int, 0644);
200MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
201 ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
202 ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
203 ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
204 ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
205 ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
206 ", set power condition in start stop unit = "
207 __stringify(SBP2_WORKAROUND_POWER_CONDITION)
208 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
209 ", or a combination)");
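/*
 * Usage sketch (hypothetical invocation; substitute the numeric flag values
 * printed in the parameter description above):
 *
 *	modprobe sbp2 workarounds=<36 byte inquiry | fix capacity>
 *
 * The "override internal blacklist" flag makes sbp2 apply only the flags
 * given here and ignore sbp2_workarounds_table[].
 */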
210
211/*
212 * This influences the format of the sysfs attribute
213 * /sys/bus/scsi/devices/.../ieee1394_id.
214 *
215 * The default format is like in older kernels: %016Lx:%d:%d
216 * It contains the target's EUI-64, a number given to the logical unit by
217 * the ieee1394 driver's nodemgr (starting at 0), and the LUN.
218 *
219 * The long format is: %016Lx:%06x:%04x
220 * It contains the target's EUI-64, the unit directory's directory_ID as per
221 * IEEE 1212 clause 7.7.19, and the LUN. This format comes closest to the
222 * format of SBP(-3) target port and logical unit identifier as per SAM (SCSI
223 * Architecture Model) rev.2 to 4 annex A. Therefore and because it is
224 * independent of the implementation of the ieee1394 nodemgr, the longer format
225 * is recommended for future use.
226 */
227static int sbp2_long_sysfs_ieee1394_id;
228module_param_named(long_ieee1394_id, sbp2_long_sysfs_ieee1394_id, bool, 0644);
229MODULE_PARM_DESC(long_ieee1394_id, "8+3+2 bytes format of ieee1394_id in sysfs "
230 "(default = backwards-compatible = N, SAM-conforming = Y)");
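/*
 * Hypothetical examples for one logical unit (values invented for
 * illustration): default format "0001d202e0465c3a:0:0" versus long format
 * "0001d202e0465c3a:00148e:0000" - same EUI-64, but the middle field is the
 * unit directory's directory_ID rather than the nodemgr-assigned number.
 */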
231
232
233#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
234#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
235
236/*
237 * Globals
238 */
239static void sbp2scsi_complete_all_commands(struct sbp2_lu *, u32);
240static void sbp2scsi_complete_command(struct sbp2_lu *, u32, struct scsi_cmnd *,
241 void (*)(struct scsi_cmnd *));
242static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *);
243static int sbp2_start_device(struct sbp2_lu *);
244static void sbp2_remove_device(struct sbp2_lu *);
245static int sbp2_login_device(struct sbp2_lu *);
246static int sbp2_reconnect_device(struct sbp2_lu *);
247static int sbp2_logout_device(struct sbp2_lu *);
248static void sbp2_host_reset(struct hpsb_host *);
249static int sbp2_handle_status_write(struct hpsb_host *, int, int, quadlet_t *,
250 u64, size_t, u16);
251static int sbp2_agent_reset(struct sbp2_lu *, int);
252static void sbp2_parse_unit_directory(struct sbp2_lu *,
253 struct unit_directory *);
254static int sbp2_set_busy_timeout(struct sbp2_lu *);
255static int sbp2_max_speed_and_size(struct sbp2_lu *);
256
257
258static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xa, 0xa, 0xa };
259
260static DEFINE_RWLOCK(sbp2_hi_logical_units_lock);
261
262static struct hpsb_highlevel sbp2_highlevel = {
263 .name = SBP2_DEVICE_NAME,
264 .host_reset = sbp2_host_reset,
265};
266
267static const struct hpsb_address_ops sbp2_ops = {
268 .write = sbp2_handle_status_write
269};
270
271#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
272static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *,
273 u64, size_t, u16);
274static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
275 size_t, u16);
276
277static const struct hpsb_address_ops sbp2_physdma_ops = {
278 .read = sbp2_handle_physdma_read,
279 .write = sbp2_handle_physdma_write,
280};
281#endif
282
283
284/*
285 * Interface to driver core and IEEE 1394 core
286 */
287static const struct ieee1394_device_id sbp2_id_table[] = {
288 {
289 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
290 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
291 .version = SBP2_SW_VERSION_ENTRY & 0xffffff},
292 {}
293};
294MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
295
296static int sbp2_probe(struct device *);
297static int sbp2_remove(struct device *);
298static int sbp2_update(struct unit_directory *);
299
300static struct hpsb_protocol_driver sbp2_driver = {
301 .name = SBP2_DEVICE_NAME,
302 .id_table = sbp2_id_table,
303 .update = sbp2_update,
304 .driver = {
305 .probe = sbp2_probe,
306 .remove = sbp2_remove,
307 },
308};
309
310
311/*
312 * Interface to SCSI core
313 */
314static int sbp2scsi_queuecommand(struct scsi_cmnd *,
315 void (*)(struct scsi_cmnd *));
316static int sbp2scsi_abort(struct scsi_cmnd *);
317static int sbp2scsi_reset(struct scsi_cmnd *);
318static int sbp2scsi_slave_alloc(struct scsi_device *);
319static int sbp2scsi_slave_configure(struct scsi_device *);
320static void sbp2scsi_slave_destroy(struct scsi_device *);
321static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *,
322 struct device_attribute *, char *);
323
324static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
325
326static struct device_attribute *sbp2_sysfs_sdev_attrs[] = {
327 &dev_attr_ieee1394_id,
328 NULL
329};
330
331static struct scsi_host_template sbp2_shost_template = {
332 .module = THIS_MODULE,
333 .name = "SBP-2 IEEE-1394",
334 .proc_name = SBP2_DEVICE_NAME,
335 .queuecommand = sbp2scsi_queuecommand,
336 .eh_abort_handler = sbp2scsi_abort,
337 .eh_device_reset_handler = sbp2scsi_reset,
338 .slave_alloc = sbp2scsi_slave_alloc,
339 .slave_configure = sbp2scsi_slave_configure,
340 .slave_destroy = sbp2scsi_slave_destroy,
341 .this_id = -1,
342 .sg_tablesize = SG_ALL,
343 .use_clustering = ENABLE_CLUSTERING,
344 .cmd_per_lun = SBP2_MAX_CMDS,
345 .can_queue = SBP2_MAX_CMDS,
346 .sdev_attrs = sbp2_sysfs_sdev_attrs,
347};
348
349#define SBP2_ROM_VALUE_WILDCARD ~0 /* match all */
350#define SBP2_ROM_VALUE_MISSING 0xff000000 /* not present in the unit dir. */
351
352/*
353 * List of devices with known bugs.
354 *
355 * The firmware_revision field, masked with 0xffff00, is the best indicator
356 * for the type of bridge chip of a device. It yields a few false positives
357 * but this did not break correctly behaving devices so far.
358 */
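/* For example, the two DViCO Momobay entries below share the masked
 * firmware_revision 0x002800 and are distinguished by their model field. */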
359static const struct {
360 u32 firmware_revision;
361 u32 model;
362 unsigned workarounds;
363} sbp2_workarounds_table[] = {
364 /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
365 .firmware_revision = 0x002800,
366 .model = 0x001010,
367 .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
368 SBP2_WORKAROUND_MODE_SENSE_8 |
369 SBP2_WORKAROUND_POWER_CONDITION,
370 },
371 /* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
372 .firmware_revision = 0x002800,
373 .model = 0x000000,
374 .workarounds = SBP2_WORKAROUND_POWER_CONDITION,
375 },
376 /* Initio bridges, actually only needed for some older ones */ {
377 .firmware_revision = 0x000200,
378 .model = SBP2_ROM_VALUE_WILDCARD,
379 .workarounds = SBP2_WORKAROUND_INQUIRY_36,
380 },
381 /* PL-3507 bridge with Prolific firmware */ {
382 .firmware_revision = 0x012800,
383 .model = SBP2_ROM_VALUE_WILDCARD,
384 .workarounds = SBP2_WORKAROUND_POWER_CONDITION,
385 },
386 /* Symbios bridge */ {
387 .firmware_revision = 0xa0b800,
388 .model = SBP2_ROM_VALUE_WILDCARD,
389 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
390 },
391 /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
392 .firmware_revision = 0x002600,
393 .model = SBP2_ROM_VALUE_WILDCARD,
394 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
395 },
396 /*
397 * iPod 2nd generation: needs 128k max transfer size workaround
398 * iPod 3rd generation: needs fix capacity workaround
399 */
400 {
401 .firmware_revision = 0x0a2700,
402 .model = 0x000000,
403 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS |
404 SBP2_WORKAROUND_FIX_CAPACITY,
405 },
406 /* iPod 4th generation */ {
407 .firmware_revision = 0x0a2700,
408 .model = 0x000021,
409 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
410 },
411 /* iPod mini */ {
412 .firmware_revision = 0x0a2700,
413 .model = 0x000022,
414 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
415 },
416 /* iPod mini */ {
417 .firmware_revision = 0x0a2700,
418 .model = 0x000023,
419 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
420 },
421 /* iPod Photo */ {
422 .firmware_revision = 0x0a2700,
423 .model = 0x00007e,
424 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
425 }
426};
427
428/**************************************
429 * General utility functions
430 **************************************/
431
432#ifndef __BIG_ENDIAN
433/*
434 * Converts a buffer from be32 to cpu byte ordering. Length is in bytes.
435 */
436static inline void sbp2util_be32_to_cpu_buffer(void *buffer, int length)
437{
438 u32 *temp = buffer;
439
440 for (length = (length >> 2); length--; )
441 temp[length] = be32_to_cpu(temp[length]);
442}
443
444/*
445 * Converts a buffer from cpu to be32 byte ordering. Length is in bytes.
446 */
447static inline void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
448{
449 u32 *temp = buffer;
450
451 for (length = (length >> 2); length--; )
452 temp[length] = cpu_to_be32(temp[length]);
453}
454#else /* BIG_ENDIAN */
455/* Why waste the cpu cycles? */
456#define sbp2util_be32_to_cpu_buffer(x,y) do {} while (0)
457#define sbp2util_cpu_to_be32_buffer(x,y) do {} while (0)
458#endif
459
460static DECLARE_WAIT_QUEUE_HEAD(sbp2_access_wq);
461
462/*
463 * Waits for completion of an SBP-2 access request.
464 * Returns nonzero if timed out or prematurely interrupted.
465 */
466static int sbp2util_access_timeout(struct sbp2_lu *lu, int timeout)
467{
468 long leftover;
469
470 leftover = wait_event_interruptible_timeout(
471 sbp2_access_wq, lu->access_complete, timeout);
472 lu->access_complete = 0;
473 return leftover <= 0;
474}
475
476static void sbp2_free_packet(void *packet)
477{
478 hpsb_free_tlabel(packet);
479 hpsb_free_packet(packet);
480}
481
482/*
483 * This is much like hpsb_node_write(), except it ignores the response
484 * subaction and returns immediately. Can be used from atomic context.
485 */
486static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
487 quadlet_t *buf, size_t len)
488{
489 struct hpsb_packet *packet;
490
491 packet = hpsb_make_writepacket(ne->host, ne->nodeid, addr, buf, len);
492 if (!packet)
493 return -ENOMEM;
494
495 hpsb_set_packet_complete_task(packet, sbp2_free_packet, packet);
496 hpsb_node_fill_packet(ne, packet);
497 if (hpsb_send_packet(packet) < 0) {
498 sbp2_free_packet(packet);
499 return -EIO;
500 }
501 return 0;
502}
503
504static void sbp2util_notify_fetch_agent(struct sbp2_lu *lu, u64 offset,
505 quadlet_t *data, size_t len)
506{
507 /* There is a small window after a bus reset within which the node
508 * entry's generation is current but the reconnect wasn't completed. */
509 if (unlikely(atomic_read(&lu->state) == SBP2LU_STATE_IN_RESET))
510 return;
511
512 if (hpsb_node_write(lu->ne, lu->command_block_agent_addr + offset,
513 data, len))
514 SBP2_ERR("sbp2util_notify_fetch_agent failed.");
515
516 	/* Now accept new SCSI commands, unless a bus reset happened during
517 * hpsb_node_write. */
518 if (likely(atomic_read(&lu->state) != SBP2LU_STATE_IN_RESET))
519 scsi_unblock_requests(lu->shost);
520}
521
522static void sbp2util_write_orb_pointer(struct work_struct *work)
523{
524 struct sbp2_lu *lu = container_of(work, struct sbp2_lu, protocol_work);
525 quadlet_t data[2];
526
527 data[0] = ORB_SET_NODE_ID(lu->hi->host->node_id);
528 data[1] = lu->last_orb_dma;
529 sbp2util_cpu_to_be32_buffer(data, 8);
530 sbp2util_notify_fetch_agent(lu, SBP2_ORB_POINTER_OFFSET, data, 8);
531}
532
533static void sbp2util_write_doorbell(struct work_struct *work)
534{
535 struct sbp2_lu *lu = container_of(work, struct sbp2_lu, protocol_work);
536
537 sbp2util_notify_fetch_agent(lu, SBP2_DOORBELL_OFFSET, NULL, 4);
538}
539
540static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
541{
542 struct sbp2_command_info *cmd;
543 struct device *dmadev = lu->hi->host->device.parent;
544 int i, orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;
545
546 for (i = 0; i < orbs; i++) {
547 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
548 if (!cmd)
549 goto failed_alloc;
550
551 cmd->command_orb_dma =
552 dma_map_single(dmadev, &cmd->command_orb,
553 sizeof(struct sbp2_command_orb),
554 DMA_TO_DEVICE);
555 if (dma_mapping_error(dmadev, cmd->command_orb_dma))
556 goto failed_orb;
557
558 cmd->sge_dma =
559 dma_map_single(dmadev, &cmd->scatter_gather_element,
560 sizeof(cmd->scatter_gather_element),
561 DMA_TO_DEVICE);
562 if (dma_mapping_error(dmadev, cmd->sge_dma))
563 goto failed_sge;
564
565 INIT_LIST_HEAD(&cmd->list);
566 list_add_tail(&cmd->list, &lu->cmd_orb_completed);
567 }
568 return 0;
569
570failed_sge:
571 dma_unmap_single(dmadev, cmd->command_orb_dma,
572 sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
573failed_orb:
574 kfree(cmd);
575failed_alloc:
576 return -ENOMEM;
577}
578
579static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu,
580 struct hpsb_host *host)
581{
582 struct list_head *lh, *next;
583 struct sbp2_command_info *cmd;
584 unsigned long flags;
585
586 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
587 if (!list_empty(&lu->cmd_orb_completed))
588 list_for_each_safe(lh, next, &lu->cmd_orb_completed) {
589 cmd = list_entry(lh, struct sbp2_command_info, list);
590 dma_unmap_single(host->device.parent,
591 cmd->command_orb_dma,
592 sizeof(struct sbp2_command_orb),
593 DMA_TO_DEVICE);
594 dma_unmap_single(host->device.parent, cmd->sge_dma,
595 sizeof(cmd->scatter_gather_element),
596 DMA_TO_DEVICE);
597 kfree(cmd);
598 }
599 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
600 return;
601}
602
603/*
604 * Finds the sbp2_command for a given outstanding command ORB.
605 * Only looks at the in-use list.
606 */
607static struct sbp2_command_info *sbp2util_find_command_for_orb(
608 struct sbp2_lu *lu, dma_addr_t orb)
609{
610 struct sbp2_command_info *cmd;
611 unsigned long flags;
612
613 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
614 if (!list_empty(&lu->cmd_orb_inuse))
615 list_for_each_entry(cmd, &lu->cmd_orb_inuse, list)
616 if (cmd->command_orb_dma == orb) {
617 spin_unlock_irqrestore(
618 &lu->cmd_orb_lock, flags);
619 return cmd;
620 }
621 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
622 return NULL;
623}
624
625/*
626 * Finds the sbp2_command for a given outstanding SCpnt.
627 * Only looks at the in-use list.
628 * Must be called with lu->cmd_orb_lock held.
629 */
630static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(
631 struct sbp2_lu *lu, void *SCpnt)
632{
633 struct sbp2_command_info *cmd;
634
635 if (!list_empty(&lu->cmd_orb_inuse))
636 list_for_each_entry(cmd, &lu->cmd_orb_inuse, list)
637 if (cmd->Current_SCpnt == SCpnt)
638 return cmd;
639 return NULL;
640}
641
642static struct sbp2_command_info *sbp2util_allocate_command_orb(
643 struct sbp2_lu *lu,
644 struct scsi_cmnd *Current_SCpnt,
645 void (*Current_done)(struct scsi_cmnd *))
646{
647 struct list_head *lh;
648 struct sbp2_command_info *cmd = NULL;
649 unsigned long flags;
650
651 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
652 if (!list_empty(&lu->cmd_orb_completed)) {
653 lh = lu->cmd_orb_completed.next;
654 list_del(lh);
655 cmd = list_entry(lh, struct sbp2_command_info, list);
656 cmd->Current_done = Current_done;
657 cmd->Current_SCpnt = Current_SCpnt;
658 list_add_tail(&cmd->list, &lu->cmd_orb_inuse);
659 } else
660 SBP2_ERR("%s: no orbs available", __func__);
661 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
662 return cmd;
663}
664
665/*
666 * Unmaps the DMAs of a command and moves the command to the completed ORB list.
667 * Must be called with lu->cmd_orb_lock held.
668 */
669static void sbp2util_mark_command_completed(struct sbp2_lu *lu,
670 struct sbp2_command_info *cmd)
671{
672 if (scsi_sg_count(cmd->Current_SCpnt))
673 dma_unmap_sg(lu->ud->ne->host->device.parent,
674 scsi_sglist(cmd->Current_SCpnt),
675 scsi_sg_count(cmd->Current_SCpnt),
676 cmd->Current_SCpnt->sc_data_direction);
677 list_move_tail(&cmd->list, &lu->cmd_orb_completed);
678}
679
680/*
681 * Is lu valid? Is the 1394 node still present?
682 */
683static inline int sbp2util_node_is_available(struct sbp2_lu *lu)
684{
685 return lu && lu->ne && !lu->ne->in_limbo;
686}
687
688/*********************************************
689 * IEEE-1394 core driver stack related section
690 *********************************************/
691
692static int sbp2_probe(struct device *dev)
693{
694 struct unit_directory *ud;
695 struct sbp2_lu *lu;
696
697 ud = container_of(dev, struct unit_directory, device);
698
699 	/* Don't probe UDs that have the LUN flag. We'll probe the LUN(s)
700 * instead. */
701 if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
702 return -ENODEV;
703
704 lu = sbp2_alloc_device(ud);
705 if (!lu)
706 return -ENOMEM;
707
708 sbp2_parse_unit_directory(lu, ud);
709 return sbp2_start_device(lu);
710}
711
712static int sbp2_remove(struct device *dev)
713{
714 struct unit_directory *ud;
715 struct sbp2_lu *lu;
716 struct scsi_device *sdev;
717
718 ud = container_of(dev, struct unit_directory, device);
719 lu = dev_get_drvdata(&ud->device);
720 if (!lu)
721 return 0;
722
723 if (lu->shost) {
724 /* Get rid of enqueued commands if there is no chance to
725 * send them. */
726 if (!sbp2util_node_is_available(lu))
727 sbp2scsi_complete_all_commands(lu, DID_NO_CONNECT);
728 /* scsi_remove_device() may trigger shutdown functions of SCSI
729 * highlevel drivers which would deadlock if blocked. */
730 atomic_set(&lu->state, SBP2LU_STATE_IN_SHUTDOWN);
731 scsi_unblock_requests(lu->shost);
732 }
733 sdev = lu->sdev;
734 if (sdev) {
735 lu->sdev = NULL;
736 scsi_remove_device(sdev);
737 }
738
739 sbp2_logout_device(lu);
740 sbp2_remove_device(lu);
741
742 return 0;
743}
744
745static int sbp2_update(struct unit_directory *ud)
746{
747 struct sbp2_lu *lu = dev_get_drvdata(&ud->device);
748
749 if (sbp2_reconnect_device(lu) != 0) {
750 /*
751 * Reconnect failed. If another bus reset happened,
752 * let nodemgr proceed and call sbp2_update again later
753 * (or sbp2_remove if this node went away).
754 */
755 if (!hpsb_node_entry_valid(lu->ne))
756 return 0;
757 /*
758 * Or the target rejected the reconnect because we weren't
759 * fast enough. Try a regular login, but first log out
760 * just in case of any weirdness.
761 */
762 sbp2_logout_device(lu);
763
764 if (sbp2_login_device(lu) != 0) {
765 if (!hpsb_node_entry_valid(lu->ne))
766 return 0;
767
768 /* Maybe another initiator won the login. */
769 SBP2_ERR("Failed to reconnect to sbp2 device!");
770 return -EBUSY;
771 }
772 }
773
774 sbp2_set_busy_timeout(lu);
775 sbp2_agent_reset(lu, 1);
776 sbp2_max_speed_and_size(lu);
777
778 /* Complete any pending commands with busy (so they get retried)
779 * and remove them from our queue. */
780 sbp2scsi_complete_all_commands(lu, DID_BUS_BUSY);
781
782 /* Accept new commands unless there was another bus reset in the
783 * meantime. */
784 if (hpsb_node_entry_valid(lu->ne)) {
785 atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
786 scsi_unblock_requests(lu->shost);
787 }
788 return 0;
789}
790
791static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
792{
793 struct sbp2_fwhost_info *hi;
794 struct Scsi_Host *shost = NULL;
795 struct sbp2_lu *lu = NULL;
796 unsigned long flags;
797
798 lu = kzalloc(sizeof(*lu), GFP_KERNEL);
799 if (!lu) {
800 SBP2_ERR("failed to create lu");
801 goto failed_alloc;
802 }
803
804 lu->ne = ud->ne;
805 lu->ud = ud;
806 lu->speed_code = IEEE1394_SPEED_100;
807 lu->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
808 lu->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE;
809 INIT_LIST_HEAD(&lu->cmd_orb_inuse);
810 INIT_LIST_HEAD(&lu->cmd_orb_completed);
811 INIT_LIST_HEAD(&lu->lu_list);
812 spin_lock_init(&lu->cmd_orb_lock);
813 atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
814 INIT_WORK(&lu->protocol_work, NULL);
815
816 dev_set_drvdata(&ud->device, lu);
817
818 hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
819 if (!hi) {
820 hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host,
821 sizeof(*hi));
822 if (!hi) {
823 SBP2_ERR("failed to allocate hostinfo");
824 goto failed_alloc;
825 }
826 hi->host = ud->ne->host;
827 INIT_LIST_HEAD(&hi->logical_units);
828
829#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
830 /* Handle data movement if physical DMA is not
831 * enabled or not supported on the host controller */
832 if (!hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host,
833 &sbp2_physdma_ops,
834 0x0ULL, 0xfffffffcULL)) {
835 SBP2_ERR("failed to register lower 4GB address range");
836 goto failed_alloc;
837 }
838#endif
839 }
840
841 if (dma_get_max_seg_size(hi->host->device.parent) > SBP2_MAX_SEG_SIZE)
842 BUG_ON(dma_set_max_seg_size(hi->host->device.parent,
843 SBP2_MAX_SEG_SIZE));
844
845 /* Prevent unloading of the 1394 host */
846 if (!try_module_get(hi->host->driver->owner)) {
847 SBP2_ERR("failed to get a reference on 1394 host driver");
848 goto failed_alloc;
849 }
850
851 lu->hi = hi;
852
853 write_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
854 list_add_tail(&lu->lu_list, &hi->logical_units);
855 write_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
856
857 /* Register the status FIFO address range. We could use the same FIFO
858 * for targets at different nodes. However we need different FIFOs per
859 * target in order to support multi-unit devices.
860 * The FIFO is located outside the local host controller's physical range
861 * but, if possible, within the posted write area. Status writes will
862 * then be performed as unified transactions. This slightly reduces
863 * bandwidth usage, and some Prolific based devices seem to require it.
864 */
865 lu->status_fifo_addr = hpsb_allocate_and_register_addrspace(
866 &sbp2_highlevel, ud->ne->host, &sbp2_ops,
867 sizeof(struct sbp2_status_block), sizeof(quadlet_t),
868 ud->ne->host->low_addr_space, CSR1212_ALL_SPACE_END);
869 if (lu->status_fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
870 SBP2_ERR("failed to allocate status FIFO address range");
871 goto failed_alloc;
872 }
873
874 shost = scsi_host_alloc(&sbp2_shost_template, sizeof(unsigned long));
875 if (!shost) {
876 SBP2_ERR("failed to allocate scsi host");
877 goto failed_alloc;
878 }
879
880 shost->hostdata[0] = (unsigned long)lu;
881 shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
882
883 if (!scsi_add_host(shost, &ud->device)) {
884 lu->shost = shost;
885 return lu;
886 }
887
888 SBP2_ERR("failed to add scsi host");
889 scsi_host_put(shost);
890
891failed_alloc:
892 sbp2_remove_device(lu);
893 return NULL;
894}
895
896static void sbp2_host_reset(struct hpsb_host *host)
897{
898 struct sbp2_fwhost_info *hi;
899 struct sbp2_lu *lu;
900 unsigned long flags;
901
902 hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
903 if (!hi)
904 return;
905
906 read_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
907
908 list_for_each_entry(lu, &hi->logical_units, lu_list)
909 if (atomic_cmpxchg(&lu->state,
910 SBP2LU_STATE_RUNNING, SBP2LU_STATE_IN_RESET)
911 == SBP2LU_STATE_RUNNING)
912 scsi_block_requests(lu->shost);
913
914 read_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
915}
916
917static int sbp2_start_device(struct sbp2_lu *lu)
918{
919 struct sbp2_fwhost_info *hi = lu->hi;
920 int error;
921
922 lu->login_response = dma_alloc_coherent(hi->host->device.parent,
923 sizeof(struct sbp2_login_response),
924 &lu->login_response_dma, GFP_KERNEL);
925 if (!lu->login_response)
926 goto alloc_fail;
927
928 lu->query_logins_orb = dma_alloc_coherent(hi->host->device.parent,
929 sizeof(struct sbp2_query_logins_orb),
930 &lu->query_logins_orb_dma, GFP_KERNEL);
931 if (!lu->query_logins_orb)
932 goto alloc_fail;
933
934 lu->query_logins_response = dma_alloc_coherent(hi->host->device.parent,
935 sizeof(struct sbp2_query_logins_response),
936 &lu->query_logins_response_dma, GFP_KERNEL);
937 if (!lu->query_logins_response)
938 goto alloc_fail;
939
940 lu->reconnect_orb = dma_alloc_coherent(hi->host->device.parent,
941 sizeof(struct sbp2_reconnect_orb),
942 &lu->reconnect_orb_dma, GFP_KERNEL);
943 if (!lu->reconnect_orb)
944 goto alloc_fail;
945
946 lu->logout_orb = dma_alloc_coherent(hi->host->device.parent,
947 sizeof(struct sbp2_logout_orb),
948 &lu->logout_orb_dma, GFP_KERNEL);
949 if (!lu->logout_orb)
950 goto alloc_fail;
951
952 lu->login_orb = dma_alloc_coherent(hi->host->device.parent,
953 sizeof(struct sbp2_login_orb),
954 &lu->login_orb_dma, GFP_KERNEL);
955 if (!lu->login_orb)
956 goto alloc_fail;
957
958 if (sbp2util_create_command_orb_pool(lu))
959 goto alloc_fail;
960
961 /* Wait a second before trying to log in. Previously logged in
962 * initiators need a chance to reconnect. */
963 if (msleep_interruptible(1000)) {
964 sbp2_remove_device(lu);
965 return -EINTR;
966 }
967
968 if (sbp2_login_device(lu)) {
969 sbp2_remove_device(lu);
970 return -EBUSY;
971 }
972
973 sbp2_set_busy_timeout(lu);
974 sbp2_agent_reset(lu, 1);
975 sbp2_max_speed_and_size(lu);
976
977 if (lu->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
978 ssleep(SBP2_INQUIRY_DELAY);
979
980 error = scsi_add_device(lu->shost, 0, lu->ud->id, 0);
981 if (error) {
982 SBP2_ERR("scsi_add_device failed");
983 sbp2_logout_device(lu);
984 sbp2_remove_device(lu);
985 return error;
986 }
987
988 return 0;
989
990alloc_fail:
991 SBP2_ERR("Could not allocate memory for lu");
992 sbp2_remove_device(lu);
993 return -ENOMEM;
994}
995
996static void sbp2_remove_device(struct sbp2_lu *lu)
997{
998 struct sbp2_fwhost_info *hi;
999 unsigned long flags;
1000
1001 if (!lu)
1002 return;
1003 hi = lu->hi;
1004 if (!hi)
1005 goto no_hi;
1006
1007 if (lu->shost) {
1008 scsi_remove_host(lu->shost);
1009 scsi_host_put(lu->shost);
1010 }
1011 flush_scheduled_work();
1012 sbp2util_remove_command_orb_pool(lu, hi->host);
1013
1014 write_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
1015 list_del(&lu->lu_list);
1016 write_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
1017
1018 if (lu->login_response)
1019 dma_free_coherent(hi->host->device.parent,
1020 sizeof(struct sbp2_login_response),
1021 lu->login_response,
1022 lu->login_response_dma);
1023 if (lu->login_orb)
1024 dma_free_coherent(hi->host->device.parent,
1025 sizeof(struct sbp2_login_orb),
1026 lu->login_orb,
1027 lu->login_orb_dma);
1028 if (lu->reconnect_orb)
1029 dma_free_coherent(hi->host->device.parent,
1030 sizeof(struct sbp2_reconnect_orb),
1031 lu->reconnect_orb,
1032 lu->reconnect_orb_dma);
1033 if (lu->logout_orb)
1034 dma_free_coherent(hi->host->device.parent,
1035 sizeof(struct sbp2_logout_orb),
1036 lu->logout_orb,
1037 lu->logout_orb_dma);
1038 if (lu->query_logins_orb)
1039 dma_free_coherent(hi->host->device.parent,
1040 sizeof(struct sbp2_query_logins_orb),
1041 lu->query_logins_orb,
1042 lu->query_logins_orb_dma);
1043 if (lu->query_logins_response)
1044 dma_free_coherent(hi->host->device.parent,
1045 sizeof(struct sbp2_query_logins_response),
1046 lu->query_logins_response,
1047 lu->query_logins_response_dma);
1048
1049 if (lu->status_fifo_addr != CSR1212_INVALID_ADDR_SPACE)
1050 hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
1051 lu->status_fifo_addr);
1052
1053 dev_set_drvdata(&lu->ud->device, NULL);
1054
1055 module_put(hi->host->driver->owner);
1056no_hi:
1057 kfree(lu);
1058}
1059
1060#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
1061/*
1062 * Deal with write requests on adapters which do not support physical DMA or
1063 * have it switched off.
1064 */
1065static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid,
1066 int destid, quadlet_t *data, u64 addr,
1067 size_t length, u16 flags)
1068{
1069 memcpy(bus_to_virt((u32) addr), data, length);
1070 return RCODE_COMPLETE;
1071}
1072
1073/*
1074 * Deal with read requests on adapters which do not support physical DMA or
1075 * have it switched off.
1076 */
1077static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid,
1078 quadlet_t *data, u64 addr, size_t length,
1079 u16 flags)
1080{
1081 memcpy(data, bus_to_virt((u32) addr), length);
1082 return RCODE_COMPLETE;
1083}
1084#endif
1085
1086/**************************************
1087 * SBP-2 protocol related section
1088 **************************************/
1089
1090static int sbp2_query_logins(struct sbp2_lu *lu)
1091{
1092 struct sbp2_fwhost_info *hi = lu->hi;
1093 quadlet_t data[2];
1094 int max_logins;
1095 int active_logins;
1096
1097 lu->query_logins_orb->reserved1 = 0x0;
1098 lu->query_logins_orb->reserved2 = 0x0;
1099
1100 lu->query_logins_orb->query_response_lo = lu->query_logins_response_dma;
1101 lu->query_logins_orb->query_response_hi =
1102 ORB_SET_NODE_ID(hi->host->node_id);
1103 lu->query_logins_orb->lun_misc =
1104 ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
1105 lu->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
1106 lu->query_logins_orb->lun_misc |= ORB_SET_LUN(lu->lun);
1107
1108 lu->query_logins_orb->reserved_resp_length =
1109 ORB_SET_QUERY_LOGINS_RESP_LENGTH(
1110 sizeof(struct sbp2_query_logins_response));
1111
1112 lu->query_logins_orb->status_fifo_hi =
1113 ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
1114 lu->query_logins_orb->status_fifo_lo =
1115 ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
1116
1117 sbp2util_cpu_to_be32_buffer(lu->query_logins_orb,
1118 sizeof(struct sbp2_query_logins_orb));
1119
1120 memset(lu->query_logins_response, 0,
1121 sizeof(struct sbp2_query_logins_response));
1122
1123 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1124 data[1] = lu->query_logins_orb_dma;
1125 sbp2util_cpu_to_be32_buffer(data, 8);
1126
1127 hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
1128
1129 if (sbp2util_access_timeout(lu, 2*HZ)) {
1130 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1131 return -EIO;
1132 }
1133
1134 if (lu->status_block.ORB_offset_lo != lu->query_logins_orb_dma) {
1135 SBP2_INFO("Error querying logins to SBP-2 device - unexpected response");
1136 return -EIO;
1137 }
1138
1139 if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
1140 SBP2_INFO("Error querying logins to SBP-2 device - failed");
1141 return -EIO;
1142 }
1143
1144 sbp2util_cpu_to_be32_buffer(lu->query_logins_response,
1145 sizeof(struct sbp2_query_logins_response));
1146
1147 max_logins = RESPONSE_GET_MAX_LOGINS(
1148 lu->query_logins_response->length_max_logins);
1149 SBP2_INFO("Maximum concurrent logins supported: %d", max_logins);
1150
1151 active_logins = RESPONSE_GET_ACTIVE_LOGINS(
1152 lu->query_logins_response->length_max_logins);
1153 SBP2_INFO("Number of active logins: %d", active_logins);
1154
1155 if (active_logins >= max_logins) {
1156 return -EIO;
1157 }
1158
1159 return 0;
1160}
1161
1162static int sbp2_login_device(struct sbp2_lu *lu)
1163{
1164 struct sbp2_fwhost_info *hi = lu->hi;
1165 quadlet_t data[2];
1166
1167 if (!lu->login_orb)
1168 return -EIO;
1169
1170 if (!sbp2_exclusive_login && sbp2_query_logins(lu)) {
1171 SBP2_INFO("Device does not support any more concurrent logins");
1172 return -EIO;
1173 }
1174
1175 /* assume no password */
1176 lu->login_orb->password_hi = 0;
1177 lu->login_orb->password_lo = 0;
1178
1179 lu->login_orb->login_response_lo = lu->login_response_dma;
1180 lu->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
1181 lu->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
1182
1183 /* one second reconnect time */
1184 lu->login_orb->lun_misc |= ORB_SET_RECONNECT(0);
1185 lu->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(sbp2_exclusive_login);
1186 lu->login_orb->lun_misc |= ORB_SET_NOTIFY(1);
1187 lu->login_orb->lun_misc |= ORB_SET_LUN(lu->lun);
1188
1189 lu->login_orb->passwd_resp_lengths =
1190 ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
1191
1192 lu->login_orb->status_fifo_hi =
1193 ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
1194 lu->login_orb->status_fifo_lo =
1195 ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
1196
1197 sbp2util_cpu_to_be32_buffer(lu->login_orb,
1198 sizeof(struct sbp2_login_orb));
1199
1200 memset(lu->login_response, 0, sizeof(struct sbp2_login_response));
1201
1202 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1203 data[1] = lu->login_orb_dma;
1204 sbp2util_cpu_to_be32_buffer(data, 8);
1205
1206 hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
1207
1208 /* wait up to 20 seconds for login status */
1209 if (sbp2util_access_timeout(lu, 20*HZ)) {
1210 SBP2_ERR("Error logging into SBP-2 device - timed out");
1211 return -EIO;
1212 }
1213
1214 /* make sure that the returned status matches the login ORB */
1215 if (lu->status_block.ORB_offset_lo != lu->login_orb_dma) {
1216 SBP2_ERR("Error logging into SBP-2 device - unexpected response");
1217 return -EIO;
1218 }
1219
1220 if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
1221 SBP2_ERR("Error logging into SBP-2 device - failed");
1222 return -EIO;
1223 }
1224
1225 sbp2util_cpu_to_be32_buffer(lu->login_response,
1226 sizeof(struct sbp2_login_response));
1227 lu->command_block_agent_addr =
1228 ((u64)lu->login_response->command_block_agent_hi) << 32;
1229 lu->command_block_agent_addr |=
1230 ((u64)lu->login_response->command_block_agent_lo);
1231 lu->command_block_agent_addr &= 0x0000ffffffffffffULL;
1232
1233 SBP2_INFO("Logged into SBP-2 device");
1234 return 0;
1235}
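/*
 * Illustrative example (assumed values, not from the original source): a
 * login response carrying command_block_agent_hi = 0xffc0ffff and
 * command_block_agent_lo = 0xf0010000 combines to 0xffc0fffff0010000; the
 * 48-bit mask above strips the node ID bits, leaving the agent's CSR
 * offset 0xfffff0010000.
 */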
1236
1237static int sbp2_logout_device(struct sbp2_lu *lu)
1238{
1239 struct sbp2_fwhost_info *hi = lu->hi;
1240 quadlet_t data[2];
1241 int error;
1242
1243 lu->logout_orb->reserved1 = 0x0;
1244 lu->logout_orb->reserved2 = 0x0;
1245 lu->logout_orb->reserved3 = 0x0;
1246 lu->logout_orb->reserved4 = 0x0;
1247
1248 lu->logout_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST);
1249 lu->logout_orb->login_ID_misc |=
1250 ORB_SET_LOGIN_ID(lu->login_response->length_login_ID);
1251 lu->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
1252
1253 lu->logout_orb->reserved5 = 0x0;
1254 lu->logout_orb->status_fifo_hi =
1255 ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
1256 lu->logout_orb->status_fifo_lo =
1257 ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
1258
1259 sbp2util_cpu_to_be32_buffer(lu->logout_orb,
1260 sizeof(struct sbp2_logout_orb));
1261
1262 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1263 data[1] = lu->logout_orb_dma;
1264 sbp2util_cpu_to_be32_buffer(data, 8);
1265
1266 error = hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
1267 if (error)
1268 return error;
1269
1270 /* wait up to 1 second for the device to complete logout */
1271 if (sbp2util_access_timeout(lu, HZ))
1272 return -EIO;
1273
1274 SBP2_INFO("Logged out of SBP-2 device");
1275 return 0;
1276}
1277
1278static int sbp2_reconnect_device(struct sbp2_lu *lu)
1279{
1280 struct sbp2_fwhost_info *hi = lu->hi;
1281 quadlet_t data[2];
1282 int error;
1283
1284 lu->reconnect_orb->reserved1 = 0x0;
1285 lu->reconnect_orb->reserved2 = 0x0;
1286 lu->reconnect_orb->reserved3 = 0x0;
1287 lu->reconnect_orb->reserved4 = 0x0;
1288
1289 lu->reconnect_orb->login_ID_misc =
1290 ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST);
1291 lu->reconnect_orb->login_ID_misc |=
1292 ORB_SET_LOGIN_ID(lu->login_response->length_login_ID);
1293 lu->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
1294
1295 lu->reconnect_orb->reserved5 = 0x0;
1296 lu->reconnect_orb->status_fifo_hi =
1297 ORB_SET_STATUS_FIFO_HI(lu->status_fifo_addr, hi->host->node_id);
1298 lu->reconnect_orb->status_fifo_lo =
1299 ORB_SET_STATUS_FIFO_LO(lu->status_fifo_addr);
1300
1301 sbp2util_cpu_to_be32_buffer(lu->reconnect_orb,
1302 sizeof(struct sbp2_reconnect_orb));
1303
1304 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1305 data[1] = lu->reconnect_orb_dma;
1306 sbp2util_cpu_to_be32_buffer(data, 8);
1307
1308 error = hpsb_node_write(lu->ne, lu->management_agent_addr, data, 8);
1309 if (error)
1310 return error;
1311
1312 /* wait up to 1 second for reconnect status */
1313 if (sbp2util_access_timeout(lu, HZ)) {
1314 SBP2_ERR("Error reconnecting to SBP-2 device - timed out");
1315 return -EIO;
1316 }
1317
1318 /* make sure that the returned status matches the reconnect ORB */
1319 if (lu->status_block.ORB_offset_lo != lu->reconnect_orb_dma) {
1320 SBP2_ERR("Error reconnecting to SBP-2 device - unexpected response");
1321 return -EIO;
1322 }
1323
1324 if (STATUS_TEST_RDS(lu->status_block.ORB_offset_hi_misc)) {
1325 SBP2_ERR("Error reconnecting to SBP-2 device - failed");
1326 return -EIO;
1327 }
1328
1329 SBP2_INFO("Reconnected to SBP-2 device");
1330 return 0;
1331}
1332
1333/*
1334 * Set the target node's Single Phase Retry limit. Affects the target's retry
1335 * behaviour if our node is too busy to accept requests.
1336 */
1337static int sbp2_set_busy_timeout(struct sbp2_lu *lu)
1338{
1339 quadlet_t data;
1340
1341 data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
1342 if (hpsb_node_write(lu->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4))
1343 SBP2_ERR("%s error", __func__);
1344 return 0;
1345}
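/*
 * Illustrative note, grounded in the defines in sbp2.h: the write above
 * lands at CSR register offset 0x210 (SBP2_BUSY_TIMEOUT_ADDRESS), and
 * SBP2_BUSY_TIMEOUT_VALUE (0xf) is the biggest possible single phase retry
 * count, i.e. the target may retry a busied packet up to 15 times.
 */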
1346
1347static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
1348 struct unit_directory *ud)
1349{
1350 struct csr1212_keyval *kv;
1351 struct csr1212_dentry *dentry;
1352 u64 management_agent_addr;
1353 u32 firmware_revision, model;
1354 unsigned workarounds;
1355 int i;
1356
1357 management_agent_addr = 0;
1358 firmware_revision = SBP2_ROM_VALUE_MISSING;
1359 model = ud->flags & UNIT_DIRECTORY_MODEL_ID ?
1360 ud->model_id : SBP2_ROM_VALUE_MISSING;
1361
1362 csr1212_for_each_dir_entry(ud->ne->csr, kv, ud->ud_kv, dentry) {
1363 switch (kv->key.id) {
1364 case CSR1212_KV_ID_DEPENDENT_INFO:
1365 if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET)
1366 management_agent_addr =
1367 CSR1212_REGISTER_SPACE_BASE +
1368 (kv->value.csr_offset << 2);
1369
1370 else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE)
1371 lu->lun = ORB_SET_LUN(kv->value.immediate);
1372 break;
1373
1374
1375 case SBP2_FIRMWARE_REVISION_KEY:
1376 firmware_revision = kv->value.immediate;
1377 break;
1378
1379 default:
1380 /* FIXME: Check for SBP2_UNIT_CHARACTERISTICS_KEY
1381 * mgt_ORB_timeout and ORB_size, SBP-2 clause 7.4.8. */
1382
1383 /* FIXME: Check for SBP2_DEVICE_TYPE_AND_LUN_KEY.
1384 * Its "ordered" bit has consequences for command ORB
1385 * list handling. See SBP-2 clauses 4.6, 7.4.11, 10.2 */
1386 break;
1387 }
1388 }
1389
1390 workarounds = sbp2_default_workarounds;
1391
1392 if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
1393 for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
1394 if (sbp2_workarounds_table[i].firmware_revision !=
1395 SBP2_ROM_VALUE_WILDCARD &&
1396 sbp2_workarounds_table[i].firmware_revision !=
1397 (firmware_revision & 0xffff00))
1398 continue;
1399 if (sbp2_workarounds_table[i].model !=
1400 SBP2_ROM_VALUE_WILDCARD &&
1401 sbp2_workarounds_table[i].model != model)
1402 continue;
1403 workarounds |= sbp2_workarounds_table[i].workarounds;
1404 break;
1405 }
1406
1407 if (workarounds)
1408 SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x "
1409 "(firmware_revision 0x%06x, vendor_id 0x%06x,"
1410 " model_id 0x%06x)",
1411 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
1412 workarounds, firmware_revision, ud->vendor_id,
1413 model);
1414
1415 /* We would need one SCSI host template for each target to adjust
1416 * max_sectors on the fly, therefore warn only. */
1417 if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
1418 (sbp2_max_sectors * 512) > (128 * 1024))
1419 SBP2_INFO("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
1420 "max transfer size. WARNING: Current max_sectors "
1421 "setting is larger than 128KB (%d sectors)",
1422 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
1423 sbp2_max_sectors);
1424
1425 /* If this is a logical unit directory entry, process the parent
1426 * to get the values. */
1427 if (ud->flags & UNIT_DIRECTORY_LUN_DIRECTORY) {
1428 struct unit_directory *parent_ud = container_of(
1429 ud->device.parent, struct unit_directory, device);
1430 sbp2_parse_unit_directory(lu, parent_ud);
1431 } else {
1432 lu->management_agent_addr = management_agent_addr;
1433 lu->workarounds = workarounds;
1434 if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
1435 lu->lun = ORB_SET_LUN(ud->lun);
1436 }
1437}
1438
1439#define SBP2_PAYLOAD_TO_BYTES(p) (1 << ((p) + 2))
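/*
 * Worked example (illustrative): the ORB max_payload code encodes
 * 2^(code + 2) bytes, so code 7 -> 512 bytes (the S100 asynchronous
 * maximum) and code 9 -> 2048 bytes (the S400 maximum).
 */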
1440
1441/*
1442 * This function is called in order to determine the max speed and packet
1443 * size we can use in our ORBs. Note that we (the driver and host) only
1444 * initiate the transaction. The SBP-2 device actually transfers the data
1445 * (by reading from the DMA area we tell it). This means that the SBP-2
1446 * device decides the actual maximum data it can transfer. We just tell it
1447 * the speed that it needs to use, and the max_rec the host supports, and
1448 * it takes care of the rest.
1449 */
1450static int sbp2_max_speed_and_size(struct sbp2_lu *lu)
1451{
1452 struct sbp2_fwhost_info *hi = lu->hi;
1453 u8 payload;
1454
1455 lu->speed_code = hi->host->speed[NODEID_TO_NODE(lu->ne->nodeid)];
1456
1457 if (lu->speed_code > sbp2_max_speed) {
1458 lu->speed_code = sbp2_max_speed;
1459 SBP2_INFO("Reducing speed to %s",
1460 hpsb_speedto_str[sbp2_max_speed]);
1461 }
1462
1463 /* Payload size is the lesser of what our speed supports and what
1464 * our host supports. */
1465 payload = min(sbp2_speedto_max_payload[lu->speed_code],
1466 (u8) (hi->host->csr.max_rec - 1));
1467
1468 /* If physical DMA is off, work around limitation in ohci1394:
1469 * packet size must not exceed PAGE_SIZE */
1470 if (lu->ne->host->low_addr_space < (1ULL << 32))
1471 while (SBP2_PAYLOAD_TO_BYTES(payload) + 24 > PAGE_SIZE &&
1472 payload)
1473 payload--;
1474
1475 SBP2_INFO("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
1476 NODE_BUS_ARGS(hi->host, lu->ne->nodeid),
1477 hpsb_speedto_str[lu->speed_code],
1478 SBP2_PAYLOAD_TO_BYTES(payload));
1479
1480 lu->max_payload_size = payload;
1481 return 0;
1482}
1483
1484static int sbp2_agent_reset(struct sbp2_lu *lu, int wait)
1485{
1486 quadlet_t data;
1487 u64 addr;
1488 int retval;
1489 unsigned long flags;
1490
1491 /* flush lu->protocol_work */
1492 if (wait)
1493 flush_scheduled_work();
1494
1495 data = ntohl(SBP2_AGENT_RESET_DATA);
1496 addr = lu->command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;
1497
1498 if (wait)
1499 retval = hpsb_node_write(lu->ne, addr, &data, 4);
1500 else
1501 retval = sbp2util_node_write_no_wait(lu->ne, addr, &data, 4);
1502
1503 if (retval < 0) {
1504 SBP2_ERR("hpsb_node_write failed");
1505 return -EIO;
1506 }
1507
1508 /* make sure that the ORB_POINTER is written on next command */
1509 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
1510 lu->last_orb = NULL;
1511 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
1512
1513 return 0;
1514}
1515
1516static int sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
1517 struct sbp2_fwhost_info *hi,
1518 struct sbp2_command_info *cmd,
1519 unsigned int sg_count,
1520 struct scatterlist *sg,
1521 u32 orb_direction,
1522 enum dma_data_direction dma_dir)
1523{
1524 struct device *dmadev = hi->host->device.parent;
1525 struct sbp2_unrestricted_page_table *pt;
1526 int i, n;
1527
1528 n = dma_map_sg(dmadev, sg, sg_count, dma_dir);
1529 if (n == 0)
1530 return -ENOMEM;
1531
1532 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1533 orb->misc |= ORB_SET_DIRECTION(orb_direction);
1534
1535 /* special case if only one element (and less than 64KB in size) */
1536 if (n == 1) {
1537 orb->misc |= ORB_SET_DATA_SIZE(sg_dma_len(sg));
1538 orb->data_descriptor_lo = sg_dma_address(sg);
1539 } else {
1540 pt = &cmd->scatter_gather_element[0];
1541
1542 dma_sync_single_for_cpu(dmadev, cmd->sge_dma,
1543 sizeof(cmd->scatter_gather_element),
1544 DMA_TO_DEVICE);
1545
1546 for_each_sg(sg, sg, n, i) {
1547 pt[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
1548 pt[i].low = cpu_to_be32(sg_dma_address(sg));
1549 }
1550
1551 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1) |
1552 ORB_SET_DATA_SIZE(n);
1553 orb->data_descriptor_lo = cmd->sge_dma;
1554
1555 dma_sync_single_for_device(dmadev, cmd->sge_dma,
1556 sizeof(cmd->scatter_gather_element),
1557 DMA_TO_DEVICE);
1558 }
1559 return 0;
1560}
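/*
 * Illustrative sketch with assumed addresses (not from the original code):
 * two mapped segments of 0x1000 bytes at bus addresses 0x01000000 and
 * 0x02000000 would be described by the loop above as
 *
 *	pt[0].high = cpu_to_be32(0x1000 << 16);
 *	pt[0].low  = cpu_to_be32(0x01000000);
 *	pt[1].high = cpu_to_be32(0x1000 << 16);
 *	pt[1].low  = cpu_to_be32(0x02000000);
 *
 * with the ORB carrying ORB_SET_PAGE_TABLE_PRESENT(0x1) | ORB_SET_DATA_SIZE(2)
 * and data_descriptor_lo pointing at the table itself.
 */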
1561
1562static int sbp2_create_command_orb(struct sbp2_lu *lu,
1563 struct sbp2_command_info *cmd,
1564 struct scsi_cmnd *SCpnt)
1565{
1566 struct device *dmadev = lu->hi->host->device.parent;
1567 struct sbp2_command_orb *orb = &cmd->command_orb;
1568 unsigned int scsi_request_bufflen = scsi_bufflen(SCpnt);
1569 enum dma_data_direction dma_dir = SCpnt->sc_data_direction;
1570 u32 orb_direction;
1571 int ret;
1572
1573 dma_sync_single_for_cpu(dmadev, cmd->command_orb_dma,
1574 sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
1575 /*
1576 * Set-up our command ORB.
1577 *
1578 * NOTE: We're doing unrestricted page tables (s/g), as this is
1579 * best performance (at least with the devices I have). This means
1580 * that data_size becomes the number of s/g elements, and
1581 * page_size should be zero (for unrestricted).
1582 */
1583 orb->next_ORB_hi = ORB_SET_NULL_PTR(1);
1584 orb->next_ORB_lo = 0x0;
1585 orb->misc = ORB_SET_MAX_PAYLOAD(lu->max_payload_size);
1586 orb->misc |= ORB_SET_SPEED(lu->speed_code);
1587 orb->misc |= ORB_SET_NOTIFY(1);
1588
1589 if (dma_dir == DMA_NONE)
1590 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
1591 else if (dma_dir == DMA_TO_DEVICE && scsi_request_bufflen)
1592 orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
1593 else if (dma_dir == DMA_FROM_DEVICE && scsi_request_bufflen)
1594 orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
1595 else {
1596 SBP2_INFO("Falling back to DMA_NONE");
1597 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
1598 }
1599
1600 /* set up our page table stuff */
1601 if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
1602 orb->data_descriptor_hi = 0x0;
1603 orb->data_descriptor_lo = 0x0;
1604 orb->misc |= ORB_SET_DIRECTION(1);
1605 ret = 0;
1606 } else {
1607 ret = sbp2_prep_command_orb_sg(orb, lu->hi, cmd,
1608 scsi_sg_count(SCpnt),
1609 scsi_sglist(SCpnt),
1610 orb_direction, dma_dir);
1611 }
1612 sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
1613
1614 memset(orb->cdb, 0, sizeof(orb->cdb));
1615 memcpy(orb->cdb, SCpnt->cmnd, SCpnt->cmd_len);
1616
1617 dma_sync_single_for_device(dmadev, cmd->command_orb_dma,
1618 sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
1619 return ret;
1620}
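/*
 * Illustrative mapping of the direction selection above (assumed commands):
 * a READ(10) arrives with sc_data_direction == DMA_FROM_DEVICE and a
 * non-zero buffer length, so orb_direction becomes
 * ORB_DIRECTION_READ_FROM_MEDIA (0x1); a TEST UNIT READY carries DMA_NONE
 * and ends up as ORB_DIRECTION_NO_DATA_TRANSFER (0x2).
 */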
1621
1622static void sbp2_link_orb_command(struct sbp2_lu *lu,
1623 struct sbp2_command_info *cmd)
1624{
1625 struct sbp2_fwhost_info *hi = lu->hi;
1626 struct sbp2_command_orb *last_orb;
1627 dma_addr_t last_orb_dma;
1628 u64 addr = lu->command_block_agent_addr;
1629 quadlet_t data[2];
1630 size_t length;
1631 unsigned long flags;
1632
1633 /* check to see if there are any previous orbs to use */
1634 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
1635 last_orb = lu->last_orb;
1636 last_orb_dma = lu->last_orb_dma;
1637 if (!last_orb) {
1638 /*
1639 * last_orb == NULL means: We know that the target's fetch agent
1640 * is not active right now.
1641 */
1642 addr += SBP2_ORB_POINTER_OFFSET;
1643 data[0] = ORB_SET_NODE_ID(hi->host->node_id);
1644 data[1] = cmd->command_orb_dma;
1645 sbp2util_cpu_to_be32_buffer(data, 8);
1646 length = 8;
1647 } else {
1648 /*
1649 * last_orb != NULL means: We know that the target's fetch agent
1650 * is (very probably) not dead or in reset state right now.
1651 * We have an ORB already sent that we can append a new one to.
1652 * The target's fetch agent may or may not have read this
1653 * previous ORB yet.
1654 */
1655 dma_sync_single_for_cpu(hi->host->device.parent, last_orb_dma,
1656 sizeof(struct sbp2_command_orb),
1657 DMA_TO_DEVICE);
1658 last_orb->next_ORB_lo = cpu_to_be32(cmd->command_orb_dma);
1659 wmb();
1660 /* Tells hardware that this pointer is valid */
1661 last_orb->next_ORB_hi = 0;
1662 dma_sync_single_for_device(hi->host->device.parent,
1663 last_orb_dma,
1664 sizeof(struct sbp2_command_orb),
1665 DMA_TO_DEVICE);
1666 addr += SBP2_DOORBELL_OFFSET;
1667 data[0] = 0;
1668 length = 4;
1669 }
1670 lu->last_orb = &cmd->command_orb;
1671 lu->last_orb_dma = cmd->command_orb_dma;
1672 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
1673
1674 if (sbp2util_node_write_no_wait(lu->ne, addr, data, length)) {
1675 /*
1676 * sbp2util_node_write_no_wait failed. We certainly ran out
1677 * of transaction labels, perhaps just because there were no
1678 * context switches which gave khpsbpkt a chance to collect
1679 * free tlabels. Try again in non-atomic context. If necessary,
1680 * the workqueue job will sleep as long as needed to get a tlabel.
1681 * We do not accept new commands until the job is over.
1682 */
1683 scsi_block_requests(lu->shost);
1684 PREPARE_WORK(&lu->protocol_work,
1685 last_orb ? sbp2util_write_doorbell:
1686 sbp2util_write_orb_pointer);
1687 schedule_work(&lu->protocol_work);
1688 }
1689}
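/*
 * The two kick modes above, summarized (illustrative): with an idle fetch
 * agent the initiator hands over a fresh ORB pointer, with a live ORB list
 * it links next_ORB and rings the doorbell:
 *
 *	idle:   write { ORB_SET_NODE_ID(node_id), orb_dma } (8 bytes) to
 *	        command_block_agent_addr + SBP2_ORB_POINTER_OFFSET
 *	active: write one quadlet (value ignored) to
 *	        command_block_agent_addr + SBP2_DOORBELL_OFFSET
 */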
1690
1691static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
1692 void (*done)(struct scsi_cmnd *))
1693{
1694 struct sbp2_command_info *cmd;
1695
1696 cmd = sbp2util_allocate_command_orb(lu, SCpnt, done);
1697 if (!cmd)
1698 return -EIO;
1699
1700 if (sbp2_create_command_orb(lu, cmd, SCpnt))
1701 return -ENOMEM;
1702
1703 sbp2_link_orb_command(lu, cmd);
1704 return 0;
1705}
1706
1707/*
1708 * Translates SBP-2 status into SCSI sense data for check conditions
1709 */
1710static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status,
1711 unchar *sense_data)
1712{
1713 /* Not pretty, but it is just a byte-by-byte field copy. */
1714 sense_data[0] = 0x70;
1715 sense_data[1] = 0x0;
1716 sense_data[2] = sbp2_status[9];
1717 sense_data[3] = sbp2_status[12];
1718 sense_data[4] = sbp2_status[13];
1719 sense_data[5] = sbp2_status[14];
1720 sense_data[6] = sbp2_status[15];
1721 sense_data[7] = 10;
1722 sense_data[8] = sbp2_status[16];
1723 sense_data[9] = sbp2_status[17];
1724 sense_data[10] = sbp2_status[18];
1725 sense_data[11] = sbp2_status[19];
1726 sense_data[12] = sbp2_status[10];
1727 sense_data[13] = sbp2_status[11];
1728 sense_data[14] = sbp2_status[20];
1729 sense_data[15] = sbp2_status[21];
1730
1731 return sbp2_status[8] & 0x3f;
1732}
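/*
 * Illustrative example (assumed values): for a medium error the mapping
 * above yields fixed-format sense data along the lines of
 *
 *	sense_data[0]  = 0x70  current error, fixed format
 *	sense_data[2]  = 0x03  sense key MEDIUM ERROR, from sbp2_status[9]
 *	sense_data[7]  = 10    additional sense length
 *	sense_data[12] = ASC and sense_data[13] = ASCQ, from sbp2_status[10..11]
 *
 * while the returned value is the SCSI status code held in sbp2_status[8].
 */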
1733
1734static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
1735 int destid, quadlet_t *data, u64 addr,
1736 size_t length, u16 fl)
1737{
1738 struct sbp2_fwhost_info *hi;
1739 struct sbp2_lu *lu = NULL, *lu_tmp;
1740 struct scsi_cmnd *SCpnt = NULL;
1741 struct sbp2_status_block *sb;
1742 u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
1743 struct sbp2_command_info *cmd;
1744 unsigned long flags;
1745
1746 if (unlikely(length < 8 || length > sizeof(struct sbp2_status_block))) {
1747 SBP2_ERR("Wrong size of status block");
1748 return RCODE_ADDRESS_ERROR;
1749 }
1750 if (unlikely(!host)) {
1751 SBP2_ERR("host is NULL - this is bad!");
1752 return RCODE_ADDRESS_ERROR;
1753 }
1754 hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
1755 if (unlikely(!hi)) {
1756 SBP2_ERR("host info is NULL - this is bad!");
1757 return RCODE_ADDRESS_ERROR;
1758 }
1759
1760 /* Find the unit which wrote the status. */
1761 read_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
1762 list_for_each_entry(lu_tmp, &hi->logical_units, lu_list) {
1763 if (lu_tmp->ne->nodeid == nodeid &&
1764 lu_tmp->status_fifo_addr == addr) {
1765 lu = lu_tmp;
1766 break;
1767 }
1768 }
1769 read_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
1770
1771 if (unlikely(!lu)) {
1772 SBP2_ERR("lu is NULL - device is gone?");
1773 return RCODE_ADDRESS_ERROR;
1774 }
1775
1776 /* Put response into lu status fifo buffer. The first two quadlets
1777 * come in big endian byte order. Often the target writes only a
1778 * truncated status block, minimally the first two quadlets. The rest
1779 * is implied to be zeros. */
1780 sb = &lu->status_block;
1781 memset(sb->command_set_dependent, 0, sizeof(sb->command_set_dependent));
1782 memcpy(sb, data, length);
1783 sbp2util_be32_to_cpu_buffer(sb, 8);
1784
1785 /* Ignore unsolicited status. Handle command ORB status. */
1786 if (unlikely(STATUS_GET_SRC(sb->ORB_offset_hi_misc) == 2))
1787 cmd = NULL;
1788 else
1789 cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo);
1790 if (cmd) {
1791 /* Grab SCSI command pointers and check status. */
1792 /*
1793 * FIXME: If the src field in the status is 1, the ORB DMA must
1794 * not be reused until status for a subsequent ORB is received.
1795 */
1796 SCpnt = cmd->Current_SCpnt;
1797 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
1798 sbp2util_mark_command_completed(lu, cmd);
1799 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
1800
1801 if (SCpnt) {
1802 u32 h = sb->ORB_offset_hi_misc;
1803 u32 r = STATUS_GET_RESP(h);
1804
1805 if (r != RESP_STATUS_REQUEST_COMPLETE) {
1806 SBP2_INFO("resp 0x%x, sbp_status 0x%x",
1807 r, STATUS_GET_SBP_STATUS(h));
1808 scsi_status =
1809 r == RESP_STATUS_TRANSPORT_FAILURE ?
1810 SBP2_SCSI_STATUS_BUSY :
1811 SBP2_SCSI_STATUS_COMMAND_TERMINATED;
1812 }
1813
1814 if (STATUS_GET_LEN(h) > 1)
1815 scsi_status = sbp2_status_to_sense_data(
1816 (unchar *)sb, SCpnt->sense_buffer);
1817
1818 if (STATUS_TEST_DEAD(h))
1819 sbp2_agent_reset(lu, 0);
1820 }
1821
1822 /* Check here to see if there are no commands in-use. If there
1823 * are none, we know that the fetch agent left the active state
1824 * _and_ that we did not reactivate it yet. Therefore clear
1825 * last_orb so that next time we write directly to the
1826 * ORB_POINTER register. That way the fetch agent does not need
1827 * to refetch the next_ORB. */
1828 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
1829 if (list_empty(&lu->cmd_orb_inuse))
1830 lu->last_orb = NULL;
1831 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
1832
1833 } else {
1834 /* It's probably status after a management request. */
1835 if ((sb->ORB_offset_lo == lu->reconnect_orb_dma) ||
1836 (sb->ORB_offset_lo == lu->login_orb_dma) ||
1837 (sb->ORB_offset_lo == lu->query_logins_orb_dma) ||
1838 (sb->ORB_offset_lo == lu->logout_orb_dma)) {
1839 lu->access_complete = 1;
1840 wake_up_interruptible(&sbp2_access_wq);
1841 }
1842 }
1843
1844 if (SCpnt)
1845 sbp2scsi_complete_command(lu, scsi_status, SCpnt,
1846 cmd->Current_done);
1847 return RCODE_COMPLETE;
1848}
1849
1850/**************************************
1851 * SCSI interface related section
1852 **************************************/
1853
1854static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
1855 void (*done)(struct scsi_cmnd *))
1856{
1857 struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
1858 struct sbp2_fwhost_info *hi;
1859 int result = DID_NO_CONNECT << 16;
1860
1861 if (unlikely(!sbp2util_node_is_available(lu)))
1862 goto done;
1863
1864 hi = lu->hi;
1865
1866 if (unlikely(!hi)) {
1867 SBP2_ERR("sbp2_fwhost_info is NULL - this is bad!");
1868 goto done;
1869 }
1870
1871 /* Multiple units are currently represented to the SCSI core as separate
1872 * targets, not as one target with multiple LUs. Therefore return
1873 * selection time-out to any IO directed at non-zero LUNs. */
1874 if (unlikely(SCpnt->device->lun))
1875 goto done;
1876
1877 if (unlikely(!hpsb_node_entry_valid(lu->ne))) {
1878 SBP2_ERR("Bus reset in progress - rejecting command");
1879 result = DID_BUS_BUSY << 16;
1880 goto done;
1881 }
1882
1883 /* Bidirectional commands are not yet implemented,
1884 * and unknown transfer direction not handled. */
1885 if (unlikely(SCpnt->sc_data_direction == DMA_BIDIRECTIONAL)) {
1886 SBP2_ERR("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
1887 result = DID_ERROR << 16;
1888 goto done;
1889 }
1890
1891 if (sbp2_send_command(lu, SCpnt, done)) {
1892 SBP2_ERR("Error sending SCSI command");
1893 sbp2scsi_complete_command(lu,
1894 SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
1895 SCpnt, done);
1896 }
1897 return 0;
1898
1899done:
1900 SCpnt->result = result;
1901 done(SCpnt);
1902 return 0;
1903}
1904
1905static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
1906{
1907 struct list_head *lh;
1908 struct sbp2_command_info *cmd;
1909 unsigned long flags;
1910
1911 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
1912 while (!list_empty(&lu->cmd_orb_inuse)) {
1913 lh = lu->cmd_orb_inuse.next;
1914 cmd = list_entry(lh, struct sbp2_command_info, list);
1915 sbp2util_mark_command_completed(lu, cmd);
1916 if (cmd->Current_SCpnt) {
1917 cmd->Current_SCpnt->result = status << 16;
1918 cmd->Current_done(cmd->Current_SCpnt);
1919 }
1920 }
1921 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
1922
1923 return;
1924}
1925
1926/*
1927 * Complete a regular SCSI command. Can be called in atomic context.
1928 */
1929static void sbp2scsi_complete_command(struct sbp2_lu *lu, u32 scsi_status,
1930 struct scsi_cmnd *SCpnt,
1931 void (*done)(struct scsi_cmnd *))
1932{
1933 if (!SCpnt) {
1934 SBP2_ERR("SCpnt is NULL");
1935 return;
1936 }
1937
1938 switch (scsi_status) {
1939 case SBP2_SCSI_STATUS_GOOD:
1940 SCpnt->result = DID_OK << 16;
1941 break;
1942
1943 case SBP2_SCSI_STATUS_BUSY:
1944 SBP2_ERR("SBP2_SCSI_STATUS_BUSY");
1945 SCpnt->result = DID_BUS_BUSY << 16;
1946 break;
1947
1948 case SBP2_SCSI_STATUS_CHECK_CONDITION:
1949 SCpnt->result = CHECK_CONDITION << 1 | DID_OK << 16;
1950 break;
1951
1952 case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
1953 SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT");
1954 SCpnt->result = DID_NO_CONNECT << 16;
1955 scsi_print_command(SCpnt);
1956 break;
1957
1958 case SBP2_SCSI_STATUS_CONDITION_MET:
1959 case SBP2_SCSI_STATUS_RESERVATION_CONFLICT:
1960 case SBP2_SCSI_STATUS_COMMAND_TERMINATED:
1961 SBP2_ERR("Bad SCSI status = %x", scsi_status);
1962 SCpnt->result = DID_ERROR << 16;
1963 scsi_print_command(SCpnt);
1964 break;
1965
1966 default:
1967 SBP2_ERR("Unsupported SCSI status = %x", scsi_status);
1968 SCpnt->result = DID_ERROR << 16;
1969 }
1970
1971 /* If a bus reset is in progress and there was an error, complete
1972 * the command as busy so that it will get retried. */
1973 if (!hpsb_node_entry_valid(lu->ne)
1974 && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
1975 SBP2_ERR("Completing command with busy (bus reset)");
1976 SCpnt->result = DID_BUS_BUSY << 16;
1977 }
1978
1979 /* Tell the SCSI stack that we're done with this command. */
1980 done(SCpnt);
1981}
1982
1983static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
1984{
1985 struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
1986
1987 if (sdev->lun != 0 || sdev->id != lu->ud->id || sdev->channel != 0)
1988 return -ENODEV;
1989
1990 lu->sdev = sdev;
1991 sdev->allow_restart = 1;
1992
1993 /* SBP-2 requires quadlet alignment of the data buffers. */
1994 blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
1995
1996 if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36)
1997 sdev->inquiry_len = 36;
1998 return 0;
1999}
2000
2001static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2002{
2003 struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
2004
2005 sdev->use_10_for_rw = 1;
2006
2007 if (sbp2_exclusive_login)
2008 sdev->manage_start_stop = 1;
2009 if (sdev->type == TYPE_ROM)
2010 sdev->use_10_for_ms = 1;
2011 if (sdev->type == TYPE_DISK &&
2012 lu->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
2013 sdev->skip_ms_page_8 = 1;
2014 if (lu->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
2015 sdev->fix_capacity = 1;
2016 if (lu->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
2017 sdev->start_stop_pwr_cond = 1;
2018 if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
2019 blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
2020
2021 blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
2022 return 0;
2023}
2024
2025static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
2026{
2027 ((struct sbp2_lu *)sdev->host->hostdata[0])->sdev = NULL;
2028 return;
2029}
2030
2031/*
2032 * Called by scsi stack when something has really gone wrong.
2033 * Usually called when a command has timed out for some reason.
2034 */
2035static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2036{
2037 struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
2038 struct sbp2_command_info *cmd;
2039 unsigned long flags;
2040
2041 SBP2_INFO("aborting sbp2 command");
2042 scsi_print_command(SCpnt);
2043
2044 if (sbp2util_node_is_available(lu)) {
2045 sbp2_agent_reset(lu, 1);
2046
2047 /* Return a matching command structure to the free pool. */
2048 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
2049 cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt);
2050 if (cmd) {
2051 sbp2util_mark_command_completed(lu, cmd);
2052 if (cmd->Current_SCpnt) {
2053 cmd->Current_SCpnt->result = DID_ABORT << 16;
2054 cmd->Current_done(cmd->Current_SCpnt);
2055 }
2056 }
2057 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
2058
2059 sbp2scsi_complete_all_commands(lu, DID_BUS_BUSY);
2060 }
2061
2062 return SUCCESS;
2063}
2064
2065/*
2066 * Called by scsi stack when something has really gone wrong.
2067 */
2068static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
2069{
2070 struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
2071
2072 SBP2_INFO("reset requested");
2073
2074 if (sbp2util_node_is_available(lu)) {
2075 SBP2_INFO("generating sbp2 fetch agent reset");
2076 sbp2_agent_reset(lu, 1);
2077 }
2078
2079 return SUCCESS;
2080}
2081
2082static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
2083 struct device_attribute *attr,
2084 char *buf)
2085{
2086 struct scsi_device *sdev;
2087 struct sbp2_lu *lu;
2088
2089 if (!(sdev = to_scsi_device(dev)))
2090 return 0;
2091
2092 if (!(lu = (struct sbp2_lu *)sdev->host->hostdata[0]))
2093 return 0;
2094
2095 if (sbp2_long_sysfs_ieee1394_id)
2096 return sprintf(buf, "%016Lx:%06x:%04x\n",
2097 (unsigned long long)lu->ne->guid,
2098 lu->ud->directory_id, ORB_SET_LUN(lu->lun));
2099 else
2100 return sprintf(buf, "%016Lx:%d:%d\n",
2101 (unsigned long long)lu->ne->guid,
2102 lu->ud->id, ORB_SET_LUN(lu->lun));
2103}
2104
2105MODULE_AUTHOR("Ben Collins <bcollins@debian.org>");
2106MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
2107MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
2108MODULE_LICENSE("GPL");
2109
2110static int sbp2_module_init(void)
2111{
2112 int ret;
2113
2114 if (sbp2_serialize_io) {
2115 sbp2_shost_template.can_queue = 1;
2116 sbp2_shost_template.cmd_per_lun = 1;
2117 }
2118
2119 sbp2_shost_template.max_sectors = sbp2_max_sectors;
2120
2121 hpsb_register_highlevel(&sbp2_highlevel);
2122 ret = hpsb_register_protocol(&sbp2_driver);
2123 if (ret) {
2124 SBP2_ERR("Failed to register protocol");
2125 hpsb_unregister_highlevel(&sbp2_highlevel);
2126 return ret;
2127 }
2128 return 0;
2129}
2130
2131static void __exit sbp2_module_exit(void)
2132{
2133 hpsb_unregister_protocol(&sbp2_driver);
2134 hpsb_unregister_highlevel(&sbp2_highlevel);
2135}
2136
2137module_init(sbp2_module_init);
2138module_exit(sbp2_module_exit);
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
deleted file mode 100644
index 64a3a66a8a39..000000000000
--- a/drivers/ieee1394/sbp2.h
+++ /dev/null
@@ -1,346 +0,0 @@
1/*
2 * sbp2.h - Defines and prototypes for sbp2.c
3 *
4 * Copyright (C) 2000 James Goodwin, Filanet Corporation (www.filanet.com)
5 * jamesg@filanet.com
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#ifndef SBP2_H
23#define SBP2_H
24
25#define SBP2_DEVICE_NAME "sbp2"
26
27/*
28 * There is no transport protocol limit to the CDB length, but we implement
29 * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
30 */
31#define SBP2_MAX_CDB_SIZE 16
32
33/*
34 * SBP-2 specific definitions
35 */
36
37#define ORB_DIRECTION_WRITE_TO_MEDIA 0x0
38#define ORB_DIRECTION_READ_FROM_MEDIA 0x1
39#define ORB_DIRECTION_NO_DATA_TRANSFER 0x2
40
41#define ORB_SET_NULL_PTR(v) (((v) & 0x1) << 31)
42#define ORB_SET_NOTIFY(v) (((v) & 0x1) << 31)
43#define ORB_SET_RQ_FMT(v) (((v) & 0x3) << 29)
44#define ORB_SET_NODE_ID(v) (((v) & 0xffff) << 16)
45#define ORB_SET_STATUS_FIFO_HI(v, id) ((v) >> 32 | ORB_SET_NODE_ID(id))
46#define ORB_SET_STATUS_FIFO_LO(v) ((v) & 0xffffffff)
47#define ORB_SET_DATA_SIZE(v) ((v) & 0xffff)
48#define ORB_SET_PAGE_SIZE(v) (((v) & 0x7) << 16)
49#define ORB_SET_PAGE_TABLE_PRESENT(v) (((v) & 0x1) << 19)
50#define ORB_SET_MAX_PAYLOAD(v) (((v) & 0xf) << 20)
51#define ORB_SET_SPEED(v) (((v) & 0x7) << 24)
52#define ORB_SET_DIRECTION(v) (((v) & 0x1) << 27)
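/*
 * Usage sketch (illustrative, assumed address): a status FIFO at 48-bit CSR
 * offset 0x000100000000 owned by local node ID 0xffc0 would be encoded as
 *
 *	ORB_SET_STATUS_FIFO_HI(0x000100000000ULL, 0xffc0) -> 0xffc00001
 *	ORB_SET_STATUS_FIFO_LO(0x000100000000ULL)         -> 0x00000000
 */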
53
54struct sbp2_command_orb {
55 u32 next_ORB_hi;
56 u32 next_ORB_lo;
57 u32 data_descriptor_hi;
58 u32 data_descriptor_lo;
59 u32 misc;
60 u8 cdb[SBP2_MAX_CDB_SIZE];
61} __attribute__((packed));
62
63#define SBP2_LOGIN_REQUEST 0x0
64#define SBP2_QUERY_LOGINS_REQUEST 0x1
65#define SBP2_RECONNECT_REQUEST 0x3
66#define SBP2_SET_PASSWORD_REQUEST 0x4
67#define SBP2_LOGOUT_REQUEST 0x7
68#define SBP2_ABORT_TASK_REQUEST 0xb
69#define SBP2_ABORT_TASK_SET 0xc
70#define SBP2_LOGICAL_UNIT_RESET 0xe
71#define SBP2_TARGET_RESET_REQUEST 0xf
72
73#define ORB_SET_LUN(v) ((v) & 0xffff)
74#define ORB_SET_FUNCTION(v) (((v) & 0xf) << 16)
75#define ORB_SET_RECONNECT(v) (((v) & 0xf) << 20)
76#define ORB_SET_EXCLUSIVE(v) ((v) ? 1 << 28 : 0)
77#define ORB_SET_LOGIN_RESP_LENGTH(v) ((v) & 0xffff)
78#define ORB_SET_PASSWD_LENGTH(v) (((v) & 0xffff) << 16)
79
80struct sbp2_login_orb {
81 u32 password_hi;
82 u32 password_lo;
83 u32 login_response_hi;
84 u32 login_response_lo;
85 u32 lun_misc;
86 u32 passwd_resp_lengths;
87 u32 status_fifo_hi;
88 u32 status_fifo_lo;
89} __attribute__((packed));
90
91#define RESPONSE_GET_LOGIN_ID(v) ((v) & 0xffff)
92#define RESPONSE_GET_LENGTH(v) (((v) >> 16) & 0xffff)
93#define RESPONSE_GET_RECONNECT_HOLD(v) ((v) & 0xffff)
94
95struct sbp2_login_response {
96 u32 length_login_ID;
97 u32 command_block_agent_hi;
98 u32 command_block_agent_lo;
99 u32 reconnect_hold;
100} __attribute__((packed));
101
102#define ORB_SET_LOGIN_ID(v) ((v) & 0xffff)
103#define ORB_SET_QUERY_LOGINS_RESP_LENGTH(v) ((v) & 0xffff)
104
105struct sbp2_query_logins_orb {
106 u32 reserved1;
107 u32 reserved2;
108 u32 query_response_hi;
109 u32 query_response_lo;
110 u32 lun_misc;
111 u32 reserved_resp_length;
112 u32 status_fifo_hi;
113 u32 status_fifo_lo;
114} __attribute__((packed));
115
116#define RESPONSE_GET_MAX_LOGINS(v) ((v) & 0xffff)
117#define RESPONSE_GET_ACTIVE_LOGINS(v) ((RESPONSE_GET_LENGTH((v)) - 4) / 12)
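/*
 * Worked example (illustrative): a response whose RESPONSE_GET_LENGTH() is
 * 28 describes (28 - 4) / 12 = 2 active logins, assuming 4 bytes of header
 * plus 12 bytes per reported initiator.
 */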
118
119struct sbp2_query_logins_response {
120 u32 length_max_logins;
121 u32 misc_IDs;
122 u32 initiator_misc_hi;
123 u32 initiator_misc_lo;
124} __attribute__((packed));
125
126struct sbp2_reconnect_orb {
127 u32 reserved1;
128 u32 reserved2;
129 u32 reserved3;
130 u32 reserved4;
131 u32 login_ID_misc;
132 u32 reserved5;
133 u32 status_fifo_hi;
134 u32 status_fifo_lo;
135} __attribute__((packed));
136
137struct sbp2_logout_orb {
138 u32 reserved1;
139 u32 reserved2;
140 u32 reserved3;
141 u32 reserved4;
142 u32 login_ID_misc;
143 u32 reserved5;
144 u32 status_fifo_hi;
145 u32 status_fifo_lo;
146} __attribute__((packed));
147
148struct sbp2_unrestricted_page_table {
149 __be32 high;
150 __be32 low;
151};
152
153#define RESP_STATUS_REQUEST_COMPLETE 0x0
154#define RESP_STATUS_TRANSPORT_FAILURE 0x1
155#define RESP_STATUS_ILLEGAL_REQUEST 0x2
156#define RESP_STATUS_VENDOR_DEPENDENT 0x3
157
158#define SBP2_STATUS_NO_ADDITIONAL_INFO 0x0
159#define SBP2_STATUS_REQ_TYPE_NOT_SUPPORTED 0x1
160#define SBP2_STATUS_SPEED_NOT_SUPPORTED 0x2
161#define SBP2_STATUS_PAGE_SIZE_NOT_SUPPORTED 0x3
162#define SBP2_STATUS_ACCESS_DENIED 0x4
163#define SBP2_STATUS_LU_NOT_SUPPORTED 0x5
164#define SBP2_STATUS_MAX_PAYLOAD_TOO_SMALL 0x6
165#define SBP2_STATUS_RESOURCES_UNAVAILABLE 0x8
166#define SBP2_STATUS_FUNCTION_REJECTED 0x9
167#define SBP2_STATUS_LOGIN_ID_NOT_RECOGNIZED 0xa
168#define SBP2_STATUS_DUMMY_ORB_COMPLETED 0xb
169#define SBP2_STATUS_REQUEST_ABORTED 0xc
170#define SBP2_STATUS_UNSPECIFIED_ERROR 0xff
171
172#define SFMT_CURRENT_ERROR 0x0
173#define SFMT_DEFERRED_ERROR 0x1
174#define SFMT_VENDOR_DEPENDENT_STATUS 0x3
175
176#define STATUS_GET_SRC(v) (((v) >> 30) & 0x3)
177#define STATUS_GET_RESP(v) (((v) >> 28) & 0x3)
178#define STATUS_GET_LEN(v) (((v) >> 24) & 0x7)
179#define STATUS_GET_SBP_STATUS(v) (((v) >> 16) & 0xff)
180#define STATUS_GET_ORB_OFFSET_HI(v) ((v) & 0x0000ffff)
181#define STATUS_TEST_DEAD(v) ((v) & 0x08000000)
182/* test 'resp' | 'dead' | 'sbp2_status' */
183#define STATUS_TEST_RDS(v) ((v) & 0x38ff0000)
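/*
 * Worked example (illustrative): ORB_offset_hi_misc = 0x18ff0000 decodes to
 * resp = 0x1 (transport failure), dead = 1, sbp_status = 0xff; the mask
 * 0x38ff0000 catches all three, so STATUS_TEST_RDS() is non-zero and the
 * request is treated as failed.
 */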
184
185struct sbp2_status_block {
186 u32 ORB_offset_hi_misc;
187 u32 ORB_offset_lo;
188 u8 command_set_dependent[24];
189} __attribute__((packed));
190
191
192/*
193 * SBP2 related configuration ROM definitions
194 */
195
196#define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1
197#define SBP2_CSR_OFFSET_KEY 0x54
198#define SBP2_UNIT_SPEC_ID_KEY 0x12
199#define SBP2_UNIT_SW_VERSION_KEY 0x13
200#define SBP2_COMMAND_SET_SPEC_ID_KEY 0x38
201#define SBP2_COMMAND_SET_KEY 0x39
202#define SBP2_UNIT_CHARACTERISTICS_KEY 0x3a
203#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14
204#define SBP2_FIRMWARE_REVISION_KEY 0x3c
205
206#define SBP2_AGENT_STATE_OFFSET 0x00ULL
207#define SBP2_AGENT_RESET_OFFSET 0x04ULL
208#define SBP2_ORB_POINTER_OFFSET 0x08ULL
209#define SBP2_DOORBELL_OFFSET 0x10ULL
210#define SBP2_UNSOLICITED_STATUS_ENABLE_OFFSET 0x14ULL
211#define SBP2_UNSOLICITED_STATUS_VALUE 0xf
212
213#define SBP2_BUSY_TIMEOUT_ADDRESS 0xfffff0000210ULL
214/* biggest possible value for Single Phase Retry count is 0xf */
215#define SBP2_BUSY_TIMEOUT_VALUE 0xf
216
217#define SBP2_AGENT_RESET_DATA 0xf
218
219#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
220#define SBP2_SW_VERSION_ENTRY 0x00010483
221
222/*
223 * The default maximum s/g segment size of a FireWire controller is
224 * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
225 * be quadlet-aligned, we set the length limit to 0xffff & ~3.
226 */
227#define SBP2_MAX_SEG_SIZE 0xfffc
228
229/*
230 * There is no real limit on the queue depth (i.e. the length of the linked
231 * list of command ORBs) at the target. The chosen depth is merely an
232 * implementation detail of the sbp2 driver.
233 */
234#define SBP2_MAX_CMDS 8
235
236#define SBP2_SCSI_STATUS_GOOD 0x0
237#define SBP2_SCSI_STATUS_CHECK_CONDITION 0x2
238#define SBP2_SCSI_STATUS_CONDITION_MET 0x4
239#define SBP2_SCSI_STATUS_BUSY 0x8
240#define SBP2_SCSI_STATUS_RESERVATION_CONFLICT 0x18
241#define SBP2_SCSI_STATUS_COMMAND_TERMINATED 0x22
242#define SBP2_SCSI_STATUS_SELECTION_TIMEOUT 0xff
243
244
245/*
246 * Representations of commands and devices
247 */
248
249/* Per SCSI command */
250struct sbp2_command_info {
251 struct list_head list;
252 struct sbp2_command_orb command_orb;
253 dma_addr_t command_orb_dma;
254 struct scsi_cmnd *Current_SCpnt;
255 void (*Current_done)(struct scsi_cmnd *);
256
257 /* Also need s/g structure for each sbp2 command */
258 struct sbp2_unrestricted_page_table
259 scatter_gather_element[SG_ALL] __attribute__((aligned(8)));
260 dma_addr_t sge_dma;
261};
262
263/* Per FireWire host */
264struct sbp2_fwhost_info {
265 struct hpsb_host *host;
266 struct list_head logical_units;
267};
268
269/* Per logical unit */
270struct sbp2_lu {
271 /* Operation request blocks */
272 struct sbp2_command_orb *last_orb;
273 dma_addr_t last_orb_dma;
274 struct sbp2_login_orb *login_orb;
275 dma_addr_t login_orb_dma;
276 struct sbp2_login_response *login_response;
277 dma_addr_t login_response_dma;
278 struct sbp2_query_logins_orb *query_logins_orb;
279 dma_addr_t query_logins_orb_dma;
280 struct sbp2_query_logins_response *query_logins_response;
281 dma_addr_t query_logins_response_dma;
282 struct sbp2_reconnect_orb *reconnect_orb;
283 dma_addr_t reconnect_orb_dma;
284 struct sbp2_logout_orb *logout_orb;
285 dma_addr_t logout_orb_dma;
286 struct sbp2_status_block status_block;
287
288 /* How to talk to the unit */
289 u64 management_agent_addr;
290 u64 command_block_agent_addr;
291 u32 speed_code;
292 u32 max_payload_size;
293 u16 lun;
294
295 /* Address for the unit to write status blocks to */
296 u64 status_fifo_addr;
297
298 /* Waitqueue flag for logins, reconnects, logouts, query logins */
299 unsigned int access_complete:1;
300
301 /* Pool of command ORBs for this logical unit */
302 spinlock_t cmd_orb_lock;
303 struct list_head cmd_orb_inuse;
304 struct list_head cmd_orb_completed;
305
306 /* Backlink to FireWire host; list of units attached to the host */
307 struct sbp2_fwhost_info *hi;
308 struct list_head lu_list;
309
310 /* IEEE 1394 core's device representations */
311 struct node_entry *ne;
312 struct unit_directory *ud;
313
314 /* SCSI core's device representations */
315 struct scsi_device *sdev;
316 struct Scsi_Host *shost;
317
318 /* Device specific workarounds/brokenness */
319 unsigned workarounds;
320
321 /* Connection state */
322 atomic_t state;
323
324 /* For deferred requests to the fetch agent */
325 struct work_struct protocol_work;
326};
327
328/* For use in sbp2_lu.state */
329enum sbp2lu_state_types {
330 SBP2LU_STATE_RUNNING, /* all normal */
331 SBP2LU_STATE_IN_RESET, /* between bus reset and reconnect */
332 SBP2LU_STATE_IN_SHUTDOWN /* when sbp2_remove was called */
333};
334
335/* For use in sbp2_lu.workarounds and in the corresponding
336 * module load parameter */
337#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
338#define SBP2_WORKAROUND_INQUIRY_36 0x2
339#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
340#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
341#define SBP2_WORKAROUND_DELAY_INQUIRY 0x10
342#define SBP2_INQUIRY_DELAY 12
343#define SBP2_WORKAROUND_POWER_CONDITION 0x20
344#define SBP2_WORKAROUND_OVERRIDE 0x100
345
346#endif /* SBP2_H */
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
deleted file mode 100644
index 5c74f796d7f1..000000000000
--- a/drivers/ieee1394/video1394.c
+++ /dev/null
@@ -1,1528 +0,0 @@
1/*
2 * video1394.c - video driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 * NOTES:
21 *
22 * ioctl return codes:
23 * EFAULT is only for invalid address for the argp
24 * EINVAL for out of range values
25 * EBUSY when trying to use an already used resource
26 * ESRCH when trying to free/stop a not used resource
27 * EAGAIN for resource allocation failure that could perhaps succeed later
28 * ENOTTY for unsupported ioctl request
29 *
30 */
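/*
 * Illustration only -- a hypothetical user-space caller, not part of
 * this driver.  Given the conventions above, EAGAIN is the one
 * transient error worth retrying:
 *
 *	while (ioctl(fd, VIDEO1394_IOC_LISTEN_CHANNEL, &v) < 0) {
 *		if (errno != EAGAIN)
 *			break;		// EINVAL/EBUSY/EFAULT are final
 *		usleep(10 * 1000);	// a channel may free up later
 *	}
 */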
31#include <linux/kernel.h>
32#include <linux/list.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/interrupt.h>
36#include <linux/wait.h>
37#include <linux/errno.h>
38#include <linux/module.h>
39#include <linux/init.h>
40#include <linux/pci.h>
41#include <linux/fs.h>
42#include <linux/poll.h>
43#include <linux/delay.h>
44#include <linux/bitops.h>
45#include <linux/types.h>
46#include <linux/vmalloc.h>
47#include <linux/timex.h>
48#include <linux/mm.h>
49#include <linux/compat.h>
50#include <linux/cdev.h>
51
52#include "dma.h"
53#include "highlevel.h"
54#include "hosts.h"
55#include "ieee1394.h"
56#include "ieee1394_core.h"
57#include "ieee1394_hotplug.h"
58#include "ieee1394_types.h"
59#include "nodemgr.h"
60#include "ohci1394.h"
61#include "video1394.h"
62
63#define ISO_CHANNELS 64
64
65struct it_dma_prg {
66 struct dma_cmd begin;
67 quadlet_t data[4];
68 struct dma_cmd end;
69 quadlet_t pad[4]; /* FIXME: quick hack for memory alignment */
70};
71
72struct dma_iso_ctx {
73 struct ti_ohci *ohci;
74 int type; /* OHCI_ISO_TRANSMIT or OHCI_ISO_RECEIVE */
75 struct ohci1394_iso_tasklet iso_tasklet;
76 int channel;
77 int ctx;
78 int last_buffer;
79	int *next_buffer;	/* For ISO Transmit of video packets:
80				   used to write the correct SYT field
81				   into the next block */
82 unsigned int num_desc;
83 unsigned int buf_size;
84 unsigned int frame_size;
85 unsigned int packet_size;
86 unsigned int left_size;
87 unsigned int nb_cmd;
88
89 struct dma_region dma;
90
91 struct dma_prog_region *prg_reg;
92
93 struct dma_cmd **ir_prg;
94 struct it_dma_prg **it_prg;
95
96 unsigned int *buffer_status;
97 unsigned int *buffer_prg_assignment;
98 struct timeval *buffer_time; /* time when the buffer was received */
99	unsigned int *last_used_cmd; /* For ISO Transmit with
100					variable-sized packets only! */
101 int ctrlClear;
102 int ctrlSet;
103 int cmdPtr;
104 int ctxMatch;
105 wait_queue_head_t waitq;
106 spinlock_t lock;
107 unsigned int syt_offset;
108 int flags;
109
110 struct list_head link;
111};
112
113
114struct file_ctx {
115 struct ti_ohci *ohci;
116 struct list_head context_list;
117 struct dma_iso_ctx *current_ctx;
118};
119
120#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
121#define VIDEO1394_DEBUG
122#endif
123
124#ifdef DBGMSG
125#undef DBGMSG
126#endif
127
128#ifdef VIDEO1394_DEBUG
129#define DBGMSG(card, fmt, args...) \
130printk(KERN_INFO "video1394_%d: " fmt "\n" , card , ## args)
131#else
132#define DBGMSG(card, fmt, args...) do {} while (0)
133#endif
134
135/* print general (card independent) information */
136#define PRINT_G(level, fmt, args...) \
137printk(level "video1394: " fmt "\n" , ## args)
138
139/* print card specific information */
140#define PRINT(level, card, fmt, args...) \
141printk(level "video1394_%d: " fmt "\n" , card , ## args)
142
143static void wakeup_dma_ir_ctx(unsigned long l);
144static void wakeup_dma_it_ctx(unsigned long l);
145
146static struct hpsb_highlevel video1394_highlevel;
147
148static int free_dma_iso_ctx(struct dma_iso_ctx *d)
149{
150 int i;
151
152 DBGMSG(d->ohci->host->id, "Freeing dma_iso_ctx %d", d->ctx);
153
154 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
155 if (d->iso_tasklet.link.next != NULL)
156 ohci1394_unregister_iso_tasklet(d->ohci, &d->iso_tasklet);
157
158 dma_region_free(&d->dma);
159
160 if (d->prg_reg) {
161 for (i = 0; i < d->num_desc; i++)
162 dma_prog_region_free(&d->prg_reg[i]);
163 kfree(d->prg_reg);
164 }
165
166 kfree(d->ir_prg);
167 kfree(d->it_prg);
168 kfree(d->buffer_status);
169 kfree(d->buffer_prg_assignment);
170 kfree(d->buffer_time);
171 kfree(d->last_used_cmd);
172 kfree(d->next_buffer);
173 list_del(&d->link);
174 kfree(d);
175
176 return 0;
177}
178
179static struct dma_iso_ctx *
180alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
181 int buf_size, int channel, unsigned int packet_size)
182{
183 struct dma_iso_ctx *d;
184 int i;
185
186 d = kzalloc(sizeof(*d), GFP_KERNEL);
187 if (!d) {
188 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma_iso_ctx");
189 return NULL;
190 }
191
192 d->ohci = ohci;
193 d->type = type;
194 d->channel = channel;
195 d->num_desc = num_desc;
196 d->frame_size = buf_size;
197 d->buf_size = PAGE_ALIGN(buf_size);
198 d->last_buffer = -1;
199 INIT_LIST_HEAD(&d->link);
200 init_waitqueue_head(&d->waitq);
201
202 /* Init the regions for easy cleanup */
203 dma_region_init(&d->dma);
204
205 if (dma_region_alloc(&d->dma, (d->num_desc - 1) * d->buf_size, ohci->dev,
206 PCI_DMA_BIDIRECTIONAL)) {
207 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma buffer");
208 free_dma_iso_ctx(d);
209 return NULL;
210 }
211
212 if (type == OHCI_ISO_RECEIVE)
213 ohci1394_init_iso_tasklet(&d->iso_tasklet, type,
214 wakeup_dma_ir_ctx,
215 (unsigned long) d);
216 else
217 ohci1394_init_iso_tasklet(&d->iso_tasklet, type,
218 wakeup_dma_it_ctx,
219 (unsigned long) d);
220
221 if (ohci1394_register_iso_tasklet(ohci, &d->iso_tasklet) < 0) {
222 PRINT(KERN_ERR, ohci->host->id, "no free iso %s contexts",
223 type == OHCI_ISO_RECEIVE ? "receive" : "transmit");
224 free_dma_iso_ctx(d);
225 return NULL;
226 }
227 d->ctx = d->iso_tasklet.context;
228
229 d->prg_reg = kmalloc(d->num_desc * sizeof(*d->prg_reg), GFP_KERNEL);
230 if (!d->prg_reg) {
231		PRINT(KERN_ERR, ohci->host->id, "Failed to allocate prg regions");
232 free_dma_iso_ctx(d);
233 return NULL;
234 }
235 /* Makes for easier cleanup */
236 for (i = 0; i < d->num_desc; i++)
237 dma_prog_region_init(&d->prg_reg[i]);
238
239 if (type == OHCI_ISO_RECEIVE) {
240 d->ctrlSet = OHCI1394_IsoRcvContextControlSet+32*d->ctx;
241 d->ctrlClear = OHCI1394_IsoRcvContextControlClear+32*d->ctx;
242 d->cmdPtr = OHCI1394_IsoRcvCommandPtr+32*d->ctx;
243 d->ctxMatch = OHCI1394_IsoRcvContextMatch+32*d->ctx;
244
245 d->ir_prg = kzalloc(d->num_desc * sizeof(*d->ir_prg),
246 GFP_KERNEL);
247
248 if (!d->ir_prg) {
249 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg");
250 free_dma_iso_ctx(d);
251 return NULL;
252 }
253
254 d->nb_cmd = d->buf_size / PAGE_SIZE + 1;
255 d->left_size = (d->frame_size % PAGE_SIZE) ?
256 d->frame_size % PAGE_SIZE : PAGE_SIZE;
257
258		for (i = 0; i < d->num_desc; i++) {
259 if (dma_prog_region_alloc(&d->prg_reg[i], d->nb_cmd *
260 sizeof(struct dma_cmd), ohci->dev)) {
261 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg");
262 free_dma_iso_ctx(d);
263 return NULL;
264 }
265 d->ir_prg[i] = (struct dma_cmd *)d->prg_reg[i].kvirt;
266 }
267
268 } else { /* OHCI_ISO_TRANSMIT */
269 d->ctrlSet = OHCI1394_IsoXmitContextControlSet+16*d->ctx;
270 d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx;
271 d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx;
272
273 d->it_prg = kzalloc(d->num_desc * sizeof(*d->it_prg),
274 GFP_KERNEL);
275
276 if (!d->it_prg) {
277 PRINT(KERN_ERR, ohci->host->id,
278 "Failed to allocate dma it prg");
279 free_dma_iso_ctx(d);
280 return NULL;
281 }
282
283 d->packet_size = packet_size;
284
285		if (PAGE_SIZE % packet_size || packet_size > 4096) {
286			PRINT(KERN_ERR, ohci->host->id,
287			      "Packet size %d (page_size: %ld) "
288			      "not yet supported",
289			      packet_size, PAGE_SIZE);
290 free_dma_iso_ctx(d);
291 return NULL;
292 }
293
294 d->nb_cmd = d->frame_size / d->packet_size;
295 if (d->frame_size % d->packet_size) {
296 d->nb_cmd++;
297 d->left_size = d->frame_size % d->packet_size;
298 } else
299 d->left_size = d->packet_size;
300
301 for (i = 0; i < d->num_desc; i++) {
302 if (dma_prog_region_alloc(&d->prg_reg[i], d->nb_cmd *
303 sizeof(struct it_dma_prg), ohci->dev)) {
304 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma it prg");
305 free_dma_iso_ctx(d);
306 return NULL;
307 }
308 d->it_prg[i] = (struct it_dma_prg *)d->prg_reg[i].kvirt;
309 }
310 }
311
312 d->buffer_status =
313 kzalloc(d->num_desc * sizeof(*d->buffer_status), GFP_KERNEL);
314 d->buffer_prg_assignment =
315 kzalloc(d->num_desc * sizeof(*d->buffer_prg_assignment), GFP_KERNEL);
316 d->buffer_time =
317 kzalloc(d->num_desc * sizeof(*d->buffer_time), GFP_KERNEL);
318 d->last_used_cmd =
319 kzalloc(d->num_desc * sizeof(*d->last_used_cmd), GFP_KERNEL);
320 d->next_buffer =
321 kzalloc(d->num_desc * sizeof(*d->next_buffer), GFP_KERNEL);
322
323 if (!d->buffer_status || !d->buffer_prg_assignment || !d->buffer_time ||
324 !d->last_used_cmd || !d->next_buffer) {
325 PRINT(KERN_ERR, ohci->host->id,
326 "Failed to allocate dma_iso_ctx member");
327 free_dma_iso_ctx(d);
328 return NULL;
329 }
330
331 spin_lock_init(&d->lock);
332
333 DBGMSG(ohci->host->id, "Iso %s DMA: %d buffers "
334 "of size %d allocated for a frame size %d, each with %d prgs",
335 (type == OHCI_ISO_RECEIVE) ? "receive" : "transmit",
336 d->num_desc - 1, d->buf_size, d->frame_size, d->nb_cmd);
337
338 return d;
339}
340
341static void reset_ir_status(struct dma_iso_ctx *d, int n)
342{
343 int i;
344 d->ir_prg[n][0].status = cpu_to_le32(4);
345 d->ir_prg[n][1].status = cpu_to_le32(PAGE_SIZE-4);
346 for (i = 2; i < d->nb_cmd - 1; i++)
347 d->ir_prg[n][i].status = cpu_to_le32(PAGE_SIZE);
348 d->ir_prg[n][i].status = cpu_to_le32(d->left_size);
349}
350
351static void reprogram_dma_ir_prg(struct dma_iso_ctx *d, int n, int buffer, int flags)
352{
353 struct dma_cmd *ir_prg = d->ir_prg[n];
354 unsigned long buf = (unsigned long)d->dma.kvirt + buffer * d->buf_size;
355 int i;
356
357 d->buffer_prg_assignment[n] = buffer;
358
359 ir_prg[0].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma, buf -
360 (unsigned long)d->dma.kvirt));
361 ir_prg[1].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
362 (buf + 4) - (unsigned long)d->dma.kvirt));
363
364	for (i = 2; i < d->nb_cmd - 1; i++) {
365 ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
366 (buf+(i-1)*PAGE_SIZE) -
367 (unsigned long)d->dma.kvirt));
368 }
369
370 ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
371 DMA_CTL_IRQ | DMA_CTL_BRANCH | d->left_size);
372 ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
373 (buf+(i-1)*PAGE_SIZE) - (unsigned long)d->dma.kvirt));
374}
375
376static void initialize_dma_ir_prg(struct dma_iso_ctx *d, int n, int flags)
377{
378 struct dma_cmd *ir_prg = d->ir_prg[n];
379 struct dma_prog_region *ir_reg = &d->prg_reg[n];
380 unsigned long buf = (unsigned long)d->dma.kvirt;
381 int i;
382
383 /* the first descriptor will read only 4 bytes */
384 ir_prg[0].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
385 DMA_CTL_BRANCH | 4);
386
387 /* set the sync flag */
388 if (flags & VIDEO1394_SYNC_FRAMES)
389 ir_prg[0].control |= cpu_to_le32(DMA_CTL_WAIT);
390
391 ir_prg[0].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma, buf -
392 (unsigned long)d->dma.kvirt));
393 ir_prg[0].branchAddress = cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
394 1 * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
395
396	/* If the frame spans more than one DMA page (a single-page frame means d->nb_cmd == 2) */
397 if (d->nb_cmd > 2) {
398 /* The second descriptor will read PAGE_SIZE-4 bytes */
399 ir_prg[1].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
400 DMA_CTL_BRANCH | (PAGE_SIZE-4));
401 ir_prg[1].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma, (buf + 4) -
402 (unsigned long)d->dma.kvirt));
403 ir_prg[1].branchAddress = cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
404 2 * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
405
406 for (i = 2; i < d->nb_cmd - 1; i++) {
407 ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
408 DMA_CTL_BRANCH | PAGE_SIZE);
409 ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
410 (buf+(i-1)*PAGE_SIZE) -
411 (unsigned long)d->dma.kvirt));
412
413 ir_prg[i].branchAddress =
414 cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
415 (i + 1) * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
416 }
417
418 /* The last descriptor will generate an interrupt */
419 ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
420 DMA_CTL_IRQ | DMA_CTL_BRANCH | d->left_size);
421 ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
422 (buf+(i-1)*PAGE_SIZE) -
423 (unsigned long)d->dma.kvirt));
424 } else {
425 /* Only one DMA page is used. Read d->left_size immediately and */
426 /* generate an interrupt as this is also the last page. */
427 ir_prg[1].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
428 DMA_CTL_IRQ | DMA_CTL_BRANCH | (d->left_size-4));
429 ir_prg[1].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
430 (buf + 4) - (unsigned long)d->dma.kvirt));
431 }
432}
433
434static void initialize_dma_ir_ctx(struct dma_iso_ctx *d, int tag, int flags)
435{
436 struct ti_ohci *ohci = (struct ti_ohci *)d->ohci;
437 int i;
438
439 d->flags = flags;
440
441 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
442
443	for (i = 0; i < d->num_desc; i++) {
444 initialize_dma_ir_prg(d, i, flags);
445 reset_ir_status(d, i);
446 }
447
448 /* reset the ctrl register */
449 reg_write(ohci, d->ctrlClear, 0xf0000000);
450
451 /* Set bufferFill */
452 reg_write(ohci, d->ctrlSet, 0x80000000);
453
454 /* Set isoch header */
455 if (flags & VIDEO1394_INCLUDE_ISO_HEADERS)
456 reg_write(ohci, d->ctrlSet, 0x40000000);
457
458 /* Set the context match register to match on all tags,
459 sync for sync tag, and listen to d->channel */
460 reg_write(ohci, d->ctxMatch, 0xf0000000|((tag&0xf)<<8)|d->channel);
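	/*
	 * Per the OHCI 1394 IR ContextMatch layout: bits 31-28 enable
	 * matching of packet tag values 3..0 (0xf accepts every tag),
	 * bits 11-8 hold the sync value compared when a descriptor
	 * carries DMA_CTL_WAIT, and bits 5-0 select the isochronous
	 * channel to listen on.
	 */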
461
462 /* Set up isoRecvIntMask to generate interrupts */
463 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1<<d->ctx);
464}
465
466/* find which context is listening to this channel */
467static struct dma_iso_ctx *
468find_ctx(struct list_head *list, int type, int channel)
469{
470 struct dma_iso_ctx *ctx;
471
472 list_for_each_entry(ctx, list, link) {
473 if (ctx->type == type && ctx->channel == channel)
474 return ctx;
475 }
476
477 return NULL;
478}
479
480static void wakeup_dma_ir_ctx(unsigned long l)
481{
482 struct dma_iso_ctx *d = (struct dma_iso_ctx *) l;
483 int i;
484
485 spin_lock(&d->lock);
486
487 for (i = 0; i < d->num_desc; i++) {
488 if (d->ir_prg[i][d->nb_cmd-1].status & cpu_to_le32(0xFFFF0000)) {
489 reset_ir_status(d, i);
490 d->buffer_status[d->buffer_prg_assignment[i]] = VIDEO1394_BUFFER_READY;
491 do_gettimeofday(&d->buffer_time[d->buffer_prg_assignment[i]]);
492 dma_region_sync_for_cpu(&d->dma,
493 d->buffer_prg_assignment[i] * d->buf_size,
494 d->buf_size);
495 }
496 }
497
498 spin_unlock(&d->lock);
499
500 if (waitqueue_active(&d->waitq))
501 wake_up_interruptible(&d->waitq);
502}
503
504static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
505 int n)
506{
507 unsigned char* buf = d->dma.kvirt + n * d->buf_size;
508 u32 cycleTimer;
509 u32 timeStamp;
510
511 if (n == -1) {
512 return;
513 }
514
515 cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
516
517 timeStamp = ((cycleTimer & 0x0fff) + d->syt_offset); /* 11059 = 450 us */
518 timeStamp = (timeStamp % 3072 + ((timeStamp / 3072) << 12)
519 + (cycleTimer & 0xf000)) & 0xffff;
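	/*
	 * Worked example of the arithmetic above: the low 12 bits of
	 * cycleTimer are the cycle offset (0..3071 ticks at 24.576 MHz)
	 * and bits 15-12 the low nibble of the cycle count.  With
	 * cycleTimer = 0x5800 and syt_offset = 11000: 2048 + 11000 =
	 * 13048; 13048 % 3072 = 760 = 0x2f8 is the new offset, the
	 * 13048 / 3072 = 4 carried cycles land in bits 15-12, and adding
	 * the current cycle nibble 0x5 gives timeStamp = 0x92f8 -- i.e.
	 * "now" plus roughly syt_offset (~450 us), encoded as an SYT
	 * value of 4 cycle-count bits and 12 cycle-offset bits.
	 */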
520
521 buf[6] = timeStamp >> 8;
522 buf[7] = timeStamp & 0xff;
523
524	/* if the first packet is an empty packet, put the timestamp into the next full one too */
525 if ( (le32_to_cpu(d->it_prg[n][0].data[1]) >>16) == 0x008) {
526 buf += d->packet_size;
527 buf[6] = timeStamp >> 8;
528 buf[7] = timeStamp & 0xff;
529 }
530
531	/* also do the next buffer's frame in case of IRQ latency */
532 n = d->next_buffer[n];
533 if (n == -1) {
534 return;
535 }
536 buf = d->dma.kvirt + n * d->buf_size;
537
538 timeStamp += (d->last_used_cmd[n] << 12) & 0xffff;
539
540 buf[6] = timeStamp >> 8;
541 buf[7] = timeStamp & 0xff;
542
543	/* if the first packet is an empty packet, put the timestamp into the next full one too */
544 if ( (le32_to_cpu(d->it_prg[n][0].data[1]) >>16) == 0x008) {
545 buf += d->packet_size;
546 buf[6] = timeStamp >> 8;
547 buf[7] = timeStamp & 0xff;
548 }
549
550#if 0
551 printk("curr: %d, next: %d, cycleTimer: %08x timeStamp: %08x\n",
552 curr, n, cycleTimer, timeStamp);
553#endif
554}
555
556static void wakeup_dma_it_ctx(unsigned long l)
557{
558 struct dma_iso_ctx *d = (struct dma_iso_ctx *) l;
559 struct ti_ohci *ohci = d->ohci;
560 int i;
561
562 spin_lock(&d->lock);
563
564 for (i = 0; i < d->num_desc; i++) {
565 if (d->it_prg[i][d->last_used_cmd[i]].end.status &
566 cpu_to_le32(0xFFFF0000)) {
567 int next = d->next_buffer[i];
568 put_timestamp(ohci, d, next);
569 d->it_prg[i][d->last_used_cmd[i]].end.status = 0;
570 d->buffer_status[d->buffer_prg_assignment[i]] = VIDEO1394_BUFFER_READY;
571 }
572 }
573
574 spin_unlock(&d->lock);
575
576 if (waitqueue_active(&d->waitq))
577 wake_up_interruptible(&d->waitq);
578}
579
580static void reprogram_dma_it_prg(struct dma_iso_ctx *d, int n, int buffer)
581{
582 struct it_dma_prg *it_prg = d->it_prg[n];
583 unsigned long buf = (unsigned long)d->dma.kvirt + buffer * d->buf_size;
584 int i;
585
586 d->buffer_prg_assignment[n] = buffer;
587	for (i = 0; i < d->nb_cmd; i++) {
588 it_prg[i].end.address =
589 cpu_to_le32(dma_region_offset_to_bus(&d->dma,
590 (buf+i*d->packet_size) - (unsigned long)d->dma.kvirt));
591 }
592}
593
594static void initialize_dma_it_prg(struct dma_iso_ctx *d, int n, int sync_tag)
595{
596 struct it_dma_prg *it_prg = d->it_prg[n];
597 struct dma_prog_region *it_reg = &d->prg_reg[n];
598 unsigned long buf = (unsigned long)d->dma.kvirt;
599 int i;
600 d->last_used_cmd[n] = d->nb_cmd - 1;
601	for (i = 0; i < d->nb_cmd; i++) {
602
603 it_prg[i].begin.control = cpu_to_le32(DMA_CTL_OUTPUT_MORE |
604 DMA_CTL_IMMEDIATE | 8) ;
605 it_prg[i].begin.address = 0;
606
607 it_prg[i].begin.status = 0;
608
609 it_prg[i].data[0] = cpu_to_le32(
610 (IEEE1394_SPEED_100 << 16)
611 | (/* tag */ 1 << 14)
612 | (d->channel << 8)
613 | (TCODE_ISO_DATA << 4));
614		if (i == 0) it_prg[i].data[0] |= cpu_to_le32(sync_tag);
615 it_prg[i].data[1] = cpu_to_le32(d->packet_size << 16);
616 it_prg[i].data[2] = 0;
617 it_prg[i].data[3] = 0;
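		/*
		 * data[0] and data[1] form the isochronous packet header
		 * handed to the IT DMA engine as immediate data, as
		 * constructed above: speed in bits 18-16, tag in 15-14,
		 * channel in 13-8, tcode in 7-4, sy in 3-0 (only packet 0
		 * of a frame carries the sync tag), and the data length
		 * in the upper 16 bits of data[1].
		 */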
618
619 it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST |
620 DMA_CTL_BRANCH);
621 it_prg[i].end.address =
622 cpu_to_le32(dma_region_offset_to_bus(&d->dma, (buf+i*d->packet_size) -
623 (unsigned long)d->dma.kvirt));
624
625 if (i<d->nb_cmd-1) {
626 it_prg[i].end.control |= cpu_to_le32(d->packet_size);
627 it_prg[i].begin.branchAddress =
628 cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
629 sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
630 it_prg[i].end.branchAddress =
631 cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
632 sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
633 } else {
634 /* the last prg generates an interrupt */
635 it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
636 DMA_CTL_IRQ | d->left_size);
637 /* the last prg doesn't branch */
638 it_prg[i].begin.branchAddress = 0;
639 it_prg[i].end.branchAddress = 0;
640 }
641 it_prg[i].end.status = 0;
642 }
643}
644
645static void initialize_dma_it_prg_var_packet_queue(
646 struct dma_iso_ctx *d, int n, unsigned int * packet_sizes,
647 struct ti_ohci *ohci)
648{
649 struct it_dma_prg *it_prg = d->it_prg[n];
650 struct dma_prog_region *it_reg = &d->prg_reg[n];
651 int i;
652
653#if 0
654 if (n != -1) {
655 put_timestamp(ohci, d, n);
656 }
657#endif
658 d->last_used_cmd[n] = d->nb_cmd - 1;
659
660 for (i = 0; i < d->nb_cmd; i++) {
661 unsigned int size;
662 if (packet_sizes[i] > d->packet_size) {
663 size = d->packet_size;
664 } else {
665 size = packet_sizes[i];
666 }
667 it_prg[i].data[1] = cpu_to_le32(size << 16);
668 it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH);
669
670 if (i < d->nb_cmd-1 && packet_sizes[i+1] != 0) {
671 it_prg[i].end.control |= cpu_to_le32(size);
672 it_prg[i].begin.branchAddress =
673 cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
674 sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
675 it_prg[i].end.branchAddress =
676 cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
677 sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
678 } else {
679 /* the last prg generates an interrupt */
680 it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
681 DMA_CTL_IRQ | size);
682 /* the last prg doesn't branch */
683 it_prg[i].begin.branchAddress = 0;
684 it_prg[i].end.branchAddress = 0;
685 d->last_used_cmd[n] = i;
686 break;
687 }
688 }
689}
690
691static void initialize_dma_it_ctx(struct dma_iso_ctx *d, int sync_tag,
692 unsigned int syt_offset, int flags)
693{
694 struct ti_ohci *ohci = (struct ti_ohci *)d->ohci;
695 int i;
696
697 d->flags = flags;
698 d->syt_offset = (syt_offset == 0 ? 11000 : syt_offset);
699
700 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
701
702	for (i = 0; i < d->num_desc; i++)
703 initialize_dma_it_prg(d, i, sync_tag);
704
705	/* Set up isoXmitIntMask to generate interrupts */
706 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1<<d->ctx);
707}
708
709static inline unsigned video1394_buffer_state(struct dma_iso_ctx *d,
710 unsigned int buffer)
711{
712 unsigned long flags;
713 unsigned int ret;
714 spin_lock_irqsave(&d->lock, flags);
715 ret = d->buffer_status[buffer];
716 spin_unlock_irqrestore(&d->lock, flags);
717 return ret;
718}
719
720static long video1394_ioctl(struct file *file,
721 unsigned int cmd, unsigned long arg)
722{
723 struct file_ctx *ctx = file->private_data;
724 struct ti_ohci *ohci = ctx->ohci;
725 unsigned long flags;
726 void __user *argp = (void __user *)arg;
727
728 switch(cmd)
729 {
730 case VIDEO1394_IOC_LISTEN_CHANNEL:
731 case VIDEO1394_IOC_TALK_CHANNEL:
732 {
733 struct video1394_mmap v;
734 u64 mask;
735 struct dma_iso_ctx *d;
736 int i;
737
738 if (copy_from_user(&v, argp, sizeof(v)))
739 return -EFAULT;
740
741 /* if channel < 0, find lowest available one */
742 if (v.channel < 0) {
743 mask = (u64)0x1;
744 for (i=0; ; i++) {
745 if (i == ISO_CHANNELS) {
746 PRINT(KERN_ERR, ohci->host->id,
747 "No free channel found");
748 return -EAGAIN;
749 }
750 if (!(ohci->ISO_channel_usage & mask)) {
751 v.channel = i;
752 PRINT(KERN_INFO, ohci->host->id, "Found free channel %d", i);
753 break;
754 }
755 mask = mask << 1;
756 }
757 } else if (v.channel >= ISO_CHANNELS) {
758 PRINT(KERN_ERR, ohci->host->id,
759 "Iso channel %d out of bounds", v.channel);
760 return -EINVAL;
761 } else {
762 mask = (u64)0x1<<v.channel;
763 }
764		DBGMSG(ohci->host->id, "mask: %08X%08X usage: %08X%08X",
765 (u32)(mask>>32),(u32)(mask&0xffffffff),
766 (u32)(ohci->ISO_channel_usage>>32),
767 (u32)(ohci->ISO_channel_usage&0xffffffff));
768 if (ohci->ISO_channel_usage & mask) {
769 PRINT(KERN_ERR, ohci->host->id,
770 "Channel %d is already taken", v.channel);
771 return -EBUSY;
772 }
773
774 if (v.buf_size == 0 || v.buf_size > VIDEO1394_MAX_SIZE) {
775 PRINT(KERN_ERR, ohci->host->id,
776 "Invalid %d length buffer requested",v.buf_size);
777 return -EINVAL;
778 }
779
780 if (v.nb_buffers == 0 || v.nb_buffers > VIDEO1394_MAX_SIZE) {
781 PRINT(KERN_ERR, ohci->host->id,
782 "Invalid %d buffers requested",v.nb_buffers);
783 return -EINVAL;
784 }
785
786 if (v.nb_buffers * v.buf_size > VIDEO1394_MAX_SIZE) {
787 PRINT(KERN_ERR, ohci->host->id,
788 "%d buffers of size %d bytes is too big",
789 v.nb_buffers, v.buf_size);
790 return -EINVAL;
791 }
792
793 if (cmd == VIDEO1394_IOC_LISTEN_CHANNEL) {
794 d = alloc_dma_iso_ctx(ohci, OHCI_ISO_RECEIVE,
795 v.nb_buffers + 1, v.buf_size,
796 v.channel, 0);
797
798 if (d == NULL) {
799 PRINT(KERN_ERR, ohci->host->id,
800 "Couldn't allocate ir context");
801 return -EAGAIN;
802 }
803 initialize_dma_ir_ctx(d, v.sync_tag, v.flags);
804
805 ctx->current_ctx = d;
806
807 v.buf_size = d->buf_size;
808 list_add_tail(&d->link, &ctx->context_list);
809
810 DBGMSG(ohci->host->id,
811 "iso context %d listen on channel %d",
812 d->ctx, v.channel);
813 }
814 else {
815 d = alloc_dma_iso_ctx(ohci, OHCI_ISO_TRANSMIT,
816 v.nb_buffers + 1, v.buf_size,
817 v.channel, v.packet_size);
818
819 if (d == NULL) {
820 PRINT(KERN_ERR, ohci->host->id,
821 "Couldn't allocate it context");
822 return -EAGAIN;
823 }
824 initialize_dma_it_ctx(d, v.sync_tag,
825 v.syt_offset, v.flags);
826
827 ctx->current_ctx = d;
828
829 v.buf_size = d->buf_size;
830
831 list_add_tail(&d->link, &ctx->context_list);
832
833 DBGMSG(ohci->host->id,
834 "Iso context %d talk on channel %d", d->ctx,
835 v.channel);
836 }
837
838 if (copy_to_user(argp, &v, sizeof(v))) {
839 /* FIXME : free allocated dma resources */
840 return -EFAULT;
841 }
842
843 ohci->ISO_channel_usage |= mask;
844
845 return 0;
846 }
847 case VIDEO1394_IOC_UNLISTEN_CHANNEL:
848 case VIDEO1394_IOC_UNTALK_CHANNEL:
849 {
850 int channel;
851 u64 mask;
852 struct dma_iso_ctx *d;
853
854 if (copy_from_user(&channel, argp, sizeof(int)))
855 return -EFAULT;
856
857 if (channel < 0 || channel >= ISO_CHANNELS) {
858 PRINT(KERN_ERR, ohci->host->id,
859 "Iso channel %d out of bound", channel);
860 return -EINVAL;
861 }
862 mask = (u64)0x1<<channel;
863 if (!(ohci->ISO_channel_usage & mask)) {
864 PRINT(KERN_ERR, ohci->host->id,
865 "Channel %d is not being used", channel);
866 return -ESRCH;
867 }
868
869 /* Mark this channel as unused */
870 ohci->ISO_channel_usage &= ~mask;
871
872 if (cmd == VIDEO1394_IOC_UNLISTEN_CHANNEL)
873 d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, channel);
874 else
875 d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, channel);
876
877 if (d == NULL) return -ESRCH;
878		DBGMSG(ohci->host->id, "Iso context %d "
879		       "stopped using channel %d", d->ctx, channel);
880 free_dma_iso_ctx(d);
881
882 return 0;
883 }
884 case VIDEO1394_IOC_LISTEN_QUEUE_BUFFER:
885 {
886 struct video1394_wait v;
887 struct dma_iso_ctx *d;
888 int next_prg;
889
890 if (unlikely(copy_from_user(&v, argp, sizeof(v))))
891 return -EFAULT;
892
893 d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
894 if (unlikely(d == NULL))
895 return -EFAULT;
896
897 if (unlikely(v.buffer >= d->num_desc - 1)) {
898 PRINT(KERN_ERR, ohci->host->id,
899 "Buffer %d out of range",v.buffer);
900 return -EINVAL;
901 }
902
903 spin_lock_irqsave(&d->lock,flags);
904
905 if (unlikely(d->buffer_status[v.buffer]==VIDEO1394_BUFFER_QUEUED)) {
906 PRINT(KERN_ERR, ohci->host->id,
907 "Buffer %d is already used",v.buffer);
908 spin_unlock_irqrestore(&d->lock,flags);
909 return -EBUSY;
910 }
911
912 d->buffer_status[v.buffer]=VIDEO1394_BUFFER_QUEUED;
913
914 next_prg = (d->last_buffer + 1) % d->num_desc;
915 if (d->last_buffer>=0)
916 d->ir_prg[d->last_buffer][d->nb_cmd-1].branchAddress =
917 cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[next_prg], 0)
918 & 0xfffffff0) | 0x1);
919
920 d->last_buffer = next_prg;
921 reprogram_dma_ir_prg(d, d->last_buffer, v.buffer, d->flags);
922
923 d->ir_prg[d->last_buffer][d->nb_cmd-1].branchAddress = 0;
924
925 spin_unlock_irqrestore(&d->lock,flags);
926
927 if (!(reg_read(ohci, d->ctrlSet) & 0x8000))
928 {
929 DBGMSG(ohci->host->id, "Starting iso DMA ctx=%d",d->ctx);
930
931 /* Tell the controller where the first program is */
932 reg_write(ohci, d->cmdPtr,
933 dma_prog_region_offset_to_bus(&d->prg_reg[d->last_buffer], 0) | 0x1);
934
935 /* Run IR context */
936 reg_write(ohci, d->ctrlSet, 0x8000);
937 }
938 else {
939 /* Wake up dma context if necessary */
940 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
941 DBGMSG(ohci->host->id,
942 "Waking up iso dma ctx=%d", d->ctx);
943 reg_write(ohci, d->ctrlSet, 0x1000);
944 }
945 }
946 return 0;
947
948 }
949 case VIDEO1394_IOC_LISTEN_WAIT_BUFFER:
950 case VIDEO1394_IOC_LISTEN_POLL_BUFFER:
951 {
952 struct video1394_wait v;
953 struct dma_iso_ctx *d;
954 int i = 0;
955
956 if (unlikely(copy_from_user(&v, argp, sizeof(v))))
957 return -EFAULT;
958
959 d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
960 if (unlikely(d == NULL))
961 return -EFAULT;
962
963 if (unlikely(v.buffer > d->num_desc - 1)) {
964 PRINT(KERN_ERR, ohci->host->id,
965 "Buffer %d out of range",v.buffer);
966 return -EINVAL;
967 }
968
969		/*
970		 * The behaviour was changed so that this returns
971		 * the last received frame.
972		 */
973 spin_lock_irqsave(&d->lock, flags);
974 switch(d->buffer_status[v.buffer]) {
975 case VIDEO1394_BUFFER_READY:
976 d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
977 break;
978 case VIDEO1394_BUFFER_QUEUED:
979 if (cmd == VIDEO1394_IOC_LISTEN_POLL_BUFFER) {
980 /* for polling, return error code EINTR */
981 spin_unlock_irqrestore(&d->lock, flags);
982 return -EINTR;
983 }
984
985 spin_unlock_irqrestore(&d->lock, flags);
986 wait_event_interruptible(d->waitq,
987 video1394_buffer_state(d, v.buffer) ==
988 VIDEO1394_BUFFER_READY);
989 if (signal_pending(current))
990 return -EINTR;
991 spin_lock_irqsave(&d->lock, flags);
992 d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
993 break;
994 default:
995 PRINT(KERN_ERR, ohci->host->id,
996 "Buffer %d is not queued",v.buffer);
997 spin_unlock_irqrestore(&d->lock, flags);
998 return -ESRCH;
999 }
1000
1001 /* set time of buffer */
1002 v.filltime = d->buffer_time[v.buffer];
1003
1004 /*
1005 * Look ahead to see how many more buffers have been received
1006 */
1007 i=0;
1008 while (d->buffer_status[(v.buffer+1)%(d->num_desc - 1)]==
1009 VIDEO1394_BUFFER_READY) {
1010 v.buffer=(v.buffer+1)%(d->num_desc - 1);
1011 i++;
1012 }
1013 spin_unlock_irqrestore(&d->lock, flags);
1014
1015 v.buffer=i;
1016 if (unlikely(copy_to_user(argp, &v, sizeof(v))))
1017 return -EFAULT;
1018
1019 return 0;
1020 }
1021 case VIDEO1394_IOC_TALK_QUEUE_BUFFER:
1022 {
1023 struct video1394_wait v;
1024 unsigned int *psizes = NULL;
1025 struct dma_iso_ctx *d;
1026 int next_prg;
1027
1028 if (copy_from_user(&v, argp, sizeof(v)))
1029 return -EFAULT;
1030
1031 d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
1032 if (d == NULL) return -EFAULT;
1033
1034 if (v.buffer >= d->num_desc - 1) {
1035 PRINT(KERN_ERR, ohci->host->id,
1036 "Buffer %d out of range",v.buffer);
1037 return -EINVAL;
1038 }
1039
1040 if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
1041 int buf_size = d->nb_cmd * sizeof(*psizes);
1042 struct video1394_queue_variable __user *p = argp;
1043 unsigned int __user *qv;
1044
1045 if (get_user(qv, &p->packet_sizes))
1046 return -EFAULT;
1047
1048 psizes = memdup_user(qv, buf_size);
1049 if (IS_ERR(psizes))
1050 return PTR_ERR(psizes);
1051 }
1052
1053 spin_lock_irqsave(&d->lock,flags);
1054
1055 /* last_buffer is last_prg */
1056 next_prg = (d->last_buffer + 1) % d->num_desc;
1057 if (d->buffer_status[v.buffer]!=VIDEO1394_BUFFER_FREE) {
1058 PRINT(KERN_ERR, ohci->host->id,
1059 "Buffer %d is already used",v.buffer);
1060 spin_unlock_irqrestore(&d->lock,flags);
1061 kfree(psizes);
1062 return -EBUSY;
1063 }
1064
1065 if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
1066 initialize_dma_it_prg_var_packet_queue(
1067 d, next_prg, psizes, ohci);
1068 }
1069
1070 d->buffer_status[v.buffer]=VIDEO1394_BUFFER_QUEUED;
1071
1072 if (d->last_buffer >= 0) {
1073 d->it_prg[d->last_buffer]
1074 [ d->last_used_cmd[d->last_buffer] ].end.branchAddress =
1075 cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[next_prg],
1076 0) & 0xfffffff0) | 0x3);
1077
1078 d->it_prg[d->last_buffer]
1079 [ d->last_used_cmd[d->last_buffer] ].begin.branchAddress =
1080 cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[next_prg],
1081 0) & 0xfffffff0) | 0x3);
1082 d->next_buffer[d->last_buffer] = (v.buffer + 1) % (d->num_desc - 1);
1083 }
1084 d->last_buffer = next_prg;
1085 reprogram_dma_it_prg(d, d->last_buffer, v.buffer);
1086 d->next_buffer[d->last_buffer] = -1;
1087
1088 d->it_prg[d->last_buffer][d->last_used_cmd[d->last_buffer]].end.branchAddress = 0;
1089
1090 spin_unlock_irqrestore(&d->lock,flags);
1091
1092 if (!(reg_read(ohci, d->ctrlSet) & 0x8000))
1093 {
1094 DBGMSG(ohci->host->id, "Starting iso transmit DMA ctx=%d",
1095 d->ctx);
1096 put_timestamp(ohci, d, d->last_buffer);
1097 dma_region_sync_for_device(&d->dma,
1098 v.buffer * d->buf_size, d->buf_size);
1099
1100 /* Tell the controller where the first program is */
1101 reg_write(ohci, d->cmdPtr,
1102 dma_prog_region_offset_to_bus(&d->prg_reg[next_prg], 0) | 0x3);
1103
1104 /* Run IT context */
1105 reg_write(ohci, d->ctrlSet, 0x8000);
1106 }
1107 else {
1108 /* Wake up dma context if necessary */
1109 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
1110 DBGMSG(ohci->host->id,
1111 "Waking up iso transmit dma ctx=%d",
1112 d->ctx);
1113 put_timestamp(ohci, d, d->last_buffer);
1114 dma_region_sync_for_device(&d->dma,
1115 v.buffer * d->buf_size, d->buf_size);
1116
1117 reg_write(ohci, d->ctrlSet, 0x1000);
1118 }
1119 }
1120
1121 kfree(psizes);
1122 return 0;
1123
1124 }
1125 case VIDEO1394_IOC_TALK_WAIT_BUFFER:
1126 {
1127 struct video1394_wait v;
1128 struct dma_iso_ctx *d;
1129
1130 if (copy_from_user(&v, argp, sizeof(v)))
1131 return -EFAULT;
1132
1133 d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
1134 if (d == NULL) return -EFAULT;
1135
1136 if (v.buffer >= d->num_desc - 1) {
1137 PRINT(KERN_ERR, ohci->host->id,
1138 "Buffer %d out of range",v.buffer);
1139 return -EINVAL;
1140 }
1141
1142 switch(d->buffer_status[v.buffer]) {
1143 case VIDEO1394_BUFFER_READY:
1144 d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
1145 return 0;
1146 case VIDEO1394_BUFFER_QUEUED:
1147 wait_event_interruptible(d->waitq,
1148 (d->buffer_status[v.buffer] == VIDEO1394_BUFFER_READY));
1149 if (signal_pending(current))
1150 return -EINTR;
1151 d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
1152 return 0;
1153 default:
1154 PRINT(KERN_ERR, ohci->host->id,
1155 "Buffer %d is not queued",v.buffer);
1156 return -ESRCH;
1157 }
1158 }
1159 default:
1160 return -ENOTTY;
1161 }
1162}
1163
1164/*
1165 * This maps the vmalloced and reserved buffer to user space.
1166 *
1167 * FIXME:
1168 * - PAGE_READONLY should suffice!?
1169 *   - remap_pfn_range is rather inefficient for page-by-page remapping.
1170 * But e.g. pte_alloc() does not work in modules ... :-(
1171 */
1172
1173static int video1394_mmap(struct file *file, struct vm_area_struct *vma)
1174{
1175 struct file_ctx *ctx = file->private_data;
1176
1177 if (ctx->current_ctx == NULL) {
1178 PRINT(KERN_ERR, ctx->ohci->host->id,
1179 "Current iso context not set");
1180 return -EINVAL;
1181 }
1182
1183 return dma_region_mmap(&ctx->current_ctx->dma, file, vma);
1184}
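/*
 * Illustration only -- a hypothetical user-space receive loop, not part
 * of this driver, combining the ioctl and mmap entry points above
 * (FRAME_SIZE is an assumed application constant):
 *
 *	struct video1394_mmap m = {
 *		.channel    = -1,	// let the driver pick a channel
 *		.nb_buffers = 4,
 *		.buf_size   = FRAME_SIZE,
 *	};
 *	ioctl(fd, VIDEO1394_IOC_LISTEN_CHANNEL, &m);
 *	unsigned char *base = mmap(NULL, m.nb_buffers * m.buf_size,
 *				   PROT_READ, MAP_SHARED, fd, 0);
 *	for (unsigned int i = 0; ; i = (i + 1) % m.nb_buffers) {
 *		struct video1394_wait w = { .channel = m.channel,
 *					    .buffer  = i };
 *		ioctl(fd, VIDEO1394_IOC_LISTEN_QUEUE_BUFFER, &w);
 *		ioctl(fd, VIDEO1394_IOC_LISTEN_WAIT_BUFFER, &w);
 *		// frame data is now at base + i * m.buf_size
 *	}
 *
 * A real consumer would queue all buffers up front so DMA stays busy
 * while a frame is being processed; the strictly serial loop above is
 * only the minimal correct sequence.
 */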
1185
1186static unsigned int video1394_poll(struct file *file, poll_table *pt)
1187{
1188 struct file_ctx *ctx;
1189 unsigned int mask = 0;
1190 unsigned long flags;
1191 struct dma_iso_ctx *d;
1192 int i;
1193
1194 ctx = file->private_data;
1195 d = ctx->current_ctx;
1196 if (d == NULL) {
1197 PRINT(KERN_ERR, ctx->ohci->host->id,
1198 "Current iso context not set");
1199 return POLLERR;
1200 }
1201
1202 poll_wait(file, &d->waitq, pt);
1203
1204 spin_lock_irqsave(&d->lock, flags);
1205 for (i = 0; i < d->num_desc; i++) {
1206 if (d->buffer_status[i] == VIDEO1394_BUFFER_READY) {
1207 mask |= POLLIN | POLLRDNORM;
1208 break;
1209 }
1210 }
1211 spin_unlock_irqrestore(&d->lock, flags);
1212
1213 return mask;
1214}
1215
1216static int video1394_open(struct inode *inode, struct file *file)
1217{
1218 int i = ieee1394_file_to_instance(file);
1219 struct ti_ohci *ohci;
1220 struct file_ctx *ctx;
1221
1222 ohci = hpsb_get_hostinfo_bykey(&video1394_highlevel, i);
1223 if (ohci == NULL)
1224 return -EIO;
1225
1226 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1227 if (!ctx) {
1228 PRINT(KERN_ERR, ohci->host->id, "Cannot malloc file_ctx");
1229 return -ENOMEM;
1230 }
1231
1232 ctx->ohci = ohci;
1233 INIT_LIST_HEAD(&ctx->context_list);
1234 ctx->current_ctx = NULL;
1235 file->private_data = ctx;
1236
1237 return nonseekable_open(inode, file);
1238}
1239
1240static int video1394_release(struct inode *inode, struct file *file)
1241{
1242 struct file_ctx *ctx = file->private_data;
1243 struct ti_ohci *ohci = ctx->ohci;
1244 struct list_head *lh, *next;
1245 u64 mask;
1246
1247 list_for_each_safe(lh, next, &ctx->context_list) {
1248 struct dma_iso_ctx *d;
1249 d = list_entry(lh, struct dma_iso_ctx, link);
1250 mask = (u64) 1 << d->channel;
1251
1252 if (!(ohci->ISO_channel_usage & mask))
1253 PRINT(KERN_ERR, ohci->host->id, "On release: Channel %d "
1254 "is not being used", d->channel);
1255 else
1256 ohci->ISO_channel_usage &= ~mask;
1257 DBGMSG(ohci->host->id, "On release: Iso %s context "
1258 "%d stop listening on channel %d",
1259 d->type == OHCI_ISO_RECEIVE ? "receive" : "transmit",
1260 d->ctx, d->channel);
1261 free_dma_iso_ctx(d);
1262 }
1263
1264 kfree(ctx);
1265 file->private_data = NULL;
1266
1267 return 0;
1268}
1269
1270#ifdef CONFIG_COMPAT
1271static long video1394_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
1272#endif
1273
1274static struct cdev video1394_cdev;
1275static const struct file_operations video1394_fops=
1276{
1277 .owner = THIS_MODULE,
1278 .unlocked_ioctl = video1394_ioctl,
1279#ifdef CONFIG_COMPAT
1280 .compat_ioctl = video1394_compat_ioctl,
1281#endif
1282 .poll = video1394_poll,
1283 .mmap = video1394_mmap,
1284 .open = video1394_open,
1285 .release = video1394_release,
1286 .llseek = no_llseek,
1287};
1288
1289/*** HOTPLUG STUFF **********************************************************/
1290/*
1291 * Export information about protocols/devices supported by this driver.
1292 */
1293#ifdef MODULE
1294static const struct ieee1394_device_id video1394_id_table[] = {
1295 {
1296 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
1297 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
1298 .version = CAMERA_SW_VERSION_ENTRY & 0xffffff
1299 },
1300 {
1301 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
1302 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
1303 .version = (CAMERA_SW_VERSION_ENTRY + 1) & 0xffffff
1304 },
1305 {
1306 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
1307 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
1308 .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff
1309 },
1310 { }
1311};
1312
1313MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
1314#endif /* MODULE */
1315
1316static struct hpsb_protocol_driver video1394_driver = {
1317 .name = VIDEO1394_DRIVER_NAME,
1318};
1319
1320
1321static void video1394_add_host (struct hpsb_host *host)
1322{
1323 struct ti_ohci *ohci;
1324 int minor;
1325
1326 /* We only work with the OHCI-1394 driver */
1327 if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
1328 return;
1329
1330 ohci = (struct ti_ohci *)host->hostdata;
1331
1332 if (!hpsb_create_hostinfo(&video1394_highlevel, host, 0)) {
1333 PRINT(KERN_ERR, ohci->host->id, "Cannot allocate hostinfo");
1334 return;
1335 }
1336
1337 hpsb_set_hostinfo(&video1394_highlevel, host, ohci);
1338 hpsb_set_hostinfo_key(&video1394_highlevel, host, ohci->host->id);
1339
1340 minor = IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id;
1341 device_create(hpsb_protocol_class, NULL, MKDEV(IEEE1394_MAJOR, minor),
1342 NULL, "%s-%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
1343}
1344
1345
1346static void video1394_remove_host (struct hpsb_host *host)
1347{
1348 struct ti_ohci *ohci = hpsb_get_hostinfo(&video1394_highlevel, host);
1349
1350 if (ohci)
1351 device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR,
1352 IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id));
1353 return;
1354}
1355
1356
1357static struct hpsb_highlevel video1394_highlevel = {
1358 .name = VIDEO1394_DRIVER_NAME,
1359 .add_host = video1394_add_host,
1360 .remove_host = video1394_remove_host,
1361};
1362
1363MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
1364MODULE_DESCRIPTION("driver for digital video on OHCI board");
1365MODULE_SUPPORTED_DEVICE(VIDEO1394_DRIVER_NAME);
1366MODULE_LICENSE("GPL");
1367
1368#ifdef CONFIG_COMPAT
1369
1370#define VIDEO1394_IOC32_LISTEN_QUEUE_BUFFER \
1371 _IOW ('#', 0x12, struct video1394_wait32)
1372#define VIDEO1394_IOC32_LISTEN_WAIT_BUFFER \
1373 _IOWR('#', 0x13, struct video1394_wait32)
1374#define VIDEO1394_IOC32_TALK_WAIT_BUFFER \
1375 _IOW ('#', 0x17, struct video1394_wait32)
1376#define VIDEO1394_IOC32_LISTEN_POLL_BUFFER \
1377 _IOWR('#', 0x18, struct video1394_wait32)
1378
1379struct video1394_wait32 {
1380 u32 channel;
1381 u32 buffer;
1382 struct compat_timeval filltime;
1383};
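/*
 * The translation below is needed because struct timeval is built from
 * 32-bit fields for 32-bit user space but from 64-bit fields in a
 * 64-bit kernel, so struct video1394_wait changes size and layout
 * across the compat boundary.
 */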
1384
1385static int video1394_wr_wait32(struct file *file, unsigned int cmd, unsigned long arg)
1386{
1387 struct video1394_wait32 __user *argp = (void __user *)arg;
1388 struct video1394_wait32 wait32;
1389 struct video1394_wait wait;
1390 mm_segment_t old_fs;
1391 int ret;
1392
1393 if (copy_from_user(&wait32, argp, sizeof(wait32)))
1394 return -EFAULT;
1395
1396 wait.channel = wait32.channel;
1397 wait.buffer = wait32.buffer;
1398 wait.filltime.tv_sec = (time_t)wait32.filltime.tv_sec;
1399 wait.filltime.tv_usec = (suseconds_t)wait32.filltime.tv_usec;
1400
1401 old_fs = get_fs();
1402 set_fs(KERNEL_DS);
1403 if (cmd == VIDEO1394_IOC32_LISTEN_WAIT_BUFFER)
1404 ret = video1394_ioctl(file,
1405 VIDEO1394_IOC_LISTEN_WAIT_BUFFER,
1406 (unsigned long) &wait);
1407 else
1408 ret = video1394_ioctl(file,
1409 VIDEO1394_IOC_LISTEN_POLL_BUFFER,
1410 (unsigned long) &wait);
1411 set_fs(old_fs);
1412
1413 if (!ret) {
1414 wait32.channel = wait.channel;
1415 wait32.buffer = wait.buffer;
1416 wait32.filltime.tv_sec = (int)wait.filltime.tv_sec;
1417 wait32.filltime.tv_usec = (int)wait.filltime.tv_usec;
1418
1419 if (copy_to_user(argp, &wait32, sizeof(wait32)))
1420 ret = -EFAULT;
1421 }
1422
1423 return ret;
1424}
1425
1426static int video1394_w_wait32(struct file *file, unsigned int cmd, unsigned long arg)
1427{
1428 struct video1394_wait32 wait32;
1429 struct video1394_wait wait;
1430 mm_segment_t old_fs;
1431 int ret;
1432
1433 if (copy_from_user(&wait32, (void __user *)arg, sizeof(wait32)))
1434 return -EFAULT;
1435
1436 wait.channel = wait32.channel;
1437 wait.buffer = wait32.buffer;
1438 wait.filltime.tv_sec = (time_t)wait32.filltime.tv_sec;
1439 wait.filltime.tv_usec = (suseconds_t)wait32.filltime.tv_usec;
1440
1441 old_fs = get_fs();
1442 set_fs(KERNEL_DS);
1443 if (cmd == VIDEO1394_IOC32_LISTEN_QUEUE_BUFFER)
1444 ret = video1394_ioctl(file,
1445 VIDEO1394_IOC_LISTEN_QUEUE_BUFFER,
1446 (unsigned long) &wait);
1447 else
1448 ret = video1394_ioctl(file,
1449 VIDEO1394_IOC_TALK_WAIT_BUFFER,
1450 (unsigned long) &wait);
1451 set_fs(old_fs);
1452
1453 return ret;
1454}
1455
1456static int video1394_queue_buf32(struct file *file, unsigned int cmd, unsigned long arg)
1457{
1458	return -EFAULT;	/* ??? was there before; makes the call below unreachable. */
1459
1460 return video1394_ioctl(file,
1461 VIDEO1394_IOC_TALK_QUEUE_BUFFER, arg);
1462}
1463
1464static long video1394_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
1465{
1466 switch (cmd) {
1467 case VIDEO1394_IOC_LISTEN_CHANNEL:
1468 case VIDEO1394_IOC_UNLISTEN_CHANNEL:
1469 case VIDEO1394_IOC_TALK_CHANNEL:
1470 case VIDEO1394_IOC_UNTALK_CHANNEL:
1471 return video1394_ioctl(f, cmd, arg);
1472
1473 case VIDEO1394_IOC32_LISTEN_QUEUE_BUFFER:
1474 return video1394_w_wait32(f, cmd, arg);
1475 case VIDEO1394_IOC32_LISTEN_WAIT_BUFFER:
1476 return video1394_wr_wait32(f, cmd, arg);
1477 case VIDEO1394_IOC_TALK_QUEUE_BUFFER:
1478 return video1394_queue_buf32(f, cmd, arg);
1479 case VIDEO1394_IOC32_TALK_WAIT_BUFFER:
1480 return video1394_w_wait32(f, cmd, arg);
1481 case VIDEO1394_IOC32_LISTEN_POLL_BUFFER:
1482 return video1394_wr_wait32(f, cmd, arg);
1483 default:
1484 return -ENOIOCTLCMD;
1485 }
1486}
1487
1488#endif /* CONFIG_COMPAT */
1489
1490static void __exit video1394_exit_module (void)
1491{
1492 hpsb_unregister_protocol(&video1394_driver);
1493 hpsb_unregister_highlevel(&video1394_highlevel);
1494 cdev_del(&video1394_cdev);
1495 PRINT_G(KERN_INFO, "Removed " VIDEO1394_DRIVER_NAME " module");
1496}
1497
1498static int __init video1394_init_module (void)
1499{
1500 int ret;
1501
1502 hpsb_init_highlevel(&video1394_highlevel);
1503
1504 cdev_init(&video1394_cdev, &video1394_fops);
1505 video1394_cdev.owner = THIS_MODULE;
1506 ret = cdev_add(&video1394_cdev, IEEE1394_VIDEO1394_DEV, 16);
1507 if (ret) {
1508 PRINT_G(KERN_ERR, "video1394: unable to get minor device block");
1509 return ret;
1510 }
1511
1512 hpsb_register_highlevel(&video1394_highlevel);
1513
1514 ret = hpsb_register_protocol(&video1394_driver);
1515 if (ret) {
1516 PRINT_G(KERN_ERR, "video1394: failed to register protocol");
1517 hpsb_unregister_highlevel(&video1394_highlevel);
1518 cdev_del(&video1394_cdev);
1519 return ret;
1520 }
1521
1522 PRINT_G(KERN_INFO, "Installed " VIDEO1394_DRIVER_NAME " module");
1523 return 0;
1524}
1525
1526
1527module_init(video1394_init_module);
1528module_exit(video1394_exit_module);
diff --git a/drivers/ieee1394/video1394.h b/drivers/ieee1394/video1394.h
deleted file mode 100644
index 9a89d9cc3c85..000000000000
--- a/drivers/ieee1394/video1394.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * video1394.h - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#ifndef _VIDEO_1394_H
22#define _VIDEO_1394_H
23
24#include "ieee1394-ioctl.h"
25
26#define VIDEO1394_DRIVER_NAME "video1394"
27
28#define VIDEO1394_MAX_SIZE 0x4000000
29
30enum {
31 VIDEO1394_BUFFER_FREE = 0,
32 VIDEO1394_BUFFER_QUEUED,
33 VIDEO1394_BUFFER_READY
34};
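/*
 * Buffer life cycle, as implemented in video1394.c: FREE -> QUEUED via
 * the *_QUEUE_BUFFER ioctls, QUEUED -> READY from the ISO tasklets when
 * DMA on the buffer completes, and READY -> FREE when user space
 * collects the buffer with a *_WAIT_BUFFER or *_POLL_BUFFER ioctl.
 */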
35
36#define VIDEO1394_SYNC_FRAMES 0x00000001
37#define VIDEO1394_INCLUDE_ISO_HEADERS 0x00000002
38#define VIDEO1394_VARIABLE_PACKET_SIZE 0x00000004
39
40struct video1394_mmap {
41 int channel; /* -1 to find an open channel in LISTEN/TALK */
42 unsigned int sync_tag;
43 unsigned int nb_buffers;
44 unsigned int buf_size;
45 unsigned int packet_size; /* For VARIABLE_PACKET_SIZE:
46 Maximum packet size */
47 unsigned int fps;
48 unsigned int syt_offset;
49 unsigned int flags;
50};
51
52/* For use with TALK_QUEUE_BUFFER when VIDEO1394_VARIABLE_PACKET_SIZE is set */
53struct video1394_queue_variable {
54 unsigned int channel;
55 unsigned int buffer;
56	unsigned int __user *packet_sizes; /* Array of
57					      buf_size / packet_size entries */
58};
59
60struct video1394_wait {
61 unsigned int channel;
62 unsigned int buffer;
63 struct timeval filltime; /* time of buffer full */
64};
65
66
67#endif