aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@woody.linux-foundation.org>2007-04-30 11:59:57 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-04-30 11:59:57 -0400
commit40caf5ea5a7d47f8a33e26b63ca81dea4b5109d2 (patch)
tree3f879353d5cb69d2dee707108e4aaeae075f5a0c
parentd6454706c382ab74e2ecad7803c434cc6bd30343 (diff)
parentbcfd09ee48f77a4fe903dbc3757e7af931998ce1 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (56 commits) ieee1394: remove garbage from Kconfig ieee1394: more help in Kconfig ieee1394: ohci1394: Fix mistake in printk message. ieee1394: ohci1394: remove unnecessary rcvPhyPkt bit flipping in LinkControl register ieee1394: ohci1394: fix cosmetic problem in error logging ieee1394: eth1394: send async streams at S100 on 1394b buses ieee1394: eth1394: fix error path in module_init ieee1394: eth1394: correct return codes in hard_start_xmit ieee1394: eth1394: hard_start_xmit is called in atomic context ieee1394: eth1394: some conditions are unlikely ieee1394: eth1394: clean up fragment_overlap ieee1394: eth1394: don't use alloc_etherdev ieee1394: eth1394: omit useless set_mac_address callback ieee1394: eth1394: CONFIG_INET is always defined ieee1394: eth1394: allow MTU bigger than 1500 ieee1394: unexport highlevel_host_reset ieee1394: eth1394: contain host reset ieee1394: eth1394: shorter error messages ieee1394: eth1394: correct a memset argument ieee1394: eth1394: refactor .probe and .update ...
-rw-r--r--drivers/ieee1394/Kconfig52
-rw-r--r--drivers/ieee1394/config_roms.c93
-rw-r--r--drivers/ieee1394/config_roms.h20
-rw-r--r--drivers/ieee1394/csr1212.c870
-rw-r--r--drivers/ieee1394/csr1212.h483
-rw-r--r--drivers/ieee1394/dma.c24
-rw-r--r--drivers/ieee1394/dma.h22
-rw-r--r--drivers/ieee1394/eth1394.c798
-rw-r--r--drivers/ieee1394/eth1394.h25
-rw-r--r--drivers/ieee1394/highlevel.c89
-rw-r--r--drivers/ieee1394/highlevel.h55
-rw-r--r--drivers/ieee1394/hosts.c23
-rw-r--r--drivers/ieee1394/hosts.h10
-rw-r--r--drivers/ieee1394/ieee1394_core.c461
-rw-r--r--drivers/ieee1394/ieee1394_core.h100
-rw-r--r--drivers/ieee1394/ieee1394_transactions.c43
-rw-r--r--drivers/ieee1394/ieee1394_transactions.h20
-rw-r--r--drivers/ieee1394/iso.c85
-rw-r--r--drivers/ieee1394/iso.h35
-rw-r--r--drivers/ieee1394/nodemgr.c61
-rw-r--r--drivers/ieee1394/nodemgr.h24
-rw-r--r--drivers/ieee1394/ohci1394.c12
-rw-r--r--drivers/ieee1394/ohci1394.h4
-rw-r--r--drivers/ieee1394/raw1394.c3
-rw-r--r--drivers/ieee1394/sbp2.c39
-rw-r--r--drivers/ieee1394/sbp2.h8
26 files changed, 1446 insertions, 2013 deletions
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index cd84a55ecf20..61d7809a5a26 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -1,11 +1,8 @@
1# -*- shell-script -*-
2
3menu "IEEE 1394 (FireWire) support" 1menu "IEEE 1394 (FireWire) support"
4 2
5config IEEE1394 3config IEEE1394
6 tristate "IEEE 1394 (FireWire) support" 4 tristate "IEEE 1394 (FireWire) support"
7 depends on PCI || BROKEN 5 depends on PCI || BROKEN
8 select NET
9 help 6 help
10 IEEE 1394 describes a high performance serial bus, which is also 7 IEEE 1394 describes a high performance serial bus, which is also
11 known as FireWire(tm) or i.Link(tm) and is used for connecting all 8 known as FireWire(tm) or i.Link(tm) and is used for connecting all
@@ -35,24 +32,7 @@ config IEEE1394_VERBOSEDEBUG
35 Say Y if you really want or need the debugging output, everyone 32 Say Y if you really want or need the debugging output, everyone
36 else says N. 33 else says N.
37 34
38config IEEE1394_EXTRA_CONFIG_ROMS 35comment "Controllers"
39 bool "Build in extra config rom entries for certain functionality"
40 depends on IEEE1394
41 help
42 Some IEEE1394 functionality depends on extra config rom entries
43 being available in the host adapters CSR. These options will
44 allow you to choose which ones.
45
46config IEEE1394_CONFIG_ROM_IP1394
47 bool "IP-1394 Entry"
48 depends on IEEE1394_EXTRA_CONFIG_ROMS && IEEE1394
49 help
50 Adds an entry for using IP-over-1394. If you want to use your
51 IEEE1394 bus as a network for IP systems (including interacting
52 with MacOSX and WinXP IP-over-1394), enable this option and the
53 eth1394 option below.
54
55comment "Device Drivers"
56 depends on IEEE1394 36 depends on IEEE1394
57 37
58comment "Texas Instruments PCILynx requires I2C" 38comment "Texas Instruments PCILynx requires I2C"
@@ -70,6 +50,10 @@ config IEEE1394_PCILYNX
70 To compile this driver as a module, say M here: the 50 To compile this driver as a module, say M here: the
71 module will be called pcilynx. 51 module will be called pcilynx.
72 52
53 Only some old and now very rare PCI and CardBus cards and
54 PowerMacs G3 B&W contain the PCILynx controller. Therefore
55 almost everybody can say N here.
56
73config IEEE1394_OHCI1394 57config IEEE1394_OHCI1394
74 tristate "OHCI-1394 support" 58 tristate "OHCI-1394 support"
75 depends on PCI && IEEE1394 59 depends on PCI && IEEE1394
@@ -83,7 +67,7 @@ config IEEE1394_OHCI1394
83 To compile this driver as a module, say M here: the 67 To compile this driver as a module, say M here: the
84 module will be called ohci1394. 68 module will be called ohci1394.
85 69
86comment "Protocol Drivers" 70comment "Protocols"
87 depends on IEEE1394 71 depends on IEEE1394
88 72
89config IEEE1394_VIDEO1394 73config IEEE1394_VIDEO1394
@@ -121,11 +105,15 @@ config IEEE1394_SBP2_PHYS_DMA
121 This option is buggy and currently broken on some architectures. 105 This option is buggy and currently broken on some architectures.
122 If unsure, say N. 106 If unsure, say N.
123 107
108config IEEE1394_ETH1394_ROM_ENTRY
109 depends on IEEE1394
110 bool
111 default n
112
124config IEEE1394_ETH1394 113config IEEE1394_ETH1394
125 tristate "Ethernet over 1394" 114 tristate "IP over 1394"
126 depends on IEEE1394 && EXPERIMENTAL && INET 115 depends on IEEE1394 && EXPERIMENTAL && INET
127 select IEEE1394_CONFIG_ROM_IP1394 116 select IEEE1394_ETH1394_ROM_ENTRY
128 select IEEE1394_EXTRA_CONFIG_ROMS
129 help 117 help
130 This driver implements a functional majority of RFC 2734: IPv4 over 118 This driver implements a functional majority of RFC 2734: IPv4 over
131 1394. It will provide IP connectivity with implementations of RFC 119 1394. It will provide IP connectivity with implementations of RFC
@@ -134,6 +122,8 @@ config IEEE1394_ETH1394
134 This driver is still considered experimental. It does not yet support 122 This driver is still considered experimental. It does not yet support
135 MCAP, therefore multicast support is significantly limited. 123 MCAP, therefore multicast support is significantly limited.
136 124
125 The module is called eth1394 although it does not emulate Ethernet.
126
137config IEEE1394_DV1394 127config IEEE1394_DV1394
138 tristate "OHCI-DV I/O support (deprecated)" 128 tristate "OHCI-DV I/O support (deprecated)"
139 depends on IEEE1394 && IEEE1394_OHCI1394 129 depends on IEEE1394 && IEEE1394_OHCI1394
@@ -146,12 +136,12 @@ config IEEE1394_RAWIO
146 tristate "Raw IEEE1394 I/O support" 136 tristate "Raw IEEE1394 I/O support"
147 depends on IEEE1394 137 depends on IEEE1394
148 help 138 help
149 Say Y here if you want support for the raw device. This is generally 139 This option adds support for the raw1394 device file which enables
150 a good idea, so you should say Y here. The raw device enables 140 direct communication of user programs with the IEEE 1394 bus and thus
151 direct communication of user programs with the IEEE 1394 bus and 141 with the attached peripherals. Almost all application programs which
152 thus with the attached peripherals. 142 access FireWire require this option.
153 143
154 To compile this driver as a module, say M here: the 144 To compile this driver as a module, say M here: the module will be
155 module will be called raw1394. 145 called raw1394.
156 146
157endmenu 147endmenu
diff --git a/drivers/ieee1394/config_roms.c b/drivers/ieee1394/config_roms.c
index e2de6fa0c9fe..1b981207fa76 100644
--- a/drivers/ieee1394/config_roms.c
+++ b/drivers/ieee1394/config_roms.c
@@ -26,12 +26,6 @@ struct hpsb_config_rom_entry {
26 /* Base initialization, called at module load */ 26 /* Base initialization, called at module load */
27 int (*init)(void); 27 int (*init)(void);
28 28
29 /* Add entry to specified host */
30 int (*add)(struct hpsb_host *host);
31
32 /* Remove entry from specified host */
33 void (*remove)(struct hpsb_host *host);
34
35 /* Cleanup called at module exit */ 29 /* Cleanup called at module exit */
36 void (*cleanup)(void); 30 void (*cleanup)(void);
37 31
@@ -39,7 +33,7 @@ struct hpsb_config_rom_entry {
39 unsigned int flag; 33 unsigned int flag;
40}; 34};
41 35
42 36/* The default host entry. This must succeed. */
43int hpsb_default_host_entry(struct hpsb_host *host) 37int hpsb_default_host_entry(struct hpsb_host *host)
44{ 38{
45 struct csr1212_keyval *root; 39 struct csr1212_keyval *root;
@@ -63,9 +57,9 @@ int hpsb_default_host_entry(struct hpsb_host *host)
63 return -ENOMEM; 57 return -ENOMEM;
64 } 58 }
65 59
66 ret = csr1212_associate_keyval(vend_id, text); 60 csr1212_associate_keyval(vend_id, text);
67 csr1212_release_keyval(text); 61 csr1212_release_keyval(text);
68 ret |= csr1212_attach_keyval_to_directory(root, vend_id); 62 ret = csr1212_attach_keyval_to_directory(root, vend_id);
69 csr1212_release_keyval(vend_id); 63 csr1212_release_keyval(vend_id);
70 if (ret != CSR1212_SUCCESS) { 64 if (ret != CSR1212_SUCCESS) {
71 csr1212_destroy_csr(host->csr.rom); 65 csr1212_destroy_csr(host->csr.rom);
@@ -78,7 +72,7 @@ int hpsb_default_host_entry(struct hpsb_host *host)
78} 72}
79 73
80 74
81#ifdef CONFIG_IEEE1394_CONFIG_ROM_IP1394 75#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
82#include "eth1394.h" 76#include "eth1394.h"
83 77
84static struct csr1212_keyval *ip1394_ud; 78static struct csr1212_keyval *ip1394_ud;
@@ -103,10 +97,12 @@ static int config_rom_ip1394_init(void)
103 if (!ip1394_ud || !spec_id || !spec_desc || !ver || !ver_desc) 97 if (!ip1394_ud || !spec_id || !spec_desc || !ver || !ver_desc)
104 goto ip1394_fail; 98 goto ip1394_fail;
105 99
106 if (csr1212_associate_keyval(spec_id, spec_desc) == CSR1212_SUCCESS && 100 csr1212_associate_keyval(spec_id, spec_desc);
107 csr1212_associate_keyval(ver, ver_desc) == CSR1212_SUCCESS && 101 csr1212_associate_keyval(ver, ver_desc);
108 csr1212_attach_keyval_to_directory(ip1394_ud, spec_id) == CSR1212_SUCCESS && 102 if (csr1212_attach_keyval_to_directory(ip1394_ud, spec_id)
109 csr1212_attach_keyval_to_directory(ip1394_ud, ver) == CSR1212_SUCCESS) 103 == CSR1212_SUCCESS &&
104 csr1212_attach_keyval_to_directory(ip1394_ud, ver)
105 == CSR1212_SUCCESS)
110 ret = 0; 106 ret = 0;
111 107
112ip1394_fail: 108ip1394_fail:
@@ -135,7 +131,7 @@ static void config_rom_ip1394_cleanup(void)
135 } 131 }
136} 132}
137 133
138static int config_rom_ip1394_add(struct hpsb_host *host) 134int hpsb_config_rom_ip1394_add(struct hpsb_host *host)
139{ 135{
140 if (!ip1394_ud) 136 if (!ip1394_ud)
141 return -ENODEV; 137 return -ENODEV;
@@ -144,92 +140,55 @@ static int config_rom_ip1394_add(struct hpsb_host *host)
144 ip1394_ud) != CSR1212_SUCCESS) 140 ip1394_ud) != CSR1212_SUCCESS)
145 return -ENOMEM; 141 return -ENOMEM;
146 142
143 host->config_roms |= HPSB_CONFIG_ROM_ENTRY_IP1394;
144 host->update_config_rom = 1;
147 return 0; 145 return 0;
148} 146}
147EXPORT_SYMBOL_GPL(hpsb_config_rom_ip1394_add);
149 148
150static void config_rom_ip1394_remove(struct hpsb_host *host) 149void hpsb_config_rom_ip1394_remove(struct hpsb_host *host)
151{ 150{
152 csr1212_detach_keyval_from_directory(host->csr.rom->root_kv, ip1394_ud); 151 csr1212_detach_keyval_from_directory(host->csr.rom->root_kv, ip1394_ud);
152 host->config_roms &= ~HPSB_CONFIG_ROM_ENTRY_IP1394;
153 host->update_config_rom = 1;
153} 154}
155EXPORT_SYMBOL_GPL(hpsb_config_rom_ip1394_remove);
154 156
155static struct hpsb_config_rom_entry ip1394_entry = { 157static struct hpsb_config_rom_entry ip1394_entry = {
156 .name = "ip1394", 158 .name = "ip1394",
157 .init = config_rom_ip1394_init, 159 .init = config_rom_ip1394_init,
158 .add = config_rom_ip1394_add,
159 .remove = config_rom_ip1394_remove,
160 .cleanup = config_rom_ip1394_cleanup, 160 .cleanup = config_rom_ip1394_cleanup,
161 .flag = HPSB_CONFIG_ROM_ENTRY_IP1394, 161 .flag = HPSB_CONFIG_ROM_ENTRY_IP1394,
162}; 162};
163#endif /* CONFIG_IEEE1394_CONFIG_ROM_IP1394 */
164 163
164#endif /* CONFIG_IEEE1394_ETH1394_ROM_ENTRY */
165 165
166static struct hpsb_config_rom_entry *const config_rom_entries[] = { 166static struct hpsb_config_rom_entry *const config_rom_entries[] = {
167#ifdef CONFIG_IEEE1394_CONFIG_ROM_IP1394 167#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
168 &ip1394_entry, 168 &ip1394_entry,
169#endif 169#endif
170 NULL,
171}; 170};
172 171
173 172/* Initialize all config roms */
174int hpsb_init_config_roms(void) 173int hpsb_init_config_roms(void)
175{ 174{
176 int i, error = 0; 175 int i, error = 0;
177 176
178 for (i = 0; config_rom_entries[i]; i++) { 177 for (i = 0; i < ARRAY_SIZE(config_rom_entries); i++)
179 if (!config_rom_entries[i]->init)
180 continue;
181
182 if (config_rom_entries[i]->init()) { 178 if (config_rom_entries[i]->init()) {
183 HPSB_ERR("Failed to initialize config rom entry `%s'", 179 HPSB_ERR("Failed to initialize config rom entry `%s'",
184 config_rom_entries[i]->name); 180 config_rom_entries[i]->name);
185 error = -1; 181 error = -1;
186 } else
187 HPSB_DEBUG("Initialized config rom entry `%s'",
188 config_rom_entries[i]->name);
189 }
190
191 return error;
192}
193
194void hpsb_cleanup_config_roms(void)
195{
196 int i;
197
198 for (i = 0; config_rom_entries[i]; i++) {
199 if (config_rom_entries[i]->cleanup)
200 config_rom_entries[i]->cleanup();
201 }
202}
203
204int hpsb_add_extra_config_roms(struct hpsb_host *host)
205{
206 int i, error = 0;
207
208 for (i = 0; config_rom_entries[i]; i++) {
209 if (config_rom_entries[i]->add(host)) {
210 HPSB_ERR("fw-host%d: Failed to attach config rom entry `%s'",
211 host->id, config_rom_entries[i]->name);
212 error = -1;
213 } else {
214 host->config_roms |= config_rom_entries[i]->flag;
215 host->update_config_rom = 1;
216 } 182 }
217 }
218 183
219 return error; 184 return error;
220} 185}
221 186
222void hpsb_remove_extra_config_roms(struct hpsb_host *host) 187/* Cleanup all config roms */
188void hpsb_cleanup_config_roms(void)
223{ 189{
224 int i; 190 int i;
225 191
226 for (i = 0; config_rom_entries[i]; i++) { 192 for (i = 0; i < ARRAY_SIZE(config_rom_entries); i++)
227 if (!(host->config_roms & config_rom_entries[i]->flag)) 193 config_rom_entries[i]->cleanup();
228 continue;
229
230 config_rom_entries[i]->remove(host);
231
232 host->config_roms &= ~config_rom_entries[i]->flag;
233 host->update_config_rom = 1;
234 }
235} 194}
diff --git a/drivers/ieee1394/config_roms.h b/drivers/ieee1394/config_roms.h
index 0a70544cfe65..1f5cd1f16c44 100644
--- a/drivers/ieee1394/config_roms.h
+++ b/drivers/ieee1394/config_roms.h
@@ -1,27 +1,19 @@
1#ifndef _IEEE1394_CONFIG_ROMS_H 1#ifndef _IEEE1394_CONFIG_ROMS_H
2#define _IEEE1394_CONFIG_ROMS_H 2#define _IEEE1394_CONFIG_ROMS_H
3 3
4#include "ieee1394_types.h" 4struct hpsb_host;
5#include "hosts.h"
6 5
7/* The default host entry. This must succeed. */
8int hpsb_default_host_entry(struct hpsb_host *host); 6int hpsb_default_host_entry(struct hpsb_host *host);
9
10/* Initialize all config roms */
11int hpsb_init_config_roms(void); 7int hpsb_init_config_roms(void);
12
13/* Cleanup all config roms */
14void hpsb_cleanup_config_roms(void); 8void hpsb_cleanup_config_roms(void);
15 9
16/* Add extra config roms to specified host */
17int hpsb_add_extra_config_roms(struct hpsb_host *host);
18
19/* Remove extra config roms from specified host */
20void hpsb_remove_extra_config_roms(struct hpsb_host *host);
21
22
23/* List of flags to check if a host contains a certain extra config rom 10/* List of flags to check if a host contains a certain extra config rom
24 * entry. Available in the host->config_roms member. */ 11 * entry. Available in the host->config_roms member. */
25#define HPSB_CONFIG_ROM_ENTRY_IP1394 0x00000001 12#define HPSB_CONFIG_ROM_ENTRY_IP1394 0x00000001
26 13
14#ifdef CONFIG_IEEE1394_ETH1394_ROM_ENTRY
15int hpsb_config_rom_ip1394_add(struct hpsb_host *host);
16void hpsb_config_rom_ip1394_remove(struct hpsb_host *host);
17#endif
18
27#endif /* _IEEE1394_CONFIG_ROMS_H */ 19#endif /* _IEEE1394_CONFIG_ROMS_H */
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index c28f639823d2..d08166bda1c5 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -31,12 +31,13 @@
31/* TODO List: 31/* TODO List:
32 * - Verify interface consistency: i.e., public functions that take a size 32 * - Verify interface consistency: i.e., public functions that take a size
33 * parameter expect size to be in bytes. 33 * parameter expect size to be in bytes.
34 * - Convenience functions for reading a block of data from a given offset.
35 */ 34 */
36 35
37#ifndef __KERNEL__ 36#include <linux/errno.h>
38#include <string.h> 37#include <linux/kernel.h>
39#endif 38#include <linux/string.h>
39#include <asm/bug.h>
40#include <asm/byteorder.h>
40 41
41#include "csr1212.h" 42#include "csr1212.h"
42 43
@@ -46,7 +47,7 @@
46#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET) 47#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
47#define __D (1 << CSR1212_KV_TYPE_DIRECTORY) 48#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
48#define __L (1 << CSR1212_KV_TYPE_LEAF) 49#define __L (1 << CSR1212_KV_TYPE_LEAF)
49static const u_int8_t csr1212_key_id_type_map[0x30] = { 50static const u8 csr1212_key_id_type_map[0x30] = {
50 __C, /* used by Apple iSight */ 51 __C, /* used by Apple iSight */
51 __D | __L, /* Descriptor */ 52 __D | __L, /* Descriptor */
52 __I | __D | __L, /* Bus_Dependent_Info */ 53 __I | __D | __L, /* Bus_Dependent_Info */
@@ -82,10 +83,10 @@ static const u_int8_t csr1212_key_id_type_map[0x30] = {
82#undef __L 83#undef __L
83 84
84 85
85#define quads_to_bytes(_q) ((_q) * sizeof(u_int32_t)) 86#define quads_to_bytes(_q) ((_q) * sizeof(u32))
86#define bytes_to_quads(_b) (((_b) + sizeof(u_int32_t) - 1) / sizeof(u_int32_t)) 87#define bytes_to_quads(_b) (((_b) + sizeof(u32) - 1) / sizeof(u32))
87 88
88static inline void free_keyval(struct csr1212_keyval *kv) 89static void free_keyval(struct csr1212_keyval *kv)
89{ 90{
90 if ((kv->key.type == CSR1212_KV_TYPE_LEAF) && 91 if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
91 (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)) 92 (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
@@ -94,14 +95,14 @@ static inline void free_keyval(struct csr1212_keyval *kv)
94 CSR1212_FREE(kv); 95 CSR1212_FREE(kv);
95} 96}
96 97
97static u_int16_t csr1212_crc16(const u_int32_t *buffer, size_t length) 98static u16 csr1212_crc16(const u32 *buffer, size_t length)
98{ 99{
99 int shift; 100 int shift;
100 u_int32_t data; 101 u32 data;
101 u_int16_t sum, crc = 0; 102 u16 sum, crc = 0;
102 103
103 for (; length; length--) { 104 for (; length; length--) {
104 data = CSR1212_BE32_TO_CPU(*buffer); 105 data = be32_to_cpu(*buffer);
105 buffer++; 106 buffer++;
106 for (shift = 28; shift >= 0; shift -= 4 ) { 107 for (shift = 28; shift >= 0; shift -= 4 ) {
107 sum = ((crc >> 12) ^ (data >> shift)) & 0xf; 108 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
@@ -110,21 +111,18 @@ static u_int16_t csr1212_crc16(const u_int32_t *buffer, size_t length)
110 crc &= 0xffff; 111 crc &= 0xffff;
111 } 112 }
112 113
113 return CSR1212_CPU_TO_BE16(crc); 114 return cpu_to_be16(crc);
114} 115}
115 116
116#if 0 117/* Microsoft computes the CRC with the bytes in reverse order. */
117/* Microsoft computes the CRC with the bytes in reverse order. Therefore we 118static u16 csr1212_msft_crc16(const u32 *buffer, size_t length)
118 * have a special version of the CRC algorithm to account for their buggy
119 * software. */
120static u_int16_t csr1212_msft_crc16(const u_int32_t *buffer, size_t length)
121{ 119{
122 int shift; 120 int shift;
123 u_int32_t data; 121 u32 data;
124 u_int16_t sum, crc = 0; 122 u16 sum, crc = 0;
125 123
126 for (; length; length--) { 124 for (; length; length--) {
127 data = CSR1212_LE32_TO_CPU(*buffer); 125 data = le32_to_cpu(*buffer);
128 buffer++; 126 buffer++;
129 for (shift = 28; shift >= 0; shift -= 4 ) { 127 for (shift = 28; shift >= 0; shift -= 4 ) {
130 sum = ((crc >> 12) ^ (data >> shift)) & 0xf; 128 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
@@ -133,38 +131,35 @@ static u_int16_t csr1212_msft_crc16(const u_int32_t *buffer, size_t length)
133 crc &= 0xffff; 131 crc &= 0xffff;
134 } 132 }
135 133
136 return CSR1212_CPU_TO_BE16(crc); 134 return cpu_to_be16(crc);
137} 135}
138#endif
139 136
140static inline struct csr1212_dentry *csr1212_find_keyval(struct csr1212_keyval *dir, 137static struct csr1212_dentry *
141 struct csr1212_keyval *kv) 138csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
142{ 139{
143 struct csr1212_dentry *pos; 140 struct csr1212_dentry *pos;
144 141
145 for (pos = dir->value.directory.dentries_head; 142 for (pos = dir->value.directory.dentries_head;
146 pos != NULL; pos = pos->next) { 143 pos != NULL; pos = pos->next)
147 if (pos->kv == kv) 144 if (pos->kv == kv)
148 return pos; 145 return pos;
149 }
150 return NULL; 146 return NULL;
151} 147}
152 148
153 149static struct csr1212_keyval *
154static inline struct csr1212_keyval *csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, 150csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset)
155 u_int32_t offset)
156{ 151{
157 struct csr1212_keyval *kv; 152 struct csr1212_keyval *kv;
158 153
159 for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next) { 154 for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next)
160 if (kv->offset == offset) 155 if (kv->offset == offset)
161 return kv; 156 return kv;
162 }
163 return NULL; 157 return NULL;
164} 158}
165 159
166 160
167/* Creation Routines */ 161/* Creation Routines */
162
168struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops, 163struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
169 size_t bus_info_size, void *private) 164 size_t bus_info_size, void *private)
170{ 165{
@@ -202,27 +197,17 @@ struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
202 return csr; 197 return csr;
203} 198}
204 199
205
206
207void csr1212_init_local_csr(struct csr1212_csr *csr, 200void csr1212_init_local_csr(struct csr1212_csr *csr,
208 const u_int32_t *bus_info_data, int max_rom) 201 const u32 *bus_info_data, int max_rom)
209{ 202{
210 static const int mr_map[] = { 4, 64, 1024, 0 }; 203 static const int mr_map[] = { 4, 64, 1024, 0 };
211 204
212#ifdef __KERNEL__
213 BUG_ON(max_rom & ~0x3); 205 BUG_ON(max_rom & ~0x3);
214 csr->max_rom = mr_map[max_rom]; 206 csr->max_rom = mr_map[max_rom];
215#else
216 if (max_rom & ~0x3) /* caller supplied invalid argument */
217 csr->max_rom = 0;
218 else
219 csr->max_rom = mr_map[max_rom];
220#endif
221 memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len); 207 memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
222} 208}
223 209
224 210static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
225static struct csr1212_keyval *csr1212_new_keyval(u_int8_t type, u_int8_t key)
226{ 211{
227 struct csr1212_keyval *kv; 212 struct csr1212_keyval *kv;
228 213
@@ -246,10 +231,11 @@ static struct csr1212_keyval *csr1212_new_keyval(u_int8_t type, u_int8_t key)
246 return kv; 231 return kv;
247} 232}
248 233
249struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value) 234struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value)
250{ 235{
251 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key); 236 struct csr1212_keyval *kv;
252 237
238 kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
253 if (!kv) 239 if (!kv)
254 return NULL; 240 return NULL;
255 241
@@ -258,10 +244,12 @@ struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value)
258 return kv; 244 return kv;
259} 245}
260 246
261struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data, size_t data_len) 247static struct csr1212_keyval *
248csr1212_new_leaf(u8 key, const void *data, size_t data_len)
262{ 249{
263 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key); 250 struct csr1212_keyval *kv;
264 251
252 kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
265 if (!kv) 253 if (!kv)
266 return NULL; 254 return NULL;
267 255
@@ -285,10 +273,12 @@ struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data, size_t d
285 return kv; 273 return kv;
286} 274}
287 275
288struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key, u_int32_t csr_offset) 276static struct csr1212_keyval *
277csr1212_new_csr_offset(u8 key, u32 csr_offset)
289{ 278{
290 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key); 279 struct csr1212_keyval *kv;
291 280
281 kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
292 if (!kv) 282 if (!kv)
293 return NULL; 283 return NULL;
294 284
@@ -299,10 +289,11 @@ struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key, u_int32_t csr_offset
299 return kv; 289 return kv;
300} 290}
301 291
302struct csr1212_keyval *csr1212_new_directory(u_int8_t key) 292struct csr1212_keyval *csr1212_new_directory(u8 key)
303{ 293{
304 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key); 294 struct csr1212_keyval *kv;
305 295
296 kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
306 if (!kv) 297 if (!kv)
307 return NULL; 298 return NULL;
308 299
@@ -314,43 +305,29 @@ struct csr1212_keyval *csr1212_new_directory(u_int8_t key)
314 return kv; 305 return kv;
315} 306}
316 307
317int csr1212_associate_keyval(struct csr1212_keyval *kv, 308void csr1212_associate_keyval(struct csr1212_keyval *kv,
318 struct csr1212_keyval *associate) 309 struct csr1212_keyval *associate)
319{ 310{
320 if (!kv || !associate) 311 BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
321 return CSR1212_EINVAL; 312 (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
322 313 associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
323 if (kv->key.id == CSR1212_KV_ID_DESCRIPTOR || 314 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
324 (associate->key.id != CSR1212_KV_ID_DESCRIPTOR && 315 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
325 associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO && 316 associate->key.id < 0x30) ||
326 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY && 317 (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
327 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA && 318 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) ||
328 associate->key.id < 0x30)) 319 (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
329 return CSR1212_EINVAL; 320 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) ||
330 321 (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
331 if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID && 322 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) ||
332 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) 323 (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
333 return CSR1212_EINVAL; 324 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY));
334
335 if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
336 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA)
337 return CSR1212_EINVAL;
338
339 if (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
340 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID)
341 return CSR1212_EINVAL;
342
343 if (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
344 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY)
345 return CSR1212_EINVAL;
346 325
347 if (kv->associate) 326 if (kv->associate)
348 csr1212_release_keyval(kv->associate); 327 csr1212_release_keyval(kv->associate);
349 328
350 associate->refcnt++; 329 associate->refcnt++;
351 kv->associate = associate; 330 kv->associate = associate;
352
353 return CSR1212_SUCCESS;
354} 331}
355 332
356int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir, 333int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
@@ -358,12 +335,11 @@ int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
358{ 335{
359 struct csr1212_dentry *dentry; 336 struct csr1212_dentry *dentry;
360 337
361 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY) 338 BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY);
362 return CSR1212_EINVAL;
363 339
364 dentry = CSR1212_MALLOC(sizeof(*dentry)); 340 dentry = CSR1212_MALLOC(sizeof(*dentry));
365 if (!dentry) 341 if (!dentry)
366 return CSR1212_ENOMEM; 342 return -ENOMEM;
367 343
368 dentry->kv = kv; 344 dentry->kv = kv;
369 345
@@ -382,66 +358,22 @@ int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
382 return CSR1212_SUCCESS; 358 return CSR1212_SUCCESS;
383} 359}
384 360
385struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec, u_int32_t key, 361#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
386 u_int32_t value) 362 (&((kv)->value.leaf.data[1]))
387{ 363
388 struct csr1212_keyval *kvs, *kvk, *kvv; 364#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
389 365 ((kv)->value.leaf.data[0] = \
390 kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec); 366 cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
391 kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key); 367 ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
392 kvv = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_DATA, value); 368#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
393 369 ((kv)->value.leaf.data[0] = \
394 if (!kvs || !kvk || !kvv) { 370 cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
395 if (kvs) 371 CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
396 free_keyval(kvs); 372 ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
397 if (kvk) 373
398 free_keyval(kvk); 374static struct csr1212_keyval *
399 if (kvv) 375csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
400 free_keyval(kvv); 376 const void *data, size_t data_len)
401 return NULL;
402 }
403
404 /* Don't keep a local reference to the extended key or value. */
405 kvk->refcnt = 0;
406 kvv->refcnt = 0;
407
408 csr1212_associate_keyval(kvk, kvv);
409 csr1212_associate_keyval(kvs, kvk);
410
411 return kvs;
412}
413
414struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec, u_int32_t key,
415 const void *data, size_t data_len)
416{
417 struct csr1212_keyval *kvs, *kvk, *kvv;
418
419 kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
420 kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
421 kvv = csr1212_new_leaf(CSR1212_KV_ID_EXTENDED_DATA, data, data_len);
422
423 if (!kvs || !kvk || !kvv) {
424 if (kvs)
425 free_keyval(kvs);
426 if (kvk)
427 free_keyval(kvk);
428 if (kvv)
429 free_keyval(kvv);
430 return NULL;
431 }
432
433 /* Don't keep a local reference to the extended key or value. */
434 kvk->refcnt = 0;
435 kvv->refcnt = 0;
436
437 csr1212_associate_keyval(kvk, kvv);
438 csr1212_associate_keyval(kvs, kvk);
439
440 return kvs;
441}
442
443struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype, u_int32_t specifier_id,
444 const void *data, size_t data_len)
445{ 377{
446 struct csr1212_keyval *kv; 378 struct csr1212_keyval *kv;
447 379
@@ -453,197 +385,72 @@ struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype, u_int32_t spe
453 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype); 385 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
454 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id); 386 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
455 387
456 if (data) { 388 if (data)
457 memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len); 389 memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
458 }
459
460 return kv;
461}
462
463
464struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
465 u_int16_t cset,
466 u_int16_t language,
467 const void *data,
468 size_t data_len)
469{
470 struct csr1212_keyval *kv;
471 char *lstr;
472
473 kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
474 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
475 if (!kv)
476 return NULL;
477
478 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
479 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
480 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
481
482 lstr = (char*)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
483
484 /* make sure last quadlet is zeroed out */
485 *((u_int32_t*)&(lstr[(data_len - 1) & ~0x3])) = 0;
486
487 /* don't copy the NUL terminator */
488 memcpy(lstr, data, data_len);
489 390
490 return kv; 391 return kv;
491} 392}
492 393
394/* Check if string conforms to minimal ASCII as per IEEE 1212 clause 7.4 */
493static int csr1212_check_minimal_ascii(const char *s) 395static int csr1212_check_minimal_ascii(const char *s)
494{ 396{
495 static const char minimal_ascii_table[] = { 397 static const char minimal_ascii_table[] = {
496 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 398 /* 1 2 4 8 16 32 64 128 */
497 0x00, 0x00, 0x0a, 0x00, 0x0C, 0x0D, 0x00, 0x00, 399 128, /* --, --, --, --, --, --, --, 07, */
498 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 400 4 + 16 + 32, /* --, --, 0a, --, 0C, 0D, --, --, */
499 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 401 0, /* --, --, --, --, --, --, --, --, */
500 0x20, 0x21, 0x22, 0x00, 0x00, 0x25, 0x26, 0x27, 402 0, /* --, --, --, --, --, --, --, --, */
501 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 403 255 - 8 - 16, /* 20, 21, 22, --, --, 25, 26, 27, */
502 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 404 255, /* 28, 29, 2a, 2b, 2c, 2d, 2e, 2f, */
503 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 405 255, /* 30, 31, 32, 33, 34, 35, 36, 37, */
504 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 406 255, /* 38, 39, 3a, 3b, 3c, 3d, 3e, 3f, */
505 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 407 255, /* 40, 41, 42, 43, 44, 45, 46, 47, */
506 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 408 255, /* 48, 49, 4a, 4b, 4c, 4d, 4e, 4f, */
507 0x58, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5f, 409 255, /* 50, 51, 52, 53, 54, 55, 56, 57, */
508 0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 410 1 + 2 + 4 + 128, /* 58, 59, 5a, --, --, --, --, 5f, */
509 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 411 255 - 1, /* --, 61, 62, 63, 64, 65, 66, 67, */
510 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 412 255, /* 68, 69, 6a, 6b, 6c, 6d, 6e, 6f, */
511 0x78, 0x79, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00, 413 255, /* 70, 71, 72, 73, 74, 75, 76, 77, */
414 1 + 2 + 4, /* 78, 79, 7a, --, --, --, --, --, */
512 }; 415 };
416 int i, j;
417
513 for (; *s; s++) { 418 for (; *s; s++) {
514 if (minimal_ascii_table[*s & 0x7F] != *s) 419 i = *s >> 3; /* i = *s / 8; */
515 return -1; /* failed */ 420 j = 1 << (*s & 3); /* j = 1 << (*s % 8); */
421
422 if (i >= ARRAY_SIZE(minimal_ascii_table) ||
423 !(minimal_ascii_table[i] & j))
424 return -EINVAL;
516 } 425 }
517 /* String conforms to minimal-ascii, as specified by IEEE 1212,
518 * par. 7.4 */
519 return 0; 426 return 0;
520} 427}
521 428
429/* IEEE 1212 clause 7.5.4.1 textual descriptors (English, minimal ASCII) */
522struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s) 430struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
523{ 431{
524 /* Check if string conform to minimal_ascii format */
525 if (csr1212_check_minimal_ascii(s))
526 return NULL;
527
528 /* IEEE 1212, par. 7.5.4.1 Textual descriptors (minimal ASCII) */
529 return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
530}
531
532struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version,
533 u_int8_t palette_depth,
534 u_int8_t color_space,
535 u_int16_t language,
536 u_int16_t hscan,
537 u_int16_t vscan,
538 u_int32_t *palette,
539 u_int32_t *pixels)
540{
541 static const int pd[4] = { 0, 4, 16, 256 };
542 static const int cs[16] = { 4, 2 };
543 struct csr1212_keyval *kv; 432 struct csr1212_keyval *kv;
544 int palette_size; 433 u32 *text;
545 int pixel_size = (hscan * vscan + 3) & ~0x3; 434 size_t str_len, quads;
546 435
547 if (!pixels || (!palette && palette_depth) || 436 if (!s || !*s || csr1212_check_minimal_ascii(s))
548 (palette_depth & ~0x3) || (color_space & ~0xf))
549 return NULL; 437 return NULL;
550 438
551 palette_size = pd[palette_depth] * cs[color_space]; 439 str_len = strlen(s);
552 440 quads = bytes_to_quads(str_len);
553 kv = csr1212_new_descriptor_leaf(1, 0, NULL, 441 kv = csr1212_new_descriptor_leaf(0, 0, NULL, quads_to_bytes(quads) +
554 palette_size + pixel_size + 442 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
555 CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD);
556 if (!kv) 443 if (!kv)
557 return NULL; 444 return NULL;
558 445
559 CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version); 446 kv->value.leaf.data[1] = 0; /* width, character_set, language */
560 CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth); 447 text = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
561 CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space); 448 text[quads - 1] = 0; /* padding */
562 CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language); 449 memcpy(text, s, str_len);
563 CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan);
564 CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan);
565
566 if (palette_size)
567 memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv), palette,
568 palette_size);
569
570 memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(kv), pixels, pixel_size);
571
572 return kv;
573}
574
575struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
576 u_int64_t address)
577{
578 struct csr1212_keyval *kv;
579
580 /* IEEE 1212, par. 7.5.4.3 Modifiable descriptors */
581 kv = csr1212_new_leaf(CSR1212_KV_ID_MODIFIABLE_DESCRIPTOR, NULL, sizeof(u_int64_t));
582 if(!kv)
583 return NULL;
584
585 CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, max_size);
586 CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, address);
587 CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, address);
588 450
589 return kv; 451 return kv;
590} 452}
591 453
592static int csr1212_check_keyword(const char *s)
593{
594 for (; *s; s++) {
595
596 if (('A' <= *s) && (*s <= 'Z'))
597 continue;
598 if (('0' <= *s) && (*s <= '9'))
599 continue;
600 if (*s == '-')
601 continue;
602
603 return -1; /* failed */
604 }
605 /* String conforms to keyword, as specified by IEEE 1212,
606 * par. 7.6.5 */
607 return CSR1212_SUCCESS;
608}
609
610struct csr1212_keyval *csr1212_new_keyword_leaf(int strc, const char *strv[])
611{
612 struct csr1212_keyval *kv;
613 char *buffer;
614 int i, data_len = 0;
615
616 /* Check all keywords to see if they conform to restrictions:
617 * Only the following characters is allowed ['A'..'Z','0'..'9','-']
618 * Each word is zero-terminated.
619 * Also calculate the total length of the keywords.
620 */
621 for (i = 0; i < strc; i++) {
622 if (!strv[i] || csr1212_check_keyword(strv[i])) {
623 return NULL;
624 }
625 data_len += strlen(strv[i]) + 1; /* Add zero-termination char. */
626 }
627
628 /* IEEE 1212, par. 7.6.5 Keyword leaves */
629 kv = csr1212_new_leaf(CSR1212_KV_ID_KEYWORD, NULL, data_len);
630 if (!kv)
631 return NULL;
632
633 buffer = (char *)kv->value.leaf.data;
634
635 /* make sure last quadlet is zeroed out */
636 *((u_int32_t*)&(buffer[(data_len - 1) & ~0x3])) = 0;
637
638 /* Copy keyword(s) into leaf data buffer */
639 for (i = 0; i < strc; i++) {
640 int len = strlen(strv[i]) + 1;
641 memcpy(buffer, strv[i], len);
642 buffer += len;
643 }
644 return kv;
645}
646
647 454
648/* Destruction Routines */ 455/* Destruction Routines */
649 456
@@ -674,23 +481,12 @@ void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
674 csr1212_release_keyval(kv); 481 csr1212_release_keyval(kv);
675} 482}
676 483
677
678void csr1212_disassociate_keyval(struct csr1212_keyval *kv)
679{
680 if (kv->associate) {
681 csr1212_release_keyval(kv->associate);
682 }
683
684 kv->associate = NULL;
685}
686
687
688/* This function is used to free the memory taken by a keyval. If the given 484/* This function is used to free the memory taken by a keyval. If the given
689 * keyval is a directory type, then any keyvals contained in that directory 485 * keyval is a directory type, then any keyvals contained in that directory
690 * will be destroyed as well if their respective refcnts are 0. By means of 486 * will be destroyed as well if their respective refcnts are 0. By means of
691 * list manipulation, this routine will descend a directory structure in a 487 * list manipulation, this routine will descend a directory structure in a
692 * non-recursive manner. */ 488 * non-recursive manner. */
693void _csr1212_destroy_keyval(struct csr1212_keyval *kv) 489static void csr1212_destroy_keyval(struct csr1212_keyval *kv)
694{ 490{
695 struct csr1212_keyval *k, *a; 491 struct csr1212_keyval *k, *a;
696 struct csr1212_dentry dentry; 492 struct csr1212_dentry dentry;
@@ -715,11 +511,13 @@ void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
715 a = k->associate; 511 a = k->associate;
716 512
717 if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) { 513 if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
718 /* If the current entry is a directory, then move all 514 /* If the current entry is a directory, move all
719 * the entries to the destruction list. */ 515 * the entries to the destruction list. */
720 if (k->value.directory.dentries_head) { 516 if (k->value.directory.dentries_head) {
721 tail->next = k->value.directory.dentries_head; 517 tail->next =
722 k->value.directory.dentries_head->prev = tail; 518 k->value.directory.dentries_head;
519 k->value.directory.dentries_head->prev =
520 tail;
723 tail = k->value.directory.dentries_tail; 521 tail = k->value.directory.dentries_tail;
724 } 522 }
725 } 523 }
@@ -729,15 +527,22 @@ void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
729 527
730 head = head->next; 528 head = head->next;
731 if (head) { 529 if (head) {
732 if (head->prev && head->prev != &dentry) { 530 if (head->prev && head->prev != &dentry)
733 CSR1212_FREE(head->prev); 531 CSR1212_FREE(head->prev);
734 }
735 head->prev = NULL; 532 head->prev = NULL;
736 } else if (tail != &dentry) 533 } else if (tail != &dentry) {
737 CSR1212_FREE(tail); 534 CSR1212_FREE(tail);
535 }
738 } 536 }
739} 537}
740 538
539void csr1212_release_keyval(struct csr1212_keyval *kv)
540{
541 if (kv->refcnt > 1)
542 kv->refcnt--;
543 else
544 csr1212_destroy_keyval(kv);
545}
741 546
742void csr1212_destroy_csr(struct csr1212_csr *csr) 547void csr1212_destroy_csr(struct csr1212_csr *csr)
743{ 548{
@@ -763,49 +568,51 @@ void csr1212_destroy_csr(struct csr1212_csr *csr)
763} 568}
764 569
765 570
766
767/* CSR Image Creation */ 571/* CSR Image Creation */
768 572
769static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize) 573static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
770{ 574{
771 struct csr1212_csr_rom_cache *cache; 575 struct csr1212_csr_rom_cache *cache;
772 u_int64_t csr_addr; 576 u64 csr_addr;
773 577
774 if (!csr || !csr->ops || !csr->ops->allocate_addr_range || 578 BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range ||
775 !csr->ops->release_addr || csr->max_rom < 1) 579 !csr->ops->release_addr || csr->max_rom < 1);
776 return CSR1212_EINVAL;
777 580
778 /* ROM size must be a multiple of csr->max_rom */ 581 /* ROM size must be a multiple of csr->max_rom */
779 romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1); 582 romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
780 583
781 csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private); 584 csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom,
782 if (csr_addr == CSR1212_INVALID_ADDR_SPACE) { 585 csr->private);
783 return CSR1212_ENOMEM; 586 if (csr_addr == CSR1212_INVALID_ADDR_SPACE)
784 } 587 return -ENOMEM;
588
785 if (csr_addr < CSR1212_REGISTER_SPACE_BASE) { 589 if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
786 /* Invalid address returned from allocate_addr_range(). */ 590 /* Invalid address returned from allocate_addr_range(). */
787 csr->ops->release_addr(csr_addr, csr->private); 591 csr->ops->release_addr(csr_addr, csr->private);
788 return CSR1212_ENOMEM; 592 return -ENOMEM;
789 } 593 }
790 594
791 cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize); 595 cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE,
596 romsize);
792 if (!cache) { 597 if (!cache) {
793 csr->ops->release_addr(csr_addr, csr->private); 598 csr->ops->release_addr(csr_addr, csr->private);
794 return CSR1212_ENOMEM; 599 return -ENOMEM;
795 } 600 }
796 601
797 cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM); 602 cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF,
603 CSR1212_KV_ID_EXTENDED_ROM);
798 if (!cache->ext_rom) { 604 if (!cache->ext_rom) {
799 csr->ops->release_addr(csr_addr, csr->private); 605 csr->ops->release_addr(csr_addr, csr->private);
800 CSR1212_FREE(cache); 606 CSR1212_FREE(cache);
801 return CSR1212_ENOMEM; 607 return -ENOMEM;
802 } 608 }
803 609
804 if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) { 610 if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) !=
611 CSR1212_SUCCESS) {
805 csr1212_release_keyval(cache->ext_rom); 612 csr1212_release_keyval(cache->ext_rom);
806 csr->ops->release_addr(csr_addr, csr->private); 613 csr->ops->release_addr(csr_addr, csr->private);
807 CSR1212_FREE(cache); 614 CSR1212_FREE(cache);
808 return CSR1212_ENOMEM; 615 return -ENOMEM;
809 } 616 }
810 cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE; 617 cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
811 cache->ext_rom->value.leaf.len = -1; 618 cache->ext_rom->value.leaf.len = -1;
@@ -818,8 +625,8 @@ static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
818 return CSR1212_SUCCESS; 625 return CSR1212_SUCCESS;
819} 626}
820 627
821static inline void csr1212_remove_cache(struct csr1212_csr *csr, 628static void csr1212_remove_cache(struct csr1212_csr *csr,
822 struct csr1212_csr_rom_cache *cache) 629 struct csr1212_csr_rom_cache *cache)
823{ 630{
824 if (csr->cache_head == cache) 631 if (csr->cache_head == cache)
825 csr->cache_head = cache->next; 632 csr->cache_head = cache->next;
@@ -832,7 +639,8 @@ static inline void csr1212_remove_cache(struct csr1212_csr *csr,
832 cache->next->prev = cache->prev; 639 cache->next->prev = cache->prev;
833 640
834 if (cache->ext_rom) { 641 if (cache->ext_rom) {
835 csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom); 642 csr1212_detach_keyval_from_directory(csr->root_kv,
643 cache->ext_rom);
836 csr1212_release_keyval(cache->ext_rom); 644 csr1212_release_keyval(cache->ext_rom);
837 } 645 }
838 646
@@ -852,28 +660,29 @@ static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
852 dentry = dentry->next) { 660 dentry = dentry->next) {
853 for (dkv = dentry->kv; dkv; dkv = dkv->associate) { 661 for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
854 /* Special Case: Extended Key Specifier_ID */ 662 /* Special Case: Extended Key Specifier_ID */
855 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) { 663 if (dkv->key.id ==
856 if (last_extkey_spec == NULL) { 664 CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
665 if (last_extkey_spec == NULL)
857 last_extkey_spec = dkv; 666 last_extkey_spec = dkv;
858 } else if (dkv->value.immediate != last_extkey_spec->value.immediate) { 667 else if (dkv->value.immediate !=
668 last_extkey_spec->value.immediate)
859 last_extkey_spec = dkv; 669 last_extkey_spec = dkv;
860 } else { 670 else
861 continue; 671 continue;
862 }
863 /* Special Case: Extended Key */ 672 /* Special Case: Extended Key */
864 } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) { 673 } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
865 if (last_extkey == NULL) { 674 if (last_extkey == NULL)
866 last_extkey = dkv; 675 last_extkey = dkv;
867 } else if (dkv->value.immediate != last_extkey->value.immediate) { 676 else if (dkv->value.immediate !=
677 last_extkey->value.immediate)
868 last_extkey = dkv; 678 last_extkey = dkv;
869 } else { 679 else
870 continue; 680 continue;
871 }
872 } 681 }
873 682
874 num_entries += 1; 683 num_entries += 1;
875 684
876 switch(dkv->key.type) { 685 switch (dkv->key.type) {
877 default: 686 default:
878 case CSR1212_KV_TYPE_IMMEDIATE: 687 case CSR1212_KV_TYPE_IMMEDIATE:
879 case CSR1212_KV_TYPE_CSR_OFFSET: 688 case CSR1212_KV_TYPE_CSR_OFFSET:
@@ -891,8 +700,9 @@ static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
891 /* Special case: Extended ROM leafs */ 700 /* Special case: Extended ROM leafs */
892 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) { 701 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
893 dkv->value.leaf.len = -1; 702 dkv->value.leaf.len = -1;
894 /* Don't add Extended ROM leafs in the layout list, 703 /* Don't add Extended ROM leafs in the
895 * they are handled differently. */ 704 * layout list, they are handled
705 * differently. */
896 break; 706 break;
897 } 707 }
898 708
@@ -908,20 +718,21 @@ static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
908 return num_entries; 718 return num_entries;
909} 719}
910 720
911size_t csr1212_generate_layout_order(struct csr1212_keyval *kv) 721static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
912{ 722{
913 struct csr1212_keyval *ltail = kv; 723 struct csr1212_keyval *ltail = kv;
914 size_t agg_size = 0; 724 size_t agg_size = 0;
915 725
916 while(kv) { 726 while (kv) {
917 switch(kv->key.type) { 727 switch (kv->key.type) {
918 case CSR1212_KV_TYPE_LEAF: 728 case CSR1212_KV_TYPE_LEAF:
919 /* Add 1 quadlet for crc/len field */ 729 /* Add 1 quadlet for crc/len field */
920 agg_size += kv->value.leaf.len + 1; 730 agg_size += kv->value.leaf.len + 1;
921 break; 731 break;
922 732
923 case CSR1212_KV_TYPE_DIRECTORY: 733 case CSR1212_KV_TYPE_DIRECTORY:
924 kv->value.directory.len = csr1212_generate_layout_subdir(kv, &ltail); 734 kv->value.directory.len =
735 csr1212_generate_layout_subdir(kv, &ltail);
925 /* Add 1 quadlet for crc/len field */ 736 /* Add 1 quadlet for crc/len field */
926 agg_size += kv->value.directory.len + 1; 737 agg_size += kv->value.directory.len + 1;
927 break; 738 break;
@@ -931,9 +742,9 @@ size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
931 return quads_to_bytes(agg_size); 742 return quads_to_bytes(agg_size);
932} 743}
933 744
934struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache, 745static struct csr1212_keyval *
935 struct csr1212_keyval *start_kv, 746csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
936 int start_pos) 747 struct csr1212_keyval *start_kv, int start_pos)
937{ 748{
938 struct csr1212_keyval *kv = start_kv; 749 struct csr1212_keyval *kv = start_kv;
939 struct csr1212_keyval *okv = start_kv; 750 struct csr1212_keyval *okv = start_kv;
@@ -942,13 +753,12 @@ struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *
942 753
943 cache->layout_head = kv; 754 cache->layout_head = kv;
944 755
945 while(kv && pos < cache->size) { 756 while (kv && pos < cache->size) {
946 /* Special case: Extended ROM leafs */ 757 /* Special case: Extended ROM leafs */
947 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) { 758 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
948 kv->offset = cache->offset + pos; 759 kv->offset = cache->offset + pos;
949 }
950 760
951 switch(kv->key.type) { 761 switch (kv->key.type) {
952 case CSR1212_KV_TYPE_LEAF: 762 case CSR1212_KV_TYPE_LEAF:
953 kv_len = kv->value.leaf.len; 763 kv_len = kv->value.leaf.len;
954 break; 764 break;
@@ -959,6 +769,7 @@ struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *
959 769
960 default: 770 default:
961 /* Should never get here */ 771 /* Should never get here */
772 WARN_ON(1);
962 break; 773 break;
963 } 774 }
964 775
@@ -972,46 +783,55 @@ struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *
972 } 783 }
973 784
974 cache->layout_tail = okv; 785 cache->layout_tail = okv;
975 cache->len = (okv->offset - cache->offset) + quads_to_bytes(okv_len + 1); 786 cache->len = okv->offset - cache->offset + quads_to_bytes(okv_len + 1);
976 787
977 return kv; 788 return kv;
978} 789}
979 790
980static void csr1212_generate_tree_subdir(struct csr1212_keyval *dir, 791#define CSR1212_KV_KEY_SHIFT 24
981 u_int32_t *data_buffer) 792#define CSR1212_KV_KEY_TYPE_SHIFT 6
793#define CSR1212_KV_KEY_ID_MASK 0x3f
794#define CSR1212_KV_KEY_TYPE_MASK 0x3 /* after shift */
795
796static void
797csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer)
982{ 798{
983 struct csr1212_dentry *dentry; 799 struct csr1212_dentry *dentry;
984 struct csr1212_keyval *last_extkey_spec = NULL; 800 struct csr1212_keyval *last_extkey_spec = NULL;
985 struct csr1212_keyval *last_extkey = NULL; 801 struct csr1212_keyval *last_extkey = NULL;
986 int index = 0; 802 int index = 0;
987 803
988 for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) { 804 for (dentry = dir->value.directory.dentries_head;
805 dentry;
806 dentry = dentry->next) {
989 struct csr1212_keyval *a; 807 struct csr1212_keyval *a;
990 808
991 for (a = dentry->kv; a; a = a->associate) { 809 for (a = dentry->kv; a; a = a->associate) {
992 u_int32_t value = 0; 810 u32 value = 0;
993 811
994 /* Special Case: Extended Key Specifier_ID */ 812 /* Special Case: Extended Key Specifier_ID */
995 if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) { 813 if (a->key.id ==
996 if (last_extkey_spec == NULL) { 814 CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
815 if (last_extkey_spec == NULL)
997 last_extkey_spec = a; 816 last_extkey_spec = a;
998 } else if (a->value.immediate != last_extkey_spec->value.immediate) { 817 else if (a->value.immediate !=
818 last_extkey_spec->value.immediate)
999 last_extkey_spec = a; 819 last_extkey_spec = a;
1000 } else { 820 else
1001 continue; 821 continue;
1002 } 822
1003 /* Special Case: Extended Key */ 823 /* Special Case: Extended Key */
1004 } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) { 824 } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
1005 if (last_extkey == NULL) { 825 if (last_extkey == NULL)
1006 last_extkey = a; 826 last_extkey = a;
1007 } else if (a->value.immediate != last_extkey->value.immediate) { 827 else if (a->value.immediate !=
828 last_extkey->value.immediate)
1008 last_extkey = a; 829 last_extkey = a;
1009 } else { 830 else
1010 continue; 831 continue;
1011 }
1012 } 832 }
1013 833
1014 switch(a->key.type) { 834 switch (a->key.type) {
1015 case CSR1212_KV_TYPE_IMMEDIATE: 835 case CSR1212_KV_TYPE_IMMEDIATE:
1016 value = a->value.immediate; 836 value = a->value.immediate;
1017 break; 837 break;
@@ -1030,32 +850,46 @@ static void csr1212_generate_tree_subdir(struct csr1212_keyval *dir,
1030 break; 850 break;
1031 default: 851 default:
1032 /* Should never get here */ 852 /* Should never get here */
1033 break; /* GDB breakpoint */ 853 WARN_ON(1);
854 break;
1034 } 855 }
1035 856
1036 value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT; 857 value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) <<
858 CSR1212_KV_KEY_SHIFT;
1037 value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) << 859 value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
1038 (CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT); 860 (CSR1212_KV_KEY_SHIFT +
1039 data_buffer[index] = CSR1212_CPU_TO_BE32(value); 861 CSR1212_KV_KEY_TYPE_SHIFT);
862 data_buffer[index] = cpu_to_be32(value);
1040 index++; 863 index++;
1041 } 864 }
1042 } 865 }
1043} 866}
1044 867
1045void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache) 868struct csr1212_keyval_img {
869 u16 length;
870 u16 crc;
871
872 /* Must be last */
873 u32 data[0]; /* older gcc can't handle [] which is standard */
874};
875
876static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
1046{ 877{
1047 struct csr1212_keyval *kv, *nkv; 878 struct csr1212_keyval *kv, *nkv;
1048 struct csr1212_keyval_img *kvi; 879 struct csr1212_keyval_img *kvi;
1049 880
1050 for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) { 881 for (kv = cache->layout_head;
1051 kvi = (struct csr1212_keyval_img *) 882 kv != cache->layout_tail->next;
1052 (cache->data + bytes_to_quads(kv->offset - cache->offset)); 883 kv = nkv) {
1053 switch(kv->key.type) { 884 kvi = (struct csr1212_keyval_img *)(cache->data +
885 bytes_to_quads(kv->offset - cache->offset));
886 switch (kv->key.type) {
1054 default: 887 default:
1055 case CSR1212_KV_TYPE_IMMEDIATE: 888 case CSR1212_KV_TYPE_IMMEDIATE:
1056 case CSR1212_KV_TYPE_CSR_OFFSET: 889 case CSR1212_KV_TYPE_CSR_OFFSET:
1057 /* Should never get here */ 890 /* Should never get here */
1058 break; /* GDB breakpoint */ 891 WARN_ON(1);
892 break;
1059 893
1060 case CSR1212_KV_TYPE_LEAF: 894 case CSR1212_KV_TYPE_LEAF:
1061 /* Don't copy over Extended ROM areas, they are 895 /* Don't copy over Extended ROM areas, they are
@@ -1064,15 +898,16 @@ void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
1064 memcpy(kvi->data, kv->value.leaf.data, 898 memcpy(kvi->data, kv->value.leaf.data,
1065 quads_to_bytes(kv->value.leaf.len)); 899 quads_to_bytes(kv->value.leaf.len));
1066 900
1067 kvi->length = CSR1212_CPU_TO_BE16(kv->value.leaf.len); 901 kvi->length = cpu_to_be16(kv->value.leaf.len);
1068 kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len); 902 kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
1069 break; 903 break;
1070 904
1071 case CSR1212_KV_TYPE_DIRECTORY: 905 case CSR1212_KV_TYPE_DIRECTORY:
1072 csr1212_generate_tree_subdir(kv, kvi->data); 906 csr1212_generate_tree_subdir(kv, kvi->data);
1073 907
1074 kvi->length = CSR1212_CPU_TO_BE16(kv->value.directory.len); 908 kvi->length = cpu_to_be16(kv->value.directory.len);
1075 kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len); 909 kvi->crc = csr1212_crc16(kvi->data,
910 kv->value.directory.len);
1076 break; 911 break;
1077 } 912 }
1078 913
@@ -1086,6 +921,10 @@ void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
1086 } 921 }
1087} 922}
1088 923
924/* This size is arbitrarily chosen.
925 * The struct overhead is subtracted for more economic allocations. */
926#define CSR1212_EXTENDED_ROM_SIZE (2048 - sizeof(struct csr1212_csr_rom_cache))
927
1089int csr1212_generate_csr_image(struct csr1212_csr *csr) 928int csr1212_generate_csr_image(struct csr1212_csr *csr)
1090{ 929{
1091 struct csr1212_bus_info_block_img *bi; 930 struct csr1212_bus_info_block_img *bi;
@@ -1095,8 +934,7 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
1095 int ret; 934 int ret;
1096 int init_offset; 935 int init_offset;
1097 936
1098 if (!csr) 937 BUG_ON(!csr);
1099 return CSR1212_EINVAL;
1100 938
1101 cache = csr->cache_head; 939 cache = csr->cache_head;
1102 940
@@ -1113,18 +951,21 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
1113 951
1114 init_offset = csr->bus_info_len; 952 init_offset = csr->bus_info_len;
1115 953
1116 for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) { 954 for (kv = csr->root_kv, cache = csr->cache_head;
955 kv;
956 cache = cache->next) {
1117 if (!cache) { 957 if (!cache) {
1118 /* Estimate approximate number of additional cache 958 /* Estimate approximate number of additional cache
1119 * regions needed (it assumes that the cache holding 959 * regions needed (it assumes that the cache holding
1120 * the first 1K Config ROM space always exists). */ 960 * the first 1K Config ROM space always exists). */
1121 int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE - 961 int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
1122 (2 * sizeof(u_int32_t))) + 1; 962 (2 * sizeof(u32))) + 1;
1123 963
1124 /* Add additional cache regions, extras will be 964 /* Add additional cache regions, extras will be
1125 * removed later */ 965 * removed later */
1126 for (; est_c; est_c--) { 966 for (; est_c; est_c--) {
1127 ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE); 967 ret = csr1212_append_new_cache(csr,
968 CSR1212_EXTENDED_ROM_SIZE);
1128 if (ret != CSR1212_SUCCESS) 969 if (ret != CSR1212_SUCCESS)
1129 return ret; 970 return ret;
1130 } 971 }
@@ -1136,7 +977,7 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
1136 } 977 }
1137 kv = csr1212_generate_positions(cache, kv, init_offset); 978 kv = csr1212_generate_positions(cache, kv, init_offset);
1138 agg_size -= cache->len; 979 agg_size -= cache->len;
1139 init_offset = sizeof(u_int32_t); 980 init_offset = sizeof(u32);
1140 } 981 }
1141 982
1142 /* Remove unused, excess cache regions */ 983 /* Remove unused, excess cache regions */
@@ -1149,15 +990,14 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
1149 990
1150 /* Go through the list backward so that when done, the correct CRC 991 /* Go through the list backward so that when done, the correct CRC
1151 * will be calculated for the Extended ROM areas. */ 992 * will be calculated for the Extended ROM areas. */
1152 for(cache = csr->cache_tail; cache; cache = cache->prev) { 993 for (cache = csr->cache_tail; cache; cache = cache->prev) {
1153 /* Only Extended ROM caches should have this set. */ 994 /* Only Extended ROM caches should have this set. */
1154 if (cache->ext_rom) { 995 if (cache->ext_rom) {
1155 int leaf_size; 996 int leaf_size;
1156 997
1157 /* Make sure the Extended ROM leaf is a multiple of 998 /* Make sure the Extended ROM leaf is a multiple of
1158 * max_rom in size. */ 999 * max_rom in size. */
1159 if (csr->max_rom < 1) 1000 BUG_ON(csr->max_rom < 1);
1160 return CSR1212_EINVAL;
1161 leaf_size = (cache->len + (csr->max_rom - 1)) & 1001 leaf_size = (cache->len + (csr->max_rom - 1)) &
1162 ~(csr->max_rom - 1); 1002 ~(csr->max_rom - 1);
1163 1003
@@ -1166,7 +1006,7 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
1166 leaf_size - cache->len); 1006 leaf_size - cache->len);
1167 1007
1168 /* Subtract leaf header */ 1008 /* Subtract leaf header */
1169 leaf_size -= sizeof(u_int32_t); 1009 leaf_size -= sizeof(u32);
1170 1010
1171 /* Update the Extended ROM leaf length */ 1011 /* Update the Extended ROM leaf length */
1172 cache->ext_rom->value.leaf.len = 1012 cache->ext_rom->value.leaf.len =
@@ -1184,33 +1024,31 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
1184 /* Set the length and CRC of the extended ROM. */ 1024 /* Set the length and CRC of the extended ROM. */
1185 struct csr1212_keyval_img *kvi = 1025 struct csr1212_keyval_img *kvi =
1186 (struct csr1212_keyval_img*)cache->data; 1026 (struct csr1212_keyval_img*)cache->data;
1027 u16 len = bytes_to_quads(cache->len) - 1;
1187 1028
1188 kvi->length = CSR1212_CPU_TO_BE16(bytes_to_quads(cache->len) - 1); 1029 kvi->length = cpu_to_be16(len);
1189 kvi->crc = csr1212_crc16(kvi->data, 1030 kvi->crc = csr1212_crc16(kvi->data, len);
1190 bytes_to_quads(cache->len) - 1);
1191
1192 } 1031 }
1193 } 1032 }
1194 1033
1195 return CSR1212_SUCCESS; 1034 return CSR1212_SUCCESS;
1196} 1035}
1197 1036
1198int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer, u_int32_t len) 1037int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len)
1199{ 1038{
1200 struct csr1212_csr_rom_cache *cache; 1039 struct csr1212_csr_rom_cache *cache;
1201 1040
1202 for (cache = csr->cache_head; cache; cache = cache->next) { 1041 for (cache = csr->cache_head; cache; cache = cache->next)
1203 if (offset >= cache->offset && 1042 if (offset >= cache->offset &&
1204 (offset + len) <= (cache->offset + cache->size)) { 1043 (offset + len) <= (cache->offset + cache->size)) {
1205 memcpy(buffer, 1044 memcpy(buffer, &cache->data[
1206 &cache->data[bytes_to_quads(offset - cache->offset)], 1045 bytes_to_quads(offset - cache->offset)],
1207 len); 1046 len);
1208 return CSR1212_SUCCESS; 1047 return CSR1212_SUCCESS;
1209 } 1048 }
1210 }
1211 return CSR1212_ENOENT;
1212}
1213 1049
1050 return -ENOENT;
1051}
1214 1052
1215 1053
1216/* Parse a chunk of data as a Config ROM */ 1054/* Parse a chunk of data as a Config ROM */
@@ -1227,46 +1065,43 @@ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1227 * Unfortunately, many IEEE 1394 devices do not abide by that, so the 1065 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
1228 * bus info block will be read 1 quadlet at a time. The rest of the 1066 * bus info block will be read 1 quadlet at a time. The rest of the
1229 * ConfigROM will be read according to the max_rom field. */ 1067 * ConfigROM will be read according to the max_rom field. */
1230 for (i = 0; i < csr->bus_info_len; i += sizeof(csr1212_quad_t)) { 1068 for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
1231 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i, 1069 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1232 sizeof(csr1212_quad_t), 1070 sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
1233 &csr->cache_head->data[bytes_to_quads(i)], 1071 csr->private);
1234 csr->private);
1235 if (ret != CSR1212_SUCCESS) 1072 if (ret != CSR1212_SUCCESS)
1236 return ret; 1073 return ret;
1237 1074
1238 /* check ROM header's info_length */ 1075 /* check ROM header's info_length */
1239 if (i == 0 && 1076 if (i == 0 &&
1240 CSR1212_BE32_TO_CPU(csr->cache_head->data[0]) >> 24 != 1077 be32_to_cpu(csr->cache_head->data[0]) >> 24 !=
1241 bytes_to_quads(csr->bus_info_len) - 1) 1078 bytes_to_quads(csr->bus_info_len) - 1)
1242 return CSR1212_EINVAL; 1079 return -EINVAL;
1243 } 1080 }
1244 1081
1245 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data; 1082 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
1246 csr->crc_len = quads_to_bytes(bi->crc_length); 1083 csr->crc_len = quads_to_bytes(bi->crc_length);
1247 1084
1248 /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that is not 1085 /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that
1249 * always the case, so read the rest of the crc area 1 quadlet at a time. */ 1086 * is not always the case, so read the rest of the crc area 1 quadlet at
1250 for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(csr1212_quad_t)) { 1087 * a time. */
1088 for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
1251 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i, 1089 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1252 sizeof(csr1212_quad_t), 1090 sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
1253 &csr->cache_head->data[bytes_to_quads(i)], 1091 csr->private);
1254 csr->private);
1255 if (ret != CSR1212_SUCCESS) 1092 if (ret != CSR1212_SUCCESS)
1256 return ret; 1093 return ret;
1257 } 1094 }
1258 1095
1259#if 0 1096 /* Apparently there are many different wrong implementations of the CRC
1260 /* Apparently there are too many differnt wrong implementations of the 1097 * algorithm. We don't fail, we just warn. */
1261 * CRC algorithm that verifying them is moot. */
1262 if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) && 1098 if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
1263 (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc)) 1099 (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
1264 return CSR1212_EINVAL; 1100 printk(KERN_DEBUG "IEEE 1394 device has ROM CRC error\n");
1265#endif
1266 1101
1267 cr = CSR1212_MALLOC(sizeof(*cr)); 1102 cr = CSR1212_MALLOC(sizeof(*cr));
1268 if (!cr) 1103 if (!cr)
1269 return CSR1212_ENOMEM; 1104 return -ENOMEM;
1270 1105
1271 cr->next = NULL; 1106 cr->next = NULL;
1272 cr->prev = NULL; 1107 cr->prev = NULL;
@@ -1279,21 +1114,26 @@ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1279 return CSR1212_SUCCESS; 1114 return CSR1212_SUCCESS;
1280} 1115}
1281 1116
1282static int csr1212_parse_dir_entry(struct csr1212_keyval *dir, 1117#define CSR1212_KV_KEY(q) (be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT)
1283 csr1212_quad_t ki, 1118#define CSR1212_KV_KEY_TYPE(q) (CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
1284 u_int32_t kv_pos) 1119#define CSR1212_KV_KEY_ID(q) (CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
1120#define CSR1212_KV_VAL_MASK 0xffffff
1121#define CSR1212_KV_VAL(q) (be32_to_cpu(q) & CSR1212_KV_VAL_MASK)
1122
1123static int
1124csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
1285{ 1125{
1286 int ret = CSR1212_SUCCESS; 1126 int ret = CSR1212_SUCCESS;
1287 struct csr1212_keyval *k = NULL; 1127 struct csr1212_keyval *k = NULL;
1288 u_int32_t offset; 1128 u32 offset;
1289 1129
1290 switch(CSR1212_KV_KEY_TYPE(ki)) { 1130 switch (CSR1212_KV_KEY_TYPE(ki)) {
1291 case CSR1212_KV_TYPE_IMMEDIATE: 1131 case CSR1212_KV_TYPE_IMMEDIATE:
1292 k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki), 1132 k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
1293 CSR1212_KV_VAL(ki)); 1133 CSR1212_KV_VAL(ki));
1294 if (!k) { 1134 if (!k) {
1295 ret = CSR1212_ENOMEM; 1135 ret = -ENOMEM;
1296 goto fail; 1136 goto out;
1297 } 1137 }
1298 1138
1299 k->refcnt = 0; /* Don't keep local reference when parsing. */ 1139 k->refcnt = 0; /* Don't keep local reference when parsing. */
@@ -1303,8 +1143,8 @@ static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
1303 k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki), 1143 k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
1304 CSR1212_KV_VAL(ki)); 1144 CSR1212_KV_VAL(ki));
1305 if (!k) { 1145 if (!k) {
1306 ret = CSR1212_ENOMEM; 1146 ret = -ENOMEM;
1307 goto fail; 1147 goto out;
1308 } 1148 }
1309 k->refcnt = 0; /* Don't keep local reference when parsing. */ 1149 k->refcnt = 0; /* Don't keep local reference when parsing. */
1310 break; 1150 break;
@@ -1316,8 +1156,8 @@ static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
1316 /* Uh-oh. Can't have a relative offset of 0 for Leaves 1156 /* Uh-oh. Can't have a relative offset of 0 for Leaves
1317 * or Directories. The Config ROM image is most likely 1157 * or Directories. The Config ROM image is most likely
1318 * messed up, so we'll just abort here. */ 1158 * messed up, so we'll just abort here. */
1319 ret = CSR1212_EIO; 1159 ret = -EIO;
1320 goto fail; 1160 goto out;
1321 } 1161 }
1322 1162
1323 k = csr1212_find_keyval_offset(dir, offset); 1163 k = csr1212_find_keyval_offset(dir, offset);
@@ -1325,14 +1165,14 @@ static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
1325 if (k) 1165 if (k)
1326 break; /* Found it. */ 1166 break; /* Found it. */
1327 1167
1328 if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) { 1168 if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY)
1329 k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki)); 1169 k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
1330 } else { 1170 else
1331 k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0); 1171 k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
1332 } 1172
1333 if (!k) { 1173 if (!k) {
1334 ret = CSR1212_ENOMEM; 1174 ret = -ENOMEM;
1335 goto fail; 1175 goto out;
1336 } 1176 }
1337 k->refcnt = 0; /* Don't keep local reference when parsing. */ 1177 k->refcnt = 0; /* Don't keep local reference when parsing. */
1338 k->valid = 0; /* Contents not read yet so it's not valid. */ 1178 k->valid = 0; /* Contents not read yet so it's not valid. */
@@ -1344,16 +1184,12 @@ static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
1344 dir->next = k; 1184 dir->next = k;
1345 } 1185 }
1346 ret = csr1212_attach_keyval_to_directory(dir, k); 1186 ret = csr1212_attach_keyval_to_directory(dir, k);
1347 1187out:
1348fail: 1188 if (ret != CSR1212_SUCCESS && k != NULL)
1349 if (ret != CSR1212_SUCCESS) { 1189 free_keyval(k);
1350 if (k)
1351 free_keyval(k);
1352 }
1353 return ret; 1190 return ret;
1354} 1191}
1355 1192
1356
1357int csr1212_parse_keyval(struct csr1212_keyval *kv, 1193int csr1212_parse_keyval(struct csr1212_keyval *kv,
1358 struct csr1212_csr_rom_cache *cache) 1194 struct csr1212_csr_rom_cache *cache)
1359{ 1195{
@@ -1362,24 +1198,20 @@ int csr1212_parse_keyval(struct csr1212_keyval *kv,
1362 int ret = CSR1212_SUCCESS; 1198 int ret = CSR1212_SUCCESS;
1363 int kvi_len; 1199 int kvi_len;
1364 1200
1365 kvi = (struct csr1212_keyval_img*)&cache->data[bytes_to_quads(kv->offset - 1201 kvi = (struct csr1212_keyval_img*)
1366 cache->offset)]; 1202 &cache->data[bytes_to_quads(kv->offset - cache->offset)];
1367 kvi_len = CSR1212_BE16_TO_CPU(kvi->length); 1203 kvi_len = be16_to_cpu(kvi->length);
1368 1204
1369#if 0 1205 /* Apparently there are many different wrong implementations of the CRC
1370 /* Apparently there are too many differnt wrong implementations of the 1206 * algorithm. We don't fail, we just warn. */
1371 * CRC algorithm that verifying them is moot. */
1372 if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) && 1207 if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
1373 (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) { 1208 (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc))
1374 ret = CSR1212_EINVAL; 1209 printk(KERN_DEBUG "IEEE 1394 device has ROM CRC error\n");
1375 goto fail;
1376 }
1377#endif
1378 1210
1379 switch(kv->key.type) { 1211 switch (kv->key.type) {
1380 case CSR1212_KV_TYPE_DIRECTORY: 1212 case CSR1212_KV_TYPE_DIRECTORY:
1381 for (i = 0; i < kvi_len; i++) { 1213 for (i = 0; i < kvi_len; i++) {
1382 csr1212_quad_t ki = kvi->data[i]; 1214 u32 ki = kvi->data[i];
1383 1215
1384 /* Some devices put null entries in their unit 1216 /* Some devices put null entries in their unit
1385 * directories. If we come across such an entry, 1217 * directories. If we come across such an entry,
@@ -1387,76 +1219,72 @@ int csr1212_parse_keyval(struct csr1212_keyval *kv,
1387 if (ki == 0x0) 1219 if (ki == 0x0)
1388 continue; 1220 continue;
1389 ret = csr1212_parse_dir_entry(kv, ki, 1221 ret = csr1212_parse_dir_entry(kv, ki,
1390 (kv->offset + 1222 kv->offset + quads_to_bytes(i + 1));
1391 quads_to_bytes(i + 1)));
1392 } 1223 }
1393 kv->value.directory.len = kvi_len; 1224 kv->value.directory.len = kvi_len;
1394 break; 1225 break;
1395 1226
1396 case CSR1212_KV_TYPE_LEAF: 1227 case CSR1212_KV_TYPE_LEAF:
1397 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) { 1228 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
1398 kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len)); 1229 size_t size = quads_to_bytes(kvi_len);
1230
1231 kv->value.leaf.data = CSR1212_MALLOC(size);
1399 if (!kv->value.leaf.data) { 1232 if (!kv->value.leaf.data) {
1400 ret = CSR1212_ENOMEM; 1233 ret = -ENOMEM;
1401 goto fail; 1234 goto out;
1402 } 1235 }
1403 1236
1404 kv->value.leaf.len = kvi_len; 1237 kv->value.leaf.len = kvi_len;
1405 memcpy(kv->value.leaf.data, kvi->data, quads_to_bytes(kvi_len)); 1238 memcpy(kv->value.leaf.data, kvi->data, size);
1406 } 1239 }
1407 break; 1240 break;
1408 } 1241 }
1409 1242
1410 kv->valid = 1; 1243 kv->valid = 1;
1411 1244out:
1412fail:
1413 return ret; 1245 return ret;
1414} 1246}
1415 1247
1416 1248static int
1417int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv) 1249csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1418{ 1250{
1419 struct csr1212_cache_region *cr, *ncr, *newcr = NULL; 1251 struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
1420 struct csr1212_keyval_img *kvi = NULL; 1252 struct csr1212_keyval_img *kvi = NULL;
1421 struct csr1212_csr_rom_cache *cache; 1253 struct csr1212_csr_rom_cache *cache;
1422 int cache_index; 1254 int cache_index;
1423 u_int64_t addr; 1255 u64 addr;
1424 u_int32_t *cache_ptr; 1256 u32 *cache_ptr;
1425 u_int16_t kv_len = 0; 1257 u16 kv_len = 0;
1426 1258
1427 if (!csr || !kv || csr->max_rom < 1) 1259 BUG_ON(!csr || !kv || csr->max_rom < 1);
1428 return CSR1212_EINVAL;
1429 1260
1430 /* First find which cache the data should be in (or go in if not read 1261 /* First find which cache the data should be in (or go in if not read
1431 * yet). */ 1262 * yet). */
1432 for (cache = csr->cache_head; cache; cache = cache->next) { 1263 for (cache = csr->cache_head; cache; cache = cache->next)
1433 if (kv->offset >= cache->offset && 1264 if (kv->offset >= cache->offset &&
1434 kv->offset < (cache->offset + cache->size)) 1265 kv->offset < (cache->offset + cache->size))
1435 break; 1266 break;
1436 }
1437 1267
1438 if (!cache) { 1268 if (!cache) {
1439 csr1212_quad_t q; 1269 u32 q, cache_size;
1440 u_int32_t cache_size;
1441 1270
1442 /* Only create a new cache for Extended ROM leaves. */ 1271 /* Only create a new cache for Extended ROM leaves. */
1443 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) 1272 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1444 return CSR1212_EINVAL; 1273 return -EINVAL;
1445 1274
1446 if (csr->ops->bus_read(csr, 1275 if (csr->ops->bus_read(csr,
1447 CSR1212_REGISTER_SPACE_BASE + kv->offset, 1276 CSR1212_REGISTER_SPACE_BASE + kv->offset,
1448 sizeof(csr1212_quad_t), &q, csr->private)) { 1277 sizeof(u32), &q, csr->private))
1449 return CSR1212_EIO; 1278 return -EIO;
1450 }
1451 1279
1452 kv->value.leaf.len = CSR1212_BE32_TO_CPU(q) >> 16; 1280 kv->value.leaf.len = be32_to_cpu(q) >> 16;
1453 1281
1454 cache_size = (quads_to_bytes(kv->value.leaf.len + 1) + 1282 cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
1455 (csr->max_rom - 1)) & ~(csr->max_rom - 1); 1283 (csr->max_rom - 1)) & ~(csr->max_rom - 1);
1456 1284
1457 cache = csr1212_rom_cache_malloc(kv->offset, cache_size); 1285 cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
1458 if (!cache) 1286 if (!cache)
1459 return CSR1212_ENOMEM; 1287 return -ENOMEM;
1460 1288
1461 kv->value.leaf.data = &cache->data[1]; 1289 kv->value.leaf.data = &cache->data[1];
1462 csr->cache_tail->next = cache; 1290 csr->cache_tail->next = cache;
@@ -1465,12 +1293,11 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1465 csr->cache_tail = cache; 1293 csr->cache_tail = cache;
1466 cache->filled_head = 1294 cache->filled_head =
1467 CSR1212_MALLOC(sizeof(*cache->filled_head)); 1295 CSR1212_MALLOC(sizeof(*cache->filled_head));
1468 if (!cache->filled_head) { 1296 if (!cache->filled_head)
1469 return CSR1212_ENOMEM; 1297 return -ENOMEM;
1470 }
1471 1298
1472 cache->filled_head->offset_start = 0; 1299 cache->filled_head->offset_start = 0;
1473 cache->filled_head->offset_end = sizeof(csr1212_quad_t); 1300 cache->filled_head->offset_end = sizeof(u32);
1474 cache->filled_tail = cache->filled_head; 1301 cache->filled_tail = cache->filled_head;
1475 cache->filled_head->next = NULL; 1302 cache->filled_head->next = NULL;
1476 cache->filled_head->prev = NULL; 1303 cache->filled_head->prev = NULL;
@@ -1488,7 +1315,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1488 if (cache_index < cr->offset_start) { 1315 if (cache_index < cr->offset_start) {
1489 newcr = CSR1212_MALLOC(sizeof(*newcr)); 1316 newcr = CSR1212_MALLOC(sizeof(*newcr));
1490 if (!newcr) 1317 if (!newcr)
1491 return CSR1212_ENOMEM; 1318 return -ENOMEM;
1492 1319
1493 newcr->offset_start = cache_index & ~(csr->max_rom - 1); 1320 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1494 newcr->offset_end = newcr->offset_start; 1321 newcr->offset_end = newcr->offset_start;
@@ -1501,18 +1328,18 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1501 (cache_index < cr->offset_end)) { 1328 (cache_index < cr->offset_end)) {
1502 kvi = (struct csr1212_keyval_img*) 1329 kvi = (struct csr1212_keyval_img*)
1503 (&cache->data[bytes_to_quads(cache_index)]); 1330 (&cache->data[bytes_to_quads(cache_index)]);
1504 kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) + 1331 kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
1505 1);
1506 break; 1332 break;
1507 } else if (cache_index == cr->offset_end) 1333 } else if (cache_index == cr->offset_end) {
1508 break; 1334 break;
1335 }
1509 } 1336 }
1510 1337
1511 if (!cr) { 1338 if (!cr) {
1512 cr = cache->filled_tail; 1339 cr = cache->filled_tail;
1513 newcr = CSR1212_MALLOC(sizeof(*newcr)); 1340 newcr = CSR1212_MALLOC(sizeof(*newcr));
1514 if (!newcr) 1341 if (!newcr)
1515 return CSR1212_ENOMEM; 1342 return -ENOMEM;
1516 1343
1517 newcr->offset_start = cache_index & ~(csr->max_rom - 1); 1344 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1518 newcr->offset_end = newcr->offset_start; 1345 newcr->offset_end = newcr->offset_start;
@@ -1534,7 +1361,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1534 csr->private)) { 1361 csr->private)) {
1535 if (csr->max_rom == 4) 1362 if (csr->max_rom == 4)
1536 /* We've got problems! */ 1363 /* We've got problems! */
1537 return CSR1212_EIO; 1364 return -EIO;
1538 1365
1539		/* Apparently the max_rom value was a lie, set it to 1366		/* Apparently the max_rom value was a lie, set it to
1540 * do quadlet reads and try again. */ 1367 * do quadlet reads and try again. */
@@ -1548,8 +1375,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1548 if (!kvi && (cr->offset_end > cache_index)) { 1375 if (!kvi && (cr->offset_end > cache_index)) {
1549 kvi = (struct csr1212_keyval_img*) 1376 kvi = (struct csr1212_keyval_img*)
1550 (&cache->data[bytes_to_quads(cache_index)]); 1377 (&cache->data[bytes_to_quads(cache_index)]);
1551 kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) + 1378 kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
1552 1);
1553 } 1379 }
1554 1380
1555 if ((kv_len + (kv->offset - cache->offset)) > cache->size) { 1381 if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
@@ -1557,7 +1383,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1557 * beyond the ConfigROM image region and thus beyond the 1383 * beyond the ConfigROM image region and thus beyond the
1558 * end of our cache region. Therefore, we abort now 1384 * end of our cache region. Therefore, we abort now
1559 * rather than seg faulting later. */ 1385 * rather than seg faulting later. */
1560 return CSR1212_EIO; 1386 return -EIO;
1561 } 1387 }
1562 1388
1563 ncr = cr->next; 1389 ncr = cr->next;
@@ -1579,7 +1405,16 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1579 return csr1212_parse_keyval(kv, cache); 1405 return csr1212_parse_keyval(kv, cache);
1580} 1406}
1581 1407
1582 1408struct csr1212_keyval *
1409csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1410{
1411 if (!kv)
1412 return NULL;
1413 if (!kv->valid)
1414 if (csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
1415 return NULL;
1416 return kv;
1417}
1583 1418
1584int csr1212_parse_csr(struct csr1212_csr *csr) 1419int csr1212_parse_csr(struct csr1212_csr *csr)
1585{ 1420{
@@ -1587,20 +1422,19 @@ int csr1212_parse_csr(struct csr1212_csr *csr)
1587 struct csr1212_dentry *dentry; 1422 struct csr1212_dentry *dentry;
1588 int ret; 1423 int ret;
1589 1424
1590 if (!csr || !csr->ops || !csr->ops->bus_read) 1425 BUG_ON(!csr || !csr->ops || !csr->ops->bus_read);
1591 return CSR1212_EINVAL;
1592 1426
1593 ret = csr1212_parse_bus_info_block(csr); 1427 ret = csr1212_parse_bus_info_block(csr);
1594 if (ret != CSR1212_SUCCESS) 1428 if (ret != CSR1212_SUCCESS)
1595 return ret; 1429 return ret;
1596 1430
1597 if (!csr->ops->get_max_rom) 1431 if (!csr->ops->get_max_rom) {
1598 csr->max_rom = mr_map[0]; /* default value */ 1432 csr->max_rom = mr_map[0]; /* default value */
1599 else { 1433 } else {
1600 int i = csr->ops->get_max_rom(csr->bus_info_data, 1434 int i = csr->ops->get_max_rom(csr->bus_info_data,
1601 csr->private); 1435 csr->private);
1602 if (i & ~0x3) 1436 if (i & ~0x3)
1603 return CSR1212_EINVAL; 1437 return -EINVAL;
1604 csr->max_rom = mr_map[i]; 1438 csr->max_rom = mr_map[i];
1605 } 1439 }
1606 1440
@@ -1613,7 +1447,7 @@ int csr1212_parse_csr(struct csr1212_csr *csr)
1613 csr->root_kv->valid = 0; 1447 csr->root_kv->valid = 0;
1614 csr->root_kv->next = csr->root_kv; 1448 csr->root_kv->next = csr->root_kv;
1615 csr->root_kv->prev = csr->root_kv; 1449 csr->root_kv->prev = csr->root_kv;
1616 ret = _csr1212_read_keyval(csr, csr->root_kv); 1450 ret = csr1212_read_keyval(csr, csr->root_kv);
1617 if (ret != CSR1212_SUCCESS) 1451 if (ret != CSR1212_SUCCESS)
1618 return ret; 1452 return ret;
1619 1453
@@ -1623,7 +1457,7 @@ int csr1212_parse_csr(struct csr1212_csr *csr)
1623 dentry; dentry = dentry->next) { 1457 dentry; dentry = dentry->next) {
1624 if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM && 1458 if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
1625 !dentry->kv->valid) { 1459 !dentry->kv->valid) {
1626 ret = _csr1212_read_keyval(csr, dentry->kv); 1460 ret = csr1212_read_keyval(csr, dentry->kv);
1627 if (ret != CSR1212_SUCCESS) 1461 if (ret != CSR1212_SUCCESS)
1628 return ret; 1462 return ret;
1629 } 1463 }
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
index 17ddd72dee4e..df909ce66304 100644
--- a/drivers/ieee1394/csr1212.h
+++ b/drivers/ieee1394/csr1212.h
@@ -30,94 +30,13 @@
30#ifndef __CSR1212_H__ 30#ifndef __CSR1212_H__
31#define __CSR1212_H__ 31#define __CSR1212_H__
32 32
33
34/* Compatibility layer */
35#ifdef __KERNEL__
36
37#include <linux/types.h> 33#include <linux/types.h>
38#include <linux/slab.h> 34#include <linux/slab.h>
39#include <linux/interrupt.h>
40#include <linux/vmalloc.h>
41#include <asm/pgalloc.h>
42
43#define CSR1212_MALLOC(size) vmalloc((size))
44#define CSR1212_FREE(ptr) vfree(ptr)
45#define CSR1212_BE16_TO_CPU(quad) be16_to_cpu(quad)
46#define CSR1212_CPU_TO_BE16(quad) cpu_to_be16(quad)
47#define CSR1212_BE32_TO_CPU(quad) be32_to_cpu(quad)
48#define CSR1212_CPU_TO_BE32(quad) cpu_to_be32(quad)
49#define CSR1212_BE64_TO_CPU(quad) be64_to_cpu(quad)
50#define CSR1212_CPU_TO_BE64(quad) cpu_to_be64(quad)
51
52#define CSR1212_LE16_TO_CPU(quad) le16_to_cpu(quad)
53#define CSR1212_CPU_TO_LE16(quad) cpu_to_le16(quad)
54#define CSR1212_LE32_TO_CPU(quad) le32_to_cpu(quad)
55#define CSR1212_CPU_TO_LE32(quad) cpu_to_le32(quad)
56#define CSR1212_LE64_TO_CPU(quad) le64_to_cpu(quad)
57#define CSR1212_CPU_TO_LE64(quad) cpu_to_le64(quad)
58
59#include <linux/errno.h>
60#define CSR1212_SUCCESS (0)
61#define CSR1212_EINVAL (-EINVAL)
62#define CSR1212_ENOMEM (-ENOMEM)
63#define CSR1212_ENOENT (-ENOENT)
64#define CSR1212_EIO (-EIO)
65#define CSR1212_EBUSY (-EBUSY)
66
67#else /* Userspace */
68
69#include <sys/types.h>
70#include <malloc.h>
71#define CSR1212_MALLOC(size) malloc(size)
72#define CSR1212_FREE(ptr) free(ptr)
73#include <endian.h>
74#if __BYTE_ORDER == __LITTLE_ENDIAN
75#include <byteswap.h>
76#define CSR1212_BE16_TO_CPU(quad) bswap_16(quad)
77#define CSR1212_CPU_TO_BE16(quad) bswap_16(quad)
78#define CSR1212_BE32_TO_CPU(quad) bswap_32(quad)
79#define CSR1212_CPU_TO_BE32(quad) bswap_32(quad)
80#define CSR1212_BE64_TO_CPU(quad) bswap_64(quad)
81#define CSR1212_CPU_TO_BE64(quad) bswap_64(quad)
82
83#define CSR1212_LE16_TO_CPU(quad) (quad)
84#define CSR1212_CPU_TO_LE16(quad) (quad)
85#define CSR1212_LE32_TO_CPU(quad) (quad)
86#define CSR1212_CPU_TO_LE32(quad) (quad)
87#define CSR1212_LE64_TO_CPU(quad) (quad)
88#define CSR1212_CPU_TO_LE64(quad) (quad)
89#else
90#define CSR1212_BE16_TO_CPU(quad) (quad)
91#define CSR1212_CPU_TO_BE16(quad) (quad)
92#define CSR1212_BE32_TO_CPU(quad) (quad)
93#define CSR1212_CPU_TO_BE32(quad) (quad)
94#define CSR1212_BE64_TO_CPU(quad) (quad)
95#define CSR1212_CPU_TO_BE64(quad) (quad)
96
97#define CSR1212_LE16_TO_CPU(quad) bswap_16(quad)
98#define CSR1212_CPU_TO_LE16(quad) bswap_16(quad)
99#define CSR1212_LE32_TO_CPU(quad) bswap_32(quad)
100#define CSR1212_CPU_TO_LE32(quad) bswap_32(quad)
101#define CSR1212_LE64_TO_CPU(quad) bswap_64(quad)
102#define CSR1212_CPU_TO_LE64(quad) bswap_64(quad)
103#endif
104
105#include <errno.h>
106#define CSR1212_SUCCESS (0)
107#define CSR1212_EINVAL (EINVAL)
108#define CSR1212_ENOMEM (ENOMEM)
109#define CSR1212_ENOENT (ENOENT)
110#define CSR1212_EIO (EIO)
111#define CSR1212_EBUSY (EBUSY)
112
113#endif
114 35
36#define CSR1212_MALLOC(size) kmalloc((size), GFP_KERNEL)
37#define CSR1212_FREE(ptr) kfree(ptr)
115 38
116#define CSR1212_KV_VAL_MASK 0xffffff 39#define CSR1212_SUCCESS (0)
117#define CSR1212_KV_KEY_SHIFT 24
118#define CSR1212_KV_KEY_TYPE_SHIFT 6
119#define CSR1212_KV_KEY_ID_MASK 0x3f
120#define CSR1212_KV_KEY_TYPE_MASK 0x3 /* After shift */
121 40
122 41
123/* CSR 1212 key types */ 42/* CSR 1212 key types */
@@ -190,48 +109,22 @@
190#define CSR1212_UNITS_SPACE_END (CSR1212_UNITS_SPACE_BASE + CSR1212_UNITS_SPACE_SIZE) 109#define CSR1212_UNITS_SPACE_END (CSR1212_UNITS_SPACE_BASE + CSR1212_UNITS_SPACE_SIZE)
191#define CSR1212_UNITS_SPACE_OFFSET (CSR1212_UNITS_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE) 110#define CSR1212_UNITS_SPACE_OFFSET (CSR1212_UNITS_SPACE_BASE - CSR1212_REGISTER_SPACE_BASE)
192 111
193#define CSR1212_EXTENDED_ROM_SIZE (0x10000 * sizeof(u_int32_t))
194
195#define CSR1212_INVALID_ADDR_SPACE -1 112#define CSR1212_INVALID_ADDR_SPACE -1
196 113
114
197/* Config ROM image structures */ 115/* Config ROM image structures */
198struct csr1212_bus_info_block_img { 116struct csr1212_bus_info_block_img {
199 u_int8_t length; 117 u8 length;
200 u_int8_t crc_length; 118 u8 crc_length;
201 u_int16_t crc; 119 u16 crc;
202 120
203 /* Must be last */ 121 /* Must be last */
204 u_int32_t data[0]; /* older gcc can't handle [] which is standard */ 122 u32 data[0]; /* older gcc can't handle [] which is standard */
205};
206
207#define CSR1212_KV_KEY(quad) (CSR1212_BE32_TO_CPU(quad) >> CSR1212_KV_KEY_SHIFT)
208#define CSR1212_KV_KEY_TYPE(quad) (CSR1212_KV_KEY(quad) >> CSR1212_KV_KEY_TYPE_SHIFT)
209#define CSR1212_KV_KEY_ID(quad) (CSR1212_KV_KEY(quad) & CSR1212_KV_KEY_ID_MASK)
210#define CSR1212_KV_VAL(quad) (CSR1212_BE32_TO_CPU(quad) & CSR1212_KV_VAL_MASK)
211
212#define CSR1212_SET_KV_KEY(quad, key) ((quad) = \
213 CSR1212_CPU_TO_BE32(CSR1212_KV_VAL(quad) | ((key) << CSR1212_KV_KEY_SHIFT)))
214#define CSR1212_SET_KV_VAL(quad, val) ((quad) = \
215 CSR1212_CPU_TO_BE32((CSR1212_KV_KEY(quad) << CSR1212_KV_KEY_SHIFT) | (val)))
216#define CSR1212_SET_KV_TYPEID(quad, type, id) ((quad) = \
217 CSR1212_CPU_TO_BE32(CSR1212_KV_VAL(quad) | \
218 (((((type) & CSR1212_KV_KEY_TYPE_MASK) << CSR1212_KV_KEY_TYPE_SHIFT) | \
219 ((id) & CSR1212_KV_KEY_ID_MASK)) << CSR1212_KV_KEY_SHIFT)))
220
221typedef u_int32_t csr1212_quad_t;
222
223
224struct csr1212_keyval_img {
225 u_int16_t length;
226 u_int16_t crc;
227
228 /* Must be last */
229 csr1212_quad_t data[0]; /* older gcc can't handle [] which is standard */
230}; 123};
231 124
232struct csr1212_leaf { 125struct csr1212_leaf {
233 int len; 126 int len;
234 u_int32_t *data; 127 u32 *data;
235}; 128};
236 129
237struct csr1212_dentry { 130struct csr1212_dentry {
@@ -246,12 +139,12 @@ struct csr1212_directory {
246 139
247struct csr1212_keyval { 140struct csr1212_keyval {
248 struct { 141 struct {
249 u_int8_t type; 142 u8 type;
250 u_int8_t id; 143 u8 id;
251 } key; 144 } key;
252 union { 145 union {
253 u_int32_t immediate; 146 u32 immediate;
254 u_int32_t csr_offset; 147 u32 csr_offset;
255 struct csr1212_leaf leaf; 148 struct csr1212_leaf leaf;
256 struct csr1212_directory directory; 149 struct csr1212_directory directory;
257 } value; 150 } value;
@@ -260,15 +153,15 @@ struct csr1212_keyval {
260 153
261 /* used in generating and/or parsing CSR image */ 154 /* used in generating and/or parsing CSR image */
262 struct csr1212_keyval *next, *prev; /* flat list of CSR elements */ 155 struct csr1212_keyval *next, *prev; /* flat list of CSR elements */
263 u_int32_t offset; /* position in CSR from 0xffff f000 0000 */ 156 u32 offset; /* position in CSR from 0xffff f000 0000 */
264 u_int8_t valid; /* flag indicating keyval has valid data*/ 157 u8 valid; /* flag indicating keyval has valid data*/
265}; 158};
266 159
267 160
268struct csr1212_cache_region { 161struct csr1212_cache_region {
269 struct csr1212_cache_region *next, *prev; 162 struct csr1212_cache_region *next, *prev;
270 u_int32_t offset_start; /* inclusive */ 163 u32 offset_start; /* inclusive */
271 u_int32_t offset_end; /* exclusive */ 164 u32 offset_end; /* exclusive */
272}; 165};
273 166
274struct csr1212_csr_rom_cache { 167struct csr1212_csr_rom_cache {
@@ -276,18 +169,18 @@ struct csr1212_csr_rom_cache {
276 struct csr1212_cache_region *filled_head, *filled_tail; 169 struct csr1212_cache_region *filled_head, *filled_tail;
277 struct csr1212_keyval *layout_head, *layout_tail; 170 struct csr1212_keyval *layout_head, *layout_tail;
278 size_t size; 171 size_t size;
279 u_int32_t offset; 172 u32 offset;
280 struct csr1212_keyval *ext_rom; 173 struct csr1212_keyval *ext_rom;
281 size_t len; 174 size_t len;
282 175
283 /* Must be last */ 176 /* Must be last */
284 u_int32_t data[0]; /* older gcc can't handle [] which is standard */ 177 u32 data[0]; /* older gcc can't handle [] which is standard */
285}; 178};
286 179
287struct csr1212_csr { 180struct csr1212_csr {
288 size_t bus_info_len; /* bus info block length in bytes */ 181 size_t bus_info_len; /* bus info block length in bytes */
289 size_t crc_len; /* crc length in bytes */ 182 size_t crc_len; /* crc length in bytes */
290 u_int32_t *bus_info_data; /* bus info data incl bus name and EUI */ 183 u32 *bus_info_data; /* bus info data incl bus name and EUI */
291 184
292 void *private; /* private, bus specific data */ 185 void *private; /* private, bus specific data */
293 struct csr1212_bus_ops *ops; 186 struct csr1212_bus_ops *ops;
@@ -305,52 +198,38 @@ struct csr1212_bus_ops {
305 * from remote nodes when parsing a Config ROM (i.e., read Config ROM 198 * from remote nodes when parsing a Config ROM (i.e., read Config ROM
306 * entries located in the Units Space. Must return 0 on success 199 * entries located in the Units Space. Must return 0 on success
307 * anything else indicates an error. */ 200 * anything else indicates an error. */
308 int (*bus_read) (struct csr1212_csr *csr, u_int64_t addr, 201 int (*bus_read) (struct csr1212_csr *csr, u64 addr,
309 u_int16_t length, void *buffer, void *private); 202 u16 length, void *buffer, void *private);
310 203
311 /* This function is used by csr1212 to allocate a region in units space 204 /* This function is used by csr1212 to allocate a region in units space
312 * in the event that Config ROM entries don't all fit in the predefined 205 * in the event that Config ROM entries don't all fit in the predefined
313 * 1K region. The void *private parameter is private member of struct 206 * 1K region. The void *private parameter is private member of struct
314 * csr1212_csr. */ 207 * csr1212_csr. */
315 u_int64_t (*allocate_addr_range) (u_int64_t size, u_int32_t alignment, 208 u64 (*allocate_addr_range) (u64 size, u32 alignment, void *private);
316 void *private);
317
318 209
319 /* This function is used by csr1212 to release a region in units space 210 /* This function is used by csr1212 to release a region in units space
320 * that is no longer needed. */ 211 * that is no longer needed. */
321 void (*release_addr) (u_int64_t addr, void *private); 212 void (*release_addr) (u64 addr, void *private);
322 213
323 /* This function is used by csr1212 to determine the max read request 214 /* This function is used by csr1212 to determine the max read request
324 * supported by a remote node when reading the ConfigROM space. Must 215 * supported by a remote node when reading the ConfigROM space. Must
325 * return 0, 1, or 2 per IEEE 1212. */ 216 * return 0, 1, or 2 per IEEE 1212. */
326 int (*get_max_rom) (u_int32_t *bus_info, void *private); 217 int (*get_max_rom) (u32 *bus_info, void *private);
327}; 218};
328 219
329 220
330
331
332/* Descriptor Leaf manipulation macros */ 221/* Descriptor Leaf manipulation macros */
333#define CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT 24 222#define CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT 24
334#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK 0xffffff 223#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK 0xffffff
335#define CSR1212_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u_int32_t)) 224#define CSR1212_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u32))
336 225
337#define CSR1212_DESCRIPTOR_LEAF_TYPE(kv) \ 226#define CSR1212_DESCRIPTOR_LEAF_TYPE(kv) \
338 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[0]) >> CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) 227 (be32_to_cpu((kv)->value.leaf.data[0]) >> \
228 CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)
339#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) \ 229#define CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) \
340 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[0]) & \ 230 (be32_to_cpu((kv)->value.leaf.data[0]) & \
341 CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK) 231 CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)
342#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \ 232
343 (&((kv)->value.leaf.data[1]))
344
345#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
346 ((kv)->value.leaf.data[0] = \
347 CSR1212_CPU_TO_BE32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
348 ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
349#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
350 ((kv)->value.leaf.data[0] = \
351 CSR1212_CPU_TO_BE32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
352 CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
353 ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
354 233
355/* Text Descriptor Leaf manipulation macros */ 234/* Text Descriptor Leaf manipulation macros */
356#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT 28 235#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT 28
@@ -358,182 +237,21 @@ struct csr1212_bus_ops {
358#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT 16 237#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT 16
359#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK 0xfff /* after shift */ 238#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK 0xfff /* after shift */
360#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff 239#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff
361#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u_int32_t)) 240#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD (1 * sizeof(u32))
362 241
363#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) \ 242#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) \
364 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) >> \ 243 (be32_to_cpu((kv)->value.leaf.data[1]) >> \
365 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT) 244 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT)
366#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) \ 245#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) \
367 ((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) >> \ 246 ((be32_to_cpu((kv)->value.leaf.data[1]) >> \
368 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT) & \ 247 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT) & \
369 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK) 248 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK)
370#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) \ 249#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) \
371 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]) & \ 250 (be32_to_cpu((kv)->value.leaf.data[1]) & \
372 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK) 251 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)
373#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv) \ 252#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv) \
374 (&((kv)->value.leaf.data[2])) 253 (&((kv)->value.leaf.data[2]))
375 254
376#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, width) \
377 ((kv)->value.leaf.data[1] = \
378 ((kv)->value.leaf.data[1] & \
379 CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK << \
380 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))) | \
381 CSR1212_CPU_TO_BE32(((width) & \
382 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK) << \
383 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))
384#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, char_set) \
385 ((kv)->value.leaf.data[1] = \
386 ((kv)->value.leaf.data[1] & \
387 CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK << \
388 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))) | \
389 CSR1212_CPU_TO_BE32(((char_set) & \
390 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK) << \
391 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))
392#define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
393 ((kv)->value.leaf.data[1] = \
394 ((kv)->value.leaf.data[1] & \
395 CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
396 CSR1212_CPU_TO_BE32(((language) & \
397 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
398
399
400/* Icon Descriptor Leaf manipulation macros */
401#define CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK 0xffffff
402#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT 30
403#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK 0x3 /* after shift */
404#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT 16
405#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK 0xf /* after shift */
406#define CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK 0xffff
407#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT 16
408#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK 0xffff /* after shift */
409#define CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK 0xffff
410#define CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD (3 * sizeof(u_int32_t))
411
412#define CSR1212_ICON_DESCRIPTOR_LEAF_VERSION(kv) \
413 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[2]) & \
414 CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK)
415
416#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH(kv) \
417 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) >> \
418 CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT)
419
420#define CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE(kv) \
421 ((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) >> \
422 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT) & \
423 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK)
424
425#define CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE(kv) \
426 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[3]) & \
427 CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK)
428
429#define CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN(kv) \
430 ((CSR1212_BE32_TO_CPU((kv)->value.leaf.data[4]) >> \
431 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_HSCAN_SHIFT) & \
432 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_HSCAN_MASK)
433
434#define CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN(kv) \
435 (CSR1212_BE32_TO_CPU((kv)->value.leaf.data[4]) & \
436 CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK)
437
438#define CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv) \
439 (&((kv)->value.leaf.data[5]))
440
441static inline u_int32_t *CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(struct csr1212_keyval *kv)
442{
443 static const int pd[4] = { 0, 4, 16, 256 };
444 static const int cs[16] = { 4, 2 };
445 int ps = pd[CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH(kv)];
446
447 return &kv->value.leaf.data[5 +
448 (ps * cs[CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE(kv)]) /
449 sizeof(u_int32_t)];
450}
451
452#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version) \
453 ((kv)->value.leaf.data[2] = \
454 ((kv)->value.leaf.data[2] & \
455 CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK))) | \
456 CSR1212_CPU_TO_BE32(((version) & \
457 CSR1212_ICON_DESCRIPTOR_LEAF_VERSION_MASK)))
458
459#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth) \
460 ((kv)->value.leaf.data[3] = \
461 ((kv)->value.leaf.data[3] & \
462 CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK << \
463 CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT))) | \
464 CSR1212_CPU_TO_BE32(((palette_depth) & \
465 CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_MASK) << \
466 CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH_SHIFT))
467
468#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space) \
469 ((kv)->value.leaf.data[3] = \
470 ((kv)->value.leaf.data[3] & \
471 CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK << \
472 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT))) | \
473 CSR1212_CPU_TO_BE32(((color_space) & \
474 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_MASK) << \
475 CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE_SHIFT))
476
477#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
478 ((kv)->value.leaf.data[3] = \
479 ((kv)->value.leaf.data[3] & \
480 CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
481 CSR1212_CPU_TO_BE32(((language) & \
482 CSR1212_ICON_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
483
484#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan) \
485 ((kv)->value.leaf.data[4] = \
486 ((kv)->value.leaf.data[4] & \
487 CSR1212_CPU_TO_BE32(~(CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK << \
488 CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT))) | \
489 CSR1212_CPU_TO_BE32(((hscan) & \
490 CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_MASK) << \
491 CSR1212_ICON_DESCRIPTOR_LEAF_HSCAN_SHIFT))
492
493#define CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan) \
494 ((kv)->value.leaf.data[4] = \
495 (((kv)->value.leaf.data[4] & \
496 CSR1212_CPU_TO_BE32(~CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK))) | \
497 CSR1212_CPU_TO_BE32(((vscan) & \
498 CSR1212_ICON_DESCRIPTOR_LEAF_VSCAN_MASK)))
499
500
501/* Modifiable Descriptor Leaf manipulation macros */
502#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT 16
503#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK 0xffff
504#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_SHIFT 32
505#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK 0xffff
506#define CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_LO_MASK 0xffffffffULL
507
508#define CSR1212_MODIFIABLE_DESCRIPTOR_MAX_SIZE(kv) \
509 CSR1212_BE16_TO_CPU((kv)->value.leaf.data[0] >> CSR1212_MODIFIABLE_DESCRIPTOR_MAX_SIZE_SHIFT)
510
511#define CSR1212_MODIFIABLE_DESCRIPTOR_ADDRESS(kv) \
512 (CSR1212_BE16_TO_CPU(((u_int64_t)((kv)->value.leaf.data[0])) << \
513 CSR1212_MODIFIABLE_DESCRIPTOR_ADDR_HI_SHIFT) | \
514 CSR1212_BE32_TO_CPU((kv)->value.leaf.data[1]))
515
516#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, size) \
517 ((kv)->value.leaf.data[0] = \
518 ((kv)->value.leaf.data[0] & \
519 CSR1212_CPU_TO_BE32(~(CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK << \
520 CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT))) | \
521 CSR1212_CPU_TO_BE32(((size) & \
522 CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_MASK) << \
523 CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_MAX_SIZE_SHIFT))
524
525#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, addr) \
526 ((kv)->value.leaf.data[0] = \
527 ((kv)->value.leaf.data[0] & \
528 CSR1212_CPU_TO_BE32(~(CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK))) | \
529 CSR1212_CPU_TO_BE32(((addr) & \
530 CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_HI_MASK)))
531
532#define CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, addr) \
533 ((kv)->value.leaf.data[1] = \
534 CSR1212_CPU_TO_BE32(addr & CSR1212_MODIFIABLE_DESCRIPTOR_LEAF_ADDR_LO_MASK))
535
536
537 255
538/* The following 2 function are for creating new Configuration ROM trees. The 256/* The following 2 function are for creating new Configuration ROM trees. The
539 * first function is used for both creating local trees and parsing remote 257 * first function is used for both creating local trees and parsing remote
@@ -543,11 +261,10 @@ extern struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
543 size_t bus_info_size, 261 size_t bus_info_size,
544 void *private); 262 void *private);
545extern void csr1212_init_local_csr(struct csr1212_csr *csr, 263extern void csr1212_init_local_csr(struct csr1212_csr *csr,
546 const u_int32_t *bus_info_data, int max_rom); 264 const u32 *bus_info_data, int max_rom);
547 265
548 266
549/* The following function destroys a Configuration ROM tree and release all 267/* Destroy a Configuration ROM tree and release all memory taken by the tree. */
550 * memory taken by the tree. */
551extern void csr1212_destroy_csr(struct csr1212_csr *csr); 268extern void csr1212_destroy_csr(struct csr1212_csr *csr);
552 269
553 270
@@ -555,50 +272,20 @@ extern void csr1212_destroy_csr(struct csr1212_csr *csr);
555 * a Configuration ROM tree. Code that creates new keyvals with these functions 272 * a Configuration ROM tree. Code that creates new keyvals with these functions
556 * must release those keyvals with csr1212_release_keyval() when they are no 273 * must release those keyvals with csr1212_release_keyval() when they are no
557 * longer needed. */ 274 * longer needed. */
558extern struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value); 275extern struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value);
559extern struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data, 276extern struct csr1212_keyval *csr1212_new_directory(u8 key);
560 size_t data_len);
561extern struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key,
562 u_int32_t csr_offset);
563extern struct csr1212_keyval *csr1212_new_directory(u_int8_t key);
564extern struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec,
565 u_int32_t key,
566 u_int32_t value);
567extern struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec,
568 u_int32_t key,
569 const void *data,
570 size_t data_len);
571extern struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype,
572 u_int32_t specifier_id,
573 const void *data,
574 size_t data_len);
575extern struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
576 u_int16_t cset,
577 u_int16_t language,
578 const void *data,
579 size_t data_len);
580extern struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s); 277extern struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s);
581extern struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version, 278
582 u_int8_t palette_depth, 279
583 u_int8_t color_space, 280/* The following function manages association between keyvals. Typically,
584 u_int16_t language,
585 u_int16_t hscan,
586 u_int16_t vscan,
587 u_int32_t *palette,
588 u_int32_t *pixels);
589extern struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
590 u_int64_t address);
591extern struct csr1212_keyval *csr1212_new_keyword_leaf(int strc,
592 const char *strv[]);
593
594
595/* The following functions manage association between keyvals. Typically,
596 * Descriptor Leaves and Directories will be associated with another keyval and 281 * Descriptor Leaves and Directories will be associated with another keyval and
597 * it is desirable for the Descriptor keyval to be place immediately after the 282 * it is desirable for the Descriptor keyval to be place immediately after the
598 * keyval that it is associated with.*/ 283 * keyval that it is associated with.
599extern int csr1212_associate_keyval(struct csr1212_keyval *kv, 284 * Take care with subsequent ROM modifications: There is no function to remove
600 struct csr1212_keyval *associate); 285 * previously specified associations.
601extern void csr1212_disassociate_keyval(struct csr1212_keyval *kv); 286 */
287extern void csr1212_associate_keyval(struct csr1212_keyval *kv,
288 struct csr1212_keyval *associate);
602 289
603 290
604/* The following functions manage the association of a keyval and directories. 291/* The following functions manage the association of a keyval and directories.
@@ -609,23 +296,15 @@ extern void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
609 struct csr1212_keyval *kv); 296 struct csr1212_keyval *kv);
610 297
611 298
612/* The following functions create a Configuration ROM image from the tree of 299/* Creates a complete Configuration ROM image in the list of caches available
613 * keyvals provided. csr1212_generate_csr_image() creates a complete image in 300 * via csr->cache_head. */
614 * the list of caches available via csr->cache_head. The other functions are
615 * provided should there be a need to create a flat image without restrictions
616 * placed by IEEE 1212. */
617extern struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
618 struct csr1212_keyval *start_kv,
619 int start_pos);
620extern size_t csr1212_generate_layout_order(struct csr1212_keyval *kv);
621extern void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache);
622extern int csr1212_generate_csr_image(struct csr1212_csr *csr); 301extern int csr1212_generate_csr_image(struct csr1212_csr *csr);
623 302
624 303
625/* This is a convience function for reading a block of data out of one of the 304/* This is a convience function for reading a block of data out of one of the
626 * caches in the csr->cache_head list. */ 305 * caches in the csr->cache_head list. */
627extern int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer, 306extern int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer,
628 u_int32_t len); 307 u32 len);
629 308
630 309
631/* The following functions are in place for parsing Configuration ROM images. 310/* The following functions are in place for parsing Configuration ROM images.
@@ -635,15 +314,11 @@ extern int csr1212_parse_keyval(struct csr1212_keyval *kv,
635 struct csr1212_csr_rom_cache *cache); 314 struct csr1212_csr_rom_cache *cache);
636extern int csr1212_parse_csr(struct csr1212_csr *csr); 315extern int csr1212_parse_csr(struct csr1212_csr *csr);
637 316
638/* These are internal functions referenced by inline functions below. */
639extern int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv);
640extern void _csr1212_destroy_keyval(struct csr1212_keyval *kv);
641
642 317
643/* This function allocates a new cache which may be used for either parsing or 318/* This function allocates a new cache which may be used for either parsing or
644 * generating sub-sets of Configuration ROM images. */ 319 * generating sub-sets of Configuration ROM images. */
645static inline struct csr1212_csr_rom_cache *csr1212_rom_cache_malloc(u_int32_t offset, 320static inline struct csr1212_csr_rom_cache *
646 size_t size) 321csr1212_rom_cache_malloc(u32 offset, size_t size)
647{ 322{
648 struct csr1212_csr_rom_cache *cache; 323 struct csr1212_csr_rom_cache *cache;
649 324
@@ -667,16 +342,8 @@ static inline struct csr1212_csr_rom_cache *csr1212_rom_cache_malloc(u_int32_t o
667 342
668/* This function ensures that a keyval contains data when referencing a keyval 343/* This function ensures that a keyval contains data when referencing a keyval
669 * created by parsing a Configuration ROM. */ 344 * created by parsing a Configuration ROM. */
670static inline struct csr1212_keyval *csr1212_get_keyval(struct csr1212_csr *csr, 345extern struct csr1212_keyval *
671 struct csr1212_keyval *kv) 346csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv);
672{
673 if (!kv)
674 return NULL;
675 if (!kv->valid)
676 if (_csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
677 return NULL;
678 return kv;
679}
680 347
681 348
682/* This function increments the reference count for a keyval should there be a 349/* This function increments the reference count for a keyval should there be a
@@ -691,37 +358,29 @@ static inline void csr1212_keep_keyval(struct csr1212_keyval *kv)
691 * keyval when there are no more users of the keyval. This should be called by 358 * keyval when there are no more users of the keyval. This should be called by
692 * any code that calls csr1212_keep_keyval() or any of the keyval creation 359 * any code that calls csr1212_keep_keyval() or any of the keyval creation
693 * routines csr1212_new_*(). */ 360 * routines csr1212_new_*(). */
694static inline void csr1212_release_keyval(struct csr1212_keyval *kv) 361extern void csr1212_release_keyval(struct csr1212_keyval *kv);
695{
696 if (kv->refcnt > 1)
697 kv->refcnt--;
698 else
699 _csr1212_destroy_keyval(kv);
700}
701 362
702 363
703/* 364/*
704 * This macro allows for looping over the keyval entries in a directory and it 365 * This macro allows for looping over the keyval entries in a directory and it
705 * ensures that keyvals from remote ConfigROMs are parsed properly. 366 * ensures that keyvals from remote ConfigROMs are parsed properly.
706 * 367 *
707 * _csr is a struct csr1212_csr * that points to CSR associated with dir. 368 * struct csr1212_csr *_csr points to the CSR associated with dir.
708 * _kv is a struct csr1212_keyval * that'll point to the current keyval (loop index). 369 * struct csr1212_keyval *_kv points to the current keyval (loop index).
709 * _dir is a struct csr1212_keyval * that points to the directory to be looped. 370 * struct csr1212_keyval *_dir points to the directory to be looped.
710 * _pos is a struct csr1212_dentry * that is used internally for indexing. 371 * struct csr1212_dentry *_pos is used internally for indexing.
711 * 372 *
712 * kv will be NULL upon exit of the loop. 373 * kv will be NULL upon exit of the loop.
713 */ 374 */
714#define csr1212_for_each_dir_entry(_csr, _kv, _dir, _pos) \ 375#define csr1212_for_each_dir_entry(_csr, _kv, _dir, _pos) \
715 for (csr1212_get_keyval((_csr), (_dir)), \ 376 for (csr1212_get_keyval((_csr), (_dir)), \
716 _pos = (_dir)->value.directory.dentries_head, \ 377 _pos = (_dir)->value.directory.dentries_head, \
717 _kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : NULL; \ 378 _kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : NULL;\
718 (_kv) && (_pos); \ 379 (_kv) && (_pos); \
719 (_kv->associate == NULL) ? \ 380 (_kv->associate == NULL) ? \
720 ((_pos = _pos->next), \ 381 ((_pos = _pos->next), (_kv = (_pos) ? \
721 (_kv = (_pos) ? csr1212_get_keyval((_csr), _pos->kv) : \ 382 csr1212_get_keyval((_csr), _pos->kv) : \
722 NULL)) : \ 383 NULL)) : \
723 (_kv = csr1212_get_keyval((_csr), _kv->associate))) 384 (_kv = csr1212_get_keyval((_csr), _kv->associate)))
724 385
725
726
727#endif /* __CSR1212_H__ */ 386#endif /* __CSR1212_H__ */
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index c68f328e1a29..45d605581922 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -62,6 +62,9 @@ void dma_prog_region_free(struct dma_prog_region *prog)
62 62
63/* dma_region */ 63/* dma_region */
64 64
65/**
66 * dma_region_init - clear out all fields but do not allocate anything
67 */
65void dma_region_init(struct dma_region *dma) 68void dma_region_init(struct dma_region *dma)
66{ 69{
67 dma->kvirt = NULL; 70 dma->kvirt = NULL;
@@ -71,6 +74,9 @@ void dma_region_init(struct dma_region *dma)
71 dma->sglist = NULL; 74 dma->sglist = NULL;
72} 75}
73 76
77/**
78 * dma_region_alloc - allocate the buffer and map it to the IOMMU
79 */
74int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, 80int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
75 struct pci_dev *dev, int direction) 81 struct pci_dev *dev, int direction)
76{ 82{
@@ -128,6 +134,9 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
128 return -ENOMEM; 134 return -ENOMEM;
129} 135}
130 136
137/**
138 * dma_region_free - unmap and free the buffer
139 */
131void dma_region_free(struct dma_region *dma) 140void dma_region_free(struct dma_region *dma)
132{ 141{
133 if (dma->n_dma_pages) { 142 if (dma->n_dma_pages) {
@@ -167,6 +176,12 @@ static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
167 return i; 176 return i;
168} 177}
169 178
179/**
180 * dma_region_offset_to_bus - get bus address of an offset within a DMA region
181 *
182 * Returns the DMA bus address of the byte with the given @offset relative to
183 * the beginning of the @dma.
184 */
170dma_addr_t dma_region_offset_to_bus(struct dma_region * dma, 185dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
171 unsigned long offset) 186 unsigned long offset)
172{ 187{
@@ -177,6 +192,9 @@ dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
177 return sg_dma_address(sg) + rem; 192 return sg_dma_address(sg) + rem;
178} 193}
179 194
195/**
196 * dma_region_sync_for_cpu - sync the CPU's view of the buffer
197 */
180void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, 198void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
181 unsigned long len) 199 unsigned long len)
182{ 200{
@@ -193,6 +211,9 @@ void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
193 dma->direction); 211 dma->direction);
194} 212}
195 213
214/**
215 * dma_region_sync_for_device - sync the IO bus' view of the buffer
216 */
196void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, 217void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
197 unsigned long len) 218 unsigned long len)
198{ 219{
@@ -244,6 +265,9 @@ static struct vm_operations_struct dma_region_vm_ops = {
244 .nopage = dma_region_pagefault, 265 .nopage = dma_region_pagefault,
245}; 266};
246 267
268/**
269 * dma_region_mmap - map the buffer into a user space process
270 */
247int dma_region_mmap(struct dma_region *dma, struct file *file, 271int dma_region_mmap(struct dma_region *dma, struct file *file,
248 struct vm_area_struct *vma) 272 struct vm_area_struct *vma)
249{ 273{
diff --git a/drivers/ieee1394/dma.h b/drivers/ieee1394/dma.h
index a1682aba71c7..2727bcd24194 100644
--- a/drivers/ieee1394/dma.h
+++ b/drivers/ieee1394/dma.h
@@ -66,35 +66,23 @@ struct dma_region {
66 int direction; 66 int direction;
67}; 67};
68 68
69/* clear out all fields but do not allocate anything */
70void dma_region_init(struct dma_region *dma); 69void dma_region_init(struct dma_region *dma);
71
72/* allocate the buffer and map it to the IOMMU */
73int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, 70int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
74 struct pci_dev *dev, int direction); 71 struct pci_dev *dev, int direction);
75
76/* unmap and free the buffer */
77void dma_region_free(struct dma_region *dma); 72void dma_region_free(struct dma_region *dma);
78
79/* sync the CPU's view of the buffer */
80void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, 73void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
81 unsigned long len); 74 unsigned long len);
82
83/* sync the IO bus' view of the buffer */
84void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, 75void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
85 unsigned long len); 76 unsigned long len);
86
87/* map the buffer into a user space process */
88int dma_region_mmap(struct dma_region *dma, struct file *file, 77int dma_region_mmap(struct dma_region *dma, struct file *file,
89 struct vm_area_struct *vma); 78 struct vm_area_struct *vma);
79dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
80 unsigned long offset);
90 81
91/* macro to index into a DMA region (or dma_prog_region) */ 82/**
83 * dma_region_i - macro to index into a DMA region (or dma_prog_region)
84 */
92#define dma_region_i(_dma, _type, _index) \ 85#define dma_region_i(_dma, _type, _index) \
93 ( ((_type*) ((_dma)->kvirt)) + (_index) ) 86 ( ((_type*) ((_dma)->kvirt)) + (_index) )
94 87
95/* return the DMA bus address of the byte with the given offset
96 * relative to the beginning of the dma_region */
97dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
98 unsigned long offset);
99
100#endif /* IEEE1394_DMA_H */ 88#endif /* IEEE1394_DMA_H */
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index a364003ba47f..2296d43a2414 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * eth1394.c -- Ethernet driver for Linux IEEE-1394 Subsystem 2 * eth1394.c -- IPv4 driver for Linux IEEE-1394 Subsystem
3 * 3 *
4 * Copyright (C) 2001-2003 Ben Collins <bcollins@debian.org> 4 * Copyright (C) 2001-2003 Ben Collins <bcollins@debian.org>
5 * 2000 Bonin Franck <boninf@free.fr> 5 * 2000 Bonin Franck <boninf@free.fr>
@@ -22,10 +22,9 @@
22 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 22 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 */ 23 */
24 24
25/* This driver intends to support RFC 2734, which describes a method for 25/*
26 * transporting IPv4 datagrams over IEEE-1394 serial busses. This driver 26 * This driver intends to support RFC 2734, which describes a method for
27 * will ultimately support that method, but currently falls short in 27 * transporting IPv4 datagrams over IEEE-1394 serial busses.
28 * several areas.
29 * 28 *
30 * TODO: 29 * TODO:
31 * RFC 2734 related: 30 * RFC 2734 related:
@@ -40,7 +39,6 @@
40 * - Consider garbage collecting old partial datagrams after X amount of time 39 * - Consider garbage collecting old partial datagrams after X amount of time
41 */ 40 */
42 41
43
44#include <linux/module.h> 42#include <linux/module.h>
45 43
46#include <linux/kernel.h> 44#include <linux/kernel.h>
@@ -52,7 +50,6 @@
52 50
53#include <linux/netdevice.h> 51#include <linux/netdevice.h>
54#include <linux/inetdevice.h> 52#include <linux/inetdevice.h>
55#include <linux/etherdevice.h>
56#include <linux/if_arp.h> 53#include <linux/if_arp.h>
57#include <linux/if_ether.h> 54#include <linux/if_ether.h>
58#include <linux/ip.h> 55#include <linux/ip.h>
@@ -84,10 +81,6 @@
84#define ETH1394_PRINT(level, dev_name, fmt, args...) \ 81#define ETH1394_PRINT(level, dev_name, fmt, args...) \
85 printk(level "%s: %s: " fmt, driver_name, dev_name, ## args) 82 printk(level "%s: %s: " fmt, driver_name, dev_name, ## args)
86 83
87#define DEBUG(fmt, args...) \
88 printk(KERN_ERR "%s:%s[%d]: " fmt "\n", driver_name, __FUNCTION__, __LINE__, ## args)
89#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
90
91struct fragment_info { 84struct fragment_info {
92 struct list_head list; 85 struct list_head list;
93 int offset; 86 int offset;
@@ -105,9 +98,9 @@ struct partial_datagram {
105}; 98};
106 99
107struct pdg_list { 100struct pdg_list {
108 struct list_head list; /* partial datagram list per node */ 101 struct list_head list; /* partial datagram list per node */
109 unsigned int sz; /* partial datagram list size per node */ 102 unsigned int sz; /* partial datagram list size per node */
110 spinlock_t lock; /* partial datagram lock */ 103 spinlock_t lock; /* partial datagram lock */
111}; 104};
112 105
113struct eth1394_host_info { 106struct eth1394_host_info {
@@ -121,16 +114,14 @@ struct eth1394_node_ref {
121}; 114};
122 115
123struct eth1394_node_info { 116struct eth1394_node_info {
124 u16 maxpayload; /* Max payload */ 117 u16 maxpayload; /* max payload */
125 u8 sspd; /* Max speed */ 118 u8 sspd; /* max speed */
126 u64 fifo; /* FIFO address */ 119 u64 fifo; /* FIFO address */
127 struct pdg_list pdg; /* partial RX datagram lists */ 120 struct pdg_list pdg; /* partial RX datagram lists */
128 int dgl; /* Outgoing datagram label */ 121 int dgl; /* outgoing datagram label */
129}; 122};
130 123
131/* Our ieee1394 highlevel driver */ 124static const char driver_name[] = "eth1394";
132#define ETH1394_DRIVER_NAME "eth1394"
133static const char driver_name[] = ETH1394_DRIVER_NAME;
134 125
135static struct kmem_cache *packet_task_cache; 126static struct kmem_cache *packet_task_cache;
136 127
@@ -138,18 +129,12 @@ static struct hpsb_highlevel eth1394_highlevel;
138 129
139/* Use common.lf to determine header len */ 130/* Use common.lf to determine header len */
140static const int hdr_type_len[] = { 131static const int hdr_type_len[] = {
141 sizeof (struct eth1394_uf_hdr), 132 sizeof(struct eth1394_uf_hdr),
142 sizeof (struct eth1394_ff_hdr), 133 sizeof(struct eth1394_ff_hdr),
143 sizeof (struct eth1394_sf_hdr), 134 sizeof(struct eth1394_sf_hdr),
144 sizeof (struct eth1394_sf_hdr) 135 sizeof(struct eth1394_sf_hdr)
145}; 136};
146 137
147/* Change this to IEEE1394_SPEED_S100 to make testing easier */
148#define ETH1394_SPEED_DEF IEEE1394_SPEED_MAX
149
150/* For now, this needs to be 1500, so that XP works with us */
151#define ETH1394_DATA_LEN ETH_DATA_LEN
152
153static const u16 eth1394_speedto_maxpayload[] = { 138static const u16 eth1394_speedto_maxpayload[] = {
154/* S100, S200, S400, S800, S1600, S3200 */ 139/* S100, S200, S400, S800, S1600, S3200 */
155 512, 1024, 2048, 4096, 4096, 4096 140 512, 1024, 2048, 4096, 4096, 4096
@@ -159,7 +144,8 @@ MODULE_AUTHOR("Ben Collins (bcollins@debian.org)");
159MODULE_DESCRIPTION("IEEE 1394 IPv4 Driver (IPv4-over-1394 as per RFC 2734)"); 144MODULE_DESCRIPTION("IEEE 1394 IPv4 Driver (IPv4-over-1394 as per RFC 2734)");
160MODULE_LICENSE("GPL"); 145MODULE_LICENSE("GPL");
161 146
162/* The max_partial_datagrams parameter is the maximum number of fragmented 147/*
148 * The max_partial_datagrams parameter is the maximum number of fragmented
163 * datagrams per node that eth1394 will keep in memory. Providing an upper 149 * datagrams per node that eth1394 will keep in memory. Providing an upper
164 * bound allows us to limit the amount of memory that partial datagrams 150 * bound allows us to limit the amount of memory that partial datagrams
165 * consume in the event that some partial datagrams are never completed. 151 * consume in the event that some partial datagrams are never completed.
@@ -179,10 +165,7 @@ static int ether1394_header_parse(struct sk_buff *skb, unsigned char *haddr);
179static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh); 165static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh);
180static void ether1394_header_cache_update(struct hh_cache *hh, 166static void ether1394_header_cache_update(struct hh_cache *hh,
181 struct net_device *dev, 167 struct net_device *dev,
182 unsigned char * haddr); 168 unsigned char *haddr);
183static int ether1394_mac_addr(struct net_device *dev, void *p);
184
185static void purge_partial_datagram(struct list_head *old);
186static int ether1394_tx(struct sk_buff *skb, struct net_device *dev); 169static int ether1394_tx(struct sk_buff *skb, struct net_device *dev);
187static void ether1394_iso(struct hpsb_iso *iso); 170static void ether1394_iso(struct hpsb_iso *iso);
188 171
@@ -190,9 +173,9 @@ static struct ethtool_ops ethtool_ops;
190 173
191static int ether1394_write(struct hpsb_host *host, int srcid, int destid, 174static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
192 quadlet_t *data, u64 addr, size_t len, u16 flags); 175 quadlet_t *data, u64 addr, size_t len, u16 flags);
193static void ether1394_add_host (struct hpsb_host *host); 176static void ether1394_add_host(struct hpsb_host *host);
194static void ether1394_remove_host (struct hpsb_host *host); 177static void ether1394_remove_host(struct hpsb_host *host);
195static void ether1394_host_reset (struct hpsb_host *host); 178static void ether1394_host_reset(struct hpsb_host *host);
196 179
197/* Function for incoming 1394 packets */ 180/* Function for incoming 1394 packets */
198static struct hpsb_address_ops addr_ops = { 181static struct hpsb_address_ops addr_ops = {
@@ -207,89 +190,107 @@ static struct hpsb_highlevel eth1394_highlevel = {
207 .host_reset = ether1394_host_reset, 190 .host_reset = ether1394_host_reset,
208}; 191};
209 192
193static int ether1394_recv_init(struct eth1394_priv *priv)
194{
195 unsigned int iso_buf_size;
196
197 /* FIXME: rawiso limits us to PAGE_SIZE */
198 iso_buf_size = min((unsigned int)PAGE_SIZE,
199 2 * (1U << (priv->host->csr.max_rec + 1)));
200
201 priv->iso = hpsb_iso_recv_init(priv->host,
202 ETHER1394_GASP_BUFFERS * iso_buf_size,
203 ETHER1394_GASP_BUFFERS,
204 priv->broadcast_channel,
205 HPSB_ISO_DMA_PACKET_PER_BUFFER,
206 1, ether1394_iso);
207 if (priv->iso == NULL) {
208 ETH1394_PRINT_G(KERN_ERR, "Failed to allocate IR context\n");
209 priv->bc_state = ETHER1394_BC_ERROR;
210 return -EAGAIN;
211 }
212
213 if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0)
214 priv->bc_state = ETHER1394_BC_STOPPED;
215 else
216 priv->bc_state = ETHER1394_BC_RUNNING;
217 return 0;
218}
210 219
211/* This is called after an "ifup" */ 220/* This is called after an "ifup" */
212static int ether1394_open (struct net_device *dev) 221static int ether1394_open(struct net_device *dev)
213{ 222{
214 struct eth1394_priv *priv = netdev_priv(dev); 223 struct eth1394_priv *priv = netdev_priv(dev);
215 int ret = 0; 224 int ret;
216 225
217 /* Something bad happened, don't even try */
218 if (priv->bc_state == ETHER1394_BC_ERROR) { 226 if (priv->bc_state == ETHER1394_BC_ERROR) {
219 /* we'll try again */ 227 ret = ether1394_recv_init(priv);
220 priv->iso = hpsb_iso_recv_init(priv->host, 228 if (ret)
221 ETHER1394_ISO_BUF_SIZE, 229 return ret;
222 ETHER1394_GASP_BUFFERS,
223 priv->broadcast_channel,
224 HPSB_ISO_DMA_PACKET_PER_BUFFER,
225 1, ether1394_iso);
226 if (priv->iso == NULL) {
227 ETH1394_PRINT(KERN_ERR, dev->name,
228 "Could not allocate isochronous receive "
229 "context for the broadcast channel\n");
230 priv->bc_state = ETHER1394_BC_ERROR;
231 ret = -EAGAIN;
232 } else {
233 if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0)
234 priv->bc_state = ETHER1394_BC_STOPPED;
235 else
236 priv->bc_state = ETHER1394_BC_RUNNING;
237 }
238 } 230 }
239 231 netif_start_queue(dev);
240 if (ret)
241 return ret;
242
243 netif_start_queue (dev);
244 return 0; 232 return 0;
245} 233}
246 234
247/* This is called after an "ifdown" */ 235/* This is called after an "ifdown" */
248static int ether1394_stop (struct net_device *dev) 236static int ether1394_stop(struct net_device *dev)
249{ 237{
250 netif_stop_queue (dev); 238 netif_stop_queue(dev);
251 return 0; 239 return 0;
252} 240}
253 241
254/* Return statistics to the caller */ 242/* Return statistics to the caller */
255static struct net_device_stats *ether1394_stats (struct net_device *dev) 243static struct net_device_stats *ether1394_stats(struct net_device *dev)
256{ 244{
257 return &(((struct eth1394_priv *)netdev_priv(dev))->stats); 245 return &(((struct eth1394_priv *)netdev_priv(dev))->stats);
258} 246}
259 247
260/* What to do if we timeout. I think a host reset is probably in order, so 248/* FIXME: What to do if we timeout? I think a host reset is probably in order,
261 * that's what we do. Should we increment the stat counters too? */ 249 * so that's what we do. Should we increment the stat counters too? */
262static void ether1394_tx_timeout (struct net_device *dev) 250static void ether1394_tx_timeout(struct net_device *dev)
263{ 251{
264 ETH1394_PRINT (KERN_ERR, dev->name, "Timeout, resetting host %s\n", 252 struct hpsb_host *host =
265 ((struct eth1394_priv *)netdev_priv(dev))->host->driver->name); 253 ((struct eth1394_priv *)netdev_priv(dev))->host;
266 254
267 highlevel_host_reset (((struct eth1394_priv *)netdev_priv(dev))->host); 255 ETH1394_PRINT(KERN_ERR, dev->name, "Timeout, resetting host\n");
256 ether1394_host_reset(host);
257}
268 258
269 netif_wake_queue (dev); 259static inline int ether1394_max_mtu(struct hpsb_host* host)
260{
261 return (1 << (host->csr.max_rec + 1))
262 - sizeof(union eth1394_hdr) - ETHER1394_GASP_OVERHEAD;
270} 263}
271 264
272static int ether1394_change_mtu(struct net_device *dev, int new_mtu) 265static int ether1394_change_mtu(struct net_device *dev, int new_mtu)
273{ 266{
274 struct eth1394_priv *priv = netdev_priv(dev); 267 int max_mtu;
275 268
276 if ((new_mtu < 68) || 269 if (new_mtu < 68)
277 (new_mtu > min(ETH1394_DATA_LEN,
278 (int)((1 << (priv->host->csr.max_rec + 1)) -
279 (sizeof(union eth1394_hdr) +
280 ETHER1394_GASP_OVERHEAD)))))
281 return -EINVAL; 270 return -EINVAL;
271
272 max_mtu = ether1394_max_mtu(
273 ((struct eth1394_priv *)netdev_priv(dev))->host);
274 if (new_mtu > max_mtu) {
275 ETH1394_PRINT(KERN_INFO, dev->name,
276 "Local node constrains MTU to %d\n", max_mtu);
277 return -ERANGE;
278 }
279
282 dev->mtu = new_mtu; 280 dev->mtu = new_mtu;
283 return 0; 281 return 0;
284} 282}
285 283
286static void purge_partial_datagram(struct list_head *old) 284static void purge_partial_datagram(struct list_head *old)
287{ 285{
288 struct partial_datagram *pd = list_entry(old, struct partial_datagram, list); 286 struct partial_datagram *pd;
289 struct list_head *lh, *n; 287 struct list_head *lh, *n;
288 struct fragment_info *fi;
289
290 pd = list_entry(old, struct partial_datagram, list);
290 291
291 list_for_each_safe(lh, n, &pd->frag_info) { 292 list_for_each_safe(lh, n, &pd->frag_info) {
292 struct fragment_info *fi = list_entry(lh, struct fragment_info, list); 293 fi = list_entry(lh, struct fragment_info, list);
293 list_del(lh); 294 list_del(lh);
294 kfree(fi); 295 kfree(fi);
295 } 296 }
@@ -330,35 +331,26 @@ static struct eth1394_node_ref *eth1394_find_node_nodeid(struct list_head *inl,
330 nodeid_t nodeid) 331 nodeid_t nodeid)
331{ 332{
332 struct eth1394_node_ref *node; 333 struct eth1394_node_ref *node;
333 list_for_each_entry(node, inl, list) { 334
335 list_for_each_entry(node, inl, list)
334 if (node->ud->ne->nodeid == nodeid) 336 if (node->ud->ne->nodeid == nodeid)
335 return node; 337 return node;
336 }
337 338
338 return NULL; 339 return NULL;
339} 340}
340 341
341static int eth1394_probe(struct device *dev) 342static int eth1394_new_node(struct eth1394_host_info *hi,
343 struct unit_directory *ud)
342{ 344{
343 struct unit_directory *ud;
344 struct eth1394_host_info *hi;
345 struct eth1394_priv *priv; 345 struct eth1394_priv *priv;
346 struct eth1394_node_ref *new_node; 346 struct eth1394_node_ref *new_node;
347 struct eth1394_node_info *node_info; 347 struct eth1394_node_info *node_info;
348 348
349 ud = container_of(dev, struct unit_directory, device); 349 new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
350
351 hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
352 if (!hi)
353 return -ENOENT;
354
355 new_node = kmalloc(sizeof(*new_node),
356 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
357 if (!new_node) 350 if (!new_node)
358 return -ENOMEM; 351 return -ENOMEM;
359 352
360 node_info = kmalloc(sizeof(*node_info), 353 node_info = kmalloc(sizeof(*node_info), GFP_KERNEL);
361 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
362 if (!node_info) { 354 if (!node_info) {
363 kfree(new_node); 355 kfree(new_node);
364 return -ENOMEM; 356 return -ENOMEM;
@@ -374,10 +366,22 @@ static int eth1394_probe(struct device *dev)
374 366
375 priv = netdev_priv(hi->dev); 367 priv = netdev_priv(hi->dev);
376 list_add_tail(&new_node->list, &priv->ip_node_list); 368 list_add_tail(&new_node->list, &priv->ip_node_list);
377
378 return 0; 369 return 0;
379} 370}
380 371
372static int eth1394_probe(struct device *dev)
373{
374 struct unit_directory *ud;
375 struct eth1394_host_info *hi;
376
377 ud = container_of(dev, struct unit_directory, device);
378 hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
379 if (!hi)
380 return -ENOENT;
381
382 return eth1394_new_node(hi, ud);
383}
384
381static int eth1394_remove(struct device *dev) 385static int eth1394_remove(struct device *dev)
382{ 386{
383 struct unit_directory *ud; 387 struct unit_directory *ud;
@@ -396,24 +400,23 @@ static int eth1394_remove(struct device *dev)
396 priv = netdev_priv(hi->dev); 400 priv = netdev_priv(hi->dev);
397 401
398 old_node = eth1394_find_node(&priv->ip_node_list, ud); 402 old_node = eth1394_find_node(&priv->ip_node_list, ud);
403 if (!old_node)
404 return 0;
399 405
400 if (old_node) { 406 list_del(&old_node->list);
401 list_del(&old_node->list); 407 kfree(old_node);
402 kfree(old_node);
403 408
404 node_info = (struct eth1394_node_info*)ud->device.driver_data; 409 node_info = (struct eth1394_node_info*)ud->device.driver_data;
405 410
406 spin_lock_irqsave(&node_info->pdg.lock, flags); 411 spin_lock_irqsave(&node_info->pdg.lock, flags);
407 /* The partial datagram list should be empty, but we'll just 412 /* The partial datagram list should be empty, but we'll just
408 * make sure anyway... */ 413 * make sure anyway... */
409 list_for_each_safe(lh, n, &node_info->pdg.list) { 414 list_for_each_safe(lh, n, &node_info->pdg.list)
410 purge_partial_datagram(lh); 415 purge_partial_datagram(lh);
411 } 416 spin_unlock_irqrestore(&node_info->pdg.lock, flags);
412 spin_unlock_irqrestore(&node_info->pdg.lock, flags);
413 417
414 kfree(node_info); 418 kfree(node_info);
415 ud->device.driver_data = NULL; 419 ud->device.driver_data = NULL;
416 }
417 return 0; 420 return 0;
418} 421}
419 422
@@ -422,44 +425,19 @@ static int eth1394_update(struct unit_directory *ud)
422 struct eth1394_host_info *hi; 425 struct eth1394_host_info *hi;
423 struct eth1394_priv *priv; 426 struct eth1394_priv *priv;
424 struct eth1394_node_ref *node; 427 struct eth1394_node_ref *node;
425 struct eth1394_node_info *node_info;
426 428
427 hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host); 429 hi = hpsb_get_hostinfo(&eth1394_highlevel, ud->ne->host);
428 if (!hi) 430 if (!hi)
429 return -ENOENT; 431 return -ENOENT;
430 432
431 priv = netdev_priv(hi->dev); 433 priv = netdev_priv(hi->dev);
432
433 node = eth1394_find_node(&priv->ip_node_list, ud); 434 node = eth1394_find_node(&priv->ip_node_list, ud);
435 if (node)
436 return 0;
434 437
435 if (!node) { 438 return eth1394_new_node(hi, ud);
436 node = kmalloc(sizeof(*node),
437 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
438 if (!node)
439 return -ENOMEM;
440
441 node_info = kmalloc(sizeof(*node_info),
442 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
443 if (!node_info) {
444 kfree(node);
445 return -ENOMEM;
446 }
447
448 spin_lock_init(&node_info->pdg.lock);
449 INIT_LIST_HEAD(&node_info->pdg.list);
450 node_info->pdg.sz = 0;
451
452 ud->device.driver_data = node_info;
453 node->ud = ud;
454
455 priv = netdev_priv(hi->dev);
456 list_add_tail(&node->list, &priv->ip_node_list);
457 }
458
459 return 0;
460} 439}
461 440
462
463static struct ieee1394_device_id eth1394_id_table[] = { 441static struct ieee1394_device_id eth1394_id_table[] = {
464 { 442 {
465 .match_flags = (IEEE1394_MATCH_SPECIFIER_ID | 443 .match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
@@ -473,7 +451,7 @@ static struct ieee1394_device_id eth1394_id_table[] = {
473MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table); 451MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table);
474 452
475static struct hpsb_protocol_driver eth1394_proto_driver = { 453static struct hpsb_protocol_driver eth1394_proto_driver = {
476 .name = ETH1394_DRIVER_NAME, 454 .name = driver_name,
477 .id_table = eth1394_id_table, 455 .id_table = eth1394_id_table,
478 .update = eth1394_update, 456 .update = eth1394_update,
479 .driver = { 457 .driver = {
@@ -482,47 +460,50 @@ static struct hpsb_protocol_driver eth1394_proto_driver = {
482 }, 460 },
483}; 461};
484 462
485 463static void ether1394_reset_priv(struct net_device *dev, int set_mtu)
486static void ether1394_reset_priv (struct net_device *dev, int set_mtu)
487{ 464{
488 unsigned long flags; 465 unsigned long flags;
489 int i; 466 int i;
490 struct eth1394_priv *priv = netdev_priv(dev); 467 struct eth1394_priv *priv = netdev_priv(dev);
491 struct hpsb_host *host = priv->host; 468 struct hpsb_host *host = priv->host;
492 u64 guid = get_unaligned((u64*)&(host->csr.rom->bus_info_data[3])); 469 u64 guid = get_unaligned((u64 *)&(host->csr.rom->bus_info_data[3]));
493 u16 maxpayload = 1 << (host->csr.max_rec + 1);
494 int max_speed = IEEE1394_SPEED_MAX; 470 int max_speed = IEEE1394_SPEED_MAX;
495 471
496 spin_lock_irqsave (&priv->lock, flags); 472 spin_lock_irqsave(&priv->lock, flags);
497 473
498 memset(priv->ud_list, 0, sizeof(struct node_entry*) * ALL_NODES); 474 memset(priv->ud_list, 0, sizeof(priv->ud_list));
499 priv->bc_maxpayload = 512; 475 priv->bc_maxpayload = 512;
500 476
501 /* Determine speed limit */ 477 /* Determine speed limit */
502 for (i = 0; i < host->node_count; i++) 478 /* FIXME: This is broken for nodes with link speed < PHY speed,
479 * and it is suboptimal for S200B...S800B hardware.
480 * The result of nodemgr's speed probe should be used somehow. */
481 for (i = 0; i < host->node_count; i++) {
482 /* take care of S100B...S400B PHY ports */
483 if (host->speed[i] == SELFID_SPEED_UNKNOWN) {
484 max_speed = IEEE1394_SPEED_100;
485 break;
486 }
503 if (max_speed > host->speed[i]) 487 if (max_speed > host->speed[i])
504 max_speed = host->speed[i]; 488 max_speed = host->speed[i];
489 }
505 priv->bc_sspd = max_speed; 490 priv->bc_sspd = max_speed;
506 491
507 /* We'll use our maxpayload as the default mtu */
508 if (set_mtu) { 492 if (set_mtu) {
509 dev->mtu = min(ETH1394_DATA_LEN, 493 /* Use the RFC 2734 default 1500 octets or the maximum payload
510 (int)(maxpayload - 494 * as initial MTU */
511 (sizeof(union eth1394_hdr) + 495 dev->mtu = min(1500, ether1394_max_mtu(host));
512 ETHER1394_GASP_OVERHEAD)));
513 496
514 /* Set our hardware address while we're at it */ 497 /* Set our hardware address while we're at it */
515 memcpy(dev->dev_addr, &guid, sizeof(u64)); 498 memcpy(dev->dev_addr, &guid, sizeof(u64));
516 memset(dev->broadcast, 0xff, sizeof(u64)); 499 memset(dev->broadcast, 0xff, sizeof(u64));
517 } 500 }
518 501
519 spin_unlock_irqrestore (&priv->lock, flags); 502 spin_unlock_irqrestore(&priv->lock, flags);
520} 503}
521 504
522/* This function is called right before register_netdev */ 505static void ether1394_init_dev(struct net_device *dev)
523static void ether1394_init_dev (struct net_device *dev)
524{ 506{
525 /* Our functions */
526 dev->open = ether1394_open; 507 dev->open = ether1394_open;
527 dev->stop = ether1394_stop; 508 dev->stop = ether1394_stop;
528 dev->hard_start_xmit = ether1394_tx; 509 dev->hard_start_xmit = ether1394_tx;
@@ -535,10 +516,9 @@ static void ether1394_init_dev (struct net_device *dev)
535 dev->hard_header_cache = ether1394_header_cache; 516 dev->hard_header_cache = ether1394_header_cache;
536 dev->header_cache_update= ether1394_header_cache_update; 517 dev->header_cache_update= ether1394_header_cache_update;
537 dev->hard_header_parse = ether1394_header_parse; 518 dev->hard_header_parse = ether1394_header_parse;
538 dev->set_mac_address = ether1394_mac_addr; 519
539 SET_ETHTOOL_OPS(dev, &ethtool_ops); 520 SET_ETHTOOL_OPS(dev, &ethtool_ops);
540 521
541 /* Some constants */
542 dev->watchdog_timeo = ETHER1394_TIMEOUT; 522 dev->watchdog_timeo = ETHER1394_TIMEOUT;
543 dev->flags = IFF_BROADCAST | IFF_MULTICAST; 523 dev->flags = IFF_BROADCAST | IFF_MULTICAST;
544 dev->features = NETIF_F_HIGHDMA; 524 dev->features = NETIF_F_HIGHDMA;
@@ -546,7 +526,8 @@ static void ether1394_init_dev (struct net_device *dev)
546 dev->hard_header_len = ETH1394_HLEN; 526 dev->hard_header_len = ETH1394_HLEN;
547 dev->type = ARPHRD_IEEE1394; 527 dev->type = ARPHRD_IEEE1394;
548 528
549 ether1394_reset_priv (dev, 1); 529 /* FIXME: This value was copied from ether_setup(). Is it too much? */
530 dev->tx_queue_len = 1000;
550} 531}
551 532
552/* 533/*
@@ -554,34 +535,33 @@ static void ether1394_init_dev (struct net_device *dev)
554 * when the module is installed. This is where we add all of our ethernet 535 * when the module is installed. This is where we add all of our ethernet
555 * devices. One for each host. 536 * devices. One for each host.
556 */ 537 */
557static void ether1394_add_host (struct hpsb_host *host) 538static void ether1394_add_host(struct hpsb_host *host)
558{ 539{
559 struct eth1394_host_info *hi = NULL; 540 struct eth1394_host_info *hi = NULL;
560 struct net_device *dev = NULL; 541 struct net_device *dev = NULL;
561 struct eth1394_priv *priv; 542 struct eth1394_priv *priv;
562 u64 fifo_addr; 543 u64 fifo_addr;
563 544
564 if (!(host->config_roms & HPSB_CONFIG_ROM_ENTRY_IP1394)) 545 if (hpsb_config_rom_ip1394_add(host) != 0) {
546 ETH1394_PRINT_G(KERN_ERR, "Can't add IP-over-1394 ROM entry\n");
565 return; 547 return;
548 }
566 549
567 fifo_addr = hpsb_allocate_and_register_addrspace( 550 fifo_addr = hpsb_allocate_and_register_addrspace(
568 &eth1394_highlevel, host, &addr_ops, 551 &eth1394_highlevel, host, &addr_ops,
569 ETHER1394_REGION_ADDR_LEN, ETHER1394_REGION_ADDR_LEN, 552 ETHER1394_REGION_ADDR_LEN, ETHER1394_REGION_ADDR_LEN,
570 CSR1212_INVALID_ADDR_SPACE, CSR1212_INVALID_ADDR_SPACE); 553 CSR1212_INVALID_ADDR_SPACE, CSR1212_INVALID_ADDR_SPACE);
571 if (fifo_addr == CSR1212_INVALID_ADDR_SPACE) 554 if (fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
572 goto out; 555 ETH1394_PRINT_G(KERN_ERR, "Cannot register CSR space\n");
573 556 hpsb_config_rom_ip1394_remove(host);
574 /* We should really have our own alloc_hpsbdev() function in 557 return;
575 * net_init.c instead of calling the one for ethernet then hijacking 558 }
576 * it for ourselves. That way we'd be a real networking device. */
577 dev = alloc_etherdev(sizeof (struct eth1394_priv));
578 559
560 dev = alloc_netdev(sizeof(*priv), "eth%d", ether1394_init_dev);
579 if (dev == NULL) { 561 if (dev == NULL) {
580 ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to allocate " 562 ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
581 "etherdevice for IEEE 1394 device %s-%d\n",
582 host->driver->name, host->id);
583 goto out; 563 goto out;
584 } 564 }
585 565
586 SET_MODULE_OWNER(dev); 566 SET_MODULE_OWNER(dev);
587#if 0 567#if 0
@@ -590,31 +570,26 @@ static void ether1394_add_host (struct hpsb_host *host)
590#endif 570#endif
591 571
592 priv = netdev_priv(dev); 572 priv = netdev_priv(dev);
593
594 INIT_LIST_HEAD(&priv->ip_node_list); 573 INIT_LIST_HEAD(&priv->ip_node_list);
595
596 spin_lock_init(&priv->lock); 574 spin_lock_init(&priv->lock);
597 priv->host = host; 575 priv->host = host;
598 priv->local_fifo = fifo_addr; 576 priv->local_fifo = fifo_addr;
599 577
600 hi = hpsb_create_hostinfo(&eth1394_highlevel, host, sizeof(*hi)); 578 hi = hpsb_create_hostinfo(&eth1394_highlevel, host, sizeof(*hi));
601
602 if (hi == NULL) { 579 if (hi == NULL) {
603 ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to create " 580 ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
604 "hostinfo for IEEE 1394 device %s-%d\n",
605 host->driver->name, host->id);
606 goto out; 581 goto out;
607 } 582 }
608 583
609 ether1394_init_dev(dev); 584 ether1394_reset_priv(dev, 1);
610 585
611 if (register_netdev (dev)) { 586 if (register_netdev(dev)) {
612 ETH1394_PRINT (KERN_ERR, dev->name, "Error registering network driver\n"); 587 ETH1394_PRINT_G(KERN_ERR, "Cannot register the driver\n");
613 goto out; 588 goto out;
614 } 589 }
615 590
616 ETH1394_PRINT (KERN_INFO, dev->name, "IEEE-1394 IPv4 over 1394 Ethernet (fw-host%d)\n", 591 ETH1394_PRINT(KERN_INFO, dev->name, "IPv4 over IEEE 1394 (fw-host%d)\n",
617 host->id); 592 host->id);
618 593
619 hi->host = host; 594 hi->host = host;
620 hi->dev = dev; 595 hi->dev = dev;
@@ -623,61 +598,37 @@ static void ether1394_add_host (struct hpsb_host *host)
623 * be checked when the eth device is opened. */ 598 * be checked when the eth device is opened. */
624 priv->broadcast_channel = host->csr.broadcast_channel & 0x3f; 599 priv->broadcast_channel = host->csr.broadcast_channel & 0x3f;
625 600
626 priv->iso = hpsb_iso_recv_init(host, 601 ether1394_recv_init(priv);
627 ETHER1394_ISO_BUF_SIZE,
628 ETHER1394_GASP_BUFFERS,
629 priv->broadcast_channel,
630 HPSB_ISO_DMA_PACKET_PER_BUFFER,
631 1, ether1394_iso);
632 if (priv->iso == NULL) {
633 ETH1394_PRINT(KERN_ERR, dev->name,
634 "Could not allocate isochronous receive context "
635 "for the broadcast channel\n");
636 priv->bc_state = ETHER1394_BC_ERROR;
637 } else {
638 if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0)
639 priv->bc_state = ETHER1394_BC_STOPPED;
640 else
641 priv->bc_state = ETHER1394_BC_RUNNING;
642 }
643
644 return; 602 return;
645
646out: 603out:
647 if (dev != NULL) 604 if (dev)
648 free_netdev(dev); 605 free_netdev(dev);
649 if (hi) 606 if (hi)
650 hpsb_destroy_hostinfo(&eth1394_highlevel, host); 607 hpsb_destroy_hostinfo(&eth1394_highlevel, host);
651 608 hpsb_unregister_addrspace(&eth1394_highlevel, host, fifo_addr);
652 return; 609 hpsb_config_rom_ip1394_remove(host);
653} 610}
654 611
655/* Remove a card from our list */ 612/* Remove a card from our list */
656static void ether1394_remove_host (struct hpsb_host *host) 613static void ether1394_remove_host(struct hpsb_host *host)
657{ 614{
658 struct eth1394_host_info *hi; 615 struct eth1394_host_info *hi;
616 struct eth1394_priv *priv;
659 617
660 hi = hpsb_get_hostinfo(&eth1394_highlevel, host); 618 hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
661 if (hi != NULL) { 619 if (!hi)
662 struct eth1394_priv *priv = netdev_priv(hi->dev); 620 return;
663 621 priv = netdev_priv(hi->dev);
664 hpsb_unregister_addrspace(&eth1394_highlevel, host, 622 hpsb_unregister_addrspace(&eth1394_highlevel, host, priv->local_fifo);
665 priv->local_fifo); 623 hpsb_config_rom_ip1394_remove(host);
666 624 if (priv->iso)
667 if (priv->iso != NULL) 625 hpsb_iso_shutdown(priv->iso);
668 hpsb_iso_shutdown(priv->iso); 626 unregister_netdev(hi->dev);
669 627 free_netdev(hi->dev);
670 if (hi->dev) {
671 unregister_netdev (hi->dev);
672 free_netdev(hi->dev);
673 }
674 }
675
676 return;
677} 628}
678 629
679/* A reset has just arisen */ 630/* A bus reset happened */
680static void ether1394_host_reset (struct hpsb_host *host) 631static void ether1394_host_reset(struct hpsb_host *host)
681{ 632{
682 struct eth1394_host_info *hi; 633 struct eth1394_host_info *hi;
683 struct eth1394_priv *priv; 634 struct eth1394_priv *priv;
@@ -690,24 +641,23 @@ static void ether1394_host_reset (struct hpsb_host *host)
690 hi = hpsb_get_hostinfo(&eth1394_highlevel, host); 641 hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
691 642
692 /* This can happen for hosts that we don't use */ 643 /* This can happen for hosts that we don't use */
693 if (hi == NULL) 644 if (!hi)
694 return; 645 return;
695 646
696 dev = hi->dev; 647 dev = hi->dev;
697 priv = (struct eth1394_priv *)netdev_priv(dev); 648 priv = netdev_priv(dev);
698 649
699 /* Reset our private host data, but not our mtu */ 650 /* Reset our private host data, but not our MTU */
700 netif_stop_queue (dev); 651 netif_stop_queue(dev);
701 ether1394_reset_priv (dev, 0); 652 ether1394_reset_priv(dev, 0);
702 653
703 list_for_each_entry(node, &priv->ip_node_list, list) { 654 list_for_each_entry(node, &priv->ip_node_list, list) {
704 node_info = (struct eth1394_node_info*)node->ud->device.driver_data; 655 node_info = node->ud->device.driver_data;
705 656
706 spin_lock_irqsave(&node_info->pdg.lock, flags); 657 spin_lock_irqsave(&node_info->pdg.lock, flags);
707 658
708 list_for_each_safe(lh, n, &node_info->pdg.list) { 659 list_for_each_safe(lh, n, &node_info->pdg.list)
709 purge_partial_datagram(lh); 660 purge_partial_datagram(lh);
710 }
711 661
712 INIT_LIST_HEAD(&(node_info->pdg.list)); 662 INIT_LIST_HEAD(&(node_info->pdg.list));
713 node_info->pdg.sz = 0; 663 node_info->pdg.sz = 0;
@@ -715,7 +665,7 @@ static void ether1394_host_reset (struct hpsb_host *host)
715 spin_unlock_irqrestore(&node_info->pdg.lock, flags); 665 spin_unlock_irqrestore(&node_info->pdg.lock, flags);
716 } 666 }
717 667
718 netif_wake_queue (dev); 668 netif_wake_queue(dev);
719} 669}
720 670
721/****************************************** 671/******************************************
@@ -723,7 +673,6 @@ static void ether1394_host_reset (struct hpsb_host *host)
723 ******************************************/ 673 ******************************************/
724/* These functions have been adapted from net/ethernet/eth.c */ 674/* These functions have been adapted from net/ethernet/eth.c */
725 675
726
727/* Create a fake MAC header for an arbitrary protocol layer. 676/* Create a fake MAC header for an arbitrary protocol layer.
728 * saddr=NULL means use device source address 677 * saddr=NULL means use device source address
729 * daddr=NULL means leave destination address (eg unresolved arp). */ 678 * daddr=NULL means leave destination address (eg unresolved arp). */
@@ -731,25 +680,24 @@ static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
731 unsigned short type, void *daddr, void *saddr, 680 unsigned short type, void *daddr, void *saddr,
732 unsigned len) 681 unsigned len)
733{ 682{
734 struct eth1394hdr *eth = (struct eth1394hdr *)skb_push(skb, ETH1394_HLEN); 683 struct eth1394hdr *eth =
684 (struct eth1394hdr *)skb_push(skb, ETH1394_HLEN);
735 685
736 eth->h_proto = htons(type); 686 eth->h_proto = htons(type);
737 687
738 if (dev->flags & (IFF_LOOPBACK|IFF_NOARP)) { 688 if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
739 memset(eth->h_dest, 0, dev->addr_len); 689 memset(eth->h_dest, 0, dev->addr_len);
740 return(dev->hard_header_len); 690 return dev->hard_header_len;
741 } 691 }
742 692
743 if (daddr) { 693 if (daddr) {
744 memcpy(eth->h_dest,daddr,dev->addr_len); 694 memcpy(eth->h_dest, daddr, dev->addr_len);
745 return dev->hard_header_len; 695 return dev->hard_header_len;
746 } 696 }
747 697
748 return -dev->hard_header_len; 698 return -dev->hard_header_len;
749
750} 699}
751 700
752
753/* Rebuild the faked MAC header. This is called after an ARP 701/* Rebuild the faked MAC header. This is called after an ARP
754 * (or in future other address resolution) has completed on this 702 * (or in future other address resolution) has completed on this
755 * sk_buff. We now let ARP fill in the other fields. 703 * sk_buff. We now let ARP fill in the other fields.
@@ -760,38 +708,30 @@ static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
760static int ether1394_rebuild_header(struct sk_buff *skb) 708static int ether1394_rebuild_header(struct sk_buff *skb)
761{ 709{
762 struct eth1394hdr *eth = (struct eth1394hdr *)skb->data; 710 struct eth1394hdr *eth = (struct eth1394hdr *)skb->data;
763 struct net_device *dev = skb->dev;
764 711
765 switch (eth->h_proto) { 712 if (eth->h_proto == htons(ETH_P_IP))
766 713 return arp_find((unsigned char *)&eth->h_dest, skb);
767#ifdef CONFIG_INET
768 case __constant_htons(ETH_P_IP):
769 return arp_find((unsigned char*)&eth->h_dest, skb);
770#endif
771 default:
772 ETH1394_PRINT(KERN_DEBUG, dev->name,
773 "unable to resolve type %04x addresses.\n",
774 ntohs(eth->h_proto));
775 break;
776 }
777 714
715 ETH1394_PRINT(KERN_DEBUG, skb->dev->name,
716 "unable to resolve type %04x addresses\n",
717 ntohs(eth->h_proto));
778 return 0; 718 return 0;
779} 719}
780 720
781static int ether1394_header_parse(struct sk_buff *skb, unsigned char *haddr) 721static int ether1394_header_parse(struct sk_buff *skb, unsigned char *haddr)
782{ 722{
783 struct net_device *dev = skb->dev; 723 struct net_device *dev = skb->dev;
724
784 memcpy(haddr, dev->dev_addr, ETH1394_ALEN); 725 memcpy(haddr, dev->dev_addr, ETH1394_ALEN);
785 return ETH1394_ALEN; 726 return ETH1394_ALEN;
786} 727}
787 728
788
789static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh) 729static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh)
790{ 730{
791 unsigned short type = hh->hh_type; 731 unsigned short type = hh->hh_type;
792 struct eth1394hdr *eth = (struct eth1394hdr*)(((u8*)hh->hh_data) +
793 (16 - ETH1394_HLEN));
794 struct net_device *dev = neigh->dev; 732 struct net_device *dev = neigh->dev;
733 struct eth1394hdr *eth =
734 (struct eth1394hdr *)((u8 *)hh->hh_data + 16 - ETH1394_HLEN);
795 735
796 if (type == htons(ETH_P_802_3)) 736 if (type == htons(ETH_P_802_3))
797 return -1; 737 return -1;
@@ -808,38 +748,25 @@ static void ether1394_header_cache_update(struct hh_cache *hh,
808 struct net_device *dev, 748 struct net_device *dev,
809 unsigned char * haddr) 749 unsigned char * haddr)
810{ 750{
811 memcpy(((u8*)hh->hh_data) + (16 - ETH1394_HLEN), haddr, dev->addr_len); 751 memcpy((u8 *)hh->hh_data + 16 - ETH1394_HLEN, haddr, dev->addr_len);
812} 752}
813 753
814static int ether1394_mac_addr(struct net_device *dev, void *p)
815{
816 if (netif_running(dev))
817 return -EBUSY;
818
819 /* Not going to allow setting the MAC address, we really need to use
820 * the real one supplied by the hardware */
821 return -EINVAL;
822 }
823
824
825
826/****************************************** 754/******************************************
827 * Datagram reception code 755 * Datagram reception code
828 ******************************************/ 756 ******************************************/
829 757
830/* Copied from net/ethernet/eth.c */ 758/* Copied from net/ethernet/eth.c */
831static inline u16 ether1394_type_trans(struct sk_buff *skb, 759static u16 ether1394_type_trans(struct sk_buff *skb, struct net_device *dev)
832 struct net_device *dev)
833{ 760{
834 struct eth1394hdr *eth; 761 struct eth1394hdr *eth;
835 unsigned char *rawp; 762 unsigned char *rawp;
836 763
837 skb_reset_mac_header(skb); 764 skb_reset_mac_header(skb);
838 skb_pull (skb, ETH1394_HLEN); 765 skb_pull(skb, ETH1394_HLEN);
839 eth = eth1394_hdr(skb); 766 eth = eth1394_hdr(skb);
840 767
841 if (*eth->h_dest & 1) { 768 if (*eth->h_dest & 1) {
842 if (memcmp(eth->h_dest, dev->broadcast, dev->addr_len)==0) 769 if (memcmp(eth->h_dest, dev->broadcast, dev->addr_len) == 0)
843 skb->pkt_type = PACKET_BROADCAST; 770 skb->pkt_type = PACKET_BROADCAST;
844#if 0 771#if 0
845 else 772 else
@@ -848,47 +775,45 @@ static inline u16 ether1394_type_trans(struct sk_buff *skb,
848 } else { 775 } else {
849 if (memcmp(eth->h_dest, dev->dev_addr, dev->addr_len)) 776 if (memcmp(eth->h_dest, dev->dev_addr, dev->addr_len))
850 skb->pkt_type = PACKET_OTHERHOST; 777 skb->pkt_type = PACKET_OTHERHOST;
851 } 778 }
852 779
853 if (ntohs (eth->h_proto) >= 1536) 780 if (ntohs(eth->h_proto) >= 1536)
854 return eth->h_proto; 781 return eth->h_proto;
855 782
856 rawp = skb->data; 783 rawp = skb->data;
857 784
858 if (*(unsigned short *)rawp == 0xFFFF) 785 if (*(unsigned short *)rawp == 0xFFFF)
859 return htons (ETH_P_802_3); 786 return htons(ETH_P_802_3);
860 787
861 return htons (ETH_P_802_2); 788 return htons(ETH_P_802_2);
862} 789}
863 790
864/* Parse an encapsulated IP1394 header into an ethernet frame packet. 791/* Parse an encapsulated IP1394 header into an ethernet frame packet.
865 * We also perform ARP translation here, if need be. */ 792 * We also perform ARP translation here, if need be. */
866static inline u16 ether1394_parse_encap(struct sk_buff *skb, 793static u16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev,
867 struct net_device *dev, 794 nodeid_t srcid, nodeid_t destid,
868 nodeid_t srcid, nodeid_t destid, 795 u16 ether_type)
869 u16 ether_type)
870{ 796{
871 struct eth1394_priv *priv = netdev_priv(dev); 797 struct eth1394_priv *priv = netdev_priv(dev);
872 u64 dest_hw; 798 u64 dest_hw;
873 unsigned short ret = 0; 799 unsigned short ret = 0;
874 800
875 /* Setup our hw addresses. We use these to build the 801 /* Setup our hw addresses. We use these to build the ethernet header. */
876 * ethernet header. */
877 if (destid == (LOCAL_BUS | ALL_NODES)) 802 if (destid == (LOCAL_BUS | ALL_NODES))
878 dest_hw = ~0ULL; /* broadcast */ 803 dest_hw = ~0ULL; /* broadcast */
879 else 804 else
880 dest_hw = cpu_to_be64((((u64)priv->host->csr.guid_hi) << 32) | 805 dest_hw = cpu_to_be64((u64)priv->host->csr.guid_hi << 32 |
881 priv->host->csr.guid_lo); 806 priv->host->csr.guid_lo);
882 807
883 /* If this is an ARP packet, convert it. First, we want to make 808 /* If this is an ARP packet, convert it. First, we want to make
884 * use of some of the fields, since they tell us a little bit 809 * use of some of the fields, since they tell us a little bit
885 * about the sending machine. */ 810 * about the sending machine. */
886 if (ether_type == htons(ETH_P_ARP)) { 811 if (ether_type == htons(ETH_P_ARP)) {
887 struct eth1394_arp *arp1394 = (struct eth1394_arp*)skb->data; 812 struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
888 struct arphdr *arp = (struct arphdr *)skb->data; 813 struct arphdr *arp = (struct arphdr *)skb->data;
889 unsigned char *arp_ptr = (unsigned char *)(arp + 1); 814 unsigned char *arp_ptr = (unsigned char *)(arp + 1);
890 u64 fifo_addr = (u64)ntohs(arp1394->fifo_hi) << 32 | 815 u64 fifo_addr = (u64)ntohs(arp1394->fifo_hi) << 32 |
891 ntohl(arp1394->fifo_lo); 816 ntohl(arp1394->fifo_lo);
892 u8 max_rec = min(priv->host->csr.max_rec, 817 u8 max_rec = min(priv->host->csr.max_rec,
893 (u8)(arp1394->max_rec)); 818 (u8)(arp1394->max_rec));
894 int sspd = arp1394->sspd; 819 int sspd = arp1394->sspd;
@@ -902,16 +827,17 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
902 if (sspd > 5 || sspd < 0) 827 if (sspd > 5 || sspd < 0)
903 sspd = 0; 828 sspd = 0;
904 829
905 maxpayload = min(eth1394_speedto_maxpayload[sspd], (u16)(1 << (max_rec + 1))); 830 maxpayload = min(eth1394_speedto_maxpayload[sspd],
831 (u16)(1 << (max_rec + 1)));
906 832
907 guid = get_unaligned(&arp1394->s_uniq_id); 833 guid = get_unaligned(&arp1394->s_uniq_id);
908 node = eth1394_find_node_guid(&priv->ip_node_list, 834 node = eth1394_find_node_guid(&priv->ip_node_list,
909 be64_to_cpu(guid)); 835 be64_to_cpu(guid));
910 if (!node) { 836 if (!node)
911 return 0; 837 return 0;
912 }
913 838
914 node_info = (struct eth1394_node_info*)node->ud->device.driver_data; 839 node_info =
840 (struct eth1394_node_info *)node->ud->device.driver_data;
915 841
916 /* Update our speed/payload/fifo_offset table */ 842 /* Update our speed/payload/fifo_offset table */
917 node_info->maxpayload = maxpayload; 843 node_info->maxpayload = maxpayload;
@@ -930,7 +856,7 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
930 856
931 arp->ar_hln = 8; 857 arp->ar_hln = 8;
932 arp_ptr += arp->ar_hln; /* skip over sender unique id */ 858 arp_ptr += arp->ar_hln; /* skip over sender unique id */
933 *(u32*)arp_ptr = arp1394->sip; /* move sender IP addr */ 859 *(u32 *)arp_ptr = arp1394->sip; /* move sender IP addr */
934 arp_ptr += arp->ar_pln; /* skip over sender IP addr */ 860 arp_ptr += arp->ar_pln; /* skip over sender IP addr */
935 861
936 if (arp->ar_op == htons(ARPOP_REQUEST)) 862 if (arp->ar_op == htons(ARPOP_REQUEST))
@@ -947,65 +873,65 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
947 return ret; 873 return ret;
948} 874}
949 875
950static inline int fragment_overlap(struct list_head *frag_list, int offset, int len) 876static int fragment_overlap(struct list_head *frag_list, int offset, int len)
951{ 877{
952 struct fragment_info *fi; 878 struct fragment_info *fi;
879 int end = offset + len;
953 880
954 list_for_each_entry(fi, frag_list, list) { 881 list_for_each_entry(fi, frag_list, list)
955 if ( ! ((offset > (fi->offset + fi->len - 1)) || 882 if (offset < fi->offset + fi->len && end > fi->offset)
956 ((offset + len - 1) < fi->offset)))
957 return 1; 883 return 1;
958 } 884
959 return 0; 885 return 0;
960} 886}
961 887
962static inline struct list_head *find_partial_datagram(struct list_head *pdgl, int dgl) 888static struct list_head *find_partial_datagram(struct list_head *pdgl, int dgl)
963{ 889{
964 struct partial_datagram *pd; 890 struct partial_datagram *pd;
965 891
966 list_for_each_entry(pd, pdgl, list) { 892 list_for_each_entry(pd, pdgl, list)
967 if (pd->dgl == dgl) 893 if (pd->dgl == dgl)
968 return &pd->list; 894 return &pd->list;
969 } 895
970 return NULL; 896 return NULL;
971} 897}
972 898
973/* Assumes that new fragment does not overlap any existing fragments */ 899/* Assumes that new fragment does not overlap any existing fragments */
974static inline int new_fragment(struct list_head *frag_info, int offset, int len) 900static int new_fragment(struct list_head *frag_info, int offset, int len)
975{ 901{
976 struct list_head *lh; 902 struct list_head *lh;
977 struct fragment_info *fi, *fi2, *new; 903 struct fragment_info *fi, *fi2, *new;
978 904
979 list_for_each(lh, frag_info) { 905 list_for_each(lh, frag_info) {
980 fi = list_entry(lh, struct fragment_info, list); 906 fi = list_entry(lh, struct fragment_info, list);
981 if ((fi->offset + fi->len) == offset) { 907 if (fi->offset + fi->len == offset) {
982 /* The new fragment can be tacked on to the end */ 908 /* The new fragment can be tacked on to the end */
983 fi->len += len; 909 fi->len += len;
984 /* Did the new fragment plug a hole? */ 910 /* Did the new fragment plug a hole? */
985 fi2 = list_entry(lh->next, struct fragment_info, list); 911 fi2 = list_entry(lh->next, struct fragment_info, list);
986 if ((fi->offset + fi->len) == fi2->offset) { 912 if (fi->offset + fi->len == fi2->offset) {
987 /* glue fragments together */ 913 /* glue fragments together */
988 fi->len += fi2->len; 914 fi->len += fi2->len;
989 list_del(lh->next); 915 list_del(lh->next);
990 kfree(fi2); 916 kfree(fi2);
991 } 917 }
992 return 0; 918 return 0;
993 } else if ((offset + len) == fi->offset) { 919 } else if (offset + len == fi->offset) {
994 /* The new fragment can be tacked on to the beginning */ 920 /* The new fragment can be tacked on to the beginning */
995 fi->offset = offset; 921 fi->offset = offset;
996 fi->len += len; 922 fi->len += len;
997 /* Did the new fragment plug a hole? */ 923 /* Did the new fragment plug a hole? */
998 fi2 = list_entry(lh->prev, struct fragment_info, list); 924 fi2 = list_entry(lh->prev, struct fragment_info, list);
999 if ((fi2->offset + fi2->len) == fi->offset) { 925 if (fi2->offset + fi2->len == fi->offset) {
1000 /* glue fragments together */ 926 /* glue fragments together */
1001 fi2->len += fi->len; 927 fi2->len += fi->len;
1002 list_del(lh); 928 list_del(lh);
1003 kfree(fi); 929 kfree(fi);
1004 } 930 }
1005 return 0; 931 return 0;
1006 } else if (offset > (fi->offset + fi->len)) { 932 } else if (offset > fi->offset + fi->len) {
1007 break; 933 break;
1008 } else if ((offset + len) < fi->offset) { 934 } else if (offset + len < fi->offset) {
1009 lh = lh->prev; 935 lh = lh->prev;
1010 break; 936 break;
1011 } 937 }
@@ -1019,14 +945,12 @@ static inline int new_fragment(struct list_head *frag_info, int offset, int len)
1019 new->len = len; 945 new->len = len;
1020 946
1021 list_add(&new->list, lh); 947 list_add(&new->list, lh);
1022
1023 return 0; 948 return 0;
1024} 949}
1025 950
1026static inline int new_partial_datagram(struct net_device *dev, 951static int new_partial_datagram(struct net_device *dev, struct list_head *pdgl,
1027 struct list_head *pdgl, int dgl, 952 int dgl, int dg_size, char *frag_buf,
1028 int dg_size, char *frag_buf, 953 int frag_off, int frag_len)
1029 int frag_off, int frag_len)
1030{ 954{
1031 struct partial_datagram *new; 955 struct partial_datagram *new;
1032 956
@@ -1059,33 +983,33 @@ static inline int new_partial_datagram(struct net_device *dev,
1059 memcpy(new->pbuf + frag_off, frag_buf, frag_len); 983 memcpy(new->pbuf + frag_off, frag_buf, frag_len);
1060 984
1061 list_add(&new->list, pdgl); 985 list_add(&new->list, pdgl);
1062
1063 return 0; 986 return 0;
1064} 987}
1065 988
1066static inline int update_partial_datagram(struct list_head *pdgl, struct list_head *lh, 989static int update_partial_datagram(struct list_head *pdgl, struct list_head *lh,
1067 char *frag_buf, int frag_off, int frag_len) 990 char *frag_buf, int frag_off, int frag_len)
1068{ 991{
1069 struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list); 992 struct partial_datagram *pd =
993 list_entry(lh, struct partial_datagram, list);
1070 994
1071 if (new_fragment(&pd->frag_info, frag_off, frag_len) < 0) { 995 if (new_fragment(&pd->frag_info, frag_off, frag_len) < 0)
1072 return -ENOMEM; 996 return -ENOMEM;
1073 }
1074 997
1075 memcpy(pd->pbuf + frag_off, frag_buf, frag_len); 998 memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
1076 999
1077 /* Move list entry to beginnig of list so that oldest partial 1000 /* Move list entry to beginnig of list so that oldest partial
1078 * datagrams percolate to the end of the list */ 1001 * datagrams percolate to the end of the list */
1079 list_move(lh, pdgl); 1002 list_move(lh, pdgl);
1080
1081 return 0; 1003 return 0;
1082} 1004}
1083 1005
1084static inline int is_datagram_complete(struct list_head *lh, int dg_size) 1006static int is_datagram_complete(struct list_head *lh, int dg_size)
1085{ 1007{
1086 struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list); 1008 struct partial_datagram *pd;
1087 struct fragment_info *fi = list_entry(pd->frag_info.next, 1009 struct fragment_info *fi;
1088 struct fragment_info, list); 1010
1011 pd = list_entry(lh, struct partial_datagram, list);
1012 fi = list_entry(pd->frag_info.next, struct fragment_info, list);
1089 1013
1090 return (fi->len == dg_size); 1014 return (fi->len == dg_size);
1091} 1015}
@@ -1108,7 +1032,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1108 if (!ud) { 1032 if (!ud) {
1109 struct eth1394_node_ref *node; 1033 struct eth1394_node_ref *node;
1110 node = eth1394_find_node_nodeid(&priv->ip_node_list, srcid); 1034 node = eth1394_find_node_nodeid(&priv->ip_node_list, srcid);
1111 if (!node) { 1035 if (unlikely(!node)) {
1112 HPSB_PRINT(KERN_ERR, "ether1394 rx: sender nodeid " 1036 HPSB_PRINT(KERN_ERR, "ether1394 rx: sender nodeid "
1113 "lookup failure: " NODE_BUS_FMT, 1037 "lookup failure: " NODE_BUS_FMT,
1114 NODE_BUS_ARGS(priv->host, srcid)); 1038 NODE_BUS_ARGS(priv->host, srcid));
@@ -1120,7 +1044,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1120 priv->ud_list[NODEID_TO_NODE(srcid)] = ud; 1044 priv->ud_list[NODEID_TO_NODE(srcid)] = ud;
1121 } 1045 }
1122 1046
1123 node_info = (struct eth1394_node_info*)ud->device.driver_data; 1047 node_info = (struct eth1394_node_info *)ud->device.driver_data;
1124 1048
1125 /* First, did we receive a fragmented or unfragmented datagram? */ 1049 /* First, did we receive a fragmented or unfragmented datagram? */
1126 hdr->words.word1 = ntohs(hdr->words.word1); 1050 hdr->words.word1 = ntohs(hdr->words.word1);
@@ -1133,13 +1057,14 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1133 * high level network layer. */ 1057 * high level network layer. */
1134 1058
1135 skb = dev_alloc_skb(len + dev->hard_header_len + 15); 1059 skb = dev_alloc_skb(len + dev->hard_header_len + 15);
1136 if (!skb) { 1060 if (unlikely(!skb)) {
1137 HPSB_PRINT (KERN_ERR, "ether1394 rx: low on mem\n"); 1061 ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
1138 priv->stats.rx_dropped++; 1062 priv->stats.rx_dropped++;
1139 return -1; 1063 return -1;
1140 } 1064 }
1141 skb_reserve(skb, (dev->hard_header_len + 15) & ~15); 1065 skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
1142 memcpy(skb_put(skb, len - hdr_len), buf + hdr_len, len - hdr_len); 1066 memcpy(skb_put(skb, len - hdr_len), buf + hdr_len,
1067 len - hdr_len);
1143 ether_type = hdr->uf.ether_type; 1068 ether_type = hdr->uf.ether_type;
1144 } else { 1069 } else {
1145 /* A datagram fragment has been received, now the fun begins. */ 1070 /* A datagram fragment has been received, now the fun begins. */
@@ -1224,9 +1149,8 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1224 1149
1225 pd = list_entry(lh, struct partial_datagram, list); 1150 pd = list_entry(lh, struct partial_datagram, list);
1226 1151
1227 if (hdr->common.lf == ETH1394_HDR_LF_FF) { 1152 if (hdr->common.lf == ETH1394_HDR_LF_FF)
1228 pd->ether_type = ether_type; 1153 pd->ether_type = ether_type;
1229 }
1230 1154
1231 if (is_datagram_complete(lh, dg_size)) { 1155 if (is_datagram_complete(lh, dg_size)) {
1232 ether_type = pd->ether_type; 1156 ether_type = pd->ether_type;
@@ -1253,8 +1177,8 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1253 skb->protocol = ether1394_parse_encap(skb, dev, srcid, destid, 1177 skb->protocol = ether1394_parse_encap(skb, dev, srcid, destid,
1254 ether_type); 1178 ether_type);
1255 1179
1256
1257 spin_lock_irqsave(&priv->lock, flags); 1180 spin_lock_irqsave(&priv->lock, flags);
1181
1258 if (!skb->protocol) { 1182 if (!skb->protocol) {
1259 priv->stats.rx_errors++; 1183 priv->stats.rx_errors++;
1260 priv->stats.rx_dropped++; 1184 priv->stats.rx_dropped++;
@@ -1288,9 +1212,9 @@ static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
1288 struct eth1394_host_info *hi; 1212 struct eth1394_host_info *hi;
1289 1213
1290 hi = hpsb_get_hostinfo(&eth1394_highlevel, host); 1214 hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
1291 if (hi == NULL) { 1215 if (unlikely(!hi)) {
1292 ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n", 1216 ETH1394_PRINT_G(KERN_ERR, "No net device at fw-host%d\n",
1293 host->driver->name); 1217 host->id);
1294 return RCODE_ADDRESS_ERROR; 1218 return RCODE_ADDRESS_ERROR;
1295 } 1219 }
1296 1220
@@ -1314,9 +1238,9 @@ static void ether1394_iso(struct hpsb_iso *iso)
1314 int nready; 1238 int nready;
1315 1239
1316 hi = hpsb_get_hostinfo(&eth1394_highlevel, iso->host); 1240 hi = hpsb_get_hostinfo(&eth1394_highlevel, iso->host);
1317 if (hi == NULL) { 1241 if (unlikely(!hi)) {
1318 ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n", 1242 ETH1394_PRINT_G(KERN_ERR, "No net device at fw-host%d\n",
1319 iso->host->driver->name); 1243 iso->host->id);
1320 return; 1244 return;
1321 } 1245 }
1322 1246
@@ -1326,20 +1250,20 @@ static void ether1394_iso(struct hpsb_iso *iso)
1326 for (i = 0; i < nready; i++) { 1250 for (i = 0; i < nready; i++) {
1327 struct hpsb_iso_packet_info *info = 1251 struct hpsb_iso_packet_info *info =
1328 &iso->infos[(iso->first_packet + i) % iso->buf_packets]; 1252 &iso->infos[(iso->first_packet + i) % iso->buf_packets];
1329 data = (quadlet_t*) (iso->data_buf.kvirt + info->offset); 1253 data = (quadlet_t *)(iso->data_buf.kvirt + info->offset);
1330 1254
1331 /* skip over GASP header */ 1255 /* skip over GASP header */
1332 buf = (char *)data + 8; 1256 buf = (char *)data + 8;
1333 len = info->len - 8; 1257 len = info->len - 8;
1334 1258
1335 specifier_id = (((be32_to_cpu(data[0]) & 0xffff) << 8) | 1259 specifier_id = (be32_to_cpu(data[0]) & 0xffff) << 8 |
1336 ((be32_to_cpu(data[1]) & 0xff000000) >> 24)); 1260 (be32_to_cpu(data[1]) & 0xff000000) >> 24;
1337 source_id = be32_to_cpu(data[0]) >> 16; 1261 source_id = be32_to_cpu(data[0]) >> 16;
1338 1262
1339 priv = netdev_priv(dev); 1263 priv = netdev_priv(dev);
1340 1264
1341 if (info->channel != (iso->host->csr.broadcast_channel & 0x3f) || 1265 if (info->channel != (iso->host->csr.broadcast_channel & 0x3f)
1342 specifier_id != ETHER1394_GASP_SPECIFIER_ID) { 1266 || specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
1343 /* This packet is not for us */ 1267 /* This packet is not for us */
1344 continue; 1268 continue;
1345 } 1269 }
@@ -1367,35 +1291,31 @@ static void ether1394_iso(struct hpsb_iso *iso)
1367 * speed, and unicast FIFO address information between the sender_unique_id 1291 * speed, and unicast FIFO address information between the sender_unique_id
1368 * and the IP addresses. 1292 * and the IP addresses.
1369 */ 1293 */
1370static inline void ether1394_arp_to_1394arp(struct sk_buff *skb, 1294static void ether1394_arp_to_1394arp(struct sk_buff *skb,
1371 struct net_device *dev) 1295 struct net_device *dev)
1372{ 1296{
1373 struct eth1394_priv *priv = netdev_priv(dev); 1297 struct eth1394_priv *priv = netdev_priv(dev);
1374
1375 struct arphdr *arp = (struct arphdr *)skb->data; 1298 struct arphdr *arp = (struct arphdr *)skb->data;
1376 unsigned char *arp_ptr = (unsigned char *)(arp + 1); 1299 unsigned char *arp_ptr = (unsigned char *)(arp + 1);
1377 struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data; 1300 struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
1378 1301
1379 /* Believe it or not, all that need to happen is sender IP get moved
1380 * and set hw_addr_len, max_rec, sspd, fifo_hi and fifo_lo. */
1381 arp1394->hw_addr_len = 16; 1302 arp1394->hw_addr_len = 16;
1382 arp1394->sip = *(u32*)(arp_ptr + ETH1394_ALEN); 1303 arp1394->sip = *(u32*)(arp_ptr + ETH1394_ALEN);
1383 arp1394->max_rec = priv->host->csr.max_rec; 1304 arp1394->max_rec = priv->host->csr.max_rec;
1384 arp1394->sspd = priv->host->csr.lnk_spd; 1305 arp1394->sspd = priv->host->csr.lnk_spd;
1385 arp1394->fifo_hi = htons (priv->local_fifo >> 32); 1306 arp1394->fifo_hi = htons(priv->local_fifo >> 32);
1386 arp1394->fifo_lo = htonl (priv->local_fifo & ~0x0); 1307 arp1394->fifo_lo = htonl(priv->local_fifo & ~0x0);
1387
1388 return;
1389} 1308}
1390 1309
1391/* We need to encapsulate the standard header with our own. We use the 1310/* We need to encapsulate the standard header with our own. We use the
1392 * ethernet header's proto for our own. */ 1311 * ethernet header's proto for our own. */
1393static inline unsigned int ether1394_encapsulate_prep(unsigned int max_payload, 1312static unsigned int ether1394_encapsulate_prep(unsigned int max_payload,
1394 __be16 proto, 1313 __be16 proto,
1395 union eth1394_hdr *hdr, 1314 union eth1394_hdr *hdr,
1396 u16 dg_size, u16 dgl) 1315 u16 dg_size, u16 dgl)
1397{ 1316{
1398 unsigned int adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_UF]; 1317 unsigned int adj_max_payload =
1318 max_payload - hdr_type_len[ETH1394_HDR_LF_UF];
1399 1319
1400 /* Does it all fit in one packet? */ 1320 /* Does it all fit in one packet? */
1401 if (dg_size <= adj_max_payload) { 1321 if (dg_size <= adj_max_payload) {
@@ -1408,19 +1328,19 @@ static inline unsigned int ether1394_encapsulate_prep(unsigned int max_payload,
1408 hdr->ff.dgl = dgl; 1328 hdr->ff.dgl = dgl;
1409 adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_FF]; 1329 adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_FF];
1410 } 1330 }
1411 return((dg_size + (adj_max_payload - 1)) / adj_max_payload); 1331 return (dg_size + adj_max_payload - 1) / adj_max_payload;
1412} 1332}
1413 1333
1414static inline unsigned int ether1394_encapsulate(struct sk_buff *skb, 1334static unsigned int ether1394_encapsulate(struct sk_buff *skb,
1415 unsigned int max_payload, 1335 unsigned int max_payload,
1416 union eth1394_hdr *hdr) 1336 union eth1394_hdr *hdr)
1417{ 1337{
1418 union eth1394_hdr *bufhdr; 1338 union eth1394_hdr *bufhdr;
1419 int ftype = hdr->common.lf; 1339 int ftype = hdr->common.lf;
1420 int hdrsz = hdr_type_len[ftype]; 1340 int hdrsz = hdr_type_len[ftype];
1421 unsigned int adj_max_payload = max_payload - hdrsz; 1341 unsigned int adj_max_payload = max_payload - hdrsz;
1422 1342
1423 switch(ftype) { 1343 switch (ftype) {
1424 case ETH1394_HDR_LF_UF: 1344 case ETH1394_HDR_LF_UF:
1425 bufhdr = (union eth1394_hdr *)skb_push(skb, hdrsz); 1345 bufhdr = (union eth1394_hdr *)skb_push(skb, hdrsz);
1426 bufhdr->words.word1 = htons(hdr->words.word1); 1346 bufhdr->words.word1 = htons(hdr->words.word1);
@@ -1449,11 +1369,10 @@ static inline unsigned int ether1394_encapsulate(struct sk_buff *skb,
1449 bufhdr->words.word3 = htons(hdr->words.word3); 1369 bufhdr->words.word3 = htons(hdr->words.word3);
1450 bufhdr->words.word4 = 0; 1370 bufhdr->words.word4 = 0;
1451 } 1371 }
1452
1453 return min(max_payload, skb->len); 1372 return min(max_payload, skb->len);
1454} 1373}
1455 1374
1456static inline struct hpsb_packet *ether1394_alloc_common_packet(struct hpsb_host *host) 1375static struct hpsb_packet *ether1394_alloc_common_packet(struct hpsb_host *host)
1457{ 1376{
1458 struct hpsb_packet *p; 1377 struct hpsb_packet *p;
1459 1378
@@ -1466,61 +1385,57 @@ static inline struct hpsb_packet *ether1394_alloc_common_packet(struct hpsb_host
1466 return p; 1385 return p;
1467} 1386}
1468 1387
1469static inline int ether1394_prep_write_packet(struct hpsb_packet *p, 1388static int ether1394_prep_write_packet(struct hpsb_packet *p,
1470 struct hpsb_host *host, 1389 struct hpsb_host *host, nodeid_t node,
1471 nodeid_t node, u64 addr, 1390 u64 addr, void *data, int tx_len)
1472 void * data, int tx_len)
1473{ 1391{
1474 p->node_id = node; 1392 p->node_id = node;
1475 p->data = NULL; 1393 p->data = NULL;
1476 1394
1477 p->tcode = TCODE_WRITEB; 1395 p->tcode = TCODE_WRITEB;
1478 p->header[1] = (host->node_id << 16) | (addr >> 32); 1396 p->header[1] = host->node_id << 16 | addr >> 32;
1479 p->header[2] = addr & 0xffffffff; 1397 p->header[2] = addr & 0xffffffff;
1480 1398
1481 p->header_size = 16; 1399 p->header_size = 16;
1482 p->expect_response = 1; 1400 p->expect_response = 1;
1483 1401
1484 if (hpsb_get_tlabel(p)) { 1402 if (hpsb_get_tlabel(p)) {
1485 ETH1394_PRINT_G(KERN_ERR, "No more tlabels left while sending " 1403 ETH1394_PRINT_G(KERN_ERR, "Out of tlabels\n");
1486 "to node " NODE_BUS_FMT "\n", NODE_BUS_ARGS(host, node));
1487 return -1; 1404 return -1;
1488 } 1405 }
1489 p->header[0] = (p->node_id << 16) | (p->tlabel << 10) 1406 p->header[0] =
1490 | (1 << 8) | (TCODE_WRITEB << 4); 1407 p->node_id << 16 | p->tlabel << 10 | 1 << 8 | TCODE_WRITEB << 4;
1491 1408
1492 p->header[3] = tx_len << 16; 1409 p->header[3] = tx_len << 16;
1493 p->data_size = (tx_len + 3) & ~3; 1410 p->data_size = (tx_len + 3) & ~3;
1494 p->data = (quadlet_t*)data; 1411 p->data = data;
1495 1412
1496 return 0; 1413 return 0;
1497} 1414}
1498 1415
1499static inline void ether1394_prep_gasp_packet(struct hpsb_packet *p, 1416static void ether1394_prep_gasp_packet(struct hpsb_packet *p,
1500 struct eth1394_priv *priv, 1417 struct eth1394_priv *priv,
1501 struct sk_buff *skb, int length) 1418 struct sk_buff *skb, int length)
1502{ 1419{
1503 p->header_size = 4; 1420 p->header_size = 4;
1504 p->tcode = TCODE_STREAM_DATA; 1421 p->tcode = TCODE_STREAM_DATA;
1505 1422
1506 p->header[0] = (length << 16) | (3 << 14) 1423 p->header[0] = length << 16 | 3 << 14 | priv->broadcast_channel << 8 |
1507 | ((priv->broadcast_channel) << 8) 1424 TCODE_STREAM_DATA << 4;
1508 | (TCODE_STREAM_DATA << 4);
1509 p->data_size = length; 1425 p->data_size = length;
1510 p->data = ((quadlet_t*)skb->data) - 2; 1426 p->data = (quadlet_t *)skb->data - 2;
1511 p->data[0] = cpu_to_be32((priv->host->node_id << 16) | 1427 p->data[0] = cpu_to_be32(priv->host->node_id << 16 |
1512 ETHER1394_GASP_SPECIFIER_ID_HI); 1428 ETHER1394_GASP_SPECIFIER_ID_HI);
1513 p->data[1] = cpu_to_be32((ETHER1394_GASP_SPECIFIER_ID_LO << 24) | 1429 p->data[1] = cpu_to_be32(ETHER1394_GASP_SPECIFIER_ID_LO << 24 |
1514 ETHER1394_GASP_VERSION); 1430 ETHER1394_GASP_VERSION);
1515 1431
1516 /* Setting the node id to ALL_NODES (not LOCAL_BUS | ALL_NODES)
1517 * prevents hpsb_send_packet() from setting the speed to an arbitrary
1518 * value based on packet->node_id if packet->node_id is not set. */
1519 p->node_id = ALL_NODES;
1520 p->speed_code = priv->bc_sspd; 1432 p->speed_code = priv->bc_sspd;
1433
1434 /* prevent hpsb_send_packet() from overriding our speed code */
1435 p->node_id = LOCAL_BUS | ALL_NODES;
1521} 1436}
1522 1437
1523static inline void ether1394_free_packet(struct hpsb_packet *packet) 1438static void ether1394_free_packet(struct hpsb_packet *packet)
1524{ 1439{
1525 if (packet->tcode != TCODE_STREAM_DATA) 1440 if (packet->tcode != TCODE_STREAM_DATA)
1526 hpsb_free_tlabel(packet); 1441 hpsb_free_tlabel(packet);
@@ -1539,7 +1454,7 @@ static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
1539 return -1; 1454 return -1;
1540 1455
1541 if (ptask->tx_type == ETH1394_GASP) { 1456 if (ptask->tx_type == ETH1394_GASP) {
1542 int length = tx_len + (2 * sizeof(quadlet_t)); 1457 int length = tx_len + 2 * sizeof(quadlet_t);
1543 1458
1544 ether1394_prep_gasp_packet(packet, priv, ptask->skb, length); 1459 ether1394_prep_gasp_packet(packet, priv, ptask->skb, length);
1545 } else if (ether1394_prep_write_packet(packet, priv->host, 1460 } else if (ether1394_prep_write_packet(packet, priv->host,
@@ -1562,13 +1477,11 @@ static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
1562 return 0; 1477 return 0;
1563} 1478}
1564 1479
1565
1566/* Task function to be run when a datagram transmission is completed */ 1480/* Task function to be run when a datagram transmission is completed */
1567static inline void ether1394_dg_complete(struct packet_task *ptask, int fail) 1481static void ether1394_dg_complete(struct packet_task *ptask, int fail)
1568{ 1482{
1569 struct sk_buff *skb = ptask->skb; 1483 struct sk_buff *skb = ptask->skb;
1570 struct net_device *dev = skb->dev; 1484 struct eth1394_priv *priv = netdev_priv(skb->dev);
1571 struct eth1394_priv *priv = netdev_priv(dev);
1572 unsigned long flags; 1485 unsigned long flags;
1573 1486
1574 /* Statistics */ 1487 /* Statistics */
@@ -1586,7 +1499,6 @@ static inline void ether1394_dg_complete(struct packet_task *ptask, int fail)
1586 kmem_cache_free(packet_task_cache, ptask); 1499 kmem_cache_free(packet_task_cache, ptask);
1587} 1500}
1588 1501
1589
1590/* Callback for when a packet has been sent and the status of that packet is 1502/* Callback for when a packet has been sent and the status of that packet is
1591 * known */ 1503 * known */
1592static void ether1394_complete_cb(void *__ptask) 1504static void ether1394_complete_cb(void *__ptask)
@@ -1614,19 +1526,15 @@ static void ether1394_complete_cb(void *__ptask)
1614 } 1526 }
1615} 1527}
1616 1528
1617
1618
1619/* Transmit a packet (called by kernel) */ 1529/* Transmit a packet (called by kernel) */
1620static int ether1394_tx (struct sk_buff *skb, struct net_device *dev) 1530static int ether1394_tx(struct sk_buff *skb, struct net_device *dev)
1621{ 1531{
1622 gfp_t kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
1623 struct eth1394hdr *eth; 1532 struct eth1394hdr *eth;
1624 struct eth1394_priv *priv = netdev_priv(dev); 1533 struct eth1394_priv *priv = netdev_priv(dev);
1625 __be16 proto; 1534 __be16 proto;
1626 unsigned long flags; 1535 unsigned long flags;
1627 nodeid_t dest_node; 1536 nodeid_t dest_node;
1628 eth1394_tx_type tx_type; 1537 eth1394_tx_type tx_type;
1629 int ret = 0;
1630 unsigned int tx_len; 1538 unsigned int tx_len;
1631 unsigned int max_payload; 1539 unsigned int max_payload;
1632 u16 dg_size; 1540 u16 dg_size;
@@ -1635,29 +1543,24 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
1635 struct eth1394_node_ref *node; 1543 struct eth1394_node_ref *node;
1636 struct eth1394_node_info *node_info = NULL; 1544 struct eth1394_node_info *node_info = NULL;
1637 1545
1638 ptask = kmem_cache_alloc(packet_task_cache, kmflags); 1546 ptask = kmem_cache_alloc(packet_task_cache, GFP_ATOMIC);
1639 if (ptask == NULL) { 1547 if (ptask == NULL)
1640 ret = -ENOMEM;
1641 goto fail; 1548 goto fail;
1642 }
1643 1549
1644 /* XXX Ignore this for now. Noticed that when MacOSX is the IRM, 1550 /* XXX Ignore this for now. Noticed that when MacOSX is the IRM,
1645 * it does not set our validity bit. We need to compensate for 1551 * it does not set our validity bit. We need to compensate for
1646 * that somewhere else, but not in eth1394. */ 1552 * that somewhere else, but not in eth1394. */
1647#if 0 1553#if 0
1648 if ((priv->host->csr.broadcast_channel & 0xc0000000) != 0xc0000000) { 1554 if ((priv->host->csr.broadcast_channel & 0xc0000000) != 0xc0000000)
1649 ret = -EAGAIN;
1650 goto fail; 1555 goto fail;
1651 }
1652#endif 1556#endif
1653 1557
1654 if ((skb = skb_share_check (skb, kmflags)) == NULL) { 1558 skb = skb_share_check(skb, GFP_ATOMIC);
1655 ret = -ENOMEM; 1559 if (!skb)
1656 goto fail; 1560 goto fail;
1657 }
1658 1561
1659 /* Get rid of the fake eth1394 header, but save a pointer */ 1562 /* Get rid of the fake eth1394 header, but save a pointer */
1660 eth = (struct eth1394hdr*)skb->data; 1563 eth = (struct eth1394hdr *)skb->data;
1661 skb_pull(skb, ETH1394_HLEN); 1564 skb_pull(skb, ETH1394_HLEN);
1662 1565
1663 proto = eth->h_proto; 1566 proto = eth->h_proto;
@@ -1672,7 +1575,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
1672 tx_type = ETH1394_GASP; 1575 tx_type = ETH1394_GASP;
1673 dest_node = LOCAL_BUS | ALL_NODES; 1576 dest_node = LOCAL_BUS | ALL_NODES;
1674 max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD; 1577 max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD;
1675 BUG_ON(max_payload < (512 - ETHER1394_GASP_OVERHEAD)); 1578 BUG_ON(max_payload < 512 - ETHER1394_GASP_OVERHEAD);
1676 dgl = priv->bc_dgl; 1579 dgl = priv->bc_dgl;
1677 if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF]) 1580 if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
1678 priv->bc_dgl++; 1581 priv->bc_dgl++;
@@ -1681,19 +1584,17 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
1681 1584
1682 node = eth1394_find_node_guid(&priv->ip_node_list, 1585 node = eth1394_find_node_guid(&priv->ip_node_list,
1683 be64_to_cpu(guid)); 1586 be64_to_cpu(guid));
1684 if (!node) { 1587 if (!node)
1685 ret = -EAGAIN;
1686 goto fail; 1588 goto fail;
1687 } 1589
1688 node_info = (struct eth1394_node_info*)node->ud->device.driver_data; 1590 node_info =
1689 if (node_info->fifo == CSR1212_INVALID_ADDR_SPACE) { 1591 (struct eth1394_node_info *)node->ud->device.driver_data;
1690 ret = -EAGAIN; 1592 if (node_info->fifo == CSR1212_INVALID_ADDR_SPACE)
1691 goto fail; 1593 goto fail;
1692 }
1693 1594
1694 dest_node = node->ud->ne->nodeid; 1595 dest_node = node->ud->ne->nodeid;
1695 max_payload = node_info->maxpayload; 1596 max_payload = node_info->maxpayload;
1696 BUG_ON(max_payload < (512 - ETHER1394_GASP_OVERHEAD)); 1597 BUG_ON(max_payload < 512 - ETHER1394_GASP_OVERHEAD);
1697 1598
1698 dgl = node_info->dgl; 1599 dgl = node_info->dgl;
1699 if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF]) 1600 if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
@@ -1703,7 +1604,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
1703 1604
1704 /* If this is an ARP packet, convert it */ 1605 /* If this is an ARP packet, convert it */
1705 if (proto == htons(ETH_P_ARP)) 1606 if (proto == htons(ETH_P_ARP))
1706 ether1394_arp_to_1394arp (skb, dev); 1607 ether1394_arp_to_1394arp(skb, dev);
1707 1608
1708 ptask->hdr.words.word1 = 0; 1609 ptask->hdr.words.word1 = 0;
1709 ptask->hdr.words.word2 = 0; 1610 ptask->hdr.words.word2 = 0;
@@ -1726,9 +1627,8 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
1726 1627
1727 ptask->tx_type = tx_type; 1628 ptask->tx_type = tx_type;
1728 ptask->max_payload = max_payload; 1629 ptask->max_payload = max_payload;
1729 ptask->outstanding_pkts = ether1394_encapsulate_prep(max_payload, proto, 1630 ptask->outstanding_pkts = ether1394_encapsulate_prep(max_payload,
1730 &ptask->hdr, dg_size, 1631 proto, &ptask->hdr, dg_size, dgl);
1731 dgl);
1732 1632
1733 /* Add the encapsulation header to the fragment */ 1633 /* Add the encapsulation header to the fragment */
1734 tx_len = ether1394_encapsulate(skb, max_payload, &ptask->hdr); 1634 tx_len = ether1394_encapsulate(skb, max_payload, &ptask->hdr);
@@ -1737,7 +1637,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
1737 goto fail; 1637 goto fail;
1738 1638
1739 netif_wake_queue(dev); 1639 netif_wake_queue(dev);
1740 return 0; 1640 return NETDEV_TX_OK;
1741fail: 1641fail:
1742 if (ptask) 1642 if (ptask)
1743 kmem_cache_free(packet_task_cache, ptask); 1643 kmem_cache_free(packet_task_cache, ptask);
@@ -1745,40 +1645,56 @@ fail:
1745 if (skb != NULL) 1645 if (skb != NULL)
1746 dev_kfree_skb(skb); 1646 dev_kfree_skb(skb);
1747 1647
1748 spin_lock_irqsave (&priv->lock, flags); 1648 spin_lock_irqsave(&priv->lock, flags);
1749 priv->stats.tx_dropped++; 1649 priv->stats.tx_dropped++;
1750 priv->stats.tx_errors++; 1650 priv->stats.tx_errors++;
1751 spin_unlock_irqrestore (&priv->lock, flags); 1651 spin_unlock_irqrestore(&priv->lock, flags);
1752 1652
1753 if (netif_queue_stopped(dev)) 1653 if (netif_queue_stopped(dev))
1754 netif_wake_queue(dev); 1654 netif_wake_queue(dev);
1755 1655
1756 return 0; /* returning non-zero causes serious problems */ 1656 /*
1657 * FIXME: According to a patch from 2003-02-26, "returning non-zero
1658 * causes serious problems" here, allegedly. Before that patch,
1659 * -ERRNO was returned which is not appropriate under Linux 2.6.
1660 * Perhaps more needs to be done? Stop the queue in serious
1661 * conditions and restart it elsewhere?
1662 */
1663 /* return NETDEV_TX_BUSY; */
1664 return NETDEV_TX_OK;
1757} 1665}
1758 1666
1759static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1667static void ether1394_get_drvinfo(struct net_device *dev,
1668 struct ethtool_drvinfo *info)
1760{ 1669{
1761 strcpy (info->driver, driver_name); 1670 strcpy(info->driver, driver_name);
1762 /* FIXME XXX provide sane businfo */ 1671 strcpy(info->bus_info, "ieee1394"); /* FIXME provide more detail? */
1763 strcpy (info->bus_info, "ieee1394");
1764} 1672}
1765 1673
1766static struct ethtool_ops ethtool_ops = { 1674static struct ethtool_ops ethtool_ops = {
1767 .get_drvinfo = ether1394_get_drvinfo 1675 .get_drvinfo = ether1394_get_drvinfo
1768}; 1676};
1769 1677
1770static int __init ether1394_init_module (void) 1678static int __init ether1394_init_module(void)
1771{ 1679{
1772 packet_task_cache = kmem_cache_create("packet_task", sizeof(struct packet_task), 1680 int err;
1681
1682 packet_task_cache = kmem_cache_create("packet_task",
1683 sizeof(struct packet_task),
1773 0, 0, NULL, NULL); 1684 0, 0, NULL, NULL);
1685 if (!packet_task_cache)
1686 return -ENOMEM;
1774 1687
1775 /* Register ourselves as a highlevel driver */
1776 hpsb_register_highlevel(&eth1394_highlevel); 1688 hpsb_register_highlevel(&eth1394_highlevel);
1777 1689 err = hpsb_register_protocol(&eth1394_proto_driver);
1778 return hpsb_register_protocol(&eth1394_proto_driver); 1690 if (err) {
1691 hpsb_unregister_highlevel(&eth1394_highlevel);
1692 kmem_cache_destroy(packet_task_cache);
1693 }
1694 return err;
1779} 1695}
1780 1696
1781static void __exit ether1394_exit_module (void) 1697static void __exit ether1394_exit_module(void)
1782{ 1698{
1783 hpsb_unregister_protocol(&eth1394_proto_driver); 1699 hpsb_unregister_protocol(&eth1394_proto_driver);
1784 hpsb_unregister_highlevel(&eth1394_highlevel); 1700 hpsb_unregister_highlevel(&eth1394_highlevel);
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
index 1e8356535149..a3439ee7cb4e 100644
--- a/drivers/ieee1394/eth1394.h
+++ b/drivers/ieee1394/eth1394.h
@@ -25,8 +25,11 @@
25#define __ETH1394_H 25#define __ETH1394_H
26 26
27#include <linux/netdevice.h> 27#include <linux/netdevice.h>
28#include <linux/skbuff.h>
29#include <asm/byteorder.h>
28 30
29#include "ieee1394.h" 31#include "ieee1394.h"
32#include "ieee1394_types.h"
30 33
31/* Register for incoming packets. This is 4096 bytes, which supports up to 34/* Register for incoming packets. This is 4096 bytes, which supports up to
32 * S3200 (per Table 16-3 of IEEE 1394b-2002). */ 35 * S3200 (per Table 16-3 of IEEE 1394b-2002). */
@@ -34,22 +37,15 @@
34 37
35/* GASP identifier numbers for IPv4 over IEEE 1394 */ 38/* GASP identifier numbers for IPv4 over IEEE 1394 */
36#define ETHER1394_GASP_SPECIFIER_ID 0x00005E 39#define ETHER1394_GASP_SPECIFIER_ID 0x00005E
37#define ETHER1394_GASP_SPECIFIER_ID_HI ((ETHER1394_GASP_SPECIFIER_ID >> 8) & 0xffff) 40#define ETHER1394_GASP_SPECIFIER_ID_HI ((0x00005E >> 8) & 0xffff)
38#define ETHER1394_GASP_SPECIFIER_ID_LO (ETHER1394_GASP_SPECIFIER_ID & 0xff) 41#define ETHER1394_GASP_SPECIFIER_ID_LO (0x00005E & 0xff)
39#define ETHER1394_GASP_VERSION 1 42#define ETHER1394_GASP_VERSION 1
40 43
41#define ETHER1394_GASP_OVERHEAD (2 * sizeof(quadlet_t)) /* GASP header overhead */ 44#define ETHER1394_GASP_OVERHEAD (2 * sizeof(quadlet_t)) /* for GASP header */
42 45
43#define ETHER1394_GASP_BUFFERS 16 46#define ETHER1394_GASP_BUFFERS 16
44 47
45/* rawiso buffer size - due to a limitation in rawiso, we must limit each 48#define NODE_SET (ALL_NODES + 1) /* Node set == 64 */
46 * GASP buffer to be less than PAGE_SIZE. */
47#define ETHER1394_ISO_BUF_SIZE ETHER1394_GASP_BUFFERS * \
48 min((unsigned int)PAGE_SIZE, \
49 2 * (1U << (priv->host->csr.max_rec + 1)))
50
51/* Node set == 64 */
52#define NODE_SET (ALL_NODES + 1)
53 49
54enum eth1394_bc_states { ETHER1394_BC_ERROR, 50enum eth1394_bc_states { ETHER1394_BC_ERROR,
55 ETHER1394_BC_RUNNING, 51 ETHER1394_BC_RUNNING,
@@ -85,19 +81,14 @@ struct eth1394hdr {
85 unsigned short h_proto; /* packet type ID field */ 81 unsigned short h_proto; /* packet type ID field */
86} __attribute__((packed)); 82} __attribute__((packed));
87 83
88#ifdef __KERNEL__
89#include <linux/skbuff.h>
90
91static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb) 84static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb)
92{ 85{
93 return (struct eth1394hdr *)skb_mac_header(skb); 86 return (struct eth1394hdr *)skb_mac_header(skb);
94} 87}
95#endif
96 88
97typedef enum {ETH1394_GASP, ETH1394_WRREQ} eth1394_tx_type; 89typedef enum {ETH1394_GASP, ETH1394_WRREQ} eth1394_tx_type;
98 90
99/* IP1394 headers */ 91/* IP1394 headers */
100#include <asm/byteorder.h>
101 92
102/* Unfragmented */ 93/* Unfragmented */
103#if defined __BIG_ENDIAN_BITFIELD 94#if defined __BIG_ENDIAN_BITFIELD
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 694da82d820b..83a493312751 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -70,8 +70,12 @@ static struct hl_host_info *hl_get_hostinfo(struct hpsb_highlevel *hl,
70 return NULL; 70 return NULL;
71} 71}
72 72
73/* Returns a per host/driver data structure that was previously stored by 73/**
74 * hpsb_create_hostinfo. */ 74 * hpsb_get_hostinfo - retrieve a hostinfo pointer bound to this driver/host
75 *
76 * Returns a per @host and @hl driver data structure that was previously stored
77 * by hpsb_create_hostinfo.
78 */
75void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host) 79void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
76{ 80{
77 struct hl_host_info *hi = hl_get_hostinfo(hl, host); 81 struct hl_host_info *hi = hl_get_hostinfo(hl, host);
@@ -79,7 +83,13 @@ void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
79 return hi ? hi->data : NULL; 83 return hi ? hi->data : NULL;
80} 84}
81 85
82/* If size is zero, then the return here is only valid for error checking */ 86/**
87 * hpsb_create_hostinfo - allocate a hostinfo pointer bound to this driver/host
88 *
89 * Allocate a hostinfo pointer backed by memory with @data_size and bind it to
90 * to this @hl driver and @host. If @data_size is zero, then the return here is
91 * only valid for error checking.
92 */
83void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host, 93void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
84 size_t data_size) 94 size_t data_size)
85{ 95{
@@ -113,6 +123,11 @@ void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
113 return data; 123 return data;
114} 124}
115 125
126/**
127 * hpsb_set_hostinfo - set the hostinfo pointer to something useful
128 *
129 * Usually follows a call to hpsb_create_hostinfo, where the size is 0.
130 */
116int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host, 131int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
117 void *data) 132 void *data)
118{ 133{
@@ -132,6 +147,11 @@ int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
132 return -EINVAL; 147 return -EINVAL;
133} 148}
134 149
150/**
151 * hpsb_destroy_hostinfo - free and remove a hostinfo pointer
152 *
153 * Free and remove the hostinfo pointer bound to this @hl driver and @host.
154 */
135void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host) 155void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
136{ 156{
137 struct hl_host_info *hi; 157 struct hl_host_info *hi;
@@ -147,6 +167,12 @@ void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host)
147 return; 167 return;
148} 168}
149 169
170/**
171 * hpsb_set_hostinfo_key - set an alternate lookup key for an hostinfo
172 *
173 * Sets an alternate lookup key for the hostinfo bound to this @hl driver and
174 * @host.
175 */
150void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host, 176void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
151 unsigned long key) 177 unsigned long key)
152{ 178{
@@ -158,6 +184,9 @@ void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
158 return; 184 return;
159} 185}
160 186
187/**
188 * hpsb_get_hostinfo_bykey - retrieve a hostinfo pointer by its alternate key
189 */
161void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key) 190void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key)
162{ 191{
163 struct hl_host_info *hi; 192 struct hl_host_info *hi;
@@ -189,6 +218,12 @@ static int highlevel_for_each_host_reg(struct hpsb_host *host, void *__data)
189 return 0; 218 return 0;
190} 219}
191 220
221/**
222 * hpsb_register_highlevel - register highlevel driver
223 *
224 * The name pointer in @hl has to stay valid at all times because the string is
225 * not copied.
226 */
192void hpsb_register_highlevel(struct hpsb_highlevel *hl) 227void hpsb_register_highlevel(struct hpsb_highlevel *hl)
193{ 228{
194 unsigned long flags; 229 unsigned long flags;
@@ -258,6 +293,9 @@ static int highlevel_for_each_host_unreg(struct hpsb_host *host, void *__data)
258 return 0; 293 return 0;
259} 294}
260 295
296/**
297 * hpsb_unregister_highlevel - unregister highlevel driver
298 */
261void hpsb_unregister_highlevel(struct hpsb_highlevel *hl) 299void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
262{ 300{
263 unsigned long flags; 301 unsigned long flags;
@@ -273,6 +311,19 @@ void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
273 nodemgr_for_each_host(hl, highlevel_for_each_host_unreg); 311 nodemgr_for_each_host(hl, highlevel_for_each_host_unreg);
274} 312}
275 313
314/**
315 * hpsb_allocate_and_register_addrspace - alloc' and reg' a host address space
316 *
317 * @start and @end are 48 bit pointers and have to be quadlet aligned.
318 * @end points to the first address behind the handled addresses. This
319 * function can be called multiple times for a single hpsb_highlevel @hl to
320 * implement sparse register sets. The requested region must not overlap any
321 * previously allocated region, otherwise registering will fail.
322 *
323 * It returns true for successful allocation. Address spaces can be
324 * unregistered with hpsb_unregister_addrspace. All remaining address spaces
325 * are automatically deallocated together with the hpsb_highlevel @hl.
326 */
276u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl, 327u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
277 struct hpsb_host *host, 328 struct hpsb_host *host,
278 struct hpsb_address_ops *ops, 329 struct hpsb_address_ops *ops,
@@ -348,6 +399,19 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
348 return retval; 399 return retval;
349} 400}
350 401
402/**
403 * hpsb_register_addrspace - register a host address space
404 *
405 * @start and @end are 48 bit pointers and have to be quadlet aligned.
406 * @end points to the first address behind the handled addresses. This
407 * function can be called multiple times for a single hpsb_highlevel @hl to
408 * implement sparse register sets. The requested region must not overlap any
409 * previously allocated region, otherwise registering will fail.
410 *
411 * It returns true for successful allocation. Address spaces can be
412 * unregistered with hpsb_unregister_addrspace. All remaining address spaces
413 * are automatically deallocated together with the hpsb_highlevel @hl.
414 */
351int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host, 415int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
352 struct hpsb_address_ops *ops, u64 start, u64 end) 416 struct hpsb_address_ops *ops, u64 start, u64 end)
353{ 417{
@@ -419,6 +483,11 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
419 return retval; 483 return retval;
420} 484}
421 485
486/**
487 * hpsb_listen_channel - enable receving a certain isochronous channel
488 *
489 * Reception is handled through the @hl's iso_receive op.
490 */
422int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host, 491int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
423 unsigned int channel) 492 unsigned int channel)
424{ 493{
@@ -431,6 +500,9 @@ int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
431 return 0; 500 return 0;
432} 501}
433 502
503/**
504 * hpsb_unlisten_channel - disable receving a certain isochronous channel
505 */
434void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host, 506void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
435 unsigned int channel) 507 unsigned int channel)
436{ 508{
@@ -528,6 +600,17 @@ void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
528 read_unlock_irqrestore(&hl_irqs_lock, flags); 600 read_unlock_irqrestore(&hl_irqs_lock, flags);
529} 601}
530 602
603/*
604 * highlevel_read, highlevel_write, highlevel_lock, highlevel_lock64:
605 *
606 * These functions are called to handle transactions. They are called when a
607 * packet arrives. The flags argument contains the second word of the first
608 * header quadlet of the incoming packet (containing transaction label, retry
609 * code, transaction code and priority). These functions either return a
610 * response code or a negative number. In the first case a response will be
611 * generated. In the latter case, no response will be sent and the driver which
612 * handled the request will send the response itself.
613 */
531int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr, 614int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr,
532 unsigned int length, u16 flags) 615 unsigned int length, u16 flags)
533{ 616{
diff --git a/drivers/ieee1394/highlevel.h b/drivers/ieee1394/highlevel.h
index 4b330117067a..63474f7ee69d 100644
--- a/drivers/ieee1394/highlevel.h
+++ b/drivers/ieee1394/highlevel.h
@@ -99,16 +99,6 @@ struct hpsb_address_ops {
99void highlevel_add_host(struct hpsb_host *host); 99void highlevel_add_host(struct hpsb_host *host);
100void highlevel_remove_host(struct hpsb_host *host); 100void highlevel_remove_host(struct hpsb_host *host);
101void highlevel_host_reset(struct hpsb_host *host); 101void highlevel_host_reset(struct hpsb_host *host);
102
103/*
104 * These functions are called to handle transactions. They are called when a
105 * packet arrives. The flags argument contains the second word of the first
106 * header quadlet of the incoming packet (containing transaction label, retry
107 * code, transaction code and priority). These functions either return a
108 * response code or a negative number. In the first case a response will be
109 * generated. In the latter case, no response will be sent and the driver which
110 * handled the request will send the response itself.
111 */
112int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr, 102int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr,
113 unsigned int length, u16 flags); 103 unsigned int length, u16 flags);
114int highlevel_write(struct hpsb_host *host, int nodeid, int destid, void *data, 104int highlevel_write(struct hpsb_host *host, int nodeid, int destid, void *data,
@@ -119,30 +109,13 @@ int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
119int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store, 109int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
120 u64 addr, octlet_t data, octlet_t arg, int ext_tcode, 110 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
121 u16 flags); 111 u16 flags);
122
123void highlevel_iso_receive(struct hpsb_host *host, void *data, size_t length); 112void highlevel_iso_receive(struct hpsb_host *host, void *data, size_t length);
124void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction, 113void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
125 void *data, size_t length); 114 void *data, size_t length);
126 115
127/*
128 * Register highlevel driver. The name pointer has to stay valid at all times
129 * because the string is not copied.
130 */
131void hpsb_register_highlevel(struct hpsb_highlevel *hl); 116void hpsb_register_highlevel(struct hpsb_highlevel *hl);
132void hpsb_unregister_highlevel(struct hpsb_highlevel *hl); 117void hpsb_unregister_highlevel(struct hpsb_highlevel *hl);
133 118
134/*
135 * Register handlers for host address spaces. Start and end are 48 bit pointers
136 * and have to be quadlet aligned. Argument "end" points to the first address
137 * behind the handled addresses. This function can be called multiple times for
138 * a single hpsb_highlevel to implement sparse register sets. The requested
139 * region must not overlap any previously allocated region, otherwise
140 * registering will fail.
141 *
142 * It returns true for successful allocation. Address spaces can be
143 * unregistered with hpsb_unregister_addrspace. All remaining address spaces
144 * are automatically deallocated together with the hpsb_highlevel.
145 */
146u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl, 119u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
147 struct hpsb_host *host, 120 struct hpsb_host *host,
148 struct hpsb_address_ops *ops, 121 struct hpsb_address_ops *ops,
@@ -152,45 +125,19 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
152 struct hpsb_address_ops *ops, u64 start, u64 end); 125 struct hpsb_address_ops *ops, u64 start, u64 end);
153int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host, 126int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
154 u64 start); 127 u64 start);
155
156/*
157 * Enable or disable receving a certain isochronous channel through the
158 * iso_receive op.
159 */
160int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host, 128int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
161 unsigned int channel); 129 unsigned int channel);
162void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host, 130void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
163 unsigned int channel); 131 unsigned int channel);
164 132
165/* Retrieve a hostinfo pointer bound to this driver/host */
166void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host); 133void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
167
168/* Allocate a hostinfo pointer of data_size bound to this driver/host */
169void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host, 134void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
170 size_t data_size); 135 size_t data_size);
171
172/* Free and remove the hostinfo pointer bound to this driver/host */
173void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host); 136void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
174
175/* Set an alternate lookup key for the hostinfo bound to this driver/host */
176void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host, 137void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host,
177 unsigned long key); 138 unsigned long key);
178
179/* Retrieve the alternate lookup key for the hostinfo bound to this
180 * driver/host */
181unsigned long hpsb_get_hostinfo_key(struct hpsb_highlevel *hl,
182 struct hpsb_host *host);
183
184/* Retrieve a hostinfo pointer bound to this driver using its alternate key */
185void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key); 139void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key);
186
187/* Set the hostinfo pointer to something useful. Usually follows a call to
188 * hpsb_create_hostinfo, where the size is 0. */
189int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host, 140int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
190 void *data); 141 void *data);
191 142
192/* Retrieve hpsb_host using a highlevel handle and a key */
193struct hpsb_host *hpsb_get_host_bykey(struct hpsb_highlevel *hl,
194 unsigned long key);
195
196#endif /* IEEE1394_HIGHLEVEL_H */ 143#endif /* IEEE1394_HIGHLEVEL_H */
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index 32a130921938..6164a9a83396 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -94,14 +94,6 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
94 return 0; 94 return 0;
95} 95}
96 96
97/*
98 * The pending_packet_queue is special in that it's processed
99 * from hardirq context too (such as hpsb_bus_reset()). Hence
100 * split the lock class from the usual networking skb-head
101 * lock class by using a separate key for it:
102 */
103static struct lock_class_key pending_packet_queue_key;
104
105static DEFINE_MUTEX(host_num_alloc); 97static DEFINE_MUTEX(host_num_alloc);
106 98
107/** 99/**
@@ -137,9 +129,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
137 h->hostdata = h + 1; 129 h->hostdata = h + 1;
138 h->driver = drv; 130 h->driver = drv;
139 131
140 skb_queue_head_init(&h->pending_packet_queue); 132 INIT_LIST_HEAD(&h->pending_packets);
141 lockdep_set_class(&h->pending_packet_queue.lock,
142 &pending_packet_queue_key);
143 INIT_LIST_HEAD(&h->addr_space); 133 INIT_LIST_HEAD(&h->addr_space);
144 134
145 for (i = 2; i < 16; i++) 135 for (i = 2; i < 16; i++)
@@ -190,7 +180,7 @@ int hpsb_add_host(struct hpsb_host *host)
190{ 180{
191 if (hpsb_default_host_entry(host)) 181 if (hpsb_default_host_entry(host))
192 return -ENOMEM; 182 return -ENOMEM;
193 hpsb_add_extra_config_roms(host); 183
194 highlevel_add_host(host); 184 highlevel_add_host(host);
195 return 0; 185 return 0;
196} 186}
@@ -212,12 +202,19 @@ void hpsb_remove_host(struct hpsb_host *host)
212 202
213 host->driver = &dummy_driver; 203 host->driver = &dummy_driver;
214 highlevel_remove_host(host); 204 highlevel_remove_host(host);
215 hpsb_remove_extra_config_roms(host);
216 205
217 class_device_unregister(&host->class_dev); 206 class_device_unregister(&host->class_dev);
218 device_unregister(&host->device); 207 device_unregister(&host->device);
219} 208}
220 209
210/**
211 * hpsb_update_config_rom_image - updates configuration ROM image of a host
212 *
213 * Updates the configuration ROM image of a host. rom_version must be the
214 * current version, otherwise it will fail with return value -1. If this
215 * host does not support config-rom-update, it will return -%EINVAL.
216 * Return value 0 indicates success.
217 */
221int hpsb_update_config_rom_image(struct hpsb_host *host) 218int hpsb_update_config_rom_image(struct hpsb_host *host)
222{ 219{
223 unsigned long reset_delay; 220 unsigned long reset_delay;
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index 4bf4fb7f67b7..feb55d032294 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -3,7 +3,6 @@
3 3
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/list.h> 5#include <linux/list.h>
6#include <linux/skbuff.h>
7#include <linux/timer.h> 6#include <linux/timer.h>
8#include <linux/types.h> 7#include <linux/types.h>
9#include <linux/workqueue.h> 8#include <linux/workqueue.h>
@@ -25,8 +24,7 @@ struct hpsb_host {
25 24
26 atomic_t generation; 25 atomic_t generation;
27 26
28 struct sk_buff_head pending_packet_queue; 27 struct list_head pending_packets;
29
30 struct timer_list timeout; 28 struct timer_list timeout;
31 unsigned long timeout_interval; 29 unsigned long timeout_interval;
32 30
@@ -202,12 +200,6 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
202int hpsb_add_host(struct hpsb_host *host); 200int hpsb_add_host(struct hpsb_host *host);
203void hpsb_resume_host(struct hpsb_host *host); 201void hpsb_resume_host(struct hpsb_host *host);
204void hpsb_remove_host(struct hpsb_host *host); 202void hpsb_remove_host(struct hpsb_host *host);
205
206/* Updates the configuration rom image of a host. rom_version must be the
207 * current version, otherwise it will fail with return value -1. If this
208 * host does not support config-rom-update, it will return -EINVAL.
209 * Return value 0 indicates success.
210 */
211int hpsb_update_config_rom_image(struct hpsb_host *host); 203int hpsb_update_config_rom_image(struct hpsb_host *host);
212 204
213#endif /* _IEEE1394_HOSTS_H */ 205#endif /* _IEEE1394_HOSTS_H */
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index d791d08c743c..8f71b6a06aa0 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -30,7 +30,6 @@
30#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
31#include <linux/bitops.h> 31#include <linux/bitops.h>
32#include <linux/kdev_t.h> 32#include <linux/kdev_t.h>
33#include <linux/skbuff.h>
34#include <linux/suspend.h> 33#include <linux/suspend.h>
35#include <linux/kthread.h> 34#include <linux/kthread.h>
36#include <linux/preempt.h> 35#include <linux/preempt.h>
@@ -96,13 +95,15 @@ static void queue_packet_complete(struct hpsb_packet *packet);
96 95
97 96
98/** 97/**
99 * hpsb_set_packet_complete_task - set the task that runs when a packet 98 * hpsb_set_packet_complete_task - set task that runs when a packet completes
100 * completes. You cannot call this more than once on a single packet
101 * before it is sent.
102 *
103 * @packet: the packet whose completion we want the task added to 99 * @packet: the packet whose completion we want the task added to
104 * @routine: function to call 100 * @routine: function to call
105 * @data: data (if any) to pass to the above function 101 * @data: data (if any) to pass to the above function
102 *
103 * Set the task that runs when a packet completes. You cannot call this more
104 * than once on a single packet before it is sent.
105 *
106 * Typically, the complete @routine is responsible to call hpsb_free_packet().
106 */ 107 */
107void hpsb_set_packet_complete_task(struct hpsb_packet *packet, 108void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
108 void (*routine)(void *), void *data) 109 void (*routine)(void *), void *data)
@@ -115,12 +116,12 @@ void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
115 116
116/** 117/**
117 * hpsb_alloc_packet - allocate new packet structure 118 * hpsb_alloc_packet - allocate new packet structure
118 * @data_size: size of the data block to be allocated 119 * @data_size: size of the data block to be allocated, in bytes
119 * 120 *
120 * This function allocates, initializes and returns a new &struct hpsb_packet. 121 * This function allocates, initializes and returns a new &struct hpsb_packet.
121 * It can be used in interrupt context. A header block is always included, its 122 * It can be used in interrupt context. A header block is always included and
122 * size is big enough to contain all possible 1394 headers. The data block is 123 * initialized with zeros. Its size is big enough to contain all possible 1394
123 * only allocated when @data_size is not zero. 124 * headers. The data block is only allocated if @data_size is not zero.
124 * 125 *
125 * For packets for which responses will be received the @data_size has to be big 126 * For packets for which responses will be received the @data_size has to be big
126 * enough to contain the response's data block since no further allocation 127 * enough to contain the response's data block since no further allocation
@@ -135,50 +136,49 @@ void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
135 */ 136 */
136struct hpsb_packet *hpsb_alloc_packet(size_t data_size) 137struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
137{ 138{
138 struct hpsb_packet *packet = NULL; 139 struct hpsb_packet *packet;
139 struct sk_buff *skb;
140 140
141 data_size = ((data_size + 3) & ~3); 141 data_size = ((data_size + 3) & ~3);
142 142
143 skb = alloc_skb(data_size + sizeof(*packet), GFP_ATOMIC); 143 packet = kzalloc(sizeof(*packet) + data_size, GFP_ATOMIC);
144 if (skb == NULL) 144 if (!packet)
145 return NULL; 145 return NULL;
146 146
147 memset(skb->data, 0, data_size + sizeof(*packet));
148
149 packet = (struct hpsb_packet *)skb->data;
150 packet->skb = skb;
151
152 packet->header = packet->embedded_header;
153 packet->state = hpsb_unused; 147 packet->state = hpsb_unused;
154 packet->generation = -1; 148 packet->generation = -1;
155 INIT_LIST_HEAD(&packet->driver_list); 149 INIT_LIST_HEAD(&packet->driver_list);
150 INIT_LIST_HEAD(&packet->queue);
156 atomic_set(&packet->refcnt, 1); 151 atomic_set(&packet->refcnt, 1);
157 152
158 if (data_size) { 153 if (data_size) {
159 packet->data = (quadlet_t *)(skb->data + sizeof(*packet)); 154 packet->data = packet->embedded_data;
160 packet->data_size = data_size; 155 packet->allocated_data_size = data_size;
161 } 156 }
162
163 return packet; 157 return packet;
164} 158}
165 159
166
167/** 160/**
168 * hpsb_free_packet - free packet and data associated with it 161 * hpsb_free_packet - free packet and data associated with it
169 * @packet: packet to free (is NULL safe) 162 * @packet: packet to free (is NULL safe)
170 * 163 *
171 * This function will free packet->data and finally the packet itself. 164 * Frees @packet->data only if it was allocated through hpsb_alloc_packet().
172 */ 165 */
173void hpsb_free_packet(struct hpsb_packet *packet) 166void hpsb_free_packet(struct hpsb_packet *packet)
174{ 167{
175 if (packet && atomic_dec_and_test(&packet->refcnt)) { 168 if (packet && atomic_dec_and_test(&packet->refcnt)) {
176 BUG_ON(!list_empty(&packet->driver_list)); 169 BUG_ON(!list_empty(&packet->driver_list) ||
177 kfree_skb(packet->skb); 170 !list_empty(&packet->queue));
171 kfree(packet);
178 } 172 }
179} 173}
180 174
181 175/**
176 * hpsb_reset_bus - initiate bus reset on the given host
177 * @host: host controller whose bus to reset
178 * @type: one of enum reset_types
179 *
180 * Returns 1 if bus reset already in progress, 0 otherwise.
181 */
182int hpsb_reset_bus(struct hpsb_host *host, int type) 182int hpsb_reset_bus(struct hpsb_host *host, int type)
183{ 183{
184 if (!host->in_bus_reset) { 184 if (!host->in_bus_reset) {
@@ -229,6 +229,14 @@ int hpsb_read_cycle_timer(struct hpsb_host *host, u32 *cycle_timer,
229 return 0; 229 return 0;
230} 230}
231 231
232/**
233 * hpsb_bus_reset - notify a bus reset to the core
234 *
235 * For host driver module usage. Safe to use in interrupt context, although
236 * quite complex; so you may want to run it in the bottom rather than top half.
237 *
238 * Returns 1 if bus reset already in progress, 0 otherwise.
239 */
232int hpsb_bus_reset(struct hpsb_host *host) 240int hpsb_bus_reset(struct hpsb_host *host)
233{ 241{
234 if (host->in_bus_reset) { 242 if (host->in_bus_reset) {
@@ -405,6 +413,14 @@ static void build_speed_map(struct hpsb_host *host, int nodecount)
405} 413}
406 414
407 415
416/**
417 * hpsb_selfid_received - hand over received selfid packet to the core
418 *
419 * For host driver module usage. Safe to use in interrupt context.
420 *
421 * The host driver should have done a successful complement check (second
422 * quadlet is complement of first) beforehand.
423 */
408void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid) 424void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
409{ 425{
410 if (host->in_bus_reset) { 426 if (host->in_bus_reset) {
@@ -416,6 +432,15 @@ void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
416 } 432 }
417} 433}
418 434
435/**
436 * hpsb_selfid_complete - notify completion of SelfID stage to the core
437 *
438 * For host driver module usage. Safe to use in interrupt context, although
439 * quite complex; so you may want to run it in the bottom rather than top half.
440 *
441 * Notify completion of SelfID stage to the core and report new physical ID
442 * and whether host is root now.
443 */
419void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot) 444void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
420{ 445{
421 if (!host->in_bus_reset) 446 if (!host->in_bus_reset)
@@ -462,30 +487,41 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
462 highlevel_host_reset(host); 487 highlevel_host_reset(host);
463} 488}
464 489
490static spinlock_t pending_packets_lock = SPIN_LOCK_UNLOCKED;
465 491
492/**
493 * hpsb_packet_sent - notify core of sending a packet
494 *
495 * For host driver module usage. Safe to call from within a transmit packet
496 * routine.
497 *
498 * Notify core of sending a packet. Ackcode is the ack code returned for async
499 * transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
500 * for other cases (internal errors that don't justify a panic).
501 */
466void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet, 502void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
467 int ackcode) 503 int ackcode)
468{ 504{
469 unsigned long flags; 505 unsigned long flags;
470 506
471 spin_lock_irqsave(&host->pending_packet_queue.lock, flags); 507 spin_lock_irqsave(&pending_packets_lock, flags);
472 508
473 packet->ack_code = ackcode; 509 packet->ack_code = ackcode;
474 510
475 if (packet->no_waiter || packet->state == hpsb_complete) { 511 if (packet->no_waiter || packet->state == hpsb_complete) {
476 /* if packet->no_waiter, must not have a tlabel allocated */ 512 /* if packet->no_waiter, must not have a tlabel allocated */
477 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 513 spin_unlock_irqrestore(&pending_packets_lock, flags);
478 hpsb_free_packet(packet); 514 hpsb_free_packet(packet);
479 return; 515 return;
480 } 516 }
481 517
482 atomic_dec(&packet->refcnt); /* drop HC's reference */ 518 atomic_dec(&packet->refcnt); /* drop HC's reference */
483 /* here the packet must be on the host->pending_packet_queue */ 519 /* here the packet must be on the host->pending_packets queue */
484 520
485 if (ackcode != ACK_PENDING || !packet->expect_response) { 521 if (ackcode != ACK_PENDING || !packet->expect_response) {
486 packet->state = hpsb_complete; 522 packet->state = hpsb_complete;
487 __skb_unlink(packet->skb, &host->pending_packet_queue); 523 list_del_init(&packet->queue);
488 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 524 spin_unlock_irqrestore(&pending_packets_lock, flags);
489 queue_packet_complete(packet); 525 queue_packet_complete(packet);
490 return; 526 return;
491 } 527 }
@@ -493,7 +529,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
493 packet->state = hpsb_pending; 529 packet->state = hpsb_pending;
494 packet->sendtime = jiffies; 530 packet->sendtime = jiffies;
495 531
496 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 532 spin_unlock_irqrestore(&pending_packets_lock, flags);
497 533
498 mod_timer(&host->timeout, jiffies + host->timeout_interval); 534 mod_timer(&host->timeout, jiffies + host->timeout_interval);
499} 535}
@@ -504,9 +540,10 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
504 * @rootid: root whose force_root bit should get set (-1 = don't set force_root) 540 * @rootid: root whose force_root bit should get set (-1 = don't set force_root)
505 * @gapcnt: gap count value to set (-1 = don't set gap count) 541 * @gapcnt: gap count value to set (-1 = don't set gap count)
506 * 542 *
507 * This function sends a PHY config packet on the bus through the specified host. 543 * This function sends a PHY config packet on the bus through the specified
544 * host.
508 * 545 *
509 * Return value: 0 for success or error number otherwise. 546 * Return value: 0 for success or negative error number otherwise.
510 */ 547 */
511int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt) 548int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
512{ 549{
@@ -567,12 +604,16 @@ int hpsb_send_packet(struct hpsb_packet *packet)
567 WARN_ON(packet->no_waiter && packet->expect_response); 604 WARN_ON(packet->no_waiter && packet->expect_response);
568 605
569 if (!packet->no_waiter || packet->expect_response) { 606 if (!packet->no_waiter || packet->expect_response) {
607 unsigned long flags;
608
570 atomic_inc(&packet->refcnt); 609 atomic_inc(&packet->refcnt);
571 /* Set the initial "sendtime" to 10 seconds from now, to 610 /* Set the initial "sendtime" to 10 seconds from now, to
572 prevent premature expiry. If a packet takes more than 611 prevent premature expiry. If a packet takes more than
573 10 seconds to hit the wire, we have bigger problems :) */ 612 10 seconds to hit the wire, we have bigger problems :) */
574 packet->sendtime = jiffies + 10 * HZ; 613 packet->sendtime = jiffies + 10 * HZ;
575 skb_queue_tail(&host->pending_packet_queue, packet->skb); 614 spin_lock_irqsave(&pending_packets_lock, flags);
615 list_add_tail(&packet->queue, &host->pending_packets);
616 spin_unlock_irqrestore(&pending_packets_lock, flags);
576 } 617 }
577 618
578 if (packet->node_id == host->node_id) { 619 if (packet->node_id == host->node_id) {
@@ -621,6 +662,12 @@ static void complete_packet(void *data)
621 complete((struct completion *) data); 662 complete((struct completion *) data);
622} 663}
623 664
665/**
666 * hpsb_send_packet_and_wait - enqueue packet, block until transaction completes
667 * @packet: packet to send
668 *
669 * Return value: 0 on success, negative errno on failure.
670 */
624int hpsb_send_packet_and_wait(struct hpsb_packet *packet) 671int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
625{ 672{
626 struct completion done; 673 struct completion done;
@@ -642,86 +689,97 @@ static void send_packet_nocare(struct hpsb_packet *packet)
642 } 689 }
643} 690}
644 691
692static size_t packet_size_to_data_size(size_t packet_size, size_t header_size,
693 size_t buffer_size, int tcode)
694{
695 size_t ret = packet_size <= header_size ? 0 : packet_size - header_size;
696
697 if (unlikely(ret > buffer_size))
698 ret = buffer_size;
699
700 if (unlikely(ret + header_size != packet_size))
701 HPSB_ERR("unexpected packet size %zd (tcode %d), bug?",
702 packet_size, tcode);
703 return ret;
704}
645 705
646static void handle_packet_response(struct hpsb_host *host, int tcode, 706static void handle_packet_response(struct hpsb_host *host, int tcode,
647 quadlet_t *data, size_t size) 707 quadlet_t *data, size_t size)
648{ 708{
649 struct hpsb_packet *packet = NULL; 709 struct hpsb_packet *packet;
650 struct sk_buff *skb; 710 int tlabel = (data[0] >> 10) & 0x3f;
651 int tcode_match = 0; 711 size_t header_size;
652 int tlabel;
653 unsigned long flags; 712 unsigned long flags;
654 713
655 tlabel = (data[0] >> 10) & 0x3f; 714 spin_lock_irqsave(&pending_packets_lock, flags);
656
657 spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
658 715
659 skb_queue_walk(&host->pending_packet_queue, skb) { 716 list_for_each_entry(packet, &host->pending_packets, queue)
660 packet = (struct hpsb_packet *)skb->data; 717 if (packet->tlabel == tlabel &&
661 if ((packet->tlabel == tlabel) 718 packet->node_id == (data[1] >> 16))
662 && (packet->node_id == (data[1] >> 16))){ 719 goto found;
663 break;
664 }
665
666 packet = NULL;
667 }
668 720
669 if (packet == NULL) { 721 spin_unlock_irqrestore(&pending_packets_lock, flags);
670 HPSB_DEBUG("unsolicited response packet received - no tlabel match"); 722 HPSB_DEBUG("unsolicited response packet received - %s",
671 dump_packet("contents", data, 16, -1); 723 "no tlabel match");
672 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 724 dump_packet("contents", data, 16, -1);
673 return; 725 return;
674 }
675 726
727found:
676 switch (packet->tcode) { 728 switch (packet->tcode) {
677 case TCODE_WRITEQ: 729 case TCODE_WRITEQ:
678 case TCODE_WRITEB: 730 case TCODE_WRITEB:
679 if (tcode != TCODE_WRITE_RESPONSE) 731 if (unlikely(tcode != TCODE_WRITE_RESPONSE))
680 break; 732 break;
681 tcode_match = 1; 733 header_size = 12;
682 memcpy(packet->header, data, 12); 734 size = 0;
683 break; 735 goto dequeue;
736
684 case TCODE_READQ: 737 case TCODE_READQ:
685 if (tcode != TCODE_READQ_RESPONSE) 738 if (unlikely(tcode != TCODE_READQ_RESPONSE))
686 break; 739 break;
687 tcode_match = 1; 740 header_size = 16;
688 memcpy(packet->header, data, 16); 741 size = 0;
689 break; 742 goto dequeue;
743
690 case TCODE_READB: 744 case TCODE_READB:
691 if (tcode != TCODE_READB_RESPONSE) 745 if (unlikely(tcode != TCODE_READB_RESPONSE))
692 break; 746 break;
693 tcode_match = 1; 747 header_size = 16;
694 BUG_ON(packet->skb->len - sizeof(*packet) < size - 16); 748 size = packet_size_to_data_size(size, header_size,
695 memcpy(packet->header, data, 16); 749 packet->allocated_data_size,
696 memcpy(packet->data, data + 4, size - 16); 750 tcode);
697 break; 751 goto dequeue;
752
698 case TCODE_LOCK_REQUEST: 753 case TCODE_LOCK_REQUEST:
699 if (tcode != TCODE_LOCK_RESPONSE) 754 if (unlikely(tcode != TCODE_LOCK_RESPONSE))
700 break; 755 break;
701 tcode_match = 1; 756 header_size = 16;
702 size = min((size - 16), (size_t)8); 757 size = packet_size_to_data_size(min(size, (size_t)(16 + 8)),
703 BUG_ON(packet->skb->len - sizeof(*packet) < size); 758 header_size,
704 memcpy(packet->header, data, 16); 759 packet->allocated_data_size,
705 memcpy(packet->data, data + 4, size); 760 tcode);
706 break; 761 goto dequeue;
707 } 762 }
708 763
709 if (!tcode_match) { 764 spin_unlock_irqrestore(&pending_packets_lock, flags);
710 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 765 HPSB_DEBUG("unsolicited response packet received - %s",
711 HPSB_INFO("unsolicited response packet received - tcode mismatch"); 766 "tcode mismatch");
712 dump_packet("contents", data, 16, -1); 767 dump_packet("contents", data, 16, -1);
713 return; 768 return;
714 }
715 769
716 __skb_unlink(skb, &host->pending_packet_queue); 770dequeue:
771 list_del_init(&packet->queue);
772 spin_unlock_irqrestore(&pending_packets_lock, flags);
717 773
718 if (packet->state == hpsb_queued) { 774 if (packet->state == hpsb_queued) {
719 packet->sendtime = jiffies; 775 packet->sendtime = jiffies;
720 packet->ack_code = ACK_PENDING; 776 packet->ack_code = ACK_PENDING;
721 } 777 }
722
723 packet->state = hpsb_complete; 778 packet->state = hpsb_complete;
724 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 779
780 memcpy(packet->header, data, header_size);
781 if (size)
782 memcpy(packet->data, data + 4, size);
725 783
726 queue_packet_complete(packet); 784 queue_packet_complete(packet);
727} 785}
@@ -735,6 +793,7 @@ static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
735 p = hpsb_alloc_packet(dsize); 793 p = hpsb_alloc_packet(dsize);
736 if (unlikely(p == NULL)) { 794 if (unlikely(p == NULL)) {
737 /* FIXME - send data_error response */ 795 /* FIXME - send data_error response */
796 HPSB_ERR("out of memory, cannot send response packet");
738 return NULL; 797 return NULL;
739 } 798 }
740 799
@@ -784,7 +843,6 @@ static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
784static void fill_async_write_resp(struct hpsb_packet *packet, int rcode) 843static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
785{ 844{
786 PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE); 845 PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
787 packet->header[2] = 0;
788 packet->header_size = 12; 846 packet->header_size = 12;
789 packet->data_size = 0; 847 packet->data_size = 0;
790} 848}
@@ -801,12 +859,9 @@ static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extc
801 packet->data_size = length; 859 packet->data_size = length;
802} 860}
803 861
804#define PREP_REPLY_PACKET(length) \
805 packet = create_reply_packet(host, data, length); \
806 if (packet == NULL) break
807
808static void handle_incoming_packet(struct hpsb_host *host, int tcode, 862static void handle_incoming_packet(struct hpsb_host *host, int tcode,
809 quadlet_t *data, size_t size, int write_acked) 863 quadlet_t *data, size_t size,
864 int write_acked)
810{ 865{
811 struct hpsb_packet *packet; 866 struct hpsb_packet *packet;
812 int length, rcode, extcode; 867 int length, rcode, extcode;
@@ -816,74 +871,72 @@ static void handle_incoming_packet(struct hpsb_host *host, int tcode,
816 u16 flags = (u16) data[0]; 871 u16 flags = (u16) data[0];
817 u64 addr; 872 u64 addr;
818 873
819 /* big FIXME - no error checking is done for an out of bounds length */ 874 /* FIXME?
875 * Out-of-bounds lengths are left for highlevel_read|write to cap. */
820 876
821 switch (tcode) { 877 switch (tcode) {
822 case TCODE_WRITEQ: 878 case TCODE_WRITEQ:
823 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; 879 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
824 rcode = highlevel_write(host, source, dest, data+3, 880 rcode = highlevel_write(host, source, dest, data + 3,
825 addr, 4, flags); 881 addr, 4, flags);
826 882 goto handle_write_request;
827 if (!write_acked
828 && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
829 && (rcode >= 0)) {
830 /* not a broadcast write, reply */
831 PREP_REPLY_PACKET(0);
832 fill_async_write_resp(packet, rcode);
833 send_packet_nocare(packet);
834 }
835 break;
836 883
837 case TCODE_WRITEB: 884 case TCODE_WRITEB:
838 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; 885 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
839 rcode = highlevel_write(host, source, dest, data+4, 886 rcode = highlevel_write(host, source, dest, data + 4,
840 addr, data[3]>>16, flags); 887 addr, data[3] >> 16, flags);
841 888handle_write_request:
842 if (!write_acked 889 if (rcode < 0 || write_acked ||
843 && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK) 890 NODEID_TO_NODE(data[0] >> 16) == NODE_MASK)
844 && (rcode >= 0)) { 891 return;
845 /* not a broadcast write, reply */ 892 /* not a broadcast write, reply */
846 PREP_REPLY_PACKET(0); 893 packet = create_reply_packet(host, data, 0);
894 if (packet) {
847 fill_async_write_resp(packet, rcode); 895 fill_async_write_resp(packet, rcode);
848 send_packet_nocare(packet); 896 send_packet_nocare(packet);
849 } 897 }
850 break; 898 return;
851 899
852 case TCODE_READQ: 900 case TCODE_READQ:
853 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; 901 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
854 rcode = highlevel_read(host, source, &buffer, addr, 4, flags); 902 rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
903 if (rcode < 0)
904 return;
855 905
856 if (rcode >= 0) { 906 packet = create_reply_packet(host, data, 0);
857 PREP_REPLY_PACKET(0); 907 if (packet) {
858 fill_async_readquad_resp(packet, rcode, buffer); 908 fill_async_readquad_resp(packet, rcode, buffer);
859 send_packet_nocare(packet); 909 send_packet_nocare(packet);
860 } 910 }
861 break; 911 return;
862 912
863 case TCODE_READB: 913 case TCODE_READB:
864 length = data[3] >> 16; 914 length = data[3] >> 16;
865 PREP_REPLY_PACKET(length); 915 packet = create_reply_packet(host, data, length);
916 if (!packet)
917 return;
866 918
867 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; 919 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
868 rcode = highlevel_read(host, source, packet->data, addr, 920 rcode = highlevel_read(host, source, packet->data, addr,
869 length, flags); 921 length, flags);
870 922 if (rcode < 0) {
871 if (rcode >= 0) {
872 fill_async_readblock_resp(packet, rcode, length);
873 send_packet_nocare(packet);
874 } else {
875 hpsb_free_packet(packet); 923 hpsb_free_packet(packet);
924 return;
876 } 925 }
877 break; 926 fill_async_readblock_resp(packet, rcode, length);
927 send_packet_nocare(packet);
928 return;
878 929
879 case TCODE_LOCK_REQUEST: 930 case TCODE_LOCK_REQUEST:
880 length = data[3] >> 16; 931 length = data[3] >> 16;
881 extcode = data[3] & 0xffff; 932 extcode = data[3] & 0xffff;
882 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; 933 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
883 934
884 PREP_REPLY_PACKET(8); 935 packet = create_reply_packet(host, data, 8);
936 if (!packet)
937 return;
885 938
886 if ((extcode == 0) || (extcode >= 7)) { 939 if (extcode == 0 || extcode >= 7) {
887 /* let switch default handle error */ 940 /* let switch default handle error */
888 length = 0; 941 length = 0;
889 } 942 }
@@ -891,12 +944,12 @@ static void handle_incoming_packet(struct hpsb_host *host, int tcode,
891 switch (length) { 944 switch (length) {
892 case 4: 945 case 4:
893 rcode = highlevel_lock(host, source, packet->data, addr, 946 rcode = highlevel_lock(host, source, packet->data, addr,
894 data[4], 0, extcode,flags); 947 data[4], 0, extcode, flags);
895 fill_async_lock_resp(packet, rcode, extcode, 4); 948 fill_async_lock_resp(packet, rcode, extcode, 4);
896 break; 949 break;
897 case 8: 950 case 8:
898 if ((extcode != EXTCODE_FETCH_ADD) 951 if (extcode != EXTCODE_FETCH_ADD &&
899 && (extcode != EXTCODE_LITTLE_ADD)) { 952 extcode != EXTCODE_LITTLE_ADD) {
900 rcode = highlevel_lock(host, source, 953 rcode = highlevel_lock(host, source,
901 packet->data, addr, 954 packet->data, addr,
902 data[5], data[4], 955 data[5], data[4],
@@ -920,29 +973,38 @@ static void handle_incoming_packet(struct hpsb_host *host, int tcode,
920 break; 973 break;
921 default: 974 default:
922 rcode = RCODE_TYPE_ERROR; 975 rcode = RCODE_TYPE_ERROR;
923 fill_async_lock_resp(packet, rcode, 976 fill_async_lock_resp(packet, rcode, extcode, 0);
924 extcode, 0);
925 } 977 }
926 978
927 if (rcode >= 0) { 979 if (rcode < 0)
928 send_packet_nocare(packet);
929 } else {
930 hpsb_free_packet(packet); 980 hpsb_free_packet(packet);
931 } 981 else
932 break; 982 send_packet_nocare(packet);
983 return;
933 } 984 }
934
935} 985}
936#undef PREP_REPLY_PACKET
937
938 986
987/**
988 * hpsb_packet_received - hand over received packet to the core
989 *
990 * For host driver module usage.
991 *
992 * The contents of data are expected to be the full packet but with the CRCs
993 * left out (data block follows header immediately), with the header (i.e. the
994 * first four quadlets) in machine byte order and the data block in big endian.
995 * *@data can be safely overwritten after this call.
996 *
997 * If the packet is a write request, @write_acked is to be set to true if it was
998 * ack_complete'd already, false otherwise. This argument is ignored for any
999 * other packet type.
1000 */
939void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size, 1001void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
940 int write_acked) 1002 int write_acked)
941{ 1003{
942 int tcode; 1004 int tcode;
943 1005
944 if (host->in_bus_reset) { 1006 if (unlikely(host->in_bus_reset)) {
945 HPSB_INFO("received packet during reset; ignoring"); 1007 HPSB_DEBUG("received packet during reset; ignoring");
946 return; 1008 return;
947 } 1009 }
948 1010
@@ -976,23 +1038,27 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
976 break; 1038 break;
977 1039
978 default: 1040 default:
979 HPSB_NOTICE("received packet with bogus transaction code %d", 1041 HPSB_DEBUG("received packet with bogus transaction code %d",
980 tcode); 1042 tcode);
981 break; 1043 break;
982 } 1044 }
983} 1045}
984 1046
985
986static void abort_requests(struct hpsb_host *host) 1047static void abort_requests(struct hpsb_host *host)
987{ 1048{
988 struct hpsb_packet *packet; 1049 struct hpsb_packet *packet, *p;
989 struct sk_buff *skb; 1050 struct list_head tmp;
1051 unsigned long flags;
990 1052
991 host->driver->devctl(host, CANCEL_REQUESTS, 0); 1053 host->driver->devctl(host, CANCEL_REQUESTS, 0);
992 1054
993 while ((skb = skb_dequeue(&host->pending_packet_queue)) != NULL) { 1055 INIT_LIST_HEAD(&tmp);
994 packet = (struct hpsb_packet *)skb->data; 1056 spin_lock_irqsave(&pending_packets_lock, flags);
1057 list_splice_init(&host->pending_packets, &tmp);
1058 spin_unlock_irqrestore(&pending_packets_lock, flags);
995 1059
1060 list_for_each_entry_safe(packet, p, &tmp, queue) {
1061 list_del_init(&packet->queue);
996 packet->state = hpsb_complete; 1062 packet->state = hpsb_complete;
997 packet->ack_code = ACKX_ABORTED; 1063 packet->ack_code = ACKX_ABORTED;
998 queue_packet_complete(packet); 1064 queue_packet_complete(packet);
@@ -1002,87 +1068,90 @@ static void abort_requests(struct hpsb_host *host)
1002void abort_timedouts(unsigned long __opaque) 1068void abort_timedouts(unsigned long __opaque)
1003{ 1069{
1004 struct hpsb_host *host = (struct hpsb_host *)__opaque; 1070 struct hpsb_host *host = (struct hpsb_host *)__opaque;
1005 unsigned long flags; 1071 struct hpsb_packet *packet, *p;
1006 struct hpsb_packet *packet; 1072 struct list_head tmp;
1007 struct sk_buff *skb; 1073 unsigned long flags, expire, j;
1008 unsigned long expire;
1009 1074
1010 spin_lock_irqsave(&host->csr.lock, flags); 1075 spin_lock_irqsave(&host->csr.lock, flags);
1011 expire = host->csr.expire; 1076 expire = host->csr.expire;
1012 spin_unlock_irqrestore(&host->csr.lock, flags); 1077 spin_unlock_irqrestore(&host->csr.lock, flags);
1013 1078
1014 /* Hold the lock around this, since we aren't dequeuing all 1079 j = jiffies;
1015 * packets, just ones we need. */ 1080 INIT_LIST_HEAD(&tmp);
1016 spin_lock_irqsave(&host->pending_packet_queue.lock, flags); 1081 spin_lock_irqsave(&pending_packets_lock, flags);
1017
1018 while (!skb_queue_empty(&host->pending_packet_queue)) {
1019 skb = skb_peek(&host->pending_packet_queue);
1020
1021 packet = (struct hpsb_packet *)skb->data;
1022 1082
1023 if (time_before(packet->sendtime + expire, jiffies)) { 1083 list_for_each_entry_safe(packet, p, &host->pending_packets, queue) {
1024 __skb_unlink(skb, &host->pending_packet_queue); 1084 if (time_before(packet->sendtime + expire, j))
1025 packet->state = hpsb_complete; 1085 list_move_tail(&packet->queue, &tmp);
1026 packet->ack_code = ACKX_TIMEOUT; 1086 else
1027 queue_packet_complete(packet);
1028 } else {
1029 /* Since packets are added to the tail, the oldest 1087 /* Since packets are added to the tail, the oldest
1030 * ones are first, always. When we get to one that 1088 * ones are first, always. When we get to one that
1031 * isn't timed out, the rest aren't either. */ 1089 * isn't timed out, the rest aren't either. */
1032 break; 1090 break;
1033 }
1034 } 1091 }
1092 if (!list_empty(&host->pending_packets))
1093 mod_timer(&host->timeout, j + host->timeout_interval);
1035 1094
1036 if (!skb_queue_empty(&host->pending_packet_queue)) 1095 spin_unlock_irqrestore(&pending_packets_lock, flags);
1037 mod_timer(&host->timeout, jiffies + host->timeout_interval);
1038 1096
1039 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 1097 list_for_each_entry_safe(packet, p, &tmp, queue) {
1098 list_del_init(&packet->queue);
1099 packet->state = hpsb_complete;
1100 packet->ack_code = ACKX_TIMEOUT;
1101 queue_packet_complete(packet);
1102 }
1040} 1103}
1041 1104
1042
1043/* Kernel thread and vars, which handles packets that are completed. Only
1044 * packets that have a "complete" function are sent here. This way, the
1045 * completion is run out of kernel context, and doesn't block the rest of
1046 * the stack. */
1047static struct task_struct *khpsbpkt_thread; 1105static struct task_struct *khpsbpkt_thread;
1048static struct sk_buff_head hpsbpkt_queue; 1106static LIST_HEAD(hpsbpkt_queue);
1049 1107
1050static void queue_packet_complete(struct hpsb_packet *packet) 1108static void queue_packet_complete(struct hpsb_packet *packet)
1051{ 1109{
1110 unsigned long flags;
1111
1052 if (packet->no_waiter) { 1112 if (packet->no_waiter) {
1053 hpsb_free_packet(packet); 1113 hpsb_free_packet(packet);
1054 return; 1114 return;
1055 } 1115 }
1056 if (packet->complete_routine != NULL) { 1116 if (packet->complete_routine != NULL) {
1057 skb_queue_tail(&hpsbpkt_queue, packet->skb); 1117 spin_lock_irqsave(&pending_packets_lock, flags);
1118 list_add_tail(&packet->queue, &hpsbpkt_queue);
1119 spin_unlock_irqrestore(&pending_packets_lock, flags);
1058 wake_up_process(khpsbpkt_thread); 1120 wake_up_process(khpsbpkt_thread);
1059 } 1121 }
1060 return; 1122 return;
1061} 1123}
1062 1124
1125/*
1126 * Kernel thread which handles packets that are completed. This way the
1127 * packet's "complete" function is asynchronously run in process context.
1128 * Only packets which have a "complete" function may be sent here.
1129 */
1063static int hpsbpkt_thread(void *__hi) 1130static int hpsbpkt_thread(void *__hi)
1064{ 1131{
1065 struct sk_buff *skb; 1132 struct hpsb_packet *packet, *p;
1066 struct hpsb_packet *packet; 1133 struct list_head tmp;
1067 void (*complete_routine)(void*); 1134 int may_schedule;
1068 void *complete_data;
1069 1135
1070 current->flags |= PF_NOFREEZE; 1136 current->flags |= PF_NOFREEZE;
1071 1137
1072 while (!kthread_should_stop()) { 1138 while (!kthread_should_stop()) {
1073 while ((skb = skb_dequeue(&hpsbpkt_queue)) != NULL) {
1074 packet = (struct hpsb_packet *)skb->data;
1075
1076 complete_routine = packet->complete_routine;
1077 complete_data = packet->complete_data;
1078 1139
1079 packet->complete_routine = packet->complete_data = NULL; 1140 INIT_LIST_HEAD(&tmp);
1141 spin_lock_irq(&pending_packets_lock);
1142 list_splice_init(&hpsbpkt_queue, &tmp);
1143 spin_unlock_irq(&pending_packets_lock);
1080 1144
1081 complete_routine(complete_data); 1145 list_for_each_entry_safe(packet, p, &tmp, queue) {
1146 list_del_init(&packet->queue);
1147 packet->complete_routine(packet->complete_data);
1082 } 1148 }
1083 1149
1084 set_current_state(TASK_INTERRUPTIBLE); 1150 set_current_state(TASK_INTERRUPTIBLE);
1085 if (!skb_peek(&hpsbpkt_queue)) 1151 spin_lock_irq(&pending_packets_lock);
1152 may_schedule = list_empty(&hpsbpkt_queue);
1153 spin_unlock_irq(&pending_packets_lock);
1154 if (may_schedule)
1086 schedule(); 1155 schedule();
1087 __set_current_state(TASK_RUNNING); 1156 __set_current_state(TASK_RUNNING);
1088 } 1157 }
@@ -1093,8 +1162,6 @@ static int __init ieee1394_init(void)
1093{ 1162{
1094 int i, ret; 1163 int i, ret;
1095 1164
1096 skb_queue_head_init(&hpsbpkt_queue);
1097
1098 /* non-fatal error */ 1165 /* non-fatal error */
1099 if (hpsb_init_config_roms()) { 1166 if (hpsb_init_config_roms()) {
1100 HPSB_ERR("Failed to initialize some config rom entries.\n"); 1167 HPSB_ERR("Failed to initialize some config rom entries.\n");
@@ -1268,7 +1335,6 @@ EXPORT_SYMBOL(hpsb_destroy_hostinfo);
1268EXPORT_SYMBOL(hpsb_set_hostinfo_key); 1335EXPORT_SYMBOL(hpsb_set_hostinfo_key);
1269EXPORT_SYMBOL(hpsb_get_hostinfo_bykey); 1336EXPORT_SYMBOL(hpsb_get_hostinfo_bykey);
1270EXPORT_SYMBOL(hpsb_set_hostinfo); 1337EXPORT_SYMBOL(hpsb_set_hostinfo);
1271EXPORT_SYMBOL(highlevel_host_reset);
1272 1338
1273/** nodemgr.c **/ 1339/** nodemgr.c **/
1274EXPORT_SYMBOL(hpsb_node_fill_packet); 1340EXPORT_SYMBOL(hpsb_node_fill_packet);
@@ -1311,11 +1377,10 @@ EXPORT_SYMBOL(hpsb_iso_wake);
1311EXPORT_SYMBOL(hpsb_iso_recv_flush); 1377EXPORT_SYMBOL(hpsb_iso_recv_flush);
1312 1378
1313/** csr1212.c **/ 1379/** csr1212.c **/
1314EXPORT_SYMBOL(csr1212_new_directory);
1315EXPORT_SYMBOL(csr1212_attach_keyval_to_directory); 1380EXPORT_SYMBOL(csr1212_attach_keyval_to_directory);
1316EXPORT_SYMBOL(csr1212_detach_keyval_from_directory); 1381EXPORT_SYMBOL(csr1212_detach_keyval_from_directory);
1317EXPORT_SYMBOL(csr1212_release_keyval); 1382EXPORT_SYMBOL(csr1212_get_keyval);
1318EXPORT_SYMBOL(csr1212_read); 1383EXPORT_SYMBOL(csr1212_new_directory);
1319EXPORT_SYMBOL(csr1212_parse_keyval); 1384EXPORT_SYMBOL(csr1212_parse_keyval);
1320EXPORT_SYMBOL(_csr1212_read_keyval); 1385EXPORT_SYMBOL(csr1212_read);
1321EXPORT_SYMBOL(_csr1212_destroy_keyval); 1386EXPORT_SYMBOL(csr1212_release_keyval);
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
index bd29d8ef5bbd..ad526523d0ef 100644
--- a/drivers/ieee1394/ieee1394_core.h
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -4,7 +4,6 @@
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/fs.h> 5#include <linux/fs.h>
6#include <linux/list.h> 6#include <linux/list.h>
7#include <linux/skbuff.h>
8#include <linux/types.h> 7#include <linux/types.h>
9#include <asm/atomic.h> 8#include <asm/atomic.h>
10 9
@@ -13,7 +12,7 @@
13 12
14struct hpsb_packet { 13struct hpsb_packet {
15 /* This struct is basically read-only for hosts with the exception of 14 /* This struct is basically read-only for hosts with the exception of
16 * the data buffer contents and xnext - see below. */ 15 * the data buffer contents and driver_list. */
17 16
18 /* This can be used for host driver internal linking. 17 /* This can be used for host driver internal linking.
19 * 18 *
@@ -49,134 +48,65 @@ struct hpsb_packet {
49 /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */ 48 /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */
50 unsigned speed_code:2; 49 unsigned speed_code:2;
51 50
52 /*
53 * *header and *data are guaranteed to be 32-bit DMAable and may be
54 * overwritten to allow in-place byte swapping. Neither of these is
55 * CRCed (the sizes also don't include CRC), but contain space for at
56 * least one additional quadlet to allow in-place CRCing. The memory is
57 * also guaranteed to be DMA mappable.
58 */
59 quadlet_t *header;
60 quadlet_t *data;
61 size_t header_size;
62 size_t data_size;
63
64 struct hpsb_host *host; 51 struct hpsb_host *host;
65 unsigned int generation; 52 unsigned int generation;
66 53
67 atomic_t refcnt; 54 atomic_t refcnt;
55 struct list_head queue;
68 56
69 /* Function (and possible data to pass to it) to call when this 57 /* Function (and possible data to pass to it) to call when this
70 * packet is completed. */ 58 * packet is completed. */
71 void (*complete_routine)(void *); 59 void (*complete_routine)(void *);
72 void *complete_data; 60 void *complete_data;
73 61
74 /* XXX This is just a hack at the moment */
75 struct sk_buff *skb;
76
77 /* Store jiffies for implementing bus timeouts. */ 62 /* Store jiffies for implementing bus timeouts. */
78 unsigned long sendtime; 63 unsigned long sendtime;
79 64
80 quadlet_t embedded_header[5]; 65 /* Sizes are in bytes. *data can be DMA-mapped. */
66 size_t allocated_data_size; /* as allocated */
67 size_t data_size; /* as filled in */
68 size_t header_size; /* as filled in, not counting the CRC */
69 quadlet_t *data;
70 quadlet_t header[5];
71 quadlet_t embedded_data[0]; /* keep as last member */
81}; 72};
82 73
83/* Set a task for when a packet completes */
84void hpsb_set_packet_complete_task(struct hpsb_packet *packet, 74void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
85 void (*routine)(void *), void *data); 75 void (*routine)(void *), void *data);
86
87static inline struct hpsb_packet *driver_packet(struct list_head *l) 76static inline struct hpsb_packet *driver_packet(struct list_head *l)
88{ 77{
89 return list_entry(l, struct hpsb_packet, driver_list); 78 return list_entry(l, struct hpsb_packet, driver_list);
90} 79}
91
92void abort_timedouts(unsigned long __opaque); 80void abort_timedouts(unsigned long __opaque);
93
94struct hpsb_packet *hpsb_alloc_packet(size_t data_size); 81struct hpsb_packet *hpsb_alloc_packet(size_t data_size);
95void hpsb_free_packet(struct hpsb_packet *packet); 82void hpsb_free_packet(struct hpsb_packet *packet);
96 83
97/* 84/**
98 * Generation counter for the complete 1394 subsystem. Generation gets 85 * get_hpsb_generation - generation counter for the complete 1394 subsystem
99 * incremented on every change in the subsystem (e.g. bus reset).
100 * 86 *
101 * Use the functions, not the variable. 87 * Generation gets incremented on every change in the subsystem (notably on bus
88 * resets). Use the functions, not the variable.
102 */ 89 */
103static inline unsigned int get_hpsb_generation(struct hpsb_host *host) 90static inline unsigned int get_hpsb_generation(struct hpsb_host *host)
104{ 91{
105 return atomic_read(&host->generation); 92 return atomic_read(&host->generation);
106} 93}
107 94
108/*
109 * Send a PHY configuration packet, return 0 on success, negative
110 * errno on failure.
111 */
112int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt); 95int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt);
113
114/*
115 * Queue packet for transmitting, return 0 on success, negative errno
116 * on failure.
117 */
118int hpsb_send_packet(struct hpsb_packet *packet); 96int hpsb_send_packet(struct hpsb_packet *packet);
119
120/*
121 * Queue packet for transmitting, and block until the transaction
122 * completes. Return 0 on success, negative errno on failure.
123 */
124int hpsb_send_packet_and_wait(struct hpsb_packet *packet); 97int hpsb_send_packet_and_wait(struct hpsb_packet *packet);
125
126/* Initiate bus reset on the given host. Returns 1 if bus reset already in
127 * progress, 0 otherwise. */
128int hpsb_reset_bus(struct hpsb_host *host, int type); 98int hpsb_reset_bus(struct hpsb_host *host, int type);
129
130int hpsb_read_cycle_timer(struct hpsb_host *host, u32 *cycle_timer, 99int hpsb_read_cycle_timer(struct hpsb_host *host, u32 *cycle_timer,
131 u64 *local_time); 100 u64 *local_time);
132 101
133/*
134 * The following functions are exported for host driver module usage. All of
135 * them are safe to use in interrupt contexts, although some are quite
136 * complicated so you may want to run them in bottom halves instead of calling
137 * them directly.
138 */
139
140/* Notify a bus reset to the core. Returns 1 if bus reset already in progress,
141 * 0 otherwise. */
142int hpsb_bus_reset(struct hpsb_host *host); 102int hpsb_bus_reset(struct hpsb_host *host);
143
144/*
145 * Hand over received selfid packet to the core. Complement check (second
146 * quadlet is complement of first) is expected to be done and successful.
147 */
148void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid); 103void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);
149
150/*
151 * Notify completion of SelfID stage to the core and report new physical ID
152 * and whether host is root now.
153 */
154void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot); 104void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot);
155
156/*
157 * Notify core of sending a packet. Ackcode is the ack code returned for async
158 * transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
159 * for other cases (internal errors that don't justify a panic). Safe to call
160 * from within a transmit packet routine.
161 */
162void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet, 105void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
163 int ackcode); 106 int ackcode);
164
165/*
166 * Hand over received packet to the core. The contents of data are expected to
167 * be the full packet but with the CRCs left out (data block follows header
168 * immediately), with the header (i.e. the first four quadlets) in machine byte
169 * order and the data block in big endian. *data can be safely overwritten
170 * after this call.
171 *
172 * If the packet is a write request, write_acked is to be set to true if it was
173 * ack_complete'd already, false otherwise. This arg is ignored for any other
174 * packet type.
175 */
176void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size, 107void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
177 int write_acked); 108 int write_acked);
178 109
179
180/* 110/*
181 * CHARACTER DEVICE DISPATCHING 111 * CHARACTER DEVICE DISPATCHING
182 * 112 *
@@ -217,7 +147,9 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
217#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, \ 147#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, \
218 IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16) 148 IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)
219 149
220/* return the index (within a minor number block) of a file */ 150/**
151 * ieee1394_file_to_instance - get the index within a minor number block
152 */
221static inline unsigned char ieee1394_file_to_instance(struct file *file) 153static inline unsigned char ieee1394_file_to_instance(struct file *file)
222{ 154{
223 return file->f_path.dentry->d_inode->i_cindex; 155 return file->f_path.dentry->d_inode->i_cindex;
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
index 0833fc9f50c4..40078ce930c8 100644
--- a/drivers/ieee1394/ieee1394_transactions.c
+++ b/drivers/ieee1394/ieee1394_transactions.c
@@ -10,11 +10,16 @@
10 */ 10 */
11 11
12#include <linux/bitops.h> 12#include <linux/bitops.h>
13#include <linux/compiler.h>
14#include <linux/hardirq.h>
13#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/string.h>
17#include <linux/sched.h> /* because linux/wait.h is broken if CONFIG_SMP=n */
14#include <linux/wait.h> 18#include <linux/wait.h>
15 19
16#include <asm/bug.h> 20#include <asm/bug.h>
17#include <asm/errno.h> 21#include <asm/errno.h>
22#include <asm/system.h>
18 23
19#include "ieee1394.h" 24#include "ieee1394.h"
20#include "ieee1394_types.h" 25#include "ieee1394_types.h"
@@ -32,7 +37,7 @@
32#ifndef HPSB_DEBUG_TLABELS 37#ifndef HPSB_DEBUG_TLABELS
33static 38static
34#endif 39#endif
35spinlock_t hpsb_tlabel_lock = SPIN_LOCK_UNLOCKED; 40DEFINE_SPINLOCK(hpsb_tlabel_lock);
36 41
37static DECLARE_WAIT_QUEUE_HEAD(tlabel_wq); 42static DECLARE_WAIT_QUEUE_HEAD(tlabel_wq);
38 43
@@ -212,6 +217,15 @@ void hpsb_free_tlabel(struct hpsb_packet *packet)
212 wake_up_interruptible(&tlabel_wq); 217 wake_up_interruptible(&tlabel_wq);
213} 218}
214 219
220/**
221 * hpsb_packet_success - Make sense of the ack and reply codes
222 *
223 * Make sense of the ack and reply codes and return more convenient error codes:
224 * 0 = success. -%EBUSY = node is busy, try again. -%EAGAIN = error which can
225 * probably be resolved by retry. -%EREMOTEIO = node suffers from an internal
226 * error. -%EACCES = this transaction is not allowed on requested address.
227 * -%EINVAL = invalid address at node.
228 */
215int hpsb_packet_success(struct hpsb_packet *packet) 229int hpsb_packet_success(struct hpsb_packet *packet)
216{ 230{
217 switch (packet->ack_code) { 231 switch (packet->ack_code) {
@@ -364,6 +378,13 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 * buffer,
364 } 378 }
365 packet->host = host; 379 packet->host = host;
366 380
381 /* Because it is too difficult to determine all PHY speeds and link
382 * speeds here, we use S100... */
383 packet->speed_code = IEEE1394_SPEED_100;
384
385 /* ...and prevent hpsb_send_packet() from overriding it. */
386 packet->node_id = LOCAL_BUS | ALL_NODES;
387
367 if (hpsb_get_tlabel(packet)) { 388 if (hpsb_get_tlabel(packet)) {
368 hpsb_free_packet(packet); 389 hpsb_free_packet(packet);
369 return NULL; 390 return NULL;
@@ -493,6 +514,16 @@ struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
493 * avoid in kernel buffers for user space callers 514 * avoid in kernel buffers for user space callers
494 */ 515 */
495 516
517/**
518 * hpsb_read - generic read function
519 *
520 * Recognizes the local node ID and acts accordingly. Automatically uses a
521 * quadlet read request if @length == 4 and a block read request otherwise.
522 * It does not yet support lengths that are not a multiple of 4.
523 *
524 * You must explicitly specify the @generation for which the node ID is valid,
525 * to avoid sending packets to the wrong nodes when we race with a bus reset.
526 */
496int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation, 527int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
497 u64 addr, quadlet_t * buffer, size_t length) 528 u64 addr, quadlet_t * buffer, size_t length)
498{ 529{
@@ -532,6 +563,16 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
532 return retval; 563 return retval;
533} 564}
534 565
566/**
567 * hpsb_write - generic write function
568 *
569 * Recognizes the local node ID and acts accordingly. Automatically uses a
570 * quadlet write request if @length == 4 and a block write request
571 * otherwise. It does not yet support lengths that are not a multiple of 4.
572 *
573 * You must explicitly specify the @generation for which the node ID is valid,
574 * to avoid sending packets to the wrong nodes when we race with a bus reset.
575 */
535int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation, 576int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
536 u64 addr, quadlet_t * buffer, size_t length) 577 u64 addr, quadlet_t * buffer, size_t length)
537{ 578{
diff --git a/drivers/ieee1394/ieee1394_transactions.h b/drivers/ieee1394/ieee1394_transactions.h
index c1369c41469b..86b8ee692ea7 100644
--- a/drivers/ieee1394/ieee1394_transactions.h
+++ b/drivers/ieee1394/ieee1394_transactions.h
@@ -27,27 +27,7 @@ struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host,
27struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, 27struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer,
28 int length, int channel, int tag, 28 int length, int channel, int tag,
29 int sync); 29 int sync);
30
31/*
32 * hpsb_packet_success - Make sense of the ack and reply codes and
33 * return more convenient error codes:
34 * 0 success
35 * -EBUSY node is busy, try again
36 * -EAGAIN error which can probably resolved by retry
37 * -EREMOTEIO node suffers from an internal error
38 * -EACCES this transaction is not allowed on requested address
39 * -EINVAL invalid address at node
40 */
41int hpsb_packet_success(struct hpsb_packet *packet); 30int hpsb_packet_success(struct hpsb_packet *packet);
42
43/*
44 * The generic read and write functions. All recognize the local node ID
45 * and act accordingly. Read and write automatically use quadlet commands if
46 * length == 4 and and block commands otherwise (however, they do not yet
47 * support lengths that are not a multiple of 4). You must explicitly specifiy
48 * the generation for which the node ID is valid, to avoid sending packets to
49 * the wrong nodes when we race with a bus reset.
50 */
51int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation, 31int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
52 u64 addr, quadlet_t *buffer, size_t length); 32 u64 addr, quadlet_t *buffer, size_t length);
53int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation, 33int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c
index c6227e51136d..07ca35c98f96 100644
--- a/drivers/ieee1394/iso.c
+++ b/drivers/ieee1394/iso.c
@@ -10,11 +10,15 @@
10 */ 10 */
11 11
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/sched.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14 15
15#include "hosts.h" 16#include "hosts.h"
16#include "iso.h" 17#include "iso.h"
17 18
19/**
20 * hpsb_iso_stop - stop DMA
21 */
18void hpsb_iso_stop(struct hpsb_iso *iso) 22void hpsb_iso_stop(struct hpsb_iso *iso)
19{ 23{
20 if (!(iso->flags & HPSB_ISO_DRIVER_STARTED)) 24 if (!(iso->flags & HPSB_ISO_DRIVER_STARTED))
@@ -25,6 +29,9 @@ void hpsb_iso_stop(struct hpsb_iso *iso)
25 iso->flags &= ~HPSB_ISO_DRIVER_STARTED; 29 iso->flags &= ~HPSB_ISO_DRIVER_STARTED;
26} 30}
27 31
32/**
33 * hpsb_iso_shutdown - deallocate buffer and DMA context
34 */
28void hpsb_iso_shutdown(struct hpsb_iso *iso) 35void hpsb_iso_shutdown(struct hpsb_iso *iso)
29{ 36{
30 if (iso->flags & HPSB_ISO_DRIVER_INIT) { 37 if (iso->flags & HPSB_ISO_DRIVER_INIT) {
@@ -130,6 +137,9 @@ static struct hpsb_iso *hpsb_iso_common_init(struct hpsb_host *host,
130 return NULL; 137 return NULL;
131} 138}
132 139
140/**
141 * hpsb_iso_n_ready - returns number of packets ready to send or receive
142 */
133int hpsb_iso_n_ready(struct hpsb_iso *iso) 143int hpsb_iso_n_ready(struct hpsb_iso *iso)
134{ 144{
135 unsigned long flags; 145 unsigned long flags;
@@ -142,6 +152,9 @@ int hpsb_iso_n_ready(struct hpsb_iso *iso)
142 return val; 152 return val;
143} 153}
144 154
155/**
156 * hpsb_iso_xmit_init - allocate the buffer and DMA context
157 */
145struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host, 158struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host,
146 unsigned int data_buf_size, 159 unsigned int data_buf_size,
147 unsigned int buf_packets, 160 unsigned int buf_packets,
@@ -172,6 +185,11 @@ struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host,
172 return NULL; 185 return NULL;
173} 186}
174 187
188/**
189 * hpsb_iso_recv_init - allocate the buffer and DMA context
190 *
191 * Note, if channel = -1, multi-channel receive is enabled.
192 */
175struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host, 193struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host,
176 unsigned int data_buf_size, 194 unsigned int data_buf_size,
177 unsigned int buf_packets, 195 unsigned int buf_packets,
@@ -199,6 +217,11 @@ struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host,
199 return NULL; 217 return NULL;
200} 218}
201 219
220/**
221 * hpsb_iso_recv_listen_channel
222 *
223 * multi-channel only
224 */
202int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel) 225int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
203{ 226{
204 if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64) 227 if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
@@ -206,6 +229,11 @@ int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
206 return iso->host->driver->isoctl(iso, RECV_LISTEN_CHANNEL, channel); 229 return iso->host->driver->isoctl(iso, RECV_LISTEN_CHANNEL, channel);
207} 230}
208 231
232/**
233 * hpsb_iso_recv_unlisten_channel
234 *
235 * multi-channel only
236 */
209int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel) 237int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
210{ 238{
211 if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64) 239 if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
@@ -213,6 +241,11 @@ int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
213 return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel); 241 return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
214} 242}
215 243
244/**
245 * hpsb_iso_recv_set_channel_mask
246 *
247 * multi-channel only
248 */
216int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask) 249int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
217{ 250{
218 if (iso->type != HPSB_ISO_RECV || iso->channel != -1) 251 if (iso->type != HPSB_ISO_RECV || iso->channel != -1)
@@ -221,6 +254,12 @@ int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
221 (unsigned long)&mask); 254 (unsigned long)&mask);
222} 255}
223 256
257/**
258 * hpsb_iso_recv_flush - check for arrival of new packets
259 *
260 * check for arrival of new packets immediately (even if irq_interval
261 * has not yet been reached)
262 */
224int hpsb_iso_recv_flush(struct hpsb_iso *iso) 263int hpsb_iso_recv_flush(struct hpsb_iso *iso)
225{ 264{
226 if (iso->type != HPSB_ISO_RECV) 265 if (iso->type != HPSB_ISO_RECV)
@@ -238,6 +277,9 @@ static int do_iso_xmit_start(struct hpsb_iso *iso, int cycle)
238 return retval; 277 return retval;
239} 278}
240 279
280/**
281 * hpsb_iso_xmit_start - start DMA
282 */
241int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer) 283int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
242{ 284{
243 if (iso->type != HPSB_ISO_XMIT) 285 if (iso->type != HPSB_ISO_XMIT)
@@ -270,6 +312,9 @@ int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
270 return 0; 312 return 0;
271} 313}
272 314
315/**
316 * hpsb_iso_recv_start - start DMA
317 */
273int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync) 318int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
274{ 319{
275 int retval = 0; 320 int retval = 0;
@@ -306,8 +351,7 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
306} 351}
307 352
308/* check to make sure the user has not supplied bogus values of offset/len 353/* check to make sure the user has not supplied bogus values of offset/len
309 that would cause the kernel to access memory outside the buffer */ 354 * that would cause the kernel to access memory outside the buffer */
310
311static int hpsb_iso_check_offset_len(struct hpsb_iso *iso, 355static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
312 unsigned int offset, unsigned short len, 356 unsigned int offset, unsigned short len,
313 unsigned int *out_offset, 357 unsigned int *out_offset,
@@ -331,6 +375,12 @@ static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
331 return 0; 375 return 0;
332} 376}
333 377
378/**
379 * hpsb_iso_xmit_queue_packet - queue a packet for transmission.
380 *
381 * @offset is relative to the beginning of the DMA buffer, where the packet's
382 * data payload should already have been placed.
383 */
334int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, 384int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
335 u8 tag, u8 sy) 385 u8 tag, u8 sy)
336{ 386{
@@ -380,6 +430,9 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
380 return rv; 430 return rv;
381} 431}
382 432
433/**
434 * hpsb_iso_xmit_sync - wait until all queued packets have been transmitted
435 */
383int hpsb_iso_xmit_sync(struct hpsb_iso *iso) 436int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
384{ 437{
385 if (iso->type != HPSB_ISO_XMIT) 438 if (iso->type != HPSB_ISO_XMIT)
@@ -390,6 +443,15 @@ int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
390 iso->buf_packets); 443 iso->buf_packets);
391} 444}
392 445
446/**
447 * hpsb_iso_packet_sent
448 *
449 * Available to low-level drivers.
450 *
451 * Call after a packet has been transmitted to the bus (interrupt context is
452 * OK). @cycle is the _exact_ cycle the packet was sent on. @error should be
453 * non-zero if some sort of error occurred when sending the packet.
454 */
393void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error) 455void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
394{ 456{
395 unsigned long flags; 457 unsigned long flags;
@@ -413,6 +475,13 @@ void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
413 spin_unlock_irqrestore(&iso->lock, flags); 475 spin_unlock_irqrestore(&iso->lock, flags);
414} 476}
415 477
478/**
479 * hpsb_iso_packet_received
480 *
481 * Available to low-level drivers.
482 *
483 * Call after a packet has been received (interrupt context is OK).
484 */
416void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len, 485void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
417 u16 total_len, u16 cycle, u8 channel, u8 tag, 486 u16 total_len, u16 cycle, u8 channel, u8 tag,
418 u8 sy) 487 u8 sy)
@@ -442,6 +511,11 @@ void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
442 spin_unlock_irqrestore(&iso->lock, flags); 511 spin_unlock_irqrestore(&iso->lock, flags);
443} 512}
444 513
514/**
515 * hpsb_iso_recv_release_packets - release packets, reuse buffer
516 *
517 * @n_packets have been read out of the buffer, re-use the buffer space
518 */
445int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets) 519int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
446{ 520{
447 unsigned long flags; 521 unsigned long flags;
@@ -477,6 +551,13 @@ int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
477 return rv; 551 return rv;
478} 552}
479 553
554/**
555 * hpsb_iso_wake
556 *
557 * Available to low-level drivers.
558 *
559 * Call to wake waiting processes after buffer space has opened up.
560 */
480void hpsb_iso_wake(struct hpsb_iso *iso) 561void hpsb_iso_wake(struct hpsb_iso *iso)
481{ 562{
482 wake_up_interruptible(&iso->waitq); 563 wake_up_interruptible(&iso->waitq);
diff --git a/drivers/ieee1394/iso.h b/drivers/ieee1394/iso.h
index 1210a97e8685..b94e55e6eaa5 100644
--- a/drivers/ieee1394/iso.h
+++ b/drivers/ieee1394/iso.h
@@ -150,8 +150,6 @@ struct hpsb_iso {
150 150
151/* functions available to high-level drivers (e.g. raw1394) */ 151/* functions available to high-level drivers (e.g. raw1394) */
152 152
153/* allocate the buffer and DMA context */
154
155struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host, 153struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
156 unsigned int data_buf_size, 154 unsigned int data_buf_size,
157 unsigned int buf_packets, 155 unsigned int buf_packets,
@@ -159,8 +157,6 @@ struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
159 int speed, 157 int speed,
160 int irq_interval, 158 int irq_interval,
161 void (*callback)(struct hpsb_iso*)); 159 void (*callback)(struct hpsb_iso*));
162
163/* note: if channel = -1, multi-channel receive is enabled */
164struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host, 160struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
165 unsigned int data_buf_size, 161 unsigned int data_buf_size,
166 unsigned int buf_packets, 162 unsigned int buf_packets,
@@ -168,56 +164,29 @@ struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
168 int dma_mode, 164 int dma_mode,
169 int irq_interval, 165 int irq_interval,
170 void (*callback)(struct hpsb_iso*)); 166 void (*callback)(struct hpsb_iso*));
171
172/* multi-channel only */
173int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel); 167int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel);
174int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel); 168int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel);
175int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask); 169int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask);
176
177/* start/stop DMA */
178int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle, 170int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle,
179 int prebuffer); 171 int prebuffer);
180int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle, 172int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle,
181 int tag_mask, int sync); 173 int tag_mask, int sync);
182void hpsb_iso_stop(struct hpsb_iso *iso); 174void hpsb_iso_stop(struct hpsb_iso *iso);
183
184/* deallocate buffer and DMA context */
185void hpsb_iso_shutdown(struct hpsb_iso *iso); 175void hpsb_iso_shutdown(struct hpsb_iso *iso);
186
187/* queue a packet for transmission.
188 * 'offset' is relative to the beginning of the DMA buffer, where the packet's
189 * data payload should already have been placed. */
190int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, 176int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
191 u8 tag, u8 sy); 177 u8 tag, u8 sy);
192
193/* wait until all queued packets have been transmitted to the bus */
194int hpsb_iso_xmit_sync(struct hpsb_iso *iso); 178int hpsb_iso_xmit_sync(struct hpsb_iso *iso);
195 179int hpsb_iso_recv_release_packets(struct hpsb_iso *recv,
196/* N packets have been read out of the buffer, re-use the buffer space */ 180 unsigned int n_packets);
197int hpsb_iso_recv_release_packets(struct hpsb_iso *recv,
198 unsigned int n_packets);
199
200/* check for arrival of new packets immediately (even if irq_interval
201 * has not yet been reached) */
202int hpsb_iso_recv_flush(struct hpsb_iso *iso); 181int hpsb_iso_recv_flush(struct hpsb_iso *iso);
203
204/* returns # of packets ready to send or receive */
205int hpsb_iso_n_ready(struct hpsb_iso *iso); 182int hpsb_iso_n_ready(struct hpsb_iso *iso);
206 183
207/* the following are callbacks available to low-level drivers */ 184/* the following are callbacks available to low-level drivers */
208 185
209/* call after a packet has been transmitted to the bus (interrupt context is OK)
210 * 'cycle' is the _exact_ cycle the packet was sent on
211 * 'error' should be non-zero if some sort of error occurred when sending the
212 * packet */
213void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error); 186void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error);
214
215/* call after a packet has been received (interrupt context OK) */
216void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len, 187void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
217 u16 total_len, u16 cycle, u8 channel, u8 tag, 188 u16 total_len, u16 cycle, u8 channel, u8 tag,
218 u8 sy); 189 u8 sy);
219
220/* call to wake waiting processes after buffer space has opened up. */
221void hpsb_iso_wake(struct hpsb_iso *iso); 190void hpsb_iso_wake(struct hpsb_iso *iso);
222 191
223#endif /* IEEE1394_ISO_H */ 192#endif /* IEEE1394_ISO_H */
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index dbeba45a031e..6a1a0572275e 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -16,6 +16,7 @@
16#include <linux/kthread.h> 16#include <linux/kthread.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <linux/mutex.h>
19#include <linux/freezer.h> 20#include <linux/freezer.h>
20#include <asm/atomic.h> 21#include <asm/atomic.h>
21 22
@@ -115,7 +116,7 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
115 116
116static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci) 117static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci)
117{ 118{
118 return (CSR1212_BE32_TO_CPU(bus_info_data[2]) >> 8) & 0x3; 119 return (be32_to_cpu(bus_info_data[2]) >> 8) & 0x3;
119} 120}
120 121
121static struct csr1212_bus_ops nodemgr_csr_ops = { 122static struct csr1212_bus_ops nodemgr_csr_ops = {
@@ -580,7 +581,7 @@ static void nodemgr_create_drv_files(struct hpsb_protocol_driver *driver)
580 goto fail; 581 goto fail;
581 return; 582 return;
582fail: 583fail:
583 HPSB_ERR("Failed to add sysfs attribute for driver %s", driver->name); 584 HPSB_ERR("Failed to add sysfs attribute");
584} 585}
585 586
586 587
@@ -604,8 +605,7 @@ static void nodemgr_create_ne_dev_files(struct node_entry *ne)
604 goto fail; 605 goto fail;
605 return; 606 return;
606fail: 607fail:
607 HPSB_ERR("Failed to add sysfs attribute for node %016Lx", 608 HPSB_ERR("Failed to add sysfs attribute");
608 (unsigned long long)ne->guid);
609} 609}
610 610
611 611
@@ -619,7 +619,7 @@ static void nodemgr_create_host_dev_files(struct hpsb_host *host)
619 goto fail; 619 goto fail;
620 return; 620 return;
621fail: 621fail:
622 HPSB_ERR("Failed to add sysfs attribute for host %d", host->id); 622 HPSB_ERR("Failed to add sysfs attribute");
623} 623}
624 624
625 625
@@ -679,8 +679,7 @@ static void nodemgr_create_ud_dev_files(struct unit_directory *ud)
679 } 679 }
680 return; 680 return;
681fail: 681fail:
682 HPSB_ERR("Failed to add sysfs attributes for unit %s", 682 HPSB_ERR("Failed to add sysfs attribute");
683 ud->device.bus_id);
684} 683}
685 684
686 685
@@ -1144,13 +1143,13 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
1144 last_key_id = kv->key.id; 1143 last_key_id = kv->key.id;
1145 } 1144 }
1146 1145
1147 if (ne->vendor_name_kv && 1146 if (ne->vendor_name_kv) {
1148 device_create_file(&ne->device, &dev_attr_ne_vendor_name_kv)) 1147 int error = device_create_file(&ne->device,
1149 goto fail; 1148 &dev_attr_ne_vendor_name_kv);
1150 return; 1149
1151fail: 1150 if (error && error != -EEXIST)
1152 HPSB_ERR("Failed to add sysfs attribute for node %016Lx", 1151 HPSB_ERR("Failed to add sysfs attribute");
1153 (unsigned long long)ne->guid); 1152 }
1154} 1153}
1155 1154
1156#ifdef CONFIG_HOTPLUG 1155#ifdef CONFIG_HOTPLUG
@@ -1738,7 +1737,19 @@ exit:
1738 return 0; 1737 return 0;
1739} 1738}
1740 1739
1741int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *)) 1740/**
1741 * nodemgr_for_each_host - call a function for each IEEE 1394 host
1742 * @data: an address to supply to the callback
1743 * @cb: function to call for each host
1744 *
1745 * Iterate the hosts, calling a given function with supplied data for each host.
1746 * If the callback fails on a host, i.e. if it returns a non-zero value, the
1747 * iteration is stopped.
1748 *
1749 * Return value: 0 on success, non-zero on failure (same as returned by last run
1750 * of the callback).
1751 */
1752int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *))
1742{ 1753{
1743 struct class_device *cdev; 1754 struct class_device *cdev;
1744 struct hpsb_host *host; 1755 struct hpsb_host *host;
@@ -1748,7 +1759,7 @@ int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
1748 list_for_each_entry(cdev, &hpsb_host_class.children, node) { 1759 list_for_each_entry(cdev, &hpsb_host_class.children, node) {
1749 host = container_of(cdev, struct hpsb_host, class_dev); 1760 host = container_of(cdev, struct hpsb_host, class_dev);
1750 1761
1751 if ((error = cb(host, __data))) 1762 if ((error = cb(host, data)))
1752 break; 1763 break;
1753 } 1764 }
1754 up(&hpsb_host_class.sem); 1765 up(&hpsb_host_class.sem);
@@ -1756,7 +1767,7 @@ int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
1756 return error; 1767 return error;
1757} 1768}
1758 1769
1759/* The following four convenience functions use a struct node_entry 1770/* The following two convenience functions use a struct node_entry
1760 * for addressing a node on the bus. They are intended for use by any 1771 * for addressing a node on the bus. They are intended for use by any
1761 * process context, not just the nodemgr thread, so we need to be a 1772 * process context, not just the nodemgr thread, so we need to be a
1762 * little careful when reading out the node ID and generation. The 1773 * little careful when reading out the node ID and generation. The
@@ -1771,12 +1782,20 @@ int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *))
1771 * ID's. 1782 * ID's.
1772 */ 1783 */
1773 1784
1774void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt) 1785/**
1786 * hpsb_node_fill_packet - fill some destination information into a packet
1787 * @ne: destination node
1788 * @packet: packet to fill in
1789 *
1790 * This will fill in the given, pre-initialised hpsb_packet with the current
1791 * information from the node entry (host, node ID, bus generation number).
1792 */
1793void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet)
1775{ 1794{
1776 pkt->host = ne->host; 1795 packet->host = ne->host;
1777 pkt->generation = ne->generation; 1796 packet->generation = ne->generation;
1778 barrier(); 1797 barrier();
1779 pkt->node_id = ne->nodeid; 1798 packet->node_id = ne->nodeid;
1780} 1799}
1781 1800
1782int hpsb_node_write(struct node_entry *ne, u64 addr, 1801int hpsb_node_write(struct node_entry *ne, u64 addr,
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index 4147303ad448..e7ac683c72c7 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -153,30 +153,10 @@ static inline int hpsb_node_entry_valid(struct node_entry *ne)
153{ 153{
154 return ne->generation == get_hpsb_generation(ne->host); 154 return ne->generation == get_hpsb_generation(ne->host);
155} 155}
156 156void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *packet);
157/*
158 * This will fill in the given, pre-initialised hpsb_packet with the current
159 * information from the node entry (host, node ID, generation number). It will
160 * return false if the node owning the GUID is not accessible (and not modify
161 * the hpsb_packet) and return true otherwise.
162 *
163 * Note that packet sending may still fail in hpsb_send_packet if a bus reset
164 * happens while you are trying to set up the packet (due to obsolete generation
165 * number). It will at least reliably fail so that you don't accidentally and
166 * unknowingly send your packet to the wrong node.
167 */
168void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt);
169
170int hpsb_node_read(struct node_entry *ne, u64 addr,
171 quadlet_t *buffer, size_t length);
172int hpsb_node_write(struct node_entry *ne, u64 addr, 157int hpsb_node_write(struct node_entry *ne, u64 addr,
173 quadlet_t *buffer, size_t length); 158 quadlet_t *buffer, size_t length);
174int hpsb_node_lock(struct node_entry *ne, u64 addr, 159int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *));
175 int extcode, quadlet_t *data, quadlet_t arg);
176
177/* Iterate the hosts, calling a given function with supplied data for each
178 * host. */
179int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *));
180 160
181int init_ieee1394_nodemgr(void); 161int init_ieee1394_nodemgr(void);
182void cleanup_ieee1394_nodemgr(void); 162void cleanup_ieee1394_nodemgr(void);
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 06fac0d21264..5dadfd296f79 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -507,9 +507,8 @@ static void ohci_initialize(struct ti_ohci *ohci)
507 /* Set up self-id dma buffer */ 507 /* Set up self-id dma buffer */
508 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus); 508 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
509 509
510 /* enable self-id and phys */ 510 /* enable self-id */
511 reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID | 511 reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);
512 OHCI1394_LinkControl_RcvPhyPkt);
513 512
514 /* Set the Config ROM mapping register */ 513 /* Set the Config ROM mapping register */
515 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus); 514 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
@@ -518,9 +517,6 @@ static void ohci_initialize(struct ti_ohci *ohci)
518 ohci->max_packet_size = 517 ohci->max_packet_size =
519 1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1); 518 1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
520 519
521 /* Don't accept phy packets into AR request context */
522 reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
523
524 /* Clear the interrupt mask */ 520 /* Clear the interrupt mask */
525 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff); 521 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
526 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff); 522 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
@@ -617,7 +613,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
617#endif 613#endif
618 614
619 PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, " 615 PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
620 "attempting to setting max_packet_size to 512 bytes"); 616 "attempting to set max_packet_size to 512 bytes");
621 reg_write(ohci, OHCI1394_BusOptions, 617 reg_write(ohci, OHCI1394_BusOptions,
622 (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002); 618 (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
623 ohci->max_packet_size = 512; 619 ohci->max_packet_size = 512;
@@ -2377,6 +2373,7 @@ static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
2377 if (event & OHCI1394_postedWriteErr) { 2373 if (event & OHCI1394_postedWriteErr) {
2378 PRINT(KERN_ERR, "physical posted write error"); 2374 PRINT(KERN_ERR, "physical posted write error");
2379 /* no recovery strategy yet, had to involve protocol drivers */ 2375 /* no recovery strategy yet, had to involve protocol drivers */
2376 event &= ~OHCI1394_postedWriteErr;
2380 } 2377 }
2381 if (event & OHCI1394_cycleTooLong) { 2378 if (event & OHCI1394_cycleTooLong) {
2382 if(printk_ratelimit()) 2379 if(printk_ratelimit())
@@ -3658,6 +3655,7 @@ static struct pci_driver ohci1394_pci_driver = {
3658/* essentially the only purpose of this code is to allow another 3655/* essentially the only purpose of this code is to allow another
3659 module to hook into ohci's interrupt handler */ 3656 module to hook into ohci's interrupt handler */
3660 3657
3658/* returns zero if successful, one if DMA context is locked up */
3661int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg) 3659int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3662{ 3660{
3663 int i=0; 3661 int i=0;
diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h
index fa05f113f7f0..f1ad539e7c1b 100644
--- a/drivers/ieee1394/ohci1394.h
+++ b/drivers/ieee1394/ohci1394.h
@@ -461,9 +461,7 @@ int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
461 struct ohci1394_iso_tasklet *tasklet); 461 struct ohci1394_iso_tasklet *tasklet);
462void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci, 462void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
463 struct ohci1394_iso_tasklet *tasklet); 463 struct ohci1394_iso_tasklet *tasklet);
464 464int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg);
465/* returns zero if successful, one if DMA context is locked up */
466int ohci1394_stop_context (struct ti_ohci *ohci, int reg, char *msg);
467struct ti_ohci *ohci1394_get_struct(int card_num); 465struct ti_ohci *ohci1394_get_struct(int card_num);
468 466
469#endif 467#endif
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index bb897a37d9f7..c6aefd9ad0e8 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -938,7 +938,8 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
938 int header_length = req->req.misc & 0xffff; 938 int header_length = req->req.misc & 0xffff;
939 int expect_response = req->req.misc >> 16; 939 int expect_response = req->req.misc >> 16;
940 940
941 if ((header_length > req->req.length) || (header_length < 12)) { 941 if (header_length > req->req.length || header_length < 12 ||
942 header_length > FIELD_SIZEOF(struct hpsb_packet, header)) {
942 req->req.error = RAW1394_ERROR_INVALID_ARG; 943 req->req.error = RAW1394_ERROR_INVALID_ARG;
943 req->req.length = 0; 944 req->req.length = 0;
944 queue_complete_req(req); 945 queue_complete_req(req);
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 4edfff46b1e6..4cb6fa2bcfb7 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -59,8 +59,10 @@
59#include <linux/init.h> 59#include <linux/init.h>
60#include <linux/kernel.h> 60#include <linux/kernel.h>
61#include <linux/list.h> 61#include <linux/list.h>
62#include <linux/mm.h>
62#include <linux/module.h> 63#include <linux/module.h>
63#include <linux/moduleparam.h> 64#include <linux/moduleparam.h>
65#include <linux/sched.h>
64#include <linux/slab.h> 66#include <linux/slab.h>
65#include <linux/spinlock.h> 67#include <linux/spinlock.h>
66#include <linux/stat.h> 68#include <linux/stat.h>
@@ -469,19 +471,13 @@ static void sbp2util_write_doorbell(struct work_struct *work)
469static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu) 471static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
470{ 472{
471 struct sbp2_fwhost_info *hi = lu->hi; 473 struct sbp2_fwhost_info *hi = lu->hi;
472 int i;
473 unsigned long flags, orbs;
474 struct sbp2_command_info *cmd; 474 struct sbp2_command_info *cmd;
475 int i, orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;
475 476
476 orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;
477
478 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
479 for (i = 0; i < orbs; i++) { 477 for (i = 0; i < orbs; i++) {
480 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); 478 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
481 if (!cmd) { 479 if (!cmd)
482 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
483 return -ENOMEM; 480 return -ENOMEM;
484 }
485 cmd->command_orb_dma = dma_map_single(hi->host->device.parent, 481 cmd->command_orb_dma = dma_map_single(hi->host->device.parent,
486 &cmd->command_orb, 482 &cmd->command_orb,
487 sizeof(struct sbp2_command_orb), 483 sizeof(struct sbp2_command_orb),
@@ -489,11 +485,10 @@ static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
489 cmd->sge_dma = dma_map_single(hi->host->device.parent, 485 cmd->sge_dma = dma_map_single(hi->host->device.parent,
490 &cmd->scatter_gather_element, 486 &cmd->scatter_gather_element,
491 sizeof(cmd->scatter_gather_element), 487 sizeof(cmd->scatter_gather_element),
492 DMA_BIDIRECTIONAL); 488 DMA_TO_DEVICE);
493 INIT_LIST_HEAD(&cmd->list); 489 INIT_LIST_HEAD(&cmd->list);
494 list_add_tail(&cmd->list, &lu->cmd_orb_completed); 490 list_add_tail(&cmd->list, &lu->cmd_orb_completed);
495 } 491 }
496 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
497 return 0; 492 return 0;
498} 493}
499 494
@@ -514,7 +509,7 @@ static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu)
514 DMA_TO_DEVICE); 509 DMA_TO_DEVICE);
515 dma_unmap_single(host->device.parent, cmd->sge_dma, 510 dma_unmap_single(host->device.parent, cmd->sge_dma,
516 sizeof(cmd->scatter_gather_element), 511 sizeof(cmd->scatter_gather_element),
517 DMA_BIDIRECTIONAL); 512 DMA_TO_DEVICE);
518 kfree(cmd); 513 kfree(cmd);
519 } 514 }
520 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags); 515 spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
@@ -757,6 +752,11 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
757 SBP2_ERR("failed to register lower 4GB address range"); 752 SBP2_ERR("failed to register lower 4GB address range");
758 goto failed_alloc; 753 goto failed_alloc;
759 } 754 }
755#else
756 if (dma_set_mask(hi->host->device.parent, DMA_32BIT_MASK)) {
757 SBP2_ERR("failed to set 4GB DMA mask");
758 goto failed_alloc;
759 }
760#endif 760#endif
761 } 761 }
762 762
@@ -865,11 +865,8 @@ static int sbp2_start_device(struct sbp2_lu *lu)
865 if (!lu->login_orb) 865 if (!lu->login_orb)
866 goto alloc_fail; 866 goto alloc_fail;
867 867
868 if (sbp2util_create_command_orb_pool(lu)) { 868 if (sbp2util_create_command_orb_pool(lu))
869 SBP2_ERR("sbp2util_create_command_orb_pool failed!"); 869 goto alloc_fail;
870 sbp2_remove_device(lu);
871 return -ENOMEM;
872 }
873 870
874 /* Wait a second before trying to log in. Previously logged in 871 /* Wait a second before trying to log in. Previously logged in
875 * initiators need a chance to reconnect. */ 872 * initiators need a chance to reconnect. */
@@ -1628,7 +1625,7 @@ static void sbp2_link_orb_command(struct sbp2_lu *lu,
1628 DMA_TO_DEVICE); 1625 DMA_TO_DEVICE);
1629 dma_sync_single_for_device(hi->host->device.parent, cmd->sge_dma, 1626 dma_sync_single_for_device(hi->host->device.parent, cmd->sge_dma,
1630 sizeof(cmd->scatter_gather_element), 1627 sizeof(cmd->scatter_gather_element),
1631 DMA_BIDIRECTIONAL); 1628 DMA_TO_DEVICE);
1632 1629
1633 /* check to see if there are any previous orbs to use */ 1630 /* check to see if there are any previous orbs to use */
1634 spin_lock_irqsave(&lu->cmd_orb_lock, flags); 1631 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
@@ -1794,7 +1791,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
1794 DMA_TO_DEVICE); 1791 DMA_TO_DEVICE);
1795 dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma, 1792 dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
1796 sizeof(cmd->scatter_gather_element), 1793 sizeof(cmd->scatter_gather_element),
1797 DMA_BIDIRECTIONAL); 1794 DMA_TO_DEVICE);
1798 /* Grab SCSI command pointers and check status. */ 1795 /* Grab SCSI command pointers and check status. */
1799 /* 1796 /*
1800 * FIXME: If the src field in the status is 1, the ORB DMA must 1797 * FIXME: If the src field in the status is 1, the ORB DMA must
@@ -1926,7 +1923,7 @@ static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
1926 DMA_TO_DEVICE); 1923 DMA_TO_DEVICE);
1927 dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma, 1924 dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
1928 sizeof(cmd->scatter_gather_element), 1925 sizeof(cmd->scatter_gather_element),
1929 DMA_BIDIRECTIONAL); 1926 DMA_TO_DEVICE);
1930 sbp2util_mark_command_completed(lu, cmd); 1927 sbp2util_mark_command_completed(lu, cmd);
1931 if (cmd->Current_SCpnt) { 1928 if (cmd->Current_SCpnt) {
1932 cmd->Current_SCpnt->result = status << 16; 1929 cmd->Current_SCpnt->result = status << 16;
@@ -2057,7 +2054,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2057 dma_sync_single_for_cpu(hi->host->device.parent, 2054 dma_sync_single_for_cpu(hi->host->device.parent,
2058 cmd->sge_dma, 2055 cmd->sge_dma,
2059 sizeof(cmd->scatter_gather_element), 2056 sizeof(cmd->scatter_gather_element),
2060 DMA_BIDIRECTIONAL); 2057 DMA_TO_DEVICE);
2061 sbp2util_mark_command_completed(lu, cmd); 2058 sbp2util_mark_command_completed(lu, cmd);
2062 if (cmd->Current_SCpnt) { 2059 if (cmd->Current_SCpnt) {
2063 cmd->Current_SCpnt->result = DID_ABORT << 16; 2060 cmd->Current_SCpnt->result = DID_ABORT << 16;
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index 9ae842329bf3..44402b9d82a8 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -250,15 +250,15 @@ enum sbp2_dma_types {
250/* Per SCSI command */ 250/* Per SCSI command */
251struct sbp2_command_info { 251struct sbp2_command_info {
252 struct list_head list; 252 struct list_head list;
253 struct sbp2_command_orb command_orb ____cacheline_aligned; 253 struct sbp2_command_orb command_orb;
254 dma_addr_t command_orb_dma ____cacheline_aligned; 254 dma_addr_t command_orb_dma;
255 struct scsi_cmnd *Current_SCpnt; 255 struct scsi_cmnd *Current_SCpnt;
256 void (*Current_done)(struct scsi_cmnd *); 256 void (*Current_done)(struct scsi_cmnd *);
257 257
258 /* Also need s/g structure for each sbp2 command */ 258 /* Also need s/g structure for each sbp2 command */
259 struct sbp2_unrestricted_page_table 259 struct sbp2_unrestricted_page_table
260 scatter_gather_element[SG_ALL] ____cacheline_aligned; 260 scatter_gather_element[SG_ALL] __attribute__((aligned(8)));
261 dma_addr_t sge_dma ____cacheline_aligned; 261 dma_addr_t sge_dma;
262 void *sge_buffer; 262 void *sge_buffer;
263 dma_addr_t cmd_dma; 263 dma_addr_t cmd_dma;
264 enum sbp2_dma_types dma_type; 264 enum sbp2_dma_types dma_type;