author     Stefan Richter <stefanr@s5r6.in-berlin.de>   2009-06-07 16:57:53 -0400
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>   2009-06-14 08:26:29 -0400
commit     f91e3bd842ec6f5cea245993926ee8ff26250467 (patch)
tree       c7b66078c862a85fdc7d21bc2eb61f9c32a530ca
parent     b9530fd6c3f057bda258c8e2631ad1a25959f4a2 (diff)
firewire: net: style changes
Change names of types, variables, functions.
Omit debug code.
Use get_unaligned*, put_unaligned*.
Annotate big endian data.
Handle errors in __init.
Change whitespace.
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
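The "Use get_unaligned*, put_unaligned*" and "Annotate big endian data" items amount to the pattern sketched below: on-the-wire fields are declared __be16/__be32/__be64 so sparse can check byte order, and fields that may sit at odd offsets (such as h_proto inside the packed 10-byte fwnet_header) are accessed through the alignment-safe helpers. This is a minimal illustrative sketch, not code lifted from the patch; the struct and function names are made up for the example.

	#include <asm/unaligned.h>
	#include <linux/types.h>

	/* illustrative stand-in for struct fwnet_header introduced below */
	struct example_header {
		u8	h_dest[8];
		__be16	h_proto;	/* big endian on the wire, possibly unaligned */
	} __attribute__((packed));

	static void example_set_proto(struct example_header *h, u16 type)
	{
		/* store a host-order value as big endian without assuming alignment */
		put_unaligned_be16(type, &h->h_proto);
	}

	static u16 example_get_proto(const struct example_header *h)
	{
		/* load the big-endian field back into host order */
		return get_unaligned_be16(&h->h_proto);
	}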
-rw-r--r--  drivers/firewire/core-card.c |    2
-rw-r--r--  drivers/firewire/net.c       | 2041
-rw-r--r--  include/linux/firewire.h     |    9
3 files changed, 969 insertions, 1083 deletions
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index cdab32b20675..8c45e43da7c5 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -430,7 +430,7 @@ void fw_card_initialize(struct fw_card *card,
430 | 430 | ||
431 | INIT_DELAYED_WORK(&card->work, fw_card_bm_work); | 431 | INIT_DELAYED_WORK(&card->work, fw_card_bm_work); |
432 | card->netdev = NULL; | 432 | card->netdev = NULL; |
433 | INIT_LIST_HEAD(&card->ipv4_nodes); | 433 | INIT_LIST_HEAD(&card->peer_list); |
434 | } | 434 | } |
435 | EXPORT_SYMBOL(fw_card_initialize); | 435 | EXPORT_SYMBOL(fw_card_initialize); |
436 | 436 | ||
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 15353886bd80..ba6f924b1b13 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -6,6 +6,7 @@
6 | * based on eth1394 by Ben Collins et al | 6 | * based on eth1394 by Ben Collins et al |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/bug.h> | ||
9 | #include <linux/device.h> | 10 | #include <linux/device.h> |
10 | #include <linux/ethtool.h> | 11 | #include <linux/ethtool.h> |
11 | #include <linux/firewire.h> | 12 | #include <linux/firewire.h> |
@@ -13,6 +14,7 @@
13 | #include <linux/highmem.h> | 14 | #include <linux/highmem.h> |
14 | #include <linux/in.h> | 15 | #include <linux/in.h> |
15 | #include <linux/ip.h> | 16 | #include <linux/ip.h> |
17 | #include <linux/jiffies.h> | ||
16 | #include <linux/mod_devicetable.h> | 18 | #include <linux/mod_devicetable.h> |
17 | #include <linux/module.h> | 19 | #include <linux/module.h> |
18 | #include <linux/moduleparam.h> | 20 | #include <linux/moduleparam.h> |
@@ -22,181 +24,109 @@
22 | #include <asm/unaligned.h> | 24 | #include <asm/unaligned.h> |
23 | #include <net/arp.h> | 25 | #include <net/arp.h> |
24 | 26 | ||
25 | /* Things to potentially make runtime cofigurable */ | 27 | #define FWNET_MAX_FRAGMENTS 25 /* arbitrary limit */ |
26 | /* must be at least as large as our maximum receive size */ | 28 | #define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16 * 1024 ? 4 : 2) |
27 | #define FIFO_SIZE 4096 | ||
28 | /* Network timeout in glibbles */ | ||
29 | #define IPV4_TIMEOUT 100000 | ||
30 | 29 | ||
31 | /* Runitme configurable paramaters */ | 30 | #define IEEE1394_BROADCAST_CHANNEL 31 |
32 | static int ipv4_mpd = 25; | 31 | #define IEEE1394_ALL_NODES (0xffc0 | 0x003f) |
33 | static int ipv4_max_xmt = 0; | 32 | #define IEEE1394_MAX_PAYLOAD_S100 512 |
34 | /* 16k for receiving arp and broadcast packets. Enough? */ | 33 | #define FWNET_NO_FIFO_ADDR (~0ULL) |
35 | static int ipv4_iso_page_count = 4; | ||
36 | 34 | ||
37 | MODULE_AUTHOR("Jay Fenlason (fenlason@redhat.com)"); | 35 | #define IANA_SPECIFIER_ID 0x00005eU |
38 | MODULE_DESCRIPTION("Firewire IPv4 Driver (IPv4-over-IEEE1394 as per RFC 2734)"); | 36 | #define RFC2734_SW_VERSION 0x000001U |
39 | MODULE_LICENSE("GPL"); | ||
40 | MODULE_DEVICE_TABLE(ieee1394, ipv4_id_table); | ||
41 | module_param_named(max_partial_datagrams, ipv4_mpd, int, S_IRUGO | S_IWUSR); | ||
42 | MODULE_PARM_DESC(max_partial_datagrams, "Maximum number of received" | ||
43 | " incomplete fragmented datagrams (default = 25)."); | ||
44 | |||
45 | /* Max xmt is useful for forcing fragmentation, which makes testing easier. */ | ||
46 | module_param_named(max_transmit, ipv4_max_xmt, int, S_IRUGO | S_IWUSR); | ||
47 | MODULE_PARM_DESC(max_transmit, "Maximum datagram size to transmit" | ||
48 | " (larger datagrams will be fragmented) (default = 0 (use hardware defaults)."); | ||
49 | |||
50 | /* iso page count controls how many pages will be used for receiving broadcast packets. */ | ||
51 | module_param_named(iso_pages, ipv4_iso_page_count, int, S_IRUGO | S_IWUSR); | ||
52 | MODULE_PARM_DESC(iso_pages, "Number of pages to use for receiving broadcast packets" | ||
53 | " (default = 4)."); | ||
54 | |||
55 | /* uncomment this line to do debugging */ | ||
56 | #define fw_debug(s, args...) printk(KERN_DEBUG KBUILD_MODNAME ": " s, ## args) | ||
57 | |||
58 | /* comment out these lines to do debugging. */ | ||
59 | /* #undef fw_debug */ | ||
60 | /* #define fw_debug(s...) */ | ||
61 | /* #define print_hex_dump(l...) */ | ||
62 | |||
63 | /* Define a fake hardware header format for the networking core. Note that | ||
64 | * header size cannot exceed 16 bytes as that is the size of the header cache. | ||
65 | * Also, we do not need the source address in the header so we omit it and | ||
66 | * keep the header to under 16 bytes */ | ||
67 | #define IPV4_ALEN (8) | ||
68 | /* This must equal sizeof(struct ipv4_ether_hdr) */ | ||
69 | #define IPV4_HLEN (10) | ||
70 | |||
71 | /* FIXME: what's a good size for this? */ | ||
72 | #define INVALID_FIFO_ADDR (u64)~0ULL | ||
73 | |||
74 | /* Things specified by standards */ | ||
75 | #define BROADCAST_CHANNEL 31 | ||
76 | |||
77 | #define S100_BUFFER_SIZE 512 | ||
78 | #define MAX_BUFFER_SIZE 4096 | ||
79 | |||
80 | #define IPV4_GASP_SPECIFIER_ID 0x00005EU | ||
81 | #define IPV4_GASP_VERSION 0x00000001U | ||
82 | |||
83 | #define IPV4_GASP_OVERHEAD (2 * sizeof(u32)) /* for GASP header */ | ||
84 | |||
85 | #define IPV4_UNFRAG_HDR_SIZE sizeof(u32) | ||
86 | #define IPV4_FRAG_HDR_SIZE (2 * sizeof(u32)) | ||
87 | #define IPV4_FRAG_OVERHEAD sizeof(u32) | ||
88 | |||
89 | #define ALL_NODES (0xffc0 | 0x003f) | ||
90 | |||
91 | #define IPV4_HDR_UNFRAG 0 /* unfragmented */ | ||
92 | #define IPV4_HDR_FIRSTFRAG 1 /* first fragment */ | ||
93 | #define IPV4_HDR_LASTFRAG 2 /* last fragment */ | ||
94 | #define IPV4_HDR_INTFRAG 3 /* interior fragment */ | ||
95 | |||
96 | /* Our arp packet (ARPHRD_IEEE1394) */ | ||
97 | /* FIXME: note that this is probably bogus on weird-endian machines */ | ||
98 | struct ipv4_arp { | ||
99 | u16 hw_type; /* 0x0018 */ | ||
100 | u16 proto_type; /* 0x0806 */ | ||
101 | u8 hw_addr_len; /* 16 */ | ||
102 | u8 ip_addr_len; /* 4 */ | ||
103 | u16 opcode; /* ARP Opcode */ | ||
104 | /* Above is exactly the same format as struct arphdr */ | ||
105 | |||
106 | u64 s_uniq_id; /* Sender's 64bit EUI */ | ||
107 | u8 max_rec; /* Sender's max packet size */ | ||
108 | u8 sspd; /* Sender's max speed */ | ||
109 | u16 fifo_hi; /* hi 16bits of sender's FIFO addr */ | ||
110 | u32 fifo_lo; /* lo 32bits of sender's FIFO addr */ | ||
111 | u32 sip; /* Sender's IP Address */ | ||
112 | u32 tip; /* IP Address of requested hw addr */ | ||
113 | } __attribute__((packed)); | ||
114 | 37 | ||
115 | struct ipv4_ether_hdr { | 38 | #define IEEE1394_GASP_HDR_SIZE 8 |
116 | unsigned char h_dest[IPV4_ALEN]; /* destination address */ | ||
117 | unsigned short h_proto; /* packet type ID field */ | ||
118 | } __attribute__((packed)); | ||
119 | 39 | ||
120 | static inline struct ipv4_ether_hdr *ipv4_ether_hdr(const struct sk_buff *skb) | 40 | #define RFC2374_UNFRAG_HDR_SIZE 4 |
121 | { | 41 | #define RFC2374_FRAG_HDR_SIZE 8 |
122 | return (struct ipv4_ether_hdr *)skb_mac_header(skb); | 42 | #define RFC2374_FRAG_OVERHEAD 4 |
123 | } | ||
124 | 43 | ||
125 | enum ipv4_tx_type { | 44 | #define RFC2374_HDR_UNFRAG 0 /* unfragmented */ |
126 | IPV4_UNKNOWN = 0, | 45 | #define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */ |
127 | IPV4_GASP = 1, | 46 | #define RFC2374_HDR_LASTFRAG 2 /* last fragment */ |
128 | IPV4_WRREQ = 2, | 47 | #define RFC2374_HDR_INTFRAG 3 /* interior fragment */ |
129 | }; | ||
130 | 48 | ||
131 | enum ipv4_broadcast_state { | 49 | #define RFC2734_HW_ADDR_LEN 16 |
132 | IPV4_BROADCAST_ERROR, | ||
133 | IPV4_BROADCAST_RUNNING, | ||
134 | IPV4_BROADCAST_STOPPED, | ||
135 | }; | ||
136 | 50 | ||
137 | #define ipv4_get_hdr_lf(h) (((h)->w0&0xC0000000)>>30) | 51 | struct rfc2734_arp { |
138 | #define ipv4_get_hdr_ether_type(h) (((h)->w0&0x0000FFFF) ) | 52 | __be16 hw_type; /* 0x0018 */ |
139 | #define ipv4_get_hdr_dg_size(h) (((h)->w0&0x0FFF0000)>>16) | 53 | __be16 proto_type; /* 0x0806 */ |
140 | #define ipv4_get_hdr_fg_off(h) (((h)->w0&0x00000FFF) ) | 54 | u8 hw_addr_len; /* 16 */ |
141 | #define ipv4_get_hdr_dgl(h) (((h)->w1&0xFFFF0000)>>16) | 55 | u8 ip_addr_len; /* 4 */ |
56 | __be16 opcode; /* ARP Opcode */ | ||
57 | /* Above is exactly the same format as struct arphdr */ | ||
142 | 58 | ||
143 | #define ipv4_set_hdr_lf(lf) (( lf)<<30) | 59 | __be64 s_uniq_id; /* Sender's 64bit EUI */ |
144 | #define ipv4_set_hdr_ether_type(et) (( et) ) | 60 | u8 max_rec; /* Sender's max packet size */ |
145 | #define ipv4_set_hdr_dg_size(dgs) ((dgs)<<16) | 61 | u8 sspd; /* Sender's max speed */ |
146 | #define ipv4_set_hdr_fg_off(fgo) ((fgo) ) | 62 | __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */ |
63 | __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */ | ||
64 | __be32 sip; /* Sender's IP Address */ | ||
65 | __be32 tip; /* IP Address of requested hw addr */ | ||
66 | } __attribute__((packed)); | ||
147 | 67 | ||
148 | #define ipv4_set_hdr_dgl(dgl) ((dgl)<<16) | 68 | /* This header format is specific to this driver implementation. */ |
69 | #define FWNET_ALEN 8 | ||
70 | #define FWNET_HLEN 10 | ||
71 | struct fwnet_header { | ||
72 | u8 h_dest[FWNET_ALEN]; /* destination address */ | ||
73 | __be16 h_proto; /* packet type ID field */ | ||
74 | } __attribute__((packed)); | ||
149 | 75 | ||
150 | struct ipv4_hdr { | 76 | /* IPv4 and IPv6 encapsulation header */ |
77 | struct rfc2734_header { | ||
151 | u32 w0; | 78 | u32 w0; |
152 | u32 w1; | 79 | u32 w1; |
153 | }; | 80 | }; |
154 | 81 | ||
155 | static inline void ipv4_make_uf_hdr( struct ipv4_hdr *hdr, unsigned ether_type) { | 82 | #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) |
156 | hdr->w0 = ipv4_set_hdr_lf(IPV4_HDR_UNFRAG) | 83 | #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) |
157 | |ipv4_set_hdr_ether_type(ether_type); | 84 | #define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16) |
158 | fw_debug ( "Setting unfragmented header %p to %x\n", hdr, hdr->w0 ); | 85 | #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) |
159 | } | 86 | #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) |
160 | 87 | ||
161 | static inline void ipv4_make_ff_hdr ( struct ipv4_hdr *hdr, unsigned ether_type, unsigned dg_size, unsigned dgl ) { | 88 | #define fwnet_set_hdr_lf(lf) ((lf) << 30) |
162 | hdr->w0 = ipv4_set_hdr_lf(IPV4_HDR_FIRSTFRAG) | 89 | #define fwnet_set_hdr_ether_type(et) (et) |
163 | |ipv4_set_hdr_dg_size(dg_size) | 90 | #define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16) |
164 | |ipv4_set_hdr_ether_type(ether_type); | 91 | #define fwnet_set_hdr_fg_off(fgo) (fgo) |
165 | hdr->w1 = ipv4_set_hdr_dgl(dgl); | ||
166 | fw_debug ( "Setting fragmented header %p to first_frag %x,%x (et %x, dgs %x, dgl %x)\n", hdr, hdr->w0, hdr->w1, | ||
167 | ether_type, dg_size, dgl ); | ||
168 | } | ||
169 | 92 | ||
170 | static inline void ipv4_make_sf_hdr ( struct ipv4_hdr *hdr, unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl) { | 93 | #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) |
171 | hdr->w0 = ipv4_set_hdr_lf(lf) | ||
172 | |ipv4_set_hdr_dg_size(dg_size) | ||
173 | |ipv4_set_hdr_fg_off(fg_off); | ||
174 | hdr->w1 = ipv4_set_hdr_dgl(dgl); | ||
175 | fw_debug ( "Setting fragmented header %p to %x,%x (lf %x, dgs %x, fo %x dgl %x)\n", | ||
176 | hdr, hdr->w0, hdr->w1, | ||
177 | lf, dg_size, fg_off, dgl ); | ||
178 | } | ||
179 | 94 | ||
180 | /* End of IP1394 headers */ | 95 | static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr, |
96 | unsigned ether_type) | ||
97 | { | ||
98 | hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG) | ||
99 | | fwnet_set_hdr_ether_type(ether_type); | ||
100 | } | ||
181 | 101 | ||
182 | /* Fragment types */ | 102 | static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr, |
183 | #define ETH1394_HDR_LF_UF 0 /* unfragmented */ | 103 | unsigned ether_type, unsigned dg_size, unsigned dgl) |
184 | #define ETH1394_HDR_LF_FF 1 /* first fragment */ | 104 | { |
185 | #define ETH1394_HDR_LF_LF 2 /* last fragment */ | 105 | hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG) |
186 | #define ETH1394_HDR_LF_IF 3 /* interior fragment */ | 106 | | fwnet_set_hdr_dg_size(dg_size) |
107 | | fwnet_set_hdr_ether_type(ether_type); | ||
108 | hdr->w1 = fwnet_set_hdr_dgl(dgl); | ||
109 | } | ||
187 | 110 | ||
188 | #define IP1394_HW_ADDR_LEN 16 /* As per RFC */ | 111 | static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr, |
112 | unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl) | ||
113 | { | ||
114 | hdr->w0 = fwnet_set_hdr_lf(lf) | ||
115 | | fwnet_set_hdr_dg_size(dg_size) | ||
116 | | fwnet_set_hdr_fg_off(fg_off); | ||
117 | hdr->w1 = fwnet_set_hdr_dgl(dgl); | ||
118 | } | ||
189 | 119 | ||
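Aside (not part of the patch): the helpers above pack the RFC 2374 encapsulation quadlets with lf in bits 31-30 of w0, dg_size in bits 27-16 of w0 (fragments only), the ether_type (low 16 bits) or fragment offset (low 12 bits) in the remainder of w0, and the datagram label in the upper 16 bits of w1. The userspace sketch below mirrors those macros with made-up sample values to show the resulting quadlets; it is illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	/* illustrative copies of the fwnet_set_hdr_* macros, buildable in userspace */
	#define SET_LF(lf)          ((uint32_t)(lf) << 30)
	#define SET_ETHER_TYPE(et)  ((uint32_t)(et))
	#define SET_DG_SIZE(dgs)    ((uint32_t)(dgs) << 16)
	#define SET_DGL(dgl)        ((uint32_t)(dgl) << 16)

	#define HDR_UNFRAG    0
	#define HDR_FIRSTFRAG 1

	int main(void)
	{
		/* unfragmented datagram, ether_type 0x0800 (IPv4) */
		uint32_t w0_uf = SET_LF(HDR_UNFRAG) | SET_ETHER_TYPE(0x0800);

		/* first fragment: sample dg_size 0x5dc, datagram label 7 */
		uint32_t w0_ff = SET_LF(HDR_FIRSTFRAG) | SET_DG_SIZE(0x5dc)
				 | SET_ETHER_TYPE(0x0800);
		uint32_t w1_ff = SET_DGL(7);

		printf("unfrag    w0=%08x\n", (unsigned)w0_uf);       /* 00000800 */
		printf("firstfrag w0=%08x w1=%08x\n",
		       (unsigned)w0_ff, (unsigned)w1_ff);             /* 45dc0800 00070000 */
		return 0;
	}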
190 | /* This list keeps track of what parts of the datagram have been filled in */ | 120 | /* This list keeps track of what parts of the datagram have been filled in */ |
191 | struct ipv4_fragment_info { | 121 | struct fwnet_fragment_info { |
192 | struct list_head fragment_info; | 122 | struct list_head fi_link; |
193 | u16 offset; | 123 | u16 offset; |
194 | u16 len; | 124 | u16 len; |
195 | }; | 125 | }; |
196 | 126 | ||
197 | struct ipv4_partial_datagram { | 127 | struct fwnet_partial_datagram { |
198 | struct list_head pdg_list; | 128 | struct list_head pd_link; |
199 | struct list_head fragment_info; | 129 | struct list_head fi_list; |
200 | struct sk_buff *skb; | 130 | struct sk_buff *skb; |
201 | /* FIXME Why not use skb->data? */ | 131 | /* FIXME Why not use skb->data? */ |
202 | char *pbuf; | 132 | char *pbuf; |
@@ -208,40 +138,43 @@ struct ipv4_partial_datagram {
208 | /* | 138 | /* |
209 | * We keep one of these for each IPv4 capable device attached to a fw_card. | 139 | * We keep one of these for each IPv4 capable device attached to a fw_card. |
210 | * The list of them is stored in the fw_card structure rather than in the | 140 | * The list of them is stored in the fw_card structure rather than in the |
211 | * ipv4_priv because the remote IPv4 nodes may be probed before the card is, | 141 | * fwnet_device because the remote IPv4 nodes may be probed before the card is, |
212 | * so we need a place to store them before the ipv4_priv structure is | 142 | * so we need a place to store them before the fwnet_device structure is |
213 | * allocated. | 143 | * allocated. |
214 | */ | 144 | */ |
215 | struct ipv4_node { | 145 | struct fwnet_peer { |
216 | struct list_head ipv4_nodes; | 146 | struct list_head peer_link; |
217 | /* guid of the remote node */ | 147 | /* guid of the remote peer */ |
218 | u64 guid; | 148 | u64 guid; |
219 | /* FIFO address to transmit datagrams to, or INVALID_FIFO_ADDR */ | 149 | /* FIFO address to transmit datagrams to, or FWNET_NO_FIFO_ADDR */ |
220 | u64 fifo; | 150 | u64 fifo; |
221 | 151 | ||
222 | spinlock_t pdg_lock; /* partial datagram lock */ | 152 | spinlock_t pdg_lock; /* partial datagram lock */ |
223 | /* List of partial datagrams received from this node */ | 153 | /* List of partial datagrams received from this peer */ |
224 | struct list_head pdg_list; | 154 | struct list_head pd_list; |
225 | /* Number of entries in pdg_list at the moment */ | 155 | /* Number of entries in pd_list at the moment */ |
226 | unsigned pdg_size; | 156 | unsigned pdg_size; |
227 | 157 | ||
228 | /* max payload to transmit to this remote node */ | 158 | /* max payload to transmit to this remote peer */ |
229 | /* This already includes the IPV4_FRAG_HDR_SIZE overhead */ | 159 | /* This already includes the RFC2374_FRAG_HDR_SIZE overhead */ |
230 | u16 max_payload; | 160 | u16 max_payload; |
231 | /* outgoing datagram label */ | 161 | /* outgoing datagram label */ |
232 | u16 datagram_label; | 162 | u16 datagram_label; |
233 | /* Current node_id of the remote node */ | 163 | /* Current node_id of the remote peer */ |
234 | u16 nodeid; | 164 | u16 node_id; |
235 | /* current generation of the remote node */ | 165 | /* current generation of the remote peer */ |
236 | u8 generation; | 166 | u8 generation; |
237 | /* max speed that this node can receive at */ | 167 | /* max speed that this peer can receive at */ |
238 | u8 xmt_speed; | 168 | u8 xmt_speed; |
239 | }; | 169 | }; |
240 | 170 | ||
241 | struct ipv4_priv { | 171 | struct fwnet_device { |
242 | spinlock_t lock; | 172 | spinlock_t lock; |
243 | 173 | enum { | |
244 | enum ipv4_broadcast_state broadcast_state; | 174 | FWNET_BROADCAST_ERROR, |
175 | FWNET_BROADCAST_RUNNING, | ||
176 | FWNET_BROADCAST_STOPPED, | ||
177 | } broadcast_state; | ||
245 | struct fw_iso_context *broadcast_rcv_context; | 178 | struct fw_iso_context *broadcast_rcv_context; |
246 | struct fw_iso_buffer broadcast_rcv_buffer; | 179 | struct fw_iso_buffer broadcast_rcv_buffer; |
247 | void **broadcast_rcv_buffer_ptrs; | 180 | void **broadcast_rcv_buffer_ptrs; |
@@ -257,14 +190,12 @@ struct ipv4_priv {
257 | u16 broadcast_xmt_datagramlabel; | 190 | u16 broadcast_xmt_datagramlabel; |
258 | 191 | ||
259 | /* | 192 | /* |
260 | * The csr address that remote nodes must send datagrams to for us to | 193 | * The CSR address that remote nodes must send datagrams to for us to |
261 | * receive them. | 194 | * receive them. |
262 | */ | 195 | */ |
263 | struct fw_address_handler handler; | 196 | struct fw_address_handler handler; |
264 | u64 local_fifo; | 197 | u64 local_fifo; |
265 | 198 | ||
266 | /* Wake up to xmt */ | ||
267 | /* struct work_struct wake;*/ | ||
268 | /* List of packets to be sent */ | 199 | /* List of packets to be sent */ |
269 | struct list_head packet_list; | 200 | struct list_head packet_list; |
270 | /* | 201 | /* |
@@ -279,17 +210,17 @@ struct ipv4_priv {
279 | }; | 210 | }; |
280 | 211 | ||
281 | /* This is our task struct. It's used for the packet complete callback. */ | 212 | /* This is our task struct. It's used for the packet complete callback. */ |
282 | struct ipv4_packet_task { | 213 | struct fwnet_packet_task { |
283 | /* | 214 | /* |
284 | * ptask can actually be on priv->packet_list, priv->broadcasted_list, | 215 | * ptask can actually be on dev->packet_list, dev->broadcasted_list, |
285 | * or priv->sent_list depending on its current state. | 216 | * or dev->sent_list depending on its current state. |
286 | */ | 217 | */ |
287 | struct list_head packet_list; | 218 | struct list_head pt_link; |
288 | struct fw_transaction transaction; | 219 | struct fw_transaction transaction; |
289 | struct ipv4_hdr hdr; | 220 | struct rfc2734_header hdr; |
290 | struct sk_buff *skb; | 221 | struct sk_buff *skb; |
291 | struct ipv4_priv *priv; | 222 | struct fwnet_device *dev; |
292 | enum ipv4_tx_type tx_type; | 223 | |
293 | int outstanding_pkts; | 224 | int outstanding_pkts; |
294 | unsigned max_payload; | 225 | unsigned max_payload; |
295 | u64 fifo_addr; | 226 | u64 fifo_addr; |
@@ -298,243 +229,192 @@ struct ipv4_packet_task {
298 | u8 speed; | 229 | u8 speed; |
299 | }; | 230 | }; |
300 | 231 | ||
301 | static struct kmem_cache *ipv4_packet_task_cache; | 232 | /* |
302 | 233 | * saddr == NULL means use device source address. | |
303 | static const char ipv4_driver_name[] = "firewire-ipv4"; | 234 | * daddr == NULL means leave destination address (eg unresolved arp). |
304 | 235 | */ | |
305 | static const struct ieee1394_device_id ipv4_id_table[] = { | 236 | static int fwnet_header_create(struct sk_buff *skb, struct net_device *net, |
306 | { | 237 | unsigned short type, const void *daddr, |
307 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | | 238 | const void *saddr, unsigned len) |
308 | IEEE1394_MATCH_VERSION, | 239 | { |
309 | .specifier_id = IPV4_GASP_SPECIFIER_ID, | 240 | struct fwnet_header *h; |
310 | .version = IPV4_GASP_VERSION, | ||
311 | }, | ||
312 | { } | ||
313 | }; | ||
314 | |||
315 | static u32 ipv4_unit_directory_data[] = { | ||
316 | 0x00040000, /* unit directory */ | ||
317 | 0x12000000 | IPV4_GASP_SPECIFIER_ID, /* specifier ID */ | ||
318 | 0x81000003, /* text descriptor */ | ||
319 | 0x13000000 | IPV4_GASP_VERSION, /* version */ | ||
320 | 0x81000005, /* text descriptor */ | ||
321 | |||
322 | 0x00030000, /* Three quadlets */ | ||
323 | 0x00000000, /* Text */ | ||
324 | 0x00000000, /* Language 0 */ | ||
325 | 0x49414e41, /* I A N A */ | ||
326 | 0x00030000, /* Three quadlets */ | ||
327 | 0x00000000, /* Text */ | ||
328 | 0x00000000, /* Language 0 */ | ||
329 | 0x49507634, /* I P v 4 */ | ||
330 | }; | ||
331 | |||
332 | static struct fw_descriptor ipv4_unit_directory = { | ||
333 | .length = ARRAY_SIZE(ipv4_unit_directory_data), | ||
334 | .key = 0xd1000000, | ||
335 | .data = ipv4_unit_directory_data | ||
336 | }; | ||
337 | |||
338 | static int ipv4_send_packet(struct ipv4_packet_task *ptask ); | ||
339 | |||
340 | /* ------------------------------------------------------------------ */ | ||
341 | /****************************************** | ||
342 | * HW Header net device functions | ||
343 | ******************************************/ | ||
344 | /* These functions have been adapted from net/ethernet/eth.c */ | ||
345 | |||
346 | /* Create a fake MAC header for an arbitrary protocol layer. | ||
347 | * saddr=NULL means use device source address | ||
348 | * daddr=NULL means leave destination address (eg unresolved arp). */ | ||
349 | 241 | ||
350 | static int ipv4_header ( struct sk_buff *skb, struct net_device *dev, | 242 | h = (struct fwnet_header *)skb_push(skb, sizeof(*h)); |
351 | unsigned short type, const void *daddr, | 243 | put_unaligned_be16(type, &h->h_proto); |
352 | const void *saddr, unsigned len) { | ||
353 | struct ipv4_ether_hdr *eth; | ||
354 | 244 | ||
355 | eth = (struct ipv4_ether_hdr *)skb_push(skb, sizeof(*eth)); | 245 | if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) { |
356 | eth->h_proto = htons(type); | 246 | memset(h->h_dest, 0, net->addr_len); |
357 | 247 | ||
358 | if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) { | 248 | return net->hard_header_len; |
359 | memset(eth->h_dest, 0, dev->addr_len); | ||
360 | return dev->hard_header_len; | ||
361 | } | 249 | } |
362 | 250 | ||
363 | if (daddr) { | 251 | if (daddr) { |
364 | memcpy(eth->h_dest, daddr, dev->addr_len); | 252 | memcpy(h->h_dest, daddr, net->addr_len); |
365 | return dev->hard_header_len; | 253 | |
254 | return net->hard_header_len; | ||
366 | } | 255 | } |
367 | 256 | ||
368 | return -dev->hard_header_len; | 257 | return -net->hard_header_len; |
369 | } | 258 | } |
370 | 259 | ||
371 | /* Rebuild the faked MAC header. This is called after an ARP | 260 | static int fwnet_header_rebuild(struct sk_buff *skb) |
372 | * (or in future other address resolution) has completed on this | ||
373 | * sk_buff. We now let ARP fill in the other fields. | ||
374 | * | ||
375 | * This routine CANNOT use cached dst->neigh! | ||
376 | * Really, it is used only when dst->neigh is wrong. | ||
377 | */ | ||
378 | |||
379 | static int ipv4_rebuild_header(struct sk_buff *skb) | ||
380 | { | 261 | { |
381 | struct ipv4_ether_hdr *eth; | 262 | struct fwnet_header *h = (struct fwnet_header *)skb->data; |
382 | 263 | ||
383 | eth = (struct ipv4_ether_hdr *)skb->data; | 264 | if (get_unaligned_be16(&h->h_proto) == ETH_P_IP) |
384 | if (eth->h_proto == htons(ETH_P_IP)) | 265 | return arp_find((unsigned char *)&h->h_dest, skb); |
385 | return arp_find((unsigned char *)ð->h_dest, skb); | ||
386 | 266 | ||
387 | fw_notify ( "%s: unable to resolve type %04x addresses\n", | 267 | fw_notify("%s: unable to resolve type %04x addresses\n", |
388 | skb->dev->name,ntohs(eth->h_proto) ); | 268 | skb->dev->name, be16_to_cpu(h->h_proto)); |
389 | return 0; | 269 | return 0; |
390 | } | 270 | } |
391 | 271 | ||
392 | static int ipv4_header_cache(const struct neighbour *neigh, struct hh_cache *hh) { | 272 | static int fwnet_header_cache(const struct neighbour *neigh, |
393 | unsigned short type = hh->hh_type; | 273 | struct hh_cache *hh) |
394 | struct net_device *dev; | 274 | { |
395 | struct ipv4_ether_hdr *eth; | 275 | struct net_device *net; |
276 | struct fwnet_header *h; | ||
396 | 277 | ||
397 | if (type == htons(ETH_P_802_3)) | 278 | if (hh->hh_type == cpu_to_be16(ETH_P_802_3)) |
398 | return -1; | 279 | return -1; |
399 | dev = neigh->dev; | 280 | net = neigh->dev; |
400 | eth = (struct ipv4_ether_hdr *)((u8 *)hh->hh_data + 16 - sizeof(*eth)); | 281 | h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h)); |
401 | eth->h_proto = type; | 282 | h->h_proto = hh->hh_type; |
402 | memcpy(eth->h_dest, neigh->ha, dev->addr_len); | 283 | memcpy(h->h_dest, neigh->ha, net->addr_len); |
284 | hh->hh_len = FWNET_HLEN; | ||
403 | 285 | ||
404 | hh->hh_len = IPV4_HLEN; | ||
405 | return 0; | 286 | return 0; |
406 | } | 287 | } |
407 | 288 | ||
408 | /* Called by Address Resolution module to notify changes in address. */ | 289 | /* Called by Address Resolution module to notify changes in address. */ |
409 | static void ipv4_header_cache_update(struct hh_cache *hh, const struct net_device *dev, const unsigned char * haddr ) { | 290 | static void fwnet_header_cache_update(struct hh_cache *hh, |
410 | memcpy((u8 *)hh->hh_data + 16 - IPV4_HLEN, haddr, dev->addr_len); | 291 | const struct net_device *net, const unsigned char *haddr) |
292 | { | ||
293 | memcpy((u8 *)hh->hh_data + 16 - FWNET_HLEN, haddr, net->addr_len); | ||
411 | } | 294 | } |
412 | 295 | ||
413 | static int ipv4_header_parse(const struct sk_buff *skb, unsigned char *haddr) { | 296 | static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr) |
414 | memcpy(haddr, skb->dev->dev_addr, IPV4_ALEN); | 297 | { |
415 | return IPV4_ALEN; | 298 | memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN); |
299 | |||
300 | return FWNET_ALEN; | ||
416 | } | 301 | } |
417 | 302 | ||
418 | static const struct header_ops ipv4_header_ops = { | 303 | static const struct header_ops fwnet_header_ops = { |
419 | .create = ipv4_header, | 304 | .create = fwnet_header_create, |
420 | .rebuild = ipv4_rebuild_header, | 305 | .rebuild = fwnet_header_rebuild, |
421 | .cache = ipv4_header_cache, | 306 | .cache = fwnet_header_cache, |
422 | .cache_update = ipv4_header_cache_update, | 307 | .cache_update = fwnet_header_cache_update, |
423 | .parse = ipv4_header_parse, | 308 | .parse = fwnet_header_parse, |
424 | }; | 309 | }; |
425 | 310 | ||
426 | /* ------------------------------------------------------------------ */ | ||
427 | |||
428 | /* FIXME: is this correct for all cases? */ | 311 | /* FIXME: is this correct for all cases? */ |
429 | static bool ipv4_frag_overlap(struct ipv4_partial_datagram *pd, unsigned offset, unsigned len) | 312 | static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd, |
313 | unsigned offset, unsigned len) | ||
430 | { | 314 | { |
431 | struct ipv4_fragment_info *fi; | 315 | struct fwnet_fragment_info *fi; |
432 | unsigned end = offset + len; | 316 | unsigned end = offset + len; |
433 | 317 | ||
434 | list_for_each_entry(fi, &pd->fragment_info, fragment_info) { | 318 | list_for_each_entry(fi, &pd->fi_list, fi_link) |
435 | if (offset < fi->offset + fi->len && end > fi->offset) { | 319 | if (offset < fi->offset + fi->len && end > fi->offset) |
436 | fw_debug ( "frag_overlap pd %p fi %p (%x@%x) with %x@%x\n", pd, fi, fi->len, fi->offset, len, offset ); | ||
437 | return true; | 320 | return true; |
438 | } | 321 | |
439 | } | ||
440 | fw_debug ( "frag_overlap %p does not overlap with %x@%x\n", pd, len, offset ); | ||
441 | return false; | 322 | return false; |
442 | } | 323 | } |
443 | 324 | ||
444 | /* Assumes that new fragment does not overlap any existing fragments */ | 325 | /* Assumes that new fragment does not overlap any existing fragments */ |
445 | static struct ipv4_fragment_info *ipv4_frag_new ( struct ipv4_partial_datagram *pd, unsigned offset, unsigned len ) { | 326 | static struct fwnet_fragment_info *fwnet_frag_new( |
446 | struct ipv4_fragment_info *fi, *fi2, *new; | 327 | struct fwnet_partial_datagram *pd, unsigned offset, unsigned len) |
328 | { | ||
329 | struct fwnet_fragment_info *fi, *fi2, *new; | ||
447 | struct list_head *list; | 330 | struct list_head *list; |
448 | 331 | ||
449 | fw_debug ( "frag_new pd %p %x@%x\n", pd, len, offset ); | 332 | list = &pd->fi_list; |
450 | list = &pd->fragment_info; | 333 | list_for_each_entry(fi, &pd->fi_list, fi_link) { |
451 | list_for_each_entry(fi, &pd->fragment_info, fragment_info) { | ||
452 | if (fi->offset + fi->len == offset) { | 334 | if (fi->offset + fi->len == offset) { |
453 | /* The new fragment can be tacked on to the end */ | 335 | /* The new fragment can be tacked on to the end */ |
454 | /* Did the new fragment plug a hole? */ | 336 | /* Did the new fragment plug a hole? */ |
455 | fi2 = list_entry(fi->fragment_info.next, struct ipv4_fragment_info, fragment_info); | 337 | fi2 = list_entry(fi->fi_link.next, |
338 | struct fwnet_fragment_info, fi_link); | ||
456 | if (fi->offset + fi->len == fi2->offset) { | 339 | if (fi->offset + fi->len == fi2->offset) { |
457 | fw_debug ( "pd %p: hole filling %p (%x@%x) and %p(%x@%x): now %x@%x\n", pd, fi, fi->len, fi->offset, | ||
458 | fi2, fi2->len, fi2->offset, fi->len + len + fi2->len, fi->offset ); | ||
459 | /* glue fragments together */ | 340 | /* glue fragments together */ |
460 | fi->len += len + fi2->len; | 341 | fi->len += len + fi2->len; |
461 | list_del(&fi2->fragment_info); | 342 | list_del(&fi2->fi_link); |
462 | kfree(fi2); | 343 | kfree(fi2); |
463 | } else { | 344 | } else { |
464 | fw_debug ( "pd %p: extending %p from %x@%x to %x@%x\n", pd, fi, fi->len, fi->offset, fi->len+len, fi->offset ); | ||
465 | fi->len += len; | 345 | fi->len += len; |
466 | } | 346 | } |
347 | |||
467 | return fi; | 348 | return fi; |
468 | } | 349 | } |
469 | if (offset + len == fi->offset) { | 350 | if (offset + len == fi->offset) { |
470 | /* The new fragment can be tacked on to the beginning */ | 351 | /* The new fragment can be tacked on to the beginning */ |
471 | /* Did the new fragment plug a hole? */ | 352 | /* Did the new fragment plug a hole? */ |
472 | fi2 = list_entry(fi->fragment_info.prev, struct ipv4_fragment_info, fragment_info); | 353 | fi2 = list_entry(fi->fi_link.prev, |
354 | struct fwnet_fragment_info, fi_link); | ||
473 | if (fi2->offset + fi2->len == fi->offset) { | 355 | if (fi2->offset + fi2->len == fi->offset) { |
474 | /* glue fragments together */ | 356 | /* glue fragments together */ |
475 | fw_debug ( "pd %p: extending %p and merging with %p from %x@%x to %x@%x\n", | ||
476 | pd, fi2, fi, fi2->len, fi2->offset, fi2->len + fi->len + len, fi2->offset ); | ||
477 | fi2->len += fi->len + len; | 357 | fi2->len += fi->len + len; |
478 | list_del(&fi->fragment_info); | 358 | list_del(&fi->fi_link); |
479 | kfree(fi); | 359 | kfree(fi); |
360 | |||
480 | return fi2; | 361 | return fi2; |
481 | } | 362 | } |
482 | fw_debug ( "pd %p: extending %p from %x@%x to %x@%x\n", pd, fi, fi->len, fi->offset, offset, fi->len + len ); | ||
483 | fi->offset = offset; | 363 | fi->offset = offset; |
484 | fi->len += len; | 364 | fi->len += len; |
365 | |||
485 | return fi; | 366 | return fi; |
486 | } | 367 | } |
487 | if (offset > fi->offset + fi->len) { | 368 | if (offset > fi->offset + fi->len) { |
488 | list = &fi->fragment_info; | 369 | list = &fi->fi_link; |
489 | break; | 370 | break; |
490 | } | 371 | } |
491 | if (offset + len < fi->offset) { | 372 | if (offset + len < fi->offset) { |
492 | list = fi->fragment_info.prev; | 373 | list = fi->fi_link.prev; |
493 | break; | 374 | break; |
494 | } | 375 | } |
495 | } | 376 | } |
496 | 377 | ||
497 | new = kmalloc(sizeof(*new), GFP_ATOMIC); | 378 | new = kmalloc(sizeof(*new), GFP_ATOMIC); |
498 | if (!new) { | 379 | if (!new) { |
499 | fw_error ( "out of memory in fragment handling!\n" ); | 380 | fw_error("out of memory\n"); |
500 | return NULL; | 381 | return NULL; |
501 | } | 382 | } |
502 | 383 | ||
503 | new->offset = offset; | 384 | new->offset = offset; |
504 | new->len = len; | 385 | new->len = len; |
505 | list_add(&new->fragment_info, list); | 386 | list_add(&new->fi_link, list); |
506 | fw_debug ( "pd %p: new frag %p %x@%x\n", pd, new, new->len, new->offset ); | 387 | |
507 | list_for_each_entry( fi, &pd->fragment_info, fragment_info ) | ||
508 | fw_debug ( "fi %p %x@%x\n", fi, fi->len, fi->offset ); | ||
509 | return new; | 388 | return new; |
510 | } | 389 | } |
511 | 390 | ||
512 | /* ------------------------------------------------------------------ */ | 391 | static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net, |
513 | 392 | struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size, | |
514 | static struct ipv4_partial_datagram *ipv4_pd_new(struct net_device *netdev, | 393 | void *frag_buf, unsigned frag_off, unsigned frag_len) |
515 | struct ipv4_node *node, u16 datagram_label, unsigned dg_size, u32 *frag_buf, | 394 | { |
516 | unsigned frag_off, unsigned frag_len) { | 395 | struct fwnet_partial_datagram *new; |
517 | struct ipv4_partial_datagram *new; | 396 | struct fwnet_fragment_info *fi; |
518 | struct ipv4_fragment_info *fi; | ||
519 | 397 | ||
520 | new = kmalloc(sizeof(*new), GFP_ATOMIC); | 398 | new = kmalloc(sizeof(*new), GFP_ATOMIC); |
521 | if (!new) | 399 | if (!new) |
522 | goto fail; | 400 | goto fail; |
523 | INIT_LIST_HEAD(&new->fragment_info); | 401 | |
524 | fi = ipv4_frag_new ( new, frag_off, frag_len); | 402 | INIT_LIST_HEAD(&new->fi_list); |
525 | if ( fi == NULL ) | 403 | fi = fwnet_frag_new(new, frag_off, frag_len); |
404 | if (fi == NULL) | ||
526 | goto fail_w_new; | 405 | goto fail_w_new; |
406 | |||
527 | new->datagram_label = datagram_label; | 407 | new->datagram_label = datagram_label; |
528 | new->datagram_size = dg_size; | 408 | new->datagram_size = dg_size; |
529 | new->skb = dev_alloc_skb(dg_size + netdev->hard_header_len + 15); | 409 | new->skb = dev_alloc_skb(dg_size + net->hard_header_len + 15); |
530 | if ( new->skb == NULL ) | 410 | if (new->skb == NULL) |
531 | goto fail_w_fi; | 411 | goto fail_w_fi; |
532 | skb_reserve(new->skb, (netdev->hard_header_len + 15) & ~15); | 412 | |
413 | skb_reserve(new->skb, (net->hard_header_len + 15) & ~15); | ||
533 | new->pbuf = skb_put(new->skb, dg_size); | 414 | new->pbuf = skb_put(new->skb, dg_size); |
534 | memcpy(new->pbuf + frag_off, frag_buf, frag_len); | 415 | memcpy(new->pbuf + frag_off, frag_buf, frag_len); |
535 | list_add_tail(&new->pdg_list, &node->pdg_list); | 416 | list_add_tail(&new->pd_link, &peer->pd_list); |
536 | fw_debug ( "pd_new: new pd %p { dgl %u, dg_size %u, skb %p, pbuf %p } on node %p\n", | 417 | |
537 | new, new->datagram_label, new->datagram_size, new->skb, new->pbuf, node ); | ||
538 | return new; | 418 | return new; |
539 | 419 | ||
540 | fail_w_fi: | 420 | fail_w_fi: |
@@ -542,174 +422,171 @@ fail_w_fi:
542 | fail_w_new: | 422 | fail_w_new: |
543 | kfree(new); | 423 | kfree(new); |
544 | fail: | 424 | fail: |
545 | fw_error("ipv4_pd_new: no memory\n"); | 425 | fw_error("out of memory\n"); |
426 | |||
546 | return NULL; | 427 | return NULL; |
547 | } | 428 | } |
548 | 429 | ||
549 | static struct ipv4_partial_datagram *ipv4_pd_find(struct ipv4_node *node, u16 datagram_label) { | 430 | static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer, |
550 | struct ipv4_partial_datagram *pd; | 431 | u16 datagram_label) |
432 | { | ||
433 | struct fwnet_partial_datagram *pd; | ||
551 | 434 | ||
552 | list_for_each_entry(pd, &node->pdg_list, pdg_list) { | 435 | list_for_each_entry(pd, &peer->pd_list, pd_link) |
553 | if ( pd->datagram_label == datagram_label ) { | 436 | if (pd->datagram_label == datagram_label) |
554 | fw_debug ( "pd_find(node %p, label %u): pd %p\n", node, datagram_label, pd ); | ||
555 | return pd; | 437 | return pd; |
556 | } | 438 | |
557 | } | ||
558 | fw_debug ( "pd_find(node %p, label %u) no entry\n", node, datagram_label ); | ||
559 | return NULL; | 439 | return NULL; |
560 | } | 440 | } |
561 | 441 | ||
562 | 442 | ||
563 | static void ipv4_pd_delete ( struct ipv4_partial_datagram *old ) { | 443 | static void fwnet_pd_delete(struct fwnet_partial_datagram *old) |
564 | struct ipv4_fragment_info *fi, *n; | 444 | { |
445 | struct fwnet_fragment_info *fi, *n; | ||
565 | 446 | ||
566 | fw_debug ( "pd_delete %p\n", old ); | 447 | list_for_each_entry_safe(fi, n, &old->fi_list, fi_link) |
567 | list_for_each_entry_safe(fi, n, &old->fragment_info, fragment_info) { | ||
568 | fw_debug ( "Freeing fi %p\n", fi ); | ||
569 | kfree(fi); | 448 | kfree(fi); |
570 | } | 449 | |
571 | list_del(&old->pdg_list); | 450 | list_del(&old->pd_link); |
572 | dev_kfree_skb_any(old->skb); | 451 | dev_kfree_skb_any(old->skb); |
573 | kfree(old); | 452 | kfree(old); |
574 | } | 453 | } |
575 | 454 | ||
576 | static bool ipv4_pd_update ( struct ipv4_node *node, struct ipv4_partial_datagram *pd, | 455 | static bool fwnet_pd_update(struct fwnet_peer *peer, |
577 | u32 *frag_buf, unsigned frag_off, unsigned frag_len) { | 456 | struct fwnet_partial_datagram *pd, void *frag_buf, |
578 | fw_debug ( "pd_update node %p, pd %p, frag_buf %p, %x@%x\n", node, pd, frag_buf, frag_len, frag_off ); | 457 | unsigned frag_off, unsigned frag_len) |
579 | if ( ipv4_frag_new ( pd, frag_off, frag_len ) == NULL) | 458 | { |
459 | if (fwnet_frag_new(pd, frag_off, frag_len) == NULL) | ||
580 | return false; | 460 | return false; |
461 | |||
581 | memcpy(pd->pbuf + frag_off, frag_buf, frag_len); | 462 | memcpy(pd->pbuf + frag_off, frag_buf, frag_len); |
582 | 463 | ||
583 | /* | 464 | /* |
584 | * Move list entry to beginnig of list so that oldest partial | 465 | * Move list entry to beginnig of list so that oldest partial |
585 | * datagrams percolate to the end of the list | 466 | * datagrams percolate to the end of the list |
586 | */ | 467 | */ |
587 | list_move_tail(&pd->pdg_list, &node->pdg_list); | 468 | list_move_tail(&pd->pd_link, &peer->pd_list); |
588 | fw_debug ( "New pd list:\n" ); | 469 | |
589 | list_for_each_entry ( pd, &node->pdg_list, pdg_list ) { | ||
590 | fw_debug ( "pd %p\n", pd ); | ||
591 | } | ||
592 | return true; | 470 | return true; |
593 | } | 471 | } |
594 | 472 | ||
595 | static bool ipv4_pd_is_complete ( struct ipv4_partial_datagram *pd ) { | 473 | static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd) |
596 | struct ipv4_fragment_info *fi; | 474 | { |
597 | bool ret; | 475 | struct fwnet_fragment_info *fi; |
598 | 476 | ||
599 | fi = list_entry(pd->fragment_info.next, struct ipv4_fragment_info, fragment_info); | 477 | fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link); |
600 | 478 | ||
601 | ret = (fi->len == pd->datagram_size); | 479 | return fi->len == pd->datagram_size; |
602 | fw_debug ( "pd_is_complete (pd %p, dgs %x): fi %p (%x@%x) %s\n", pd, pd->datagram_size, fi, fi->len, fi->offset, ret ? "yes" : "no" ); | ||
603 | return ret; | ||
604 | } | 480 | } |
605 | 481 | ||
606 | /* ------------------------------------------------------------------ */ | 482 | static int fwnet_peer_new(struct fw_card *card, struct fw_device *device) |
483 | { | ||
484 | struct fwnet_peer *peer; | ||
607 | 485 | ||
608 | static int ipv4_node_new ( struct fw_card *card, struct fw_device *device ) { | 486 | peer = kmalloc(sizeof(*peer), GFP_KERNEL); |
609 | struct ipv4_node *node; | 487 | if (!peer) { |
488 | fw_error("out of memory\n"); | ||
610 | 489 | ||
611 | node = kmalloc ( sizeof(*node), GFP_KERNEL ); | ||
612 | if ( ! node ) { | ||
613 | fw_error ( "allocate new node failed\n" ); | ||
614 | return -ENOMEM; | 490 | return -ENOMEM; |
615 | } | 491 | } |
616 | node->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; | 492 | peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; |
617 | node->fifo = INVALID_FIFO_ADDR; | 493 | peer->fifo = FWNET_NO_FIFO_ADDR; |
618 | INIT_LIST_HEAD(&node->pdg_list); | 494 | INIT_LIST_HEAD(&peer->pd_list); |
619 | spin_lock_init(&node->pdg_lock); | 495 | spin_lock_init(&peer->pdg_lock); |
620 | node->pdg_size = 0; | 496 | peer->pdg_size = 0; |
621 | node->generation = device->generation; | 497 | peer->generation = device->generation; |
622 | rmb(); | 498 | rmb(); |
623 | node->nodeid = device->node_id; | 499 | peer->node_id = device->node_id; |
624 | /* FIXME what should it really be? */ | 500 | /* FIXME what should it really be? */ |
625 | node->max_payload = S100_BUFFER_SIZE - IPV4_UNFRAG_HDR_SIZE; | 501 | peer->max_payload = IEEE1394_MAX_PAYLOAD_S100 - RFC2374_UNFRAG_HDR_SIZE; |
626 | node->datagram_label = 0U; | 502 | peer->datagram_label = 0U; |
627 | node->xmt_speed = device->max_speed; | 503 | peer->xmt_speed = device->max_speed; |
628 | list_add_tail ( &node->ipv4_nodes, &card->ipv4_nodes ); | 504 | list_add_tail(&peer->peer_link, &card->peer_list); |
629 | fw_debug ( "node_new: %p { guid %016llx, generation %u, nodeid %x, max_payload %x, xmt_speed %x } added\n", | 505 | |
630 | node, (unsigned long long)node->guid, node->generation, node->nodeid, node->max_payload, node->xmt_speed ); | ||
631 | return 0; | 506 | return 0; |
632 | } | 507 | } |
633 | 508 | ||
634 | static struct ipv4_node *ipv4_node_find_by_guid(struct ipv4_priv *priv, u64 guid) { | 509 | /* FIXME caller must take the lock, or peer needs to be reference-counted */ |
635 | struct ipv4_node *node; | 510 | static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev, |
511 | u64 guid) | ||
512 | { | ||
513 | struct fwnet_peer *p, *peer = NULL; | ||
636 | unsigned long flags; | 514 | unsigned long flags; |
637 | 515 | ||
638 | spin_lock_irqsave(&priv->lock, flags); | 516 | spin_lock_irqsave(&dev->lock, flags); |
639 | list_for_each_entry(node, &priv->card->ipv4_nodes, ipv4_nodes) | 517 | list_for_each_entry(p, &dev->card->peer_list, peer_link) |
640 | if (node->guid == guid) { | 518 | if (p->guid == guid) { |
641 | /* FIXME: lock the node first? */ | 519 | peer = p; |
642 | spin_unlock_irqrestore ( &priv->lock, flags ); | 520 | break; |
643 | fw_debug ( "node_find_by_guid (%016llx) found %p\n", (unsigned long long)guid, node ); | ||
644 | return node; | ||
645 | } | 521 | } |
522 | spin_unlock_irqrestore(&dev->lock, flags); | ||
646 | 523 | ||
647 | spin_unlock_irqrestore ( &priv->lock, flags ); | 524 | return peer; |
648 | fw_debug ( "node_find_by_guid (%016llx) not found\n", (unsigned long long)guid ); | ||
649 | return NULL; | ||
650 | } | 525 | } |
651 | 526 | ||
652 | static struct ipv4_node *ipv4_node_find_by_nodeid(struct ipv4_priv *priv, u16 nodeid) { | 527 | /* FIXME caller must take the lock, or peer needs to be reference-counted */ |
653 | struct ipv4_node *node; | 528 | /* FIXME node_id doesn't mean anything without generation */ |
529 | static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev, | ||
530 | u16 node_id) | ||
531 | { | ||
532 | struct fwnet_peer *p, *peer = NULL; | ||
654 | unsigned long flags; | 533 | unsigned long flags; |
655 | 534 | ||
656 | spin_lock_irqsave(&priv->lock, flags); | 535 | spin_lock_irqsave(&dev->lock, flags); |
657 | list_for_each_entry(node, &priv->card->ipv4_nodes, ipv4_nodes) | 536 | list_for_each_entry(p, &dev->card->peer_list, peer_link) |
658 | if (node->nodeid == nodeid) { | 537 | if (p->node_id == node_id) { |
659 | /* FIXME: lock the node first? */ | 538 | peer = p; |
660 | spin_unlock_irqrestore ( &priv->lock, flags ); | 539 | break; |
661 | fw_debug ( "node_find_by_nodeid (%x) found %p\n", nodeid, node ); | ||
662 | return node; | ||
663 | } | 540 | } |
664 | fw_debug ( "node_find_by_nodeid (%x) not found\n", nodeid ); | 541 | spin_unlock_irqrestore(&dev->lock, flags); |
665 | spin_unlock_irqrestore ( &priv->lock, flags ); | 542 | |
666 | return NULL; | 543 | return peer; |
667 | } | 544 | } |
668 | 545 | ||
669 | /* This is only complicated because we can't assume priv exists */ | 546 | /* FIXME */ |
670 | static void ipv4_node_delete ( struct fw_card *card, struct fw_device *device ) { | 547 | static void fwnet_peer_delete(struct fw_card *card, struct fw_device *device) |
671 | struct net_device *netdev; | 548 | { |
672 | struct ipv4_priv *priv; | 549 | struct net_device *net; |
673 | struct ipv4_node *node; | 550 | struct fwnet_device *dev; |
551 | struct fwnet_peer *peer; | ||
674 | u64 guid; | 552 | u64 guid; |
675 | unsigned long flags; | 553 | unsigned long flags; |
676 | struct ipv4_partial_datagram *pd, *pd_next; | 554 | struct fwnet_partial_datagram *pd, *pd_next; |
677 | 555 | ||
678 | guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; | 556 | guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; |
679 | netdev = card->netdev; | 557 | net = card->netdev; |
680 | if ( netdev ) | 558 | if (net) |
681 | priv = netdev_priv ( netdev ); | 559 | dev = netdev_priv(net); |
682 | else | 560 | else |
683 | priv = NULL; | 561 | dev = NULL; |
684 | if ( priv ) | 562 | if (dev) |
685 | spin_lock_irqsave ( &priv->lock, flags ); | 563 | spin_lock_irqsave(&dev->lock, flags); |
686 | list_for_each_entry( node, &card->ipv4_nodes, ipv4_nodes ) { | 564 | |
687 | if ( node->guid == guid ) { | 565 | list_for_each_entry(peer, &card->peer_list, peer_link) { |
688 | list_del ( &node->ipv4_nodes ); | 566 | if (peer->guid == guid) { |
689 | list_for_each_entry_safe( pd, pd_next, &node->pdg_list, pdg_list ) | 567 | list_del(&peer->peer_link); |
690 | ipv4_pd_delete ( pd ); | 568 | list_for_each_entry_safe(pd, pd_next, &peer->pd_list, |
569 | pd_link) | ||
570 | fwnet_pd_delete(pd); | ||
691 | break; | 571 | break; |
692 | } | 572 | } |
693 | } | 573 | } |
694 | if ( priv ) | 574 | if (dev) |
695 | spin_unlock_irqrestore ( &priv->lock, flags ); | 575 | spin_unlock_irqrestore(&dev->lock, flags); |
696 | } | 576 | } |
697 | 577 | ||
698 | /* ------------------------------------------------------------------ */ | 578 | static int fwnet_finish_incoming_packet(struct net_device *net, |
699 | 579 | struct sk_buff *skb, u16 source_node_id, | |
700 | 580 | bool is_broadcast, u16 ether_type) | |
701 | static int ipv4_finish_incoming_packet ( struct net_device *netdev, | 581 | { |
702 | struct sk_buff *skb, u16 source_node_id, bool is_broadcast, u16 ether_type ) { | 582 | struct fwnet_device *dev; |
703 | struct ipv4_priv *priv; | 583 | static const __be64 broadcast_hw = cpu_to_be64(~0ULL); |
704 | static u64 broadcast_hw = ~0ULL; | ||
705 | int status; | 584 | int status; |
706 | u64 guid; | 585 | __be64 guid; |
707 | 586 | ||
708 | fw_debug ( "ipv4_finish_incoming_packet(%p, %p, %x, %s, %x\n", | 587 | dev = netdev_priv(net); |
709 | netdev, skb, source_node_id, is_broadcast ? "true" : "false", ether_type ); | ||
710 | priv = netdev_priv(netdev); | ||
711 | /* Write metadata, and then pass to the receive level */ | 588 | /* Write metadata, and then pass to the receive level */ |
712 | skb->dev = netdev; | 589 | skb->dev = net; |
713 | skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */ | 590 | skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */ |
714 | 591 | ||
715 | /* | 592 | /* |
@@ -724,73 +601,75 @@ static int ipv4_finish_incoming_packet ( struct net_device *netdev,
724 | * about the sending machine. | 601 | * about the sending machine. |
725 | */ | 602 | */ |
726 | if (ether_type == ETH_P_ARP) { | 603 | if (ether_type == ETH_P_ARP) { |
727 | struct ipv4_arp *arp1394; | 604 | struct rfc2734_arp *arp1394; |
728 | struct arphdr *arp; | 605 | struct arphdr *arp; |
729 | unsigned char *arp_ptr; | 606 | unsigned char *arp_ptr; |
730 | u64 fifo_addr; | 607 | u64 fifo_addr; |
608 | u64 peer_guid; | ||
731 | u8 max_rec; | 609 | u8 max_rec; |
732 | u8 sspd; | 610 | u8 sspd; |
733 | u16 max_payload; | 611 | u16 max_payload; |
734 | struct ipv4_node *node; | 612 | struct fwnet_peer *peer; |
735 | static const u16 ipv4_speed_to_max_payload[] = { | 613 | static const u16 fwnet_speed_to_max_payload[] = { |
736 | /* S100, S200, S400, S800, S1600, S3200 */ | 614 | /* S100, S200, S400, S800, S1600, S3200 */ |
737 | 512, 1024, 2048, 4096, 4096, 4096 | 615 | 512, 1024, 2048, 4096, 4096, 4096 |
738 | }; | 616 | }; |
739 | 617 | ||
740 | /* fw_debug ( "ARP packet\n" ); */ | 618 | arp1394 = (struct rfc2734_arp *)skb->data; |
741 | arp1394 = (struct ipv4_arp *)skb->data; | ||
742 | arp = (struct arphdr *)skb->data; | 619 | arp = (struct arphdr *)skb->data; |
743 | arp_ptr = (unsigned char *)(arp + 1); | 620 | arp_ptr = (unsigned char *)(arp + 1); |
744 | fifo_addr = (u64)ntohs(arp1394->fifo_hi) << 32 | | 621 | fifo_addr = (u64)ntohs(arp1394->fifo_hi) << 32 |
745 | ntohl(arp1394->fifo_lo); | 622 | | ntohl(arp1394->fifo_lo); |
746 | max_rec = priv->card->max_receive; | 623 | max_rec = dev->card->max_receive; |
747 | if ( arp1394->max_rec < max_rec ) | 624 | if (arp1394->max_rec < max_rec) |
748 | max_rec = arp1394->max_rec; | 625 | max_rec = arp1394->max_rec; |
749 | sspd = arp1394->sspd; | 626 | sspd = arp1394->sspd; |
750 | /* | 627 | /* Sanity check. OS X 10.3 PPC reportedly sends 131. */ |
751 | * Sanity check. MacOSX seems to be sending us 131 in this | 628 | if (sspd > SCODE_3200) { |
752 | * field (atleast on my Panther G5). Not sure why. | 629 | fw_notify("sspd %x out of range\n", sspd); |
753 | */ | ||
754 | if (sspd > 5 ) { | ||
755 | fw_notify ( "sspd %x out of range\n", sspd ); | ||
756 | sspd = 0; | 630 | sspd = 0; |
757 | } | 631 | } |
758 | 632 | ||
759 | max_payload = min(ipv4_speed_to_max_payload[sspd], | 633 | max_payload = min(fwnet_speed_to_max_payload[sspd], |
760 | (u16)(1 << (max_rec + 1))) - IPV4_UNFRAG_HDR_SIZE; | 634 | (u16)(1 << (max_rec + 1))) - RFC2374_UNFRAG_HDR_SIZE; |
761 | 635 | ||
762 | guid = be64_to_cpu(get_unaligned(&arp1394->s_uniq_id)); | 636 | peer_guid = get_unaligned_be64(&arp1394->s_uniq_id); |
763 | node = ipv4_node_find_by_guid(priv, guid); | 637 | peer = fwnet_peer_find_by_guid(dev, peer_guid); |
764 | if (!node) { | 638 | if (!peer) { |
765 | fw_notify ( "No node for ARP packet from %llx\n", guid ); | 639 | fw_notify("No peer for ARP packet from %016llx\n", |
640 | (unsigned long long)peer_guid); | ||
766 | goto failed_proto; | 641 | goto failed_proto; |
767 | } | 642 | } |
768 | if ( node->nodeid != source_node_id || node->generation != priv->card->generation ) { | 643 | |
769 | fw_notify ( "Internal error: node->nodeid (%x) != soucre_node_id (%x) or node->generation (%x) != priv->card->generation(%x)\n", | 644 | /* FIXME don't use card->generation */ |
770 | node->nodeid, source_node_id, node->generation, priv->card->generation ); | 645 | if (peer->node_id != source_node_id || |
771 | node->nodeid = source_node_id; | 646 | peer->generation != dev->card->generation) { |
772 | node->generation = priv->card->generation; | 647 | fw_notify("Internal error: peer->node_id (%x) != " |
648 | "source_node_id (%x) or peer->generation (%x)" | ||
649 | " != dev->card->generation(%x)\n", | ||
650 | peer->node_id, source_node_id, | ||
651 | peer->generation, dev->card->generation); | ||
652 | peer->node_id = source_node_id; | ||
653 | peer->generation = dev->card->generation; | ||
773 | } | 654 | } |
774 | 655 | ||
775 | /* FIXME: for debugging */ | 656 | /* FIXME: for debugging */ |
776 | if ( sspd > SCODE_400 ) | 657 | if (sspd > SCODE_400) |
777 | sspd = SCODE_400; | 658 | sspd = SCODE_400; |
778 | /* Update our speed/payload/fifo_offset table */ | 659 | /* Update our speed/payload/fifo_offset table */ |
779 | /* | 660 | /* |
780 | * FIXME: this does not handle cases where two high-speed endpoints must use a slower speed because of | 661 | * FIXME: this does not handle cases where two high-speed endpoints must use a slower speed because of |
781 | * a lower speed hub between them. We need to look at the actual topology map here. | 662 | * a lower speed hub between them. We need to look at the actual topology map here. |
782 | */ | 663 | */ |
783 | fw_debug ( "Setting node %p fifo %llx (was %llx), max_payload %x (was %x), speed %x (was %x)\n", | 664 | peer->fifo = fifo_addr; |
784 | node, fifo_addr, node->fifo, max_payload, node->max_payload, sspd, node->xmt_speed ); | 665 | peer->max_payload = max_payload; |
785 | node->fifo = fifo_addr; | ||
786 | node->max_payload = max_payload; | ||
787 | /* | 666 | /* |
788 | * Only allow speeds to go down from their initial value. | 667 | * Only allow speeds to go down from their initial value. |
789 | * Otherwise a local node that can only do S400 or slower may | 668 | * Otherwise a local peer that can only do S400 or slower may |
790 | * be told to transmit at S800 to a faster remote node. | 669 | * be told to transmit at S800 to a faster remote peer. |
791 | */ | 670 | */ |
792 | if ( node->xmt_speed > sspd ) | 671 | if (peer->xmt_speed > sspd) |
793 | node->xmt_speed = sspd; | 672 | peer->xmt_speed = sspd; |
794 | 673 | ||
795 | /* | 674 | /* |
796 | * Now that we're done with the 1394 specific stuff, we'll | 675 | * Now that we're done with the 1394 specific stuff, we'll |
@@ -805,248 +684,257 @@ static int ipv4_finish_incoming_packet ( struct net_device *netdev,
805 | */ | 684 | */ |
806 | 685 | ||
807 | arp->ar_hln = 8; | 686 | arp->ar_hln = 8; |
808 | arp_ptr += arp->ar_hln; /* skip over sender unique id */ | 687 | /* skip over sender unique id */ |
809 | *(u32 *)arp_ptr = arp1394->sip; /* move sender IP addr */ | 688 | arp_ptr += arp->ar_hln; |
810 | arp_ptr += arp->ar_pln; /* skip over sender IP addr */ | 689 | /* move sender IP addr */ |
690 | put_unaligned(arp1394->sip, (u32 *)arp_ptr); | ||
691 | /* skip over sender IP addr */ | ||
692 | arp_ptr += arp->ar_pln; | ||
811 | 693 | ||
812 | if (arp->ar_op == htons(ARPOP_REQUEST)) | 694 | if (arp->ar_op == htons(ARPOP_REQUEST)) |
813 | memset(arp_ptr, 0, sizeof(u64)); | 695 | memset(arp_ptr, 0, sizeof(u64)); |
814 | else | 696 | else |
815 | memcpy(arp_ptr, netdev->dev_addr, sizeof(u64)); | 697 | memcpy(arp_ptr, net->dev_addr, sizeof(u64)); |
816 | } | 698 | } |
817 | 699 | ||
818 | /* Now add the ethernet header. */ | 700 | /* Now add the ethernet header. */ |
819 | guid = cpu_to_be64(priv->card->guid); | 701 | guid = cpu_to_be64(dev->card->guid); |
820 | if (dev_hard_header(skb, netdev, ether_type, is_broadcast ? &broadcast_hw : &guid, NULL, | 702 | if (dev_hard_header(skb, net, ether_type, |
821 | skb->len) >= 0) { | 703 | is_broadcast ? &broadcast_hw : &guid, |
822 | struct ipv4_ether_hdr *eth; | 704 | NULL, skb->len) >= 0) { |
705 | struct fwnet_header *eth; | ||
823 | u16 *rawp; | 706 | u16 *rawp; |
824 | __be16 protocol; | 707 | __be16 protocol; |
825 | 708 | ||
826 | skb_reset_mac_header(skb); | 709 | skb_reset_mac_header(skb); |
827 | skb_pull(skb, sizeof(*eth)); | 710 | skb_pull(skb, sizeof(*eth)); |
828 | eth = ipv4_ether_hdr(skb); | 711 | eth = (struct fwnet_header *)skb_mac_header(skb); |
829 | if (*eth->h_dest & 1) { | 712 | if (*eth->h_dest & 1) { |
830 | if (memcmp(eth->h_dest, netdev->broadcast, netdev->addr_len) == 0) { | 713 | if (memcmp(eth->h_dest, net->broadcast, |
831 | fw_debug ( "Broadcast\n" ); | 714 | net->addr_len) == 0) |
832 | skb->pkt_type = PACKET_BROADCAST; | 715 | skb->pkt_type = PACKET_BROADCAST; |
833 | } | ||
834 | #if 0 | 716 | #if 0 |
835 | else | 717 | else |
836 | skb->pkt_type = PACKET_MULTICAST; | 718 | skb->pkt_type = PACKET_MULTICAST; |
837 | #endif | 719 | #endif |
838 | } else { | 720 | } else { |
839 | if (memcmp(eth->h_dest, netdev->dev_addr, netdev->addr_len)) { | 721 | if (memcmp(eth->h_dest, net->dev_addr, net->addr_len)) { |
840 | u64 a1, a2; | 722 | u64 a1, a2; |
841 | 723 | ||
842 | memcpy ( &a1, eth->h_dest, sizeof(u64)); | 724 | memcpy(&a1, eth->h_dest, sizeof(u64)); |
843 | memcpy ( &a2, netdev->dev_addr, sizeof(u64)); | 725 | memcpy(&a2, net->dev_addr, sizeof(u64)); |
844 | fw_debug ( "Otherhost %llx %llx %x\n", a1, a2, netdev->addr_len ); | ||
845 | skb->pkt_type = PACKET_OTHERHOST; | 726 | skb->pkt_type = PACKET_OTHERHOST; |
846 | } | 727 | } |
847 | } | 728 | } |
848 | if (ntohs(eth->h_proto) >= 1536) { | 729 | if (ntohs(eth->h_proto) >= 1536) { |
849 | fw_debug ( " proto %x %x\n", eth->h_proto, ntohs(eth->h_proto) ); | ||
850 | protocol = eth->h_proto; | 730 | protocol = eth->h_proto; |
851 | } else { | 731 | } else { |
852 | rawp = (u16 *)skb->data; | 732 | rawp = (u16 *)skb->data; |
853 | if (*rawp == 0xFFFF) { | 733 | if (*rawp == 0xffff) |
854 | fw_debug ( "proto 802_3\n" ); | ||
855 | protocol = htons(ETH_P_802_3); | 734 | protocol = htons(ETH_P_802_3); |
856 | } else { | 735 | else |
857 | fw_debug ( "proto 802_2\n" ); | ||
858 | protocol = htons(ETH_P_802_2); | 736 | protocol = htons(ETH_P_802_2); |
859 | } | ||
860 | } | 737 | } |
861 | skb->protocol = protocol; | 738 | skb->protocol = protocol; |
862 | } | 739 | } |
863 | status = netif_rx(skb); | 740 | status = netif_rx(skb); |
864 | if ( status == NET_RX_DROP) { | 741 | if (status == NET_RX_DROP) { |
865 | netdev->stats.rx_errors++; | 742 | net->stats.rx_errors++; |
866 | netdev->stats.rx_dropped++; | 743 | net->stats.rx_dropped++; |
867 | } else { | 744 | } else { |
868 | netdev->stats.rx_packets++; | 745 | net->stats.rx_packets++; |
869 | netdev->stats.rx_bytes += skb->len; | 746 | net->stats.rx_bytes += skb->len; |
870 | } | 747 | } |
871 | if (netif_queue_stopped(netdev)) | 748 | if (netif_queue_stopped(net)) |
872 | netif_wake_queue(netdev); | 749 | netif_wake_queue(net); |
750 | |||
873 | return 0; | 751 | return 0; |
874 | 752 | ||
875 | failed_proto: | 753 | failed_proto: |
876 | netdev->stats.rx_errors++; | 754 | net->stats.rx_errors++; |
877 | netdev->stats.rx_dropped++; | 755 | net->stats.rx_dropped++; |
756 | |||
878 | dev_kfree_skb_any(skb); | 757 | dev_kfree_skb_any(skb); |
879 | if (netif_queue_stopped(netdev)) | 758 | if (netif_queue_stopped(net)) |
880 | netif_wake_queue(netdev); | 759 | netif_wake_queue(net); |
881 | netdev->last_rx = jiffies; | 760 | |
761 | net->last_rx = jiffies; | ||
762 | |||
882 | return 0; | 763 | return 0; |
883 | } | 764 | } |
884 | 765 | ||
885 | /* ------------------------------------------------------------------ */ | 766 | static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, |
886 | 767 | u16 source_node_id, bool is_broadcast) | |
887 | static int ipv4_incoming_packet ( struct ipv4_priv *priv, u32 *buf, int len, u16 source_node_id, bool is_broadcast ) { | 768 | { |
888 | struct sk_buff *skb; | 769 | struct sk_buff *skb; |
889 | struct net_device *netdev; | 770 | struct net_device *net; |
890 | struct ipv4_hdr hdr; | 771 | struct rfc2734_header hdr; |
891 | unsigned lf; | 772 | unsigned lf; |
892 | unsigned long flags; | 773 | unsigned long flags; |
893 | struct ipv4_node *node; | 774 | struct fwnet_peer *peer; |
894 | struct ipv4_partial_datagram *pd; | 775 | struct fwnet_partial_datagram *pd; |
895 | int fg_off; | 776 | int fg_off; |
896 | int dg_size; | 777 | int dg_size; |
897 | u16 datagram_label; | 778 | u16 datagram_label; |
898 | int retval; | 779 | int retval; |
899 | u16 ether_type; | 780 | u16 ether_type; |
900 | 781 | ||
901 | fw_debug ( "ipv4_incoming_packet(%p, %p, %d, %x, %s)\n", priv, buf, len, source_node_id, is_broadcast ? "true" : "false" ); | 782 | net = dev->card->netdev; |
902 | netdev = priv->card->netdev; | ||
903 | 783 | ||
904 | hdr.w0 = ntohl(buf[0]); | 784 | hdr.w0 = be32_to_cpu(buf[0]); |
905 | lf = ipv4_get_hdr_lf(&hdr); | 785 | lf = fwnet_get_hdr_lf(&hdr); |
906 | if ( lf == IPV4_HDR_UNFRAG ) { | 786 | if (lf == RFC2374_HDR_UNFRAG) { |
907 | /* | 787 | /* |
908 | * An unfragmented datagram has been received by the ieee1394 | 788 | * An unfragmented datagram has been received by the ieee1394 |
909 | * bus. Build an skbuff around it so we can pass it to the | 789 | * bus. Build an skbuff around it so we can pass it to the |
910 | * high level network layer. | 790 | * high level network layer. |
911 | */ | 791 | */ |
912 | ether_type = ipv4_get_hdr_ether_type(&hdr); | 792 | ether_type = fwnet_get_hdr_ether_type(&hdr); |
913 | fw_debug ( "header w0 = %x, lf = %x, ether_type = %x\n", hdr.w0, lf, ether_type ); | ||
914 | buf++; | 793 | buf++; |
915 | len -= IPV4_UNFRAG_HDR_SIZE; | 794 | len -= RFC2374_UNFRAG_HDR_SIZE; |
916 | 795 | ||
917 | skb = dev_alloc_skb(len + netdev->hard_header_len + 15); | 796 | skb = dev_alloc_skb(len + net->hard_header_len + 15); |
918 | if (unlikely(!skb)) { | 797 | if (unlikely(!skb)) { |
919 | fw_error ( "Out of memory for incoming packet\n"); | 798 | fw_error("out of memory\n"); |
920 | netdev->stats.rx_dropped++; | 799 | net->stats.rx_dropped++; |
800 | |||
921 | return -1; | 801 | return -1; |
922 | } | 802 | } |
923 | skb_reserve(skb, (netdev->hard_header_len + 15) & ~15); | 803 | skb_reserve(skb, (net->hard_header_len + 15) & ~15); |
924 | memcpy(skb_put(skb, len), buf, len ); | 804 | memcpy(skb_put(skb, len), buf, len); |
925 | return ipv4_finish_incoming_packet(netdev, skb, source_node_id, is_broadcast, ether_type ); | 805 | |
806 | return fwnet_finish_incoming_packet(net, skb, source_node_id, | ||
807 | is_broadcast, ether_type); | ||
926 | } | 808 | } |
927 | /* A datagram fragment has been received, now the fun begins. */ | 809 | /* A datagram fragment has been received, now the fun begins. */ |
928 | hdr.w1 = ntohl(buf[1]); | 810 | hdr.w1 = ntohl(buf[1]); |
929 | buf +=2; | 811 | buf += 2; |
930 | len -= IPV4_FRAG_HDR_SIZE; | 812 | len -= RFC2374_FRAG_HDR_SIZE; |
931 | if ( lf ==IPV4_HDR_FIRSTFRAG ) { | 813 | if (lf == RFC2374_HDR_FIRSTFRAG) { |
932 | ether_type = ipv4_get_hdr_ether_type(&hdr); | 814 | ether_type = fwnet_get_hdr_ether_type(&hdr); |
933 | fg_off = 0; | 815 | fg_off = 0; |
934 | } else { | 816 | } else { |
935 | fg_off = ipv4_get_hdr_fg_off(&hdr); | 817 | ether_type = 0; |
936 | ether_type = 0; /* Shut up compiler! */ | 818 | fg_off = fwnet_get_hdr_fg_off(&hdr); |
937 | } | 819 | } |
938 | datagram_label = ipv4_get_hdr_dgl(&hdr); | 820 | datagram_label = fwnet_get_hdr_dgl(&hdr); |
939 | dg_size = ipv4_get_hdr_dg_size(&hdr); /* ??? + 1 */ | 821 | dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */ |
940 | fw_debug ( "fragmented: %x.%x = lf %x, ether_type %x, fg_off %x, dgl %x, dg_size %x\n", hdr.w0, hdr.w1, lf, ether_type, fg_off, datagram_label, dg_size ); | 822 | peer = fwnet_peer_find_by_node_id(dev, source_node_id); |
941 | node = ipv4_node_find_by_nodeid ( priv, source_node_id); | 823 | |
942 | spin_lock_irqsave(&node->pdg_lock, flags); | 824 | spin_lock_irqsave(&peer->pdg_lock, flags); |
943 | pd = ipv4_pd_find( node, datagram_label ); | 825 | |
826 | pd = fwnet_pd_find(peer, datagram_label); | ||
944 | if (pd == NULL) { | 827 | if (pd == NULL) { |
945 | while ( node->pdg_size >= ipv4_mpd ) { | 828 | while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) { |
946 | /* remove the oldest */ | 829 | /* remove the oldest */ |
947 | ipv4_pd_delete ( list_first_entry(&node->pdg_list, struct ipv4_partial_datagram, pdg_list) ); | 830 | fwnet_pd_delete(list_first_entry(&peer->pd_list, |
948 | node->pdg_size--; | 831 | struct fwnet_partial_datagram, pd_link)); |
832 | peer->pdg_size--; | ||
949 | } | 833 | } |
950 | pd = ipv4_pd_new ( netdev, node, datagram_label, dg_size, | 834 | pd = fwnet_pd_new(net, peer, datagram_label, |
951 | buf, fg_off, len); | 835 | dg_size, buf, fg_off, len); |
952 | if ( pd == NULL) { | 836 | if (pd == NULL) { |
953 | retval = -ENOMEM; | 837 | retval = -ENOMEM; |
954 | goto bad_proto; | 838 | goto bad_proto; |
955 | } | 839 | } |
956 | node->pdg_size++; | 840 | peer->pdg_size++; |
957 | } else { | 841 | } else { |
958 | if (ipv4_frag_overlap(pd, fg_off, len) || pd->datagram_size != dg_size) { | 842 | if (fwnet_frag_overlap(pd, fg_off, len) || |
843 | pd->datagram_size != dg_size) { | ||
959 | /* | 844 | /* |
960 | * Differing datagram sizes or overlapping fragments, | 845 | * Differing datagram sizes or overlapping fragments, |
961 | * Either way the remote machine is playing silly buggers | 846 | * discard old datagram and start a new one. |
962 | * with us: obliterate the old datagram and start a new one. | ||
963 | */ | 847 | */ |
964 | ipv4_pd_delete ( pd ); | 848 | fwnet_pd_delete(pd); |
965 | pd = ipv4_pd_new ( netdev, node, datagram_label, | 849 | pd = fwnet_pd_new(net, peer, datagram_label, |
966 | dg_size, buf, fg_off, len); | 850 | dg_size, buf, fg_off, len); |
967 | if ( pd == NULL ) { | 851 | if (pd == NULL) { |
968 | retval = -ENOMEM; | 852 | retval = -ENOMEM; |
969 | node->pdg_size--; | 853 | peer->pdg_size--; |
970 | goto bad_proto; | 854 | goto bad_proto; |
971 | } | 855 | } |
972 | } else { | 856 | } else { |
973 | bool worked; | 857 | if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) { |
974 | |||
975 | worked = ipv4_pd_update ( node, pd, | ||
976 | buf, fg_off, len ); | ||
977 | if ( ! worked ) { | ||
978 | /* | 858 | /* |
979 | * Couldn't save off fragment anyway | 859 | * Couldn't save off fragment anyway |
980 | * so might as well obliterate the | 860 | * so might as well obliterate the |
981 | * datagram now. | 861 | * datagram now. |
982 | */ | 862 | */ |
983 | ipv4_pd_delete ( pd ); | 863 | fwnet_pd_delete(pd); |
984 | node->pdg_size--; | 864 | peer->pdg_size--; |
985 | goto bad_proto; | 865 | goto bad_proto; |
986 | } | 866 | } |
987 | } | 867 | } |
988 | } /* new datagram or add to existing one */ | 868 | } /* new datagram or add to existing one */ |
989 | 869 | ||
990 | if ( lf == IPV4_HDR_FIRSTFRAG ) | 870 | if (lf == RFC2374_HDR_FIRSTFRAG) |
991 | pd->ether_type = ether_type; | 871 | pd->ether_type = ether_type; |
992 | if ( ipv4_pd_is_complete ( pd ) ) { | 872 | |
873 | if (fwnet_pd_is_complete(pd)) { | ||
993 | ether_type = pd->ether_type; | 874 | ether_type = pd->ether_type; |
994 | node->pdg_size--; | 875 | peer->pdg_size--; |
995 | skb = skb_get(pd->skb); | 876 | skb = skb_get(pd->skb); |
996 | ipv4_pd_delete ( pd ); | 877 | fwnet_pd_delete(pd); |
997 | spin_unlock_irqrestore(&node->pdg_lock, flags); | 878 | |
998 | return ipv4_finish_incoming_packet ( netdev, skb, source_node_id, false, ether_type ); | 879 | spin_unlock_irqrestore(&peer->pdg_lock, flags); |
880 | |||
881 | return fwnet_finish_incoming_packet(net, skb, source_node_id, | ||
882 | false, ether_type); | ||
999 | } | 883 | } |
1000 | /* | 884 | /* |
1001 | * Datagram is not complete, we're done for the | 885 | * Datagram is not complete, we're done for the |
1002 | * moment. | 886 | * moment. |
1003 | */ | 887 | */ |
1004 | spin_unlock_irqrestore(&node->pdg_lock, flags); | 888 | spin_unlock_irqrestore(&peer->pdg_lock, flags); |
889 | |||
1005 | return 0; | 890 | return 0; |
1006 | 891 | ||
1007 | bad_proto: | 892 | bad_proto: |
1008 | spin_unlock_irqrestore(&node->pdg_lock, flags); | 893 | spin_unlock_irqrestore(&peer->pdg_lock, flags); |
1009 | if (netif_queue_stopped(netdev)) | 894 | |
1010 | netif_wake_queue(netdev); | 895 | if (netif_queue_stopped(net)) |
896 | netif_wake_queue(net); | ||
897 | |||
1011 | return 0; | 898 | return 0; |
1012 | } | 899 | } |
1013 | 900 | ||
1014 | static void ipv4_receive_packet ( struct fw_card *card, struct fw_request *r, | 901 | static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, |
1015 | int tcode, int destination, int source, int generation, int speed, | 902 | int tcode, int destination, int source, int generation, |
1016 | unsigned long long offset, void *payload, size_t length, void *callback_data ) { | 903 | int speed, unsigned long long offset, void *payload, |
1017 | struct ipv4_priv *priv; | 904 | size_t length, void *callback_data) |
905 | { | ||
906 | struct fwnet_device *dev; | ||
1018 | int status; | 907 | int status; |
1019 | 908 | ||
1020 | fw_debug ( "ipv4_receive_packet(%p,%p,%x,%x,%x,%x,%x,%llx,%p,%lx,%p)\n", | 909 | dev = callback_data; |
1021 | card, r, tcode, destination, source, generation, speed, offset, payload, | 910 | if (tcode != TCODE_WRITE_BLOCK_REQUEST |
1022 | (unsigned long)length, callback_data); | 911 | || destination != card->node_id /* <- FIXME */ |
1023 | print_hex_dump ( KERN_DEBUG, "header: ", DUMP_PREFIX_OFFSET, 32, 1, payload, length, false ); | 912 | || generation != card->generation /* <- FIXME */ |
1024 | priv = callback_data; | 913 | || offset != dev->handler.offset) { |
1025 | if ( tcode != TCODE_WRITE_BLOCK_REQUEST | ||
1026 | || destination != card->node_id | ||
1027 | || generation != card->generation | ||
1028 | || offset != priv->handler.offset ) { | ||
1029 | fw_send_response(card, r, RCODE_CONFLICT_ERROR); | 914 | fw_send_response(card, r, RCODE_CONFLICT_ERROR); |
1030 | fw_debug("Conflict error card node_id=%x, card generation=%x, local offset %llx\n", | 915 | |
1031 | card->node_id, card->generation, (unsigned long long)priv->handler.offset ); | ||
1032 | return; | 916 | return; |
1033 | } | 917 | } |
1034 | status = ipv4_incoming_packet ( priv, payload, length, source, false ); | 918 | |
1035 | if ( status != 0 ) { | 919 | status = fwnet_incoming_packet(dev, payload, length, source, false); |
1036 | fw_error ( "Incoming packet failure\n" ); | 920 | if (status != 0) { |
1037 | fw_send_response ( card, r, RCODE_CONFLICT_ERROR ); | 921 | fw_error("Incoming packet failure\n"); |
922 | fw_send_response(card, r, RCODE_CONFLICT_ERROR); | ||
923 | |||
1038 | return; | 924 | return; |
1039 | } | 925 | } |
1040 | fw_send_response ( card, r, RCODE_COMPLETE ); | 926 | |
927 | fw_send_response(card, r, RCODE_COMPLETE); | ||
1041 | } | 928 | } |
1042 | 929 | ||
1043 | static void ipv4_receive_broadcast(struct fw_iso_context *context, u32 cycle, | 930 | static void fwnet_receive_broadcast(struct fw_iso_context *context, |
1044 | size_t header_length, void *header, void *data) { | 931 | u32 cycle, size_t header_length, void *header, void *data) |
1045 | struct ipv4_priv *priv; | 932 | { |
933 | struct fwnet_device *dev; | ||
1046 | struct fw_iso_packet packet; | 934 | struct fw_iso_packet packet; |
1047 | struct fw_card *card; | 935 | struct fw_card *card; |
1048 | u16 *hdr_ptr; | 936 | __be16 *hdr_ptr; |
1049 | u32 *buf_ptr; | 937 | __be32 *buf_ptr; |
1050 | int retval; | 938 | int retval; |
1051 | u32 length; | 939 | u32 length; |
1052 | u16 source_node_id; | 940 | u16 source_node_id; |
@@ -1055,70 +943,68 @@ static void ipv4_receive_broadcast(struct fw_iso_context *context, u32 cycle, | |||
1055 | unsigned long offset; | 943 | unsigned long offset; |
1056 | unsigned long flags; | 944 | unsigned long flags; |
1057 | 945 | ||
1058 | fw_debug ( "ipv4_receive_broadcast ( context=%p, cycle=%x, header_length=%lx, header=%p, data=%p )\n", context, cycle, (unsigned long)header_length, header, data ); | 946 | dev = data; |
1059 | print_hex_dump ( KERN_DEBUG, "header: ", DUMP_PREFIX_OFFSET, 32, 1, header, header_length, false ); | 947 | card = dev->card; |
1060 | priv = data; | ||
1061 | card = priv->card; | ||
1062 | hdr_ptr = header; | 948 | hdr_ptr = header; |
1063 | length = ntohs(hdr_ptr[0]); | 949 | length = be16_to_cpup(hdr_ptr); |
1064 | spin_lock_irqsave(&priv->lock,flags); | 950 | |
1065 | offset = priv->rcv_buffer_size * priv->broadcast_rcv_next_ptr; | 951 | spin_lock_irqsave(&dev->lock, flags); |
1066 | buf_ptr = priv->broadcast_rcv_buffer_ptrs[priv->broadcast_rcv_next_ptr++]; | 952 | |
1067 | if ( priv->broadcast_rcv_next_ptr == priv->num_broadcast_rcv_ptrs ) | 953 | offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr; |
1068 | priv->broadcast_rcv_next_ptr = 0; | 954 | buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++]; |
1069 | spin_unlock_irqrestore(&priv->lock,flags); | 955 | if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs) |
1070 | fw_debug ( "length %u at %p\n", length, buf_ptr ); | 956 | dev->broadcast_rcv_next_ptr = 0; |
1071 | print_hex_dump ( KERN_DEBUG, "buffer: ", DUMP_PREFIX_OFFSET, 32, 1, buf_ptr, length, false ); | 957 | |
958 | spin_unlock_irqrestore(&dev->lock, flags); | ||
1072 | 959 | ||
1073 | specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 | 960 | specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 |
1074 | | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; | 961 | | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; |
1075 | ver = be32_to_cpu(buf_ptr[1]) & 0xFFFFFF; | 962 | ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; |
1076 | source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; | 963 | source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; |
1077 | /* fw_debug ( "source %x SpecID %x ver %x\n", source_node_id, specifier_id, ver ); */ | 964 | |
1078 | if ( specifier_id == IPV4_GASP_SPECIFIER_ID && ver == IPV4_GASP_VERSION ) { | 965 | if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) { |
1079 | buf_ptr += 2; | 966 | buf_ptr += 2; |
1080 | length -= IPV4_GASP_OVERHEAD; | 967 | length -= IEEE1394_GASP_HDR_SIZE; |
1081 | ipv4_incoming_packet(priv, buf_ptr, length, source_node_id, true); | 968 | fwnet_incoming_packet(dev, buf_ptr, length, |
1082 | } else | 969 | source_node_id, true); |
1083 | fw_debug ( "Ignoring packet: not GASP\n" ); | 970 | } |
1084 | packet.payload_length = priv->rcv_buffer_size; | 971 | |
972 | packet.payload_length = dev->rcv_buffer_size; | ||
1085 | packet.interrupt = 1; | 973 | packet.interrupt = 1; |
1086 | packet.skip = 0; | 974 | packet.skip = 0; |
1087 | packet.tag = 3; | 975 | packet.tag = 3; |
1088 | packet.sy = 0; | 976 | packet.sy = 0; |
1089 | packet.header_length = IPV4_GASP_OVERHEAD; | 977 | packet.header_length = IEEE1394_GASP_HDR_SIZE; |
1090 | spin_lock_irqsave(&priv->lock,flags); | 978 | |
1091 | retval = fw_iso_context_queue ( priv->broadcast_rcv_context, &packet, | 979 | spin_lock_irqsave(&dev->lock, flags); |
1092 | &priv->broadcast_rcv_buffer, offset ); | ||
1093 | spin_unlock_irqrestore(&priv->lock,flags); | ||
1094 | if ( retval < 0 ) | ||
1095 | fw_error ( "requeue failed\n" ); | ||
1096 | } | ||
1097 | 980 | ||
1098 | static void debug_ptask ( struct ipv4_packet_task *ptask ) { | 981 | retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet, |
1099 | static const char *tx_types[] = { "Unknown", "GASP", "Write" }; | 982 | &dev->broadcast_rcv_buffer, offset); |
1100 | 983 | ||
1101 | fw_debug ( "packet %p { hdr { w0 %x w1 %x }, skb %p, priv %p," | 984 | spin_unlock_irqrestore(&dev->lock, flags); |
1102 | " tx_type %s, outstanding_pkts %d, max_payload %x, fifo %llx," | 985 | |
1103 | " speed %x, dest_node %x, generation %x }\n", | 986 | if (retval < 0) |
1104 | ptask, ptask->hdr.w0, ptask->hdr.w1, ptask->skb, ptask->priv, | 987 | fw_error("requeue failed\n"); |
1105 | ptask->tx_type > IPV4_WRREQ ? "Invalid" : tx_types[ptask->tx_type], | ||
1106 | ptask->outstanding_pkts, ptask->max_payload, | ||
1107 | ptask->fifo_addr, ptask->speed, ptask->dest_node, ptask->generation ); | ||
1108 | print_hex_dump ( KERN_DEBUG, "packet :", DUMP_PREFIX_OFFSET, 32, 1, | ||
1109 | ptask->skb->data, ptask->skb->len, false ); | ||
1110 | } | 988 | } |
1111 | 989 | ||
1112 | static void ipv4_transmit_packet_done ( struct ipv4_packet_task *ptask ) { | 990 | static struct kmem_cache *fwnet_packet_task_cache; |
1113 | struct ipv4_priv *priv; | 991 | |
992 | static int fwnet_send_packet(struct fwnet_packet_task *ptask); | ||
993 | |||
994 | static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | ||
995 | { | ||
996 | struct fwnet_device *dev; | ||
1114 | unsigned long flags; | 997 | unsigned long flags; |
1115 | 998 | ||
1116 | priv = ptask->priv; | 999 | dev = ptask->dev; |
1117 | spin_lock_irqsave ( &priv->lock, flags ); | 1000 | |
1118 | list_del ( &ptask->packet_list ); | 1001 | spin_lock_irqsave(&dev->lock, flags); |
1119 | spin_unlock_irqrestore ( &priv->lock, flags ); | 1002 | list_del(&ptask->pt_link); |
1120 | ptask->outstanding_pkts--; | 1003 | spin_unlock_irqrestore(&dev->lock, flags); |
1121 | if ( ptask->outstanding_pkts > 0 ) { | 1004 | |
1005 | ptask->outstanding_pkts--; /* FIXME access inside lock */ | ||
1006 | |||
1007 | if (ptask->outstanding_pkts > 0) { | ||
1122 | u16 dg_size; | 1008 | u16 dg_size; |
1123 | u16 fg_off; | 1009 | u16 fg_off; |
1124 | u16 datagram_label; | 1010 | u16 datagram_label; |
@@ -1126,133 +1012,139 @@ static void ipv4_transmit_packet_done ( struct ipv4_packet_task *ptask ) { | |||
1126 | struct sk_buff *skb; | 1012 | struct sk_buff *skb; |
1127 | 1013 | ||
1128 | /* Update the ptask to point to the next fragment and send it */ | 1014 | /* Update the ptask to point to the next fragment and send it */ |
1129 | lf = ipv4_get_hdr_lf(&ptask->hdr); | 1015 | lf = fwnet_get_hdr_lf(&ptask->hdr); |
1130 | switch (lf) { | 1016 | switch (lf) { |
1131 | case IPV4_HDR_LASTFRAG: | 1017 | case RFC2374_HDR_LASTFRAG: |
1132 | case IPV4_HDR_UNFRAG: | 1018 | case RFC2374_HDR_UNFRAG: |
1133 | default: | 1019 | default: |
1134 | fw_error ( "Outstanding packet %x lf %x, header %x,%x\n", ptask->outstanding_pkts, lf, ptask->hdr.w0, ptask->hdr.w1 ); | 1020 | fw_error("Outstanding packet %x lf %x, header %x,%x\n", |
1021 | ptask->outstanding_pkts, lf, ptask->hdr.w0, | ||
1022 | ptask->hdr.w1); | ||
1135 | BUG(); | 1023 | BUG(); |
1136 | 1024 | ||
1137 | case IPV4_HDR_FIRSTFRAG: | 1025 | case RFC2374_HDR_FIRSTFRAG: |
1138 | /* Set frag type here for future interior fragments */ | 1026 | /* Set frag type here for future interior fragments */ |
1139 | dg_size = ipv4_get_hdr_dg_size(&ptask->hdr); | 1027 | dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); |
1140 | fg_off = ptask->max_payload - IPV4_FRAG_HDR_SIZE; | 1028 | fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE; |
1141 | datagram_label = ipv4_get_hdr_dgl(&ptask->hdr); | 1029 | datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); |
1142 | break; | 1030 | break; |
1143 | 1031 | ||
1144 | case IPV4_HDR_INTFRAG: | 1032 | case RFC2374_HDR_INTFRAG: |
1145 | dg_size = ipv4_get_hdr_dg_size(&ptask->hdr); | 1033 | dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); |
1146 | fg_off = ipv4_get_hdr_fg_off(&ptask->hdr) + ptask->max_payload - IPV4_FRAG_HDR_SIZE; | 1034 | fg_off = fwnet_get_hdr_fg_off(&ptask->hdr) |
1147 | datagram_label = ipv4_get_hdr_dgl(&ptask->hdr); | 1035 | + ptask->max_payload - RFC2374_FRAG_HDR_SIZE; |
1036 | datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); | ||
1148 | break; | 1037 | break; |
1149 | } | 1038 | } |
1150 | skb = ptask->skb; | 1039 | skb = ptask->skb; |
1151 | skb_pull ( skb, ptask->max_payload ); | 1040 | skb_pull(skb, ptask->max_payload); |
1152 | if ( ptask->outstanding_pkts > 1 ) { | 1041 | if (ptask->outstanding_pkts > 1) { |
1153 | ipv4_make_sf_hdr ( &ptask->hdr, | 1042 | fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, |
1154 | IPV4_HDR_INTFRAG, dg_size, fg_off, datagram_label ); | 1043 | dg_size, fg_off, datagram_label); |
1155 | } else { | 1044 | } else { |
1156 | ipv4_make_sf_hdr ( &ptask->hdr, | 1045 | fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG, |
1157 | IPV4_HDR_LASTFRAG, dg_size, fg_off, datagram_label ); | 1046 | dg_size, fg_off, datagram_label); |
1158 | ptask->max_payload = skb->len + IPV4_FRAG_HDR_SIZE; | 1047 | ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; |
1159 | |||
1160 | } | 1048 | } |
1161 | ipv4_send_packet ( ptask ); | 1049 | fwnet_send_packet(ptask); |
1162 | } else { | 1050 | } else { |
1163 | dev_kfree_skb_any ( ptask->skb ); | 1051 | dev_kfree_skb_any(ptask->skb); |
1164 | kmem_cache_free( ipv4_packet_task_cache, ptask ); | 1052 | kmem_cache_free(fwnet_packet_task_cache, ptask); |
1165 | } | 1053 | } |
1166 | } | 1054 | } |
1167 | 1055 | ||
1168 | static void ipv4_write_complete ( struct fw_card *card, int rcode, | 1056 | static void fwnet_write_complete(struct fw_card *card, int rcode, |
1169 | void *payload, size_t length, void *data ) { | 1057 | void *payload, size_t length, void *data) |
1170 | struct ipv4_packet_task *ptask; | 1058 | { |
1059 | struct fwnet_packet_task *ptask; | ||
1171 | 1060 | ||
1172 | ptask = data; | 1061 | ptask = data; |
1173 | fw_debug ( "ipv4_write_complete ( %p, %x, %p, %lx, %p )\n", | ||
1174 | card, rcode, payload, (unsigned long)length, data ); | ||
1175 | debug_ptask ( ptask ); | ||
1176 | 1062 | ||
1177 | if ( rcode == RCODE_COMPLETE ) { | 1063 | if (rcode == RCODE_COMPLETE) |
1178 | ipv4_transmit_packet_done ( ptask ); | 1064 | fwnet_transmit_packet_done(ptask); |
1179 | } else { | 1065 | else |
1180 | fw_error ( "ipv4_write_complete: failed: %x\n", rcode ); | 1066 | fw_error("fwnet_write_complete: failed: %x\n", rcode); |
1181 | /* ??? error recovery */ | 1067 | /* ??? error recovery */ |
1182 | } | ||
1183 | } | 1068 | } |
1184 | 1069 | ||
1185 | static int ipv4_send_packet ( struct ipv4_packet_task *ptask ) { | 1070 | static int fwnet_send_packet(struct fwnet_packet_task *ptask) |
1186 | struct ipv4_priv *priv; | 1071 | { |
1072 | struct fwnet_device *dev; | ||
1187 | unsigned tx_len; | 1073 | unsigned tx_len; |
1188 | struct ipv4_hdr *bufhdr; | 1074 | struct rfc2734_header *bufhdr; |
1189 | unsigned long flags; | 1075 | unsigned long flags; |
1190 | struct net_device *netdev; | 1076 | struct net_device *net; |
1191 | #if 0 /* stefanr */ | ||
1192 | int retval; | ||
1193 | #endif | ||
1194 | 1077 | ||
1195 | fw_debug ( "ipv4_send_packet\n" ); | 1078 | dev = ptask->dev; |
1196 | debug_ptask ( ptask ); | ||
1197 | priv = ptask->priv; | ||
1198 | tx_len = ptask->max_payload; | 1079 | tx_len = ptask->max_payload; |
1199 | switch (ipv4_get_hdr_lf(&ptask->hdr)) { | 1080 | switch (fwnet_get_hdr_lf(&ptask->hdr)) { |
1200 | case IPV4_HDR_UNFRAG: | 1081 | case RFC2374_HDR_UNFRAG: |
1201 | bufhdr = (struct ipv4_hdr *)skb_push(ptask->skb, IPV4_UNFRAG_HDR_SIZE); | 1082 | bufhdr = (struct rfc2734_header *) |
1202 | bufhdr->w0 = htonl(ptask->hdr.w0); | 1083 | skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE); |
1084 | put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); | ||
1203 | break; | 1085 | break; |
1204 | 1086 | ||
1205 | case IPV4_HDR_FIRSTFRAG: | 1087 | case RFC2374_HDR_FIRSTFRAG: |
1206 | case IPV4_HDR_INTFRAG: | 1088 | case RFC2374_HDR_INTFRAG: |
1207 | case IPV4_HDR_LASTFRAG: | 1089 | case RFC2374_HDR_LASTFRAG: |
1208 | bufhdr = (struct ipv4_hdr *)skb_push(ptask->skb, IPV4_FRAG_HDR_SIZE); | 1090 | bufhdr = (struct rfc2734_header *) |
1209 | bufhdr->w0 = htonl(ptask->hdr.w0); | 1091 | skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE); |
1210 | bufhdr->w1 = htonl(ptask->hdr.w1); | 1092 | put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); |
1093 | put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1); | ||
1211 | break; | 1094 | break; |
1212 | 1095 | ||
1213 | default: | 1096 | default: |
1214 | BUG(); | 1097 | BUG(); |
1215 | } | 1098 | } |
1216 | if ( ptask->tx_type == IPV4_GASP ) { | 1099 | if (ptask->dest_node == IEEE1394_ALL_NODES) { |
1217 | u32 *packets; | 1100 | u8 *p; |
1218 | int generation; | 1101 | int generation; |
1219 | int nodeid; | 1102 | int node_id; |
1220 | 1103 | ||
1221 | /* ptask->generation may not have been set yet */ | 1104 | /* ptask->generation may not have been set yet */ |
1222 | generation = priv->card->generation; | 1105 | generation = dev->card->generation; |
1223 | smp_rmb(); | 1106 | smp_rmb(); |
1224 | nodeid = priv->card->node_id; | 1107 | node_id = dev->card->node_id; |
1225 | packets = (u32 *)skb_push(ptask->skb, sizeof(u32)*2); | 1108 | |
1226 | packets[0] = htonl(nodeid << 16 | (IPV4_GASP_SPECIFIER_ID>>8)); | 1109 | p = skb_push(ptask->skb, 8); |
1227 | packets[1] = htonl((IPV4_GASP_SPECIFIER_ID & 0xFF) << 24 | IPV4_GASP_VERSION); | 1110 | put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p); |
1228 | fw_send_request ( priv->card, &ptask->transaction, TCODE_STREAM_DATA, | 1111 | put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 |
1229 | fw_stream_packet_destination_id(3, BROADCAST_CHANNEL, 0), | 1112 | | RFC2734_SW_VERSION, &p[4]); |
1230 | generation, SCODE_100, 0ULL, ptask->skb->data, tx_len + 8, ipv4_write_complete, ptask ); | 1113 | |
1231 | spin_lock_irqsave(&priv->lock,flags); | 1114 | /* We should not transmit if broadcast_channel.valid == 0. */ |
1232 | list_add_tail ( &ptask->packet_list, &priv->broadcasted_list ); | 1115 | fw_send_request(dev->card, &ptask->transaction, |
1233 | spin_unlock_irqrestore(&priv->lock,flags); | 1116 | TCODE_STREAM_DATA, |
1234 | #if 0 /* stefanr */ | 1117 | fw_stream_packet_destination_id(3, |
1235 | return retval; | 1118 | IEEE1394_BROADCAST_CHANNEL, 0), |
1236 | #else | 1119 | generation, SCODE_100, 0ULL, ptask->skb->data, |
1120 | tx_len + 8, fwnet_write_complete, ptask); | ||
1121 | |||
1122 | /* FIXME race? */ | ||
1123 | spin_lock_irqsave(&dev->lock, flags); | ||
1124 | list_add_tail(&ptask->pt_link, &dev->broadcasted_list); | ||
1125 | spin_unlock_irqrestore(&dev->lock, flags); | ||
1126 | |||
1237 | return 0; | 1127 | return 0; |
1238 | #endif | ||
1239 | } | 1128 | } |
1240 | fw_debug("send_request (%p, %p, WRITE_BLOCK, %x, %x, %x, %llx, %p, %d, %p, %p\n", | 1129 | |
1241 | priv->card, &ptask->transaction, ptask->dest_node, ptask->generation, | 1130 | fw_send_request(dev->card, &ptask->transaction, |
1242 | ptask->speed, (unsigned long long)ptask->fifo_addr, ptask->skb->data, tx_len, | 1131 | TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node, |
1243 | ipv4_write_complete, ptask ); | 1132 | ptask->generation, ptask->speed, ptask->fifo_addr, |
1244 | fw_send_request ( priv->card, &ptask->transaction, | 1133 | ptask->skb->data, tx_len, fwnet_write_complete, ptask); |
1245 | TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node, ptask->generation, ptask->speed, | 1134 | |
1246 | ptask->fifo_addr, ptask->skb->data, tx_len, ipv4_write_complete, ptask ); | 1135 | /* FIXME race? */ |
1247 | spin_lock_irqsave(&priv->lock,flags); | 1136 | spin_lock_irqsave(&dev->lock, flags); |
1248 | list_add_tail ( &ptask->packet_list, &priv->sent_list ); | 1137 | list_add_tail(&ptask->pt_link, &dev->sent_list); |
1249 | spin_unlock_irqrestore(&priv->lock,flags); | 1138 | spin_unlock_irqrestore(&dev->lock, flags); |
1250 | netdev = priv->card->netdev; | 1139 | |
1251 | netdev->trans_start = jiffies; | 1140 | net = dev->card->netdev; |
1141 | net->trans_start = jiffies; | ||
1142 | |||
1252 | return 0; | 1143 | return 0; |
1253 | } | 1144 | } |
1254 | 1145 | ||
1255 | static int ipv4_broadcast_start ( struct ipv4_priv *priv ) { | 1146 | static int fwnet_broadcast_start(struct fwnet_device *dev) |
1147 | { | ||
1256 | struct fw_iso_context *context; | 1148 | struct fw_iso_context *context; |
1257 | int retval; | 1149 | int retval; |
1258 | unsigned num_packets; | 1150 | unsigned num_packets; |
@@ -1260,150 +1152,151 @@ static int ipv4_broadcast_start ( struct ipv4_priv *priv ) { | |||
1260 | struct fw_iso_packet packet; | 1152 | struct fw_iso_packet packet; |
1261 | unsigned long offset; | 1153 | unsigned long offset; |
1262 | unsigned u; | 1154 | unsigned u; |
1263 | /* unsigned transmit_speed; */ | ||
1264 | 1155 | ||
1265 | #if 0 /* stefanr */ | 1156 | if (dev->local_fifo == FWNET_NO_FIFO_ADDR) { |
1266 | if ( priv->card->broadcast_channel != (BROADCAST_CHANNEL_VALID|BROADCAST_CHANNEL_INITIAL)) { | 1157 | /* outside OHCI posted write area? */ |
1267 | fw_notify ( "Invalid broadcast channel %x\n", priv->card->broadcast_channel ); | 1158 | static const struct fw_address_region region = { |
1268 | /* FIXME: try again later? */ | 1159 | .start = 0xffff00000000ULL, |
1269 | /* return -EINVAL; */ | 1160 | .end = CSR_REGISTER_BASE, |
1270 | } | 1161 | }; |
1271 | #endif | 1162 | |
1272 | if ( priv->local_fifo == INVALID_FIFO_ADDR ) { | 1163 | dev->handler.length = 4096; |
1273 | struct fw_address_region region; | 1164 | dev->handler.address_callback = fwnet_receive_packet; |
1274 | 1165 | dev->handler.callback_data = dev; | |
1275 | priv->handler.length = FIFO_SIZE; | 1166 | |
1276 | priv->handler.address_callback = ipv4_receive_packet; | 1167 | retval = fw_core_add_address_handler(&dev->handler, ®ion); |
1277 | priv->handler.callback_data = priv; | 1168 | if (retval < 0) |
1278 | /* FIXME: this is OHCI, but what about others? */ | ||
1279 | region.start = 0xffff00000000ULL; | ||
1280 | region.end = 0xfffffffffffcULL; | ||
1281 | |||
1282 | retval = fw_core_add_address_handler ( &priv->handler, ®ion ); | ||
1283 | if ( retval < 0 ) | ||
1284 | goto failed_initial; | 1169 | goto failed_initial; |
1285 | priv->local_fifo = priv->handler.offset; | 1170 | |
1171 | dev->local_fifo = dev->handler.offset; | ||
1286 | } | 1172 | } |
1287 | 1173 | ||
1288 | /* | 1174 | max_receive = 1U << (dev->card->max_receive + 1); |
1289 | * FIXME: rawiso limits us to PAGE_SIZE. This only matters if we ever have | 1175 | num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive; |
1290 | * a machine with PAGE_SIZE < 4096 | 1176 | |
1291 | */ | 1177 | if (!dev->broadcast_rcv_context) { |
1292 | max_receive = 1U << (priv->card->max_receive + 1); | ||
1293 | num_packets = ( ipv4_iso_page_count * PAGE_SIZE ) / max_receive; | ||
1294 | if ( ! priv->broadcast_rcv_context ) { | ||
1295 | void **ptrptr; | 1178 | void **ptrptr; |
1296 | 1179 | ||
1297 | context = fw_iso_context_create ( priv->card, | 1180 | context = fw_iso_context_create(dev->card, |
1298 | FW_ISO_CONTEXT_RECEIVE, BROADCAST_CHANNEL, | 1181 | FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL, |
1299 | priv->card->link_speed, 8, ipv4_receive_broadcast, priv ); | 1182 | dev->card->link_speed, 8, fwnet_receive_broadcast, dev); |
1300 | if (IS_ERR(context)) { | 1183 | if (IS_ERR(context)) { |
1301 | retval = PTR_ERR(context); | 1184 | retval = PTR_ERR(context); |
1302 | goto failed_context_create; | 1185 | goto failed_context_create; |
1303 | } | 1186 | } |
1304 | retval = fw_iso_buffer_init ( &priv->broadcast_rcv_buffer, | 1187 | |
1305 | priv->card, ipv4_iso_page_count, DMA_FROM_DEVICE ); | 1188 | retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, |
1306 | if ( retval < 0 ) | 1189 | dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE); |
1190 | if (retval < 0) | ||
1307 | goto failed_buffer_init; | 1191 | goto failed_buffer_init; |
1308 | ptrptr = kmalloc ( sizeof(void*)*num_packets, GFP_KERNEL ); | 1192 | |
1309 | if ( ! ptrptr ) { | 1193 | ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL); |
1194 | if (!ptrptr) { | ||
1310 | retval = -ENOMEM; | 1195 | retval = -ENOMEM; |
1311 | goto failed_ptrs_alloc; | 1196 | goto failed_ptrs_alloc; |
1312 | } | 1197 | } |
1313 | priv->broadcast_rcv_buffer_ptrs = ptrptr; | 1198 | |
1314 | for ( u = 0; u < ipv4_iso_page_count; u++ ) { | 1199 | dev->broadcast_rcv_buffer_ptrs = ptrptr; |
1200 | for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) { | ||
1315 | void *ptr; | 1201 | void *ptr; |
1316 | unsigned v; | 1202 | unsigned v; |
1317 | 1203 | ||
1318 | ptr = kmap ( priv->broadcast_rcv_buffer.pages[u] ); | 1204 | ptr = kmap(dev->broadcast_rcv_buffer.pages[u]); |
1319 | for ( v = 0; v < num_packets / ipv4_iso_page_count; v++ ) | 1205 | for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++) |
1320 | *ptrptr++ = (void *)((char *)ptr + v * max_receive); | 1206 | *ptrptr++ = (void *) |
1207 | ((char *)ptr + v * max_receive); | ||
1321 | } | 1208 | } |
1322 | priv->broadcast_rcv_context = context; | 1209 | dev->broadcast_rcv_context = context; |
1323 | } else | 1210 | } else { |
1324 | context = priv->broadcast_rcv_context; | 1211 | context = dev->broadcast_rcv_context; |
1212 | } | ||
1325 | 1213 | ||
1326 | packet.payload_length = max_receive; | 1214 | packet.payload_length = max_receive; |
1327 | packet.interrupt = 1; | 1215 | packet.interrupt = 1; |
1328 | packet.skip = 0; | 1216 | packet.skip = 0; |
1329 | packet.tag = 3; | 1217 | packet.tag = 3; |
1330 | packet.sy = 0; | 1218 | packet.sy = 0; |
1331 | packet.header_length = IPV4_GASP_OVERHEAD; | 1219 | packet.header_length = IEEE1394_GASP_HDR_SIZE; |
1332 | offset = 0; | 1220 | offset = 0; |
1333 | for ( u = 0; u < num_packets; u++ ) { | 1221 | |
1334 | retval = fw_iso_context_queue ( context, &packet, | 1222 | for (u = 0; u < num_packets; u++) { |
1335 | &priv->broadcast_rcv_buffer, offset ); | 1223 | retval = fw_iso_context_queue(context, &packet, |
1336 | if ( retval < 0 ) | 1224 | &dev->broadcast_rcv_buffer, offset); |
1225 | if (retval < 0) | ||
1337 | goto failed_rcv_queue; | 1226 | goto failed_rcv_queue; |
1227 | |||
1338 | offset += max_receive; | 1228 | offset += max_receive; |
1339 | } | 1229 | } |
1340 | priv->num_broadcast_rcv_ptrs = num_packets; | 1230 | dev->num_broadcast_rcv_ptrs = num_packets; |
1341 | priv->rcv_buffer_size = max_receive; | 1231 | dev->rcv_buffer_size = max_receive; |
1342 | priv->broadcast_rcv_next_ptr = 0U; | 1232 | dev->broadcast_rcv_next_ptr = 0U; |
1343 | retval = fw_iso_context_start ( context, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS ); /* ??? sync */ | 1233 | retval = fw_iso_context_start(context, -1, 0, |
1344 | if ( retval < 0 ) | 1234 | FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */ |
1235 | if (retval < 0) | ||
1345 | goto failed_rcv_queue; | 1236 | goto failed_rcv_queue; |
1346 | /* FIXME: adjust this when we know the max receive speeds of all other IP nodes on the bus. */ | 1237 | |
1347 | /* since we only xmt at S100 ??? */ | 1238 | /* FIXME: adjust it according to the min. speed of all known peers? */ |
1348 | priv->broadcast_xmt_max_payload = S100_BUFFER_SIZE - IPV4_GASP_OVERHEAD - IPV4_UNFRAG_HDR_SIZE; | 1239 | dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100 |
1349 | priv->broadcast_state = IPV4_BROADCAST_RUNNING; | 1240 | - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE; |
1241 | dev->broadcast_state = FWNET_BROADCAST_RUNNING; | ||
1242 | |||
1350 | return 0; | 1243 | return 0; |
1351 | 1244 | ||
1352 | failed_rcv_queue: | 1245 | failed_rcv_queue: |
1353 | kfree ( priv->broadcast_rcv_buffer_ptrs ); | 1246 | kfree(dev->broadcast_rcv_buffer_ptrs); |
1354 | priv->broadcast_rcv_buffer_ptrs = NULL; | 1247 | dev->broadcast_rcv_buffer_ptrs = NULL; |
1355 | failed_ptrs_alloc: | 1248 | failed_ptrs_alloc: |
1356 | fw_iso_buffer_destroy ( &priv->broadcast_rcv_buffer, priv->card ); | 1249 | fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card); |
1357 | failed_buffer_init: | 1250 | failed_buffer_init: |
1358 | fw_iso_context_destroy ( context ); | 1251 | fw_iso_context_destroy(context); |
1359 | priv->broadcast_rcv_context = NULL; | 1252 | dev->broadcast_rcv_context = NULL; |
1360 | failed_context_create: | 1253 | failed_context_create: |
1361 | fw_core_remove_address_handler ( &priv->handler ); | 1254 | fw_core_remove_address_handler(&dev->handler); |
1362 | failed_initial: | 1255 | failed_initial: |
1363 | priv->local_fifo = INVALID_FIFO_ADDR; | 1256 | dev->local_fifo = FWNET_NO_FIFO_ADDR; |
1257 | |||
1364 | return retval; | 1258 | return retval; |
1365 | } | 1259 | } |
1366 | 1260 | ||
1367 | /* This is called after an "ifup" */ | 1261 | /* ifup */ |
1368 | static int ipv4_open(struct net_device *dev) { | 1262 | static int fwnet_open(struct net_device *net) |
1369 | struct ipv4_priv *priv; | 1263 | { |
1264 | struct fwnet_device *dev = netdev_priv(net); | ||
1370 | int ret; | 1265 | int ret; |
1371 | 1266 | ||
1372 | priv = netdev_priv(dev); | 1267 | if (dev->broadcast_state == FWNET_BROADCAST_ERROR) { |
1373 | if (priv->broadcast_state == IPV4_BROADCAST_ERROR) { | 1268 | ret = fwnet_broadcast_start(dev); |
1374 | ret = ipv4_broadcast_start ( priv ); | ||
1375 | if (ret) | 1269 | if (ret) |
1376 | return ret; | 1270 | return ret; |
1377 | } | 1271 | } |
1378 | netif_start_queue(dev); | 1272 | netif_start_queue(net); |
1273 | |||
1379 | return 0; | 1274 | return 0; |
1380 | } | 1275 | } |
1381 | 1276 | ||
1382 | /* This is called after an "ifdown" */ | 1277 | /* ifdown */ |
1383 | static int ipv4_stop(struct net_device *netdev) | 1278 | static int fwnet_stop(struct net_device *net) |
1384 | { | 1279 | { |
1385 | /* flush priv->wake */ | 1280 | netif_stop_queue(net); |
1386 | /* flush_scheduled_work(); */ | 1281 | |
1282 | /* Deallocate iso context for use by other applications? */ | ||
1387 | 1283 | ||
1388 | netif_stop_queue(netdev); | ||
1389 | return 0; | 1284 | return 0; |
1390 | } | 1285 | } |
1391 | 1286 | ||
1392 | /* Transmit a packet (called by kernel) */ | 1287 | static int fwnet_tx(struct sk_buff *skb, struct net_device *net) |
1393 | static int ipv4_tx(struct sk_buff *skb, struct net_device *netdev) | ||
1394 | { | 1288 | { |
1395 | struct ipv4_ether_hdr hdr_buf; | 1289 | struct fwnet_header hdr_buf; |
1396 | struct ipv4_priv *priv = netdev_priv(netdev); | 1290 | struct fwnet_device *dev = netdev_priv(net); |
1397 | __be16 proto; | 1291 | __be16 proto; |
1398 | u16 dest_node; | 1292 | u16 dest_node; |
1399 | enum ipv4_tx_type tx_type; | ||
1400 | unsigned max_payload; | 1293 | unsigned max_payload; |
1401 | u16 dg_size; | 1294 | u16 dg_size; |
1402 | u16 *datagram_label_ptr; | 1295 | u16 *datagram_label_ptr; |
1403 | struct ipv4_packet_task *ptask; | 1296 | struct fwnet_packet_task *ptask; |
1404 | struct ipv4_node *node = NULL; | 1297 | struct fwnet_peer *peer = NULL; |
1405 | 1298 | ||
1406 | ptask = kmem_cache_alloc(ipv4_packet_task_cache, GFP_ATOMIC); | 1299 | ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); |
1407 | if (ptask == NULL) | 1300 | if (ptask == NULL) |
1408 | goto fail; | 1301 | goto fail; |
1409 | 1302 | ||
@@ -1412,7 +1305,7 @@ static int ipv4_tx(struct sk_buff *skb, struct net_device *netdev) | |||
1412 | goto fail; | 1305 | goto fail; |
1413 | 1306 | ||
1414 | /* | 1307 | /* |
1415 | * Get rid of the fake ipv4 header, but first make a copy. | 1308 | * Make a copy of the driver-specific header. |
1416 | * We might need to rebuild the header on tx failure. | 1309 | * We might need to rebuild the header on tx failure. |
1417 | */ | 1310 | */ |
1418 | memcpy(&hdr_buf, skb->data, sizeof(hdr_buf)); | 1311 | memcpy(&hdr_buf, skb->data, sizeof(hdr_buf)); |
@@ -1425,110 +1318,95 @@ static int ipv4_tx(struct sk_buff *skb, struct net_device *netdev) | |||
1425 | * Set the transmission type for the packet. ARP packets and IP | 1318 | * Set the transmission type for the packet. ARP packets and IP |
1426 | * broadcast packets are sent via GASP. | 1319 | * broadcast packets are sent via GASP. |
1427 | */ | 1320 | */ |
1428 | if ( memcmp(hdr_buf.h_dest, netdev->broadcast, IPV4_ALEN) == 0 | 1321 | if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0 |
1429 | || proto == htons(ETH_P_ARP) | 1322 | || proto == htons(ETH_P_ARP) |
1430 | || ( proto == htons(ETH_P_IP) | 1323 | || (proto == htons(ETH_P_IP) |
1431 | && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)) ) ) { | 1324 | && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) { |
1432 | /* fw_debug ( "transmitting arp or multicast packet\n" );*/ | 1325 | max_payload = dev->broadcast_xmt_max_payload; |
1433 | tx_type = IPV4_GASP; | 1326 | datagram_label_ptr = &dev->broadcast_xmt_datagramlabel; |
1434 | dest_node = ALL_NODES; | 1327 | |
1435 | max_payload = priv->broadcast_xmt_max_payload; | 1328 | ptask->fifo_addr = FWNET_NO_FIFO_ADDR; |
1436 | /* BUG_ON(max_payload < S100_BUFFER_SIZE - IPV4_GASP_OVERHEAD); */ | 1329 | ptask->generation = 0; |
1437 | datagram_label_ptr = &priv->broadcast_xmt_datagramlabel; | 1330 | ptask->dest_node = IEEE1394_ALL_NODES; |
1438 | ptask->fifo_addr = INVALID_FIFO_ADDR; | 1331 | ptask->speed = SCODE_100; |
1439 | ptask->generation = 0U; | ||
1440 | ptask->dest_node = 0U; | ||
1441 | ptask->speed = 0; | ||
1442 | } else { | 1332 | } else { |
1443 | __be64 guid = get_unaligned((u64 *)hdr_buf.h_dest); | 1333 | __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest); |
1444 | u8 generation; | 1334 | u8 generation; |
1445 | 1335 | ||
1446 | node = ipv4_node_find_by_guid(priv, be64_to_cpu(guid)); | 1336 | peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); |
1447 | if (!node) { | 1337 | if (!peer) |
1448 | fw_debug ( "Normal packet but no node\n" ); | ||
1449 | goto fail; | 1338 | goto fail; |
1450 | } | ||
1451 | 1339 | ||
1452 | if (node->fifo == INVALID_FIFO_ADDR) { | 1340 | if (peer->fifo == FWNET_NO_FIFO_ADDR) |
1453 | fw_debug ( "Normal packet but no fifo addr\n" ); | ||
1454 | goto fail; | 1341 | goto fail; |
1455 | } | ||
1456 | 1342 | ||
1457 | /* fw_debug ( "Transmitting normal packet to %x at %llxx\n", node->nodeid, node->fifo ); */ | 1343 | generation = peer->generation; |
1458 | generation = node->generation; | 1344 | smp_rmb(); |
1459 | dest_node = node->nodeid; | 1345 | dest_node = peer->node_id; |
1460 | max_payload = node->max_payload; | 1346 | |
1461 | /* BUG_ON(max_payload < S100_BUFFER_SIZE - IPV4_FRAG_HDR_SIZE); */ | 1347 | max_payload = peer->max_payload; |
1348 | datagram_label_ptr = &peer->datagram_label; | ||
1462 | 1349 | ||
1463 | datagram_label_ptr = &node->datagram_label; | 1350 | ptask->fifo_addr = peer->fifo; |
1464 | tx_type = IPV4_WRREQ; | ||
1465 | ptask->fifo_addr = node->fifo; | ||
1466 | ptask->generation = generation; | 1351 | ptask->generation = generation; |
1467 | ptask->dest_node = dest_node; | 1352 | ptask->dest_node = dest_node; |
1468 | ptask->speed = node->xmt_speed; | 1353 | ptask->speed = peer->xmt_speed; |
1469 | } | 1354 | } |
1470 | 1355 | ||
1471 | /* If this is an ARP packet, convert it */ | 1356 | /* If this is an ARP packet, convert it */ |
1472 | if (proto == htons(ETH_P_ARP)) { | 1357 | if (proto == htons(ETH_P_ARP)) { |
1473 | /* Convert a standard ARP packet to 1394 ARP. The first 8 bytes (the entire | ||
1474 | * arphdr) is the same format as the ip1394 header, so they overlap. The rest | ||
1475 | * needs to be munged a bit. The remainder of the arphdr is formatted based | ||
1476 | * on hwaddr len and ipaddr len. We know what they'll be, so it's easy to | ||
1477 | * judge. | ||
1478 | * | ||
1479 | * Now that the EUI is used for the hardware address all we need to do to make | ||
1480 | * this work for 1394 is to insert 2 quadlets that contain max_rec size, | ||
1481 | * speed, and unicast FIFO address information between the sender_unique_id | ||
1482 | * and the IP addresses. | ||
1483 | */ | ||
1484 | struct arphdr *arp = (struct arphdr *)skb->data; | 1358 | struct arphdr *arp = (struct arphdr *)skb->data; |
1485 | unsigned char *arp_ptr = (unsigned char *)(arp + 1); | 1359 | unsigned char *arp_ptr = (unsigned char *)(arp + 1); |
1486 | struct ipv4_arp *arp1394 = (struct ipv4_arp *)skb->data; | 1360 | struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data; |
1487 | u32 ipaddr; | 1361 | __be32 ipaddr; |
1488 | 1362 | ||
1489 | ipaddr = *(u32*)(arp_ptr + IPV4_ALEN); | 1363 | ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN)); |
1490 | arp1394->hw_addr_len = 16; | 1364 | |
1491 | arp1394->max_rec = priv->card->max_receive; | 1365 | arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN; |
1492 | arp1394->sspd = priv->card->link_speed; | 1366 | arp1394->max_rec = dev->card->max_receive; |
1493 | arp1394->fifo_hi = htons(priv->local_fifo >> 32); | 1367 | arp1394->sspd = dev->card->link_speed; |
1494 | arp1394->fifo_lo = htonl(priv->local_fifo & 0xFFFFFFFF); | 1368 | |
1495 | arp1394->sip = ipaddr; | 1369 | put_unaligned_be16(dev->local_fifo >> 32, |
1370 | &arp1394->fifo_hi); | ||
1371 | put_unaligned_be32(dev->local_fifo & 0xffffffff, | ||
1372 | &arp1394->fifo_lo); | ||
1373 | put_unaligned(ipaddr, &arp1394->sip); | ||
1496 | } | 1374 | } |
1497 | if ( ipv4_max_xmt && max_payload > ipv4_max_xmt ) | ||
1498 | max_payload = ipv4_max_xmt; | ||
1499 | 1375 | ||
1500 | ptask->hdr.w0 = 0; | 1376 | ptask->hdr.w0 = 0; |
1501 | ptask->hdr.w1 = 0; | 1377 | ptask->hdr.w1 = 0; |
1502 | ptask->skb = skb; | 1378 | ptask->skb = skb; |
1503 | ptask->priv = priv; | 1379 | ptask->dev = dev; |
1504 | ptask->tx_type = tx_type; | 1380 | |
1505 | /* Does it all fit in one packet? */ | 1381 | /* Does it all fit in one packet? */ |
1506 | if ( dg_size <= max_payload ) { | 1382 | if (dg_size <= max_payload) { |
1507 | ipv4_make_uf_hdr(&ptask->hdr, be16_to_cpu(proto)); | 1383 | fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto)); |
1508 | ptask->outstanding_pkts = 1; | 1384 | ptask->outstanding_pkts = 1; |
1509 | max_payload = dg_size + IPV4_UNFRAG_HDR_SIZE; | 1385 | max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE; |
1510 | } else { | 1386 | } else { |
1511 | u16 datagram_label; | 1387 | u16 datagram_label; |
1512 | 1388 | ||
1513 | max_payload -= IPV4_FRAG_OVERHEAD; | 1389 | max_payload -= RFC2374_FRAG_OVERHEAD; |
1514 | datagram_label = (*datagram_label_ptr)++; | 1390 | datagram_label = (*datagram_label_ptr)++; |
1515 | ipv4_make_ff_hdr(&ptask->hdr, be16_to_cpu(proto), dg_size, datagram_label ); | 1391 | fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size, |
1392 | datagram_label); | ||
1516 | ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload); | 1393 | ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload); |
1517 | max_payload += IPV4_FRAG_HDR_SIZE; | 1394 | max_payload += RFC2374_FRAG_HDR_SIZE; |
1518 | } | 1395 | } |
1519 | ptask->max_payload = max_payload; | 1396 | ptask->max_payload = max_payload; |
1520 | ipv4_send_packet ( ptask ); | 1397 | fwnet_send_packet(ptask); |
1398 | |||
1521 | return NETDEV_TX_OK; | 1399 | return NETDEV_TX_OK; |
1522 | 1400 | ||
1523 | fail: | 1401 | fail: |
1524 | if (ptask) | 1402 | if (ptask) |
1525 | kmem_cache_free(ipv4_packet_task_cache, ptask); | 1403 | kmem_cache_free(fwnet_packet_task_cache, ptask); |
1526 | 1404 | ||
1527 | if (skb != NULL) | 1405 | if (skb != NULL) |
1528 | dev_kfree_skb(skb); | 1406 | dev_kfree_skb(skb); |
1529 | 1407 | ||
1530 | netdev->stats.tx_dropped++; | 1408 | net->stats.tx_dropped++; |
1531 | netdev->stats.tx_errors++; | 1409 | net->stats.tx_errors++; |
1532 | 1410 | ||
1533 | /* | 1411 | /* |
1534 | * FIXME: According to a patch from 2003-02-26, "returning non-zero | 1412 | * FIXME: According to a patch from 2003-02-26, "returning non-zero |
@@ -1540,280 +1418,291 @@ static int ipv4_tx(struct sk_buff *skb, struct net_device *netdev) | |||
1540 | return NETDEV_TX_OK; | 1418 | return NETDEV_TX_OK; |
1541 | } | 1419 | } |
1542 | 1420 | ||
1543 | /* | 1421 | static void fwnet_tx_timeout(struct net_device *net) |
1544 | * FIXME: What to do if we timeout? I think a host reset is probably in order, | 1422 | { |
1545 | * so that's what we do. Should we increment the stat counters too? | 1423 | fw_error("%s: timeout\n", net->name); |
1546 | */ | ||
1547 | static void ipv4_tx_timeout(struct net_device *dev) { | ||
1548 | struct ipv4_priv *priv; | ||
1549 | 1424 | ||
1550 | priv = netdev_priv(dev); | 1425 | /* FIXME: What to do if we timeout? */ |
1551 | fw_error ( "%s: Timeout, resetting host\n", dev->name ); | ||
1552 | #if 0 /* stefanr */ | ||
1553 | fw_core_initiate_bus_reset ( priv->card, 1 ); | ||
1554 | #endif | ||
1555 | } | 1426 | } |
1556 | 1427 | ||
1557 | static int ipv4_change_mtu ( struct net_device *dev, int new_mtu ) { | 1428 | static int fwnet_change_mtu(struct net_device *net, int new_mtu) |
1558 | #if 0 | 1429 | { |
1559 | int max_mtu; | ||
1560 | struct ipv4_priv *priv; | ||
1561 | #endif | ||
1562 | |||
1563 | if (new_mtu < 68) | 1430 | if (new_mtu < 68) |
1564 | return -EINVAL; | 1431 | return -EINVAL; |
1565 | 1432 | ||
1566 | #if 0 | 1433 | net->mtu = new_mtu; |
1567 | priv = netdev_priv(dev); | ||
1568 | /* This is not actually true because we can fragment packets at the firewire layer */ | ||
1569 | max_mtu = (1 << (priv->card->max_receive + 1)) | ||
1570 | - sizeof(struct ipv4_hdr) - IPV4_GASP_OVERHEAD; | ||
1571 | if (new_mtu > max_mtu) { | ||
1572 | fw_notify ( "%s: Local node constrains MTU to %d\n", dev->name, max_mtu); | ||
1573 | return -ERANGE; | ||
1574 | } | ||
1575 | #endif | ||
1576 | dev->mtu = new_mtu; | ||
1577 | return 0; | 1434 | return 0; |
1578 | } | 1435 | } |
1579 | 1436 | ||
1580 | static void ipv4_get_drvinfo(struct net_device *dev, | 1437 | static void fwnet_get_drvinfo(struct net_device *net, |
1581 | struct ethtool_drvinfo *info) { | 1438 | struct ethtool_drvinfo *info) |
1582 | strcpy(info->driver, ipv4_driver_name); | 1439 | { |
1583 | strcpy(info->bus_info, "ieee1394"); /* FIXME provide more detail? */ | 1440 | strcpy(info->driver, KBUILD_MODNAME); |
1441 | strcpy(info->bus_info, "ieee1394"); | ||
1584 | } | 1442 | } |
1585 | 1443 | ||
1586 | static struct ethtool_ops ipv4_ethtool_ops = { | 1444 | static struct ethtool_ops fwnet_ethtool_ops = { |
1587 | .get_drvinfo = ipv4_get_drvinfo, | 1445 | .get_drvinfo = fwnet_get_drvinfo, |
1588 | }; | 1446 | }; |
1589 | 1447 | ||
1590 | static const struct net_device_ops ipv4_netdev_ops = { | 1448 | static const struct net_device_ops fwnet_netdev_ops = { |
1591 | .ndo_open = ipv4_open, | 1449 | .ndo_open = fwnet_open, |
1592 | .ndo_stop = ipv4_stop, | 1450 | .ndo_stop = fwnet_stop, |
1593 | .ndo_start_xmit = ipv4_tx, | 1451 | .ndo_start_xmit = fwnet_tx, |
1594 | .ndo_tx_timeout = ipv4_tx_timeout, | 1452 | .ndo_tx_timeout = fwnet_tx_timeout, |
1595 | .ndo_change_mtu = ipv4_change_mtu, | 1453 | .ndo_change_mtu = fwnet_change_mtu, |
1596 | }; | 1454 | }; |
1597 | 1455 | ||
1598 | static void ipv4_init_dev ( struct net_device *dev ) { | 1456 | static void fwnet_init_dev(struct net_device *net) |
1599 | dev->header_ops = &ipv4_header_ops; | 1457 | { |
1600 | dev->netdev_ops = &ipv4_netdev_ops; | 1458 | net->header_ops = &fwnet_header_ops; |
1601 | SET_ETHTOOL_OPS(dev, &ipv4_ethtool_ops); | 1459 | net->netdev_ops = &fwnet_netdev_ops; |
1602 | 1460 | net->watchdog_timeo = 100000; /* ? FIXME */ | |
1603 | dev->watchdog_timeo = IPV4_TIMEOUT; | 1461 | net->flags = IFF_BROADCAST | IFF_MULTICAST; |
1604 | dev->flags = IFF_BROADCAST | IFF_MULTICAST; | 1462 | net->features = NETIF_F_HIGHDMA; |
1605 | dev->features = NETIF_F_HIGHDMA; | 1463 | net->addr_len = FWNET_ALEN; |
1606 | dev->addr_len = IPV4_ALEN; | 1464 | net->hard_header_len = FWNET_HLEN; |
1607 | dev->hard_header_len = IPV4_HLEN; | 1465 | net->type = ARPHRD_IEEE1394; |
1608 | dev->type = ARPHRD_IEEE1394; | 1466 | net->tx_queue_len = 1000; /* ? FIXME */ |
1609 | 1467 | SET_ETHTOOL_OPS(net, &fwnet_ethtool_ops); | |
1610 | /* FIXME: This value was copied from ether_setup(). Is it too much? */ | ||
1611 | dev->tx_queue_len = 1000; | ||
1612 | } | 1468 | } |
1613 | 1469 | ||
1614 | static int ipv4_probe ( struct device *dev ) { | 1470 | /* FIXME create netdev upon first fw_unit of a card, not upon local fw_unit */ |
1615 | struct fw_unit * unit; | 1471 | static int fwnet_probe(struct device *_dev) |
1616 | struct fw_device *device; | 1472 | { |
1617 | struct fw_card *card; | 1473 | struct fw_unit *unit = fw_unit(_dev); |
1618 | struct net_device *netdev; | 1474 | struct fw_device *device = fw_parent_device(unit); |
1619 | struct ipv4_priv *priv; | 1475 | struct fw_card *card = device->card; |
1476 | struct net_device *net; | ||
1477 | struct fwnet_device *dev; | ||
1620 | unsigned max_mtu; | 1478 | unsigned max_mtu; |
1621 | __be64 guid; | ||
1622 | |||
1623 | fw_debug("ipv4 Probing\n" ); | ||
1624 | unit = fw_unit ( dev ); | ||
1625 | device = fw_device ( unit->device.parent ); | ||
1626 | card = device->card; | ||
1627 | 1479 | ||
1628 | if ( ! device->is_local ) { | 1480 | if (!device->is_local) { |
1629 | int added; | 1481 | int added; |
1630 | 1482 | ||
1631 | fw_debug ( "Non-local, adding remote node entry\n" ); | 1483 | added = fwnet_peer_new(card, device); |
1632 | added = ipv4_node_new ( card, device ); | ||
1633 | return added; | 1484 | return added; |
1634 | } | 1485 | } |
1635 | fw_debug("ipv4 Local: adding netdev\n" ); | 1486 | net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev); |
1636 | netdev = alloc_netdev ( sizeof(*priv), "firewire%d", ipv4_init_dev ); | 1487 | if (net == NULL) { |
1637 | if ( netdev == NULL) { | 1488 | fw_error("out of memory\n"); |
1638 | fw_error( "Out of memory\n"); | ||
1639 | goto out; | 1489 | goto out; |
1640 | } | 1490 | } |
1641 | 1491 | ||
1642 | SET_NETDEV_DEV(netdev, card->device); | 1492 | SET_NETDEV_DEV(net, card->device); |
1643 | priv = netdev_priv(netdev); | 1493 | dev = netdev_priv(net); |
1644 | 1494 | ||
1645 | spin_lock_init(&priv->lock); | 1495 | spin_lock_init(&dev->lock); |
1646 | priv->broadcast_state = IPV4_BROADCAST_ERROR; | 1496 | dev->broadcast_state = FWNET_BROADCAST_ERROR; |
1647 | priv->broadcast_rcv_context = NULL; | 1497 | dev->broadcast_rcv_context = NULL; |
1648 | priv->broadcast_xmt_max_payload = 0; | 1498 | dev->broadcast_xmt_max_payload = 0; |
1649 | priv->broadcast_xmt_datagramlabel = 0; | 1499 | dev->broadcast_xmt_datagramlabel = 0; |
1650 | 1500 | ||
1651 | priv->local_fifo = INVALID_FIFO_ADDR; | 1501 | dev->local_fifo = FWNET_NO_FIFO_ADDR; |
1652 | 1502 | ||
1653 | /* INIT_WORK(&priv->wake, ipv4_handle_queue);*/ | 1503 | /* INIT_WORK(&dev->wake, fwnet_handle_queue);*/ |
1654 | INIT_LIST_HEAD(&priv->packet_list); | 1504 | INIT_LIST_HEAD(&dev->packet_list); |
1655 | INIT_LIST_HEAD(&priv->broadcasted_list); | 1505 | INIT_LIST_HEAD(&dev->broadcasted_list); |
1656 | INIT_LIST_HEAD(&priv->sent_list ); | 1506 | INIT_LIST_HEAD(&dev->sent_list); |
1657 | 1507 | ||
1658 | priv->card = card; | 1508 | dev->card = card; |
1659 | 1509 | ||
1660 | /* | 1510 | /* |
1661 | * Use the RFC 2734 default 1500 octets or the maximum payload | 1511 | * Use the RFC 2734 default 1500 octets or the maximum payload |
1662 | * as initial MTU | 1512 | * as initial MTU |
1663 | */ | 1513 | */ |
1664 | max_mtu = (1 << (card->max_receive + 1)) | 1514 | max_mtu = (1 << (card->max_receive + 1)) |
1665 | - sizeof(struct ipv4_hdr) - IPV4_GASP_OVERHEAD; | 1515 | - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE; |
1666 | netdev->mtu = min(1500U, max_mtu); | 1516 | net->mtu = min(1500U, max_mtu); |
1667 | 1517 | ||
1668 | /* Set our hardware address while we're at it */ | 1518 | /* Set our hardware address while we're at it */ |
1669 | guid = cpu_to_be64(card->guid); | 1519 | put_unaligned_be64(card->guid, net->dev_addr); |
1670 | memcpy(netdev->dev_addr, &guid, sizeof(u64)); | 1520 | put_unaligned_be64(~0ULL, net->broadcast); |
1671 | memset(netdev->broadcast, 0xff, sizeof(u64)); | 1521 | if (register_netdev(net)) { |
1672 | if ( register_netdev ( netdev ) ) { | 1522 | fw_error("Cannot register the driver\n"); |
1673 | fw_error ( "Cannot register the driver\n"); | ||
1674 | goto out; | 1523 | goto out; |
1675 | } | 1524 | } |
1676 | 1525 | ||
1677 | fw_notify ( "%s: IPv4 over Firewire on device %016llx\n", | 1526 | fw_notify("%s: IPv4 over FireWire on device %016llx\n", |
1678 | netdev->name, card->guid ); | 1527 | net->name, (unsigned long long)card->guid); |
1679 | card->netdev = netdev; | 1528 | card->netdev = net; |
1680 | 1529 | ||
1681 | return 0 /* ipv4_new_node ( ud ) */; | 1530 | return 0; |
1682 | out: | 1531 | out: |
1683 | if ( netdev ) | 1532 | if (net) |
1684 | free_netdev ( netdev ); | 1533 | free_netdev(net); |
1534 | |||
1685 | return -ENOENT; | 1535 | return -ENOENT; |
1686 | } | 1536 | } |
1687 | 1537 | ||
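The initial MTU computed in fwnet_probe() above follows directly from the card's asynchronous payload limit: 2^(max_receive + 1) bytes, minus the RFC 2734 encapsulation header and the GASP header. A minimal standalone sketch of that arithmetic, assuming 8 bytes for each header (what sizeof(struct rfc2734_header) and IEEE1394_GASP_HDR_SIZE are taken to be here) and an example max_receive value:

	/*
	 * Standalone user-space sketch (not kernel code) of the MTU
	 * derivation in fwnet_probe().  The 8-byte header sizes and the
	 * example max_receive value are assumptions for illustration.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int max_receive = 10;               /* example card->max_receive */
		unsigned int rfc2734_hdr = 8, gasp_hdr = 8;  /* assumed header sizes */
		unsigned int max_mtu = (1u << (max_receive + 1)) - rfc2734_hdr - gasp_hdr;
		unsigned int mtu = max_mtu < 1500 ? max_mtu : 1500;

		printf("payload-derived maximum %u, initial MTU %u\n", max_mtu, mtu);
		return 0;
	}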
1538 | static int fwnet_remove(struct device *_dev) | ||
1539 | { | ||
1540 | struct fw_unit *unit = fw_unit(_dev); | ||
1541 | struct fw_device *device = fw_parent_device(unit); | ||
1542 | struct fw_card *card = device->card; | ||
1543 | struct net_device *net; | ||
1544 | struct fwnet_device *dev; | ||
1545 | struct fwnet_peer *peer; | ||
1546 | struct fwnet_partial_datagram *pd, *pd_next; | ||
1547 | struct fwnet_packet_task *ptask, *pt_next; | ||
1548 | |||
1549 | if (!device->is_local) { | ||
1550 | fwnet_peer_delete(card, device); | ||
1688 | 1551 | ||
1689 | static int ipv4_remove ( struct device *dev ) { | ||
1690 | struct fw_unit * unit; | ||
1691 | struct fw_device *device; | ||
1692 | struct fw_card *card; | ||
1693 | struct net_device *netdev; | ||
1694 | struct ipv4_priv *priv; | ||
1695 | struct ipv4_node *node; | ||
1696 | struct ipv4_partial_datagram *pd, *pd_next; | ||
1697 | struct ipv4_packet_task *ptask, *pt_next; | ||
1698 | |||
1699 | fw_debug("ipv4 Removing\n" ); | ||
1700 | unit = fw_unit ( dev ); | ||
1701 | device = fw_device ( unit->device.parent ); | ||
1702 | card = device->card; | ||
1703 | |||
1704 | if ( ! device->is_local ) { | ||
1705 | fw_debug ( "Node %x is non-local, removing remote node entry\n", device->node_id ); | ||
1706 | ipv4_node_delete ( card, device ); | ||
1707 | return 0; | 1552 | return 0; |
1708 | } | 1553 | } |
1709 | netdev = card->netdev; | 1554 | |
1710 | if ( netdev ) { | 1555 | net = card->netdev; |
1711 | fw_debug ( "Node %x is local: deleting netdev\n", device->node_id ); | 1556 | if (net) { |
1712 | priv = netdev_priv ( netdev ); | 1557 | dev = netdev_priv(net); |
1713 | unregister_netdev ( netdev ); | 1558 | unregister_netdev(net); |
1714 | fw_debug ( "unregistered\n" ); | 1559 | |
1715 | if ( priv->local_fifo != INVALID_FIFO_ADDR ) | 1560 | if (dev->local_fifo != FWNET_NO_FIFO_ADDR) |
1716 | fw_core_remove_address_handler ( &priv->handler ); | 1561 | fw_core_remove_address_handler(&dev->handler); |
1717 | fw_debug ( "address handler gone\n" ); | 1562 | if (dev->broadcast_rcv_context) { |
1718 | if ( priv->broadcast_rcv_context ) { | 1563 | fw_iso_context_stop(dev->broadcast_rcv_context); |
1719 | fw_iso_context_stop ( priv->broadcast_rcv_context ); | 1564 | fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, |
1720 | fw_iso_buffer_destroy ( &priv->broadcast_rcv_buffer, priv->card ); | 1565 | dev->card); |
1721 | fw_iso_context_destroy ( priv->broadcast_rcv_context ); | 1566 | fw_iso_context_destroy(dev->broadcast_rcv_context); |
1722 | fw_debug ( "rcv stopped\n" ); | ||
1723 | } | 1567 | } |
1724 | list_for_each_entry_safe( ptask, pt_next, &priv->packet_list, packet_list ) { | 1568 | list_for_each_entry_safe(ptask, pt_next, |
1725 | dev_kfree_skb_any ( ptask->skb ); | 1569 | &dev->packet_list, pt_link) { |
1726 | kmem_cache_free( ipv4_packet_task_cache, ptask ); | 1570 | dev_kfree_skb_any(ptask->skb); |
1571 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
1727 | } | 1572 | } |
1728 | list_for_each_entry_safe( ptask, pt_next, &priv->broadcasted_list, packet_list ) { | 1573 | list_for_each_entry_safe(ptask, pt_next, |
1729 | dev_kfree_skb_any ( ptask->skb ); | 1574 | &dev->broadcasted_list, pt_link) { |
1730 | kmem_cache_free( ipv4_packet_task_cache, ptask ); | 1575 | dev_kfree_skb_any(ptask->skb); |
1576 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
1731 | } | 1577 | } |
1732 | list_for_each_entry_safe( ptask, pt_next, &priv->sent_list, packet_list ) { | 1578 | list_for_each_entry_safe(ptask, pt_next, |
1733 | dev_kfree_skb_any ( ptask->skb ); | 1579 | &dev->sent_list, pt_link) { |
1734 | kmem_cache_free( ipv4_packet_task_cache, ptask ); | 1580 | dev_kfree_skb_any(ptask->skb); |
1581 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
1735 | } | 1582 | } |
1736 | fw_debug ( "lists emptied\n" ); | 1583 | list_for_each_entry(peer, &card->peer_list, peer_link) { |
1737 | list_for_each_entry( node, &card->ipv4_nodes, ipv4_nodes ) { | 1584 | if (peer->pdg_size) { |
1738 | if ( node->pdg_size ) { | 1585 | list_for_each_entry_safe(pd, pd_next, |
1739 | list_for_each_entry_safe( pd, pd_next, &node->pdg_list, pdg_list ) | 1586 | &peer->pd_list, pd_link) |
1740 | ipv4_pd_delete ( pd ); | 1587 | fwnet_pd_delete(pd); |
1741 | node->pdg_size = 0; | 1588 | peer->pdg_size = 0; |
1742 | } | 1589 | } |
1743 | node->fifo = INVALID_FIFO_ADDR; | 1590 | peer->fifo = FWNET_NO_FIFO_ADDR; |
1744 | } | 1591 | } |
1745 | fw_debug ( "nodes cleaned up\n" ); | 1592 | free_netdev(net); |
1746 | free_netdev ( netdev ); | ||
1747 | card->netdev = NULL; | 1593 | card->netdev = NULL; |
1748 | fw_debug ( "done\n" ); | ||
1749 | } | 1594 | } |
1595 | |||
1750 | return 0; | 1596 | return 0; |
1751 | } | 1597 | } |
1752 | 1598 | ||
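fwnet_remove() above drains its three packet-task lists with the _safe list iterator because every entry is freed inside the loop body. A brief sketch of that idiom, reusing the names from the hunk; this restates the pattern for clarity and adds no behaviour beyond the patch:

	struct fwnet_packet_task *ptask, *pt_next;

	/* The _safe variant caches the next entry before the body runs,
	 * so freeing ptask inside the loop cannot break the traversal. */
	list_for_each_entry_safe(ptask, pt_next, &dev->packet_list, pt_link) {
		dev_kfree_skb_any(ptask->skb);                   /* drop the queued skb    */
		kmem_cache_free(fwnet_packet_task_cache, ptask); /* return the task object */
	}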
1753 | static void ipv4_update ( struct fw_unit *unit ) { | 1599 | /* |
1754 | struct fw_device *device; | 1600 | * FIXME abort partially sent fragmented datagrams, |
1755 | struct fw_card *card; | 1601 | * discard partially received fragmented datagrams |
1602 | */ | ||
1603 | static void fwnet_update(struct fw_unit *unit) | ||
1604 | { | ||
1605 | struct fw_device *device = fw_parent_device(unit); | ||
1606 | struct net_device *net = device->card->netdev; | ||
1607 | struct fwnet_device *dev; | ||
1608 | struct fwnet_peer *peer; | ||
1609 | u64 guid; | ||
1756 | 1610 | ||
1757 | fw_debug ( "ipv4_update unit %p\n", unit ); | 1611 | if (net && !device->is_local) { |
1758 | device = fw_device ( unit->device.parent ); | 1612 | dev = netdev_priv(net); |
1759 | card = device->card; | 1613 | guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; |
1760 | if ( ! device->is_local ) { | 1614 | peer = fwnet_peer_find_by_guid(dev, guid); |
1761 | struct ipv4_node *node; | 1615 | if (!peer) { |
1762 | u64 guid; | 1616 | fw_error("fwnet_update: no peer for device %016llx\n", |
1763 | struct net_device *netdev; | 1617 | (unsigned long long)guid); |
1764 | struct ipv4_priv *priv; | 1618 | return; |
1765 | 1619 | } | |
1766 | netdev = card->netdev; | 1620 | peer->generation = device->generation; |
1767 | if ( netdev ) { | 1621 | rmb(); |
1768 | priv = netdev_priv ( netdev ); | 1622 | peer->node_id = device->node_id; |
1769 | guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; | ||
1770 | node = ipv4_node_find_by_guid ( priv, guid ); | ||
1771 | if ( ! node ) { | ||
1772 | fw_error ( "ipv4_update: no node for device %llx\n", guid ); | ||
1773 | return; | ||
1774 | } | ||
1775 | fw_debug ( "Non-local, updating remote node entry for guid %llx old generation %x, old nodeid %x\n", guid, node->generation, node->nodeid ); | ||
1776 | node->generation = device->generation; | ||
1777 | rmb(); | ||
1778 | node->nodeid = device->node_id; | ||
1779 | fw_debug ( "New generation %x, new nodeid %x\n", node->generation, node->nodeid ); | ||
1780 | } else | ||
1781 | fw_error ( "nonlocal, but no netdev? How can that be?\n" ); | ||
1782 | } else { | ||
1783 | /* FIXME: What do we need to do on bus reset? */ | ||
1784 | fw_debug ( "Local, doing nothing\n" ); | ||
1785 | } | 1623 | } |
1786 | } | 1624 | } |
1787 | 1625 | ||
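The rmb() in fwnet_update() pairs with the order in which the firewire core is assumed to publish a device's post-bus-reset data (node_id first, then generation); that writer side is not part of this patch and is shown below only as a hedged sketch. With this pairing the cached pair can never be a current generation combined with a stale node_id, so an out-of-date peer fails the generation check instead of being addressed at the wrong node.

	/* assumed writer side, elsewhere in the firewire core (sketch): */
	device->node_id    = new_node_id;
	smp_wmb();
	device->generation = new_generation;

	/* reader side, as in fwnet_update() above: */
	peer->generation = device->generation;
	rmb();
	peer->node_id    = device->node_id;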
1788 | static struct fw_driver ipv4_driver = { | 1626 | static const struct ieee1394_device_id fwnet_id_table[] = { |
1627 | { | ||
1628 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | | ||
1629 | IEEE1394_MATCH_VERSION, | ||
1630 | .specifier_id = IANA_SPECIFIER_ID, | ||
1631 | .version = RFC2734_SW_VERSION, | ||
1632 | }, | ||
1633 | { } | ||
1634 | }; | ||
1635 | |||
1636 | static struct fw_driver fwnet_driver = { | ||
1789 | .driver = { | 1637 | .driver = { |
1790 | .owner = THIS_MODULE, | 1638 | .owner = THIS_MODULE, |
1791 | .name = ipv4_driver_name, | 1639 | .name = "net", |
1792 | .bus = &fw_bus_type, | 1640 | .bus = &fw_bus_type, |
1793 | .probe = ipv4_probe, | 1641 | .probe = fwnet_probe, |
1794 | .remove = ipv4_remove, | 1642 | .remove = fwnet_remove, |
1795 | }, | 1643 | }, |
1796 | .update = ipv4_update, | 1644 | .update = fwnet_update, |
1797 | .id_table = ipv4_id_table, | 1645 | .id_table = fwnet_id_table, |
1646 | }; | ||
1647 | |||
1648 | static const u32 rfc2374_unit_directory_data[] = { | ||
1649 | 0x00040000, /* directory_length */ | ||
1650 | 0x1200005e, /* unit_specifier_id: IANA */ | ||
1651 | 0x81000003, /* textual descriptor offset */ | ||
1652 | 0x13000001, /* unit_sw_version: RFC 2734 */ | ||
1653 | 0x81000005, /* textual descriptor offset */ | ||
1654 | 0x00030000, /* descriptor_length */ | ||
1655 | 0x00000000, /* text */ | ||
1656 | 0x00000000, /* minimal ASCII, en */ | ||
1657 | 0x49414e41, /* I A N A */ | ||
1658 | 0x00030000, /* descriptor_length */ | ||
1659 | 0x00000000, /* text */ | ||
1660 | 0x00000000, /* minimal ASCII, en */ | ||
1661 | 0x49507634, /* I P v 4 */ | ||
1662 | }; | ||
1663 | |||
1664 | static struct fw_descriptor rfc2374_unit_directory = { | ||
1665 | .length = ARRAY_SIZE(rfc2374_unit_directory_data), | ||
1666 | .key = (CSR_DIRECTORY | CSR_UNIT) << 24, | ||
1667 | .data = rfc2374_unit_directory_data | ||
1798 | }; | 1668 | }; |
1799 | 1669 | ||
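The unit directory registered above is what fwnet_id_table matches against: each quadlet is an IEEE 1212 directory entry with an 8-bit key in the top byte and a 24-bit value below it, and keys 0x12 (specifier ID, IANA 0x00005e) and 0x13 (software version, RFC 2734) carry the same pair of values that the id table matches on. A standalone decoding sketch, illustrative only:

	/*
	 * Standalone sketch: decoding IEEE 1212 directory entries like
	 * the ones in rfc2374_unit_directory_data above.  Top byte is
	 * the key, low 24 bits the value.
	 */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		static const uint32_t entries[] = {
			0x1200005e,	/* unit_specifier_id: IANA */
			0x13000001,	/* unit_sw_version: RFC 2734 */
		};
		unsigned int i;

		for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
			printf("key 0x%02x, value 0x%06x\n",
			       (unsigned int)(entries[i] >> 24),
			       (unsigned int)(entries[i] & 0xffffff));
		return 0;
	}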
1800 | static int __init ipv4_init ( void ) { | 1670 | static int __init fwnet_init(void) |
1801 | int added; | 1671 | { |
1672 | int err; | ||
1673 | |||
1674 | err = fw_core_add_descriptor(&rfc2374_unit_directory); | ||
1675 | if (err) | ||
1676 | return err; | ||
1802 | 1677 | ||
1803 | added = fw_core_add_descriptor ( &ipv4_unit_directory ); | 1678 | fwnet_packet_task_cache = kmem_cache_create("packet_task", |
1804 | if ( added < 0 ) | 1679 | sizeof(struct fwnet_packet_task), 0, 0, NULL); |
1805 | fw_error ( "Failed to add descriptor" ); | 1680 | if (!fwnet_packet_task_cache) { |
1806 | ipv4_packet_task_cache = kmem_cache_create("packet_task", | 1681 | err = -ENOMEM; |
1807 | sizeof(struct ipv4_packet_task), 0, 0, NULL); | 1682 | goto out; |
1808 | fw_debug("Adding ipv4 module\n" ); | 1683 | } |
1809 | return driver_register ( &ipv4_driver.driver ); | 1684 | |
1685 | err = driver_register(&fwnet_driver.driver); | ||
1686 | if (!err) | ||
1687 | return 0; | ||
1688 | |||
1689 | kmem_cache_destroy(fwnet_packet_task_cache); | ||
1690 | out: | ||
1691 | fw_core_remove_descriptor(&rfc2374_unit_directory); | ||
1692 | |||
1693 | return err; | ||
1810 | } | 1694 | } |
1695 | module_init(fwnet_init); | ||
1811 | 1696 | ||
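Unlike the old ipv4_init() on the left, which only logged a failed fw_core_add_descriptor() and never checked the result of kmem_cache_create(), fwnet_init() unwinds in reverse order on every failure path. A generic, self-contained sketch of that idiom; the acquire_*/release_* names are placeholders, not kernel API:

	#include <stdio.h>

	/* Placeholder resources standing in for the config ROM descriptor,
	 * the packet_task slab cache and the driver registration above. */
	static int acquire_a(void) { puts("acquire a"); return 0; }
	static int acquire_b(void) { puts("acquire b"); return 0; }
	static int acquire_c(void) { puts("acquire c"); return -1; } /* force unwinding */
	static void release_b(void) { puts("release b"); }
	static void release_a(void) { puts("release a"); }

	static int init_in_order(void)
	{
		int err;

		err = acquire_a();	/* e.g. add the descriptor */
		if (err)
			return err;
		err = acquire_b();	/* e.g. create the cache */
		if (err)
			goto undo_a;
		err = acquire_c();	/* e.g. register the driver */
		if (err)
			goto undo_b;
		return 0;

	undo_b:
		release_b();
	undo_a:
		release_a();
		return err;
	}

	int main(void)
	{
		return init_in_order() ? 1 : 0;
	}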
1812 | static void __exit ipv4_cleanup ( void ) { | 1697 | static void __exit fwnet_cleanup(void) |
1813 | fw_core_remove_descriptor ( &ipv4_unit_directory ); | 1698 | { |
1814 | fw_debug("Removing ipv4 module\n" ); | 1699 | driver_unregister(&fwnet_driver.driver); |
1815 | driver_unregister ( &ipv4_driver.driver ); | 1700 | kmem_cache_destroy(fwnet_packet_task_cache); |
1701 | fw_core_remove_descriptor(&rfc2374_unit_directory); | ||
1816 | } | 1702 | } |
1703 | module_exit(fwnet_cleanup); | ||
1817 | 1704 | ||
1818 | module_init(ipv4_init); | 1705 | MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>"); |
1819 | module_exit(ipv4_cleanup); | 1706 | MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734"); |
1707 | MODULE_LICENSE("GPL"); | ||
1708 | MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table); | ||
diff --git a/include/linux/firewire.h b/include/linux/firewire.h index d44f47d3b2d9..5cb0c1549ff1 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h | |||
@@ -131,13 +131,10 @@ struct fw_card { | |||
131 | bool broadcast_channel_allocated; | 131 | bool broadcast_channel_allocated; |
132 | u32 broadcast_channel; | 132 | u32 broadcast_channel; |
133 | u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; | 133 | u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; |
134 | /* Only non-NULL if firewire-ipv4 is active on this card. */ | 134 | |
135 | /* firewire-net driver data */ | ||
135 | void *netdev; | 136 | void *netdev; |
136 | /* | 137 | struct list_head peer_list; |
137 | * The nodes get probed before the card, so we need a place to store | ||
138 | * them independent of card->netdev | ||
139 | */ | ||
140 | struct list_head ipv4_nodes; | ||
141 | }; | 138 | }; |
142 | 139 | ||
143 | static inline struct fw_card *fw_card_get(struct fw_card *card) | 140 | static inline struct fw_card *fw_card_get(struct fw_card *card) |