author     Linus Torvalds <torvalds@linux-foundation.org>   2009-06-23 00:29:52 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-06-23 00:29:52 -0400
commit     1053414068bad659479e6efa62a67403b8b1ec0a (patch)
tree       d4096db0f3aa9ca5e6b44c85ab848b7bedbfc37a
parent     b88f8a546f5dba213938fdfc11e66bc5c2421623 (diff)
parent     0c53decdd0a9f9c459ccabe0b5f79660bde5375b (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6:
firewire: new stack is no longer experimental
firewire: net: better FIFO address range check and rcodes
firewire: net: fix card driver reloading
firewire: core: fix iso context shutdown on card removal
firewire: core: fix DMA unmapping in iso buffer removal
firewire: net: adjust net_device ops
firewire: net: remove unused code
firewire: net: allow for unordered unit discovery
firewire: net: style changes
firewire: net: add Kconfig item, rename driver
firewire: add IPv4 support
-rw-r--r--  drivers/firewire/Kconfig     |   60
-rw-r--r--  drivers/firewire/Makefile    |    4
-rw-r--r--  drivers/firewire/core-card.c |   20
-rw-r--r--  drivers/firewire/core-iso.c  |   11
-rw-r--r--  drivers/firewire/core.h      |   87
-rw-r--r--  drivers/firewire/net.c       | 1655
-rw-r--r--  drivers/ieee1394/Kconfig     |   19
-rw-r--r--  include/linux/firewire.h     |   87
8 files changed, 1807 insertions, 136 deletions
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 450902438208..13efcd362072 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,28 +1,29 @@
1 | comment "A new alternative FireWire stack is available with EXPERIMENTAL=y" | 1 | comment "You can enable one or both FireWire driver stacks." |
2 | depends on EXPERIMENTAL=n | 2 | comment "See the help texts for more information." |
3 | |||
4 | comment "Enable only one of the two stacks, unless you know what you are doing" | ||
5 | depends on EXPERIMENTAL | ||
6 | 3 | ||
7 | config FIREWIRE | 4 | config FIREWIRE |
8 | tristate "New FireWire stack, EXPERIMENTAL" | 5 | tristate "FireWire driver stack" |
9 | depends on EXPERIMENTAL | ||
10 | select CRC_ITU_T | 6 | select CRC_ITU_T |
11 | help | 7 | help |
12 | This is the "Juju" FireWire stack, a new alternative implementation | 8 | This is the new-generation IEEE 1394 (FireWire) driver stack |
13 | designed for robustness and simplicity. You can build either this | 9 | a.k.a. Juju, a new implementation designed for robustness and |
14 | stack, or the old stack (the ieee1394 driver, ohci1394 etc.) or both. | 10 | simplicity. |
15 | Please read http://ieee1394.wiki.kernel.org/index.php/Juju_Migration | 11 | See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration |
16 | before you enable the new stack. | 12 | for information about migration from the older Linux 1394 stack |
13 | to the new driver stack. | ||
17 | 14 | ||
18 | To compile this driver as a module, say M here: the module will be | 15 | To compile this driver as a module, say M here: the module will be |
19 | called firewire-core. | 16 | called firewire-core. |
20 | 17 | ||
21 | This module functionally replaces ieee1394, raw1394, and video1394. | 18 | This module functionally replaces ieee1394, raw1394, and video1394. |
22 | To access it from application programs, you generally need at least | 19 | To access it from application programs, you generally need at least |
23 | libraw1394 version 2. IIDC/DCAM applications also need libdc1394 | 20 | libraw1394 v2. IIDC/DCAM applications need libdc1394 v2. |
24 | version 2. No libraries are required to access storage devices | 21 | No libraries are required to access storage devices through the |
25 | through the firewire-sbp2 driver. | 22 | firewire-sbp2 driver. |
23 | |||
24 | NOTE: | ||
25 | FireWire audio devices currently require the old drivers (ieee1394, | ||
26 | ohci1394, raw1394). | ||
26 | 27 | ||
27 | config FIREWIRE_OHCI | 28 | config FIREWIRE_OHCI |
28 | tristate "OHCI-1394 controllers" | 29 | tristate "OHCI-1394 controllers" |
@@ -37,11 +38,9 @@ config FIREWIRE_OHCI
37 | stack. | 38 | stack. |
38 | 39 | ||
39 | NOTE: | 40 | NOTE: |
40 | 41 | If you want to install firewire-ohci and ohci1394 together, you | |
41 | You should only build either firewire-ohci or the old ohci1394 driver, | 42 | should configure them only as modules and blacklist the driver(s) |
42 | but not both. If you nevertheless want to install both, you should | 43 | which you don't want to have auto-loaded. Add either |
43 | configure them only as modules and blacklist the driver(s) which you | ||
44 | don't want to have auto-loaded. Add either | ||
45 | 44 | ||
46 | blacklist firewire-ohci | 45 | blacklist firewire-ohci |
47 | or | 46 | or |
@@ -50,12 +49,7 @@ config FIREWIRE_OHCI
50 | blacklist dv1394 | 49 | blacklist dv1394 |
51 | 50 | ||
52 | to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf | 51 | to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf |
53 | depending on your distribution. The latter two modules should be | 52 | depending on your distribution. |
54 | blacklisted together with ohci1394 because they depend on ohci1394. | ||
55 | |||
56 | If you have an old modprobe which doesn't implement the blacklist | ||
57 | directive, use "install modulename /bin/true" for the modules to be | ||
58 | blacklisted. | ||
59 | 53 | ||
60 | config FIREWIRE_OHCI_DEBUG | 54 | config FIREWIRE_OHCI_DEBUG |
61 | bool | 55 | bool |
@@ -77,3 +71,17 @@ config FIREWIRE_SBP2
77 | 71 | ||
78 | You should also enable support for disks, CD-ROMs, etc. in the SCSI | 72 | You should also enable support for disks, CD-ROMs, etc. in the SCSI |
79 | configuration section. | 73 | configuration section. |
74 | |||
75 | config FIREWIRE_NET | ||
76 | tristate "IP networking over 1394 (EXPERIMENTAL)" | ||
77 | depends on FIREWIRE && INET && EXPERIMENTAL | ||
78 | help | ||
79 | This enables IPv4 over IEEE 1394, providing IP connectivity with | ||
80 | other implementations of RFC 2734 as found on several operating | ||
81 | systems. Multicast support is currently limited. | ||
82 | |||
83 | NOTE, this driver is not stable yet! | ||
84 | |||
85 | To compile this driver as a module, say M here: The module will be | ||
86 | called firewire-net. It replaces eth1394 of the classic IEEE 1394 | ||
87 | stack. | ||
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index bc3b9bf822bf..a8f9bb6d9fdf 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -6,7 +6,9 @@ firewire-core-y += core-card.o core-cdev.o core-device.o \
6 | core-iso.o core-topology.o core-transaction.o | 6 | core-iso.o core-topology.o core-transaction.o |
7 | firewire-ohci-y += ohci.o | 7 | firewire-ohci-y += ohci.o |
8 | firewire-sbp2-y += sbp2.o | 8 | firewire-sbp2-y += sbp2.o |
9 | firewire-net-y += net.o | ||
9 | 10 | ||
10 | obj-$(CONFIG_FIREWIRE) += firewire-core.o | 11 | obj-$(CONFIG_FIREWIRE) += firewire-core.o |
11 | obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o | 12 | obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o |
12 | obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o | 13 | obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o |
14 | obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o | ||
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 4c1be64fdddd..543fccac81bb 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -176,6 +176,7 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
176 | 176 | ||
177 | return 0; | 177 | return 0; |
178 | } | 178 | } |
179 | EXPORT_SYMBOL(fw_core_add_descriptor); | ||
179 | 180 | ||
180 | void fw_core_remove_descriptor(struct fw_descriptor *desc) | 181 | void fw_core_remove_descriptor(struct fw_descriptor *desc) |
181 | { | 182 | { |
@@ -189,6 +190,7 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
189 | 190 | ||
190 | mutex_unlock(&card_mutex); | 191 | mutex_unlock(&card_mutex); |
191 | } | 192 | } |
193 | EXPORT_SYMBOL(fw_core_remove_descriptor); | ||
192 | 194 | ||
193 | static void allocate_broadcast_channel(struct fw_card *card, int generation) | 195 | static void allocate_broadcast_channel(struct fw_card *card, int generation) |
194 | { | 196 | { |
@@ -459,11 +461,11 @@ EXPORT_SYMBOL(fw_card_add);
459 | 461 | ||
460 | 462 | ||
461 | /* | 463 | /* |
462 | * The next few functions implements a dummy driver that use once a | 464 | * The next few functions implement a dummy driver that is used once a card |
463 | * card driver shuts down an fw_card. This allows the driver to | 465 | * driver shuts down an fw_card. This allows the driver to cleanly unload, |
464 | * cleanly unload, as all IO to the card will be handled by the dummy | 466 | * as all IO to the card will be handled (and failed) by the dummy driver |
465 | * driver instead of calling into the (possibly) unloaded module. The | 467 | * instead of calling into the module. Only functions for iso context |
466 | * dummy driver just fails all IO. | 468 | * shutdown still need to be provided by the card driver. |
467 | */ | 469 | */ |
468 | 470 | ||
469 | static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length) | 471 | static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length) |
@@ -510,7 +512,7 @@ static int dummy_enable_phys_dma(struct fw_card *card,
510 | return -ENODEV; | 512 | return -ENODEV; |
511 | } | 513 | } |
512 | 514 | ||
513 | static struct fw_card_driver dummy_driver = { | 515 | static const struct fw_card_driver dummy_driver_template = { |
514 | .enable = dummy_enable, | 516 | .enable = dummy_enable, |
515 | .update_phy_reg = dummy_update_phy_reg, | 517 | .update_phy_reg = dummy_update_phy_reg, |
516 | .set_config_rom = dummy_set_config_rom, | 518 | .set_config_rom = dummy_set_config_rom, |
@@ -529,6 +531,8 @@ void fw_card_release(struct kref *kref)
529 | 531 | ||
530 | void fw_core_remove_card(struct fw_card *card) | 532 | void fw_core_remove_card(struct fw_card *card) |
531 | { | 533 | { |
534 | struct fw_card_driver dummy_driver = dummy_driver_template; | ||
535 | |||
532 | card->driver->update_phy_reg(card, 4, | 536 | card->driver->update_phy_reg(card, 4, |
533 | PHY_LINK_ACTIVE | PHY_CONTENDER, 0); | 537 | PHY_LINK_ACTIVE | PHY_CONTENDER, 0); |
534 | fw_core_initiate_bus_reset(card, 1); | 538 | fw_core_initiate_bus_reset(card, 1); |
@@ -537,7 +541,9 @@ void fw_core_remove_card(struct fw_card *card)
537 | list_del_init(&card->link); | 541 | list_del_init(&card->link); |
538 | mutex_unlock(&card_mutex); | 542 | mutex_unlock(&card_mutex); |
539 | 543 | ||
540 | /* Set up the dummy driver. */ | 544 | /* Switch off most of the card driver interface. */ |
545 | dummy_driver.free_iso_context = card->driver->free_iso_context; | ||
546 | dummy_driver.stop_iso = card->driver->stop_iso; | ||
541 | card->driver = &dummy_driver; | 547 | card->driver = &dummy_driver; |
542 | 548 | ||
543 | fw_destroy_nodes(card); | 549 | fw_destroy_nodes(card); |
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 28076c892d7e..166f19c6d38d 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -71,7 +71,7 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
71 | for (j = 0; j < i; j++) { | 71 | for (j = 0; j < i; j++) { |
72 | address = page_private(buffer->pages[j]); | 72 | address = page_private(buffer->pages[j]); |
73 | dma_unmap_page(card->device, address, | 73 | dma_unmap_page(card->device, address, |
74 | PAGE_SIZE, DMA_TO_DEVICE); | 74 | PAGE_SIZE, direction); |
75 | __free_page(buffer->pages[j]); | 75 | __free_page(buffer->pages[j]); |
76 | } | 76 | } |
77 | kfree(buffer->pages); | 77 | kfree(buffer->pages); |
@@ -80,6 +80,7 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
80 | 80 | ||
81 | return -ENOMEM; | 81 | return -ENOMEM; |
82 | } | 82 | } |
83 | EXPORT_SYMBOL(fw_iso_buffer_init); | ||
83 | 84 | ||
84 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) | 85 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) |
85 | { | 86 | { |
@@ -107,13 +108,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
107 | for (i = 0; i < buffer->page_count; i++) { | 108 | for (i = 0; i < buffer->page_count; i++) { |
108 | address = page_private(buffer->pages[i]); | 109 | address = page_private(buffer->pages[i]); |
109 | dma_unmap_page(card->device, address, | 110 | dma_unmap_page(card->device, address, |
110 | PAGE_SIZE, DMA_TO_DEVICE); | 111 | PAGE_SIZE, buffer->direction); |
111 | __free_page(buffer->pages[i]); | 112 | __free_page(buffer->pages[i]); |
112 | } | 113 | } |
113 | 114 | ||
114 | kfree(buffer->pages); | 115 | kfree(buffer->pages); |
115 | buffer->pages = NULL; | 116 | buffer->pages = NULL; |
116 | } | 117 | } |
118 | EXPORT_SYMBOL(fw_iso_buffer_destroy); | ||
117 | 119 | ||
118 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | 120 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, |
119 | int type, int channel, int speed, size_t header_size, | 121 | int type, int channel, int speed, size_t header_size, |
@@ -136,6 +138,7 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
136 | 138 | ||
137 | return ctx; | 139 | return ctx; |
138 | } | 140 | } |
141 | EXPORT_SYMBOL(fw_iso_context_create); | ||
139 | 142 | ||
140 | void fw_iso_context_destroy(struct fw_iso_context *ctx) | 143 | void fw_iso_context_destroy(struct fw_iso_context *ctx) |
141 | { | 144 | { |
@@ -143,12 +146,14 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
143 | 146 | ||
144 | card->driver->free_iso_context(ctx); | 147 | card->driver->free_iso_context(ctx); |
145 | } | 148 | } |
149 | EXPORT_SYMBOL(fw_iso_context_destroy); | ||
146 | 150 | ||
147 | int fw_iso_context_start(struct fw_iso_context *ctx, | 151 | int fw_iso_context_start(struct fw_iso_context *ctx, |
148 | int cycle, int sync, int tags) | 152 | int cycle, int sync, int tags) |
149 | { | 153 | { |
150 | return ctx->card->driver->start_iso(ctx, cycle, sync, tags); | 154 | return ctx->card->driver->start_iso(ctx, cycle, sync, tags); |
151 | } | 155 | } |
156 | EXPORT_SYMBOL(fw_iso_context_start); | ||
152 | 157 | ||
153 | int fw_iso_context_queue(struct fw_iso_context *ctx, | 158 | int fw_iso_context_queue(struct fw_iso_context *ctx, |
154 | struct fw_iso_packet *packet, | 159 | struct fw_iso_packet *packet, |
@@ -159,11 +164,13 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
159 | 164 | ||
160 | return card->driver->queue_iso(ctx, packet, buffer, payload); | 165 | return card->driver->queue_iso(ctx, packet, buffer, payload); |
161 | } | 166 | } |
167 | EXPORT_SYMBOL(fw_iso_context_queue); | ||
162 | 168 | ||
163 | int fw_iso_context_stop(struct fw_iso_context *ctx) | 169 | int fw_iso_context_stop(struct fw_iso_context *ctx) |
164 | { | 170 | { |
165 | return ctx->card->driver->stop_iso(ctx); | 171 | return ctx->card->driver->stop_iso(ctx); |
166 | } | 172 | } |
173 | EXPORT_SYMBOL(fw_iso_context_stop); | ||
167 | 174 | ||
168 | /* | 175 | /* |
169 | * Isochronous bus resource management (channels, bandwidth), client side | 176 | * Isochronous bus resource management (channels, bandwidth), client side |
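The two hunks above stop hard-coding DMA_TO_DEVICE when an iso buffer is torn down and instead reuse the direction the buffer was mapped with. As a minimal illustration of the rule the fix follows (the helper names are made up for this sketch and are not in the patch): dma_unmap_page() must be given the same size and direction that were passed to dma_map_page().

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Illustrative only: map and later unmap one page of an iso buffer,
 * keeping size and direction consistent between the two calls. */
static dma_addr_t demo_map(struct device *dev, struct page *page,
			   enum dma_data_direction dir)
{
	/* e.g. dir == DMA_FROM_DEVICE for a receive context's buffer */
	return dma_map_page(dev, page, 0, PAGE_SIZE, dir);
}

static void demo_unmap(struct device *dev, dma_addr_t addr,
		       enum dma_data_direction dir)
{
	/* must match the mapping call, not a fixed DMA_TO_DEVICE */
	dma_unmap_page(dev, addr, PAGE_SIZE, dir);
}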
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0a25a7b38a80..c3cfc647e5e3 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -1,7 +1,6 @@
1 | #ifndef _FIREWIRE_CORE_H | 1 | #ifndef _FIREWIRE_CORE_H |
2 | #define _FIREWIRE_CORE_H | 2 | #define _FIREWIRE_CORE_H |
3 | 3 | ||
4 | #include <linux/dma-mapping.h> | ||
5 | #include <linux/fs.h> | 4 | #include <linux/fs.h> |
6 | #include <linux/list.h> | 5 | #include <linux/list.h> |
7 | #include <linux/idr.h> | 6 | #include <linux/idr.h> |
@@ -97,17 +96,6 @@ int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
97 | int fw_compute_block_crc(u32 *block); | 96 | int fw_compute_block_crc(u32 *block); |
98 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); | 97 | void fw_schedule_bm_work(struct fw_card *card, unsigned long delay); |
99 | 98 | ||
100 | struct fw_descriptor { | ||
101 | struct list_head link; | ||
102 | size_t length; | ||
103 | u32 immediate; | ||
104 | u32 key; | ||
105 | const u32 *data; | ||
106 | }; | ||
107 | |||
108 | int fw_core_add_descriptor(struct fw_descriptor *desc); | ||
109 | void fw_core_remove_descriptor(struct fw_descriptor *desc); | ||
110 | |||
111 | 99 | ||
112 | /* -cdev */ | 100 | /* -cdev */ |
113 | 101 | ||
@@ -130,77 +118,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
130 | 118 | ||
131 | /* -iso */ | 119 | /* -iso */ |
132 | 120 | ||
133 | /* | ||
134 | * The iso packet format allows for an immediate header/payload part | ||
135 | * stored in 'header' immediately after the packet info plus an | ||
136 | * indirect payload part that is pointer to by the 'payload' field. | ||
137 | * Applications can use one or the other or both to implement simple | ||
138 | * low-bandwidth streaming (e.g. audio) or more advanced | ||
139 | * scatter-gather streaming (e.g. assembling video frame automatically). | ||
140 | */ | ||
141 | struct fw_iso_packet { | ||
142 | u16 payload_length; /* Length of indirect payload. */ | ||
143 | u32 interrupt:1; /* Generate interrupt on this packet */ | ||
144 | u32 skip:1; /* Set to not send packet at all. */ | ||
145 | u32 tag:2; | ||
146 | u32 sy:4; | ||
147 | u32 header_length:8; /* Length of immediate header. */ | ||
148 | u32 header[0]; | ||
149 | }; | ||
150 | |||
151 | #define FW_ISO_CONTEXT_TRANSMIT 0 | ||
152 | #define FW_ISO_CONTEXT_RECEIVE 1 | ||
153 | |||
154 | #define FW_ISO_CONTEXT_MATCH_TAG0 1 | ||
155 | #define FW_ISO_CONTEXT_MATCH_TAG1 2 | ||
156 | #define FW_ISO_CONTEXT_MATCH_TAG2 4 | ||
157 | #define FW_ISO_CONTEXT_MATCH_TAG3 8 | ||
158 | #define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15 | ||
159 | |||
160 | /* | ||
161 | * An iso buffer is just a set of pages mapped for DMA in the | ||
162 | * specified direction. Since the pages are to be used for DMA, they | ||
163 | * are not mapped into the kernel virtual address space. We store the | ||
164 | * DMA address in the page private. The helper function | ||
165 | * fw_iso_buffer_map() will map the pages into a given vma. | ||
166 | */ | ||
167 | struct fw_iso_buffer { | ||
168 | enum dma_data_direction direction; | ||
169 | struct page **pages; | ||
170 | int page_count; | ||
171 | }; | ||
172 | |||
173 | typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, | ||
174 | u32 cycle, size_t header_length, | ||
175 | void *header, void *data); | ||
176 | |||
177 | struct fw_iso_context { | ||
178 | struct fw_card *card; | ||
179 | int type; | ||
180 | int channel; | ||
181 | int speed; | ||
182 | size_t header_size; | ||
183 | fw_iso_callback_t callback; | ||
184 | void *callback_data; | ||
185 | }; | ||
186 | |||
187 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, | ||
188 | int page_count, enum dma_data_direction direction); | ||
189 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); | 121 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); |
190 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); | ||
191 | |||
192 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | ||
193 | int type, int channel, int speed, size_t header_size, | ||
194 | fw_iso_callback_t callback, void *callback_data); | ||
195 | int fw_iso_context_queue(struct fw_iso_context *ctx, | ||
196 | struct fw_iso_packet *packet, | ||
197 | struct fw_iso_buffer *buffer, | ||
198 | unsigned long payload); | ||
199 | int fw_iso_context_start(struct fw_iso_context *ctx, | ||
200 | int cycle, int sync, int tags); | ||
201 | int fw_iso_context_stop(struct fw_iso_context *ctx); | ||
202 | void fw_iso_context_destroy(struct fw_iso_context *ctx); | ||
203 | |||
204 | void fw_iso_resource_manage(struct fw_card *card, int generation, | 122 | void fw_iso_resource_manage(struct fw_card *card, int generation, |
205 | u64 channels_mask, int *channel, int *bandwidth, bool allocate); | 123 | u64 channels_mask, int *channel, int *bandwidth, bool allocate); |
206 | 124 | ||
@@ -285,9 +203,4 @@ void fw_flush_transactions(struct fw_card *card);
285 | void fw_send_phy_config(struct fw_card *card, | 203 | void fw_send_phy_config(struct fw_card *card, |
286 | int node_id, int generation, int gap_count); | 204 | int node_id, int generation, int gap_count); |
287 | 205 | ||
288 | static inline int fw_stream_packet_destination_id(int tag, int channel, int sy) | ||
289 | { | ||
290 | return tag << 14 | channel << 8 | sy; | ||
291 | } | ||
292 | |||
293 | #endif /* _FIREWIRE_CORE_H */ | 206 | #endif /* _FIREWIRE_CORE_H */ |
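The iso buffer and iso context declarations removed from core.h above move into the public <linux/firewire.h> (see the diffstat), and core-iso.c now exports the matching symbols so the new firewire-net driver can use them. The following is only a rough sketch of how an in-kernel client might drive that API; the function names, channel number, page count, and callback are illustrative placeholders, not code from this merge.

#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/dma-mapping.h>

/* Placeholder receive callback; a real client parses the iso headers here. */
static void demo_iso_callback(struct fw_iso_context *ctx, u32 cycle,
			      size_t header_length, void *header, void *data)
{
}

static int demo_iso_receive(struct fw_card *card)
{
	struct fw_iso_context *ctx;
	struct fw_iso_buffer buffer;
	int err;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE,
				    31 /* channel */, card->link_speed,
				    8 /* header_size */,
				    demo_iso_callback, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = fw_iso_buffer_init(&buffer, card, 4 /* pages */,
				 DMA_FROM_DEVICE);
	if (err < 0) {
		fw_iso_context_destroy(ctx);
		return err;
	}

	/* queue packets into the buffer with fw_iso_context_queue(), then: */
	return fw_iso_context_start(ctx, -1 /* cycle */, 0 /* sync */,
				    FW_ISO_CONTEXT_MATCH_ALL_TAGS);
}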
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
new file mode 100644
index 000000000000..a42209a73aed
--- /dev/null
+++ b/drivers/firewire/net.c
@@ -0,0 +1,1655 @@
1 | /* | ||
2 | * IPv4 over IEEE 1394, per RFC 2734 | ||
3 | * | ||
4 | * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com> | ||
5 | * | ||
6 | * based on eth1394 by Ben Collins et al | ||
7 | */ | ||
8 | |||
9 | #include <linux/bug.h> | ||
10 | #include <linux/device.h> | ||
11 | #include <linux/ethtool.h> | ||
12 | #include <linux/firewire.h> | ||
13 | #include <linux/firewire-constants.h> | ||
14 | #include <linux/highmem.h> | ||
15 | #include <linux/in.h> | ||
16 | #include <linux/ip.h> | ||
17 | #include <linux/jiffies.h> | ||
18 | #include <linux/mod_devicetable.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/moduleparam.h> | ||
21 | #include <linux/mutex.h> | ||
22 | #include <linux/netdevice.h> | ||
23 | #include <linux/skbuff.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | |||
26 | #include <asm/unaligned.h> | ||
27 | #include <net/arp.h> | ||
28 | |||
29 | #define FWNET_MAX_FRAGMENTS 25 /* arbitrary limit */ | ||
30 | #define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16 * 1024 ? 4 : 2) | ||
31 | |||
32 | #define IEEE1394_BROADCAST_CHANNEL 31 | ||
33 | #define IEEE1394_ALL_NODES (0xffc0 | 0x003f) | ||
34 | #define IEEE1394_MAX_PAYLOAD_S100 512 | ||
35 | #define FWNET_NO_FIFO_ADDR (~0ULL) | ||
36 | |||
37 | #define IANA_SPECIFIER_ID 0x00005eU | ||
38 | #define RFC2734_SW_VERSION 0x000001U | ||
39 | |||
40 | #define IEEE1394_GASP_HDR_SIZE 8 | ||
41 | |||
42 | #define RFC2374_UNFRAG_HDR_SIZE 4 | ||
43 | #define RFC2374_FRAG_HDR_SIZE 8 | ||
44 | #define RFC2374_FRAG_OVERHEAD 4 | ||
45 | |||
46 | #define RFC2374_HDR_UNFRAG 0 /* unfragmented */ | ||
47 | #define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */ | ||
48 | #define RFC2374_HDR_LASTFRAG 2 /* last fragment */ | ||
49 | #define RFC2374_HDR_INTFRAG 3 /* interior fragment */ | ||
50 | |||
51 | #define RFC2734_HW_ADDR_LEN 16 | ||
52 | |||
53 | struct rfc2734_arp { | ||
54 | __be16 hw_type; /* 0x0018 */ | ||
55 | __be16 proto_type; /* 0x0806 */ | ||
56 | u8 hw_addr_len; /* 16 */ | ||
57 | u8 ip_addr_len; /* 4 */ | ||
58 | __be16 opcode; /* ARP Opcode */ | ||
59 | /* Above is exactly the same format as struct arphdr */ | ||
60 | |||
61 | __be64 s_uniq_id; /* Sender's 64bit EUI */ | ||
62 | u8 max_rec; /* Sender's max packet size */ | ||
63 | u8 sspd; /* Sender's max speed */ | ||
64 | __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */ | ||
65 | __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */ | ||
66 | __be32 sip; /* Sender's IP Address */ | ||
67 | __be32 tip; /* IP Address of requested hw addr */ | ||
68 | } __attribute__((packed)); | ||
69 | |||
70 | /* This header format is specific to this driver implementation. */ | ||
71 | #define FWNET_ALEN 8 | ||
72 | #define FWNET_HLEN 10 | ||
73 | struct fwnet_header { | ||
74 | u8 h_dest[FWNET_ALEN]; /* destination address */ | ||
75 | __be16 h_proto; /* packet type ID field */ | ||
76 | } __attribute__((packed)); | ||
77 | |||
78 | /* IPv4 and IPv6 encapsulation header */ | ||
79 | struct rfc2734_header { | ||
80 | u32 w0; | ||
81 | u32 w1; | ||
82 | }; | ||
83 | |||
84 | #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) | ||
85 | #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) | ||
86 | #define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16) | ||
87 | #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) | ||
88 | #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) | ||
89 | |||
90 | #define fwnet_set_hdr_lf(lf) ((lf) << 30) | ||
91 | #define fwnet_set_hdr_ether_type(et) (et) | ||
92 | #define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16) | ||
93 | #define fwnet_set_hdr_fg_off(fgo) (fgo) | ||
94 | |||
95 | #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) | ||
96 | |||
97 | static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr, | ||
98 | unsigned ether_type) | ||
99 | { | ||
100 | hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG) | ||
101 | | fwnet_set_hdr_ether_type(ether_type); | ||
102 | } | ||
103 | |||
104 | static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr, | ||
105 | unsigned ether_type, unsigned dg_size, unsigned dgl) | ||
106 | { | ||
107 | hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG) | ||
108 | | fwnet_set_hdr_dg_size(dg_size) | ||
109 | | fwnet_set_hdr_ether_type(ether_type); | ||
110 | hdr->w1 = fwnet_set_hdr_dgl(dgl); | ||
111 | } | ||
112 | |||
113 | static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr, | ||
114 | unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl) | ||
115 | { | ||
116 | hdr->w0 = fwnet_set_hdr_lf(lf) | ||
117 | | fwnet_set_hdr_dg_size(dg_size) | ||
118 | | fwnet_set_hdr_fg_off(fg_off); | ||
119 | hdr->w1 = fwnet_set_hdr_dgl(dgl); | ||
120 | } | ||
121 | |||
122 | /* This list keeps track of what parts of the datagram have been filled in */ | ||
123 | struct fwnet_fragment_info { | ||
124 | struct list_head fi_link; | ||
125 | u16 offset; | ||
126 | u16 len; | ||
127 | }; | ||
128 | |||
129 | struct fwnet_partial_datagram { | ||
130 | struct list_head pd_link; | ||
131 | struct list_head fi_list; | ||
132 | struct sk_buff *skb; | ||
133 | /* FIXME Why not use skb->data? */ | ||
134 | char *pbuf; | ||
135 | u16 datagram_label; | ||
136 | u16 ether_type; | ||
137 | u16 datagram_size; | ||
138 | }; | ||
139 | |||
140 | static DEFINE_MUTEX(fwnet_device_mutex); | ||
141 | static LIST_HEAD(fwnet_device_list); | ||
142 | |||
143 | struct fwnet_device { | ||
144 | struct list_head dev_link; | ||
145 | spinlock_t lock; | ||
146 | enum { | ||
147 | FWNET_BROADCAST_ERROR, | ||
148 | FWNET_BROADCAST_RUNNING, | ||
149 | FWNET_BROADCAST_STOPPED, | ||
150 | } broadcast_state; | ||
151 | struct fw_iso_context *broadcast_rcv_context; | ||
152 | struct fw_iso_buffer broadcast_rcv_buffer; | ||
153 | void **broadcast_rcv_buffer_ptrs; | ||
154 | unsigned broadcast_rcv_next_ptr; | ||
155 | unsigned num_broadcast_rcv_ptrs; | ||
156 | unsigned rcv_buffer_size; | ||
157 | /* | ||
158 | * This value is the maximum unfragmented datagram size that can be | ||
159 | * sent by the hardware. It already has the GASP overhead and the | ||
160 | * unfragmented datagram header overhead calculated into it. | ||
161 | */ | ||
162 | unsigned broadcast_xmt_max_payload; | ||
163 | u16 broadcast_xmt_datagramlabel; | ||
164 | |||
165 | /* | ||
166 | * The CSR address that remote nodes must send datagrams to for us to | ||
167 | * receive them. | ||
168 | */ | ||
169 | struct fw_address_handler handler; | ||
170 | u64 local_fifo; | ||
171 | |||
172 | /* List of packets to be sent */ | ||
173 | struct list_head packet_list; | ||
174 | /* | ||
175 | * List of packets that were broadcasted. When we get an ISO interrupt | ||
176 | * one of them has been sent | ||
177 | */ | ||
178 | struct list_head broadcasted_list; | ||
179 | /* List of packets that have been sent but not yet acked */ | ||
180 | struct list_head sent_list; | ||
181 | |||
182 | struct list_head peer_list; | ||
183 | struct fw_card *card; | ||
184 | struct net_device *netdev; | ||
185 | }; | ||
186 | |||
187 | struct fwnet_peer { | ||
188 | struct list_head peer_link; | ||
189 | struct fwnet_device *dev; | ||
190 | u64 guid; | ||
191 | u64 fifo; | ||
192 | |||
193 | /* guarded by dev->lock */ | ||
194 | struct list_head pd_list; /* received partial datagrams */ | ||
195 | unsigned pdg_size; /* pd_list size */ | ||
196 | |||
197 | u16 datagram_label; /* outgoing datagram label */ | ||
198 | unsigned max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */ | ||
199 | int node_id; | ||
200 | int generation; | ||
201 | unsigned speed; | ||
202 | }; | ||
203 | |||
204 | /* This is our task struct. It's used for the packet complete callback. */ | ||
205 | struct fwnet_packet_task { | ||
206 | /* | ||
207 | * ptask can actually be on dev->packet_list, dev->broadcasted_list, | ||
208 | * or dev->sent_list depending on its current state. | ||
209 | */ | ||
210 | struct list_head pt_link; | ||
211 | struct fw_transaction transaction; | ||
212 | struct rfc2734_header hdr; | ||
213 | struct sk_buff *skb; | ||
214 | struct fwnet_device *dev; | ||
215 | |||
216 | int outstanding_pkts; | ||
217 | unsigned max_payload; | ||
218 | u64 fifo_addr; | ||
219 | u16 dest_node; | ||
220 | u8 generation; | ||
221 | u8 speed; | ||
222 | }; | ||
223 | |||
224 | /* | ||
225 | * saddr == NULL means use device source address. | ||
226 | * daddr == NULL means leave destination address (eg unresolved arp). | ||
227 | */ | ||
228 | static int fwnet_header_create(struct sk_buff *skb, struct net_device *net, | ||
229 | unsigned short type, const void *daddr, | ||
230 | const void *saddr, unsigned len) | ||
231 | { | ||
232 | struct fwnet_header *h; | ||
233 | |||
234 | h = (struct fwnet_header *)skb_push(skb, sizeof(*h)); | ||
235 | put_unaligned_be16(type, &h->h_proto); | ||
236 | |||
237 | if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) { | ||
238 | memset(h->h_dest, 0, net->addr_len); | ||
239 | |||
240 | return net->hard_header_len; | ||
241 | } | ||
242 | |||
243 | if (daddr) { | ||
244 | memcpy(h->h_dest, daddr, net->addr_len); | ||
245 | |||
246 | return net->hard_header_len; | ||
247 | } | ||
248 | |||
249 | return -net->hard_header_len; | ||
250 | } | ||
251 | |||
252 | static int fwnet_header_rebuild(struct sk_buff *skb) | ||
253 | { | ||
254 | struct fwnet_header *h = (struct fwnet_header *)skb->data; | ||
255 | |||
256 | if (get_unaligned_be16(&h->h_proto) == ETH_P_IP) | ||
257 | return arp_find((unsigned char *)&h->h_dest, skb); | ||
258 | |||
259 | fw_notify("%s: unable to resolve type %04x addresses\n", | ||
260 | skb->dev->name, be16_to_cpu(h->h_proto)); | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | static int fwnet_header_cache(const struct neighbour *neigh, | ||
265 | struct hh_cache *hh) | ||
266 | { | ||
267 | struct net_device *net; | ||
268 | struct fwnet_header *h; | ||
269 | |||
270 | if (hh->hh_type == cpu_to_be16(ETH_P_802_3)) | ||
271 | return -1; | ||
272 | net = neigh->dev; | ||
273 | h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h)); | ||
274 | h->h_proto = hh->hh_type; | ||
275 | memcpy(h->h_dest, neigh->ha, net->addr_len); | ||
276 | hh->hh_len = FWNET_HLEN; | ||
277 | |||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | /* Called by Address Resolution module to notify changes in address. */ | ||
282 | static void fwnet_header_cache_update(struct hh_cache *hh, | ||
283 | const struct net_device *net, const unsigned char *haddr) | ||
284 | { | ||
285 | memcpy((u8 *)hh->hh_data + 16 - FWNET_HLEN, haddr, net->addr_len); | ||
286 | } | ||
287 | |||
288 | static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr) | ||
289 | { | ||
290 | memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN); | ||
291 | |||
292 | return FWNET_ALEN; | ||
293 | } | ||
294 | |||
295 | static const struct header_ops fwnet_header_ops = { | ||
296 | .create = fwnet_header_create, | ||
297 | .rebuild = fwnet_header_rebuild, | ||
298 | .cache = fwnet_header_cache, | ||
299 | .cache_update = fwnet_header_cache_update, | ||
300 | .parse = fwnet_header_parse, | ||
301 | }; | ||
302 | |||
303 | /* FIXME: is this correct for all cases? */ | ||
304 | static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd, | ||
305 | unsigned offset, unsigned len) | ||
306 | { | ||
307 | struct fwnet_fragment_info *fi; | ||
308 | unsigned end = offset + len; | ||
309 | |||
310 | list_for_each_entry(fi, &pd->fi_list, fi_link) | ||
311 | if (offset < fi->offset + fi->len && end > fi->offset) | ||
312 | return true; | ||
313 | |||
314 | return false; | ||
315 | } | ||
316 | |||
317 | /* Assumes that new fragment does not overlap any existing fragments */ | ||
318 | static struct fwnet_fragment_info *fwnet_frag_new( | ||
319 | struct fwnet_partial_datagram *pd, unsigned offset, unsigned len) | ||
320 | { | ||
321 | struct fwnet_fragment_info *fi, *fi2, *new; | ||
322 | struct list_head *list; | ||
323 | |||
324 | list = &pd->fi_list; | ||
325 | list_for_each_entry(fi, &pd->fi_list, fi_link) { | ||
326 | if (fi->offset + fi->len == offset) { | ||
327 | /* The new fragment can be tacked on to the end */ | ||
328 | /* Did the new fragment plug a hole? */ | ||
329 | fi2 = list_entry(fi->fi_link.next, | ||
330 | struct fwnet_fragment_info, fi_link); | ||
331 | if (fi->offset + fi->len == fi2->offset) { | ||
332 | /* glue fragments together */ | ||
333 | fi->len += len + fi2->len; | ||
334 | list_del(&fi2->fi_link); | ||
335 | kfree(fi2); | ||
336 | } else { | ||
337 | fi->len += len; | ||
338 | } | ||
339 | |||
340 | return fi; | ||
341 | } | ||
342 | if (offset + len == fi->offset) { | ||
343 | /* The new fragment can be tacked on to the beginning */ | ||
344 | /* Did the new fragment plug a hole? */ | ||
345 | fi2 = list_entry(fi->fi_link.prev, | ||
346 | struct fwnet_fragment_info, fi_link); | ||
347 | if (fi2->offset + fi2->len == fi->offset) { | ||
348 | /* glue fragments together */ | ||
349 | fi2->len += fi->len + len; | ||
350 | list_del(&fi->fi_link); | ||
351 | kfree(fi); | ||
352 | |||
353 | return fi2; | ||
354 | } | ||
355 | fi->offset = offset; | ||
356 | fi->len += len; | ||
357 | |||
358 | return fi; | ||
359 | } | ||
360 | if (offset > fi->offset + fi->len) { | ||
361 | list = &fi->fi_link; | ||
362 | break; | ||
363 | } | ||
364 | if (offset + len < fi->offset) { | ||
365 | list = fi->fi_link.prev; | ||
366 | break; | ||
367 | } | ||
368 | } | ||
369 | |||
370 | new = kmalloc(sizeof(*new), GFP_ATOMIC); | ||
371 | if (!new) { | ||
372 | fw_error("out of memory\n"); | ||
373 | return NULL; | ||
374 | } | ||
375 | |||
376 | new->offset = offset; | ||
377 | new->len = len; | ||
378 | list_add(&new->fi_link, list); | ||
379 | |||
380 | return new; | ||
381 | } | ||
382 | |||
383 | static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net, | ||
384 | struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size, | ||
385 | void *frag_buf, unsigned frag_off, unsigned frag_len) | ||
386 | { | ||
387 | struct fwnet_partial_datagram *new; | ||
388 | struct fwnet_fragment_info *fi; | ||
389 | |||
390 | new = kmalloc(sizeof(*new), GFP_ATOMIC); | ||
391 | if (!new) | ||
392 | goto fail; | ||
393 | |||
394 | INIT_LIST_HEAD(&new->fi_list); | ||
395 | fi = fwnet_frag_new(new, frag_off, frag_len); | ||
396 | if (fi == NULL) | ||
397 | goto fail_w_new; | ||
398 | |||
399 | new->datagram_label = datagram_label; | ||
400 | new->datagram_size = dg_size; | ||
401 | new->skb = dev_alloc_skb(dg_size + net->hard_header_len + 15); | ||
402 | if (new->skb == NULL) | ||
403 | goto fail_w_fi; | ||
404 | |||
405 | skb_reserve(new->skb, (net->hard_header_len + 15) & ~15); | ||
406 | new->pbuf = skb_put(new->skb, dg_size); | ||
407 | memcpy(new->pbuf + frag_off, frag_buf, frag_len); | ||
408 | list_add_tail(&new->pd_link, &peer->pd_list); | ||
409 | |||
410 | return new; | ||
411 | |||
412 | fail_w_fi: | ||
413 | kfree(fi); | ||
414 | fail_w_new: | ||
415 | kfree(new); | ||
416 | fail: | ||
417 | fw_error("out of memory\n"); | ||
418 | |||
419 | return NULL; | ||
420 | } | ||
421 | |||
422 | static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer, | ||
423 | u16 datagram_label) | ||
424 | { | ||
425 | struct fwnet_partial_datagram *pd; | ||
426 | |||
427 | list_for_each_entry(pd, &peer->pd_list, pd_link) | ||
428 | if (pd->datagram_label == datagram_label) | ||
429 | return pd; | ||
430 | |||
431 | return NULL; | ||
432 | } | ||
433 | |||
434 | |||
435 | static void fwnet_pd_delete(struct fwnet_partial_datagram *old) | ||
436 | { | ||
437 | struct fwnet_fragment_info *fi, *n; | ||
438 | |||
439 | list_for_each_entry_safe(fi, n, &old->fi_list, fi_link) | ||
440 | kfree(fi); | ||
441 | |||
442 | list_del(&old->pd_link); | ||
443 | dev_kfree_skb_any(old->skb); | ||
444 | kfree(old); | ||
445 | } | ||
446 | |||
447 | static bool fwnet_pd_update(struct fwnet_peer *peer, | ||
448 | struct fwnet_partial_datagram *pd, void *frag_buf, | ||
449 | unsigned frag_off, unsigned frag_len) | ||
450 | { | ||
451 | if (fwnet_frag_new(pd, frag_off, frag_len) == NULL) | ||
452 | return false; | ||
453 | |||
454 | memcpy(pd->pbuf + frag_off, frag_buf, frag_len); | ||
455 | |||
456 | /* | ||
457 | * Move list entry to beginnig of list so that oldest partial | ||
458 | * datagrams percolate to the end of the list | ||
459 | */ | ||
460 | list_move_tail(&pd->pd_link, &peer->pd_list); | ||
461 | |||
462 | return true; | ||
463 | } | ||
464 | |||
465 | static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd) | ||
466 | { | ||
467 | struct fwnet_fragment_info *fi; | ||
468 | |||
469 | fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link); | ||
470 | |||
471 | return fi->len == pd->datagram_size; | ||
472 | } | ||
473 | |||
474 | /* caller must hold dev->lock */ | ||
475 | static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev, | ||
476 | u64 guid) | ||
477 | { | ||
478 | struct fwnet_peer *peer; | ||
479 | |||
480 | list_for_each_entry(peer, &dev->peer_list, peer_link) | ||
481 | if (peer->guid == guid) | ||
482 | return peer; | ||
483 | |||
484 | return NULL; | ||
485 | } | ||
486 | |||
487 | /* caller must hold dev->lock */ | ||
488 | static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev, | ||
489 | int node_id, int generation) | ||
490 | { | ||
491 | struct fwnet_peer *peer; | ||
492 | |||
493 | list_for_each_entry(peer, &dev->peer_list, peer_link) | ||
494 | if (peer->node_id == node_id && | ||
495 | peer->generation == generation) | ||
496 | return peer; | ||
497 | |||
498 | return NULL; | ||
499 | } | ||
500 | |||
501 | /* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */ | ||
502 | static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed) | ||
503 | { | ||
504 | max_rec = min(max_rec, speed + 8); | ||
505 | max_rec = min(max_rec, 0xbU); /* <= 4096 */ | ||
506 | if (max_rec < 8) { | ||
507 | fw_notify("max_rec %x out of range\n", max_rec); | ||
508 | max_rec = 8; | ||
509 | } | ||
510 | |||
511 | return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE; | ||
512 | } | ||
513 | |||
514 | |||
515 | static int fwnet_finish_incoming_packet(struct net_device *net, | ||
516 | struct sk_buff *skb, u16 source_node_id, | ||
517 | bool is_broadcast, u16 ether_type) | ||
518 | { | ||
519 | struct fwnet_device *dev; | ||
520 | static const __be64 broadcast_hw = cpu_to_be64(~0ULL); | ||
521 | int status; | ||
522 | __be64 guid; | ||
523 | |||
524 | dev = netdev_priv(net); | ||
525 | /* Write metadata, and then pass to the receive level */ | ||
526 | skb->dev = net; | ||
527 | skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */ | ||
528 | |||
529 | /* | ||
530 | * Parse the encapsulation header. This actually does the job of | ||
531 | * converting to an ethernet frame header, as well as arp | ||
532 | * conversion if needed. ARP conversion is easier in this | ||
533 | * direction, since we are using ethernet as our backend. | ||
534 | */ | ||
535 | /* | ||
536 | * If this is an ARP packet, convert it. First, we want to make | ||
537 | * use of some of the fields, since they tell us a little bit | ||
538 | * about the sending machine. | ||
539 | */ | ||
540 | if (ether_type == ETH_P_ARP) { | ||
541 | struct rfc2734_arp *arp1394; | ||
542 | struct arphdr *arp; | ||
543 | unsigned char *arp_ptr; | ||
544 | u64 fifo_addr; | ||
545 | u64 peer_guid; | ||
546 | unsigned sspd; | ||
547 | u16 max_payload; | ||
548 | struct fwnet_peer *peer; | ||
549 | unsigned long flags; | ||
550 | |||
551 | arp1394 = (struct rfc2734_arp *)skb->data; | ||
552 | arp = (struct arphdr *)skb->data; | ||
553 | arp_ptr = (unsigned char *)(arp + 1); | ||
554 | peer_guid = get_unaligned_be64(&arp1394->s_uniq_id); | ||
555 | fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32 | ||
556 | | get_unaligned_be32(&arp1394->fifo_lo); | ||
557 | |||
558 | sspd = arp1394->sspd; | ||
559 | /* Sanity check. OS X 10.3 PPC reportedly sends 131. */ | ||
560 | if (sspd > SCODE_3200) { | ||
561 | fw_notify("sspd %x out of range\n", sspd); | ||
562 | sspd = SCODE_3200; | ||
563 | } | ||
564 | max_payload = fwnet_max_payload(arp1394->max_rec, sspd); | ||
565 | |||
566 | spin_lock_irqsave(&dev->lock, flags); | ||
567 | peer = fwnet_peer_find_by_guid(dev, peer_guid); | ||
568 | if (peer) { | ||
569 | peer->fifo = fifo_addr; | ||
570 | |||
571 | if (peer->speed > sspd) | ||
572 | peer->speed = sspd; | ||
573 | if (peer->max_payload > max_payload) | ||
574 | peer->max_payload = max_payload; | ||
575 | } | ||
576 | spin_unlock_irqrestore(&dev->lock, flags); | ||
577 | |||
578 | if (!peer) { | ||
579 | fw_notify("No peer for ARP packet from %016llx\n", | ||
580 | (unsigned long long)peer_guid); | ||
581 | goto failed_proto; | ||
582 | } | ||
583 | |||
584 | /* | ||
585 | * Now that we're done with the 1394 specific stuff, we'll | ||
586 | * need to alter some of the data. Believe it or not, all | ||
587 | * that needs to be done is sender_IP_address needs to be | ||
588 | * moved, the destination hardware address get stuffed | ||
589 | * in and the hardware address length set to 8. | ||
590 | * | ||
591 | * IMPORTANT: The code below overwrites 1394 specific data | ||
592 | * needed above so keep the munging of the data for the | ||
593 | * higher level IP stack last. | ||
594 | */ | ||
595 | |||
596 | arp->ar_hln = 8; | ||
597 | /* skip over sender unique id */ | ||
598 | arp_ptr += arp->ar_hln; | ||
599 | /* move sender IP addr */ | ||
600 | put_unaligned(arp1394->sip, (u32 *)arp_ptr); | ||
601 | /* skip over sender IP addr */ | ||
602 | arp_ptr += arp->ar_pln; | ||
603 | |||
604 | if (arp->ar_op == htons(ARPOP_REQUEST)) | ||
605 | memset(arp_ptr, 0, sizeof(u64)); | ||
606 | else | ||
607 | memcpy(arp_ptr, net->dev_addr, sizeof(u64)); | ||
608 | } | ||
609 | |||
610 | /* Now add the ethernet header. */ | ||
611 | guid = cpu_to_be64(dev->card->guid); | ||
612 | if (dev_hard_header(skb, net, ether_type, | ||
613 | is_broadcast ? &broadcast_hw : &guid, | ||
614 | NULL, skb->len) >= 0) { | ||
615 | struct fwnet_header *eth; | ||
616 | u16 *rawp; | ||
617 | __be16 protocol; | ||
618 | |||
619 | skb_reset_mac_header(skb); | ||
620 | skb_pull(skb, sizeof(*eth)); | ||
621 | eth = (struct fwnet_header *)skb_mac_header(skb); | ||
622 | if (*eth->h_dest & 1) { | ||
623 | if (memcmp(eth->h_dest, net->broadcast, | ||
624 | net->addr_len) == 0) | ||
625 | skb->pkt_type = PACKET_BROADCAST; | ||
626 | #if 0 | ||
627 | else | ||
628 | skb->pkt_type = PACKET_MULTICAST; | ||
629 | #endif | ||
630 | } else { | ||
631 | if (memcmp(eth->h_dest, net->dev_addr, net->addr_len)) | ||
632 | skb->pkt_type = PACKET_OTHERHOST; | ||
633 | } | ||
634 | if (ntohs(eth->h_proto) >= 1536) { | ||
635 | protocol = eth->h_proto; | ||
636 | } else { | ||
637 | rawp = (u16 *)skb->data; | ||
638 | if (*rawp == 0xffff) | ||
639 | protocol = htons(ETH_P_802_3); | ||
640 | else | ||
641 | protocol = htons(ETH_P_802_2); | ||
642 | } | ||
643 | skb->protocol = protocol; | ||
644 | } | ||
645 | status = netif_rx(skb); | ||
646 | if (status == NET_RX_DROP) { | ||
647 | net->stats.rx_errors++; | ||
648 | net->stats.rx_dropped++; | ||
649 | } else { | ||
650 | net->stats.rx_packets++; | ||
651 | net->stats.rx_bytes += skb->len; | ||
652 | } | ||
653 | if (netif_queue_stopped(net)) | ||
654 | netif_wake_queue(net); | ||
655 | |||
656 | return 0; | ||
657 | |||
658 | failed_proto: | ||
659 | net->stats.rx_errors++; | ||
660 | net->stats.rx_dropped++; | ||
661 | |||
662 | dev_kfree_skb_any(skb); | ||
663 | if (netif_queue_stopped(net)) | ||
664 | netif_wake_queue(net); | ||
665 | |||
666 | net->last_rx = jiffies; | ||
667 | |||
668 | return 0; | ||
669 | } | ||
670 | |||
671 | static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, | ||
672 | int source_node_id, int generation, | ||
673 | bool is_broadcast) | ||
674 | { | ||
675 | struct sk_buff *skb; | ||
676 | struct net_device *net = dev->netdev; | ||
677 | struct rfc2734_header hdr; | ||
678 | unsigned lf; | ||
679 | unsigned long flags; | ||
680 | struct fwnet_peer *peer; | ||
681 | struct fwnet_partial_datagram *pd; | ||
682 | int fg_off; | ||
683 | int dg_size; | ||
684 | u16 datagram_label; | ||
685 | int retval; | ||
686 | u16 ether_type; | ||
687 | |||
688 | hdr.w0 = be32_to_cpu(buf[0]); | ||
689 | lf = fwnet_get_hdr_lf(&hdr); | ||
690 | if (lf == RFC2374_HDR_UNFRAG) { | ||
691 | /* | ||
692 | * An unfragmented datagram has been received by the ieee1394 | ||
693 | * bus. Build an skbuff around it so we can pass it to the | ||
694 | * high level network layer. | ||
695 | */ | ||
696 | ether_type = fwnet_get_hdr_ether_type(&hdr); | ||
697 | buf++; | ||
698 | len -= RFC2374_UNFRAG_HDR_SIZE; | ||
699 | |||
700 | skb = dev_alloc_skb(len + net->hard_header_len + 15); | ||
701 | if (unlikely(!skb)) { | ||
702 | fw_error("out of memory\n"); | ||
703 | net->stats.rx_dropped++; | ||
704 | |||
705 | return -1; | ||
706 | } | ||
707 | skb_reserve(skb, (net->hard_header_len + 15) & ~15); | ||
708 | memcpy(skb_put(skb, len), buf, len); | ||
709 | |||
710 | return fwnet_finish_incoming_packet(net, skb, source_node_id, | ||
711 | is_broadcast, ether_type); | ||
712 | } | ||
713 | /* A datagram fragment has been received, now the fun begins. */ | ||
714 | hdr.w1 = ntohl(buf[1]); | ||
715 | buf += 2; | ||
716 | len -= RFC2374_FRAG_HDR_SIZE; | ||
717 | if (lf == RFC2374_HDR_FIRSTFRAG) { | ||
718 | ether_type = fwnet_get_hdr_ether_type(&hdr); | ||
719 | fg_off = 0; | ||
720 | } else { | ||
721 | ether_type = 0; | ||
722 | fg_off = fwnet_get_hdr_fg_off(&hdr); | ||
723 | } | ||
724 | datagram_label = fwnet_get_hdr_dgl(&hdr); | ||
725 | dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */ | ||
726 | |||
727 | spin_lock_irqsave(&dev->lock, flags); | ||
728 | |||
729 | peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation); | ||
730 | if (!peer) | ||
731 | goto bad_proto; | ||
732 | |||
733 | pd = fwnet_pd_find(peer, datagram_label); | ||
734 | if (pd == NULL) { | ||
735 | while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) { | ||
736 | /* remove the oldest */ | ||
737 | fwnet_pd_delete(list_first_entry(&peer->pd_list, | ||
738 | struct fwnet_partial_datagram, pd_link)); | ||
739 | peer->pdg_size--; | ||
740 | } | ||
741 | pd = fwnet_pd_new(net, peer, datagram_label, | ||
742 | dg_size, buf, fg_off, len); | ||
743 | if (pd == NULL) { | ||
744 | retval = -ENOMEM; | ||
745 | goto bad_proto; | ||
746 | } | ||
747 | peer->pdg_size++; | ||
748 | } else { | ||
749 | if (fwnet_frag_overlap(pd, fg_off, len) || | ||
750 | pd->datagram_size != dg_size) { | ||
751 | /* | ||
752 | * Differing datagram sizes or overlapping fragments, | ||
753 | * discard old datagram and start a new one. | ||
754 | */ | ||
755 | fwnet_pd_delete(pd); | ||
756 | pd = fwnet_pd_new(net, peer, datagram_label, | ||
757 | dg_size, buf, fg_off, len); | ||
758 | if (pd == NULL) { | ||
759 | retval = -ENOMEM; | ||
760 | peer->pdg_size--; | ||
761 | goto bad_proto; | ||
762 | } | ||
763 | } else { | ||
764 | if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) { | ||
765 | /* | ||
766 | * Couldn't save off fragment anyway | ||
767 | * so might as well obliterate the | ||
768 | * datagram now. | ||
769 | */ | ||
770 | fwnet_pd_delete(pd); | ||
771 | peer->pdg_size--; | ||
772 | goto bad_proto; | ||
773 | } | ||
774 | } | ||
775 | } /* new datagram or add to existing one */ | ||
776 | |||
777 | if (lf == RFC2374_HDR_FIRSTFRAG) | ||
778 | pd->ether_type = ether_type; | ||
779 | |||
780 | if (fwnet_pd_is_complete(pd)) { | ||
781 | ether_type = pd->ether_type; | ||
782 | peer->pdg_size--; | ||
783 | skb = skb_get(pd->skb); | ||
784 | fwnet_pd_delete(pd); | ||
785 | |||
786 | spin_unlock_irqrestore(&dev->lock, flags); | ||
787 | |||
788 | return fwnet_finish_incoming_packet(net, skb, source_node_id, | ||
789 | false, ether_type); | ||
790 | } | ||
791 | /* | ||
792 | * Datagram is not complete, we're done for the | ||
793 | * moment. | ||
794 | */ | ||
795 | spin_unlock_irqrestore(&dev->lock, flags); | ||
796 | |||
797 | return 0; | ||
798 | |||
799 | bad_proto: | ||
800 | spin_unlock_irqrestore(&dev->lock, flags); | ||
801 | |||
802 | if (netif_queue_stopped(net)) | ||
803 | netif_wake_queue(net); | ||
804 | |||
805 | return 0; | ||
806 | } | ||
807 | |||
808 | static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, | ||
809 | int tcode, int destination, int source, int generation, | ||
810 | int speed, unsigned long long offset, void *payload, | ||
811 | size_t length, void *callback_data) | ||
812 | { | ||
813 | struct fwnet_device *dev = callback_data; | ||
814 | int rcode; | ||
815 | |||
816 | if (destination == IEEE1394_ALL_NODES) { | ||
817 | kfree(r); | ||
818 | |||
819 | return; | ||
820 | } | ||
821 | |||
822 | if (offset != dev->handler.offset) | ||
823 | rcode = RCODE_ADDRESS_ERROR; | ||
824 | else if (tcode != TCODE_WRITE_BLOCK_REQUEST) | ||
825 | rcode = RCODE_TYPE_ERROR; | ||
826 | else if (fwnet_incoming_packet(dev, payload, length, | ||
827 | source, generation, false) != 0) { | ||
828 | fw_error("Incoming packet failure\n"); | ||
829 | rcode = RCODE_CONFLICT_ERROR; | ||
830 | } else | ||
831 | rcode = RCODE_COMPLETE; | ||
832 | |||
833 | fw_send_response(card, r, rcode); | ||
834 | } | ||
835 | |||
836 | static void fwnet_receive_broadcast(struct fw_iso_context *context, | ||
837 | u32 cycle, size_t header_length, void *header, void *data) | ||
838 | { | ||
839 | struct fwnet_device *dev; | ||
840 | struct fw_iso_packet packet; | ||
841 | struct fw_card *card; | ||
842 | __be16 *hdr_ptr; | ||
843 | __be32 *buf_ptr; | ||
844 | int retval; | ||
845 | u32 length; | ||
846 | u16 source_node_id; | ||
847 | u32 specifier_id; | ||
848 | u32 ver; | ||
849 | unsigned long offset; | ||
850 | unsigned long flags; | ||
851 | |||
852 | dev = data; | ||
853 | card = dev->card; | ||
854 | hdr_ptr = header; | ||
855 | length = be16_to_cpup(hdr_ptr); | ||
856 | |||
857 | spin_lock_irqsave(&dev->lock, flags); | ||
858 | |||
859 | offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr; | ||
860 | buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++]; | ||
861 | if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs) | ||
862 | dev->broadcast_rcv_next_ptr = 0; | ||
863 | |||
864 | spin_unlock_irqrestore(&dev->lock, flags); | ||
865 | |||
866 | specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 | ||
867 | | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; | ||
868 | ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; | ||
869 | source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; | ||
870 | |||
871 | if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) { | ||
872 | buf_ptr += 2; | ||
873 | length -= IEEE1394_GASP_HDR_SIZE; | ||
874 | fwnet_incoming_packet(dev, buf_ptr, length, | ||
875 | source_node_id, -1, true); | ||
876 | } | ||
877 | |||
878 | packet.payload_length = dev->rcv_buffer_size; | ||
879 | packet.interrupt = 1; | ||
880 | packet.skip = 0; | ||
881 | packet.tag = 3; | ||
882 | packet.sy = 0; | ||
883 | packet.header_length = IEEE1394_GASP_HDR_SIZE; | ||
884 | |||
885 | spin_lock_irqsave(&dev->lock, flags); | ||
886 | |||
887 | retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet, | ||
888 | &dev->broadcast_rcv_buffer, offset); | ||
889 | |||
890 | spin_unlock_irqrestore(&dev->lock, flags); | ||
891 | |||
892 | if (retval < 0) | ||
893 | fw_error("requeue failed\n"); | ||
894 | } | ||
895 | |||
896 | static struct kmem_cache *fwnet_packet_task_cache; | ||
897 | |||
898 | static int fwnet_send_packet(struct fwnet_packet_task *ptask); | ||
899 | |||
900 | static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | ||
901 | { | ||
902 | struct fwnet_device *dev; | ||
903 | unsigned long flags; | ||
904 | |||
905 | dev = ptask->dev; | ||
906 | |||
907 | spin_lock_irqsave(&dev->lock, flags); | ||
908 | list_del(&ptask->pt_link); | ||
909 | spin_unlock_irqrestore(&dev->lock, flags); | ||
910 | |||
911 | ptask->outstanding_pkts--; /* FIXME access inside lock */ | ||
912 | |||
913 | if (ptask->outstanding_pkts > 0) { | ||
914 | u16 dg_size; | ||
915 | u16 fg_off; | ||
916 | u16 datagram_label; | ||
917 | u16 lf; | ||
918 | struct sk_buff *skb; | ||
919 | |||
920 | /* Update the ptask to point to the next fragment and send it */ | ||
921 | lf = fwnet_get_hdr_lf(&ptask->hdr); | ||
922 | switch (lf) { | ||
923 | case RFC2374_HDR_LASTFRAG: | ||
924 | case RFC2374_HDR_UNFRAG: | ||
925 | default: | ||
926 | fw_error("Outstanding packet %x lf %x, header %x,%x\n", | ||
927 | ptask->outstanding_pkts, lf, ptask->hdr.w0, | ||
928 | ptask->hdr.w1); | ||
929 | BUG(); | ||
930 | |||
931 | case RFC2374_HDR_FIRSTFRAG: | ||
932 | /* Set frag type here for future interior fragments */ | ||
933 | dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); | ||
934 | fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE; | ||
935 | datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); | ||
936 | break; | ||
937 | |||
938 | case RFC2374_HDR_INTFRAG: | ||
939 | dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); | ||
940 | fg_off = fwnet_get_hdr_fg_off(&ptask->hdr) | ||
941 | + ptask->max_payload - RFC2374_FRAG_HDR_SIZE; | ||
942 | datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); | ||
943 | break; | ||
944 | } | ||
945 | skb = ptask->skb; | ||
946 | skb_pull(skb, ptask->max_payload); | ||
947 | if (ptask->outstanding_pkts > 1) { | ||
948 | fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, | ||
949 | dg_size, fg_off, datagram_label); | ||
950 | } else { | ||
951 | fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG, | ||
952 | dg_size, fg_off, datagram_label); | ||
953 | ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; | ||
954 | } | ||
955 | fwnet_send_packet(ptask); | ||
956 | } else { | ||
957 | dev_kfree_skb_any(ptask->skb); | ||
958 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
959 | } | ||
960 | } | ||
961 | |||
962 | static void fwnet_write_complete(struct fw_card *card, int rcode, | ||
963 | void *payload, size_t length, void *data) | ||
964 | { | ||
965 | struct fwnet_packet_task *ptask; | ||
966 | |||
967 | ptask = data; | ||
968 | |||
969 | if (rcode == RCODE_COMPLETE) | ||
970 | fwnet_transmit_packet_done(ptask); | ||
971 | else | ||
972 | fw_error("fwnet_write_complete: failed: %x\n", rcode); | ||
973 | /* ??? error recovery */ | ||
974 | } | ||
975 | |||
976 | static int fwnet_send_packet(struct fwnet_packet_task *ptask) | ||
977 | { | ||
978 | struct fwnet_device *dev; | ||
979 | unsigned tx_len; | ||
980 | struct rfc2734_header *bufhdr; | ||
981 | unsigned long flags; | ||
982 | |||
983 | dev = ptask->dev; | ||
984 | tx_len = ptask->max_payload; | ||
985 | switch (fwnet_get_hdr_lf(&ptask->hdr)) { | ||
986 | case RFC2374_HDR_UNFRAG: | ||
987 | bufhdr = (struct rfc2734_header *) | ||
988 | skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE); | ||
989 | put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); | ||
990 | break; | ||
991 | |||
992 | case RFC2374_HDR_FIRSTFRAG: | ||
993 | case RFC2374_HDR_INTFRAG: | ||
994 | case RFC2374_HDR_LASTFRAG: | ||
995 | bufhdr = (struct rfc2734_header *) | ||
996 | skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE); | ||
997 | put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); | ||
998 | put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1); | ||
999 | break; | ||
1000 | |||
1001 | default: | ||
1002 | BUG(); | ||
1003 | } | ||
1004 | if (ptask->dest_node == IEEE1394_ALL_NODES) { | ||
1005 | u8 *p; | ||
1006 | int generation; | ||
1007 | int node_id; | ||
1008 | |||
1009 | /* ptask->generation may not have been set yet */ | ||
1010 | generation = dev->card->generation; | ||
1011 | smp_rmb(); | ||
1012 | node_id = dev->card->node_id; | ||
1013 | |||
1014 | p = skb_push(ptask->skb, 8); | ||
1015 | put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p); | ||
1016 | put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 | ||
1017 | | RFC2734_SW_VERSION, &p[4]); | ||
1018 | |||
1019 | /* We should not transmit if broadcast_channel.valid == 0. */ | ||
1020 | fw_send_request(dev->card, &ptask->transaction, | ||
1021 | TCODE_STREAM_DATA, | ||
1022 | fw_stream_packet_destination_id(3, | ||
1023 | IEEE1394_BROADCAST_CHANNEL, 0), | ||
1024 | generation, SCODE_100, 0ULL, ptask->skb->data, | ||
1025 | tx_len + 8, fwnet_write_complete, ptask); | ||
1026 | |||
1027 | /* FIXME race? */ | ||
1028 | spin_lock_irqsave(&dev->lock, flags); | ||
1029 | list_add_tail(&ptask->pt_link, &dev->broadcasted_list); | ||
1030 | spin_unlock_irqrestore(&dev->lock, flags); | ||
1031 | |||
1032 | return 0; | ||
1033 | } | ||
1034 | |||
1035 | fw_send_request(dev->card, &ptask->transaction, | ||
1036 | TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node, | ||
1037 | ptask->generation, ptask->speed, ptask->fifo_addr, | ||
1038 | ptask->skb->data, tx_len, fwnet_write_complete, ptask); | ||
1039 | |||
1040 | /* FIXME race? */ | ||
1041 | spin_lock_irqsave(&dev->lock, flags); | ||
1042 | list_add_tail(&ptask->pt_link, &dev->sent_list); | ||
1043 | spin_unlock_irqrestore(&dev->lock, flags); | ||
1044 | |||
1045 | dev->netdev->trans_start = jiffies; | ||
1046 | |||
1047 | return 0; | ||
1048 | } | ||
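For broadcasts, the eight bytes pushed in front of the encapsulation header form the GASP header: the source node ID plus the upper 16 bits of the specifier ID in the first quadlet, the remaining specifier byte plus the RFC 2734 software version in the second. A small stand-alone sketch of the same bit packing, using the IANA specifier ID 0x00005e and version 0x000001 advertised in the unit directory further down, and a hypothetical node ID:

#include <stdio.h>

#define IANA_SPECIFIER_ID	0x00005eu	/* from rfc2374_unit_directory_data */
#define RFC2734_SW_VERSION	0x000001u

int main(void)
{
	unsigned node_id = 0xffc0;	/* hypothetical bus/node ID */
	unsigned q0 = node_id << 16 | IANA_SPECIFIER_ID >> 8;
	unsigned q1 = (IANA_SPECIFIER_ID & 0xff) << 24 | RFC2734_SW_VERSION;

	/* prints "ffc00000 5e000001" for this node ID */
	printf("%08x %08x\n", q0, q1);
	return 0;
}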
1049 | |||
1050 | static int fwnet_broadcast_start(struct fwnet_device *dev) | ||
1051 | { | ||
1052 | struct fw_iso_context *context; | ||
1053 | int retval; | ||
1054 | unsigned num_packets; | ||
1055 | unsigned max_receive; | ||
1056 | struct fw_iso_packet packet; | ||
1057 | unsigned long offset; | ||
1058 | unsigned u; | ||
1059 | |||
1060 | if (dev->local_fifo == FWNET_NO_FIFO_ADDR) { | ||
1061 | /* outside OHCI posted write area? */ | ||
1062 | static const struct fw_address_region region = { | ||
1063 | .start = 0xffff00000000ULL, | ||
1064 | .end = CSR_REGISTER_BASE, | ||
1065 | }; | ||
1066 | |||
1067 | dev->handler.length = 4096; | ||
1068 | dev->handler.address_callback = fwnet_receive_packet; | ||
1069 | dev->handler.callback_data = dev; | ||
1070 | |||
1071 | retval = fw_core_add_address_handler(&dev->handler, ®ion); | ||
1072 | if (retval < 0) | ||
1073 | goto failed_initial; | ||
1074 | |||
1075 | dev->local_fifo = dev->handler.offset; | ||
1076 | } | ||
1077 | |||
1078 | max_receive = 1U << (dev->card->max_receive + 1); | ||
1079 | num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive; | ||
1080 | |||
1081 | if (!dev->broadcast_rcv_context) { | ||
1082 | void **ptrptr; | ||
1083 | |||
1084 | context = fw_iso_context_create(dev->card, | ||
1085 | FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL, | ||
1086 | dev->card->link_speed, 8, fwnet_receive_broadcast, dev); | ||
1087 | if (IS_ERR(context)) { | ||
1088 | retval = PTR_ERR(context); | ||
1089 | goto failed_context_create; | ||
1090 | } | ||
1091 | |||
1092 | retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, | ||
1093 | dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE); | ||
1094 | if (retval < 0) | ||
1095 | goto failed_buffer_init; | ||
1096 | |||
1097 | ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL); | ||
1098 | if (!ptrptr) { | ||
1099 | retval = -ENOMEM; | ||
1100 | goto failed_ptrs_alloc; | ||
1101 | } | ||
1102 | |||
1103 | dev->broadcast_rcv_buffer_ptrs = ptrptr; | ||
1104 | for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) { | ||
1105 | void *ptr; | ||
1106 | unsigned v; | ||
1107 | |||
1108 | ptr = kmap(dev->broadcast_rcv_buffer.pages[u]); | ||
1109 | for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++) | ||
1110 | *ptrptr++ = (void *) | ||
1111 | ((char *)ptr + v * max_receive); | ||
1112 | } | ||
1113 | dev->broadcast_rcv_context = context; | ||
1114 | } else { | ||
1115 | context = dev->broadcast_rcv_context; | ||
1116 | } | ||
1117 | |||
1118 | packet.payload_length = max_receive; | ||
1119 | packet.interrupt = 1; | ||
1120 | packet.skip = 0; | ||
1121 | packet.tag = 3; | ||
1122 | packet.sy = 0; | ||
1123 | packet.header_length = IEEE1394_GASP_HDR_SIZE; | ||
1124 | offset = 0; | ||
1125 | |||
1126 | for (u = 0; u < num_packets; u++) { | ||
1127 | retval = fw_iso_context_queue(context, &packet, | ||
1128 | &dev->broadcast_rcv_buffer, offset); | ||
1129 | if (retval < 0) | ||
1130 | goto failed_rcv_queue; | ||
1131 | |||
1132 | offset += max_receive; | ||
1133 | } | ||
1134 | dev->num_broadcast_rcv_ptrs = num_packets; | ||
1135 | dev->rcv_buffer_size = max_receive; | ||
1136 | dev->broadcast_rcv_next_ptr = 0U; | ||
1137 | retval = fw_iso_context_start(context, -1, 0, | ||
1138 | FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */ | ||
1139 | if (retval < 0) | ||
1140 | goto failed_rcv_queue; | ||
1141 | |||
1142 | /* FIXME: adjust it according to the min. speed of all known peers? */ | ||
1143 | dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100 | ||
1144 | - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE; | ||
1145 | dev->broadcast_state = FWNET_BROADCAST_RUNNING; | ||
1146 | |||
1147 | return 0; | ||
1148 | |||
1149 | failed_rcv_queue: | ||
1150 | kfree(dev->broadcast_rcv_buffer_ptrs); | ||
1151 | dev->broadcast_rcv_buffer_ptrs = NULL; | ||
1152 | failed_ptrs_alloc: | ||
1153 | fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card); | ||
1154 | failed_buffer_init: | ||
1155 | fw_iso_context_destroy(context); | ||
1156 | dev->broadcast_rcv_context = NULL; | ||
1157 | failed_context_create: | ||
1158 | fw_core_remove_address_handler(&dev->handler); | ||
1159 | failed_initial: | ||
1160 | dev->local_fifo = FWNET_NO_FIFO_ADDR; | ||
1161 | |||
1162 | return retval; | ||
1163 | } | ||
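fwnet_broadcast_start() sizes the receive ring from just two quantities: the card's max_receive field (packets of up to 1 << (max_receive + 1) bytes) and a fixed page count for the iso buffer; on common configurations every page then holds a whole number of receive slots. A quick stand-alone check of that arithmetic, with hypothetical values for the page count and for card->max_receive:

#include <stdio.h>

int main(void)
{
	unsigned page_size    = 4096;	/* assume 4 KiB pages */
	unsigned page_count   = 16;	/* hypothetical FWNET_ISO_PAGE_COUNT */
	unsigned card_max_rec = 11;	/* hypothetical card->max_receive */

	unsigned max_receive = 1u << (card_max_rec + 1);	/* 4096 bytes */
	unsigned num_packets = page_count * page_size / max_receive;
	unsigned per_page    = num_packets / page_count;

	/* prints "16 slots of 4096 bytes, 1 per page" for these values */
	printf("%u slots of %u bytes, %u per page\n",
	       num_packets, max_receive, per_page);
	return 0;
}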
1164 | |||
1165 | /* ifup */ | ||
1166 | static int fwnet_open(struct net_device *net) | ||
1167 | { | ||
1168 | struct fwnet_device *dev = netdev_priv(net); | ||
1169 | int ret; | ||
1170 | |||
1171 | if (dev->broadcast_state == FWNET_BROADCAST_ERROR) { | ||
1172 | ret = fwnet_broadcast_start(dev); | ||
1173 | if (ret) | ||
1174 | return ret; | ||
1175 | } | ||
1176 | netif_start_queue(net); | ||
1177 | |||
1178 | return 0; | ||
1179 | } | ||
1180 | |||
1181 | /* ifdown */ | ||
1182 | static int fwnet_stop(struct net_device *net) | ||
1183 | { | ||
1184 | netif_stop_queue(net); | ||
1185 | |||
1186 | /* Deallocate iso context for use by other applications? */ | ||
1187 | |||
1188 | return 0; | ||
1189 | } | ||
1190 | |||
1191 | static int fwnet_tx(struct sk_buff *skb, struct net_device *net) | ||
1192 | { | ||
1193 | struct fwnet_header hdr_buf; | ||
1194 | struct fwnet_device *dev = netdev_priv(net); | ||
1195 | __be16 proto; | ||
1196 | u16 dest_node; | ||
1197 | unsigned max_payload; | ||
1198 | u16 dg_size; | ||
1199 | u16 *datagram_label_ptr; | ||
1200 | struct fwnet_packet_task *ptask; | ||
1201 | struct fwnet_peer *peer; | ||
1202 | unsigned long flags; | ||
1203 | |||
1204 | ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); | ||
1205 | if (ptask == NULL) | ||
1206 | goto fail; | ||
1207 | |||
1208 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
1209 | if (!skb) | ||
1210 | goto fail; | ||
1211 | |||
1212 | /* | ||
1213 | * Make a copy of the driver-specific header. | ||
1214 | * We might need to rebuild the header on tx failure. | ||
1215 | */ | ||
1216 | memcpy(&hdr_buf, skb->data, sizeof(hdr_buf)); | ||
1217 | skb_pull(skb, sizeof(hdr_buf)); | ||
1218 | |||
1219 | proto = hdr_buf.h_proto; | ||
1220 | dg_size = skb->len; | ||
1221 | |||
1222 | /* serialize access to peer, including peer->datagram_label */ | ||
1223 | spin_lock_irqsave(&dev->lock, flags); | ||
1224 | |||
1225 | /* | ||
1226 | * Set the transmission type for the packet. ARP packets and IP | ||
1227 | * broadcast packets are sent via GASP. | ||
1228 | */ | ||
1229 | if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0 | ||
1230 | || proto == htons(ETH_P_ARP) | ||
1231 | || (proto == htons(ETH_P_IP) | ||
1232 | && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) { | ||
1233 | max_payload = dev->broadcast_xmt_max_payload; | ||
1234 | datagram_label_ptr = &dev->broadcast_xmt_datagramlabel; | ||
1235 | |||
1236 | ptask->fifo_addr = FWNET_NO_FIFO_ADDR; | ||
1237 | ptask->generation = 0; | ||
1238 | ptask->dest_node = IEEE1394_ALL_NODES; | ||
1239 | ptask->speed = SCODE_100; | ||
1240 | } else { | ||
1241 | __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest); | ||
1242 | u8 generation; | ||
1243 | |||
1244 | peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); | ||
1245 | if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR) | ||
1246 | goto fail_unlock; | ||
1247 | |||
1248 | generation = peer->generation; | ||
1249 | dest_node = peer->node_id; | ||
1250 | max_payload = peer->max_payload; | ||
1251 | datagram_label_ptr = &peer->datagram_label; | ||
1252 | |||
1253 | ptask->fifo_addr = peer->fifo; | ||
1254 | ptask->generation = generation; | ||
1255 | ptask->dest_node = dest_node; | ||
1256 | ptask->speed = peer->speed; | ||
1257 | } | ||
1258 | |||
1259 | /* If this is an ARP packet, convert it */ | ||
1260 | if (proto == htons(ETH_P_ARP)) { | ||
1261 | struct arphdr *arp = (struct arphdr *)skb->data; | ||
1262 | unsigned char *arp_ptr = (unsigned char *)(arp + 1); | ||
1263 | struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data; | ||
1264 | __be32 ipaddr; | ||
1265 | |||
1266 | ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN)); | ||
1267 | |||
1268 | arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN; | ||
1269 | arp1394->max_rec = dev->card->max_receive; | ||
1270 | arp1394->sspd = dev->card->link_speed; | ||
1271 | |||
1272 | put_unaligned_be16(dev->local_fifo >> 32, | ||
1273 | &arp1394->fifo_hi); | ||
1274 | put_unaligned_be32(dev->local_fifo & 0xffffffff, | ||
1275 | &arp1394->fifo_lo); | ||
1276 | put_unaligned(ipaddr, &arp1394->sip); | ||
1277 | } | ||
1278 | |||
1279 | ptask->hdr.w0 = 0; | ||
1280 | ptask->hdr.w1 = 0; | ||
1281 | ptask->skb = skb; | ||
1282 | ptask->dev = dev; | ||
1283 | |||
1284 | /* Does it all fit in one packet? */ | ||
1285 | if (dg_size <= max_payload) { | ||
1286 | fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto)); | ||
1287 | ptask->outstanding_pkts = 1; | ||
1288 | max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE; | ||
1289 | } else { | ||
1290 | u16 datagram_label; | ||
1291 | |||
1292 | max_payload -= RFC2374_FRAG_OVERHEAD; | ||
1293 | datagram_label = (*datagram_label_ptr)++; | ||
1294 | fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size, | ||
1295 | datagram_label); | ||
1296 | ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload); | ||
1297 | max_payload += RFC2374_FRAG_HDR_SIZE; | ||
1298 | } | ||
1299 | |||
1300 | spin_unlock_irqrestore(&dev->lock, flags); | ||
1301 | |||
1302 | ptask->max_payload = max_payload; | ||
1303 | fwnet_send_packet(ptask); | ||
1304 | |||
1305 | return NETDEV_TX_OK; | ||
1306 | |||
1307 | fail_unlock: | ||
1308 | spin_unlock_irqrestore(&dev->lock, flags); | ||
1309 | fail: | ||
1310 | if (ptask) | ||
1311 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
1312 | |||
1313 | if (skb != NULL) | ||
1314 | dev_kfree_skb(skb); | ||
1315 | |||
1316 | net->stats.tx_dropped++; | ||
1317 | net->stats.tx_errors++; | ||
1318 | |||
1319 | /* | ||
1320 | * FIXME: According to a patch from 2003-02-26, "returning non-zero | ||
1321 | * causes serious problems" here, allegedly. Before that patch, | ||
1322 | * -ERRNO was returned which is not appropriate under Linux 2.6. | ||
1323 | * Perhaps more needs to be done? Stop the queue in serious | ||
1324 | * conditions and restart it elsewhere? | ||
1325 | */ | ||
1326 | return NETDEV_TX_OK; | ||
1327 | } | ||
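In the fragmented branch above, max_payload is first reduced by RFC2374_FRAG_OVERHEAD to get the usable data bytes per fragment, the fragment count comes from DIV_ROUND_UP(), and the fragment header size is then added back so that max_payload again describes the full on-wire packet. A stand-alone sketch of the same arithmetic, with hypothetical sizes standing in for the driver's constants:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Hypothetical illustration values, not the driver's real constants. */
#define FRAG_OVERHEAD	4
#define FRAG_HDR_SIZE	8

int main(void)
{
	unsigned dg_size     = 3000;	/* IP datagram size */
	unsigned max_payload = 1024;	/* per-peer 1394 packet budget */
	unsigned npkts;

	max_payload -= FRAG_OVERHEAD;			/* data per fragment */
	npkts = DIV_ROUND_UP(dg_size, max_payload);	/* 3 fragments */
	max_payload += FRAG_HDR_SIZE;			/* full packet size */

	printf("%u fragments, up to %u bytes each\n", npkts, max_payload);
	return 0;
}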
1328 | |||
1329 | static int fwnet_change_mtu(struct net_device *net, int new_mtu) | ||
1330 | { | ||
1331 | if (new_mtu < 68) | ||
1332 | return -EINVAL; | ||
1333 | |||
1334 | net->mtu = new_mtu; | ||
1335 | return 0; | ||
1336 | } | ||
1337 | |||
1338 | static void fwnet_get_drvinfo(struct net_device *net, | ||
1339 | struct ethtool_drvinfo *info) | ||
1340 | { | ||
1341 | strcpy(info->driver, KBUILD_MODNAME); | ||
1342 | strcpy(info->bus_info, "ieee1394"); | ||
1343 | } | ||
1344 | |||
1345 | static struct ethtool_ops fwnet_ethtool_ops = { | ||
1346 | .get_drvinfo = fwnet_get_drvinfo, | ||
1347 | }; | ||
1348 | |||
1349 | static const struct net_device_ops fwnet_netdev_ops = { | ||
1350 | .ndo_open = fwnet_open, | ||
1351 | .ndo_stop = fwnet_stop, | ||
1352 | .ndo_start_xmit = fwnet_tx, | ||
1353 | .ndo_change_mtu = fwnet_change_mtu, | ||
1354 | }; | ||
1355 | |||
1356 | static void fwnet_init_dev(struct net_device *net) | ||
1357 | { | ||
1358 | net->header_ops = &fwnet_header_ops; | ||
1359 | net->netdev_ops = &fwnet_netdev_ops; | ||
1360 | net->watchdog_timeo = 2 * HZ; | ||
1361 | net->flags = IFF_BROADCAST | IFF_MULTICAST; | ||
1362 | net->features = NETIF_F_HIGHDMA; | ||
1363 | net->addr_len = FWNET_ALEN; | ||
1364 | net->hard_header_len = FWNET_HLEN; | ||
1365 | net->type = ARPHRD_IEEE1394; | ||
1366 | net->tx_queue_len = 10; | ||
1367 | SET_ETHTOOL_OPS(net, &fwnet_ethtool_ops); | ||
1368 | } | ||
1369 | |||
1370 | /* caller must hold fwnet_device_mutex */ | ||
1371 | static struct fwnet_device *fwnet_dev_find(struct fw_card *card) | ||
1372 | { | ||
1373 | struct fwnet_device *dev; | ||
1374 | |||
1375 | list_for_each_entry(dev, &fwnet_device_list, dev_link) | ||
1376 | if (dev->card == card) | ||
1377 | return dev; | ||
1378 | |||
1379 | return NULL; | ||
1380 | } | ||
1381 | |||
1382 | static int fwnet_add_peer(struct fwnet_device *dev, | ||
1383 | struct fw_unit *unit, struct fw_device *device) | ||
1384 | { | ||
1385 | struct fwnet_peer *peer; | ||
1386 | |||
1387 | peer = kmalloc(sizeof(*peer), GFP_KERNEL); | ||
1388 | if (!peer) | ||
1389 | return -ENOMEM; | ||
1390 | |||
1391 | dev_set_drvdata(&unit->device, peer); | ||
1392 | |||
1393 | peer->dev = dev; | ||
1394 | peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; | ||
1395 | peer->fifo = FWNET_NO_FIFO_ADDR; | ||
1396 | INIT_LIST_HEAD(&peer->pd_list); | ||
1397 | peer->pdg_size = 0; | ||
1398 | peer->datagram_label = 0; | ||
1399 | peer->speed = device->max_speed; | ||
1400 | peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed); | ||
1401 | |||
1402 | peer->generation = device->generation; | ||
1403 | smp_rmb(); | ||
1404 | peer->node_id = device->node_id; | ||
1405 | |||
1406 | spin_lock_irq(&dev->lock); | ||
1407 | list_add_tail(&peer->peer_link, &dev->peer_list); | ||
1408 | spin_unlock_irq(&dev->lock); | ||
1409 | |||
1410 | return 0; | ||
1411 | } | ||
1412 | |||
1413 | static int fwnet_probe(struct device *_dev) | ||
1414 | { | ||
1415 | struct fw_unit *unit = fw_unit(_dev); | ||
1416 | struct fw_device *device = fw_parent_device(unit); | ||
1417 | struct fw_card *card = device->card; | ||
1418 | struct net_device *net; | ||
1419 | bool allocated_netdev = false; | ||
1420 | struct fwnet_device *dev; | ||
1421 | unsigned max_mtu; | ||
1422 | int ret; | ||
1423 | |||
1424 | mutex_lock(&fwnet_device_mutex); | ||
1425 | |||
1426 | dev = fwnet_dev_find(card); | ||
1427 | if (dev) { | ||
1428 | net = dev->netdev; | ||
1429 | goto have_dev; | ||
1430 | } | ||
1431 | |||
1432 | net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev); | ||
1433 | if (net == NULL) { | ||
1434 | ret = -ENOMEM; | ||
1435 | goto out; | ||
1436 | } | ||
1437 | |||
1438 | allocated_netdev = true; | ||
1439 | SET_NETDEV_DEV(net, card->device); | ||
1440 | dev = netdev_priv(net); | ||
1441 | |||
1442 | spin_lock_init(&dev->lock); | ||
1443 | dev->broadcast_state = FWNET_BROADCAST_ERROR; | ||
1444 | dev->broadcast_rcv_context = NULL; | ||
1445 | dev->broadcast_xmt_max_payload = 0; | ||
1446 | dev->broadcast_xmt_datagramlabel = 0; | ||
1447 | |||
1448 | dev->local_fifo = FWNET_NO_FIFO_ADDR; | ||
1449 | |||
1450 | INIT_LIST_HEAD(&dev->packet_list); | ||
1451 | INIT_LIST_HEAD(&dev->broadcasted_list); | ||
1452 | INIT_LIST_HEAD(&dev->sent_list); | ||
1453 | INIT_LIST_HEAD(&dev->peer_list); | ||
1454 | |||
1455 | dev->card = card; | ||
1456 | dev->netdev = net; | ||
1457 | |||
1458 | /* | ||
1459 | * Use the RFC 2734 default 1500 octets or the maximum payload | ||
1460 | * as initial MTU | ||
1461 | */ | ||
1462 | max_mtu = (1 << (card->max_receive + 1)) | ||
1463 | - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE; | ||
1464 | net->mtu = min(1500U, max_mtu); | ||
1465 | |||
1466 | /* Set our hardware address while we're at it */ | ||
1467 | put_unaligned_be64(card->guid, net->dev_addr); | ||
1468 | put_unaligned_be64(~0ULL, net->broadcast); | ||
1469 | ret = register_netdev(net); | ||
1470 | if (ret) { | ||
1471 | fw_error("Cannot register the driver\n"); | ||
1472 | goto out; | ||
1473 | } | ||
1474 | |||
1475 | list_add_tail(&dev->dev_link, &fwnet_device_list); | ||
1476 | fw_notify("%s: IPv4 over FireWire on device %016llx\n", | ||
1477 | net->name, (unsigned long long)card->guid); | ||
1478 | have_dev: | ||
1479 | ret = fwnet_add_peer(dev, unit, device); | ||
1480 | if (ret && allocated_netdev) { | ||
1481 | unregister_netdev(net); | ||
1482 | list_del(&dev->dev_link); | ||
1483 | } | ||
1484 | out: | ||
1485 | if (ret && allocated_netdev) | ||
1486 | free_netdev(net); | ||
1487 | |||
1488 | mutex_unlock(&fwnet_device_mutex); | ||
1489 | |||
1490 | return ret; | ||
1491 | } | ||
1492 | |||
1493 | static void fwnet_remove_peer(struct fwnet_peer *peer) | ||
1494 | { | ||
1495 | struct fwnet_partial_datagram *pd, *pd_next; | ||
1496 | |||
1497 | spin_lock_irq(&peer->dev->lock); | ||
1498 | list_del(&peer->peer_link); | ||
1499 | spin_unlock_irq(&peer->dev->lock); | ||
1500 | |||
1501 | list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link) | ||
1502 | fwnet_pd_delete(pd); | ||
1503 | |||
1504 | kfree(peer); | ||
1505 | } | ||
1506 | |||
1507 | static int fwnet_remove(struct device *_dev) | ||
1508 | { | ||
1509 | struct fwnet_peer *peer = dev_get_drvdata(_dev); | ||
1510 | struct fwnet_device *dev = peer->dev; | ||
1511 | struct net_device *net; | ||
1512 | struct fwnet_packet_task *ptask, *pt_next; | ||
1513 | |||
1514 | mutex_lock(&fwnet_device_mutex); | ||
1515 | |||
1516 | fwnet_remove_peer(peer); | ||
1517 | |||
1518 | if (list_empty(&dev->peer_list)) { | ||
1519 | net = dev->netdev; | ||
1520 | unregister_netdev(net); | ||
1521 | |||
1522 | if (dev->local_fifo != FWNET_NO_FIFO_ADDR) | ||
1523 | fw_core_remove_address_handler(&dev->handler); | ||
1524 | if (dev->broadcast_rcv_context) { | ||
1525 | fw_iso_context_stop(dev->broadcast_rcv_context); | ||
1526 | fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, | ||
1527 | dev->card); | ||
1528 | fw_iso_context_destroy(dev->broadcast_rcv_context); | ||
1529 | } | ||
1530 | list_for_each_entry_safe(ptask, pt_next, | ||
1531 | &dev->packet_list, pt_link) { | ||
1532 | dev_kfree_skb_any(ptask->skb); | ||
1533 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
1534 | } | ||
1535 | list_for_each_entry_safe(ptask, pt_next, | ||
1536 | &dev->broadcasted_list, pt_link) { | ||
1537 | dev_kfree_skb_any(ptask->skb); | ||
1538 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
1539 | } | ||
1540 | list_for_each_entry_safe(ptask, pt_next, | ||
1541 | &dev->sent_list, pt_link) { | ||
1542 | dev_kfree_skb_any(ptask->skb); | ||
1543 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
1544 | } | ||
1545 | list_del(&dev->dev_link); | ||
1546 | |||
1547 | free_netdev(net); | ||
1548 | } | ||
1549 | |||
1550 | mutex_unlock(&fwnet_device_mutex); | ||
1551 | |||
1552 | return 0; | ||
1553 | } | ||
1554 | |||
1555 | /* | ||
1556 | * FIXME abort partially sent fragmented datagrams, | ||
1557 | * discard partially received fragmented datagrams | ||
1558 | */ | ||
1559 | static void fwnet_update(struct fw_unit *unit) | ||
1560 | { | ||
1561 | struct fw_device *device = fw_parent_device(unit); | ||
1562 | struct fwnet_peer *peer = dev_get_drvdata(&unit->device); | ||
1563 | int generation; | ||
1564 | |||
1565 | generation = device->generation; | ||
1566 | |||
1567 | spin_lock_irq(&peer->dev->lock); | ||
1568 | peer->node_id = device->node_id; | ||
1569 | peer->generation = generation; | ||
1570 | spin_unlock_irq(&peer->dev->lock); | ||
1571 | } | ||
1572 | |||
1573 | static const struct ieee1394_device_id fwnet_id_table[] = { | ||
1574 | { | ||
1575 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | | ||
1576 | IEEE1394_MATCH_VERSION, | ||
1577 | .specifier_id = IANA_SPECIFIER_ID, | ||
1578 | .version = RFC2734_SW_VERSION, | ||
1579 | }, | ||
1580 | { } | ||
1581 | }; | ||
1582 | |||
1583 | static struct fw_driver fwnet_driver = { | ||
1584 | .driver = { | ||
1585 | .owner = THIS_MODULE, | ||
1586 | .name = "net", | ||
1587 | .bus = &fw_bus_type, | ||
1588 | .probe = fwnet_probe, | ||
1589 | .remove = fwnet_remove, | ||
1590 | }, | ||
1591 | .update = fwnet_update, | ||
1592 | .id_table = fwnet_id_table, | ||
1593 | }; | ||
1594 | |||
1595 | static const u32 rfc2374_unit_directory_data[] = { | ||
1596 | 0x00040000, /* directory_length */ | ||
1597 | 0x1200005e, /* unit_specifier_id: IANA */ | ||
1598 | 0x81000003, /* textual descriptor offset */ | ||
1599 | 0x13000001, /* unit_sw_version: RFC 2734 */ | ||
1600 | 0x81000005, /* textual descriptor offset */ | ||
1601 | 0x00030000, /* descriptor_length */ | ||
1602 | 0x00000000, /* text */ | ||
1603 | 0x00000000, /* minimal ASCII, en */ | ||
1604 | 0x49414e41, /* I A N A */ | ||
1605 | 0x00030000, /* descriptor_length */ | ||
1606 | 0x00000000, /* text */ | ||
1607 | 0x00000000, /* minimal ASCII, en */ | ||
1608 | 0x49507634, /* I P v 4 */ | ||
1609 | }; | ||
1610 | |||
1611 | static struct fw_descriptor rfc2374_unit_directory = { | ||
1612 | .length = ARRAY_SIZE(rfc2374_unit_directory_data), | ||
1613 | .key = (CSR_DIRECTORY | CSR_UNIT) << 24, | ||
1614 | .data = rfc2374_unit_directory_data | ||
1615 | }; | ||
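Each quadlet in the unit directory above is a standard CSR configuration ROM entry: an 8-bit key in the top byte and a 24-bit value below it. Key 0x12 is the unit specifier ID, 0x13 the unit software version, and 0x81 an offset, counted in quadlets from the entry itself, to a textual descriptor leaf; the 0x81000003 entry therefore points three quadlets ahead, at the first 0x00030000 descriptor header. A tiny stand-alone decoder for those entries:

#include <stdio.h>

int main(void)
{
	/* The immediate and offset entries from rfc2374_unit_directory_data */
	unsigned entries[] = { 0x1200005e, 0x81000003, 0x13000001, 0x81000005 };
	unsigned i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
		unsigned key   = entries[i] >> 24;	/* 8-bit key */
		unsigned value = entries[i] & 0xffffff;	/* 24-bit value */
		printf("key 0x%02x value 0x%06x\n", key, value);
	}
	return 0;
}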
1616 | |||
1617 | static int __init fwnet_init(void) | ||
1618 | { | ||
1619 | int err; | ||
1620 | |||
1621 | err = fw_core_add_descriptor(&rfc2374_unit_directory); | ||
1622 | if (err) | ||
1623 | return err; | ||
1624 | |||
1625 | fwnet_packet_task_cache = kmem_cache_create("packet_task", | ||
1626 | sizeof(struct fwnet_packet_task), 0, 0, NULL); | ||
1627 | if (!fwnet_packet_task_cache) { | ||
1628 | err = -ENOMEM; | ||
1629 | goto out; | ||
1630 | } | ||
1631 | |||
1632 | err = driver_register(&fwnet_driver.driver); | ||
1633 | if (!err) | ||
1634 | return 0; | ||
1635 | |||
1636 | kmem_cache_destroy(fwnet_packet_task_cache); | ||
1637 | out: | ||
1638 | fw_core_remove_descriptor(&rfc2374_unit_directory); | ||
1639 | |||
1640 | return err; | ||
1641 | } | ||
1642 | module_init(fwnet_init); | ||
1643 | |||
1644 | static void __exit fwnet_cleanup(void) | ||
1645 | { | ||
1646 | driver_unregister(&fwnet_driver.driver); | ||
1647 | kmem_cache_destroy(fwnet_packet_task_cache); | ||
1648 | fw_core_remove_descriptor(&rfc2374_unit_directory); | ||
1649 | } | ||
1650 | module_exit(fwnet_cleanup); | ||
1651 | |||
1652 | MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>"); | ||
1653 | MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734"); | ||
1654 | MODULE_LICENSE("GPL"); | ||
1655 | MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table); | ||
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig index 95f45f9b8e5e..f102fcc7e52a 100644 --- a/drivers/ieee1394/Kconfig +++ b/drivers/ieee1394/Kconfig | |||
@@ -4,7 +4,7 @@ menu "IEEE 1394 (FireWire) support" | |||
4 | source "drivers/firewire/Kconfig" | 4 | source "drivers/firewire/Kconfig" |
5 | 5 | ||
6 | config IEEE1394 | 6 | config IEEE1394 |
7 | tristate "Stable FireWire stack" | 7 | tristate "Legacy alternative FireWire driver stack" |
8 | depends on PCI || BROKEN | 8 | depends on PCI || BROKEN |
9 | help | 9 | help |
10 | IEEE 1394 describes a high performance serial bus, which is also | 10 | IEEE 1394 describes a high performance serial bus, which is also |
@@ -33,11 +33,9 @@ config IEEE1394_OHCI1394 | |||
33 | module will be called ohci1394. | 33 | module will be called ohci1394. |
34 | 34 | ||
35 | NOTE: | 35 | NOTE: |
36 | 36 | If you want to install firewire-ohci and ohci1394 together, you | |
37 | You should only build either ohci1394 or the new firewire-ohci driver, | 37 | should configure them only as modules and blacklist the driver(s) |
38 | but not both. If you nevertheless want to install both, you should | 38 | which you don't want to have auto-loaded. Add either |
39 | configure them only as modules and blacklist the driver(s) which you | ||
40 | don't want to have auto-loaded. Add either | ||
41 | 39 | ||
42 | blacklist firewire-ohci | 40 | blacklist firewire-ohci |
43 | or | 41 | or |
@@ -46,12 +44,7 @@ config IEEE1394_OHCI1394 | |||
46 | blacklist dv1394 | 44 | blacklist dv1394 |
47 | 45 | ||
48 | to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf | 46 | to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf |
49 | depending on your distribution. The latter two modules should be | 47 | depending on your distribution. |
50 | blacklisted together with ohci1394 because they depend on ohci1394. | ||
51 | |||
52 | If you have an old modprobe which doesn't implement the blacklist | ||
53 | directive, use "install modulename /bin/true" for the modules to be | ||
54 | blacklisted. | ||
55 | 48 | ||
56 | comment "PCILynx controller requires I2C" | 49 | comment "PCILynx controller requires I2C" |
57 | depends on IEEE1394 && I2C=n | 50 | depends on IEEE1394 && I2C=n |
@@ -105,7 +98,7 @@ config IEEE1394_ETH1394_ROM_ENTRY | |||
105 | default n | 98 | default n |
106 | 99 | ||
107 | config IEEE1394_ETH1394 | 100 | config IEEE1394_ETH1394 |
108 | tristate "IP over 1394" | 101 | tristate "IP networking over 1394 (experimental)" |
109 | depends on IEEE1394 && EXPERIMENTAL && INET | 102 | depends on IEEE1394 && EXPERIMENTAL && INET |
110 | select IEEE1394_ETH1394_ROM_ENTRY | 103 | select IEEE1394_ETH1394_ROM_ENTRY |
111 | help | 104 | help |
diff --git a/include/linux/firewire.h b/include/linux/firewire.h index e584b7215e8b..9823946adbc5 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/completion.h> | 4 | #include <linux/completion.h> |
5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
6 | #include <linux/dma-mapping.h> | ||
6 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
7 | #include <linux/kref.h> | 8 | #include <linux/kref.h> |
8 | #include <linux/list.h> | 9 | #include <linux/list.h> |
@@ -355,4 +356,90 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, | |||
355 | int generation, int speed, unsigned long long offset, | 356 | int generation, int speed, unsigned long long offset, |
356 | void *payload, size_t length); | 357 | void *payload, size_t length); |
357 | 358 | ||
359 | static inline int fw_stream_packet_destination_id(int tag, int channel, int sy) | ||
360 | { | ||
361 | return tag << 14 | channel << 8 | sy; | ||
362 | } | ||
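fw_stream_packet_destination_id() packs the tag, channel and sy fields into the destination-ID position of an asynchronous stream packet; net.c above uses it to address GASP broadcasts sent with TCODE_STREAM_DATA. A quick stand-alone check with the values used in fwnet_send_packet() (tag 3, the broadcast channel 31, sy 0):

#include <stdio.h>

static int fw_stream_packet_destination_id(int tag, int channel, int sy)
{
	return tag << 14 | channel << 8 | sy;
}

int main(void)
{
	/* tag 3, channel 31, sy 0, as used for GASP broadcasts: prints df00 */
	printf("%04x\n", fw_stream_packet_destination_id(3, 31, 0));
	return 0;
}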
363 | |||
364 | struct fw_descriptor { | ||
365 | struct list_head link; | ||
366 | size_t length; | ||
367 | u32 immediate; | ||
368 | u32 key; | ||
369 | const u32 *data; | ||
370 | }; | ||
371 | |||
372 | int fw_core_add_descriptor(struct fw_descriptor *desc); | ||
373 | void fw_core_remove_descriptor(struct fw_descriptor *desc); | ||
374 | |||
375 | /* | ||
376 | * The iso packet format allows for an immediate header/payload part | ||
377 | * stored in 'header' immediately after the packet info plus an | ||
378 | * indirect payload part that is pointed to by the 'payload' field. | ||
379 | * Applications can use one or the other or both to implement simple | ||
380 | * low-bandwidth streaming (e.g. audio) or more advanced | ||
381 | * scatter-gather streaming (e.g. assembling video frames automatically). | ||
382 | */ | ||
383 | struct fw_iso_packet { | ||
384 | u16 payload_length; /* Length of indirect payload. */ | ||
385 | u32 interrupt:1; /* Generate interrupt on this packet */ | ||
386 | u32 skip:1; /* Set to not send packet at all. */ | ||
387 | u32 tag:2; | ||
388 | u32 sy:4; | ||
389 | u32 header_length:8; /* Length of immediate header. */ | ||
390 | u32 header[0]; | ||
391 | }; | ||
392 | |||
393 | #define FW_ISO_CONTEXT_TRANSMIT 0 | ||
394 | #define FW_ISO_CONTEXT_RECEIVE 1 | ||
395 | |||
396 | #define FW_ISO_CONTEXT_MATCH_TAG0 1 | ||
397 | #define FW_ISO_CONTEXT_MATCH_TAG1 2 | ||
398 | #define FW_ISO_CONTEXT_MATCH_TAG2 4 | ||
399 | #define FW_ISO_CONTEXT_MATCH_TAG3 8 | ||
400 | #define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15 | ||
401 | |||
402 | /* | ||
403 | * An iso buffer is just a set of pages mapped for DMA in the | ||
404 | * specified direction. Since the pages are to be used for DMA, they | ||
405 | * are not mapped into the kernel virtual address space. We store the | ||
406 | * DMA address in each page's private field. The helper function | ||
407 | * fw_iso_buffer_map() will map the pages into a given vma. | ||
408 | */ | ||
409 | struct fw_iso_buffer { | ||
410 | enum dma_data_direction direction; | ||
411 | struct page **pages; | ||
412 | int page_count; | ||
413 | }; | ||
414 | |||
415 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, | ||
416 | int page_count, enum dma_data_direction direction); | ||
417 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); | ||
418 | |||
419 | struct fw_iso_context; | ||
420 | typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, | ||
421 | u32 cycle, size_t header_length, | ||
422 | void *header, void *data); | ||
423 | struct fw_iso_context { | ||
424 | struct fw_card *card; | ||
425 | int type; | ||
426 | int channel; | ||
427 | int speed; | ||
428 | size_t header_size; | ||
429 | fw_iso_callback_t callback; | ||
430 | void *callback_data; | ||
431 | }; | ||
432 | |||
433 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | ||
434 | int type, int channel, int speed, size_t header_size, | ||
435 | fw_iso_callback_t callback, void *callback_data); | ||
436 | int fw_iso_context_queue(struct fw_iso_context *ctx, | ||
437 | struct fw_iso_packet *packet, | ||
438 | struct fw_iso_buffer *buffer, | ||
439 | unsigned long payload); | ||
440 | int fw_iso_context_start(struct fw_iso_context *ctx, | ||
441 | int cycle, int sync, int tags); | ||
442 | int fw_iso_context_stop(struct fw_iso_context *ctx); | ||
443 | void fw_iso_context_destroy(struct fw_iso_context *ctx); | ||
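Taken together, these declarations give the in-kernel isochronous receive flow that net.c exercises in fwnet_broadcast_start(): create a context, map a DMA-able buffer, queue one packet descriptor per receive slot, then start the context. A condensed, hypothetical kernel-side sketch of that sequence (error handling trimmed to the essentials, callback body omitted; slot size and page count are illustrative only):

#include <linux/err.h>
#include <linux/firewire.h>

/* Hypothetical receive callback; a real client would parse the stream
 * header and hand the payload on from here. */
static void rx_callback(struct fw_iso_context *ctx, u32 cycle,
			size_t header_length, void *header, void *data)
{
}

static int start_iso_receive(struct fw_card *card, struct fw_iso_buffer *buf)
{
	struct fw_iso_context *ctx;
	struct fw_iso_packet packet = {
		.payload_length	= 4096,	/* illustrative slot size */
		.interrupt	= 1,
		.tag		= 3,
		.header_length	= 8,	/* room for the stream header */
	};
	int i, err;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE,
				    31 /* channel */, card->link_speed,
				    8 /* header_size */, rx_callback, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = fw_iso_buffer_init(buf, card, 16 /* pages */, DMA_FROM_DEVICE);
	if (err < 0)
		goto out_context;

	for (i = 0; i < 16; i++) {
		err = fw_iso_context_queue(ctx, &packet, buf, i * 4096);
		if (err < 0)
			goto out_buffer;
	}

	return fw_iso_context_start(ctx, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS);

out_buffer:
	fw_iso_buffer_destroy(buf, card);
out_context:
	fw_iso_context_destroy(ctx);
	return err;
}

net.c additionally keeps kmap()ed per-slot pointers on top of this so its interrupt handler can read the GASP header directly; that bookkeeping is omitted here.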
444 | |||
358 | #endif /* _LINUX_FIREWIRE_H */ | 445 | #endif /* _LINUX_FIREWIRE_H */ |