author     Stefan Richter <stefanr@s5r6.in-berlin.de>   2010-02-21 11:57:05 -0500
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>   2010-02-24 14:36:55 -0500
commit     6498ba04aee69540f8f586438f90d58e5b8e6936
tree       07f085b966af88265cbf36f30464f6f3943ecd77 /drivers/firewire/ohci.c
parent     64582298b9c29535188380f488873e7d2196a2eb
firewire: ohci: remove unused dualbuffer IR code
This code has been unused since 2.6.33, commit 090699c0, "firewire: ohci:
always use packet-per-buffer mode for isochronous reception".  If anybody
needs this code again for special purposes, it can be brought back in.  But
it must not be re-enabled by default; drivers (kernel-space or user-space)
should only get this mode if they explicitly request it.
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
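For orientation, the struct db_descriptor removed below is the driver's view of an OHCI dual-buffer receive descriptor: the first buffer catches each packet's isochronous header/trailer, the second buffer catches the payload, and the req/res count pairs track requested versus remaining bytes in each. The sketch below restates that structure with editorial field comments inferred from how the removed code uses the fields; the comments are not part of the original source.

#include <linux/types.h>	/* __le16, __le32 */

/* Annotated sketch of the removed dual-buffer IR descriptor. */
struct db_descriptor {
	__le16 first_size;		/* per-packet slot size in the first (header) buffer */
	__le16 control;			/* DESCRIPTOR_* bits: status, wait, branch, IRQ */
	__le16 second_req_count;	/* requested bytes for the second (payload) buffer */
	__le16 first_req_count;		/* requested bytes for the first (header) buffer */
	__le32 branch_address;		/* bus address of the next descriptor block */
	__le16 second_res_count;	/* bytes still unfilled in the payload buffer */
	__le16 first_res_count;		/* bytes still unfilled in the header buffer */
	__le32 reserved0;
	__le32 first_buffer;		/* bus address of the header/trailer buffer */
	__le32 second_buffer;		/* bus address of the payload buffer */
	__le32 reserved1;
} __attribute__((aligned(16)));

In the removed ohci_queue_iso_receive_dualbuffer(), first_buffer points just past the descriptor itself (d_bus + sizeof(*db)) so per-packet headers land inline behind it, while second_buffer points at the caller's payload pages.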
Diffstat (limited to 'drivers/firewire/ohci.c')
-rw-r--r--   drivers/firewire/ohci.c   184
1 file changed, 1 insertion(+), 183 deletions(-)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 0f7c4bb978e7..047331e59b31 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -72,20 +72,6 @@ struct descriptor {
 	__le16 transfer_status;
 } __attribute__((aligned(16)));
 
-struct db_descriptor {
-	__le16 first_size;
-	__le16 control;
-	__le16 second_req_count;
-	__le16 first_req_count;
-	__le32 branch_address;
-	__le16 second_res_count;
-	__le16 first_res_count;
-	__le32 reserved0;
-	__le32 first_buffer;
-	__le32 second_buffer;
-	__le32 reserved1;
-} __attribute__((aligned(16)));
-
 #define CONTROL_SET(regs)	(regs)
 #define CONTROL_CLEAR(regs)	((regs) + 4)
 #define COMMAND_PTR(regs)	((regs) + 12)
@@ -187,7 +173,6 @@ struct fw_ohci {
 	int generation;
 	int request_generation;	/* for timestamping incoming requests */
 
-	bool use_dualbuffer;
 	bool old_uninorth;
 	bool bus_reset_packet_quirk;
 	bool iso_cycle_timer_quirk;
@@ -1863,52 +1848,6 @@ static void copy_iso_headers(struct iso_context *ctx, void *p)
 	ctx->header_length += ctx->base.header_size;
 }
 
-static int handle_ir_dualbuffer_packet(struct context *context,
-				       struct descriptor *d,
-				       struct descriptor *last)
-{
-	struct iso_context *ctx =
-		container_of(context, struct iso_context, context);
-	struct db_descriptor *db = (struct db_descriptor *) d;
-	__le32 *ir_header;
-	size_t header_length;
-	void *p, *end;
-
-	if (db->first_res_count != 0 && db->second_res_count != 0) {
-		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
-			/* This descriptor isn't done yet, stop iteration. */
-			return 0;
-		}
-		ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
-	}
-
-	header_length = le16_to_cpu(db->first_req_count) -
-			le16_to_cpu(db->first_res_count);
-
-	p = db + 1;
-	end = p + header_length;
-	while (p < end) {
-		copy_iso_headers(ctx, p);
-		ctx->excess_bytes +=
-			(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
-		p += max(ctx->base.header_size, (size_t)8);
-	}
-
-	ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
-			le16_to_cpu(db->second_res_count);
-
-	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
-		ir_header = (__le32 *) (db + 1);
-		ctx->base.callback(&ctx->base,
-				   le32_to_cpu(ir_header[0]) & 0xffff,
-				   ctx->header_length, ctx->header,
-				   ctx->base.callback_data);
-		ctx->header_length = 0;
-	}
-
-	return 1;
-}
-
 static int handle_ir_packet_per_buffer(struct context *context,
 				       struct descriptor *d,
 				       struct descriptor *last)
@@ -1995,10 +1934,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 		channels = &ohci->ir_context_channels;
 		mask = &ohci->ir_context_mask;
 		list = ohci->ir_context_list;
-		if (ohci->use_dualbuffer)
-			callback = handle_ir_dualbuffer_packet;
-		else
-			callback = handle_ir_packet_per_buffer;
+		callback = handle_ir_packet_per_buffer;
 	}
 
 	spin_lock_irqsave(&ohci->lock, flags);
@@ -2061,8 +1997,6 @@ static int ohci_start_iso(struct fw_iso_context *base,
 	} else {
 		index = ctx - ohci->ir_context_list;
 		control = IR_CONTEXT_ISOCH_HEADER;
-		if (ohci->use_dualbuffer)
-			control |= IR_CONTEXT_DUAL_BUFFER_MODE;
 		match = (tags << 28) | (sync << 8) | ctx->base.channel;
 		if (cycle >= 0) {
 			match |= (cycle & 0x07fff) << 12;
@@ -2223,92 +2157,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
 	return 0;
 }
 
-static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
-					     struct fw_iso_packet *packet,
-					     struct fw_iso_buffer *buffer,
-					     unsigned long payload)
-{
-	struct iso_context *ctx = container_of(base, struct iso_context, base);
-	struct db_descriptor *db = NULL;
-	struct descriptor *d;
-	struct fw_iso_packet *p;
-	dma_addr_t d_bus, page_bus;
-	u32 z, header_z, length, rest;
-	int page, offset, packet_count, header_size;
-
-	/*
-	 * FIXME: Cycle lost behavior should be configurable: lose
-	 * packet, retransmit or terminate..
-	 */
-
-	p = packet;
-	z = 2;
-
-	/*
-	 * The OHCI controller puts the isochronous header and trailer in the
-	 * buffer, so we need at least 8 bytes.
-	 */
-	packet_count = p->header_length / ctx->base.header_size;
-	header_size = packet_count * max(ctx->base.header_size, (size_t)8);
-
-	/* Get header size in number of descriptors. */
-	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
-	page = payload >> PAGE_SHIFT;
-	offset = payload & ~PAGE_MASK;
-	rest = p->payload_length;
-	/*
-	 * The controllers I've tested have not worked correctly when
-	 * second_req_count is zero.  Rather than do something we know won't
-	 * work, return an error
-	 */
-	if (rest == 0)
-		return -EINVAL;
-
-	while (rest > 0) {
-		d = context_get_descriptors(&ctx->context,
-					    z + header_z, &d_bus);
-		if (d == NULL)
-			return -ENOMEM;
-
-		db = (struct db_descriptor *) d;
-		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
-					  DESCRIPTOR_BRANCH_ALWAYS);
-		db->first_size =
-		    cpu_to_le16(max(ctx->base.header_size, (size_t)8));
-		if (p->skip && rest == p->payload_length) {
-			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
-			db->first_req_count = db->first_size;
-		} else {
-			db->first_req_count = cpu_to_le16(header_size);
-		}
-		db->first_res_count = db->first_req_count;
-		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
-
-		if (p->skip && rest == p->payload_length)
-			length = 4;
-		else if (offset + rest < PAGE_SIZE)
-			length = rest;
-		else
-			length = PAGE_SIZE - offset;
-
-		db->second_req_count = cpu_to_le16(length);
-		db->second_res_count = db->second_req_count;
-		page_bus = page_private(buffer->pages[page]);
-		db->second_buffer = cpu_to_le32(page_bus + offset);
-
-		if (p->interrupt && length == rest)
-			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
-
-		context_append(&ctx->context, d, z, header_z);
-		offset = (offset + length) & ~PAGE_MASK;
-		rest -= length;
-		if (offset == 0)
-			page++;
-	}
-
-	return 0;
-}
-
 static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 					struct fw_iso_packet *packet,
 					struct fw_iso_buffer *buffer,
@@ -2399,9 +2247,6 @@ static int ohci_queue_iso(struct fw_iso_context *base,
 	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
 	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
 		ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
-	else if (ctx->context.ohci->use_dualbuffer)
-		ret = ohci_queue_iso_receive_dualbuffer(base, packet,
-							buffer, payload);
 	else
 		ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
 							buffer, payload);
@@ -2456,10 +2301,6 @@ static void ohci_pmac_off(struct pci_dev *dev)
 #define ohci_pmac_off(dev)
 #endif /* CONFIG_PPC_PMAC */
 
-#define PCI_VENDOR_ID_AGERE		PCI_VENDOR_ID_ATT
-#define PCI_DEVICE_ID_AGERE_FW643	0x5901
-#define PCI_DEVICE_ID_TI_TSB43AB23	0x8024
-
 static int __devinit pci_probe(struct pci_dev *dev,
 			       const struct pci_device_id *ent)
 {
@@ -2508,29 +2349,6 @@ static int __devinit pci_probe(struct pci_dev *dev,
 	}
 
 	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
-#if 0
-	/* FIXME: make it a context option or remove dual-buffer mode */
-	ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
-#endif
-
-	/* dual-buffer mode is broken if more than one IR context is active */
-	if (dev->vendor == PCI_VENDOR_ID_AGERE &&
-	    dev->device == PCI_DEVICE_ID_AGERE_FW643)
-		ohci->use_dualbuffer = false;
-
-	/* dual-buffer mode is broken */
-	if (dev->vendor == PCI_VENDOR_ID_RICOH &&
-	    dev->device == PCI_DEVICE_ID_RICOH_R5C832)
-		ohci->use_dualbuffer = false;
-
-	/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
-#if !defined(CONFIG_X86_32)
-	/* dual-buffer mode is broken with descriptor addresses above 2G */
-	if (dev->vendor == PCI_VENDOR_ID_TI &&
-	    (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
-	     dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
-		ohci->use_dualbuffer = false;
-#endif
 
 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
 	ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&