path: root/drivers/firewire/ohci.c
author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /drivers/firewire/ohci.c
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'drivers/firewire/ohci.c')
 -rw-r--r--  drivers/firewire/ohci.c | 437
 1 file changed, 176 insertions(+), 261 deletions(-)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 94260aa76aa3..94b16e0340ae 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -24,7 +24,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/firewire.h>
 #include <linux/firewire-constants.h>
-#include <linux/gfp.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -35,10 +34,10 @@
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 
-#include <asm/atomic.h>
 #include <asm/byteorder.h>
 #include <asm/page.h>
 #include <asm/system.h>
@@ -73,20 +72,6 @@ struct descriptor {
 	__le16 transfer_status;
 } __attribute__((aligned(16)));
 
-struct db_descriptor {
-	__le16 first_size;
-	__le16 control;
-	__le16 second_req_count;
-	__le16 first_req_count;
-	__le32 branch_address;
-	__le16 second_res_count;
-	__le16 first_res_count;
-	__le32 reserved0;
-	__le32 first_buffer;
-	__le32 second_buffer;
-	__le32 reserved1;
-} __attribute__((aligned(16)));
-
 #define CONTROL_SET(regs)	(regs)
 #define CONTROL_CLEAR(regs)	((regs) + 4)
 #define COMMAND_PTR(regs)	((regs) + 12)
@@ -181,31 +166,16 @@ struct fw_ohci {
 	struct fw_card card;
 
 	__iomem char *registers;
-	dma_addr_t self_id_bus;
-	__le32 *self_id_cpu;
-	struct tasklet_struct bus_reset_tasklet;
 	int node_id;
 	int generation;
 	int request_generation;	/* for timestamping incoming requests */
-	atomic_t bus_seconds;
-
-	bool use_dualbuffer;
-	bool old_uninorth;
-	bool bus_reset_packet_quirk;
+	unsigned quirks;
 
 	/*
 	 * Spinlock for accessing fw_ohci data. Never call out of
 	 * this driver with this lock held.
 	 */
 	spinlock_t lock;
-	u32 self_id_buffer[512];
-
-	/* Config rom buffers */
-	__be32 *config_rom;
-	dma_addr_t config_rom_bus;
-	__be32 *next_config_rom;
-	dma_addr_t next_config_rom_bus;
-	u32 next_header;
 
 	struct ar_context ar_request_ctx;
 	struct ar_context ar_response_ctx;
@@ -217,6 +187,18 @@ struct fw_ohci {
 	u64 ir_context_channels;
 	u32 ir_context_mask;
 	struct iso_context *ir_context_list;
+
+	__be32 *config_rom;
+	dma_addr_t config_rom_bus;
+	__be32 *next_config_rom;
+	dma_addr_t next_config_rom_bus;
+	__be32 next_header;
+
+	__le32 *self_id_cpu;
+	dma_addr_t self_id_bus;
+	struct tasklet_struct bus_reset_tasklet;
+
+	u32 self_id_buffer[512];
 };
 
 static inline struct fw_ohci *fw_ohci(struct fw_card *card)
@@ -249,6 +231,34 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
+#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
+
+#define QUIRK_CYCLE_TIMER	1
+#define QUIRK_RESET_PACKET	2
+#define QUIRK_BE_HEADERS	4
+
+/* In case of multiple matches in ohci_quirks[], only the first one is used. */
+static const struct {
+	unsigned short vendor, device, flags;
+} ohci_quirks[] = {
+	{PCI_VENDOR_ID_TI,	PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
+							    QUIRK_RESET_PACKET},
+	{PCI_VENDOR_ID_TI,	PCI_ANY_ID,	QUIRK_RESET_PACKET},
+	{PCI_VENDOR_ID_AL,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
+	{PCI_VENDOR_ID_NEC,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
+	{PCI_VENDOR_ID_VIA,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
+	{PCI_VENDOR_ID_APPLE,	PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
+};
+
+/* This overrides anything that was found in ohci_quirks[]. */
+static int param_quirks;
+module_param_named(quirks, param_quirks, int, 0644);
+MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
+	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
+	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
+	", AR/selfID endianess = "	__stringify(QUIRK_BE_HEADERS)
+	")");
+
 #ifdef CONFIG_FIREWIRE_OHCI_DEBUG
 
 #define OHCI_PARAM_DEBUG_AT_AR		1
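The quirk handling added above is a plain bitmask: pci_probe() seeds ohci->quirks from the first matching ohci_quirks[] entry, and a nonzero "quirks" module parameter then replaces that value outright (so quirks=3 would force QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET on any controller). A minimal userspace sketch of that lookup, not part of the patch and using hypothetical names (quirk_table, lookup_quirks, ANY_ID):

/* Illustrative sketch only; mirrors the first-match-wins lookup and the
 * module-parameter override that this patch adds to pci_probe(). */
#include <stdio.h>

#define QUIRK_CYCLE_TIMER	1
#define QUIRK_RESET_PACKET	2
#define QUIRK_BE_HEADERS	4
#define ANY_ID			0xffff

static const struct {
	unsigned short vendor, device, flags;
} quirk_table[] = {
	{0x104c, 0x8009, QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET}, /* TI TSB12LV22 */
	{0x104c, ANY_ID, QUIRK_RESET_PACKET},                     /* any other TI */
};

static unsigned lookup_quirks(unsigned short vendor, unsigned short device,
			      int param_quirks)
{
	unsigned flags = 0;
	size_t i;

	for (i = 0; i < sizeof(quirk_table) / sizeof(quirk_table[0]); i++)
		if (quirk_table[i].vendor == vendor &&
		    (quirk_table[i].device == device ||
		     quirk_table[i].device == ANY_ID)) {
			flags = quirk_table[i].flags;	/* first match wins */
			break;
		}
	return param_quirks ? (unsigned)param_quirks : flags;
}

int main(void)
{
	printf("TSB12LV22 -> quirks 0x%x\n", lookup_quirks(0x104c, 0x8009, 0));
	printf("override  -> quirks 0x%x\n", lookup_quirks(0x104c, 0x8009, 4));
	return 0;
}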
@@ -275,7 +285,7 @@ static void log_irqs(u32 evt)
 	    !(evt & OHCI1394_busReset))
 		return;
 
-	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
 	    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
 	    evt & OHCI1394_RQPkt		? " AR_req"		: "",
 	    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
@@ -285,7 +295,6 @@ static void log_irqs(u32 evt)
 	    evt & OHCI1394_isochTx		? " IT"			: "",
 	    evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
 	    evt & OHCI1394_cycleTooLong		? " cycleTooLong"	: "",
-	    evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
 	    evt & OHCI1394_cycleInconsistent	? " cycleInconsistent"	: "",
 	    evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
 	    evt & OHCI1394_busReset		? " busReset"		: "",
@@ -293,8 +302,7 @@ static void log_irqs(u32 evt)
 		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
 		    OHCI1394_respTxComplete | OHCI1394_isochRx |
 		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
-		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
-		    OHCI1394_cycleInconsistent |
+		    OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent |
 		    OHCI1394_regAccessFail | OHCI1394_busReset)
 		    ? " ?" : "");
 }
@@ -524,7 +532,7 @@ static void ar_context_release(struct ar_context *ctx)
 
 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
 #define cond_le32_to_cpu(v) \
-	(ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
+	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
 #else
 #define cond_le32_to_cpu(v) le32_to_cpu(v)
 #endif
@@ -605,7 +613,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 	 * at a slightly incorrect time (in bus_reset_tasklet).
 	 */
 	if (evt == OHCI1394_evt_bus_reset) {
-		if (!ohci->bus_reset_packet_quirk)
+		if (!(ohci->quirks & QUIRK_RESET_PACKET))
 			ohci->request_generation = (p.header[2] >> 16) & 0xff;
 	} else if (ctx == &ohci->ar_request_ctx) {
 		fw_core_handle_request(&ohci->card, &p);
@@ -997,7 +1005,8 @@ static int at_context_queue_packet(struct context *ctx,
 		packet->ack = RCODE_SEND_ERROR;
 		return -1;
 	}
 	packet->payload_bus = payload_bus;
+	packet->payload_mapped = true;
 
 	d[2].req_count    = cpu_to_le16(packet->payload_length);
 	d[2].data_address = cpu_to_le32(payload_bus);
@@ -1025,7 +1034,7 @@ static int at_context_queue_packet(struct context *ctx,
 	 */
 	if (ohci->generation != packet->generation ||
 	    reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
-		if (packet->payload_length > 0)
+		if (packet->payload_mapped)
 			dma_unmap_single(ohci->card.device, payload_bus,
 					 packet->payload_length, DMA_TO_DEVICE);
 		packet->ack = RCODE_GENERATION;
@@ -1061,7 +1070,7 @@ static int handle_at_packet(struct context *context,
 		/* This packet was cancelled, just continue. */
 		return 1;
 
-	if (packet->payload_bus)
+	if (packet->payload_mapped)
 		dma_unmap_single(ohci->card.device, packet->payload_bus,
 				 packet->payload_length, DMA_TO_DEVICE);
 
@@ -1149,7 +1158,7 @@ static void handle_local_lock(struct fw_ohci *ohci,
 			      struct fw_packet *packet, u32 csr)
 {
 	struct fw_packet response;
-	int tcode, length, ext_tcode, sel;
+	int tcode, length, ext_tcode, sel, try;
 	__be32 *payload, lock_old;
 	u32 lock_arg, lock_data;
 
@@ -1176,21 +1185,26 @@ static void handle_local_lock(struct fw_ohci *ohci,
 	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
 	reg_write(ohci, OHCI1394_CSRControl, sel);
 
-	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
-		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
-	else
-		fw_notify("swap not done yet\n");
+	for (try = 0; try < 20; try++)
+		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
+			lock_old = cpu_to_be32(reg_read(ohci,
+							OHCI1394_CSRData));
+			fw_fill_response(&response, packet->header,
+					 RCODE_COMPLETE,
+					 &lock_old, sizeof(lock_old));
+			goto out;
+		}
+
+	fw_error("swap not done (CSR lock timeout)\n");
+	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
 
-	fw_fill_response(&response, packet->header,
-			 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
  out:
 	fw_core_handle_response(&ohci->card, &response);
 }
 
 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
 {
-	u64 offset;
-	u32 csr;
+	u64 offset, csr;
 
 	if (ctx == &ctx->ohci->at_request_ctx) {
 		packet->ack = ACK_PENDING;
@@ -1328,7 +1342,7 @@ static void bus_reset_tasklet(unsigned long data)
 	context_stop(&ohci->at_response_ctx);
 	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
 
-	if (ohci->bus_reset_packet_quirk)
+	if (ohci->quirks & QUIRK_RESET_PACKET)
 		ohci->request_generation = generation;
 
 	/*
@@ -1357,8 +1371,9 @@ static void bus_reset_tasklet(unsigned long data)
 	 */
 		reg_write(ohci, OHCI1394_BusOptions,
 			  be32_to_cpu(ohci->config_rom[2]));
-		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
-		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
+		ohci->config_rom[0] = ohci->next_header;
+		reg_write(ohci, OHCI1394_ConfigROMhdr,
+			  be32_to_cpu(ohci->next_header));
 	}
 
 #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
@@ -1382,7 +1397,7 @@ static void bus_reset_tasklet(unsigned long data)
 static irqreturn_t irq_handler(int irq, void *data)
 {
 	struct fw_ohci *ohci = data;
-	u32 event, iso_event, cycle_time;
+	u32 event, iso_event;
 	int i;
 
 	event = reg_read(ohci, OHCI1394_IntEventClear);
@@ -1452,12 +1467,6 @@ static irqreturn_t irq_handler(int irq, void *data)
 		fw_notify("isochronous cycle inconsistent\n");
 	}
 
-	if (event & OHCI1394_cycle64Seconds) {
-		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
-		if ((cycle_time & 0x80000000) == 0)
-			atomic_inc(&ohci->bus_seconds);
-	}
-
 	return IRQ_HANDLED;
 }
 
@@ -1477,7 +1486,17 @@ static int software_reset(struct fw_ohci *ohci)
 	return -EBUSY;
 }
 
-static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
+static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
+{
+	size_t size = length * 4;
+
+	memcpy(dest, src, size);
+	if (size < CONFIG_ROM_SIZE)
+		memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
+}
+
+static int ohci_enable(struct fw_card *card,
+		       const __be32 *config_rom, size_t length)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
 	struct pci_dev *dev = to_pci_dev(card->device);
@@ -1541,8 +1560,7 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
 		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
 		  OHCI1394_isochRx | OHCI1394_isochTx |
 		  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
-		  OHCI1394_cycleInconsistent |
-		  OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
+		  OHCI1394_cycleInconsistent | OHCI1394_regAccessFail |
 		  OHCI1394_masterIntEnable);
 	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
 		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
@@ -1579,8 +1597,7 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
 		if (ohci->next_config_rom == NULL)
 			return -ENOMEM;
 
-		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
-		fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
+		copy_config_rom(ohci->next_config_rom, config_rom, length);
 	} else {
 		/*
 		 * In the suspend case, config_rom is NULL, which
@@ -1590,7 +1607,7 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
 		ohci->next_config_rom_bus = ohci->config_rom_bus;
 	}
 
-	ohci->next_header = be32_to_cpu(ohci->next_config_rom[0]);
+	ohci->next_header = ohci->next_config_rom[0];
 	ohci->next_config_rom[0] = 0;
 	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
 	reg_write(ohci, OHCI1394_BusOptions,
@@ -1624,7 +1641,7 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
 }
 
 static int ohci_set_config_rom(struct fw_card *card,
-			       u32 *config_rom, size_t length)
+			       const __be32 *config_rom, size_t length)
 {
 	struct fw_ohci *ohci;
 	unsigned long flags;
@@ -1673,9 +1690,7 @@ static int ohci_set_config_rom(struct fw_card *card,
 		ohci->next_config_rom     = next_config_rom;
 		ohci->next_config_rom_bus = next_config_rom_bus;
 
-		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
-		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
-				  length * 4);
+		copy_config_rom(ohci->next_config_rom, config_rom, length);
 
 		ohci->next_header = config_rom[0];
 		ohci->next_config_rom[0] = 0;
@@ -1729,7 +1744,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
 	if (packet->ack != 0)
 		goto out;
 
-	if (packet->payload_bus)
+	if (packet->payload_mapped)
 		dma_unmap_single(ohci->card.device, packet->payload_bus,
 				 packet->payload_length, DMA_TO_DEVICE);
 
@@ -1785,16 +1800,61 @@ static int ohci_enable_phys_dma(struct fw_card *card,
 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
 }
 
-static u64 ohci_get_bus_time(struct fw_card *card)
+static u32 cycle_timer_ticks(u32 cycle_timer)
 {
-	struct fw_ohci *ohci = fw_ohci(card);
-	u32 cycle_time;
-	u64 bus_time;
+	u32 ticks;
 
-	cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
-	bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time;
+	ticks = cycle_timer & 0xfff;
+	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
+	ticks += (3072 * 8000) * (cycle_timer >> 25);
 
-	return bus_time;
+	return ticks;
+}
+
+/*
+ * Some controllers exhibit one or more of the following bugs when updating the
+ * iso cycle timer register:
+ *  - When the lowest six bits are wrapping around to zero, a read that happens
+ *    at the same time will return garbage in the lowest ten bits.
+ *  - When the cycleOffset field wraps around to zero, the cycleCount field is
+ *    not incremented for about 60 ns.
+ *  - Occasionally, the entire register reads zero.
+ *
+ * To catch these, we read the register three times and ensure that the
+ * difference between each two consecutive reads is approximately the same, i.e.
+ * less than twice the other. Furthermore, any negative difference indicates an
+ * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
+ * execute, so we have enough precision to compute the ratio of the differences.)
+ */
+static u32 ohci_get_cycle_time(struct fw_card *card)
+{
+	struct fw_ohci *ohci = fw_ohci(card);
+	u32 c0, c1, c2;
+	u32 t0, t1, t2;
+	s32 diff01, diff12;
+	int i;
+
+	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+
+	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
+		i = 0;
+		c1 = c2;
+		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+		do {
+			c0 = c1;
+			c1 = c2;
+			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+			t0 = cycle_timer_ticks(c0);
+			t1 = cycle_timer_ticks(c1);
+			t2 = cycle_timer_ticks(c2);
+			diff01 = t1 - t0;
+			diff12 = t2 - t1;
+		} while ((diff01 <= 0 || diff12 <= 0 ||
+			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
+			 && i++ < 20);
+	}
+
+	return c2;
 }
 
 static void copy_iso_headers(struct iso_context *ctx, void *p)
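The isochronous cycle timer register read above packs three fields into 32 bits: cycleOffset in bits 0-11 (0..3071), cycleCount in bits 12-24 (0..7999) and cycleSeconds in bits 25-31, which is exactly what cycle_timer_ticks() unpacks before the three-read consistency check. A self-contained sketch of that conversion, not part of the patch (the helper is re-declared with standard C types purely for illustration):

/* Illustrative sketch only; same arithmetic as cycle_timer_ticks() above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t cycle_timer_ticks(uint32_t cycle_timer)
{
	uint32_t ticks;

	ticks  = cycle_timer & 0xfff;			/* cycleOffset, 0..3071 */
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);	/* cycleCount, 0..7999  */
	ticks += (3072 * 8000) * (cycle_timer >> 25);	/* cycleSeconds         */
	return ticks;
}

int main(void)
{
	/* 5 s, cycle 1234, offset 567 */
	uint32_t reg = (5u << 25) | (1234u << 12) | 567u;

	printf("ticks = %u (expected %u)\n",
	       (unsigned)cycle_timer_ticks(reg),
	       (unsigned)(5u * 3072 * 8000 + 1234u * 3072 + 567u));
	return 0;
}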
@@ -1819,52 +1879,6 @@ static void copy_iso_headers(struct iso_context *ctx, void *p)
 	ctx->header_length += ctx->base.header_size;
 }
 
-static int handle_ir_dualbuffer_packet(struct context *context,
-				       struct descriptor *d,
-				       struct descriptor *last)
-{
-	struct iso_context *ctx =
-		container_of(context, struct iso_context, context);
-	struct db_descriptor *db = (struct db_descriptor *) d;
-	__le32 *ir_header;
-	size_t header_length;
-	void *p, *end;
-
-	if (db->first_res_count != 0 && db->second_res_count != 0) {
-		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
-			/* This descriptor isn't done yet, stop iteration. */
-			return 0;
-		}
-		ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
-	}
-
-	header_length = le16_to_cpu(db->first_req_count) -
-			le16_to_cpu(db->first_res_count);
-
-	p = db + 1;
-	end = p + header_length;
-	while (p < end) {
-		copy_iso_headers(ctx, p);
-		ctx->excess_bytes +=
-			(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
-		p += max(ctx->base.header_size, (size_t)8);
-	}
-
-	ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
-			     le16_to_cpu(db->second_res_count);
-
-	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
-		ir_header = (__le32 *) (db + 1);
-		ctx->base.callback(&ctx->base,
-				   le32_to_cpu(ir_header[0]) & 0xffff,
-				   ctx->header_length, ctx->header,
-				   ctx->base.callback_data);
-		ctx->header_length = 0;
-	}
-
-	return 1;
-}
-
 static int handle_ir_packet_per_buffer(struct context *context,
 				       struct descriptor *d,
 				       struct descriptor *last)
@@ -1951,10 +1965,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 		channels = &ohci->ir_context_channels;
 		mask = &ohci->ir_context_mask;
 		list = ohci->ir_context_list;
-		if (ohci->use_dualbuffer)
-			callback = handle_ir_dualbuffer_packet;
-		else
-			callback = handle_ir_packet_per_buffer;
+		callback = handle_ir_packet_per_buffer;
 	}
 
 	spin_lock_irqsave(&ohci->lock, flags);
@@ -2017,8 +2028,6 @@ static int ohci_start_iso(struct fw_iso_context *base,
 	} else {
 		index = ctx - ohci->ir_context_list;
 		control = IR_CONTEXT_ISOCH_HEADER;
-		if (ohci->use_dualbuffer)
-			control |= IR_CONTEXT_DUAL_BUFFER_MODE;
 		match = (tags << 28) | (sync << 8) | ctx->base.channel;
 		if (cycle >= 0) {
 			match |= (cycle & 0x07fff) << 12;
@@ -2092,11 +2101,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
 	u32 payload_index, payload_end_index, next_page_index;
 	int page, end_page, i, length, offset;
 
-	/*
-	 * FIXME: Cycle lost behavior should be configurable: lose
-	 * packet, retransmit or terminate..
-	 */
-
 	p = packet;
 	payload_index = payload;
 
@@ -2126,6 +2130,14 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
 	if (!p->skip) {
 		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
 		d[0].req_count = cpu_to_le16(8);
+		/*
+		 * Link the skip address to this descriptor itself. This causes
+		 * a context to skip a cycle whenever lost cycles or FIFO
+		 * overruns occur, without dropping the data. The application
+		 * should then decide whether this is an error condition or not.
+		 * FIXME: Make the context's cycle-lost behaviour configurable?
+		 */
+		d[0].branch_address = cpu_to_le32(d_bus | z);
 
 		header = (__le32 *) &d[1];
 		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
@@ -2176,93 +2188,13 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
 	return 0;
 }
 
-static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
-					     struct fw_iso_packet *packet,
-					     struct fw_iso_buffer *buffer,
-					     unsigned long payload)
-{
-	struct iso_context *ctx = container_of(base, struct iso_context, base);
-	struct db_descriptor *db = NULL;
-	struct descriptor *d;
-	struct fw_iso_packet *p;
-	dma_addr_t d_bus, page_bus;
-	u32 z, header_z, length, rest;
-	int page, offset, packet_count, header_size;
-
-	/*
-	 * FIXME: Cycle lost behavior should be configurable: lose
-	 * packet, retransmit or terminate..
-	 */
-
-	p = packet;
-	z = 2;
-
-	/*
-	 * The OHCI controller puts the isochronous header and trailer in the
-	 * buffer, so we need at least 8 bytes.
-	 */
-	packet_count = p->header_length / ctx->base.header_size;
-	header_size = packet_count * max(ctx->base.header_size, (size_t)8);
-
-	/* Get header size in number of descriptors. */
-	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
-	page     = payload >> PAGE_SHIFT;
-	offset   = payload & ~PAGE_MASK;
-	rest     = p->payload_length;
-
-	/* FIXME: make packet-per-buffer/dual-buffer a context option */
-	while (rest > 0) {
-		d = context_get_descriptors(&ctx->context,
-					    z + header_z, &d_bus);
-		if (d == NULL)
-			return -ENOMEM;
-
-		db = (struct db_descriptor *) d;
-		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
-					  DESCRIPTOR_BRANCH_ALWAYS);
-		db->first_size =
-		    cpu_to_le16(max(ctx->base.header_size, (size_t)8));
-		if (p->skip && rest == p->payload_length) {
-			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
-			db->first_req_count = db->first_size;
-		} else {
-			db->first_req_count = cpu_to_le16(header_size);
-		}
-		db->first_res_count = db->first_req_count;
-		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
-
-		if (p->skip && rest == p->payload_length)
-			length = 4;
-		else if (offset + rest < PAGE_SIZE)
-			length = rest;
-		else
-			length = PAGE_SIZE - offset;
-
-		db->second_req_count = cpu_to_le16(length);
-		db->second_res_count = db->second_req_count;
-		page_bus = page_private(buffer->pages[page]);
-		db->second_buffer = cpu_to_le32(page_bus + offset);
-
-		if (p->interrupt && length == rest)
-			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
-
-		context_append(&ctx->context, d, z, header_z);
-		offset = (offset + length) & ~PAGE_MASK;
-		rest -= length;
-		if (offset == 0)
-			page++;
-	}
-
-	return 0;
-}
-
 static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 					struct fw_iso_packet *packet,
 					struct fw_iso_buffer *buffer,
 					unsigned long payload)
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
-	struct descriptor *d = NULL, *pd = NULL;
+	struct descriptor *d, *pd;
 	struct fw_iso_packet *p = packet;
 	dma_addr_t d_bus, page_bus;
 	u32 z, header_z, rest;
@@ -2300,8 +2232,9 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
 
 		rest = payload_per_buffer;
+		pd = d;
 		for (j = 1; j < z; j++) {
-			pd = d + j;
+			pd++;
 			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
 						  DESCRIPTOR_INPUT_MORE);
 
@@ -2345,9 +2278,6 @@ static int ohci_queue_iso(struct fw_iso_context *base,
 	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
 	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
 		ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
-	else if (ctx->context.ohci->use_dualbuffer)
-		ret = ohci_queue_iso_receive_dualbuffer(base, packet,
-							buffer, payload);
 	else
 		ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
 							buffer, payload);
@@ -2364,7 +2294,7 @@ static const struct fw_card_driver ohci_driver = {
 	.send_response		= ohci_send_response,
 	.cancel_packet		= ohci_cancel_packet,
 	.enable_phys_dma	= ohci_enable_phys_dma,
-	.get_bus_time		= ohci_get_bus_time,
+	.get_cycle_time		= ohci_get_cycle_time,
 
 	.allocate_iso_context	= ohci_allocate_iso_context,
 	.free_iso_context	= ohci_free_iso_context,
@@ -2402,16 +2332,13 @@ static void ohci_pmac_off(struct pci_dev *dev)
 #define ohci_pmac_off(dev)
 #endif /* CONFIG_PPC_PMAC */
 
-#define PCI_VENDOR_ID_AGERE		PCI_VENDOR_ID_ATT
-#define PCI_DEVICE_ID_AGERE_FW643	0x5901
-
 static int __devinit pci_probe(struct pci_dev *dev,
 			       const struct pci_device_id *ent)
 {
 	struct fw_ohci *ohci;
 	u32 bus_options, max_receive, link_speed, version;
 	u64 guid;
-	int err;
+	int i, err, n_ir, n_it;
 	size_t size;
 
 	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
@@ -2452,32 +2379,15 @@ static int __devinit pci_probe(struct pci_dev *dev,
 		goto fail_iomem;
 	}
 
-	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
-	ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
-
-	/* dual-buffer mode is broken if more than one IR context is active */
-	if (dev->vendor == PCI_VENDOR_ID_AGERE &&
-	    dev->device == PCI_DEVICE_ID_AGERE_FW643)
-		ohci->use_dualbuffer = false;
-
-	/* dual-buffer mode is broken */
-	if (dev->vendor == PCI_VENDOR_ID_RICOH &&
-	    dev->device == PCI_DEVICE_ID_RICOH_R5C832)
-		ohci->use_dualbuffer = false;
-
-/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
-#if !defined(CONFIG_X86_32)
-	/* dual-buffer mode is broken with descriptor addresses above 2G */
-	if (dev->vendor == PCI_VENDOR_ID_TI &&
-	    dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
-		ohci->use_dualbuffer = false;
-#endif
-
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
-	ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
-			     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
-#endif
-	ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
+	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
+		if (ohci_quirks[i].vendor == dev->vendor &&
+		    (ohci_quirks[i].device == dev->device ||
+		     ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) {
+			ohci->quirks = ohci_quirks[i].flags;
+			break;
+		}
+	if (param_quirks)
+		ohci->quirks = param_quirks;
 
 	ar_context_init(&ohci->ar_request_ctx, ohci,
 			OHCI1394_AsReqRcvContextControlSet);
@@ -2492,17 +2402,19 @@ static int __devinit pci_probe(struct pci_dev *dev,
 		OHCI1394_AsRspTrContextControlSet, handle_at_packet);
 
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
-	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+	ohci->ir_context_channels = ~0ULL;
+	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
-	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
-	ohci->it_context_list = kzalloc(size, GFP_KERNEL);
+	n_ir = hweight32(ohci->ir_context_mask);
+	size = sizeof(struct iso_context) * n_ir;
+	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
 
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
-	ohci->ir_context_channels = ~0ULL;
-	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
-	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
-	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
+	n_it = hweight32(ohci->it_context_mask);
+	size = sizeof(struct iso_context) * n_it;
+	ohci->it_context_list = kzalloc(size, GFP_KERNEL);
 
 	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
 		err = -ENOMEM;
@@ -2529,8 +2441,11 @@ static int __devinit pci_probe(struct pci_dev *dev,
 	if (err)
 		goto fail_self_id;
 
-	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
-		  dev_name(&dev->dev), version >> 16, version & 0xff);
+	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+	fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
+		  "%d IR + %d IT contexts, quirks 0x%x\n",
+		  dev_name(&dev->dev), version >> 16, version & 0xff,
+		  n_ir, n_it, ohci->quirks);
 
 	return 0;
 
@@ -2638,7 +2553,7 @@ static int pci_resume(struct pci_dev *dev)
 }
 #endif
 
-static struct pci_device_id pci_table[] = {
+static const struct pci_device_id pci_table[] = {
 	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
 	{ }
 };