Diffstat (limited to 'drivers/firewire/ohci.c')
-rw-r--r--  drivers/firewire/ohci.c | 701 +++++++++++++++++++++++++++++++-------
1 file changed, 548 insertions(+), 153 deletions(-)
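
Reader's note (an illustration, not part of the patch): much of the new code below converts the OHCI isochronous cycle timer register into a flat tick count so that consecutive reads can be sanity-checked on controllers with buggy timer latching. The following minimal standalone C sketch mirrors that conversion under the field layout the patch's cycle_timer_ticks() relies on (12-bit cycleOffset, 13-bit cycleCount, 7-bit seconds; 3072 ticks of the 24.576 MHz clock per 125 us cycle, 8000 cycles per second); the names used here are illustrative only.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the tick arithmetic added by the patch: the register packs
 * seconds(7 bits) : cycleCount(13 bits) : cycleOffset(12 bits), with
 * 3072 ticks per cycle and 8000 cycles per second.
 */
static uint32_t cycle_timer_to_ticks(uint32_t cycle_timer)
{
        uint32_t ticks;

        ticks  = cycle_timer & 0xfff;                   /* cycleOffset */
        ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); /* cycleCount  */
        ticks += (3072 * 8000u) * (cycle_timer >> 25);  /* seconds     */

        return ticks;
}

int main(void)
{
        /* one full cycle plus 100 ticks into the next one */
        uint32_t reg = (1 << 12) | 100;

        printf("%u ticks\n", (unsigned)cycle_timer_to_ticks(reg)); /* 3172 */
        return 0;
}
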
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9f627e758cfc..7f03540cabe8 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -18,6 +18,7 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -32,11 +33,13 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
+#include <linux/time.h>
 
 #include <asm/byteorder.h>
 #include <asm/page.h>
@@ -170,6 +173,10 @@ struct fw_ohci {
         int generation;
         int request_generation; /* for timestamping incoming requests */
         unsigned quirks;
+        unsigned int pri_req_max;
+        u32 bus_time;
+        bool is_root;
+        bool csr_state_setclear_abdicate;
 
         /*
          * Spinlock for accessing fw_ohci data. Never call out of
@@ -177,16 +184,20 @@ struct fw_ohci {
          */
         spinlock_t lock;
 
+        struct mutex phy_reg_mutex;
+
         struct ar_context ar_request_ctx;
         struct ar_context ar_response_ctx;
         struct context at_request_ctx;
         struct context at_response_ctx;
 
-        u32 it_context_mask;
+        u32 it_context_mask;     /* unoccupied IT contexts */
         struct iso_context *it_context_list;
-        u64 ir_context_channels;
-        u32 ir_context_mask;
+        u64 ir_context_channels; /* unoccupied channels */
+        u32 ir_context_mask;     /* unoccupied IR contexts */
         struct iso_context *ir_context_list;
+        u64 mc_channels; /* channels in use by the multichannel IR context */
+        bool mc_allocated;
 
         __be32 *config_rom;
         dma_addr_t config_rom_bus;
@@ -231,12 +242,14 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
+#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
 #define PCI_DEVICE_ID_TI_TSB12LV22      0x8009
 
 #define QUIRK_CYCLE_TIMER               1
 #define QUIRK_RESET_PACKET              2
 #define QUIRK_BE_HEADERS                4
 #define QUIRK_NO_1394A                  8
+#define QUIRK_NO_MSI                    16
 
 /* In case of multiple matches in ohci_quirks[], only the first one is used. */
 static const struct {
@@ -247,6 +260,7 @@ static const struct {
                                                 QUIRK_NO_1394A},
         {PCI_VENDOR_ID_TI,      PCI_ANY_ID,     QUIRK_RESET_PACKET},
         {PCI_VENDOR_ID_AL,      PCI_ANY_ID,     QUIRK_CYCLE_TIMER},
+        {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
         {PCI_VENDOR_ID_NEC,     PCI_ANY_ID,     QUIRK_CYCLE_TIMER},
         {PCI_VENDOR_ID_VIA,     PCI_ANY_ID,     QUIRK_CYCLE_TIMER},
         {PCI_VENDOR_ID_APPLE,   PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
@@ -260,6 +274,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
         ", reset packet generation = "  __stringify(QUIRK_RESET_PACKET)
         ", AR/selfID endianess = "      __stringify(QUIRK_BE_HEADERS)
         ", no 1394a enhancements = "    __stringify(QUIRK_NO_1394A)
+        ", disable MSI = "              __stringify(QUIRK_NO_MSI)
         ")");
 
 #define OHCI_PARAM_DEBUG_AT_AR          1
@@ -288,7 +303,7 @@ static void log_irqs(u32 evt)
             !(evt & OHCI1394_busReset))
                 return;
 
-        fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+        fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
             evt & OHCI1394_selfIDComplete       ? " selfID"             : "",
             evt & OHCI1394_RQPkt                ? " AR_req"             : "",
             evt & OHCI1394_RSPkt                ? " AR_resp"            : "",
@@ -298,6 +313,7 @@ static void log_irqs(u32 evt)
             evt & OHCI1394_isochTx              ? " IT"                 : "",
             evt & OHCI1394_postedWriteErr       ? " postedWriteErr"     : "",
             evt & OHCI1394_cycleTooLong         ? " cycleTooLong"       : "",
+            evt & OHCI1394_cycle64Seconds       ? " cycle64Seconds"     : "",
             evt & OHCI1394_cycleInconsistent    ? " cycleInconsistent"  : "",
             evt & OHCI1394_regAccessFail        ? " regAccessFail"      : "",
             evt & OHCI1394_busReset             ? " busReset"           : "",
@@ -305,7 +321,8 @@ static void log_irqs(u32 evt)
                     OHCI1394_RSPkt | OHCI1394_reqTxComplete |
                     OHCI1394_respTxComplete | OHCI1394_isochRx |
                     OHCI1394_isochTx | OHCI1394_postedWriteErr |
-                    OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent |
+                    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
+                    OHCI1394_cycleInconsistent |
                     OHCI1394_regAccessFail | OHCI1394_busReset)
                                         ? " ?"                  : "");
 }
@@ -470,12 +487,17 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr)
         int i;
 
         reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
-        for (i = 0; i < 10; i++) {
+        for (i = 0; i < 3 + 100; i++) {
                 val = reg_read(ohci, OHCI1394_PhyControl);
                 if (val & OHCI1394_PhyControl_ReadDone)
                         return OHCI1394_PhyControl_ReadData(val);
 
-                msleep(1);
+                /*
+                 * Try a few times without waiting. Sleeping is necessary
+                 * only when the link/PHY interface is busy.
+                 */
+                if (i >= 3)
+                        msleep(1);
         }
         fw_error("failed to read phy reg\n");
 
@@ -488,25 +510,23 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
 
         reg_write(ohci, OHCI1394_PhyControl,
                   OHCI1394_PhyControl_Write(addr, val));
-        for (i = 0; i < 100; i++) {
+        for (i = 0; i < 3 + 100; i++) {
                 val = reg_read(ohci, OHCI1394_PhyControl);
                 if (!(val & OHCI1394_PhyControl_WritePending))
                         return 0;
 
-                msleep(1);
+                if (i >= 3)
+                        msleep(1);
         }
         fw_error("failed to write phy reg\n");
 
         return -EBUSY;
 }
 
-static int ohci_update_phy_reg(struct fw_card *card, int addr,
-                               int clear_bits, int set_bits)
+static int update_phy_reg(struct fw_ohci *ohci, int addr,
+                          int clear_bits, int set_bits)
 {
-        struct fw_ohci *ohci = fw_ohci(card);
-        int ret;
-
-        ret = read_phy_reg(ohci, addr);
+        int ret = read_phy_reg(ohci, addr);
         if (ret < 0)
                 return ret;
 
@@ -524,13 +544,38 @@ static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
 {
         int ret;
 
-        ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5);
+        ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
         if (ret < 0)
                 return ret;
 
         return read_phy_reg(ohci, addr);
 }
 
+static int ohci_read_phy_reg(struct fw_card *card, int addr)
+{
+        struct fw_ohci *ohci = fw_ohci(card);
+        int ret;
+
+        mutex_lock(&ohci->phy_reg_mutex);
+        ret = read_phy_reg(ohci, addr);
+        mutex_unlock(&ohci->phy_reg_mutex);
+
+        return ret;
+}
+
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+                               int clear_bits, int set_bits)
+{
+        struct fw_ohci *ohci = fw_ohci(card);
+        int ret;
+
+        mutex_lock(&ohci->phy_reg_mutex);
+        ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
+        mutex_unlock(&ohci->phy_reg_mutex);
+
+        return ret;
+}
+
 static int ar_context_add_page(struct ar_context *ctx)
 {
         struct device *dev = ctx->ohci->card.device;
@@ -553,6 +598,7 @@ static int ar_context_add_page(struct ar_context *ctx)
         ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
         ab->descriptor.branch_address = 0;
 
+        wmb(); /* finish init of new descriptors before branch_address update */
         ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
         ctx->last_buffer->next = ab;
         ctx->last_buffer = ab;
@@ -940,6 +986,8 @@ static void context_append(struct context *ctx,
         d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
 
         desc->used += (z + extra) * sizeof(*d);
+
+        wmb(); /* finish init of new descriptors before branch_address update */
         ctx->prev->branch_address = cpu_to_le32(d_bus | z);
         ctx->prev = find_branch_descriptor(d, z);
 
@@ -1026,6 +1074,9 @@ static int at_context_queue_packet(struct context *ctx,
                 header[1] = cpu_to_le32(packet->header[0]);
                 header[2] = cpu_to_le32(packet->header[1]);
                 d[0].req_count = cpu_to_le16(12);
+
+                if (is_ping_packet(packet->header))
+                        d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
                 break;
 
         case 4:
@@ -1311,6 +1362,78 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
 
 }
 
+static u32 cycle_timer_ticks(u32 cycle_timer)
+{
+        u32 ticks;
+
+        ticks = cycle_timer & 0xfff;
+        ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
+        ticks += (3072 * 8000) * (cycle_timer >> 25);
+
+        return ticks;
+}
+
+/*
+ * Some controllers exhibit one or more of the following bugs when updating the
+ * iso cycle timer register:
+ *  - When the lowest six bits are wrapping around to zero, a read that happens
+ *    at the same time will return garbage in the lowest ten bits.
+ *  - When the cycleOffset field wraps around to zero, the cycleCount field is
+ *    not incremented for about 60 ns.
+ *  - Occasionally, the entire register reads zero.
+ *
+ * To catch these, we read the register three times and ensure that the
+ * difference between each two consecutive reads is approximately the same, i.e.
+ * less than twice the other. Furthermore, any negative difference indicates an
+ * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
+ * execute, so we have enough precision to compute the ratio of the differences.)
+ */
+static u32 get_cycle_time(struct fw_ohci *ohci)
+{
+        u32 c0, c1, c2;
+        u32 t0, t1, t2;
+        s32 diff01, diff12;
+        int i;
+
+        c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+
+        if (ohci->quirks & QUIRK_CYCLE_TIMER) {
+                i = 0;
+                c1 = c2;
+                c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+                do {
+                        c0 = c1;
+                        c1 = c2;
+                        c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+                        t0 = cycle_timer_ticks(c0);
+                        t1 = cycle_timer_ticks(c1);
+                        t2 = cycle_timer_ticks(c2);
+                        diff01 = t1 - t0;
+                        diff12 = t2 - t1;
+                } while ((diff01 <= 0 || diff12 <= 0 ||
+                          diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
+                         && i++ < 20);
+        }
+
+        return c2;
+}
+
+/*
+ * This function has to be called at least every 64 seconds. The bus_time
+ * field stores not only the upper 25 bits of the BUS_TIME register but also
+ * the most significant bit of the cycle timer in bit 6 so that we can detect
+ * changes in this bit.
+ */
+static u32 update_bus_time(struct fw_ohci *ohci)
+{
+        u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
+
+        if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
+                ohci->bus_time += 0x40;
+
+        return ohci->bus_time | cycle_time_seconds;
+}
+
 static void bus_reset_tasklet(unsigned long data)
 {
         struct fw_ohci *ohci = (struct fw_ohci *)data;
@@ -1319,6 +1442,7 @@ static void bus_reset_tasklet(unsigned long data)
         unsigned long flags;
         void *free_rom = NULL;
         dma_addr_t free_rom_bus = 0;
+        bool is_new_root;
 
         reg = reg_read(ohci, OHCI1394_NodeID);
         if (!(reg & OHCI1394_NodeID_idValid)) {
@@ -1332,6 +1456,12 @@ static void bus_reset_tasklet(unsigned long data)
         ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
                                OHCI1394_NodeID_nodeNumber);
 
+        is_new_root = (reg & OHCI1394_NodeID_root) != 0;
+        if (!(ohci->is_root && is_new_root))
+                reg_write(ohci, OHCI1394_LinkControlSet,
+                          OHCI1394_LinkControl_cycleMaster);
+        ohci->is_root = is_new_root;
+
         reg = reg_read(ohci, OHCI1394_SelfIDCount);
         if (reg & OHCI1394_SelfIDCount_selfIDError) {
                 fw_notify("inconsistent self IDs\n");
@@ -1439,7 +1569,9 @@ static void bus_reset_tasklet(unsigned long data)
                     self_id_count, ohci->self_id_buffer);
 
         fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
-                                 self_id_count, ohci->self_id_buffer);
+                                 self_id_count, ohci->self_id_buffer,
+                                 ohci->csr_state_setclear_abdicate);
+        ohci->csr_state_setclear_abdicate = false;
 }
 
 static irqreturn_t irq_handler(int irq, void *data)
@@ -1515,6 +1647,12 @@ static irqreturn_t irq_handler(int irq, void *data)
                 fw_notify("isochronous cycle inconsistent\n");
         }
 
+        if (event & OHCI1394_cycle64Seconds) {
+                spin_lock(&ohci->lock);
+                update_bus_time(ohci);
+                spin_unlock(&ohci->lock);
+        }
+
         return IRQ_HANDLED;
 }
 
@@ -1577,7 +1715,7 @@ static int configure_1394a_enhancements(struct fw_ohci *ohci)
                 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
                 set = 0;
         }
-        ret = ohci_update_phy_reg(&ohci->card, 5, clear, set);
+        ret = update_phy_reg(ohci, 5, clear, set);
         if (ret < 0)
                 return ret;
 
@@ -1599,7 +1737,7 @@ static int ohci_enable(struct fw_card *card,
 {
         struct fw_ohci *ohci = fw_ohci(card);
         struct pci_dev *dev = to_pci_dev(card->device);
-        u32 lps;
+        u32 lps, seconds, version, irqs;
         int i, ret;
 
         if (software_reset(ohci)) {
@@ -1635,17 +1773,34 @@ static int ohci_enable(struct fw_card *card,
                   OHCI1394_HCControl_noByteSwapData);
 
         reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
-        reg_write(ohci, OHCI1394_LinkControlClear,
-                  OHCI1394_LinkControl_rcvPhyPkt);
         reg_write(ohci, OHCI1394_LinkControlSet,
                   OHCI1394_LinkControl_rcvSelfID |
+                  OHCI1394_LinkControl_rcvPhyPkt |
                   OHCI1394_LinkControl_cycleTimerEnable |
                   OHCI1394_LinkControl_cycleMaster);
 
         reg_write(ohci, OHCI1394_ATRetries,
                   OHCI1394_MAX_AT_REQ_RETRIES |
                   (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
-                  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
+                  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
+                  (200 << 16));
+
+        seconds = lower_32_bits(get_seconds());
+        reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25);
+        ohci->bus_time = seconds & ~0x3f;
+
+        version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+        if (version >= OHCI_VERSION_1_1) {
+                reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
+                          0xfffffffe);
+                card->broadcast_channel_auto_allocated = true;
+        }
+
+        /* Get implemented bits of the priority arbitration request counter. */
+        reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
+        ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
+        reg_write(ohci, OHCI1394_FairnessControl, 0);
+        card->priority_budget_implemented = ohci->pri_req_max != 0;
 
         ar_context_run(&ohci->ar_request_ctx);
         ar_context_run(&ohci->ar_response_ctx);
@@ -1653,16 +1808,6 @@ static int ohci_enable(struct fw_card *card,
         reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
         reg_write(ohci, OHCI1394_IntEventClear, ~0);
         reg_write(ohci, OHCI1394_IntMaskClear, ~0);
-        reg_write(ohci, OHCI1394_IntMaskSet,
-                  OHCI1394_selfIDComplete |
-                  OHCI1394_RQPkt | OHCI1394_RSPkt |
-                  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
-                  OHCI1394_isochRx | OHCI1394_isochTx |
-                  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
-                  OHCI1394_cycleInconsistent | OHCI1394_regAccessFail |
-                  OHCI1394_masterIntEnable);
-        if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
-                reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
 
         ret = configure_1394a_enhancements(ohci);
         if (ret < 0)
@@ -1719,26 +1864,38 @@ static int ohci_enable(struct fw_card *card,
 
         reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
 
+        if (!(ohci->quirks & QUIRK_NO_MSI))
+                pci_enable_msi(dev);
         if (request_irq(dev->irq, irq_handler,
-                        IRQF_SHARED, ohci_driver_name, ohci)) {
-                fw_error("Failed to allocate shared interrupt %d.\n",
-                         dev->irq);
+                        pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
+                        ohci_driver_name, ohci)) {
+                fw_error("Failed to allocate interrupt %d.\n", dev->irq);
+                pci_disable_msi(dev);
                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                   ohci->config_rom, ohci->config_rom_bus);
                 return -EIO;
         }
 
+        irqs =  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
                OHCI1394_RQPkt | OHCI1394_RSPkt |
+                OHCI1394_isochTx | OHCI1394_isochRx |
+                OHCI1394_postedWriteErr |
+                OHCI1394_selfIDComplete |
+                OHCI1394_regAccessFail |
+                OHCI1394_cycle64Seconds |
+                OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong |
+                OHCI1394_masterIntEnable;
+        if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
+                irqs |= OHCI1394_busReset;
+        reg_write(ohci, OHCI1394_IntMaskSet, irqs);
+
         reg_write(ohci, OHCI1394_HCControlSet,
                   OHCI1394_HCControl_linkEnable |
                   OHCI1394_HCControl_BIBimageValid);
         flush_writes(ohci);
 
-        /*
-         * We are ready to go, initiate bus reset to finish the
-         * initialization.
-         */
-
-        fw_core_initiate_bus_reset(&ohci->card, 1);
+        /* We are ready to go, reset bus to finish initialization. */
+        fw_schedule_bus_reset(&ohci->card, false, true);
 
         return 0;
 }
@@ -1813,7 +1970,7 @@ static int ohci_set_config_rom(struct fw_card *card,
          * takes effect.
          */
         if (ret == 0)
-                fw_core_initiate_bus_reset(&ohci->card, 1);
+                fw_schedule_bus_reset(&ohci->card, true, true);
         else
                 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                   next_config_rom, next_config_rom_bus);
@@ -1903,61 +2060,117 @@ static int ohci_enable_phys_dma(struct fw_card *card,
 #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
 }
 
-static u32 cycle_timer_ticks(u32 cycle_timer)
+static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
 {
-        u32 ticks;
+        struct fw_ohci *ohci = fw_ohci(card);
+        unsigned long flags;
+        u32 value;
+
+        switch (csr_offset) {
+        case CSR_STATE_CLEAR:
+        case CSR_STATE_SET:
+                if (ohci->is_root &&
+                    (reg_read(ohci, OHCI1394_LinkControlSet) &
+                     OHCI1394_LinkControl_cycleMaster))
+                        value = CSR_STATE_BIT_CMSTR;
+                else
+                        value = 0;
+                if (ohci->csr_state_setclear_abdicate)
+                        value |= CSR_STATE_BIT_ABDICATE;
 
-        ticks = cycle_timer & 0xfff;
-        ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
-        ticks += (3072 * 8000) * (cycle_timer >> 25);
+                return value;
 
-        return ticks;
+        case CSR_NODE_IDS:
+                return reg_read(ohci, OHCI1394_NodeID) << 16;
+
+        case CSR_CYCLE_TIME:
+                return get_cycle_time(ohci);
+
+        case CSR_BUS_TIME:
+                /*
+                 * We might be called just after the cycle timer has wrapped
+                 * around but just before the cycle64Seconds handler, so we
+                 * better check here, too, if the bus time needs to be updated.
+                 */
+                spin_lock_irqsave(&ohci->lock, flags);
+                value = update_bus_time(ohci);
+                spin_unlock_irqrestore(&ohci->lock, flags);
+                return value;
+
+        case CSR_BUSY_TIMEOUT:
+                value = reg_read(ohci, OHCI1394_ATRetries);
+                return (value >> 4) & 0x0ffff00f;
+
+        case CSR_PRIORITY_BUDGET:
+                return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
+                        (ohci->pri_req_max << 8);
+
+        default:
+                WARN_ON(1);
+                return 0;
+        }
 }
 
-/*
- * Some controllers exhibit one or more of the following bugs when updating the
- * iso cycle timer register:
- *  - When the lowest six bits are wrapping around to zero, a read that happens
- *    at the same time will return garbage in the lowest ten bits.
- *  - When the cycleOffset field wraps around to zero, the cycleCount field is
- *    not incremented for about 60 ns.
- *  - Occasionally, the entire register reads zero.
- *
- * To catch these, we read the register three times and ensure that the
- * difference between each two consecutive reads is approximately the same, i.e.
- * less than twice the other. Furthermore, any negative difference indicates an
- * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
- * execute, so we have enough precision to compute the ratio of the differences.)
- */
-static u32 ohci_get_cycle_time(struct fw_card *card)
+static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
 {
         struct fw_ohci *ohci = fw_ohci(card);
-        u32 c0, c1, c2;
-        u32 t0, t1, t2;
-        s32 diff01, diff12;
-        int i;
+        unsigned long flags;
 
-        c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+        switch (csr_offset) {
+        case CSR_STATE_CLEAR:
+                if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
+                        reg_write(ohci, OHCI1394_LinkControlClear,
+                                  OHCI1394_LinkControl_cycleMaster);
+                        flush_writes(ohci);
+                }
+                if (value & CSR_STATE_BIT_ABDICATE)
+                        ohci->csr_state_setclear_abdicate = false;
+                break;
 
-        if (ohci->quirks & QUIRK_CYCLE_TIMER) {
-                i = 0;
-                c1 = c2;
-                c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
-                do {
-                        c0 = c1;
-                        c1 = c2;
-                        c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
-                        t0 = cycle_timer_ticks(c0);
-                        t1 = cycle_timer_ticks(c1);
-                        t2 = cycle_timer_ticks(c2);
-                        diff01 = t1 - t0;
-                        diff12 = t2 - t1;
-                } while ((diff01 <= 0 || diff12 <= 0 ||
-                          diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
-                         && i++ < 20);
-        }
+        case CSR_STATE_SET:
+                if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
+                        reg_write(ohci, OHCI1394_LinkControlSet,
+                                  OHCI1394_LinkControl_cycleMaster);
+                        flush_writes(ohci);
+                }
+                if (value & CSR_STATE_BIT_ABDICATE)
+                        ohci->csr_state_setclear_abdicate = true;
+                break;
 
-        return c2;
+        case CSR_NODE_IDS:
+                reg_write(ohci, OHCI1394_NodeID, value >> 16);
+                flush_writes(ohci);
+                break;
+
+        case CSR_CYCLE_TIME:
+                reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
+                reg_write(ohci, OHCI1394_IntEventSet,
+                          OHCI1394_cycleInconsistent);
+                flush_writes(ohci);
+                break;
+
+        case CSR_BUS_TIME:
+                spin_lock_irqsave(&ohci->lock, flags);
+                ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f);
+                spin_unlock_irqrestore(&ohci->lock, flags);
+                break;
+
+        case CSR_BUSY_TIMEOUT:
+                value = (value & 0xf) | ((value & 0xf) << 4) |
+                        ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
+                reg_write(ohci, OHCI1394_ATRetries, value);
+                flush_writes(ohci);
+                break;
+
+        case CSR_PRIORITY_BUDGET:
+                reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
+                flush_writes(ohci);
+                break;
+
+        default:
+                WARN_ON(1);
+                break;
+        }
 }
 
 static void copy_iso_headers(struct iso_context *ctx, void *p)
@@ -1992,10 +2205,9 @@ static int handle_ir_packet_per_buffer(struct context *context,
         __le32 *ir_header;
         void *p;
 
-        for (pd = d; pd <= last; pd++) {
+        for (pd = d; pd <= last; pd++)
                 if (pd->transfer_status)
                         break;
-        }
         if (pd > last)
                 /* Descriptor(s) not done yet, stop iteration */
                 return 0;
@@ -2005,16 +2217,38 @@ static int handle_ir_packet_per_buffer(struct context *context,
 
         if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
                 ir_header = (__le32 *) p;
-                ctx->base.callback(&ctx->base,
-                                   le32_to_cpu(ir_header[0]) & 0xffff,
-                                   ctx->header_length, ctx->header,
-                                   ctx->base.callback_data);
+                ctx->base.callback.sc(&ctx->base,
+                                      le32_to_cpu(ir_header[0]) & 0xffff,
+                                      ctx->header_length, ctx->header,
+                                      ctx->base.callback_data);
                 ctx->header_length = 0;
         }
 
         return 1;
 }
 
+/* d == last because each descriptor block is only a single descriptor. */
+static int handle_ir_buffer_fill(struct context *context,
+                                 struct descriptor *d,
+                                 struct descriptor *last)
+{
+        struct iso_context *ctx =
+                container_of(context, struct iso_context, context);
+
+        if (!last->transfer_status)
+                /* Descriptor(s) not done yet, stop iteration */
+                return 0;
+
+        if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
+                ctx->base.callback.mc(&ctx->base,
+                                      le32_to_cpu(last->data_address) +
+                                      le16_to_cpu(last->req_count) -
+                                      le16_to_cpu(last->res_count),
+                                      ctx->base.callback_data);
+
+        return 1;
+}
+
 static int handle_it_packet(struct context *context,
                             struct descriptor *d,
                             struct descriptor *last)
@@ -2040,71 +2274,118 @@ static int handle_it_packet(struct context *context,
                 ctx->header_length += 4;
         }
         if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
-                ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
-                                   ctx->header_length, ctx->header,
-                                   ctx->base.callback_data);
+                ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count),
+                                      ctx->header_length, ctx->header,
+                                      ctx->base.callback_data);
                 ctx->header_length = 0;
         }
         return 1;
 }
 
+static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
+{
+        u32 hi = channels >> 32, lo = channels;
+
+        reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
+        reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
+        reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
+        reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
+        mmiowb();
+        ohci->mc_channels = channels;
+}
+
 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
                                 int type, int channel, size_t header_size)
 {
         struct fw_ohci *ohci = fw_ohci(card);
-        struct iso_context *ctx, *list;
-        descriptor_callback_t callback;
-        u64 *channels, dont_care = ~0ULL;
-        u32 *mask, regs;
+        struct iso_context *uninitialized_var(ctx);
+        descriptor_callback_t uninitialized_var(callback);
+        u64 *uninitialized_var(channels);
+        u32 *uninitialized_var(mask), uninitialized_var(regs);
         unsigned long flags;
-        int index, ret = -ENOMEM;
+        int index, ret = -EBUSY;
+
+        spin_lock_irqsave(&ohci->lock, flags);
 
-        if (type == FW_ISO_CONTEXT_TRANSMIT) {
-                channels = &dont_care;
+        switch (type) {
+        case FW_ISO_CONTEXT_TRANSMIT:
                 mask = &ohci->it_context_mask;
-                list = ohci->it_context_list;
                 callback = handle_it_packet;
-        } else {
+                index = ffs(*mask) - 1;
+                if (index >= 0) {
+                        *mask &= ~(1 << index);
+                        regs = OHCI1394_IsoXmitContextBase(index);
+                        ctx = &ohci->it_context_list[index];
+                }
+                break;
+
+        case FW_ISO_CONTEXT_RECEIVE:
                 channels = &ohci->ir_context_channels;
                 mask = &ohci->ir_context_mask;
-                list = ohci->ir_context_list;
                 callback = handle_ir_packet_per_buffer;
-        }
+                index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+                if (index >= 0) {
+                        *channels &= ~(1ULL << channel);
+                        *mask &= ~(1 << index);
+                        regs = OHCI1394_IsoRcvContextBase(index);
+                        ctx = &ohci->ir_context_list[index];
+                }
+                break;
 
-        spin_lock_irqsave(&ohci->lock, flags);
-        index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
-        if (index >= 0) {
-                *channels &= ~(1ULL << channel);
-                *mask &= ~(1 << index);
+        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+                mask = &ohci->ir_context_mask;
+                callback = handle_ir_buffer_fill;
+                index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
+                if (index >= 0) {
+                        ohci->mc_allocated = true;
+                        *mask &= ~(1 << index);
+                        regs = OHCI1394_IsoRcvContextBase(index);
+                        ctx = &ohci->ir_context_list[index];
+                }
+                break;
+
+        default:
+                index = -1;
+                ret = -ENOSYS;
         }
+
         spin_unlock_irqrestore(&ohci->lock, flags);
 
         if (index < 0)
-                return ERR_PTR(-EBUSY);
-
-        if (type == FW_ISO_CONTEXT_TRANSMIT)
-                regs = OHCI1394_IsoXmitContextBase(index);
-        else
-                regs = OHCI1394_IsoRcvContextBase(index);
+                return ERR_PTR(ret);
 
-        ctx = &list[index];
         memset(ctx, 0, sizeof(*ctx));
         ctx->header_length = 0;
         ctx->header = (void *) __get_free_page(GFP_KERNEL);
-        if (ctx->header == NULL)
+        if (ctx->header == NULL) {
+                ret = -ENOMEM;
                 goto out;
-
+        }
         ret = context_init(&ctx->context, ohci, regs, callback);
         if (ret < 0)
                 goto out_with_header;
 
+        if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+                set_multichannel_mask(ohci, 0);
+
         return &ctx->base;
 
  out_with_header:
         free_page((unsigned long)ctx->header);
  out:
         spin_lock_irqsave(&ohci->lock, flags);
+
+        switch (type) {
+        case FW_ISO_CONTEXT_RECEIVE:
+                *channels |= 1ULL << channel;
+                break;
+
+        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+                ohci->mc_allocated = false;
+                break;
+        }
         *mask |= 1 << index;
+
         spin_unlock_irqrestore(&ohci->lock, flags);
 
         return ERR_PTR(ret);
@@ -2115,10 +2396,11 @@ static int ohci_start_iso(struct fw_iso_context *base,
 {
         struct iso_context *ctx = container_of(base, struct iso_context, base);
         struct fw_ohci *ohci = ctx->context.ohci;
-        u32 control, match;
+        u32 control = IR_CONTEXT_ISOCH_HEADER, match;
         int index;
 
-        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+        switch (ctx->base.type) {
+        case FW_ISO_CONTEXT_TRANSMIT:
                 index = ctx - ohci->it_context_list;
                 match = 0;
                 if (cycle >= 0)
@@ -2128,9 +2410,13 @@ static int ohci_start_iso(struct fw_iso_context *base,
                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
                 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
                 context_run(&ctx->context, match);
-        } else {
+                break;
+
+        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+                control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
+                /* fall through */
+        case FW_ISO_CONTEXT_RECEIVE:
                 index = ctx - ohci->ir_context_list;
-                control = IR_CONTEXT_ISOCH_HEADER;
                 match = (tags << 28) | (sync << 8) | ctx->base.channel;
                 if (cycle >= 0) {
                         match |= (cycle & 0x07fff) << 12;
@@ -2141,6 +2427,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
                 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
                 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
                 context_run(&ctx->context, control);
+                break;
         }
 
         return 0;
@@ -2152,12 +2439,17 @@ static int ohci_stop_iso(struct fw_iso_context *base)
         struct iso_context *ctx = container_of(base, struct iso_context, base);
         int index;
 
-        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+        switch (ctx->base.type) {
+        case FW_ISO_CONTEXT_TRANSMIT:
                 index = ctx - ohci->it_context_list;
                 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
-        } else {
+                break;
+
+        case FW_ISO_CONTEXT_RECEIVE:
+        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                 index = ctx - ohci->ir_context_list;
                 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
+                break;
         }
         flush_writes(ohci);
         context_stop(&ctx->context);
@@ -2178,24 +2470,65 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
 
         spin_lock_irqsave(&ohci->lock, flags);
 
-        if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+        switch (base->type) {
+        case FW_ISO_CONTEXT_TRANSMIT:
                 index = ctx - ohci->it_context_list;
                 ohci->it_context_mask |= 1 << index;
-        } else {
+                break;
+
+        case FW_ISO_CONTEXT_RECEIVE:
                 index = ctx - ohci->ir_context_list;
                 ohci->ir_context_mask |= 1 << index;
                 ohci->ir_context_channels |= 1ULL << base->channel;
+                break;
+
+        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+                index = ctx - ohci->ir_context_list;
+                ohci->ir_context_mask |= 1 << index;
+                ohci->ir_context_channels |= ohci->mc_channels;
+                ohci->mc_channels = 0;
+                ohci->mc_allocated = false;
+                break;
         }
 
         spin_unlock_irqrestore(&ohci->lock, flags);
 }
 
-static int ohci_queue_iso_transmit(struct fw_iso_context *base,
-                                   struct fw_iso_packet *packet,
-                                   struct fw_iso_buffer *buffer,
-                                   unsigned long payload)
+static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
+{
+        struct fw_ohci *ohci = fw_ohci(base->card);
+        unsigned long flags;
+        int ret;
+
+        switch (base->type) {
+        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+
+                spin_lock_irqsave(&ohci->lock, flags);
+
+                /* Don't allow multichannel to grab other contexts' channels. */
+                if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
+                        *channels = ohci->ir_context_channels;
+                        ret = -EBUSY;
+                } else {
+                        set_multichannel_mask(ohci, *channels);
+                        ret = 0;
+                }
+
+                spin_unlock_irqrestore(&ohci->lock, flags);
+
+                break;
+        default:
+                ret = -EINVAL;
+        }
+
+        return ret;
+}
+
+static int queue_iso_transmit(struct iso_context *ctx,
+                              struct fw_iso_packet *packet,
+                              struct fw_iso_buffer *buffer,
+                              unsigned long payload)
 {
-        struct iso_context *ctx = container_of(base, struct iso_context, base);
         struct descriptor *d, *last, *pd;
         struct fw_iso_packet *p;
         __le32 *header;
@@ -2291,14 +2624,12 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
         return 0;
 }
 
-static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
-                                        struct fw_iso_packet *packet,
-                                        struct fw_iso_buffer *buffer,
-                                        unsigned long payload)
+static int queue_iso_packet_per_buffer(struct iso_context *ctx,
+                                       struct fw_iso_packet *packet,
+                                       struct fw_iso_buffer *buffer,
+                                       unsigned long payload)
 {
-        struct iso_context *ctx = container_of(base, struct iso_context, base);
         struct descriptor *d, *pd;
-        struct fw_iso_packet *p = packet;
         dma_addr_t d_bus, page_bus;
         u32 z, header_z, rest;
         int i, j, length;
@@ -2308,14 +2639,14 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
          * The OHCI controller puts the isochronous header and trailer in the
          * buffer, so we need at least 8 bytes.
          */
-        packet_count = p->header_length / ctx->base.header_size;
+        packet_count = packet->header_length / ctx->base.header_size;
         header_size  = max(ctx->base.header_size, (size_t)8);
 
         /* Get header size in number of descriptors. */
         header_z = DIV_ROUND_UP(header_size, sizeof(*d));
         page     = payload >> PAGE_SHIFT;
         offset   = payload & ~PAGE_MASK;
-        payload_per_buffer = p->payload_length / packet_count;
+        payload_per_buffer = packet->payload_length / packet_count;
 
         for (i = 0; i < packet_count; i++) {
                 /* d points to the header descriptor */
@@ -2327,7 +2658,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 
                 d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
                                               DESCRIPTOR_INPUT_MORE);
-                if (p->skip && i == 0)
+                if (packet->skip && i == 0)
                         d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
                 d->req_count = cpu_to_le16(header_size);
                 d->res_count = d->req_count;
@@ -2360,7 +2691,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
                 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                           DESCRIPTOR_INPUT_LAST |
                                           DESCRIPTOR_BRANCH_ALWAYS);
-                if (p->interrupt && i == packet_count - 1)
+                if (packet->interrupt && i == packet_count - 1)
                         pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
 
                 context_append(&ctx->context, d, z, header_z);
@@ -2369,6 +2700,58 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
         return 0;
 }
 
+static int queue_iso_buffer_fill(struct iso_context *ctx,
+                                 struct fw_iso_packet *packet,
+                                 struct fw_iso_buffer *buffer,
+                                 unsigned long payload)
+{
+        struct descriptor *d;
+        dma_addr_t d_bus, page_bus;
+        int page, offset, rest, z, i, length;
+
+        page   = payload >> PAGE_SHIFT;
+        offset = payload & ~PAGE_MASK;
+        rest   = packet->payload_length;
+
+        /* We need one descriptor for each page in the buffer. */
+        z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
+
+        if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
+                return -EFAULT;
+
+        for (i = 0; i < z; i++) {
+                d = context_get_descriptors(&ctx->context, 1, &d_bus);
+                if (d == NULL)
+                        return -ENOMEM;
+
+                d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+                                         DESCRIPTOR_BRANCH_ALWAYS);
+                if (packet->skip && i == 0)
+                        d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+                if (packet->interrupt && i == z - 1)
+                        d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
+
+                if (offset + rest < PAGE_SIZE)
+                        length = rest;
+                else
+                        length = PAGE_SIZE - offset;
+                d->req_count = cpu_to_le16(length);
+                d->res_count = d->req_count;
+                d->transfer_status = 0;
+
+                page_bus = page_private(buffer->pages[page]);
+                d->data_address = cpu_to_le32(page_bus + offset);
+
+                rest -= length;
+                offset = 0;
+                page++;
+
+                context_append(&ctx->context, d, 1, 0);
+        }
+
+        return 0;
+}
+
 static int ohci_queue_iso(struct fw_iso_context *base,
                           struct fw_iso_packet *packet,
                           struct fw_iso_buffer *buffer,
@@ -2376,14 +2759,20 @@ static int ohci_queue_iso(struct fw_iso_context *base,
 {
         struct iso_context *ctx = container_of(base, struct iso_context, base);
         unsigned long flags;
-        int ret;
+        int ret = -ENOSYS;
 
         spin_lock_irqsave(&ctx->context.ohci->lock, flags);
-        if (base->type == FW_ISO_CONTEXT_TRANSMIT)
-                ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
-        else
-                ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
-                                                buffer, payload);
+        switch (base->type) {
+        case FW_ISO_CONTEXT_TRANSMIT:
+                ret = queue_iso_transmit(ctx, packet, buffer, payload);
+                break;
+        case FW_ISO_CONTEXT_RECEIVE:
+                ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
+                break;
+        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+                ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
+                break;
+        }
         spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
 
         return ret;
@@ -2391,16 +2780,19 @@ static int ohci_queue_iso(struct fw_iso_context *base,
 
 static const struct fw_card_driver ohci_driver = {
         .enable                 = ohci_enable,
+        .read_phy_reg           = ohci_read_phy_reg,
         .update_phy_reg         = ohci_update_phy_reg,
         .set_config_rom         = ohci_set_config_rom,
         .send_request           = ohci_send_request,
         .send_response          = ohci_send_response,
         .cancel_packet          = ohci_cancel_packet,
         .enable_phys_dma        = ohci_enable_phys_dma,
-        .get_cycle_time         = ohci_get_cycle_time,
+        .read_csr               = ohci_read_csr,
+        .write_csr              = ohci_write_csr,
 
         .allocate_iso_context   = ohci_allocate_iso_context,
         .free_iso_context       = ohci_free_iso_context,
+        .set_iso_channels       = ohci_set_iso_channels,
         .queue_iso              = ohci_queue_iso,
         .start_iso              = ohci_start_iso,
         .stop_iso               = ohci_stop_iso,
@@ -2465,6 +2857,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
         pci_set_drvdata(dev, ohci);
 
         spin_lock_init(&ohci->lock);
+        mutex_init(&ohci->phy_reg_mutex);
 
         tasklet_init(&ohci->bus_reset_tasklet,
                      bus_reset_tasklet, (unsigned long)ohci);
@@ -2625,6 +3018,7 @@ static void pci_remove(struct pci_dev *dev)
         context_release(&ohci->at_response_ctx);
         kfree(ohci->it_context_list);
         kfree(ohci->ir_context_list);
+        pci_disable_msi(dev);
         pci_iounmap(dev, ohci->registers);
         pci_release_region(dev, 0);
         pci_disable_device(dev);
@@ -2642,6 +3036,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
 
         software_reset(ohci);
         free_irq(dev->irq, ohci);
+        pci_disable_msi(dev);
         err = pci_save_state(dev);
         if (err) {
                 fw_error("pci_save_state failed\n");
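
Reader's note (an illustration, not part of the patch): the CSR_BUS_TIME support added above extends the 32-bit cycle timer to a full bus time by keeping the timer's seconds MSB in bit 6 of ohci->bus_time, so a 64-second rollover can be detected from either the cycle64Seconds interrupt or a CSR read. A small self-contained sketch of that bookkeeping follows; the struct bus_clock and the standalone framing are hypothetical stand-ins for the driver state, not the driver's API.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the patch's update_bus_time() bookkeeping: bus_time holds the
 * seconds count rounded down to a multiple of 64, and bit 6 mirrors the
 * cycle timer's seconds MSB.  Whenever that bit disagrees with the value
 * read from the timer, another 64 seconds have elapsed.
 */
struct bus_clock {
        uint32_t bus_time;      /* multiple of 0x40, bit 6 mirrors the timer */
};

static uint32_t update_bus_time(struct bus_clock *c, uint32_t cycle_time_seconds)
{
        if ((c->bus_time & 0x40) != (cycle_time_seconds & 0x40))
                c->bus_time += 0x40;

        return c->bus_time | cycle_time_seconds;
}

int main(void)
{
        struct bus_clock c = { .bus_time = 0 };

        /* 7-bit seconds field as read from the cycle timer */
        printf("%u\n", (unsigned)update_bus_time(&c, 0x3f)); /* 63: same epoch  */
        printf("%u\n", (unsigned)update_bus_time(&c, 0x40)); /* 64: rolled over */
        printf("%u\n", (unsigned)update_bus_time(&c, 0x01)); /* 129: next epoch */
        return 0;
}
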