Diffstat (limited to 'drivers/firewire/ohci.c')
 drivers/firewire/ohci.c | 877 ++++++++++++++++++++++++++++++-----------
 1 file changed, 633 insertions(+), 244 deletions(-)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9dcb17d51aee..ebb897329c1e 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -18,6 +18,7 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
@@ -40,6 +41,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/time.h>
+#include <linux/vmalloc.h>
 
 #include <asm/byteorder.h>
 #include <asm/page.h>
@@ -80,17 +82,23 @@ struct descriptor {
 #define COMMAND_PTR(regs)	((regs) + 12)
 #define CONTEXT_MATCH(regs)	((regs) + 16)
 
-struct ar_buffer {
-	struct descriptor descriptor;
-	struct ar_buffer *next;
-	__le32 data[0];
-};
+#define AR_BUFFER_SIZE	(32*1024)
+#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
+/* we need at least two pages for proper list management */
+#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
+
+#define MAX_ASYNC_PAYLOAD	4096
+#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
+#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
 
 struct ar_context {
 	struct fw_ohci *ohci;
-	struct ar_buffer *current_buffer;
-	struct ar_buffer *last_buffer;
+	struct page *pages[AR_BUFFERS];
+	void *buffer;
+	struct descriptor *descriptors;
+	dma_addr_t descriptors_bus;
 	void *pointer;
+	unsigned int last_buffer_index;
 	u32 regs;
 	struct tasklet_struct tasklet;
 };
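
Note: with 4 KiB pages, the new constants evaluate as sketched below — an illustrative calculation, not part of the patch:

/*
 * PAGE_SIZE = 4096:
 *   AR_BUFFERS_MIN      = DIV_ROUND_UP(32 * 1024, 4096)  = 8
 *   AR_BUFFERS          = 8  (already >= 2)
 *   MAX_AR_PACKET_SIZE  = 16 + 4096 + 4                  = 4116
 *   AR_WRAPAROUND_PAGES = DIV_ROUND_UP(4116, 4096)       = 2
 *
 * i.e. an eight-page receive ring whose first two pages are also
 * mapped a second time directly behind the ring, so a packet that
 * straddles the ring end can be parsed through one linear pointer.
 */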
@@ -117,6 +125,8 @@ struct context {
 	struct fw_ohci *ohci;
 	u32 regs;
 	int total_allocation;
+	bool running;
+	bool flushing;
 
 	/*
 	 * List of page-sized buffers for storing DMA descriptors.
@@ -161,6 +171,9 @@ struct iso_context {
 	int excess_bytes;
 	void *header;
 	size_t header_length;
+
+	u8 sync;
+	u8 tags;
 };
 
 #define CONFIG_ROM_SIZE 1024
@@ -177,7 +190,8 @@ struct fw_ohci {
 	u32 bus_time;
 	bool is_root;
 	bool csr_state_setclear_abdicate;
-
+	int n_ir;
+	int n_it;
 	/*
 	 * Spinlock for accessing fw_ohci data.  Never call out of
 	 * this driver with this lock held.
@@ -186,14 +200,19 @@ struct fw_ohci {
 
 	struct mutex phy_reg_mutex;
 
+	void *misc_buffer;
+	dma_addr_t misc_buffer_bus;
+
 	struct ar_context ar_request_ctx;
 	struct ar_context ar_response_ctx;
 	struct context at_request_ctx;
 	struct context at_response_ctx;
 
+	u32 it_context_support;
 	u32 it_context_mask; /* unoccupied IT contexts */
 	struct iso_context *it_context_list;
 	u64 ir_context_channels; /* unoccupied channels */
+	u32 ir_context_support;
 	u32 ir_context_mask; /* unoccupied IR contexts */
 	struct iso_context *ir_context_list;
 	u64 mc_channels; /* channels in use by the multichannel IR context */
@@ -242,8 +261,10 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
+#define PCI_DEVICE_ID_AGERE_FW643	0x5901
 #define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
 #define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
+#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd
 
 #define QUIRK_CYCLE_TIMER	1
 #define QUIRK_RESET_PACKET	2
@@ -253,18 +274,34 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
 
 /* In case of multiple matches in ohci_quirks[], only the first one is used. */
 static const struct {
-	unsigned short vendor, device, flags;
+	unsigned short vendor, device, revision, flags;
 } ohci_quirks[] = {
-	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
-						       QUIRK_RESET_PACKET |
-						       QUIRK_NO_1394A},
-	{PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
-	{PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
-	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
-	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
-	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
-	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
-	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
+	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
+		QUIRK_CYCLE_TIMER},
+
+	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
+		QUIRK_BE_HEADERS},
+
+	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
+		QUIRK_NO_MSI},
+
+	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
+		QUIRK_NO_MSI},
+
+	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
+		QUIRK_CYCLE_TIMER},
+
+	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
+		QUIRK_CYCLE_TIMER},
+
+	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
+		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
+
+	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
+		QUIRK_RESET_PACKET},
+
+	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
+		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
 };
 
 /* This overrides anything that was found in ohci_quirks[]. */
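
Note: the new third column is an upper bound on the chip revision, not an exact match; an entry applies when its revision field is PCI_ANY_ID or at least dev->revision (see the matching loop in pci_probe() further down). A sketch of the semantics, using a hypothetical revision-7 FW643:

/* Illustrative only: with dev->revision == 7, the FW643 entry above
 * (revision bound 6) does not match, so QUIRK_NO_MSI is not applied;
 * a revision-6-or-older chip would match. */
bool applies = quirk->revision == (unsigned short)PCI_ANY_ID ||
	       quirk->revision >= dev->revision;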
@@ -304,7 +341,7 @@ static void log_irqs(u32 evt)
 	    !(evt & OHCI1394_busReset))
 		return;
 
-	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
 		  evt & OHCI1394_selfIDComplete ? " selfID" : "",
 		  evt & OHCI1394_RQPkt ? " AR_req" : "",
 		  evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -317,6 +354,7 @@ static void log_irqs(u32 evt)
 		  evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
 		  evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
 		  evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
+		  evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
 		  evt & OHCI1394_busReset ? " busReset" : "",
 		  evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
 			  OHCI1394_RSPkt | OHCI1394_reqTxComplete |
@@ -394,10 +432,6 @@ static const char *tcodes[] = {
 	[0xc] = "-reserved-",		[0xd] = "-reserved-",
 	[0xe] = "link internal",	[0xf] = "-reserved-",
 };
-static const char *phys[] = {
-	[0x0] = "phy config packet",	[0x1] = "link-on packet",
-	[0x2] = "self-id packet",	[0x3] = "-reserved-",
-};
 
 static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
 {
@@ -416,12 +450,6 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
 		return;
 	}
 
-	if (header[0] == ~header[1]) {
-		fw_notify("A%c %s, %s, %08x\n",
-			  dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
-		return;
-	}
-
 	switch (tcode) {
 	case 0x0: case 0x6: case 0x8:
 		snprintf(specific, sizeof(specific), " = %08x",
@@ -436,9 +464,13 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
 	}
 
 	switch (tcode) {
-	case 0xe: case 0xa:
+	case 0xa:
 		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
 		break;
+	case 0xe:
+		fw_notify("A%c %s, PHY %08x %08x\n",
+			  dir, evts[evt], header[1], header[2]);
+		break;
 	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
 		fw_notify("A%c spd %x tl %02x, "
 			  "%04x -> %04x, %s, "
@@ -577,52 +609,150 @@ static int ohci_update_phy_reg(struct fw_card *card, int addr,
 	return ret;
 }
 
-static int ar_context_add_page(struct ar_context *ctx)
+static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
 {
-	struct device *dev = ctx->ohci->card.device;
-	struct ar_buffer *ab;
-	dma_addr_t uninitialized_var(ab_bus);
-	size_t offset;
+	return page_private(ctx->pages[i]);
+}
 
-	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
-	if (ab == NULL)
-		return -ENOMEM;
+static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
+{
+	struct descriptor *d;
 
-	ab->next = NULL;
-	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
-	ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
-					     DESCRIPTOR_STATUS |
-					     DESCRIPTOR_BRANCH_ALWAYS);
-	offset = offsetof(struct ar_buffer, data);
-	ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
-	ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
-	ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
-	ab->descriptor.branch_address = 0;
+	d = &ctx->descriptors[index];
+	d->branch_address &= cpu_to_le32(~0xf);
+	d->res_count = cpu_to_le16(PAGE_SIZE);
+	d->transfer_status = 0;
 
 	wmb(); /* finish init of new descriptors before branch_address update */
-	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
-	ctx->last_buffer->next = ab;
-	ctx->last_buffer = ab;
+	d = &ctx->descriptors[ctx->last_buffer_index];
+	d->branch_address |= cpu_to_le32(1);
+
+	ctx->last_buffer_index = index;
 
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
-
-	return 0;
 }
 
 static void ar_context_release(struct ar_context *ctx)
 {
-	struct ar_buffer *ab, *ab_next;
-	size_t offset;
-	dma_addr_t ab_bus;
+	unsigned int i;
+
+	if (ctx->buffer)
+		vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);
+
+	for (i = 0; i < AR_BUFFERS; i++)
+		if (ctx->pages[i]) {
+			dma_unmap_page(ctx->ohci->card.device,
+				       ar_buffer_bus(ctx, i),
+				       PAGE_SIZE, DMA_FROM_DEVICE);
+			__free_page(ctx->pages[i]);
+		}
+}
+
+static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
+{
+	if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
+		reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+		flush_writes(ctx->ohci);
 
-	for (ab = ctx->current_buffer; ab; ab = ab_next) {
-		ab_next = ab->next;
-		offset = offsetof(struct ar_buffer, data);
-		ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-		dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
-				  ab, ab_bus);
+		fw_error("AR error: %s; DMA stopped\n", error_msg);
 	}
+	/* FIXME: restart? */
+}
+
+static inline unsigned int ar_next_buffer_index(unsigned int index)
+{
+	return (index + 1) % AR_BUFFERS;
+}
+
+static inline unsigned int ar_prev_buffer_index(unsigned int index)
+{
+	return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
+}
+
+static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
+{
+	return ar_next_buffer_index(ctx->last_buffer_index);
+}
+
+/*
+ * We search for the buffer that contains the last AR packet DMA data written
+ * by the controller.
+ */
+static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
+						 unsigned int *buffer_offset)
+{
+	unsigned int i, next_i, last = ctx->last_buffer_index;
+	__le16 res_count, next_res_count;
+
+	i = ar_first_buffer_index(ctx);
+	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+
+	/* A buffer that is not yet completely filled must be the last one. */
+	while (i != last && res_count == 0) {
+
+		/* Peek at the next descriptor. */
+		next_i = ar_next_buffer_index(i);
+		rmb(); /* read descriptors in order */
+		next_res_count = ACCESS_ONCE(
+				ctx->descriptors[next_i].res_count);
+		/*
+		 * If the next descriptor is still empty, we must stop at this
+		 * descriptor.
+		 */
+		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
+			/*
+			 * The exception is when the DMA data for one packet is
+			 * split over three buffers; in this case, the middle
+			 * buffer's descriptor might be never updated by the
+			 * controller and look still empty, and we have to peek
+			 * at the third one.
+			 */
+			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
+				next_i = ar_next_buffer_index(next_i);
+				rmb();
+				next_res_count = ACCESS_ONCE(
+					ctx->descriptors[next_i].res_count);
+				if (next_res_count != cpu_to_le16(PAGE_SIZE))
+					goto next_buffer_is_active;
+			}
+
+			break;
+		}
+
+next_buffer_is_active:
+		i = next_i;
+		res_count = next_res_count;
+	}
+
+	rmb(); /* read res_count before the DMA data */
+
+	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
+	if (*buffer_offset > PAGE_SIZE) {
+		*buffer_offset = 0;
+		ar_context_abort(ctx, "corrupted descriptor");
+	}
+
+	return i;
+}
+
+static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
+				    unsigned int end_buffer_index,
+				    unsigned int end_buffer_offset)
+{
+	unsigned int i;
+
+	i = ar_first_buffer_index(ctx);
+	while (i != end_buffer_index) {
+		dma_sync_single_for_cpu(ctx->ohci->card.device,
+					ar_buffer_bus(ctx, i),
+					PAGE_SIZE, DMA_FROM_DEVICE);
+		i = ar_next_buffer_index(i);
+	}
+	if (end_buffer_offset > 0)
+		dma_sync_single_for_cpu(ctx->ohci->card.device,
+					ar_buffer_bus(ctx, i),
+					end_buffer_offset, DMA_FROM_DEVICE);
 }
 
 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
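
Note: the lock-free scan in ar_search_last_active_buffer() rests on one invariant of INPUT_MORE descriptors, sketched here with the patch's own names — res_count counts the bytes the controller has not yet written:

/* res_count == PAGE_SIZE: buffer i is still empty;
 * res_count == 0:         buffer i is completely filled;
 * hence the number of valid bytes in buffer i: */
unsigned int filled = PAGE_SIZE -
	le16_to_cpu(ACCESS_ONCE(ctx->descriptors[i].res_count));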
@@ -665,6 +795,10 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 		p.header[3] = cond_le32_to_cpu(buffer[3]);
 		p.header_length = 16;
 		p.payload_length = p.header[3] >> 16;
+		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
+			ar_context_abort(ctx, "invalid packet length");
+			return NULL;
+		}
 		break;
 
 	case TCODE_WRITE_RESPONSE:
@@ -675,9 +809,8 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 		break;
 
 	default:
-		/* FIXME: Stop context, discard everything, and restart? */
-		p.header_length = 0;
-		p.payload_length = 0;
+		ar_context_abort(ctx, "invalid tcode");
+		return NULL;
 	}
 
 	p.payload = (void *) buffer + p.header_length;
@@ -727,99 +860,159 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 	return buffer + length + 1;
 }
 
+static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
+{
+	void *next;
+
+	while (p < end) {
+		next = handle_ar_packet(ctx, p);
+		if (!next)
+			return p;
+		p = next;
+	}
+
+	return p;
+}
+
+static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
+{
+	unsigned int i;
+
+	i = ar_first_buffer_index(ctx);
+	while (i != end_buffer) {
+		dma_sync_single_for_device(ctx->ohci->card.device,
+					   ar_buffer_bus(ctx, i),
+					   PAGE_SIZE, DMA_FROM_DEVICE);
+		ar_context_link_page(ctx, i);
+		i = ar_next_buffer_index(i);
+	}
+}
+
 static void ar_context_tasklet(unsigned long data)
 {
 	struct ar_context *ctx = (struct ar_context *)data;
-	struct fw_ohci *ohci = ctx->ohci;
-	struct ar_buffer *ab;
-	struct descriptor *d;
-	void *buffer, *end;
+	unsigned int end_buffer_index, end_buffer_offset;
+	void *p, *end;
 
-	ab = ctx->current_buffer;
-	d = &ab->descriptor;
+	p = ctx->pointer;
+	if (!p)
+		return;
 
-	if (d->res_count == 0) {
-		size_t size, rest, offset;
-		dma_addr_t start_bus;
-		void *start;
+	end_buffer_index = ar_search_last_active_buffer(ctx,
+							&end_buffer_offset);
+	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
+	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
 
+	if (end_buffer_index < ar_first_buffer_index(ctx)) {
 		/*
-		 * This descriptor is finished and we may have a
-		 * packet split across this and the next buffer.  We
-		 * reuse the page for reassembling the split packet.
+		 * The filled part of the overall buffer wraps around; handle
+		 * all packets up to the buffer end here.  If the last packet
+		 * wraps around, its tail will be visible after the buffer end
+		 * because the buffer start pages are mapped there again.
 		 */
+		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
+		p = handle_ar_packets(ctx, p, buffer_end);
+		if (p < buffer_end)
+			goto error;
+		/* adjust p to point back into the actual buffer */
+		p -= AR_BUFFERS * PAGE_SIZE;
+	}
 
-		offset = offsetof(struct ar_buffer, data);
-		start = buffer = ab;
-		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-
-		ab = ab->next;
-		d = &ab->descriptor;
-		size = buffer + PAGE_SIZE - ctx->pointer;
-		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
-		memmove(buffer, ctx->pointer, size);
-		memcpy(buffer + size, ab->data, rest);
-		ctx->current_buffer = ab;
-		ctx->pointer = (void *) ab->data + rest;
-		end = buffer + size + rest;
-
-		while (buffer < end)
-			buffer = handle_ar_packet(ctx, buffer);
-
-		dma_free_coherent(ohci->card.device, PAGE_SIZE,
-				  start, start_bus);
-		ar_context_add_page(ctx);
-	} else {
-		buffer = ctx->pointer;
-		ctx->pointer = end =
-			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
-
-		while (buffer < end)
-			buffer = handle_ar_packet(ctx, buffer);
+	p = handle_ar_packets(ctx, p, end);
+	if (p != end) {
+		if (p > end)
+			ar_context_abort(ctx, "inconsistent descriptor");
+		goto error;
 	}
+
+	ctx->pointer = p;
+	ar_recycle_buffers(ctx, end_buffer_index);
+
+	return;
+
+error:
+	ctx->pointer = NULL;
 }
 
-static int ar_context_init(struct ar_context *ctx,
-			   struct fw_ohci *ohci, u32 regs)
+static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
+			   unsigned int descriptors_offset, u32 regs)
 {
-	struct ar_buffer ab;
+	unsigned int i;
+	dma_addr_t dma_addr;
+	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
+	struct descriptor *d;
 
 	ctx->regs = regs;
 	ctx->ohci = ohci;
-	ctx->last_buffer = &ab;
 	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
 
-	ar_context_add_page(ctx);
-	ar_context_add_page(ctx);
-	ctx->current_buffer = ab.next;
-	ctx->pointer = ctx->current_buffer->data;
+	for (i = 0; i < AR_BUFFERS; i++) {
+		ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
+		if (!ctx->pages[i])
+			goto out_of_memory;
+		dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
+					0, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(ohci->card.device, dma_addr)) {
+			__free_page(ctx->pages[i]);
+			ctx->pages[i] = NULL;
+			goto out_of_memory;
+		}
+		set_page_private(ctx->pages[i], dma_addr);
+	}
+
+	for (i = 0; i < AR_BUFFERS; i++)
+		pages[i] = ctx->pages[i];
+	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
+		pages[AR_BUFFERS + i] = ctx->pages[i];
+	ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
+				 -1, PAGE_KERNEL);
+	if (!ctx->buffer)
+		goto out_of_memory;
+
+	ctx->descriptors = ohci->misc_buffer + descriptors_offset;
+	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
+
+	for (i = 0; i < AR_BUFFERS; i++) {
+		d = &ctx->descriptors[i];
+		d->req_count = cpu_to_le16(PAGE_SIZE);
+		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+					 DESCRIPTOR_STATUS |
+					 DESCRIPTOR_BRANCH_ALWAYS);
+		d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i));
+		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
+			ar_next_buffer_index(i) * sizeof(struct descriptor));
+	}
 
 	return 0;
+
+out_of_memory:
+	ar_context_release(ctx);
+
+	return -ENOMEM;
 }
 
 static void ar_context_run(struct ar_context *ctx)
 {
-	struct ar_buffer *ab = ctx->current_buffer;
-	dma_addr_t ab_bus;
-	size_t offset;
+	unsigned int i;
 
-	offset = offsetof(struct ar_buffer, data);
-	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+	for (i = 0; i < AR_BUFFERS; i++)
+		ar_context_link_page(ctx, i);
 
-	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
+	ctx->pointer = ctx->buffer;
+
+	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
 	flush_writes(ctx->ohci);
 }
 
 static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
 {
-	int b, key;
+	__le16 branch;
 
-	b = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
-	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;
+	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);
 
 	/* figure out which descriptor the branch address goes in */
-	if (z == 2 && (b == 3 || key == 2))
+	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
 		return d;
 	else
 		return d + z - 1;
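
Note: to make the wraparound path of ar_context_tasklet() concrete, here is the pointer arithmetic with illustrative numbers (4 KiB pages, AR_BUFFERS = 8; the offsets are hypothetical):

void *p = ctx->buffer + 8 * PAGE_SIZE - 96;	/* packet starts 96 bytes
						   before the ring end */
p += 4116;		/* parsed linearly through the re-mapped pages */
p -= 8 * PAGE_SIZE;	/* fold back: now 4020 bytes into real page 0 */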
@@ -983,6 +1176,7 @@ static void context_run(struct context *ctx, u32 extra)
 		  le32_to_cpu(ctx->last->branch_address));
 	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
 	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
+	ctx->running = true;
 	flush_writes(ohci);
 }
 
@@ -999,9 +1193,6 @@ static void context_append(struct context *ctx,
 	wmb(); /* finish init of new descriptors before branch_address update */
 	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
 	ctx->prev = find_branch_descriptor(d, z);
-
-	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
-	flush_writes(ctx->ohci);
 }
 
 static void context_stop(struct context *ctx)
@@ -1010,6 +1201,7 @@ static void context_stop(struct context *ctx)
 	int i;
 
 	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+	ctx->running = false;
 	flush_writes(ctx->ohci);
 
 	for (i = 0; i < 10; i++) {
@@ -1023,6 +1215,7 @@ static void context_stop(struct context *ctx)
 }
 
 struct driver_data {
+	u8 inline_data[8];
 	struct fw_packet *packet;
 };
 
@@ -1040,7 +1233,6 @@ static int at_context_queue_packet(struct context *ctx,
 	struct descriptor *d, *last;
 	__le32 *header;
 	int z, tcode;
-	u32 reg;
 
 	d = context_get_descriptors(ctx, 4, &d_bus);
 	if (d == NULL) {
@@ -1054,21 +1246,27 @@ static int at_context_queue_packet(struct context *ctx,
 	/*
 	 * The DMA format for asyncronous link packets is different
 	 * from the IEEE1394 layout, so shift the fields around
-	 * accordingly.  If header_length is 8, it's a PHY packet, to
-	 * which we need to prepend an extra quadlet.
+	 * accordingly.
 	 */
 
+	tcode = (packet->header[0] >> 4) & 0x0f;
 	header = (__le32 *) &d[1];
-	switch (packet->header_length) {
-	case 16:
-	case 12:
+	switch (tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+	case TCODE_WRITE_BLOCK_REQUEST:
+	case TCODE_WRITE_RESPONSE:
+	case TCODE_READ_QUADLET_REQUEST:
+	case TCODE_READ_BLOCK_REQUEST:
+	case TCODE_READ_QUADLET_RESPONSE:
+	case TCODE_READ_BLOCK_RESPONSE:
+	case TCODE_LOCK_REQUEST:
+	case TCODE_LOCK_RESPONSE:
 		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
 					(packet->speed << 16));
 		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
 					(packet->header[0] & 0xffff0000));
 		header[2] = cpu_to_le32(packet->header[2]);
 
-		tcode = (packet->header[0] >> 4) & 0x0f;
 		if (TCODE_IS_BLOCK_PACKET(tcode))
 			header[3] = cpu_to_le32(packet->header[3]);
 		else
@@ -1077,18 +1275,18 @@ static int at_context_queue_packet(struct context *ctx,
 		d[0].req_count = cpu_to_le16(packet->header_length);
 		break;
 
-	case 8:
+	case TCODE_LINK_INTERNAL:
 		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
 					(packet->speed << 16));
-		header[1] = cpu_to_le32(packet->header[0]);
-		header[2] = cpu_to_le32(packet->header[1]);
+		header[1] = cpu_to_le32(packet->header[1]);
+		header[2] = cpu_to_le32(packet->header[2]);
 		d[0].req_count = cpu_to_le16(12);
 
-		if (is_ping_packet(packet->header))
+		if (is_ping_packet(&packet->header[1]))
 			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
 		break;
 
-	case 4:
+	case TCODE_STREAM_DATA:
 		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
 					(packet->speed << 16));
 		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
@@ -1101,20 +1299,28 @@ static int at_context_queue_packet(struct context *ctx,
 		return -1;
 	}
 
+	BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
 	driver_data = (struct driver_data *) &d[3];
 	driver_data->packet = packet;
 	packet->driver_data = driver_data;
 
 	if (packet->payload_length > 0) {
-		payload_bus =
-			dma_map_single(ohci->card.device, packet->payload,
-				       packet->payload_length, DMA_TO_DEVICE);
-		if (dma_mapping_error(ohci->card.device, payload_bus)) {
-			packet->ack = RCODE_SEND_ERROR;
-			return -1;
+		if (packet->payload_length > sizeof(driver_data->inline_data)) {
+			payload_bus = dma_map_single(ohci->card.device,
+						     packet->payload,
+						     packet->payload_length,
+						     DMA_TO_DEVICE);
+			if (dma_mapping_error(ohci->card.device, payload_bus)) {
+				packet->ack = RCODE_SEND_ERROR;
+				return -1;
+			}
+			packet->payload_bus = payload_bus;
+			packet->payload_mapped = true;
+		} else {
+			memcpy(driver_data->inline_data, packet->payload,
+			       packet->payload_length);
+			payload_bus = d_bus + 3 * sizeof(*d);
 		}
-		packet->payload_bus = payload_bus;
-		packet->payload_mapped = true;
 
 		d[2].req_count = cpu_to_le16(packet->payload_length);
 		d[2].data_address = cpu_to_le32(payload_bus);
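
Note: the new small-payload branch works because driver_data overlays d[3] inside the descriptor block, which is DMA-mapped anyway; the payload's bus address can be computed instead of mapped. A sketch of the arithmetic (assuming the driver's 16-byte struct descriptor):

/* d_bus is the bus address of d[0]; inline_data is the first member
 * of driver_data, which lives in d[3]: */
dma_addr_t inline_bus = d_bus + 3 * sizeof(struct descriptor);
/* payloads up to sizeof(driver_data->inline_data) == 8 bytes are
 * memcpy'd there and need no dma_map_single()/dma_unmap_single(). */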
@@ -1129,19 +1335,8 @@ static int at_context_queue_packet(struct context *ctx,
 			  DESCRIPTOR_IRQ_ALWAYS |
 			  DESCRIPTOR_BRANCH_ALWAYS);
 
-	/*
-	 * If the controller and packet generations don't match, we need to
-	 * bail out and try again.  If IntEvent.busReset is set, the AT context
-	 * is halted, so appending to the context and trying to run it is
-	 * futile.  Most controllers do the right thing and just flush the AT
-	 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
-	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
-	 * up stalling out.  So we just bail out in software and try again
-	 * later, and everyone is happy.
-	 * FIXME: Document how the locking works.
-	 */
-	if (ohci->generation != packet->generation ||
-	    reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
+	/* FIXME: Document how the locking works. */
+	if (ohci->generation != packet->generation) {
 		if (packet->payload_mapped)
 			dma_unmap_single(ohci->card.device, payload_bus,
 					 packet->payload_length, DMA_TO_DEVICE);
@@ -1151,14 +1346,27 @@ static int at_context_queue_packet(struct context *ctx,
 
 	context_append(ctx, d, z, 4 - z);
 
-	/* If the context isn't already running, start it up. */
-	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
-	if ((reg & CONTEXT_RUN) == 0)
+	if (ctx->running) {
+		reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+		flush_writes(ohci);
+	} else {
 		context_run(ctx, 0);
+	}
 
 	return 0;
 }
 
+static void at_context_flush(struct context *ctx)
+{
+	tasklet_disable(&ctx->tasklet);
+
+	ctx->flushing = true;
+	context_tasklet((unsigned long)ctx);
+	ctx->flushing = false;
+
+	tasklet_enable(&ctx->tasklet);
+}
+
 static int handle_at_packet(struct context *context,
 			    struct descriptor *d,
 			    struct descriptor *last)
@@ -1168,7 +1376,7 @@ static int handle_at_packet(struct context *context,
 	struct fw_ohci *ohci = context->ohci;
 	int evt;
 
-	if (last->transfer_status == 0)
+	if (last->transfer_status == 0 && !context->flushing)
 		/* This descriptor isn't done yet, stop iteration. */
 		return 0;
 
@@ -1202,11 +1410,15 @@ static int handle_at_packet(struct context *context,
 		break;
 
 	case OHCI1394_evt_missing_ack:
-		/*
-		 * Using a valid (current) generation count, but the
-		 * node is not on the bus or not sending acks.
-		 */
-		packet->ack = RCODE_NO_ACK;
+		if (context->flushing)
+			packet->ack = RCODE_GENERATION;
+		else {
+			/*
+			 * Using a valid (current) generation count, but the
+			 * node is not on the bus or not sending acks.
+			 */
+			packet->ack = RCODE_NO_ACK;
+		}
 		break;
 
 	case ACK_COMPLETE + 0x10:
@@ -1219,6 +1431,13 @@ static int handle_at_packet(struct context *context,
 		packet->ack = evt - 0x10;
 		break;
 
+	case OHCI1394_evt_no_status:
+		if (context->flushing) {
+			packet->ack = RCODE_GENERATION;
+			break;
+		}
+		/* fall through */
+
 	default:
 		packet->ack = RCODE_SEND_ERROR;
 		break;
@@ -1371,6 +1590,47 @@ static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
 
 }
 
+static void detect_dead_context(struct fw_ohci *ohci,
+				const char *name, unsigned int regs)
+{
+	u32 ctl;
+
+	ctl = reg_read(ohci, CONTROL_SET(regs));
+	if (ctl & CONTEXT_DEAD) {
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+		fw_error("DMA context %s has stopped, error code: %s\n",
+			 name, evts[ctl & 0x1f]);
+#else
+		fw_error("DMA context %s has stopped, error code: %#x\n",
+			 name, ctl & 0x1f);
+#endif
+	}
+}
+
+static void handle_dead_contexts(struct fw_ohci *ohci)
+{
+	unsigned int i;
+	char name[8];
+
+	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
+	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
+	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
+	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
+	for (i = 0; i < 32; ++i) {
+		if (!(ohci->it_context_support & (1 << i)))
+			continue;
+		sprintf(name, "IT%u", i);
+		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
+	}
+	for (i = 0; i < 32; ++i) {
+		if (!(ohci->ir_context_support & (1 << i)))
+			continue;
+		sprintf(name, "IR%u", i);
+		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
+	}
+	/* TODO: maybe try to flush and restart the dead contexts */
+}
+
 static u32 cycle_timer_ticks(u32 cycle_timer)
 {
 	u32 ticks;
@@ -1524,9 +1784,23 @@ static void bus_reset_tasklet(unsigned long data)
 	/* FIXME: Document how the locking works. */
 	spin_lock_irqsave(&ohci->lock, flags);
 
-	ohci->generation = generation;
+	ohci->generation = -1; /* prevent AT packet queueing */
 	context_stop(&ohci->at_request_ctx);
 	context_stop(&ohci->at_response_ctx);
+
+	spin_unlock_irqrestore(&ohci->lock, flags);
+
+	/*
+	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
+	 * packets in the AT queues and software needs to drain them.
+	 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
+	 */
+	at_context_flush(&ohci->at_request_ctx);
+	at_context_flush(&ohci->at_response_ctx);
+
+	spin_lock_irqsave(&ohci->lock, flags);
+
+	ohci->generation = generation;
 	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
 
 	if (ohci->quirks & QUIRK_RESET_PACKET)
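
Note: setting ohci->generation to -1 before dropping the lock cooperates with the generation check in at_context_queue_packet() above: while the AT queues are drained, every queueing attempt fails that check, so nothing new is appended to a stopped context. A sketch of the effect:

/* in at_context_queue_packet(), while the drain is in progress: */
if (ohci->generation != packet->generation) {	/* always true: -1 never
						   matches a real value */
	/* unmap the payload and reject the packet instead of
	 * appending it to the halted AT context */
}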
@@ -1594,8 +1868,12 @@ static irqreturn_t irq_handler(int irq, void *data)
 	if (!event || !~event)
 		return IRQ_NONE;
 
-	/* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
-	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
+	/*
+	 * busReset and postedWriteErr must not be cleared yet
+	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
+	 */
+	reg_write(ohci, OHCI1394_IntEventClear,
+		  event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
 	log_irqs(event);
 
 	if (event & OHCI1394_selfIDComplete)
@@ -1613,30 +1891,41 @@ static irqreturn_t irq_handler(int irq, void *data)
 	if (event & OHCI1394_respTxComplete)
 		tasklet_schedule(&ohci->at_response_ctx.tasklet);
 
-	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
-	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
+	if (event & OHCI1394_isochRx) {
+		iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
+		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
 
-	while (iso_event) {
-		i = ffs(iso_event) - 1;
-		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
-		iso_event &= ~(1 << i);
+		while (iso_event) {
+			i = ffs(iso_event) - 1;
+			tasklet_schedule(
+				&ohci->ir_context_list[i].context.tasklet);
+			iso_event &= ~(1 << i);
+		}
 	}
 
-	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
-	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
+	if (event & OHCI1394_isochTx) {
+		iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
+		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
 
-	while (iso_event) {
-		i = ffs(iso_event) - 1;
-		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
-		iso_event &= ~(1 << i);
+		while (iso_event) {
+			i = ffs(iso_event) - 1;
+			tasklet_schedule(
+				&ohci->it_context_list[i].context.tasklet);
+			iso_event &= ~(1 << i);
+		}
 	}
 
 	if (unlikely(event & OHCI1394_regAccessFail))
 		fw_error("Register access failure - "
 			 "please notify linux1394-devel@lists.sf.net\n");
 
-	if (unlikely(event & OHCI1394_postedWriteErr))
+	if (unlikely(event & OHCI1394_postedWriteErr)) {
+		reg_read(ohci, OHCI1394_PostedWriteAddressHi);
+		reg_read(ohci, OHCI1394_PostedWriteAddressLo);
+		reg_write(ohci, OHCI1394_IntEventClear,
+			  OHCI1394_postedWriteErr);
 		fw_error("PCI posted write error\n");
+	}
 
 	if (unlikely(event & OHCI1394_cycleTooLong)) {
 		if (printk_ratelimit())
@@ -1656,11 +1945,15 @@ static irqreturn_t irq_handler(int irq, void *data)
 		fw_notify("isochronous cycle inconsistent\n");
 	}
 
+	if (unlikely(event & OHCI1394_unrecoverableError))
+		handle_dead_contexts(ohci);
+
 	if (event & OHCI1394_cycle64Seconds) {
 		spin_lock(&ohci->lock);
 		update_bus_time(ohci);
 		spin_unlock(&ohci->lock);
-	}
+	} else
+		flush_writes(ohci);
 
 	return IRQ_HANDLED;
 }
@@ -1783,8 +2076,6 @@ static int ohci_enable(struct fw_card *card,
 
 	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
 	reg_write(ohci, OHCI1394_LinkControlSet,
-		  OHCI1394_LinkControl_rcvSelfID |
-		  OHCI1394_LinkControl_rcvPhyPkt |
 		  OHCI1394_LinkControl_cycleTimerEnable |
 		  OHCI1394_LinkControl_cycleMaster);
 
@@ -1811,9 +2102,6 @@ static int ohci_enable(struct fw_card *card,
 	reg_write(ohci, OHCI1394_FairnessControl, 0);
 	card->priority_budget_implemented = ohci->pri_req_max != 0;
 
-	ar_context_run(&ohci->ar_request_ctx);
-	ar_context_run(&ohci->ar_response_ctx);
-
 	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
 	reg_write(ohci, OHCI1394_IntEventClear, ~0);
 	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
@@ -1892,7 +2180,9 @@ static int ohci_enable(struct fw_card *card,
 		OHCI1394_selfIDComplete |
 		OHCI1394_regAccessFail |
 		OHCI1394_cycle64Seconds |
-		OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong |
+		OHCI1394_cycleInconsistent |
+		OHCI1394_unrecoverableError |
+		OHCI1394_cycleTooLong |
 		OHCI1394_masterIntEnable;
 	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
 		irqs |= OHCI1394_busReset;
@@ -1901,7 +2191,13 @@ static int ohci_enable(struct fw_card *card,
 	reg_write(ohci, OHCI1394_HCControlSet,
 		  OHCI1394_HCControl_linkEnable |
 		  OHCI1394_HCControl_BIBimageValid);
-	flush_writes(ohci);
+
+	reg_write(ohci, OHCI1394_LinkControlSet,
+		  OHCI1394_LinkControl_rcvSelfID |
+		  OHCI1394_LinkControl_rcvPhyPkt);
+
+	ar_context_run(&ohci->ar_request_ctx);
+	ar_context_run(&ohci->ar_response_ctx);	/* also flushes writes */
 
 	/* We are ready to go, reset bus to finish initialization. */
 	fw_schedule_bus_reset(&ohci->card, false, true);
@@ -1914,7 +2210,6 @@ static int ohci_set_config_rom(struct fw_card *card,
 {
 	struct fw_ohci *ohci;
 	unsigned long flags;
-	int ret = -EBUSY;
 	__be32 *next_config_rom;
 	dma_addr_t uninitialized_var(next_config_rom_bus);
 
@@ -1955,22 +2250,37 @@ static int ohci_set_config_rom(struct fw_card *card,
 
 	spin_lock_irqsave(&ohci->lock, flags);
 
+	/*
+	 * If there is not an already pending config_rom update,
+	 * push our new allocation into the ohci->next_config_rom
+	 * and then mark the local variable as null so that we
+	 * won't deallocate the new buffer.
+	 *
+	 * OTOH, if there is a pending config_rom update, just
+	 * use that buffer with the new config_rom data, and
+	 * let this routine free the unused DMA allocation.
+	 */
+
 	if (ohci->next_config_rom == NULL) {
 		ohci->next_config_rom = next_config_rom;
 		ohci->next_config_rom_bus = next_config_rom_bus;
+		next_config_rom = NULL;
+	}
 
-		copy_config_rom(ohci->next_config_rom, config_rom, length);
+	copy_config_rom(ohci->next_config_rom, config_rom, length);
 
-		ohci->next_header = config_rom[0];
-		ohci->next_config_rom[0] = 0;
+	ohci->next_header = config_rom[0];
+	ohci->next_config_rom[0] = 0;
 
-		reg_write(ohci, OHCI1394_ConfigROMmap,
-			  ohci->next_config_rom_bus);
-		ret = 0;
-	}
+	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
 
 	spin_unlock_irqrestore(&ohci->lock, flags);
 
+	/* If we didn't use the DMA allocation, delete it. */
+	if (next_config_rom != NULL)
+		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+				  next_config_rom, next_config_rom_bus);
+
 	/*
 	 * Now initiate a bus reset to have the changes take
 	 * effect. We clean up the old config rom memory and DMA
@@ -1978,13 +2288,10 @@ static int ohci_set_config_rom(struct fw_card *card,
 	 * controller could need to access it before the bus reset
 	 * takes effect.
 	 */
-	if (ret == 0)
-		fw_schedule_bus_reset(&ohci->card, true, true);
-	else
-		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-				  next_config_rom, next_config_rom_bus);
 
-	return ret;
+	fw_schedule_bus_reset(&ohci->card, true, true);
+
+	return 0;
 }
 
 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
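
Note: with the -EBUSY path gone, ohci_set_config_rom() reduces to a simple buffer-ownership rule under ohci->lock; a sketch of the two cases:

/* next_config_rom == NULL: adopt the fresh allocation and NULL the
 *     local pointer so it is not freed below;
 * next_config_rom != NULL: copy into the already-pending buffer and
 *     let the dma_free_coherent() after the unlock release the
 *     now-unused fresh allocation. */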
@@ -2408,6 +2715,10 @@ static int ohci_start_iso(struct fw_iso_context *base,
 	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
 	int index;
 
+	/* the controller cannot start without any queued packets */
+	if (ctx->context.last->branch_address == 0)
+		return -ENODATA;
+
 	switch (ctx->base.type) {
 	case FW_ISO_CONTEXT_TRANSMIT:
 		index = ctx - ohci->it_context_list;
@@ -2436,6 +2747,10 @@ static int ohci_start_iso(struct fw_iso_context *base,
 		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
 		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
 		context_run(&ctx->context, control);
+
+		ctx->sync = sync;
+		ctx->tags = tags;
+
 		break;
 	}
 
@@ -2462,6 +2777,7 @@ static int ohci_stop_iso(struct fw_iso_context *base)
 	}
 	flush_writes(ohci);
 	context_stop(&ctx->context);
+	tasklet_kill(&ctx->context.tasklet);
 
 	return 0;
 }
@@ -2533,6 +2849,26 @@ static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
 	return ret;
 }
 
+#ifdef CONFIG_PM
+static void ohci_resume_iso_dma(struct fw_ohci *ohci)
+{
+	int i;
+	struct iso_context *ctx;
+
+	for (i = 0 ; i < ohci->n_ir ; i++) {
+		ctx = &ohci->ir_context_list[i];
+		if (ctx->context.running)
+			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
+	}
+
+	for (i = 0 ; i < ohci->n_it ; i++) {
+		ctx = &ohci->it_context_list[i];
+		if (ctx->context.running)
+			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
+	}
+}
+#endif
+
 static int queue_iso_transmit(struct iso_context *ctx,
 			      struct fw_iso_packet *packet,
 			      struct fw_iso_buffer *buffer,
@@ -2787,6 +3123,15 @@ static int ohci_queue_iso(struct fw_iso_context *base,
 	return ret;
 }
 
+static void ohci_flush_queue_iso(struct fw_iso_context *base)
+{
+	struct context *ctx =
+		&container_of(base, struct iso_context, base)->context;
+
+	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+	flush_writes(ctx->ohci);
+}
+
 static const struct fw_card_driver ohci_driver = {
 	.enable			= ohci_enable,
 	.read_phy_reg		= ohci_read_phy_reg,
@@ -2803,6 +3148,7 @@ static const struct fw_card_driver ohci_driver = {
 	.free_iso_context	= ohci_free_iso_context,
 	.set_iso_channels	= ohci_set_iso_channels,
 	.queue_iso		= ohci_queue_iso,
+	.flush_queue_iso	= ohci_flush_queue_iso,
 	.start_iso		= ohci_start_iso,
 	.stop_iso		= ohci_stop_iso,
 };
@@ -2842,9 +3188,14 @@ static int __devinit pci_probe(struct pci_dev *dev,
 	struct fw_ohci *ohci;
 	u32 bus_options, max_receive, link_speed, version;
 	u64 guid;
-	int i, err, n_ir, n_it;
+	int i, err;
 	size_t size;
 
+	if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
+		dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
+		return -ENOSYS;
+	}
+
 	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
 	if (ohci == NULL) {
 		err = -ENOMEM;
@@ -2885,40 +3236,68 @@ static int __devinit pci_probe(struct pci_dev *dev,
 	}
 
 	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
-		if (ohci_quirks[i].vendor == dev->vendor &&
-		    (ohci_quirks[i].device == dev->device ||
-		     ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) {
+		if ((ohci_quirks[i].vendor == dev->vendor) &&
+		    (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
+		     ohci_quirks[i].device == dev->device) &&
+		    (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
+		     ohci_quirks[i].revision >= dev->revision)) {
 			ohci->quirks = ohci_quirks[i].flags;
 			break;
 		}
 	if (param_quirks)
 		ohci->quirks = param_quirks;
 
-	ar_context_init(&ohci->ar_request_ctx, ohci,
-			OHCI1394_AsReqRcvContextControlSet);
+	/*
+	 * Because dma_alloc_coherent() allocates at least one page,
+	 * we save space by using a common buffer for the AR request/
+	 * response descriptors and the self IDs buffer.
+	 */
+	BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
+	BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
+	ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
+					       PAGE_SIZE,
+					       &ohci->misc_buffer_bus,
+					       GFP_KERNEL);
+	if (!ohci->misc_buffer) {
+		err = -ENOMEM;
+		goto fail_iounmap;
+	}
+
+	err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
+			      OHCI1394_AsReqRcvContextControlSet);
+	if (err < 0)
+		goto fail_misc_buf;
 
-	ar_context_init(&ohci->ar_response_ctx, ohci,
-			OHCI1394_AsRspRcvContextControlSet);
+	err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
+			      OHCI1394_AsRspRcvContextControlSet);
+	if (err < 0)
+		goto fail_arreq_ctx;
 
-	context_init(&ohci->at_request_ctx, ohci,
-		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+	err = context_init(&ohci->at_request_ctx, ohci,
+			   OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+	if (err < 0)
+		goto fail_arrsp_ctx;
 
-	context_init(&ohci->at_response_ctx, ohci,
-		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+	err = context_init(&ohci->at_response_ctx, ohci,
+			   OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+	if (err < 0)
+		goto fail_atreq_ctx;
 
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
 	ohci->ir_context_channels = ~0ULL;
-	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+	ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
-	n_ir = hweight32(ohci->ir_context_mask);
-	size = sizeof(struct iso_context) * n_ir;
+	ohci->ir_context_mask = ohci->ir_context_support;
+	ohci->n_ir = hweight32(ohci->ir_context_mask);
+	size = sizeof(struct iso_context) * ohci->n_ir;
 	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
 
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
-	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
-	n_it = hweight32(ohci->it_context_mask);
-	size = sizeof(struct iso_context) * n_it;
+	ohci->it_context_mask = ohci->it_context_support;
+	ohci->n_it = hweight32(ohci->it_context_mask);
+	size = sizeof(struct iso_context) * ohci->n_it;
 	ohci->it_context_list = kzalloc(size, GFP_KERNEL);
 
 	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
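
Note: the resulting layout of the shared page, as implied by the offsets used here and by the self_id assignment further down (the two BUILD_BUG_ONs guarantee each region fits):

/* misc_buffer, one page:
 *   [0,            PAGE_SIZE/4)  AR request context descriptors
 *   [PAGE_SIZE/4,  PAGE_SIZE/2)  AR response context descriptors
 *   [PAGE_SIZE/2,  PAGE_SIZE/2 + SELF_ID_BUF_SIZE)  self-ID buffer
 */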
@@ -2926,15 +3305,8 @@ static int __devinit pci_probe(struct pci_dev *dev,
 		goto fail_contexts;
 	}
 
-	/* self-id dma buffer allocation */
-	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
-					       SELF_ID_BUF_SIZE,
-					       &ohci->self_id_bus,
-					       GFP_KERNEL);
-	if (ohci->self_id_cpu == NULL) {
-		err = -ENOMEM;
-		goto fail_contexts;
-	}
+	ohci->self_id_cpu = ohci->misc_buffer     + PAGE_SIZE/2;
+	ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
 
 	bus_options = reg_read(ohci, OHCI1394_BusOptions);
 	max_receive = (bus_options >> 12) & 0xf;
@@ -2944,33 +3316,37 @@ static int __devinit pci_probe(struct pci_dev *dev,
 
 	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
 	if (err)
-		goto fail_self_id;
+		goto fail_contexts;
 
 	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
 	fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
 		  "%d IR + %d IT contexts, quirks 0x%x\n",
 		  dev_name(&dev->dev), version >> 16, version & 0xff,
-		  n_ir, n_it, ohci->quirks);
+		  ohci->n_ir, ohci->n_it, ohci->quirks);
 
 	return 0;
 
- fail_self_id:
-	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
-			  ohci->self_id_cpu, ohci->self_id_bus);
  fail_contexts:
 	kfree(ohci->ir_context_list);
 	kfree(ohci->it_context_list);
 	context_release(&ohci->at_response_ctx);
+ fail_atreq_ctx:
 	context_release(&ohci->at_request_ctx);
+ fail_arrsp_ctx:
 	ar_context_release(&ohci->ar_response_ctx);
+ fail_arreq_ctx:
 	ar_context_release(&ohci->ar_request_ctx);
+ fail_misc_buf:
+	dma_free_coherent(ohci->card.device, PAGE_SIZE,
+			  ohci->misc_buffer, ohci->misc_buffer_bus);
+ fail_iounmap:
 	pci_iounmap(dev, ohci->registers);
  fail_iomem:
 	pci_release_region(dev, 0);
  fail_disable:
 	pci_disable_device(dev);
  fail_free:
-	kfree(&ohci->card);
+	kfree(ohci);
 	pmac_ohci_off(dev);
  fail:
 	if (err == -ENOMEM)
@@ -3002,10 +3378,10 @@ static void pci_remove(struct pci_dev *dev)
 	if (ohci->config_rom)
 		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
 				  ohci->config_rom, ohci->config_rom_bus);
-	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
-			  ohci->self_id_cpu, ohci->self_id_bus);
 	ar_context_release(&ohci->ar_request_ctx);
 	ar_context_release(&ohci->ar_response_ctx);
+	dma_free_coherent(ohci->card.device, PAGE_SIZE,
+			  ohci->misc_buffer, ohci->misc_buffer_bus);
 	context_release(&ohci->at_request_ctx);
 	context_release(&ohci->at_response_ctx);
 	kfree(ohci->it_context_list);
@@ -3014,7 +3390,7 @@ static void pci_remove(struct pci_dev *dev)
 	pci_iounmap(dev, ohci->registers);
 	pci_release_region(dev, 0);
 	pci_disable_device(dev);
-	kfree(&ohci->card);
+	kfree(ohci);
 	pmac_ohci_off(dev);
 
 	fw_notify("Removed fw-ohci device.\n");
@@ -3056,7 +3432,20 @@ static int pci_resume(struct pci_dev *dev)
 		return err;
 	}
 
-	return ohci_enable(&ohci->card, NULL, 0);
+	/* Some systems don't setup GUID register on resume from ram  */
+	if (!reg_read(ohci, OHCI1394_GUIDLo) &&
+	    !reg_read(ohci, OHCI1394_GUIDHi)) {
+		reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
+		reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
+	}
+
+	err = ohci_enable(&ohci->card, NULL, 0);
+	if (err)
+		return err;
+
+	ohci_resume_iso_dma(ohci);
+
+	return 0;
 }
 #endif
 