author    Linus Torvalds <torvalds@linux-foundation.org>  2011-01-10 11:14:02 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-01-10 11:14:02 -0500
commit    78c92a9fd4b6abbbc1fe1ec335c697cb4e63f252 (patch)
tree      a04527a9a6ecb532439b84b6a6b143fac75ce594
parent    b65f0d673a0280a49b80f44c9a62e5dfc1ec203f (diff)
parent    410cf2bd3dc6ec1ed9e1b36b25b9d7aa927ed14e (diff)

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6:
  firewire: use split transaction timeout only for split transactions
  firewire: ohci: consolidate context status flags
  firewire: ohci: cache the context run bit
  firewire: ohci: flush AT contexts after bus reset - addendum
  firewire: ohci: flush AT contexts after bus reset for OHCI 1.2
  firewire: net: set carrier state at ifup
  firewire: net: add carrier detection
  firewire: net: ratelimit error messages
  firewire: ohci: restart iso DMA contexts on resume from low power mode
  firewire: ohci: restore GUID on resume.
  firewire: ohci: use common buffer for self IDs and AR descriptors
  firewire: ohci: optimize iso context checks in the interrupt handler
  firewire: make PHY packet header format consistent
  firewire: ohci: properly clear posted write errors
  firewire: ohci: flush MMIO writes in the interrupt handler
  firewire: ohci: fix AT context initialization error handling
  firewire: ohci: Asynchronous Reception rewrite
  firewire: core: Update WARN uses
  firewire: nosy: char device is not seekable
-rw-r--r--  drivers/firewire/Kconfig            |   2
-rw-r--r--  drivers/firewire/core-cdev.c        |   7
-rw-r--r--  drivers/firewire/core-transaction.c |  58
-rw-r--r--  drivers/firewire/core.h             |   4
-rw-r--r--  drivers/firewire/net.c              |  47
-rw-r--r--  drivers/firewire/nosy.c             |   3
-rw-r--r--  drivers/firewire/ohci.c             | 672
-rw-r--r--  include/linux/firewire.h            |   2
8 files changed, 548 insertions(+), 247 deletions(-)
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 40a222e19b2d..68f942cb30f2 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -19,7 +19,7 @@ config FIREWIRE
 
 config FIREWIRE_OHCI
 	tristate "OHCI-1394 controllers"
-	depends on PCI && FIREWIRE
+	depends on PCI && FIREWIRE && MMU
 	help
 	  Enable this driver if you have a FireWire controller based
 	  on the OHCI specification.  For all practical purposes, this
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 14bb7b7b5dd7..48ae712e2101 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1501,9 +1501,10 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
 	e->client = client;
 	e->p.speed = SCODE_100;
 	e->p.generation = a->generation;
-	e->p.header[0] = a->data[0];
-	e->p.header[1] = a->data[1];
-	e->p.header_length = 8;
+	e->p.header[0] = TCODE_LINK_INTERNAL << 4;
+	e->p.header[1] = a->data[0];
+	e->p.header[2] = a->data[1];
+	e->p.header_length = 12;
 	e->p.callback = outbound_phy_packet_callback;
 	e->phy_packet.closure = a->closure;
 	e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT;
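
With this change the kernel supplies the PHY-packet tcode itself; userspace keeps passing only the two PHY quadlets through the cdev ABI. A minimal userspace sketch of that ABI (assumptions: the fd is open on the local node's device, e.g. /dev/fw0, and a current bus generation was obtained elsewhere, e.g. via FW_CDEV_IOC_GET_INFO):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	int send_phy_config(int fd, __u32 quadlet, __u32 generation)
	{
		struct fw_cdev_send_phy_packet req = {
			.closure    = 0,
			.data       = { quadlet, ~quadlet },	/* packet + inverse quadlet */
			.generation = generation,
		};
		/* the kernel now prepends TCODE_LINK_INTERNAL << 4 on its own */
		return ioctl(fd, FW_CDEV_IOC_SEND_PHY_PACKET, &req);
	}
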
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index b42a0bde8494..d00f8ce902cc 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -72,6 +72,15 @@
 #define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
 #define PHY_IDENTIFIER(id)		((id) << 30)
 
+/* returns 0 if the split timeout handler is already running */
+static int try_cancel_split_timeout(struct fw_transaction *t)
+{
+	if (t->is_split_transaction)
+		return del_timer(&t->split_timeout_timer);
+	else
+		return 1;
+}
+
 static int close_transaction(struct fw_transaction *transaction,
 			     struct fw_card *card, int rcode)
 {
@@ -81,7 +90,7 @@ static int close_transaction(struct fw_transaction *transaction,
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(t, &card->transaction_list, link) {
 		if (t == transaction) {
-			if (!del_timer(&t->split_timeout_timer)) {
+			if (!try_cancel_split_timeout(t)) {
 				spin_unlock_irqrestore(&card->lock, flags);
 				goto timed_out;
 			}
@@ -141,16 +150,28 @@ static void split_transaction_timeout_callback(unsigned long data)
 	card->tlabel_mask &= ~(1ULL << t->tlabel);
 	spin_unlock_irqrestore(&card->lock, flags);
 
-	card->driver->cancel_packet(card, &t->packet);
-
-	/*
-	 * At this point cancel_packet will never call the transaction
-	 * callback, since we just took the transaction out of the list.
-	 * So do it here.
-	 */
 	t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
 }
 
+static void start_split_transaction_timeout(struct fw_transaction *t,
+					    struct fw_card *card)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->lock, flags);
+
+	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
+		spin_unlock_irqrestore(&card->lock, flags);
+		return;
+	}
+
+	t->is_split_transaction = true;
+	mod_timer(&t->split_timeout_timer,
+		  jiffies + card->split_timeout_jiffies);
+
+	spin_unlock_irqrestore(&card->lock, flags);
+}
+
 static void transmit_complete_callback(struct fw_packet *packet,
 				       struct fw_card *card, int status)
 {
@@ -162,7 +183,7 @@ static void transmit_complete_callback(struct fw_packet *packet,
 		close_transaction(t, card, RCODE_COMPLETE);
 		break;
 	case ACK_PENDING:
-		t->timestamp = packet->timestamp;
+		start_split_transaction_timeout(t, card);
 		break;
 	case ACK_BUSY_X:
 	case ACK_BUSY_A:
@@ -250,7 +271,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
 		break;
 
 	default:
-		WARN(1, "wrong tcode %d", tcode);
+		WARN(1, "wrong tcode %d\n", tcode);
 	}
  common:
 	packet->speed = speed;
@@ -349,11 +370,9 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
 	t->node_id = destination_id;
 	t->tlabel = tlabel;
 	t->card = card;
+	t->is_split_transaction = false;
 	setup_timer(&t->split_timeout_timer,
 		    split_transaction_timeout_callback, (unsigned long)t);
-	/* FIXME: start this timer later, relative to t->timestamp */
-	mod_timer(&t->split_timeout_timer,
-		  jiffies + card->split_timeout_jiffies);
 	t->callback = callback;
 	t->callback_data = callback_data;
 
@@ -423,7 +442,8 @@ static void transmit_phy_packet_callback(struct fw_packet *packet,
 }
 
 static struct fw_packet phy_config_packet = {
-	.header_length	= 8,
+	.header_length	= 12,
+	.header[0]	= TCODE_LINK_INTERNAL << 4,
 	.payload_length	= 0,
 	.speed		= SCODE_100,
 	.callback	= transmit_phy_packet_callback,
@@ -451,8 +471,8 @@ void fw_send_phy_config(struct fw_card *card,
 
 	mutex_lock(&phy_config_mutex);
 
-	phy_config_packet.header[0] = data;
-	phy_config_packet.header[1] = ~data;
+	phy_config_packet.header[1] = data;
+	phy_config_packet.header[2] = ~data;
 	phy_config_packet.generation = generation;
 	INIT_COMPLETION(phy_config_done);
 
@@ -638,7 +658,7 @@ int fw_get_response_length(struct fw_request *r)
 	}
 
 	default:
-		WARN(1, "wrong tcode %d", tcode);
+		WARN(1, "wrong tcode %d\n", tcode);
 		return 0;
 	}
 }
@@ -694,7 +714,7 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header,
 		break;
 
 	default:
-		WARN(1, "wrong tcode %d", tcode);
+		WARN(1, "wrong tcode %d\n", tcode);
 	}
 
 	response->payload_mapped = false;
@@ -925,7 +945,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(t, &card->transaction_list, link) {
 		if (t->node_id == source && t->tlabel == tlabel) {
-			if (!del_timer(&t->split_timeout_timer)) {
+			if (!try_cancel_split_timeout(t)) {
 				spin_unlock_irqrestore(&card->lock, flags);
 				goto timed_out;
 			}
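
The net effect of the core-transaction.c hunks: the split-transaction timer is no longer armed at submit time, but only once the responder answers with ack_pending, i.e. once the transaction actually becomes a split transaction; try_cancel_split_timeout() then distinguishes normal completion from a timeout that already fired. A toy model of that state machine (plain C sketch, not kernel code; names mirror the patch):

	#include <stdio.h>

	enum ack { ACK_COMPLETE, ACK_PENDING };

	struct xact {
		int is_split_transaction;	/* set only on ack_pending */
		int timer_armed;
	};

	static void on_transmit_complete(struct xact *t, enum ack a)
	{
		if (a == ACK_PENDING) {		/* response arrives later... */
			t->is_split_transaction = 1;
			t->timer_armed = 1;	/* ...so only now start the timeout */
		}
		/* ACK_COMPLETE: unified transaction, done, no timer ever armed */
	}

	static int try_cancel_split_timeout(struct xact *t)
	{
		if (!t->is_split_transaction)
			return 1;		/* no timer armed, nothing to race with */
		if (!t->timer_armed)
			return 0;		/* timeout handler already ran */
		t->timer_armed = 0;
		return 1;
	}

	int main(void)
	{
		struct xact t = { 0, 0 };

		on_transmit_complete(&t, ACK_PENDING);
		printf("%d\n", try_cancel_split_timeout(&t));	/* 1: cancelled in time */
		printf("%d\n", try_cancel_split_timeout(&t));	/* 0: models a fired timer */
		return 0;
	}
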
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index e6239f971be6..f8dfcf1c6cbe 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -215,9 +215,11 @@ static inline bool is_next_generation(int new_generation, int old_generation)
 
 /* -transaction */
 
+#define TCODE_LINK_INTERNAL		0xe
+
 #define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
 #define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
-#define TCODE_IS_LINK_INTERNAL(tcode)	((tcode) == 0xe)
+#define TCODE_IS_LINK_INTERNAL(tcode)	((tcode) == TCODE_LINK_INTERNAL)
 #define TCODE_IS_REQUEST(tcode)		(((tcode) & 2) == 0)
 #define TCODE_IS_RESPONSE(tcode)	(((tcode) & 2) != 0)
 #define TCODE_HAS_REQUEST_DATA(tcode)	(((tcode) & 12) != 4)
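
These predicates lean on the IEEE 1394 tcode numbering (0/1 = write quadlet/block request, 4/5 = read quadlet/block request, odd tcodes are block-form packets, 0xe = link internal). A standalone sanity-check sketch of the bit tricks:

	#include <assert.h>

	#define TCODE_LINK_INTERNAL		0xe
	#define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
	#define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
	#define TCODE_IS_LINK_INTERNAL(tcode)	((tcode) == TCODE_LINK_INTERNAL)

	int main(void)
	{
		assert(TCODE_IS_READ_REQUEST(0x4));	/* read quadlet request */
		assert(TCODE_IS_READ_REQUEST(0x5));	/* read block request */
		assert(!TCODE_IS_READ_REQUEST(0x0));	/* write quadlet request */
		assert(TCODE_IS_BLOCK_PACKET(0x1));	/* write block request */
		assert(TCODE_IS_LINK_INTERNAL(0xe));
		return 0;
	}
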
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 1a467a91fb0b..c2e194c58667 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -9,6 +9,7 @@
 #include <linux/bug.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/ethtool.h>
 #include <linux/firewire.h>
 #include <linux/firewire-constants.h>
 #include <linux/highmem.h>
@@ -179,6 +180,7 @@ struct fwnet_device {
 	/* Number of tx datagrams that have been queued but not yet acked */
 	int queued_datagrams;
 
+	int peer_count;
 	struct list_head peer_list;
 	struct fw_card *card;
 	struct net_device *netdev;
@@ -996,15 +998,23 @@ static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask)
 static void fwnet_write_complete(struct fw_card *card, int rcode,
 				 void *payload, size_t length, void *data)
 {
-	struct fwnet_packet_task *ptask;
-
-	ptask = data;
+	struct fwnet_packet_task *ptask = data;
+	static unsigned long j;
+	static int last_rcode, errors_skipped;
 
 	if (rcode == RCODE_COMPLETE) {
 		fwnet_transmit_packet_done(ptask);
 	} else {
-		fw_error("fwnet_write_complete: failed: %x\n", rcode);
 		fwnet_transmit_packet_failed(ptask);
+
+		if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
+			fw_error("fwnet_write_complete: "
+				 "failed: %x (skipped %d)\n", rcode, errors_skipped);
+
+			errors_skipped = 0;
+			last_rcode = rcode;
+		} else
+			errors_skipped++;
 	}
 }
 
@@ -1213,6 +1223,14 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
 	return retval;
 }
 
+static void set_carrier_state(struct fwnet_device *dev)
+{
+	if (dev->peer_count > 1)
+		netif_carrier_on(dev->netdev);
+	else
+		netif_carrier_off(dev->netdev);
+}
+
 /* ifup */
 static int fwnet_open(struct net_device *net)
 {
@@ -1226,6 +1244,10 @@ static int fwnet_open(struct net_device *net)
 	}
 	netif_start_queue(net);
 
+	spin_lock_irq(&dev->lock);
+	set_carrier_state(dev);
+	spin_unlock_irq(&dev->lock);
+
 	return 0;
 }
 
@@ -1397,6 +1419,10 @@ static int fwnet_change_mtu(struct net_device *net, int new_mtu)
 	return 0;
 }
 
+static const struct ethtool_ops fwnet_ethtool_ops = {
+	.get_link = ethtool_op_get_link,
+};
+
 static const struct net_device_ops fwnet_netdev_ops = {
 	.ndo_open       = fwnet_open,
 	.ndo_stop	= fwnet_stop,
@@ -1415,6 +1441,7 @@ static void fwnet_init_dev(struct net_device *net)
 	net->hard_header_len	= FWNET_HLEN;
 	net->type		= ARPHRD_IEEE1394;
 	net->tx_queue_len	= FWNET_TX_QUEUE_LEN;
+	net->ethtool_ops	= &fwnet_ethtool_ops;
 }
 
 /* caller must hold fwnet_device_mutex */
@@ -1455,6 +1482,8 @@ static int fwnet_add_peer(struct fwnet_device *dev,
 
 	spin_lock_irq(&dev->lock);
 	list_add_tail(&peer->peer_link, &dev->peer_list);
+	dev->peer_count++;
+	set_carrier_state(dev);
 	spin_unlock_irq(&dev->lock);
 
 	return 0;
@@ -1535,13 +1564,15 @@ static int fwnet_probe(struct device *_dev)
 	return ret;
 }
 
-static void fwnet_remove_peer(struct fwnet_peer *peer)
+static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev)
 {
 	struct fwnet_partial_datagram *pd, *pd_next;
 
-	spin_lock_irq(&peer->dev->lock);
+	spin_lock_irq(&dev->lock);
 	list_del(&peer->peer_link);
-	spin_unlock_irq(&peer->dev->lock);
+	dev->peer_count--;
+	set_carrier_state(dev);
+	spin_unlock_irq(&dev->lock);
 
 	list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
 		fwnet_pd_delete(pd);
@@ -1558,7 +1589,7 @@ static int fwnet_remove(struct device *_dev)
 
 	mutex_lock(&fwnet_device_mutex);
 
-	fwnet_remove_peer(peer);
+	fwnet_remove_peer(peer, dev);
 
 	if (list_empty(&dev->peer_list)) {
 		net = dev->netdev;
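
With .get_link wired to ethtool_op_get_link, the carrier state maintained by set_carrier_state() becomes queryable from userspace. A minimal sketch using the generic SIOCETHTOOL ioctl (the interface name is an assumption; firewire-net interfaces are typically named firewire0):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <unistd.h>

	int main(void)
	{
		struct ethtool_value ev = { .cmd = ETHTOOL_GLINK };
		struct ifreq ifr;
		int fd;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "firewire0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ev;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("SIOCETHTOOL");
			return 1;
		}
		printf("link detected: %s\n", ev.data ? "yes" : "no");
		close(fd);
		return 0;
	}
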
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index bf184fb59a5e..0618145376ad 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -302,7 +302,7 @@ nosy_open(struct inode *inode, struct file *file)
 
 	file->private_data = client;
 
-	return 0;
+	return nonseekable_open(inode, file);
 fail:
 	kfree(client);
 	lynx_put(lynx);
@@ -405,7 +405,6 @@ static const struct file_operations nosy_ops = {
 	.poll =			nosy_poll,
 	.open =			nosy_open,
 	.release =		nosy_release,
-	.llseek =		noop_llseek,
 };
 
 #define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index e3c8b60bd86b..d77d120ddc25 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -18,6 +18,7 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
@@ -40,6 +41,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/time.h>
+#include <linux/vmalloc.h>
 
 #include <asm/byteorder.h>
 #include <asm/page.h>
@@ -80,17 +82,23 @@ struct descriptor {
 #define COMMAND_PTR(regs)	((regs) + 12)
 #define CONTEXT_MATCH(regs)	((regs) + 16)
 
-struct ar_buffer {
-	struct descriptor descriptor;
-	struct ar_buffer *next;
-	__le32 data[0];
-};
+#define AR_BUFFER_SIZE	(32*1024)
+#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
+/* we need at least two pages for proper list management */
+#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
+
+#define MAX_ASYNC_PAYLOAD	4096
+#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
+#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
 
 struct ar_context {
 	struct fw_ohci *ohci;
-	struct ar_buffer *current_buffer;
-	struct ar_buffer *last_buffer;
+	struct page *pages[AR_BUFFERS];
+	void *buffer;
+	struct descriptor *descriptors;
+	dma_addr_t descriptors_bus;
 	void *pointer;
+	unsigned int last_buffer_index;
 	u32 regs;
 	struct tasklet_struct tasklet;
 };
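
Worked numbers for these macros, assuming 4 KiB pages: AR_BUFFERS = ceil(32768/4096) = 8, MAX_AR_PACKET_SIZE = 16 + 4096 + 4 = 4116 (header plus maximum async payload plus a trailer quadlet), hence AR_WRAPAROUND_PAGES = ceil(4116/4096) = 2. A standalone sketch that just evaluates the same expressions:

	#include <stdio.h>

	#define PAGE_SIZE		4096		/* assumption for this sketch */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define AR_BUFFER_SIZE		(32*1024)
	#define AR_BUFFERS_MIN		DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
	#define AR_BUFFERS		(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
	#define MAX_ASYNC_PAYLOAD	4096
	#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
	#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)

	int main(void)
	{
		printf("AR_BUFFERS = %d, MAX_AR_PACKET_SIZE = %d, wraparound pages = %d\n",
		       AR_BUFFERS, MAX_AR_PACKET_SIZE, AR_WRAPAROUND_PAGES);
		return 0;	/* prints 8, 4116, 2 with 4 KiB pages */
	}
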
@@ -117,6 +125,8 @@ struct context {
 	struct fw_ohci *ohci;
 	u32 regs;
 	int total_allocation;
+	bool running;
+	bool flushing;
 
 	/*
 	 * List of page-sized buffers for storing DMA descriptors.
@@ -161,6 +171,9 @@ struct iso_context {
 	int excess_bytes;
 	void *header;
 	size_t header_length;
+
+	u8 sync;
+	u8 tags;
 };
 
 #define CONFIG_ROM_SIZE 1024
@@ -177,7 +190,8 @@ struct fw_ohci {
 	u32 bus_time;
 	bool is_root;
 	bool csr_state_setclear_abdicate;
-
+	int n_ir;
+	int n_it;
 	/*
 	 * Spinlock for accessing fw_ohci data.  Never call out of
 	 * this driver with this lock held.
@@ -186,6 +200,9 @@ struct fw_ohci {
 
 	struct mutex phy_reg_mutex;
 
+	void *misc_buffer;
+	dma_addr_t misc_buffer_bus;
+
 	struct ar_context ar_request_ctx;
 	struct ar_context ar_response_ctx;
 	struct context at_request_ctx;
@@ -411,10 +428,6 @@ static const char *tcodes[] = {
 	[0xc] = "-reserved-",		[0xd] = "-reserved-",
 	[0xe] = "link internal",	[0xf] = "-reserved-",
 };
-static const char *phys[] = {
-	[0x0] = "phy config packet",	[0x1] = "link-on packet",
-	[0x2] = "self-id packet",	[0x3] = "-reserved-",
-};
 
 static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
 {
@@ -433,12 +446,6 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
 		return;
 	}
 
-	if (header[0] == ~header[1]) {
-		fw_notify("A%c %s, %s, %08x\n",
-			  dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
-		return;
-	}
-
 	switch (tcode) {
 	case 0x0: case 0x6: case 0x8:
 		snprintf(specific, sizeof(specific), " = %08x",
@@ -453,9 +460,13 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
 	}
 
 	switch (tcode) {
-	case 0xe: case 0xa:
+	case 0xa:
 		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
 		break;
+	case 0xe:
+		fw_notify("A%c %s, PHY %08x %08x\n",
+			  dir, evts[evt], header[1], header[2]);
+		break;
 	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
 		fw_notify("A%c spd %x tl %02x, "
 			  "%04x -> %04x, %s, "
@@ -594,59 +605,150 @@ static int ohci_update_phy_reg(struct fw_card *card, int addr,
 	return ret;
 }
 
-static void ar_context_link_page(struct ar_context *ctx,
-				 struct ar_buffer *ab, dma_addr_t ab_bus)
+static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
 {
-	size_t offset;
+	return page_private(ctx->pages[i]);
+}
 
-	ab->next = NULL;
-	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
-	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
-						    DESCRIPTOR_STATUS |
-						    DESCRIPTOR_BRANCH_ALWAYS);
-	offset = offsetof(struct ar_buffer, data);
-	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
-	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
-	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
-	ab->descriptor.branch_address = 0;
+static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
+{
+	struct descriptor *d;
+
+	d = &ctx->descriptors[index];
+	d->branch_address &= cpu_to_le32(~0xf);
+	d->res_count = cpu_to_le16(PAGE_SIZE);
+	d->transfer_status = 0;
 
 	wmb(); /* finish init of new descriptors before branch_address update */
-	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
-	ctx->last_buffer->next = ab;
-	ctx->last_buffer = ab;
+	d = &ctx->descriptors[ctx->last_buffer_index];
+	d->branch_address |= cpu_to_le32(1);
+
+	ctx->last_buffer_index = index;
 
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
 }
 
-static int ar_context_add_page(struct ar_context *ctx)
+static void ar_context_release(struct ar_context *ctx)
 {
-	struct device *dev = ctx->ohci->card.device;
-	struct ar_buffer *ab;
-	dma_addr_t uninitialized_var(ab_bus);
+	unsigned int i;
 
-	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
-	if (ab == NULL)
-		return -ENOMEM;
+	if (ctx->buffer)
+		vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);
 
-	ar_context_link_page(ctx, ab, ab_bus);
+	for (i = 0; i < AR_BUFFERS; i++)
+		if (ctx->pages[i]) {
+			dma_unmap_page(ctx->ohci->card.device,
+				       ar_buffer_bus(ctx, i),
+				       PAGE_SIZE, DMA_FROM_DEVICE);
+			__free_page(ctx->pages[i]);
+		}
+}
 
-	return 0;
+static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
+{
+	if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
+		reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+		flush_writes(ctx->ohci);
+
+		fw_error("AR error: %s; DMA stopped\n", error_msg);
+	}
+	/* FIXME: restart? */
 }
 
-static void ar_context_release(struct ar_context *ctx)
+static inline unsigned int ar_next_buffer_index(unsigned int index)
+{
+	return (index + 1) % AR_BUFFERS;
+}
+
+static inline unsigned int ar_prev_buffer_index(unsigned int index)
+{
+	return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
+}
+
+static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
+{
+	return ar_next_buffer_index(ctx->last_buffer_index);
+}
+
+/*
+ * We search for the buffer that contains the last AR packet DMA data written
+ * by the controller.
+ */
+static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
+						 unsigned int *buffer_offset)
 {
-	struct ar_buffer *ab, *ab_next;
-	size_t offset;
-	dma_addr_t ab_bus;
+	unsigned int i, next_i, last = ctx->last_buffer_index;
+	__le16 res_count, next_res_count;
+
+	i = ar_first_buffer_index(ctx);
+	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+
+	/* A buffer that is not yet completely filled must be the last one. */
+	while (i != last && res_count == 0) {
+
+		/* Peek at the next descriptor. */
+		next_i = ar_next_buffer_index(i);
+		rmb(); /* read descriptors in order */
+		next_res_count = ACCESS_ONCE(
+				ctx->descriptors[next_i].res_count);
+		/*
+		 * If the next descriptor is still empty, we must stop at this
+		 * descriptor.
+		 */
+		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
+			/*
+			 * The exception is when the DMA data for one packet is
+			 * split over three buffers; in this case, the middle
+			 * buffer's descriptor might be never updated by the
+			 * controller and look still empty, and we have to peek
+			 * at the third one.
+			 */
+			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
+				next_i = ar_next_buffer_index(next_i);
+				rmb();
+				next_res_count = ACCESS_ONCE(
+					ctx->descriptors[next_i].res_count);
+				if (next_res_count != cpu_to_le16(PAGE_SIZE))
+					goto next_buffer_is_active;
+			}
 
-	for (ab = ctx->current_buffer; ab; ab = ab_next) {
-		ab_next = ab->next;
-		offset = offsetof(struct ar_buffer, data);
-		ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-		dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
-				  ab, ab_bus);
+			break;
+		}
+
+next_buffer_is_active:
+		i = next_i;
+		res_count = next_res_count;
+	}
+
+	rmb(); /* read res_count before the DMA data */
+
+	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
+	if (*buffer_offset > PAGE_SIZE) {
+		*buffer_offset = 0;
+		ar_context_abort(ctx, "corrupted descriptor");
+	}
+
+	return i;
+}
+
+static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
+				    unsigned int end_buffer_index,
+				    unsigned int end_buffer_offset)
+{
+	unsigned int i;
+
+	i = ar_first_buffer_index(ctx);
+	while (i != end_buffer_index) {
+		dma_sync_single_for_cpu(ctx->ohci->card.device,
+					ar_buffer_bus(ctx, i),
+					PAGE_SIZE, DMA_FROM_DEVICE);
+		i = ar_next_buffer_index(i);
 	}
+	if (end_buffer_offset > 0)
+		dma_sync_single_for_cpu(ctx->ohci->card.device,
+					ar_buffer_bus(ctx, i),
+					end_buffer_offset, DMA_FROM_DEVICE);
 }
 
 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
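
The key trick in the AR rewrite is that vm_map_ram() maps the first AR_WRAPAROUND_PAGES of the ring a second time directly behind the last page, so a packet crossing the ring end can be parsed with a plain linear pointer. The same double-mapping idea can be reproduced in userspace; a sketch under the assumption of Linux with memfd_create() (glibc 2.27+), purely as an analogue and not driver code:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t size = 2 * 4096;		/* ring size: two pages */
		int fd = memfd_create("ring", 0);
		if (fd < 0 || ftruncate(fd, size) < 0)
			return 1;

		/* reserve 2*size of address space, then map the file twice */
		char *base = mmap(NULL, 2 * size, PROT_NONE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (base == MAP_FAILED)
			return 1;
		mmap(base,        size, PROT_READ | PROT_WRITE,
		     MAP_SHARED | MAP_FIXED, fd, 0);
		mmap(base + size, size, PROT_READ | PROT_WRITE,
		     MAP_SHARED | MAP_FIXED, fd, 0);

		/* a write that wraps the ring end stays linearly readable */
		memcpy(base + size - 3, "abcdef", 6);
		printf("%.6s\n", base + size - 3);	/* prints "abcdef" */
		return 0;
	}
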
@@ -689,6 +791,10 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 		p.header[3] = cond_le32_to_cpu(buffer[3]);
 		p.header_length = 16;
 		p.payload_length = p.header[3] >> 16;
+		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
+			ar_context_abort(ctx, "invalid packet length");
+			return NULL;
+		}
 		break;
 
 	case TCODE_WRITE_RESPONSE:
@@ -699,9 +805,8 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 		break;
 
 	default:
-		/* FIXME: Stop context, discard everything, and restart? */
-		p.header_length = 0;
-		p.payload_length = 0;
+		ar_context_abort(ctx, "invalid tcode");
+		return NULL;
 	}
 
 	p.payload = (void *) buffer + p.header_length;
@@ -751,121 +856,147 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 	return buffer + length + 1;
 }
 
+static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
+{
+	void *next;
+
+	while (p < end) {
+		next = handle_ar_packet(ctx, p);
+		if (!next)
+			return p;
+		p = next;
+	}
+
+	return p;
+}
+
+static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
+{
+	unsigned int i;
+
+	i = ar_first_buffer_index(ctx);
+	while (i != end_buffer) {
+		dma_sync_single_for_device(ctx->ohci->card.device,
+					   ar_buffer_bus(ctx, i),
+					   PAGE_SIZE, DMA_FROM_DEVICE);
+		ar_context_link_page(ctx, i);
+		i = ar_next_buffer_index(i);
+	}
+}
+
 static void ar_context_tasklet(unsigned long data)
 {
 	struct ar_context *ctx = (struct ar_context *)data;
-	struct ar_buffer *ab;
-	struct descriptor *d;
-	void *buffer, *end;
-	__le16 res_count;
+	unsigned int end_buffer_index, end_buffer_offset;
+	void *p, *end;
 
-	ab = ctx->current_buffer;
-	d = &ab->descriptor;
+	p = ctx->pointer;
+	if (!p)
+		return;
 
-	res_count = ACCESS_ONCE(d->res_count);
-	if (res_count == 0) {
-		size_t size, size2, rest, pktsize, size3, offset;
-		dma_addr_t start_bus;
-		void *start;
+	end_buffer_index = ar_search_last_active_buffer(ctx,
+							&end_buffer_offset);
+	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
+	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
 
+	if (end_buffer_index < ar_first_buffer_index(ctx)) {
 		/*
-		 * This descriptor is finished and we may have a
-		 * packet split across this and the next buffer. We
-		 * reuse the page for reassembling the split packet.
+		 * The filled part of the overall buffer wraps around; handle
+		 * all packets up to the buffer end here.  If the last packet
+		 * wraps around, its tail will be visible after the buffer end
+		 * because the buffer start pages are mapped there again.
 		 */
+		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
+		p = handle_ar_packets(ctx, p, buffer_end);
+		if (p < buffer_end)
+			goto error;
+		/* adjust p to point back into the actual buffer */
+		p -= AR_BUFFERS * PAGE_SIZE;
+	}
 
-		offset = offsetof(struct ar_buffer, data);
-		start = ab;
-		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-		buffer = ab->data;
-
-		ab = ab->next;
-		d = &ab->descriptor;
-		size = start + PAGE_SIZE - ctx->pointer;
-		/* valid buffer data in the next page */
-		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
-		/* what actually fits in this page */
-		size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
-		memmove(buffer, ctx->pointer, size);
-		memcpy(buffer + size, ab->data, size2);
-
-		while (size > 0) {
-			void *next = handle_ar_packet(ctx, buffer);
-			pktsize = next - buffer;
-			if (pktsize >= size) {
-				/*
-				 * We have handled all the data that was
-				 * originally in this page, so we can now
-				 * continue in the next page.
-				 */
-				buffer = next;
-				break;
-			}
-			/* move the next packet to the start of the buffer */
-			memmove(buffer, next, size + size2 - pktsize);
-			size -= pktsize;
-			/* fill up this page again */
-			size3 = min(rest - size2,
-				    (size_t)PAGE_SIZE - offset - size - size2);
-			memcpy(buffer + size + size2,
-			       (void *) ab->data + size2, size3);
-			size2 += size3;
-		}
-
-		if (rest > 0) {
-			/* handle the packets that are fully in the next page */
-			buffer = (void *) ab->data +
-				 (buffer - (start + offset + size));
-			end = (void *) ab->data + rest;
-
-			while (buffer < end)
-				buffer = handle_ar_packet(ctx, buffer);
+	p = handle_ar_packets(ctx, p, end);
+	if (p != end) {
+		if (p > end)
+			ar_context_abort(ctx, "inconsistent descriptor");
+		goto error;
+	}
 
-			ctx->current_buffer = ab;
-			ctx->pointer = end;
+	ctx->pointer = p;
+	ar_recycle_buffers(ctx, end_buffer_index);
 
-			ar_context_link_page(ctx, start, start_bus);
-		} else {
-			ctx->pointer = start + PAGE_SIZE;
-		}
-	} else {
-		buffer = ctx->pointer;
-		ctx->pointer = end =
-			(void *) ab + PAGE_SIZE - le16_to_cpu(res_count);
+	return;
 
-		while (buffer < end)
-			buffer = handle_ar_packet(ctx, buffer);
-	}
+error:
+	ctx->pointer = NULL;
 }
 
-static int ar_context_init(struct ar_context *ctx,
-			   struct fw_ohci *ohci, u32 regs)
+static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
+			   unsigned int descriptors_offset, u32 regs)
 {
-	struct ar_buffer ab;
+	unsigned int i;
+	dma_addr_t dma_addr;
+	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
+	struct descriptor *d;
 
 	ctx->regs = regs;
 	ctx->ohci = ohci;
-	ctx->last_buffer = &ab;
 	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
 
-	ar_context_add_page(ctx);
-	ar_context_add_page(ctx);
-	ctx->current_buffer = ab.next;
-	ctx->pointer = ctx->current_buffer->data;
+	for (i = 0; i < AR_BUFFERS; i++) {
+		ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
+		if (!ctx->pages[i])
+			goto out_of_memory;
+		dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
+					0, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(ohci->card.device, dma_addr)) {
+			__free_page(ctx->pages[i]);
+			ctx->pages[i] = NULL;
+			goto out_of_memory;
+		}
+		set_page_private(ctx->pages[i], dma_addr);
+	}
+
+	for (i = 0; i < AR_BUFFERS; i++)
+		pages[i]              = ctx->pages[i];
+	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
+		pages[AR_BUFFERS + i] = ctx->pages[i];
+	ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
+				 -1, PAGE_KERNEL_RO);
+	if (!ctx->buffer)
+		goto out_of_memory;
+
+	ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
+	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
+
+	for (i = 0; i < AR_BUFFERS; i++) {
+		d = &ctx->descriptors[i];
+		d->req_count      = cpu_to_le16(PAGE_SIZE);
+		d->control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+						DESCRIPTOR_STATUS |
+						DESCRIPTOR_BRANCH_ALWAYS);
+		d->data_address   = cpu_to_le32(ar_buffer_bus(ctx, i));
+		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
+			ar_next_buffer_index(i) * sizeof(struct descriptor));
+	}
 
 	return 0;
+
+out_of_memory:
+	ar_context_release(ctx);
+
+	return -ENOMEM;
 }
 
 static void ar_context_run(struct ar_context *ctx)
 {
-	struct ar_buffer *ab = ctx->current_buffer;
-	dma_addr_t ab_bus;
-	size_t offset;
+	unsigned int i;
+
+	for (i = 0; i < AR_BUFFERS; i++)
+		ar_context_link_page(ctx, i);
 
-	offset = offsetof(struct ar_buffer, data);
-	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+	ctx->pointer = ctx->buffer;
 
-	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
+	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
 	flush_writes(ctx->ohci);
 }
@@ -1042,6 +1173,7 @@ static void context_run(struct context *ctx, u32 extra)
 			  le32_to_cpu(ctx->last->branch_address));
 	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
 	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
+	ctx->running = true;
 	flush_writes(ohci);
 }
 
@@ -1069,6 +1201,7 @@ static void context_stop(struct context *ctx)
 	int i;
 
 	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+	ctx->running = false;
 	flush_writes(ctx->ohci);
 
 	for (i = 0; i < 10; i++) {
@@ -1099,7 +1232,6 @@ static int at_context_queue_packet(struct context *ctx,
 	struct descriptor *d, *last;
 	__le32 *header;
 	int z, tcode;
-	u32 reg;
 
 	d = context_get_descriptors(ctx, 4, &d_bus);
 	if (d == NULL) {
@@ -1113,21 +1245,27 @@ static int at_context_queue_packet(struct context *ctx,
 	/*
 	 * The DMA format for asyncronous link packets is different
 	 * from the IEEE1394 layout, so shift the fields around
-	 * accordingly.  If header_length is 8, it's a PHY packet, to
-	 * which we need to prepend an extra quadlet.
+	 * accordingly.
 	 */
 
+	tcode = (packet->header[0] >> 4) & 0x0f;
 	header = (__le32 *) &d[1];
-	switch (packet->header_length) {
-	case 16:
-	case 12:
+	switch (tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+	case TCODE_WRITE_BLOCK_REQUEST:
+	case TCODE_WRITE_RESPONSE:
+	case TCODE_READ_QUADLET_REQUEST:
+	case TCODE_READ_BLOCK_REQUEST:
+	case TCODE_READ_QUADLET_RESPONSE:
+	case TCODE_READ_BLOCK_RESPONSE:
+	case TCODE_LOCK_REQUEST:
+	case TCODE_LOCK_RESPONSE:
 		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
 					(packet->speed << 16));
 		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
 					(packet->header[0] & 0xffff0000));
 		header[2] = cpu_to_le32(packet->header[2]);
 
-		tcode = (packet->header[0] >> 4) & 0x0f;
 		if (TCODE_IS_BLOCK_PACKET(tcode))
 			header[3] = cpu_to_le32(packet->header[3]);
 		else
@@ -1136,18 +1274,18 @@ static int at_context_queue_packet(struct context *ctx,
 		d[0].req_count = cpu_to_le16(packet->header_length);
 		break;
 
-	case 8:
+	case TCODE_LINK_INTERNAL:
 		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
 					(packet->speed << 16));
-		header[1] = cpu_to_le32(packet->header[0]);
-		header[2] = cpu_to_le32(packet->header[1]);
+		header[1] = cpu_to_le32(packet->header[1]);
+		header[2] = cpu_to_le32(packet->header[2]);
 		d[0].req_count = cpu_to_le16(12);
 
-		if (is_ping_packet(packet->header))
+		if (is_ping_packet(&packet->header[1]))
 			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
 		break;
 
-	case 4:
+	case TCODE_STREAM_DATA:
 		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
 					(packet->speed << 16));
 		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
@@ -1197,6 +1335,8 @@ static int at_context_queue_packet(struct context *ctx,
 	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
 	 * up stalling out.  So we just bail out in software and try again
 	 * later, and everyone is happy.
+	 * FIXME: Test of IntEvent.busReset may no longer be necessary since we
+	 *        flush AT queues in bus_reset_tasklet.
 	 * FIXME: Document how the locking works.
 	 */
 	if (ohci->generation != packet->generation ||
@@ -1210,14 +1350,23 @@ static int at_context_queue_packet(struct context *ctx,
 
 	context_append(ctx, d, z, 4 - z);
 
-	/* If the context isn't already running, start it up. */
-	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
-	if ((reg & CONTEXT_RUN) == 0)
+	if (!ctx->running)
 		context_run(ctx, 0);
 
 	return 0;
 }
 
+static void at_context_flush(struct context *ctx)
+{
+	tasklet_disable(&ctx->tasklet);
+
+	ctx->flushing = true;
+	context_tasklet((unsigned long)ctx);
+	ctx->flushing = false;
+
+	tasklet_enable(&ctx->tasklet);
+}
+
 static int handle_at_packet(struct context *context,
 			    struct descriptor *d,
 			    struct descriptor *last)
@@ -1227,7 +1376,7 @@ static int handle_at_packet(struct context *context,
 	struct fw_ohci *ohci = context->ohci;
 	int evt;
 
-	if (last->transfer_status == 0)
+	if (last->transfer_status == 0 && !context->flushing)
 		/* This descriptor isn't done yet, stop iteration. */
 		return 0;
 
@@ -1261,11 +1410,15 @@ static int handle_at_packet(struct context *context,
 		break;
 
 	case OHCI1394_evt_missing_ack:
-		/*
-		 * Using a valid (current) generation count, but the
-		 * node is not on the bus or not sending acks.
-		 */
-		packet->ack = RCODE_NO_ACK;
+		if (context->flushing)
+			packet->ack = RCODE_GENERATION;
+		else {
+			/*
+			 * Using a valid (current) generation count, but the
+			 * node is not on the bus or not sending acks.
+			 */
+			packet->ack = RCODE_NO_ACK;
+		}
 		break;
 
 	case ACK_COMPLETE + 0x10:
@@ -1278,6 +1431,13 @@ static int handle_at_packet(struct context *context,
 		packet->ack = evt - 0x10;
 		break;
 
+	case OHCI1394_evt_no_status:
+		if (context->flushing) {
+			packet->ack = RCODE_GENERATION;
+			break;
+		}
+		/* fall through */
+
 	default:
 		packet->ack = RCODE_SEND_ERROR;
 		break;
@@ -1583,9 +1743,23 @@ static void bus_reset_tasklet(unsigned long data)
 	/* FIXME: Document how the locking works. */
 	spin_lock_irqsave(&ohci->lock, flags);
 
-	ohci->generation = generation;
+	ohci->generation = -1; /* prevent AT packet queueing */
 	context_stop(&ohci->at_request_ctx);
 	context_stop(&ohci->at_response_ctx);
+
+	spin_unlock_irqrestore(&ohci->lock, flags);
+
+	/*
+	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
+	 * packets in the AT queues and software needs to drain them.
+	 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
+	 */
+	at_context_flush(&ohci->at_request_ctx);
+	at_context_flush(&ohci->at_response_ctx);
+
+	spin_lock_irqsave(&ohci->lock, flags);
+
+	ohci->generation = generation;
 	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
 
 	if (ohci->quirks & QUIRK_RESET_PACKET)
@@ -1653,8 +1827,12 @@ static irqreturn_t irq_handler(int irq, void *data)
 	if (!event || !~event)
 		return IRQ_NONE;
 
-	/* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
-	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
+	/*
+	 * busReset and postedWriteErr must not be cleared yet
+	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
+	 */
+	reg_write(ohci, OHCI1394_IntEventClear,
+		  event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
 	log_irqs(event);
 
 	if (event & OHCI1394_selfIDComplete)
@@ -1672,30 +1850,41 @@ static irqreturn_t irq_handler(int irq, void *data)
 	if (event & OHCI1394_respTxComplete)
 		tasklet_schedule(&ohci->at_response_ctx.tasklet);
 
-	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
-	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
+	if (event & OHCI1394_isochRx) {
+		iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
+		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
 
-	while (iso_event) {
-		i = ffs(iso_event) - 1;
-		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
-		iso_event &= ~(1 << i);
+		while (iso_event) {
+			i = ffs(iso_event) - 1;
+			tasklet_schedule(
+				&ohci->ir_context_list[i].context.tasklet);
+			iso_event &= ~(1 << i);
+		}
 	}
 
-	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
-	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
+	if (event & OHCI1394_isochTx) {
+		iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
+		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
 
-	while (iso_event) {
-		i = ffs(iso_event) - 1;
-		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
-		iso_event &= ~(1 << i);
+		while (iso_event) {
+			i = ffs(iso_event) - 1;
+			tasklet_schedule(
+				&ohci->it_context_list[i].context.tasklet);
+			iso_event &= ~(1 << i);
+		}
 	}
 
 	if (unlikely(event & OHCI1394_regAccessFail))
 		fw_error("Register access failure - "
 			 "please notify linux1394-devel@lists.sf.net\n");
 
-	if (unlikely(event & OHCI1394_postedWriteErr))
+	if (unlikely(event & OHCI1394_postedWriteErr)) {
+		reg_read(ohci, OHCI1394_PostedWriteAddressHi);
+		reg_read(ohci, OHCI1394_PostedWriteAddressLo);
+		reg_write(ohci, OHCI1394_IntEventClear,
+			  OHCI1394_postedWriteErr);
 		fw_error("PCI posted write error\n");
+	}
 
 	if (unlikely(event & OHCI1394_cycleTooLong)) {
 		if (printk_ratelimit())
@@ -1719,7 +1908,8 @@ static irqreturn_t irq_handler(int irq, void *data)
 		spin_lock(&ohci->lock);
 		update_bus_time(ohci);
 		spin_unlock(&ohci->lock);
-	}
+	} else
+		flush_writes(ohci);
 
 	return IRQ_HANDLED;
 }
@@ -2495,6 +2685,10 @@ static int ohci_start_iso(struct fw_iso_context *base,
 		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
 		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
 		context_run(&ctx->context, control);
+
+		ctx->sync = sync;
+		ctx->tags = tags;
+
 		break;
 	}
 
@@ -2592,6 +2786,26 @@ static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
 	return ret;
 }
 
+#ifdef CONFIG_PM
+static void ohci_resume_iso_dma(struct fw_ohci *ohci)
+{
+	int i;
+	struct iso_context *ctx;
+
+	for (i = 0 ; i < ohci->n_ir ; i++) {
+		ctx = &ohci->ir_context_list[i];
+		if (ctx->context.running)
+			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
+	}
+
+	for (i = 0 ; i < ohci->n_it ; i++) {
+		ctx = &ohci->it_context_list[i];
+		if (ctx->context.running)
+			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
+	}
+}
+#endif
+
 static int queue_iso_transmit(struct iso_context *ctx,
 			      struct fw_iso_packet *packet,
 			      struct fw_iso_buffer *buffer,
@@ -2901,7 +3115,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
 	struct fw_ohci *ohci;
 	u32 bus_options, max_receive, link_speed, version;
 	u64 guid;
-	int i, err, n_ir, n_it;
+	int i, err;
 	size_t size;
 
 	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
@@ -2955,31 +3169,55 @@ static int __devinit pci_probe(struct pci_dev *dev,
 	if (param_quirks)
 		ohci->quirks = param_quirks;
 
-	ar_context_init(&ohci->ar_request_ctx, ohci,
-			OHCI1394_AsReqRcvContextControlSet);
+	/*
+	 * Because dma_alloc_coherent() allocates at least one page,
+	 * we save space by using a common buffer for the AR request/
+	 * response descriptors and the self IDs buffer.
+	 */
+	BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
+	BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
+	ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
+					       PAGE_SIZE,
+					       &ohci->misc_buffer_bus,
+					       GFP_KERNEL);
+	if (!ohci->misc_buffer) {
+		err = -ENOMEM;
+		goto fail_iounmap;
+	}
+
+	err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
+			      OHCI1394_AsReqRcvContextControlSet);
+	if (err < 0)
+		goto fail_misc_buf;
 
-	ar_context_init(&ohci->ar_response_ctx, ohci,
-			OHCI1394_AsRspRcvContextControlSet);
+	err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
+			      OHCI1394_AsRspRcvContextControlSet);
+	if (err < 0)
+		goto fail_arreq_ctx;
 
-	context_init(&ohci->at_request_ctx, ohci,
-		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+	err = context_init(&ohci->at_request_ctx, ohci,
+			   OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+	if (err < 0)
+		goto fail_arrsp_ctx;
 
-	context_init(&ohci->at_response_ctx, ohci,
-		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+	err = context_init(&ohci->at_response_ctx, ohci,
+			   OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+	if (err < 0)
+		goto fail_atreq_ctx;
 
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
 	ohci->ir_context_channels = ~0ULL;
 	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
-	n_ir = hweight32(ohci->ir_context_mask);
-	size = sizeof(struct iso_context) * n_ir;
+	ohci->n_ir = hweight32(ohci->ir_context_mask);
+	size = sizeof(struct iso_context) * ohci->n_ir;
 	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
 
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
 	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
-	n_it = hweight32(ohci->it_context_mask);
-	size = sizeof(struct iso_context) * n_it;
+	ohci->n_it = hweight32(ohci->it_context_mask);
+	size = sizeof(struct iso_context) * ohci->n_it;
 	ohci->it_context_list = kzalloc(size, GFP_KERNEL);
 
 	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
@@ -2987,15 +3225,8 @@ static int __devinit pci_probe(struct pci_dev *dev,
 		goto fail_contexts;
 	}
 
-	/* self-id dma buffer allocation */
-	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
-					       SELF_ID_BUF_SIZE,
-					       &ohci->self_id_bus,
-					       GFP_KERNEL);
-	if (ohci->self_id_cpu == NULL) {
-		err = -ENOMEM;
-		goto fail_contexts;
-	}
+	ohci->self_id_cpu = ohci->misc_buffer     + PAGE_SIZE/2;
+	ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
 
 	bus_options = reg_read(ohci, OHCI1394_BusOptions);
 	max_receive = (bus_options >> 12) & 0xf;
@@ -3005,26 +3236,30 @@ static int __devinit pci_probe(struct pci_dev *dev,
 
 	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
 	if (err)
-		goto fail_self_id;
+		goto fail_contexts;
 
 	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
 	fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
 		  "%d IR + %d IT contexts, quirks 0x%x\n",
 		  dev_name(&dev->dev), version >> 16, version & 0xff,
-		  n_ir, n_it, ohci->quirks);
+		  ohci->n_ir, ohci->n_it, ohci->quirks);
 
 	return 0;
 
- fail_self_id:
-	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
-			  ohci->self_id_cpu, ohci->self_id_bus);
  fail_contexts:
 	kfree(ohci->ir_context_list);
 	kfree(ohci->it_context_list);
 	context_release(&ohci->at_response_ctx);
+ fail_atreq_ctx:
 	context_release(&ohci->at_request_ctx);
+ fail_arrsp_ctx:
 	ar_context_release(&ohci->ar_response_ctx);
+ fail_arreq_ctx:
 	ar_context_release(&ohci->ar_request_ctx);
+ fail_misc_buf:
+	dma_free_coherent(ohci->card.device, PAGE_SIZE,
+			  ohci->misc_buffer, ohci->misc_buffer_bus);
+ fail_iounmap:
 	pci_iounmap(dev, ohci->registers);
  fail_iomem:
 	pci_release_region(dev, 0);
@@ -3063,10 +3298,10 @@ static void pci_remove(struct pci_dev *dev)
 	if (ohci->config_rom)
 		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
 				  ohci->config_rom, ohci->config_rom_bus);
-	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
-			  ohci->self_id_cpu, ohci->self_id_bus);
 	ar_context_release(&ohci->ar_request_ctx);
 	ar_context_release(&ohci->ar_response_ctx);
+	dma_free_coherent(ohci->card.device, PAGE_SIZE,
+			  ohci->misc_buffer, ohci->misc_buffer_bus);
 	context_release(&ohci->at_request_ctx);
 	context_release(&ohci->at_response_ctx);
 	kfree(ohci->it_context_list);
@@ -3117,7 +3352,20 @@ static int pci_resume(struct pci_dev *dev)
 		return err;
 	}
 
-	return ohci_enable(&ohci->card, NULL, 0);
+	/* Some systems don't setup GUID register on resume from ram */
+	if (!reg_read(ohci, OHCI1394_GUIDLo) &&
+	    !reg_read(ohci, OHCI1394_GUIDHi)) {
+		reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
+		reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
+	}
+
+	err = ohci_enable(&ohci->card, NULL, 0);
+	if (err)
+		return err;
+
+	ohci_resume_iso_dma(ohci);
+
+	return 0;
 }
 #endif
 
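
The GUID that the resume path restores from card->guid is the same value firewire-core exposes in sysfs, so a post-resume spot check is possible from userspace (a sketch; assumes the local node shows up as /sys/bus/firewire/devices/fw0):

	#include <stdio.h>

	int main(void)
	{
		char guid[32];
		FILE *f = fopen("/sys/bus/firewire/devices/fw0/guid", "r");

		if (!f || !fgets(guid, sizeof(guid), f)) {
			perror("guid");
			return 1;
		}
		printf("local node GUID: %s", guid);	/* should survive suspend */
		fclose(f);
		return 0;
	}
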
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 1cd637ef62d2..9a3f5f9383f6 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -302,9 +302,9 @@ struct fw_packet {
 struct fw_transaction {
 	int node_id; /* The generation is implied; it is always the current. */
 	int tlabel;
-	int timestamp;
 	struct list_head link;
 	struct fw_card *card;
+	bool is_split_transaction;
 	struct timer_list split_timeout_timer;
 
 	struct fw_packet packet;