author     Stefan Richter <stefanr@s5r6.in-berlin.de>   2007-03-25 16:22:40 -0400
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>   2007-04-29 18:00:29 -0400
commit     7542e0e696d1b6e71e6eb3183cbf2c63ec6b5acb (patch)
tree       b496b71d3d3a614413527e4dbbd078bb95fcc28e
parent     d265250341f83fa904d4fecdfadb46d7ab50765f (diff)
ieee1394: remove usage of skb_queue as packet queue
This considerably reduces the memory requirements for a packet and
eliminates ieee1394's dependency on CONFIG_NET.
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
-rw-r--r--  drivers/ieee1394/Kconfig          |   1
-rw-r--r--  drivers/ieee1394/hosts.c          |  12
-rw-r--r--  drivers/ieee1394/hosts.h          |   4
-rw-r--r--  drivers/ieee1394/ieee1394_core.c  | 285
-rw-r--r--  drivers/ieee1394/ieee1394_core.h  |  27
-rw-r--r--  drivers/ieee1394/raw1394.c        |   3
6 files changed, 163 insertions, 169 deletions
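The essence of the change, visible in the diff below: each struct hpsb_packet now carries its own embedded list node plus a flexible data array at the end of the same allocation, so the core no longer wraps every packet in a separately allocated sk_buff just to put it on a queue. The following stand-alone sketch is ordinary user-space C with made-up names (list_node, packet, pending), not code from the driver; it only illustrates the embed-the-node-in-the-object pattern (the kernel's list_head/container_of idiom) that the patch switches to.

/*
 * User-space sketch of the pattern this patch adopts: the queue node is
 * embedded in the packet itself, so queueing needs no extra allocation.
 * Hypothetical names; compile with any C99 compiler.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_node {
	struct list_node *prev, *next;
};

/* minimal circular doubly-linked list, same idea as the kernel's list_head */
static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct list_node *item, struct list_node *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

static void list_del(struct list_node *item)
{
	item->prev->next = item->next;
	item->next->prev = item->prev;
	item->prev = item->next = item;
}

/* hypothetical packet: queue node and payload live in one allocation */
struct packet {
	struct list_node queue;
	size_t data_size;
	unsigned int data[];	/* flexible array, like embedded_data[0] below */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct list_node pending;
	struct list_node *n;

	list_init(&pending);

	for (unsigned int i = 0; i < 3; i++) {
		struct packet *p = calloc(1, sizeof(*p) + 4 * sizeof(unsigned int));
		if (!p)
			return 1;
		p->data_size = 4;
		p->data[0] = i;
		list_add_tail(&p->queue, &pending);	/* no second allocation */
	}

	/* drain the queue, oldest first, and free each packet */
	while ((n = pending.next) != &pending) {
		struct packet *p = container_of(n, struct packet, queue);
		list_del(n);
		printf("completing packet %u\n", p->data[0]);
		free(p);
	}
	return 0;
}

Because the queue node lives inside the packet and the payload comes from the same kzalloc() call, queueing and completion never need a second allocation, which is what removes the per-packet sk_buff overhead and the "select NET" dependency mentioned in the commit message.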
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index cd84a55ecf20..f8ff6b866e6e 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -5,7 +5,6 @@ menu "IEEE 1394 (FireWire) support"
 config IEEE1394
 	tristate "IEEE 1394 (FireWire) support"
 	depends on PCI || BROKEN
-	select NET
 	help
 	  IEEE 1394 describes a high performance serial bus, which is also
 	  known as FireWire(tm) or i.Link(tm) and is used for connecting all
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index 615ba6208013..1bf4aa3e8d15 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -94,14 +94,6 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
 	return 0;
 }
 
-/*
- * The pending_packet_queue is special in that it's processed
- * from hardirq context too (such as hpsb_bus_reset()). Hence
- * split the lock class from the usual networking skb-head
- * lock class by using a separate key for it:
- */
-static struct lock_class_key pending_packet_queue_key;
-
 static DEFINE_MUTEX(host_num_alloc);
 
 /**
@@ -137,9 +129,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 	h->hostdata = h + 1;
 	h->driver = drv;
 
-	skb_queue_head_init(&h->pending_packet_queue);
-	lockdep_set_class(&h->pending_packet_queue.lock,
-			  &pending_packet_queue_key);
+	INIT_LIST_HEAD(&h->pending_packets);
 	INIT_LIST_HEAD(&h->addr_space);
 
 	for (i = 2; i < 16; i++)
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index 6b4e22507966..feb55d032294 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -3,7 +3,6 @@
 
 #include <linux/device.h>
 #include <linux/list.h>
-#include <linux/skbuff.h>
 #include <linux/timer.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
@@ -25,8 +24,7 @@ struct hpsb_host {
 
 	atomic_t generation;
 
-	struct sk_buff_head pending_packet_queue;
-
+	struct list_head pending_packets;
 	struct timer_list timeout;
 	unsigned long timeout_interval;
 
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 270885679df1..d6e3d441f833 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -30,7 +30,6 @@
 #include <linux/moduleparam.h>
 #include <linux/bitops.h>
 #include <linux/kdev_t.h>
-#include <linux/skbuff.h>
 #include <linux/suspend.h>
 #include <linux/kthread.h>
 #include <linux/preempt.h>
@@ -103,6 +102,8 @@ static void queue_packet_complete(struct hpsb_packet *packet);
  *
  * Set the task that runs when a packet completes. You cannot call this more
  * than once on a single packet before it is sent.
+ *
+ * Typically, the complete @routine is responsible to call hpsb_free_packet().
  */
 void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
 				   void (*routine)(void *), void *data)
@@ -115,12 +116,12 @@ void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
 
 /**
  * hpsb_alloc_packet - allocate new packet structure
- * @data_size: size of the data block to be allocated
+ * @data_size: size of the data block to be allocated, in bytes
  *
  * This function allocates, initializes and returns a new &struct hpsb_packet.
- * It can be used in interrupt context. A header block is always included, its
- * size is big enough to contain all possible 1394 headers. The data block is
- * only allocated when @data_size is not zero.
+ * It can be used in interrupt context. A header block is always included and
+ * initialized with zeros. Its size is big enough to contain all possible 1394
+ * headers. The data block is only allocated if @data_size is not zero.
  *
  * For packets for which responses will be received the @data_size has to be big
  * enough to contain the response's data block since no further allocation
@@ -135,50 +136,42 @@ void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
  */
 struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
 {
-	struct hpsb_packet *packet = NULL;
-	struct sk_buff *skb;
+	struct hpsb_packet *packet;
 
 	data_size = ((data_size + 3) & ~3);
 
-	skb = alloc_skb(data_size + sizeof(*packet), GFP_ATOMIC);
-	if (skb == NULL)
+	packet = kzalloc(sizeof(*packet) + data_size, GFP_ATOMIC);
+	if (!packet)
 		return NULL;
 
-	memset(skb->data, 0, data_size + sizeof(*packet));
-
-	packet = (struct hpsb_packet *)skb->data;
-	packet->skb = skb;
-
-	packet->header = packet->embedded_header;
 	packet->state = hpsb_unused;
 	packet->generation = -1;
 	INIT_LIST_HEAD(&packet->driver_list);
+	INIT_LIST_HEAD(&packet->queue);
 	atomic_set(&packet->refcnt, 1);
 
 	if (data_size) {
-		packet->data = (quadlet_t *)(skb->data + sizeof(*packet));
-		packet->data_size = data_size;
+		packet->data = packet->embedded_data;
+		packet->allocated_data_size = data_size;
 	}
-
 	return packet;
 }
 
-
 /**
  * hpsb_free_packet - free packet and data associated with it
  * @packet: packet to free (is NULL safe)
  *
- * This function will free packet->data and finally the packet itself.
+ * Frees @packet->data only if it was allocated through hpsb_alloc_packet().
  */
 void hpsb_free_packet(struct hpsb_packet *packet)
 {
 	if (packet && atomic_dec_and_test(&packet->refcnt)) {
-		BUG_ON(!list_empty(&packet->driver_list));
-		kfree_skb(packet->skb);
+		BUG_ON(!list_empty(&packet->driver_list) ||
+		       !list_empty(&packet->queue));
+		kfree(packet);
 	}
 }
 
-
 /**
  * hpsb_reset_bus - initiate bus reset on the given host
  * @host: host controller whose bus to reset
@@ -494,6 +487,8 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
 	highlevel_host_reset(host);
 }
 
+static spinlock_t pending_packets_lock = SPIN_LOCK_UNLOCKED;
+
 /**
  * hpsb_packet_sent - notify core of sending a packet
  *
@@ -509,24 +504,24 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
+	spin_lock_irqsave(&pending_packets_lock, flags);
 
 	packet->ack_code = ackcode;
 
 	if (packet->no_waiter || packet->state == hpsb_complete) {
 		/* if packet->no_waiter, must not have a tlabel allocated */
-		spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+		spin_unlock_irqrestore(&pending_packets_lock, flags);
 		hpsb_free_packet(packet);
 		return;
 	}
 
 	atomic_dec(&packet->refcnt);	/* drop HC's reference */
-	/* here the packet must be on the host->pending_packet_queue */
+	/* here the packet must be on the host->pending_packets queue */
 
 	if (ackcode != ACK_PENDING || !packet->expect_response) {
 		packet->state = hpsb_complete;
-		__skb_unlink(packet->skb, &host->pending_packet_queue);
-		spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+		list_del_init(&packet->queue);
+		spin_unlock_irqrestore(&pending_packets_lock, flags);
 		queue_packet_complete(packet);
 		return;
 	}
@@ -534,7 +529,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
 	packet->state = hpsb_pending;
 	packet->sendtime = jiffies;
 
-	spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+	spin_unlock_irqrestore(&pending_packets_lock, flags);
 
 	mod_timer(&host->timeout, jiffies + host->timeout_interval);
 }
@@ -609,12 +604,16 @@ int hpsb_send_packet(struct hpsb_packet *packet)
 	WARN_ON(packet->no_waiter && packet->expect_response);
 
 	if (!packet->no_waiter || packet->expect_response) {
+		unsigned long flags;
+
 		atomic_inc(&packet->refcnt);
 		/* Set the initial "sendtime" to 10 seconds from now, to
 		   prevent premature expiry.  If a packet takes more than
 		   10 seconds to hit the wire, we have bigger problems :) */
 		packet->sendtime = jiffies + 10 * HZ;
-		skb_queue_tail(&host->pending_packet_queue, packet->skb);
+		spin_lock_irqsave(&pending_packets_lock, flags);
+		list_add_tail(&packet->queue, &host->pending_packets);
+		spin_unlock_irqrestore(&pending_packets_lock, flags);
 	}
 
 	if (packet->node_id == host->node_id) {
@@ -690,86 +689,97 @@ static void send_packet_nocare(struct hpsb_packet *packet)
 	}
 }
 
+static size_t packet_size_to_data_size(size_t packet_size, size_t header_size,
+				       size_t buffer_size, int tcode)
+{
+	size_t ret = packet_size <= header_size ? 0 : packet_size - header_size;
+
+	if (unlikely(ret > buffer_size))
+		ret = buffer_size;
+
+	if (unlikely(ret + header_size != packet_size))
+		HPSB_ERR("unexpected packet size %d (tcode %d), bug?",
+			 packet_size, tcode);
+	return ret;
+}
 
 static void handle_packet_response(struct hpsb_host *host, int tcode,
 				   quadlet_t *data, size_t size)
 {
-	struct hpsb_packet *packet = NULL;
-	struct sk_buff *skb;
-	int tcode_match = 0;
-	int tlabel;
+	struct hpsb_packet *packet;
+	int tlabel = (data[0] >> 10) & 0x3f;
+	size_t header_size;
 	unsigned long flags;
 
-	tlabel = (data[0] >> 10) & 0x3f;
-
-	spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
-
-	skb_queue_walk(&host->pending_packet_queue, skb) {
-		packet = (struct hpsb_packet *)skb->data;
-		if ((packet->tlabel == tlabel)
-		    && (packet->node_id == (data[1] >> 16))){
-			break;
-		}
+	spin_lock_irqsave(&pending_packets_lock, flags);
 
-		packet = NULL;
-	}
+	list_for_each_entry(packet, &host->pending_packets, queue)
+		if (packet->tlabel == tlabel &&
+		    packet->node_id == (data[1] >> 16))
+			goto found;
 
-	if (packet == NULL) {
-		HPSB_DEBUG("unsolicited response packet received - no tlabel match");
-		dump_packet("contents", data, 16, -1);
-		spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
-		return;
-	}
+	spin_unlock_irqrestore(&pending_packets_lock, flags);
+	HPSB_DEBUG("unsolicited response packet received - %s",
+		   "no tlabel match");
+	dump_packet("contents", data, 16, -1);
+	return;
 
+found:
 	switch (packet->tcode) {
 	case TCODE_WRITEQ:
 	case TCODE_WRITEB:
-		if (tcode != TCODE_WRITE_RESPONSE)
+		if (unlikely(tcode != TCODE_WRITE_RESPONSE))
 			break;
-		tcode_match = 1;
-		memcpy(packet->header, data, 12);
-		break;
+		header_size = 12;
+		size = 0;
+		goto dequeue;
+
 	case TCODE_READQ:
-		if (tcode != TCODE_READQ_RESPONSE)
+		if (unlikely(tcode != TCODE_READQ_RESPONSE))
 			break;
-		tcode_match = 1;
-		memcpy(packet->header, data, 16);
-		break;
+		header_size = 16;
+		size = 0;
+		goto dequeue;
+
 	case TCODE_READB:
-		if (tcode != TCODE_READB_RESPONSE)
+		if (unlikely(tcode != TCODE_READB_RESPONSE))
 			break;
-		tcode_match = 1;
-		BUG_ON(packet->skb->len - sizeof(*packet) < size - 16);
-		memcpy(packet->header, data, 16);
-		memcpy(packet->data, data + 4, size - 16);
-		break;
+		header_size = 16;
+		size = packet_size_to_data_size(size, header_size,
+						packet->allocated_data_size,
+						tcode);
+		goto dequeue;
+
 	case TCODE_LOCK_REQUEST:
-		if (tcode != TCODE_LOCK_RESPONSE)
+		if (unlikely(tcode != TCODE_LOCK_RESPONSE))
 			break;
-		tcode_match = 1;
-		size = min((size - 16), (size_t)8);
-		BUG_ON(packet->skb->len - sizeof(*packet) < size);
-		memcpy(packet->header, data, 16);
-		memcpy(packet->data, data + 4, size);
-		break;
+		header_size = 16;
+		size = packet_size_to_data_size(min(size, (size_t)(16 + 8)),
+						header_size,
+						packet->allocated_data_size,
+						tcode);
+		goto dequeue;
 	}
 
-	if (!tcode_match) {
-		spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
-		HPSB_INFO("unsolicited response packet received - tcode mismatch");
-		dump_packet("contents", data, 16, -1);
-		return;
-	}
+	spin_unlock_irqrestore(&pending_packets_lock, flags);
+	HPSB_DEBUG("unsolicited response packet received - %s",
+		   "tcode mismatch");
+	dump_packet("contents", data, 16, -1);
+	return;
 
-	__skb_unlink(skb, &host->pending_packet_queue);
+dequeue:
+	list_del_init(&packet->queue);
+	spin_unlock_irqrestore(&pending_packets_lock, flags);
 
 	if (packet->state == hpsb_queued) {
 		packet->sendtime = jiffies;
 		packet->ack_code = ACK_PENDING;
 	}
-
 	packet->state = hpsb_complete;
-	spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+
+	memcpy(packet->header, data, header_size);
+	if (size)
+		memcpy(packet->data, data + 4, size);
 
 	queue_packet_complete(packet);
 }
@@ -783,6 +793,7 @@ static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
 	p = hpsb_alloc_packet(dsize);
 	if (unlikely(p == NULL)) {
 		/* FIXME - send data_error response */
+		HPSB_ERR("out of memory, cannot send response packet");
 		return NULL;
 	}
 
@@ -832,7 +843,6 @@ static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
 static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
 {
 	PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
-	packet->header[2] = 0;
 	packet->header_size = 12;
 	packet->data_size = 0;
 }
@@ -1002,8 +1012,8 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
 {
 	int tcode;
 
-	if (host->in_bus_reset) {
-		HPSB_INFO("received packet during reset; ignoring");
+	if (unlikely(host->in_bus_reset)) {
+		HPSB_DEBUG("received packet during reset; ignoring");
 		return;
 	}
 
@@ -1037,23 +1047,27 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
 		break;
 
 	default:
-		HPSB_NOTICE("received packet with bogus transaction code %d",
+		HPSB_DEBUG("received packet with bogus transaction code %d",
 			   tcode);
 		break;
 	}
 }
 
-
 static void abort_requests(struct hpsb_host *host)
 {
-	struct hpsb_packet *packet;
-	struct sk_buff *skb;
+	struct hpsb_packet *packet, *p;
+	struct list_head tmp;
+	unsigned long flags;
 
 	host->driver->devctl(host, CANCEL_REQUESTS, 0);
 
-	while ((skb = skb_dequeue(&host->pending_packet_queue)) != NULL) {
-		packet = (struct hpsb_packet *)skb->data;
+	INIT_LIST_HEAD(&tmp);
+	spin_lock_irqsave(&pending_packets_lock, flags);
+	list_splice_init(&host->pending_packets, &tmp);
+	spin_unlock_irqrestore(&pending_packets_lock, flags);
 
+	list_for_each_entry_safe(packet, p, &tmp, queue) {
+		list_del_init(&packet->queue);
 		packet->state = hpsb_complete;
 		packet->ack_code = ACKX_ABORTED;
 		queue_packet_complete(packet);
@@ -1063,87 +1077,90 @@ static void abort_requests(struct hpsb_host *host)
 void abort_timedouts(unsigned long __opaque)
 {
 	struct hpsb_host *host = (struct hpsb_host *)__opaque;
-	unsigned long flags;
-	struct hpsb_packet *packet;
-	struct sk_buff *skb;
-	unsigned long expire;
+	struct hpsb_packet *packet, *p;
+	struct list_head tmp;
+	unsigned long flags, expire, j;
 
 	spin_lock_irqsave(&host->csr.lock, flags);
 	expire = host->csr.expire;
 	spin_unlock_irqrestore(&host->csr.lock, flags);
 
-	/* Hold the lock around this, since we aren't dequeuing all
-	 * packets, just ones we need. */
-	spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
-
-	while (!skb_queue_empty(&host->pending_packet_queue)) {
-		skb = skb_peek(&host->pending_packet_queue);
+	j = jiffies;
+	INIT_LIST_HEAD(&tmp);
+	spin_lock_irqsave(&pending_packets_lock, flags);
 
-		packet = (struct hpsb_packet *)skb->data;
-
-		if (time_before(packet->sendtime + expire, jiffies)) {
-			__skb_unlink(skb, &host->pending_packet_queue);
-			packet->state = hpsb_complete;
-			packet->ack_code = ACKX_TIMEOUT;
-			queue_packet_complete(packet);
-		} else {
+	list_for_each_entry_safe(packet, p, &host->pending_packets, queue) {
+		if (time_before(packet->sendtime + expire, j))
+			list_move_tail(&packet->queue, &tmp);
+		else
 			/* Since packets are added to the tail, the oldest
 			 * ones are first, always. When we get to one that
 			 * isn't timed out, the rest aren't either. */
 			break;
-		}
 	}
+	if (!list_empty(&host->pending_packets))
+		mod_timer(&host->timeout, j + host->timeout_interval);
 
-	if (!skb_queue_empty(&host->pending_packet_queue))
-		mod_timer(&host->timeout, jiffies + host->timeout_interval);
+	spin_unlock_irqrestore(&pending_packets_lock, flags);
 
-	spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
+	list_for_each_entry_safe(packet, p, &tmp, queue) {
+		list_del_init(&packet->queue);
+		packet->state = hpsb_complete;
+		packet->ack_code = ACKX_TIMEOUT;
+		queue_packet_complete(packet);
+	}
 }
 
-
-/* Kernel thread and vars, which handles packets that are completed. Only
- * packets that have a "complete" function are sent here. This way, the
- * completion is run out of kernel context, and doesn't block the rest of
- * the stack. */
 static struct task_struct *khpsbpkt_thread;
-static struct sk_buff_head hpsbpkt_queue;
+static LIST_HEAD(hpsbpkt_queue);
 
 static void queue_packet_complete(struct hpsb_packet *packet)
 {
+	unsigned long flags;
+
 	if (packet->no_waiter) {
 		hpsb_free_packet(packet);
 		return;
 	}
 	if (packet->complete_routine != NULL) {
-		skb_queue_tail(&hpsbpkt_queue, packet->skb);
+		spin_lock_irqsave(&pending_packets_lock, flags);
+		list_add_tail(&packet->queue, &hpsbpkt_queue);
+		spin_unlock_irqrestore(&pending_packets_lock, flags);
 		wake_up_process(khpsbpkt_thread);
 	}
 	return;
 }
 
+/*
+ * Kernel thread which handles packets that are completed. This way the
+ * packet's "complete" function is asynchronously run in process context.
+ * Only packets which have a "complete" function may be sent here.
+ */
 static int hpsbpkt_thread(void *__hi)
 {
-	struct sk_buff *skb;
-	struct hpsb_packet *packet;
-	void (*complete_routine)(void*);
-	void *complete_data;
+	struct hpsb_packet *packet, *p;
+	struct list_head tmp;
+	int may_schedule;
 
 	current->flags |= PF_NOFREEZE;
 
 	while (!kthread_should_stop()) {
-		while ((skb = skb_dequeue(&hpsbpkt_queue)) != NULL) {
-			packet = (struct hpsb_packet *)skb->data;
-
-			complete_routine = packet->complete_routine;
-			complete_data = packet->complete_data;
 
-			packet->complete_routine = packet->complete_data = NULL;
+		INIT_LIST_HEAD(&tmp);
+		spin_lock_irq(&pending_packets_lock);
+		list_splice_init(&hpsbpkt_queue, &tmp);
+		spin_unlock_irq(&pending_packets_lock);
 
-			complete_routine(complete_data);
+		list_for_each_entry_safe(packet, p, &tmp, queue) {
+			list_del_init(&packet->queue);
+			packet->complete_routine(packet->complete_data);
 		}
 
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (!skb_peek(&hpsbpkt_queue))
+		spin_lock_irq(&pending_packets_lock);
+		may_schedule = list_empty(&hpsbpkt_queue);
+		spin_unlock_irq(&pending_packets_lock);
+		if (may_schedule)
 			schedule();
 		__set_current_state(TASK_RUNNING);
 	}
@@ -1154,8 +1171,6 @@ static int __init ieee1394_init(void)
 {
 	int i, ret;
 
-	skb_queue_head_init(&hpsbpkt_queue);
-
 	/* non-fatal error */
 	if (hpsb_init_config_roms()) {
 		HPSB_ERR("Failed to initialize some config rom entries.\n");
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
index 11b46d2db577..ad526523d0ef 100644
--- a/drivers/ieee1394/ieee1394_core.h
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -4,7 +4,6 @@
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/list.h>
-#include <linux/skbuff.h>
 #include <linux/types.h>
 #include <asm/atomic.h>
 
@@ -13,7 +12,7 @@
 
 struct hpsb_packet {
 	/* This struct is basically read-only for hosts with the exception of
-	 * the data buffer contents and xnext - see below. */
+	 * the data buffer contents and driver_list. */
 
 	/* This can be used for host driver internal linking.
 	 *
@@ -49,35 +48,27 @@ struct hpsb_packet {
 	/* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */
 	unsigned speed_code:2;
 
-	/*
-	 * *header and *data are guaranteed to be 32-bit DMAable and may be
-	 * overwritten to allow in-place byte swapping. Neither of these is
-	 * CRCed (the sizes also don't include CRC), but contain space for at
-	 * least one additional quadlet to allow in-place CRCing. The memory is
-	 * also guaranteed to be DMA mappable.
-	 */
-	quadlet_t *header;
-	quadlet_t *data;
-	size_t header_size;
-	size_t data_size;
-
 	struct hpsb_host *host;
 	unsigned int generation;
 
 	atomic_t refcnt;
+	struct list_head queue;
 
 	/* Function (and possible data to pass to it) to call when this
 	 * packet is completed. */
 	void (*complete_routine)(void *);
 	void *complete_data;
 
-	/* XXX This is just a hack at the moment */
-	struct sk_buff *skb;
-
 	/* Store jiffies for implementing bus timeouts. */
 	unsigned long sendtime;
 
-	quadlet_t embedded_header[5];
+	/* Sizes are in bytes. *data can be DMA-mapped. */
+	size_t allocated_data_size;	/* as allocated */
+	size_t data_size;		/* as filled in */
+	size_t header_size;		/* as filled in, not counting the CRC */
+	quadlet_t *data;
+	quadlet_t header[5];
+	quadlet_t embedded_data[0];	/* keep as last member */
 };
 
 void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index bb897a37d9f7..c6aefd9ad0e8 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -938,7 +938,8 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
 	int header_length = req->req.misc & 0xffff;
 	int expect_response = req->req.misc >> 16;
 
-	if ((header_length > req->req.length) || (header_length < 12)) {
+	if (header_length > req->req.length || header_length < 12 ||
+	    header_length > FIELD_SIZEOF(struct hpsb_packet, header)) {
 		req->req.error = RAW1394_ERROR_INVALID_ARG;
 		req->req.length = 0;
 		queue_complete_req(req);