author     Linus Torvalds <torvalds@linux-foundation.org>  2011-05-21 15:25:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-05-21 15:25:07 -0400
commit     dcb4a1f0e0367efb813907e5083998bbb98b0456 (patch)
tree       80b1edb72dd20ed20917e6c835bd49a0c0b52df2
parent     c21fd1a8c68ce3f49b00caf10337169262cfb8ad (diff)
parent     105e53f863c04e1d9e5bb34bf753c9fdbce6a60c (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6:
  firewire: sbp2: parallelize login, reconnect, logout
  firewire: sbp2: octlet AT payloads can be stack-allocated
  firewire: sbp2: omit Scsi_Host lock from queuecommand
  firewire: core: use non-reentrant workqueue with rescuer
  firewire: optimize iso queueing by setting wake only after the last packet
  firewire: octlet AT payloads can be stack-allocated
  firewire: ohci: optimize find_branch_descriptor()
  firewire: ohci: avoid separate DMA mapping for small AT payloads
  firewire: ohci: do not start DMA contexts before link is enabled
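
The iso queueing optimization in this merge changes the driver contract: fw_iso_context_queue()
no longer wakes the controller's DMA context for every packet, so callers now invoke the new
fw_iso_context_queue_flush() once after queueing a batch (the core-cdev.c, net.c, firedtv-fw.c
and sound/firewire hunks below are exactly this conversion). A minimal sketch of the pattern —
the stream structure, its members and example_refill() are hypothetical, only the two
fw_iso_context_* calls come from this merge:

	/*
	 * Illustrative only: queue a batch of packets, then wake the iso DMA
	 * context with a single register write via the new flush call.
	 */
	static void example_refill(struct example_stream *st, unsigned int packets)
	{
		unsigned int i;

		for (i = 0; i < packets; i++)
			fw_iso_context_queue(st->context, &st->packet[i],
					     &st->buffer, st->payload_offsets[i]);

		/* one MMIO wake for the whole batch instead of one per packet */
		fw_iso_context_queue_flush(st->context);
	}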
-rw-r--r--  drivers/firewire/core-card.c              |  27
-rw-r--r--  drivers/firewire/core-cdev.c              |   7
-rw-r--r--  drivers/firewire/core-device.c            |  31
-rw-r--r--  drivers/firewire/core-iso.c               |  27
-rw-r--r--  drivers/firewire/core-transaction.c       |  19
-rw-r--r--  drivers/firewire/core.h                   |   2
-rw-r--r--  drivers/firewire/net.c                    |   4
-rw-r--r--  drivers/firewire/ohci.c                   |  64
-rw-r--r--  drivers/firewire/sbp2.c                   |  37
-rw-r--r--  drivers/media/dvb/firewire/firedtv-avc.c  |  15
-rw-r--r--  drivers/media/dvb/firewire/firedtv-fw.c   |   1
-rw-r--r--  include/linux/firewire.h                  |   6
-rw-r--r--  sound/firewire/amdtp.c                    |   1
-rw-r--r--  sound/firewire/cmp.c                      |   3
-rw-r--r--  sound/firewire/iso-resources.c            |  12
-rw-r--r--  sound/firewire/iso-resources.h            |   1
16 files changed, 140 insertions, 117 deletions
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 3c44fbc81acb..29d2423fae6d 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -228,8 +228,8 @@ void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
 
 	/* Use an arbitrary short delay to combine multiple reset requests. */
 	fw_card_get(card);
-	if (!schedule_delayed_work(&card->br_work,
-				   delayed ? DIV_ROUND_UP(HZ, 100) : 0))
+	if (!queue_delayed_work(fw_workqueue, &card->br_work,
+				delayed ? DIV_ROUND_UP(HZ, 100) : 0))
 		fw_card_put(card);
 }
 EXPORT_SYMBOL(fw_schedule_bus_reset);
@@ -241,7 +241,7 @@ static void br_work(struct work_struct *work)
 	/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
 	if (card->reset_jiffies != 0 &&
 	    time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
-		if (!schedule_delayed_work(&card->br_work, 2 * HZ))
+		if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
 			fw_card_put(card);
 		return;
 	}
@@ -258,8 +258,7 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
 
 	if (!card->broadcast_channel_allocated) {
 		fw_iso_resource_manage(card, generation, 1ULL << 31,
-				       &channel, &bandwidth, true,
-				       card->bm_transaction_data);
+				       &channel, &bandwidth, true);
 		if (channel != 31) {
 			fw_notify("failed to allocate broadcast channel\n");
 			return;
@@ -294,6 +293,7 @@ static void bm_work(struct work_struct *work)
 	bool root_device_is_cmc;
 	bool irm_is_1394_1995_only;
 	bool keep_this_irm;
+	__be32 transaction_data[2];
 
 	spin_lock_irq(&card->lock);
 
@@ -355,21 +355,21 @@ static void bm_work(struct work_struct *work)
 		goto pick_me;
 	}
 
-	card->bm_transaction_data[0] = cpu_to_be32(0x3f);
-	card->bm_transaction_data[1] = cpu_to_be32(local_id);
+	transaction_data[0] = cpu_to_be32(0x3f);
+	transaction_data[1] = cpu_to_be32(local_id);
 
 	spin_unlock_irq(&card->lock);
 
 	rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
 			irm_id, generation, SCODE_100,
 			CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
-			card->bm_transaction_data, 8);
+			transaction_data, 8);
 
 	if (rcode == RCODE_GENERATION)
 		/* Another bus reset, BM work has been rescheduled. */
 		goto out;
 
-	bm_id = be32_to_cpu(card->bm_transaction_data[0]);
+	bm_id = be32_to_cpu(transaction_data[0]);
 
 	spin_lock_irq(&card->lock);
 	if (rcode == RCODE_COMPLETE && generation == card->generation)
@@ -490,11 +490,11 @@ static void bm_work(struct work_struct *work)
 		/*
 		 * Make sure that the cycle master sends cycle start packets.
 		 */
-		card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
+		transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
 		rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
 				root_id, generation, SCODE_100,
 				CSR_REGISTER_BASE + CSR_STATE_SET,
-				card->bm_transaction_data, 4);
+				transaction_data, 4);
 		if (rcode == RCODE_GENERATION)
 			goto out;
 	}
@@ -630,6 +630,10 @@ static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
 	return -ENODEV;
 }
 
+static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
+{
+}
+
 static const struct fw_card_driver dummy_driver_template = {
 	.read_phy_reg = dummy_read_phy_reg,
 	.update_phy_reg = dummy_update_phy_reg,
@@ -641,6 +645,7 @@ static const struct fw_card_driver dummy_driver_template = {
 	.start_iso = dummy_start_iso,
 	.set_iso_channels = dummy_set_iso_channels,
 	.queue_iso = dummy_queue_iso,
+	.flush_queue_iso = dummy_flush_queue_iso,
 };
 
 void fw_card_release(struct kref *kref)
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 62ac111af243..b1c11775839c 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -141,7 +141,6 @@ struct iso_resource {
 	int generation;
 	u64 channels;
 	s32 bandwidth;
-	__be32 transaction_data[2];
 	struct iso_resource_event *e_alloc, *e_dealloc;
 };
 
@@ -150,7 +149,7 @@ static void release_iso_resource(struct client *, struct client_resource *);
 static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
 {
 	client_get(r->client);
-	if (!schedule_delayed_work(&r->work, delay))
+	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
 		client_put(r->client);
 }
 
@@ -1108,6 +1107,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 		payload += u.packet.payload_length;
 		count++;
 	}
+	fw_iso_context_queue_flush(ctx);
 
 	a->size -= uptr_to_u64(p) - a->packets;
 	a->packets = uptr_to_u64(p);
@@ -1229,8 +1229,7 @@ static void iso_resource_work(struct work_struct *work)
 			r->channels, &channel, &bandwidth,
 			todo == ISO_RES_ALLOC ||
 			todo == ISO_RES_REALLOC ||
-			todo == ISO_RES_ALLOC_ONCE,
-			r->transaction_data);
+			todo == ISO_RES_ALLOC_ONCE);
 	/*
 	 * Is this generation outdated already? As long as this resource sticks
 	 * in the idr, it will be scheduled again for a newer generation or at
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 9a262439e3a7..95a471401892 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -725,6 +725,15 @@ struct fw_device *fw_device_get_by_devt(dev_t devt)
 	return device;
 }
 
+struct workqueue_struct *fw_workqueue;
+EXPORT_SYMBOL(fw_workqueue);
+
+static void fw_schedule_device_work(struct fw_device *device,
+				    unsigned long delay)
+{
+	queue_delayed_work(fw_workqueue, &device->work, delay);
+}
+
 /*
  * These defines control the retry behavior for reading the config
  * rom. It shouldn't be necessary to tweak these; if the device
@@ -750,7 +759,7 @@ static void fw_device_shutdown(struct work_struct *work)
 	if (time_before64(get_jiffies_64(),
 			  device->card->reset_jiffies + SHUTDOWN_DELAY)
 	    && !list_empty(&device->card->link)) {
-		schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+		fw_schedule_device_work(device, SHUTDOWN_DELAY);
 		return;
 	}
 
@@ -862,7 +871,7 @@ static int lookup_existing_device(struct device *dev, void *data)
 		fw_notify("rediscovered device %s\n", dev_name(dev));
 
 		PREPARE_DELAYED_WORK(&old->work, fw_device_update);
-		schedule_delayed_work(&old->work, 0);
+		fw_schedule_device_work(old, 0);
 
 		if (current_node == card->root_node)
 			fw_schedule_bm_work(card, 0);
@@ -953,7 +962,7 @@ static void fw_device_init(struct work_struct *work)
 		if (device->config_rom_retries < MAX_RETRIES &&
 		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
 			device->config_rom_retries++;
-			schedule_delayed_work(&device->work, RETRY_DELAY);
+			fw_schedule_device_work(device, RETRY_DELAY);
 		} else {
 			if (device->node->link_on)
 				fw_notify("giving up on config rom for node id %x\n",
@@ -1019,7 +1028,7 @@ static void fw_device_init(struct work_struct *work)
 		     FW_DEVICE_INITIALIZING,
 		     FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
 		PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-		schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+		fw_schedule_device_work(device, SHUTDOWN_DELAY);
 	} else {
 		if (device->config_rom_retries)
 			fw_notify("created device %s: GUID %08x%08x, S%d00, "
@@ -1098,7 +1107,7 @@ static void fw_device_refresh(struct work_struct *work)
 	if (device->config_rom_retries < MAX_RETRIES / 2 &&
 	    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
 		device->config_rom_retries++;
-		schedule_delayed_work(&device->work, RETRY_DELAY / 2);
+		fw_schedule_device_work(device, RETRY_DELAY / 2);
 
 		return;
 	}
@@ -1131,7 +1140,7 @@ static void fw_device_refresh(struct work_struct *work)
 	if (device->config_rom_retries < MAX_RETRIES &&
 	    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
 		device->config_rom_retries++;
-		schedule_delayed_work(&device->work, RETRY_DELAY);
+		fw_schedule_device_work(device, RETRY_DELAY);
 
 		return;
 	}
@@ -1158,7 +1167,7 @@ static void fw_device_refresh(struct work_struct *work)
  gone:
 	atomic_set(&device->state, FW_DEVICE_GONE);
 	PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-	schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+	fw_schedule_device_work(device, SHUTDOWN_DELAY);
  out:
 	if (node_id == card->root_node->node_id)
 		fw_schedule_bm_work(card, 0);
@@ -1214,7 +1223,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		 * first config rom scan half a second after bus reset.
 		 */
 		INIT_DELAYED_WORK(&device->work, fw_device_init);
-		schedule_delayed_work(&device->work, INITIAL_DELAY);
+		fw_schedule_device_work(device, INITIAL_DELAY);
 		break;
 
 	case FW_NODE_INITIATED_RESET:
@@ -1230,7 +1239,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 				    FW_DEVICE_RUNNING,
 				    FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
 			PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
-			schedule_delayed_work(&device->work,
+			fw_schedule_device_work(device,
 				device->is_local ? 0 : INITIAL_DELAY);
 		}
 		break;
@@ -1245,7 +1254,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		device->generation = card->generation;
 		if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
 			PREPARE_DELAYED_WORK(&device->work, fw_device_update);
-			schedule_delayed_work(&device->work, 0);
+			fw_schedule_device_work(device, 0);
 		}
 		break;
 
@@ -1270,7 +1279,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		if (atomic_xchg(&device->state,
 				FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
 			PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-			schedule_delayed_work(&device->work,
+			fw_schedule_device_work(device,
 				list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
 		}
 		break;
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 481056df9268..57c3973093ad 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -185,6 +185,12 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
 }
 EXPORT_SYMBOL(fw_iso_context_queue);
 
+void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
+{
+	ctx->card->driver->flush_queue_iso(ctx);
+}
+EXPORT_SYMBOL(fw_iso_context_queue_flush);
+
 int fw_iso_context_stop(struct fw_iso_context *ctx)
 {
 	return ctx->card->driver->stop_iso(ctx);
@@ -196,9 +202,10 @@ EXPORT_SYMBOL(fw_iso_context_stop);
  */
 
 static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
-			    int bandwidth, bool allocate, __be32 data[2])
+			    int bandwidth, bool allocate)
 {
 	int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
+	__be32 data[2];
 
 	/*
 	 * On a 1394a IRM with low contention, try < 1 is enough.
@@ -233,9 +240,10 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
 }
 
 static int manage_channel(struct fw_card *card, int irm_id, int generation,
-		u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
+		u32 channels_mask, u64 offset, bool allocate)
 {
 	__be32 bit, all, old;
+	__be32 data[2];
 	int channel, ret = -EIO, retry = 5;
 
 	old = all = allocate ? cpu_to_be32(~0) : 0;
@@ -284,7 +292,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 }
 
 static void deallocate_channel(struct fw_card *card, int irm_id,
-			       int generation, int channel, __be32 buffer[2])
+			       int generation, int channel)
 {
 	u32 mask;
 	u64 offset;
@@ -293,7 +301,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
 	offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
 				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
 
-	manage_channel(card, irm_id, generation, mask, offset, false, buffer);
+	manage_channel(card, irm_id, generation, mask, offset, false);
 }
 
 /**
@@ -322,7 +330,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
  */
 void fw_iso_resource_manage(struct fw_card *card, int generation,
 			    u64 channels_mask, int *channel, int *bandwidth,
-			    bool allocate, __be32 buffer[2])
+			    bool allocate)
 {
 	u32 channels_hi = channels_mask;	/* channels 31...0 */
 	u32 channels_lo = channels_mask >> 32;	/* channels 63...32 */
@@ -335,11 +343,11 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
 	if (channels_hi)
 		c = manage_channel(card, irm_id, generation, channels_hi,
 				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
-				allocate, buffer);
+				allocate);
 	if (channels_lo && c < 0) {
 		c = manage_channel(card, irm_id, generation, channels_lo,
 				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
-				allocate, buffer);
+				allocate);
 		if (c >= 0)
 			c += 32;
 	}
@@ -351,14 +359,13 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
 	if (*bandwidth == 0)
 		return;
 
-	ret = manage_bandwidth(card, irm_id, generation, *bandwidth,
-			       allocate, buffer);
+	ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
 	if (ret < 0)
 		*bandwidth = 0;
 
 	if (allocate && ret < 0) {
 		if (c >= 0)
-			deallocate_channel(card, irm_id, generation, c, buffer);
+			deallocate_channel(card, irm_id, generation, c);
 		*channel = ret;
 	}
 }
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index d00f8ce902cc..334b82a3542c 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -36,6 +36,7 @@
 #include <linux/string.h>
 #include <linux/timer.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 
 #include <asm/byteorder.h>
 
@@ -326,8 +327,8 @@ static int allocate_tlabel(struct fw_card *card)
  * It will contain tag, channel, and sy data instead of a node ID then.
  *
  * The payload buffer at @data is going to be DMA-mapped except in case of
- * quadlet-sized payload or of local (loopback) requests. Hence make sure that
- * the buffer complies with the restrictions for DMA-mapped memory. The
+ * @length <= 8 or of local (loopback) requests. Hence make sure that the
+ * buffer complies with the restrictions of the streaming DMA mapping API.
  * @payload must not be freed before the @callback is called.
  *
  * In case of request types without payload, @data is NULL and @length is 0.
@@ -411,7 +412,8 @@ static void transaction_callback(struct fw_card *card, int rcode,
  *
  * Returns the RCODE. See fw_send_request() for parameter documentation.
  * Unlike fw_send_request(), @data points to the payload of the request or/and
- * to the payload of the response.
+ * to the payload of the response. DMA mapping restrictions apply to outbound
+ * request payloads of >= 8 bytes but not to inbound response payloads.
  */
 int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
 		       int generation, int speed, unsigned long long offset,
@@ -1212,13 +1214,21 @@ static int __init fw_core_init(void)
 {
 	int ret;
 
+	fw_workqueue = alloc_workqueue("firewire",
+				       WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+	if (!fw_workqueue)
+		return -ENOMEM;
+
 	ret = bus_register(&fw_bus_type);
-	if (ret < 0)
+	if (ret < 0) {
+		destroy_workqueue(fw_workqueue);
 		return ret;
+	}
 
 	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
 	if (fw_cdev_major < 0) {
 		bus_unregister(&fw_bus_type);
+		destroy_workqueue(fw_workqueue);
 		return fw_cdev_major;
 	}
 
@@ -1234,6 +1244,7 @@ static void __exit fw_core_cleanup(void)
 {
 	unregister_chrdev(fw_cdev_major, "firewire");
 	bus_unregister(&fw_bus_type);
+	destroy_workqueue(fw_workqueue);
 	idr_destroy(&fw_device_idr);
 }
 
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 25e729cde2f7..0fe4e4e6eda7 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -97,6 +97,8 @@ struct fw_card_driver {
 			  struct fw_iso_buffer *buffer,
 			  unsigned long payload);
 
+	void (*flush_queue_iso)(struct fw_iso_context *ctx);
+
 	int (*stop_iso)(struct fw_iso_context *ctx);
 };
 
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 3f04dd3681cf..b9762d07198d 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -881,7 +881,9 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
 
 	spin_unlock_irqrestore(&dev->lock, flags);
 
-	if (retval < 0)
+	if (retval >= 0)
+		fw_iso_context_queue_flush(dev->broadcast_rcv_context);
+	else
 		fw_error("requeue failed\n");
 }
 
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 23d1468ad253..438e6c831170 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1006,13 +1006,12 @@ static void ar_context_run(struct ar_context *ctx)
 
 static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
 {
-	int b, key;
+	__le16 branch;
 
-	b   = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
-	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;
+	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);
 
 	/* figure out which descriptor the branch address goes in */
-	if (z == 2 && (b == 3 || key == 2))
+	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
 		return d;
 	else
 		return d + z - 1;
@@ -1193,9 +1192,6 @@ static void context_append(struct context *ctx,
 	wmb(); /* finish init of new descriptors before branch_address update */
 	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
 	ctx->prev = find_branch_descriptor(d, z);
-
-	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
-	flush_writes(ctx->ohci);
 }
 
 static void context_stop(struct context *ctx)
@@ -1218,6 +1214,7 @@ static void context_stop(struct context *ctx)
 }
 
 struct driver_data {
+	u8 inline_data[8];
 	struct fw_packet *packet;
 };
 
@@ -1301,20 +1298,28 @@ static int at_context_queue_packet(struct context *ctx,
 		return -1;
 	}
 
+	BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
 	driver_data = (struct driver_data *) &d[3];
 	driver_data->packet = packet;
 	packet->driver_data = driver_data;
 
 	if (packet->payload_length > 0) {
-		payload_bus =
-			dma_map_single(ohci->card.device, packet->payload,
-				       packet->payload_length, DMA_TO_DEVICE);
-		if (dma_mapping_error(ohci->card.device, payload_bus)) {
-			packet->ack = RCODE_SEND_ERROR;
-			return -1;
+		if (packet->payload_length > sizeof(driver_data->inline_data)) {
+			payload_bus = dma_map_single(ohci->card.device,
+						     packet->payload,
+						     packet->payload_length,
+						     DMA_TO_DEVICE);
+			if (dma_mapping_error(ohci->card.device, payload_bus)) {
+				packet->ack = RCODE_SEND_ERROR;
+				return -1;
+			}
+			packet->payload_bus = payload_bus;
+			packet->payload_mapped = true;
+		} else {
+			memcpy(driver_data->inline_data, packet->payload,
+			       packet->payload_length);
+			payload_bus = d_bus + 3 * sizeof(*d);
 		}
-		packet->payload_bus	= payload_bus;
-		packet->payload_mapped	= true;
 
 		d[2].req_count = cpu_to_le16(packet->payload_length);
 		d[2].data_address = cpu_to_le32(payload_bus);
@@ -1340,8 +1345,12 @@ static int at_context_queue_packet(struct context *ctx,
 
 	context_append(ctx, d, z, 4 - z);
 
-	if (!ctx->running)
+	if (ctx->running) {
+		reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+		flush_writes(ohci);
+	} else {
 		context_run(ctx, 0);
+	}
 
 	return 0;
 }
@@ -2066,8 +2075,6 @@ static int ohci_enable(struct fw_card *card,
 
 	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
 	reg_write(ohci, OHCI1394_LinkControlSet,
-		  OHCI1394_LinkControl_rcvSelfID |
-		  OHCI1394_LinkControl_rcvPhyPkt |
 		  OHCI1394_LinkControl_cycleTimerEnable |
 		  OHCI1394_LinkControl_cycleMaster);
 
@@ -2094,9 +2101,6 @@ static int ohci_enable(struct fw_card *card,
 	reg_write(ohci, OHCI1394_FairnessControl, 0);
 	card->priority_budget_implemented = ohci->pri_req_max != 0;
 
-	ar_context_run(&ohci->ar_request_ctx);
-	ar_context_run(&ohci->ar_response_ctx);
-
 	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
 	reg_write(ohci, OHCI1394_IntEventClear, ~0);
 	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
@@ -2186,7 +2190,13 @@ static int ohci_enable(struct fw_card *card,
 	reg_write(ohci, OHCI1394_HCControlSet,
 		  OHCI1394_HCControl_linkEnable |
 		  OHCI1394_HCControl_BIBimageValid);
-	flush_writes(ohci);
+
+	reg_write(ohci, OHCI1394_LinkControlSet,
+		  OHCI1394_LinkControl_rcvSelfID |
+		  OHCI1394_LinkControl_rcvPhyPkt);
+
+	ar_context_run(&ohci->ar_request_ctx);
+	ar_context_run(&ohci->ar_response_ctx);	/* also flushes writes */
 
 	/* We are ready to go, reset bus to finish initialization. */
 	fw_schedule_bus_reset(&ohci->card, false, true);
@@ -3112,6 +3122,15 @@ static int ohci_queue_iso(struct fw_iso_context *base,
 	return ret;
 }
 
+static void ohci_flush_queue_iso(struct fw_iso_context *base)
+{
+	struct context *ctx =
+			&container_of(base, struct iso_context, base)->context;
+
+	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+	flush_writes(ctx->ohci);
+}
+
 static const struct fw_card_driver ohci_driver = {
 	.enable = ohci_enable,
 	.read_phy_reg = ohci_read_phy_reg,
@@ -3128,6 +3147,7 @@ static const struct fw_card_driver ohci_driver = {
 	.free_iso_context = ohci_free_iso_context,
 	.set_iso_channels = ohci_set_iso_channels,
 	.queue_iso = ohci_queue_iso,
+	.flush_queue_iso = ohci_flush_queue_iso,
 	.start_iso = ohci_start_iso,
 	.stop_iso = ohci_stop_iso,
 };
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 77ed589b360d..41841a3e3f99 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -125,9 +125,6 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
 	", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
 	", or a combination)");
 
-/* I don't know why the SCSI stack doesn't define something like this... */
-typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
-
 static const char sbp2_driver_name[] = "sbp2";
 
 /*
@@ -261,7 +258,6 @@ struct sbp2_orb {
 	struct kref kref;
 	dma_addr_t request_bus;
 	int rcode;
-	struct sbp2_pointer pointer;
 	void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
 	struct list_head link;
 };
@@ -314,7 +310,6 @@ struct sbp2_command_orb {
 		u8 command_block[SBP2_MAX_CDB_SIZE];
 	} request;
 	struct scsi_cmnd *cmd;
-	scsi_done_fn_t done;
 	struct sbp2_logical_unit *lu;
 
 	struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
@@ -494,10 +489,11 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
 			  int node_id, int generation, u64 offset)
 {
 	struct fw_device *device = target_device(lu->tgt);
+	struct sbp2_pointer orb_pointer;
 	unsigned long flags;
 
-	orb->pointer.high = 0;
-	orb->pointer.low = cpu_to_be32(orb->request_bus);
+	orb_pointer.high = 0;
+	orb_pointer.low = cpu_to_be32(orb->request_bus);
 
 	spin_lock_irqsave(&device->card->lock, flags);
 	list_add_tail(&orb->link, &lu->orb_list);
@@ -508,7 +504,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
 
 	fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
 			node_id, generation, device->max_speed, offset,
-			&orb->pointer, 8, complete_transaction, orb);
+			&orb_pointer, 8, complete_transaction, orb);
 }
 
 static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
@@ -830,8 +826,6 @@ static void sbp2_target_put(struct sbp2_target *tgt)
 	kref_put(&tgt->kref, sbp2_release_target);
 }
 
-static struct workqueue_struct *sbp2_wq;
-
 /*
  * Always get the target's kref when scheduling work on one its units.
  * Each workqueue job is responsible to call sbp2_target_put() upon return.
@@ -839,7 +833,7 @@ static struct workqueue_struct *sbp2_wq;
 static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
 {
 	sbp2_target_get(lu->tgt);
-	if (!queue_delayed_work(sbp2_wq, &lu->work, delay))
+	if (!queue_delayed_work(fw_workqueue, &lu->work, delay))
 		sbp2_target_put(lu->tgt);
 }
 
@@ -1398,7 +1392,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
 	sbp2_unmap_scatterlist(device->card->device, orb);
 
 	orb->cmd->result = result;
-	orb->done(orb->cmd);
+	orb->cmd->scsi_done(orb->cmd);
 }
 
 static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
@@ -1463,7 +1457,8 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
 
 /* SCSI stack integration */
 
-static int sbp2_scsi_queuecommand_lck(struct scsi_cmnd *cmd, scsi_done_fn_t done)
+static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
+				  struct scsi_cmnd *cmd)
 {
 	struct sbp2_logical_unit *lu = cmd->device->hostdata;
 	struct fw_device *device = target_device(lu->tgt);
@@ -1477,7 +1472,7 @@ static int sbp2_scsi_queuecommand_lck(struct scsi_cmnd *cmd, scsi_done_fn_t done
 	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
 		fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
 		cmd->result = DID_ERROR << 16;
-		done(cmd);
+		cmd->scsi_done(cmd);
 		return 0;
 	}
 
@@ -1490,11 +1485,8 @@ static int sbp2_scsi_queuecommand_lck(struct scsi_cmnd *cmd, scsi_done_fn_t done
 	/* Initialize rcode to something not RCODE_COMPLETE. */
 	orb->base.rcode = -1;
 	kref_init(&orb->base.kref);
-
-	orb->lu = lu;
-	orb->done = done;
-	orb->cmd = cmd;
-
+	orb->lu = lu;
+	orb->cmd = cmd;
 	orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
 	orb->request.misc = cpu_to_be32(
 		COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
@@ -1529,8 +1521,6 @@ static int sbp2_scsi_queuecommand_lck(struct scsi_cmnd *cmd, scsi_done_fn_t done
 	return retval;
 }
 
-static DEF_SCSI_QCMD(sbp2_scsi_queuecommand)
-
 static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
 {
 	struct sbp2_logical_unit *lu = sdev->hostdata;
@@ -1653,17 +1643,12 @@ MODULE_ALIAS("sbp2");
 
 static int __init sbp2_init(void)
 {
-	sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
-	if (!sbp2_wq)
-		return -ENOMEM;
-
 	return driver_register(&sbp2_driver.driver);
 }
 
 static void __exit sbp2_cleanup(void)
 {
 	driver_unregister(&sbp2_driver.driver);
-	destroy_workqueue(sbp2_wq);
 }
 
 module_init(sbp2_init);
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index fc5ccd8c923a..21c52e3b522e 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -1320,14 +1320,10 @@ static int cmp_read(struct firedtv *fdtv, u64 addr, __be32 *data)
 {
 	int ret;
 
-	mutex_lock(&fdtv->avc_mutex);
-
 	ret = fdtv_read(fdtv, addr, data);
 	if (ret < 0)
 		dev_err(fdtv->device, "CMP: read I/O error\n");
 
-	mutex_unlock(&fdtv->avc_mutex);
-
 	return ret;
 }
 
@@ -1335,18 +1331,9 @@ static int cmp_lock(struct firedtv *fdtv, u64 addr, __be32 data[])
 {
 	int ret;
 
-	mutex_lock(&fdtv->avc_mutex);
-
-	/* data[] is stack-allocated and should not be DMA-mapped. */
-	memcpy(fdtv->avc_data, data, 8);
-
-	ret = fdtv_lock(fdtv, addr, fdtv->avc_data);
+	ret = fdtv_lock(fdtv, addr, data);
 	if (ret < 0)
 		dev_err(fdtv->device, "CMP: lock I/O error\n");
-	else
-		memcpy(data, fdtv->avc_data, 8);
-
-	mutex_unlock(&fdtv->avc_mutex);
 
 	return ret;
 }
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c
index 8022b743af91..864b6274c729 100644
--- a/drivers/media/dvb/firewire/firedtv-fw.c
+++ b/drivers/media/dvb/firewire/firedtv-fw.c
@@ -125,6 +125,7 @@ static void handle_iso(struct fw_iso_context *context, u32 cycle,
 
 		i = (i + 1) & (N_PACKETS - 1);
 	}
+	fw_iso_context_queue_flush(ctx->context);
 	ctx->current_packet = i;
 }
 
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index c64f3680d4f1..5e6f42789afe 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -125,7 +125,6 @@ struct fw_card {
 	struct delayed_work bm_work; /* bus manager job */
 	int bm_retries;
 	int bm_generation;
-	__be32 bm_transaction_data[2];
 	int bm_node_id;
 	bool bm_abdicate;
 
@@ -441,12 +440,15 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
 			 struct fw_iso_packet *packet,
 			 struct fw_iso_buffer *buffer,
 			 unsigned long payload);
+void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
 int fw_iso_context_start(struct fw_iso_context *ctx,
 			 int cycle, int sync, int tags);
 int fw_iso_context_stop(struct fw_iso_context *ctx);
 void fw_iso_context_destroy(struct fw_iso_context *ctx);
 void fw_iso_resource_manage(struct fw_card *card, int generation,
 			    u64 channels_mask, int *channel, int *bandwidth,
-			    bool allocate, __be32 buffer[2]);
+			    bool allocate);
+
+extern struct workqueue_struct *fw_workqueue;
 
 #endif /* _LINUX_FIREWIRE_H */
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index b18140ff2b93..87657dd7714c 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -396,6 +396,7 @@ static void out_packet_callback(struct fw_iso_context *context, u32 cycle,
 
 	for (i = 0; i < packets; ++i)
 		queue_out_packet(s, ++cycle);
+	fw_iso_context_queue_flush(s->context);
 }
 
 static int queue_initial_skip_packets(struct amdtp_out_stream *s)
diff --git a/sound/firewire/cmp.c b/sound/firewire/cmp.c
index 4a37f3a6fab9..14cacbc655dd 100644
--- a/sound/firewire/cmp.c
+++ b/sound/firewire/cmp.c
@@ -49,10 +49,9 @@ static int pcr_modify(struct cmp_connection *c,
 		      enum bus_reset_handling bus_reset_handling)
 {
 	struct fw_device *device = fw_parent_device(c->resources.unit);
-	__be32 *buffer = c->resources.buffer;
 	int generation = c->resources.generation;
 	int rcode, errors = 0;
-	__be32 old_arg;
+	__be32 old_arg, buffer[2];
 	int err;
 
 	buffer[0] = c->last_pcr_value;
diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c
index 775dbd5f3445..bb9c0c1fb529 100644
--- a/sound/firewire/iso-resources.c
+++ b/sound/firewire/iso-resources.c
@@ -11,7 +11,6 @@
 #include <linux/jiffies.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
-#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include "iso-resources.h"
 
@@ -25,10 +24,6 @@
  */
 int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit)
 {
-	r->buffer = kmalloc(2 * 4, GFP_KERNEL);
-	if (!r->buffer)
-		return -ENOMEM;
-
 	r->channels_mask = ~0uLL;
 	r->unit = fw_unit_get(unit);
 	mutex_init(&r->mutex);
@@ -44,7 +39,6 @@ int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit)
 void fw_iso_resources_destroy(struct fw_iso_resources *r)
 {
 	WARN_ON(r->allocated);
-	kfree(r->buffer);
 	mutex_destroy(&r->mutex);
 	fw_unit_put(r->unit);
 }
@@ -131,7 +125,7 @@ retry_after_bus_reset:
 
 	bandwidth = r->bandwidth + r->bandwidth_overhead;
 	fw_iso_resource_manage(card, r->generation, r->channels_mask,
-			       &channel, &bandwidth, true, r->buffer);
+			       &channel, &bandwidth, true);
 	if (channel == -EAGAIN) {
 		mutex_unlock(&r->mutex);
 		goto retry_after_bus_reset;
@@ -184,7 +178,7 @@ int fw_iso_resources_update(struct fw_iso_resources *r)
 	bandwidth = r->bandwidth + r->bandwidth_overhead;
 
 	fw_iso_resource_manage(card, r->generation, 1uLL << r->channel,
-			       &channel, &bandwidth, true, r->buffer);
+			       &channel, &bandwidth, true);
 	/*
 	 * When another bus reset happens, pretend that the allocation
 	 * succeeded; we will try again for the new generation later.
@@ -220,7 +214,7 @@ void fw_iso_resources_free(struct fw_iso_resources *r)
 	if (r->allocated) {
 		bandwidth = r->bandwidth + r->bandwidth_overhead;
 		fw_iso_resource_manage(card, r->generation, 1uLL << r->channel,
-				       &channel, &bandwidth, false, r->buffer);
+				       &channel, &bandwidth, false);
 		if (channel < 0)
 			dev_err(&r->unit->device,
 				"isochronous resource deallocation failed\n");
diff --git a/sound/firewire/iso-resources.h b/sound/firewire/iso-resources.h
index 3f0730e4d841..5a9af7c61657 100644
--- a/sound/firewire/iso-resources.h
+++ b/sound/firewire/iso-resources.h
@@ -24,7 +24,6 @@ struct fw_iso_resources {
 	unsigned int bandwidth_overhead;
 	int generation; /* in which allocation is valid */
 	bool allocated;
-	__be32 *buffer;
 };
 
 int fw_iso_resources_init(struct fw_iso_resources *r,