author     Linus Torvalds <torvalds@linux-foundation.org>  2008-10-16 18:02:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-10-16 18:02:24 -0400
commit     1eee21abaf54338b379b33d85b28b495292c2211 (patch)
tree       f51f3e270c7e075f2844a2c5e400cfc1b9481ce3 /drivers
parent     9d85db2244d71fa4f2f9747a090c1920f07a8b4b (diff)
parent     be585c07dd577faac26014db4246e6d7c7a131e7 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6:
  firewire: Add more documentation to firewire-cdev.h
  firewire: fix ioctl() return code
  firewire: fix setting tag and sy in iso transmission
  firewire: fw-sbp2: fix another small generation access bug
  firewire: fw-sbp2: enforce s/g segment size limit
  firewire: fw_send_request_sync()
  ieee1394: survive a few seconds connection loss
  ieee1394: nodemgr clean up class iterators
  ieee1394: dv1394, video1394: remove unnecessary expressions
  ieee1394: raw1394: make write() thread-safe
  ieee1394: raw1394: narrow down the state_mutex protected region
  ieee1394: raw1394: replace BKL by local mutex, make ioctl() and mmap() thread-safe
  ieee1394: sbp2: enforce s/g segment size limit
  ieee1394: sbp2: check for DMA mapping failures
  ieee1394: sbp2: stricter dma_sync
  ieee1394: Use DIV_ROUND_UP
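The "firewire: fw_send_request_sync()" entry above introduces a synchronous helper, fw_run_transaction(), whose prototype and first callers appear in the fw-transaction.c/.h and fw-device.c hunks below. As a rough, illustrative sketch only (everything here except fw_run_transaction() and the struct fw_device fields shown in the diff is hypothetical), a caller that previously paired fw_send_request() with an on-stack completion can now simply do:

	/*
	 * Hedged sketch, not part of this merge: read one config ROM
	 * quadlet with the new synchronous helper, mirroring the
	 * read_rom() conversion in fw-device.c below.
	 */
	static int example_read_quadlet(struct fw_device *device, int generation,
					unsigned long long offset, u32 *data)
	{
		int rcode;

		/* Sleeps until the transaction completes and returns the RCODE. */
		rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
					   device->node_id, generation,
					   device->max_speed, offset, data, 4);
		if (rcode != RCODE_COMPLETE)
			return -EIO;	/* illustrative error mapping */

		be32_to_cpus(data);	/* quadlet payload arrives big-endian */
		return 0;
	}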
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/firewire/fw-card.c          |  56
-rw-r--r--  drivers/firewire/fw-cdev.c          |   6
-rw-r--r--  drivers/firewire/fw-device.c        |  37
-rw-r--r--  drivers/firewire/fw-sbp2.c          | 116
-rw-r--r--  drivers/firewire/fw-transaction.c   |  48
-rw-r--r--  drivers/firewire/fw-transaction.h   |   9
-rw-r--r--  drivers/ieee1394/csr1212.c          |   2
-rw-r--r--  drivers/ieee1394/dv1394.c           |   2
-rw-r--r--  drivers/ieee1394/eth1394.c          |   2
-rw-r--r--  drivers/ieee1394/nodemgr.c          | 279
-rw-r--r--  drivers/ieee1394/nodemgr.h          |   2
-rw-r--r--  drivers/ieee1394/raw1394-private.h  |   1
-rw-r--r--  drivers/ieee1394/raw1394.c          | 230
-rw-r--r--  drivers/ieee1394/sbp2.c             | 218
-rw-r--r--  drivers/ieee1394/sbp2.h             |  33
-rw-r--r--  drivers/ieee1394/video1394.c        |   8
16 files changed, 463 insertions, 586 deletions
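One of the smaller fixes in this merge, "firewire: fix setting tag and sy in iso transmission", corrects the bit masks in GET_TAG() and GET_SY() (first fw-cdev.c hunk below): tag is a 2-bit field at bits 18-19 and sy a 4-bit field at bits 20-23, so the old masks 0x02 and 0x04 silently dropped bits. A stand-alone demonstration of the effect (hypothetical user-space code, not part of the patch):

	#include <stdio.h>

	#define OLD_GET_TAG(v) (((v) >> 18) & 0x02)
	#define NEW_GET_TAG(v) (((v) >> 18) & 0x03)
	#define OLD_GET_SY(v)  (((v) >> 20) & 0x04)
	#define NEW_GET_SY(v)  (((v) >> 20) & 0x0f)

	int main(void)
	{
		/* control word with tag = 3 (bits 18-19) and sy = 11 (bits 20-23) */
		unsigned int v = (3u << 18) | (11u << 20);

		/* old macros lose bits: tag comes back as 2, sy as 0 */
		printf("tag: old=%u new=%u\n", OLD_GET_TAG(v), NEW_GET_TAG(v));
		printf("sy:  old=%u new=%u\n", OLD_GET_SY(v), NEW_GET_SY(v));
		return 0;
	}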
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index bbd73a406e53..418c18f07e9d 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -189,39 +189,16 @@ static const char gap_count_table[] = {
189 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40 189 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
190}; 190};
191 191
192struct bm_data {
193 struct fw_transaction t;
194 struct {
195 __be32 arg;
196 __be32 data;
197 } lock;
198 u32 old;
199 int rcode;
200 struct completion done;
201};
202
203static void
204complete_bm_lock(struct fw_card *card, int rcode,
205 void *payload, size_t length, void *data)
206{
207 struct bm_data *bmd = data;
208
209 if (rcode == RCODE_COMPLETE)
210 bmd->old = be32_to_cpu(*(__be32 *) payload);
211 bmd->rcode = rcode;
212 complete(&bmd->done);
213}
214
215static void 192static void
216fw_card_bm_work(struct work_struct *work) 193fw_card_bm_work(struct work_struct *work)
217{ 194{
218 struct fw_card *card = container_of(work, struct fw_card, work.work); 195 struct fw_card *card = container_of(work, struct fw_card, work.work);
219 struct fw_device *root_device; 196 struct fw_device *root_device;
220 struct fw_node *root_node, *local_node; 197 struct fw_node *root_node, *local_node;
221 struct bm_data bmd;
222 unsigned long flags; 198 unsigned long flags;
223 int root_id, new_root_id, irm_id, gap_count, generation, grace; 199 int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode;
224 bool do_reset = false; 200 bool do_reset = false;
201 __be32 lock_data[2];
225 202
226 spin_lock_irqsave(&card->lock, flags); 203 spin_lock_irqsave(&card->lock, flags);
227 local_node = card->local_node; 204 local_node = card->local_node;
@@ -263,33 +240,28 @@ fw_card_bm_work(struct work_struct *work)
263 goto pick_me; 240 goto pick_me;
264 } 241 }
265 242
266 bmd.lock.arg = cpu_to_be32(0x3f); 243 lock_data[0] = cpu_to_be32(0x3f);
267 bmd.lock.data = cpu_to_be32(local_node->node_id); 244 lock_data[1] = cpu_to_be32(local_node->node_id);
268 245
269 spin_unlock_irqrestore(&card->lock, flags); 246 spin_unlock_irqrestore(&card->lock, flags);
270 247
271 init_completion(&bmd.done); 248 rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
272 fw_send_request(card, &bmd.t, TCODE_LOCK_COMPARE_SWAP, 249 irm_id, generation, SCODE_100,
273 irm_id, generation, 250 CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
274 SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, 251 lock_data, sizeof(lock_data));
275 &bmd.lock, sizeof(bmd.lock),
276 complete_bm_lock, &bmd);
277 wait_for_completion(&bmd.done);
278 252
279 if (bmd.rcode == RCODE_GENERATION) { 253 if (rcode == RCODE_GENERATION)
280 /* 254 /* Another bus reset, BM work has been rescheduled. */
281 * Another bus reset happened. Just return,
282 * the BM work has been rescheduled.
283 */
284 goto out; 255 goto out;
285 }
286 256
287 if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) 257 if (rcode == RCODE_COMPLETE &&
258 lock_data[0] != cpu_to_be32(0x3f))
288 /* Somebody else is BM, let them do the work. */ 259 /* Somebody else is BM, let them do the work. */
289 goto out; 260 goto out;
290 261
291 spin_lock_irqsave(&card->lock, flags); 262 spin_lock_irqsave(&card->lock, flags);
292 if (bmd.rcode != RCODE_COMPLETE) { 263
264 if (rcode != RCODE_COMPLETE) {
293 /* 265 /*
294 * The lock request failed, maybe the IRM 266 * The lock request failed, maybe the IRM
295 * isn't really IRM capable after all. Let's 267 * isn't really IRM capable after all. Let's
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index 2e6d5848d217..ed03234cbea8 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -720,8 +720,8 @@ static int ioctl_create_iso_context(struct client *client, void *buffer)
720#define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff) 720#define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff)
721#define GET_INTERRUPT(v) (((v) >> 16) & 0x01) 721#define GET_INTERRUPT(v) (((v) >> 16) & 0x01)
722#define GET_SKIP(v) (((v) >> 17) & 0x01) 722#define GET_SKIP(v) (((v) >> 17) & 0x01)
723#define GET_TAG(v) (((v) >> 18) & 0x02) 723#define GET_TAG(v) (((v) >> 18) & 0x03)
724#define GET_SY(v) (((v) >> 20) & 0x04) 724#define GET_SY(v) (((v) >> 20) & 0x0f)
725#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff) 725#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
726 726
727static int ioctl_queue_iso(struct client *client, void *buffer) 727static int ioctl_queue_iso(struct client *client, void *buffer)
@@ -913,7 +913,7 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
913 return -EFAULT; 913 return -EFAULT;
914 } 914 }
915 915
916 return 0; 916 return retval;
917} 917}
918 918
919static long 919static long
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index 0855fb5568e8..3fccdd484100 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -381,46 +381,21 @@ static struct device_attribute fw_device_attributes[] = {
381 __ATTR_NULL, 381 __ATTR_NULL,
382}; 382};
383 383
384struct read_quadlet_callback_data {
385 struct completion done;
386 int rcode;
387 u32 data;
388};
389
390static void
391complete_transaction(struct fw_card *card, int rcode,
392 void *payload, size_t length, void *data)
393{
394 struct read_quadlet_callback_data *callback_data = data;
395
396 if (rcode == RCODE_COMPLETE)
397 callback_data->data = be32_to_cpu(*(__be32 *)payload);
398 callback_data->rcode = rcode;
399 complete(&callback_data->done);
400}
401
402static int 384static int
403read_rom(struct fw_device *device, int generation, int index, u32 *data) 385read_rom(struct fw_device *device, int generation, int index, u32 *data)
404{ 386{
405 struct read_quadlet_callback_data callback_data; 387 int rcode;
406 struct fw_transaction t;
407 u64 offset;
408 388
409 /* device->node_id, accessed below, must not be older than generation */ 389 /* device->node_id, accessed below, must not be older than generation */
410 smp_rmb(); 390 smp_rmb();
411 391
412 init_completion(&callback_data.done); 392 rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
413
414 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
415 fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
416 device->node_id, generation, device->max_speed, 393 device->node_id, generation, device->max_speed,
417 offset, NULL, 4, complete_transaction, &callback_data); 394 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
418 395 data, 4);
419 wait_for_completion(&callback_data.done); 396 be32_to_cpus(data);
420
421 *data = callback_data.data;
422 397
423 return callback_data.rcode; 398 return rcode;
424} 399}
425 400
426#define READ_BIB_ROM_SIZE 256 401#define READ_BIB_ROM_SIZE 256
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index aaff50ebba1d..ef0b9b419c27 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -29,6 +29,7 @@
29 */ 29 */
30 30
31#include <linux/blkdev.h> 31#include <linux/blkdev.h>
32#include <linux/bug.h>
32#include <linux/delay.h> 33#include <linux/delay.h>
33#include <linux/device.h> 34#include <linux/device.h>
34#include <linux/dma-mapping.h> 35#include <linux/dma-mapping.h>
@@ -181,10 +182,16 @@ struct sbp2_target {
181#define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ 182#define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */
182#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ 183#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */
183#define SBP2_ORB_NULL 0x80000000 184#define SBP2_ORB_NULL 0x80000000
184#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
185#define SBP2_RETRY_LIMIT 0xf /* 15 retries */ 185#define SBP2_RETRY_LIMIT 0xf /* 15 retries */
186#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */ 186#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */
187 187
188/*
189 * The default maximum s/g segment size of a FireWire controller is
190 * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
191 * be quadlet-aligned, we set the length limit to 0xffff & ~3.
192 */
193#define SBP2_MAX_SEG_SIZE 0xfffc
194
188/* Unit directory keys */ 195/* Unit directory keys */
189#define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a 196#define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a
190#define SBP2_CSR_FIRMWARE_REVISION 0x3c 197#define SBP2_CSR_FIRMWARE_REVISION 0x3c
@@ -621,25 +628,15 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
621 return retval; 628 return retval;
622} 629}
623 630
624static void
625complete_agent_reset_write(struct fw_card *card, int rcode,
626 void *payload, size_t length, void *done)
627{
628 complete(done);
629}
630
631static void sbp2_agent_reset(struct sbp2_logical_unit *lu) 631static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
632{ 632{
633 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 633 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
634 DECLARE_COMPLETION_ONSTACK(done); 634 __be32 d = 0;
635 struct fw_transaction t;
636 static u32 z;
637 635
638 fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST, 636 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
639 lu->tgt->node_id, lu->generation, device->max_speed, 637 lu->tgt->node_id, lu->generation, device->max_speed,
640 lu->command_block_agent_address + SBP2_AGENT_RESET, 638 lu->command_block_agent_address + SBP2_AGENT_RESET,
641 &z, sizeof(z), complete_agent_reset_write, &done); 639 &d, sizeof(d));
642 wait_for_completion(&done);
643} 640}
644 641
645static void 642static void
@@ -653,7 +650,7 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
653{ 650{
654 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 651 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
655 struct fw_transaction *t; 652 struct fw_transaction *t;
656 static u32 z; 653 static __be32 d;
657 654
658 t = kmalloc(sizeof(*t), GFP_ATOMIC); 655 t = kmalloc(sizeof(*t), GFP_ATOMIC);
659 if (t == NULL) 656 if (t == NULL)
@@ -662,7 +659,7 @@ static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
662 fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, 659 fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
663 lu->tgt->node_id, lu->generation, device->max_speed, 660 lu->tgt->node_id, lu->generation, device->max_speed,
664 lu->command_block_agent_address + SBP2_AGENT_RESET, 661 lu->command_block_agent_address + SBP2_AGENT_RESET,
665 &z, sizeof(z), complete_agent_reset_write_no_wait, t); 662 &d, sizeof(d), complete_agent_reset_write_no_wait, t);
666} 663}
667 664
668static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation) 665static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation)
@@ -823,13 +820,6 @@ static void sbp2_target_put(struct sbp2_target *tgt)
823 kref_put(&tgt->kref, sbp2_release_target); 820 kref_put(&tgt->kref, sbp2_release_target);
824} 821}
825 822
826static void
827complete_set_busy_timeout(struct fw_card *card, int rcode,
828 void *payload, size_t length, void *done)
829{
830 complete(done);
831}
832
833/* 823/*
834 * Write retransmit retry values into the BUSY_TIMEOUT register. 824 * Write retransmit retry values into the BUSY_TIMEOUT register.
835 * - The single-phase retry protocol is supported by all SBP-2 devices, but the 825 * - The single-phase retry protocol is supported by all SBP-2 devices, but the
@@ -849,17 +839,12 @@ complete_set_busy_timeout(struct fw_card *card, int rcode,
849static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) 839static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
850{ 840{
851 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 841 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
852 DECLARE_COMPLETION_ONSTACK(done); 842 __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
853 struct fw_transaction t;
854 static __be32 busy_timeout;
855
856 busy_timeout = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
857 843
858 fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST, 844 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
859 lu->tgt->node_id, lu->generation, device->max_speed, 845 lu->tgt->node_id, lu->generation, device->max_speed,
860 CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &busy_timeout, 846 CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT,
861 sizeof(busy_timeout), complete_set_busy_timeout, &done); 847 &d, sizeof(d));
862 wait_for_completion(&done);
863} 848}
864 849
865static void sbp2_reconnect(struct work_struct *work); 850static void sbp2_reconnect(struct work_struct *work);
@@ -1121,6 +1106,10 @@ static int sbp2_probe(struct device *dev)
1121 struct Scsi_Host *shost; 1106 struct Scsi_Host *shost;
1122 u32 model, firmware_revision; 1107 u32 model, firmware_revision;
1123 1108
1109 if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
1110 BUG_ON(dma_set_max_seg_size(device->card->device,
1111 SBP2_MAX_SEG_SIZE));
1112
1124 shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); 1113 shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
1125 if (shost == NULL) 1114 if (shost == NULL)
1126 return -ENOMEM; 1115 return -ENOMEM;
@@ -1369,14 +1358,12 @@ static int
1369sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device, 1358sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
1370 struct sbp2_logical_unit *lu) 1359 struct sbp2_logical_unit *lu)
1371{ 1360{
1372 struct scatterlist *sg; 1361 struct scatterlist *sg = scsi_sglist(orb->cmd);
1373 int sg_len, l, i, j, count; 1362 int i, n;
1374 dma_addr_t sg_addr; 1363
1375 1364 n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
1376 sg = scsi_sglist(orb->cmd); 1365 orb->cmd->sc_data_direction);
1377 count = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd), 1366 if (n == 0)
1378 orb->cmd->sc_data_direction);
1379 if (count == 0)
1380 goto fail; 1367 goto fail;
1381 1368
1382 /* 1369 /*
@@ -1386,7 +1373,7 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
1386 * as the second generation iPod which doesn't support page 1373 * as the second generation iPod which doesn't support page
1387 * tables. 1374 * tables.
1388 */ 1375 */
1389 if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) { 1376 if (n == 1) {
1390 orb->request.data_descriptor.high = 1377 orb->request.data_descriptor.high =
1391 cpu_to_be32(lu->tgt->address_high); 1378 cpu_to_be32(lu->tgt->address_high);
1392 orb->request.data_descriptor.low = 1379 orb->request.data_descriptor.low =
@@ -1396,29 +1383,9 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
1396 return 0; 1383 return 0;
1397 } 1384 }
1398 1385
1399 /* 1386 for_each_sg(sg, sg, n, i) {
1400 * Convert the scatterlist to an sbp2 page table. If any 1387 orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
1401 * scatterlist entries are too big for sbp2, we split them as we 1388 orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg));
1402 * go. Even if we ask the block I/O layer to not give us sg
1403 * elements larger than 65535 bytes, some IOMMUs may merge sg elements
1404 * during DMA mapping, and Linux currently doesn't prevent this.
1405 */
1406 for (i = 0, j = 0; i < count; i++, sg = sg_next(sg)) {
1407 sg_len = sg_dma_len(sg);
1408 sg_addr = sg_dma_address(sg);
1409 while (sg_len) {
1410 /* FIXME: This won't get us out of the pinch. */
1411 if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
1412 fw_error("page table overflow\n");
1413 goto fail_page_table;
1414 }
1415 l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
1416 orb->page_table[j].low = cpu_to_be32(sg_addr);
1417 orb->page_table[j].high = cpu_to_be32(l << 16);
1418 sg_addr += l;
1419 sg_len -= l;
1420 j++;
1421 }
1422 } 1389 }
1423 1390
1424 orb->page_table_bus = 1391 orb->page_table_bus =
@@ -1437,13 +1404,13 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
1437 orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high); 1404 orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
1438 orb->request.data_descriptor.low = cpu_to_be32(orb->page_table_bus); 1405 orb->request.data_descriptor.low = cpu_to_be32(orb->page_table_bus);
1439 orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT | 1406 orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
1440 COMMAND_ORB_DATA_SIZE(j)); 1407 COMMAND_ORB_DATA_SIZE(n));
1441 1408
1442 return 0; 1409 return 0;
1443 1410
1444 fail_page_table: 1411 fail_page_table:
1445 dma_unmap_sg(device->card->device, sg, scsi_sg_count(orb->cmd), 1412 dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
1446 orb->cmd->sc_data_direction); 1413 scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction);
1447 fail: 1414 fail:
1448 return -ENOMEM; 1415 return -ENOMEM;
1449} 1416}
@@ -1456,7 +1423,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1456 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 1423 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
1457 struct sbp2_command_orb *orb; 1424 struct sbp2_command_orb *orb;
1458 unsigned int max_payload; 1425 unsigned int max_payload;
1459 int retval = SCSI_MLQUEUE_HOST_BUSY; 1426 int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
1460 1427
1461 /* 1428 /*
1462 * Bidirectional commands are not yet implemented, and unknown 1429 * Bidirectional commands are not yet implemented, and unknown
@@ -1500,6 +1467,9 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1500 if (cmd->sc_data_direction == DMA_FROM_DEVICE) 1467 if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1501 orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION); 1468 orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);
1502 1469
1470 generation = device->generation;
1471 smp_rmb(); /* sbp2_map_scatterlist looks at tgt->address_high */
1472
1503 if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0) 1473 if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
1504 goto out; 1474 goto out;
1505 1475
@@ -1512,7 +1482,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1512 if (dma_mapping_error(device->card->device, orb->base.request_bus)) 1482 if (dma_mapping_error(device->card->device, orb->base.request_bus))
1513 goto out; 1483 goto out;
1514 1484
1515 sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation, 1485 sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
1516 lu->command_block_agent_address + SBP2_ORB_POINTER); 1486 lu->command_block_agent_address + SBP2_ORB_POINTER);
1517 retval = 0; 1487 retval = 0;
1518 out: 1488 out:
@@ -1564,6 +1534,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
1564 if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) 1534 if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
1565 blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512); 1535 blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
1566 1536
1537 blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
1538
1567 return 0; 1539 return 0;
1568} 1540}
1569 1541
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index e5d1a0b64fcf..022ac4fabb67 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -247,7 +247,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
247 */ 247 */
248void 248void
249fw_send_request(struct fw_card *card, struct fw_transaction *t, 249fw_send_request(struct fw_card *card, struct fw_transaction *t,
250 int tcode, int node_id, int generation, int speed, 250 int tcode, int destination_id, int generation, int speed,
251 unsigned long long offset, 251 unsigned long long offset,
252 void *payload, size_t length, 252 void *payload, size_t length,
253 fw_transaction_callback_t callback, void *callback_data) 253 fw_transaction_callback_t callback, void *callback_data)
@@ -279,13 +279,14 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
279 card->current_tlabel = (card->current_tlabel + 1) & 0x1f; 279 card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
280 card->tlabel_mask |= (1 << tlabel); 280 card->tlabel_mask |= (1 << tlabel);
281 281
282 t->node_id = node_id; 282 t->node_id = destination_id;
283 t->tlabel = tlabel; 283 t->tlabel = tlabel;
284 t->callback = callback; 284 t->callback = callback;
285 t->callback_data = callback_data; 285 t->callback_data = callback_data;
286 286
287 fw_fill_request(&t->packet, tcode, t->tlabel, node_id, card->node_id, 287 fw_fill_request(&t->packet, tcode, t->tlabel,
288 generation, speed, offset, payload, length); 288 destination_id, card->node_id, generation,
289 speed, offset, payload, length);
289 t->packet.callback = transmit_complete_callback; 290 t->packet.callback = transmit_complete_callback;
290 291
291 list_add_tail(&t->link, &card->transaction_list); 292 list_add_tail(&t->link, &card->transaction_list);
@@ -296,6 +297,45 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
296} 297}
297EXPORT_SYMBOL(fw_send_request); 298EXPORT_SYMBOL(fw_send_request);
298 299
300struct transaction_callback_data {
301 struct completion done;
302 void *payload;
303 int rcode;
304};
305
306static void transaction_callback(struct fw_card *card, int rcode,
307 void *payload, size_t length, void *data)
308{
309 struct transaction_callback_data *d = data;
310
311 if (rcode == RCODE_COMPLETE)
312 memcpy(d->payload, payload, length);
313 d->rcode = rcode;
314 complete(&d->done);
315}
316
317/**
318 * fw_run_transaction - send request and sleep until transaction is completed
319 *
320 * Returns the RCODE.
321 */
322int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
323 int generation, int speed, unsigned long long offset,
324 void *data, size_t length)
325{
326 struct transaction_callback_data d;
327 struct fw_transaction t;
328
329 init_completion(&d.done);
330 d.payload = data;
331 fw_send_request(card, &t, tcode, destination_id, generation, speed,
332 offset, data, length, transaction_callback, &d);
333 wait_for_completion(&d.done);
334
335 return d.rcode;
336}
337EXPORT_SYMBOL(fw_run_transaction);
338
299static DEFINE_MUTEX(phy_config_mutex); 339static DEFINE_MUTEX(phy_config_mutex);
300static DECLARE_COMPLETION(phy_config_done); 340static DECLARE_COMPLETION(phy_config_done);
301 341
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 2ae1b0d6cb7b..027f58ce81ad 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -426,11 +426,14 @@ fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
426 426
427void 427void
428fw_send_request(struct fw_card *card, struct fw_transaction *t, 428fw_send_request(struct fw_card *card, struct fw_transaction *t,
429 int tcode, int node_id, int generation, int speed, 429 int tcode, int destination_id, int generation, int speed,
430 unsigned long long offset, 430 unsigned long long offset, void *data, size_t length,
431 void *data, size_t length,
432 fw_transaction_callback_t callback, void *callback_data); 431 fw_transaction_callback_t callback, void *callback_data);
433 432
433int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
434 int generation, int speed, unsigned long long offset,
435 void *data, size_t length);
436
434int fw_cancel_transaction(struct fw_card *card, 437int fw_cancel_transaction(struct fw_card *card,
435 struct fw_transaction *transaction); 438 struct fw_transaction *transaction);
436 439
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index 9f95337139e3..5e38a68b8af2 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -84,7 +84,7 @@ static const u8 csr1212_key_id_type_map[0x30] = {
84 84
85 85
86#define quads_to_bytes(_q) ((_q) * sizeof(u32)) 86#define quads_to_bytes(_q) ((_q) * sizeof(u32))
87#define bytes_to_quads(_b) (((_b) + sizeof(u32) - 1) / sizeof(u32)) 87#define bytes_to_quads(_b) DIV_ROUND_UP(_b, sizeof(u32))
88 88
89static void free_keyval(struct csr1212_keyval *kv) 89static void free_keyval(struct csr1212_keyval *kv)
90{ 90{
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 9236c0d5a122..2f83543a9dfc 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -918,7 +918,7 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
918 /* default SYT offset is 3 cycles */ 918 /* default SYT offset is 3 cycles */
919 init->syt_offset = 3; 919 init->syt_offset = 3;
920 920
921 if ( (init->channel > 63) || (init->channel < 0) ) 921 if (init->channel > 63)
922 init->channel = 63; 922 init->channel = 63;
923 923
924 chan_mask = (u64)1 << init->channel; 924 chan_mask = (u64)1 << init->channel;
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index b166b3575fa6..20128692b339 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -1361,7 +1361,7 @@ static unsigned int ether1394_encapsulate_prep(unsigned int max_payload,
1361 hdr->ff.dgl = dgl; 1361 hdr->ff.dgl = dgl;
1362 adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_FF]; 1362 adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_FF];
1363 } 1363 }
1364 return (dg_size + adj_max_payload - 1) / adj_max_payload; 1364 return DIV_ROUND_UP(dg_size, adj_max_payload);
1365} 1365}
1366 1366
1367static unsigned int ether1394_encapsulate(struct sk_buff *skb, 1367static unsigned int ether1394_encapsulate(struct sk_buff *skb,
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 16240a789650..2376b729e876 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -154,9 +154,6 @@ struct host_info {
154 154
155static int nodemgr_bus_match(struct device * dev, struct device_driver * drv); 155static int nodemgr_bus_match(struct device * dev, struct device_driver * drv);
156static int nodemgr_uevent(struct device *dev, struct kobj_uevent_env *env); 156static int nodemgr_uevent(struct device *dev, struct kobj_uevent_env *env);
157static void nodemgr_resume_ne(struct node_entry *ne);
158static void nodemgr_remove_ne(struct node_entry *ne);
159static struct node_entry *find_entry_by_guid(u64 guid);
160 157
161struct bus_type ieee1394_bus_type = { 158struct bus_type ieee1394_bus_type = {
162 .name = "ieee1394", 159 .name = "ieee1394",
@@ -385,27 +382,6 @@ static ssize_t fw_get_ignore_driver(struct device *dev, struct device_attribute
385static DEVICE_ATTR(ignore_driver, S_IWUSR | S_IRUGO, fw_get_ignore_driver, fw_set_ignore_driver); 382static DEVICE_ATTR(ignore_driver, S_IWUSR | S_IRUGO, fw_get_ignore_driver, fw_set_ignore_driver);
386 383
387 384
388static ssize_t fw_set_destroy_node(struct bus_type *bus, const char *buf, size_t count)
389{
390 struct node_entry *ne;
391 u64 guid = (u64)simple_strtoull(buf, NULL, 16);
392
393 ne = find_entry_by_guid(guid);
394
395 if (ne == NULL || !ne->in_limbo)
396 return -EINVAL;
397
398 nodemgr_remove_ne(ne);
399
400 return count;
401}
402static ssize_t fw_get_destroy_node(struct bus_type *bus, char *buf)
403{
404 return sprintf(buf, "You can destroy in_limbo nodes by writing their GUID to this file\n");
405}
406static BUS_ATTR(destroy_node, S_IWUSR | S_IRUGO, fw_get_destroy_node, fw_set_destroy_node);
407
408
409static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf, 385static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf,
410 size_t count) 386 size_t count)
411{ 387{
@@ -442,7 +418,6 @@ static BUS_ATTR(ignore_drivers, S_IWUSR | S_IRUGO, fw_get_ignore_drivers, fw_set
442 418
443 419
444struct bus_attribute *const fw_bus_attrs[] = { 420struct bus_attribute *const fw_bus_attrs[] = {
445 &bus_attr_destroy_node,
446 &bus_attr_rescan, 421 &bus_attr_rescan,
447 &bus_attr_ignore_drivers, 422 &bus_attr_ignore_drivers,
448 NULL 423 NULL
@@ -734,10 +709,10 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
734 709
735static DEFINE_MUTEX(nodemgr_serialize_remove_uds); 710static DEFINE_MUTEX(nodemgr_serialize_remove_uds);
736 711
737static int __match_ne(struct device *dev, void *data) 712static int match_ne(struct device *dev, void *data)
738{ 713{
739 struct unit_directory *ud; 714 struct unit_directory *ud;
740 struct node_entry *ne = (struct node_entry *)data; 715 struct node_entry *ne = data;
741 716
742 ud = container_of(dev, struct unit_directory, unit_dev); 717 ud = container_of(dev, struct unit_directory, unit_dev);
743 return ud->ne == ne; 718 return ud->ne == ne;
@@ -754,8 +729,7 @@ static void nodemgr_remove_uds(struct node_entry *ne)
754 */ 729 */
755 mutex_lock(&nodemgr_serialize_remove_uds); 730 mutex_lock(&nodemgr_serialize_remove_uds);
756 for (;;) { 731 for (;;) {
757 dev = class_find_device(&nodemgr_ud_class, NULL, ne, 732 dev = class_find_device(&nodemgr_ud_class, NULL, ne, match_ne);
758 __match_ne);
759 if (!dev) 733 if (!dev)
760 break; 734 break;
761 ud = container_of(dev, struct unit_directory, unit_dev); 735 ud = container_of(dev, struct unit_directory, unit_dev);
@@ -785,7 +759,7 @@ static void nodemgr_remove_ne(struct node_entry *ne)
785 put_device(dev); 759 put_device(dev);
786} 760}
787 761
788static int __nodemgr_remove_host_dev(struct device *dev, void *data) 762static int remove_host_dev(struct device *dev, void *data)
789{ 763{
790 if (dev->bus == &ieee1394_bus_type) 764 if (dev->bus == &ieee1394_bus_type)
791 nodemgr_remove_ne(container_of(dev, struct node_entry, 765 nodemgr_remove_ne(container_of(dev, struct node_entry,
@@ -795,7 +769,7 @@ static int __nodemgr_remove_host_dev(struct device *dev, void *data)
795 769
796static void nodemgr_remove_host_dev(struct device *dev) 770static void nodemgr_remove_host_dev(struct device *dev)
797{ 771{
798 WARN_ON(device_for_each_child(dev, NULL, __nodemgr_remove_host_dev)); 772 device_for_each_child(dev, NULL, remove_host_dev);
799 sysfs_remove_link(&dev->kobj, "irm_id"); 773 sysfs_remove_link(&dev->kobj, "irm_id");
800 sysfs_remove_link(&dev->kobj, "busmgr_id"); 774 sysfs_remove_link(&dev->kobj, "busmgr_id");
801 sysfs_remove_link(&dev->kobj, "host_id"); 775 sysfs_remove_link(&dev->kobj, "host_id");
@@ -830,11 +804,10 @@ static void nodemgr_update_bus_options(struct node_entry *ne)
830} 804}
831 805
832 806
833static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr *csr, 807static struct node_entry *nodemgr_create_node(octlet_t guid,
834 struct host_info *hi, nodeid_t nodeid, 808 struct csr1212_csr *csr, struct hpsb_host *host,
835 unsigned int generation) 809 nodeid_t nodeid, unsigned int generation)
836{ 810{
837 struct hpsb_host *host = hi->host;
838 struct node_entry *ne; 811 struct node_entry *ne;
839 812
840 ne = kzalloc(sizeof(*ne), GFP_KERNEL); 813 ne = kzalloc(sizeof(*ne), GFP_KERNEL);
@@ -888,10 +861,10 @@ fail_alloc:
888 return NULL; 861 return NULL;
889} 862}
890 863
891static int __match_ne_guid(struct device *dev, void *data) 864static int match_ne_guid(struct device *dev, void *data)
892{ 865{
893 struct node_entry *ne; 866 struct node_entry *ne;
894 u64 *guid = (u64 *)data; 867 u64 *guid = data;
895 868
896 ne = container_of(dev, struct node_entry, node_dev); 869 ne = container_of(dev, struct node_entry, node_dev);
897 return ne->guid == *guid; 870 return ne->guid == *guid;
@@ -902,8 +875,7 @@ static struct node_entry *find_entry_by_guid(u64 guid)
902 struct device *dev; 875 struct device *dev;
903 struct node_entry *ne; 876 struct node_entry *ne;
904 877
905 dev = class_find_device(&nodemgr_ne_class, NULL, &guid, 878 dev = class_find_device(&nodemgr_ne_class, NULL, &guid, match_ne_guid);
906 __match_ne_guid);
907 if (!dev) 879 if (!dev)
908 return NULL; 880 return NULL;
909 ne = container_of(dev, struct node_entry, node_dev); 881 ne = container_of(dev, struct node_entry, node_dev);
@@ -912,21 +884,21 @@ static struct node_entry *find_entry_by_guid(u64 guid)
912 return ne; 884 return ne;
913} 885}
914 886
915struct match_nodeid_param { 887struct match_nodeid_parameter {
916 struct hpsb_host *host; 888 struct hpsb_host *host;
917 nodeid_t nodeid; 889 nodeid_t nodeid;
918}; 890};
919 891
920static int __match_ne_nodeid(struct device *dev, void *data) 892static int match_ne_nodeid(struct device *dev, void *data)
921{ 893{
922 int found = 0; 894 int found = 0;
923 struct node_entry *ne; 895 struct node_entry *ne;
924 struct match_nodeid_param *param = (struct match_nodeid_param *)data; 896 struct match_nodeid_parameter *p = data;
925 897
926 if (!dev) 898 if (!dev)
927 goto ret; 899 goto ret;
928 ne = container_of(dev, struct node_entry, node_dev); 900 ne = container_of(dev, struct node_entry, node_dev);
929 if (ne->host == param->host && ne->nodeid == param->nodeid) 901 if (ne->host == p->host && ne->nodeid == p->nodeid)
930 found = 1; 902 found = 1;
931ret: 903ret:
932 return found; 904 return found;
@@ -937,13 +909,12 @@ static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host,
937{ 909{
938 struct device *dev; 910 struct device *dev;
939 struct node_entry *ne; 911 struct node_entry *ne;
940 struct match_nodeid_param param; 912 struct match_nodeid_parameter p;
941 913
942 param.host = host; 914 p.host = host;
943 param.nodeid = nodeid; 915 p.nodeid = nodeid;
944 916
945 dev = class_find_device(&nodemgr_ne_class, NULL, &param, 917 dev = class_find_device(&nodemgr_ne_class, NULL, &p, match_ne_nodeid);
946 __match_ne_nodeid);
947 if (!dev) 918 if (!dev)
948 return NULL; 919 return NULL;
949 ne = container_of(dev, struct node_entry, node_dev); 920 ne = container_of(dev, struct node_entry, node_dev);
@@ -990,7 +961,7 @@ fail_devreg:
990 * immediate unit directories looking for software_id and 961 * immediate unit directories looking for software_id and
991 * software_version entries, in order to get driver autoloading working. */ 962 * software_version entries, in order to get driver autoloading working. */
992static struct unit_directory *nodemgr_process_unit_directory 963static struct unit_directory *nodemgr_process_unit_directory
993 (struct host_info *hi, struct node_entry *ne, struct csr1212_keyval *ud_kv, 964 (struct node_entry *ne, struct csr1212_keyval *ud_kv,
994 unsigned int *id, struct unit_directory *parent) 965 unsigned int *id, struct unit_directory *parent)
995{ 966{
996 struct unit_directory *ud; 967 struct unit_directory *ud;
@@ -1083,7 +1054,7 @@ static struct unit_directory *nodemgr_process_unit_directory
1083 nodemgr_register_device(ne, ud, &ne->device); 1054 nodemgr_register_device(ne, ud, &ne->device);
1084 1055
1085 /* process the child unit */ 1056 /* process the child unit */
1086 ud_child = nodemgr_process_unit_directory(hi, ne, kv, id, ud); 1057 ud_child = nodemgr_process_unit_directory(ne, kv, id, ud);
1087 1058
1088 if (ud_child == NULL) 1059 if (ud_child == NULL)
1089 break; 1060 break;
@@ -1137,7 +1108,7 @@ unit_directory_error:
1137} 1108}
1138 1109
1139 1110
1140static void nodemgr_process_root_directory(struct host_info *hi, struct node_entry *ne) 1111static void nodemgr_process_root_directory(struct node_entry *ne)
1141{ 1112{
1142 unsigned int ud_id = 0; 1113 unsigned int ud_id = 0;
1143 struct csr1212_dentry *dentry; 1114 struct csr1212_dentry *dentry;
@@ -1157,7 +1128,7 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
1157 break; 1128 break;
1158 1129
1159 case CSR1212_KV_ID_UNIT: 1130 case CSR1212_KV_ID_UNIT:
1160 nodemgr_process_unit_directory(hi, ne, kv, &ud_id, NULL); 1131 nodemgr_process_unit_directory(ne, kv, &ud_id, NULL);
1161 break; 1132 break;
1162 1133
1163 case CSR1212_KV_ID_DESCRIPTOR: 1134 case CSR1212_KV_ID_DESCRIPTOR:
@@ -1273,8 +1244,7 @@ void hpsb_unregister_protocol(struct hpsb_protocol_driver *driver)
1273 * the to take whatever actions required. 1244 * the to take whatever actions required.
1274 */ 1245 */
1275static void nodemgr_update_node(struct node_entry *ne, struct csr1212_csr *csr, 1246static void nodemgr_update_node(struct node_entry *ne, struct csr1212_csr *csr,
1276 struct host_info *hi, nodeid_t nodeid, 1247 nodeid_t nodeid, unsigned int generation)
1277 unsigned int generation)
1278{ 1248{
1279 if (ne->nodeid != nodeid) { 1249 if (ne->nodeid != nodeid) {
1280 HPSB_DEBUG("Node changed: " NODE_BUS_FMT " -> " NODE_BUS_FMT, 1250 HPSB_DEBUG("Node changed: " NODE_BUS_FMT " -> " NODE_BUS_FMT,
@@ -1305,19 +1275,23 @@ static void nodemgr_update_node(struct node_entry *ne, struct csr1212_csr *csr,
1305 csr1212_destroy_csr(csr); 1275 csr1212_destroy_csr(csr);
1306 } 1276 }
1307 1277
1308 if (ne->in_limbo)
1309 nodemgr_resume_ne(ne);
1310
1311 /* Mark the node current */ 1278 /* Mark the node current */
1312 ne->generation = generation; 1279 ne->generation = generation;
1313}
1314 1280
1281 if (ne->in_limbo) {
1282 device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
1283 ne->in_limbo = false;
1315 1284
1285 HPSB_DEBUG("Node reactivated: "
1286 "ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
1287 NODE_BUS_ARGS(ne->host, ne->nodeid),
1288 (unsigned long long)ne->guid);
1289 }
1290}
1316 1291
1317static void nodemgr_node_scan_one(struct host_info *hi, 1292static void nodemgr_node_scan_one(struct hpsb_host *host,
1318 nodeid_t nodeid, int generation) 1293 nodeid_t nodeid, int generation)
1319{ 1294{
1320 struct hpsb_host *host = hi->host;
1321 struct node_entry *ne; 1295 struct node_entry *ne;
1322 octlet_t guid; 1296 octlet_t guid;
1323 struct csr1212_csr *csr; 1297 struct csr1212_csr *csr;
@@ -1373,16 +1347,15 @@ static void nodemgr_node_scan_one(struct host_info *hi,
1373 } 1347 }
1374 1348
1375 if (!ne) 1349 if (!ne)
1376 nodemgr_create_node(guid, csr, hi, nodeid, generation); 1350 nodemgr_create_node(guid, csr, host, nodeid, generation);
1377 else 1351 else
1378 nodemgr_update_node(ne, csr, hi, nodeid, generation); 1352 nodemgr_update_node(ne, csr, nodeid, generation);
1379} 1353}
1380 1354
1381 1355
1382static void nodemgr_node_scan(struct host_info *hi, int generation) 1356static void nodemgr_node_scan(struct hpsb_host *host, int generation)
1383{ 1357{
1384 int count; 1358 int count;
1385 struct hpsb_host *host = hi->host;
1386 struct selfid *sid = (struct selfid *)host->topology_map; 1359 struct selfid *sid = (struct selfid *)host->topology_map;
1387 nodeid_t nodeid = LOCAL_BUS; 1360 nodeid_t nodeid = LOCAL_BUS;
1388 1361
@@ -1395,89 +1368,26 @@ static void nodemgr_node_scan(struct host_info *hi, int generation)
1395 nodeid++; 1368 nodeid++;
1396 continue; 1369 continue;
1397 } 1370 }
1398 nodemgr_node_scan_one(hi, nodeid++, generation); 1371 nodemgr_node_scan_one(host, nodeid++, generation);
1399 }
1400}
1401
1402static int __nodemgr_driver_suspend(struct device *dev, void *data)
1403{
1404 struct unit_directory *ud;
1405 struct device_driver *drv;
1406 struct node_entry *ne = (struct node_entry *)data;
1407 int error;
1408
1409 ud = container_of(dev, struct unit_directory, unit_dev);
1410 if (ud->ne == ne) {
1411 drv = get_driver(ud->device.driver);
1412 if (drv) {
1413 error = 1; /* release if suspend is not implemented */
1414 if (drv->suspend) {
1415 down(&ud->device.sem);
1416 error = drv->suspend(&ud->device, PMSG_SUSPEND);
1417 up(&ud->device.sem);
1418 }
1419 if (error)
1420 device_release_driver(&ud->device);
1421 put_driver(drv);
1422 }
1423 }
1424
1425 return 0;
1426}
1427
1428static int __nodemgr_driver_resume(struct device *dev, void *data)
1429{
1430 struct unit_directory *ud;
1431 struct device_driver *drv;
1432 struct node_entry *ne = (struct node_entry *)data;
1433
1434 ud = container_of(dev, struct unit_directory, unit_dev);
1435 if (ud->ne == ne) {
1436 drv = get_driver(ud->device.driver);
1437 if (drv) {
1438 if (drv->resume) {
1439 down(&ud->device.sem);
1440 drv->resume(&ud->device);
1441 up(&ud->device.sem);
1442 }
1443 put_driver(drv);
1444 }
1445 } 1372 }
1446
1447 return 0;
1448} 1373}
1449 1374
1450static void nodemgr_suspend_ne(struct node_entry *ne) 1375static void nodemgr_pause_ne(struct node_entry *ne)
1451{ 1376{
1452 HPSB_DEBUG("Node suspended: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]", 1377 HPSB_DEBUG("Node paused: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
1453 NODE_BUS_ARGS(ne->host, ne->nodeid), 1378 NODE_BUS_ARGS(ne->host, ne->nodeid),
1454 (unsigned long long)ne->guid); 1379 (unsigned long long)ne->guid);
1455 1380
1456 ne->in_limbo = 1; 1381 ne->in_limbo = true;
1457 WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo)); 1382 WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo));
1458
1459 class_for_each_device(&nodemgr_ud_class, NULL, ne,
1460 __nodemgr_driver_suspend);
1461}
1462
1463
1464static void nodemgr_resume_ne(struct node_entry *ne)
1465{
1466 ne->in_limbo = 0;
1467 device_remove_file(&ne->device, &dev_attr_ne_in_limbo);
1468
1469 class_for_each_device(&nodemgr_ud_class, NULL, ne,
1470 __nodemgr_driver_resume);
1471 HPSB_DEBUG("Node resumed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]",
1472 NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid);
1473} 1383}
1474 1384
1475static int __nodemgr_update_pdrv(struct device *dev, void *data) 1385static int update_pdrv(struct device *dev, void *data)
1476{ 1386{
1477 struct unit_directory *ud; 1387 struct unit_directory *ud;
1478 struct device_driver *drv; 1388 struct device_driver *drv;
1479 struct hpsb_protocol_driver *pdrv; 1389 struct hpsb_protocol_driver *pdrv;
1480 struct node_entry *ne = (struct node_entry *)data; 1390 struct node_entry *ne = data;
1481 int error; 1391 int error;
1482 1392
1483 ud = container_of(dev, struct unit_directory, unit_dev); 1393 ud = container_of(dev, struct unit_directory, unit_dev);
@@ -1503,11 +1413,9 @@ static int __nodemgr_update_pdrv(struct device *dev, void *data)
1503 1413
1504static void nodemgr_update_pdrv(struct node_entry *ne) 1414static void nodemgr_update_pdrv(struct node_entry *ne)
1505{ 1415{
1506 class_for_each_device(&nodemgr_ud_class, NULL, ne, 1416 class_for_each_device(&nodemgr_ud_class, NULL, ne, update_pdrv);
1507 __nodemgr_update_pdrv);
1508} 1417}
1509 1418
1510
1511/* Write the BROADCAST_CHANNEL as per IEEE1394a 8.3.2.3.11 and 8.4.2.3. This 1419/* Write the BROADCAST_CHANNEL as per IEEE1394a 8.3.2.3.11 and 8.4.2.3. This
1512 * seems like an optional service but in the end it is practically mandatory 1420 * seems like an optional service but in the end it is practically mandatory
1513 * as a consequence of these clauses. 1421 * as a consequence of these clauses.
@@ -1535,11 +1443,12 @@ static void nodemgr_irm_write_bc(struct node_entry *ne, int generation)
1535} 1443}
1536 1444
1537 1445
1538static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int generation) 1446static void nodemgr_probe_ne(struct hpsb_host *host, struct node_entry *ne,
1447 int generation)
1539{ 1448{
1540 struct device *dev; 1449 struct device *dev;
1541 1450
1542 if (ne->host != hi->host || ne->in_limbo) 1451 if (ne->host != host || ne->in_limbo)
1543 return; 1452 return;
1544 1453
1545 dev = get_device(&ne->device); 1454 dev = get_device(&ne->device);
@@ -1554,40 +1463,40 @@ static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int ge
1554 * down to the drivers. Otherwise, this is a dead node and we 1463 * down to the drivers. Otherwise, this is a dead node and we
1555 * suspend it. */ 1464 * suspend it. */
1556 if (ne->needs_probe) 1465 if (ne->needs_probe)
1557 nodemgr_process_root_directory(hi, ne); 1466 nodemgr_process_root_directory(ne);
1558 else if (ne->generation == generation) 1467 else if (ne->generation == generation)
1559 nodemgr_update_pdrv(ne); 1468 nodemgr_update_pdrv(ne);
1560 else 1469 else
1561 nodemgr_suspend_ne(ne); 1470 nodemgr_pause_ne(ne);
1562 1471
1563 put_device(dev); 1472 put_device(dev);
1564} 1473}
1565 1474
1566struct probe_param { 1475struct node_probe_parameter {
1567 struct host_info *hi; 1476 struct hpsb_host *host;
1568 int generation; 1477 int generation;
1569 bool probe_now; 1478 bool probe_now;
1570}; 1479};
1571 1480
1572static int node_probe(struct device *dev, void *data) 1481static int node_probe(struct device *dev, void *data)
1573{ 1482{
1574 struct probe_param *p = data; 1483 struct node_probe_parameter *p = data;
1575 struct node_entry *ne; 1484 struct node_entry *ne;
1576 1485
1577 if (p->generation != get_hpsb_generation(p->hi->host)) 1486 if (p->generation != get_hpsb_generation(p->host))
1578 return -EAGAIN; 1487 return -EAGAIN;
1579 1488
1580 ne = container_of(dev, struct node_entry, node_dev); 1489 ne = container_of(dev, struct node_entry, node_dev);
1581 if (ne->needs_probe == p->probe_now) 1490 if (ne->needs_probe == p->probe_now)
1582 nodemgr_probe_ne(p->hi, ne, p->generation); 1491 nodemgr_probe_ne(p->host, ne, p->generation);
1583 return 0; 1492 return 0;
1584} 1493}
1585 1494
1586static void nodemgr_node_probe(struct host_info *hi, int generation) 1495static int nodemgr_node_probe(struct hpsb_host *host, int generation)
1587{ 1496{
1588 struct probe_param p; 1497 struct node_probe_parameter p;
1589 1498
1590 p.hi = hi; 1499 p.host = host;
1591 p.generation = generation; 1500 p.generation = generation;
1592 /* 1501 /*
1593 * Do some processing of the nodes we've probed. This pulls them 1502 * Do some processing of the nodes we've probed. This pulls them
@@ -1604,11 +1513,11 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
1604 */ 1513 */
1605 p.probe_now = false; 1514 p.probe_now = false;
1606 if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0) 1515 if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0)
1607 return; 1516 return 0;
1608 1517
1609 p.probe_now = true; 1518 p.probe_now = true;
1610 if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0) 1519 if (class_for_each_device(&nodemgr_ne_class, NULL, &p, node_probe) != 0)
1611 return; 1520 return 0;
1612 /* 1521 /*
1613 * Now let's tell the bus to rescan our devices. This may seem 1522 * Now let's tell the bus to rescan our devices. This may seem
1614 * like overhead, but the driver-model core will only scan a 1523 * like overhead, but the driver-model core will only scan a
@@ -1620,6 +1529,27 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
1620 */ 1529 */
1621 if (bus_rescan_devices(&ieee1394_bus_type) != 0) 1530 if (bus_rescan_devices(&ieee1394_bus_type) != 0)
1622 HPSB_DEBUG("bus_rescan_devices had an error"); 1531 HPSB_DEBUG("bus_rescan_devices had an error");
1532
1533 return 1;
1534}
1535
1536static int remove_nodes_in_limbo(struct device *dev, void *data)
1537{
1538 struct node_entry *ne;
1539
1540 if (dev->bus != &ieee1394_bus_type)
1541 return 0;
1542
1543 ne = container_of(dev, struct node_entry, device);
1544 if (ne->in_limbo)
1545 nodemgr_remove_ne(ne);
1546
1547 return 0;
1548}
1549
1550static void nodemgr_remove_nodes_in_limbo(struct hpsb_host *host)
1551{
1552 device_for_each_child(&host->device, NULL, remove_nodes_in_limbo);
1623} 1553}
1624 1554
1625static int nodemgr_send_resume_packet(struct hpsb_host *host) 1555static int nodemgr_send_resume_packet(struct hpsb_host *host)
@@ -1730,10 +1660,9 @@ static int nodemgr_check_irm_capability(struct hpsb_host *host, int cycles)
1730 return 1; 1660 return 1;
1731} 1661}
1732 1662
1733static int nodemgr_host_thread(void *__hi) 1663static int nodemgr_host_thread(void *data)
1734{ 1664{
1735 struct host_info *hi = (struct host_info *)__hi; 1665 struct hpsb_host *host = data;
1736 struct hpsb_host *host = hi->host;
1737 unsigned int g, generation = 0; 1666 unsigned int g, generation = 0;
1738 int i, reset_cycles = 0; 1667 int i, reset_cycles = 0;
1739 1668
@@ -1787,36 +1716,48 @@ static int nodemgr_host_thread(void *__hi)
1787 * entries. This does not do the sysfs stuff, since that 1716 * entries. This does not do the sysfs stuff, since that
1788 * would trigger uevents and such, which is a bad idea at 1717 * would trigger uevents and such, which is a bad idea at
1789 * this point. */ 1718 * this point. */
1790 nodemgr_node_scan(hi, generation); 1719 nodemgr_node_scan(host, generation);
1791 1720
1792 /* This actually does the full probe, with sysfs 1721 /* This actually does the full probe, with sysfs
1793 * registration. */ 1722 * registration. */
1794 nodemgr_node_probe(hi, generation); 1723 if (!nodemgr_node_probe(host, generation))
1724 continue;
1795 1725
1796 /* Update some of our sysfs symlinks */ 1726 /* Update some of our sysfs symlinks */
1797 nodemgr_update_host_dev_links(host); 1727 nodemgr_update_host_dev_links(host);
1728
1729 /* Sleep 3 seconds */
1730 for (i = 3000/200; i; i--) {
1731 msleep_interruptible(200);
1732 if (kthread_should_stop())
1733 goto exit;
1734
1735 if (generation != get_hpsb_generation(host))
1736 break;
1737 }
1738 /* Remove nodes which are gone, unless a bus reset happened */
1739 if (!i)
1740 nodemgr_remove_nodes_in_limbo(host);
1798 } 1741 }
1799exit: 1742exit:
1800 HPSB_VERBOSE("NodeMgr: Exiting thread"); 1743 HPSB_VERBOSE("NodeMgr: Exiting thread");
1801 return 0; 1744 return 0;
1802} 1745}
1803 1746
1804struct host_iter_param { 1747struct per_host_parameter {
1805 void *data; 1748 void *data;
1806 int (*cb)(struct hpsb_host *, void *); 1749 int (*cb)(struct hpsb_host *, void *);
1807}; 1750};
1808 1751
1809static int __nodemgr_for_each_host(struct device *dev, void *data) 1752static int per_host(struct device *dev, void *data)
1810{ 1753{
1811 struct hpsb_host *host; 1754 struct hpsb_host *host;
1812 struct host_iter_param *hip = (struct host_iter_param *)data; 1755 struct per_host_parameter *p = data;
1813 int error = 0;
1814 1756
1815 host = container_of(dev, struct hpsb_host, host_dev); 1757 host = container_of(dev, struct hpsb_host, host_dev);
1816 error = hip->cb(host, hip->data); 1758 return p->cb(host, p->data);
1817
1818 return error;
1819} 1759}
1760
1820/** 1761/**
1821 * nodemgr_for_each_host - call a function for each IEEE 1394 host 1762 * nodemgr_for_each_host - call a function for each IEEE 1394 host
1822 * @data: an address to supply to the callback 1763 * @data: an address to supply to the callback
@@ -1831,15 +1772,11 @@ static int __nodemgr_for_each_host(struct device *dev, void *data)
1831 */ 1772 */
1832int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *)) 1773int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *))
1833{ 1774{
1834 struct host_iter_param hip; 1775 struct per_host_parameter p;
1835 int error;
1836
1837 hip.cb = cb;
1838 hip.data = data;
1839 error = class_for_each_device(&hpsb_host_class, NULL, &hip,
1840 __nodemgr_for_each_host);
1841 1776
1842 return error; 1777 p.cb = cb;
1778 p.data = data;
1779 return class_for_each_device(&hpsb_host_class, NULL, &p, per_host);
1843} 1780}
1844 1781
1845/* The following two convenience functions use a struct node_entry 1782/* The following two convenience functions use a struct node_entry
@@ -1893,7 +1830,7 @@ static void nodemgr_add_host(struct hpsb_host *host)
1893 return; 1830 return;
1894 } 1831 }
1895 hi->host = host; 1832 hi->host = host;
1896 hi->thread = kthread_run(nodemgr_host_thread, hi, "knodemgrd_%d", 1833 hi->thread = kthread_run(nodemgr_host_thread, host, "knodemgrd_%d",
1897 host->id); 1834 host->id);
1898 if (IS_ERR(hi->thread)) { 1835 if (IS_ERR(hi->thread)) {
1899 HPSB_ERR("NodeMgr: cannot start thread for host %d", host->id); 1836 HPSB_ERR("NodeMgr: cannot start thread for host %d", host->id);
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index 6eb26465a84c..4f287a3561ba 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -110,7 +110,7 @@ struct node_entry {
110 struct device node_dev; 110 struct device node_dev;
111 111
112 /* Means this node is not attached anymore */ 112 /* Means this node is not attached anymore */
113 int in_limbo; 113 bool in_limbo;
114 114
115 struct csr1212_csr *csr; 115 struct csr1212_csr *csr;
116}; 116};
diff --git a/drivers/ieee1394/raw1394-private.h b/drivers/ieee1394/raw1394-private.h
index a06aaad5b448..7a225a405987 100644
--- a/drivers/ieee1394/raw1394-private.h
+++ b/drivers/ieee1394/raw1394-private.h
@@ -22,6 +22,7 @@ enum raw1394_iso_state { RAW1394_ISO_INACTIVE = 0,
22struct file_info { 22struct file_info {
23 struct list_head list; 23 struct list_head list;
24 24
25 struct mutex state_mutex;
25 enum { opened, initialized, connected } state; 26 enum { opened, initialized, connected } state;
26 unsigned int protocol_version; 27 unsigned int protocol_version;
27 28
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index c7833bb37ae1..9f19ac492106 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -34,6 +34,7 @@
34#include <linux/fs.h> 34#include <linux/fs.h>
35#include <linux/poll.h> 35#include <linux/poll.h>
36#include <linux/module.h> 36#include <linux/module.h>
37#include <linux/mutex.h>
37#include <linux/init.h> 38#include <linux/init.h>
38#include <linux/interrupt.h> 39#include <linux/interrupt.h>
39#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
@@ -2267,6 +2268,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
2267 return -EFAULT; 2268 return -EFAULT;
2268 } 2269 }
2269 2270
2271 mutex_lock(&fi->state_mutex);
2272
2270 switch (fi->state) { 2273 switch (fi->state) {
2271 case opened: 2274 case opened:
2272 retval = state_opened(fi, req); 2275 retval = state_opened(fi, req);
@@ -2281,6 +2284,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
2281 break; 2284 break;
2282 } 2285 }
2283 2286
2287 mutex_unlock(&fi->state_mutex);
2288
2284 if (retval < 0) { 2289 if (retval < 0) {
2285 free_pending_request(req); 2290 free_pending_request(req);
2286 } else { 2291 } else {
@@ -2541,109 +2546,120 @@ static int raw1394_read_cycle_timer(struct file_info *fi, void __user * uaddr)
2541static int raw1394_mmap(struct file *file, struct vm_area_struct *vma) 2546static int raw1394_mmap(struct file *file, struct vm_area_struct *vma)
2542{ 2547{
2543 struct file_info *fi = file->private_data; 2548 struct file_info *fi = file->private_data;
2549 int ret;
2550
2551 mutex_lock(&fi->state_mutex);
2544 2552
2545 if (fi->iso_state == RAW1394_ISO_INACTIVE) 2553 if (fi->iso_state == RAW1394_ISO_INACTIVE)
2546 return -EINVAL; 2554 ret = -EINVAL;
2555 else
2556 ret = dma_region_mmap(&fi->iso_handle->data_buf, file, vma);
2557
2558 mutex_unlock(&fi->state_mutex);
2547 2559
2548 return dma_region_mmap(&fi->iso_handle->data_buf, file, vma); 2560 return ret;
2549} 2561}
2550 2562
2551/* ioctl is only used for rawiso operations */ 2563static long raw1394_ioctl_inactive(struct file_info *fi, unsigned int cmd,
2552static long do_raw1394_ioctl(struct file *file, unsigned int cmd, 2564 void __user *argp)
2553 unsigned long arg) 2565{
2566 switch (cmd) {
2567 case RAW1394_IOC_ISO_XMIT_INIT:
2568 return raw1394_iso_xmit_init(fi, argp);
2569 case RAW1394_IOC_ISO_RECV_INIT:
2570 return raw1394_iso_recv_init(fi, argp);
2571 default:
2572 return -EINVAL;
2573 }
2574}
2575
2576static long raw1394_ioctl_recv(struct file_info *fi, unsigned int cmd,
2577 unsigned long arg)
2554{ 2578{
2555 struct file_info *fi = file->private_data;
2556 void __user *argp = (void __user *)arg; 2579 void __user *argp = (void __user *)arg;
2557 2580
2558 switch (fi->iso_state) { 2581 switch (cmd) {
2559 case RAW1394_ISO_INACTIVE: 2582 case RAW1394_IOC_ISO_RECV_START:{
2560 switch (cmd) { 2583 int args[3];
2561 case RAW1394_IOC_ISO_XMIT_INIT: 2584
2562 return raw1394_iso_xmit_init(fi, argp); 2585 if (copy_from_user(&args[0], argp, sizeof(args)))
2563 case RAW1394_IOC_ISO_RECV_INIT: 2586 return -EFAULT;
2564 return raw1394_iso_recv_init(fi, argp); 2587 return hpsb_iso_recv_start(fi->iso_handle,
2565 default: 2588 args[0], args[1], args[2]);
2566 break;
2567 } 2589 }
2568 break; 2590 case RAW1394_IOC_ISO_XMIT_RECV_STOP:
2569 case RAW1394_ISO_RECV: 2591 hpsb_iso_stop(fi->iso_handle);
2570 switch (cmd) { 2592 return 0;
2571 case RAW1394_IOC_ISO_RECV_START:{ 2593 case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL:
2572 /* copy args from user-space */ 2594 return hpsb_iso_recv_listen_channel(fi->iso_handle, arg);
2573 int args[3]; 2595 case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL:
2574 if (copy_from_user 2596 return hpsb_iso_recv_unlisten_channel(fi->iso_handle, arg);
2575 (&args[0], argp, sizeof(args))) 2597 case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK:{
2576 return -EFAULT; 2598 u64 mask;
2577 return hpsb_iso_recv_start(fi->iso_handle, 2599
2578 args[0], args[1], 2600 if (copy_from_user(&mask, argp, sizeof(mask)))
2579 args[2]); 2601 return -EFAULT;
2580 } 2602 return hpsb_iso_recv_set_channel_mask(fi->iso_handle,
2581 case RAW1394_IOC_ISO_XMIT_RECV_STOP: 2603 mask);
2582 hpsb_iso_stop(fi->iso_handle);
2583 return 0;
2584 case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL:
2585 return hpsb_iso_recv_listen_channel(fi->iso_handle,
2586 arg);
2587 case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL:
2588 return hpsb_iso_recv_unlisten_channel(fi->iso_handle,
2589 arg);
2590 case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK:{
2591 /* copy the u64 from user-space */
2592 u64 mask;
2593 if (copy_from_user(&mask, argp, sizeof(mask)))
2594 return -EFAULT;
2595 return hpsb_iso_recv_set_channel_mask(fi->
2596 iso_handle,
2597 mask);
2598 }
2599 case RAW1394_IOC_ISO_GET_STATUS:
2600 return raw1394_iso_get_status(fi, argp);
2601 case RAW1394_IOC_ISO_RECV_PACKETS:
2602 return raw1394_iso_recv_packets(fi, argp);
2603 case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS:
2604 return hpsb_iso_recv_release_packets(fi->iso_handle,
2605 arg);
2606 case RAW1394_IOC_ISO_RECV_FLUSH:
2607 return hpsb_iso_recv_flush(fi->iso_handle);
2608 case RAW1394_IOC_ISO_SHUTDOWN:
2609 raw1394_iso_shutdown(fi);
2610 return 0;
2611 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2612 queue_rawiso_event(fi);
2613 return 0;
2614 } 2604 }
2615 break; 2605 case RAW1394_IOC_ISO_GET_STATUS:
2616 case RAW1394_ISO_XMIT: 2606 return raw1394_iso_get_status(fi, argp);
2617 switch (cmd) { 2607 case RAW1394_IOC_ISO_RECV_PACKETS:
2618 case RAW1394_IOC_ISO_XMIT_START:{ 2608 return raw1394_iso_recv_packets(fi, argp);
2619 /* copy two ints from user-space */ 2609 case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS:
2620 int args[2]; 2610 return hpsb_iso_recv_release_packets(fi->iso_handle, arg);
2621 if (copy_from_user 2611 case RAW1394_IOC_ISO_RECV_FLUSH:
2622 (&args[0], argp, sizeof(args))) 2612 return hpsb_iso_recv_flush(fi->iso_handle);
2623 return -EFAULT; 2613 case RAW1394_IOC_ISO_SHUTDOWN:
2624 return hpsb_iso_xmit_start(fi->iso_handle, 2614 raw1394_iso_shutdown(fi);
2625 args[0], args[1]); 2615 return 0;
2626 } 2616 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2627 case RAW1394_IOC_ISO_XMIT_SYNC: 2617 queue_rawiso_event(fi);
2628 return hpsb_iso_xmit_sync(fi->iso_handle); 2618 return 0;
2629 case RAW1394_IOC_ISO_XMIT_RECV_STOP: 2619 default:
2630 hpsb_iso_stop(fi->iso_handle); 2620 return -EINVAL;
2631 return 0; 2621 }
2632 case RAW1394_IOC_ISO_GET_STATUS: 2622}
2633 return raw1394_iso_get_status(fi, argp); 2623
2634 case RAW1394_IOC_ISO_XMIT_PACKETS: 2624static long raw1394_ioctl_xmit(struct file_info *fi, unsigned int cmd,
2635 return raw1394_iso_send_packets(fi, argp); 2625 void __user *argp)
2636 case RAW1394_IOC_ISO_SHUTDOWN: 2626{
2637 raw1394_iso_shutdown(fi); 2627 switch (cmd) {
2638 return 0; 2628 case RAW1394_IOC_ISO_XMIT_START:{
2639 case RAW1394_IOC_ISO_QUEUE_ACTIVITY: 2629 int args[2];
2640 queue_rawiso_event(fi); 2630
2641 return 0; 2631 if (copy_from_user(&args[0], argp, sizeof(args)))
2632 return -EFAULT;
2633 return hpsb_iso_xmit_start(fi->iso_handle,
2634 args[0], args[1]);
2642 } 2635 }
2643 break; 2636 case RAW1394_IOC_ISO_XMIT_SYNC:
2637 return hpsb_iso_xmit_sync(fi->iso_handle);
2638 case RAW1394_IOC_ISO_XMIT_RECV_STOP:
2639 hpsb_iso_stop(fi->iso_handle);
2640 return 0;
2641 case RAW1394_IOC_ISO_GET_STATUS:
2642 return raw1394_iso_get_status(fi, argp);
2643 case RAW1394_IOC_ISO_XMIT_PACKETS:
2644 return raw1394_iso_send_packets(fi, argp);
2645 case RAW1394_IOC_ISO_SHUTDOWN:
2646 raw1394_iso_shutdown(fi);
2647 return 0;
2648 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2649 queue_rawiso_event(fi);
2650 return 0;
2644 default: 2651 default:
2645 break; 2652 return -EINVAL;
2646 } 2653 }
2654}
2655
2656/* ioctl is only used for rawiso operations */
2657static long raw1394_ioctl(struct file *file, unsigned int cmd,
2658 unsigned long arg)
2659{
2660 struct file_info *fi = file->private_data;
2661 void __user *argp = (void __user *)arg;
2662 long ret;
2647 2663
2648 /* state-independent commands */ 2664 /* state-independent commands */
2649 switch(cmd) { 2665 switch(cmd) {
@@ -2653,16 +2669,25 @@ static long do_raw1394_ioctl(struct file *file, unsigned int cmd,
2653 break; 2669 break;
2654 } 2670 }
2655 2671
2656 return -EINVAL; 2672 mutex_lock(&fi->state_mutex);
2657} 2673
2674 switch (fi->iso_state) {
2675 case RAW1394_ISO_INACTIVE:
2676 ret = raw1394_ioctl_inactive(fi, cmd, argp);
2677 break;
2678 case RAW1394_ISO_RECV:
2679 ret = raw1394_ioctl_recv(fi, cmd, arg);
2680 break;
2681 case RAW1394_ISO_XMIT:
2682 ret = raw1394_ioctl_xmit(fi, cmd, argp);
2683 break;
2684 default:
2685 ret = -EINVAL;
2686 break;
2687 }
2688
2689 mutex_unlock(&fi->state_mutex);
2658 2690
2659static long raw1394_ioctl(struct file *file, unsigned int cmd,
2660 unsigned long arg)
2661{
2662 long ret;
2663 lock_kernel();
2664 ret = do_raw1394_ioctl(file, cmd, arg);
2665 unlock_kernel();
2666 return ret; 2691 return ret;
2667} 2692}
2668 2693
@@ -2700,7 +2725,7 @@ static long raw1394_iso_xmit_recv_packets32(struct file *file, unsigned int cmd,
2700 !copy_from_user(&infos32, &arg->infos, sizeof infos32)) { 2725 !copy_from_user(&infos32, &arg->infos, sizeof infos32)) {
2701 infos = compat_ptr(infos32); 2726 infos = compat_ptr(infos32);
2702 if (!copy_to_user(&dst->infos, &infos, sizeof infos)) 2727 if (!copy_to_user(&dst->infos, &infos, sizeof infos))
2703 err = do_raw1394_ioctl(file, cmd, (unsigned long)dst); 2728 err = raw1394_ioctl(file, cmd, (unsigned long)dst);
2704 } 2729 }
2705 return err; 2730 return err;
2706} 2731}
@@ -2724,7 +2749,6 @@ static long raw1394_compat_ioctl(struct file *file,
2724 void __user *argp = (void __user *)arg; 2749 void __user *argp = (void __user *)arg;
2725 long err; 2750 long err;
2726 2751
2727 lock_kernel();
2728 switch (cmd) { 2752 switch (cmd) {
2729 /* These requests have same format as long as 'int' has same size. */ 2753 /* These requests have same format as long as 'int' has same size. */
2730 case RAW1394_IOC_ISO_RECV_INIT: 2754 case RAW1394_IOC_ISO_RECV_INIT:
@@ -2741,7 +2765,7 @@ static long raw1394_compat_ioctl(struct file *file,
2741 case RAW1394_IOC_ISO_GET_STATUS: 2765 case RAW1394_IOC_ISO_GET_STATUS:
2742 case RAW1394_IOC_ISO_SHUTDOWN: 2766 case RAW1394_IOC_ISO_SHUTDOWN:
2743 case RAW1394_IOC_ISO_QUEUE_ACTIVITY: 2767 case RAW1394_IOC_ISO_QUEUE_ACTIVITY:
2744 err = do_raw1394_ioctl(file, cmd, arg); 2768 err = raw1394_ioctl(file, cmd, arg);
2745 break; 2769 break;
2746 /* These request have different format. */ 2770 /* These request have different format. */
2747 case RAW1394_IOC_ISO_RECV_PACKETS32: 2771 case RAW1394_IOC_ISO_RECV_PACKETS32:
@@ -2757,7 +2781,6 @@ static long raw1394_compat_ioctl(struct file *file,
2757 err = -EINVAL; 2781 err = -EINVAL;
2758 break; 2782 break;
2759 } 2783 }
2760 unlock_kernel();
2761 2784
2762 return err; 2785 return err;
2763} 2786}
@@ -2791,6 +2814,7 @@ static int raw1394_open(struct inode *inode, struct file *file)
2791 fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */ 2814 fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */
2792 2815
2793 INIT_LIST_HEAD(&fi->list); 2816 INIT_LIST_HEAD(&fi->list);
2817 mutex_init(&fi->state_mutex);
2794 fi->state = opened; 2818 fi->state = opened;
2795 INIT_LIST_HEAD(&fi->req_pending); 2819 INIT_LIST_HEAD(&fi->req_pending);
2796 INIT_LIST_HEAD(&fi->req_complete); 2820 INIT_LIST_HEAD(&fi->req_complete);
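
The raw1394 hunks above replace the Big Kernel Lock with a mutex embedded in each file_info, initialized in open() and taken around the state-machine dispatch in write(), ioctl() and mmap(). A minimal sketch of that per-open-file locking shape, with invented names (this is not code from the patch):

#include <linux/errno.h>
#include <linux/mutex.h>

/* Illustrative per-open-file state machine; only the locking shape
 * mirrors raw1394, the identifiers are made up for this sketch. */
struct example_fi {
	struct mutex state_mutex;
	enum { opened, initialized, connected } state;
};

static void example_open(struct example_fi *fi)
{
	mutex_init(&fi->state_mutex);
	fi->state = opened;
}

static long example_dispatch(struct example_fi *fi)
{
	long ret;

	mutex_lock(&fi->state_mutex);
	switch (fi->state) {
	case opened:
	case initialized:
	case connected:
		ret = 0;	/* the per-state handler would run here */
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&fi->state_mutex);

	return ret;
}

Because the mutex lives in the per-open structure, concurrent calls on the same descriptor serialize while unrelated descriptors stay independent, which the removed lock_kernel()/unlock_kernel() pair could not offer.
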
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 1d6ad3435537..c52f6e6e8af2 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -526,26 +526,41 @@ static void sbp2util_write_doorbell(struct work_struct *work)
526 526
527static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu) 527static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
528{ 528{
529 struct sbp2_fwhost_info *hi = lu->hi;
530 struct sbp2_command_info *cmd; 529 struct sbp2_command_info *cmd;
530 struct device *dmadev = lu->hi->host->device.parent;
531 int i, orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS; 531 int i, orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;
532 532
533 for (i = 0; i < orbs; i++) { 533 for (i = 0; i < orbs; i++) {
534 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 534 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
535 if (!cmd) 535 if (!cmd)
536 return -ENOMEM; 536 goto failed_alloc;
537 cmd->command_orb_dma = dma_map_single(hi->host->device.parent, 537
538 &cmd->command_orb, 538 cmd->command_orb_dma =
539 sizeof(struct sbp2_command_orb), 539 dma_map_single(dmadev, &cmd->command_orb,
540 DMA_TO_DEVICE); 540 sizeof(struct sbp2_command_orb),
541 cmd->sge_dma = dma_map_single(hi->host->device.parent, 541 DMA_TO_DEVICE);
542 &cmd->scatter_gather_element, 542 if (dma_mapping_error(dmadev, cmd->command_orb_dma))
543 sizeof(cmd->scatter_gather_element), 543 goto failed_orb;
544 DMA_TO_DEVICE); 544
545 cmd->sge_dma =
546 dma_map_single(dmadev, &cmd->scatter_gather_element,
547 sizeof(cmd->scatter_gather_element),
548 DMA_TO_DEVICE);
549 if (dma_mapping_error(dmadev, cmd->sge_dma))
550 goto failed_sge;
551
545 INIT_LIST_HEAD(&cmd->list); 552 INIT_LIST_HEAD(&cmd->list);
546 list_add_tail(&cmd->list, &lu->cmd_orb_completed); 553 list_add_tail(&cmd->list, &lu->cmd_orb_completed);
547 } 554 }
548 return 0; 555 return 0;
556
557failed_sge:
558 dma_unmap_single(dmadev, cmd->command_orb_dma,
559 sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
560failed_orb:
561 kfree(cmd);
562failed_alloc:
563 return -ENOMEM;
549} 564}
550 565
551static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu, 566static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu,
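
The ORB-pool hunk above now checks every dma_map_single() result with dma_mapping_error() and unwinds the mappings already established before reporting -ENOMEM. Reduced to a hypothetical helper (not driver code), the pattern looks like this:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical helper: map two streaming buffers for device reads,
 * check each mapping, and unwind the first one if the second fails. */
static int map_two_buffers(struct device *dmadev,
			   void *buf1, size_t len1, dma_addr_t *a,
			   void *buf2, size_t len2, dma_addr_t *b)
{
	*a = dma_map_single(dmadev, buf1, len1, DMA_TO_DEVICE);
	if (dma_mapping_error(dmadev, *a))
		return -ENOMEM;

	*b = dma_map_single(dmadev, buf2, len2, DMA_TO_DEVICE);
	if (dma_mapping_error(dmadev, *b)) {
		dma_unmap_single(dmadev, *a, len1, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	return 0;
}
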
@@ -641,24 +656,11 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb(
641static void sbp2util_mark_command_completed(struct sbp2_lu *lu, 656static void sbp2util_mark_command_completed(struct sbp2_lu *lu,
642 struct sbp2_command_info *cmd) 657 struct sbp2_command_info *cmd)
643{ 658{
644 struct hpsb_host *host = lu->ud->ne->host; 659 if (scsi_sg_count(cmd->Current_SCpnt))
645 660 dma_unmap_sg(lu->ud->ne->host->device.parent,
646 if (cmd->cmd_dma) { 661 scsi_sglist(cmd->Current_SCpnt),
647 if (cmd->dma_type == CMD_DMA_SINGLE) 662 scsi_sg_count(cmd->Current_SCpnt),
648 dma_unmap_single(host->device.parent, cmd->cmd_dma, 663 cmd->Current_SCpnt->sc_data_direction);
649 cmd->dma_size, cmd->dma_dir);
650 else if (cmd->dma_type == CMD_DMA_PAGE)
651 dma_unmap_page(host->device.parent, cmd->cmd_dma,
652 cmd->dma_size, cmd->dma_dir);
653 /* XXX: Check for CMD_DMA_NONE bug */
654 cmd->dma_type = CMD_DMA_NONE;
655 cmd->cmd_dma = 0;
656 }
657 if (cmd->sge_buffer) {
658 dma_unmap_sg(host->device.parent, cmd->sge_buffer,
659 cmd->dma_size, cmd->dma_dir);
660 cmd->sge_buffer = NULL;
661 }
662 list_move_tail(&cmd->list, &lu->cmd_orb_completed); 664 list_move_tail(&cmd->list, &lu->cmd_orb_completed);
663} 665}
664 666
@@ -823,6 +825,10 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
823#endif 825#endif
824 } 826 }
825 827
828 if (dma_get_max_seg_size(hi->host->device.parent) > SBP2_MAX_SEG_SIZE)
829 BUG_ON(dma_set_max_seg_size(hi->host->device.parent,
830 SBP2_MAX_SEG_SIZE));
831
826 /* Prevent unloading of the 1394 host */ 832 /* Prevent unloading of the 1394 host */
827 if (!try_module_get(hi->host->driver->owner)) { 833 if (!try_module_get(hi->host->driver->owner)) {
828 SBP2_ERR("failed to get a reference on 1394 host driver"); 834 SBP2_ERR("failed to get a reference on 1394 host driver");
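
This hunk caps the controller's advertised DMA segment size at what SBP-2 can address. A hedged sketch of the same clamp in isolation, using WARN_ON where the patch uses BUG_ON and a placeholder 'wanted' limit:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/* Hypothetical clamp: only ever lower the limit the controller already
 * advertises; 'wanted' corresponds to SBP2_MAX_SEG_SIZE in the hunk above. */
static void clamp_max_seg_size(struct device *dev, unsigned int wanted)
{
	if (dma_get_max_seg_size(dev) > wanted)
		WARN_ON(dma_set_max_seg_size(dev, wanted));
}
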
@@ -1494,84 +1500,65 @@ static int sbp2_agent_reset(struct sbp2_lu *lu, int wait)
1494 return 0; 1500 return 0;
1495} 1501}
1496 1502
1497static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb, 1503static int sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
1498 struct sbp2_fwhost_info *hi, 1504 struct sbp2_fwhost_info *hi,
1499 struct sbp2_command_info *cmd, 1505 struct sbp2_command_info *cmd,
1500 unsigned int scsi_use_sg, 1506 unsigned int sg_count,
1501 struct scatterlist *sg, 1507 struct scatterlist *sg,
1502 u32 orb_direction, 1508 u32 orb_direction,
1503 enum dma_data_direction dma_dir) 1509 enum dma_data_direction dma_dir)
1504{ 1510{
1505 cmd->dma_dir = dma_dir; 1511 struct device *dmadev = hi->host->device.parent;
1512 struct sbp2_unrestricted_page_table *pt;
1513 int i, n;
1514
1515 n = dma_map_sg(dmadev, sg, sg_count, dma_dir);
1516 if (n == 0)
1517 return -ENOMEM;
1518
1506 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id); 1519 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1507 orb->misc |= ORB_SET_DIRECTION(orb_direction); 1520 orb->misc |= ORB_SET_DIRECTION(orb_direction);
1508 1521
1509 /* special case if only one element (and less than 64KB in size) */ 1522 /* special case if only one element (and less than 64KB in size) */
1510 if (scsi_use_sg == 1 && sg->length <= SBP2_MAX_SG_ELEMENT_LENGTH) { 1523 if (n == 1) {
1511 1524 orb->misc |= ORB_SET_DATA_SIZE(sg_dma_len(sg));
1512 cmd->dma_size = sg->length; 1525 orb->data_descriptor_lo = sg_dma_address(sg);
1513 cmd->dma_type = CMD_DMA_PAGE;
1514 cmd->cmd_dma = dma_map_page(hi->host->device.parent,
1515 sg_page(sg), sg->offset,
1516 cmd->dma_size, cmd->dma_dir);
1517
1518 orb->data_descriptor_lo = cmd->cmd_dma;
1519 orb->misc |= ORB_SET_DATA_SIZE(cmd->dma_size);
1520
1521 } else { 1526 } else {
1522 struct sbp2_unrestricted_page_table *sg_element = 1527 pt = &cmd->scatter_gather_element[0];
1523 &cmd->scatter_gather_element[0];
1524 u32 sg_count, sg_len;
1525 dma_addr_t sg_addr;
1526 int i, count = dma_map_sg(hi->host->device.parent, sg,
1527 scsi_use_sg, dma_dir);
1528
1529 cmd->dma_size = scsi_use_sg;
1530 cmd->sge_buffer = sg;
1531
1532 /* use page tables (s/g) */
1533 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1534 orb->data_descriptor_lo = cmd->sge_dma;
1535 1528
1536 /* loop through and fill out our SBP-2 page tables 1529 dma_sync_single_for_cpu(dmadev, cmd->sge_dma,
1537 * (and split up anything too large) */ 1530 sizeof(cmd->scatter_gather_element),
1538 for (i = 0, sg_count = 0; i < count; i++, sg = sg_next(sg)) { 1531 DMA_TO_DEVICE);
1539 sg_len = sg_dma_len(sg); 1532
1540 sg_addr = sg_dma_address(sg); 1533 for_each_sg(sg, sg, n, i) {
1541 while (sg_len) { 1534 pt[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
1542 sg_element[sg_count].segment_base_lo = sg_addr; 1535 pt[i].low = cpu_to_be32(sg_dma_address(sg));
1543 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1544 sg_element[sg_count].length_segment_base_hi =
1545 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1546 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1547 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1548 } else {
1549 sg_element[sg_count].length_segment_base_hi =
1550 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1551 sg_len = 0;
1552 }
1553 sg_count++;
1554 }
1555 } 1536 }
1556 1537
1557 orb->misc |= ORB_SET_DATA_SIZE(sg_count); 1538 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1) |
1539 ORB_SET_DATA_SIZE(n);
1540 orb->data_descriptor_lo = cmd->sge_dma;
1558 1541
1559 sbp2util_cpu_to_be32_buffer(sg_element, 1542 dma_sync_single_for_device(dmadev, cmd->sge_dma,
1560 (sizeof(struct sbp2_unrestricted_page_table)) * 1543 sizeof(cmd->scatter_gather_element),
1561 sg_count); 1544 DMA_TO_DEVICE);
1562 } 1545 }
1546 return 0;
1563} 1547}
1564 1548
1565static void sbp2_create_command_orb(struct sbp2_lu *lu, 1549static int sbp2_create_command_orb(struct sbp2_lu *lu,
1566 struct sbp2_command_info *cmd, 1550 struct sbp2_command_info *cmd,
1567 struct scsi_cmnd *SCpnt) 1551 struct scsi_cmnd *SCpnt)
1568{ 1552{
1569 struct sbp2_fwhost_info *hi = lu->hi; 1553 struct device *dmadev = lu->hi->host->device.parent;
1570 struct sbp2_command_orb *orb = &cmd->command_orb; 1554 struct sbp2_command_orb *orb = &cmd->command_orb;
1571 u32 orb_direction;
1572 unsigned int scsi_request_bufflen = scsi_bufflen(SCpnt); 1555 unsigned int scsi_request_bufflen = scsi_bufflen(SCpnt);
1573 enum dma_data_direction dma_dir = SCpnt->sc_data_direction; 1556 enum dma_data_direction dma_dir = SCpnt->sc_data_direction;
1557 u32 orb_direction;
1558 int ret;
1574 1559
1560 dma_sync_single_for_cpu(dmadev, cmd->command_orb_dma,
1561 sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
1575 /* 1562 /*
1576 * Set-up our command ORB. 1563 * Set-up our command ORB.
1577 * 1564 *
@@ -1602,15 +1589,21 @@ static void sbp2_create_command_orb(struct sbp2_lu *lu,
1602 orb->data_descriptor_hi = 0x0; 1589 orb->data_descriptor_hi = 0x0;
1603 orb->data_descriptor_lo = 0x0; 1590 orb->data_descriptor_lo = 0x0;
1604 orb->misc |= ORB_SET_DIRECTION(1); 1591 orb->misc |= ORB_SET_DIRECTION(1);
1605 } else 1592 ret = 0;
1606 sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_sg_count(SCpnt), 1593 } else {
1607 scsi_sglist(SCpnt), 1594 ret = sbp2_prep_command_orb_sg(orb, lu->hi, cmd,
1608 orb_direction, dma_dir); 1595 scsi_sg_count(SCpnt),
1609 1596 scsi_sglist(SCpnt),
1597 orb_direction, dma_dir);
1598 }
1610 sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb)); 1599 sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
1611 1600
1612 memset(orb->cdb, 0, sizeof(orb->cdb)); 1601 memset(orb->cdb, 0, sizeof(orb->cdb));
1613 memcpy(orb->cdb, SCpnt->cmnd, SCpnt->cmd_len); 1602 memcpy(orb->cdb, SCpnt->cmnd, SCpnt->cmd_len);
1603
1604 dma_sync_single_for_device(dmadev, cmd->command_orb_dma,
1605 sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
1606 return ret;
1614} 1607}
1615 1608
1616static void sbp2_link_orb_command(struct sbp2_lu *lu, 1609static void sbp2_link_orb_command(struct sbp2_lu *lu,
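
The rewritten sbp2_prep_command_orb_sg() and sbp2_create_command_orb() keep the ORB and page table mapped for the command's whole lifetime, so every CPU update is now bracketed by dma_sync_single_for_cpu()/_for_device(). Sketched in isolation with placeholder names (not the driver's own helper):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Hypothetical helper: the device may read the mapped buffer at any time,
 * so CPU writes are bracketed by ownership transfers, as the patch now
 * does for the command ORB and the scatter/gather page table. */
static void cpu_update_mapped_buf(struct device *dev, dma_addr_t dma,
				  void *cpu_addr, size_t len, int pattern)
{
	dma_sync_single_for_cpu(dev, dma, len, DMA_TO_DEVICE);
	memset(cpu_addr, pattern, len);		/* CPU owns the buffer here */
	dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
}
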
@@ -1624,14 +1617,6 @@ static void sbp2_link_orb_command(struct sbp2_lu *lu,
1624 size_t length; 1617 size_t length;
1625 unsigned long flags; 1618 unsigned long flags;
1626 1619
1627 dma_sync_single_for_device(hi->host->device.parent,
1628 cmd->command_orb_dma,
1629 sizeof(struct sbp2_command_orb),
1630 DMA_TO_DEVICE);
1631 dma_sync_single_for_device(hi->host->device.parent, cmd->sge_dma,
1632 sizeof(cmd->scatter_gather_element),
1633 DMA_TO_DEVICE);
1634
1635 /* check to see if there are any previous orbs to use */ 1620 /* check to see if there are any previous orbs to use */
1636 spin_lock_irqsave(&lu->cmd_orb_lock, flags); 1621 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
1637 last_orb = lu->last_orb; 1622 last_orb = lu->last_orb;
@@ -1699,9 +1684,10 @@ static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
1699 if (!cmd) 1684 if (!cmd)
1700 return -EIO; 1685 return -EIO;
1701 1686
1702 sbp2_create_command_orb(lu, cmd, SCpnt); 1687 if (sbp2_create_command_orb(lu, cmd, SCpnt))
1703 sbp2_link_orb_command(lu, cmd); 1688 return -ENOMEM;
1704 1689
1690 sbp2_link_orb_command(lu, cmd);
1705 return 0; 1691 return 0;
1706} 1692}
1707 1693
@@ -1789,13 +1775,6 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
1789 else 1775 else
1790 cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo); 1776 cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo);
1791 if (cmd) { 1777 if (cmd) {
1792 dma_sync_single_for_cpu(hi->host->device.parent,
1793 cmd->command_orb_dma,
1794 sizeof(struct sbp2_command_orb),
1795 DMA_TO_DEVICE);
1796 dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
1797 sizeof(cmd->scatter_gather_element),
1798 DMA_TO_DEVICE);
1799 /* Grab SCSI command pointers and check status. */ 1778 /* Grab SCSI command pointers and check status. */
1800 /* 1779 /*
1801 * FIXME: If the src field in the status is 1, the ORB DMA must 1780 * FIXME: If the src field in the status is 1, the ORB DMA must
@@ -1912,7 +1891,6 @@ done:
1912 1891
1913static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status) 1892static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
1914{ 1893{
1915 struct sbp2_fwhost_info *hi = lu->hi;
1916 struct list_head *lh; 1894 struct list_head *lh;
1917 struct sbp2_command_info *cmd; 1895 struct sbp2_command_info *cmd;
1918 unsigned long flags; 1896 unsigned long flags;
@@ -1921,13 +1899,6 @@ static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
1921 while (!list_empty(&lu->cmd_orb_inuse)) { 1899 while (!list_empty(&lu->cmd_orb_inuse)) {
1922 lh = lu->cmd_orb_inuse.next; 1900 lh = lu->cmd_orb_inuse.next;
1923 cmd = list_entry(lh, struct sbp2_command_info, list); 1901 cmd = list_entry(lh, struct sbp2_command_info, list);
1924 dma_sync_single_for_cpu(hi->host->device.parent,
1925 cmd->command_orb_dma,
1926 sizeof(struct sbp2_command_orb),
1927 DMA_TO_DEVICE);
1928 dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
1929 sizeof(cmd->scatter_gather_element),
1930 DMA_TO_DEVICE);
1931 sbp2util_mark_command_completed(lu, cmd); 1902 sbp2util_mark_command_completed(lu, cmd);
1932 if (cmd->Current_SCpnt) { 1903 if (cmd->Current_SCpnt) {
1933 cmd->Current_SCpnt->result = status << 16; 1904 cmd->Current_SCpnt->result = status << 16;
@@ -2033,6 +2004,8 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2033 sdev->start_stop_pwr_cond = 1; 2004 sdev->start_stop_pwr_cond = 1;
2034 if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) 2005 if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
2035 blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512); 2006 blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
2007
2008 blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
2036 return 0; 2009 return 0;
2037} 2010}
2038 2011
@@ -2049,7 +2022,6 @@ static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
2049static int sbp2scsi_abort(struct scsi_cmnd *SCpnt) 2022static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2050{ 2023{
2051 struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0]; 2024 struct sbp2_lu *lu = (struct sbp2_lu *)SCpnt->device->host->hostdata[0];
2052 struct sbp2_fwhost_info *hi = lu->hi;
2053 struct sbp2_command_info *cmd; 2025 struct sbp2_command_info *cmd;
2054 unsigned long flags; 2026 unsigned long flags;
2055 2027
@@ -2063,14 +2035,6 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2063 spin_lock_irqsave(&lu->cmd_orb_lock, flags); 2035 spin_lock_irqsave(&lu->cmd_orb_lock, flags);
2064 cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt); 2036 cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt);
2065 if (cmd) { 2037 if (cmd) {
2066 dma_sync_single_for_cpu(hi->host->device.parent,
2067 cmd->command_orb_dma,
2068 sizeof(struct sbp2_command_orb),
2069 DMA_TO_DEVICE);
2070 dma_sync_single_for_cpu(hi->host->device.parent,
2071 cmd->sge_dma,
2072 sizeof(cmd->scatter_gather_element),
2073 DMA_TO_DEVICE);
2074 sbp2util_mark_command_completed(lu, cmd); 2038 sbp2util_mark_command_completed(lu, cmd);
2075 if (cmd->Current_SCpnt) { 2039 if (cmd->Current_SCpnt) {
2076 cmd->Current_SCpnt->result = DID_ABORT << 16; 2040 cmd->Current_SCpnt->result = DID_ABORT << 16;
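
With the single-page special case gone, sbp2util_mark_command_completed() now undoes exactly what sbp2_prep_command_orb_sg() set up: the scatterlist mapped with dma_map_sg() is released with dma_unmap_sg() using the original entry count and direction. That lifecycle, reduced to a hypothetical helper:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Hypothetical lifecycle: dma_map_sg() may merge entries (it returns n),
 * but dma_unmap_sg() still takes the original nents and direction. */
static int do_sg_transfer(struct device *dmadev, struct scatterlist *sg,
			  int nents, enum dma_data_direction dir)
{
	int n = dma_map_sg(dmadev, sg, nents, dir);

	if (n == 0)
		return -ENOMEM;

	/* ... program the n mapped segments into the hardware and wait ... */

	dma_unmap_sg(dmadev, sg, nents, dir);
	return 0;
}
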
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index 875428bc8d29..c5036f1cc5b0 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -139,13 +139,10 @@ struct sbp2_logout_orb {
139 u32 status_fifo_lo; 139 u32 status_fifo_lo;
140} __attribute__((packed)); 140} __attribute__((packed));
141 141
142#define PAGE_TABLE_SET_SEGMENT_BASE_HI(v) ((v) & 0xffff)
143#define PAGE_TABLE_SET_SEGMENT_LENGTH(v) (((v) & 0xffff) << 16)
144
145struct sbp2_unrestricted_page_table { 142struct sbp2_unrestricted_page_table {
146 u32 length_segment_base_hi; 143 __be32 high;
147 u32 segment_base_lo; 144 __be32 low;
148} __attribute__((packed)); 145};
149 146
150#define RESP_STATUS_REQUEST_COMPLETE 0x0 147#define RESP_STATUS_REQUEST_COMPLETE 0x0
151#define RESP_STATUS_TRANSPORT_FAILURE 0x1 148#define RESP_STATUS_TRANSPORT_FAILURE 0x1
@@ -216,15 +213,18 @@ struct sbp2_status_block {
216#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e 213#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
217#define SBP2_SW_VERSION_ENTRY 0x00010483 214#define SBP2_SW_VERSION_ENTRY 0x00010483
218 215
219
220/* 216/*
221 * SCSI specific definitions 217 * The default maximum s/g segment size of a FireWire controller is
218 * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
219 * be quadlet-aligned, we set the length limit to 0xffff & ~3.
222 */ 220 */
221#define SBP2_MAX_SEG_SIZE 0xfffc
223 222
224#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 223/*
225/* There is no real limitation of the queue depth (i.e. length of the linked 224 * There is no real limitation of the queue depth (i.e. length of the linked
226 * list of command ORBs) at the target. The chosen depth is merely an 225 * list of command ORBs) at the target. The chosen depth is merely an
227 * implementation detail of the sbp2 driver. */ 226 * implementation detail of the sbp2 driver.
227 */
228#define SBP2_MAX_CMDS 8 228#define SBP2_MAX_CMDS 8
229 229
230#define SBP2_SCSI_STATUS_GOOD 0x0 230#define SBP2_SCSI_STATUS_GOOD 0x0
@@ -240,12 +240,6 @@ struct sbp2_status_block {
240 * Representations of commands and devices 240 * Representations of commands and devices
241 */ 241 */
242 242
243enum sbp2_dma_types {
244 CMD_DMA_NONE,
245 CMD_DMA_PAGE,
246 CMD_DMA_SINGLE
247};
248
249/* Per SCSI command */ 243/* Per SCSI command */
250struct sbp2_command_info { 244struct sbp2_command_info {
251 struct list_head list; 245 struct list_head list;
@@ -258,11 +252,6 @@ struct sbp2_command_info {
258 struct sbp2_unrestricted_page_table 252 struct sbp2_unrestricted_page_table
259 scatter_gather_element[SG_ALL] __attribute__((aligned(8))); 253 scatter_gather_element[SG_ALL] __attribute__((aligned(8)));
260 dma_addr_t sge_dma; 254 dma_addr_t sge_dma;
261 void *sge_buffer;
262 dma_addr_t cmd_dma;
263 enum sbp2_dma_types dma_type;
264 unsigned long dma_size;
265 enum dma_data_direction dma_dir;
266}; 255};
267 256
268/* Per FireWire host */ 257/* Per FireWire host */
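
The new header comment explains the limit: SBP-2 segment lengths are 16-bit, so the ceiling is 0xffff, and rounding down to a quadlet multiple gives 0xffff & ~3 = 0xfffc. With the __be32 layout, one unrestricted page-table entry packs that length into the upper 16 bits of 'high' and the bus address into 'low', exactly as the sbp2.c loop above does. A hedged sketch (the helper name is invented, the struct is the one declared above):

#include <asm/byteorder.h>
#include <linux/types.h>

/* Invented helper: pack one SBP-2 unrestricted page-table entry.
 * 'len' never exceeds SBP2_MAX_SEG_SIZE (0xffff & ~3 == 0xfffc), so it
 * always fits in the upper 16 bits of 'high'. */
static void sbp2_fill_pt_entry(struct sbp2_unrestricted_page_table *pt,
			       u32 len, dma_addr_t addr)
{
	pt->high = cpu_to_be32(len << 16);
	pt->low  = cpu_to_be32(addr);
}
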
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 6e73b06eed4f..679a918a5cc7 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -893,7 +893,7 @@ static long video1394_ioctl(struct file *file,
893 if (unlikely(d == NULL)) 893 if (unlikely(d == NULL))
894 return -EFAULT; 894 return -EFAULT;
895 895
896 if (unlikely((v.buffer<0) || (v.buffer>=d->num_desc - 1))) { 896 if (unlikely(v.buffer >= d->num_desc - 1)) {
897 PRINT(KERN_ERR, ohci->host->id, 897 PRINT(KERN_ERR, ohci->host->id,
898 "Buffer %d out of range",v.buffer); 898 "Buffer %d out of range",v.buffer);
899 return -EINVAL; 899 return -EINVAL;
@@ -959,7 +959,7 @@ static long video1394_ioctl(struct file *file,
959 if (unlikely(d == NULL)) 959 if (unlikely(d == NULL))
960 return -EFAULT; 960 return -EFAULT;
961 961
962 if (unlikely((v.buffer<0) || (v.buffer>d->num_desc - 1))) { 962 if (unlikely(v.buffer > d->num_desc - 1)) {
963 PRINT(KERN_ERR, ohci->host->id, 963 PRINT(KERN_ERR, ohci->host->id,
964 "Buffer %d out of range",v.buffer); 964 "Buffer %d out of range",v.buffer);
965 return -EINVAL; 965 return -EINVAL;
@@ -1030,7 +1030,7 @@ static long video1394_ioctl(struct file *file,
1030 d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel); 1030 d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
1031 if (d == NULL) return -EFAULT; 1031 if (d == NULL) return -EFAULT;
1032 1032
1033 if ((v.buffer<0) || (v.buffer>=d->num_desc - 1)) { 1033 if (v.buffer >= d->num_desc - 1) {
1034 PRINT(KERN_ERR, ohci->host->id, 1034 PRINT(KERN_ERR, ohci->host->id,
1035 "Buffer %d out of range",v.buffer); 1035 "Buffer %d out of range",v.buffer);
1036 return -EINVAL; 1036 return -EINVAL;
@@ -1137,7 +1137,7 @@ static long video1394_ioctl(struct file *file,
1137 d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel); 1137 d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
1138 if (d == NULL) return -EFAULT; 1138 if (d == NULL) return -EFAULT;
1139 1139
1140 if ((v.buffer<0) || (v.buffer>=d->num_desc-1)) { 1140 if (v.buffer >= d->num_desc - 1) {
1141 PRINT(KERN_ERR, ohci->host->id, 1141 PRINT(KERN_ERR, ohci->host->id,
1142 "Buffer %d out of range",v.buffer); 1142 "Buffer %d out of range",v.buffer);
1143 return -EINVAL; 1143 return -EINVAL;