about summary refs log tree commit diff stats
path: root/drivers/firewire
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/firewire')
-rw-r--r--drivers/firewire/fw-card.c7
-rw-r--r--drivers/firewire/fw-cdev.c2
-rw-r--r--drivers/firewire/fw-device.c38
-rw-r--r--drivers/firewire/fw-device.h1
-rw-r--r--drivers/firewire/fw-ohci.c6
-rw-r--r--drivers/firewire/fw-sbp2.c117
-rw-r--r--drivers/firewire/fw-topology.c66
-rw-r--r--drivers/firewire/fw-topology.h25
-rw-r--r--drivers/firewire/fw-transaction.h3
9 files changed, 142 insertions(+), 123 deletions(-)
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index 9eb1edacd825..0aeab3218bb6 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -336,8 +336,11 @@ fw_card_bm_work(struct work_struct *work)
336 } 336 }
337 337
338 pick_me: 338 pick_me:
339 /* Now figure out what gap count to set. */ 339 /*
340 if (card->topology_type == FW_TOPOLOGY_A && 340 * Pick a gap count from 1394a table E-1. The table doesn't cover
341 * the typically much larger 1394b beta repeater delays though.
342 */
343 if (!card->beta_repeaters_present &&
341 card->root_node->max_hops < ARRAY_SIZE(gap_count_table)) 344 card->root_node->max_hops < ARRAY_SIZE(gap_count_table))
342 gap_count = gap_count_table[card->root_node->max_hops]; 345 gap_count = gap_count_table[card->root_node->max_hops];
343 else 346 else
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index dbb76427d529..75388641a7d3 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -397,7 +397,7 @@ static int ioctl_send_request(struct client *client, void *buffer)
397 request->tcode & 0x1f, 397 request->tcode & 0x1f,
398 device->node->node_id, 398 device->node->node_id,
399 request->generation, 399 request->generation,
400 device->node->max_speed, 400 device->max_speed,
401 request->offset, 401 request->offset,
402 response->response.data, request->length, 402 response->response.data, request->length,
403 complete_transaction, response); 403 complete_transaction, response);
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index c1ce465d9710..2b6586341635 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -401,8 +401,7 @@ static int read_rom(struct fw_device *device, int index, u32 * data)
401 401
402 offset = 0xfffff0000400ULL + index * 4; 402 offset = 0xfffff0000400ULL + index * 4;
403 fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST, 403 fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
404 device->node_id, 404 device->node_id, device->generation, device->max_speed,
405 device->generation, SCODE_100,
406 offset, NULL, 4, complete_transaction, &callback_data); 405 offset, NULL, 4, complete_transaction, &callback_data);
407 406
408 wait_for_completion(&callback_data.done); 407 wait_for_completion(&callback_data.done);
@@ -418,6 +417,8 @@ static int read_bus_info_block(struct fw_device *device)
418 u32 stack[16], sp, key; 417 u32 stack[16], sp, key;
419 int i, end, length; 418 int i, end, length;
420 419
420 device->max_speed = SCODE_100;
421
421 /* First read the bus info block. */ 422 /* First read the bus info block. */
422 for (i = 0; i < 5; i++) { 423 for (i = 0; i < 5; i++) {
423 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE) 424 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
@@ -434,6 +435,33 @@ static int read_bus_info_block(struct fw_device *device)
434 return -1; 435 return -1;
435 } 436 }
436 437
438 device->max_speed = device->node->max_speed;
439
440 /*
441 * Determine the speed of
442 * - devices with link speed less than PHY speed,
443 * - devices with 1394b PHY (unless only connected to 1394a PHYs),
444 * - all devices if there are 1394b repeaters.
445 * Note, we cannot use the bus info block's link_spd as starting point
446 * because some buggy firmwares set it lower than necessary and because
447 * 1394-1995 nodes do not have the field.
448 */
449 if ((rom[2] & 0x7) < device->max_speed ||
450 device->max_speed == SCODE_BETA ||
451 device->card->beta_repeaters_present) {
452 u32 dummy;
453
454 /* for S1600 and S3200 */
455 if (device->max_speed == SCODE_BETA)
456 device->max_speed = device->card->link_speed;
457
458 while (device->max_speed > SCODE_100) {
459 if (read_rom(device, 0, &dummy) == RCODE_COMPLETE)
460 break;
461 device->max_speed--;
462 }
463 }
464
437 /* 465 /*
438 * Now parse the config rom. The config rom is a recursive 466 * Now parse the config rom. The config rom is a recursive
439 * directory structure so we parse it using a stack of 467 * directory structure so we parse it using a stack of
@@ -680,8 +708,10 @@ static void fw_device_init(struct work_struct *work)
680 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) 708 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
681 fw_device_shutdown(&device->work.work); 709 fw_device_shutdown(&device->work.work);
682 else 710 else
683 fw_notify("created new fw device %s (%d config rom retries)\n", 711 fw_notify("created new fw device %s "
684 device->device.bus_id, device->config_rom_retries); 712 "(%d config rom retries, S%d00)\n",
713 device->device.bus_id, device->config_rom_retries,
714 1 << device->max_speed);
685 715
686 /* 716 /*
687 * Reschedule the IRM work if we just finished reading the 717 * Reschedule the IRM work if we just finished reading the
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index af1723eae4ba..d13e6a69707f 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -40,6 +40,7 @@ struct fw_device {
40 struct fw_node *node; 40 struct fw_node *node;
41 int node_id; 41 int node_id;
42 int generation; 42 int generation;
43 unsigned max_speed;
43 struct fw_card *card; 44 struct fw_card *card;
44 struct device device; 45 struct device device;
45 struct list_head link; 46 struct list_head link;
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 96c8ac5b86cc..41476abc0693 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -1934,12 +1934,12 @@ static int pci_suspend(struct pci_dev *pdev, pm_message_t state)
1934 free_irq(pdev->irq, ohci); 1934 free_irq(pdev->irq, ohci);
1935 err = pci_save_state(pdev); 1935 err = pci_save_state(pdev);
1936 if (err) { 1936 if (err) {
1937 fw_error("pci_save_state failed with %d", err); 1937 fw_error("pci_save_state failed\n");
1938 return err; 1938 return err;
1939 } 1939 }
1940 err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1940 err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
1941 if (err) { 1941 if (err) {
1942 fw_error("pci_set_power_state failed with %d", err); 1942 fw_error("pci_set_power_state failed\n");
1943 return err; 1943 return err;
1944 } 1944 }
1945 1945
@@ -1955,7 +1955,7 @@ static int pci_resume(struct pci_dev *pdev)
1955 pci_restore_state(pdev); 1955 pci_restore_state(pdev);
1956 err = pci_enable_device(pdev); 1956 err = pci_enable_device(pdev);
1957 if (err) { 1957 if (err) {
1958 fw_error("pci_enable_device failed with %d", err); 1958 fw_error("pci_enable_device failed\n");
1959 return err; 1959 return err;
1960 } 1960 }
1961 1961
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index a98d3915e26f..7c53be0387fb 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -30,10 +30,13 @@
30 30
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/moduleparam.h>
33#include <linux/mod_devicetable.h> 34#include <linux/mod_devicetable.h>
34#include <linux/device.h> 35#include <linux/device.h>
35#include <linux/scatterlist.h> 36#include <linux/scatterlist.h>
36#include <linux/dma-mapping.h> 37#include <linux/dma-mapping.h>
38#include <linux/blkdev.h>
39#include <linux/string.h>
37#include <linux/timer.h> 40#include <linux/timer.h>
38 41
39#include <scsi/scsi.h> 42#include <scsi/scsi.h>
@@ -46,6 +49,18 @@
46#include "fw-topology.h" 49#include "fw-topology.h"
47#include "fw-device.h" 50#include "fw-device.h"
48 51
52/*
53 * So far only bridges from Oxford Semiconductor are known to support
54 * concurrent logins. Depending on firmware, four or two concurrent logins
55 * are possible on OXFW911 and newer Oxsemi bridges.
56 *
57 * Concurrent logins are useful together with cluster filesystems.
58 */
59static int sbp2_param_exclusive_login = 1;
60module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
61MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
62 "(default = Y, use N for concurrent initiators)");
63
49/* I don't know why the SCSI stack doesn't define something like this... */ 64/* I don't know why the SCSI stack doesn't define something like this... */
50typedef void (*scsi_done_fn_t)(struct scsi_cmnd *); 65typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
51 66
@@ -154,7 +169,7 @@ struct sbp2_orb {
154#define MANAGEMENT_ORB_LUN(v) ((v)) 169#define MANAGEMENT_ORB_LUN(v) ((v))
155#define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16) 170#define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16)
156#define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20) 171#define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20)
157#define MANAGEMENT_ORB_EXCLUSIVE ((1) << 28) 172#define MANAGEMENT_ORB_EXCLUSIVE(v) ((v) ? 1 << 28 : 0)
158#define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29) 173#define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29)
159#define MANAGEMENT_ORB_NOTIFY ((1) << 31) 174#define MANAGEMENT_ORB_NOTIFY ((1) << 31)
160 175
@@ -205,9 +220,8 @@ struct sbp2_command_orb {
205 scsi_done_fn_t done; 220 scsi_done_fn_t done;
206 struct fw_unit *unit; 221 struct fw_unit *unit;
207 222
208 struct sbp2_pointer page_table[SG_ALL]; 223 struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
209 dma_addr_t page_table_bus; 224 dma_addr_t page_table_bus;
210 dma_addr_t request_buffer_bus;
211}; 225};
212 226
213/* 227/*
@@ -347,8 +361,7 @@ sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
347 spin_unlock_irqrestore(&device->card->lock, flags); 361 spin_unlock_irqrestore(&device->card->lock, flags);
348 362
349 fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, 363 fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
350 node_id, generation, 364 node_id, generation, device->max_speed, offset,
351 device->node->max_speed, offset,
352 &orb->pointer, sizeof(orb->pointer), 365 &orb->pointer, sizeof(orb->pointer),
353 complete_transaction, orb); 366 complete_transaction, orb);
354} 367}
@@ -383,7 +396,7 @@ static void
383complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) 396complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
384{ 397{
385 struct sbp2_management_orb *orb = 398 struct sbp2_management_orb *orb =
386 (struct sbp2_management_orb *)base_orb; 399 container_of(base_orb, struct sbp2_management_orb, base);
387 400
388 if (status) 401 if (status)
389 memcpy(&orb->status, status, sizeof(*status)); 402 memcpy(&orb->status, status, sizeof(*status));
@@ -403,21 +416,11 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
403 if (orb == NULL) 416 if (orb == NULL)
404 return -ENOMEM; 417 return -ENOMEM;
405 418
406 /*
407 * The sbp2 device is going to send a block read request to
408 * read out the request from host memory, so map it for dma.
409 */
410 orb->base.request_bus =
411 dma_map_single(device->card->device, &orb->request,
412 sizeof(orb->request), DMA_TO_DEVICE);
413 if (dma_mapping_error(orb->base.request_bus))
414 goto out;
415
416 orb->response_bus = 419 orb->response_bus =
417 dma_map_single(device->card->device, &orb->response, 420 dma_map_single(device->card->device, &orb->response,
418 sizeof(orb->response), DMA_FROM_DEVICE); 421 sizeof(orb->response), DMA_FROM_DEVICE);
419 if (dma_mapping_error(orb->response_bus)) 422 if (dma_mapping_error(orb->response_bus))
420 goto out; 423 goto fail_mapping_response;
421 424
422 orb->request.response.high = 0; 425 orb->request.response.high = 0;
423 orb->request.response.low = orb->response_bus; 426 orb->request.response.low = orb->response_bus;
@@ -432,14 +435,9 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
432 orb->request.status_fifo.high = sd->address_handler.offset >> 32; 435 orb->request.status_fifo.high = sd->address_handler.offset >> 32;
433 orb->request.status_fifo.low = sd->address_handler.offset; 436 orb->request.status_fifo.low = sd->address_handler.offset;
434 437
435 /*
436 * FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
437 * login and 1 second reconnect time. The reconnect setting
438 * is probably fine, but the exclusive login should be an option.
439 */
440 if (function == SBP2_LOGIN_REQUEST) { 438 if (function == SBP2_LOGIN_REQUEST) {
441 orb->request.misc |= 439 orb->request.misc |=
442 MANAGEMENT_ORB_EXCLUSIVE | 440 MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login) |
443 MANAGEMENT_ORB_RECONNECT(0); 441 MANAGEMENT_ORB_RECONNECT(0);
444 } 442 }
445 443
@@ -448,6 +446,12 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
448 init_completion(&orb->done); 446 init_completion(&orb->done);
449 orb->base.callback = complete_management_orb; 447 orb->base.callback = complete_management_orb;
450 448
449 orb->base.request_bus =
450 dma_map_single(device->card->device, &orb->request,
451 sizeof(orb->request), DMA_TO_DEVICE);
452 if (dma_mapping_error(orb->base.request_bus))
453 goto fail_mapping_request;
454
451 sbp2_send_orb(&orb->base, unit, 455 sbp2_send_orb(&orb->base, unit,
452 node_id, generation, sd->management_agent_address); 456 node_id, generation, sd->management_agent_address);
453 457
@@ -479,9 +483,10 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
479 out: 483 out:
480 dma_unmap_single(device->card->device, orb->base.request_bus, 484 dma_unmap_single(device->card->device, orb->base.request_bus,
481 sizeof(orb->request), DMA_TO_DEVICE); 485 sizeof(orb->request), DMA_TO_DEVICE);
486 fail_mapping_request:
482 dma_unmap_single(device->card->device, orb->response_bus, 487 dma_unmap_single(device->card->device, orb->response_bus,
483 sizeof(orb->response), DMA_FROM_DEVICE); 488 sizeof(orb->response), DMA_FROM_DEVICE);
484 489 fail_mapping_response:
485 if (response) 490 if (response)
486 fw_memcpy_from_be32(response, 491 fw_memcpy_from_be32(response,
487 orb->response, sizeof(orb->response)); 492 orb->response, sizeof(orb->response));
@@ -511,7 +516,7 @@ static int sbp2_agent_reset(struct fw_unit *unit)
511 return -ENOMEM; 516 return -ENOMEM;
512 517
513 fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, 518 fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
514 sd->node_id, sd->generation, SCODE_400, 519 sd->node_id, sd->generation, device->max_speed,
515 sd->command_block_agent_address + SBP2_AGENT_RESET, 520 sd->command_block_agent_address + SBP2_AGENT_RESET,
516 &zero, sizeof(zero), complete_agent_reset_write, t); 521 &zero, sizeof(zero), complete_agent_reset_write, t);
517 522
@@ -521,17 +526,15 @@ static int sbp2_agent_reset(struct fw_unit *unit)
521static void sbp2_reconnect(struct work_struct *work); 526static void sbp2_reconnect(struct work_struct *work);
522static struct scsi_host_template scsi_driver_template; 527static struct scsi_host_template scsi_driver_template;
523 528
524static void 529static void release_sbp2_device(struct kref *kref)
525release_sbp2_device(struct kref *kref)
526{ 530{
527 struct sbp2_device *sd = container_of(kref, struct sbp2_device, kref); 531 struct sbp2_device *sd = container_of(kref, struct sbp2_device, kref);
528 struct Scsi_Host *host = 532 struct Scsi_Host *host =
529 container_of((void *)sd, struct Scsi_Host, hostdata[0]); 533 container_of((void *)sd, struct Scsi_Host, hostdata[0]);
530 534
535 scsi_remove_host(host);
531 sbp2_send_management_orb(sd->unit, sd->node_id, sd->generation, 536 sbp2_send_management_orb(sd->unit, sd->node_id, sd->generation,
532 SBP2_LOGOUT_REQUEST, sd->login_id, NULL); 537 SBP2_LOGOUT_REQUEST, sd->login_id, NULL);
533
534 scsi_remove_host(host);
535 fw_core_remove_address_handler(&sd->address_handler); 538 fw_core_remove_address_handler(&sd->address_handler);
536 fw_notify("removed sbp2 unit %s\n", sd->unit->device.bus_id); 539 fw_notify("removed sbp2 unit %s\n", sd->unit->device.bus_id);
537 put_device(&sd->unit->device); 540 put_device(&sd->unit->device);
@@ -833,7 +836,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
833static void 836static void
834complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) 837complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
835{ 838{
836 struct sbp2_command_orb *orb = (struct sbp2_command_orb *)base_orb; 839 struct sbp2_command_orb *orb =
840 container_of(base_orb, struct sbp2_command_orb, base);
837 struct fw_unit *unit = orb->unit; 841 struct fw_unit *unit = orb->unit;
838 struct fw_device *device = fw_device(unit->device.parent); 842 struct fw_device *device = fw_device(unit->device.parent);
839 struct scatterlist *sg; 843 struct scatterlist *sg;
@@ -880,12 +884,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
880 884
881 if (orb->page_table_bus != 0) 885 if (orb->page_table_bus != 0)
882 dma_unmap_single(device->card->device, orb->page_table_bus, 886 dma_unmap_single(device->card->device, orb->page_table_bus,
883 sizeof(orb->page_table_bus), DMA_TO_DEVICE); 887 sizeof(orb->page_table), DMA_TO_DEVICE);
884
885 if (orb->request_buffer_bus != 0)
886 dma_unmap_single(device->card->device, orb->request_buffer_bus,
887 sizeof(orb->request_buffer_bus),
888 DMA_FROM_DEVICE);
889 888
890 orb->cmd->result = result; 889 orb->cmd->result = result;
891 orb->done(orb->cmd); 890 orb->done(orb->cmd);
@@ -900,7 +899,6 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
900 struct fw_device *device = fw_device(unit->device.parent); 899 struct fw_device *device = fw_device(unit->device.parent);
901 struct scatterlist *sg; 900 struct scatterlist *sg;
902 int sg_len, l, i, j, count; 901 int sg_len, l, i, j, count;
903 size_t size;
904 dma_addr_t sg_addr; 902 dma_addr_t sg_addr;
905 903
906 sg = (struct scatterlist *)orb->cmd->request_buffer; 904 sg = (struct scatterlist *)orb->cmd->request_buffer;
@@ -935,6 +933,11 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
935 sg_len = sg_dma_len(sg + i); 933 sg_len = sg_dma_len(sg + i);
936 sg_addr = sg_dma_address(sg + i); 934 sg_addr = sg_dma_address(sg + i);
937 while (sg_len) { 935 while (sg_len) {
936 /* FIXME: This won't get us out of the pinch. */
937 if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
938 fw_error("page table overflow\n");
939 goto fail_page_table;
940 }
938 l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH); 941 l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
939 orb->page_table[j].low = sg_addr; 942 orb->page_table[j].low = sg_addr;
940 orb->page_table[j].high = (l << 16); 943 orb->page_table[j].high = (l << 16);
@@ -944,7 +947,13 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
944 } 947 }
945 } 948 }
946 949
947 size = sizeof(orb->page_table[0]) * j; 950 fw_memcpy_to_be32(orb->page_table, orb->page_table,
951 sizeof(orb->page_table[0]) * j);
952 orb->page_table_bus =
953 dma_map_single(device->card->device, orb->page_table,
954 sizeof(orb->page_table), DMA_TO_DEVICE);
955 if (dma_mapping_error(orb->page_table_bus))
956 goto fail_page_table;
948 957
949 /* 958 /*
950 * The data_descriptor pointer is the one case where we need 959 * The data_descriptor pointer is the one case where we need
@@ -953,20 +962,12 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
953 * initiator (i.e. us), but data_descriptor can refer to data 962 * initiator (i.e. us), but data_descriptor can refer to data
954 * on other nodes so we need to put our ID in descriptor.high. 963 * on other nodes so we need to put our ID in descriptor.high.
955 */ 964 */
956
957 orb->page_table_bus =
958 dma_map_single(device->card->device, orb->page_table,
959 size, DMA_TO_DEVICE);
960 if (dma_mapping_error(orb->page_table_bus))
961 goto fail_page_table;
962 orb->request.data_descriptor.high = sd->address_high; 965 orb->request.data_descriptor.high = sd->address_high;
963 orb->request.data_descriptor.low = orb->page_table_bus; 966 orb->request.data_descriptor.low = orb->page_table_bus;
964 orb->request.misc |= 967 orb->request.misc |=
965 COMMAND_ORB_PAGE_TABLE_PRESENT | 968 COMMAND_ORB_PAGE_TABLE_PRESENT |
966 COMMAND_ORB_DATA_SIZE(j); 969 COMMAND_ORB_DATA_SIZE(j);
967 970
968 fw_memcpy_to_be32(orb->page_table, orb->page_table, size);
969
970 return 0; 971 return 0;
971 972
972 fail_page_table: 973 fail_page_table:
@@ -991,7 +992,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
991 * transfer direction not handled. 992 * transfer direction not handled.
992 */ 993 */
993 if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) { 994 if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
994 fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command"); 995 fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
995 cmd->result = DID_ERROR << 16; 996 cmd->result = DID_ERROR << 16;
996 done(cmd); 997 done(cmd);
997 return 0; 998 return 0;
@@ -1005,11 +1006,6 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1005 1006
1006 /* Initialize rcode to something not RCODE_COMPLETE. */ 1007 /* Initialize rcode to something not RCODE_COMPLETE. */
1007 orb->base.rcode = -1; 1008 orb->base.rcode = -1;
1008 orb->base.request_bus =
1009 dma_map_single(device->card->device, &orb->request,
1010 sizeof(orb->request), DMA_TO_DEVICE);
1011 if (dma_mapping_error(orb->base.request_bus))
1012 goto fail_mapping;
1013 1009
1014 orb->unit = unit; 1010 orb->unit = unit;
1015 orb->done = done; 1011 orb->done = done;
@@ -1024,8 +1020,8 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1024 * if we set this to max_speed + 7, we get the right value. 1020 * if we set this to max_speed + 7, we get the right value.
1025 */ 1021 */
1026 orb->request.misc = 1022 orb->request.misc =
1027 COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) | 1023 COMMAND_ORB_MAX_PAYLOAD(device->max_speed + 7) |
1028 COMMAND_ORB_SPEED(device->node->max_speed) | 1024 COMMAND_ORB_SPEED(device->max_speed) |
1029 COMMAND_ORB_NOTIFY; 1025 COMMAND_ORB_NOTIFY;
1030 1026
1031 if (cmd->sc_data_direction == DMA_FROM_DEVICE) 1027 if (cmd->sc_data_direction == DMA_FROM_DEVICE)
@@ -1036,7 +1032,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1036 COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA); 1032 COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
1037 1033
1038 if (cmd->use_sg && sbp2_command_orb_map_scatterlist(orb) < 0) 1034 if (cmd->use_sg && sbp2_command_orb_map_scatterlist(orb) < 0)
1039 goto fail_map_payload; 1035 goto fail_mapping;
1040 1036
1041 fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request)); 1037 fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
1042 1038
@@ -1045,15 +1041,17 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1045 memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd)); 1041 memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
1046 1042
1047 orb->base.callback = complete_command_orb; 1043 orb->base.callback = complete_command_orb;
1044 orb->base.request_bus =
1045 dma_map_single(device->card->device, &orb->request,
1046 sizeof(orb->request), DMA_TO_DEVICE);
1047 if (dma_mapping_error(orb->base.request_bus))
1048 goto fail_mapping;
1048 1049
1049 sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation, 1050 sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation,
1050 sd->command_block_agent_address + SBP2_ORB_POINTER); 1051 sd->command_block_agent_address + SBP2_ORB_POINTER);
1051 1052
1052 return 0; 1053 return 0;
1053 1054
1054 fail_map_payload:
1055 dma_unmap_single(device->card->device, orb->base.request_bus,
1056 sizeof(orb->request), DMA_TO_DEVICE);
1057 fail_mapping: 1055 fail_mapping:
1058 kfree(orb); 1056 kfree(orb);
1059 fail_alloc: 1057 fail_alloc:
@@ -1087,7 +1085,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
1087 fw_notify("setting fix_capacity for %s\n", unit->device.bus_id); 1085 fw_notify("setting fix_capacity for %s\n", unit->device.bus_id);
1088 sdev->fix_capacity = 1; 1086 sdev->fix_capacity = 1;
1089 } 1087 }
1090 1088 if (sd->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
1089 blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
1091 return 0; 1090 return 0;
1092} 1091}
1093 1092
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 7aebb8ae0efa..39e5cd12aa52 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -135,17 +135,17 @@ static void update_hop_count(struct fw_node *node)
135 int i; 135 int i;
136 136
137 for (i = 0; i < node->port_count; i++) { 137 for (i = 0; i < node->port_count; i++) {
138 if (node->ports[i].node == NULL) 138 if (node->ports[i] == NULL)
139 continue; 139 continue;
140 140
141 if (node->ports[i].node->max_hops > max_child_hops) 141 if (node->ports[i]->max_hops > max_child_hops)
142 max_child_hops = node->ports[i].node->max_hops; 142 max_child_hops = node->ports[i]->max_hops;
143 143
144 if (node->ports[i].node->max_depth > depths[0]) { 144 if (node->ports[i]->max_depth > depths[0]) {
145 depths[1] = depths[0]; 145 depths[1] = depths[0];
146 depths[0] = node->ports[i].node->max_depth; 146 depths[0] = node->ports[i]->max_depth;
147 } else if (node->ports[i].node->max_depth > depths[1]) 147 } else if (node->ports[i]->max_depth > depths[1])
148 depths[1] = node->ports[i].node->max_depth; 148 depths[1] = node->ports[i]->max_depth;
149 } 149 }
150 150
151 node->max_depth = depths[0] + 1; 151 node->max_depth = depths[0] + 1;
@@ -172,7 +172,8 @@ static struct fw_node *build_tree(struct fw_card *card,
172 struct list_head stack, *h; 172 struct list_head stack, *h;
173 u32 *next_sid, *end, q; 173 u32 *next_sid, *end, q;
174 int i, port_count, child_port_count, phy_id, parent_count, stack_depth; 174 int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
175 int gap_count, topology_type; 175 int gap_count;
176 bool beta_repeaters_present;
176 177
177 local_node = NULL; 178 local_node = NULL;
178 node = NULL; 179 node = NULL;
@@ -182,7 +183,7 @@ static struct fw_node *build_tree(struct fw_card *card,
182 phy_id = 0; 183 phy_id = 0;
183 irm_node = NULL; 184 irm_node = NULL;
184 gap_count = SELF_ID_GAP_COUNT(*sid); 185 gap_count = SELF_ID_GAP_COUNT(*sid);
185 topology_type = 0; 186 beta_repeaters_present = false;
186 187
187 while (sid < end) { 188 while (sid < end) {
188 next_sid = count_ports(sid, &port_count, &child_port_count); 189 next_sid = count_ports(sid, &port_count, &child_port_count);
@@ -214,7 +215,7 @@ static struct fw_node *build_tree(struct fw_card *card,
214 215
215 node = fw_node_create(q, port_count, card->color); 216 node = fw_node_create(q, port_count, card->color);
216 if (node == NULL) { 217 if (node == NULL) {
217 fw_error("Out of memory while building topology."); 218 fw_error("Out of memory while building topology.\n");
218 return NULL; 219 return NULL;
219 } 220 }
220 221
@@ -224,11 +225,6 @@ static struct fw_node *build_tree(struct fw_card *card,
224 if (SELF_ID_CONTENDER(q)) 225 if (SELF_ID_CONTENDER(q))
225 irm_node = node; 226 irm_node = node;
226 227
227 if (node->phy_speed == SCODE_BETA)
228 topology_type |= FW_TOPOLOGY_B;
229 else
230 topology_type |= FW_TOPOLOGY_A;
231
232 parent_count = 0; 228 parent_count = 0;
233 229
234 for (i = 0; i < port_count; i++) { 230 for (i = 0; i < port_count; i++) {
@@ -249,12 +245,12 @@ static struct fw_node *build_tree(struct fw_card *card,
249 break; 245 break;
250 246
251 case SELFID_PORT_CHILD: 247 case SELFID_PORT_CHILD:
252 node->ports[i].node = child; 248 node->ports[i] = child;
253 /* 249 /*
254 * Fix up parent reference for this 250 * Fix up parent reference for this
255 * child node. 251 * child node.
256 */ 252 */
257 child->ports[child->color].node = node; 253 child->ports[child->color] = node;
258 child->color = card->color; 254 child->color = card->color;
259 child = fw_node(child->link.next); 255 child = fw_node(child->link.next);
260 break; 256 break;
@@ -278,6 +274,10 @@ static struct fw_node *build_tree(struct fw_card *card,
278 list_add_tail(&node->link, &stack); 274 list_add_tail(&node->link, &stack);
279 stack_depth += 1 - child_port_count; 275 stack_depth += 1 - child_port_count;
280 276
277 if (node->phy_speed == SCODE_BETA &&
278 parent_count + child_port_count > 1)
279 beta_repeaters_present = true;
280
281 /* 281 /*
282 * If all PHYs does not report the same gap count 282 * If all PHYs does not report the same gap count
283 * setting, we fall back to 63 which will force a gap 283 * setting, we fall back to 63 which will force a gap
@@ -295,7 +295,7 @@ static struct fw_node *build_tree(struct fw_card *card,
295 card->root_node = node; 295 card->root_node = node;
296 card->irm_node = irm_node; 296 card->irm_node = irm_node;
297 card->gap_count = gap_count; 297 card->gap_count = gap_count;
298 card->topology_type = topology_type; 298 card->beta_repeaters_present = beta_repeaters_present;
299 299
300 return local_node; 300 return local_node;
301} 301}
@@ -321,7 +321,7 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root,
321 node->color = card->color; 321 node->color = card->color;
322 322
323 for (i = 0; i < node->port_count; i++) { 323 for (i = 0; i < node->port_count; i++) {
324 child = node->ports[i].node; 324 child = node->ports[i];
325 if (!child) 325 if (!child)
326 continue; 326 continue;
327 if (child->color == card->color) 327 if (child->color == card->color)
@@ -382,11 +382,11 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
382 struct fw_node *tree; 382 struct fw_node *tree;
383 int i; 383 int i;
384 384
385 tree = node1->ports[port].node; 385 tree = node1->ports[port];
386 node0->ports[port].node = tree; 386 node0->ports[port] = tree;
387 for (i = 0; i < tree->port_count; i++) { 387 for (i = 0; i < tree->port_count; i++) {
388 if (tree->ports[i].node == node1) { 388 if (tree->ports[i] == node1) {
389 tree->ports[i].node = node0; 389 tree->ports[i] = node0;
390 break; 390 break;
391 } 391 }
392 } 392 }
@@ -437,19 +437,17 @@ update_tree(struct fw_card *card, struct fw_node *root)
437 card->irm_node = node0; 437 card->irm_node = node0;
438 438
439 for (i = 0; i < node0->port_count; i++) { 439 for (i = 0; i < node0->port_count; i++) {
440 if (node0->ports[i].node && node1->ports[i].node) { 440 if (node0->ports[i] && node1->ports[i]) {
441 /* 441 /*
442 * This port didn't change, queue the 442 * This port didn't change, queue the
443 * connected node for further 443 * connected node for further
444 * investigation. 444 * investigation.
445 */ 445 */
446 if (node0->ports[i].node->color == card->color) 446 if (node0->ports[i]->color == card->color)
447 continue; 447 continue;
448 list_add_tail(&node0->ports[i].node->link, 448 list_add_tail(&node0->ports[i]->link, &list0);
449 &list0); 449 list_add_tail(&node1->ports[i]->link, &list1);
450 list_add_tail(&node1->ports[i].node->link, 450 } else if (node0->ports[i]) {
451 &list1);
452 } else if (node0->ports[i].node) {
453 /* 451 /*
454 * The nodes connected here were 452 * The nodes connected here were
455 * unplugged; unref the lost nodes and 453 * unplugged; unref the lost nodes and
@@ -457,10 +455,10 @@ update_tree(struct fw_card *card, struct fw_node *root)
457 * them. 455 * them.
458 */ 456 */
459 457
460 for_each_fw_node(card, node0->ports[i].node, 458 for_each_fw_node(card, node0->ports[i],
461 report_lost_node); 459 report_lost_node);
462 node0->ports[i].node = NULL; 460 node0->ports[i] = NULL;
463 } else if (node1->ports[i].node) { 461 } else if (node1->ports[i]) {
464 /* 462 /*
465 * One or more node were connected to 463 * One or more node were connected to
466 * this port. Move the new nodes into 464 * this port. Move the new nodes into
@@ -468,7 +466,7 @@ update_tree(struct fw_card *card, struct fw_node *root)
468 * callbacks for them. 466 * callbacks for them.
469 */ 467 */
470 move_tree(node0, node1, i); 468 move_tree(node0, node1, i);
471 for_each_fw_node(card, node0->ports[i].node, 469 for_each_fw_node(card, node0->ports[i],
472 report_found_node); 470 report_found_node);
473 } 471 }
474 } 472 }
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index 363b6cbcd0b3..1b56b4ac7fb2 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -20,12 +20,6 @@
20#define __fw_topology_h 20#define __fw_topology_h
21 21
22enum { 22enum {
23 FW_TOPOLOGY_A = 0x01,
24 FW_TOPOLOGY_B = 0x02,
25 FW_TOPOLOGY_MIXED = 0x03,
26};
27
28enum {
29 FW_NODE_CREATED = 0x00, 23 FW_NODE_CREATED = 0x00,
30 FW_NODE_UPDATED = 0x01, 24 FW_NODE_UPDATED = 0x01,
31 FW_NODE_DESTROYED = 0x02, 25 FW_NODE_DESTROYED = 0x02,
@@ -33,21 +27,16 @@ enum {
33 FW_NODE_LINK_OFF = 0x04, 27 FW_NODE_LINK_OFF = 0x04,
34}; 28};
35 29
36struct fw_port {
37 struct fw_node *node;
38 unsigned speed : 3; /* S100, S200, ... S3200 */
39};
40
41struct fw_node { 30struct fw_node {
42 u16 node_id; 31 u16 node_id;
43 u8 color; 32 u8 color;
44 u8 port_count; 33 u8 port_count;
45 unsigned link_on : 1; 34 u8 link_on : 1;
46 unsigned initiated_reset : 1; 35 u8 initiated_reset : 1;
47 unsigned b_path : 1; 36 u8 b_path : 1;
48 u8 phy_speed : 3; /* As in the self ID packet. */ 37 u8 phy_speed : 2; /* As in the self ID packet. */
49 u8 max_speed : 5; /* Minimum of all phy-speeds and port speeds on 38 u8 max_speed : 2; /* Minimum of all phy-speeds on the path from the
50 * the path from the local node to this node. */ 39 * local node to this node. */
51 u8 max_depth : 4; /* Maximum depth to any leaf node */ 40 u8 max_depth : 4; /* Maximum depth to any leaf node */
52 u8 max_hops : 4; /* Max hops in this sub tree */ 41 u8 max_hops : 4; /* Max hops in this sub tree */
53 atomic_t ref_count; 42 atomic_t ref_count;
@@ -58,7 +47,7 @@ struct fw_node {
58 /* Upper layer specific data. */ 47 /* Upper layer specific data. */
59 void *data; 48 void *data;
60 49
61 struct fw_port ports[0]; 50 struct fw_node *ports[0];
62}; 51};
63 52
64static inline struct fw_node * 53static inline struct fw_node *
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index acdc3be38c61..5abed193f4a6 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -81,7 +81,6 @@
81 81
82#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args) 82#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
83#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) 83#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
84#define fw_debug(s, args...) printk(KERN_DEBUG KBUILD_MODNAME ": " s, ## args)
85 84
86static inline void 85static inline void
87fw_memcpy_from_be32(void *_dst, void *_src, size_t size) 86fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
@@ -246,7 +245,7 @@ struct fw_card {
246 struct fw_node *irm_node; 245 struct fw_node *irm_node;
247 int color; 246 int color;
248 int gap_count; 247 int gap_count;
249 int topology_type; 248 bool beta_repeaters_present;
250 249
251 int index; 250 int index;
252 251