author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-09 18:50:56 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-09 18:50:56 -0400
commit		71780f59e127bb281a9302d430495ca9586c14e7 (patch)
tree		32e2354df58f9ed7c777c2e95868f3695826753b /drivers
parent		36b774102e5ede8d0384684bd394c8285dce5a53 (diff)
parent		7aa484815f8c4defd01366f239b71da5e6b8a791 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (31 commits)
firewire: fw-sbp2: fix DMA mapping of management ORBs
firewire: fw-sbp2: fix DMA mapping of command ORBs
firewire: fw-sbp2: fix DMA mapping of S/G tables
firewire: fw-sbp2: add a boundary check
firewire: fw-sbp2: correctly align page tables
firewire: fw-sbp2: memset wants string.h
firewire: fw-sbp2: use correct speed in sbp2_agent_reset
firewire: fw-sbp2: correctly dereference by container_of
firewire: Document userspace ioctl interface.
firewire: fw-sbp2: implement nonexclusive login
firewire: fw-sbp2: let SCSI shutdown commands through before logout
firewire: fw-sbp2: implement max sectors limit for some old bridges
firewire: simplify a struct type
firewire: support S100B...S400B and link slower than PHY
firewire: optimize gap count with 1394b leaf nodes
firewire: remove unused macro
firewire: missing newline in printk
firewire: fw-sbp2: remove unused struct member
ieee1394: remove old isochronous ABI
ieee1394: sbp2: change some module parameters from int to bool
...
Diffstat (limited to 'drivers')
30 files changed, 465 insertions, 840 deletions
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index 9eb1edacd825..0aeab3218bb6 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -336,8 +336,11 @@ fw_card_bm_work(struct work_struct *work)
 	}
 
  pick_me:
-	/* Now figure out what gap count to set. */
-	if (card->topology_type == FW_TOPOLOGY_A &&
+	/*
+	 * Pick a gap count from 1394a table E-1. The table doesn't cover
+	 * the typically much larger 1394b beta repeater delays though.
+	 */
+	if (!card->beta_repeaters_present &&
 	    card->root_node->max_hops < ARRAY_SIZE(gap_count_table))
 		gap_count = gap_count_table[card->root_node->max_hops];
 	else
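The fw-card.c hunk above keys the gap count choice to the new beta_repeaters_present flag instead of the old FW_TOPOLOGY_A topology type. As a rough, self-contained restatement (not part of the patch; the table entries below are placeholders for the real 1394a table E-1 values in fw-card.c, and the fallback of 63 is the value the topology code elsewhere uses to force a gap count reconfiguration):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Placeholder entries; the real gap_count_table lives in fw-card.c. */
static const int gap_count_table[] = { 63, 5, 7, 8, 10, 13, 16, 18 };

static int pick_gap_count(int beta_repeaters_present, unsigned int max_hops)
{
	/* Table E-1 only covers 1394a delays, so skip it for 1394b repeaters. */
	if (!beta_repeaters_present && max_hops < ARRAY_SIZE(gap_count_table))
		return gap_count_table[max_hops];

	return 63;
}

int main(void)
{
	/* a 3-hop all-1394a bus vs. the same bus with a beta repeater present */
	printf("%d vs. %d\n", pick_gap_count(0, 3), pick_gap_count(1, 3));
	return 0;
}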
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index dbb76427d529..75388641a7d3 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -397,7 +397,7 @@ static int ioctl_send_request(struct client *client, void *buffer)
 			request->tcode & 0x1f,
 			device->node->node_id,
 			request->generation,
-			device->node->max_speed,
+			device->max_speed,
 			request->offset,
 			response->response.data, request->length,
 			complete_transaction, response);
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index c1ce465d9710..2b6586341635 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -401,8 +401,7 @@ static int read_rom(struct fw_device *device, int index, u32 * data)
 
 	offset = 0xfffff0000400ULL + index * 4;
 	fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
-			device->node_id,
-			device->generation, SCODE_100,
+			device->node_id, device->generation, device->max_speed,
 			offset, NULL, 4, complete_transaction, &callback_data);
 
 	wait_for_completion(&callback_data.done);
@@ -418,6 +417,8 @@ static int read_bus_info_block(struct fw_device *device)
 	u32 stack[16], sp, key;
 	int i, end, length;
 
+	device->max_speed = SCODE_100;
+
 	/* First read the bus info block. */
 	for (i = 0; i < 5; i++) {
 		if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
@@ -434,6 +435,33 @@ static int read_bus_info_block(struct fw_device *device)
 			return -1;
 	}
 
+	device->max_speed = device->node->max_speed;
+
+	/*
+	 * Determine the speed of
+	 *   - devices with link speed less than PHY speed,
+	 *   - devices with 1394b PHY (unless only connected to 1394a PHYs),
+	 *   - all devices if there are 1394b repeaters.
+	 * Note, we cannot use the bus info block's link_spd as starting point
+	 * because some buggy firmwares set it lower than necessary and because
+	 * 1394-1995 nodes do not have the field.
+	 */
+	if ((rom[2] & 0x7) < device->max_speed ||
+	    device->max_speed == SCODE_BETA ||
+	    device->card->beta_repeaters_present) {
+		u32 dummy;
+
+		/* for S1600 and S3200 */
+		if (device->max_speed == SCODE_BETA)
+			device->max_speed = device->card->link_speed;
+
+		while (device->max_speed > SCODE_100) {
+			if (read_rom(device, 0, &dummy) == RCODE_COMPLETE)
+				break;
+			device->max_speed--;
+		}
+	}
+
 	/*
 	 * Now parse the config rom. The config rom is a recursive
 	 * directory structure so we parse it using a stack of
@@ -680,8 +708,10 @@ static void fw_device_init(struct work_struct *work)
 			    FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
 		fw_device_shutdown(&device->work.work);
 	else
-		fw_notify("created new fw device %s (%d config rom retries)\n",
-			  device->device.bus_id, device->config_rom_retries);
+		fw_notify("created new fw device %s "
+			  "(%d config rom retries, S%d00)\n",
+			  device->device.bus_id, device->config_rom_retries,
+			  1 << device->max_speed);
 
 	/*
 	 * Reschedule the IRM work if we just finished reading the
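The read_bus_info_block() changes above seed device->max_speed from the node's max_speed and, for 1394b devices or buses with beta repeaters, step it down one speed code at a time until a quadlet read of the config ROM succeeds. The new fw_notify() line then reports the result as "S%d00" via 1 << device->max_speed. A tiny, hypothetical standalone illustration of that speed-code-to-name mapping (the numeric values SCODE_100 = 0 through SCODE_800 = 3 follow the IEEE 1394 speed codes):

#include <stdio.h>

int main(void)
{
	unsigned int scode;

	/* 1 << 0 -> S100, 1 << 1 -> S200, 1 << 2 -> S400, 1 << 3 -> S800 */
	for (scode = 0; scode <= 3; scode++)
		printf("scode %u -> S%d00\n", scode, 1 << scode);
	return 0;
}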
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index af1723eae4ba..d13e6a69707f 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -40,6 +40,7 @@ struct fw_device {
 	struct fw_node *node;
 	int node_id;
 	int generation;
+	unsigned max_speed;
 	struct fw_card *card;
 	struct device device;
 	struct list_head link;
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 96c8ac5b86cc..41476abc0693 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -1934,12 +1934,12 @@ static int pci_suspend(struct pci_dev *pdev, pm_message_t state)
 	free_irq(pdev->irq, ohci);
 	err = pci_save_state(pdev);
 	if (err) {
-		fw_error("pci_save_state failed with %d", err);
+		fw_error("pci_save_state failed\n");
 		return err;
 	}
 	err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
 	if (err) {
-		fw_error("pci_set_power_state failed with %d", err);
+		fw_error("pci_set_power_state failed\n");
 		return err;
 	}
 
@@ -1955,7 +1955,7 @@ static int pci_resume(struct pci_dev *pdev)
 	pci_restore_state(pdev);
 	err = pci_enable_device(pdev);
 	if (err) {
-		fw_error("pci_enable_device failed with %d", err);
+		fw_error("pci_enable_device failed\n");
 		return err;
 	}
 
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index a98d3915e26f..7c53be0387fb 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -30,10 +30,13 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/mod_devicetable.h>
 #include <linux/device.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
+#include <linux/blkdev.h>
+#include <linux/string.h>
 #include <linux/timer.h>
 
 #include <scsi/scsi.h>
@@ -46,6 +49,18 @@
 #include "fw-topology.h"
 #include "fw-device.h"
 
+/*
+ * So far only bridges from Oxford Semiconductor are known to support
+ * concurrent logins. Depending on firmware, four or two concurrent logins
+ * are possible on OXFW911 and newer Oxsemi bridges.
+ *
+ * Concurrent logins are useful together with cluster filesystems.
+ */
+static int sbp2_param_exclusive_login = 1;
+module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
+MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
+		 "(default = Y, use N for concurrent initiators)");
+
 /* I don't know why the SCSI stack doesn't define something like this... */
 typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
 
@@ -154,7 +169,7 @@ struct sbp2_orb {
 #define MANAGEMENT_ORB_LUN(v)			((v))
 #define MANAGEMENT_ORB_FUNCTION(v)		((v) << 16)
 #define MANAGEMENT_ORB_RECONNECT(v)		((v) << 20)
-#define MANAGEMENT_ORB_EXCLUSIVE		((1) << 28)
+#define MANAGEMENT_ORB_EXCLUSIVE(v)		((v) ? 1 << 28 : 0)
#define MANAGEMENT_ORB_REQUEST_FORMAT(v)	((v) << 29)
 #define MANAGEMENT_ORB_NOTIFY			((1) << 31)
 
@@ -205,9 +220,8 @@ struct sbp2_command_orb {
 	scsi_done_fn_t done;
 	struct fw_unit *unit;
 
-	struct sbp2_pointer page_table[SG_ALL];
+	struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
 	dma_addr_t page_table_bus;
-	dma_addr_t request_buffer_bus;
 };
 
 /*
@@ -347,8 +361,7 @@ sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
 	spin_unlock_irqrestore(&device->card->lock, flags);
 
 	fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
-			node_id, generation,
-			device->node->max_speed, offset,
+			node_id, generation, device->max_speed, offset,
 			&orb->pointer, sizeof(orb->pointer),
 			complete_transaction, orb);
 }
@@ -383,7 +396,7 @@ static void
 complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
 {
 	struct sbp2_management_orb *orb =
-		(struct sbp2_management_orb *)base_orb;
+		container_of(base_orb, struct sbp2_management_orb, base);
 
 	if (status)
 		memcpy(&orb->status, status, sizeof(*status));
@@ -403,21 +416,11 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 	if (orb == NULL)
 		return -ENOMEM;
 
-	/*
-	 * The sbp2 device is going to send a block read request to
-	 * read out the request from host memory, so map it for dma.
-	 */
-	orb->base.request_bus =
-		dma_map_single(device->card->device, &orb->request,
-			       sizeof(orb->request), DMA_TO_DEVICE);
-	if (dma_mapping_error(orb->base.request_bus))
-		goto out;
-
 	orb->response_bus =
 		dma_map_single(device->card->device, &orb->response,
 			       sizeof(orb->response), DMA_FROM_DEVICE);
 	if (dma_mapping_error(orb->response_bus))
-		goto out;
+		goto fail_mapping_response;
 
 	orb->request.response.high = 0;
 	orb->request.response.low = orb->response_bus;
@@ -432,14 +435,9 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 	orb->request.status_fifo.high = sd->address_handler.offset >> 32;
 	orb->request.status_fifo.low = sd->address_handler.offset;
 
-	/*
-	 * FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
-	 * login and 1 second reconnect time. The reconnect setting
-	 * is probably fine, but the exclusive login should be an option.
-	 */
 	if (function == SBP2_LOGIN_REQUEST) {
 		orb->request.misc |=
-			MANAGEMENT_ORB_EXCLUSIVE |
+			MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login) |
 			MANAGEMENT_ORB_RECONNECT(0);
 	}
 
@@ -448,6 +446,12 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 
 	init_completion(&orb->done);
 	orb->base.callback = complete_management_orb;
+	orb->base.request_bus =
+		dma_map_single(device->card->device, &orb->request,
+			       sizeof(orb->request), DMA_TO_DEVICE);
+	if (dma_mapping_error(orb->base.request_bus))
+		goto fail_mapping_request;
+
 	sbp2_send_orb(&orb->base, unit,
 		      node_id, generation, sd->management_agent_address);
 
@@ -479,9 +483,10 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
  out:
 	dma_unmap_single(device->card->device, orb->base.request_bus,
 			 sizeof(orb->request), DMA_TO_DEVICE);
+ fail_mapping_request:
 	dma_unmap_single(device->card->device, orb->response_bus,
 			 sizeof(orb->response), DMA_FROM_DEVICE);
-
+ fail_mapping_response:
 	if (response)
 		fw_memcpy_from_be32(response,
 				    orb->response, sizeof(orb->response));
@@ -511,7 +516,7 @@ static int sbp2_agent_reset(struct fw_unit *unit)
 		return -ENOMEM;
 
 	fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
-			sd->node_id, sd->generation, SCODE_400,
+			sd->node_id, sd->generation, device->max_speed,
 			sd->command_block_agent_address + SBP2_AGENT_RESET,
 			&zero, sizeof(zero), complete_agent_reset_write, t);
 
@@ -521,17 +526,15 @@ static int sbp2_agent_reset(struct fw_unit *unit)
 static void sbp2_reconnect(struct work_struct *work);
 static struct scsi_host_template scsi_driver_template;
 
-static void
-release_sbp2_device(struct kref *kref)
+static void release_sbp2_device(struct kref *kref)
 {
 	struct sbp2_device *sd = container_of(kref, struct sbp2_device, kref);
 	struct Scsi_Host *host =
 		container_of((void *)sd, struct Scsi_Host, hostdata[0]);
 
+	scsi_remove_host(host);
 	sbp2_send_management_orb(sd->unit, sd->node_id, sd->generation,
 				 SBP2_LOGOUT_REQUEST, sd->login_id, NULL);
-
-	scsi_remove_host(host);
 	fw_core_remove_address_handler(&sd->address_handler);
 	fw_notify("removed sbp2 unit %s\n", sd->unit->device.bus_id);
 	put_device(&sd->unit->device);
@@ -833,7 +836,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
 static void
 complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
 {
-	struct sbp2_command_orb *orb = (struct sbp2_command_orb *)base_orb;
+	struct sbp2_command_orb *orb =
+		container_of(base_orb, struct sbp2_command_orb, base);
 	struct fw_unit *unit = orb->unit;
 	struct fw_device *device = fw_device(unit->device.parent);
 	struct scatterlist *sg;
@@ -880,12 +884,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
 
 	if (orb->page_table_bus != 0)
 		dma_unmap_single(device->card->device, orb->page_table_bus,
-				 sizeof(orb->page_table_bus), DMA_TO_DEVICE);
-
-	if (orb->request_buffer_bus != 0)
-		dma_unmap_single(device->card->device, orb->request_buffer_bus,
-				 sizeof(orb->request_buffer_bus),
-				 DMA_FROM_DEVICE);
+				 sizeof(orb->page_table), DMA_TO_DEVICE);
 
 	orb->cmd->result = result;
 	orb->done(orb->cmd);
@@ -900,7 +899,6 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 	struct fw_device *device = fw_device(unit->device.parent);
 	struct scatterlist *sg;
 	int sg_len, l, i, j, count;
-	size_t size;
 	dma_addr_t sg_addr;
 
 	sg = (struct scatterlist *)orb->cmd->request_buffer;
@@ -935,6 +933,11 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 		sg_len = sg_dma_len(sg + i);
 		sg_addr = sg_dma_address(sg + i);
 		while (sg_len) {
+			/* FIXME: This won't get us out of the pinch. */
+			if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
+				fw_error("page table overflow\n");
+				goto fail_page_table;
+			}
 			l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
 			orb->page_table[j].low = sg_addr;
 			orb->page_table[j].high = (l << 16);
@@ -944,7 +947,13 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 		}
 	}
 
-	size = sizeof(orb->page_table[0]) * j;
+	fw_memcpy_to_be32(orb->page_table, orb->page_table,
+			  sizeof(orb->page_table[0]) * j);
+	orb->page_table_bus =
+		dma_map_single(device->card->device, orb->page_table,
+			       sizeof(orb->page_table), DMA_TO_DEVICE);
+	if (dma_mapping_error(orb->page_table_bus))
+		goto fail_page_table;
 
 	/*
 	 * The data_descriptor pointer is the one case where we need
@@ -953,20 +962,12 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
 	 * initiator (i.e. us), but data_descriptor can refer to data
 	 * on other nodes so we need to put our ID in descriptor.high.
 	 */
-
-	orb->page_table_bus =
-		dma_map_single(device->card->device, orb->page_table,
-			       size, DMA_TO_DEVICE);
-	if (dma_mapping_error(orb->page_table_bus))
-		goto fail_page_table;
 	orb->request.data_descriptor.high = sd->address_high;
 	orb->request.data_descriptor.low = orb->page_table_bus;
 	orb->request.misc |=
 		COMMAND_ORB_PAGE_TABLE_PRESENT |
 		COMMAND_ORB_DATA_SIZE(j);
 
-	fw_memcpy_to_be32(orb->page_table, orb->page_table, size);
-
 	return 0;
 
  fail_page_table:
@@ -991,7 +992,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	 * transfer direction not handled.
 	 */
 	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
-		fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
+		fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
 		cmd->result = DID_ERROR << 16;
 		done(cmd);
 		return 0;
@@ -1005,11 +1006,6 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 
 	/* Initialize rcode to something not RCODE_COMPLETE. */
 	orb->base.rcode = -1;
-	orb->base.request_bus =
-		dma_map_single(device->card->device, &orb->request,
-			       sizeof(orb->request), DMA_TO_DEVICE);
-	if (dma_mapping_error(orb->base.request_bus))
-		goto fail_mapping;
 
 	orb->unit = unit;
 	orb->done = done;
@@ -1024,8 +1020,8 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	 * if we set this to max_speed + 7, we get the right value.
 	 */
 	orb->request.misc =
-		COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) |
-		COMMAND_ORB_SPEED(device->node->max_speed) |
+		COMMAND_ORB_MAX_PAYLOAD(device->max_speed + 7) |
+		COMMAND_ORB_SPEED(device->max_speed) |
 		COMMAND_ORB_NOTIFY;
 
 	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
@@ -1036,7 +1032,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
 
 	if (cmd->use_sg && sbp2_command_orb_map_scatterlist(orb) < 0)
-		goto fail_map_payload;
+		goto fail_mapping;
 
 	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
 
@@ -1045,15 +1041,17 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
 
 	orb->base.callback = complete_command_orb;
+	orb->base.request_bus =
+		dma_map_single(device->card->device, &orb->request,
+			       sizeof(orb->request), DMA_TO_DEVICE);
+	if (dma_mapping_error(orb->base.request_bus))
+		goto fail_mapping;
 
 	sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation,
 		      sd->command_block_agent_address + SBP2_ORB_POINTER);
 
 	return 0;
 
- fail_map_payload:
-	dma_unmap_single(device->card->device, orb->base.request_bus,
-			 sizeof(orb->request), DMA_TO_DEVICE);
  fail_mapping:
 	kfree(orb);
  fail_alloc:
@@ -1087,7 +1085,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
 		fw_notify("setting fix_capacity for %s\n", unit->device.bus_id);
 		sdev->fix_capacity = 1;
 	}
-
+	if (sd->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
+		blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
 	return 0;
 }
 
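The fw-sbp2.c changes above turn the hardwired exclusive login into the exclusive_login module parameter and replace the old MANAGEMENT_ORB_EXCLUSIVE constant with the MANAGEMENT_ORB_EXCLUSIVE(v) macro. A small standalone sketch (not driver code) of what that macro does to the login ORB's misc field:

#include <stdio.h>

/* Copied from the hunk above. */
#define MANAGEMENT_ORB_EXCLUSIVE(v)	((v) ? 1 << 28 : 0)
#define MANAGEMENT_ORB_RECONNECT(v)	((v) << 20)

int main(void)
{
	unsigned int exclusive = MANAGEMENT_ORB_EXCLUSIVE(1) |
				 MANAGEMENT_ORB_RECONNECT(0);
	unsigned int shared    = MANAGEMENT_ORB_EXCLUSIVE(0) |
				 MANAGEMENT_ORB_RECONNECT(0);

	/* Only bit 28 differs: 0x10000000 vs. 0x00000000. */
	printf("exclusive login: 0x%08x, shared login: 0x%08x\n",
	       exclusive, shared);
	return 0;
}

Since the parameter is declared with mode 0644, it should also appear as a writable file under /sys/module/.../parameters/exclusive_login in addition to being settable at module load time; the default of 1 keeps the previous exclusive-login behaviour.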
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 7aebb8ae0efa..39e5cd12aa52 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -135,17 +135,17 @@ static void update_hop_count(struct fw_node *node)
 	int i;
 
 	for (i = 0; i < node->port_count; i++) {
-		if (node->ports[i].node == NULL)
+		if (node->ports[i] == NULL)
 			continue;
 
-		if (node->ports[i].node->max_hops > max_child_hops)
-			max_child_hops = node->ports[i].node->max_hops;
+		if (node->ports[i]->max_hops > max_child_hops)
+			max_child_hops = node->ports[i]->max_hops;
 
-		if (node->ports[i].node->max_depth > depths[0]) {
+		if (node->ports[i]->max_depth > depths[0]) {
 			depths[1] = depths[0];
-			depths[0] = node->ports[i].node->max_depth;
-		} else if (node->ports[i].node->max_depth > depths[1])
-			depths[1] = node->ports[i].node->max_depth;
+			depths[0] = node->ports[i]->max_depth;
+		} else if (node->ports[i]->max_depth > depths[1])
+			depths[1] = node->ports[i]->max_depth;
 	}
 
 	node->max_depth = depths[0] + 1;
@@ -172,7 +172,8 @@ static struct fw_node *build_tree(struct fw_card *card,
 	struct list_head stack, *h;
 	u32 *next_sid, *end, q;
 	int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
-	int gap_count, topology_type;
+	int gap_count;
+	bool beta_repeaters_present;
 
 	local_node = NULL;
 	node = NULL;
@@ -182,7 +183,7 @@ static struct fw_node *build_tree(struct fw_card *card,
 	phy_id = 0;
 	irm_node = NULL;
 	gap_count = SELF_ID_GAP_COUNT(*sid);
-	topology_type = 0;
+	beta_repeaters_present = false;
 
 	while (sid < end) {
 		next_sid = count_ports(sid, &port_count, &child_port_count);
@@ -214,7 +215,7 @@ static struct fw_node *build_tree(struct fw_card *card,
 
 		node = fw_node_create(q, port_count, card->color);
 		if (node == NULL) {
-			fw_error("Out of memory while building topology.");
+			fw_error("Out of memory while building topology.\n");
 			return NULL;
 		}
 
@@ -224,11 +225,6 @@ static struct fw_node *build_tree(struct fw_card *card,
 		if (SELF_ID_CONTENDER(q))
 			irm_node = node;
 
-		if (node->phy_speed == SCODE_BETA)
-			topology_type |= FW_TOPOLOGY_B;
-		else
-			topology_type |= FW_TOPOLOGY_A;
-
 		parent_count = 0;
 
 		for (i = 0; i < port_count; i++) {
@@ -249,12 +245,12 @@ static struct fw_node *build_tree(struct fw_card *card,
 				break;
 
 			case SELFID_PORT_CHILD:
-				node->ports[i].node = child;
+				node->ports[i] = child;
 				/*
 				 * Fix up parent reference for this
 				 * child node.
 				 */
-				child->ports[child->color].node = node;
+				child->ports[child->color] = node;
 				child->color = card->color;
 				child = fw_node(child->link.next);
 				break;
@@ -278,6 +274,10 @@ static struct fw_node *build_tree(struct fw_card *card,
 		list_add_tail(&node->link, &stack);
 		stack_depth += 1 - child_port_count;
 
+		if (node->phy_speed == SCODE_BETA &&
+		    parent_count + child_port_count > 1)
+			beta_repeaters_present = true;
+
 		/*
 		 * If all PHYs does not report the same gap count
 		 * setting, we fall back to 63 which will force a gap
@@ -295,7 +295,7 @@ static struct fw_node *build_tree(struct fw_card *card,
 	card->root_node = node;
 	card->irm_node = irm_node;
 	card->gap_count = gap_count;
-	card->topology_type = topology_type;
+	card->beta_repeaters_present = beta_repeaters_present;
 
 	return local_node;
 }
@@ -321,7 +321,7 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root,
 		node->color = card->color;
 
 		for (i = 0; i < node->port_count; i++) {
-			child = node->ports[i].node;
+			child = node->ports[i];
 			if (!child)
 				continue;
 			if (child->color == card->color)
@@ -382,11 +382,11 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
 	struct fw_node *tree;
 	int i;
 
-	tree = node1->ports[port].node;
-	node0->ports[port].node = tree;
+	tree = node1->ports[port];
+	node0->ports[port] = tree;
 	for (i = 0; i < tree->port_count; i++) {
-		if (tree->ports[i].node == node1) {
-			tree->ports[i].node = node0;
+		if (tree->ports[i] == node1) {
+			tree->ports[i] = node0;
 			break;
 		}
 	}
@@ -437,19 +437,17 @@ update_tree(struct fw_card *card, struct fw_node *root)
 			card->irm_node = node0;
 
 		for (i = 0; i < node0->port_count; i++) {
-			if (node0->ports[i].node && node1->ports[i].node) {
+			if (node0->ports[i] && node1->ports[i]) {
 				/*
 				 * This port didn't change, queue the
 				 * connected node for further
 				 * investigation.
 				 */
-				if (node0->ports[i].node->color == card->color)
+				if (node0->ports[i]->color == card->color)
 					continue;
-				list_add_tail(&node0->ports[i].node->link,
-					      &list0);
-				list_add_tail(&node1->ports[i].node->link,
-					      &list1);
-			} else if (node0->ports[i].node) {
+				list_add_tail(&node0->ports[i]->link, &list0);
+				list_add_tail(&node1->ports[i]->link, &list1);
+			} else if (node0->ports[i]) {
 				/*
 				 * The nodes connected here were
 				 * unplugged; unref the lost nodes and
@@ -457,10 +455,10 @@ update_tree(struct fw_card *card, struct fw_node *root)
 				 * them.
 				 */
 
-				for_each_fw_node(card, node0->ports[i].node,
+				for_each_fw_node(card, node0->ports[i],
 						 report_lost_node);
-				node0->ports[i].node = NULL;
-			} else if (node1->ports[i].node) {
+				node0->ports[i] = NULL;
+			} else if (node1->ports[i]) {
 				/*
 				 * One or more node were connected to
 				 * this port. Move the new nodes into
@@ -468,7 +466,7 @@ update_tree(struct fw_card *card, struct fw_node *root)
 				 * callbacks for them.
 				 */
 				move_tree(node0, node1, i);
-				for_each_fw_node(card, node0->ports[i].node,
+				for_each_fw_node(card, node0->ports[i],
 						 report_found_node);
 			}
 		}
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index 363b6cbcd0b3..1b56b4ac7fb2 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -20,12 +20,6 @@
 #define __fw_topology_h
 
 enum {
-	FW_TOPOLOGY_A =		0x01,
-	FW_TOPOLOGY_B =		0x02,
-	FW_TOPOLOGY_MIXED =	0x03,
-};
-
-enum {
 	FW_NODE_CREATED =   0x00,
 	FW_NODE_UPDATED =   0x01,
 	FW_NODE_DESTROYED = 0x02,
@@ -33,21 +27,16 @@ enum {
 	FW_NODE_LINK_OFF =  0x04,
 };
 
-struct fw_port {
-	struct fw_node *node;
-	unsigned speed : 3; /* S100, S200, ... S3200 */
-};
-
 struct fw_node {
 	u16 node_id;
 	u8 color;
 	u8 port_count;
-	unsigned link_on : 1;
-	unsigned initiated_reset : 1;
-	unsigned b_path : 1;
-	u8 phy_speed : 3; /* As in the self ID packet. */
-	u8 max_speed : 5; /* Minimum of all phy-speeds and port speeds on
-			   * the path from the local node to this node. */
+	u8 link_on : 1;
+	u8 initiated_reset : 1;
+	u8 b_path : 1;
+	u8 phy_speed : 2; /* As in the self ID packet. */
+	u8 max_speed : 2; /* Minimum of all phy-speeds on the path from the
+			   * local node to this node. */
 	u8 max_depth : 4; /* Maximum depth to any leaf node */
 	u8 max_hops : 4;  /* Max hops in this sub tree */
 	atomic_t ref_count;
@@ -58,7 +47,7 @@ struct fw_node {
 	/* Upper layer specific data. */
 	void *data;
 
-	struct fw_port ports[0];
+	struct fw_node *ports[0];
 };
 
 static inline struct fw_node *
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index acdc3be38c61..5abed193f4a6 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -81,7 +81,6 @@
 
 #define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
 #define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
-#define fw_debug(s, args...) printk(KERN_DEBUG KBUILD_MODNAME ": " s, ## args)
 
 static inline void
 fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
@@ -246,7 +245,7 @@ struct fw_card {
 	struct fw_node *irm_node;
 	int color;
 	int gap_count;
-	int topology_type;
+	bool beta_repeaters_present;
 
 	int index;
 
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 208141377612..65722117ab6e 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2280,7 +2280,7 @@ static void dv1394_remove_host(struct hpsb_host *host)
 	} while (video);
 
 	if (found_ohci_card)
-		class_device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR,
+		device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR,
 			IEEE1394_MINOR_BLOCK_DV1394 * 16 + (host->id << 2)));
 }
 
@@ -2295,9 +2295,9 @@ static void dv1394_add_host(struct hpsb_host *host)
 
 	ohci = (struct ti_ohci *)host->hostdata;
 
-	class_device_create(hpsb_protocol_class, NULL, MKDEV(
+	device_create(hpsb_protocol_class, NULL, MKDEV(
 		IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16 + (id<<2)),
-		NULL, "dv1394-%d", id);
+		"dv1394-%d", id);
 
 	dv1394_init(ohci, DV1394_NTSC, MODE_RECEIVE);
 	dv1394_init(ohci, DV1394_NTSC, MODE_TRANSMIT);
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 7c13fb3c167b..93362eed94ed 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -599,9 +599,7 @@ static void ether1394_add_host(struct hpsb_host *host)
 	}
 
 	SET_MODULE_OWNER(dev);
-
-	/* This used to be &host->device in Linux 2.6.20 and before. */
-	SET_NETDEV_DEV(dev, host->device.parent);
+	SET_NETDEV_DEV(dev, &host->device);
 
 	priv = netdev_priv(dev);
 	INIT_LIST_HEAD(&priv->ip_node_list);
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 83a493312751..b6425469b6ee 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -483,37 +483,6 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
 	return retval;
 }
 
-/**
- * hpsb_listen_channel - enable receving a certain isochronous channel
- *
- * Reception is handled through the @hl's iso_receive op.
- */
-int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
-			unsigned int channel)
-{
-	if (channel > 63) {
-		HPSB_ERR("%s called with invalid channel", __FUNCTION__);
-		return -EINVAL;
-	}
-	if (host->iso_listen_count[channel]++ == 0)
-		return host->driver->devctl(host, ISO_LISTEN_CHANNEL, channel);
-	return 0;
-}
-
-/**
- * hpsb_unlisten_channel - disable receving a certain isochronous channel
- */
-void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
-			   unsigned int channel)
-{
-	if (channel > 63) {
-		HPSB_ERR("%s called with invalid channel", __FUNCTION__);
-		return;
-	}
-	if (--host->iso_listen_count[channel] == 0)
-		host->driver->devctl(host, ISO_UNLISTEN_CHANNEL, channel);
-}
-
 static void init_hpsb_highlevel(struct hpsb_host *host)
 {
 	INIT_LIST_HEAD(&dummy_zero_addr.host_list);
@@ -570,20 +539,6 @@ void highlevel_host_reset(struct hpsb_host *host)
 	read_unlock_irqrestore(&hl_irqs_lock, flags);
 }
 
-void highlevel_iso_receive(struct hpsb_host *host, void *data, size_t length)
-{
-	unsigned long flags;
-	struct hpsb_highlevel *hl;
-	int channel = (((quadlet_t *)data)[0] >> 8) & 0x3f;
-
-	read_lock_irqsave(&hl_irqs_lock, flags);
-	list_for_each_entry(hl, &hl_irqs, irq_list) {
-		if (hl->iso_receive)
-			hl->iso_receive(host, channel, data, length);
-	}
-	read_unlock_irqrestore(&hl_irqs_lock, flags);
-}
-
 void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
 			   void *data, size_t length)
 {
diff --git a/drivers/ieee1394/highlevel.h b/drivers/ieee1394/highlevel.h
index 63474f7ee69d..eb9fe321e09a 100644
--- a/drivers/ieee1394/highlevel.h
+++ b/drivers/ieee1394/highlevel.h
@@ -26,9 +26,7 @@ struct hpsb_address_serve {
 struct hpsb_highlevel {
 	const char *name;
 
-	/* Any of the following pointers can legally be NULL, except for
-	 * iso_receive which can only be NULL when you don't request
-	 * channels. */
+	/* Any of the following pointers can legally be NULL. */
 
 	/* New host initialized. Will also be called during
 	 * hpsb_register_highlevel for all hosts already installed. */
@@ -43,13 +41,6 @@ struct hpsb_highlevel {
 	 * You can not expect to be able to do stock hpsb_reads. */
 	void (*host_reset)(struct hpsb_host *host);
 
-	/* An isochronous packet was received. Channel contains the channel
-	 * number for your convenience, it is also contained in the included
-	 * packet header (first quadlet, CRCs are missing). You may get called
-	 * for channel/host combinations you did not request. */
-	void (*iso_receive)(struct hpsb_host *host, int channel,
-			    quadlet_t *data, size_t length);
-
 	/* A write request was received on either the FCP_COMMAND (direction =
 	 * 0) or the FCP_RESPONSE (direction = 1) register. The cts arg
 	 * contains the cts field (first byte of data). */
@@ -109,7 +100,6 @@ int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
 int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
 		     u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
 		     u16 flags);
-void highlevel_iso_receive(struct hpsb_host *host, void *data, size_t length);
 void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
 			   void *data, size_t length);
 
@@ -125,10 +115,6 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
 			    struct hpsb_address_ops *ops, u64 start, u64 end);
 int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
 			      u64 start);
-int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
-			unsigned int channel);
-void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
-			   unsigned int channel);
 
 void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host);
 void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index bd0755c789c5..8dd09d850419 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -154,15 +154,16 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 
 	memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device));
 	h->device.parent = dev;
+	set_dev_node(&h->device, dev_to_node(dev));
 	snprintf(h->device.bus_id, BUS_ID_SIZE, "fw-host%d", h->id);
 
-	h->class_dev.dev = &h->device;
-	h->class_dev.class = &hpsb_host_class;
-	snprintf(h->class_dev.class_id, BUS_ID_SIZE, "fw-host%d", h->id);
+	h->host_dev.parent = &h->device;
+	h->host_dev.class = &hpsb_host_class;
+	snprintf(h->host_dev.bus_id, BUS_ID_SIZE, "fw-host%d", h->id);
 
 	if (device_register(&h->device))
 		goto fail;
-	if (class_device_register(&h->class_dev)) {
+	if (device_register(&h->host_dev)) {
 		device_unregister(&h->device);
 		goto fail;
 	}
@@ -202,7 +203,7 @@ void hpsb_remove_host(struct hpsb_host *host)
 	host->driver = &dummy_driver;
 	highlevel_remove_host(host);
 
-	class_device_unregister(&host->class_dev);
+	device_unregister(&host->host_dev);
 	device_unregister(&host->device);
 }
 
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index feb55d032294..e4e8aeb4d778 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -28,8 +28,6 @@ struct hpsb_host {
 	struct timer_list timeout;
 	unsigned long timeout_interval;
 
-	unsigned char iso_listen_count[64];
-
 	int node_count;      /* number of identified nodes on this bus */
 	int selfid_count;    /* total number of SelfIDs received */
 	int nodes_active;    /* number of nodes with active link layer */
@@ -57,7 +55,7 @@ struct hpsb_host {
 	struct hpsb_host_driver *driver;
 	struct pci_dev *pdev;
 	struct device device;
-	struct class_device class_dev;
+	struct device host_dev;
 
 	struct delayed_work delayed_reset;
 	unsigned config_roms:31;
@@ -99,12 +97,6 @@ enum devctl_cmd {
 	/* Cancel all outstanding async requests without resetting the bus.
 	 * Return void. */
 	CANCEL_REQUESTS,
-
-	/* Start or stop receiving isochronous channel in arg. Return void.
-	 * This acts as an optimization hint, hosts are not required not to
-	 * listen on unrequested channels. */
-	ISO_LISTEN_CHANNEL,
-	ISO_UNLISTEN_CHANNEL
 };
 
 enum isoctl_cmd {
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 8f71b6a06aa0..0fc8c6e559e4 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -1028,11 +1028,6 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
 		handle_incoming_packet(host, tcode, data, size, write_acked);
 		break;
 
-
-	case TCODE_ISO_DATA:
-		highlevel_iso_receive(host, data, size);
-		break;
-
 	case TCODE_CYCLE_START:
 		/* simply ignore this packet if it is passed on */
 		break;
@@ -1316,7 +1311,6 @@ EXPORT_SYMBOL(hpsb_make_streampacket);
 EXPORT_SYMBOL(hpsb_make_lockpacket);
 EXPORT_SYMBOL(hpsb_make_lock64packet);
 EXPORT_SYMBOL(hpsb_make_phypacket);
-EXPORT_SYMBOL(hpsb_make_isopacket);
 EXPORT_SYMBOL(hpsb_read);
 EXPORT_SYMBOL(hpsb_write);
 EXPORT_SYMBOL(hpsb_packet_success);
@@ -1327,8 +1321,6 @@ EXPORT_SYMBOL(hpsb_unregister_highlevel);
 EXPORT_SYMBOL(hpsb_register_addrspace);
 EXPORT_SYMBOL(hpsb_unregister_addrspace);
 EXPORT_SYMBOL(hpsb_allocate_and_register_addrspace);
-EXPORT_SYMBOL(hpsb_listen_channel);
-EXPORT_SYMBOL(hpsb_unlisten_channel);
 EXPORT_SYMBOL(hpsb_get_hostinfo);
 EXPORT_SYMBOL(hpsb_create_hostinfo);
 EXPORT_SYMBOL(hpsb_destroy_hostinfo);
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h index ad526523d0ef..21d50f73a210 100644 --- a/drivers/ieee1394/ieee1394_core.h +++ b/drivers/ieee1394/ieee1394_core.h | |||
@@ -24,9 +24,8 @@ struct hpsb_packet { | |||
24 | 24 | ||
25 | nodeid_t node_id; | 25 | nodeid_t node_id; |
26 | 26 | ||
27 | /* Async and Iso types should be clear, raw means send-as-is, do not | 27 | /* hpsb_raw = send as-is, do not CRC (but still byte-swap it) */ |
28 | * CRC! Byte swapping shall still be done in this case. */ | 28 | enum { hpsb_async, hpsb_raw } __attribute__((packed)) type; |
29 | enum { hpsb_async, hpsb_iso, hpsb_raw } __attribute__((packed)) type; | ||
30 | 29 | ||
31 | /* Okay, this is core internal and a no care for hosts. | 30 | /* Okay, this is core internal and a no care for hosts. |
32 | * queued = queued for sending | 31 | * queued = queued for sending |
@@ -37,7 +36,7 @@ struct hpsb_packet { | |||
37 | hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete | 36 | hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete |
38 | } __attribute__((packed)) state; | 37 | } __attribute__((packed)) state; |
39 | 38 | ||
40 | /* These are core internal. */ | 39 | /* These are core-internal. */ |
41 | signed char tlabel; | 40 | signed char tlabel; |
42 | signed char ack_code; | 41 | signed char ack_code; |
43 | unsigned char tcode; | 42 | unsigned char tcode; |
@@ -62,11 +61,15 @@ struct hpsb_packet { | |||
62 | /* Store jiffies for implementing bus timeouts. */ | 61 | /* Store jiffies for implementing bus timeouts. */ |
63 | unsigned long sendtime; | 62 | unsigned long sendtime; |
64 | 63 | ||
65 | /* Sizes are in bytes. *data can be DMA-mapped. */ | 64 | /* Core-internal. */ |
66 | size_t allocated_data_size; /* as allocated */ | 65 | size_t allocated_data_size; /* as allocated */ |
66 | |||
67 | /* Sizes are in bytes. To be set by caller of hpsb_alloc_packet. */ | ||
67 | size_t data_size; /* as filled in */ | 68 | size_t data_size; /* as filled in */ |
68 | size_t header_size; /* as filled in, not counting the CRC */ | 69 | size_t header_size; /* as filled in, not counting the CRC */ |
69 | quadlet_t *data; | 70 | |
71 | /* Buffers */ | ||
72 | quadlet_t *data; /* can be DMA-mapped */ | ||
70 | quadlet_t header[5]; | 73 | quadlet_t header[5]; |
71 | quadlet_t embedded_data[0]; /* keep as last member */ | 74 | quadlet_t embedded_data[0]; /* keep as last member */ |
72 | }; | 75 | }; |
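
The reordered comments pin down field ownership: allocated_data_size is core-internal, data_size and header_size are filled in by the caller of hpsb_alloc_packet, and embedded_data[] has to stay the last member because the payload is normally carved out of the same allocation that holds the packet struct. A minimal userspace sketch of that flexible-array layout (hypothetical names, only loosely modelled on what hpsb_alloc_packet presumably does):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned int quadlet_t;

struct pkt {
	size_t allocated_data_size;	/* as allocated (core-internal) */
	size_t data_size;		/* as filled in by the caller */
	quadlet_t *data;		/* may later be DMA-mapped */
	quadlet_t header[5];
	quadlet_t embedded_data[];	/* must remain the last member */
};

/* Allocate header and payload in one block; the payload simply follows
 * the struct, so data can point at embedded_data. */
static struct pkt *pkt_alloc(size_t data_size)
{
	struct pkt *p = calloc(1, sizeof(*p) + data_size);

	if (!p)
		return NULL;
	p->allocated_data_size = data_size;
	if (data_size)
		p->data = p->embedded_data;
	return p;
}

int main(void)
{
	struct pkt *p = pkt_alloc(16);

	if (!p)
		return 1;
	p->data_size = 8;		/* caller fills in the used size */
	memset(p->data, 0xaa, p->data_size);
	printf("allocated %zu, used %zu\n", p->allocated_data_size, p->data_size);
	free(p);
	return 0;
}
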
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c index 40078ce930c8..c39c70a8aa9f 100644 --- a/drivers/ieee1394/ieee1394_transactions.c +++ b/drivers/ieee1394/ieee1394_transactions.c | |||
@@ -89,18 +89,6 @@ static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode, | |||
89 | packet->expect_response = 1; | 89 | packet->expect_response = 1; |
90 | } | 90 | } |
91 | 91 | ||
92 | static void fill_iso_packet(struct hpsb_packet *packet, int length, int channel, | ||
93 | int tag, int sync) | ||
94 | { | ||
95 | packet->header[0] = (length << 16) | (tag << 14) | (channel << 8) | ||
96 | | (TCODE_ISO_DATA << 4) | sync; | ||
97 | |||
98 | packet->header_size = 4; | ||
99 | packet->data_size = length; | ||
100 | packet->type = hpsb_iso; | ||
101 | packet->tcode = TCODE_ISO_DATA; | ||
102 | } | ||
103 | |||
104 | static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data) | 92 | static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data) |
105 | { | 93 | { |
106 | packet->header[0] = data; | 94 | packet->header[0] = data; |
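
For reference, the deleted fill_iso_packet() was the only in-core user of the isochronous header layout; a standalone sketch of the same quadlet packing, with the field widths taken straight from the removed lines:

#include <stdio.h>

#define TCODE_ISO_DATA 0xa

/* Pack an isochronous header quadlet the way the removed helper did:
 * data_length[31:16] | tag[15:14] | channel[13:8] | tcode[7:4] | sy[3:0] */
static unsigned int pack_iso_header(unsigned int length, unsigned int tag,
				    unsigned int channel, unsigned int sync)
{
	return (length << 16) | (tag << 14) | (channel << 8) |
	       (TCODE_ISO_DATA << 4) | sync;
}

int main(void)
{
	unsigned int h = pack_iso_header(512, 1, 35, 0);

	printf("header quadlet: 0x%08x\n", h);
	printf("length=%u tag=%u channel=%u tcode=0x%x sy=%u\n",
	       h >> 16, (h >> 14) & 0x3, (h >> 8) & 0x3f,
	       (h >> 4) & 0xf, h & 0xf);
	return 0;
}
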
@@ -491,24 +479,6 @@ struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data) | |||
491 | return p; | 479 | return p; |
492 | } | 480 | } |
493 | 481 | ||
494 | struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host, | ||
495 | int length, int channel, | ||
496 | int tag, int sync) | ||
497 | { | ||
498 | struct hpsb_packet *p; | ||
499 | |||
500 | p = hpsb_alloc_packet(length); | ||
501 | if (!p) | ||
502 | return NULL; | ||
503 | |||
504 | p->host = host; | ||
505 | fill_iso_packet(p, length, channel, tag, sync); | ||
506 | |||
507 | p->generation = get_hpsb_generation(host); | ||
508 | |||
509 | return p; | ||
510 | } | ||
511 | |||
512 | /* | 482 | /* |
513 | * FIXME - these functions should probably read from / write to user space to | 483 | * FIXME - these functions should probably read from / write to user space to |
514 | * avoid in kernel buffers for user space callers | 484 | * avoid in kernel buffers for user space callers |
diff --git a/drivers/ieee1394/ieee1394_transactions.h b/drivers/ieee1394/ieee1394_transactions.h index 86b8ee692ea7..d2d5bc3546d7 100644 --- a/drivers/ieee1394/ieee1394_transactions.h +++ b/drivers/ieee1394/ieee1394_transactions.h | |||
@@ -19,8 +19,6 @@ struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, | |||
19 | nodeid_t node, u64 addr, int extcode, | 19 | nodeid_t node, u64 addr, int extcode, |
20 | octlet_t *data, octlet_t arg); | 20 | octlet_t *data, octlet_t arg); |
21 | struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data); | 21 | struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data); |
22 | struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host, int length, | ||
23 | int channel, int tag, int sync); | ||
24 | struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host, | 22 | struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host, |
25 | nodeid_t node, u64 addr, | 23 | nodeid_t node, u64 addr, |
26 | quadlet_t *buffer, size_t length); | 24 | quadlet_t *buffer, size_t length); |
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index 81b3864d2ba7..c4d3d4131f01 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
20 | #include <linux/freezer.h> | 20 | #include <linux/freezer.h> |
21 | #include <asm/atomic.h> | 21 | #include <asm/atomic.h> |
22 | #include <asm/semaphore.h> | ||
22 | 23 | ||
23 | #include "csr.h" | 24 | #include "csr.h" |
24 | #include "highlevel.h" | 25 | #include "highlevel.h" |
@@ -145,8 +146,6 @@ static struct csr1212_bus_ops nodemgr_csr_ops = { | |||
145 | * but now we are much simpler because of the LDM. | 146 | * but now we are much simpler because of the LDM. |
146 | */ | 147 | */ |
147 | 148 | ||
148 | static DEFINE_MUTEX(nodemgr_serialize); | ||
149 | |||
150 | struct host_info { | 149 | struct host_info { |
151 | struct hpsb_host *host; | 150 | struct hpsb_host *host; |
152 | struct list_head list; | 151 | struct list_head list; |
@@ -154,7 +153,7 @@ struct host_info { | |||
154 | }; | 153 | }; |
155 | 154 | ||
156 | static int nodemgr_bus_match(struct device * dev, struct device_driver * drv); | 155 | static int nodemgr_bus_match(struct device * dev, struct device_driver * drv); |
157 | static int nodemgr_uevent(struct class_device *cdev, char **envp, int num_envp, | 156 | static int nodemgr_uevent(struct device *dev, char **envp, int num_envp, |
158 | char *buffer, int buffer_size); | 157 | char *buffer, int buffer_size); |
159 | static void nodemgr_resume_ne(struct node_entry *ne); | 158 | static void nodemgr_resume_ne(struct node_entry *ne); |
160 | static void nodemgr_remove_ne(struct node_entry *ne); | 159 | static void nodemgr_remove_ne(struct node_entry *ne); |
@@ -165,37 +164,38 @@ struct bus_type ieee1394_bus_type = { | |||
165 | .match = nodemgr_bus_match, | 164 | .match = nodemgr_bus_match, |
166 | }; | 165 | }; |
167 | 166 | ||
168 | static void host_cls_release(struct class_device *class_dev) | 167 | static void host_cls_release(struct device *dev) |
169 | { | 168 | { |
170 | put_device(&container_of((class_dev), struct hpsb_host, class_dev)->device); | 169 | put_device(&container_of((dev), struct hpsb_host, host_dev)->device); |
171 | } | 170 | } |
172 | 171 | ||
173 | struct class hpsb_host_class = { | 172 | struct class hpsb_host_class = { |
174 | .name = "ieee1394_host", | 173 | .name = "ieee1394_host", |
175 | .release = host_cls_release, | 174 | .dev_release = host_cls_release, |
176 | }; | 175 | }; |
177 | 176 | ||
178 | static void ne_cls_release(struct class_device *class_dev) | 177 | static void ne_cls_release(struct device *dev) |
179 | { | 178 | { |
180 | put_device(&container_of((class_dev), struct node_entry, class_dev)->device); | 179 | put_device(&container_of((dev), struct node_entry, node_dev)->device); |
181 | } | 180 | } |
182 | 181 | ||
183 | static struct class nodemgr_ne_class = { | 182 | static struct class nodemgr_ne_class = { |
184 | .name = "ieee1394_node", | 183 | .name = "ieee1394_node", |
185 | .release = ne_cls_release, | 184 | .dev_release = ne_cls_release, |
186 | }; | 185 | }; |
187 | 186 | ||
188 | static void ud_cls_release(struct class_device *class_dev) | 187 | static void ud_cls_release(struct device *dev) |
189 | { | 188 | { |
190 | put_device(&container_of((class_dev), struct unit_directory, class_dev)->device); | 189 | put_device(&container_of((dev), struct unit_directory, unit_dev)->device); |
191 | } | 190 | } |
192 | 191 | ||
193 | /* The name here is only so that unit directory hotplug works with old | 192 | /* The name here is only so that unit directory hotplug works with old |
194 | * style hotplug, which only ever did unit directories anyway. */ | 193 | * style hotplug, which only ever did unit directories anyway. |
194 | */ | ||
195 | static struct class nodemgr_ud_class = { | 195 | static struct class nodemgr_ud_class = { |
196 | .name = "ieee1394", | 196 | .name = "ieee1394", |
197 | .release = ud_cls_release, | 197 | .dev_release = ud_cls_release, |
198 | .uevent = nodemgr_uevent, | 198 | .dev_uevent = nodemgr_uevent, |
199 | }; | 199 | }; |
200 | 200 | ||
201 | static struct hpsb_highlevel nodemgr_highlevel; | 201 | static struct hpsb_highlevel nodemgr_highlevel; |
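
All three release callbacks now follow one pattern: map the embedded class struct device back to its enclosing object with container_of() and drop the reference that object holds on its primary struct device. A self-contained userspace sketch of that pattern (simplified refcounting, hypothetical names):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_device {
	int refcount;
	const char *name;
};

/* One object, two embedded "devices": the primary one on the bus and a
 * second one registered with a class.  The class core only ever hands
 * back a pointer to the embedded class member. */
struct node {
	struct fake_device device;	/* primary */
	struct fake_device node_dev;	/* registered with the class */
};

static void put_device(struct fake_device *dev)
{
	if (--dev->refcount == 0)
		printf("last reference to %s dropped\n", dev->name);
}

/* Class release callback: recover the enclosing node from the class
 * member and drop the reference it held on the primary device. */
static void node_cls_release(struct fake_device *dev)
{
	put_device(&container_of(dev, struct node, node_dev)->device);
}

int main(void)
{
	struct node n = {
		.device   = { .refcount = 1, .name = "node0" },
		.node_dev = { .refcount = 1, .name = "node0 (class)" },
	};

	node_cls_release(&n.node_dev);
	return 0;
}
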
@@ -730,11 +730,11 @@ static DEFINE_MUTEX(nodemgr_serialize_remove_uds); | |||
730 | 730 | ||
731 | static void nodemgr_remove_uds(struct node_entry *ne) | 731 | static void nodemgr_remove_uds(struct node_entry *ne) |
732 | { | 732 | { |
733 | struct class_device *cdev; | 733 | struct device *dev; |
734 | struct unit_directory *tmp, *ud; | 734 | struct unit_directory *tmp, *ud; |
735 | 735 | ||
736 | /* Iteration over nodemgr_ud_class.children has to be protected by | 736 | /* Iteration over nodemgr_ud_class.devices has to be protected by |
737 | * nodemgr_ud_class.sem, but class_device_unregister() will eventually | 737 | * nodemgr_ud_class.sem, but device_unregister() will eventually |
738 | * take nodemgr_ud_class.sem too. Therefore pick out one ud at a time, | 738 | * take nodemgr_ud_class.sem too. Therefore pick out one ud at a time, |
739 | * release the semaphore, and then unregister the ud. Since this code | 739 | * release the semaphore, and then unregister the ud. Since this code |
740 | * may be called from other contexts besides the knodemgrds, protect the | 740 | * may be called from other contexts besides the knodemgrds, protect the |
@@ -744,9 +744,9 @@ static void nodemgr_remove_uds(struct node_entry *ne) | |||
744 | for (;;) { | 744 | for (;;) { |
745 | ud = NULL; | 745 | ud = NULL; |
746 | down(&nodemgr_ud_class.sem); | 746 | down(&nodemgr_ud_class.sem); |
747 | list_for_each_entry(cdev, &nodemgr_ud_class.children, node) { | 747 | list_for_each_entry(dev, &nodemgr_ud_class.devices, node) { |
748 | tmp = container_of(cdev, struct unit_directory, | 748 | tmp = container_of(dev, struct unit_directory, |
749 | class_dev); | 749 | unit_dev); |
750 | if (tmp->ne == ne) { | 750 | if (tmp->ne == ne) { |
751 | ud = tmp; | 751 | ud = tmp; |
752 | break; | 752 | break; |
@@ -755,7 +755,7 @@ static void nodemgr_remove_uds(struct node_entry *ne) | |||
755 | up(&nodemgr_ud_class.sem); | 755 | up(&nodemgr_ud_class.sem); |
756 | if (ud == NULL) | 756 | if (ud == NULL) |
757 | break; | 757 | break; |
758 | class_device_unregister(&ud->class_dev); | 758 | device_unregister(&ud->unit_dev); |
759 | device_unregister(&ud->device); | 759 | device_unregister(&ud->device); |
760 | } | 760 | } |
761 | mutex_unlock(&nodemgr_serialize_remove_uds); | 761 | mutex_unlock(&nodemgr_serialize_remove_uds); |
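
The loop keeps the locking trick described in the comment: the class semaphore only guards the list walk, so one matching unit directory is picked per pass, the semaphore is dropped, and only then is the unit unregistered, since unregistering may take the same semaphore again. A standalone sketch of that pick-one-then-unlock pattern with a plain mutex and list (hypothetical types):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct unit {
	struct unit *next;
	int owner;			/* stands in for ud->ne */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct unit *units;

/* Needs list_lock itself, hence the pick-one dance in the caller. */
static void unregister_unit(struct unit *u)
{
	struct unit **p;

	pthread_mutex_lock(&list_lock);
	for (p = &units; *p; p = &(*p)->next)
		if (*p == u) {
			*p = u->next;
			break;
		}
	pthread_mutex_unlock(&list_lock);
	free(u);
}

static void remove_units_of(int owner)
{
	struct unit *u;

	for (;;) {
		u = NULL;
		pthread_mutex_lock(&list_lock);	/* protects the walk only */
		for (struct unit *t = units; t; t = t->next)
			if (t->owner == owner) {
				u = t;
				break;
			}
		pthread_mutex_unlock(&list_lock);
		if (!u)
			break;
		unregister_unit(u);		/* takes list_lock again */
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct unit *u = malloc(sizeof(*u));

		u->owner = i & 1;
		u->next = units;
		units = u;
	}
	remove_units_of(1);
	printf("units with owner 1 removed\n");
	return 0;
}
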
@@ -772,10 +772,9 @@ static void nodemgr_remove_ne(struct node_entry *ne) | |||
772 | 772 | ||
773 | HPSB_DEBUG("Node removed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]", | 773 | HPSB_DEBUG("Node removed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]", |
774 | NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid); | 774 | NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid); |
775 | |||
776 | nodemgr_remove_uds(ne); | 775 | nodemgr_remove_uds(ne); |
777 | 776 | ||
778 | class_device_unregister(&ne->class_dev); | 777 | device_unregister(&ne->node_dev); |
779 | device_unregister(dev); | 778 | device_unregister(dev); |
780 | 779 | ||
781 | put_device(dev); | 780 | put_device(dev); |
@@ -783,7 +782,9 @@ static void nodemgr_remove_ne(struct node_entry *ne) | |||
783 | 782 | ||
784 | static int __nodemgr_remove_host_dev(struct device *dev, void *data) | 783 | static int __nodemgr_remove_host_dev(struct device *dev, void *data) |
785 | { | 784 | { |
786 | nodemgr_remove_ne(container_of(dev, struct node_entry, device)); | 785 | if (dev->bus == &ieee1394_bus_type) |
786 | nodemgr_remove_ne(container_of(dev, struct node_entry, | ||
787 | device)); | ||
787 | return 0; | 788 | return 0; |
788 | } | 789 | } |
789 | 790 | ||
@@ -850,14 +851,14 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr | |||
850 | snprintf(ne->device.bus_id, BUS_ID_SIZE, "%016Lx", | 851 | snprintf(ne->device.bus_id, BUS_ID_SIZE, "%016Lx", |
851 | (unsigned long long)(ne->guid)); | 852 | (unsigned long long)(ne->guid)); |
852 | 853 | ||
853 | ne->class_dev.dev = &ne->device; | 854 | ne->node_dev.parent = &ne->device; |
854 | ne->class_dev.class = &nodemgr_ne_class; | 855 | ne->node_dev.class = &nodemgr_ne_class; |
855 | snprintf(ne->class_dev.class_id, BUS_ID_SIZE, "%016Lx", | 856 | snprintf(ne->node_dev.bus_id, BUS_ID_SIZE, "%016Lx", |
856 | (unsigned long long)(ne->guid)); | 857 | (unsigned long long)(ne->guid)); |
857 | 858 | ||
858 | if (device_register(&ne->device)) | 859 | if (device_register(&ne->device)) |
859 | goto fail_devreg; | 860 | goto fail_devreg; |
860 | if (class_device_register(&ne->class_dev)) | 861 | if (device_register(&ne->node_dev)) |
861 | goto fail_classdevreg; | 862 | goto fail_classdevreg; |
862 | get_device(&ne->device); | 863 | get_device(&ne->device); |
863 | 864 | ||
@@ -885,12 +886,12 @@ fail_alloc: | |||
885 | 886 | ||
886 | static struct node_entry *find_entry_by_guid(u64 guid) | 887 | static struct node_entry *find_entry_by_guid(u64 guid) |
887 | { | 888 | { |
888 | struct class_device *cdev; | 889 | struct device *dev; |
889 | struct node_entry *ne, *ret_ne = NULL; | 890 | struct node_entry *ne, *ret_ne = NULL; |
890 | 891 | ||
891 | down(&nodemgr_ne_class.sem); | 892 | down(&nodemgr_ne_class.sem); |
892 | list_for_each_entry(cdev, &nodemgr_ne_class.children, node) { | 893 | list_for_each_entry(dev, &nodemgr_ne_class.devices, node) { |
893 | ne = container_of(cdev, struct node_entry, class_dev); | 894 | ne = container_of(dev, struct node_entry, node_dev); |
894 | 895 | ||
895 | if (ne->guid == guid) { | 896 | if (ne->guid == guid) { |
896 | ret_ne = ne; | 897 | ret_ne = ne; |
@@ -906,12 +907,12 @@ static struct node_entry *find_entry_by_guid(u64 guid) | |||
906 | static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host, | 907 | static struct node_entry *find_entry_by_nodeid(struct hpsb_host *host, |
907 | nodeid_t nodeid) | 908 | nodeid_t nodeid) |
908 | { | 909 | { |
909 | struct class_device *cdev; | 910 | struct device *dev; |
910 | struct node_entry *ne, *ret_ne = NULL; | 911 | struct node_entry *ne, *ret_ne = NULL; |
911 | 912 | ||
912 | down(&nodemgr_ne_class.sem); | 913 | down(&nodemgr_ne_class.sem); |
913 | list_for_each_entry(cdev, &nodemgr_ne_class.children, node) { | 914 | list_for_each_entry(dev, &nodemgr_ne_class.devices, node) { |
914 | ne = container_of(cdev, struct node_entry, class_dev); | 915 | ne = container_of(dev, struct node_entry, node_dev); |
915 | 916 | ||
916 | if (ne->host == host && ne->nodeid == nodeid) { | 917 | if (ne->host == host && ne->nodeid == nodeid) { |
917 | ret_ne = ne; | 918 | ret_ne = ne; |
@@ -935,14 +936,14 @@ static void nodemgr_register_device(struct node_entry *ne, | |||
935 | snprintf(ud->device.bus_id, BUS_ID_SIZE, "%s-%u", | 936 | snprintf(ud->device.bus_id, BUS_ID_SIZE, "%s-%u", |
936 | ne->device.bus_id, ud->id); | 937 | ne->device.bus_id, ud->id); |
937 | 938 | ||
938 | ud->class_dev.dev = &ud->device; | 939 | ud->unit_dev.parent = &ud->device; |
939 | ud->class_dev.class = &nodemgr_ud_class; | 940 | ud->unit_dev.class = &nodemgr_ud_class; |
940 | snprintf(ud->class_dev.class_id, BUS_ID_SIZE, "%s-%u", | 941 | snprintf(ud->unit_dev.bus_id, BUS_ID_SIZE, "%s-%u", |
941 | ne->device.bus_id, ud->id); | 942 | ne->device.bus_id, ud->id); |
942 | 943 | ||
943 | if (device_register(&ud->device)) | 944 | if (device_register(&ud->device)) |
944 | goto fail_devreg; | 945 | goto fail_devreg; |
945 | if (class_device_register(&ud->class_dev)) | 946 | if (device_register(&ud->unit_dev)) |
946 | goto fail_classdevreg; | 947 | goto fail_classdevreg; |
947 | get_device(&ud->device); | 948 | get_device(&ud->device); |
948 | 949 | ||
@@ -1159,7 +1160,7 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent | |||
1159 | 1160 | ||
1160 | #ifdef CONFIG_HOTPLUG | 1161 | #ifdef CONFIG_HOTPLUG |
1161 | 1162 | ||
1162 | static int nodemgr_uevent(struct class_device *cdev, char **envp, int num_envp, | 1163 | static int nodemgr_uevent(struct device *dev, char **envp, int num_envp, |
1163 | char *buffer, int buffer_size) | 1164 | char *buffer, int buffer_size) |
1164 | { | 1165 | { |
1165 | struct unit_directory *ud; | 1166 | struct unit_directory *ud; |
@@ -1169,10 +1170,10 @@ static int nodemgr_uevent(struct class_device *cdev, char **envp, int num_envp, | |||
1169 | /* ieee1394:venNmoNspNverN */ | 1170 | /* ieee1394:venNmoNspNverN */ |
1170 | char buf[8 + 1 + 3 + 8 + 2 + 8 + 2 + 8 + 3 + 8 + 1]; | 1171 | char buf[8 + 1 + 3 + 8 + 2 + 8 + 2 + 8 + 3 + 8 + 1]; |
1171 | 1172 | ||
1172 | if (!cdev) | 1173 | if (!dev) |
1173 | return -ENODEV; | 1174 | return -ENODEV; |
1174 | 1175 | ||
1175 | ud = container_of(cdev, struct unit_directory, class_dev); | 1176 | ud = container_of(dev, struct unit_directory, unit_dev); |
1176 | 1177 | ||
1177 | if (ud->ne->in_limbo || ud->ignore_driver) | 1178 | if (ud->ne->in_limbo || ud->ignore_driver) |
1178 | return -ENODEV; | 1179 | return -ENODEV; |
@@ -1207,7 +1208,7 @@ do { \ | |||
1207 | 1208 | ||
1208 | #else | 1209 | #else |
1209 | 1210 | ||
1210 | static int nodemgr_uevent(struct class_device *cdev, char **envp, int num_envp, | 1211 | static int nodemgr_uevent(struct device *dev, char **envp, int num_envp, |
1211 | char *buffer, int buffer_size) | 1212 | char *buffer, int buffer_size) |
1212 | { | 1213 | { |
1213 | return -ENODEV; | 1214 | return -ENODEV; |
@@ -1378,8 +1379,10 @@ static void nodemgr_node_scan(struct host_info *hi, int generation) | |||
1378 | 1379 | ||
1379 | static void nodemgr_suspend_ne(struct node_entry *ne) | 1380 | static void nodemgr_suspend_ne(struct node_entry *ne) |
1380 | { | 1381 | { |
1381 | struct class_device *cdev; | 1382 | struct device *dev; |
1382 | struct unit_directory *ud; | 1383 | struct unit_directory *ud; |
1384 | struct device_driver *drv; | ||
1385 | int error; | ||
1383 | 1386 | ||
1384 | HPSB_DEBUG("Node suspended: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]", | 1387 | HPSB_DEBUG("Node suspended: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]", |
1385 | NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid); | 1388 | NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid); |
@@ -1388,15 +1391,24 @@ static void nodemgr_suspend_ne(struct node_entry *ne) | |||
1388 | WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo)); | 1391 | WARN_ON(device_create_file(&ne->device, &dev_attr_ne_in_limbo)); |
1389 | 1392 | ||
1390 | down(&nodemgr_ud_class.sem); | 1393 | down(&nodemgr_ud_class.sem); |
1391 | list_for_each_entry(cdev, &nodemgr_ud_class.children, node) { | 1394 | list_for_each_entry(dev, &nodemgr_ud_class.devices, node) { |
1392 | ud = container_of(cdev, struct unit_directory, class_dev); | 1395 | ud = container_of(dev, struct unit_directory, unit_dev); |
1393 | if (ud->ne != ne) | 1396 | if (ud->ne != ne) |
1394 | continue; | 1397 | continue; |
1395 | 1398 | ||
1396 | if (ud->device.driver && | 1399 | drv = get_driver(ud->device.driver); |
1397 | (!ud->device.driver->suspend || | 1400 | if (!drv) |
1398 | ud->device.driver->suspend(&ud->device, PMSG_SUSPEND))) | 1401 | continue; |
1402 | |||
1403 | error = 1; /* release if suspend is not implemented */ | ||
1404 | if (drv->suspend) { | ||
1405 | down(&ud->device.sem); | ||
1406 | error = drv->suspend(&ud->device, PMSG_SUSPEND); | ||
1407 | up(&ud->device.sem); | ||
1408 | } | ||
1409 | if (error) | ||
1399 | device_release_driver(&ud->device); | 1410 | device_release_driver(&ud->device); |
1411 | put_driver(drv); | ||
1400 | } | 1412 | } |
1401 | up(&nodemgr_ud_class.sem); | 1413 | up(&nodemgr_ud_class.sem); |
1402 | } | 1414 | } |
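
The rewritten suspend path no longer dereferences ud->device.driver directly: it pins the driver with get_driver(), calls the suspend hook, if any, under the device semaphore, and unbinds the device when the hook is absent or returns an error. A small userspace model of that decision flow (simplified refcounting, hypothetical names):

#include <stdio.h>

struct drv {
	int refs;
	int (*suspend)(void);		/* optional */
};

static struct drv *get_driver(struct drv *d)
{
	if (d)
		d->refs++;
	return d;
}

static void put_driver(struct drv *d)
{
	d->refs--;
}

/* No driver -> skip; no suspend hook -> treat as failure and unbind;
 * hook returning nonzero -> unbind as well. */
static void suspend_unit(struct drv *bound)
{
	struct drv *d = get_driver(bound);
	int error;

	if (!d)
		return;
	error = 1;			/* release if suspend is not implemented */
	if (d->suspend)
		error = d->suspend();
	if (error)
		printf("releasing driver from device\n");
	put_driver(d);
}

static int ok_suspend(void) { return 0; }

int main(void)
{
	struct drv with_hook = { .refs = 1, .suspend = ok_suspend };
	struct drv without_hook = { .refs = 1 };

	suspend_unit(&with_hook);	/* stays bound */
	suspend_unit(&without_hook);	/* gets released */
	suspend_unit(NULL);		/* nothing bound, nothing to do */
	return 0;
}
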
@@ -1404,20 +1416,29 @@ static void nodemgr_suspend_ne(struct node_entry *ne) | |||
1404 | 1416 | ||
1405 | static void nodemgr_resume_ne(struct node_entry *ne) | 1417 | static void nodemgr_resume_ne(struct node_entry *ne) |
1406 | { | 1418 | { |
1407 | struct class_device *cdev; | 1419 | struct device *dev; |
1408 | struct unit_directory *ud; | 1420 | struct unit_directory *ud; |
1421 | struct device_driver *drv; | ||
1409 | 1422 | ||
1410 | ne->in_limbo = 0; | 1423 | ne->in_limbo = 0; |
1411 | device_remove_file(&ne->device, &dev_attr_ne_in_limbo); | 1424 | device_remove_file(&ne->device, &dev_attr_ne_in_limbo); |
1412 | 1425 | ||
1413 | down(&nodemgr_ud_class.sem); | 1426 | down(&nodemgr_ud_class.sem); |
1414 | list_for_each_entry(cdev, &nodemgr_ud_class.children, node) { | 1427 | list_for_each_entry(dev, &nodemgr_ud_class.devices, node) { |
1415 | ud = container_of(cdev, struct unit_directory, class_dev); | 1428 | ud = container_of(dev, struct unit_directory, unit_dev); |
1416 | if (ud->ne != ne) | 1429 | if (ud->ne != ne) |
1417 | continue; | 1430 | continue; |
1418 | 1431 | ||
1419 | if (ud->device.driver && ud->device.driver->resume) | 1432 | drv = get_driver(ud->device.driver); |
1420 | ud->device.driver->resume(&ud->device); | 1433 | if (!drv) |
1434 | continue; | ||
1435 | |||
1436 | if (drv->resume) { | ||
1437 | down(&ud->device.sem); | ||
1438 | drv->resume(&ud->device); | ||
1439 | up(&ud->device.sem); | ||
1440 | } | ||
1441 | put_driver(drv); | ||
1421 | } | 1442 | } |
1422 | up(&nodemgr_ud_class.sem); | 1443 | up(&nodemgr_ud_class.sem); |
1423 | 1444 | ||
@@ -1428,23 +1449,32 @@ static void nodemgr_resume_ne(struct node_entry *ne) | |||
1428 | 1449 | ||
1429 | static void nodemgr_update_pdrv(struct node_entry *ne) | 1450 | static void nodemgr_update_pdrv(struct node_entry *ne) |
1430 | { | 1451 | { |
1452 | struct device *dev; | ||
1431 | struct unit_directory *ud; | 1453 | struct unit_directory *ud; |
1454 | struct device_driver *drv; | ||
1432 | struct hpsb_protocol_driver *pdrv; | 1455 | struct hpsb_protocol_driver *pdrv; |
1433 | struct class_device *cdev; | 1456 | int error; |
1434 | 1457 | ||
1435 | down(&nodemgr_ud_class.sem); | 1458 | down(&nodemgr_ud_class.sem); |
1436 | list_for_each_entry(cdev, &nodemgr_ud_class.children, node) { | 1459 | list_for_each_entry(dev, &nodemgr_ud_class.devices, node) { |
1437 | ud = container_of(cdev, struct unit_directory, class_dev); | 1460 | ud = container_of(dev, struct unit_directory, unit_dev); |
1438 | if (ud->ne != ne) | 1461 | if (ud->ne != ne) |
1439 | continue; | 1462 | continue; |
1440 | 1463 | ||
1441 | if (ud->device.driver) { | 1464 | drv = get_driver(ud->device.driver); |
1442 | pdrv = container_of(ud->device.driver, | 1465 | if (!drv) |
1443 | struct hpsb_protocol_driver, | 1466 | continue; |
1444 | driver); | 1467 | |
1445 | if (pdrv->update && pdrv->update(ud)) | 1468 | error = 0; |
1446 | device_release_driver(&ud->device); | 1469 | pdrv = container_of(drv, struct hpsb_protocol_driver, driver); |
1470 | if (pdrv->update) { | ||
1471 | down(&ud->device.sem); | ||
1472 | error = pdrv->update(ud); | ||
1473 | up(&ud->device.sem); | ||
1447 | } | 1474 | } |
1475 | if (error) | ||
1476 | device_release_driver(&ud->device); | ||
1477 | put_driver(drv); | ||
1448 | } | 1478 | } |
1449 | up(&nodemgr_ud_class.sem); | 1479 | up(&nodemgr_ud_class.sem); |
1450 | } | 1480 | } |
@@ -1509,7 +1539,7 @@ static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int ge | |||
1509 | static void nodemgr_node_probe(struct host_info *hi, int generation) | 1539 | static void nodemgr_node_probe(struct host_info *hi, int generation) |
1510 | { | 1540 | { |
1511 | struct hpsb_host *host = hi->host; | 1541 | struct hpsb_host *host = hi->host; |
1512 | struct class_device *cdev; | 1542 | struct device *dev; |
1513 | struct node_entry *ne; | 1543 | struct node_entry *ne; |
1514 | 1544 | ||
1515 | /* Do some processing of the nodes we've probed. This pulls them | 1545 | /* Do some processing of the nodes we've probed. This pulls them |
@@ -1522,13 +1552,13 @@ static void nodemgr_node_probe(struct host_info *hi, int generation) | |||
1522 | * improvement...) */ | 1552 | * improvement...) */ |
1523 | 1553 | ||
1524 | down(&nodemgr_ne_class.sem); | 1554 | down(&nodemgr_ne_class.sem); |
1525 | list_for_each_entry(cdev, &nodemgr_ne_class.children, node) { | 1555 | list_for_each_entry(dev, &nodemgr_ne_class.devices, node) { |
1526 | ne = container_of(cdev, struct node_entry, class_dev); | 1556 | ne = container_of(dev, struct node_entry, node_dev); |
1527 | if (!ne->needs_probe) | 1557 | if (!ne->needs_probe) |
1528 | nodemgr_probe_ne(hi, ne, generation); | 1558 | nodemgr_probe_ne(hi, ne, generation); |
1529 | } | 1559 | } |
1530 | list_for_each_entry(cdev, &nodemgr_ne_class.children, node) { | 1560 | list_for_each_entry(dev, &nodemgr_ne_class.devices, node) { |
1531 | ne = container_of(cdev, struct node_entry, class_dev); | 1561 | ne = container_of(dev, struct node_entry, node_dev); |
1532 | if (ne->needs_probe) | 1562 | if (ne->needs_probe) |
1533 | nodemgr_probe_ne(hi, ne, generation); | 1563 | nodemgr_probe_ne(hi, ne, generation); |
1534 | } | 1564 | } |
@@ -1686,18 +1716,12 @@ static int nodemgr_host_thread(void *__hi) | |||
1686 | if (kthread_should_stop()) | 1716 | if (kthread_should_stop()) |
1687 | goto exit; | 1717 | goto exit; |
1688 | 1718 | ||
1689 | if (mutex_lock_interruptible(&nodemgr_serialize)) { | ||
1690 | if (try_to_freeze()) | ||
1691 | continue; | ||
1692 | goto exit; | ||
1693 | } | ||
1694 | |||
1695 | /* Pause for 1/4 second in 1/16 second intervals, | 1719 | /* Pause for 1/4 second in 1/16 second intervals, |
1696 | * to make sure things settle down. */ | 1720 | * to make sure things settle down. */ |
1697 | g = get_hpsb_generation(host); | 1721 | g = get_hpsb_generation(host); |
1698 | for (i = 0; i < 4 ; i++) { | 1722 | for (i = 0; i < 4 ; i++) { |
1699 | if (msleep_interruptible(63) || kthread_should_stop()) | 1723 | if (msleep_interruptible(63) || kthread_should_stop()) |
1700 | goto unlock_exit; | 1724 | goto exit; |
1701 | 1725 | ||
1702 | /* Now get the generation in which the node ID's we collect | 1726 | /* Now get the generation in which the node ID's we collect |
1703 | * are valid. During the bus scan we will use this generation | 1727 | * are valid. During the bus scan we will use this generation |
@@ -1715,7 +1739,6 @@ static int nodemgr_host_thread(void *__hi) | |||
1715 | if (!nodemgr_check_irm_capability(host, reset_cycles) || | 1739 | if (!nodemgr_check_irm_capability(host, reset_cycles) || |
1716 | !nodemgr_do_irm_duties(host, reset_cycles)) { | 1740 | !nodemgr_do_irm_duties(host, reset_cycles)) { |
1717 | reset_cycles++; | 1741 | reset_cycles++; |
1718 | mutex_unlock(&nodemgr_serialize); | ||
1719 | continue; | 1742 | continue; |
1720 | } | 1743 | } |
1721 | reset_cycles = 0; | 1744 | reset_cycles = 0; |
@@ -1732,11 +1755,7 @@ static int nodemgr_host_thread(void *__hi) | |||
1732 | 1755 | ||
1733 | /* Update some of our sysfs symlinks */ | 1756 | /* Update some of our sysfs symlinks */ |
1734 | nodemgr_update_host_dev_links(host); | 1757 | nodemgr_update_host_dev_links(host); |
1735 | |||
1736 | mutex_unlock(&nodemgr_serialize); | ||
1737 | } | 1758 | } |
1738 | unlock_exit: | ||
1739 | mutex_unlock(&nodemgr_serialize); | ||
1740 | exit: | 1759 | exit: |
1741 | HPSB_VERBOSE("NodeMgr: Exiting thread"); | 1760 | HPSB_VERBOSE("NodeMgr: Exiting thread"); |
1742 | return 0; | 1761 | return 0; |
@@ -1756,13 +1775,13 @@ exit: | |||
1756 | */ | 1775 | */ |
1757 | int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *)) | 1776 | int nodemgr_for_each_host(void *data, int (*cb)(struct hpsb_host *, void *)) |
1758 | { | 1777 | { |
1759 | struct class_device *cdev; | 1778 | struct device *dev; |
1760 | struct hpsb_host *host; | 1779 | struct hpsb_host *host; |
1761 | int error = 0; | 1780 | int error = 0; |
1762 | 1781 | ||
1763 | down(&hpsb_host_class.sem); | 1782 | down(&hpsb_host_class.sem); |
1764 | list_for_each_entry(cdev, &hpsb_host_class.children, node) { | 1783 | list_for_each_entry(dev, &hpsb_host_class.devices, node) { |
1765 | host = container_of(cdev, struct hpsb_host, class_dev); | 1784 | host = container_of(dev, struct hpsb_host, host_dev); |
1766 | 1785 | ||
1767 | if ((error = cb(host, data))) | 1786 | if ((error = cb(host, data))) |
1768 | break; | 1787 | break; |
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h index 4530b29d941c..919e92e2a955 100644 --- a/drivers/ieee1394/nodemgr.h +++ b/drivers/ieee1394/nodemgr.h | |||
@@ -84,7 +84,7 @@ struct unit_directory { | |||
84 | int length; /* Number of quadlets */ | 84 | int length; /* Number of quadlets */ |
85 | 85 | ||
86 | struct device device; | 86 | struct device device; |
87 | struct class_device class_dev; | 87 | struct device unit_dev; |
88 | 88 | ||
89 | struct csr1212_keyval *ud_kv; | 89 | struct csr1212_keyval *ud_kv; |
90 | u32 lun; /* logical unit number immediate value */ | 90 | u32 lun; /* logical unit number immediate value */ |
@@ -107,7 +107,7 @@ struct node_entry { | |||
107 | u32 capabilities; | 107 | u32 capabilities; |
108 | 108 | ||
109 | struct device device; | 109 | struct device device; |
110 | struct class_device class_dev; | 110 | struct device node_dev; |
111 | 111 | ||
112 | /* Means this node is not attached anymore */ | 112 | /* Means this node is not attached anymore */ |
113 | int in_limbo; | 113 | int in_limbo; |
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c index 5dadfd296f79..5667c8102efc 100644 --- a/drivers/ieee1394/ohci1394.c +++ b/drivers/ieee1394/ohci1394.c | |||
@@ -138,19 +138,6 @@ printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host-> | |||
138 | #define DBGMSG(fmt, args...) do {} while (0) | 138 | #define DBGMSG(fmt, args...) do {} while (0) |
139 | #endif | 139 | #endif |
140 | 140 | ||
141 | #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG | ||
142 | #define OHCI_DMA_ALLOC(fmt, args...) \ | ||
143 | HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \ | ||
144 | ++global_outstanding_dmas, ## args) | ||
145 | #define OHCI_DMA_FREE(fmt, args...) \ | ||
146 | HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \ | ||
147 | --global_outstanding_dmas, ## args) | ||
148 | static int global_outstanding_dmas = 0; | ||
149 | #else | ||
150 | #define OHCI_DMA_ALLOC(fmt, args...) do {} while (0) | ||
151 | #define OHCI_DMA_FREE(fmt, args...) do {} while (0) | ||
152 | #endif | ||
153 | |||
154 | /* print general (card independent) information */ | 141 | /* print general (card independent) information */ |
155 | #define PRINT_G(level, fmt, args...) \ | 142 | #define PRINT_G(level, fmt, args...) \ |
156 | printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args) | 143 | printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args) |
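
The deleted OHCI_DMA_ALLOC/OHCI_DMA_FREE macros only kept a running count of outstanding DMA allocations and printed it on every call. A minimal standalone version of that counting-macro idea, with printf standing in for HPSB_ERR:

#include <stdio.h>
#include <stdlib.h>

static int outstanding;

#define DBG_ALLOC(fmt, ...) \
	fprintf(stderr, "alloc(%d): " fmt "\n", ++outstanding, ##__VA_ARGS__)
#define DBG_FREE(fmt, ...) \
	fprintf(stderr, "free(%d): " fmt "\n", --outstanding, ##__VA_ARGS__)

int main(void)
{
	void *a = malloc(32);
	void *b = malloc(64);

	DBG_ALLOC("buffer a, %d bytes", 32);
	DBG_ALLOC("buffer b, %d bytes", 64);
	free(a);
	DBG_FREE("buffer a");
	free(b);
	DBG_FREE("buffer b");
	return 0;			/* outstanding is back to 0 */
}
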
@@ -170,7 +157,6 @@ static void dma_trm_reset(struct dma_trm_ctx *d); | |||
170 | static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d, | 157 | static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d, |
171 | enum context_type type, int ctx, int num_desc, | 158 | enum context_type type, int ctx, int num_desc, |
172 | int buf_size, int split_buf_size, int context_base); | 159 | int buf_size, int split_buf_size, int context_base); |
173 | static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d); | ||
174 | static void free_dma_rcv_ctx(struct dma_rcv_ctx *d); | 160 | static void free_dma_rcv_ctx(struct dma_rcv_ctx *d); |
175 | 161 | ||
176 | static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d, | 162 | static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d, |
@@ -533,9 +519,6 @@ static void ohci_initialize(struct ti_ohci *ohci) | |||
533 | initialize_dma_trm_ctx(&ohci->at_req_context); | 519 | initialize_dma_trm_ctx(&ohci->at_req_context); |
534 | initialize_dma_trm_ctx(&ohci->at_resp_context); | 520 | initialize_dma_trm_ctx(&ohci->at_resp_context); |
535 | 521 | ||
536 | /* Initialize IR Legacy DMA channel mask */ | ||
537 | ohci->ir_legacy_channels = 0; | ||
538 | |||
539 | /* Accept AR requests from all nodes */ | 522 | /* Accept AR requests from all nodes */ |
540 | reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); | 523 | reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); |
541 | 524 | ||
@@ -733,7 +716,6 @@ static void insert_packet(struct ti_ohci *ohci, | |||
733 | pci_map_single(ohci->dev, packet->data, | 716 | pci_map_single(ohci->dev, packet->data, |
734 | packet->data_size, | 717 | packet->data_size, |
735 | PCI_DMA_TODEVICE)); | 718 | PCI_DMA_TODEVICE)); |
736 | OHCI_DMA_ALLOC("single, block transmit packet"); | ||
737 | 719 | ||
738 | d->prg_cpu[idx]->end.branchAddress = 0; | 720 | d->prg_cpu[idx]->end.branchAddress = 0; |
739 | d->prg_cpu[idx]->end.status = 0; | 721 | d->prg_cpu[idx]->end.status = 0; |
@@ -783,7 +765,6 @@ static void insert_packet(struct ti_ohci *ohci, | |||
783 | d->prg_cpu[idx]->end.address = cpu_to_le32( | 765 | d->prg_cpu[idx]->end.address = cpu_to_le32( |
784 | pci_map_single(ohci->dev, packet->data, | 766 | pci_map_single(ohci->dev, packet->data, |
785 | packet->data_size, PCI_DMA_TODEVICE)); | 767 | packet->data_size, PCI_DMA_TODEVICE)); |
786 | OHCI_DMA_ALLOC("single, iso transmit packet"); | ||
787 | 768 | ||
788 | d->prg_cpu[idx]->end.branchAddress = 0; | 769 | d->prg_cpu[idx]->end.branchAddress = 0; |
789 | d->prg_cpu[idx]->end.status = 0; | 770 | d->prg_cpu[idx]->end.status = 0; |
@@ -884,36 +865,9 @@ static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet) | |||
884 | return -EOVERFLOW; | 865 | return -EOVERFLOW; |
885 | } | 866 | } |
886 | 867 | ||
887 | /* Decide whether we have an iso, a request, or a response packet */ | ||
888 | if (packet->type == hpsb_raw) | 868 | if (packet->type == hpsb_raw) |
889 | d = &ohci->at_req_context; | 869 | d = &ohci->at_req_context; |
890 | else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) { | 870 | else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA)) |
891 | /* The legacy IT DMA context is initialized on first | ||
892 | * use. However, the alloc cannot be run from | ||
893 | * interrupt context, so we bail out if that is the | ||
894 | * case. I don't see anyone sending ISO packets from | ||
895 | * interrupt context anyway... */ | ||
896 | |||
897 | if (ohci->it_legacy_context.ohci == NULL) { | ||
898 | if (in_interrupt()) { | ||
899 | PRINT(KERN_ERR, | ||
900 | "legacy IT context cannot be initialized during interrupt"); | ||
901 | return -EINVAL; | ||
902 | } | ||
903 | |||
904 | if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context, | ||
905 | DMA_CTX_ISO, 0, IT_NUM_DESC, | ||
906 | OHCI1394_IsoXmitContextBase) < 0) { | ||
907 | PRINT(KERN_ERR, | ||
908 | "error initializing legacy IT context"); | ||
909 | return -ENOMEM; | ||
910 | } | ||
911 | |||
912 | initialize_dma_trm_ctx(&ohci->it_legacy_context); | ||
913 | } | ||
914 | |||
915 | d = &ohci->it_legacy_context; | ||
916 | } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA)) | ||
917 | d = &ohci->at_resp_context; | 871 | d = &ohci->at_resp_context; |
918 | else | 872 | else |
919 | d = &ohci->at_req_context; | 873 | d = &ohci->at_req_context; |
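
With the iso branch gone, transmit-context selection rests solely on the transaction code: response tcodes all have bit 1 set, which is what (tcode & 0x02) tests, and TCODE_ISO_DATA (0xA) happens to have that bit set too, hence the explicit exclusion. A quick standalone check of that classification; the tcode values below follow IEEE 1394 and are defined locally just for the demo:

#include <stdio.h>

enum {
	TCODE_WRITEQ		= 0x0,
	TCODE_WRITEB		= 0x1,
	TCODE_WRITE_RESPONSE	= 0x2,
	TCODE_READQ		= 0x4,
	TCODE_READB		= 0x5,
	TCODE_READQ_RESPONSE	= 0x6,
	TCODE_READB_RESPONSE	= 0x7,
	TCODE_CYCLE_START	= 0x8,
	TCODE_LOCK_REQUEST	= 0x9,
	TCODE_ISO_DATA		= 0xa,	/* stream data */
	TCODE_LOCK_RESPONSE	= 0xb,
};

int main(void)
{
	static const unsigned int tcodes[] = {
		TCODE_WRITEQ, TCODE_WRITEB, TCODE_WRITE_RESPONSE,
		TCODE_READQ, TCODE_READB, TCODE_READQ_RESPONSE,
		TCODE_READB_RESPONSE, TCODE_CYCLE_START,
		TCODE_LOCK_REQUEST, TCODE_ISO_DATA, TCODE_LOCK_RESPONSE,
	};

	for (unsigned int i = 0; i < sizeof(tcodes) / sizeof(tcodes[0]); i++) {
		unsigned int tcode = tcodes[i];
		const char *ctx = "AT request";

		/* Same test as the simplified ohci_transmit() path:
		 * responses use the response context, everything else
		 * falls through to the request context. */
		if ((tcode & 0x02) && tcode != TCODE_ISO_DATA)
			ctx = "AT response";
		printf("tcode 0x%x -> %s context\n", tcode, ctx);
	}
	return 0;
}
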
@@ -932,9 +886,7 @@ static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet) | |||
932 | static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg) | 886 | static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg) |
933 | { | 887 | { |
934 | struct ti_ohci *ohci = host->hostdata; | 888 | struct ti_ohci *ohci = host->hostdata; |
935 | int retval = 0; | 889 | int retval = 0, phy_reg; |
936 | unsigned long flags; | ||
937 | int phy_reg; | ||
938 | 890 | ||
939 | switch (cmd) { | 891 | switch (cmd) { |
940 | case RESET_BUS: | 892 | case RESET_BUS: |
@@ -1027,117 +979,6 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg) | |||
1027 | dma_trm_reset(&ohci->at_resp_context); | 979 | dma_trm_reset(&ohci->at_resp_context); |
1028 | break; | 980 | break; |
1029 | 981 | ||
1030 | case ISO_LISTEN_CHANNEL: | ||
1031 | { | ||
1032 | u64 mask; | ||
1033 | struct dma_rcv_ctx *d = &ohci->ir_legacy_context; | ||
1034 | int ir_legacy_active; | ||
1035 | |||
1036 | if (arg<0 || arg>63) { | ||
1037 | PRINT(KERN_ERR, | ||
1038 | "%s: IS0 listen channel %d is out of range", | ||
1039 | __FUNCTION__, arg); | ||
1040 | return -EFAULT; | ||
1041 | } | ||
1042 | |||
1043 | mask = (u64)0x1<<arg; | ||
1044 | |||
1045 | spin_lock_irqsave(&ohci->IR_channel_lock, flags); | ||
1046 | |||
1047 | if (ohci->ISO_channel_usage & mask) { | ||
1048 | PRINT(KERN_ERR, | ||
1049 | "%s: IS0 listen channel %d is already used", | ||
1050 | __FUNCTION__, arg); | ||
1051 | spin_unlock_irqrestore(&ohci->IR_channel_lock, flags); | ||
1052 | return -EFAULT; | ||
1053 | } | ||
1054 | |||
1055 | ir_legacy_active = ohci->ir_legacy_channels; | ||
1056 | |||
1057 | ohci->ISO_channel_usage |= mask; | ||
1058 | ohci->ir_legacy_channels |= mask; | ||
1059 | |||
1060 | spin_unlock_irqrestore(&ohci->IR_channel_lock, flags); | ||
1061 | |||
1062 | if (!ir_legacy_active) { | ||
1063 | if (ohci1394_register_iso_tasklet(ohci, | ||
1064 | &ohci->ir_legacy_tasklet) < 0) { | ||
1065 | PRINT(KERN_ERR, "No IR DMA context available"); | ||
1066 | return -EBUSY; | ||
1067 | } | ||
1068 | |||
1069 | /* the IR context can be assigned to any DMA context | ||
1070 | * by ohci1394_register_iso_tasklet */ | ||
1071 | d->ctx = ohci->ir_legacy_tasklet.context; | ||
1072 | d->ctrlSet = OHCI1394_IsoRcvContextControlSet + | ||
1073 | 32*d->ctx; | ||
1074 | d->ctrlClear = OHCI1394_IsoRcvContextControlClear + | ||
1075 | 32*d->ctx; | ||
1076 | d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx; | ||
1077 | d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx; | ||
1078 | |||
1079 | initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1); | ||
1080 | |||
1081 | if (printk_ratelimit()) | ||
1082 | DBGMSG("IR legacy activated"); | ||
1083 | } | ||
1084 | |||
1085 | spin_lock_irqsave(&ohci->IR_channel_lock, flags); | ||
1086 | |||
1087 | if (arg>31) | ||
1088 | reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, | ||
1089 | 1<<(arg-32)); | ||
1090 | else | ||
1091 | reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, | ||
1092 | 1<<arg); | ||
1093 | |||
1094 | spin_unlock_irqrestore(&ohci->IR_channel_lock, flags); | ||
1095 | DBGMSG("Listening enabled on channel %d", arg); | ||
1096 | break; | ||
1097 | } | ||
1098 | case ISO_UNLISTEN_CHANNEL: | ||
1099 | { | ||
1100 | u64 mask; | ||
1101 | |||
1102 | if (arg<0 || arg>63) { | ||
1103 | PRINT(KERN_ERR, | ||
1104 | "%s: IS0 unlisten channel %d is out of range", | ||
1105 | __FUNCTION__, arg); | ||
1106 | return -EFAULT; | ||
1107 | } | ||
1108 | |||
1109 | mask = (u64)0x1<<arg; | ||
1110 | |||
1111 | spin_lock_irqsave(&ohci->IR_channel_lock, flags); | ||
1112 | |||
1113 | if (!(ohci->ISO_channel_usage & mask)) { | ||
1114 | PRINT(KERN_ERR, | ||
1115 | "%s: IS0 unlisten channel %d is not used", | ||
1116 | __FUNCTION__, arg); | ||
1117 | spin_unlock_irqrestore(&ohci->IR_channel_lock, flags); | ||
1118 | return -EFAULT; | ||
1119 | } | ||
1120 | |||
1121 | ohci->ISO_channel_usage &= ~mask; | ||
1122 | ohci->ir_legacy_channels &= ~mask; | ||
1123 | |||
1124 | if (arg>31) | ||
1125 | reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, | ||
1126 | 1<<(arg-32)); | ||
1127 | else | ||
1128 | reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, | ||
1129 | 1<<arg); | ||
1130 | |||
1131 | spin_unlock_irqrestore(&ohci->IR_channel_lock, flags); | ||
1132 | DBGMSG("Listening disabled on channel %d", arg); | ||
1133 | |||
1134 | if (ohci->ir_legacy_channels == 0) { | ||
1135 | stop_dma_rcv_ctx(&ohci->ir_legacy_context); | ||
1136 | DBGMSG("ISO legacy receive context stopped"); | ||
1137 | } | ||
1138 | |||
1139 | break; | ||
1140 | } | ||
1141 | default: | 982 | default: |
1142 | PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet", | 983 | PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet", |
1143 | cmd); | 984 | cmd); |
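
The removed listen/unlisten commands amounted to bookkeeping on a 64-bit channel-usage mask plus a write to the matching multi-channel mask register, with channels 0-31 going to the Lo register and 32-63 to the Hi register. A standalone sketch of that mask arithmetic; the register names below are just labels:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t usage = 0;
	int channels[] = { 5, 35, 63 };

	for (unsigned int i = 0; i < sizeof(channels) / sizeof(channels[0]); i++) {
		int arg = channels[i];
		uint64_t mask = (uint64_t)1 << arg;

		if (usage & mask) {
			printf("channel %d already in use\n", arg);
			continue;
		}
		usage |= mask;

		/* Channels 32..63 live in the Hi register, 0..31 in Lo. */
		if (arg > 31)
			printf("channel %2d -> IRMultiChanMaskHiSet bit %d\n",
			       arg, arg - 32);
		else
			printf("channel %2d -> IRMultiChanMaskLoSet bit %d\n",
			       arg, arg);
	}
	printf("usage mask: 0x%016llx\n", (unsigned long long)usage);
	return 0;
}
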
@@ -2869,12 +2710,10 @@ static void dma_trm_tasklet (unsigned long data) | |||
2869 | list_del_init(&packet->driver_list); | 2710 | list_del_init(&packet->driver_list); |
2870 | hpsb_packet_sent(ohci->host, packet, ack); | 2711 | hpsb_packet_sent(ohci->host, packet, ack); |
2871 | 2712 | ||
2872 | if (datasize) { | 2713 | if (datasize) |
2873 | pci_unmap_single(ohci->dev, | 2714 | pci_unmap_single(ohci->dev, |
2874 | cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address), | 2715 | cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address), |
2875 | datasize, PCI_DMA_TODEVICE); | 2716 | datasize, PCI_DMA_TODEVICE); |
2876 | OHCI_DMA_FREE("single Xmit data packet"); | ||
2877 | } | ||
2878 | 2717 | ||
2879 | d->sent_ind = (d->sent_ind+1)%d->num_desc; | 2718 | d->sent_ind = (d->sent_ind+1)%d->num_desc; |
2880 | d->free_prgs++; | 2719 | d->free_prgs++; |
@@ -2885,22 +2724,6 @@ static void dma_trm_tasklet (unsigned long data) | |||
2885 | spin_unlock_irqrestore(&d->lock, flags); | 2724 | spin_unlock_irqrestore(&d->lock, flags); |
2886 | } | 2725 | } |
2887 | 2726 | ||
2888 | static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d) | ||
2889 | { | ||
2890 | if (d->ctrlClear) { | ||
2891 | ohci1394_stop_context(d->ohci, d->ctrlClear, NULL); | ||
2892 | |||
2893 | if (d->type == DMA_CTX_ISO) { | ||
2894 | /* disable interrupts */ | ||
2895 | reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx); | ||
2896 | ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet); | ||
2897 | } else { | ||
2898 | tasklet_kill(&d->task); | ||
2899 | } | ||
2900 | } | ||
2901 | } | ||
2902 | |||
2903 | |||
2904 | static void free_dma_rcv_ctx(struct dma_rcv_ctx *d) | 2727 | static void free_dma_rcv_ctx(struct dma_rcv_ctx *d) |
2905 | { | 2728 | { |
2906 | int i; | 2729 | int i; |
@@ -2913,23 +2736,19 @@ static void free_dma_rcv_ctx(struct dma_rcv_ctx *d) | |||
2913 | 2736 | ||
2914 | if (d->buf_cpu) { | 2737 | if (d->buf_cpu) { |
2915 | for (i=0; i<d->num_desc; i++) | 2738 | for (i=0; i<d->num_desc; i++) |
2916 | if (d->buf_cpu[i] && d->buf_bus[i]) { | 2739 | if (d->buf_cpu[i] && d->buf_bus[i]) |
2917 | pci_free_consistent( | 2740 | pci_free_consistent( |
2918 | ohci->dev, d->buf_size, | 2741 | ohci->dev, d->buf_size, |
2919 | d->buf_cpu[i], d->buf_bus[i]); | 2742 | d->buf_cpu[i], d->buf_bus[i]); |
2920 | OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i); | ||
2921 | } | ||
2922 | kfree(d->buf_cpu); | 2743 | kfree(d->buf_cpu); |
2923 | kfree(d->buf_bus); | 2744 | kfree(d->buf_bus); |
2924 | } | 2745 | } |
2925 | if (d->prg_cpu) { | 2746 | if (d->prg_cpu) { |
2926 | for (i=0; i<d->num_desc; i++) | 2747 | for (i=0; i<d->num_desc; i++) |
2927 | if (d->prg_cpu[i] && d->prg_bus[i]) { | 2748 | if (d->prg_cpu[i] && d->prg_bus[i]) |
2928 | pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]); | 2749 | pci_pool_free(d->prg_pool, d->prg_cpu[i], |
2929 | OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i); | 2750 | d->prg_bus[i]); |
2930 | } | ||
2931 | pci_pool_destroy(d->prg_pool); | 2751 | pci_pool_destroy(d->prg_pool); |
2932 | OHCI_DMA_FREE("dma_rcv prg pool"); | ||
2933 | kfree(d->prg_cpu); | 2752 | kfree(d->prg_cpu); |
2934 | kfree(d->prg_bus); | 2753 | kfree(d->prg_bus); |
2935 | } | 2754 | } |
@@ -2998,13 +2817,10 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d, | |||
2998 | } | 2817 | } |
2999 | num_allocs++; | 2818 | num_allocs++; |
3000 | 2819 | ||
3001 | OHCI_DMA_ALLOC("dma_rcv prg pool"); | ||
3002 | |||
3003 | for (i=0; i<d->num_desc; i++) { | 2820 | for (i=0; i<d->num_desc; i++) { |
3004 | d->buf_cpu[i] = pci_alloc_consistent(ohci->dev, | 2821 | d->buf_cpu[i] = pci_alloc_consistent(ohci->dev, |
3005 | d->buf_size, | 2822 | d->buf_size, |
3006 | d->buf_bus+i); | 2823 | d->buf_bus+i); |
3007 | OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i); | ||
3008 | 2824 | ||
3009 | if (d->buf_cpu[i] != NULL) { | 2825 | if (d->buf_cpu[i] != NULL) { |
3010 | memset(d->buf_cpu[i], 0, d->buf_size); | 2826 | memset(d->buf_cpu[i], 0, d->buf_size); |
@@ -3016,7 +2832,6 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d, | |||
3016 | } | 2832 | } |
3017 | 2833 | ||
3018 | d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i); | 2834 | d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i); |
3019 | OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i); | ||
3020 | 2835 | ||
3021 | if (d->prg_cpu[i] != NULL) { | 2836 | if (d->prg_cpu[i] != NULL) { |
3022 | memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd)); | 2837 | memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd)); |
@@ -3030,18 +2845,11 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d, | |||
3030 | 2845 | ||
3031 | spin_lock_init(&d->lock); | 2846 | spin_lock_init(&d->lock); |
3032 | 2847 | ||
3033 | if (type == DMA_CTX_ISO) { | 2848 | d->ctrlSet = context_base + OHCI1394_ContextControlSet; |
3034 | ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet, | 2849 | d->ctrlClear = context_base + OHCI1394_ContextControlClear; |
3035 | OHCI_ISO_MULTICHANNEL_RECEIVE, | 2850 | d->cmdPtr = context_base + OHCI1394_ContextCommandPtr; |
3036 | dma_rcv_tasklet, (unsigned long) d); | ||
3037 | } else { | ||
3038 | d->ctrlSet = context_base + OHCI1394_ContextControlSet; | ||
3039 | d->ctrlClear = context_base + OHCI1394_ContextControlClear; | ||
3040 | d->cmdPtr = context_base + OHCI1394_ContextCommandPtr; | ||
3041 | |||
3042 | tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d); | ||
3043 | } | ||
3044 | 2851 | ||
2852 | tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long) d); | ||
3045 | return 0; | 2853 | return 0; |
3046 | } | 2854 | } |
3047 | 2855 | ||
@@ -3057,12 +2865,10 @@ static void free_dma_trm_ctx(struct dma_trm_ctx *d) | |||
3057 | 2865 | ||
3058 | if (d->prg_cpu) { | 2866 | if (d->prg_cpu) { |
3059 | for (i=0; i<d->num_desc; i++) | 2867 | for (i=0; i<d->num_desc; i++) |
3060 | if (d->prg_cpu[i] && d->prg_bus[i]) { | 2868 | if (d->prg_cpu[i] && d->prg_bus[i]) |
3061 | pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]); | 2869 | pci_pool_free(d->prg_pool, d->prg_cpu[i], |
3062 | OHCI_DMA_FREE("pool dma_trm prg[%d]", i); | 2870 | d->prg_bus[i]); |
3063 | } | ||
3064 | pci_pool_destroy(d->prg_pool); | 2871 | pci_pool_destroy(d->prg_pool); |
3065 | OHCI_DMA_FREE("dma_trm prg pool"); | ||
3066 | kfree(d->prg_cpu); | 2872 | kfree(d->prg_cpu); |
3067 | kfree(d->prg_bus); | 2873 | kfree(d->prg_bus); |
3068 | } | 2874 | } |
@@ -3108,11 +2914,8 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d, | |||
3108 | } | 2914 | } |
3109 | num_allocs++; | 2915 | num_allocs++; |
3110 | 2916 | ||
3111 | OHCI_DMA_ALLOC("dma_rcv prg pool"); | ||
3112 | |||
3113 | for (i = 0; i < d->num_desc; i++) { | 2917 | for (i = 0; i < d->num_desc; i++) { |
3114 | d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i); | 2918 | d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i); |
3115 | OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i); | ||
3116 | 2919 | ||
3117 | if (d->prg_cpu[i] != NULL) { | 2920 | if (d->prg_cpu[i] != NULL) { |
3118 | memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg)); | 2921 | memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg)); |
@@ -3127,28 +2930,10 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d, | |||
3127 | spin_lock_init(&d->lock); | 2930 | spin_lock_init(&d->lock); |
3128 | 2931 | ||
3129 | /* initialize tasklet */ | 2932 | /* initialize tasklet */ |
3130 | if (type == DMA_CTX_ISO) { | 2933 | d->ctrlSet = context_base + OHCI1394_ContextControlSet; |
3131 | ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT, | 2934 | d->ctrlClear = context_base + OHCI1394_ContextControlClear; |
3132 | dma_trm_tasklet, (unsigned long) d); | 2935 | d->cmdPtr = context_base + OHCI1394_ContextCommandPtr; |
3133 | if (ohci1394_register_iso_tasklet(ohci, | 2936 | tasklet_init(&d->task, dma_trm_tasklet, (unsigned long)d); |
3134 | &ohci->it_legacy_tasklet) < 0) { | ||
3135 | PRINT(KERN_ERR, "No IT DMA context available"); | ||
3136 | free_dma_trm_ctx(d); | ||
3137 | return -EBUSY; | ||
3138 | } | ||
3139 | |||
3140 | /* IT can be assigned to any context by register_iso_tasklet */ | ||
3141 | d->ctx = ohci->it_legacy_tasklet.context; | ||
3142 | d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx; | ||
3143 | d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx; | ||
3144 | d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx; | ||
3145 | } else { | ||
3146 | d->ctrlSet = context_base + OHCI1394_ContextControlSet; | ||
3147 | d->ctrlClear = context_base + OHCI1394_ContextControlClear; | ||
3148 | d->cmdPtr = context_base + OHCI1394_ContextCommandPtr; | ||
3149 | tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d); | ||
3150 | } | ||
3151 | |||
3152 | return 0; | 2937 | return 0; |
3153 | } | 2938 | } |
3154 | 2939 | ||
@@ -3294,7 +3079,6 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev, | |||
3294 | ohci->csr_config_rom_cpu = | 3079 | ohci->csr_config_rom_cpu = |
3295 | pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN, | 3080 | pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN, |
3296 | &ohci->csr_config_rom_bus); | 3081 | &ohci->csr_config_rom_bus); |
3297 | OHCI_DMA_ALLOC("consistent csr_config_rom"); | ||
3298 | if (ohci->csr_config_rom_cpu == NULL) | 3082 | if (ohci->csr_config_rom_cpu == NULL) |
3299 | FAIL(-ENOMEM, "Failed to allocate buffer config rom"); | 3083 | FAIL(-ENOMEM, "Failed to allocate buffer config rom"); |
3300 | ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER; | 3084 | ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER; |
@@ -3303,8 +3087,6 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev, | |||
3303 | ohci->selfid_buf_cpu = | 3087 | ohci->selfid_buf_cpu = |
3304 | pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE, | 3088 | pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE, |
3305 | &ohci->selfid_buf_bus); | 3089 | &ohci->selfid_buf_bus); |
3306 | OHCI_DMA_ALLOC("consistent selfid_buf"); | ||
3307 | |||
3308 | if (ohci->selfid_buf_cpu == NULL) | 3090 | if (ohci->selfid_buf_cpu == NULL) |
3309 | FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets"); | 3091 | FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets"); |
3310 | ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER; | 3092 | ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER; |
@@ -3377,20 +3159,6 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev, | |||
3377 | ohci->ISO_channel_usage = 0; | 3159 | ohci->ISO_channel_usage = 0; |
3378 | spin_lock_init(&ohci->IR_channel_lock); | 3160 | spin_lock_init(&ohci->IR_channel_lock); |
3379 | 3161 | ||
3380 | /* Allocate the IR DMA context right here so we don't have | ||
3381 | * to do it in interrupt path - note that this doesn't | ||
3382 | * waste much memory and avoids the jugglery required to | ||
3383 | * allocate it in IRQ path. */ | ||
3384 | if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context, | ||
3385 | DMA_CTX_ISO, 0, IR_NUM_DESC, | ||
3386 | IR_BUF_SIZE, IR_SPLIT_BUF_SIZE, | ||
3387 | OHCI1394_IsoRcvContextBase) < 0) { | ||
3388 | FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context"); | ||
3389 | } | ||
3390 | |||
3391 | /* We hopefully don't have to pre-allocate IT DMA like we did | ||
3392 | * for IR DMA above. Allocate it on-demand and mark inactive. */ | ||
3393 | ohci->it_legacy_context.ohci = NULL; | ||
3394 | spin_lock_init(&ohci->event_lock); | 3162 | spin_lock_init(&ohci->event_lock); |
3395 | 3163 | ||
3396 | /* | 3164 | /* |
@@ -3483,20 +3251,16 @@ static void ohci1394_pci_remove(struct pci_dev *pdev) | |||
3483 | free_dma_rcv_ctx(&ohci->ar_resp_context); | 3251 | free_dma_rcv_ctx(&ohci->ar_resp_context); |
3484 | free_dma_trm_ctx(&ohci->at_req_context); | 3252 | free_dma_trm_ctx(&ohci->at_req_context); |
3485 | free_dma_trm_ctx(&ohci->at_resp_context); | 3253 | free_dma_trm_ctx(&ohci->at_resp_context); |
3486 | free_dma_rcv_ctx(&ohci->ir_legacy_context); | ||
3487 | free_dma_trm_ctx(&ohci->it_legacy_context); | ||
3488 | 3254 | ||
3489 | case OHCI_INIT_HAVE_SELFID_BUFFER: | 3255 | case OHCI_INIT_HAVE_SELFID_BUFFER: |
3490 | pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE, | 3256 | pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE, |
3491 | ohci->selfid_buf_cpu, | 3257 | ohci->selfid_buf_cpu, |
3492 | ohci->selfid_buf_bus); | 3258 | ohci->selfid_buf_bus); |
3493 | OHCI_DMA_FREE("consistent selfid_buf"); | ||
3494 | 3259 | ||
3495 | case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER: | 3260 | case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER: |
3496 | pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN, | 3261 | pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN, |
3497 | ohci->csr_config_rom_cpu, | 3262 | ohci->csr_config_rom_cpu, |
3498 | ohci->csr_config_rom_bus); | 3263 | ohci->csr_config_rom_bus); |
3499 | OHCI_DMA_FREE("consistent csr_config_rom"); | ||
3500 | 3264 | ||
3501 | case OHCI_INIT_HAVE_IOMAPPING: | 3265 | case OHCI_INIT_HAVE_IOMAPPING: |
3502 | iounmap(ohci->registers); | 3266 | iounmap(ohci->registers); |
diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h index f1ad539e7c1b..4320bf010495 100644 --- a/drivers/ieee1394/ohci1394.h +++ b/drivers/ieee1394/ohci1394.h | |||
@@ -190,23 +190,10 @@ struct ti_ohci { | |||
190 | unsigned long ir_multichannel_used; /* ditto */ | 190 | unsigned long ir_multichannel_used; /* ditto */ |
191 | spinlock_t IR_channel_lock; | 191 | spinlock_t IR_channel_lock; |
192 | 192 | ||
193 | /* iso receive (legacy API) */ | ||
194 | u64 ir_legacy_channels; /* note: this differs from ISO_channel_usage; | ||
195 | it only accounts for channels listened to | ||
196 | by the legacy API, so that we can know when | ||
197 | it is safe to free the legacy API context */ | ||
198 | |||
199 | struct dma_rcv_ctx ir_legacy_context; | ||
200 | struct ohci1394_iso_tasklet ir_legacy_tasklet; | ||
201 | |||
202 | /* iso transmit */ | 193 | /* iso transmit */ |
203 | int nb_iso_xmit_ctx; | 194 | int nb_iso_xmit_ctx; |
204 | unsigned long it_ctx_usage; /* use test_and_set_bit() for atomicity */ | 195 | unsigned long it_ctx_usage; /* use test_and_set_bit() for atomicity */ |
205 | 196 | ||
206 | /* iso transmit (legacy API) */ | ||
207 | struct dma_trm_ctx it_legacy_context; | ||
208 | struct ohci1394_iso_tasklet it_legacy_tasklet; | ||
209 | |||
210 | u64 ISO_channel_usage; | 197 | u64 ISO_channel_usage; |
211 | 198 | ||
212 | /* IEEE-1394 part follows */ | 199 | /* IEEE-1394 part follows */ |
@@ -221,7 +208,6 @@ struct ti_ohci { | |||
221 | 208 | ||
222 | /* Tasklets for iso receive and transmit, used by video1394 | 209 | /* Tasklets for iso receive and transmit, used by video1394 |
223 | * and dv1394 */ | 210 | * and dv1394 */ |
224 | |||
225 | struct list_head iso_tasklet_list; | 211 | struct list_head iso_tasklet_list; |
226 | spinlock_t iso_tasklet_list_lock; | 212 | spinlock_t iso_tasklet_list_lock; |
227 | 213 | ||
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c index 0742befe9227..d1a5bcdb5e0b 100644 --- a/drivers/ieee1394/pcilynx.c +++ b/drivers/ieee1394/pcilynx.c | |||
@@ -477,7 +477,11 @@ static void send_next(struct ti_lynx *lynx, int what) | |||
477 | struct lynx_send_data *d; | 477 | struct lynx_send_data *d; |
478 | struct hpsb_packet *packet; | 478 | struct hpsb_packet *packet; |
479 | 479 | ||
480 | #if 0 /* has been removed from ieee1394 core */ | ||
480 | d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async); | 481 | d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async); |
482 | #else | ||
483 | d = &lynx->async; | ||
484 | #endif | ||
481 | if (!list_empty(&d->pcl_queue)) { | 485 | if (!list_empty(&d->pcl_queue)) { |
482 | PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo"); | 486 | PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo"); |
483 | BUG(); | 487 | BUG(); |
@@ -511,9 +515,11 @@ static void send_next(struct ti_lynx *lynx, int what) | |||
511 | case hpsb_async: | 515 | case hpsb_async: |
512 | pcl.buffer[0].control |= PCL_CMD_XMT; | 516 | pcl.buffer[0].control |= PCL_CMD_XMT; |
513 | break; | 517 | break; |
518 | #if 0 /* has been removed from ieee1394 core */ | ||
514 | case hpsb_iso: | 519 | case hpsb_iso: |
515 | pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE; | 520 | pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE; |
516 | break; | 521 | break; |
522 | #endif | ||
517 | case hpsb_raw: | 523 | case hpsb_raw: |
518 | pcl.buffer[0].control |= PCL_CMD_UNFXMT; | 524 | pcl.buffer[0].control |= PCL_CMD_UNFXMT; |
519 | break; | 525 | break; |
@@ -542,9 +548,11 @@ static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet) | |||
542 | case hpsb_raw: | 548 | case hpsb_raw: |
543 | d = &lynx->async; | 549 | d = &lynx->async; |
544 | break; | 550 | break; |
551 | #if 0 /* has been removed from ieee1394 core */ | ||
545 | case hpsb_iso: | 552 | case hpsb_iso: |
546 | d = &lynx->iso_send; | 553 | d = &lynx->iso_send; |
547 | break; | 554 | break; |
555 | #endif | ||
548 | default: | 556 | default: |
549 | PRINT(KERN_ERR, lynx->id, "invalid packet type %d", | 557 | PRINT(KERN_ERR, lynx->id, "invalid packet type %d", |
550 | packet->type); | 558 | packet->type); |
@@ -797,7 +805,7 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg) | |||
797 | } | 805 | } |
798 | 806 | ||
799 | break; | 807 | break; |
800 | 808 | #if 0 /* has been removed from ieee1394 core */ | |
801 | case ISO_LISTEN_CHANNEL: | 809 | case ISO_LISTEN_CHANNEL: |
802 | spin_lock_irqsave(&lynx->iso_rcv.lock, flags); | 810 | spin_lock_irqsave(&lynx->iso_rcv.lock, flags); |
803 | 811 | ||
@@ -819,7 +827,7 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg) | |||
819 | 827 | ||
820 | spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags); | 828 | spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags); |
821 | break; | 829 | break; |
822 | 830 | #endif | |
823 | default: | 831 | default: |
824 | PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd); | 832 | PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd); |
825 | retval = -1; | 833 | retval = -1; |
@@ -1009,11 +1017,11 @@ static irqreturn_t lynx_irq_handler(int irq, void *dev_id) | |||
1009 | pci_unmap_single(lynx->dev, lynx->iso_send.data_dma, | 1017 | pci_unmap_single(lynx->dev, lynx->iso_send.data_dma, |
1010 | packet->data_size, PCI_DMA_TODEVICE); | 1018 | packet->data_size, PCI_DMA_TODEVICE); |
1011 | } | 1019 | } |
1012 | 1020 | #if 0 /* has been removed from ieee1394 core */ | |
1013 | if (!list_empty(&lynx->iso_send.queue)) { | 1021 | if (!list_empty(&lynx->iso_send.queue)) { |
1014 | send_next(lynx, hpsb_iso); | 1022 | send_next(lynx, hpsb_iso); |
1015 | } | 1023 | } |
1016 | 1024 | #endif | |
1017 | spin_unlock(&lynx->iso_send.queue_lock); | 1025 | spin_unlock(&lynx->iso_send.queue_lock); |
1018 | 1026 | ||
1019 | if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) { | 1027 | if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) { |
diff --git a/drivers/ieee1394/raw1394-private.h b/drivers/ieee1394/raw1394-private.h index 50daabf6e5fa..a06aaad5b448 100644 --- a/drivers/ieee1394/raw1394-private.h +++ b/drivers/ieee1394/raw1394-private.h | |||
@@ -36,11 +36,6 @@ struct file_info { | |||
36 | 36 | ||
37 | u8 __user *fcp_buffer; | 37 | u8 __user *fcp_buffer; |
38 | 38 | ||
39 | /* old ISO API */ | ||
40 | u64 listen_channels; | ||
41 | quadlet_t __user *iso_buffer; | ||
42 | size_t iso_buffer_length; | ||
43 | |||
44 | u8 notification; /* (busreset-notification) RAW1394_NOTIFY_OFF/ON */ | 39 | u8 notification; /* (busreset-notification) RAW1394_NOTIFY_OFF/ON */ |
45 | 40 | ||
46 | /* new rawiso API */ | 41 | /* new rawiso API */ |
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index f1d05eeb9f51..336e5ff4cfcf 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c | |||
@@ -98,21 +98,6 @@ static struct hpsb_address_ops arm_ops = { | |||
98 | 98 | ||
99 | static void queue_complete_cb(struct pending_request *req); | 99 | static void queue_complete_cb(struct pending_request *req); |
100 | 100 | ||
101 | #include <asm/current.h> | ||
102 | static void print_old_iso_deprecation(void) | ||
103 | { | ||
104 | static pid_t p; | ||
105 | |||
106 | if (p == current->pid) | ||
107 | return; | ||
108 | p = current->pid; | ||
109 | printk(KERN_WARNING "raw1394: WARNING - Program \"%s\" uses unsupported" | ||
110 | " isochronous request types which will be removed in a next" | ||
111 | " kernel release\n", current->comm); | ||
112 | printk(KERN_WARNING "raw1394: Update your software to use libraw1394's" | ||
113 | " newer interface\n"); | ||
114 | } | ||
115 | |||
116 | static struct pending_request *__alloc_pending_request(gfp_t flags) | 101 | static struct pending_request *__alloc_pending_request(gfp_t flags) |
117 | { | 102 | { |
118 | struct pending_request *req; | 103 | struct pending_request *req; |
@@ -297,67 +282,6 @@ static void host_reset(struct hpsb_host *host) | |||
297 | spin_unlock_irqrestore(&host_info_lock, flags); | 282 | spin_unlock_irqrestore(&host_info_lock, flags); |
298 | } | 283 | } |
299 | 284 | ||
300 | static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data, | ||
301 | size_t length) | ||
302 | { | ||
303 | unsigned long flags; | ||
304 | struct host_info *hi; | ||
305 | struct file_info *fi; | ||
306 | struct pending_request *req, *req_next; | ||
307 | struct iso_block_store *ibs = NULL; | ||
308 | LIST_HEAD(reqs); | ||
309 | |||
310 | if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) { | ||
311 | HPSB_INFO("dropped iso packet"); | ||
312 | return; | ||
313 | } | ||
314 | |||
315 | spin_lock_irqsave(&host_info_lock, flags); | ||
316 | hi = find_host_info(host); | ||
317 | |||
318 | if (hi != NULL) { | ||
319 | list_for_each_entry(fi, &hi->file_info_list, list) { | ||
320 | if (!(fi->listen_channels & (1ULL << channel))) | ||
321 | continue; | ||
322 | |||
323 | req = __alloc_pending_request(GFP_ATOMIC); | ||
324 | if (!req) | ||
325 | break; | ||
326 | |||
327 | if (!ibs) { | ||
328 | ibs = kmalloc(sizeof(*ibs) + length, | ||
329 | GFP_ATOMIC); | ||
330 | if (!ibs) { | ||
331 | kfree(req); | ||
332 | break; | ||
333 | } | ||
334 | |||
335 | atomic_add(length, &iso_buffer_size); | ||
336 | atomic_set(&ibs->refcount, 0); | ||
337 | ibs->data_size = length; | ||
338 | memcpy(ibs->data, data, length); | ||
339 | } | ||
340 | |||
341 | atomic_inc(&ibs->refcount); | ||
342 | |||
343 | req->file_info = fi; | ||
344 | req->ibs = ibs; | ||
345 | req->data = ibs->data; | ||
346 | req->req.type = RAW1394_REQ_ISO_RECEIVE; | ||
347 | req->req.generation = get_hpsb_generation(host); | ||
348 | req->req.misc = 0; | ||
349 | req->req.recvb = ptr2int(fi->iso_buffer); | ||
350 | req->req.length = min(length, fi->iso_buffer_length); | ||
351 | |||
352 | list_add_tail(&req->list, &reqs); | ||
353 | } | ||
354 | } | ||
355 | spin_unlock_irqrestore(&host_info_lock, flags); | ||
356 | |||
357 | list_for_each_entry_safe(req, req_next, &reqs, list) | ||
358 | queue_complete_req(req); | ||
359 | } | ||
360 | |||
361 | static void fcp_request(struct hpsb_host *host, int nodeid, int direction, | 285 | static void fcp_request(struct hpsb_host *host, int nodeid, int direction, |
362 | int cts, u8 * data, size_t length) | 286 | int cts, u8 * data, size_t length) |
363 | { | 287 | { |
@@ -434,7 +358,11 @@ struct compat_raw1394_req { | |||
434 | 358 | ||
435 | __u64 sendb; | 359 | __u64 sendb; |
436 | __u64 recvb; | 360 | __u64 recvb; |
437 | } __attribute__((packed)); | 361 | } |
362 | #if defined(CONFIG_X86_64) || defined(CONFIG_IA64) | ||
363 | __attribute__((packed)) | ||
364 | #endif | ||
365 | ; | ||
438 | 366 | ||
439 | static const char __user *raw1394_compat_write(const char __user *buf) | 367 | static const char __user *raw1394_compat_write(const char __user *buf) |
440 | { | 368 | { |
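The hunk above applies __attribute__((packed)) to the compat structure only on X86_64/IA64. The reason is alignment: a __u64 member is 4-byte aligned inside a struct on i386 but 8-byte aligned on the 64-bit kernels that run 32-bit userland, so without packing the 64-bit kernel would lay the struct out differently from the 32-bit process that fills it in. A reduced, hypothetical stand-in (one 32-bit field followed by a 64-bit field, compiled as an ordinary user-space program) makes the difference visible:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct req_natural {            /* default layout of a 64-bit compiler */
            uint32_t type;
            uint64_t address;       /* 4 bytes of padding precede this on x86_64 */
    };

    struct req_packed {             /* matches the i386 layout */
            uint32_t type;
            uint64_t address;
    } __attribute__((packed));

    int main(void)
    {
            printf("natural: size=%zu offset(address)=%zu\n",
                   sizeof(struct req_natural), offsetof(struct req_natural, address));
            printf("packed:  size=%zu offset(address)=%zu\n",
                   sizeof(struct req_packed), offsetof(struct req_packed, address));
            return 0;
    }

On x86_64 the natural layout reports offset 8, while the packed variant reports the offset 4 that an i386 build produces anyway, which is why the attribute is only needed where the 32-bit and 64-bit ABIs meet.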
@@ -459,7 +387,7 @@ static const char __user *raw1394_compat_write(const char __user *buf) | |||
459 | static int | 387 | static int |
460 | raw1394_compat_read(const char __user *buf, struct raw1394_request *r) | 388 | raw1394_compat_read(const char __user *buf, struct raw1394_request *r) |
461 | { | 389 | { |
462 | struct compat_raw1394_req __user *cr = (typeof(cr)) r; | 390 | struct compat_raw1394_req __user *cr = (typeof(cr)) buf; |
463 | if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) || | 391 | if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) || |
464 | P(type) || | 392 | P(type) || |
465 | P(error) || | 393 | P(error) || |
@@ -587,7 +515,7 @@ static int state_opened(struct file_info *fi, struct pending_request *req) | |||
587 | 515 | ||
588 | req->req.length = 0; | 516 | req->req.length = 0; |
589 | queue_complete_req(req); | 517 | queue_complete_req(req); |
590 | return sizeof(struct raw1394_request); | 518 | return 0; |
591 | } | 519 | } |
592 | 520 | ||
593 | static int state_initialized(struct file_info *fi, struct pending_request *req) | 521 | static int state_initialized(struct file_info *fi, struct pending_request *req) |
@@ -601,7 +529,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req) | |||
601 | req->req.generation = atomic_read(&internal_generation); | 529 | req->req.generation = atomic_read(&internal_generation); |
602 | req->req.length = 0; | 530 | req->req.length = 0; |
603 | queue_complete_req(req); | 531 | queue_complete_req(req); |
604 | return sizeof(struct raw1394_request); | 532 | return 0; |
605 | } | 533 | } |
606 | 534 | ||
607 | switch (req->req.type) { | 535 | switch (req->req.type) { |
@@ -673,44 +601,7 @@ out_set_card: | |||
673 | } | 601 | } |
674 | 602 | ||
675 | queue_complete_req(req); | 603 | queue_complete_req(req); |
676 | return sizeof(struct raw1394_request); | 604 | return 0; |
677 | } | ||
678 | |||
679 | static void handle_iso_listen(struct file_info *fi, struct pending_request *req) | ||
680 | { | ||
681 | int channel = req->req.misc; | ||
682 | |||
683 | if ((channel > 63) || (channel < -64)) { | ||
684 | req->req.error = RAW1394_ERROR_INVALID_ARG; | ||
685 | } else if (channel >= 0) { | ||
686 | /* allocate channel req.misc */ | ||
687 | if (fi->listen_channels & (1ULL << channel)) { | ||
688 | req->req.error = RAW1394_ERROR_ALREADY; | ||
689 | } else { | ||
690 | if (hpsb_listen_channel | ||
691 | (&raw1394_highlevel, fi->host, channel)) { | ||
692 | req->req.error = RAW1394_ERROR_ALREADY; | ||
693 | } else { | ||
694 | fi->listen_channels |= 1ULL << channel; | ||
695 | fi->iso_buffer = int2ptr(req->req.recvb); | ||
696 | fi->iso_buffer_length = req->req.length; | ||
697 | } | ||
698 | } | ||
699 | } else { | ||
700 | /* deallocate channel (one's complement neg) req.misc */ | ||
701 | channel = ~channel; | ||
702 | |||
703 | if (fi->listen_channels & (1ULL << channel)) { | ||
704 | hpsb_unlisten_channel(&raw1394_highlevel, fi->host, | ||
705 | channel); | ||
706 | fi->listen_channels &= ~(1ULL << channel); | ||
707 | } else { | ||
708 | req->req.error = RAW1394_ERROR_INVALID_ARG; | ||
709 | } | ||
710 | } | ||
711 | |||
712 | req->req.length = 0; | ||
713 | queue_complete_req(req); | ||
714 | } | 605 | } |
715 | 606 | ||
716 | static void handle_fcp_listen(struct file_info *fi, struct pending_request *req) | 607 | static void handle_fcp_listen(struct file_info *fi, struct pending_request *req) |
@@ -865,7 +756,7 @@ static int handle_async_request(struct file_info *fi, | |||
865 | if (req->req.error) { | 756 | if (req->req.error) { |
866 | req->req.length = 0; | 757 | req->req.length = 0; |
867 | queue_complete_req(req); | 758 | queue_complete_req(req); |
868 | return sizeof(struct raw1394_request); | 759 | return 0; |
869 | } | 760 | } |
870 | 761 | ||
871 | hpsb_set_packet_complete_task(packet, | 762 | hpsb_set_packet_complete_task(packet, |
@@ -883,51 +774,7 @@ static int handle_async_request(struct file_info *fi, | |||
883 | hpsb_free_tlabel(packet); | 774 | hpsb_free_tlabel(packet); |
884 | queue_complete_req(req); | 775 | queue_complete_req(req); |
885 | } | 776 | } |
886 | return sizeof(struct raw1394_request); | 777 | return 0; |
887 | } | ||
888 | |||
889 | static int handle_iso_send(struct file_info *fi, struct pending_request *req, | ||
890 | int channel) | ||
891 | { | ||
892 | unsigned long flags; | ||
893 | struct hpsb_packet *packet; | ||
894 | |||
895 | packet = hpsb_make_isopacket(fi->host, req->req.length, channel & 0x3f, | ||
896 | (req->req.misc >> 16) & 0x3, | ||
897 | req->req.misc & 0xf); | ||
898 | if (!packet) | ||
899 | return -ENOMEM; | ||
900 | |||
901 | packet->speed_code = req->req.address & 0x3; | ||
902 | |||
903 | req->packet = packet; | ||
904 | |||
905 | if (copy_from_user(packet->data, int2ptr(req->req.sendb), | ||
906 | req->req.length)) { | ||
907 | req->req.error = RAW1394_ERROR_MEMFAULT; | ||
908 | req->req.length = 0; | ||
909 | queue_complete_req(req); | ||
910 | return sizeof(struct raw1394_request); | ||
911 | } | ||
912 | |||
913 | req->req.length = 0; | ||
914 | hpsb_set_packet_complete_task(packet, | ||
915 | (void (*)(void *))queue_complete_req, | ||
916 | req); | ||
917 | |||
918 | spin_lock_irqsave(&fi->reqlists_lock, flags); | ||
919 | list_add_tail(&req->list, &fi->req_pending); | ||
920 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); | ||
921 | |||
922 | /* Update the generation of the packet just before sending. */ | ||
923 | packet->generation = req->req.generation; | ||
924 | |||
925 | if (hpsb_send_packet(packet) < 0) { | ||
926 | req->req.error = RAW1394_ERROR_SEND_ERROR; | ||
927 | queue_complete_req(req); | ||
928 | } | ||
929 | |||
930 | return sizeof(struct raw1394_request); | ||
931 | } | 778 | } |
932 | 779 | ||
933 | static int handle_async_send(struct file_info *fi, struct pending_request *req) | 780 | static int handle_async_send(struct file_info *fi, struct pending_request *req) |
@@ -943,7 +790,7 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req) | |||
943 | req->req.error = RAW1394_ERROR_INVALID_ARG; | 790 | req->req.error = RAW1394_ERROR_INVALID_ARG; |
944 | req->req.length = 0; | 791 | req->req.length = 0; |
945 | queue_complete_req(req); | 792 | queue_complete_req(req); |
946 | return sizeof(struct raw1394_request); | 793 | return 0; |
947 | } | 794 | } |
948 | 795 | ||
949 | data_size = req->req.length - header_length; | 796 | data_size = req->req.length - header_length; |
@@ -957,7 +804,7 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req) | |||
957 | req->req.error = RAW1394_ERROR_MEMFAULT; | 804 | req->req.error = RAW1394_ERROR_MEMFAULT; |
958 | req->req.length = 0; | 805 | req->req.length = 0; |
959 | queue_complete_req(req); | 806 | queue_complete_req(req); |
960 | return sizeof(struct raw1394_request); | 807 | return 0; |
961 | } | 808 | } |
962 | 809 | ||
963 | if (copy_from_user | 810 | if (copy_from_user |
@@ -966,7 +813,7 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req) | |||
966 | req->req.error = RAW1394_ERROR_MEMFAULT; | 813 | req->req.error = RAW1394_ERROR_MEMFAULT; |
967 | req->req.length = 0; | 814 | req->req.length = 0; |
968 | queue_complete_req(req); | 815 | queue_complete_req(req); |
969 | return sizeof(struct raw1394_request); | 816 | return 0; |
970 | } | 817 | } |
971 | 818 | ||
972 | packet->type = hpsb_async; | 819 | packet->type = hpsb_async; |
@@ -994,7 +841,7 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req) | |||
994 | queue_complete_req(req); | 841 | queue_complete_req(req); |
995 | } | 842 | } |
996 | 843 | ||
997 | return sizeof(struct raw1394_request); | 844 | return 0; |
998 | } | 845 | } |
999 | 846 | ||
1000 | static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer, | 847 | static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer, |
@@ -1869,7 +1716,7 @@ static int arm_register(struct file_info *fi, struct pending_request *req) | |||
1869 | spin_lock_irqsave(&host_info_lock, flags); | 1716 | spin_lock_irqsave(&host_info_lock, flags); |
1870 | list_add_tail(&addr->addr_list, &fi->addr_list); | 1717 | list_add_tail(&addr->addr_list, &fi->addr_list); |
1871 | spin_unlock_irqrestore(&host_info_lock, flags); | 1718 | spin_unlock_irqrestore(&host_info_lock, flags); |
1872 | return sizeof(struct raw1394_request); | 1719 | return 0; |
1873 | } | 1720 | } |
1874 | retval = | 1721 | retval = |
1875 | hpsb_register_addrspace(&raw1394_highlevel, fi->host, &arm_ops, | 1722 | hpsb_register_addrspace(&raw1394_highlevel, fi->host, &arm_ops, |
@@ -1887,7 +1734,7 @@ static int arm_register(struct file_info *fi, struct pending_request *req) | |||
1887 | return (-EALREADY); | 1734 | return (-EALREADY); |
1888 | } | 1735 | } |
1889 | free_pending_request(req); /* immediate success or fail */ | 1736 | free_pending_request(req); /* immediate success or fail */ |
1890 | return sizeof(struct raw1394_request); | 1737 | return 0; |
1891 | } | 1738 | } |
1892 | 1739 | ||
1893 | static int arm_unregister(struct file_info *fi, struct pending_request *req) | 1740 | static int arm_unregister(struct file_info *fi, struct pending_request *req) |
@@ -1955,7 +1802,7 @@ static int arm_unregister(struct file_info *fi, struct pending_request *req) | |||
1955 | vfree(addr->addr_space_buffer); | 1802 | vfree(addr->addr_space_buffer); |
1956 | kfree(addr); | 1803 | kfree(addr); |
1957 | free_pending_request(req); /* immediate success or fail */ | 1804 | free_pending_request(req); /* immediate success or fail */ |
1958 | return sizeof(struct raw1394_request); | 1805 | return 0; |
1959 | } | 1806 | } |
1960 | retval = | 1807 | retval = |
1961 | hpsb_unregister_addrspace(&raw1394_highlevel, fi->host, | 1808 | hpsb_unregister_addrspace(&raw1394_highlevel, fi->host, |
@@ -1971,7 +1818,7 @@ static int arm_unregister(struct file_info *fi, struct pending_request *req) | |||
1971 | vfree(addr->addr_space_buffer); | 1818 | vfree(addr->addr_space_buffer); |
1972 | kfree(addr); | 1819 | kfree(addr); |
1973 | free_pending_request(req); /* immediate success or fail */ | 1820 | free_pending_request(req); /* immediate success or fail */ |
1974 | return sizeof(struct raw1394_request); | 1821 | return 0; |
1975 | } | 1822 | } |
1976 | 1823 | ||
1977 | /* Copy data from ARM buffer(s) to user buffer. */ | 1824 | /* Copy data from ARM buffer(s) to user buffer. */ |
@@ -2013,7 +1860,7 @@ static int arm_get_buf(struct file_info *fi, struct pending_request *req) | |||
2013 | * queue no response, and therefore nobody | 1860 | * queue no response, and therefore nobody |
2014 | * will free it. */ | 1861 | * will free it. */ |
2015 | free_pending_request(req); | 1862 | free_pending_request(req); |
2016 | return sizeof(struct raw1394_request); | 1863 | return 0; |
2017 | } else { | 1864 | } else { |
2018 | DBGMSG("arm_get_buf request exceeded mapping"); | 1865 | DBGMSG("arm_get_buf request exceeded mapping"); |
2019 | spin_unlock_irqrestore(&host_info_lock, flags); | 1866 | spin_unlock_irqrestore(&host_info_lock, flags); |
@@ -2065,7 +1912,7 @@ static int arm_set_buf(struct file_info *fi, struct pending_request *req) | |||
2065 | * queue no response, and therefore nobody | 1912 | * queue no response, and therefore nobody |
2066 | * will free it. */ | 1913 | * will free it. */ |
2067 | free_pending_request(req); | 1914 | free_pending_request(req); |
2068 | return sizeof(struct raw1394_request); | 1915 | return 0; |
2069 | } else { | 1916 | } else { |
2070 | DBGMSG("arm_set_buf request exceeded mapping"); | 1917 | DBGMSG("arm_set_buf request exceeded mapping"); |
2071 | spin_unlock_irqrestore(&host_info_lock, flags); | 1918 | spin_unlock_irqrestore(&host_info_lock, flags); |
@@ -2086,7 +1933,7 @@ static int reset_notification(struct file_info *fi, struct pending_request *req) | |||
2086 | (req->req.misc == RAW1394_NOTIFY_ON)) { | 1933 | (req->req.misc == RAW1394_NOTIFY_ON)) { |
2087 | fi->notification = (u8) req->req.misc; | 1934 | fi->notification = (u8) req->req.misc; |
2088 | free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */ | 1935 | free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */ |
2089 | return sizeof(struct raw1394_request); | 1936 | return 0; |
2090 | } | 1937 | } |
2091 | /* error EINVAL (22) invalid argument */ | 1938 | /* error EINVAL (22) invalid argument */ |
2092 | return (-EINVAL); | 1939 | return (-EINVAL); |
@@ -2119,12 +1966,12 @@ static int write_phypacket(struct file_info *fi, struct pending_request *req) | |||
2119 | req->req.length = 0; | 1966 | req->req.length = 0; |
2120 | queue_complete_req(req); | 1967 | queue_complete_req(req); |
2121 | } | 1968 | } |
2122 | return sizeof(struct raw1394_request); | 1969 | return 0; |
2123 | } | 1970 | } |
2124 | 1971 | ||
2125 | static int get_config_rom(struct file_info *fi, struct pending_request *req) | 1972 | static int get_config_rom(struct file_info *fi, struct pending_request *req) |
2126 | { | 1973 | { |
2127 | int ret = sizeof(struct raw1394_request); | 1974 | int ret = 0; |
2128 | quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL); | 1975 | quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL); |
2129 | int status; | 1976 | int status; |
2130 | 1977 | ||
@@ -2154,7 +2001,7 @@ static int get_config_rom(struct file_info *fi, struct pending_request *req) | |||
2154 | 2001 | ||
2155 | static int update_config_rom(struct file_info *fi, struct pending_request *req) | 2002 | static int update_config_rom(struct file_info *fi, struct pending_request *req) |
2156 | { | 2003 | { |
2157 | int ret = sizeof(struct raw1394_request); | 2004 | int ret = 0; |
2158 | quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL); | 2005 | quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL); |
2159 | if (!data) | 2006 | if (!data) |
2160 | return -ENOMEM; | 2007 | return -ENOMEM; |
@@ -2221,7 +2068,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req) | |||
2221 | 2068 | ||
2222 | hpsb_update_config_rom_image(fi->host); | 2069 | hpsb_update_config_rom_image(fi->host); |
2223 | free_pending_request(req); | 2070 | free_pending_request(req); |
2224 | return sizeof(struct raw1394_request); | 2071 | return 0; |
2225 | } | 2072 | } |
2226 | } | 2073 | } |
2227 | 2074 | ||
@@ -2286,7 +2133,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req) | |||
2286 | /* we have to free the request, because we queue no response, | 2133 | /* we have to free the request, because we queue no response, |
2287 | * and therefore nobody will free it */ | 2134 | * and therefore nobody will free it */ |
2288 | free_pending_request(req); | 2135 | free_pending_request(req); |
2289 | return sizeof(struct raw1394_request); | 2136 | return 0; |
2290 | } else { | 2137 | } else { |
2291 | for (dentry = | 2138 | for (dentry = |
2292 | fi->csr1212_dirs[dr]->value.directory.dentries_head; | 2139 | fi->csr1212_dirs[dr]->value.directory.dentries_head; |
@@ -2311,11 +2158,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req) | |||
2311 | 2158 | ||
2312 | case RAW1394_REQ_ECHO: | 2159 | case RAW1394_REQ_ECHO: |
2313 | queue_complete_req(req); | 2160 | queue_complete_req(req); |
2314 | return sizeof(struct raw1394_request); | 2161 | return 0; |
2315 | |||
2316 | case RAW1394_REQ_ISO_SEND: | ||
2317 | print_old_iso_deprecation(); | ||
2318 | return handle_iso_send(fi, req, node); | ||
2319 | 2162 | ||
2320 | case RAW1394_REQ_ARM_REGISTER: | 2163 | case RAW1394_REQ_ARM_REGISTER: |
2321 | return arm_register(fi, req); | 2164 | return arm_register(fi, req); |
@@ -2332,27 +2175,30 @@ static int state_connected(struct file_info *fi, struct pending_request *req) | |||
2332 | case RAW1394_REQ_RESET_NOTIFY: | 2175 | case RAW1394_REQ_RESET_NOTIFY: |
2333 | return reset_notification(fi, req); | 2176 | return reset_notification(fi, req); |
2334 | 2177 | ||
2178 | case RAW1394_REQ_ISO_SEND: | ||
2335 | case RAW1394_REQ_ISO_LISTEN: | 2179 | case RAW1394_REQ_ISO_LISTEN: |
2336 | print_old_iso_deprecation(); | 2180 | printk(KERN_DEBUG "raw1394: old iso ABI has been removed\n"); |
2337 | handle_iso_listen(fi, req); | 2181 | req->req.error = RAW1394_ERROR_COMPAT; |
2338 | return sizeof(struct raw1394_request); | 2182 | req->req.misc = RAW1394_KERNELAPI_VERSION; |
2183 | queue_complete_req(req); | ||
2184 | return 0; | ||
2339 | 2185 | ||
2340 | case RAW1394_REQ_FCP_LISTEN: | 2186 | case RAW1394_REQ_FCP_LISTEN: |
2341 | handle_fcp_listen(fi, req); | 2187 | handle_fcp_listen(fi, req); |
2342 | return sizeof(struct raw1394_request); | 2188 | return 0; |
2343 | 2189 | ||
2344 | case RAW1394_REQ_RESET_BUS: | 2190 | case RAW1394_REQ_RESET_BUS: |
2345 | if (req->req.misc == RAW1394_LONG_RESET) { | 2191 | if (req->req.misc == RAW1394_LONG_RESET) { |
2346 | DBGMSG("busreset called (type: LONG)"); | 2192 | DBGMSG("busreset called (type: LONG)"); |
2347 | hpsb_reset_bus(fi->host, LONG_RESET); | 2193 | hpsb_reset_bus(fi->host, LONG_RESET); |
2348 | free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */ | 2194 | free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */ |
2349 | return sizeof(struct raw1394_request); | 2195 | return 0; |
2350 | } | 2196 | } |
2351 | if (req->req.misc == RAW1394_SHORT_RESET) { | 2197 | if (req->req.misc == RAW1394_SHORT_RESET) { |
2352 | DBGMSG("busreset called (type: SHORT)"); | 2198 | DBGMSG("busreset called (type: SHORT)"); |
2353 | hpsb_reset_bus(fi->host, SHORT_RESET); | 2199 | hpsb_reset_bus(fi->host, SHORT_RESET); |
2354 | free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */ | 2200 | free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */ |
2355 | return sizeof(struct raw1394_request); | 2201 | return 0; |
2356 | } | 2202 | } |
2357 | /* error EINVAL (22) invalid argument */ | 2203 | /* error EINVAL (22) invalid argument */ |
2358 | return (-EINVAL); | 2204 | return (-EINVAL); |
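With the old isochronous ABI removed, RAW1394_REQ_ISO_SEND and RAW1394_REQ_ISO_LISTEN no longer do any work: as the hunk above shows, the request completes immediately with error RAW1394_ERROR_COMPAT and misc set to RAW1394_KERNELAPI_VERSION, so old userland gets a defined failure instead of silently broken behaviour. A hypothetical user-space check of the completed request (constants and fields taken from raw1394.h; the surrounding read()-based event loop and include path are assumed) could look like:

    #include <stdio.h>
    #include "raw1394.h"   /* struct raw1394_request, RAW1394_ERROR_COMPAT */

    /* Inspect a request that came back completed from the raw1394 device. */
    static int check_legacy_iso(const struct raw1394_request *resp)
    {
            if (resp->error == RAW1394_ERROR_COMPAT) {
                    fprintf(stderr, "raw1394: kernel API %u removed this request "
                            "type, use the rawiso ioctl interface instead\n",
                            resp->misc);
                    return -1;
            }
            return 0;
    }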
@@ -2371,7 +2217,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req) | |||
2371 | req->req.generation = get_hpsb_generation(fi->host); | 2217 | req->req.generation = get_hpsb_generation(fi->host); |
2372 | req->req.length = 0; | 2218 | req->req.length = 0; |
2373 | queue_complete_req(req); | 2219 | queue_complete_req(req); |
2374 | return sizeof(struct raw1394_request); | 2220 | return 0; |
2375 | } | 2221 | } |
2376 | 2222 | ||
2377 | switch (req->req.type) { | 2223 | switch (req->req.type) { |
@@ -2384,7 +2230,7 @@ static int state_connected(struct file_info *fi, struct pending_request *req) | |||
2384 | if (req->req.length == 0) { | 2230 | if (req->req.length == 0) { |
2385 | req->req.error = RAW1394_ERROR_INVALID_ARG; | 2231 | req->req.error = RAW1394_ERROR_INVALID_ARG; |
2386 | queue_complete_req(req); | 2232 | queue_complete_req(req); |
2387 | return sizeof(struct raw1394_request); | 2233 | return 0; |
2388 | } | 2234 | } |
2389 | 2235 | ||
2390 | return handle_async_request(fi, req, node); | 2236 | return handle_async_request(fi, req, node); |
@@ -2395,7 +2241,7 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer, | |||
2395 | { | 2241 | { |
2396 | struct file_info *fi = (struct file_info *)file->private_data; | 2242 | struct file_info *fi = (struct file_info *)file->private_data; |
2397 | struct pending_request *req; | 2243 | struct pending_request *req; |
2398 | ssize_t retval = 0; | 2244 | ssize_t retval = -EBADFD; |
2399 | 2245 | ||
2400 | #ifdef CONFIG_COMPAT | 2246 | #ifdef CONFIG_COMPAT |
2401 | if (count == sizeof(struct compat_raw1394_req) && | 2247 | if (count == sizeof(struct compat_raw1394_req) && |
@@ -2437,6 +2283,9 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer, | |||
2437 | 2283 | ||
2438 | if (retval < 0) { | 2284 | if (retval < 0) { |
2439 | free_pending_request(req); | 2285 | free_pending_request(req); |
2286 | } else { | ||
2287 | BUG_ON(retval); | ||
2288 | retval = count; | ||
2440 | } | 2289 | } |
2441 | 2290 | ||
2442 | return retval; | 2291 | return retval; |
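The hunk above also changes the internal convention: every state handler now returns 0 on success (or a negative errno) instead of sizeof(struct raw1394_request), and raw1394_write() converts that 0 into the full byte count before returning, so the write() semantics seen by userspace are unchanged. A small user-space analogue of that dispatch-and-convert pattern (hypothetical handler, errno values only) is:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/types.h>

    /* Handlers report only success (0) or failure (-errno), never a length. */
    static int handle_request(int type)
    {
            return type == 42 ? 0 : -EINVAL;
    }

    /* The write()-style entry point converts success into "count consumed". */
    static ssize_t write_like(int type, size_t count)
    {
            int ret = handle_request(type);

            if (ret < 0)
                    return ret;     /* propagate the error code */
            return count;           /* success: the whole buffer was consumed */
    }

    int main(void)
    {
            printf("%zd %zd\n", write_like(42, 32), write_like(7, 32));
            return 0;
    }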
@@ -2802,6 +2651,103 @@ static int raw1394_ioctl(struct inode *inode, struct file *file, | |||
2802 | return -EINVAL; | 2651 | return -EINVAL; |
2803 | } | 2652 | } |
2804 | 2653 | ||
2654 | #ifdef CONFIG_COMPAT | ||
2655 | struct raw1394_iso_packets32 { | ||
2656 | __u32 n_packets; | ||
2657 | compat_uptr_t infos; | ||
2658 | } __attribute__((packed)); | ||
2659 | |||
2660 | struct raw1394_cycle_timer32 { | ||
2661 | __u32 cycle_timer; | ||
2662 | __u64 local_time; | ||
2663 | } | ||
2664 | #if defined(CONFIG_X86_64) || defined(CONFIG_IA64) | ||
2665 | __attribute__((packed)) | ||
2666 | #endif | ||
2667 | ; | ||
2668 | |||
2669 | #define RAW1394_IOC_ISO_RECV_PACKETS32 \ | ||
2670 | _IOW ('#', 0x25, struct raw1394_iso_packets32) | ||
2671 | #define RAW1394_IOC_ISO_XMIT_PACKETS32 \ | ||
2672 | _IOW ('#', 0x27, struct raw1394_iso_packets32) | ||
2673 | #define RAW1394_IOC_GET_CYCLE_TIMER32 \ | ||
2674 | _IOR ('#', 0x30, struct raw1394_cycle_timer32) | ||
2675 | |||
2676 | static long raw1394_iso_xmit_recv_packets32(struct file *file, unsigned int cmd, | ||
2677 | struct raw1394_iso_packets32 __user *arg) | ||
2678 | { | ||
2679 | compat_uptr_t infos32; | ||
2680 | void *infos; | ||
2681 | long err = -EFAULT; | ||
2682 | struct raw1394_iso_packets __user *dst = compat_alloc_user_space(sizeof(struct raw1394_iso_packets)); | ||
2683 | |||
2684 | if (!copy_in_user(&dst->n_packets, &arg->n_packets, sizeof arg->n_packets) && | ||
2685 | !copy_from_user(&infos32, &arg->infos, sizeof infos32)) { | ||
2686 | infos = compat_ptr(infos32); | ||
2687 | if (!copy_to_user(&dst->infos, &infos, sizeof infos)) | ||
2688 | err = raw1394_ioctl(NULL, file, cmd, (unsigned long)dst); | ||
2689 | } | ||
2690 | return err; | ||
2691 | } | ||
2692 | |||
2693 | static long raw1394_read_cycle_timer32(struct file_info *fi, void __user * uaddr) | ||
2694 | { | ||
2695 | struct raw1394_cycle_timer32 ct; | ||
2696 | int err; | ||
2697 | |||
2698 | err = hpsb_read_cycle_timer(fi->host, &ct.cycle_timer, &ct.local_time); | ||
2699 | if (!err) | ||
2700 | if (copy_to_user(uaddr, &ct, sizeof(ct))) | ||
2701 | err = -EFAULT; | ||
2702 | return err; | ||
2703 | } | ||
2704 | |||
2705 | static long raw1394_compat_ioctl(struct file *file, | ||
2706 | unsigned int cmd, unsigned long arg) | ||
2707 | { | ||
2708 | struct file_info *fi = file->private_data; | ||
2709 | void __user *argp = (void __user *)arg; | ||
2710 | long err; | ||
2711 | |||
2712 | lock_kernel(); | ||
2713 | switch (cmd) { | ||
2714 | /* These requests have the same format as long as 'int' has the same size. */ | ||
2715 | case RAW1394_IOC_ISO_RECV_INIT: | ||
2716 | case RAW1394_IOC_ISO_RECV_START: | ||
2717 | case RAW1394_IOC_ISO_RECV_LISTEN_CHANNEL: | ||
2718 | case RAW1394_IOC_ISO_RECV_UNLISTEN_CHANNEL: | ||
2719 | case RAW1394_IOC_ISO_RECV_SET_CHANNEL_MASK: | ||
2720 | case RAW1394_IOC_ISO_RECV_RELEASE_PACKETS: | ||
2721 | case RAW1394_IOC_ISO_RECV_FLUSH: | ||
2722 | case RAW1394_IOC_ISO_XMIT_RECV_STOP: | ||
2723 | case RAW1394_IOC_ISO_XMIT_INIT: | ||
2724 | case RAW1394_IOC_ISO_XMIT_START: | ||
2725 | case RAW1394_IOC_ISO_XMIT_SYNC: | ||
2726 | case RAW1394_IOC_ISO_GET_STATUS: | ||
2727 | case RAW1394_IOC_ISO_SHUTDOWN: | ||
2728 | case RAW1394_IOC_ISO_QUEUE_ACTIVITY: | ||
2729 | err = raw1394_ioctl(NULL, file, cmd, arg); | ||
2730 | break; | ||
2731 | /* These requests have a different format. */ | ||
2732 | case RAW1394_IOC_ISO_RECV_PACKETS32: | ||
2733 | err = raw1394_iso_xmit_recv_packets32(file, RAW1394_IOC_ISO_RECV_PACKETS, argp); | ||
2734 | break; | ||
2735 | case RAW1394_IOC_ISO_XMIT_PACKETS32: | ||
2736 | err = raw1394_iso_xmit_recv_packets32(file, RAW1394_IOC_ISO_XMIT_PACKETS, argp); | ||
2737 | break; | ||
2738 | case RAW1394_IOC_GET_CYCLE_TIMER32: | ||
2739 | err = raw1394_read_cycle_timer32(fi, argp); | ||
2740 | break; | ||
2741 | default: | ||
2742 | err = -EINVAL; | ||
2743 | break; | ||
2744 | } | ||
2745 | unlock_kernel(); | ||
2746 | |||
2747 | return err; | ||
2748 | } | ||
2749 | #endif | ||
2750 | |||
2805 | static unsigned int raw1394_poll(struct file *file, poll_table * pt) | 2751 | static unsigned int raw1394_poll(struct file *file, poll_table * pt) |
2806 | { | 2752 | { |
2807 | struct file_info *fi = file->private_data; | 2753 | struct file_info *fi = file->private_data; |
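The new raw1394_compat_ioctl() splits the 32-bit entry points into two groups: commands whose layout is identical in both ABIs are forwarded to raw1394_ioctl() untouched, while the pointer-carrying packet ioctls get a native-layout copy of their argument built in compat_alloc_user_space(), with the 32-bit user pointer widened through compat_ptr(). A user-space analogue of that widening step (hypothetical struct names, an ordinary local copy instead of compat_alloc_user_space) illustrates what the translation does:

    #include <stdint.h>
    #include <stdio.h>

    struct iso_packets32 {          /* what 32-bit userland hands in */
            uint32_t n_packets;
            uint32_t infos;         /* really a 32-bit user pointer */
    };

    struct iso_packets64 {          /* what the native handler expects */
            uint32_t n_packets;
            void    *infos;
    };

    /* Widen the 32-bit argument into the native layout; in the kernel the
     * destination lives in compat_alloc_user_space() memory and the pointer
     * conversion is done by compat_ptr(). */
    static void widen(const struct iso_packets32 *src, struct iso_packets64 *dst)
    {
            dst->n_packets = src->n_packets;
            dst->infos = (void *)(uintptr_t)src->infos;
    }

    int main(void)
    {
            struct iso_packets32 in = { 4, 0x1000 };
            struct iso_packets64 out;

            widen(&in, &out);
            printf("%u packets, infos at %p\n", out.n_packets, out.infos);
            return 0;
    }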
@@ -2861,14 +2807,7 @@ static int raw1394_release(struct inode *inode, struct file *file) | |||
2861 | if (fi->iso_state != RAW1394_ISO_INACTIVE) | 2807 | if (fi->iso_state != RAW1394_ISO_INACTIVE) |
2862 | raw1394_iso_shutdown(fi); | 2808 | raw1394_iso_shutdown(fi); |
2863 | 2809 | ||
2864 | for (i = 0; i < 64; i++) { | ||
2865 | if (fi->listen_channels & (1ULL << i)) { | ||
2866 | hpsb_unlisten_channel(&raw1394_highlevel, fi->host, i); | ||
2867 | } | ||
2868 | } | ||
2869 | |||
2870 | spin_lock_irqsave(&host_info_lock, flags); | 2810 | spin_lock_irqsave(&host_info_lock, flags); |
2871 | fi->listen_channels = 0; | ||
2872 | 2811 | ||
2873 | fail = 0; | 2812 | fail = 0; |
2874 | /* set address-entries invalid */ | 2813 | /* set address-entries invalid */ |
@@ -3030,7 +2969,6 @@ static struct hpsb_highlevel raw1394_highlevel = { | |||
3030 | .add_host = add_host, | 2969 | .add_host = add_host, |
3031 | .remove_host = remove_host, | 2970 | .remove_host = remove_host, |
3032 | .host_reset = host_reset, | 2971 | .host_reset = host_reset, |
3033 | .iso_receive = iso_receive, | ||
3034 | .fcp_request = fcp_request, | 2972 | .fcp_request = fcp_request, |
3035 | }; | 2973 | }; |
3036 | 2974 | ||
@@ -3041,7 +2979,9 @@ static const struct file_operations raw1394_fops = { | |||
3041 | .write = raw1394_write, | 2979 | .write = raw1394_write, |
3042 | .mmap = raw1394_mmap, | 2980 | .mmap = raw1394_mmap, |
3043 | .ioctl = raw1394_ioctl, | 2981 | .ioctl = raw1394_ioctl, |
3044 | // .compat_ioctl = ... someone needs to do this | 2982 | #ifdef CONFIG_COMPAT |
2983 | .compat_ioctl = raw1394_compat_ioctl, | ||
2984 | #endif | ||
3045 | .poll = raw1394_poll, | 2985 | .poll = raw1394_poll, |
3046 | .open = raw1394_open, | 2986 | .open = raw1394_open, |
3047 | .release = raw1394_release, | 2987 | .release = raw1394_release, |
@@ -3054,9 +2994,9 @@ static int __init init_raw1394(void) | |||
3054 | hpsb_register_highlevel(&raw1394_highlevel); | 2994 | hpsb_register_highlevel(&raw1394_highlevel); |
3055 | 2995 | ||
3056 | if (IS_ERR | 2996 | if (IS_ERR |
3057 | (class_device_create | 2997 | (device_create( |
3058 | (hpsb_protocol_class, NULL, | 2998 | hpsb_protocol_class, NULL, |
3059 | MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), NULL, | 2999 | MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), |
3060 | RAW1394_DEVICE_NAME))) { | 3000 | RAW1394_DEVICE_NAME))) { |
3061 | ret = -EFAULT; | 3001 | ret = -EFAULT; |
3062 | goto out_unreg; | 3002 | goto out_unreg; |
@@ -3083,9 +3023,9 @@ static int __init init_raw1394(void) | |||
3083 | goto out; | 3023 | goto out; |
3084 | 3024 | ||
3085 | out_dev: | 3025 | out_dev: |
3086 | class_device_destroy(hpsb_protocol_class, | 3026 | device_destroy(hpsb_protocol_class, |
3087 | MKDEV(IEEE1394_MAJOR, | 3027 | MKDEV(IEEE1394_MAJOR, |
3088 | IEEE1394_MINOR_BLOCK_RAW1394 * 16)); | 3028 | IEEE1394_MINOR_BLOCK_RAW1394 * 16)); |
3089 | out_unreg: | 3029 | out_unreg: |
3090 | hpsb_unregister_highlevel(&raw1394_highlevel); | 3030 | hpsb_unregister_highlevel(&raw1394_highlevel); |
3091 | out: | 3031 | out: |
@@ -3094,9 +3034,9 @@ static int __init init_raw1394(void) | |||
3094 | 3034 | ||
3095 | static void __exit cleanup_raw1394(void) | 3035 | static void __exit cleanup_raw1394(void) |
3096 | { | 3036 | { |
3097 | class_device_destroy(hpsb_protocol_class, | 3037 | device_destroy(hpsb_protocol_class, |
3098 | MKDEV(IEEE1394_MAJOR, | 3038 | MKDEV(IEEE1394_MAJOR, |
3099 | IEEE1394_MINOR_BLOCK_RAW1394 * 16)); | 3039 | IEEE1394_MINOR_BLOCK_RAW1394 * 16)); |
3100 | cdev_del(&raw1394_cdev); | 3040 | cdev_del(&raw1394_cdev); |
3101 | hpsb_unregister_highlevel(&raw1394_highlevel); | 3041 | hpsb_unregister_highlevel(&raw1394_highlevel); |
3102 | hpsb_unregister_protocol(&raw1394_driver); | 3042 | hpsb_unregister_protocol(&raw1394_driver); |
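raw1394 (and video1394 below) also move from the class_device_create()/class_device_destroy() calls to device_create()/device_destroy(). Against the kernel headers of this era the call takes the class, a parent device, the dev_t and a printf-style name, which is the shape used in the hunks above. A condensed, hypothetical sketch of a driver registering and tearing down such a node under those assumptions:

    #include <linux/module.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/fs.h>

    static struct class *demo_class;
    static dev_t demo_devt;

    static int __init demo_init(void)
    {
            int err = alloc_chrdev_region(&demo_devt, 0, 1, "demo");
            if (err)
                    return err;

            demo_class = class_create(THIS_MODULE, "demo");
            if (IS_ERR(demo_class)) {
                    err = PTR_ERR(demo_class);
                    goto out_chrdev;
            }

            /* same call shape as above: class, parent, dev_t, name */
            if (IS_ERR(device_create(demo_class, NULL, demo_devt, "demo0"))) {
                    err = -EFAULT;
                    goto out_class;
            }
            return 0;

    out_class:
            class_destroy(demo_class);
    out_chrdev:
            unregister_chrdev_region(demo_devt, 1);
            return err;
    }

    static void __exit demo_exit(void)
    {
            device_destroy(demo_class, demo_devt);
            class_destroy(demo_class);
            unregister_chrdev_region(demo_devt, 1);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");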
diff --git a/drivers/ieee1394/raw1394.h b/drivers/ieee1394/raw1394.h index 7bd22ee1afbb..963ac20373d2 100644 --- a/drivers/ieee1394/raw1394.h +++ b/drivers/ieee1394/raw1394.h | |||
@@ -17,11 +17,11 @@ | |||
17 | #define RAW1394_REQ_ASYNC_WRITE 101 | 17 | #define RAW1394_REQ_ASYNC_WRITE 101 |
18 | #define RAW1394_REQ_LOCK 102 | 18 | #define RAW1394_REQ_LOCK 102 |
19 | #define RAW1394_REQ_LOCK64 103 | 19 | #define RAW1394_REQ_LOCK64 103 |
20 | #define RAW1394_REQ_ISO_SEND 104 | 20 | #define RAW1394_REQ_ISO_SEND 104 /* removed ABI, now a no-op */ |
21 | #define RAW1394_REQ_ASYNC_SEND 105 | 21 | #define RAW1394_REQ_ASYNC_SEND 105 |
22 | #define RAW1394_REQ_ASYNC_STREAM 106 | 22 | #define RAW1394_REQ_ASYNC_STREAM 106 |
23 | 23 | ||
24 | #define RAW1394_REQ_ISO_LISTEN 200 | 24 | #define RAW1394_REQ_ISO_LISTEN 200 /* removed ABI, now a no-op */ |
25 | #define RAW1394_REQ_FCP_LISTEN 201 | 25 | #define RAW1394_REQ_FCP_LISTEN 201 |
26 | #define RAW1394_REQ_RESET_BUS 202 | 26 | #define RAW1394_REQ_RESET_BUS 202 |
27 | #define RAW1394_REQ_GET_ROM 203 | 27 | #define RAW1394_REQ_GET_ROM 203 |
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 3f873cc7e247..e0c385a3b450 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -118,14 +118,13 @@ MODULE_PARM_DESC(max_speed, "Force max speed " | |||
118 | "(3 = 800Mb/s, 2 = 400Mb/s, 1 = 200Mb/s, 0 = 100Mb/s)"); | 118 | "(3 = 800Mb/s, 2 = 400Mb/s, 1 = 200Mb/s, 0 = 100Mb/s)"); |
119 | 119 | ||
120 | /* | 120 | /* |
121 | * Set serialize_io to 1 if you'd like only one scsi command sent | 121 | * Set serialize_io to 0 or N to use dynamically appended lists of command ORBs. |
122 | * down to us at a time (debugging). This might be necessary for very | 122 | * This is and always has been buggy in multiple subtle ways. See above TODOs. |
123 | * badly behaved sbp2 devices. | ||
124 | */ | 123 | */ |
125 | static int sbp2_serialize_io = 1; | 124 | static int sbp2_serialize_io = 1; |
126 | module_param_named(serialize_io, sbp2_serialize_io, int, 0444); | 125 | module_param_named(serialize_io, sbp2_serialize_io, bool, 0444); |
127 | MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers " | 126 | MODULE_PARM_DESC(serialize_io, "Serialize requests coming from SCSI drivers " |
128 | "(default = 1, faster = 0)"); | 127 | "(default = Y, faster but buggy = N)"); |
129 | 128 | ||
130 | /* | 129 | /* |
131 | * Bump up max_sectors if you'd like to support very large sized | 130 | * Bump up max_sectors if you'd like to support very large sized |
@@ -154,9 +153,9 @@ MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported " | |||
154 | * are possible on OXFW911 and newer Oxsemi bridges. | 153 | * are possible on OXFW911 and newer Oxsemi bridges. |
155 | */ | 154 | */ |
156 | static int sbp2_exclusive_login = 1; | 155 | static int sbp2_exclusive_login = 1; |
157 | module_param_named(exclusive_login, sbp2_exclusive_login, int, 0644); | 156 | module_param_named(exclusive_login, sbp2_exclusive_login, bool, 0644); |
158 | MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | 157 | MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " |
159 | "(default = 1)"); | 158 | "(default = Y, use N for concurrent initiators)"); |
160 | 159 | ||
161 | /* | 160 | /* |
162 | * If any of the following workarounds is required for your device to work, | 161 | * If any of the following workarounds is required for your device to work, |
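The sbp2 hunks above keep the variables as int but switch the module_param_named() type to bool, so "serialize_io=N" and "exclusive_login=N" are accepted on the module command line alongside the numeric forms, which is what the updated MODULE_PARM_DESC text refers to. A minimal, hypothetical module showing the same declaration pattern (era-appropriate headers assumed) is:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* The variable stays an int (as in sbp2.c), but the parameter is parsed
     * as a boolean, so users may write serialize_io=Y/N as well as 1/0. */
    static int demo_serialize_io = 1;
    module_param_named(serialize_io, demo_serialize_io, bool, 0444);
    MODULE_PARM_DESC(serialize_io, "Serialize requests (default = Y, faster but buggy = N)");

    static int __init demo_init(void)
    {
            printk(KERN_INFO "demo: serialize_io=%d\n", demo_serialize_io);
            return 0;
    }

    static void __exit demo_exit(void) { }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");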
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index 44402b9d82a8..333a4bb76743 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h | |||
@@ -67,7 +67,7 @@ struct sbp2_command_orb { | |||
67 | #define ORB_SET_LUN(v) ((v) & 0xffff) | 67 | #define ORB_SET_LUN(v) ((v) & 0xffff) |
68 | #define ORB_SET_FUNCTION(v) (((v) & 0xf) << 16) | 68 | #define ORB_SET_FUNCTION(v) (((v) & 0xf) << 16) |
69 | #define ORB_SET_RECONNECT(v) (((v) & 0xf) << 20) | 69 | #define ORB_SET_RECONNECT(v) (((v) & 0xf) << 20) |
70 | #define ORB_SET_EXCLUSIVE(v) (((v) & 0x1) << 28) | 70 | #define ORB_SET_EXCLUSIVE(v) ((v) ? 1 << 28 : 0) |
71 | #define ORB_SET_LOGIN_RESP_LENGTH(v) ((v) & 0xffff) | 71 | #define ORB_SET_LOGIN_RESP_LENGTH(v) ((v) & 0xffff) |
72 | #define ORB_SET_PASSWD_LENGTH(v) (((v) & 0xffff) << 16) | 72 | #define ORB_SET_PASSWD_LENGTH(v) (((v) & 0xffff) << 16) |
73 | 73 | ||
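The ORB_SET_EXCLUSIVE change pairs with the bool parameter above: the old macro masked the value with 0x1, so any logically true value with bit 0 clear (say 2) silently produced 0 and requested a non-exclusive login, whereas the new form sets bit 28 for any non-zero input. A tiny user-space comparison of the two macro definitions shows the difference:

    #include <stdio.h>

    #define ORB_SET_EXCLUSIVE_OLD(v)  (((v) & 0x1) << 28)
    #define ORB_SET_EXCLUSIVE_NEW(v)  ((v) ? 1 << 28 : 0)

    int main(void)
    {
            int v = 2;      /* logically "true", but bit 0 is clear */

            printf("old: 0x%08x  new: 0x%08x\n",
                   (unsigned)ORB_SET_EXCLUSIVE_OLD(v),
                   (unsigned)ORB_SET_EXCLUSIVE_NEW(v));
            /* prints: old: 0x00000000  new: 0x10000000 */
            return 0;
    }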
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index 87ebd0846c34..bd28adfd7afc 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c | |||
@@ -1340,9 +1340,9 @@ static void video1394_add_host (struct hpsb_host *host) | |||
1340 | hpsb_set_hostinfo_key(&video1394_highlevel, host, ohci->host->id); | 1340 | hpsb_set_hostinfo_key(&video1394_highlevel, host, ohci->host->id); |
1341 | 1341 | ||
1342 | minor = IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id; | 1342 | minor = IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id; |
1343 | class_device_create(hpsb_protocol_class, NULL, MKDEV( | 1343 | device_create(hpsb_protocol_class, NULL, |
1344 | IEEE1394_MAJOR, minor), | 1344 | MKDEV(IEEE1394_MAJOR, minor), |
1345 | NULL, "%s-%d", VIDEO1394_DRIVER_NAME, ohci->host->id); | 1345 | "%s-%d", VIDEO1394_DRIVER_NAME, ohci->host->id); |
1346 | } | 1346 | } |
1347 | 1347 | ||
1348 | 1348 | ||
@@ -1351,8 +1351,8 @@ static void video1394_remove_host (struct hpsb_host *host) | |||
1351 | struct ti_ohci *ohci = hpsb_get_hostinfo(&video1394_highlevel, host); | 1351 | struct ti_ohci *ohci = hpsb_get_hostinfo(&video1394_highlevel, host); |
1352 | 1352 | ||
1353 | if (ohci) | 1353 | if (ohci) |
1354 | class_device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR, | 1354 | device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR, |
1355 | IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id)); | 1355 | IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id)); |
1356 | return; | 1356 | return; |
1357 | } | 1357 | } |
1358 | 1358 | ||