Diffstat (limited to 'drivers')
289 files changed, 16581 insertions, 4241 deletions
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 85249395623..f911a2f8cc3 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -32,6 +32,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
+#include <linux/pci-aspm.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <acpi/acpi_bus.h>
@@ -564,7 +565,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
 	/* Indicate support for various _OSC capabilities. */
 	if (pci_ext_cfg_avail(root->bus->self))
 		flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
-	if (pcie_aspm_enabled())
+	if (pcie_aspm_support_enabled())
 		flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
 			OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
 	if (pci_msi_enabled())
@@ -591,12 +592,16 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
 
 		status = acpi_pci_osc_control_set(device->handle, &flags,
 			OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-		if (ACPI_SUCCESS(status))
+		if (ACPI_SUCCESS(status)) {
 			dev_info(root->bus->bridge,
 				"ACPI _OSC control (0x%02x) granted\n", flags);
-		else
+		} else {
 			dev_dbg(root->bus->bridge,
 				"ACPI _OSC request failed (code %d)\n", status);
+			printk(KERN_INFO "Unable to assume _OSC PCIe control. "
+				"Disabling ASPM\n");
+			pcie_no_aspm();
+		}
 	}
 
 	pci_acpi_add_bus_pm_notifier(device, root->bus);
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d57e8d0fb82..e9e5238f310 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -168,4 +168,11 @@ config SYS_HYPERVISOR
 	bool
 	default n
 
+config ARCH_NO_SYSDEV_OPS
+	bool
+	---help---
+	  To be selected by architectures that don't use sysdev class or
+	  sysdev driver power management (suspend/resume) and shutdown
+	  operations.
+
 endmenu
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index f6fb5474160..fbe72da6c41 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -329,7 +329,7 @@ void sysdev_unregister(struct sys_device *sysdev)
 }
 
 
-
+#ifndef CONFIG_ARCH_NO_SYSDEV_OPS
 /**
  *	sysdev_shutdown - Shut down all system devices.
  *
@@ -524,6 +524,7 @@ int sysdev_resume(void)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sysdev_resume);
+#endif /* CONFIG_ARCH_NO_SYSDEV_OPS */
 
 int __init system_bus_init(void)
 {
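The ARCH_NO_SYSDEV_OPS changes above (the Kconfig symbol plus the #ifndef/#endif pair in drivers/base/sys.c) compile the legacy sysdev shutdown/suspend/resume walk out entirely on architectures that select the symbol. The pattern is: guard the real implementation behind the config option and give callers a no-op fallback. A minimal standalone sketch of that pattern with demo names; the stub shape is an assumption for illustration, not code from this diff:

    #include <stdio.h>

    /* Uncomment to mimic an architecture selecting ARCH_NO_SYSDEV_OPS. */
    /* #define CONFIG_ARCH_NO_SYSDEV_OPS */

    #ifndef CONFIG_ARCH_NO_SYSDEV_OPS
    static int demo_sysdev_suspend(void)
    {
        printf("walking legacy system devices\n");
        return 0;
    }
    #else
    /* hypothetical no-op stub: callers still compile, the walk disappears */
    static inline int demo_sysdev_suspend(void) { return 0; }
    #endif

    int main(void)
    {
        return demo_sysdev_suspend();
    }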
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 35658f445fc..9bf13988f1a 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -193,7 +193,7 @@ static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
 	u64 *cfg_offset);
 static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
 	unsigned long *memory_bar);
-
+static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
 
 /* performant mode helper functions */
 static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
@@ -231,7 +231,7 @@ static const struct block_device_operations cciss_fops = {
  */
 static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
 {
-	if (likely(h->transMethod == CFGTBL_Trans_Performant))
+	if (likely(h->transMethod & CFGTBL_Trans_Performant))
 		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
 }
 
@@ -556,6 +556,44 @@ static void __devinit cciss_procinit(ctlr_info_t *h)
 #define to_hba(n) container_of(n, struct ctlr_info, dev)
 #define to_drv(n) container_of(n, drive_info_struct, dev)
 
+/* List of controllers which cannot be reset on kexec with reset_devices */
+static u32 unresettable_controller[] = {
+	0x324a103C, /* Smart Array P712m */
+	0x324b103C, /* SmartArray P711m */
+	0x3223103C, /* Smart Array P800 */
+	0x3234103C, /* Smart Array P400 */
+	0x3235103C, /* Smart Array P400i */
+	0x3211103C, /* Smart Array E200i */
+	0x3212103C, /* Smart Array E200 */
+	0x3213103C, /* Smart Array E200i */
+	0x3214103C, /* Smart Array E200i */
+	0x3215103C, /* Smart Array E200i */
+	0x3237103C, /* Smart Array E500 */
+	0x323D103C, /* Smart Array P700m */
+	0x409C0E11, /* Smart Array 6400 */
+	0x409D0E11, /* Smart Array 6400 EM */
+};
+
+static int ctlr_is_resettable(struct ctlr_info *h)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
+		if (unresettable_controller[i] == h->board_id)
+			return 0;
+	return 1;
+}
+
+static ssize_t host_show_resettable(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct ctlr_info *h = to_hba(dev);
+
+	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h));
+}
+static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);
+
 static ssize_t host_store_rescan(struct device *dev,
 				 struct device_attribute *attr,
 				 const char *buf, size_t count)
@@ -741,6 +779,7 @@ static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
 
 static struct attribute *cciss_host_attrs[] = {
 	&dev_attr_rescan.attr,
+	&dev_attr_resettable.attr,
 	NULL
 };
 
@@ -973,8 +1012,8 @@ static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
 	temp64.val32.upper = c->ErrDesc.Addr.upper;
 	pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
 		c->err_info, (dma_addr_t) temp64.val);
-	pci_free_consistent(h->pdev, sizeof(CommandList_struct),
-		c, (dma_addr_t) c->busaddr);
+	pci_free_consistent(h->pdev, sizeof(CommandList_struct), c,
+		(dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr));
 }
 
 static inline ctlr_info_t *get_host(struct gendisk *disk)
@@ -1490,8 +1529,7 @@ static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
 		return -EINVAL;
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
-	ioc = (BIG_IOCTL_Command_struct *)
-	    kmalloc(sizeof(*ioc), GFP_KERNEL);
+	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
 	if (!ioc) {
 		status = -ENOMEM;
 		goto cleanup1;
@@ -2653,6 +2691,10 @@ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
 			c->Request.CDB[0]);
 		return_status = IO_NEEDS_RETRY;
 		break;
+	case CMD_UNABORTABLE:
+		dev_warn(&h->pdev->dev, "cmd unabortable\n");
+		return_status = IO_ERROR;
+		break;
 	default:
 		dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
 			"unknown status %x\n", c->Request.CDB[0],
@@ -3103,6 +3145,13 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
+	case CMD_UNABORTABLE:
+		dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
+		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+			cmd->err_info->CommandStatus, DRIVER_OK,
+			cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ?
+				DID_PASSTHROUGH : DID_ERROR);
+		break;
 	default:
 		dev_warn(&h->pdev->dev, "cmd %p returned "
 			"unknown status %x\n", cmd,
@@ -3136,10 +3185,13 @@ static inline u32 cciss_tag_to_index(u32 tag)
 	return tag >> DIRECT_LOOKUP_SHIFT;
 }
 
-static inline u32 cciss_tag_discard_error_bits(u32 tag)
+static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag)
 {
-#define CCISS_ERROR_BITS 0x03
-	return tag & ~CCISS_ERROR_BITS;
+#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
+#define CCISS_SIMPLE_ERROR_BITS 0x03
+	if (likely(h->transMethod & CFGTBL_Trans_Performant))
+		return tag & ~CCISS_PERF_ERROR_BITS;
+	return tag & ~CCISS_SIMPLE_ERROR_BITS;
 }
 
 static inline void cciss_mark_tag_indexed(u32 *tag)
@@ -3359,7 +3411,7 @@ static inline u32 next_command(ctlr_info_t *h)
 {
 	u32 a;
 
-	if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
+	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
 		return h->access.command_completed(h);
 
 	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
@@ -3394,14 +3446,12 @@ static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
 /* process completion of a non-indexed command */
 static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
 {
-	u32 tag;
 	CommandList_struct *c = NULL;
 	__u32 busaddr_masked, tag_masked;
 
-	tag = cciss_tag_discard_error_bits(raw_tag);
+	tag_masked = cciss_tag_discard_error_bits(h, raw_tag);
 	list_for_each_entry(c, &h->cmpQ, list) {
-		busaddr_masked = cciss_tag_discard_error_bits(c->busaddr);
-		tag_masked = cciss_tag_discard_error_bits(tag);
+		busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr);
 		if (busaddr_masked == tag_masked) {
 			finish_cmd(h, c, raw_tag);
 			return next_command(h);
@@ -3753,7 +3803,8 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
 	}
 }
 
-static __devinit void cciss_enter_performant_mode(ctlr_info_t *h)
+static __devinit void cciss_enter_performant_mode(ctlr_info_t *h,
+	u32 use_short_tags)
 {
 	/* This is a bit complicated.  There are 8 registers on
 	 * the controller which we write to to tell it 8 different
@@ -3808,7 +3859,7 @@ static __devinit void cciss_enter_performant_mode(ctlr_info_t *h)
 	writel(0, &h->transtable->RepQCtrAddrHigh32);
 	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
 	writel(0, &h->transtable->RepQAddr0High32);
-	writel(CFGTBL_Trans_Performant,
+	writel(CFGTBL_Trans_Performant | use_short_tags,
 		&(h->cfgtable->HostWrite.TransportRequest));
 
 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
@@ -3855,7 +3906,8 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
 	if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
 		goto clean_up;
 
-	cciss_enter_performant_mode(h);
+	cciss_enter_performant_mode(h,
+		trans_support & CFGTBL_Trans_use_short_tags);
 
 	/* Change the access methods to the performant access methods */
 	h->access = SA5_performant_access;
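A note on the tag changes in drivers/block/cciss.c above: in performant mode the controller echoes a command's bus address back as the completion tag, but with the low DIRECT_LOOKUP_SHIFT bits carrying status and direct-lookup information, so tags and bus addresses only compare equal after both are masked with the mode-appropriate mask — hence the new per-mode cciss_tag_discard_error_bits(h, tag). A standalone demonstration of why the old 2-bit mask is not enough (DIRECT_LOOKUP_SHIFT is assumed to be 4 here purely for the demo):

    #include <stdio.h>
    #include <stdint.h>

    #define DIRECT_LOOKUP_SHIFT 4                               /* assumed demo value */
    #define PERF_ERROR_BITS   ((1u << DIRECT_LOOKUP_SHIFT) - 1) /* 0x0f */
    #define SIMPLE_ERROR_BITS 0x03u

    int main(void)
    {
        uint32_t busaddr = 0x12340;       /* tag stored in c->busaddr at submit time */
        uint32_t raw_tag = busaddr | 0x5; /* controller returned low status bits set */

        /* old mask: a stale bit above bit 1 survives, so the comparison fails */
        printf("simple mask:     %#x vs %#x\n",
               raw_tag & ~SIMPLE_ERROR_BITS, busaddr & ~SIMPLE_ERROR_BITS);
        /* per-mode mask: all low bits dropped, the command is found */
        printf("performant mask: %#x vs %#x\n",
               raw_tag & ~PERF_ERROR_BITS, busaddr & ~PERF_ERROR_BITS);
        return 0;
    }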
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 579f7491849..554bbd907d1 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -222,6 +222,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
 		h->ctlr, c->busaddr);
 #endif /* CCISS_DEBUG */
 	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+	readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
 	h->commands_outstanding++;
 	if ( h->commands_outstanding > h->max_outstanding)
 		h->max_outstanding = h->commands_outstanding;
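The single readl() added to SA5_submit_command above is the standard posted-write flush: PCI bridges may buffer ("post") MMIO writes, and a read from the same device cannot complete until those writes have reached it, so the command is guaranteed to be at the controller once the read returns. A sketch of the idiom with stand-in accessors (illustration only, not the kernel's writel/readl):

    #include <stdint.h>

    static inline void demo_writel(uint32_t v, volatile uint32_t *addr) { *addr = v; }
    static inline uint32_t demo_readl(volatile uint32_t *addr) { return *addr; }

    static void demo_submit(volatile uint32_t *request_port, uint32_t busaddr)
    {
        demo_writel(busaddr, request_port); /* may be posted by the bridge */
        (void)demo_readl(request_port);     /* forces the posted write out */
    }

    int main(void)
    {
        volatile uint32_t fake_port = 0;    /* real code maps a BAR instead */
        demo_submit(&fake_port, 0x1000);
        return 0;
    }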
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 35463d2f0ee..cd441bef031 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -56,6 +56,7 @@
 
 #define CFGTBL_Trans_Simple     0x00000002l
 #define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_use_short_tags 0x20000000l
 
 #define CFGTBL_BusType_Ultra2   0x00000001l
 #define CFGTBL_BusType_Ultra3   0x00000002l
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 727d0225b7d..df793803f5a 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -824,13 +824,18 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
 			break;
 			case CMD_UNSOLICITED_ABORT:
 				cmd->result = DID_ABORT << 16;
-				dev_warn(&h->pdev->dev, "%p aborted do to an "
+				dev_warn(&h->pdev->dev, "%p aborted due to an "
 					"unsolicited abort\n", c);
 			break;
 			case CMD_TIMEOUT:
 				cmd->result = DID_TIME_OUT << 16;
 				dev_warn(&h->pdev->dev, "%p timedout\n", c);
 			break;
+			case CMD_UNABORTABLE:
+				cmd->result = DID_ERROR << 16;
+				dev_warn(&h->pdev->dev, "c %p command "
+					"unabortable\n", c);
+			break;
 			default:
 				cmd->result = DID_ERROR << 16;
 				dev_warn(&h->pdev->dev,
@@ -1007,11 +1012,15 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
 		break;
 		case CMD_UNSOLICITED_ABORT:
 			dev_warn(&h->pdev->dev,
-				"%p aborted do to an unsolicited abort\n", c);
+				"%p aborted due to an unsolicited abort\n", c);
 		break;
 		case CMD_TIMEOUT:
 			dev_warn(&h->pdev->dev, "%p timedout\n", c);
 		break;
+		case CMD_UNABORTABLE:
+			dev_warn(&h->pdev->dev,
+				"%p unabortable\n", c);
+		break;
 		default:
 			dev_warn(&h->pdev->dev,
 				"%p returned unknown status %x\n",
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index aca302492ff..2a1642bc451 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -92,7 +92,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	bio->bi_end_io = drbd_md_io_complete;
 	bio->bi_rw = rw;
 
-	if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
+	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
 		bio_endio(bio, -EIO);
 	else
 		submit_bio(rw, bio);
@@ -176,13 +176,17 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
 	struct lc_element *al_ext;
 	struct lc_element *tmp;
 	unsigned long al_flags = 0;
+	int wake;
 
 	spin_lock_irq(&mdev->al_lock);
 	tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
 	if (unlikely(tmp != NULL)) {
 		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
 		if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
+			wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
 			spin_unlock_irq(&mdev->al_lock);
+			if (wake)
+				wake_up(&mdev->al_wait);
 			return NULL;
 		}
 	}
@@ -258,6 +262,33 @@ void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
 	spin_unlock_irqrestore(&mdev->al_lock, flags);
 }
 
+#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
+/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
+ * are still coupled, or assume too much about their relation.
+ * Code below will not work if this is violated.
+ * Will be cleaned up with some followup patch.
+ */
+# error FIXME
+#endif
+
+static unsigned int al_extent_to_bm_page(unsigned int al_enr)
+{
+	return al_enr >>
+		/* bit to page */
+		((PAGE_SHIFT + 3) -
+		/* al extent number to bit */
+		 (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
+}
+
+static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
+{
+	return rs_enr >>
+		/* bit to page */
+		((PAGE_SHIFT + 3) -
+		/* al extent number to bit */
+		 (BM_EXT_SHIFT - BM_BLOCK_SHIFT));
+}
+
 int
 w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
@@ -285,7 +316,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 	 * For now, we must not write the transaction,
 	 * if we cannot write out the bitmap of the evicted extent. */
 	if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
-		drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
+		drbd_bm_write_page(mdev, al_extent_to_bm_page(evicted));
 
 	/* The bitmap write may have failed, causing a state change. */
 	if (mdev->state.disk < D_INCONSISTENT) {
@@ -334,7 +365,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 		    + mdev->ldev->md.al_offset + mdev->al_tr_pos;
 
 	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
-		drbd_chk_io_error(mdev, 1, TRUE);
+		drbd_chk_io_error(mdev, 1, true);
 
 	if (++mdev->al_tr_pos >
 	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
@@ -511,225 +542,6 @@ cancel:
 	return 1;
 }
 
-static void atodb_endio(struct bio *bio, int error)
-{
-	struct drbd_atodb_wait *wc = bio->bi_private;
-	struct drbd_conf *mdev = wc->mdev;
-	struct page *page;
-	int uptodate = bio_flagged(bio, BIO_UPTODATE);
-
-	/* strange behavior of some lower level drivers...
-	 * fail the request by clearing the uptodate flag,
-	 * but do not return any error?! */
-	if (!error && !uptodate)
-		error = -EIO;
-
-	drbd_chk_io_error(mdev, error, TRUE);
-	if (error && wc->error == 0)
-		wc->error = error;
-
-	if (atomic_dec_and_test(&wc->count))
-		complete(&wc->io_done);
-
-	page = bio->bi_io_vec[0].bv_page;
-	put_page(page);
-	bio_put(bio);
-	mdev->bm_writ_cnt++;
-	put_ldev(mdev);
-}
-
-/* sector to word */
-#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
-
-/* activity log to on disk bitmap -- prepare bio unless that sector
- * is already covered by previously prepared bios */
-static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
-					struct bio **bios,
-					unsigned int enr,
-					struct drbd_atodb_wait *wc) __must_hold(local)
-{
-	struct bio *bio;
-	struct page *page;
-	sector_t on_disk_sector;
-	unsigned int page_offset = PAGE_SIZE;
-	int offset;
-	int i = 0;
-	int err = -ENOMEM;
-
-	/* We always write aligned, full 4k blocks,
-	 * so we can ignore the logical_block_size (for now) */
-	enr &= ~7U;
-	on_disk_sector = enr + mdev->ldev->md.md_offset
-			+ mdev->ldev->md.bm_offset;
-
-	D_ASSERT(!(on_disk_sector & 7U));
-
-	/* Check if that enr is already covered by an already created bio.
-	 * Caution, bios[] is not NULL terminated,
-	 * but only initialized to all NULL.
-	 * For completely scattered activity log,
-	 * the last invocation iterates over all bios,
-	 * and finds the last NULL entry.
-	 */
-	while ((bio = bios[i])) {
-		if (bio->bi_sector == on_disk_sector)
-			return 0;
-		i++;
-	}
-	/* bios[i] == NULL, the next not yet used slot */
-
-	/* GFP_KERNEL, we are not in the write-out path */
-	bio = bio_alloc(GFP_KERNEL, 1);
-	if (bio == NULL)
-		return -ENOMEM;
-
-	if (i > 0) {
-		const struct bio_vec *prev_bv = bios[i-1]->bi_io_vec;
-		page_offset = prev_bv->bv_offset + prev_bv->bv_len;
-		page = prev_bv->bv_page;
-	}
-	if (page_offset == PAGE_SIZE) {
-		page = alloc_page(__GFP_HIGHMEM);
-		if (page == NULL)
-			goto out_bio_put;
-		page_offset = 0;
-	} else {
-		get_page(page);
-	}
-
-	offset = S2W(enr);
-	drbd_bm_get_lel(mdev, offset,
-			min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset),
-			kmap(page) + page_offset);
-	kunmap(page);
-
-	bio->bi_private = wc;
-	bio->bi_end_io = atodb_endio;
-	bio->bi_bdev = mdev->ldev->md_bdev;
-	bio->bi_sector = on_disk_sector;
-
-	if (bio_add_page(bio, page, 4096, page_offset) != 4096)
-		goto out_put_page;
-
-	atomic_inc(&wc->count);
-	/* we already know that we may do this...
-	 * get_ldev_if_state(mdev,D_ATTACHING);
-	 * just get the extra reference, so that the local_cnt reflects
-	 * the number of pending IO requests DRBD at its backing device.
-	 */
-	atomic_inc(&mdev->local_cnt);
-
-	bios[i] = bio;
-
-	return 0;
-
-out_put_page:
-	err = -EINVAL;
-	put_page(page);
-out_bio_put:
-	bio_put(bio);
-	return err;
-}
-
-/**
- * drbd_al_to_on_disk_bm() - * Writes bitmap parts covered by active AL extents
- * @mdev:	DRBD device.
- *
- * Called when we detach (unconfigure) local storage,
- * or when we go from R_PRIMARY to R_SECONDARY role.
- */
-void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
-{
-	int i, nr_elements;
-	unsigned int enr;
-	struct bio **bios;
-	struct drbd_atodb_wait wc;
-
-	ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING))
-		return; /* sorry, I don't have any act_log etc... */
-
-	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
-
-	nr_elements = mdev->act_log->nr_elements;
-
-	/* GFP_KERNEL, we are not in anyone's write-out path */
-	bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
-	if (!bios)
-		goto submit_one_by_one;
-
-	atomic_set(&wc.count, 0);
-	init_completion(&wc.io_done);
-	wc.mdev = mdev;
-	wc.error = 0;
-
-	for (i = 0; i < nr_elements; i++) {
-		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
-		if (enr == LC_FREE)
-			continue;
-		/* next statement also does atomic_inc wc.count and local_cnt */
-		if (atodb_prepare_unless_covered(mdev, bios,
-						enr/AL_EXT_PER_BM_SECT,
-						&wc))
-			goto free_bios_submit_one_by_one;
-	}
-
-	/* unnecessary optimization? */
-	lc_unlock(mdev->act_log);
-	wake_up(&mdev->al_wait);
-
-	/* all prepared, submit them */
-	for (i = 0; i < nr_elements; i++) {
-		if (bios[i] == NULL)
-			break;
-		if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) {
-			bios[i]->bi_rw = WRITE;
-			bio_endio(bios[i], -EIO);
-		} else {
-			submit_bio(WRITE, bios[i]);
-		}
-	}
-
-	/* always (try to) flush bitmap to stable storage */
-	drbd_md_flush(mdev);
-
-	/* In case we did not submit a single IO do not wait for
-	 * them to complete. ( Because we would wait forever here. )
-	 *
-	 * In case we had IOs and they are already complete, there
-	 * is not point in waiting anyways.
-	 * Therefore this if () ... */
-	if (atomic_read(&wc.count))
-		wait_for_completion(&wc.io_done);
-
-	put_ldev(mdev);
-
-	kfree(bios);
-	return;
-
-free_bios_submit_one_by_one:
-	/* free everything by calling the endio callback directly. */
-	for (i = 0; i < nr_elements && bios[i]; i++)
-		bio_endio(bios[i], 0);
-
-	kfree(bios);
-
-submit_one_by_one:
-	dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n");
-
-	for (i = 0; i < mdev->act_log->nr_elements; i++) {
-		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
-		if (enr == LC_FREE)
-			continue;
-		/* Really slow: if we have al-extents 16..19 active,
-		 * sector 4 will be written four times! Synchronous! */
-		drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT);
-	}
-
-	lc_unlock(mdev->act_log);
-	wake_up(&mdev->al_wait);
-	put_ldev(mdev);
-}
-
 /**
  * drbd_al_apply_to_bm() - Sets the bitmap to diry(1) where covered ba active AL extents
  * @mdev:	DRBD device.
@@ -809,7 +621,7 @@ static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused
 		return 1;
 	}
 
-	drbd_bm_write_sect(mdev, udw->enr);
+	drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
 	put_ldev(mdev);
 
 	kfree(udw);
@@ -889,7 +701,6 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
 			dev_warn(DEV, "Kicking resync_lru element enr=%u "
 			     "out with rs_failed=%d\n",
 			     ext->lce.lc_number, ext->rs_failed);
-			set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
 		}
 		ext->rs_left = rs_left;
 		ext->rs_failed = success ? 0 : count;
@@ -908,7 +719,6 @@
 			drbd_queue_work_front(&mdev->data.work, &udw->w);
 		} else {
 			dev_warn(DEV, "Could not kmalloc an udw\n");
-			set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
 		}
 	}
 } else {
@@ -919,6 +729,22 @@
 	}
 }
 
+void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
+{
+	unsigned long now = jiffies;
+	unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
+	int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
+	if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
+		if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go &&
+		    mdev->state.conn != C_PAUSED_SYNC_T &&
+		    mdev->state.conn != C_PAUSED_SYNC_S) {
+			mdev->rs_mark_time[next] = now;
+			mdev->rs_mark_left[next] = still_to_go;
+			mdev->rs_last_mark = next;
+		}
+	}
+}
+
 /* clear the bit corresponding to the piece of storage in question:
  * size byte of data starting from sector.  Only clear a bits of the affected
  * one ore more _aligned_ BM_BLOCK_SIZE blocks.
@@ -936,7 +762,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
 	int wake_up = 0;
 	unsigned long flags;
 
-	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
 		dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
 				(unsigned long long)sector, size);
 		return;
@@ -969,21 +795,9 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
 	 */
 	count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
 	if (count && get_ldev(mdev)) {
-		unsigned long now = jiffies;
-		unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
-		int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
-		if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
-			unsigned long tw = drbd_bm_total_weight(mdev);
-			if (mdev->rs_mark_left[mdev->rs_last_mark] != tw &&
-			    mdev->state.conn != C_PAUSED_SYNC_T &&
-			    mdev->state.conn != C_PAUSED_SYNC_S) {
-				mdev->rs_mark_time[next] = now;
-				mdev->rs_mark_left[next] = tw;
-				mdev->rs_last_mark = next;
-			}
-		}
+		drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
 		spin_lock_irqsave(&mdev->al_lock, flags);
-		drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
+		drbd_try_clear_on_disk_bm(mdev, sector, count, true);
 		spin_unlock_irqrestore(&mdev->al_lock, flags);
 
 		/* just wake_up unconditional now, various lc_chaged(),
@@ -998,27 +812,27 @@
 /*
  * this is intended to set one request worth of data out of sync.
  * affects at least 1 bit,
- * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits.
+ * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits.
  *
  * called by tl_clear and drbd_send_dblock (==drbd_make_request).
  * so this can be _any_ process.
  */
-void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
+int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
 			    const char *file, const unsigned int line)
 {
 	unsigned long sbnr, ebnr, lbnr, flags;
 	sector_t esector, nr_sectors;
-	unsigned int enr, count;
+	unsigned int enr, count = 0;
 	struct lc_element *e;
 
-	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
 		dev_err(DEV, "sector: %llus, size: %d\n",
 			(unsigned long long)sector, size);
-		return;
+		return 0;
 	}
 
 	if (!get_ldev(mdev))
-		return; /* no disk, no metadata, no bitmap to set bits in */
+		return 0; /* no disk, no metadata, no bitmap to set bits in */
 
 	nr_sectors = drbd_get_capacity(mdev->this_bdev);
 	esector = sector + (size >> 9) - 1;
@@ -1048,6 +862,8 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
 
 out:
 	put_ldev(mdev);
+
+	return count;
 }
 
 static
@@ -1128,7 +944,10 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
 	unsigned int enr = BM_SECT_TO_EXT(sector);
 	struct bm_extent *bm_ext;
 	int i, sig;
+	int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait.
+			 200 times -> 20 seconds. */
 
+retry:
 	sig = wait_event_interruptible(mdev->al_wait,
 			(bm_ext = _bme_get(mdev, enr)));
 	if (sig)
@@ -1139,16 +958,25 @@
 
 	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
 		sig = wait_event_interruptible(mdev->al_wait,
-				!_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i));
-		if (sig) {
+				!_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) ||
+				test_bit(BME_PRIORITY, &bm_ext->flags));
+
+		if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) {
 			spin_lock_irq(&mdev->al_lock);
 			if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
-				clear_bit(BME_NO_WRITES, &bm_ext->flags);
+				bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
 				mdev->resync_locked--;
 				wake_up(&mdev->al_wait);
 			}
 			spin_unlock_irq(&mdev->al_lock);
-			return -EINTR;
+			if (sig)
+				return -EINTR;
+			if (schedule_timeout_interruptible(HZ/10))
+				return -EINTR;
+			if (sa && --sa == 0)
+				dev_warn(DEV,"drbd_rs_begin_io() stepped aside for 20sec."
+					 "Resync stalled?\n");
+			goto retry;
 		}
 	}
 	set_bit(BME_LOCKED, &bm_ext->flags);
@@ -1291,8 +1119,7 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
 	}
 
 	if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
-		clear_bit(BME_LOCKED, &bm_ext->flags);
-		clear_bit(BME_NO_WRITES, &bm_ext->flags);
+		bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
 		mdev->resync_locked--;
 		wake_up(&mdev->al_wait);
 	}
@@ -1383,7 +1210,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
 	sector_t esector, nr_sectors;
 	int wake_up = 0;
 
-	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
 		dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
 			(unsigned long long)sector, size);
 		return;
@@ -1420,7 +1247,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
 	mdev->rs_failed += count;
 
 	if (get_ldev(mdev)) {
-		drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE);
+		drbd_try_clear_on_disk_bm(mdev, sector, count, false);
 		put_ldev(mdev);
 	}
 
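The al_extent_to_bm_page()/rs_extent_to_bm_page() helpers introduced in drbd_actlog.c above convert an extent number into the bitmap page holding its bits, so writeout can move from the sector-granular drbd_bm_write_sect() to the page-granular drbd_bm_write_page(). The shift arithmetic is easy to check with DRBD's customary constants, which this example assumes (4 KiB per bitmap bit, 4 MiB AL extents):

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define BM_BLOCK_SHIFT  12 /* one bitmap bit covers 4 KiB */
    #define AL_EXTENT_SHIFT 22 /* one AL extent covers 4 MiB */

    int main(void)
    {
        /* bits per extent: 2^(22-12) = 1024; bits per page: 2^(12+3) = 32768 */
        unsigned shift = (PAGE_SHIFT + 3) - (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT);

        printf("AL extents per bitmap page: %u\n", 1u << shift); /* 32 */
        printf("extent 100 maps to page %u\n", 100u >> shift);   /* 3 */
        return 0;
    }

One 4 KiB bitmap page thus covers 32 AL extents (128 MiB of backing storage), which is what makes the page-based writeout coarser but far cheaper than the old per-sector writes.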
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 0645ca829a9..f0ae63d2df6 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c | |||
@@ -28,18 +28,58 @@ | |||
28 | #include <linux/drbd.h> | 28 | #include <linux/drbd.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <asm/kmap_types.h> | 30 | #include <asm/kmap_types.h> |
31 | |||
32 | #include <asm-generic/bitops/le.h> | ||
33 | |||
31 | #include "drbd_int.h" | 34 | #include "drbd_int.h" |
32 | 35 | ||
36 | |||
33 | /* OPAQUE outside this file! | 37 | /* OPAQUE outside this file! |
34 | * interface defined in drbd_int.h | 38 | * interface defined in drbd_int.h |
35 | 39 | ||
36 | * convention: | 40 | * convention: |
37 | * function name drbd_bm_... => used elsewhere, "public". | 41 | * function name drbd_bm_... => used elsewhere, "public". |
38 | * function name bm_... => internal to implementation, "private". | 42 | * function name bm_... => internal to implementation, "private". |
43 | */ | ||
44 | |||
45 | |||
46 | /* | ||
47 | * LIMITATIONS: | ||
48 | * We want to support >= peta byte of backend storage, while for now still using | ||
49 | * a granularity of one bit per 4KiB of storage. | ||
50 | * 1 << 50 bytes backend storage (1 PiB) | ||
51 | * 1 << (50 - 12) bits needed | ||
52 | * 38 --> we need u64 to index and count bits | ||
53 | * 1 << (38 - 3) bitmap bytes needed | ||
54 | * 35 --> we still need u64 to index and count bytes | ||
55 | * (that's 32 GiB of bitmap for 1 PiB storage) | ||
56 | * 1 << (35 - 2) 32bit longs needed | ||
57 | * 33 --> we'd even need u64 to index and count 32bit long words. | ||
58 | * 1 << (35 - 3) 64bit longs needed | ||
59 | * 32 --> we could get away with a 32bit unsigned int to index and count | ||
60 | * 64bit long words, but I rather stay with unsigned long for now. | ||
61 | * We probably should neither count nor point to bytes or long words | ||
62 | * directly, but either by bitnumber, or by page index and offset. | ||
63 | * 1 << (35 - 12) | ||
64 | * 22 --> we need that much 4KiB pages of bitmap. | ||
65 | * 1 << (22 + 3) --> on a 64bit arch, | ||
66 | * we need 32 MiB to store the array of page pointers. | ||
67 | * | ||
68 | * Because I'm lazy, and because the resulting patch was too large, too ugly | ||
69 | * and still incomplete, on 32bit we still "only" support 16 TiB (minus some), | ||
70 | * (1 << 32) bits * 4k storage. | ||
71 | * | ||
39 | 72 | ||
40 | * Note that since find_first_bit returns int, at the current granularity of | 73 | * bitmap storage and IO: |
41 | * the bitmap (4KB per byte), this implementation "only" supports up to | 74 | * Bitmap is stored little endian on disk, and is kept little endian in |
42 | * 1<<(32+12) == 16 TB... | 75 | * core memory. Currently we still hold the full bitmap in core as long |
76 | * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage | ||
77 | * seems excessive. | ||
78 | * | ||
79 | * We plan to reduce the amount of in-core bitmap pages by pageing them in | ||
80 | * and out against their on-disk location as necessary, but need to make | ||
81 | * sure we don't cause too much meta data IO, and must not deadlock in | ||
82 | * tight memory situations. This needs some more work. | ||
43 | */ | 83 | */ |
44 | 84 | ||
45 | /* | 85 | /* |
@@ -55,13 +95,9 @@ | |||
55 | struct drbd_bitmap { | 95 | struct drbd_bitmap { |
56 | struct page **bm_pages; | 96 | struct page **bm_pages; |
57 | spinlock_t bm_lock; | 97 | spinlock_t bm_lock; |
58 | /* WARNING unsigned long bm_*: | 98 | |
59 | * 32bit number of bit offset is just enough for 512 MB bitmap. | 99 | /* see LIMITATIONS: above */ |
60 | * it will blow up if we make the bitmap bigger... | 100 | |
61 | * not that it makes much sense to have a bitmap that large, | ||
62 | * rather change the granularity to 16k or 64k or something. | ||
63 | * (that implies other problems, however...) | ||
64 | */ | ||
65 | unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */ | 101 | unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */ |
66 | unsigned long bm_bits; | 102 | unsigned long bm_bits; |
67 | size_t bm_words; | 103 | size_t bm_words; |
@@ -69,29 +105,18 @@ struct drbd_bitmap { | |||
69 | sector_t bm_dev_capacity; | 105 | sector_t bm_dev_capacity; |
70 | struct mutex bm_change; /* serializes resize operations */ | 106 | struct mutex bm_change; /* serializes resize operations */ |
71 | 107 | ||
72 | atomic_t bm_async_io; | 108 | wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */ |
73 | wait_queue_head_t bm_io_wait; | ||
74 | 109 | ||
75 | unsigned long bm_flags; | 110 | enum bm_flag bm_flags; |
76 | 111 | ||
77 | /* debugging aid, in case we are still racy somewhere */ | 112 | /* debugging aid, in case we are still racy somewhere */ |
78 | char *bm_why; | 113 | char *bm_why; |
79 | struct task_struct *bm_task; | 114 | struct task_struct *bm_task; |
80 | }; | 115 | }; |
81 | 116 | ||
82 | /* definition of bits in bm_flags */ | ||
83 | #define BM_LOCKED 0 | ||
84 | #define BM_MD_IO_ERROR 1 | ||
85 | #define BM_P_VMALLOCED 2 | ||
86 | |||
87 | static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | 117 | static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, |
88 | unsigned long e, int val, const enum km_type km); | 118 | unsigned long e, int val, const enum km_type km); |
89 | 119 | ||
90 | static int bm_is_locked(struct drbd_bitmap *b) | ||
91 | { | ||
92 | return test_bit(BM_LOCKED, &b->bm_flags); | ||
93 | } | ||
94 | |||
95 | #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) | 120 | #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) |
96 | static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) | 121 | static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) |
97 | { | 122 | { |
@@ -108,7 +133,7 @@ static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) | |||
108 | b->bm_task == mdev->worker.task ? "worker" : "?"); | 133 | b->bm_task == mdev->worker.task ? "worker" : "?"); |
109 | } | 134 | } |
110 | 135 | ||
111 | void drbd_bm_lock(struct drbd_conf *mdev, char *why) | 136 | void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags) |
112 | { | 137 | { |
113 | struct drbd_bitmap *b = mdev->bitmap; | 138 | struct drbd_bitmap *b = mdev->bitmap; |
114 | int trylock_failed; | 139 | int trylock_failed; |
@@ -131,8 +156,9 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why) | |||
131 | b->bm_task == mdev->worker.task ? "worker" : "?"); | 156 | b->bm_task == mdev->worker.task ? "worker" : "?"); |
132 | mutex_lock(&b->bm_change); | 157 | mutex_lock(&b->bm_change); |
133 | } | 158 | } |
134 | if (__test_and_set_bit(BM_LOCKED, &b->bm_flags)) | 159 | if (BM_LOCKED_MASK & b->bm_flags) |
135 | dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); | 160 | dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); |
161 | b->bm_flags |= flags & BM_LOCKED_MASK; | ||
136 | 162 | ||
137 | b->bm_why = why; | 163 | b->bm_why = why; |
138 | b->bm_task = current; | 164 | b->bm_task = current; |
@@ -146,31 +172,137 @@ void drbd_bm_unlock(struct drbd_conf *mdev) | |||
146 | return; | 172 | return; |
147 | } | 173 | } |
148 | 174 | ||
149 | if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags)) | 175 | if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags)) |
150 | dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n"); | 176 | dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n"); |
151 | 177 | ||
178 | b->bm_flags &= ~BM_LOCKED_MASK; | ||
152 | b->bm_why = NULL; | 179 | b->bm_why = NULL; |
153 | b->bm_task = NULL; | 180 | b->bm_task = NULL; |
154 | mutex_unlock(&b->bm_change); | 181 | mutex_unlock(&b->bm_change); |
155 | } | 182 | } |
156 | 183 | ||
157 | /* word offset to long pointer */ | 184 | /* we store some "meta" info about our pages in page->private */ |
158 | static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset, const enum km_type km) | 185 | /* at a granularity of 4k storage per bitmap bit: |
186 | * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks | ||
187 | * 1<<38 bits, | ||
188 | * 1<<23 4k bitmap pages. | ||
189 | * Use 24 bits as page index, covers 2 peta byte storage | ||
190 | * at a granularity of 4k per bit. | ||
191 | * Used to report the failed page idx on io error from the endio handlers. | ||
192 | */ | ||
193 | #define BM_PAGE_IDX_MASK ((1UL<<24)-1) | ||
194 | /* this page is currently read in, or written back */ | ||
195 | #define BM_PAGE_IO_LOCK 31 | ||
196 | /* if there has been an IO error for this page */ | ||
197 | #define BM_PAGE_IO_ERROR 30 | ||
198 | /* this is to be able to intelligently skip disk IO, | ||
199 | * set if bits have been set since last IO. */ | ||
200 | #define BM_PAGE_NEED_WRITEOUT 29 | ||
201 | /* to mark for lazy writeout once syncer cleared all clearable bits, | ||
202 | * we if bits have been cleared since last IO. */ | ||
203 | #define BM_PAGE_LAZY_WRITEOUT 28 | ||
204 | |||
205 | /* store_page_idx uses non-atomic assingment. It is only used directly after | ||
206 | * allocating the page. All other bm_set_page_* and bm_clear_page_* need to | ||
207 | * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap | ||
208 | * changes) may happen from various contexts, and wait_on_bit/wake_up_bit | ||
209 | * requires it all to be atomic as well. */ | ||
210 | static void bm_store_page_idx(struct page *page, unsigned long idx) | ||
159 | { | 211 | { |
160 | struct page *page; | 212 | BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK)); |
161 | unsigned long page_nr; | 213 | page_private(page) |= idx; |
214 | } | ||
215 | |||
216 | static unsigned long bm_page_to_idx(struct page *page) | ||
217 | { | ||
218 | return page_private(page) & BM_PAGE_IDX_MASK; | ||
219 | } | ||
220 | |||
221 | /* As is very unlikely that the same page is under IO from more than one | ||
222 | * context, we can get away with a bit per page and one wait queue per bitmap. | ||
223 | */ | ||
224 | static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr) | ||
225 | { | ||
226 | struct drbd_bitmap *b = mdev->bitmap; | ||
227 | void *addr = &page_private(b->bm_pages[page_nr]); | ||
228 | wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); | ||
229 | } | ||
230 | |||
231 | static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr) | ||
232 | { | ||
233 | struct drbd_bitmap *b = mdev->bitmap; | ||
234 | void *addr = &page_private(b->bm_pages[page_nr]); | ||
235 | clear_bit(BM_PAGE_IO_LOCK, addr); | ||
236 | smp_mb__after_clear_bit(); | ||
237 | wake_up(&mdev->bitmap->bm_io_wait); | ||
238 | } | ||
239 | |||
240 | /* set _before_ submit_io, so it may be reset due to being changed | ||
241 | * while this page is in flight... will get submitted later again */ | ||
242 | static void bm_set_page_unchanged(struct page *page) | ||
243 | { | ||
244 | /* use cmpxchg? */ | ||
245 | clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); | ||
246 | clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); | ||
247 | } | ||
162 | 248 | ||
249 | static void bm_set_page_need_writeout(struct page *page) | ||
250 | { | ||
251 | set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); | ||
252 | } | ||
253 | |||
254 | static int bm_test_page_unchanged(struct page *page) | ||
255 | { | ||
256 | volatile const unsigned long *addr = &page_private(page); | ||
257 | return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0; | ||
258 | } | ||
259 | |||
260 | static void bm_set_page_io_err(struct page *page) | ||
261 | { | ||
262 | set_bit(BM_PAGE_IO_ERROR, &page_private(page)); | ||
263 | } | ||
264 | |||
265 | static void bm_clear_page_io_err(struct page *page) | ||
266 | { | ||
267 | clear_bit(BM_PAGE_IO_ERROR, &page_private(page)); | ||
268 | } | ||
269 | |||
270 | static void bm_set_page_lazy_writeout(struct page *page) | ||
271 | { | ||
272 | set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); | ||
273 | } | ||
274 | |||
275 | static int bm_test_page_lazy_writeout(struct page *page) | ||
276 | { | ||
277 | return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); | ||
278 | } | ||
279 | |||
280 | /* on a 32bit box, this would allow for exactly (2<<38) bits. */ | ||
281 | static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr) | ||
282 | { | ||
163 | /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */ | 283 | /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */ |
164 | page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3); | 284 | unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3); |
165 | BUG_ON(page_nr >= b->bm_number_of_pages); | 285 | BUG_ON(page_nr >= b->bm_number_of_pages); |
166 | page = b->bm_pages[page_nr]; | 286 | return page_nr; |
287 | } | ||
167 | 288 | ||
289 | static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) | ||
290 | { | ||
291 | /* page_nr = (bitnr/8) >> PAGE_SHIFT; */ | ||
292 | unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3); | ||
293 | BUG_ON(page_nr >= b->bm_number_of_pages); | ||
294 | return page_nr; | ||
295 | } | ||
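Both helpers above are pure shift arithmetic. A standalone sanity check of the shifts, assuming the common case of 4k pages and 64-bit longs (PAGE_SHIFT == 12 and LN2_BPL == 6 are assumptions of this sketch, not guaranteed by the patch):

#include <assert.h>

#define PAGE_SHIFT 12	/* 4k pages: 1 << (12+3) == 32768 bits per page */
#define LN2_BPL    6	/* 64-bit longs: 1 << (12-6+3) == 512 longs per page */

static unsigned int bit_to_page(unsigned long long bitnr)
{
	return bitnr >> (PAGE_SHIFT + 3);
}

static unsigned int word_to_page(unsigned long long_nr)
{
	return long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
}

int main(void)
{
	assert(bit_to_page(32767) == 0 && bit_to_page(32768) == 1);
	assert(word_to_page(511) == 0 && word_to_page(512) == 1);
	return 0;
}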
296 | |||
297 | static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km) | ||
298 | { | ||
299 | struct page *page = b->bm_pages[idx]; | ||
168 | return (unsigned long *) kmap_atomic(page, km); | 300 | return (unsigned long *) kmap_atomic(page, km); |
169 | } | 301 | } |
170 | 302 | ||
171 | static unsigned long * bm_map_paddr(struct drbd_bitmap *b, unsigned long offset) | 303 | static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) |
172 | { | 304 | { |
173 | return __bm_map_paddr(b, offset, KM_IRQ1); | 305 | return __bm_map_pidx(b, idx, KM_IRQ1); |
174 | } | 306 | } |
175 | 307 | ||
176 | static void __bm_unmap(unsigned long *p_addr, const enum km_type km) | 308 | static void __bm_unmap(unsigned long *p_addr, const enum km_type km) |
@@ -202,6 +334,7 @@ static void bm_unmap(unsigned long *p_addr) | |||
202 | * to be able to report device specific. | 334 | * to be able to report device specific. |
203 | */ | 335 | */ |
204 | 336 | ||
337 | |||
205 | static void bm_free_pages(struct page **pages, unsigned long number) | 338 | static void bm_free_pages(struct page **pages, unsigned long number) |
206 | { | 339 | { |
207 | unsigned long i; | 340 | unsigned long i; |
@@ -269,6 +402,9 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) | |||
269 | bm_vk_free(new_pages, vmalloced); | 402 | bm_vk_free(new_pages, vmalloced); |
270 | return NULL; | 403 | return NULL; |
271 | } | 404 | } |
405 | /* we want to know which page it is | ||
406 | * from the endio handlers */ | ||
407 | bm_store_page_idx(page, i); | ||
272 | new_pages[i] = page; | 408 | new_pages[i] = page; |
273 | } | 409 | } |
274 | } else { | 410 | } else { |
@@ -280,9 +416,9 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) | |||
280 | } | 416 | } |
281 | 417 | ||
282 | if (vmalloced) | 418 | if (vmalloced) |
283 | set_bit(BM_P_VMALLOCED, &b->bm_flags); | 419 | b->bm_flags |= BM_P_VMALLOCED; |
284 | else | 420 | else |
285 | clear_bit(BM_P_VMALLOCED, &b->bm_flags); | 421 | b->bm_flags &= ~BM_P_VMALLOCED; |
286 | 422 | ||
287 | return new_pages; | 423 | return new_pages; |
288 | } | 424 | } |
@@ -319,7 +455,7 @@ void drbd_bm_cleanup(struct drbd_conf *mdev) | |||
319 | { | 455 | { |
320 | ERR_IF (!mdev->bitmap) return; | 456 | ERR_IF (!mdev->bitmap) return; |
321 | bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages); | 457 | bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages); |
322 | bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags)); | 458 | bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags)); |
323 | kfree(mdev->bitmap); | 459 | kfree(mdev->bitmap); |
324 | mdev->bitmap = NULL; | 460 | mdev->bitmap = NULL; |
325 | } | 461 | } |
@@ -329,22 +465,39 @@ void drbd_bm_cleanup(struct drbd_conf *mdev) | |||
329 | * this masks out the remaining bits. | 465 | * this masks out the remaining bits. |
330 | * Returns the number of bits cleared. | 466 | * Returns the number of bits cleared. |
331 | */ | 467 | */ |
468 | #define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3)) | ||
469 | #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1) | ||
470 | #define BITS_PER_LONG_MASK (BITS_PER_LONG - 1) | ||
332 | static int bm_clear_surplus(struct drbd_bitmap *b) | 471 | static int bm_clear_surplus(struct drbd_bitmap *b) |
333 | { | 472 | { |
334 | const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1; | 473 | unsigned long mask; |
335 | size_t w = b->bm_bits >> LN2_BPL; | ||
336 | int cleared = 0; | ||
337 | unsigned long *p_addr, *bm; | 474 | unsigned long *p_addr, *bm; |
475 | int tmp; | ||
476 | int cleared = 0; | ||
338 | 477 | ||
339 | p_addr = bm_map_paddr(b, w); | 478 | /* number of bits modulo bits per page */ |
340 | bm = p_addr + MLPP(w); | 479 | tmp = (b->bm_bits & BITS_PER_PAGE_MASK); |
341 | if (w < b->bm_words) { | 480 | /* mask the used bits of the word containing the last bit */ |
481 | mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; | ||
482 | /* bitmap is always stored little endian, | ||
483 | * on disk and in core memory alike */ | ||
484 | mask = cpu_to_lel(mask); | ||
485 | |||
486 | p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); | ||
487 | bm = p_addr + (tmp/BITS_PER_LONG); | ||
488 | if (mask) { | ||
489 | /* If mask != 0, we are not exactly aligned, so bm now points | ||
490 | * to the long containing the last bit. | ||
491 | * If mask == 0, bm already points to the word immediately | ||
492 | * after the last (long word aligned) bit. */ | ||
342 | cleared = hweight_long(*bm & ~mask); | 493 | cleared = hweight_long(*bm & ~mask); |
343 | *bm &= mask; | 494 | *bm &= mask; |
344 | w++; bm++; | 495 | bm++; |
345 | } | 496 | } |
346 | 497 | ||
347 | if (w < b->bm_words) { | 498 | if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { |
499 | /* on a 32bit arch, we may need to zero out | ||
500 | * a padding long to align with a 64bit remote */ | ||
348 | cleared += hweight_long(*bm); | 501 | cleared += hweight_long(*bm); |
349 | *bm = 0; | 502 | *bm = 0; |
350 | } | 503 | } |
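The mask logic is easiest to follow with concrete numbers. A small illustration (assuming 64-bit longs): with bm_bits == 100, the last long holds 100 % 64 == 36 valid bits, so the mask keeps the low 36 bits and everything above is surplus:

#include <stdio.h>

int main(void)
{
	unsigned long bm_bits = 100;			  /* example bit count */
	unsigned long mask = (1UL << (bm_bits & 63)) - 1; /* low 36 bits */

	printf("mask = %#lx\n", mask);	/* prints 0xfffffffff */
	/* bm_clear_surplus(): *bm &= mask;  bm_set_surplus(): *bm |= ~mask; */
	return 0;
}

On big-endian hosts the mask is additionally byte-swapped with cpu_to_lel(), since the bitmap is now kept little endian in core memory as well as on disk.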
@@ -354,66 +507,75 @@ static int bm_clear_surplus(struct drbd_bitmap *b) | |||
354 | 507 | ||
355 | static void bm_set_surplus(struct drbd_bitmap *b) | 508 | static void bm_set_surplus(struct drbd_bitmap *b) |
356 | { | 509 | { |
357 | const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1; | 510 | unsigned long mask; |
358 | size_t w = b->bm_bits >> LN2_BPL; | ||
359 | unsigned long *p_addr, *bm; | 511 | unsigned long *p_addr, *bm; |
360 | 512 | int tmp; | |
361 | p_addr = bm_map_paddr(b, w); | 513 | |
362 | bm = p_addr + MLPP(w); | 514 | /* number of bits modulo bits per page */ |
363 | if (w < b->bm_words) { | 515 | tmp = (b->bm_bits & BITS_PER_PAGE_MASK); |
516 | /* mask the used bits of the word containing the last bit */ | ||
517 | mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; | ||
518 | /* bitmap is always stored little endian, | ||
519 | * on disk and in core memory alike */ | ||
520 | mask = cpu_to_lel(mask); | ||
521 | |||
522 | p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); | ||
523 | bm = p_addr + (tmp/BITS_PER_LONG); | ||
524 | if (mask) { | ||
525 | /* If mask != 0, we are not exactly aligned, so bm now points | ||
526 | * to the long containing the last bit. | ||
527 | * If mask == 0, bm already points to the word immediately | ||
528 | * after the last (long word aligned) bit. */ | ||
364 | *bm |= ~mask; | 529 | *bm |= ~mask; |
365 | bm++; w++; | 530 | bm++; |
366 | } | 531 | } |
367 | 532 | ||
368 | if (w < b->bm_words) { | 533 | if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { |
369 | *bm = ~(0UL); | 534 | /* on a 32bit arch, we may need to set a padding long |
535 | * to all ones to align with a 64bit remote */ | ||
536 | *bm = ~0UL; | ||
370 | } | 537 | } |
371 | bm_unmap(p_addr); | 538 | bm_unmap(p_addr); |
372 | } | 539 | } |
373 | 540 | ||
374 | static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endian) | 541 | /* you better not modify the bitmap while this is running, |
542 | * or its results will be stale */ | ||
543 | static unsigned long bm_count_bits(struct drbd_bitmap *b) | ||
375 | { | 544 | { |
376 | unsigned long *p_addr, *bm, offset = 0; | 545 | unsigned long *p_addr; |
377 | unsigned long bits = 0; | 546 | unsigned long bits = 0; |
378 | unsigned long i, do_now; | 547 | unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1; |
379 | 548 | int idx, i, last_word; | |
380 | while (offset < b->bm_words) { | 549 | |
381 | i = do_now = min_t(size_t, b->bm_words-offset, LWPP); | 550 | /* all but last page */ |
382 | p_addr = __bm_map_paddr(b, offset, KM_USER0); | 551 | for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) { |
383 | bm = p_addr + MLPP(offset); | 552 | p_addr = __bm_map_pidx(b, idx, KM_USER0); |
384 | while (i--) { | 553 | for (i = 0; i < LWPP; i++) |
385 | #ifndef __LITTLE_ENDIAN | 554 | bits += hweight_long(p_addr[i]); |
386 | if (swap_endian) | ||
387 | *bm = lel_to_cpu(*bm); | ||
388 | #endif | ||
389 | bits += hweight_long(*bm++); | ||
390 | } | ||
391 | __bm_unmap(p_addr, KM_USER0); | 555 | __bm_unmap(p_addr, KM_USER0); |
392 | offset += do_now; | ||
393 | cond_resched(); | 556 | cond_resched(); |
394 | } | 557 | } |
395 | 558 | /* last (or only) page */ | |
559 | last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL; | ||
560 | p_addr = __bm_map_pidx(b, idx, KM_USER0); | ||
561 | for (i = 0; i < last_word; i++) | ||
562 | bits += hweight_long(p_addr[i]); | ||
563 | p_addr[last_word] &= cpu_to_lel(mask); | ||
564 | bits += hweight_long(p_addr[last_word]); | ||
565 | /* 32bit arch, may have an unused padding long */ | ||
566 | if (BITS_PER_LONG == 32 && (last_word & 1) == 0) | ||
567 | p_addr[last_word+1] = 0; | ||
568 | __bm_unmap(p_addr, KM_USER0); | ||
396 | return bits; | 569 | return bits; |
397 | } | 570 | } |
398 | 571 | ||
399 | static unsigned long bm_count_bits(struct drbd_bitmap *b) | ||
400 | { | ||
401 | return __bm_count_bits(b, 0); | ||
402 | } | ||
403 | |||
404 | static unsigned long bm_count_bits_swap_endian(struct drbd_bitmap *b) | ||
405 | { | ||
406 | return __bm_count_bits(b, 1); | ||
407 | } | ||
408 | |||
409 | /* offset and len in long words.*/ | 572 | /* offset and len in long words.*/ |
410 | static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) | 573 | static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) |
411 | { | 574 | { |
412 | unsigned long *p_addr, *bm; | 575 | unsigned long *p_addr, *bm; |
576 | unsigned int idx; | ||
413 | size_t do_now, end; | 577 | size_t do_now, end; |
414 | 578 | ||
415 | #define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512) | ||
416 | |||
417 | end = offset + len; | 579 | end = offset + len; |
418 | 580 | ||
419 | if (end > b->bm_words) { | 581 | if (end > b->bm_words) { |
@@ -423,15 +585,16 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) | |||
423 | 585 | ||
424 | while (offset < end) { | 586 | while (offset < end) { |
425 | do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset; | 587 | do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset; |
426 | p_addr = bm_map_paddr(b, offset); | 588 | idx = bm_word_to_page_idx(b, offset); |
589 | p_addr = bm_map_pidx(b, idx); | ||
427 | bm = p_addr + MLPP(offset); | 590 | bm = p_addr + MLPP(offset); |
428 | if (bm+do_now > p_addr + LWPP) { | 591 | if (bm+do_now > p_addr + LWPP) { |
429 | printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n", | 592 | printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n", |
430 | p_addr, bm, (int)do_now); | 593 | p_addr, bm, (int)do_now); |
431 | break; /* breaks to after catch_oob_access_end() only! */ | 594 | } else |
432 | } | 595 | memset(bm, c, do_now * sizeof(long)); |
433 | memset(bm, c, do_now * sizeof(long)); | ||
434 | bm_unmap(p_addr); | 596 | bm_unmap(p_addr); |
597 | bm_set_page_need_writeout(b->bm_pages[idx]); | ||
435 | offset += do_now; | 598 | offset += do_now; |
436 | } | 599 | } |
437 | } | 600 | } |
@@ -447,7 +610,7 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) | |||
447 | int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) | 610 | int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) |
448 | { | 611 | { |
449 | struct drbd_bitmap *b = mdev->bitmap; | 612 | struct drbd_bitmap *b = mdev->bitmap; |
450 | unsigned long bits, words, owords, obits, *p_addr, *bm; | 613 | unsigned long bits, words, owords, obits; |
451 | unsigned long want, have, onpages; /* number of pages */ | 614 | unsigned long want, have, onpages; /* number of pages */ |
452 | struct page **npages, **opages = NULL; | 615 | struct page **npages, **opages = NULL; |
453 | int err = 0, growing; | 616 | int err = 0, growing; |
@@ -455,7 +618,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) | |||
455 | 618 | ||
456 | ERR_IF(!b) return -ENOMEM; | 619 | ERR_IF(!b) return -ENOMEM; |
457 | 620 | ||
458 | drbd_bm_lock(mdev, "resize"); | 621 | drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK); |
459 | 622 | ||
460 | dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n", | 623 | dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n", |
461 | (unsigned long long)capacity); | 624 | (unsigned long long)capacity); |
@@ -463,7 +626,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) | |||
463 | if (capacity == b->bm_dev_capacity) | 626 | if (capacity == b->bm_dev_capacity) |
464 | goto out; | 627 | goto out; |
465 | 628 | ||
466 | opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags); | 629 | opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags); |
467 | 630 | ||
468 | if (capacity == 0) { | 631 | if (capacity == 0) { |
469 | spin_lock_irq(&b->bm_lock); | 632 | spin_lock_irq(&b->bm_lock); |
@@ -491,18 +654,23 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) | |||
491 | words = ALIGN(bits, 64) >> LN2_BPL; | 654 | words = ALIGN(bits, 64) >> LN2_BPL; |
492 | 655 | ||
493 | if (get_ldev(mdev)) { | 656 | if (get_ldev(mdev)) { |
494 | D_ASSERT((u64)bits <= (((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12)); | 657 | u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12; |
495 | put_ldev(mdev); | 658 | put_ldev(mdev); |
659 | if (bits > bits_on_disk) { | ||
660 | dev_info(DEV, "bits = %lu\n", bits); | ||
661 | dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk); | ||
662 | err = -ENOSPC; | ||
663 | goto out; | ||
664 | } | ||
496 | } | 665 | } |
497 | 666 | ||
498 | /* one extra long to catch off by one errors */ | 667 | want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT; |
499 | want = ALIGN((words+1)*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT; | ||
500 | have = b->bm_number_of_pages; | 668 | have = b->bm_number_of_pages; |
501 | if (want == have) { | 669 | if (want == have) { |
502 | D_ASSERT(b->bm_pages != NULL); | 670 | D_ASSERT(b->bm_pages != NULL); |
503 | npages = b->bm_pages; | 671 | npages = b->bm_pages; |
504 | } else { | 672 | } else { |
505 | if (FAULT_ACTIVE(mdev, DRBD_FAULT_BM_ALLOC)) | 673 | if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC)) |
506 | npages = NULL; | 674 | npages = NULL; |
507 | else | 675 | else |
508 | npages = bm_realloc_pages(b, want); | 676 | npages = bm_realloc_pages(b, want); |
@@ -542,11 +710,6 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) | |||
542 | bm_free_pages(opages + want, have - want); | 710 | bm_free_pages(opages + want, have - want); |
543 | } | 711 | } |
544 | 712 | ||
545 | p_addr = bm_map_paddr(b, words); | ||
546 | bm = p_addr + MLPP(words); | ||
547 | *bm = DRBD_MAGIC; | ||
548 | bm_unmap(p_addr); | ||
549 | |||
550 | (void)bm_clear_surplus(b); | 713 | (void)bm_clear_surplus(b); |
551 | 714 | ||
552 | spin_unlock_irq(&b->bm_lock); | 715 | spin_unlock_irq(&b->bm_lock); |
@@ -554,7 +717,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) | |||
554 | bm_vk_free(opages, opages_vmalloced); | 717 | bm_vk_free(opages, opages_vmalloced); |
555 | if (!growing) | 718 | if (!growing) |
556 | b->bm_set = bm_count_bits(b); | 719 | b->bm_set = bm_count_bits(b); |
557 | dev_info(DEV, "resync bitmap: bits=%lu words=%lu\n", bits, words); | 720 | dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want); |
558 | 721 | ||
559 | out: | 722 | out: |
560 | drbd_bm_unlock(mdev); | 723 | drbd_bm_unlock(mdev); |
@@ -624,6 +787,7 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, | |||
624 | struct drbd_bitmap *b = mdev->bitmap; | 787 | struct drbd_bitmap *b = mdev->bitmap; |
625 | unsigned long *p_addr, *bm; | 788 | unsigned long *p_addr, *bm; |
626 | unsigned long word, bits; | 789 | unsigned long word, bits; |
790 | unsigned int idx; | ||
627 | size_t end, do_now; | 791 | size_t end, do_now; |
628 | 792 | ||
629 | end = offset + number; | 793 | end = offset + number; |
@@ -638,16 +802,18 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, | |||
638 | spin_lock_irq(&b->bm_lock); | 802 | spin_lock_irq(&b->bm_lock); |
639 | while (offset < end) { | 803 | while (offset < end) { |
640 | do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; | 804 | do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; |
641 | p_addr = bm_map_paddr(b, offset); | 805 | idx = bm_word_to_page_idx(b, offset); |
806 | p_addr = bm_map_pidx(b, idx); | ||
642 | bm = p_addr + MLPP(offset); | 807 | bm = p_addr + MLPP(offset); |
643 | offset += do_now; | 808 | offset += do_now; |
644 | while (do_now--) { | 809 | while (do_now--) { |
645 | bits = hweight_long(*bm); | 810 | bits = hweight_long(*bm); |
646 | word = *bm | lel_to_cpu(*buffer++); | 811 | word = *bm | *buffer++; |
647 | *bm++ = word; | 812 | *bm++ = word; |
648 | b->bm_set += hweight_long(word) - bits; | 813 | b->bm_set += hweight_long(word) - bits; |
649 | } | 814 | } |
650 | bm_unmap(p_addr); | 815 | bm_unmap(p_addr); |
816 | bm_set_page_need_writeout(b->bm_pages[idx]); | ||
651 | } | 817 | } |
652 | /* with 32bit <-> 64bit cross-platform connect | 818 | /* with 32bit <-> 64bit cross-platform connect |
653 | * this is only correct for current usage, | 819 | * this is only correct for current usage, |
@@ -656,7 +822,6 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, | |||
656 | */ | 822 | */ |
657 | if (end == b->bm_words) | 823 | if (end == b->bm_words) |
658 | b->bm_set -= bm_clear_surplus(b); | 824 | b->bm_set -= bm_clear_surplus(b); |
659 | |||
660 | spin_unlock_irq(&b->bm_lock); | 825 | spin_unlock_irq(&b->bm_lock); |
661 | } | 826 | } |
662 | 827 | ||
@@ -686,11 +851,11 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number, | |||
686 | else { | 851 | else { |
687 | while (offset < end) { | 852 | while (offset < end) { |
688 | do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; | 853 | do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; |
689 | p_addr = bm_map_paddr(b, offset); | 854 | p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset)); |
690 | bm = p_addr + MLPP(offset); | 855 | bm = p_addr + MLPP(offset); |
691 | offset += do_now; | 856 | offset += do_now; |
692 | while (do_now--) | 857 | while (do_now--) |
693 | *buffer++ = cpu_to_lel(*bm++); | 858 | *buffer++ = *bm++; |
694 | bm_unmap(p_addr); | 859 | bm_unmap(p_addr); |
695 | } | 860 | } |
696 | } | 861 | } |
@@ -724,9 +889,22 @@ void drbd_bm_clear_all(struct drbd_conf *mdev) | |||
724 | spin_unlock_irq(&b->bm_lock); | 889 | spin_unlock_irq(&b->bm_lock); |
725 | } | 890 | } |
726 | 891 | ||
892 | struct bm_aio_ctx { | ||
893 | struct drbd_conf *mdev; | ||
894 | atomic_t in_flight; | ||
895 | struct completion done; | ||
896 | unsigned flags; | ||
897 | #define BM_AIO_COPY_PAGES 1 | ||
898 | int error; | ||
899 | }; | ||
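struct bm_aio_ctx uses the usual "bias the counter by one" idiom: in_flight starts at 1 so that completions racing with submission cannot complete() the context early. A self-contained userspace sketch of the idiom (C11 atomics; illustration only):

#include <assert.h>
#include <stdatomic.h>

struct ctx {
	atomic_int in_flight;
	int done;
};

static void endio(struct ctx *c)
{
	/* the last reference to drop signals completion */
	if (atomic_fetch_sub(&c->in_flight, 1) == 1)
		c->done = 1;
}

int main(void)
{
	struct ctx c = { .in_flight = 1, .done = 0 }; /* submitter's bias */
	int i;

	for (i = 0; i < 3; i++) {
		atomic_fetch_add(&c.in_flight, 1); /* one per submitted bio */
		endio(&c);                         /* completion (here: inline) */
	}
	assert(!c.done);  /* all bios done, but the submitter still holds its ref */
	endio(&c);        /* drop the bias: only now may 'done' fire */
	assert(c.done);
	return 0;
}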
900 | |||
901 | /* bv_page may be a copy, or may be the original */ | ||
727 | static void bm_async_io_complete(struct bio *bio, int error) | 902 | static void bm_async_io_complete(struct bio *bio, int error) |
728 | { | 903 | { |
729 | struct drbd_bitmap *b = bio->bi_private; | 904 | struct bm_aio_ctx *ctx = bio->bi_private; |
905 | struct drbd_conf *mdev = ctx->mdev; | ||
906 | struct drbd_bitmap *b = mdev->bitmap; | ||
907 | unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); | ||
730 | int uptodate = bio_flagged(bio, BIO_UPTODATE); | 908 | int uptodate = bio_flagged(bio, BIO_UPTODATE); |
731 | 909 | ||
732 | 910 | ||
@@ -737,38 +915,83 @@ static void bm_async_io_complete(struct bio *bio, int error) | |||
737 | if (!error && !uptodate) | 915 | if (!error && !uptodate) |
738 | error = -EIO; | 916 | error = -EIO; |
739 | 917 | ||
918 | if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 && | ||
919 | !bm_test_page_unchanged(b->bm_pages[idx])) | ||
920 | dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx); | ||
921 | |||
740 | if (error) { | 922 | if (error) { |
741 | /* doh. what now? | 923 | /* ctx->error will hold the error code of the bio that completed |
742 | * for now, set all bits, and flag MD_IO_ERROR */ | 924 | * last with a non-zero error, in case error codes differ. */ |
743 | __set_bit(BM_MD_IO_ERROR, &b->bm_flags); | 925 | ctx->error = error; |
926 | bm_set_page_io_err(b->bm_pages[idx]); | ||
927 | /* Not identical to on disk version of it. | ||
928 | * Is BM_PAGE_IO_ERROR enough? */ | ||
929 | if (__ratelimit(&drbd_ratelimit_state)) | ||
930 | dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n", | ||
931 | error, idx); | ||
932 | } else { | ||
933 | bm_clear_page_io_err(b->bm_pages[idx]); | ||
934 | dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx); | ||
744 | } | 935 | } |
745 | if (atomic_dec_and_test(&b->bm_async_io)) | 936 | |
746 | wake_up(&b->bm_io_wait); | 937 | bm_page_unlock_io(mdev, idx); |
938 | |||
939 | /* FIXME give back to page pool */ | ||
940 | if (ctx->flags & BM_AIO_COPY_PAGES) | ||
941 | put_page(bio->bi_io_vec[0].bv_page); | ||
747 | 942 | ||
748 | bio_put(bio); | 943 | bio_put(bio); |
944 | |||
945 | if (atomic_dec_and_test(&ctx->in_flight)) | ||
946 | complete(&ctx->done); | ||
749 | } | 947 | } |
750 | 948 | ||
751 | static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int page_nr, int rw) __must_hold(local) | 949 | static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local) |
752 | { | 950 | { |
753 | /* we are process context. we always get a bio */ | 951 | /* we are process context. we always get a bio */ |
754 | struct bio *bio = bio_alloc(GFP_KERNEL, 1); | 952 | struct bio *bio = bio_alloc(GFP_KERNEL, 1); |
953 | struct drbd_conf *mdev = ctx->mdev; | ||
954 | struct drbd_bitmap *b = mdev->bitmap; | ||
955 | struct page *page; | ||
755 | unsigned int len; | 956 | unsigned int len; |
957 | |||
756 | sector_t on_disk_sector = | 958 | sector_t on_disk_sector = |
757 | mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset; | 959 | mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset; |
758 | on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9); | 960 | on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9); |
759 | 961 | ||
760 | /* this might happen with very small | 962 | /* this might happen with very small |
761 | * flexible external meta data device */ | 963 | * flexible external meta data device, |
964 | * or with PAGE_SIZE > 4k */ | ||
762 | len = min_t(unsigned int, PAGE_SIZE, | 965 | len = min_t(unsigned int, PAGE_SIZE, |
763 | (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9); | 966 | (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9); |
764 | 967 | ||
968 | /* serialize IO on this page */ | ||
969 | bm_page_lock_io(mdev, page_nr); | ||
970 | /* before memcpy and submit, | ||
971 | * so it can be redirtied any time */ | ||
972 | bm_set_page_unchanged(b->bm_pages[page_nr]); | ||
973 | |||
974 | if (ctx->flags & BM_AIO_COPY_PAGES) { | ||
975 | /* FIXME alloc_page is good enough for now, but actually needs | ||
976 | * to use pre-allocated page pool */ | ||
977 | void *src, *dest; | ||
978 | page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT); | ||
979 | dest = kmap_atomic(page, KM_USER0); | ||
980 | src = kmap_atomic(b->bm_pages[page_nr], KM_USER1); | ||
981 | memcpy(dest, src, PAGE_SIZE); | ||
982 | kunmap_atomic(src, KM_USER1); | ||
983 | kunmap_atomic(dest, KM_USER0); | ||
984 | bm_store_page_idx(page, page_nr); | ||
985 | } else | ||
986 | page = b->bm_pages[page_nr]; | ||
987 | |||
765 | bio->bi_bdev = mdev->ldev->md_bdev; | 988 | bio->bi_bdev = mdev->ldev->md_bdev; |
766 | bio->bi_sector = on_disk_sector; | 989 | bio->bi_sector = on_disk_sector; |
767 | bio_add_page(bio, b->bm_pages[page_nr], len, 0); | 990 | bio_add_page(bio, page, len, 0); |
768 | bio->bi_private = b; | 991 | bio->bi_private = ctx; |
769 | bio->bi_end_io = bm_async_io_complete; | 992 | bio->bi_end_io = bm_async_io_complete; |
770 | 993 | ||
771 | if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { | 994 | if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { |
772 | bio->bi_rw |= rw; | 995 | bio->bi_rw |= rw; |
773 | bio_endio(bio, -EIO); | 996 | bio_endio(bio, -EIO); |
774 | } else { | 997 | } else { |
@@ -776,87 +999,84 @@ static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int | |||
776 | } | 999 | } |
777 | } | 1000 | } |
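The on-disk placement computed above is a plain linear mapping from bitmap page index to 512-byte sectors, starting at the meta-data bitmap offset. A quick check of the shift, assuming 4k pages so each bitmap page covers 8 sectors (the start sector here is made up):

#include <assert.h>

#define PAGE_SHIFT 12	/* assumed: 4k pages, i.e. 8 sectors per page */

static unsigned long long page_to_sector(unsigned long long md_start,
					 unsigned int page_nr)
{
	return md_start + ((unsigned long long)page_nr << (PAGE_SHIFT - 9));
}

int main(void)
{
	assert(page_to_sector(1000, 0) == 1000);
	assert(page_to_sector(1000, 1) == 1008);	/* 4096/512 == 8 */
	assert(page_to_sector(1000, 5) == 1040);
	return 0;
}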
778 | 1001 | ||
779 | # if defined(__LITTLE_ENDIAN) | ||
780 | /* nothing to do, on disk == in memory */ | ||
781 | # define bm_cpu_to_lel(x) ((void)0) | ||
782 | # else | ||
783 | static void bm_cpu_to_lel(struct drbd_bitmap *b) | ||
784 | { | ||
785 | /* need to cpu_to_lel all the pages ... | ||
786 | * this may be optimized by using | ||
787 | * cpu_to_lel(-1) == -1 and cpu_to_lel(0) == 0; | ||
788 | * the following is still not optimal, but better than nothing */ | ||
789 | unsigned int i; | ||
790 | unsigned long *p_addr, *bm; | ||
791 | if (b->bm_set == 0) { | ||
792 | /* no page at all; avoid swap if all is 0 */ | ||
793 | i = b->bm_number_of_pages; | ||
794 | } else if (b->bm_set == b->bm_bits) { | ||
795 | /* only the last page */ | ||
796 | i = b->bm_number_of_pages - 1; | ||
797 | } else { | ||
798 | /* all pages */ | ||
799 | i = 0; | ||
800 | } | ||
801 | for (; i < b->bm_number_of_pages; i++) { | ||
802 | p_addr = kmap_atomic(b->bm_pages[i], KM_USER0); | ||
803 | for (bm = p_addr; bm < p_addr + PAGE_SIZE/sizeof(long); bm++) | ||
804 | *bm = cpu_to_lel(*bm); | ||
805 | kunmap_atomic(p_addr, KM_USER0); | ||
806 | } | ||
807 | } | ||
808 | # endif | ||
809 | /* lel_to_cpu == cpu_to_lel */ | ||
810 | # define bm_lel_to_cpu(x) bm_cpu_to_lel(x) | ||
811 | |||
812 | /* | 1002 | /* |
813 | * bm_rw: read/write the whole bitmap from/to its on disk location. | 1003 | * bm_rw: read/write the whole bitmap from/to its on disk location. |
814 | */ | 1004 | */ |
815 | static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local) | 1005 | static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local) |
816 | { | 1006 | { |
1007 | struct bm_aio_ctx ctx = { | ||
1008 | .mdev = mdev, | ||
1009 | .in_flight = ATOMIC_INIT(1), | ||
1010 | .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done), | ||
1011 | .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0, | ||
1012 | }; | ||
817 | struct drbd_bitmap *b = mdev->bitmap; | 1013 | struct drbd_bitmap *b = mdev->bitmap; |
818 | /* sector_t sector; */ | 1014 | int num_pages, i, count = 0; |
819 | int bm_words, num_pages, i; | ||
820 | unsigned long now; | 1015 | unsigned long now; |
821 | char ppb[10]; | 1016 | char ppb[10]; |
822 | int err = 0; | 1017 | int err = 0; |
823 | 1018 | ||
824 | WARN_ON(!bm_is_locked(b)); | 1019 | /* |
825 | 1020 | * We are protected against bitmap disappearing/resizing by holding an | |
826 | /* no spinlock here, the drbd_bm_lock should be enough! */ | 1021 | * ldev reference (caller must have called get_ldev()). |
827 | 1022 | * For read/write, we are protected against changes to the bitmap by | |
828 | bm_words = drbd_bm_words(mdev); | 1023 | * the bitmap lock (see drbd_bitmap_io). |
829 | num_pages = (bm_words*sizeof(long) + PAGE_SIZE-1) >> PAGE_SHIFT; | 1024 | * For lazy writeout, we don't care for ongoing changes to the bitmap, |
1025 | * as we submit copies of pages anyways. | ||
1026 | */ | ||
1027 | if (!ctx.flags) | ||
1028 | WARN_ON(!(BM_LOCKED_MASK & b->bm_flags)); | ||
830 | 1029 | ||
831 | /* on disk bitmap is little endian */ | 1030 | num_pages = b->bm_number_of_pages; |
832 | if (rw == WRITE) | ||
833 | bm_cpu_to_lel(b); | ||
834 | 1031 | ||
835 | now = jiffies; | 1032 | now = jiffies; |
836 | atomic_set(&b->bm_async_io, num_pages); | ||
837 | __clear_bit(BM_MD_IO_ERROR, &b->bm_flags); | ||
838 | 1033 | ||
839 | /* let the layers below us try to merge these bios... */ | 1034 | /* let the layers below us try to merge these bios... */ |
840 | for (i = 0; i < num_pages; i++) | 1035 | for (i = 0; i < num_pages; i++) { |
841 | bm_page_io_async(mdev, b, i, rw); | 1036 | /* ignore completely unchanged pages */ |
1037 | if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) | ||
1038 | break; | ||
1039 | if (rw & WRITE) { | ||
1040 | if (bm_test_page_unchanged(b->bm_pages[i])) { | ||
1041 | dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i); | ||
1042 | continue; | ||
1043 | } | ||
1044 | /* during lazy writeout, | ||
1045 | * ignore those pages not marked for lazy writeout. */ | ||
1046 | if (lazy_writeout_upper_idx && | ||
1047 | !bm_test_page_lazy_writeout(b->bm_pages[i])) { | ||
1048 | dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i); | ||
1049 | continue; | ||
1050 | } | ||
1051 | } | ||
1052 | atomic_inc(&ctx.in_flight); | ||
1053 | bm_page_io_async(&ctx, i, rw); | ||
1054 | ++count; | ||
1055 | cond_resched(); | ||
1056 | } | ||
842 | 1057 | ||
843 | wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0); | 1058 | /* |
1059 | * We initialize ctx.in_flight to one to make sure bm_async_io_complete | ||
1060 | * will not complete() early, and decrement / test it here. If there | ||
1061 | * are still some bios in flight, we need to wait for them here. | ||
1062 | */ | ||
1063 | if (!atomic_dec_and_test(&ctx.in_flight)) | ||
1064 | wait_for_completion(&ctx.done); | ||
1065 | dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n", | ||
1066 | rw == WRITE ? "WRITE" : "READ", | ||
1067 | count, jiffies - now); | ||
844 | 1068 | ||
845 | if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) { | 1069 | if (ctx.error) { |
846 | dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); | 1070 | dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); |
847 | drbd_chk_io_error(mdev, 1, TRUE); | 1071 | drbd_chk_io_error(mdev, 1, true); |
848 | err = -EIO; | 1072 | err = -EIO; /* ctx.error ? */ |
849 | } | 1073 | } |
850 | 1074 | ||
851 | now = jiffies; | 1075 | now = jiffies; |
852 | if (rw == WRITE) { | 1076 | if (rw == WRITE) { |
853 | /* swap back endianness */ | ||
854 | bm_lel_to_cpu(b); | ||
855 | /* flush bitmap to stable storage */ | ||
856 | drbd_md_flush(mdev); | 1077 | drbd_md_flush(mdev); |
857 | } else /* rw == READ */ { | 1078 | } else /* rw == READ */ { |
858 | /* just read, if necessary adjust endianness */ | 1079 | b->bm_set = bm_count_bits(b); |
859 | b->bm_set = bm_count_bits_swap_endian(b); | ||
860 | dev_info(DEV, "recounting of set bits took additional %lu jiffies\n", | 1080 | dev_info(DEV, "recounting of set bits took additional %lu jiffies\n", |
861 | jiffies - now); | 1081 | jiffies - now); |
862 | } | 1082 | } |
@@ -874,112 +1094,128 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local) | |||
874 | */ | 1094 | */ |
875 | int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local) | 1095 | int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local) |
876 | { | 1096 | { |
877 | return bm_rw(mdev, READ); | 1097 | return bm_rw(mdev, READ, 0); |
878 | } | 1098 | } |
879 | 1099 | ||
880 | /** | 1100 | /** |
881 | * drbd_bm_write() - Write the whole bitmap to its on disk location. | 1101 | * drbd_bm_write() - Write the whole bitmap to its on disk location. |
882 | * @mdev: DRBD device. | 1102 | * @mdev: DRBD device. |
1103 | * | ||
1104 | * Will only write pages that have changed since last IO. | ||
883 | */ | 1105 | */ |
884 | int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local) | 1106 | int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local) |
885 | { | 1107 | { |
886 | return bm_rw(mdev, WRITE); | 1108 | return bm_rw(mdev, WRITE, 0); |
887 | } | 1109 | } |
888 | 1110 | ||
889 | /** | 1111 | /** |
890 | * drbd_bm_write_sect: Writes a 512 (MD_SECTOR_SIZE) byte piece of the bitmap | 1112 | * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed. |
891 | * @mdev: DRBD device. | 1113 | * @mdev: DRBD device. |
892 | * @enr: Extent number in the resync lru (happens to be sector offset) | 1114 | * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages |
893 | * | ||
894 | * The BM_EXT_SIZE is on purpose exactly the amount of the bitmap covered | ||
895 | * by a single sector write. Therefore enr == sector offset from the | ||
896 | * start of the bitmap. | ||
897 | */ | 1115 | */ |
898 | int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local) | 1116 | int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local) |
899 | { | 1117 | { |
900 | sector_t on_disk_sector = enr + mdev->ldev->md.md_offset | 1118 | return bm_rw(mdev, WRITE, upper_idx); |
901 | + mdev->ldev->md.bm_offset; | 1119 | } |
902 | int bm_words, num_words, offset; | 1120 | |
903 | int err = 0; | ||
904 | 1121 | ||
905 | mutex_lock(&mdev->md_io_mutex); | 1122 | /** |
906 | bm_words = drbd_bm_words(mdev); | 1123 | * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap |
907 | offset = S2W(enr); /* word offset into bitmap */ | 1124 | * @mdev: DRBD device. |
908 | num_words = min(S2W(1), bm_words - offset); | 1125 | * @idx: bitmap page index |
909 | if (num_words < S2W(1)) | 1126 | * |
910 | memset(page_address(mdev->md_io_page), 0, MD_SECTOR_SIZE); | 1127 | * We don't want to special case on logical_block_size of the backend device, |
911 | drbd_bm_get_lel(mdev, offset, num_words, | 1128 | * so we submit PAGE_SIZE aligned pieces. |
912 | page_address(mdev->md_io_page)); | 1129 | * Note that on "most" systems, PAGE_SIZE is 4k. |
913 | if (!drbd_md_sync_page_io(mdev, mdev->ldev, on_disk_sector, WRITE)) { | 1130 | * |
914 | int i; | 1131 | * In case this becomes an issue on systems with larger PAGE_SIZE, |
915 | err = -EIO; | 1132 | * we may want to change this again to write 4k aligned 4k pieces. |
916 | dev_err(DEV, "IO ERROR writing bitmap sector %lu " | 1133 | */ |
917 | "(meta-disk sector %llus)\n", | 1134 | int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local) |
918 | enr, (unsigned long long)on_disk_sector); | 1135 | { |
919 | drbd_chk_io_error(mdev, 1, TRUE); | 1136 | struct bm_aio_ctx ctx = { |
920 | for (i = 0; i < AL_EXT_PER_BM_SECT; i++) | 1137 | .mdev = mdev, |
921 | drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i); | 1138 | .in_flight = ATOMIC_INIT(1), |
1139 | .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done), | ||
1140 | .flags = BM_AIO_COPY_PAGES, | ||
1141 | }; | ||
1142 | |||
1143 | if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) { | ||
1144 | dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx); | ||
1145 | return 0; | ||
922 | } | 1146 | } |
1147 | |||
1148 | bm_page_io_async(&ctx, idx, WRITE_SYNC); | ||
1149 | wait_for_completion(&ctx.done); | ||
1150 | |||
1151 | if (ctx.error) | ||
1152 | drbd_chk_io_error(mdev, 1, true); | ||
1153 | /* that should force detach, so the in memory bitmap will be | ||
1154 | * gone in a moment as well. */ | ||
1155 | |||
923 | mdev->bm_writ_cnt++; | 1156 | mdev->bm_writ_cnt++; |
924 | mutex_unlock(&mdev->md_io_mutex); | 1157 | return ctx.error; |
925 | return err; | ||
926 | } | 1158 | } |
927 | 1159 | ||
928 | /* NOTE | 1160 | /* NOTE |
929 | * find_first_bit returns int, we return unsigned long. | 1161 | * find_first_bit returns int, we return unsigned long. |
930 | * should not make much difference anyways, but ... | 1162 | * For this to work on 32bit arch with bitnumbers > (1<<32), |
1163 | * we'd need to return u64, and get a whole lot of other places | ||
1164 | * fixed where we still use unsigned long. | ||
931 | * | 1165 | * |
932 | * this returns a bit number, NOT a sector! | 1166 | * this returns a bit number, NOT a sector! |
933 | */ | 1167 | */ |
934 | #define BPP_MASK ((1UL << (PAGE_SHIFT+3)) - 1) | ||
935 | static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, | 1168 | static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, |
936 | const int find_zero_bit, const enum km_type km) | 1169 | const int find_zero_bit, const enum km_type km) |
937 | { | 1170 | { |
938 | struct drbd_bitmap *b = mdev->bitmap; | 1171 | struct drbd_bitmap *b = mdev->bitmap; |
939 | unsigned long i = -1UL; | ||
940 | unsigned long *p_addr; | 1172 | unsigned long *p_addr; |
941 | unsigned long bit_offset; /* bit offset of the mapped page. */ | 1173 | unsigned long bit_offset; |
1174 | unsigned i; | ||
1175 | |||
942 | 1176 | ||
943 | if (bm_fo > b->bm_bits) { | 1177 | if (bm_fo > b->bm_bits) { |
944 | dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); | 1178 | dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); |
1179 | bm_fo = DRBD_END_OF_BITMAP; | ||
945 | } else { | 1180 | } else { |
946 | while (bm_fo < b->bm_bits) { | 1181 | while (bm_fo < b->bm_bits) { |
947 | unsigned long offset; | 1182 | /* bit offset of the first bit in the page */ |
948 | bit_offset = bm_fo & ~BPP_MASK; /* bit offset of the page */ | 1183 | bit_offset = bm_fo & ~BITS_PER_PAGE_MASK; |
949 | offset = bit_offset >> LN2_BPL; /* word offset of the page */ | 1184 | p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km); |
950 | p_addr = __bm_map_paddr(b, offset, km); | ||
951 | 1185 | ||
952 | if (find_zero_bit) | 1186 | if (find_zero_bit) |
953 | i = find_next_zero_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); | 1187 | i = generic_find_next_zero_le_bit(p_addr, |
1188 | PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); | ||
954 | else | 1189 | else |
955 | i = find_next_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); | 1190 | i = generic_find_next_le_bit(p_addr, |
1191 | PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); | ||
956 | 1192 | ||
957 | __bm_unmap(p_addr, km); | 1193 | __bm_unmap(p_addr, km); |
958 | if (i < PAGE_SIZE*8) { | 1194 | if (i < PAGE_SIZE*8) { |
959 | i = bit_offset + i; | 1195 | bm_fo = bit_offset + i; |
960 | if (i >= b->bm_bits) | 1196 | if (bm_fo >= b->bm_bits) |
961 | break; | 1197 | break; |
962 | goto found; | 1198 | goto found; |
963 | } | 1199 | } |
964 | bm_fo = bit_offset + PAGE_SIZE*8; | 1200 | bm_fo = bit_offset + PAGE_SIZE*8; |
965 | } | 1201 | } |
966 | i = -1UL; | 1202 | bm_fo = DRBD_END_OF_BITMAP; |
967 | } | 1203 | } |
968 | found: | 1204 | found: |
969 | return i; | 1205 | return bm_fo; |
970 | } | 1206 | } |
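The move from find_next_bit() to generic_find_next_le_bit() only changes behavior on big-endian hosts: because the bitmap is stored little endian, bit n must always live in byte n/8, bit n%8, regardless of host byte order. A host-independent test-bit sketch over a byte array (illustration only, not the kernel implementation):

#include <assert.h>
#include <stddef.h>

/* little-endian bit numbering: bit n is bit (n % 8) of byte (n / 8) */
static int test_le_bit(const unsigned char *bm, size_t n)
{
	return (bm[n / 8] >> (n % 8)) & 1;
}

int main(void)
{
	unsigned char bm[2] = { 0x01, 0x80 }; /* bits 0 and 15 set */

	assert(test_le_bit(bm, 0) && test_le_bit(bm, 15));
	assert(!test_le_bit(bm, 7) && !test_le_bit(bm, 8));
	return 0;
}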
971 | 1207 | ||
972 | static unsigned long bm_find_next(struct drbd_conf *mdev, | 1208 | static unsigned long bm_find_next(struct drbd_conf *mdev, |
973 | unsigned long bm_fo, const int find_zero_bit) | 1209 | unsigned long bm_fo, const int find_zero_bit) |
974 | { | 1210 | { |
975 | struct drbd_bitmap *b = mdev->bitmap; | 1211 | struct drbd_bitmap *b = mdev->bitmap; |
976 | unsigned long i = -1UL; | 1212 | unsigned long i = DRBD_END_OF_BITMAP; |
977 | 1213 | ||
978 | ERR_IF(!b) return i; | 1214 | ERR_IF(!b) return i; |
979 | ERR_IF(!b->bm_pages) return i; | 1215 | ERR_IF(!b->bm_pages) return i; |
980 | 1216 | ||
981 | spin_lock_irq(&b->bm_lock); | 1217 | spin_lock_irq(&b->bm_lock); |
982 | if (bm_is_locked(b)) | 1218 | if (BM_DONT_TEST & b->bm_flags) |
983 | bm_print_lock_info(mdev); | 1219 | bm_print_lock_info(mdev); |
984 | 1220 | ||
985 | i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1); | 1221 | i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1); |
@@ -1005,13 +1241,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo | |||
1005 | * you must take drbd_bm_lock() first */ | 1241 | * you must take drbd_bm_lock() first */ |
1006 | unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo) | 1242 | unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo) |
1007 | { | 1243 | { |
1008 | /* WARN_ON(!bm_is_locked(mdev)); */ | 1244 | /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ |
1009 | return __bm_find_next(mdev, bm_fo, 0, KM_USER1); | 1245 | return __bm_find_next(mdev, bm_fo, 0, KM_USER1); |
1010 | } | 1246 | } |
1011 | 1247 | ||
1012 | unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo) | 1248 | unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo) |
1013 | { | 1249 | { |
1014 | /* WARN_ON(!bm_is_locked(mdev)); */ | 1250 | /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ |
1015 | return __bm_find_next(mdev, bm_fo, 1, KM_USER1); | 1251 | return __bm_find_next(mdev, bm_fo, 1, KM_USER1); |
1016 | } | 1252 | } |
1017 | 1253 | ||
@@ -1027,8 +1263,9 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | |||
1027 | struct drbd_bitmap *b = mdev->bitmap; | 1263 | struct drbd_bitmap *b = mdev->bitmap; |
1028 | unsigned long *p_addr = NULL; | 1264 | unsigned long *p_addr = NULL; |
1029 | unsigned long bitnr; | 1265 | unsigned long bitnr; |
1030 | unsigned long last_page_nr = -1UL; | 1266 | unsigned int last_page_nr = -1U; |
1031 | int c = 0; | 1267 | int c = 0; |
1268 | int changed_total = 0; | ||
1032 | 1269 | ||
1033 | if (e >= b->bm_bits) { | 1270 | if (e >= b->bm_bits) { |
1034 | dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n", | 1271 | dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n", |
@@ -1036,23 +1273,33 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | |||
1036 | e = b->bm_bits ? b->bm_bits -1 : 0; | 1273 | e = b->bm_bits ? b->bm_bits -1 : 0; |
1037 | } | 1274 | } |
1038 | for (bitnr = s; bitnr <= e; bitnr++) { | 1275 | for (bitnr = s; bitnr <= e; bitnr++) { |
1039 | unsigned long offset = bitnr>>LN2_BPL; | 1276 | unsigned int page_nr = bm_bit_to_page_idx(b, bitnr); |
1040 | unsigned long page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3); | ||
1041 | if (page_nr != last_page_nr) { | 1277 | if (page_nr != last_page_nr) { |
1042 | if (p_addr) | 1278 | if (p_addr) |
1043 | __bm_unmap(p_addr, km); | 1279 | __bm_unmap(p_addr, km); |
1044 | p_addr = __bm_map_paddr(b, offset, km); | 1280 | if (c < 0) |
1281 | bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); | ||
1282 | else if (c > 0) | ||
1283 | bm_set_page_need_writeout(b->bm_pages[last_page_nr]); | ||
1284 | changed_total += c; | ||
1285 | c = 0; | ||
1286 | p_addr = __bm_map_pidx(b, page_nr, km); | ||
1045 | last_page_nr = page_nr; | 1287 | last_page_nr = page_nr; |
1046 | } | 1288 | } |
1047 | if (val) | 1289 | if (val) |
1048 | c += (0 == __test_and_set_bit(bitnr & BPP_MASK, p_addr)); | 1290 | c += (0 == generic___test_and_set_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr)); |
1049 | else | 1291 | else |
1050 | c -= (0 != __test_and_clear_bit(bitnr & BPP_MASK, p_addr)); | 1292 | c -= (0 != generic___test_and_clear_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr)); |
1051 | } | 1293 | } |
1052 | if (p_addr) | 1294 | if (p_addr) |
1053 | __bm_unmap(p_addr, km); | 1295 | __bm_unmap(p_addr, km); |
1054 | b->bm_set += c; | 1296 | if (c < 0) |
1055 | return c; | 1297 | bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); |
1298 | else if (c > 0) | ||
1299 | bm_set_page_need_writeout(b->bm_pages[last_page_nr]); | ||
1300 | changed_total += c; | ||
1301 | b->bm_set += changed_total; | ||
1302 | return changed_total; | ||
1056 | } | 1303 | } |
1057 | 1304 | ||
1058 | /* returns number of bits actually changed. | 1305 | /* returns number of bits actually changed. |
@@ -1070,7 +1317,7 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | |||
1070 | ERR_IF(!b->bm_pages) return 0; | 1317 | ERR_IF(!b->bm_pages) return 0; |
1071 | 1318 | ||
1072 | spin_lock_irqsave(&b->bm_lock, flags); | 1319 | spin_lock_irqsave(&b->bm_lock, flags); |
1073 | if (bm_is_locked(b)) | 1320 | if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags) |
1074 | bm_print_lock_info(mdev); | 1321 | bm_print_lock_info(mdev); |
1075 | 1322 | ||
1076 | c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1); | 1323 | c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1); |
@@ -1187,12 +1434,11 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr) | |||
1187 | ERR_IF(!b->bm_pages) return 0; | 1434 | ERR_IF(!b->bm_pages) return 0; |
1188 | 1435 | ||
1189 | spin_lock_irqsave(&b->bm_lock, flags); | 1436 | spin_lock_irqsave(&b->bm_lock, flags); |
1190 | if (bm_is_locked(b)) | 1437 | if (BM_DONT_TEST & b->bm_flags) |
1191 | bm_print_lock_info(mdev); | 1438 | bm_print_lock_info(mdev); |
1192 | if (bitnr < b->bm_bits) { | 1439 | if (bitnr < b->bm_bits) { |
1193 | unsigned long offset = bitnr>>LN2_BPL; | 1440 | p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr)); |
1194 | p_addr = bm_map_paddr(b, offset); | 1441 | i = generic_test_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0; |
1195 | i = test_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0; | ||
1196 | bm_unmap(p_addr); | 1442 | bm_unmap(p_addr); |
1197 | } else if (bitnr == b->bm_bits) { | 1443 | } else if (bitnr == b->bm_bits) { |
1198 | i = -1; | 1444 | i = -1; |
@@ -1210,10 +1456,10 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi | |||
1210 | { | 1456 | { |
1211 | unsigned long flags; | 1457 | unsigned long flags; |
1212 | struct drbd_bitmap *b = mdev->bitmap; | 1458 | struct drbd_bitmap *b = mdev->bitmap; |
1213 | unsigned long *p_addr = NULL, page_nr = -1; | 1459 | unsigned long *p_addr = NULL; |
1214 | unsigned long bitnr; | 1460 | unsigned long bitnr; |
1461 | unsigned int page_nr = -1U; | ||
1215 | int c = 0; | 1462 | int c = 0; |
1216 | size_t w; | ||
1217 | 1463 | ||
1218 | /* If this is called without a bitmap, that is a bug. But just to be | 1464 | /* If this is called without a bitmap, that is a bug. But just to be |
1219 | * robust in case we screwed up elsewhere, in that case pretend there | 1465 | * robust in case we screwed up elsewhere, in that case pretend there |
@@ -1223,20 +1469,20 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi | |||
1223 | ERR_IF(!b->bm_pages) return 1; | 1469 | ERR_IF(!b->bm_pages) return 1; |
1224 | 1470 | ||
1225 | spin_lock_irqsave(&b->bm_lock, flags); | 1471 | spin_lock_irqsave(&b->bm_lock, flags); |
1226 | if (bm_is_locked(b)) | 1472 | if (BM_DONT_TEST & b->bm_flags) |
1227 | bm_print_lock_info(mdev); | 1473 | bm_print_lock_info(mdev); |
1228 | for (bitnr = s; bitnr <= e; bitnr++) { | 1474 | for (bitnr = s; bitnr <= e; bitnr++) { |
1229 | w = bitnr >> LN2_BPL; | 1475 | unsigned int idx = bm_bit_to_page_idx(b, bitnr); |
1230 | if (page_nr != w >> (PAGE_SHIFT - LN2_BPL + 3)) { | 1476 | if (page_nr != idx) { |
1231 | page_nr = w >> (PAGE_SHIFT - LN2_BPL + 3); | 1477 | page_nr = idx; |
1232 | if (p_addr) | 1478 | if (p_addr) |
1233 | bm_unmap(p_addr); | 1479 | bm_unmap(p_addr); |
1234 | p_addr = bm_map_paddr(b, w); | 1480 | p_addr = bm_map_pidx(b, idx); |
1235 | } | 1481 | } |
1236 | ERR_IF (bitnr >= b->bm_bits) { | 1482 | ERR_IF (bitnr >= b->bm_bits) { |
1237 | dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); | 1483 | dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); |
1238 | } else { | 1484 | } else { |
1239 | c += (0 != test_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); | 1485 | c += (0 != generic_test_le_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); |
1240 | } | 1486 | } |
1241 | } | 1487 | } |
1242 | if (p_addr) | 1488 | if (p_addr) |
@@ -1271,7 +1517,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr) | |||
1271 | ERR_IF(!b->bm_pages) return 0; | 1517 | ERR_IF(!b->bm_pages) return 0; |
1272 | 1518 | ||
1273 | spin_lock_irqsave(&b->bm_lock, flags); | 1519 | spin_lock_irqsave(&b->bm_lock, flags); |
1274 | if (bm_is_locked(b)) | 1520 | if (BM_DONT_TEST & b->bm_flags) |
1275 | bm_print_lock_info(mdev); | 1521 | bm_print_lock_info(mdev); |
1276 | 1522 | ||
1277 | s = S2W(enr); | 1523 | s = S2W(enr); |
@@ -1279,7 +1525,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr) | |||
1279 | count = 0; | 1525 | count = 0; |
1280 | if (s < b->bm_words) { | 1526 | if (s < b->bm_words) { |
1281 | int n = e-s; | 1527 | int n = e-s; |
1282 | p_addr = bm_map_paddr(b, s); | 1528 | p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s)); |
1283 | bm = p_addr + MLPP(s); | 1529 | bm = p_addr + MLPP(s); |
1284 | while (n--) | 1530 | while (n--) |
1285 | count += hweight_long(*bm++); | 1531 | count += hweight_long(*bm++); |
@@ -1291,18 +1537,20 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr) | |||
1291 | return count; | 1537 | return count; |
1292 | } | 1538 | } |
1293 | 1539 | ||
1294 | /* set all bits covered by the AL-extent al_enr */ | 1540 | /* Set all bits covered by the AL-extent al_enr. |
1541 | * Returns number of bits changed. */ | ||
1295 | unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) | 1542 | unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) |
1296 | { | 1543 | { |
1297 | struct drbd_bitmap *b = mdev->bitmap; | 1544 | struct drbd_bitmap *b = mdev->bitmap; |
1298 | unsigned long *p_addr, *bm; | 1545 | unsigned long *p_addr, *bm; |
1299 | unsigned long weight; | 1546 | unsigned long weight; |
1300 | int count, s, e, i, do_now; | 1547 | unsigned long s, e; |
1548 | int count, i, do_now; | ||
1301 | ERR_IF(!b) return 0; | 1549 | ERR_IF(!b) return 0; |
1302 | ERR_IF(!b->bm_pages) return 0; | 1550 | ERR_IF(!b->bm_pages) return 0; |
1303 | 1551 | ||
1304 | spin_lock_irq(&b->bm_lock); | 1552 | spin_lock_irq(&b->bm_lock); |
1305 | if (bm_is_locked(b)) | 1553 | if (BM_DONT_SET & b->bm_flags) |
1306 | bm_print_lock_info(mdev); | 1554 | bm_print_lock_info(mdev); |
1307 | weight = b->bm_set; | 1555 | weight = b->bm_set; |
1308 | 1556 | ||
@@ -1314,7 +1562,7 @@ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) | |||
1314 | count = 0; | 1562 | count = 0; |
1315 | if (s < b->bm_words) { | 1563 | if (s < b->bm_words) { |
1316 | i = do_now = e-s; | 1564 | i = do_now = e-s; |
1317 | p_addr = bm_map_paddr(b, s); | 1565 | p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s)); |
1318 | bm = p_addr + MLPP(s); | 1566 | bm = p_addr + MLPP(s); |
1319 | while (i--) { | 1567 | while (i--) { |
1320 | count += hweight_long(*bm); | 1568 | count += hweight_long(*bm); |
@@ -1326,7 +1574,7 @@ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) | |||
1326 | if (e == b->bm_words) | 1574 | if (e == b->bm_words) |
1327 | b->bm_set -= bm_clear_surplus(b); | 1575 | b->bm_set -= bm_clear_surplus(b); |
1328 | } else { | 1576 | } else { |
1329 | dev_err(DEV, "start offset (%d) too large in drbd_bm_ALe_set_all\n", s); | 1577 | dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s); |
1330 | } | 1578 | } |
1331 | weight = b->bm_set - weight; | 1579 | weight = b->bm_set - weight; |
1332 | spin_unlock_irq(&b->bm_lock); | 1580 | spin_unlock_irq(&b->bm_lock); |
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index b0bd27dfc1e..81030d8d654 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
@@ -72,13 +72,6 @@ extern int fault_devs; | |||
72 | extern char usermode_helper[]; | 72 | extern char usermode_helper[]; |
73 | 73 | ||
74 | 74 | ||
75 | #ifndef TRUE | ||
76 | #define TRUE 1 | ||
77 | #endif | ||
78 | #ifndef FALSE | ||
79 | #define FALSE 0 | ||
80 | #endif | ||
81 | |||
82 | /* I don't remember why XCPU ... | 75 | /* I don't remember why XCPU ... |
83 | * This is used to wake the asender, | 76 | * This is used to wake the asender, |
84 | * and to interrupt sending the sending task | 77 | * and to interrupt sending the sending task |
@@ -104,6 +97,7 @@ extern char usermode_helper[]; | |||
104 | #define ID_SYNCER (-1ULL) | 97 | #define ID_SYNCER (-1ULL) |
105 | #define ID_VACANT 0 | 98 | #define ID_VACANT 0 |
106 | #define is_syncer_block_id(id) ((id) == ID_SYNCER) | 99 | #define is_syncer_block_id(id) ((id) == ID_SYNCER) |
100 | #define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL) | ||
107 | 101 | ||
108 | struct drbd_conf; | 102 | struct drbd_conf; |
109 | 103 | ||
@@ -137,20 +131,19 @@ enum { | |||
137 | DRBD_FAULT_MAX, | 131 | DRBD_FAULT_MAX, |
138 | }; | 132 | }; |
139 | 133 | ||
140 | #ifdef CONFIG_DRBD_FAULT_INJECTION | ||
141 | extern unsigned int | 134 | extern unsigned int |
142 | _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type); | 135 | _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type); |
136 | |||
143 | static inline int | 137 | static inline int |
144 | drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) { | 138 | drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) { |
139 | #ifdef CONFIG_DRBD_FAULT_INJECTION | ||
145 | return fault_rate && | 140 | return fault_rate && |
146 | (enable_faults & (1<<type)) && | 141 | (enable_faults & (1<<type)) && |
147 | _drbd_insert_fault(mdev, type); | 142 | _drbd_insert_fault(mdev, type); |
148 | } | ||
149 | #define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m), (_t))) | ||
150 | |||
151 | #else | 143 | #else |
152 | #define FAULT_ACTIVE(_m, _t) (0) | 144 | return 0; |
153 | #endif | 145 | #endif |
146 | } | ||
154 | 147 | ||
155 | /* integer division, round _UP_ to the next integer */ | 148 | /* integer division, round _UP_ to the next integer */ |
156 | #define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0)) | 149 | #define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0)) |
@@ -212,8 +205,10 @@ enum drbd_packets { | |||
212 | /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */ | 205 | /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */ |
213 | /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */ | 206 | /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */ |
214 | P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */ | 207 | P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */ |
208 | P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */ | ||
209 | P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */ | ||
215 | 210 | ||
216 | P_MAX_CMD = 0x28, | 211 | P_MAX_CMD = 0x2A, |
217 | P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */ | 212 | P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */ |
218 | P_MAX_OPT_CMD = 0x101, | 213 | P_MAX_OPT_CMD = 0x101, |
219 | 214 | ||
@@ -269,6 +264,7 @@ static inline const char *cmdname(enum drbd_packets cmd) | |||
269 | [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", | 264 | [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", |
270 | [P_COMPRESSED_BITMAP] = "CBitmap", | 265 | [P_COMPRESSED_BITMAP] = "CBitmap", |
271 | [P_DELAY_PROBE] = "DelayProbe", | 266 | [P_DELAY_PROBE] = "DelayProbe", |
267 | [P_OUT_OF_SYNC] = "OutOfSync", | ||
272 | [P_MAX_CMD] = NULL, | 268 | [P_MAX_CMD] = NULL, |
273 | }; | 269 | }; |
274 | 270 | ||
@@ -512,7 +508,7 @@ struct p_sizes { | |||
512 | u64 d_size; /* size of disk */ | 508 | u64 d_size; /* size of disk */ |
513 | u64 u_size; /* user requested size */ | 509 | u64 u_size; /* user requested size */ |
514 | u64 c_size; /* current exported size */ | 510 | u64 c_size; /* current exported size */ |
515 | u32 max_segment_size; /* Maximal size of a BIO */ | 511 | u32 max_bio_size; /* Maximal size of a BIO */ |
516 | u16 queue_order_type; /* not yet implemented in DRBD*/ | 512 | u16 queue_order_type; /* not yet implemented in DRBD*/ |
517 | u16 dds_flags; /* use enum dds_flags here. */ | 513 | u16 dds_flags; /* use enum dds_flags here. */ |
518 | } __packed; | 514 | } __packed; |
@@ -550,6 +546,13 @@ struct p_discard { | |||
550 | u32 pad; | 546 | u32 pad; |
551 | } __packed; | 547 | } __packed; |
552 | 548 | ||
549 | struct p_block_desc { | ||
550 | struct p_header80 head; | ||
551 | u64 sector; | ||
552 | u32 blksize; | ||
553 | u32 pad; /* to multiple of 8 Byte */ | ||
554 | } __packed; | ||
555 | |||
553 | /* Valid values for the encoding field. | 556 | /* Valid values for the encoding field. |
554 | * Bump proto version when changing this. */ | 557 | * Bump proto version when changing this. */ |
555 | enum drbd_bitmap_code { | 558 | enum drbd_bitmap_code { |
@@ -647,6 +650,7 @@ union p_polymorph { | |||
647 | struct p_block_req block_req; | 650 | struct p_block_req block_req; |
648 | struct p_delay_probe93 delay_probe93; | 651 | struct p_delay_probe93 delay_probe93; |
649 | struct p_rs_uuid rs_uuid; | 652 | struct p_rs_uuid rs_uuid; |
653 | struct p_block_desc block_desc; | ||
650 | } __packed; | 654 | } __packed; |
651 | 655 | ||
652 | /**********************************************************************/ | 656 | /**********************************************************************/ |
@@ -677,13 +681,6 @@ static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi) | |||
677 | return thi->t_state; | 681 | return thi->t_state; |
678 | } | 682 | } |
679 | 683 | ||
680 | |||
681 | /* | ||
682 | * Having this as the first member of a struct provides sort of "inheritance". | ||
683 | * "derived" structs can be "drbd_queue_work()"ed. | ||
684 | * The callback should know and cast back to the descendant struct. | ||
685 | * drbd_request and drbd_epoch_entry are descendants of drbd_work. | ||
686 | */ | ||
687 | struct drbd_work; | 684 | struct drbd_work; |
688 | typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel); | 685 | typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel); |
689 | struct drbd_work { | 686 | struct drbd_work { |
@@ -712,9 +709,6 @@ struct drbd_request { | |||
712 | * starting a new epoch... | 709 | * starting a new epoch... |
713 | */ | 710 | */ |
714 | 711 | ||
715 | /* up to here, the struct layout is identical to drbd_epoch_entry; | ||
716 | * we might be able to use that to our advantage... */ | ||
717 | |||
718 | struct list_head tl_requests; /* ring list in the transfer log */ | 712 | struct list_head tl_requests; /* ring list in the transfer log */ |
719 | struct bio *master_bio; /* master bio pointer */ | 713 | struct bio *master_bio; /* master bio pointer */ |
720 | unsigned long rq_state; /* see comments above _req_mod() */ | 714 | unsigned long rq_state; /* see comments above _req_mod() */ |
@@ -831,7 +825,7 @@ enum { | |||
831 | CRASHED_PRIMARY, /* This node was a crashed primary. | 825 | CRASHED_PRIMARY, /* This node was a crashed primary. |
832 | * Gets cleared when the state.conn | 826 | * Gets cleared when the state.conn |
833 | * goes into C_CONNECTED state. */ | 827 | * goes into C_CONNECTED state. */ |
834 | WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */ | 828 | NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */ |
835 | CONSIDER_RESYNC, | 829 | CONSIDER_RESYNC, |
836 | 830 | ||
837 | MD_NO_FUA, /* User wants us not to use FUA/FLUSH on meta data dev */ | 831 | MD_NO_FUA, /* User wants us not to use FUA/FLUSH on meta data dev */ |
@@ -856,10 +850,37 @@ enum { | |||
856 | GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */ | 850 | GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */ |
857 | NEW_CUR_UUID, /* Create new current UUID when thawing IO */ | 851 | NEW_CUR_UUID, /* Create new current UUID when thawing IO */ |
858 | AL_SUSPENDED, /* Activity logging is currently suspended. */ | 852 | AL_SUSPENDED, /* Activity logging is currently suspended. */ |
853 | AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */ | ||
859 | }; | 854 | }; |
860 | 855 | ||
861 | struct drbd_bitmap; /* opaque for drbd_conf */ | 856 | struct drbd_bitmap; /* opaque for drbd_conf */ |
862 | 857 | ||
858 | /* definition of bits in bm_flags to be used in drbd_bm_lock | ||
859 | * and drbd_bitmap_io and friends. */ | ||
860 | enum bm_flag { | ||
861 | /* do we need to kfree, or vfree bm_pages? */ | ||
862 | BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */ | ||
863 | |||
864 | /* currently locked for bulk operation */ | ||
865 | BM_LOCKED_MASK = 0x7, | ||
866 | |||
867 | /* in detail, that is: */ | ||
868 | BM_DONT_CLEAR = 0x1, | ||
869 | BM_DONT_SET = 0x2, | ||
870 | BM_DONT_TEST = 0x4, | ||
871 | |||
872 | /* (test bit, count bit) allowed (common case) */ | ||
873 | BM_LOCKED_TEST_ALLOWED = 0x3, | ||
874 | |||
875 | /* testing bits, as well as setting new bits allowed, but clearing bits | ||
876 | * would be unexpected. Used during bitmap receive. Setting new bits | ||
877 | * requires sending of "out-of-sync" information, though. */ | ||
878 | BM_LOCKED_SET_ALLOWED = 0x1, | ||
879 | |||
880 | /* clear is not expected while bitmap is locked for bulk operation */ | ||
881 | }; | ||
882 | |||
883 | |||
863 | /* TODO sort members for performance | 884 | /* TODO sort members for performance |
864 | * MAYBE group them further */ | 885 | * MAYBE group them further */ |
865 | 886 | ||
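The bm_flag lock levels are just combinations of the three BM_DONT_* bits: BM_LOCKED_TEST_ALLOWED (0x3) is BM_DONT_CLEAR | BM_DONT_SET, BM_LOCKED_SET_ALLOWED (0x1) is BM_DONT_CLEAR alone, and BM_LOCKED_MASK (0x7) forbids everything. A hypothetical check a bitmap operation could make against the held level:

/* illustrative only; "held" is the lock level, "op" one BM_DONT_* bit */
static inline bool bm_op_forbidden(enum bm_flag held, enum bm_flag op)
{
	return (held & BM_LOCKED_MASK & op) != 0;
}

So clearing bits under BM_LOCKED_SET_ALLOWED trips the check while setting them does not, matching the bitmap-receive case described in the comments.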
@@ -925,6 +946,7 @@ struct drbd_md_io { | |||
925 | struct bm_io_work { | 946 | struct bm_io_work { |
926 | struct drbd_work w; | 947 | struct drbd_work w; |
927 | char *why; | 948 | char *why; |
949 | enum bm_flag flags; | ||
928 | int (*io_fn)(struct drbd_conf *mdev); | 950 | int (*io_fn)(struct drbd_conf *mdev); |
929 | void (*done)(struct drbd_conf *mdev, int rv); | 951 | void (*done)(struct drbd_conf *mdev, int rv); |
930 | }; | 952 | }; |
@@ -963,9 +985,12 @@ struct drbd_conf { | |||
963 | struct drbd_work resync_work, | 985 | struct drbd_work resync_work, |
964 | unplug_work, | 986 | unplug_work, |
965 | go_diskless, | 987 | go_diskless, |
966 | md_sync_work; | 988 | md_sync_work, |
989 | start_resync_work; | ||
967 | struct timer_list resync_timer; | 990 | struct timer_list resync_timer; |
968 | struct timer_list md_sync_timer; | 991 | struct timer_list md_sync_timer; |
992 | struct timer_list start_resync_timer; | ||
993 | struct timer_list request_timer; | ||
969 | #ifdef DRBD_DEBUG_MD_SYNC | 994 | #ifdef DRBD_DEBUG_MD_SYNC |
970 | struct { | 995 | struct { |
971 | unsigned int line; | 996 | unsigned int line; |
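The two new timer_list members imply the usual pre-timer_setup() wiring, presumably done in drbd_init_set_defaults() next to resync_timer; a sketch of that era's API:

	init_timer(&mdev->start_resync_timer);
	mdev->start_resync_timer.function = start_resync_timer_fn;
	mdev->start_resync_timer.data     = (unsigned long)mdev;

	/* armed later, e.g. when an Ahead -> SyncSource transition is queued */
	mod_timer(&mdev->start_resync_timer, jiffies + HZ);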
@@ -1000,9 +1025,9 @@ struct drbd_conf { | |||
1000 | struct hlist_head *tl_hash; | 1025 | struct hlist_head *tl_hash; |
1001 | unsigned int tl_hash_s; | 1026 | unsigned int tl_hash_s; |
1002 | 1027 | ||
1003 | /* blocks to sync in this run [unit BM_BLOCK_SIZE] */ | 1028 | /* blocks to resync in this run [unit BM_BLOCK_SIZE] */ |
1004 | unsigned long rs_total; | 1029 | unsigned long rs_total; |
1005 | /* number of sync IOs that failed in this run */ | 1030 | /* number of resync blocks that failed in this run */ |
1006 | unsigned long rs_failed; | 1031 | unsigned long rs_failed; |
1007 | /* Syncer's start time [unit jiffies] */ | 1032 | /* Syncer's start time [unit jiffies] */ |
1008 | unsigned long rs_start; | 1033 | unsigned long rs_start; |
@@ -1102,6 +1127,7 @@ struct drbd_conf { | |||
1102 | struct fifo_buffer rs_plan_s; /* correction values of resync planner */ | 1127 | struct fifo_buffer rs_plan_s; /* correction values of resync planner */ |
1103 | int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ | 1128 | int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ |
1104 | int rs_planed; /* resync sectors already planned */ | 1129 | int rs_planed; /* resync sectors already planned */ |
1130 | atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ | ||
1105 | }; | 1131 | }; |
1106 | 1132 | ||
1107 | static inline struct drbd_conf *minor_to_mdev(unsigned int minor) | 1133 | static inline struct drbd_conf *minor_to_mdev(unsigned int minor) |
@@ -1163,14 +1189,19 @@ enum dds_flags { | |||
1163 | }; | 1189 | }; |
1164 | 1190 | ||
1165 | extern void drbd_init_set_defaults(struct drbd_conf *mdev); | 1191 | extern void drbd_init_set_defaults(struct drbd_conf *mdev); |
1166 | extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, | 1192 | extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev, |
1167 | union drbd_state mask, union drbd_state val); | 1193 | enum chg_state_flags f, |
1194 | union drbd_state mask, | ||
1195 | union drbd_state val); | ||
1168 | extern void drbd_force_state(struct drbd_conf *, union drbd_state, | 1196 | extern void drbd_force_state(struct drbd_conf *, union drbd_state, |
1169 | union drbd_state); | 1197 | union drbd_state); |
1170 | extern int _drbd_request_state(struct drbd_conf *, union drbd_state, | 1198 | extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *, |
1171 | union drbd_state, enum chg_state_flags); | 1199 | union drbd_state, |
1172 | extern int __drbd_set_state(struct drbd_conf *, union drbd_state, | 1200 | union drbd_state, |
1173 | enum chg_state_flags, struct completion *done); | 1201 | enum chg_state_flags); |
1202 | extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state, | ||
1203 | enum chg_state_flags, | ||
1204 | struct completion *done); | ||
1174 | extern void print_st_err(struct drbd_conf *, union drbd_state, | 1205 | extern void print_st_err(struct drbd_conf *, union drbd_state, |
1175 | union drbd_state, int); | 1206 | union drbd_state, int); |
1176 | extern int drbd_thread_start(struct drbd_thread *thi); | 1207 | extern int drbd_thread_start(struct drbd_thread *thi); |
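Switching these prototypes from int to enum drbd_state_rv changes no behavior, but it lets callers and the compiler speak in SS_* codes. A sketch of the resulting calling convention (mask and val built with drbd's NS() macro, as drbd_force_state already does):

	enum drbd_state_rv rv;

	rv = drbd_change_state(mdev, CS_HARD, NS(conn, C_DISCONNECTING));
	if (rv < SS_SUCCESS)
		/* codes below SS_SUCCESS are the failure reasons */
		dev_err(DEV, "state change failed: %d\n", rv);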
@@ -1195,7 +1226,7 @@ extern int drbd_send(struct drbd_conf *mdev, struct socket *sock, | |||
1195 | extern int drbd_send_protocol(struct drbd_conf *mdev); | 1226 | extern int drbd_send_protocol(struct drbd_conf *mdev); |
1196 | extern int drbd_send_uuids(struct drbd_conf *mdev); | 1227 | extern int drbd_send_uuids(struct drbd_conf *mdev); |
1197 | extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev); | 1228 | extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev); |
1198 | extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val); | 1229 | extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev); |
1199 | extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags); | 1230 | extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags); |
1200 | extern int _drbd_send_state(struct drbd_conf *mdev); | 1231 | extern int _drbd_send_state(struct drbd_conf *mdev); |
1201 | extern int drbd_send_state(struct drbd_conf *mdev); | 1232 | extern int drbd_send_state(struct drbd_conf *mdev); |
@@ -1220,11 +1251,10 @@ extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd, | |||
1220 | struct p_data *dp, int data_size); | 1251 | struct p_data *dp, int data_size); |
1221 | extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd, | 1252 | extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd, |
1222 | sector_t sector, int blksize, u64 block_id); | 1253 | sector_t sector, int blksize, u64 block_id); |
1254 | extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req); | ||
1223 | extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, | 1255 | extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, |
1224 | struct drbd_epoch_entry *e); | 1256 | struct drbd_epoch_entry *e); |
1225 | extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req); | 1257 | extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req); |
1226 | extern int _drbd_send_barrier(struct drbd_conf *mdev, | ||
1227 | struct drbd_tl_epoch *barrier); | ||
1228 | extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd, | 1258 | extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd, |
1229 | sector_t sector, int size, u64 block_id); | 1259 | sector_t sector, int size, u64 block_id); |
1230 | extern int drbd_send_drequest_csum(struct drbd_conf *mdev, | 1260 | extern int drbd_send_drequest_csum(struct drbd_conf *mdev, |
@@ -1235,14 +1265,13 @@ extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size) | |||
1235 | 1265 | ||
1236 | extern int drbd_send_bitmap(struct drbd_conf *mdev); | 1266 | extern int drbd_send_bitmap(struct drbd_conf *mdev); |
1237 | extern int _drbd_send_bitmap(struct drbd_conf *mdev); | 1267 | extern int _drbd_send_bitmap(struct drbd_conf *mdev); |
1238 | extern int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode); | 1268 | extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode); |
1239 | extern void drbd_free_bc(struct drbd_backing_dev *ldev); | 1269 | extern void drbd_free_bc(struct drbd_backing_dev *ldev); |
1240 | extern void drbd_mdev_cleanup(struct drbd_conf *mdev); | 1270 | extern void drbd_mdev_cleanup(struct drbd_conf *mdev); |
1271 | void drbd_print_uuids(struct drbd_conf *mdev, const char *text); | ||
1241 | 1272 | ||
1242 | /* drbd_meta-data.c (still in drbd_main.c) */ | ||
1243 | extern void drbd_md_sync(struct drbd_conf *mdev); | 1273 | extern void drbd_md_sync(struct drbd_conf *mdev); |
1244 | extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev); | 1274 | extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev); |
1245 | /* maybe define them below as inline? */ | ||
1246 | extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); | 1275 | extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); |
1247 | extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); | 1276 | extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); |
1248 | extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local); | 1277 | extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local); |
@@ -1261,10 +1290,12 @@ extern void drbd_md_mark_dirty_(struct drbd_conf *mdev, | |||
1261 | extern void drbd_queue_bitmap_io(struct drbd_conf *mdev, | 1290 | extern void drbd_queue_bitmap_io(struct drbd_conf *mdev, |
1262 | int (*io_fn)(struct drbd_conf *), | 1291 | int (*io_fn)(struct drbd_conf *), |
1263 | void (*done)(struct drbd_conf *, int), | 1292 | void (*done)(struct drbd_conf *, int), |
1264 | char *why); | 1293 | char *why, enum bm_flag flags); |
1294 | extern int drbd_bitmap_io(struct drbd_conf *mdev, | ||
1295 | int (*io_fn)(struct drbd_conf *), | ||
1296 | char *why, enum bm_flag flags); | ||
1265 | extern int drbd_bmio_set_n_write(struct drbd_conf *mdev); | 1297 | extern int drbd_bmio_set_n_write(struct drbd_conf *mdev); |
1266 | extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); | 1298 | extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); |
1267 | extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why); | ||
1268 | extern void drbd_go_diskless(struct drbd_conf *mdev); | 1299 | extern void drbd_go_diskless(struct drbd_conf *mdev); |
1269 | extern void drbd_ldev_destroy(struct drbd_conf *mdev); | 1300 | extern void drbd_ldev_destroy(struct drbd_conf *mdev); |
1270 | 1301 | ||
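Both bitmap-IO entry points now carry an enum bm_flag, so the lock level travels with the request instead of being implied by the caller. A hedged usage sketch (the "why" strings here are made up):

	/* synchronous variant, strictest level: nothing may touch the bitmap */
	int rv = drbd_bitmap_io(mdev, &drbd_bm_write,
				"example full write-out", BM_LOCKED_MASK);

	/* queued variant from a context that must not block on bitmap IO */
	drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
			     "example queued write-out", BM_LOCKED_SET_ALLOWED);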
@@ -1313,6 +1344,7 @@ struct bm_extent { | |||
1313 | 1344 | ||
1314 | #define BME_NO_WRITES 0 /* bm_extent.flags: no more requests on this one! */ | 1345 | #define BME_NO_WRITES 0 /* bm_extent.flags: no more requests on this one! */ |
1315 | #define BME_LOCKED 1 /* bm_extent.flags: syncer active on this one. */ | 1346 | #define BME_LOCKED 1 /* bm_extent.flags: syncer active on this one. */ |
1347 | #define BME_PRIORITY 2 /* finish resync IO on this extent ASAP! App IO waiting! */ | ||
1316 | 1348 | ||
1317 | /* drbd_bitmap.c */ | 1349 | /* drbd_bitmap.c */ |
1318 | /* | 1350 | /* |
@@ -1390,7 +1422,9 @@ struct bm_extent { | |||
1390 | * you should use 64bit OS for that much storage, anyways. */ | 1422 | * you should use 64bit OS for that much storage, anyways. */ |
1391 | #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff) | 1423 | #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff) |
1392 | #else | 1424 | #else |
1393 | #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0x1LU << 32) | 1425 | /* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */ |
1426 | #define DRBD_MAX_SECTORS_FLEX (1UL << 51) | ||
1427 | /* corresponds to (1UL << 38) bits right now. */ | ||
1394 | #endif | 1428 | #endif |
1395 | #endif | 1429 | #endif |
1396 | 1430 | ||
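The arithmetic behind the two new comments, assuming the usual BM_BLOCK_SIZE of 4 KiB (one bitmap bit per eight 512-byte sectors):

/* (1UL << 38) bits  * 8 sectors/bit       = 1UL << 41 sectors
 * (1UL << 41) sectors * 512 bytes/sector  = 1UL << 50 bytes = 1 PiB
 * The define itself, (1UL << 51) sectors, would address 1 EiB, so the
 * 2^38-bit bitmap, not this constant, looks like the tighter bound. */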
@@ -1398,7 +1432,7 @@ struct bm_extent { | |||
1398 | * With a value of 8 all IO in one 128K block makes it to the same slot of the | 1432 | * With a value of 8 all IO in one 128K block makes it to the same slot of the |
1399 | * hash table. */ | 1433 | * hash table. */ |
1400 | #define HT_SHIFT 8 | 1434 | #define HT_SHIFT 8 |
1401 | #define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT)) | 1435 | #define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT)) |
1402 | 1436 | ||
1403 | #define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ | 1437 | #define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ |
1404 | 1438 | ||
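The renamed constant works out as follows, tying it to the "128K block" in the HT_SHIFT comment above:

/* DRBD_MAX_BIO_SIZE = 1U << (9 + 8) = 1U << 17 = 131072 bytes = 128 KiB;
 * 9 converts sectors to bytes, HT_SHIFT groups 2^8 sectors per hash slot,
 * so all IO within one 128 KiB window lands in the same tl_hash slot. */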
@@ -1410,16 +1444,20 @@ extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new | |||
1410 | extern void drbd_bm_cleanup(struct drbd_conf *mdev); | 1444 | extern void drbd_bm_cleanup(struct drbd_conf *mdev); |
1411 | extern void drbd_bm_set_all(struct drbd_conf *mdev); | 1445 | extern void drbd_bm_set_all(struct drbd_conf *mdev); |
1412 | extern void drbd_bm_clear_all(struct drbd_conf *mdev); | 1446 | extern void drbd_bm_clear_all(struct drbd_conf *mdev); |
1447 | /* set/clear/test only a few bits at a time */ | ||
1413 | extern int drbd_bm_set_bits( | 1448 | extern int drbd_bm_set_bits( |
1414 | struct drbd_conf *mdev, unsigned long s, unsigned long e); | 1449 | struct drbd_conf *mdev, unsigned long s, unsigned long e); |
1415 | extern int drbd_bm_clear_bits( | 1450 | extern int drbd_bm_clear_bits( |
1416 | struct drbd_conf *mdev, unsigned long s, unsigned long e); | 1451 | struct drbd_conf *mdev, unsigned long s, unsigned long e); |
1417 | /* bm_set_bits variant for use while holding drbd_bm_lock */ | 1452 | extern int drbd_bm_count_bits( |
1453 | struct drbd_conf *mdev, const unsigned long s, const unsigned long e); | ||
1454 | /* bm_set_bits variant for use while holding drbd_bm_lock, | ||
1455 | * may process the whole bitmap in one go */ | ||
1418 | extern void _drbd_bm_set_bits(struct drbd_conf *mdev, | 1456 | extern void _drbd_bm_set_bits(struct drbd_conf *mdev, |
1419 | const unsigned long s, const unsigned long e); | 1457 | const unsigned long s, const unsigned long e); |
1420 | extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr); | 1458 | extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr); |
1421 | extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr); | 1459 | extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr); |
1422 | extern int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local); | 1460 | extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local); |
1423 | extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); | 1461 | extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); |
1424 | extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); | 1462 | extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); |
1425 | extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, | 1463 | extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, |
@@ -1427,6 +1465,8 @@ extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, | |||
1427 | extern size_t drbd_bm_words(struct drbd_conf *mdev); | 1465 | extern size_t drbd_bm_words(struct drbd_conf *mdev); |
1428 | extern unsigned long drbd_bm_bits(struct drbd_conf *mdev); | 1466 | extern unsigned long drbd_bm_bits(struct drbd_conf *mdev); |
1429 | extern sector_t drbd_bm_capacity(struct drbd_conf *mdev); | 1467 | extern sector_t drbd_bm_capacity(struct drbd_conf *mdev); |
1468 | |||
1469 | #define DRBD_END_OF_BITMAP (~(unsigned long)0) | ||
1430 | extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); | 1470 | extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); |
1431 | /* bm_find_next variants for use while you hold drbd_bm_lock() */ | 1471 | /* bm_find_next variants for use while you hold drbd_bm_lock() */ |
1432 | extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); | 1472 | extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); |
@@ -1437,14 +1477,12 @@ extern int drbd_bm_rs_done(struct drbd_conf *mdev); | |||
1437 | /* for receive_bitmap */ | 1477 | /* for receive_bitmap */ |
1438 | extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, | 1478 | extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, |
1439 | size_t number, unsigned long *buffer); | 1479 | size_t number, unsigned long *buffer); |
1440 | /* for _drbd_send_bitmap and drbd_bm_write_sect */ | 1480 | /* for _drbd_send_bitmap */ |
1441 | extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, | 1481 | extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, |
1442 | size_t number, unsigned long *buffer); | 1482 | size_t number, unsigned long *buffer); |
1443 | 1483 | ||
1444 | extern void drbd_bm_lock(struct drbd_conf *mdev, char *why); | 1484 | extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags); |
1445 | extern void drbd_bm_unlock(struct drbd_conf *mdev); | 1485 | extern void drbd_bm_unlock(struct drbd_conf *mdev); |
1446 | |||
1447 | extern int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e); | ||
1448 | /* drbd_main.c */ | 1486 | /* drbd_main.c */ |
1449 | 1487 | ||
1450 | extern struct kmem_cache *drbd_request_cache; | 1488 | extern struct kmem_cache *drbd_request_cache; |
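With its extra parameter, drbd_bm_lock() in the hunk above records what the holder may still do; a sketch of the bracket around a bulk operation:

	drbd_bm_lock(mdev, "example bulk scan", BM_LOCKED_TEST_ALLOWED);
	/* walking with _drbd_bm_find_next() and counting bits is fine here;
	 * setting or clearing bits would violate the declared level */
	drbd_bm_unlock(mdev);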
@@ -1467,7 +1505,7 @@ extern void drbd_free_mdev(struct drbd_conf *mdev); | |||
1467 | extern int proc_details; | 1505 | extern int proc_details; |
1468 | 1506 | ||
1469 | /* drbd_req */ | 1507 | /* drbd_req */ |
1470 | extern int drbd_make_request_26(struct request_queue *q, struct bio *bio); | 1508 | extern int drbd_make_request(struct request_queue *q, struct bio *bio); |
1471 | extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req); | 1509 | extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req); |
1472 | extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); | 1510 | extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); |
1473 | extern int is_valid_ar_handle(struct drbd_request *, sector_t); | 1511 | extern int is_valid_ar_handle(struct drbd_request *, sector_t); |
@@ -1482,8 +1520,9 @@ enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = | |||
1482 | extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); | 1520 | extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); |
1483 | extern void resync_after_online_grow(struct drbd_conf *); | 1521 | extern void resync_after_online_grow(struct drbd_conf *); |
1484 | extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); | 1522 | extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); |
1485 | extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, | 1523 | extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev, |
1486 | int force); | 1524 | enum drbd_role new_role, |
1525 | int force); | ||
1487 | extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev); | 1526 | extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev); |
1488 | extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev); | 1527 | extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev); |
1489 | extern int drbd_khelper(struct drbd_conf *mdev, char *cmd); | 1528 | extern int drbd_khelper(struct drbd_conf *mdev, char *cmd); |
@@ -1499,6 +1538,7 @@ extern int drbd_resync_finished(struct drbd_conf *mdev); | |||
1499 | extern int drbd_md_sync_page_io(struct drbd_conf *mdev, | 1538 | extern int drbd_md_sync_page_io(struct drbd_conf *mdev, |
1500 | struct drbd_backing_dev *bdev, sector_t sector, int rw); | 1539 | struct drbd_backing_dev *bdev, sector_t sector, int rw); |
1501 | extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int); | 1540 | extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int); |
1541 | extern void drbd_rs_controller_reset(struct drbd_conf *mdev); | ||
1502 | 1542 | ||
1503 | static inline void ov_oos_print(struct drbd_conf *mdev) | 1543 | static inline void ov_oos_print(struct drbd_conf *mdev) |
1504 | { | 1544 | { |
@@ -1522,21 +1562,23 @@ extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int); | |||
1522 | extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int); | 1562 | extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int); |
1523 | extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int); | 1563 | extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int); |
1524 | extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int); | 1564 | extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int); |
1525 | extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int); | 1565 | extern int w_resync_timer(struct drbd_conf *, struct drbd_work *, int); |
1526 | extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int); | 1566 | extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int); |
1527 | extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int); | 1567 | extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int); |
1528 | extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int); | ||
1529 | extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int); | 1568 | extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int); |
1530 | extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int); | 1569 | extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int); |
1531 | extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int); | 1570 | extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int); |
1532 | extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int); | 1571 | extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int); |
1533 | extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int); | 1572 | extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int); |
1534 | extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int); | 1573 | extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int); |
1574 | extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int); | ||
1575 | extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int); | ||
1535 | 1576 | ||
1536 | extern void resync_timer_fn(unsigned long data); | 1577 | extern void resync_timer_fn(unsigned long data); |
1578 | extern void start_resync_timer_fn(unsigned long data); | ||
1537 | 1579 | ||
1538 | /* drbd_receiver.c */ | 1580 | /* drbd_receiver.c */ |
1539 | extern int drbd_rs_should_slow_down(struct drbd_conf *mdev); | 1581 | extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector); |
1540 | extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, | 1582 | extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, |
1541 | const unsigned rw, const int fault_type); | 1583 | const unsigned rw, const int fault_type); |
1542 | extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list); | 1584 | extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list); |
@@ -1619,16 +1661,16 @@ extern int drbd_rs_del_all(struct drbd_conf *mdev); | |||
1619 | extern void drbd_rs_failed_io(struct drbd_conf *mdev, | 1661 | extern void drbd_rs_failed_io(struct drbd_conf *mdev, |
1620 | sector_t sector, int size); | 1662 | sector_t sector, int size); |
1621 | extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *); | 1663 | extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *); |
1664 | extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go); | ||
1622 | extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, | 1665 | extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, |
1623 | int size, const char *file, const unsigned int line); | 1666 | int size, const char *file, const unsigned int line); |
1624 | #define drbd_set_in_sync(mdev, sector, size) \ | 1667 | #define drbd_set_in_sync(mdev, sector, size) \ |
1625 | __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__) | 1668 | __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__) |
1626 | extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, | 1669 | extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, |
1627 | int size, const char *file, const unsigned int line); | 1670 | int size, const char *file, const unsigned int line); |
1628 | #define drbd_set_out_of_sync(mdev, sector, size) \ | 1671 | #define drbd_set_out_of_sync(mdev, sector, size) \ |
1629 | __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) | 1672 | __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) |
1630 | extern void drbd_al_apply_to_bm(struct drbd_conf *mdev); | 1673 | extern void drbd_al_apply_to_bm(struct drbd_conf *mdev); |
1631 | extern void drbd_al_to_on_disk_bm(struct drbd_conf *mdev); | ||
1632 | extern void drbd_al_shrink(struct drbd_conf *mdev); | 1674 | extern void drbd_al_shrink(struct drbd_conf *mdev); |
1633 | 1675 | ||
1634 | 1676 | ||
@@ -1747,11 +1789,11 @@ static inline void drbd_state_unlock(struct drbd_conf *mdev) | |||
1747 | wake_up(&mdev->misc_wait); | 1789 | wake_up(&mdev->misc_wait); |
1748 | } | 1790 | } |
1749 | 1791 | ||
1750 | static inline int _drbd_set_state(struct drbd_conf *mdev, | 1792 | static inline enum drbd_state_rv |
1751 | union drbd_state ns, enum chg_state_flags flags, | 1793 | _drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, |
1752 | struct completion *done) | 1794 | enum chg_state_flags flags, struct completion *done) |
1753 | { | 1795 | { |
1754 | int rv; | 1796 | enum drbd_state_rv rv; |
1755 | 1797 | ||
1756 | read_lock(&global_state_lock); | 1798 | read_lock(&global_state_lock); |
1757 | rv = __drbd_set_state(mdev, ns, flags, done); | 1799 | rv = __drbd_set_state(mdev, ns, flags, done); |
@@ -1982,17 +2024,17 @@ static inline int drbd_send_ping_ack(struct drbd_conf *mdev) | |||
1982 | 2024 | ||
1983 | static inline void drbd_thread_stop(struct drbd_thread *thi) | 2025 | static inline void drbd_thread_stop(struct drbd_thread *thi) |
1984 | { | 2026 | { |
1985 | _drbd_thread_stop(thi, FALSE, TRUE); | 2027 | _drbd_thread_stop(thi, false, true); |
1986 | } | 2028 | } |
1987 | 2029 | ||
1988 | static inline void drbd_thread_stop_nowait(struct drbd_thread *thi) | 2030 | static inline void drbd_thread_stop_nowait(struct drbd_thread *thi) |
1989 | { | 2031 | { |
1990 | _drbd_thread_stop(thi, FALSE, FALSE); | 2032 | _drbd_thread_stop(thi, false, false); |
1991 | } | 2033 | } |
1992 | 2034 | ||
1993 | static inline void drbd_thread_restart_nowait(struct drbd_thread *thi) | 2035 | static inline void drbd_thread_restart_nowait(struct drbd_thread *thi) |
1994 | { | 2036 | { |
1995 | _drbd_thread_stop(thi, TRUE, FALSE); | 2037 | _drbd_thread_stop(thi, true, false); |
1996 | } | 2038 | } |
1997 | 2039 | ||
1998 | /* counts how many answer packets we expect from our peer, | 2040 | /* counts how many answer packets we expect from our peer,
@@ -2146,17 +2188,18 @@ extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins) | |||
2146 | static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, | 2188 | static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, |
2147 | unsigned long *bits_left, unsigned int *per_mil_done) | 2189 | unsigned long *bits_left, unsigned int *per_mil_done) |
2148 | { | 2190 | { |
2149 | /* | 2191 | /* this is to break it at compile time when we change that, in case we |
2150 | * this is to break it at compile time when we change that | 2192 | * want to support more than (1<<32) bits on a 32bit arch. */ |
2151 | * (we may feel 4TB maximum storage per drbd is not enough) | ||
2152 | */ | ||
2153 | typecheck(unsigned long, mdev->rs_total); | 2193 | typecheck(unsigned long, mdev->rs_total); |
2154 | 2194 | ||
2155 | /* note: both rs_total and rs_left are in bits, i.e. in | 2195 | /* note: both rs_total and rs_left are in bits, i.e. in |
2156 | * units of BM_BLOCK_SIZE. | 2196 | * units of BM_BLOCK_SIZE. |
2157 | * for the percentage, we don't care. */ | 2197 | * for the percentage, we don't care. */ |
2158 | 2198 | ||
2159 | *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; | 2199 | if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) |
2200 | *bits_left = mdev->ov_left; | ||
2201 | else | ||
2202 | *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; | ||
2160 | /* >> 10 to prevent overflow, | 2203 | /* >> 10 to prevent overflow, |
2161 | * +1 to prevent division by zero */ | 2204 | * +1 to prevent division by zero */ |
2162 | if (*bits_left > mdev->rs_total) { | 2205 | if (*bits_left > mdev->rs_total) { |
@@ -2171,10 +2214,19 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, | |||
2171 | *bits_left, mdev->rs_total, mdev->rs_failed); | 2214 | *bits_left, mdev->rs_total, mdev->rs_failed); |
2172 | *per_mil_done = 0; | 2215 | *per_mil_done = 0; |
2173 | } else { | 2216 | } else { |
2174 | /* make sure the calculation happens in long context */ | 2217 | /* Make sure the division happens in long context. |
2175 | unsigned long tmp = 1000UL - | 2218 | * We allow up to one petabyte storage right now, |
2176 | (*bits_left >> 10)*1000UL | 2219 | * at a granularity of 4k per bit that is 2**38 bits. |
2177 | / ((mdev->rs_total >> 10) + 1UL); | 2220 | * After shift right and multiplication by 1000, |
2221 | * this should still fit easily into a 32bit long, | ||
2222 | * so we don't need a 64bit division on 32bit arch. | ||
2223 | * Note: currently we don't support such large bitmaps on 32bit | ||
2224 | * arch anyways, but no harm done to be prepared for it here. | ||
2225 | */ | ||
2226 | unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10; | ||
2227 | unsigned long left = *bits_left >> shift; | ||
2228 | unsigned long total = 1UL + (mdev->rs_total >> shift); | ||
2229 | unsigned long tmp = 1000UL - left * 1000UL/total; | ||
2178 | *per_mil_done = tmp; | 2230 | *per_mil_done = tmp; |
2179 | } | 2231 | } |
2180 | } | 2232 | } |
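The variable shift keeps every intermediate within a 32-bit unsigned long. A standalone rendering of the same math, with one worked case: rs_total = 2^33 bits and 2^32 bits left gives shift 16, left = 65536, total = 131073, so 1000 - 65536000/131073 = 1000 - 499 = 501 per mille done.

static unsigned int per_mil_done_sketch(unsigned long bits_left,
					unsigned long rs_total)
{
	/* same trick as above: shift by 16 once totals exceed 2^32 bits,
	 * so left * 1000 stays well below 2^32 even on 32-bit longs */
	unsigned int shift = rs_total >= (1ULL << 32) ? 16 : 10;
	unsigned long left = bits_left >> shift;
	unsigned long total = 1UL + (rs_total >> shift);

	return 1000UL - left * 1000UL / total;
}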
@@ -2193,8 +2245,9 @@ static inline int drbd_get_max_buffers(struct drbd_conf *mdev) | |||
2193 | return mxb; | 2245 | return mxb; |
2194 | } | 2246 | } |
2195 | 2247 | ||
2196 | static inline int drbd_state_is_stable(union drbd_state s) | 2248 | static inline int drbd_state_is_stable(struct drbd_conf *mdev) |
2197 | { | 2249 | { |
2250 | union drbd_state s = mdev->state; | ||
2198 | 2251 | ||
2199 | /* DO NOT add a default clause, we want the compiler to warn us | 2252 | /* DO NOT add a default clause, we want the compiler to warn us |
2200 | * for any newly introduced state we may have forgotten to add here */ | 2253 | * for any newly introduced state we may have forgotten to add here */ |
@@ -2211,11 +2264,9 @@ static inline int drbd_state_is_stable(union drbd_state s) | |||
2211 | case C_VERIFY_T: | 2264 | case C_VERIFY_T: |
2212 | case C_PAUSED_SYNC_S: | 2265 | case C_PAUSED_SYNC_S: |
2213 | case C_PAUSED_SYNC_T: | 2266 | case C_PAUSED_SYNC_T: |
2214 | /* maybe stable, look at the disk state */ | 2267 | case C_AHEAD: |
2215 | break; | 2268 | case C_BEHIND: |
2216 | 2269 | /* transitional states, IO allowed */ | |
2217 | /* no new io accepted during transitional states | ||
2218 | * like handshake or teardown */ | ||
2219 | case C_DISCONNECTING: | 2270 | case C_DISCONNECTING: |
2220 | case C_UNCONNECTED: | 2271 | case C_UNCONNECTED: |
2221 | case C_TIMEOUT: | 2272 | case C_TIMEOUT: |
@@ -2226,7 +2277,15 @@ static inline int drbd_state_is_stable(union drbd_state s) | |||
2226 | case C_WF_REPORT_PARAMS: | 2277 | case C_WF_REPORT_PARAMS: |
2227 | case C_STARTING_SYNC_S: | 2278 | case C_STARTING_SYNC_S: |
2228 | case C_STARTING_SYNC_T: | 2279 | case C_STARTING_SYNC_T: |
2280 | break; | ||
2281 | |||
2282 | /* Allow IO in BM exchange states with new protocols */ | ||
2229 | case C_WF_BITMAP_S: | 2283 | case C_WF_BITMAP_S: |
2284 | if (mdev->agreed_pro_version < 96) | ||
2285 | return 0; | ||
2286 | break; | ||
2287 | |||
2288 | /* no new io accepted in these states */ | ||
2230 | case C_WF_BITMAP_T: | 2289 | case C_WF_BITMAP_T: |
2231 | case C_WF_SYNC_UUID: | 2290 | case C_WF_SYNC_UUID: |
2232 | case C_MASK: | 2291 | case C_MASK: |
@@ -2261,41 +2320,47 @@ static inline int is_susp(union drbd_state s) | |||
2261 | return s.susp || s.susp_nod || s.susp_fen; | 2320 | return s.susp || s.susp_nod || s.susp_fen; |
2262 | } | 2321 | } |
2263 | 2322 | ||
2264 | static inline int __inc_ap_bio_cond(struct drbd_conf *mdev) | 2323 | static inline bool may_inc_ap_bio(struct drbd_conf *mdev) |
2265 | { | 2324 | { |
2266 | int mxb = drbd_get_max_buffers(mdev); | 2325 | int mxb = drbd_get_max_buffers(mdev); |
2267 | 2326 | ||
2268 | if (is_susp(mdev->state)) | 2327 | if (is_susp(mdev->state)) |
2269 | return 0; | 2328 | return false; |
2270 | if (test_bit(SUSPEND_IO, &mdev->flags)) | 2329 | if (test_bit(SUSPEND_IO, &mdev->flags)) |
2271 | return 0; | 2330 | return false; |
2272 | 2331 | ||
2273 | /* to avoid potential deadlock or bitmap corruption, | 2332 | /* to avoid potential deadlock or bitmap corruption, |
2274 | * in various places, we only allow new application io | 2333 | * in various places, we only allow new application io |
2275 | * to start during "stable" states. */ | 2334 | * to start during "stable" states. */ |
2276 | 2335 | ||
2277 | /* no new io accepted when attaching or detaching the disk */ | 2336 | /* no new io accepted when attaching or detaching the disk */ |
2278 | if (!drbd_state_is_stable(mdev->state)) | 2337 | if (!drbd_state_is_stable(mdev)) |
2279 | return 0; | 2338 | return false; |
2280 | 2339 | ||
2281 | /* since some older kernels don't have atomic_add_unless, | 2340 | /* since some older kernels don't have atomic_add_unless, |
2282 | * and we are within the spinlock anyways, we have this workaround. */ | 2341 | * and we are within the spinlock anyways, we have this workaround. */ |
2283 | if (atomic_read(&mdev->ap_bio_cnt) > mxb) | 2342 | if (atomic_read(&mdev->ap_bio_cnt) > mxb) |
2284 | return 0; | 2343 | return false; |
2285 | if (test_bit(BITMAP_IO, &mdev->flags)) | 2344 | if (test_bit(BITMAP_IO, &mdev->flags)) |
2286 | return 0; | 2345 | return false; |
2287 | return 1; | 2346 | return true; |
2288 | } | 2347 | } |
2289 | 2348 | ||
2290 | /* I'd like to use wait_event_lock_irq, | 2349 | static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count) |
2291 | * but I'm not sure when it got introduced, | ||
2292 | * and not sure when it has 3 or 4 arguments */ | ||
2293 | static inline void inc_ap_bio(struct drbd_conf *mdev, int count) | ||
2294 | { | 2350 | { |
2295 | /* compare with after_state_ch, | 2351 | bool rv = false; |
2296 | * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */ | 2352 | |
2297 | DEFINE_WAIT(wait); | 2353 | spin_lock_irq(&mdev->req_lock); |
2354 | rv = may_inc_ap_bio(mdev); | ||
2355 | if (rv) | ||
2356 | atomic_add(count, &mdev->ap_bio_cnt); | ||
2357 | spin_unlock_irq(&mdev->req_lock); | ||
2358 | |||
2359 | return rv; | ||
2360 | } | ||
2298 | 2361 | ||
2362 | static inline void inc_ap_bio(struct drbd_conf *mdev, int count) | ||
2363 | { | ||
2299 | /* we wait here | 2364 | /* we wait here |
2300 | * as long as the device is suspended | 2365 | * as long as the device is suspended |
2301 | * until the bitmap is no longer on the fly during connection | 2366 | * until the bitmap is no longer on the fly during connection |
@@ -2304,16 +2369,7 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int count) | |||
2304 | * to avoid races with the reconnect code, | 2369 | * to avoid races with the reconnect code, |
2305 | * we need to atomic_inc within the spinlock. */ | 2370 | * we need to atomic_inc within the spinlock. */ |
2306 | 2371 | ||
2307 | spin_lock_irq(&mdev->req_lock); | 2372 | wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev, count)); |
2308 | while (!__inc_ap_bio_cond(mdev)) { | ||
2309 | prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE); | ||
2310 | spin_unlock_irq(&mdev->req_lock); | ||
2311 | schedule(); | ||
2312 | finish_wait(&mdev->misc_wait, &wait); | ||
2313 | spin_lock_irq(&mdev->req_lock); | ||
2314 | } | ||
2315 | atomic_add(count, &mdev->ap_bio_cnt); | ||
2316 | spin_unlock_irq(&mdev->req_lock); | ||
2317 | } | 2373 | } |
2318 | 2374 | ||
2319 | static inline void dec_ap_bio(struct drbd_conf *mdev) | 2375 | static inline void dec_ap_bio(struct drbd_conf *mdev) |
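inc_ap_bio_cond() bundles the check and the atomic_add under req_lock, so wait_event() can safely re-test it on every wakeup; the counterpart is that every path which can make the condition true again must wake misc_wait. A simplified sketch of that wake side (the real dec_ap_bio(), whose body follows in the next hunk, also handles the BITMAP_IO bookkeeping):

static inline void dec_ap_bio_sketch(struct drbd_conf *mdev)
{
	if (atomic_dec_return(&mdev->ap_bio_cnt) < drbd_get_max_buffers(mdev))
		wake_up(&mdev->misc_wait);  /* lets inc_ap_bio() waiters re-check */
}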
@@ -2333,9 +2389,11 @@ static inline void dec_ap_bio(struct drbd_conf *mdev) | |||
2333 | } | 2389 | } |
2334 | } | 2390 | } |
2335 | 2391 | ||
2336 | static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) | 2392 | static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) |
2337 | { | 2393 | { |
2394 | int changed = mdev->ed_uuid != val; | ||
2338 | mdev->ed_uuid = val; | 2395 | mdev->ed_uuid = val; |
2396 | return changed; | ||
2339 | } | 2397 | } |
2340 | 2398 | ||
2341 | static inline int seq_cmp(u32 a, u32 b) | 2399 | static inline int seq_cmp(u32 a, u32 b) |
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 8a43ce0edee..dfc85f32d31 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c | |||
@@ -85,7 +85,8 @@ MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " | |||
85 | MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION); | 85 | MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION); |
86 | MODULE_VERSION(REL_VERSION); | 86 | MODULE_VERSION(REL_VERSION); |
87 | MODULE_LICENSE("GPL"); | 87 | MODULE_LICENSE("GPL"); |
88 | MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)"); | 88 | MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (" |
89 | __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")"); | ||
89 | MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR); | 90 | MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR); |
90 | 91 | ||
91 | #include <linux/moduleparam.h> | 92 | #include <linux/moduleparam.h> |
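__stringify() turns the limits-header macros into string literals at preprocessing time, so the module help text can no longer drift from the real bounds. Assuming, hypothetically, a DRBD_MINOR_COUNT_MIN of 1 and a DRBD_MINOR_COUNT_MAX of 255:

/* "Maximum number of drbd devices (" "1" "-" "255" ")"
 *   -> "Maximum number of drbd devices (1-255)" */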
@@ -115,7 +116,7 @@ module_param(fault_devs, int, 0644); | |||
115 | #endif | 116 | #endif |
116 | 117 | ||
117 | /* module parameter, defined */ | 118 | /* module parameter, defined */ |
118 | unsigned int minor_count = 32; | 119 | unsigned int minor_count = DRBD_MINOR_COUNT_DEF; |
119 | int disable_sendpage; | 120 | int disable_sendpage; |
120 | int allow_oos; | 121 | int allow_oos; |
121 | unsigned int cn_idx = CN_IDX_DRBD; | 122 | unsigned int cn_idx = CN_IDX_DRBD; |
@@ -335,6 +336,7 @@ bail: | |||
335 | drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); | 336 | drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); |
336 | } | 337 | } |
337 | 338 | ||
339 | |||
338 | /** | 340 | /** |
339 | * _tl_restart() - Walks the transfer log, and applies an action to all requests | 341 | * _tl_restart() - Walks the transfer log, and applies an action to all requests |
340 | * @mdev: DRBD device. | 342 | * @mdev: DRBD device. |
@@ -456,7 +458,7 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) | |||
456 | } | 458 | } |
457 | 459 | ||
458 | /** | 460 | /** |
459 | * cl_wide_st_chg() - TRUE if the state change is a cluster wide one | 461 | * cl_wide_st_chg() - true if the state change is a cluster wide one |
460 | * @mdev: DRBD device. | 462 | * @mdev: DRBD device. |
461 | * @os: old (current) state. | 463 | * @os: old (current) state. |
462 | * @ns: new (wanted) state. | 464 | * @ns: new (wanted) state. |
@@ -473,12 +475,13 @@ static int cl_wide_st_chg(struct drbd_conf *mdev, | |||
473 | (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S); | 475 | (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S); |
474 | } | 476 | } |
475 | 477 | ||
476 | int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, | 478 | enum drbd_state_rv |
477 | union drbd_state mask, union drbd_state val) | 479 | drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, |
480 | union drbd_state mask, union drbd_state val) | ||
478 | { | 481 | { |
479 | unsigned long flags; | 482 | unsigned long flags; |
480 | union drbd_state os, ns; | 483 | union drbd_state os, ns; |
481 | int rv; | 484 | enum drbd_state_rv rv; |
482 | 485 | ||
483 | spin_lock_irqsave(&mdev->req_lock, flags); | 486 | spin_lock_irqsave(&mdev->req_lock, flags); |
484 | os = mdev->state; | 487 | os = mdev->state; |
@@ -502,20 +505,22 @@ void drbd_force_state(struct drbd_conf *mdev, | |||
502 | drbd_change_state(mdev, CS_HARD, mask, val); | 505 | drbd_change_state(mdev, CS_HARD, mask, val); |
503 | } | 506 | } |
504 | 507 | ||
505 | static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns); | 508 | static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state); |
506 | static int is_valid_state_transition(struct drbd_conf *, | 509 | static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *, |
507 | union drbd_state, union drbd_state); | 510 | union drbd_state, |
511 | union drbd_state); | ||
508 | static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os, | 512 | static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os, |
509 | union drbd_state ns, const char **warn_sync_abort); | 513 | union drbd_state ns, const char **warn_sync_abort); |
510 | int drbd_send_state_req(struct drbd_conf *, | 514 | int drbd_send_state_req(struct drbd_conf *, |
511 | union drbd_state, union drbd_state); | 515 | union drbd_state, union drbd_state); |
512 | 516 | ||
513 | static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev, | 517 | static enum drbd_state_rv |
514 | union drbd_state mask, union drbd_state val) | 518 | _req_st_cond(struct drbd_conf *mdev, union drbd_state mask, |
519 | union drbd_state val) | ||
515 | { | 520 | { |
516 | union drbd_state os, ns; | 521 | union drbd_state os, ns; |
517 | unsigned long flags; | 522 | unsigned long flags; |
518 | int rv; | 523 | enum drbd_state_rv rv; |
519 | 524 | ||
520 | if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags)) | 525 | if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags)) |
521 | return SS_CW_SUCCESS; | 526 | return SS_CW_SUCCESS; |
@@ -536,7 +541,7 @@ static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev, | |||
536 | if (rv == SS_SUCCESS) { | 541 | if (rv == SS_SUCCESS) { |
537 | rv = is_valid_state_transition(mdev, ns, os); | 542 | rv = is_valid_state_transition(mdev, ns, os); |
538 | if (rv == SS_SUCCESS) | 543 | if (rv == SS_SUCCESS) |
539 | rv = 0; /* cont waiting, otherwise fail. */ | 544 | rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */ |
540 | } | 545 | } |
541 | } | 546 | } |
542 | spin_unlock_irqrestore(&mdev->req_lock, flags); | 547 | spin_unlock_irqrestore(&mdev->req_lock, flags); |
@@ -554,14 +559,14 @@ static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev, | |||
554 | * Should not be called directly, use drbd_request_state() or | 559 | * Should not be called directly, use drbd_request_state() or |
555 | * _drbd_request_state(). | 560 | * _drbd_request_state(). |
556 | */ | 561 | */ |
557 | static int drbd_req_state(struct drbd_conf *mdev, | 562 | static enum drbd_state_rv |
558 | union drbd_state mask, union drbd_state val, | 563 | drbd_req_state(struct drbd_conf *mdev, union drbd_state mask, |
559 | enum chg_state_flags f) | 564 | union drbd_state val, enum chg_state_flags f) |
560 | { | 565 | { |
561 | struct completion done; | 566 | struct completion done; |
562 | unsigned long flags; | 567 | unsigned long flags; |
563 | union drbd_state os, ns; | 568 | union drbd_state os, ns; |
564 | int rv; | 569 | enum drbd_state_rv rv; |
565 | 570 | ||
566 | init_completion(&done); | 571 | init_completion(&done); |
567 | 572 | ||
@@ -636,10 +641,11 @@ abort: | |||
636 | * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE | 641 | * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE |
637 | * flag, or when logging of failed state change requests is not desired. | 642 | * flag, or when logging of failed state change requests is not desired. |
638 | */ | 643 | */ |
639 | int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask, | 644 | enum drbd_state_rv |
640 | union drbd_state val, enum chg_state_flags f) | 645 | _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask, |
646 | union drbd_state val, enum chg_state_flags f) | ||
641 | { | 647 | { |
642 | int rv; | 648 | enum drbd_state_rv rv; |
643 | 649 | ||
644 | wait_event(mdev->state_wait, | 650 | wait_event(mdev->state_wait, |
645 | (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE); | 651 | (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE); |
@@ -663,8 +669,8 @@ static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns) | |||
663 | ); | 669 | ); |
664 | } | 670 | } |
665 | 671 | ||
666 | void print_st_err(struct drbd_conf *mdev, | 672 | void print_st_err(struct drbd_conf *mdev, union drbd_state os, |
667 | union drbd_state os, union drbd_state ns, int err) | 673 | union drbd_state ns, enum drbd_state_rv err) |
668 | { | 674 | { |
669 | if (err == SS_IN_TRANSIENT_STATE) | 675 | if (err == SS_IN_TRANSIENT_STATE) |
670 | return; | 676 | return; |
@@ -674,32 +680,18 @@ void print_st_err(struct drbd_conf *mdev, | |||
674 | } | 680 | } |
675 | 681 | ||
676 | 682 | ||
677 | #define drbd_peer_str drbd_role_str | ||
678 | #define drbd_pdsk_str drbd_disk_str | ||
679 | |||
680 | #define drbd_susp_str(A) ((A) ? "1" : "0") | ||
681 | #define drbd_aftr_isp_str(A) ((A) ? "1" : "0") | ||
682 | #define drbd_peer_isp_str(A) ((A) ? "1" : "0") | ||
683 | #define drbd_user_isp_str(A) ((A) ? "1" : "0") | ||
684 | |||
685 | #define PSC(A) \ | ||
686 | ({ if (ns.A != os.A) { \ | ||
687 | pbp += sprintf(pbp, #A "( %s -> %s ) ", \ | ||
688 | drbd_##A##_str(os.A), \ | ||
689 | drbd_##A##_str(ns.A)); \ | ||
690 | } }) | ||
691 | |||
692 | /** | 683 | /** |
693 | * is_valid_state() - Returns an SS_ error code if ns is not valid | 684 | * is_valid_state() - Returns an SS_ error code if ns is not valid |
694 | * @mdev: DRBD device. | 685 | * @mdev: DRBD device. |
695 | * @ns: State to consider. | 686 | * @ns: State to consider. |
696 | */ | 687 | */ |
697 | static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns) | 688 | static enum drbd_state_rv |
689 | is_valid_state(struct drbd_conf *mdev, union drbd_state ns) | ||
698 | { | 690 | { |
699 | /* See drbd_state_sw_errors in drbd_strings.c */ | 691 | /* See drbd_state_sw_errors in drbd_strings.c */ |
700 | 692 | ||
701 | enum drbd_fencing_p fp; | 693 | enum drbd_fencing_p fp; |
702 | int rv = SS_SUCCESS; | 694 | enum drbd_state_rv rv = SS_SUCCESS; |
703 | 695 | ||
704 | fp = FP_DONT_CARE; | 696 | fp = FP_DONT_CARE; |
705 | if (get_ldev(mdev)) { | 697 | if (get_ldev(mdev)) { |
@@ -762,10 +754,11 @@ static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns) | |||
762 | * @ns: new state. | 754 | * @ns: new state. |
763 | * @os: old state. | 755 | * @os: old state. |
764 | */ | 756 | */ |
765 | static int is_valid_state_transition(struct drbd_conf *mdev, | 757 | static enum drbd_state_rv |
766 | union drbd_state ns, union drbd_state os) | 758 | is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns, |
759 | union drbd_state os) | ||
767 | { | 760 | { |
768 | int rv = SS_SUCCESS; | 761 | enum drbd_state_rv rv = SS_SUCCESS; |
769 | 762 | ||
770 | if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) && | 763 | if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) && |
771 | os.conn > C_CONNECTED) | 764 | os.conn > C_CONNECTED) |
@@ -800,6 +793,10 @@ static int is_valid_state_transition(struct drbd_conf *mdev, | |||
800 | os.conn < C_CONNECTED) | 793 | os.conn < C_CONNECTED) |
801 | rv = SS_NEED_CONNECTION; | 794 | rv = SS_NEED_CONNECTION; |
802 | 795 | ||
796 | if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE) | ||
797 | && os.conn < C_WF_REPORT_PARAMS) | ||
798 | rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */ | ||
799 | |||
803 | return rv; | 800 | return rv; |
804 | } | 801 | } |
805 | 802 | ||
@@ -817,6 +814,7 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state | |||
817 | union drbd_state ns, const char **warn_sync_abort) | 814 | union drbd_state ns, const char **warn_sync_abort) |
818 | { | 815 | { |
819 | enum drbd_fencing_p fp; | 816 | enum drbd_fencing_p fp; |
817 | enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max; | ||
820 | 818 | ||
821 | fp = FP_DONT_CARE; | 819 | fp = FP_DONT_CARE; |
822 | if (get_ldev(mdev)) { | 820 | if (get_ldev(mdev)) { |
@@ -869,56 +867,6 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state | |||
869 | ns.conn = C_CONNECTED; | 867 | ns.conn = C_CONNECTED; |
870 | } | 868 | } |
871 | 869 | ||
872 | if (ns.conn >= C_CONNECTED && | ||
873 | ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) || | ||
874 | (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) { | ||
875 | switch (ns.conn) { | ||
876 | case C_WF_BITMAP_T: | ||
877 | case C_PAUSED_SYNC_T: | ||
878 | ns.disk = D_OUTDATED; | ||
879 | break; | ||
880 | case C_CONNECTED: | ||
881 | case C_WF_BITMAP_S: | ||
882 | case C_SYNC_SOURCE: | ||
883 | case C_PAUSED_SYNC_S: | ||
884 | ns.disk = D_UP_TO_DATE; | ||
885 | break; | ||
886 | case C_SYNC_TARGET: | ||
887 | ns.disk = D_INCONSISTENT; | ||
888 | dev_warn(DEV, "Implicitly set disk state Inconsistent!\n"); | ||
889 | break; | ||
890 | } | ||
891 | if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE) | ||
892 | dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n"); | ||
893 | } | ||
894 | |||
895 | if (ns.conn >= C_CONNECTED && | ||
896 | (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) { | ||
897 | switch (ns.conn) { | ||
898 | case C_CONNECTED: | ||
899 | case C_WF_BITMAP_T: | ||
900 | case C_PAUSED_SYNC_T: | ||
901 | case C_SYNC_TARGET: | ||
902 | ns.pdsk = D_UP_TO_DATE; | ||
903 | break; | ||
904 | case C_WF_BITMAP_S: | ||
905 | case C_PAUSED_SYNC_S: | ||
906 | /* remap any consistent state to D_OUTDATED, | ||
907 | * but disallow "upgrade" of not even consistent states. | ||
908 | */ | ||
909 | ns.pdsk = | ||
910 | (D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED) | ||
911 | ? os.pdsk : D_OUTDATED; | ||
912 | break; | ||
913 | case C_SYNC_SOURCE: | ||
914 | ns.pdsk = D_INCONSISTENT; | ||
915 | dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n"); | ||
916 | break; | ||
917 | } | ||
918 | if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE) | ||
919 | dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n"); | ||
920 | } | ||
921 | |||
922 | /* Connection breaks down before we finished "Negotiating" */ | 870 | /* Connection breaks down before we finished "Negotiating" */ |
923 | if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING && | 871 | if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING && |
924 | get_ldev_if_state(mdev, D_NEGOTIATING)) { | 872 | get_ldev_if_state(mdev, D_NEGOTIATING)) { |
@@ -933,6 +881,94 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state | |||
933 | put_ldev(mdev); | 881 | put_ldev(mdev); |
934 | } | 882 | } |
935 | 883 | ||
884 | /* D_CONSISTENT and D_OUTDATED vanish when we get connected */ | ||
885 | if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) { | ||
886 | if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) | ||
887 | ns.disk = D_UP_TO_DATE; | ||
888 | if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED) | ||
889 | ns.pdsk = D_UP_TO_DATE; | ||
890 | } | ||
891 | |||
892 | /* Implications of the connection state on the disk states */ | ||
893 | disk_min = D_DISKLESS; | ||
894 | disk_max = D_UP_TO_DATE; | ||
895 | pdsk_min = D_INCONSISTENT; | ||
896 | pdsk_max = D_UNKNOWN; | ||
897 | switch ((enum drbd_conns)ns.conn) { | ||
898 | case C_WF_BITMAP_T: | ||
899 | case C_PAUSED_SYNC_T: | ||
900 | case C_STARTING_SYNC_T: | ||
901 | case C_WF_SYNC_UUID: | ||
902 | case C_BEHIND: | ||
903 | disk_min = D_INCONSISTENT; | ||
904 | disk_max = D_OUTDATED; | ||
905 | pdsk_min = D_UP_TO_DATE; | ||
906 | pdsk_max = D_UP_TO_DATE; | ||
907 | break; | ||
908 | case C_VERIFY_S: | ||
909 | case C_VERIFY_T: | ||
910 | disk_min = D_UP_TO_DATE; | ||
911 | disk_max = D_UP_TO_DATE; | ||
912 | pdsk_min = D_UP_TO_DATE; | ||
913 | pdsk_max = D_UP_TO_DATE; | ||
914 | break; | ||
915 | case C_CONNECTED: | ||
916 | disk_min = D_DISKLESS; | ||
917 | disk_max = D_UP_TO_DATE; | ||
918 | pdsk_min = D_DISKLESS; | ||
919 | pdsk_max = D_UP_TO_DATE; | ||
920 | break; | ||
921 | case C_WF_BITMAP_S: | ||
922 | case C_PAUSED_SYNC_S: | ||
923 | case C_STARTING_SYNC_S: | ||
924 | case C_AHEAD: | ||
925 | disk_min = D_UP_TO_DATE; | ||
926 | disk_max = D_UP_TO_DATE; | ||
927 | pdsk_min = D_INCONSISTENT; | ||
928 | pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */ | ||
929 | break; | ||
930 | case C_SYNC_TARGET: | ||
931 | disk_min = D_INCONSISTENT; | ||
932 | disk_max = D_INCONSISTENT; | ||
933 | pdsk_min = D_UP_TO_DATE; | ||
934 | pdsk_max = D_UP_TO_DATE; | ||
935 | break; | ||
936 | case C_SYNC_SOURCE: | ||
937 | disk_min = D_UP_TO_DATE; | ||
938 | disk_max = D_UP_TO_DATE; | ||
939 | pdsk_min = D_INCONSISTENT; | ||
940 | pdsk_max = D_INCONSISTENT; | ||
941 | break; | ||
942 | case C_STANDALONE: | ||
943 | case C_DISCONNECTING: | ||
944 | case C_UNCONNECTED: | ||
945 | case C_TIMEOUT: | ||
946 | case C_BROKEN_PIPE: | ||
947 | case C_NETWORK_FAILURE: | ||
948 | case C_PROTOCOL_ERROR: | ||
949 | case C_TEAR_DOWN: | ||
950 | case C_WF_CONNECTION: | ||
951 | case C_WF_REPORT_PARAMS: | ||
952 | case C_MASK: | ||
953 | break; | ||
954 | } | ||
955 | if (ns.disk > disk_max) | ||
956 | ns.disk = disk_max; | ||
957 | |||
958 | if (ns.disk < disk_min) { | ||
959 | dev_warn(DEV, "Implicitly set disk from %s to %s\n", | ||
960 | drbd_disk_str(ns.disk), drbd_disk_str(disk_min)); | ||
961 | ns.disk = disk_min; | ||
962 | } | ||
963 | if (ns.pdsk > pdsk_max) | ||
964 | ns.pdsk = pdsk_max; | ||
965 | |||
966 | if (ns.pdsk < pdsk_min) { | ||
967 | dev_warn(DEV, "Implicitly set pdsk from %s to %s\n", | ||
968 | drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min)); | ||
969 | ns.pdsk = pdsk_min; | ||
970 | } | ||
971 | |||
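The table above replaces the old ad-hoc fixups with a window clamp: each connection state defines an allowed [min, max] range for the local and the peer disk state, and sanitize_state() pulls anything outside the window to the nearest bound, warning only when it has to raise a state. A minimal userspace sketch of that clamp pattern (the enum values and the chosen windows are illustrative, not DRBD's real tables):

    #include <stdio.h>

    enum disk_state { D_DISKLESS, D_INCONSISTENT, D_OUTDATED,
                      D_CONSISTENT, D_UP_TO_DATE };

    /* Clamp val into [min, max]; warn only when raising, as lowering
     * is the expected, silent correction. */
    static enum disk_state clamp_disk(enum disk_state val,
                                      enum disk_state min, enum disk_state max)
    {
        if (val > max)
            return max;
        if (val < min) {
            fprintf(stderr, "implicitly raising disk state %d -> %d\n",
                    val, min);
            return min;
        }
        return val;
    }

    int main(void)
    {
        /* a SyncTarget's own disk may be at most Inconsistent */
        enum disk_state d = clamp_disk(D_UP_TO_DATE,
                                       D_INCONSISTENT, D_INCONSISTENT);
        /* online verify requires UpToDate on both sides */
        enum disk_state p = clamp_disk(D_OUTDATED,
                                       D_UP_TO_DATE, D_UP_TO_DATE);
        printf("disk=%d pdsk=%d\n", d, p);
        return 0;
    }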
936 | if (fp == FP_STONITH && | 972 | if (fp == FP_STONITH && |
937 | (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) && | 973 | (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) && |
938 | !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)) | 974 | !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)) |
@@ -961,6 +997,10 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state | |||
961 | /* helper for __drbd_set_state */ | 997 | /* helper for __drbd_set_state */ |
962 | static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs) | 998 | static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs) |
963 | { | 999 | { |
1000 | if (mdev->agreed_pro_version < 90) | ||
1001 | mdev->ov_start_sector = 0; | ||
1002 | mdev->rs_total = drbd_bm_bits(mdev); | ||
1003 | mdev->ov_position = 0; | ||
964 | if (cs == C_VERIFY_T) { | 1004 | if (cs == C_VERIFY_T) { |
965 | /* starting online verify from an arbitrary position | 1005 | /* starting online verify from an arbitrary position |
966 | * does not fit well into the existing protocol. | 1006 | * does not fit well into the existing protocol. |
@@ -970,11 +1010,15 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs) | |||
970 | mdev->ov_start_sector = ~(sector_t)0; | 1010 | mdev->ov_start_sector = ~(sector_t)0; |
971 | } else { | 1011 | } else { |
972 | unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector); | 1012 | unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector); |
973 | if (bit >= mdev->rs_total) | 1013 | if (bit >= mdev->rs_total) { |
974 | mdev->ov_start_sector = | 1014 | mdev->ov_start_sector = |
975 | BM_BIT_TO_SECT(mdev->rs_total - 1); | 1015 | BM_BIT_TO_SECT(mdev->rs_total - 1); |
1016 | mdev->rs_total = 1; | ||
1017 | } else | ||
1018 | mdev->rs_total -= bit; | ||
976 | mdev->ov_position = mdev->ov_start_sector; | 1019 | mdev->ov_position = mdev->ov_start_sector; |
977 | } | 1020 | } |
1021 | mdev->ov_left = mdev->rs_total; | ||
978 | } | 1022 | } |
979 | 1023 | ||
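set_ov_position() now initializes ov_position and rs_total up front and derives ov_left in one place, so a verify run covers drbd_bm_bits() bits minus whatever lies before the requested start sector. The bookkeeping is plain shift arithmetic between 512-byte sectors and bitmap bits; a compilable sketch assuming the usual 4 KiB-per-bit granularity (8 sectors per bit) — the real BM_SECT_TO_BIT()/BM_BIT_TO_SECT() macros live in DRBD's headers:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    #define SECT_PER_BIT_SHIFT 3  /* 4096-byte bit / 512-byte sector = 8 */
    #define SECT_TO_BIT(s) ((unsigned long)((s) >> SECT_PER_BIT_SHIFT))
    #define BIT_TO_SECT(b) ((sector_t)(b) << SECT_PER_BIT_SHIFT)

    int main(void)
    {
        unsigned long bm_bits = 1024;     /* pretend 4 MiB device */
        sector_t start = 9000;            /* requested verify start */
        unsigned long bit = SECT_TO_BIT(start);
        unsigned long total;

        if (bit >= bm_bits) {             /* clamp past-the-end starts */
            start = BIT_TO_SECT(bm_bits - 1);
            total = 1;
        } else
            total = bm_bits - bit;

        printf("verify %lu bits from sector %llu\n",
               total, (unsigned long long)start);
        return 0;
    }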
980 | static void drbd_resume_al(struct drbd_conf *mdev) | 1024 | static void drbd_resume_al(struct drbd_conf *mdev) |
@@ -992,12 +1036,12 @@ static void drbd_resume_al(struct drbd_conf *mdev) | |||
992 | * | 1036 | * |
993 | * Caller needs to hold req_lock, and global_state_lock. Do not call directly. | 1037 | * Caller needs to hold req_lock, and global_state_lock. Do not call directly. |
994 | */ | 1038 | */ |
995 | int __drbd_set_state(struct drbd_conf *mdev, | 1039 | enum drbd_state_rv |
996 | union drbd_state ns, enum chg_state_flags flags, | 1040 | __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, |
997 | struct completion *done) | 1041 | enum chg_state_flags flags, struct completion *done) |
998 | { | 1042 | { |
999 | union drbd_state os; | 1043 | union drbd_state os; |
1000 | int rv = SS_SUCCESS; | 1044 | enum drbd_state_rv rv = SS_SUCCESS; |
1001 | const char *warn_sync_abort = NULL; | 1045 | const char *warn_sync_abort = NULL; |
1002 | struct after_state_chg_work *ascw; | 1046 | struct after_state_chg_work *ascw; |
1003 | 1047 | ||
@@ -1033,22 +1077,46 @@ int __drbd_set_state(struct drbd_conf *mdev, | |||
1033 | dev_warn(DEV, "%s aborted.\n", warn_sync_abort); | 1077 | dev_warn(DEV, "%s aborted.\n", warn_sync_abort); |
1034 | 1078 | ||
1035 | { | 1079 | { |
1036 | char *pbp, pb[300]; | 1080 | char *pbp, pb[300]; |
1037 | pbp = pb; | 1081 | pbp = pb; |
1038 | *pbp = 0; | 1082 | *pbp = 0; |
1039 | PSC(role); | 1083 | if (ns.role != os.role) |
1040 | PSC(peer); | 1084 | pbp += sprintf(pbp, "role( %s -> %s ) ", |
1041 | PSC(conn); | 1085 | drbd_role_str(os.role), |
1042 | PSC(disk); | 1086 | drbd_role_str(ns.role)); |
1043 | PSC(pdsk); | 1087 | if (ns.peer != os.peer) |
1044 | if (is_susp(ns) != is_susp(os)) | 1088 | pbp += sprintf(pbp, "peer( %s -> %s ) ", |
1045 | pbp += sprintf(pbp, "susp( %s -> %s ) ", | 1089 | drbd_role_str(os.peer), |
1046 | drbd_susp_str(is_susp(os)), | 1090 | drbd_role_str(ns.peer)); |
1047 | drbd_susp_str(is_susp(ns))); | 1091 | if (ns.conn != os.conn) |
1048 | PSC(aftr_isp); | 1092 | pbp += sprintf(pbp, "conn( %s -> %s ) ", |
1049 | PSC(peer_isp); | 1093 | drbd_conn_str(os.conn), |
1050 | PSC(user_isp); | 1094 | drbd_conn_str(ns.conn)); |
1051 | dev_info(DEV, "%s\n", pb); | 1095 | if (ns.disk != os.disk) |
1096 | pbp += sprintf(pbp, "disk( %s -> %s ) ", | ||
1097 | drbd_disk_str(os.disk), | ||
1098 | drbd_disk_str(ns.disk)); | ||
1099 | if (ns.pdsk != os.pdsk) | ||
1100 | pbp += sprintf(pbp, "pdsk( %s -> %s ) ", | ||
1101 | drbd_disk_str(os.pdsk), | ||
1102 | drbd_disk_str(ns.pdsk)); | ||
1103 | if (is_susp(ns) != is_susp(os)) | ||
1104 | pbp += sprintf(pbp, "susp( %d -> %d ) ", | ||
1105 | is_susp(os), | ||
1106 | is_susp(ns)); | ||
1107 | if (ns.aftr_isp != os.aftr_isp) | ||
1108 | pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ", | ||
1109 | os.aftr_isp, | ||
1110 | ns.aftr_isp); | ||
1111 | if (ns.peer_isp != os.peer_isp) | ||
1112 | pbp += sprintf(pbp, "peer_isp( %d -> %d ) ", | ||
1113 | os.peer_isp, | ||
1114 | ns.peer_isp); | ||
1115 | if (ns.user_isp != os.user_isp) | ||
1116 | pbp += sprintf(pbp, "user_isp( %d -> %d ) ", | ||
1117 | os.user_isp, | ||
1118 | ns.user_isp); | ||
1119 | dev_info(DEV, "%s\n", pb); | ||
1052 | } | 1120 | } |
1053 | 1121 | ||
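The old PSC() macro printed raw numbers only; the open-coded block above formats each changed field with its symbolic name. The underlying idiom is unchanged: advance a cursor by sprintf()'s return value so one log line carries exactly the fields that differ. In isolation (the two-field struct is a stand-in):

    #include <stdio.h>

    struct state { int role, disk; };

    int main(void)
    {
        struct state os = { .role = 0, .disk = 2 };
        struct state ns = { .role = 1, .disk = 2 };
        char pb[300], *pbp = pb;

        *pbp = 0;
        if (ns.role != os.role)
            pbp += sprintf(pbp, "role( %d -> %d ) ", os.role, ns.role);
        if (ns.disk != os.disk)
            pbp += sprintf(pbp, "disk( %d -> %d ) ", os.disk, ns.disk);
        if (pbp != pb)          /* log only if something changed */
            printf("%s\n", pb);
        return 0;
    }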
1054 | /* solve the race between becoming unconfigured, | 1122 | /* solve the race between becoming unconfigured, |
@@ -1074,6 +1142,10 @@ int __drbd_set_state(struct drbd_conf *mdev, | |||
1074 | atomic_inc(&mdev->local_cnt); | 1142 | atomic_inc(&mdev->local_cnt); |
1075 | 1143 | ||
1076 | mdev->state = ns; | 1144 | mdev->state = ns; |
1145 | |||
1146 | if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) | ||
1147 | drbd_print_uuids(mdev, "attached to UUIDs"); | ||
1148 | |||
1077 | wake_up(&mdev->misc_wait); | 1149 | wake_up(&mdev->misc_wait); |
1078 | wake_up(&mdev->state_wait); | 1150 | wake_up(&mdev->state_wait); |
1079 | 1151 | ||
@@ -1081,7 +1153,7 @@ int __drbd_set_state(struct drbd_conf *mdev, | |||
1081 | if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) && | 1153 | if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) && |
1082 | ns.conn < C_CONNECTED) { | 1154 | ns.conn < C_CONNECTED) { |
1083 | mdev->ov_start_sector = | 1155 | mdev->ov_start_sector = |
1084 | BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left); | 1156 | BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left); |
1085 | dev_info(DEV, "Online Verify reached sector %llu\n", | 1157 | dev_info(DEV, "Online Verify reached sector %llu\n", |
1086 | (unsigned long long)mdev->ov_start_sector); | 1158 | (unsigned long long)mdev->ov_start_sector); |
1087 | } | 1159 | } |
@@ -1106,14 +1178,7 @@ int __drbd_set_state(struct drbd_conf *mdev, | |||
1106 | unsigned long now = jiffies; | 1178 | unsigned long now = jiffies; |
1107 | int i; | 1179 | int i; |
1108 | 1180 | ||
1109 | mdev->ov_position = 0; | 1181 | set_ov_position(mdev, ns.conn); |
1110 | mdev->rs_total = drbd_bm_bits(mdev); | ||
1111 | if (mdev->agreed_pro_version >= 90) | ||
1112 | set_ov_position(mdev, ns.conn); | ||
1113 | else | ||
1114 | mdev->ov_start_sector = 0; | ||
1115 | mdev->ov_left = mdev->rs_total | ||
1116 | - BM_SECT_TO_BIT(mdev->ov_position); | ||
1117 | mdev->rs_start = now; | 1182 | mdev->rs_start = now; |
1118 | mdev->rs_last_events = 0; | 1183 | mdev->rs_last_events = 0; |
1119 | mdev->rs_last_sect_ev = 0; | 1184 | mdev->rs_last_sect_ev = 0; |
@@ -1121,10 +1186,12 @@ int __drbd_set_state(struct drbd_conf *mdev, | |||
1121 | mdev->ov_last_oos_start = 0; | 1186 | mdev->ov_last_oos_start = 0; |
1122 | 1187 | ||
1123 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { | 1188 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { |
1124 | mdev->rs_mark_left[i] = mdev->rs_total; | 1189 | mdev->rs_mark_left[i] = mdev->ov_left; |
1125 | mdev->rs_mark_time[i] = now; | 1190 | mdev->rs_mark_time[i] = now; |
1126 | } | 1191 | } |
1127 | 1192 | ||
1193 | drbd_rs_controller_reset(mdev); | ||
1194 | |||
1128 | if (ns.conn == C_VERIFY_S) { | 1195 | if (ns.conn == C_VERIFY_S) { |
1129 | dev_info(DEV, "Starting Online Verify from sector %llu\n", | 1196 | dev_info(DEV, "Starting Online Verify from sector %llu\n", |
1130 | (unsigned long long)mdev->ov_position); | 1197 | (unsigned long long)mdev->ov_position); |
@@ -1228,6 +1295,26 @@ static void abw_start_sync(struct drbd_conf *mdev, int rv) | |||
1228 | } | 1295 | } |
1229 | } | 1296 | } |
1230 | 1297 | ||
1298 | int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, | ||
1299 | int (*io_fn)(struct drbd_conf *), | ||
1300 | char *why, enum bm_flag flags) | ||
1301 | { | ||
1302 | int rv; | ||
1303 | |||
1304 | D_ASSERT(current == mdev->worker.task); | ||
1305 | |||
1306 | /* open-coded non-blocking drbd_suspend_io(mdev); */ | ||
1307 | set_bit(SUSPEND_IO, &mdev->flags); | ||
1308 | |||
1309 | drbd_bm_lock(mdev, why, flags); | ||
1310 | rv = io_fn(mdev); | ||
1311 | drbd_bm_unlock(mdev); | ||
1312 | |||
1313 | drbd_resume_io(mdev); | ||
1314 | |||
1315 | return rv; | ||
1316 | } | ||
1317 | |||
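drbd_bitmap_io_from_worker() deliberately open-codes a non-blocking drbd_suspend_io(): running on the worker thread, it may only set the flag and must not wait for in-flight IO to drain, because the worker itself is what would complete that IO. The shape of the pattern with C11 atomics (all names here are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool suspend_io_flag;      /* stands in for SUSPEND_IO */

    static void suspend_io_nonblock(void)    /* set only -- no draining */
    {
        atomic_store(&suspend_io_flag, true);
    }

    static void resume_io(void)
    {
        atomic_store(&suspend_io_flag, false);
    }

    /* Run a bitmap operation with new IO held off, without waiting
     * for requests already in flight. */
    static int bitmap_io_from_worker(int (*io_fn)(void))
    {
        int rv;

        suspend_io_nonblock();
        rv = io_fn();
        resume_io();
        return rv;
    }

    static int dummy_bitmap_write(void) { return 0; }

    int main(void)
    {
        return bitmap_io_from_worker(dummy_bitmap_write);
    }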
1231 | /** | 1318 | /** |
1232 | * after_state_ch() - Perform after state change actions that may sleep | 1319 | * after_state_ch() - Perform after state change actions that may sleep |
1233 | * @mdev: DRBD device. | 1320 | * @mdev: DRBD device. |
@@ -1266,16 +1353,14 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1266 | 1353 | ||
1267 | nsm.i = -1; | 1354 | nsm.i = -1; |
1268 | if (ns.susp_nod) { | 1355 | if (ns.susp_nod) { |
1269 | if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { | 1356 | if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) |
1270 | if (ns.conn == C_CONNECTED) | 1357 | what = resend; |
1271 | what = resend, nsm.susp_nod = 0; | ||
1272 | else /* ns.conn > C_CONNECTED */ | ||
1273 | dev_err(DEV, "Unexpected Resynd going on!\n"); | ||
1274 | } | ||
1275 | 1358 | ||
1276 | if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING) | 1359 | if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING) |
1277 | what = restart_frozen_disk_io, nsm.susp_nod = 0; | 1360 | what = restart_frozen_disk_io; |
1278 | 1361 | ||
1362 | if (what != nothing) | ||
1363 | nsm.susp_nod = 0; | ||
1279 | } | 1364 | } |
1280 | 1365 | ||
1281 | if (ns.susp_fen) { | 1366 | if (ns.susp_fen) { |
@@ -1306,13 +1391,30 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1306 | spin_unlock_irq(&mdev->req_lock); | 1391 | spin_unlock_irq(&mdev->req_lock); |
1307 | } | 1392 | } |
1308 | 1393 | ||
1394 | /* Became sync source. With protocol >= 96, we still need to send out | ||
1395 | * the sync uuid now. Need to do that before any drbd_send_state, or | ||
1396 | * the other side may go "paused sync" before receiving the sync uuids, | ||
1397 | * which is unexpected. */ | ||
1398 | if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) && | ||
1399 | (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) && | ||
1400 | mdev->agreed_pro_version >= 96 && get_ldev(mdev)) { | ||
1401 | drbd_gen_and_send_sync_uuid(mdev); | ||
1402 | put_ldev(mdev); | ||
1403 | } | ||
1404 | |||
1309 | /* Do not change the order of the if above and the two below... */ | 1405 | /* Do not change the order of the if above and the two below... */ |
1310 | if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ | 1406 | if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ |
1311 | drbd_send_uuids(mdev); | 1407 | drbd_send_uuids(mdev); |
1312 | drbd_send_state(mdev); | 1408 | drbd_send_state(mdev); |
1313 | } | 1409 | } |
1314 | if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S) | 1410 | /* No point in queuing send_bitmap if we don't have a connection |
1315 | drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)"); | 1411 | * anymore, so check also the _current_ state, not only the new state |
1412 | * at the time this work was queued. */ | ||
1413 | if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S && | ||
1414 | mdev->state.conn == C_WF_BITMAP_S) | ||
1415 | drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, | ||
1416 | "send_bitmap (WFBitMapS)", | ||
1417 | BM_LOCKED_TEST_ALLOWED); | ||
1316 | 1418 | ||
1317 | /* Lost contact to peer's copy of the data */ | 1419 | /* Lost contact to peer's copy of the data */ |
1318 | if ((os.pdsk >= D_INCONSISTENT && | 1420 | if ((os.pdsk >= D_INCONSISTENT && |
@@ -1343,7 +1445,23 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1343 | 1445 | ||
1344 | /* D_DISKLESS Peer becomes secondary */ | 1446 | /* D_DISKLESS Peer becomes secondary */ |
1345 | if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) | 1447 | if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) |
1346 | drbd_al_to_on_disk_bm(mdev); | 1448 | /* We may still be Primary ourselves. |
1449 | * No harm done if the bitmap still changes, | ||
1450 | * redirtied pages will follow later. */ | ||
1451 | drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, | ||
1452 | "demote diskless peer", BM_LOCKED_SET_ALLOWED); | ||
1453 | put_ldev(mdev); | ||
1454 | } | ||
1455 | |||
1456 | /* Write out all changed bits on demote. | ||
1457 | * Though, no need to do that just yet | ||
1458 | * if there is still a resync going on */ | ||
1459 | if (os.role == R_PRIMARY && ns.role == R_SECONDARY && | ||
1460 | mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) { | ||
1461 | /* No changes to the bitmap expected this time, so assert that, | ||
1462 | * even though no harm was done if it did change. */ | ||
1463 | drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, | ||
1464 | "demote", BM_LOCKED_TEST_ALLOWED); | ||
1347 | put_ldev(mdev); | 1465 | put_ldev(mdev); |
1348 | } | 1466 | } |
1349 | 1467 | ||
@@ -1371,15 +1489,23 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1371 | if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED) | 1489 | if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED) |
1372 | drbd_send_state(mdev); | 1490 | drbd_send_state(mdev); |
1373 | 1491 | ||
1492 | if (os.conn != C_AHEAD && ns.conn == C_AHEAD) | ||
1493 | drbd_send_state(mdev); | ||
1494 | |||
1374 | /* We are in the process of starting a full sync... */ | 1495 | /* We are in the process of starting a full sync... */ |
1375 | if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || | 1496 | if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || |
1376 | (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S)) | 1497 | (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S)) |
1377 | drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync"); | 1498 | /* no other bitmap changes expected during this phase */ |
1499 | drbd_queue_bitmap_io(mdev, | ||
1500 | &drbd_bmio_set_n_write, &abw_start_sync, | ||
1501 | "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED); | ||
1378 | 1502 | ||
1379 | /* We are invalidating ourselves... */ | 1503 | /* We are invalidating ourselves... */ |
1380 | if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED && | 1504 | if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED && |
1381 | os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) | 1505 | os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) |
1382 | drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate"); | 1506 | /* other bitmap operation expected during this phase */ |
1507 | drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, | ||
1508 | "set_n_write from invalidate", BM_LOCKED_MASK); | ||
1383 | 1509 | ||
1384 | /* first half of local IO error, failure to attach, | 1510 | /* first half of local IO error, failure to attach, |
1385 | * or administrative detach */ | 1511 | * or administrative detach */ |
@@ -1434,8 +1560,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1434 | 1560 | ||
1435 | if (drbd_send_state(mdev)) | 1561 | if (drbd_send_state(mdev)) |
1436 | dev_warn(DEV, "Notified peer that I'm now diskless.\n"); | 1562 | dev_warn(DEV, "Notified peer that I'm now diskless.\n"); |
1437 | else | ||
1438 | dev_err(DEV, "Sending state for being diskless failed\n"); | ||
1439 | /* corresponding get_ldev in __drbd_set_state | 1563 | /* corresponding get_ldev in __drbd_set_state |
1440 | * this may finally trigger drbd_ldev_destroy. */ | 1564 | * this may finally trigger drbd_ldev_destroy. */ |
1441 | put_ldev(mdev); | 1565 | put_ldev(mdev); |
@@ -1459,6 +1583,19 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1459 | if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED) | 1583 | if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED) |
1460 | drbd_send_state(mdev); | 1584 | drbd_send_state(mdev); |
1461 | 1585 | ||
1586 | /* This triggers bitmap writeout of potentially still unwritten pages | ||
1587 | * if the resync finished cleanly, or aborted because of peer disk | ||
1588 | * failure, or because of connection loss. | ||
1589 | * For resync aborted because of local disk failure, we cannot do | ||
1590 | * any bitmap writeout anymore. | ||
1591 | * No harm done if some bits change during this phase. | ||
1592 | */ | ||
1593 | if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) { | ||
1594 | drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, | ||
1595 | "write from resync_finished", BM_LOCKED_SET_ALLOWED); | ||
1596 | put_ldev(mdev); | ||
1597 | } | ||
1598 | |||
1462 | /* free tl_hash if we got thawed and are C_STANDALONE */ | 1599 | /* free tl_hash if we got thawed and are C_STANDALONE */ |
1463 | if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash) | 1600 | if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash) |
1464 | drbd_free_tl_hash(mdev); | 1601 | drbd_free_tl_hash(mdev); |
@@ -1559,7 +1696,7 @@ int drbd_thread_start(struct drbd_thread *thi) | |||
1559 | if (!try_module_get(THIS_MODULE)) { | 1696 | if (!try_module_get(THIS_MODULE)) { |
1560 | dev_err(DEV, "Failed to get module reference in drbd_thread_start\n"); | 1697 | dev_err(DEV, "Failed to get module reference in drbd_thread_start\n"); |
1561 | spin_unlock_irqrestore(&thi->t_lock, flags); | 1698 | spin_unlock_irqrestore(&thi->t_lock, flags); |
1562 | return FALSE; | 1699 | return false; |
1563 | } | 1700 | } |
1564 | 1701 | ||
1565 | init_completion(&thi->stop); | 1702 | init_completion(&thi->stop); |
@@ -1576,7 +1713,7 @@ int drbd_thread_start(struct drbd_thread *thi) | |||
1576 | dev_err(DEV, "Couldn't start thread\n"); | 1713 | dev_err(DEV, "Couldn't start thread\n"); |
1577 | 1714 | ||
1578 | module_put(THIS_MODULE); | 1715 | module_put(THIS_MODULE); |
1579 | return FALSE; | 1716 | return false; |
1580 | } | 1717 | } |
1581 | spin_lock_irqsave(&thi->t_lock, flags); | 1718 | spin_lock_irqsave(&thi->t_lock, flags); |
1582 | thi->task = nt; | 1719 | thi->task = nt; |
@@ -1596,7 +1733,7 @@ int drbd_thread_start(struct drbd_thread *thi) | |||
1596 | break; | 1733 | break; |
1597 | } | 1734 | } |
1598 | 1735 | ||
1599 | return TRUE; | 1736 | return true; |
1600 | } | 1737 | } |
1601 | 1738 | ||
1602 | 1739 | ||
@@ -1694,8 +1831,8 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, | |||
1694 | { | 1831 | { |
1695 | int sent, ok; | 1832 | int sent, ok; |
1696 | 1833 | ||
1697 | ERR_IF(!h) return FALSE; | 1834 | ERR_IF(!h) return false; |
1698 | ERR_IF(!size) return FALSE; | 1835 | ERR_IF(!size) return false; |
1699 | 1836 | ||
1700 | h->magic = BE_DRBD_MAGIC; | 1837 | h->magic = BE_DRBD_MAGIC; |
1701 | h->command = cpu_to_be16(cmd); | 1838 | h->command = cpu_to_be16(cmd); |
@@ -1704,8 +1841,8 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, | |||
1704 | sent = drbd_send(mdev, sock, h, size, msg_flags); | 1841 | sent = drbd_send(mdev, sock, h, size, msg_flags); |
1705 | 1842 | ||
1706 | ok = (sent == size); | 1843 | ok = (sent == size); |
1707 | if (!ok) | 1844 | if (!ok && !signal_pending(current)) |
1708 | dev_err(DEV, "short sent %s size=%d sent=%d\n", | 1845 | dev_warn(DEV, "short sent %s size=%d sent=%d\n", |
1709 | cmdname(cmd), (int)size, sent); | 1846 | cmdname(cmd), (int)size, sent); |
1710 | return ok; | 1847 | return ok; |
1711 | } | 1848 | } |
@@ -1840,7 +1977,7 @@ int drbd_send_protocol(struct drbd_conf *mdev) | |||
1840 | else { | 1977 | else { |
1841 | dev_err(DEV, "--dry-run is not supported by peer"); | 1978 | dev_err(DEV, "--dry-run is not supported by peer"); |
1842 | kfree(p); | 1979 | kfree(p); |
1843 | return 0; | 1980 | return -1; |
1844 | } | 1981 | } |
1845 | } | 1982 | } |
1846 | p->conn_flags = cpu_to_be32(cf); | 1983 | p->conn_flags = cpu_to_be32(cf); |
@@ -1888,12 +2025,36 @@ int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev) | |||
1888 | return _drbd_send_uuids(mdev, 8); | 2025 | return _drbd_send_uuids(mdev, 8); |
1889 | } | 2026 | } |
1890 | 2027 | ||
2028 | void drbd_print_uuids(struct drbd_conf *mdev, const char *text) | ||
2029 | { | ||
2030 | if (get_ldev_if_state(mdev, D_NEGOTIATING)) { | ||
2031 | u64 *uuid = mdev->ldev->md.uuid; | ||
2032 | dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n", | ||
2033 | text, | ||
2034 | (unsigned long long)uuid[UI_CURRENT], | ||
2035 | (unsigned long long)uuid[UI_BITMAP], | ||
2036 | (unsigned long long)uuid[UI_HISTORY_START], | ||
2037 | (unsigned long long)uuid[UI_HISTORY_END]); | ||
2038 | put_ldev(mdev); | ||
2039 | } else { | ||
2040 | dev_info(DEV, "%s effective data uuid: %016llX\n", | ||
2041 | text, | ||
2042 | (unsigned long long)mdev->ed_uuid); | ||
2043 | } | ||
2044 | } | ||
1891 | 2045 | ||
1892 | int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val) | 2046 | int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev) |
1893 | { | 2047 | { |
1894 | struct p_rs_uuid p; | 2048 | struct p_rs_uuid p; |
2049 | u64 uuid; | ||
1895 | 2050 | ||
1896 | p.uuid = cpu_to_be64(val); | 2051 | D_ASSERT(mdev->state.disk == D_UP_TO_DATE); |
2052 | |||
2053 | uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET; | ||
2054 | drbd_uuid_set(mdev, UI_BITMAP, uuid); | ||
2055 | drbd_print_uuids(mdev, "updated sync UUID"); | ||
2056 | drbd_md_sync(mdev); | ||
2057 | p.uuid = cpu_to_be64(uuid); | ||
1897 | 2058 | ||
1898 | return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, | 2059 | return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, |
1899 | (struct p_header80 *)&p, sizeof(p)); | 2060 | (struct p_header80 *)&p, sizeof(p)); |
@@ -1921,7 +2082,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl | |||
1921 | p.d_size = cpu_to_be64(d_size); | 2082 | p.d_size = cpu_to_be64(d_size); |
1922 | p.u_size = cpu_to_be64(u_size); | 2083 | p.u_size = cpu_to_be64(u_size); |
1923 | p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev)); | 2084 | p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev)); |
1924 | p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue)); | 2085 | p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9); |
1925 | p.queue_order_type = cpu_to_be16(q_order_type); | 2086 | p.queue_order_type = cpu_to_be16(q_order_type); |
1926 | p.dds_flags = cpu_to_be16(flags); | 2087 | p.dds_flags = cpu_to_be16(flags); |
1927 | 2088 | ||
@@ -1972,7 +2133,7 @@ int drbd_send_state_req(struct drbd_conf *mdev, | |||
1972 | (struct p_header80 *)&p, sizeof(p)); | 2133 | (struct p_header80 *)&p, sizeof(p)); |
1973 | } | 2134 | } |
1974 | 2135 | ||
1975 | int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode) | 2136 | int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode) |
1976 | { | 2137 | { |
1977 | struct p_req_state_reply p; | 2138 | struct p_req_state_reply p; |
1978 | 2139 | ||
@@ -2076,9 +2237,15 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev, | |||
2076 | return len; | 2237 | return len; |
2077 | } | 2238 | } |
2078 | 2239 | ||
2079 | enum { OK, FAILED, DONE } | 2240 | /** |
2241 | * send_bitmap_rle_or_plain | ||
2242 | * | ||
2243 | * Return 0 when done, 1 when another iteration is needed, and a negative error | ||
2244 | * code upon failure. | ||
2245 | */ | ||
2246 | static int | ||
2080 | send_bitmap_rle_or_plain(struct drbd_conf *mdev, | 2247 | send_bitmap_rle_or_plain(struct drbd_conf *mdev, |
2081 | struct p_header80 *h, struct bm_xfer_ctx *c) | 2248 | struct p_header80 *h, struct bm_xfer_ctx *c) |
2082 | { | 2249 | { |
2083 | struct p_compressed_bm *p = (void*)h; | 2250 | struct p_compressed_bm *p = (void*)h; |
2084 | unsigned long num_words; | 2251 | unsigned long num_words; |
@@ -2088,7 +2255,7 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, | |||
2088 | len = fill_bitmap_rle_bits(mdev, p, c); | 2255 | len = fill_bitmap_rle_bits(mdev, p, c); |
2089 | 2256 | ||
2090 | if (len < 0) | 2257 | if (len < 0) |
2091 | return FAILED; | 2258 | return -EIO; |
2092 | 2259 | ||
2093 | if (len) { | 2260 | if (len) { |
2094 | DCBP_set_code(p, RLE_VLI_Bits); | 2261 | DCBP_set_code(p, RLE_VLI_Bits); |
@@ -2118,11 +2285,14 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, | |||
2118 | if (c->bit_offset > c->bm_bits) | 2285 | if (c->bit_offset > c->bm_bits) |
2119 | c->bit_offset = c->bm_bits; | 2286 | c->bit_offset = c->bm_bits; |
2120 | } | 2287 | } |
2121 | ok = ok ? ((len == 0) ? DONE : OK) : FAILED; | 2288 | if (ok) { |
2122 | 2289 | if (len == 0) { | |
2123 | if (ok == DONE) | 2290 | INFO_bm_xfer_stats(mdev, "send", c); |
2124 | INFO_bm_xfer_stats(mdev, "send", c); | 2291 | return 0; |
2125 | return ok; | 2292 | } else |
2293 | return 1; | ||
2294 | } | ||
2295 | return -EIO; | ||
2126 | } | 2296 | } |
2127 | 2297 | ||
2128 | /* See the comment at receive_bitmap() */ | 2298 | /* See the comment at receive_bitmap() */ |
@@ -2130,16 +2300,16 @@ int _drbd_send_bitmap(struct drbd_conf *mdev) | |||
2130 | { | 2300 | { |
2131 | struct bm_xfer_ctx c; | 2301 | struct bm_xfer_ctx c; |
2132 | struct p_header80 *p; | 2302 | struct p_header80 *p; |
2133 | int ret; | 2303 | int err; |
2134 | 2304 | ||
2135 | ERR_IF(!mdev->bitmap) return FALSE; | 2305 | ERR_IF(!mdev->bitmap) return false; |
2136 | 2306 | ||
2137 | /* maybe we should use some per thread scratch page, | 2307 | /* maybe we should use some per thread scratch page, |
2138 | * and allocate that during initial device creation? */ | 2308 | * and allocate that during initial device creation? */ |
2139 | p = (struct p_header80 *) __get_free_page(GFP_NOIO); | 2309 | p = (struct p_header80 *) __get_free_page(GFP_NOIO); |
2140 | if (!p) { | 2310 | if (!p) { |
2141 | dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); | 2311 | dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); |
2142 | return FALSE; | 2312 | return false; |
2143 | } | 2313 | } |
2144 | 2314 | ||
2145 | if (get_ldev(mdev)) { | 2315 | if (get_ldev(mdev)) { |
@@ -2165,11 +2335,11 @@ int _drbd_send_bitmap(struct drbd_conf *mdev) | |||
2165 | }; | 2335 | }; |
2166 | 2336 | ||
2167 | do { | 2337 | do { |
2168 | ret = send_bitmap_rle_or_plain(mdev, p, &c); | 2338 | err = send_bitmap_rle_or_plain(mdev, p, &c); |
2169 | } while (ret == OK); | 2339 | } while (err > 0); |
2170 | 2340 | ||
2171 | free_page((unsigned long) p); | 2341 | free_page((unsigned long) p); |
2172 | return (ret == DONE); | 2342 | return err == 0; |
2173 | } | 2343 | } |
2174 | 2344 | ||
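The private OK/FAILED/DONE enum gives way to the common kernel convention: 0 means done, a positive return means call again, negative is an errno-style error — exactly what the two-line do/while driver in _drbd_send_bitmap() relies on. A standalone sketch of the convention (send_step() is made up):

    #include <errno.h>
    #include <stdio.h>

    /* One transfer step: 0 = done, 1 = more to send, -errno = failure. */
    static int send_step(int *left)
    {
        if (*left < 0)
            return -EIO;
        if (*left == 0)
            return 0;
        (*left)--;
        return 1;
    }

    int main(void)
    {
        int left = 3, err;

        do {
            err = send_step(&left);
        } while (err > 0);

        if (err == 0)
            printf("done\n");
        else
            printf("failed: %d\n", err);
        return err != 0;
    }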
2175 | int drbd_send_bitmap(struct drbd_conf *mdev) | 2345 | int drbd_send_bitmap(struct drbd_conf *mdev) |
@@ -2192,7 +2362,7 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size) | |||
2192 | p.set_size = cpu_to_be32(set_size); | 2362 | p.set_size = cpu_to_be32(set_size); |
2193 | 2363 | ||
2194 | if (mdev->state.conn < C_CONNECTED) | 2364 | if (mdev->state.conn < C_CONNECTED) |
2195 | return FALSE; | 2365 | return false; |
2196 | ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, | 2366 | ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, |
2197 | (struct p_header80 *)&p, sizeof(p)); | 2367 | (struct p_header80 *)&p, sizeof(p)); |
2198 | return ok; | 2368 | return ok; |
@@ -2220,7 +2390,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd, | |||
2220 | p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq)); | 2390 | p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq)); |
2221 | 2391 | ||
2222 | if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED) | 2392 | if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED) |
2223 | return FALSE; | 2393 | return false; |
2224 | ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, | 2394 | ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, |
2225 | (struct p_header80 *)&p, sizeof(p)); | 2395 | (struct p_header80 *)&p, sizeof(p)); |
2226 | return ok; | 2396 | return ok; |
@@ -2326,8 +2496,8 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size) | |||
2326 | } | 2496 | } |
2327 | 2497 | ||
2328 | /* called on sndtimeo | 2498 | /* called on sndtimeo |
2329 | * returns FALSE if we should retry, | 2499 | * returns false if we should retry, |
2330 | * TRUE if we think connection is dead | 2500 | * true if we think connection is dead |
2331 | */ | 2501 | */ |
2332 | static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock) | 2502 | static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock) |
2333 | { | 2503 | { |
@@ -2340,7 +2510,7 @@ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket * | |||
2340 | || mdev->state.conn < C_CONNECTED; | 2510 | || mdev->state.conn < C_CONNECTED; |
2341 | 2511 | ||
2342 | if (drop_it) | 2512 | if (drop_it) |
2343 | return TRUE; | 2513 | return true; |
2344 | 2514 | ||
2345 | drop_it = !--mdev->ko_count; | 2515 | drop_it = !--mdev->ko_count; |
2346 | if (!drop_it) { | 2516 | if (!drop_it) { |
@@ -2531,13 +2701,39 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) | |||
2531 | if (ok && dgs) { | 2701 | if (ok && dgs) { |
2532 | dgb = mdev->int_dig_out; | 2702 | dgb = mdev->int_dig_out; |
2533 | drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb); | 2703 | drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb); |
2534 | ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); | 2704 | ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); |
2535 | } | 2705 | } |
2536 | if (ok) { | 2706 | if (ok) { |
2537 | if (mdev->net_conf->wire_protocol == DRBD_PROT_A) | 2707 | /* For protocol A, we have to memcpy the payload into |
2708 | * socket buffers, as the request may complete as soon | ||
2709 | * as we have handed it over to tcp, at which point the data | ||
2710 | * pages may become invalid. | ||
2711 | * | ||
2712 | * For data-integrity enabled, we copy it as well, so we can be | ||
2713 | * sure that even if the bio pages may still be modified, it | ||
2714 | * won't change the data on the wire, thus if the digest checks | ||
2715 | * out ok after sending on this side, but does not fit on the | ||
2716 | * receiving side, we know the corruption happened elsewhere. | ||
2717 | */ | ||
2718 | if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs) | ||
2538 | ok = _drbd_send_bio(mdev, req->master_bio); | 2719 | ok = _drbd_send_bio(mdev, req->master_bio); |
2539 | else | 2720 | else |
2540 | ok = _drbd_send_zc_bio(mdev, req->master_bio); | 2721 | ok = _drbd_send_zc_bio(mdev, req->master_bio); |
2722 | |||
2723 | /* double check digest, sometimes buffers have been modified in flight. */ | ||
2724 | if (dgs > 0 && dgs <= 64) { | ||
2725 | /* 64 byte, 512 bit, is the largest digest size | ||
2726 | * currently supported in kernel crypto. */ | ||
2727 | unsigned char digest[64]; | ||
2728 | drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest); | ||
2729 | if (memcmp(mdev->int_dig_out, digest, dgs)) { | ||
2730 | dev_warn(DEV, | ||
2731 | "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n", | ||
2732 | (unsigned long long)req->sector, req->size); | ||
2733 | } | ||
2734 | } /* else if (dgs > 64) { | ||
2735 | ... Be noisy about digest too large ... | ||
2736 | } */ | ||
2541 | } | 2737 | } |
2542 | 2738 | ||
2543 | drbd_put_data_sock(mdev); | 2739 | drbd_put_data_sock(mdev); |
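Two hardenings land in this hunk: the digest send now counts as success only if all dgs bytes went out, and after handing the pages to tcp the digest is recomputed, so upper layers redirtying the buffer mid-write are at least reported. The recheck in miniature, with a toy XOR checksum standing in for the crypto-API digest the driver uses:

    #include <stdio.h>
    #include <string.h>

    static void toy_csum(const unsigned char *buf, size_t len,
                         unsigned char *out)
    {
        unsigned char x = 0;
        for (size_t i = 0; i < len; i++)
            x ^= buf[i];
        out[0] = x;                 /* 1-byte stand-in "digest" */
    }

    int main(void)
    {
        unsigned char page[8] = "payload";
        unsigned char before[1], after[1];

        toy_csum(page, sizeof(page), before);
        page[0] ^= 0xff;            /* upper layer redirties the page */
        toy_csum(page, sizeof(page), after);

        if (memcmp(before, after, sizeof(before)))
            fprintf(stderr,
                    "digest mismatch: buffer modified during write\n");
        return 0;
    }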
@@ -2587,7 +2783,7 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, | |||
2587 | if (ok && dgs) { | 2783 | if (ok && dgs) { |
2588 | dgb = mdev->int_dig_out; | 2784 | dgb = mdev->int_dig_out; |
2589 | drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb); | 2785 | drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb); |
2590 | ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); | 2786 | ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); |
2591 | } | 2787 | } |
2592 | if (ok) | 2788 | if (ok) |
2593 | ok = _drbd_send_zc_ee(mdev, e); | 2789 | ok = _drbd_send_zc_ee(mdev, e); |
@@ -2597,6 +2793,16 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, | |||
2597 | return ok; | 2793 | return ok; |
2598 | } | 2794 | } |
2599 | 2795 | ||
2796 | int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req) | ||
2797 | { | ||
2798 | struct p_block_desc p; | ||
2799 | |||
2800 | p.sector = cpu_to_be64(req->sector); | ||
2801 | p.blksize = cpu_to_be32(req->size); | ||
2802 | |||
2803 | return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p)); | ||
2804 | } | ||
2805 | |||
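drbd_send_oos() is new: in Ahead/Behind mode the primary only tells the peer which block went out of sync instead of shipping the payload. Like the other packets it is a fixed-width struct sent big-endian; a userspace sketch of the packing, with glibc's htobe64()/htobe32() standing in for cpu_to_be64()/cpu_to_be32():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct block_desc {             /* fields as they go on the wire */
        uint64_t sector;            /* big-endian */
        uint32_t blksize;           /* big-endian */
    } __attribute__((packed));

    int main(void)
    {
        struct block_desc p = {
            .sector  = htobe64(123456),
            .blksize = htobe32(4096),
        };
        printf("wire size %zu, sector(be)=%016llx\n",
               sizeof(p), (unsigned long long)p.sector);
        return 0;
    }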
2600 | /* | 2806 | /* |
2601 | drbd_send distinguishes two cases: | 2807 | drbd_send distinguishes two cases: |
2602 | 2808 | ||
@@ -2770,6 +2976,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) | |||
2770 | atomic_set(&mdev->pp_in_use_by_net, 0); | 2976 | atomic_set(&mdev->pp_in_use_by_net, 0); |
2771 | atomic_set(&mdev->rs_sect_in, 0); | 2977 | atomic_set(&mdev->rs_sect_in, 0); |
2772 | atomic_set(&mdev->rs_sect_ev, 0); | 2978 | atomic_set(&mdev->rs_sect_ev, 0); |
2979 | atomic_set(&mdev->ap_in_flight, 0); | ||
2773 | 2980 | ||
2774 | mutex_init(&mdev->md_io_mutex); | 2981 | mutex_init(&mdev->md_io_mutex); |
2775 | mutex_init(&mdev->data.mutex); | 2982 | mutex_init(&mdev->data.mutex); |
@@ -2798,19 +3005,27 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) | |||
2798 | INIT_LIST_HEAD(&mdev->unplug_work.list); | 3005 | INIT_LIST_HEAD(&mdev->unplug_work.list); |
2799 | INIT_LIST_HEAD(&mdev->go_diskless.list); | 3006 | INIT_LIST_HEAD(&mdev->go_diskless.list); |
2800 | INIT_LIST_HEAD(&mdev->md_sync_work.list); | 3007 | INIT_LIST_HEAD(&mdev->md_sync_work.list); |
3008 | INIT_LIST_HEAD(&mdev->start_resync_work.list); | ||
2801 | INIT_LIST_HEAD(&mdev->bm_io_work.w.list); | 3009 | INIT_LIST_HEAD(&mdev->bm_io_work.w.list); |
2802 | 3010 | ||
2803 | mdev->resync_work.cb = w_resync_inactive; | 3011 | mdev->resync_work.cb = w_resync_timer; |
2804 | mdev->unplug_work.cb = w_send_write_hint; | 3012 | mdev->unplug_work.cb = w_send_write_hint; |
2805 | mdev->go_diskless.cb = w_go_diskless; | 3013 | mdev->go_diskless.cb = w_go_diskless; |
2806 | mdev->md_sync_work.cb = w_md_sync; | 3014 | mdev->md_sync_work.cb = w_md_sync; |
2807 | mdev->bm_io_work.w.cb = w_bitmap_io; | 3015 | mdev->bm_io_work.w.cb = w_bitmap_io; |
3016 | mdev->start_resync_work.cb = w_start_resync; | ||
2808 | init_timer(&mdev->resync_timer); | 3017 | init_timer(&mdev->resync_timer); |
2809 | init_timer(&mdev->md_sync_timer); | 3018 | init_timer(&mdev->md_sync_timer); |
3019 | init_timer(&mdev->start_resync_timer); | ||
3020 | init_timer(&mdev->request_timer); | ||
2810 | mdev->resync_timer.function = resync_timer_fn; | 3021 | mdev->resync_timer.function = resync_timer_fn; |
2811 | mdev->resync_timer.data = (unsigned long) mdev; | 3022 | mdev->resync_timer.data = (unsigned long) mdev; |
2812 | mdev->md_sync_timer.function = md_sync_timer_fn; | 3023 | mdev->md_sync_timer.function = md_sync_timer_fn; |
2813 | mdev->md_sync_timer.data = (unsigned long) mdev; | 3024 | mdev->md_sync_timer.data = (unsigned long) mdev; |
3025 | mdev->start_resync_timer.function = start_resync_timer_fn; | ||
3026 | mdev->start_resync_timer.data = (unsigned long) mdev; | ||
3027 | mdev->request_timer.function = request_timer_fn; | ||
3028 | mdev->request_timer.data = (unsigned long) mdev; | ||
2814 | 3029 | ||
2815 | init_waitqueue_head(&mdev->misc_wait); | 3030 | init_waitqueue_head(&mdev->misc_wait); |
2816 | init_waitqueue_head(&mdev->state_wait); | 3031 | init_waitqueue_head(&mdev->state_wait); |
@@ -2881,6 +3096,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev) | |||
2881 | D_ASSERT(list_empty(&mdev->resync_work.list)); | 3096 | D_ASSERT(list_empty(&mdev->resync_work.list)); |
2882 | D_ASSERT(list_empty(&mdev->unplug_work.list)); | 3097 | D_ASSERT(list_empty(&mdev->unplug_work.list)); |
2883 | D_ASSERT(list_empty(&mdev->go_diskless.list)); | 3098 | D_ASSERT(list_empty(&mdev->go_diskless.list)); |
3099 | |||
3100 | drbd_set_defaults(mdev); | ||
2884 | } | 3101 | } |
2885 | 3102 | ||
2886 | 3103 | ||
@@ -2923,7 +3140,7 @@ static void drbd_destroy_mempools(void) | |||
2923 | static int drbd_create_mempools(void) | 3140 | static int drbd_create_mempools(void) |
2924 | { | 3141 | { |
2925 | struct page *page; | 3142 | struct page *page; |
2926 | const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count; | 3143 | const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count; |
2927 | int i; | 3144 | int i; |
2928 | 3145 | ||
2929 | /* prepare our caches and mempools */ | 3146 | /* prepare our caches and mempools */ |
@@ -3087,11 +3304,20 @@ static void drbd_cleanup(void) | |||
3087 | 3304 | ||
3088 | unregister_reboot_notifier(&drbd_notifier); | 3305 | unregister_reboot_notifier(&drbd_notifier); |
3089 | 3306 | ||
3307 | /* first remove proc, | ||
3308 | * drbdsetup uses its presence to detect | ||
3309 | * whether DRBD is loaded. | ||
3310 | * If we got stuck in proc removal, | ||
3311 | * but have netlink already deregistered, | ||
3312 | * some drbdsetup commands may wait forever | ||
3313 | * for an answer. | ||
3314 | */ | ||
3315 | if (drbd_proc) | ||
3316 | remove_proc_entry("drbd", NULL); | ||
3317 | |||
3090 | drbd_nl_cleanup(); | 3318 | drbd_nl_cleanup(); |
3091 | 3319 | ||
3092 | if (minor_table) { | 3320 | if (minor_table) { |
3093 | if (drbd_proc) | ||
3094 | remove_proc_entry("drbd", NULL); | ||
3095 | i = minor_count; | 3321 | i = minor_count; |
3096 | while (i--) | 3322 | while (i--) |
3097 | drbd_delete_device(i); | 3323 | drbd_delete_device(i); |
@@ -3119,7 +3345,7 @@ static int drbd_congested(void *congested_data, int bdi_bits) | |||
3119 | char reason = '-'; | 3345 | char reason = '-'; |
3120 | int r = 0; | 3346 | int r = 0; |
3121 | 3347 | ||
3122 | if (!__inc_ap_bio_cond(mdev)) { | 3348 | if (!may_inc_ap_bio(mdev)) { |
3123 | /* DRBD has frozen IO */ | 3349 | /* DRBD has frozen IO */ |
3124 | r = bdi_bits; | 3350 | r = bdi_bits; |
3125 | reason = 'd'; | 3351 | reason = 'd'; |
@@ -3172,7 +3398,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor) | |||
3172 | goto out_no_disk; | 3398 | goto out_no_disk; |
3173 | mdev->vdisk = disk; | 3399 | mdev->vdisk = disk; |
3174 | 3400 | ||
3175 | set_disk_ro(disk, TRUE); | 3401 | set_disk_ro(disk, true); |
3176 | 3402 | ||
3177 | disk->queue = q; | 3403 | disk->queue = q; |
3178 | disk->major = DRBD_MAJOR; | 3404 | disk->major = DRBD_MAJOR; |
@@ -3188,8 +3414,8 @@ struct drbd_conf *drbd_new_device(unsigned int minor) | |||
3188 | q->backing_dev_info.congested_fn = drbd_congested; | 3414 | q->backing_dev_info.congested_fn = drbd_congested; |
3189 | q->backing_dev_info.congested_data = mdev; | 3415 | q->backing_dev_info.congested_data = mdev; |
3190 | 3416 | ||
3191 | blk_queue_make_request(q, drbd_make_request_26); | 3417 | blk_queue_make_request(q, drbd_make_request); |
3192 | blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE); | 3418 | blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9); |
3193 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); | 3419 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); |
3194 | blk_queue_merge_bvec(q, drbd_merge_bvec); | 3420 | blk_queue_merge_bvec(q, drbd_merge_bvec); |
3195 | q->queue_lock = &mdev->req_lock; | 3421 | q->queue_lock = &mdev->req_lock; |
@@ -3251,6 +3477,7 @@ void drbd_free_mdev(struct drbd_conf *mdev) | |||
3251 | put_disk(mdev->vdisk); | 3477 | put_disk(mdev->vdisk); |
3252 | blk_cleanup_queue(mdev->rq_queue); | 3478 | blk_cleanup_queue(mdev->rq_queue); |
3253 | free_cpumask_var(mdev->cpu_mask); | 3479 | free_cpumask_var(mdev->cpu_mask); |
3480 | drbd_free_tl_hash(mdev); | ||
3254 | kfree(mdev); | 3481 | kfree(mdev); |
3255 | } | 3482 | } |
3256 | 3483 | ||
@@ -3266,7 +3493,7 @@ int __init drbd_init(void) | |||
3266 | return -EINVAL; | 3493 | return -EINVAL; |
3267 | } | 3494 | } |
3268 | 3495 | ||
3269 | if (1 > minor_count || minor_count > 255) { | 3496 | if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) { |
3270 | printk(KERN_ERR | 3497 | printk(KERN_ERR |
3271 | "drbd: invalid minor_count (%d)\n", minor_count); | 3498 | "drbd: invalid minor_count (%d)\n", minor_count); |
3272 | #ifdef MODULE | 3499 | #ifdef MODULE |
@@ -3448,7 +3675,7 @@ void drbd_md_sync(struct drbd_conf *mdev) | |||
3448 | if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { | 3675 | if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { |
3449 | /* this was a try anyways ... */ | 3676 | /* this was a try anyways ... */ |
3450 | dev_err(DEV, "meta data update failed!\n"); | 3677 | dev_err(DEV, "meta data update failed!\n"); |
3451 | drbd_chk_io_error(mdev, 1, TRUE); | 3678 | drbd_chk_io_error(mdev, 1, true); |
3452 | } | 3679 | } |
3453 | 3680 | ||
3454 | /* Update mdev->ldev->md.la_size_sect, | 3681 | /* Update mdev->ldev->md.la_size_sect, |
@@ -3464,7 +3691,7 @@ void drbd_md_sync(struct drbd_conf *mdev) | |||
3464 | * @mdev: DRBD device. | 3691 | * @mdev: DRBD device. |
3465 | * @bdev: Device from which the meta data should be read in. | 3692 | * @bdev: Device from which the meta data should be read in. |
3466 | * | 3693 | * |
3467 | * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case | 3694 | * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case |
3468 | * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID. | 3695 | * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID. |
3469 | */ | 3696 | */ |
3470 | int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) | 3697 | int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) |
@@ -3534,28 +3761,6 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) | |||
3534 | return rv; | 3761 | return rv; |
3535 | } | 3762 | } |
3536 | 3763 | ||
3537 | static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index) | ||
3538 | { | ||
3539 | static char *uuid_str[UI_EXTENDED_SIZE] = { | ||
3540 | [UI_CURRENT] = "CURRENT", | ||
3541 | [UI_BITMAP] = "BITMAP", | ||
3542 | [UI_HISTORY_START] = "HISTORY_START", | ||
3543 | [UI_HISTORY_END] = "HISTORY_END", | ||
3544 | [UI_SIZE] = "SIZE", | ||
3545 | [UI_FLAGS] = "FLAGS", | ||
3546 | }; | ||
3547 | |||
3548 | if (index >= UI_EXTENDED_SIZE) { | ||
3549 | dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n"); | ||
3550 | return; | ||
3551 | } | ||
3552 | |||
3553 | dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n", | ||
3554 | uuid_str[index], | ||
3555 | (unsigned long long)mdev->ldev->md.uuid[index]); | ||
3556 | } | ||
3557 | |||
3558 | |||
3559 | /** | 3764 | /** |
3560 | * drbd_md_mark_dirty() - Mark meta data super block as dirty | 3765 | * drbd_md_mark_dirty() - Mark meta data super block as dirty |
3561 | * @mdev: DRBD device. | 3766 | * @mdev: DRBD device. |
@@ -3585,10 +3790,8 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local) | |||
3585 | { | 3790 | { |
3586 | int i; | 3791 | int i; |
3587 | 3792 | ||
3588 | for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) { | 3793 | for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) |
3589 | mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; | 3794 | mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; |
3590 | debug_drbd_uuid(mdev, i+1); | ||
3591 | } | ||
3592 | } | 3795 | } |
3593 | 3796 | ||
3594 | void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | 3797 | void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) |
@@ -3603,7 +3806,6 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | |||
3603 | } | 3806 | } |
3604 | 3807 | ||
3605 | mdev->ldev->md.uuid[idx] = val; | 3808 | mdev->ldev->md.uuid[idx] = val; |
3606 | debug_drbd_uuid(mdev, idx); | ||
3607 | drbd_md_mark_dirty(mdev); | 3809 | drbd_md_mark_dirty(mdev); |
3608 | } | 3810 | } |
3609 | 3811 | ||
@@ -3613,7 +3815,6 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | |||
3613 | if (mdev->ldev->md.uuid[idx]) { | 3815 | if (mdev->ldev->md.uuid[idx]) { |
3614 | drbd_uuid_move_history(mdev); | 3816 | drbd_uuid_move_history(mdev); |
3615 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; | 3817 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; |
3616 | debug_drbd_uuid(mdev, UI_HISTORY_START); | ||
3617 | } | 3818 | } |
3618 | _drbd_uuid_set(mdev, idx, val); | 3819 | _drbd_uuid_set(mdev, idx, val); |
3619 | } | 3820 | } |
@@ -3628,14 +3829,16 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | |||
3628 | void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) | 3829 | void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) |
3629 | { | 3830 | { |
3630 | u64 val; | 3831 | u64 val; |
3832 | unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; | ||
3833 | |||
3834 | if (bm_uuid) | ||
3835 | dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); | ||
3631 | 3836 | ||
3632 | dev_info(DEV, "Creating new current UUID\n"); | ||
3633 | D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0); | ||
3634 | mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; | 3837 | mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; |
3635 | debug_drbd_uuid(mdev, UI_BITMAP); | ||
3636 | 3838 | ||
3637 | get_random_bytes(&val, sizeof(u64)); | 3839 | get_random_bytes(&val, sizeof(u64)); |
3638 | _drbd_uuid_set(mdev, UI_CURRENT, val); | 3840 | _drbd_uuid_set(mdev, UI_CURRENT, val); |
3841 | drbd_print_uuids(mdev, "new current UUID"); | ||
3639 | /* get it to stable storage _now_ */ | 3842 | /* get it to stable storage _now_ */ |
3640 | drbd_md_sync(mdev); | 3843 | drbd_md_sync(mdev); |
3641 | } | 3844 | } |
@@ -3649,16 +3852,12 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) | |||
3649 | drbd_uuid_move_history(mdev); | 3852 | drbd_uuid_move_history(mdev); |
3650 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; | 3853 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; |
3651 | mdev->ldev->md.uuid[UI_BITMAP] = 0; | 3854 | mdev->ldev->md.uuid[UI_BITMAP] = 0; |
3652 | debug_drbd_uuid(mdev, UI_HISTORY_START); | ||
3653 | debug_drbd_uuid(mdev, UI_BITMAP); | ||
3654 | } else { | 3855 | } else { |
3655 | if (mdev->ldev->md.uuid[UI_BITMAP]) | 3856 | unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; |
3656 | dev_warn(DEV, "bm UUID already set"); | 3857 | if (bm_uuid) |
3657 | 3858 | dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); | |
3658 | mdev->ldev->md.uuid[UI_BITMAP] = val; | ||
3659 | mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1); | ||
3660 | 3859 | ||
3661 | debug_drbd_uuid(mdev, UI_BITMAP); | 3860 | mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); |
3662 | } | 3861 | } |
3663 | drbd_md_mark_dirty(mdev); | 3862 | drbd_md_mark_dirty(mdev); |
3664 | } | 3863 | } |
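drbd_uuid_set_bm() now masks the low bit in a single assignment; that bit is a flag bit in DRBD's UUID scheme, so a freshly stored bitmap UUID must have it cleared. The surrounding rotate-into-history pattern, reduced to a compilable sketch (the slot layout is simplified):

    #include <stdint.h>
    #include <stdio.h>

    enum { UI_CURRENT, UI_BITMAP, UI_HISTORY_START, UI_HISTORY_END, UI_SIZE };

    static void uuid_set_bm(uint64_t *uuid, uint64_t val)
    {
        if (val == 0) {
            /* retire the bitmap uuid into the history slots */
            for (int i = UI_HISTORY_END; i > UI_HISTORY_START; i--)
                uuid[i] = uuid[i - 1];
            uuid[UI_HISTORY_START] = uuid[UI_BITMAP];
            uuid[UI_BITMAP] = 0;
        } else {
            if (uuid[UI_BITMAP])
                fprintf(stderr, "bm UUID was already set: %llx\n",
                        (unsigned long long)uuid[UI_BITMAP]);
            uuid[UI_BITMAP] = val & ~(uint64_t)1;   /* clear flag bit */
        }
    }

    int main(void)
    {
        uint64_t uuid[UI_SIZE] = { 0xA0, 0, 0, 0 };
        uuid_set_bm(uuid, 0xB1);                    /* stored as 0xB0 */
        printf("bitmap uuid: %llx\n",
               (unsigned long long)uuid[UI_BITMAP]);
        return 0;
    }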
@@ -3714,15 +3913,19 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev) | |||
3714 | static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) | 3913 | static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) |
3715 | { | 3914 | { |
3716 | struct bm_io_work *work = container_of(w, struct bm_io_work, w); | 3915 | struct bm_io_work *work = container_of(w, struct bm_io_work, w); |
3717 | int rv; | 3916 | int rv = -EIO; |
3718 | 3917 | ||
3719 | D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); | 3918 | D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); |
3720 | 3919 | ||
3721 | drbd_bm_lock(mdev, work->why); | 3920 | if (get_ldev(mdev)) { |
3722 | rv = work->io_fn(mdev); | 3921 | drbd_bm_lock(mdev, work->why, work->flags); |
3723 | drbd_bm_unlock(mdev); | 3922 | rv = work->io_fn(mdev); |
3923 | drbd_bm_unlock(mdev); | ||
3924 | put_ldev(mdev); | ||
3925 | } | ||
3724 | 3926 | ||
3725 | clear_bit(BITMAP_IO, &mdev->flags); | 3927 | clear_bit(BITMAP_IO, &mdev->flags); |
3928 | smp_mb__after_clear_bit(); | ||
3726 | wake_up(&mdev->misc_wait); | 3929 | wake_up(&mdev->misc_wait); |
3727 | 3930 | ||
3728 | if (work->done) | 3931 | if (work->done) |
@@ -3730,6 +3933,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) | |||
3730 | 3933 | ||
3731 | clear_bit(BITMAP_IO_QUEUED, &mdev->flags); | 3934 | clear_bit(BITMAP_IO_QUEUED, &mdev->flags); |
3732 | work->why = NULL; | 3935 | work->why = NULL; |
3936 | work->flags = 0; | ||
3733 | 3937 | ||
3734 | return 1; | 3938 | return 1; |
3735 | } | 3939 | } |
@@ -3784,7 +3988,7 @@ void drbd_go_diskless(struct drbd_conf *mdev) | |||
3784 | void drbd_queue_bitmap_io(struct drbd_conf *mdev, | 3988 | void drbd_queue_bitmap_io(struct drbd_conf *mdev, |
3785 | int (*io_fn)(struct drbd_conf *), | 3989 | int (*io_fn)(struct drbd_conf *), |
3786 | void (*done)(struct drbd_conf *, int), | 3990 | void (*done)(struct drbd_conf *, int), |
3787 | char *why) | 3991 | char *why, enum bm_flag flags) |
3788 | { | 3992 | { |
3789 | D_ASSERT(current == mdev->worker.task); | 3993 | D_ASSERT(current == mdev->worker.task); |
3790 | 3994 | ||
@@ -3798,15 +4002,15 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev, | |||
3798 | mdev->bm_io_work.io_fn = io_fn; | 4002 | mdev->bm_io_work.io_fn = io_fn; |
3799 | mdev->bm_io_work.done = done; | 4003 | mdev->bm_io_work.done = done; |
3800 | mdev->bm_io_work.why = why; | 4004 | mdev->bm_io_work.why = why; |
4005 | mdev->bm_io_work.flags = flags; | ||
3801 | 4006 | ||
4007 | spin_lock_irq(&mdev->req_lock); | ||
3802 | set_bit(BITMAP_IO, &mdev->flags); | 4008 | set_bit(BITMAP_IO, &mdev->flags); |
3803 | if (atomic_read(&mdev->ap_bio_cnt) == 0) { | 4009 | if (atomic_read(&mdev->ap_bio_cnt) == 0) { |
3804 | if (list_empty(&mdev->bm_io_work.w.list)) { | 4010 | if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) |
3805 | set_bit(BITMAP_IO_QUEUED, &mdev->flags); | ||
3806 | drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); | 4011 | drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); |
3807 | } else | ||
3808 | dev_err(DEV, "FIXME avoided double queuing bm_io_work\n"); | ||
3809 | } | 4012 | } |
4013 | spin_unlock_irq(&mdev->req_lock); | ||
3810 | } | 4014 | } |
3811 | 4015 | ||
3812 | /** | 4016 | /** |
@@ -3818,19 +4022,22 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev, | |||
3818 | * freezes application IO while the actual IO operation runs. This | 4022 | * freezes application IO while the actual IO operation runs. This |
3819 | * function MAY NOT be called from worker context. | 4023 | * function MAY NOT be called from worker context. |
3820 | */ | 4024 | */ |
3821 | int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why) | 4025 | int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), |
4026 | char *why, enum bm_flag flags) | ||
3822 | { | 4027 | { |
3823 | int rv; | 4028 | int rv; |
3824 | 4029 | ||
3825 | D_ASSERT(current != mdev->worker.task); | 4030 | D_ASSERT(current != mdev->worker.task); |
3826 | 4031 | ||
3827 | drbd_suspend_io(mdev); | 4032 | if ((flags & BM_LOCKED_SET_ALLOWED) == 0) |
4033 | drbd_suspend_io(mdev); | ||
3828 | 4034 | ||
3829 | drbd_bm_lock(mdev, why); | 4035 | drbd_bm_lock(mdev, why, flags); |
3830 | rv = io_fn(mdev); | 4036 | rv = io_fn(mdev); |
3831 | drbd_bm_unlock(mdev); | 4037 | drbd_bm_unlock(mdev); |
3832 | 4038 | ||
3833 | drbd_resume_io(mdev); | 4039 | if ((flags & BM_LOCKED_SET_ALLOWED) == 0) |
4040 | drbd_resume_io(mdev); | ||
3834 | 4041 | ||
3835 | return rv; | 4042 | return rv; |
3836 | } | 4043 | } |
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index fe81c851ca8..03b29f78a37 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c | |||
@@ -288,10 +288,11 @@ void drbd_try_outdate_peer_async(struct drbd_conf *mdev) | |||
288 | dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n"); | 288 | dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n"); |
289 | } | 289 | } |
290 | 290 | ||
291 | int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | 291 | enum drbd_state_rv |
292 | drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | ||
292 | { | 293 | { |
293 | const int max_tries = 4; | 294 | const int max_tries = 4; |
294 | int r = 0; | 295 | enum drbd_state_rv rv = SS_UNKNOWN_ERROR; |
295 | int try = 0; | 296 | int try = 0; |
296 | int forced = 0; | 297 | int forced = 0; |
297 | union drbd_state mask, val; | 298 | union drbd_state mask, val; |
@@ -306,17 +307,17 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
306 | val.i = 0; val.role = new_role; | 307 | val.i = 0; val.role = new_role; |
307 | 308 | ||
308 | while (try++ < max_tries) { | 309 | while (try++ < max_tries) { |
309 | r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); | 310 | rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); |
310 | 311 | ||
311 | /* in case we first succeeded to outdate, | 312 | /* in case we first succeeded to outdate, |
312 | * but now suddenly could establish a connection */ | 313 | * but now suddenly could establish a connection */ |
313 | if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { | 314 | if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { |
314 | val.pdsk = 0; | 315 | val.pdsk = 0; |
315 | mask.pdsk = 0; | 316 | mask.pdsk = 0; |
316 | continue; | 317 | continue; |
317 | } | 318 | } |
318 | 319 | ||
319 | if (r == SS_NO_UP_TO_DATE_DISK && force && | 320 | if (rv == SS_NO_UP_TO_DATE_DISK && force && |
320 | (mdev->state.disk < D_UP_TO_DATE && | 321 | (mdev->state.disk < D_UP_TO_DATE && |
321 | mdev->state.disk >= D_INCONSISTENT)) { | 322 | mdev->state.disk >= D_INCONSISTENT)) { |
322 | mask.disk = D_MASK; | 323 | mask.disk = D_MASK; |
@@ -325,7 +326,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
325 | continue; | 326 | continue; |
326 | } | 327 | } |
327 | 328 | ||
328 | if (r == SS_NO_UP_TO_DATE_DISK && | 329 | if (rv == SS_NO_UP_TO_DATE_DISK && |
329 | mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { | 330 | mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { |
330 | D_ASSERT(mdev->state.pdsk == D_UNKNOWN); | 331 | D_ASSERT(mdev->state.pdsk == D_UNKNOWN); |
331 | nps = drbd_try_outdate_peer(mdev); | 332 | nps = drbd_try_outdate_peer(mdev); |
@@ -341,9 +342,9 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
341 | continue; | 342 | continue; |
342 | } | 343 | } |
343 | 344 | ||
344 | if (r == SS_NOTHING_TO_DO) | 345 | if (rv == SS_NOTHING_TO_DO) |
345 | goto fail; | 346 | goto fail; |
346 | if (r == SS_PRIMARY_NOP && mask.pdsk == 0) { | 347 | if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) { |
347 | nps = drbd_try_outdate_peer(mdev); | 348 | nps = drbd_try_outdate_peer(mdev); |
348 | 349 | ||
349 | if (force && nps > D_OUTDATED) { | 350 | if (force && nps > D_OUTDATED) { |
@@ -356,25 +357,24 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
356 | 357 | ||
357 | continue; | 358 | continue; |
358 | } | 359 | } |
359 | if (r == SS_TWO_PRIMARIES) { | 360 | if (rv == SS_TWO_PRIMARIES) { |
360 | /* Maybe the peer is detected as dead very soon... | 361 | /* Maybe the peer is detected as dead very soon... |
361 | retry at most once more in this case. */ | 362 | retry at most once more in this case. */ |
362 | __set_current_state(TASK_INTERRUPTIBLE); | 363 | schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10); |
363 | schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10); | ||
364 | if (try < max_tries) | 364 | if (try < max_tries) |
365 | try = max_tries - 1; | 365 | try = max_tries - 1; |
366 | continue; | 366 | continue; |
367 | } | 367 | } |
368 | if (r < SS_SUCCESS) { | 368 | if (rv < SS_SUCCESS) { |
369 | r = _drbd_request_state(mdev, mask, val, | 369 | rv = _drbd_request_state(mdev, mask, val, |
370 | CS_VERBOSE + CS_WAIT_COMPLETE); | 370 | CS_VERBOSE + CS_WAIT_COMPLETE); |
371 | if (r < SS_SUCCESS) | 371 | if (rv < SS_SUCCESS) |
372 | goto fail; | 372 | goto fail; |
373 | } | 373 | } |
374 | break; | 374 | break; |
375 | } | 375 | } |
376 | 376 | ||
377 | if (r < SS_SUCCESS) | 377 | if (rv < SS_SUCCESS) |
378 | goto fail; | 378 | goto fail; |
379 | 379 | ||
380 | if (forced) | 380 | if (forced) |
@@ -384,7 +384,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
384 | wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0); | 384 | wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0); |
385 | 385 | ||
386 | if (new_role == R_SECONDARY) { | 386 | if (new_role == R_SECONDARY) { |
387 | set_disk_ro(mdev->vdisk, TRUE); | 387 | set_disk_ro(mdev->vdisk, true); |
388 | if (get_ldev(mdev)) { | 388 | if (get_ldev(mdev)) { |
389 | mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; | 389 | mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; |
390 | put_ldev(mdev); | 390 | put_ldev(mdev); |
@@ -394,7 +394,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
394 | mdev->net_conf->want_lose = 0; | 394 | mdev->net_conf->want_lose = 0; |
395 | put_net_conf(mdev); | 395 | put_net_conf(mdev); |
396 | } | 396 | } |
397 | set_disk_ro(mdev->vdisk, FALSE); | 397 | set_disk_ro(mdev->vdisk, false); |
398 | if (get_ldev(mdev)) { | 398 | if (get_ldev(mdev)) { |
399 | if (((mdev->state.conn < C_CONNECTED || | 399 | if (((mdev->state.conn < C_CONNECTED || |
400 | mdev->state.pdsk <= D_FAILED) | 400 | mdev->state.pdsk <= D_FAILED) |
@@ -406,10 +406,8 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
406 | } | 406 | } |
407 | } | 407 | } |
408 | 408 | ||
409 | if ((new_role == R_SECONDARY) && get_ldev(mdev)) { | 409 | /* writeout of activity log covered areas of the bitmap |
410 | drbd_al_to_on_disk_bm(mdev); | 410 | * to stable storage is already done in the after-state-change work */ |
411 | put_ldev(mdev); | ||
412 | } | ||
413 | 411 | ||
414 | if (mdev->state.conn >= C_WF_REPORT_PARAMS) { | 412 | if (mdev->state.conn >= C_WF_REPORT_PARAMS) { |
415 | /* if this was forced, we should consider sync */ | 413 | /* if this was forced, we should consider sync */ |
@@ -423,7 +421,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
423 | kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); | 421 | kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); |
424 | fail: | 422 | fail: |
425 | mutex_unlock(&mdev->state_mutex); | 423 | mutex_unlock(&mdev->state_mutex); |
426 | return r; | 424 | return rv; |
427 | } | 425 | } |
428 | 426 | ||
429 | static struct drbd_conf *ensure_mdev(int minor, int create) | 427 | static struct drbd_conf *ensure_mdev(int minor, int create) |
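
drbd_set_role() now returns the typed enum drbd_state_rv instead of a bare int, and the SS_TWO_PRIMARIES retry collapses __set_current_state(TASK_INTERRUPTIBLE) plus schedule_timeout() into schedule_timeout_interruptible(). The bounded-retry shape, as a standalone userspace sketch; the status values and the simulated peer behavior are invented for illustration:

#include <stdio.h>
#include <unistd.h>

enum state_rv {
    SS_SUCCESS = 1,
    SS_TWO_PRIMARIES = -2,       /* retryable: peer may be dead soon */
    SS_NO_UP_TO_DATE_DISK = -3,
};

static enum state_rv request_state(int attempt)
{
    /* pretend the peer goes away on the third attempt */
    return attempt < 3 ? SS_TWO_PRIMARIES : SS_SUCCESS;
}

int main(void)
{
    const int max_tries = 4;
    int try = 0;
    enum state_rv rv = SS_TWO_PRIMARIES;

    while (try++ < max_tries) {
        rv = request_state(try);
        if (rv == SS_TWO_PRIMARIES) {
            usleep(100 * 1000);      /* ~(ping_timeo+1)*HZ/10 in the diff */
            if (try < max_tries)
                try = max_tries - 1; /* allow at most one more attempt */
            continue;
        }
        break;
    }
    printf("final rv = %d\n", rv);
    return rv < SS_SUCCESS;
}
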
@@ -528,17 +526,19 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev, | |||
528 | } | 526 | } |
529 | } | 527 | } |
530 | 528 | ||
529 | /* input size is expected to be in KB */ | ||
531 | char *ppsize(char *buf, unsigned long long size) | 530 | char *ppsize(char *buf, unsigned long long size) |
532 | { | 531 | { |
533 | /* Needs 9 bytes at max. */ | 532 | /* Needs 9 bytes at max including trailing NUL: |
533 | * -1ULL ==> "16384 EB" */ | ||
534 | static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' }; | 534 | static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' }; |
535 | int base = 0; | 535 | int base = 0; |
536 | while (size >= 10000) { | 536 | while (size >= 10000 && base < sizeof(units)-1) { |
537 | /* shift + round */ | 537 | /* shift + round */ |
538 | size = (size >> 10) + !!(size & (1<<9)); | 538 | size = (size >> 10) + !!(size & (1<<9)); |
539 | base++; | 539 | base++; |
540 | } | 540 | } |
541 | sprintf(buf, "%lu %cB", (long)size, units[base]); | 541 | sprintf(buf, "%u %cB", (unsigned)size, units[base]); |
542 | 542 | ||
543 | return buf; | 543 | return buf; |
544 | } | 544 | } |
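
The reworked ppsize() gains an explicit bound on the unit index so that even -1ULL cannot step past the units[] array. Lifted into a standalone program (only main() and an int cast are additions), the overflow guard can be checked directly:

#include <stdio.h>

/* input size is expected to be in KB */
static char *ppsize(char *buf, unsigned long long size)
{
    /* Needs 9 bytes at max including trailing NUL: -1ULL ==> "16384 EB" */
    static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
    int base = 0;
    while (size >= 10000 && base < (int)sizeof(units) - 1) {
        /* shift + round */
        size = (size >> 10) + !!(size & (1 << 9));
        base++;
    }
    sprintf(buf, "%u %cB", (unsigned)size, units[base]);
    return buf;
}

int main(void)
{
    char buf[16];
    printf("%s\n", ppsize(buf, 4096));        /* "4096 KB" */
    printf("%s\n", ppsize(buf, 3ULL << 20));  /* "3072 MB" */
    printf("%s\n", ppsize(buf, -1ULL));       /* "16384 EB", not past 'E' */
    return 0;
}
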
@@ -642,11 +642,19 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_ | |||
642 | || prev_size != mdev->ldev->md.md_size_sect; | 642 | || prev_size != mdev->ldev->md.md_size_sect; |
643 | 643 | ||
644 | if (la_size_changed || md_moved) { | 644 | if (la_size_changed || md_moved) { |
645 | int err; | ||
646 | |||
645 | drbd_al_shrink(mdev); /* All extents inactive. */ | 647 | drbd_al_shrink(mdev); /* All extents inactive. */ |
646 | dev_info(DEV, "Writing the whole bitmap, %s\n", | 648 | dev_info(DEV, "Writing the whole bitmap, %s\n", |
647 | la_size_changed && md_moved ? "size changed and md moved" : | 649 | la_size_changed && md_moved ? "size changed and md moved" : |
648 | la_size_changed ? "size changed" : "md moved"); | 650 | la_size_changed ? "size changed" : "md moved"); |
649 | rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */ | 651 | /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ |
652 | err = drbd_bitmap_io(mdev, &drbd_bm_write, | ||
653 | "size changed", BM_LOCKED_MASK); | ||
654 | if (err) { | ||
655 | rv = dev_size_error; | ||
656 | goto out; | ||
657 | } | ||
650 | drbd_md_mark_dirty(mdev); | 658 | drbd_md_mark_dirty(mdev); |
651 | } | 659 | } |
652 | 660 | ||
@@ -765,22 +773,21 @@ static int drbd_check_al_size(struct drbd_conf *mdev) | |||
765 | return 0; | 773 | return 0; |
766 | } | 774 | } |
767 | 775 | ||
768 | void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local) | 776 | void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local) |
769 | { | 777 | { |
770 | struct request_queue * const q = mdev->rq_queue; | 778 | struct request_queue * const q = mdev->rq_queue; |
771 | struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; | 779 | struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; |
772 | int max_segments = mdev->ldev->dc.max_bio_bvecs; | 780 | int max_segments = mdev->ldev->dc.max_bio_bvecs; |
781 | int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); | ||
773 | 782 | ||
774 | max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); | ||
775 | |||
776 | blk_queue_max_hw_sectors(q, max_seg_s >> 9); | ||
777 | blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); | ||
778 | blk_queue_max_segment_size(q, max_seg_s); | ||
779 | blk_queue_logical_block_size(q, 512); | 783 | blk_queue_logical_block_size(q, 512); |
780 | blk_queue_segment_boundary(q, PAGE_SIZE-1); | 784 | blk_queue_max_hw_sectors(q, max_hw_sectors); |
781 | blk_stack_limits(&q->limits, &b->limits, 0); | 785 | /* This is the workaround for "bio would need to, but cannot, be split" */ |
786 | blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); | ||
787 | blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); | ||
788 | blk_queue_stack_limits(q, b); | ||
782 | 789 | ||
783 | dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q)); | 790 | dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9); |
784 | 791 | ||
785 | if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { | 792 | if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { |
786 | dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", | 793 | dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", |
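
The queue setup above stops deriving a "max segment size" and instead clamps the device's max_hw_sectors to the smaller of the backing queue's limit and the requested max BIO size, converted from bytes to 512-byte sectors. The arithmetic in isolation, with invented example limits:

#include <stdio.h>

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
    unsigned backing_max_hw_sectors = 2048;  /* backing device: 1 MiB  */
    unsigned max_bio_size = 128 * 1024;      /* example byte limit     */
    unsigned max_hw_sectors =
        min_u(backing_max_hw_sectors, max_bio_size >> 9);

    printf("max BIO size = %u bytes\n", max_hw_sectors << 9);
    return 0;
}
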
@@ -850,7 +857,7 @@ static void drbd_suspend_al(struct drbd_conf *mdev) | |||
850 | static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 857 | static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, |
851 | struct drbd_nl_cfg_reply *reply) | 858 | struct drbd_nl_cfg_reply *reply) |
852 | { | 859 | { |
853 | enum drbd_ret_codes retcode; | 860 | enum drbd_ret_code retcode; |
854 | enum determine_dev_size dd; | 861 | enum determine_dev_size dd; |
855 | sector_t max_possible_sectors; | 862 | sector_t max_possible_sectors; |
856 | sector_t min_md_device_sectors; | 863 | sector_t min_md_device_sectors; |
@@ -858,8 +865,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
858 | struct block_device *bdev; | 865 | struct block_device *bdev; |
859 | struct lru_cache *resync_lru = NULL; | 866 | struct lru_cache *resync_lru = NULL; |
860 | union drbd_state ns, os; | 867 | union drbd_state ns, os; |
861 | unsigned int max_seg_s; | 868 | unsigned int max_bio_size; |
862 | int rv; | 869 | enum drbd_state_rv rv; |
863 | int cp_discovered = 0; | 870 | int cp_discovered = 0; |
864 | int logical_block_size; | 871 | int logical_block_size; |
865 | 872 | ||
@@ -1005,9 +1012,10 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1005 | /* and for any other previously queued work */ | 1012 | /* and for any other previously queued work */ |
1006 | drbd_flush_workqueue(mdev); | 1013 | drbd_flush_workqueue(mdev); |
1007 | 1014 | ||
1008 | retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); | 1015 | rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); |
1016 | retcode = rv; /* FIXME: Type mismatch. */ | ||
1009 | drbd_resume_io(mdev); | 1017 | drbd_resume_io(mdev); |
1010 | if (retcode < SS_SUCCESS) | 1018 | if (rv < SS_SUCCESS) |
1011 | goto fail; | 1019 | goto fail; |
1012 | 1020 | ||
1013 | if (!get_ldev_if_state(mdev, D_ATTACHING)) | 1021 | if (!get_ldev_if_state(mdev, D_ATTACHING)) |
@@ -1109,20 +1117,20 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1109 | mdev->read_cnt = 0; | 1117 | mdev->read_cnt = 0; |
1110 | mdev->writ_cnt = 0; | 1118 | mdev->writ_cnt = 0; |
1111 | 1119 | ||
1112 | max_seg_s = DRBD_MAX_SEGMENT_SIZE; | 1120 | max_bio_size = DRBD_MAX_BIO_SIZE; |
1113 | if (mdev->state.conn == C_CONNECTED) { | 1121 | if (mdev->state.conn == C_CONNECTED) { |
1114 | /* We are Primary, Connected, and now attach a new local | 1122 | /* We are Primary, Connected, and now attach a new local |
1115 | * backing store. We must not increase the user visible maximum | 1123 | * backing store. We must not increase the user visible maximum |
1116 | * bio size on this device to something the peer may not be | 1124 | * bio size on this device to something the peer may not be |
1117 | * able to handle. */ | 1125 | * able to handle. */ |
1118 | if (mdev->agreed_pro_version < 94) | 1126 | if (mdev->agreed_pro_version < 94) |
1119 | max_seg_s = queue_max_segment_size(mdev->rq_queue); | 1127 | max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9; |
1120 | else if (mdev->agreed_pro_version == 94) | 1128 | else if (mdev->agreed_pro_version == 94) |
1121 | max_seg_s = DRBD_MAX_SIZE_H80_PACKET; | 1129 | max_bio_size = DRBD_MAX_SIZE_H80_PACKET; |
1122 | /* else: drbd 8.3.9 and later, stay with default */ | 1130 | /* else: drbd 8.3.9 and later, stay with default */ |
1123 | } | 1131 | } |
1124 | 1132 | ||
1125 | drbd_setup_queue_param(mdev, max_seg_s); | 1133 | drbd_setup_queue_param(mdev, max_bio_size); |
1126 | 1134 | ||
1127 | /* If I am currently not R_PRIMARY, | 1135 | /* If I am currently not R_PRIMARY, |
1128 | * but meta data primary indicator is set, | 1136 | * but meta data primary indicator is set, |
@@ -1154,12 +1162,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1154 | if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { | 1162 | if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { |
1155 | dev_info(DEV, "Assuming that all blocks are out of sync " | 1163 | dev_info(DEV, "Assuming that all blocks are out of sync " |
1156 | "(aka FullSync)\n"); | 1164 | "(aka FullSync)\n"); |
1157 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) { | 1165 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, |
1166 | "set_n_write from attaching", BM_LOCKED_MASK)) { | ||
1158 | retcode = ERR_IO_MD_DISK; | 1167 | retcode = ERR_IO_MD_DISK; |
1159 | goto force_diskless_dec; | 1168 | goto force_diskless_dec; |
1160 | } | 1169 | } |
1161 | } else { | 1170 | } else { |
1162 | if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) { | 1171 | if (drbd_bitmap_io(mdev, &drbd_bm_read, |
1172 | "read from attaching", BM_LOCKED_MASK) < 0) { | ||
1163 | retcode = ERR_IO_MD_DISK; | 1173 | retcode = ERR_IO_MD_DISK; |
1164 | goto force_diskless_dec; | 1174 | goto force_diskless_dec; |
1165 | } | 1175 | } |
@@ -1167,7 +1177,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1167 | 1177 | ||
1168 | if (cp_discovered) { | 1178 | if (cp_discovered) { |
1169 | drbd_al_apply_to_bm(mdev); | 1179 | drbd_al_apply_to_bm(mdev); |
1170 | drbd_al_to_on_disk_bm(mdev); | 1180 | if (drbd_bitmap_io(mdev, &drbd_bm_write, |
1181 | "crashed primary apply AL", BM_LOCKED_MASK)) { | ||
1182 | retcode = ERR_IO_MD_DISK; | ||
1183 | goto force_diskless_dec; | ||
1184 | } | ||
1171 | } | 1185 | } |
1172 | 1186 | ||
1173 | if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev)) | 1187 | if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev)) |
@@ -1279,7 +1293,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
1279 | struct drbd_nl_cfg_reply *reply) | 1293 | struct drbd_nl_cfg_reply *reply) |
1280 | { | 1294 | { |
1281 | int i, ns; | 1295 | int i, ns; |
1282 | enum drbd_ret_codes retcode; | 1296 | enum drbd_ret_code retcode; |
1283 | struct net_conf *new_conf = NULL; | 1297 | struct net_conf *new_conf = NULL; |
1284 | struct crypto_hash *tfm = NULL; | 1298 | struct crypto_hash *tfm = NULL; |
1285 | struct crypto_hash *integrity_w_tfm = NULL; | 1299 | struct crypto_hash *integrity_w_tfm = NULL; |
@@ -1324,6 +1338,8 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
1324 | new_conf->wire_protocol = DRBD_PROT_C; | 1338 | new_conf->wire_protocol = DRBD_PROT_C; |
1325 | new_conf->ping_timeo = DRBD_PING_TIMEO_DEF; | 1339 | new_conf->ping_timeo = DRBD_PING_TIMEO_DEF; |
1326 | new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF; | 1340 | new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF; |
1341 | new_conf->on_congestion = DRBD_ON_CONGESTION_DEF; | ||
1342 | new_conf->cong_extents = DRBD_CONG_EXTENTS_DEF; | ||
1327 | 1343 | ||
1328 | if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) { | 1344 | if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) { |
1329 | retcode = ERR_MANDATORY_TAG; | 1345 | retcode = ERR_MANDATORY_TAG; |
@@ -1345,6 +1361,11 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
1345 | } | 1361 | } |
1346 | } | 1362 | } |
1347 | 1363 | ||
1364 | if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) { | ||
1365 | retcode = ERR_CONG_NOT_PROTO_A; | ||
1366 | goto fail; | ||
1367 | } | ||
1368 | |||
1348 | if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { | 1369 | if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { |
1349 | retcode = ERR_DISCARD; | 1370 | retcode = ERR_DISCARD; |
1350 | goto fail; | 1371 | goto fail; |
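
The new check rejects a congestion policy other than "block" unless asynchronous protocol A is in use, since pull-ahead or disconnect-on-congestion only make sense when writes are not synchronously acknowledged. A compilable sketch of the rule; the enum values are illustrative, not the actual DRBD ones:

#include <stdio.h>

enum { DRBD_PROT_A = 1, DRBD_PROT_B, DRBD_PROT_C };
enum { OC_BLOCK, OC_PULL_AHEAD, OC_DISCONNECT };

static int check_net_conf(int on_congestion, int wire_protocol)
{
    if (on_congestion != OC_BLOCK && wire_protocol != DRBD_PROT_A)
        return -1;   /* ERR_CONG_NOT_PROTO_A in the diff */
    return 0;
}

int main(void)
{
    printf("%d\n", check_net_conf(OC_PULL_AHEAD, DRBD_PROT_C)); /* -1 */
    printf("%d\n", check_net_conf(OC_PULL_AHEAD, DRBD_PROT_A)); /*  0 */
    printf("%d\n", check_net_conf(OC_BLOCK,      DRBD_PROT_C)); /*  0 */
    return 0;
}
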
@@ -1525,6 +1546,21 @@ static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl | |||
1525 | struct drbd_nl_cfg_reply *reply) | 1546 | struct drbd_nl_cfg_reply *reply) |
1526 | { | 1547 | { |
1527 | int retcode; | 1548 | int retcode; |
1549 | struct disconnect dc; | ||
1550 | |||
1551 | memset(&dc, 0, sizeof(struct disconnect)); | ||
1552 | if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) { | ||
1553 | retcode = ERR_MANDATORY_TAG; | ||
1554 | goto fail; | ||
1555 | } | ||
1556 | |||
1557 | if (dc.force) { | ||
1558 | spin_lock_irq(&mdev->req_lock); | ||
1559 | if (mdev->state.conn >= C_WF_CONNECTION) | ||
1560 | _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL); | ||
1561 | spin_unlock_irq(&mdev->req_lock); | ||
1562 | goto done; | ||
1563 | } | ||
1528 | 1564 | ||
1529 | retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED); | 1565 | retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED); |
1530 | 1566 | ||
@@ -1842,6 +1878,10 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl | |||
1842 | { | 1878 | { |
1843 | int retcode; | 1879 | int retcode; |
1844 | 1880 | ||
1881 | /* If there is still bitmap IO pending, probably because of a previous | ||
1882 | * resync just being finished, wait for it before requesting a new resync. */ | ||
1883 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); | ||
1884 | |||
1845 | retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); | 1885 | retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); |
1846 | 1886 | ||
1847 | if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION) | 1887 | if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION) |
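
This handler, invalidate_peer, and start_ov below all gain the same guard: drain any still-pending bitmap IO before kicking off a new resync. In userspace terms, wait_event(misc_wait, !test_bit(BITMAP_IO, ...)) is a sleep on a condition plus a wakeup from the completing writer; a pthread model of that handshake (all names invented):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int bitmap_io_pending = 1;

static void *bitmap_writer(void *arg)
{
    (void)arg;
    usleep(50 * 1000);               /* pretend to write the bitmap */
    pthread_mutex_lock(&lock);
    bitmap_io_pending = 0;
    pthread_cond_broadcast(&done);   /* like wake_up(&mdev->misc_wait) */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, bitmap_writer, NULL);

    pthread_mutex_lock(&lock);
    while (bitmap_io_pending)        /* wait_event(..., !BITMAP_IO) */
        pthread_cond_wait(&done, &lock);
    pthread_mutex_unlock(&lock);

    printf("bitmap IO drained, resync may start\n");
    pthread_join(t, NULL);
    return 0;
}
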
@@ -1877,6 +1917,10 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re | |||
1877 | { | 1917 | { |
1878 | int retcode; | 1918 | int retcode; |
1879 | 1919 | ||
1920 | /* If there is still bitmap IO pending, probably because of a previous | ||
1921 | * resync just being finished, wait for it before requesting a new resync. */ | ||
1922 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); | ||
1923 | |||
1880 | retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); | 1924 | retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); |
1881 | 1925 | ||
1882 | if (retcode < SS_SUCCESS) { | 1926 | if (retcode < SS_SUCCESS) { |
@@ -1885,9 +1929,9 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re | |||
1885 | into a full resync. */ | 1929 | into a full resync. */ |
1886 | retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT)); | 1930 | retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT)); |
1887 | if (retcode >= SS_SUCCESS) { | 1931 | if (retcode >= SS_SUCCESS) { |
1888 | /* open coded drbd_bitmap_io() */ | ||
1889 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al, | 1932 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al, |
1890 | "set_n_write from invalidate_peer")) | 1933 | "set_n_write from invalidate_peer", |
1934 | BM_LOCKED_SET_ALLOWED)) | ||
1891 | retcode = ERR_IO_MD_DISK; | 1935 | retcode = ERR_IO_MD_DISK; |
1892 | } | 1936 | } |
1893 | } else | 1937 | } else |
@@ -1914,9 +1958,17 @@ static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n | |||
1914 | struct drbd_nl_cfg_reply *reply) | 1958 | struct drbd_nl_cfg_reply *reply) |
1915 | { | 1959 | { |
1916 | int retcode = NO_ERROR; | 1960 | int retcode = NO_ERROR; |
1961 | union drbd_state s; | ||
1917 | 1962 | ||
1918 | if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) | 1963 | if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { |
1919 | retcode = ERR_PAUSE_IS_CLEAR; | 1964 | s = mdev->state; |
1965 | if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) { | ||
1966 | retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP : | ||
1967 | s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR; | ||
1968 | } else { | ||
1969 | retcode = ERR_PAUSE_IS_CLEAR; | ||
1970 | } | ||
1971 | } | ||
1920 | 1972 | ||
1921 | reply->ret_code = retcode; | 1973 | reply->ret_code = retcode; |
1922 | return 0; | 1974 | return 0; |
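
resume-sync now explains why nothing was resumed: if the connection really is in a paused-sync state, the reply distinguishes an after-dependency pause from a peer-imposed one before falling back to "pause flag already clear". The selection logic as a tiny standalone function, with invented error values:

#include <stdio.h>

enum { ERR_PAUSE_IS_CLEAR = 1, ERR_PIC_AFTER_DEP, ERR_PIC_PEER_DEP };

/* paused: conn is C_PAUSED_SYNC_S/T; aftr/peer: other pause reasons set */
static int resume_sync_retcode(int paused, int aftr_isp, int peer_isp)
{
    if (!paused)
        return ERR_PAUSE_IS_CLEAR;
    return aftr_isp ? ERR_PIC_AFTER_DEP :
           peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
}

int main(void)
{
    printf("%d\n", resume_sync_retcode(1, 1, 0)); /* after-dependency */
    printf("%d\n", resume_sync_retcode(1, 0, 1)); /* paused by peer   */
    printf("%d\n", resume_sync_retcode(0, 0, 0)); /* nothing paused   */
    return 0;
}
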
@@ -2054,6 +2106,11 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
2054 | reply->ret_code = ERR_MANDATORY_TAG; | 2106 | reply->ret_code = ERR_MANDATORY_TAG; |
2055 | return 0; | 2107 | return 0; |
2056 | } | 2108 | } |
2109 | |||
2110 | /* If there is still bitmap IO pending, e.g. previous resync or verify | ||
2111 | * just being finished, wait for it before requesting a new resync. */ | ||
2112 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); | ||
2113 | |||
2057 | /* w_make_ov_request expects position to be aligned */ | 2114 | /* w_make_ov_request expects position to be aligned */ |
2058 | mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT; | 2115 | mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT; |
2059 | reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); | 2116 | reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); |
@@ -2097,7 +2154,8 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl | |||
2097 | drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */ | 2154 | drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */ |
2098 | 2155 | ||
2099 | if (args.clear_bm) { | 2156 | if (args.clear_bm) { |
2100 | err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid"); | 2157 | err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, |
2158 | "clear_n_write from new_c_uuid", BM_LOCKED_MASK); | ||
2101 | if (err) { | 2159 | if (err) { |
2102 | dev_err(DEV, "Writing bitmap failed with %d\n",err); | 2160 | dev_err(DEV, "Writing bitmap failed with %d\n",err); |
2103 | retcode = ERR_IO_MD_DISK; | 2161 | retcode = ERR_IO_MD_DISK; |
@@ -2105,6 +2163,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl | |||
2105 | if (skip_initial_sync) { | 2163 | if (skip_initial_sync) { |
2106 | drbd_send_uuids_skip_initial_sync(mdev); | 2164 | drbd_send_uuids_skip_initial_sync(mdev); |
2107 | _drbd_uuid_set(mdev, UI_BITMAP, 0); | 2165 | _drbd_uuid_set(mdev, UI_BITMAP, 0); |
2166 | drbd_print_uuids(mdev, "cleared bitmap UUID"); | ||
2108 | spin_lock_irq(&mdev->req_lock); | 2167 | spin_lock_irq(&mdev->req_lock); |
2109 | _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), | 2168 | _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), |
2110 | CS_VERBOSE, NULL); | 2169 | CS_VERBOSE, NULL); |
@@ -2189,7 +2248,8 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms | |||
2189 | goto fail; | 2248 | goto fail; |
2190 | } | 2249 | } |
2191 | 2250 | ||
2192 | if (nlp->packet_type >= P_nl_after_last_packet) { | 2251 | if (nlp->packet_type >= P_nl_after_last_packet || |
2252 | nlp->packet_type == P_return_code_only) { | ||
2193 | retcode = ERR_PACKET_NR; | 2253 | retcode = ERR_PACKET_NR; |
2194 | goto fail; | 2254 | goto fail; |
2195 | } | 2255 | } |
@@ -2205,7 +2265,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms | |||
2205 | reply_size += cm->reply_body_size; | 2265 | reply_size += cm->reply_body_size; |
2206 | 2266 | ||
2207 | /* allocation not in the IO path, cqueue thread context */ | 2267 | /* allocation not in the IO path, cqueue thread context */ |
2208 | cn_reply = kmalloc(reply_size, GFP_KERNEL); | 2268 | cn_reply = kzalloc(reply_size, GFP_KERNEL); |
2209 | if (!cn_reply) { | 2269 | if (!cn_reply) { |
2210 | retcode = ERR_NOMEM; | 2270 | retcode = ERR_NOMEM; |
2211 | goto fail; | 2271 | goto fail; |
@@ -2213,7 +2273,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms | |||
2213 | reply = (struct drbd_nl_cfg_reply *) cn_reply->data; | 2273 | reply = (struct drbd_nl_cfg_reply *) cn_reply->data; |
2214 | 2274 | ||
2215 | reply->packet_type = | 2275 | reply->packet_type = |
2216 | cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet; | 2276 | cm->reply_body_size ? nlp->packet_type : P_return_code_only; |
2217 | reply->minor = nlp->drbd_minor; | 2277 | reply->minor = nlp->drbd_minor; |
2218 | reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */ | 2278 | reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */ |
2219 | /* reply->tag_list; might be modified by cm->function. */ | 2279 | /* reply->tag_list; might be modified by cm->function. */ |
@@ -2376,7 +2436,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev, | |||
2376 | /* receiver thread context, which is not in the writeout path (of this node), | 2436 | /* receiver thread context, which is not in the writeout path (of this node), |
2377 | * but may be in the writeout path of the _other_ node. | 2437 | * but may be in the writeout path of the _other_ node. |
2378 | * GFP_NOIO to avoid potential "distributed deadlock". */ | 2438 | * GFP_NOIO to avoid potential "distributed deadlock". */ |
2379 | cn_reply = kmalloc( | 2439 | cn_reply = kzalloc( |
2380 | sizeof(struct cn_msg)+ | 2440 | sizeof(struct cn_msg)+ |
2381 | sizeof(struct drbd_nl_cfg_reply)+ | 2441 | sizeof(struct drbd_nl_cfg_reply)+ |
2382 | sizeof(struct dump_ee_tag_len_struct)+ | 2442 | sizeof(struct dump_ee_tag_len_struct)+ |
@@ -2398,10 +2458,11 @@ void drbd_bcast_ee(struct drbd_conf *mdev, | |||
2398 | tl = tl_add_int(tl, T_ee_sector, &e->sector); | 2458 | tl = tl_add_int(tl, T_ee_sector, &e->sector); |
2399 | tl = tl_add_int(tl, T_ee_block_id, &e->block_id); | 2459 | tl = tl_add_int(tl, T_ee_block_id, &e->block_id); |
2400 | 2460 | ||
2461 | /* dump the first 32k */ | ||
2462 | len = min_t(unsigned, e->size, 32 << 10); | ||
2401 | put_unaligned(T_ee_data, tl++); | 2463 | put_unaligned(T_ee_data, tl++); |
2402 | put_unaligned(e->size, tl++); | 2464 | put_unaligned(len, tl++); |
2403 | 2465 | ||
2404 | len = e->size; | ||
2405 | page = e->pages; | 2466 | page = e->pages; |
2406 | page_chain_for_each(page) { | 2467 | page_chain_for_each(page) { |
2407 | void *d = kmap_atomic(page, KM_USER0); | 2468 | void *d = kmap_atomic(page, KM_USER0); |
@@ -2410,6 +2471,8 @@ void drbd_bcast_ee(struct drbd_conf *mdev, | |||
2410 | kunmap_atomic(d, KM_USER0); | 2471 | kunmap_atomic(d, KM_USER0); |
2411 | tl = (unsigned short*)((char*)tl + l); | 2472 | tl = (unsigned short*)((char*)tl + l); |
2412 | len -= l; | 2473 | len -= l; |
2474 | if (len == 0) | ||
2475 | break; | ||
2413 | } | 2476 | } |
2414 | put_unaligned(TT_END, tl++); /* Close the tag list */ | 2477 | put_unaligned(TT_END, tl++); /* Close the tag list */ |
2415 | 2478 | ||
@@ -2508,6 +2571,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code) | |||
2508 | (struct drbd_nl_cfg_reply *)cn_reply->data; | 2571 | (struct drbd_nl_cfg_reply *)cn_reply->data; |
2509 | int rr; | 2572 | int rr; |
2510 | 2573 | ||
2574 | memset(buffer, 0, sizeof(buffer)); | ||
2511 | cn_reply->id = req->id; | 2575 | cn_reply->id = req->id; |
2512 | 2576 | ||
2513 | cn_reply->seq = req->seq; | 2577 | cn_reply->seq = req->seq; |
@@ -2515,6 +2579,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code) | |||
2515 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply); | 2579 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply); |
2516 | cn_reply->flags = 0; | 2580 | cn_reply->flags = 0; |
2517 | 2581 | ||
2582 | reply->packet_type = P_return_code_only; | ||
2518 | reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; | 2583 | reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; |
2519 | reply->ret_code = ret_code; | 2584 | reply->ret_code = ret_code; |
2520 | 2585 | ||
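
The kzalloc()/memset() conversions in this file share one motive: reply structures that are only partially filled before being copied out to userspace must not carry stale kernel bytes. The same idea in userspace, with calloc() standing in for kzalloc() and an invented reply layout:

#include <stdio.h>
#include <stdlib.h>

struct reply {
    int packet_type;
    int minor;
    int ret_code;
    char tag_list[32];
};

int main(void)
{
    /* like kzalloc(reply_size, GFP_KERNEL): zeroed on allocation */
    struct reply *r = calloc(1, sizeof(*r));
    if (!r)
        return 1;
    r->packet_type = 42;   /* fields we do set... */
    r->ret_code = 0;
    /* ...and everything else (tag_list here) is guaranteed zero,
     * instead of whatever the allocator left behind */
    printf("tag_list[0] = %d\n", r->tag_list[0]);
    free(r);
    return 0;
}
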
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 7e6ac307e2d..2959cdfb77f 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include "drbd_int.h" | 34 | #include "drbd_int.h" |
35 | 35 | ||
36 | static int drbd_proc_open(struct inode *inode, struct file *file); | 36 | static int drbd_proc_open(struct inode *inode, struct file *file); |
37 | static int drbd_proc_release(struct inode *inode, struct file *file); | ||
37 | 38 | ||
38 | 39 | ||
39 | struct proc_dir_entry *drbd_proc; | 40 | struct proc_dir_entry *drbd_proc; |
@@ -42,9 +43,22 @@ const struct file_operations drbd_proc_fops = { | |||
42 | .open = drbd_proc_open, | 43 | .open = drbd_proc_open, |
43 | .read = seq_read, | 44 | .read = seq_read, |
44 | .llseek = seq_lseek, | 45 | .llseek = seq_lseek, |
45 | .release = single_release, | 46 | .release = drbd_proc_release, |
46 | }; | 47 | }; |
47 | 48 | ||
49 | void seq_printf_with_thousands_grouping(struct seq_file *seq, long v) | ||
50 | { | ||
51 | /* v is in kB/sec. We don't expect TiByte/sec yet. */ | ||
52 | if (unlikely(v >= 1000000)) { | ||
53 | /* cool: > GiByte/s */ | ||
54 | seq_printf(seq, "%ld,", v / 1000000); | ||
55 | v %= 1000000; | ||
56 | seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000); | ||
57 | } else if (likely(v >= 1000)) | ||
58 | seq_printf(seq, "%ld,%03ld", v/1000, v % 1000); | ||
59 | else | ||
60 | seq_printf(seq, "%ld", v); | ||
61 | } | ||
48 | 62 | ||
49 | /*lge | 63 | /*lge |
50 | * progress bars shamelessly adapted from driver/md/md.c | 64 | * progress bars shamelessly adapted from driver/md/md.c |
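
The new helper centralizes the "speed: 1,234" formatting that was previously open-coded at each call site below. A runnable printf() model of it; note the modulo after printing the millions digit, so the remainder rather than the quotient is grouped:

#include <stdio.h>

static void print_with_thousands_grouping(long v)
{
    /* v is in kB/sec; we don't expect TiByte/sec yet */
    if (v >= 1000000) {
        printf("%ld,", v / 1000000);
        v %= 1000000;
        printf("%03ld,%03ld", v / 1000, v % 1000);
    } else if (v >= 1000)
        printf("%ld,%03ld", v / 1000, v % 1000);
    else
        printf("%ld", v);
}

int main(void)
{
    long samples[] = { 37, 4200, 1234567 };
    for (int i = 0; i < 3; i++) {
        print_with_thousands_grouping(samples[i]);  /* 37 / 4,200 / 1,234,567 */
        printf(" K/sec\n");
    }
    return 0;
}
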
@@ -71,10 +85,15 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) | |||
71 | seq_printf(seq, "."); | 85 | seq_printf(seq, "."); |
72 | seq_printf(seq, "] "); | 86 | seq_printf(seq, "] "); |
73 | 87 | ||
74 | seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10); | 88 | if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) |
75 | /* if more than 1 GB display in MB */ | 89 | seq_printf(seq, "verified:"); |
76 | if (mdev->rs_total > 0x100000L) | 90 | else |
77 | seq_printf(seq, "(%lu/%lu)M\n\t", | 91 | seq_printf(seq, "sync'ed:"); |
92 | seq_printf(seq, "%3u.%u%% ", res / 10, res % 10); | ||
93 | |||
94 | /* if more than a few GB, display in MB */ | ||
95 | if (mdev->rs_total > (4UL << (30 - BM_BLOCK_SHIFT))) | ||
96 | seq_printf(seq, "(%lu/%lu)M", | ||
78 | (unsigned long) Bit2KB(rs_left >> 10), | 97 | (unsigned long) Bit2KB(rs_left >> 10), |
79 | (unsigned long) Bit2KB(mdev->rs_total >> 10)); | 98 | (unsigned long) Bit2KB(mdev->rs_total >> 10)); |
80 | else | 99 | else |
@@ -94,6 +113,7 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) | |||
94 | /* Rolling marks. last_mark+1 may just now be modified. last_mark+2 is | 113 | /* Rolling marks. last_mark+1 may just now be modified. last_mark+2 is |
95 | * at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at | 114 | * at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at |
96 | * least DRBD_SYNC_MARK_STEP time before it will be modified. */ | 115 | * least DRBD_SYNC_MARK_STEP time before it will be modified. */ |
116 | /* ------------------------ ~18s average ------------------------ */ | ||
97 | i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS; | 117 | i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS; |
98 | dt = (jiffies - mdev->rs_mark_time[i]) / HZ; | 118 | dt = (jiffies - mdev->rs_mark_time[i]) / HZ; |
99 | if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS)) | 119 | if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS)) |
@@ -107,14 +127,24 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) | |||
107 | seq_printf(seq, "finish: %lu:%02lu:%02lu", | 127 | seq_printf(seq, "finish: %lu:%02lu:%02lu", |
108 | rt / 3600, (rt % 3600) / 60, rt % 60); | 128 | rt / 3600, (rt % 3600) / 60, rt % 60); |
109 | 129 | ||
110 | /* current speed average over (SYNC_MARKS * SYNC_MARK_STEP) jiffies */ | ||
111 | dbdt = Bit2KB(db/dt); | 130 | dbdt = Bit2KB(db/dt); |
112 | if (dbdt > 1000) | 131 | seq_printf(seq, " speed: "); |
113 | seq_printf(seq, " speed: %ld,%03ld", | 132 | seq_printf_with_thousands_grouping(seq, dbdt); |
114 | dbdt/1000, dbdt % 1000); | 133 | seq_printf(seq, " ("); |
115 | else | 134 | /* ------------------------- ~3s average ------------------------ */ |
116 | seq_printf(seq, " speed: %ld", dbdt); | 135 | if (proc_details >= 1) { |
136 | /* this is what drbd_rs_should_slow_down() uses */ | ||
137 | i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; | ||
138 | dt = (jiffies - mdev->rs_mark_time[i]) / HZ; | ||
139 | if (!dt) | ||
140 | dt++; | ||
141 | db = mdev->rs_mark_left[i] - rs_left; | ||
142 | dbdt = Bit2KB(db/dt); | ||
143 | seq_printf_with_thousands_grouping(seq, dbdt); | ||
144 | seq_printf(seq, " -- "); | ||
145 | } | ||
117 | 146 | ||
147 | /* --------------------- long term average ---------------------- */ | ||
118 | /* mean speed since syncer started | 148 | /* mean speed since syncer started |
119 | * we do account for PausedSync periods */ | 149 | * we do account for PausedSync periods */ |
120 | dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ; | 150 | dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ; |
@@ -122,20 +152,34 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) | |||
122 | dt = 1; | 152 | dt = 1; |
123 | db = mdev->rs_total - rs_left; | 153 | db = mdev->rs_total - rs_left; |
124 | dbdt = Bit2KB(db/dt); | 154 | dbdt = Bit2KB(db/dt); |
125 | if (dbdt > 1000) | 155 | seq_printf_with_thousands_grouping(seq, dbdt); |
126 | seq_printf(seq, " (%ld,%03ld)", | 156 | seq_printf(seq, ")"); |
127 | dbdt/1000, dbdt % 1000); | ||
128 | else | ||
129 | seq_printf(seq, " (%ld)", dbdt); | ||
130 | 157 | ||
131 | if (mdev->state.conn == C_SYNC_TARGET) { | 158 | if (mdev->state.conn == C_SYNC_TARGET || |
132 | if (mdev->c_sync_rate > 1000) | 159 | mdev->state.conn == C_VERIFY_S) { |
133 | seq_printf(seq, " want: %d,%03d", | 160 | seq_printf(seq, " want: "); |
134 | mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000); | 161 | seq_printf_with_thousands_grouping(seq, mdev->c_sync_rate); |
135 | else | ||
136 | seq_printf(seq, " want: %d", mdev->c_sync_rate); | ||
137 | } | 162 | } |
138 | seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : ""); | 163 | seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : ""); |
164 | |||
165 | if (proc_details >= 1) { | ||
166 | /* 64 bit: | ||
167 | * we convert to sectors in the display below. */ | ||
168 | unsigned long bm_bits = drbd_bm_bits(mdev); | ||
169 | unsigned long bit_pos; | ||
170 | if (mdev->state.conn == C_VERIFY_S || | ||
171 | mdev->state.conn == C_VERIFY_T) | ||
172 | bit_pos = bm_bits - mdev->ov_left; | ||
173 | else | ||
174 | bit_pos = mdev->bm_resync_fo; | ||
175 | /* Total sectors may be slightly off for oddly | ||
176 | * sized devices. So what. */ | ||
177 | seq_printf(seq, | ||
178 | "\t%3d%% sector pos: %llu/%llu\n", | ||
179 | (int)(bit_pos / (bm_bits/100+1)), | ||
180 | (unsigned long long)bit_pos * BM_SECT_PER_BIT, | ||
181 | (unsigned long long)bm_bits * BM_SECT_PER_BIT); | ||
182 | } | ||
139 | } | 183 | } |
140 | 184 | ||
141 | static void resync_dump_detail(struct seq_file *seq, struct lc_element *e) | 185 | static void resync_dump_detail(struct seq_file *seq, struct lc_element *e) |
@@ -232,20 +276,16 @@ static int drbd_seq_show(struct seq_file *seq, void *v) | |||
232 | mdev->epochs, | 276 | mdev->epochs, |
233 | write_ordering_chars[mdev->write_ordering] | 277 | write_ordering_chars[mdev->write_ordering] |
234 | ); | 278 | ); |
235 | seq_printf(seq, " oos:%lu\n", | 279 | seq_printf(seq, " oos:%llu\n", |
236 | Bit2KB(drbd_bm_total_weight(mdev))); | 280 | Bit2KB((unsigned long long) |
281 | drbd_bm_total_weight(mdev))); | ||
237 | } | 282 | } |
238 | if (mdev->state.conn == C_SYNC_SOURCE || | 283 | if (mdev->state.conn == C_SYNC_SOURCE || |
239 | mdev->state.conn == C_SYNC_TARGET) | 284 | mdev->state.conn == C_SYNC_TARGET || |
285 | mdev->state.conn == C_VERIFY_S || | ||
286 | mdev->state.conn == C_VERIFY_T) | ||
240 | drbd_syncer_progress(mdev, seq); | 287 | drbd_syncer_progress(mdev, seq); |
241 | 288 | ||
242 | if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) | ||
243 | seq_printf(seq, "\t%3d%% %lu/%lu\n", | ||
244 | (int)((mdev->rs_total-mdev->ov_left) / | ||
245 | (mdev->rs_total/100+1)), | ||
246 | mdev->rs_total - mdev->ov_left, | ||
247 | mdev->rs_total); | ||
248 | |||
249 | if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) { | 289 | if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) { |
250 | lc_seq_printf_stats(seq, mdev->resync); | 290 | lc_seq_printf_stats(seq, mdev->resync); |
251 | lc_seq_printf_stats(seq, mdev->act_log); | 291 | lc_seq_printf_stats(seq, mdev->act_log); |
@@ -265,7 +305,15 @@ static int drbd_seq_show(struct seq_file *seq, void *v) | |||
265 | 305 | ||
266 | static int drbd_proc_open(struct inode *inode, struct file *file) | 306 | static int drbd_proc_open(struct inode *inode, struct file *file) |
267 | { | 307 | { |
268 | return single_open(file, drbd_seq_show, PDE(inode)->data); | 308 | if (try_module_get(THIS_MODULE)) |
309 | return single_open(file, drbd_seq_show, PDE(inode)->data); | ||
310 | return -ENODEV; | ||
311 | } | ||
312 | |||
313 | static int drbd_proc_release(struct inode *inode, struct file *file) | ||
314 | { | ||
315 | module_put(THIS_MODULE); | ||
316 | return single_release(inode, file); | ||
269 | } | 317 | } |
270 | 318 | ||
271 | /* PROC FS stuff end */ | 319 | /* PROC FS stuff end */ |
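
Replacing single_release with a custom release that drops a module reference taken in open guards against the module going away while /proc/drbd is still held open. A generic kernel-side sketch of the pairing, mirroring the diff; the my_-prefixed names are placeholders:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int my_seq_show(struct seq_file *seq, void *v)
{
    seq_printf(seq, "hello\n");
    return 0;
}

static int my_proc_open(struct inode *inode, struct file *file)
{
    /* fails if the module is already on its way out */
    if (try_module_get(THIS_MODULE))
        return single_open(file, my_seq_show, NULL);
    return -ENODEV;
}

static int my_proc_release(struct inode *inode, struct file *file)
{
    module_put(THIS_MODULE);
    return single_release(inode, file);
}

static const struct file_operations my_proc_fops = {
    .owner   = THIS_MODULE,
    .open    = my_proc_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = my_proc_release,
};
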
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 8e68be939de..fe1564c7d8b 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -277,7 +277,7 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net) | |||
277 | atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; | 277 | atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; |
278 | int i; | 278 | int i; |
279 | 279 | ||
280 | if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) | 280 | if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count) |
281 | i = page_chain_free(page); | 281 | i = page_chain_free(page); |
282 | else { | 282 | else { |
283 | struct page *tmp; | 283 | struct page *tmp; |
@@ -319,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, | |||
319 | struct page *page; | 319 | struct page *page; |
320 | unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; | 320 | unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; |
321 | 321 | ||
322 | if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE)) | 322 | if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE)) |
323 | return NULL; | 323 | return NULL; |
324 | 324 | ||
325 | e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); | 325 | e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); |
@@ -725,16 +725,16 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock) | |||
725 | char tb[4]; | 725 | char tb[4]; |
726 | 726 | ||
727 | if (!*sock) | 727 | if (!*sock) |
728 | return FALSE; | 728 | return false; |
729 | 729 | ||
730 | rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK); | 730 | rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK); |
731 | 731 | ||
732 | if (rr > 0 || rr == -EAGAIN) { | 732 | if (rr > 0 || rr == -EAGAIN) { |
733 | return TRUE; | 733 | return true; |
734 | } else { | 734 | } else { |
735 | sock_release(*sock); | 735 | sock_release(*sock); |
736 | *sock = NULL; | 736 | *sock = NULL; |
737 | return FALSE; | 737 | return false; |
738 | } | 738 | } |
739 | } | 739 | } |
740 | 740 | ||
@@ -768,8 +768,7 @@ static int drbd_connect(struct drbd_conf *mdev) | |||
768 | if (s || ++try >= 3) | 768 | if (s || ++try >= 3) |
769 | break; | 769 | break; |
770 | /* give the other side time to call bind() & listen() */ | 770 | /* give the other side time to call bind() & listen() */ |
771 | __set_current_state(TASK_INTERRUPTIBLE); | 771 | schedule_timeout_interruptible(HZ / 10); |
772 | schedule_timeout(HZ / 10); | ||
773 | } | 772 | } |
774 | 773 | ||
775 | if (s) { | 774 | if (s) { |
@@ -788,8 +787,7 @@ static int drbd_connect(struct drbd_conf *mdev) | |||
788 | } | 787 | } |
789 | 788 | ||
790 | if (sock && msock) { | 789 | if (sock && msock) { |
791 | __set_current_state(TASK_INTERRUPTIBLE); | 790 | schedule_timeout_interruptible(HZ / 10); |
792 | schedule_timeout(HZ / 10); | ||
793 | ok = drbd_socket_okay(mdev, &sock); | 791 | ok = drbd_socket_okay(mdev, &sock); |
794 | ok = drbd_socket_okay(mdev, &msock) && ok; | 792 | ok = drbd_socket_okay(mdev, &msock) && ok; |
795 | if (ok) | 793 | if (ok) |
@@ -906,7 +904,7 @@ retry: | |||
906 | put_ldev(mdev); | 904 | put_ldev(mdev); |
907 | } | 905 | } |
908 | 906 | ||
909 | if (!drbd_send_protocol(mdev)) | 907 | if (drbd_send_protocol(mdev) == -1) |
910 | return -1; | 908 | return -1; |
911 | drbd_send_sync_param(mdev, &mdev->sync_conf); | 909 | drbd_send_sync_param(mdev, &mdev->sync_conf); |
912 | drbd_send_sizes(mdev, 0, 0); | 910 | drbd_send_sizes(mdev, 0, 0); |
@@ -914,6 +912,7 @@ retry: | |||
914 | drbd_send_state(mdev); | 912 | drbd_send_state(mdev); |
915 | clear_bit(USE_DEGR_WFC_T, &mdev->flags); | 913 | clear_bit(USE_DEGR_WFC_T, &mdev->flags); |
916 | clear_bit(RESIZE_PENDING, &mdev->flags); | 914 | clear_bit(RESIZE_PENDING, &mdev->flags); |
915 | mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */ | ||
917 | 916 | ||
918 | return 1; | 917 | return 1; |
919 | 918 | ||
@@ -932,8 +931,9 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi | |||
932 | 931 | ||
933 | r = drbd_recv(mdev, h, sizeof(*h)); | 932 | r = drbd_recv(mdev, h, sizeof(*h)); |
934 | if (unlikely(r != sizeof(*h))) { | 933 | if (unlikely(r != sizeof(*h))) { |
935 | dev_err(DEV, "short read expecting header on sock: r=%d\n", r); | 934 | if (!signal_pending(current)) |
936 | return FALSE; | 935 | dev_warn(DEV, "short read expecting header on sock: r=%d\n", r); |
936 | return false; | ||
937 | } | 937 | } |
938 | 938 | ||
939 | if (likely(h->h80.magic == BE_DRBD_MAGIC)) { | 939 | if (likely(h->h80.magic == BE_DRBD_MAGIC)) { |
@@ -947,11 +947,11 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi | |||
947 | be32_to_cpu(h->h80.magic), | 947 | be32_to_cpu(h->h80.magic), |
948 | be16_to_cpu(h->h80.command), | 948 | be16_to_cpu(h->h80.command), |
949 | be16_to_cpu(h->h80.length)); | 949 | be16_to_cpu(h->h80.length)); |
950 | return FALSE; | 950 | return false; |
951 | } | 951 | } |
952 | mdev->last_received = jiffies; | 952 | mdev->last_received = jiffies; |
953 | 953 | ||
954 | return TRUE; | 954 | return true; |
955 | } | 955 | } |
956 | 956 | ||
957 | static void drbd_flush(struct drbd_conf *mdev) | 957 | static void drbd_flush(struct drbd_conf *mdev) |
@@ -1074,6 +1074,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) | |||
1074 | * @mdev: DRBD device. | 1074 | * @mdev: DRBD device. |
1075 | * @e: epoch entry | 1075 | * @e: epoch entry |
1076 | * @rw: flag field, see bio->bi_rw | 1076 | * @rw: flag field, see bio->bi_rw |
1077 | * | ||
1078 | * May spread the pages to multiple bios, | ||
1079 | * depending on bio_add_page restrictions. | ||
1080 | * | ||
1081 | * Returns 0 if all bios have been submitted, | ||
1082 | * -ENOMEM if we could not allocate enough bios, | ||
1083 | * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a | ||
1084 | * single page to an empty bio (which should never happen and likely indicates | ||
1085 | * that the lower level IO stack is in some way broken). This has been observed | ||
1086 | * on certain Xen deployments. | ||
1077 | */ | 1087 | */ |
1078 | /* TODO allocate from our own bio_set. */ | 1088 | /* TODO allocate from our own bio_set. */ |
1079 | int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, | 1089 | int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, |
@@ -1086,6 +1096,7 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, | |||
1086 | unsigned ds = e->size; | 1096 | unsigned ds = e->size; |
1087 | unsigned n_bios = 0; | 1097 | unsigned n_bios = 0; |
1088 | unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT; | 1098 | unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT; |
1099 | int err = -ENOMEM; | ||
1089 | 1100 | ||
1090 | /* In most cases, we will only need one bio. But in case the lower | 1101 | /* In most cases, we will only need one bio. But in case the lower |
1091 | * level restrictions happen to be different at this offset on this | 1102 | * level restrictions happen to be different at this offset on this |
@@ -1111,8 +1122,17 @@ next_bio: | |||
1111 | page_chain_for_each(page) { | 1122 | page_chain_for_each(page) { |
1112 | unsigned len = min_t(unsigned, ds, PAGE_SIZE); | 1123 | unsigned len = min_t(unsigned, ds, PAGE_SIZE); |
1113 | if (!bio_add_page(bio, page, len, 0)) { | 1124 | if (!bio_add_page(bio, page, len, 0)) { |
1114 | /* a single page must always be possible! */ | 1125 | /* A single page must always be possible! |
1115 | BUG_ON(bio->bi_vcnt == 0); | 1126 | * But in case it fails anyways, |
1127 | * we deal with it, and complain (below). */ | ||
1128 | if (bio->bi_vcnt == 0) { | ||
1129 | dev_err(DEV, | ||
1130 | "bio_add_page failed for len=%u, " | ||
1131 | "bi_vcnt=0 (bi_sector=%llu)\n", | ||
1132 | len, (unsigned long long)bio->bi_sector); | ||
1133 | err = -ENOSPC; | ||
1134 | goto fail; | ||
1135 | } | ||
1116 | goto next_bio; | 1136 | goto next_bio; |
1117 | } | 1137 | } |
1118 | ds -= len; | 1138 | ds -= len; |
@@ -1138,7 +1158,7 @@ fail: | |||
1138 | bios = bios->bi_next; | 1158 | bios = bios->bi_next; |
1139 | bio_put(bio); | 1159 | bio_put(bio); |
1140 | } | 1160 | } |
1141 | return -ENOMEM; | 1161 | return err; |
1142 | } | 1162 | } |
1143 | 1163 | ||
1144 | static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 1164 | static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
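
drbd_submit_ee() now distinguishes two failure modes: -ENOMEM when a bio cannot be allocated, and -ENOSPC when even an empty bio refuses a single page (the "broken lower level" case that used to BUG()). The packing loop's shape, modeled in userspace with an invented per-bio capacity:

#include <stdio.h>

#define BIO_CAP 4   /* invented: pretend a bio takes at most 4 pages */

/* returns 1 on success, 0 on failure, like bio_add_page() */
static int add_page(int *pages_in_bio)
{
    if (*pages_in_bio >= BIO_CAP)
        return 0;
    (*pages_in_bio)++;
    return 1;
}

int main(void)
{
    int nr_pages = 10, pages_in_bio = 0, n_bios = 1;

    for (int page = 0; page < nr_pages; ) {
        if (!add_page(&pages_in_bio)) {
            if (pages_in_bio == 0) {
                /* -ENOSPC in the diff: even an empty bio refused a
                 * single page; the lower IO stack is broken */
                fprintf(stderr, "cannot place a single page\n");
                return 1;
            }
            /* "goto next_bio": chain a fresh bio, retry the same page */
            n_bios++;
            pages_in_bio = 0;
            continue;
        }
        page++;
    }
    printf("packed %d pages into %d bios\n", nr_pages, n_bios);
    return 0;
}
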
@@ -1160,7 +1180,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign | |||
1160 | switch (mdev->write_ordering) { | 1180 | switch (mdev->write_ordering) { |
1161 | case WO_none: | 1181 | case WO_none: |
1162 | if (rv == FE_RECYCLED) | 1182 | if (rv == FE_RECYCLED) |
1163 | return TRUE; | 1183 | return true; |
1164 | 1184 | ||
1165 | /* receiver context, in the writeout path of the other node. | 1185 | /* receiver context, in the writeout path of the other node. |
1166 | * avoid potential distributed deadlock */ | 1186 | * avoid potential distributed deadlock */ |
@@ -1188,10 +1208,10 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign | |||
1188 | D_ASSERT(atomic_read(&epoch->active) == 0); | 1208 | D_ASSERT(atomic_read(&epoch->active) == 0); |
1189 | D_ASSERT(epoch->flags == 0); | 1209 | D_ASSERT(epoch->flags == 0); |
1190 | 1210 | ||
1191 | return TRUE; | 1211 | return true; |
1192 | default: | 1212 | default: |
1193 | dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering); | 1213 | dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering); |
1194 | return FALSE; | 1214 | return false; |
1195 | } | 1215 | } |
1196 | 1216 | ||
1197 | epoch->flags = 0; | 1217 | epoch->flags = 0; |
@@ -1209,7 +1229,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign | |||
1209 | } | 1229 | } |
1210 | spin_unlock(&mdev->epoch_lock); | 1230 | spin_unlock(&mdev->epoch_lock); |
1211 | 1231 | ||
1212 | return TRUE; | 1232 | return true; |
1213 | } | 1233 | } |
1214 | 1234 | ||
1215 | /* used from receive_RSDataReply (recv_resync_read) | 1235 | /* used from receive_RSDataReply (recv_resync_read) |
@@ -1231,21 +1251,25 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ | |||
1231 | if (dgs) { | 1251 | if (dgs) { |
1232 | rr = drbd_recv(mdev, dig_in, dgs); | 1252 | rr = drbd_recv(mdev, dig_in, dgs); |
1233 | if (rr != dgs) { | 1253 | if (rr != dgs) { |
1234 | dev_warn(DEV, "short read receiving data digest: read %d expected %d\n", | 1254 | if (!signal_pending(current)) |
1235 | rr, dgs); | 1255 | dev_warn(DEV, |
1256 | "short read receiving data digest: read %d expected %d\n", | ||
1257 | rr, dgs); | ||
1236 | return NULL; | 1258 | return NULL; |
1237 | } | 1259 | } |
1238 | } | 1260 | } |
1239 | 1261 | ||
1240 | data_size -= dgs; | 1262 | data_size -= dgs; |
1241 | 1263 | ||
1264 | ERR_IF(data_size == 0) return NULL; | ||
1242 | ERR_IF(data_size & 0x1ff) return NULL; | 1265 | ERR_IF(data_size & 0x1ff) return NULL; |
1243 | ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL; | 1266 | ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL; |
1244 | 1267 | ||
1245 | /* even though we trust our peer, | 1268 | /* even though we trust our peer, |
1246 | * we sometimes have to double check. */ | 1269 | * we sometimes have to double check. */ |
1247 | if (sector + (data_size>>9) > capacity) { | 1270 | if (sector + (data_size>>9) > capacity) { |
1248 | dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n", | 1271 | dev_err(DEV, "request from peer beyond end of local disk: " |
1272 | "capacity: %llus < sector: %llus + size: %u\n", | ||
1249 | (unsigned long long)capacity, | 1273 | (unsigned long long)capacity, |
1250 | (unsigned long long)sector, data_size); | 1274 | (unsigned long long)sector, data_size); |
1251 | return NULL; | 1275 | return NULL; |
@@ -1264,15 +1288,16 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ | |||
1264 | unsigned len = min_t(int, ds, PAGE_SIZE); | 1288 | unsigned len = min_t(int, ds, PAGE_SIZE); |
1265 | data = kmap(page); | 1289 | data = kmap(page); |
1266 | rr = drbd_recv(mdev, data, len); | 1290 | rr = drbd_recv(mdev, data, len); |
1267 | if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) { | 1291 | if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) { |
1268 | dev_err(DEV, "Fault injection: Corrupting data on receive\n"); | 1292 | dev_err(DEV, "Fault injection: Corrupting data on receive\n"); |
1269 | data[0] = data[0] ^ (unsigned long)-1; | 1293 | data[0] = data[0] ^ (unsigned long)-1; |
1270 | } | 1294 | } |
1271 | kunmap(page); | 1295 | kunmap(page); |
1272 | if (rr != len) { | 1296 | if (rr != len) { |
1273 | drbd_free_ee(mdev, e); | 1297 | drbd_free_ee(mdev, e); |
1274 | dev_warn(DEV, "short read receiving data: read %d expected %d\n", | 1298 | if (!signal_pending(current)) |
1275 | rr, len); | 1299 | dev_warn(DEV, "short read receiving data: read %d expected %d\n", |
1300 | rr, len); | ||
1276 | return NULL; | 1301 | return NULL; |
1277 | } | 1302 | } |
1278 | ds -= rr; | 1303 | ds -= rr; |
@@ -1281,7 +1306,8 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ | |||
1281 | if (dgs) { | 1306 | if (dgs) { |
1282 | drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv); | 1307 | drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv); |
1283 | if (memcmp(dig_in, dig_vv, dgs)) { | 1308 | if (memcmp(dig_in, dig_vv, dgs)) { |
1284 | dev_err(DEV, "Digest integrity check FAILED.\n"); | 1309 | dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n", |
1310 | (unsigned long long)sector, data_size); | ||
1285 | drbd_bcast_ee(mdev, "digest failed", | 1311 | drbd_bcast_ee(mdev, "digest failed", |
1286 | dgs, dig_in, dig_vv, e); | 1312 | dgs, dig_in, dig_vv, e); |
1287 | drbd_free_ee(mdev, e); | 1313 | drbd_free_ee(mdev, e); |
@@ -1302,7 +1328,7 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size) | |||
1302 | void *data; | 1328 | void *data; |
1303 | 1329 | ||
1304 | if (!data_size) | 1330 | if (!data_size) |
1305 | return TRUE; | 1331 | return true; |
1306 | 1332 | ||
1307 | page = drbd_pp_alloc(mdev, 1, 1); | 1333 | page = drbd_pp_alloc(mdev, 1, 1); |
1308 | 1334 | ||
@@ -1311,8 +1337,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size) | |||
1311 | rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE)); | 1337 | rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE)); |
1312 | if (rr != min_t(int, data_size, PAGE_SIZE)) { | 1338 | if (rr != min_t(int, data_size, PAGE_SIZE)) { |
1313 | rv = 0; | 1339 | rv = 0; |
1314 | dev_warn(DEV, "short read receiving data: read %d expected %d\n", | 1340 | if (!signal_pending(current)) |
1315 | rr, min_t(int, data_size, PAGE_SIZE)); | 1341 | dev_warn(DEV, |
1342 | "short read receiving data: read %d expected %d\n", | ||
1343 | rr, min_t(int, data_size, PAGE_SIZE)); | ||
1316 | break; | 1344 | break; |
1317 | } | 1345 | } |
1318 | data_size -= rr; | 1346 | data_size -= rr; |
@@ -1337,8 +1365,10 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, | |||
1337 | if (dgs) { | 1365 | if (dgs) { |
1338 | rr = drbd_recv(mdev, dig_in, dgs); | 1366 | rr = drbd_recv(mdev, dig_in, dgs); |
1339 | if (rr != dgs) { | 1367 | if (rr != dgs) { |
1340 | dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n", | 1368 | if (!signal_pending(current)) |
1341 | rr, dgs); | 1369 | dev_warn(DEV, |
1370 | "short read receiving data reply digest: read %d expected %d\n", | ||
1371 | rr, dgs); | ||
1342 | return 0; | 1372 | return 0; |
1343 | } | 1373 | } |
1344 | } | 1374 | } |
@@ -1359,9 +1389,10 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, | |||
1359 | expect); | 1389 | expect); |
1360 | kunmap(bvec->bv_page); | 1390 | kunmap(bvec->bv_page); |
1361 | if (rr != expect) { | 1391 | if (rr != expect) { |
1362 | dev_warn(DEV, "short read receiving data reply: " | 1392 | if (!signal_pending(current)) |
1363 | "read %d expected %d\n", | 1393 | dev_warn(DEV, "short read receiving data reply: " |
1364 | rr, expect); | 1394 | "read %d expected %d\n", |
1395 | rr, expect); | ||
1365 | return 0; | 1396 | return 0; |
1366 | } | 1397 | } |
1367 | data_size -= rr; | 1398 | data_size -= rr; |
@@ -1425,11 +1456,10 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si | |||
1425 | 1456 | ||
1426 | atomic_add(data_size >> 9, &mdev->rs_sect_ev); | 1457 | atomic_add(data_size >> 9, &mdev->rs_sect_ev); |
1427 | if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) | 1458 | if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) |
1428 | return TRUE; | 1459 | return true; |
1429 | 1460 | ||
1430 | /* drbd_submit_ee currently fails for one reason only: | 1461 | /* don't care for the reason here */ |
1431 | * not being able to allocate enough bios. | 1462 | dev_err(DEV, "submit failed, triggering re-connect\n"); |
1432 | * Is dropping the connection going to help? */ | ||
1433 | spin_lock_irq(&mdev->req_lock); | 1463 | spin_lock_irq(&mdev->req_lock); |
1434 | list_del(&e->w.list); | 1464 | list_del(&e->w.list); |
1435 | spin_unlock_irq(&mdev->req_lock); | 1465 | spin_unlock_irq(&mdev->req_lock); |
@@ -1437,7 +1467,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si | |||
1437 | drbd_free_ee(mdev, e); | 1467 | drbd_free_ee(mdev, e); |
1438 | fail: | 1468 | fail: |
1439 | put_ldev(mdev); | 1469 | put_ldev(mdev); |
1440 | return FALSE; | 1470 | return false; |
1441 | } | 1471 | } |
1442 | 1472 | ||
1443 | static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 1473 | static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
@@ -1454,7 +1484,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
1454 | spin_unlock_irq(&mdev->req_lock); | 1484 | spin_unlock_irq(&mdev->req_lock); |
1455 | if (unlikely(!req)) { | 1485 | if (unlikely(!req)) { |
1456 | dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n"); | 1486 | dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n"); |
1457 | return FALSE; | 1487 | return false; |
1458 | } | 1488 | } |
1459 | 1489 | ||
1460 | /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid | 1490 | /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid |
@@ -1611,15 +1641,15 @@ static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq) | |||
1611 | return ret; | 1641 | return ret; |
1612 | } | 1642 | } |
1613 | 1643 | ||
1614 | static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf) | 1644 | /* see also bio_flags_to_wire() |
1645 | * DRBD_REQ_*, because we need to semantically map the flags to data packet | ||
1646 | * flags and back. We may replicate to other kernel versions. */ | ||
1647 | static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf) | ||
1615 | { | 1648 | { |
1616 | if (mdev->agreed_pro_version >= 95) | 1649 | return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | |
1617 | return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | | 1650 | (dpf & DP_FUA ? REQ_FUA : 0) | |
1618 | (dpf & DP_FUA ? REQ_FUA : 0) | | 1651 | (dpf & DP_FLUSH ? REQ_FLUSH : 0) | |
1619 | (dpf & DP_FLUSH ? REQ_FUA : 0) | | 1652 | (dpf & DP_DISCARD ? REQ_DISCARD : 0); |
1620 | (dpf & DP_DISCARD ? REQ_DISCARD : 0); | ||
1621 | else | ||
1622 | return dpf & DP_RW_SYNC ? REQ_SYNC : 0; | ||
1623 | } | 1653 | } |
1624 | 1654 | ||
1625 | /* mirrored write */ | 1655 | /* mirrored write */ |
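
Two things are visible in the rewritten helper: the DP_FLUSH bit now maps to REQ_FLUSH rather than REQ_FUA, and the pre-protocol-95 fallback is gone from the receive side (the new comment points to bio_flags_to_wire() as the sender counterpart). A standalone sketch of the bit-for-bit mapping; the flag values are stand-ins for the real DP_* and REQ_* constants in the DRBD and block-layer headers:

#include <stdint.h>

/* Stand-in bit values, for illustration only. */
enum { DP_RW_SYNC = 1, DP_FUA = 2, DP_FLUSH = 4, DP_DISCARD = 8 };
enum { REQ_SYNC = 1, REQ_FUA = 2, REQ_FLUSH = 4, REQ_DISCARD = 8 };

/* Translate each wire flag to its bio counterpart, bit by bit. */
static unsigned long wire_flags_to_bio(uint32_t dpf)
{
        return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
               (dpf & DP_FUA ? REQ_FUA : 0) |
               (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
               (dpf & DP_DISCARD ? REQ_DISCARD : 0);
}

int main(void)
{
        return wire_flags_to_bio(DP_FUA | DP_FLUSH) ==
               (REQ_FUA | REQ_FLUSH) ? 0 : 1;
}
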
@@ -1632,9 +1662,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
1632 | u32 dp_flags; | 1662 | u32 dp_flags; |
1633 | 1663 | ||
1634 | if (!get_ldev(mdev)) { | 1664 | if (!get_ldev(mdev)) { |
1635 | if (__ratelimit(&drbd_ratelimit_state)) | ||
1636 | dev_err(DEV, "Can not write mirrored data block " | ||
1637 | "to local disk.\n"); | ||
1638 | spin_lock(&mdev->peer_seq_lock); | 1665 | spin_lock(&mdev->peer_seq_lock); |
1639 | if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num)) | 1666 | if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num)) |
1640 | mdev->peer_seq++; | 1667 | mdev->peer_seq++; |
@@ -1654,23 +1681,23 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
1654 | e = read_in_block(mdev, p->block_id, sector, data_size); | 1681 | e = read_in_block(mdev, p->block_id, sector, data_size); |
1655 | if (!e) { | 1682 | if (!e) { |
1656 | put_ldev(mdev); | 1683 | put_ldev(mdev); |
1657 | return FALSE; | 1684 | return false; |
1658 | } | 1685 | } |
1659 | 1686 | ||
1660 | e->w.cb = e_end_block; | 1687 | e->w.cb = e_end_block; |
1661 | 1688 | ||
1689 | dp_flags = be32_to_cpu(p->dp_flags); | ||
1690 | rw |= wire_flags_to_bio(mdev, dp_flags); | ||
1691 | |||
1692 | if (dp_flags & DP_MAY_SET_IN_SYNC) | ||
1693 | e->flags |= EE_MAY_SET_IN_SYNC; | ||
1694 | |||
1662 | spin_lock(&mdev->epoch_lock); | 1695 | spin_lock(&mdev->epoch_lock); |
1663 | e->epoch = mdev->current_epoch; | 1696 | e->epoch = mdev->current_epoch; |
1664 | atomic_inc(&e->epoch->epoch_size); | 1697 | atomic_inc(&e->epoch->epoch_size); |
1665 | atomic_inc(&e->epoch->active); | 1698 | atomic_inc(&e->epoch->active); |
1666 | spin_unlock(&mdev->epoch_lock); | 1699 | spin_unlock(&mdev->epoch_lock); |
1667 | 1700 | ||
1668 | dp_flags = be32_to_cpu(p->dp_flags); | ||
1669 | rw |= write_flags_to_bio(mdev, dp_flags); | ||
1670 | |||
1671 | if (dp_flags & DP_MAY_SET_IN_SYNC) | ||
1672 | e->flags |= EE_MAY_SET_IN_SYNC; | ||
1673 | |||
1674 | /* I'm the receiver, I do hold a net_cnt reference. */ | 1701 | /* I'm the receiver, I do hold a net_cnt reference. */ |
1675 | if (!mdev->net_conf->two_primaries) { | 1702 | if (!mdev->net_conf->two_primaries) { |
1676 | spin_lock_irq(&mdev->req_lock); | 1703 | spin_lock_irq(&mdev->req_lock); |
@@ -1773,7 +1800,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
1773 | put_ldev(mdev); | 1800 | put_ldev(mdev); |
1774 | wake_asender(mdev); | 1801 | wake_asender(mdev); |
1775 | finish_wait(&mdev->misc_wait, &wait); | 1802 | finish_wait(&mdev->misc_wait, &wait); |
1776 | return TRUE; | 1803 | return true; |
1777 | } | 1804 | } |
1778 | 1805 | ||
1779 | if (signal_pending(current)) { | 1806 | if (signal_pending(current)) { |
@@ -1829,11 +1856,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
1829 | } | 1856 | } |
1830 | 1857 | ||
1831 | if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) | 1858 | if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) |
1832 | return TRUE; | 1859 | return true; |
1833 | 1860 | ||
1834 | /* drbd_submit_ee currently fails for one reason only: | 1861 | /* don't care for the reason here */ |
1835 | * not being able to allocate enough bios. | 1862 | dev_err(DEV, "submit failed, triggering re-connect\n"); |
1836 | * Is dropping the connection going to help? */ | ||
1837 | spin_lock_irq(&mdev->req_lock); | 1863 | spin_lock_irq(&mdev->req_lock); |
1838 | list_del(&e->w.list); | 1864 | list_del(&e->w.list); |
1839 | hlist_del_init(&e->colision); | 1865 | hlist_del_init(&e->colision); |
@@ -1842,12 +1868,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
1842 | drbd_al_complete_io(mdev, e->sector); | 1868 | drbd_al_complete_io(mdev, e->sector); |
1843 | 1869 | ||
1844 | out_interrupted: | 1870 | out_interrupted: |
1845 | /* yes, the epoch_size now is imbalanced. | 1871 | drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP); |
1846 | * but we drop the connection anyways, so we don't have a chance to | ||
1847 | * receive a barrier... atomic_inc(&mdev->epoch_size); */ | ||
1848 | put_ldev(mdev); | 1872 | put_ldev(mdev); |
1849 | drbd_free_ee(mdev, e); | 1873 | drbd_free_ee(mdev, e); |
1850 | return FALSE; | 1874 | return false; |
1851 | } | 1875 | } |
1852 | 1876 | ||
1853 | /* We may throttle resync, if the lower device seems to be busy, | 1877 | /* We may throttle resync, if the lower device seems to be busy, |
@@ -1861,10 +1885,11 @@ out_interrupted: | |||
1861 | * The current sync rate used here uses only the most recent two step marks, | 1885 | * The current sync rate used here uses only the most recent two step marks, |
1862 | * to have a short time average so we can react faster. | 1886 | * to have a short time average so we can react faster. |
1863 | */ | 1887 | */ |
1864 | int drbd_rs_should_slow_down(struct drbd_conf *mdev) | 1888 | int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector) |
1865 | { | 1889 | { |
1866 | struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk; | 1890 | struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk; |
1867 | unsigned long db, dt, dbdt; | 1891 | unsigned long db, dt, dbdt; |
1892 | struct lc_element *tmp; | ||
1868 | int curr_events; | 1893 | int curr_events; |
1869 | int throttle = 0; | 1894 | int throttle = 0; |
1870 | 1895 | ||
@@ -1872,9 +1897,22 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev) | |||
1872 | if (mdev->sync_conf.c_min_rate == 0) | 1897 | if (mdev->sync_conf.c_min_rate == 0) |
1873 | return 0; | 1898 | return 0; |
1874 | 1899 | ||
1900 | spin_lock_irq(&mdev->al_lock); | ||
1901 | tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector)); | ||
1902 | if (tmp) { | ||
1903 | struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); | ||
1904 | if (test_bit(BME_PRIORITY, &bm_ext->flags)) { | ||
1905 | spin_unlock_irq(&mdev->al_lock); | ||
1906 | return 0; | ||
1907 | } | ||
1908 | /* Do not slow down if app IO is already waiting for this extent */ | ||
1909 | } | ||
1910 | spin_unlock_irq(&mdev->al_lock); | ||
1911 | |||
1875 | curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + | 1912 | curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + |
1876 | (int)part_stat_read(&disk->part0, sectors[1]) - | 1913 | (int)part_stat_read(&disk->part0, sectors[1]) - |
1877 | atomic_read(&mdev->rs_sect_ev); | 1914 | atomic_read(&mdev->rs_sect_ev); |
1915 | |||
1878 | if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) { | 1916 | if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) { |
1879 | unsigned long rs_left; | 1917 | unsigned long rs_left; |
1880 | int i; | 1918 | int i; |
@@ -1883,8 +1921,12 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev) | |||
1883 | 1921 | ||
1884 | /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP, | 1922 | /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP, |
1885 | * approx. */ | 1923 | * approx. */ |
1886 | i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS; | 1924 | i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; |
1887 | rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; | 1925 | |
1926 | if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) | ||
1927 | rs_left = mdev->ov_left; | ||
1928 | else | ||
1929 | rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; | ||
1888 | 1930 | ||
1889 | dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; | 1931 | dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; |
1890 | if (!dt) | 1932 | if (!dt) |
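
The new early-out above consults the resync extent cache before doing any rate arithmetic: if application IO already waits for the extent (BME_PRIORITY), throttling this resync request would only delay that IO further. A rough sketch with a plain mutex in place of al_lock and a stub in place of the lc_find() lookup; names follow the kernel code but the implementation is illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

enum { BME_PRIORITY = 1 };

struct bm_extent {
        unsigned long flags;
};

static pthread_mutex_t al_lock = PTHREAD_MUTEX_INITIALIZER;
static struct bm_extent demo_extent = { .flags = BME_PRIORITY };

/* Stand-in for lc_find(mdev->resync, BM_SECT_TO_EXT(sector)). */
static struct bm_extent *find_resync_extent(unsigned long long sector)
{
        return sector ? &demo_extent : NULL;
}

static bool should_slow_down(unsigned long long sector)
{
        struct bm_extent *bm_ext;
        bool exempt = false;

        pthread_mutex_lock(&al_lock);
        bm_ext = find_resync_extent(sector);
        /* Do not slow down if app IO is already waiting for this extent. */
        if (bm_ext && (bm_ext->flags & BME_PRIORITY))
                exempt = true;
        pthread_mutex_unlock(&al_lock);

        if (exempt)
                return false;

        /* ... the rate-based throttle estimate would follow here ... */
        return true;
}

int main(void)
{
        return should_slow_down(8) ? 1 : 0;
}
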
@@ -1912,15 +1954,15 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un | |||
1912 | sector = be64_to_cpu(p->sector); | 1954 | sector = be64_to_cpu(p->sector); |
1913 | size = be32_to_cpu(p->blksize); | 1955 | size = be32_to_cpu(p->blksize); |
1914 | 1956 | ||
1915 | if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { | 1957 | if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { |
1916 | dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, | 1958 | dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, |
1917 | (unsigned long long)sector, size); | 1959 | (unsigned long long)sector, size); |
1918 | return FALSE; | 1960 | return false; |
1919 | } | 1961 | } |
1920 | if (sector + (size>>9) > capacity) { | 1962 | if (sector + (size>>9) > capacity) { |
1921 | dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, | 1963 | dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, |
1922 | (unsigned long long)sector, size); | 1964 | (unsigned long long)sector, size); |
1923 | return FALSE; | 1965 | return false; |
1924 | } | 1966 | } |
1925 | 1967 | ||
1926 | if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { | 1968 | if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { |
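
The checks that now return false enforce the same invariants as before, with the bound renamed from DRBD_MAX_SEGMENT_SIZE to DRBD_MAX_BIO_SIZE: a positive length, 512-byte alignment, an upper bound, and no access past the device end. A compact sketch of the validation, assuming an illustrative bound:

#include <stdbool.h>
#include <stdint.h>

#define MAX_BIO_SIZE (1U << 20)         /* illustrative DRBD_MAX_BIO_SIZE */

static bool request_is_valid(uint64_t sector, int32_t size, uint64_t capacity)
{
        if (size <= 0 || (size & 0x1ff) != 0 || (uint32_t)size > MAX_BIO_SIZE)
                return false;           /* malformed length */
        if (sector + ((uint32_t)size >> 9) > capacity)
                return false;           /* runs past end of device */
        return true;
}

int main(void)
{
        return request_is_valid(0, 4096, 1ULL << 30) ? 0 : 1;
}
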
@@ -1957,7 +1999,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un | |||
1957 | e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO); | 1999 | e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO); |
1958 | if (!e) { | 2000 | if (!e) { |
1959 | put_ldev(mdev); | 2001 | put_ldev(mdev); |
1960 | return FALSE; | 2002 | return false; |
1961 | } | 2003 | } |
1962 | 2004 | ||
1963 | switch (cmd) { | 2005 | switch (cmd) { |
@@ -1970,6 +2012,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un | |||
1970 | case P_RS_DATA_REQUEST: | 2012 | case P_RS_DATA_REQUEST: |
1971 | e->w.cb = w_e_end_rsdata_req; | 2013 | e->w.cb = w_e_end_rsdata_req; |
1972 | fault_type = DRBD_FAULT_RS_RD; | 2014 | fault_type = DRBD_FAULT_RS_RD; |
2015 | /* used in the sector offset progress display */ | ||
2016 | mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); | ||
1973 | break; | 2017 | break; |
1974 | 2018 | ||
1975 | case P_OV_REPLY: | 2019 | case P_OV_REPLY: |
@@ -1991,7 +2035,11 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un | |||
1991 | if (cmd == P_CSUM_RS_REQUEST) { | 2035 | if (cmd == P_CSUM_RS_REQUEST) { |
1992 | D_ASSERT(mdev->agreed_pro_version >= 89); | 2036 | D_ASSERT(mdev->agreed_pro_version >= 89); |
1993 | e->w.cb = w_e_end_csum_rs_req; | 2037 | e->w.cb = w_e_end_csum_rs_req; |
2038 | /* used in the sector offset progress display */ | ||
2039 | mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); | ||
1994 | } else if (cmd == P_OV_REPLY) { | 2040 | } else if (cmd == P_OV_REPLY) { |
2041 | /* track progress, we may need to throttle */ | ||
2042 | atomic_add(size >> 9, &mdev->rs_sect_in); | ||
1995 | e->w.cb = w_e_end_ov_reply; | 2043 | e->w.cb = w_e_end_ov_reply; |
1996 | dec_rs_pending(mdev); | 2044 | dec_rs_pending(mdev); |
1997 | /* drbd_rs_begin_io done when we sent this request, | 2045 | /* drbd_rs_begin_io done when we sent this request, |
@@ -2003,9 +2051,16 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un | |||
2003 | case P_OV_REQUEST: | 2051 | case P_OV_REQUEST: |
2004 | if (mdev->ov_start_sector == ~(sector_t)0 && | 2052 | if (mdev->ov_start_sector == ~(sector_t)0 && |
2005 | mdev->agreed_pro_version >= 90) { | 2053 | mdev->agreed_pro_version >= 90) { |
2054 | unsigned long now = jiffies; | ||
2055 | int i; | ||
2006 | mdev->ov_start_sector = sector; | 2056 | mdev->ov_start_sector = sector; |
2007 | mdev->ov_position = sector; | 2057 | mdev->ov_position = sector; |
2008 | mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector); | 2058 | mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector); |
2059 | mdev->rs_total = mdev->ov_left; | ||
2060 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { | ||
2061 | mdev->rs_mark_left[i] = mdev->ov_left; | ||
2062 | mdev->rs_mark_time[i] = now; | ||
2063 | } | ||
2009 | dev_info(DEV, "Online Verify start sector: %llu\n", | 2064 | dev_info(DEV, "Online Verify start sector: %llu\n", |
2010 | (unsigned long long)sector); | 2065 | (unsigned long long)sector); |
2011 | } | 2066 | } |
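
When online verify starts at a nonzero sector, the hunk above now sizes ov_left from the whole bitmap, copies it into rs_total, and seeds every progress mark with the current time so rate estimates start fresh. A small sketch of that reset, with an assumed mark count and time source:

#include <stdio.h>
#include <time.h>

#define SYNC_MARKS 8    /* stand-in for DRBD_SYNC_MARKS */

struct verify_progress {
        unsigned long rs_mark_left[SYNC_MARKS];
        time_t rs_mark_time[SYNC_MARKS];
};

/* Seed every mark with the same "now" and the same amount left, so the
 * first rate sample after a mid-device verify start is not computed
 * against stale data from an earlier run. */
static void verify_start(struct verify_progress *p, unsigned long ov_left)
{
        time_t now = time(NULL);
        int i;

        for (i = 0; i < SYNC_MARKS; i++) {
                p->rs_mark_left[i] = ov_left;
                p->rs_mark_time[i] = now;
        }
}

int main(void)
{
        struct verify_progress p;

        verify_start(&p, 123456);
        printf("left=%lu\n", p.rs_mark_left[0]);
        return 0;
}
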
@@ -2042,9 +2097,9 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un | |||
2042 | * we would also throttle its application reads. | 2097 | * we would also throttle its application reads. |
2043 | * In that case, throttling is done on the SyncTarget only. | 2098 | * In that case, throttling is done on the SyncTarget only. |
2044 | */ | 2099 | */ |
2045 | if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev)) | 2100 | if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector)) |
2046 | msleep(100); | 2101 | schedule_timeout_uninterruptible(HZ/10); |
2047 | if (drbd_rs_begin_io(mdev, e->sector)) | 2102 | if (drbd_rs_begin_io(mdev, sector)) |
2048 | goto out_free_e; | 2103 | goto out_free_e; |
2049 | 2104 | ||
2050 | submit_for_resync: | 2105 | submit_for_resync: |
@@ -2057,11 +2112,10 @@ submit: | |||
2057 | spin_unlock_irq(&mdev->req_lock); | 2112 | spin_unlock_irq(&mdev->req_lock); |
2058 | 2113 | ||
2059 | if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) | 2114 | if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) |
2060 | return TRUE; | 2115 | return true; |
2061 | 2116 | ||
2062 | /* drbd_submit_ee currently fails for one reason only: | 2117 | /* don't care for the reason here */ |
2063 | * not being able to allocate enough bios. | 2118 | dev_err(DEV, "submit failed, triggering re-connect\n"); |
2064 | * Is dropping the connection going to help? */ | ||
2065 | spin_lock_irq(&mdev->req_lock); | 2119 | spin_lock_irq(&mdev->req_lock); |
2066 | list_del(&e->w.list); | 2120 | list_del(&e->w.list); |
2067 | spin_unlock_irq(&mdev->req_lock); | 2121 | spin_unlock_irq(&mdev->req_lock); |
@@ -2070,7 +2124,7 @@ submit: | |||
2070 | out_free_e: | 2124 | out_free_e: |
2071 | put_ldev(mdev); | 2125 | put_ldev(mdev); |
2072 | drbd_free_ee(mdev, e); | 2126 | drbd_free_ee(mdev, e); |
2073 | return FALSE; | 2127 | return false; |
2074 | } | 2128 | } |
2075 | 2129 | ||
2076 | static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) | 2130 | static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) |
@@ -2147,10 +2201,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) | |||
2147 | 2201 | ||
2148 | static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) | 2202 | static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) |
2149 | { | 2203 | { |
2150 | int self, peer, hg, rv = -100; | 2204 | int hg, rv = -100; |
2151 | |||
2152 | self = mdev->ldev->md.uuid[UI_BITMAP] & 1; | ||
2153 | peer = mdev->p_uuid[UI_BITMAP] & 1; | ||
2154 | 2205 | ||
2155 | switch (mdev->net_conf->after_sb_1p) { | 2206 | switch (mdev->net_conf->after_sb_1p) { |
2156 | case ASB_DISCARD_YOUNGER_PRI: | 2207 | case ASB_DISCARD_YOUNGER_PRI: |
@@ -2177,12 +2228,14 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) | |||
2177 | case ASB_CALL_HELPER: | 2228 | case ASB_CALL_HELPER: |
2178 | hg = drbd_asb_recover_0p(mdev); | 2229 | hg = drbd_asb_recover_0p(mdev); |
2179 | if (hg == -1 && mdev->state.role == R_PRIMARY) { | 2230 | if (hg == -1 && mdev->state.role == R_PRIMARY) { |
2180 | self = drbd_set_role(mdev, R_SECONDARY, 0); | 2231 | enum drbd_state_rv rv2; |
2232 | |||
2233 | drbd_set_role(mdev, R_SECONDARY, 0); | ||
2181 | /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, | 2234 | /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, |
2182 | * we might be here in C_WF_REPORT_PARAMS which is transient. | 2235 | * we might be here in C_WF_REPORT_PARAMS which is transient. |
2183 | * we do not need to wait for the after state change work either. */ | 2236 | * we do not need to wait for the after state change work either. */ |
2184 | self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); | 2237 | rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); |
2185 | if (self != SS_SUCCESS) { | 2238 | if (rv2 != SS_SUCCESS) { |
2186 | drbd_khelper(mdev, "pri-lost-after-sb"); | 2239 | drbd_khelper(mdev, "pri-lost-after-sb"); |
2187 | } else { | 2240 | } else { |
2188 | dev_warn(DEV, "Successfully gave up primary role.\n"); | 2241 | dev_warn(DEV, "Successfully gave up primary role.\n"); |
@@ -2197,10 +2250,7 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) | |||
2197 | 2250 | ||
2198 | static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) | 2251 | static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) |
2199 | { | 2252 | { |
2200 | int self, peer, hg, rv = -100; | 2253 | int hg, rv = -100; |
2201 | |||
2202 | self = mdev->ldev->md.uuid[UI_BITMAP] & 1; | ||
2203 | peer = mdev->p_uuid[UI_BITMAP] & 1; | ||
2204 | 2254 | ||
2205 | switch (mdev->net_conf->after_sb_2p) { | 2255 | switch (mdev->net_conf->after_sb_2p) { |
2206 | case ASB_DISCARD_YOUNGER_PRI: | 2256 | case ASB_DISCARD_YOUNGER_PRI: |
@@ -2220,11 +2270,13 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) | |||
2220 | case ASB_CALL_HELPER: | 2270 | case ASB_CALL_HELPER: |
2221 | hg = drbd_asb_recover_0p(mdev); | 2271 | hg = drbd_asb_recover_0p(mdev); |
2222 | if (hg == -1) { | 2272 | if (hg == -1) { |
2273 | enum drbd_state_rv rv2; | ||
2274 | |||
2223 | /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, | 2275 | /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, |
2224 | * we might be here in C_WF_REPORT_PARAMS which is transient. | 2276 | * we might be here in C_WF_REPORT_PARAMS which is transient. |
2225 | * we do not need to wait for the after state change work either. */ | 2277 | * we do not need to wait for the after state change work either. */ |
2226 | self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); | 2278 | rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); |
2227 | if (self != SS_SUCCESS) { | 2279 | if (rv2 != SS_SUCCESS) { |
2228 | drbd_khelper(mdev, "pri-lost-after-sb"); | 2280 | drbd_khelper(mdev, "pri-lost-after-sb"); |
2229 | } else { | 2281 | } else { |
2230 | dev_warn(DEV, "Successfully gave up primary role.\n"); | 2282 | dev_warn(DEV, "Successfully gave up primary role.\n"); |
@@ -2263,6 +2315,8 @@ static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid, | |||
2263 | -2 C_SYNC_TARGET set BitMap | 2315 | -2 C_SYNC_TARGET set BitMap |
2264 | -100 after split brain, disconnect | 2316 | -100 after split brain, disconnect |
2265 | -1000 unrelated data | 2317 | -1000 unrelated data |
2318 | -1091 requires proto 91 | ||
2319 | -1096 requires proto 96 | ||
2266 | */ | 2320 | */ |
2267 | static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) | 2321 | static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) |
2268 | { | 2322 | { |
@@ -2292,7 +2346,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l | |||
2292 | if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { | 2346 | if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { |
2293 | 2347 | ||
2294 | if (mdev->agreed_pro_version < 91) | 2348 | if (mdev->agreed_pro_version < 91) |
2295 | return -1001; | 2349 | return -1091; |
2296 | 2350 | ||
2297 | if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && | 2351 | if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && |
2298 | (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { | 2352 | (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { |
@@ -2313,7 +2367,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l | |||
2313 | if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { | 2367 | if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { |
2314 | 2368 | ||
2315 | if (mdev->agreed_pro_version < 91) | 2369 | if (mdev->agreed_pro_version < 91) |
2316 | return -1001; | 2370 | return -1091; |
2317 | 2371 | ||
2318 | if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && | 2372 | if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && |
2319 | (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) { | 2373 | (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) { |
@@ -2358,17 +2412,22 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l | |||
2358 | *rule_nr = 51; | 2412 | *rule_nr = 51; |
2359 | peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); | 2413 | peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); |
2360 | if (self == peer) { | 2414 | if (self == peer) { |
2361 | self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); | 2415 | if (mdev->agreed_pro_version < 96 ? |
2362 | peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1); | 2416 | (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == |
2363 | if (self == peer) { | 2417 | (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : |
2418 | peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) { | ||
2364 | /* The last P_SYNC_UUID did not get through. Undo the last start of | 2419 | /* The last P_SYNC_UUID did not get through. Undo the last start of
2365 | resync as sync source modifications of the peer's UUIDs. */ | 2420 | resync as sync source modifications of the peer's UUIDs. */ |
2366 | 2421 | ||
2367 | if (mdev->agreed_pro_version < 91) | 2422 | if (mdev->agreed_pro_version < 91) |
2368 | return -1001; | 2423 | return -1091; |
2369 | 2424 | ||
2370 | mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START]; | 2425 | mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START]; |
2371 | mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1]; | 2426 | mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1]; |
2427 | |||
2428 | dev_info(DEV, "Did not get last syncUUID packet, corrected:\n"); ||
2429 | drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); | ||
2430 | |||
2372 | return -1; | 2431 | return -1; |
2373 | } | 2432 | } |
2374 | } | 2433 | } |
@@ -2390,20 +2449,20 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l | |||
2390 | *rule_nr = 71; | 2449 | *rule_nr = 71; |
2391 | self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); | 2450 | self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); |
2392 | if (self == peer) { | 2451 | if (self == peer) { |
2393 | self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1); | 2452 | if (mdev->agreed_pro_version < 96 ? |
2394 | peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); | 2453 | (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == |
2395 | if (self == peer) { | 2454 | (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) : |
2455 | self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { | ||
2396 | /* The last P_SYNC_UUID did not get through. Undo the last start of | 2456 | /* The last P_SYNC_UUID did not get through. Undo the last start of
2397 | resync as sync source modifications of our UUIDs. */ | 2457 | resync as sync source modifications of our UUIDs. */ |
2398 | 2458 | ||
2399 | if (mdev->agreed_pro_version < 91) | 2459 | if (mdev->agreed_pro_version < 91) |
2400 | return -1001; | 2460 | return -1091; |
2401 | 2461 | ||
2402 | _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]); | 2462 | _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]); |
2403 | _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]); | 2463 | _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]); |
2404 | 2464 | ||
2405 | dev_info(DEV, "Undid last start of resync:\n"); | 2465 | dev_info(DEV, "Last syncUUID did not get through, corrected:\n"); |
2406 | |||
2407 | drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, | 2466 | drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, |
2408 | mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); | 2467 | mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); |
2409 | 2468 | ||
@@ -2466,8 +2525,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol | |||
2466 | dev_alert(DEV, "Unrelated data, aborting!\n"); | 2525 | dev_alert(DEV, "Unrelated data, aborting!\n"); |
2467 | return C_MASK; | 2526 | return C_MASK; |
2468 | } | 2527 | } |
2469 | if (hg == -1001) { | 2528 | if (hg < -1000) { |
2470 | dev_alert(DEV, "To resolve this both sides have to support at least protocol\n"); | 2529 | dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); |
2471 | return C_MASK; | 2530 | return C_MASK; |
2472 | } | 2531 | } |
2473 | 2532 | ||
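
The old -1001 catch-all becomes a convention: drbd_uuid_compare() returns -1000 minus the protocol version it requires (-1091, -1096), and the caller recovers the version for the log message as -hg - 1000. A tiny self-contained sketch of the decode:

#include <stdio.h>

static void report_handshake(int hg)
{
        if (hg < -1000) {
                /* -1091 -> "requires protocol 91", -1096 -> "... 96" */
                printf("To resolve this both sides have to support "
                       "at least protocol %d\n", -hg - 1000);
        }
}

int main(void)
{
        report_handshake(-1091);
        return 0;
}
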
@@ -2566,7 +2625,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol | |||
2566 | 2625 | ||
2567 | if (abs(hg) >= 2) { | 2626 | if (abs(hg) >= 2) { |
2568 | dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); | 2627 | dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); |
2569 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake")) | 2628 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake", |
2629 | BM_LOCKED_SET_ALLOWED)) | ||
2570 | return C_MASK; | 2630 | return C_MASK; |
2571 | } | 2631 | } |
2572 | 2632 | ||
@@ -2660,7 +2720,7 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig | |||
2660 | unsigned char *my_alg = mdev->net_conf->integrity_alg; | 2720 | unsigned char *my_alg = mdev->net_conf->integrity_alg; |
2661 | 2721 | ||
2662 | if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size) | 2722 | if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size) |
2663 | return FALSE; | 2723 | return false; |
2664 | 2724 | ||
2665 | p_integrity_alg[SHARED_SECRET_MAX-1] = 0; | 2725 | p_integrity_alg[SHARED_SECRET_MAX-1] = 0; |
2666 | if (strcmp(p_integrity_alg, my_alg)) { | 2726 | if (strcmp(p_integrity_alg, my_alg)) { |
@@ -2671,11 +2731,11 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig | |||
2671 | my_alg[0] ? my_alg : (unsigned char *)"<not-used>"); | 2731 | my_alg[0] ? my_alg : (unsigned char *)"<not-used>"); |
2672 | } | 2732 | } |
2673 | 2733 | ||
2674 | return TRUE; | 2734 | return true; |
2675 | 2735 | ||
2676 | disconnect: | 2736 | disconnect: |
2677 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 2737 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
2678 | return FALSE; | 2738 | return false; |
2679 | } | 2739 | } |
2680 | 2740 | ||
2681 | /* helper function | 2741 | /* helper function |
@@ -2707,7 +2767,7 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev, | |||
2707 | 2767 | ||
2708 | static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size) | 2768 | static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size) |
2709 | { | 2769 | { |
2710 | int ok = TRUE; | 2770 | int ok = true; |
2711 | struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95; | 2771 | struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95; |
2712 | unsigned int header_size, data_size, exp_max_sz; | 2772 | unsigned int header_size, data_size, exp_max_sz; |
2713 | struct crypto_hash *verify_tfm = NULL; | 2773 | struct crypto_hash *verify_tfm = NULL; |
@@ -2725,7 +2785,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
2725 | if (packet_size > exp_max_sz) { | 2785 | if (packet_size > exp_max_sz) { |
2726 | dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", | 2786 | dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", |
2727 | packet_size, exp_max_sz); | 2787 | packet_size, exp_max_sz); |
2728 | return FALSE; | 2788 | return false; |
2729 | } | 2789 | } |
2730 | 2790 | ||
2731 | if (apv <= 88) { | 2791 | if (apv <= 88) { |
@@ -2745,7 +2805,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
2745 | memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); | 2805 | memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); |
2746 | 2806 | ||
2747 | if (drbd_recv(mdev, &p->head.payload, header_size) != header_size) | 2807 | if (drbd_recv(mdev, &p->head.payload, header_size) != header_size) |
2748 | return FALSE; | 2808 | return false; |
2749 | 2809 | ||
2750 | mdev->sync_conf.rate = be32_to_cpu(p->rate); | 2810 | mdev->sync_conf.rate = be32_to_cpu(p->rate); |
2751 | 2811 | ||
@@ -2755,11 +2815,11 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
2755 | dev_err(DEV, "verify-alg too long, " | 2815 | dev_err(DEV, "verify-alg too long, " |
2756 | "peer wants %u, accepting only %u byte\n", | 2816 | "peer wants %u, accepting only %u byte\n", |
2757 | data_size, SHARED_SECRET_MAX); | 2817 | data_size, SHARED_SECRET_MAX); |
2758 | return FALSE; | 2818 | return false; |
2759 | } | 2819 | } |
2760 | 2820 | ||
2761 | if (drbd_recv(mdev, p->verify_alg, data_size) != data_size) | 2821 | if (drbd_recv(mdev, p->verify_alg, data_size) != data_size) |
2762 | return FALSE; | 2822 | return false; |
2763 | 2823 | ||
2764 | /* we expect NUL terminated string */ | 2824 | /* we expect NUL terminated string */ |
2765 | /* but just in case someone tries to be evil */ | 2825 | /* but just in case someone tries to be evil */ |
@@ -2853,7 +2913,7 @@ disconnect: | |||
2853 | /* but free the verify_tfm again, if csums_tfm did not work out */ | 2913 | /* but free the verify_tfm again, if csums_tfm did not work out */ |
2854 | crypto_free_hash(verify_tfm); | 2914 | crypto_free_hash(verify_tfm); |
2855 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 2915 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
2856 | return FALSE; | 2916 | return false; |
2857 | } | 2917 | } |
2858 | 2918 | ||
2859 | static void drbd_setup_order_type(struct drbd_conf *mdev, int peer) | 2919 | static void drbd_setup_order_type(struct drbd_conf *mdev, int peer) |
@@ -2879,7 +2939,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
2879 | { | 2939 | { |
2880 | struct p_sizes *p = &mdev->data.rbuf.sizes; | 2940 | struct p_sizes *p = &mdev->data.rbuf.sizes; |
2881 | enum determine_dev_size dd = unchanged; | 2941 | enum determine_dev_size dd = unchanged; |
2882 | unsigned int max_seg_s; | 2942 | unsigned int max_bio_size; |
2883 | sector_t p_size, p_usize, my_usize; | 2943 | sector_t p_size, p_usize, my_usize; |
2884 | int ldsc = 0; /* local disk size changed */ | 2944 | int ldsc = 0; /* local disk size changed */ |
2885 | enum dds_flags ddsf; | 2945 | enum dds_flags ddsf; |
@@ -2890,7 +2950,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
2890 | if (p_size == 0 && mdev->state.disk == D_DISKLESS) { | 2950 | if (p_size == 0 && mdev->state.disk == D_DISKLESS) { |
2891 | dev_err(DEV, "some backing storage is needed\n"); | 2951 | dev_err(DEV, "some backing storage is needed\n"); |
2892 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 2952 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
2893 | return FALSE; | 2953 | return false; |
2894 | } | 2954 | } |
2895 | 2955 | ||
2896 | /* just store the peer's disk size for now. | 2956 | /* just store the peer's disk size for now. |
@@ -2927,18 +2987,17 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
2927 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 2987 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
2928 | mdev->ldev->dc.disk_size = my_usize; | 2988 | mdev->ldev->dc.disk_size = my_usize; |
2929 | put_ldev(mdev); | 2989 | put_ldev(mdev); |
2930 | return FALSE; | 2990 | return false; |
2931 | } | 2991 | } |
2932 | put_ldev(mdev); | 2992 | put_ldev(mdev); |
2933 | } | 2993 | } |
2934 | #undef min_not_zero | ||
2935 | 2994 | ||
2936 | ddsf = be16_to_cpu(p->dds_flags); | 2995 | ddsf = be16_to_cpu(p->dds_flags); |
2937 | if (get_ldev(mdev)) { | 2996 | if (get_ldev(mdev)) { |
2938 | dd = drbd_determin_dev_size(mdev, ddsf); | 2997 | dd = drbd_determin_dev_size(mdev, ddsf); |
2939 | put_ldev(mdev); | 2998 | put_ldev(mdev); |
2940 | if (dd == dev_size_error) | 2999 | if (dd == dev_size_error) |
2941 | return FALSE; | 3000 | return false; |
2942 | drbd_md_sync(mdev); | 3001 | drbd_md_sync(mdev); |
2943 | } else { | 3002 | } else { |
2944 | /* I am diskless, need to accept the peer's size. */ | 3003 | /* I am diskless, need to accept the peer's size. */ |
@@ -2952,14 +3011,14 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
2952 | } | 3011 | } |
2953 | 3012 | ||
2954 | if (mdev->agreed_pro_version < 94) | 3013 | if (mdev->agreed_pro_version < 94) |
2955 | max_seg_s = be32_to_cpu(p->max_segment_size); | 3014 | max_bio_size = be32_to_cpu(p->max_bio_size); |
2956 | else if (mdev->agreed_pro_version == 94) | 3015 | else if (mdev->agreed_pro_version == 94) |
2957 | max_seg_s = DRBD_MAX_SIZE_H80_PACKET; | 3016 | max_bio_size = DRBD_MAX_SIZE_H80_PACKET; |
2958 | else /* drbd 8.3.8 onwards */ | 3017 | else /* drbd 8.3.8 onwards */ |
2959 | max_seg_s = DRBD_MAX_SEGMENT_SIZE; | 3018 | max_bio_size = DRBD_MAX_BIO_SIZE; |
2960 | 3019 | ||
2961 | if (max_seg_s != queue_max_segment_size(mdev->rq_queue)) | 3020 | if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9) |
2962 | drbd_setup_queue_param(mdev, max_seg_s); | 3021 | drbd_setup_queue_param(mdev, max_bio_size); |
2963 | 3022 | ||
2964 | drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type)); | 3023 | drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type)); |
2965 | put_ldev(mdev); | 3024 | put_ldev(mdev); |
@@ -2985,14 +3044,14 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
2985 | } | 3044 | } |
2986 | } | 3045 | } |
2987 | 3046 | ||
2988 | return TRUE; | 3047 | return true; |
2989 | } | 3048 | } |
2990 | 3049 | ||
2991 | static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 3050 | static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
2992 | { | 3051 | { |
2993 | struct p_uuids *p = &mdev->data.rbuf.uuids; | 3052 | struct p_uuids *p = &mdev->data.rbuf.uuids; |
2994 | u64 *p_uuid; | 3053 | u64 *p_uuid; |
2995 | int i; | 3054 | int i, updated_uuids = 0; |
2996 | 3055 | ||
2997 | p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); | 3056 | p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); |
2998 | 3057 | ||
@@ -3009,7 +3068,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3009 | dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", | 3068 | dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", |
3010 | (unsigned long long)mdev->ed_uuid); | 3069 | (unsigned long long)mdev->ed_uuid); |
3011 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 3070 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
3012 | return FALSE; | 3071 | return false; |
3013 | } | 3072 | } |
3014 | 3073 | ||
3015 | if (get_ldev(mdev)) { | 3074 | if (get_ldev(mdev)) { |
@@ -3021,19 +3080,21 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3021 | if (skip_initial_sync) { | 3080 | if (skip_initial_sync) { |
3022 | dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); | 3081 | dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); |
3023 | drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, | 3082 | drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, |
3024 | "clear_n_write from receive_uuids"); | 3083 | "clear_n_write from receive_uuids", |
3084 | BM_LOCKED_TEST_ALLOWED); | ||
3025 | _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); | 3085 | _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); |
3026 | _drbd_uuid_set(mdev, UI_BITMAP, 0); | 3086 | _drbd_uuid_set(mdev, UI_BITMAP, 0); |
3027 | _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), | 3087 | _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), |
3028 | CS_VERBOSE, NULL); | 3088 | CS_VERBOSE, NULL); |
3029 | drbd_md_sync(mdev); | 3089 | drbd_md_sync(mdev); |
3090 | updated_uuids = 1; | ||
3030 | } | 3091 | } |
3031 | put_ldev(mdev); | 3092 | put_ldev(mdev); |
3032 | } else if (mdev->state.disk < D_INCONSISTENT && | 3093 | } else if (mdev->state.disk < D_INCONSISTENT && |
3033 | mdev->state.role == R_PRIMARY) { | 3094 | mdev->state.role == R_PRIMARY) { |
3034 | /* I am a diskless primary, the peer just created a new current UUID | 3095 | /* I am a diskless primary, the peer just created a new current UUID |
3035 | for me. */ | 3096 | for me. */ |
3036 | drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); | 3097 | updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); |
3037 | } | 3098 | } |
3038 | 3099 | ||
3039 | /* Before we test for the disk state, we should wait until an eventually | 3100 | /* Before we test for the disk state, we should wait until an eventually |
@@ -3042,9 +3103,12 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3042 | new disk state... */ | 3103 | new disk state... */ |
3043 | wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); | 3104 | wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); |
3044 | if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) | 3105 | if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) |
3045 | drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); | 3106 | updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); |
3046 | 3107 | ||
3047 | return TRUE; | 3108 | if (updated_uuids) |
3109 | drbd_print_uuids(mdev, "receiver updated UUIDs to"); | ||
3110 | |||
3111 | return true; | ||
3048 | } | 3112 | } |
3049 | 3113 | ||
3050 | /** | 3114 | /** |
@@ -3081,7 +3145,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3081 | { | 3145 | { |
3082 | struct p_req_state *p = &mdev->data.rbuf.req_state; | 3146 | struct p_req_state *p = &mdev->data.rbuf.req_state; |
3083 | union drbd_state mask, val; | 3147 | union drbd_state mask, val; |
3084 | int rv; | 3148 | enum drbd_state_rv rv; |
3085 | 3149 | ||
3086 | mask.i = be32_to_cpu(p->mask); | 3150 | mask.i = be32_to_cpu(p->mask); |
3087 | val.i = be32_to_cpu(p->val); | 3151 | val.i = be32_to_cpu(p->val); |
@@ -3089,7 +3153,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3089 | if (test_bit(DISCARD_CONCURRENT, &mdev->flags) && | 3153 | if (test_bit(DISCARD_CONCURRENT, &mdev->flags) && |
3090 | test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { | 3154 | test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { |
3091 | drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); | 3155 | drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); |
3092 | return TRUE; | 3156 | return true; |
3093 | } | 3157 | } |
3094 | 3158 | ||
3095 | mask = convert_state(mask); | 3159 | mask = convert_state(mask); |
@@ -3100,7 +3164,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3100 | drbd_send_sr_reply(mdev, rv); | 3164 | drbd_send_sr_reply(mdev, rv); |
3101 | drbd_md_sync(mdev); | 3165 | drbd_md_sync(mdev); |
3102 | 3166 | ||
3103 | return TRUE; | 3167 | return true; |
3104 | } | 3168 | } |
3105 | 3169 | ||
3106 | static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 3170 | static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
@@ -3145,7 +3209,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3145 | peer_state.conn == C_CONNECTED) { | 3209 | peer_state.conn == C_CONNECTED) { |
3146 | if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) | 3210 | if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) |
3147 | drbd_resync_finished(mdev); | 3211 | drbd_resync_finished(mdev); |
3148 | return TRUE; | 3212 | return true; |
3149 | } | 3213 | } |
3150 | } | 3214 | } |
3151 | 3215 | ||
@@ -3161,6 +3225,9 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3161 | if (ns.conn == C_WF_REPORT_PARAMS) | 3225 | if (ns.conn == C_WF_REPORT_PARAMS) |
3162 | ns.conn = C_CONNECTED; | 3226 | ns.conn = C_CONNECTED; |
3163 | 3227 | ||
3228 | if (peer_state.conn == C_AHEAD) | ||
3229 | ns.conn = C_BEHIND; | ||
3230 | |||
3164 | if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && | 3231 | if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && |
3165 | get_ldev_if_state(mdev, D_NEGOTIATING)) { | 3232 | get_ldev_if_state(mdev, D_NEGOTIATING)) { |
3166 | int cr; /* consider resync */ | 3233 | int cr; /* consider resync */ |
@@ -3195,10 +3262,10 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3195 | real_peer_disk = D_DISKLESS; | 3262 | real_peer_disk = D_DISKLESS; |
3196 | } else { | 3263 | } else { |
3197 | if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) | 3264 | if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) |
3198 | return FALSE; | 3265 | return false; |
3199 | D_ASSERT(os.conn == C_WF_REPORT_PARAMS); | 3266 | D_ASSERT(os.conn == C_WF_REPORT_PARAMS); |
3200 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 3267 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
3201 | return FALSE; | 3268 | return false; |
3202 | } | 3269 | } |
3203 | } | 3270 | } |
3204 | } | 3271 | } |
@@ -3223,7 +3290,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3223 | drbd_uuid_new_current(mdev); | 3290 | drbd_uuid_new_current(mdev); |
3224 | clear_bit(NEW_CUR_UUID, &mdev->flags); | 3291 | clear_bit(NEW_CUR_UUID, &mdev->flags); |
3225 | drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0)); | 3292 | drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0)); |
3226 | return FALSE; | 3293 | return false; |
3227 | } | 3294 | } |
3228 | rv = _drbd_set_state(mdev, ns, cs_flags, NULL); | 3295 | rv = _drbd_set_state(mdev, ns, cs_flags, NULL); |
3229 | ns = mdev->state; | 3296 | ns = mdev->state; |
@@ -3231,7 +3298,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3231 | 3298 | ||
3232 | if (rv < SS_SUCCESS) { | 3299 | if (rv < SS_SUCCESS) { |
3233 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 3300 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
3234 | return FALSE; | 3301 | return false; |
3235 | } | 3302 | } |
3236 | 3303 | ||
3237 | if (os.conn > C_WF_REPORT_PARAMS) { | 3304 | if (os.conn > C_WF_REPORT_PARAMS) { |
@@ -3249,7 +3316,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3249 | 3316 | ||
3250 | drbd_md_sync(mdev); /* update connected indicator, la_size, ... */ | 3317 | drbd_md_sync(mdev); /* update connected indicator, la_size, ... */ |
3251 | 3318 | ||
3252 | return TRUE; | 3319 | return true; |
3253 | } | 3320 | } |
3254 | 3321 | ||
3255 | static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 3322 | static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
@@ -3258,6 +3325,7 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3258 | 3325 | ||
3259 | wait_event(mdev->misc_wait, | 3326 | wait_event(mdev->misc_wait, |
3260 | mdev->state.conn == C_WF_SYNC_UUID || | 3327 | mdev->state.conn == C_WF_SYNC_UUID || |
3328 | mdev->state.conn == C_BEHIND || | ||
3261 | mdev->state.conn < C_CONNECTED || | 3329 | mdev->state.conn < C_CONNECTED || |
3262 | mdev->state.disk < D_NEGOTIATING); | 3330 | mdev->state.disk < D_NEGOTIATING); |
3263 | 3331 | ||
@@ -3269,32 +3337,42 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3269 | _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); | 3337 | _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); |
3270 | _drbd_uuid_set(mdev, UI_BITMAP, 0UL); | 3338 | _drbd_uuid_set(mdev, UI_BITMAP, 0UL); |
3271 | 3339 | ||
3340 | drbd_print_uuids(mdev, "updated sync uuid"); | ||
3272 | drbd_start_resync(mdev, C_SYNC_TARGET); | 3341 | drbd_start_resync(mdev, C_SYNC_TARGET); |
3273 | 3342 | ||
3274 | put_ldev(mdev); | 3343 | put_ldev(mdev); |
3275 | } else | 3344 | } else |
3276 | dev_err(DEV, "Ignoring SyncUUID packet!\n"); | 3345 | dev_err(DEV, "Ignoring SyncUUID packet!\n"); |
3277 | 3346 | ||
3278 | return TRUE; | 3347 | return true; |
3279 | } | 3348 | } |
3280 | 3349 | ||
3281 | enum receive_bitmap_ret { OK, DONE, FAILED }; | 3350 | /** |
3282 | 3351 | * receive_bitmap_plain | |
3283 | static enum receive_bitmap_ret | 3352 | * |
3353 | * Return 0 when done, 1 when another iteration is needed, and a negative error | ||
3354 | * code upon failure. | ||
3355 | */ | ||
3356 | static int | ||
3284 | receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, | 3357 | receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, |
3285 | unsigned long *buffer, struct bm_xfer_ctx *c) | 3358 | unsigned long *buffer, struct bm_xfer_ctx *c) |
3286 | { | 3359 | { |
3287 | unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); | 3360 | unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); |
3288 | unsigned want = num_words * sizeof(long); | 3361 | unsigned want = num_words * sizeof(long); |
3362 | int err; | ||
3289 | 3363 | ||
3290 | if (want != data_size) { | 3364 | if (want != data_size) { |
3291 | dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); | 3365 | dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); |
3292 | return FAILED; | 3366 | return -EIO; |
3293 | } | 3367 | } |
3294 | if (want == 0) | 3368 | if (want == 0) |
3295 | return DONE; | 3369 | return 0; |
3296 | if (drbd_recv(mdev, buffer, want) != want) | 3370 | err = drbd_recv(mdev, buffer, want); |
3297 | return FAILED; | 3371 | if (err != want) { |
3372 | if (err >= 0) | ||
3373 | err = -EIO; | ||
3374 | return err; | ||
3375 | } | ||
3298 | 3376 | ||
3299 | drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); | 3377 | drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); |
3300 | 3378 | ||
@@ -3303,10 +3381,16 @@ receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, | |||
3303 | if (c->bit_offset > c->bm_bits) | 3381 | if (c->bit_offset > c->bm_bits) |
3304 | c->bit_offset = c->bm_bits; | 3382 | c->bit_offset = c->bm_bits; |
3305 | 3383 | ||
3306 | return OK; | 3384 | return 1; |
3307 | } | 3385 | } |
3308 | 3386 | ||
3309 | static enum receive_bitmap_ret | 3387 | /** |
3388 | * recv_bm_rle_bits | ||
3389 | * | ||
3390 | * Return 0 when done, 1 when another iteration is needed, and a negative error | ||
3391 | * code upon failure. | ||
3392 | */ | ||
3393 | static int | ||
3310 | recv_bm_rle_bits(struct drbd_conf *mdev, | 3394 | recv_bm_rle_bits(struct drbd_conf *mdev, |
3311 | struct p_compressed_bm *p, | 3395 | struct p_compressed_bm *p, |
3312 | struct bm_xfer_ctx *c) | 3396 | struct bm_xfer_ctx *c) |
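
The private OK/DONE/FAILED enum gives way to the convention the new kernel-doc comments spell out: 0 when done, positive when another iteration is needed, a negative error code on failure. That is what lets the caller collapse its exit tests into a single err <= 0 check, as the receive_bitmap hunk further down shows. A self-contained sketch of a consumer loop built on this convention; the chunked consumer is illustrative:

#include <errno.h>
#include <stdio.h>

/* Pretend each call handles one packet's worth of data. */
static int consume_chunk(int *remaining)
{
        if (*remaining < 0)
                return -EIO;    /* corrupt input */
        if (*remaining == 0)
                return 0;       /* done */
        (*remaining)--;
        return 1;               /* another iteration needed */
}

int main(void)
{
        int remaining = 3, err;

        for (;;) {
                err = consume_chunk(&remaining);
                if (err <= 0)
                        break;  /* done or failed */
        }
        if (err < 0)
                fprintf(stderr, "receive failed: %d\n", err);
        return err < 0;
}
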
@@ -3326,18 +3410,18 @@ recv_bm_rle_bits(struct drbd_conf *mdev, | |||
3326 | 3410 | ||
3327 | bits = bitstream_get_bits(&bs, &look_ahead, 64); | 3411 | bits = bitstream_get_bits(&bs, &look_ahead, 64); |
3328 | if (bits < 0) | 3412 | if (bits < 0) |
3329 | return FAILED; | 3413 | return -EIO; |
3330 | 3414 | ||
3331 | for (have = bits; have > 0; s += rl, toggle = !toggle) { | 3415 | for (have = bits; have > 0; s += rl, toggle = !toggle) { |
3332 | bits = vli_decode_bits(&rl, look_ahead); | 3416 | bits = vli_decode_bits(&rl, look_ahead); |
3333 | if (bits <= 0) | 3417 | if (bits <= 0) |
3334 | return FAILED; | 3418 | return -EIO; |
3335 | 3419 | ||
3336 | if (toggle) { | 3420 | if (toggle) { |
3337 | e = s + rl -1; | 3421 | e = s + rl -1; |
3338 | if (e >= c->bm_bits) { | 3422 | if (e >= c->bm_bits) { |
3339 | dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); | 3423 | dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); |
3340 | return FAILED; | 3424 | return -EIO; |
3341 | } | 3425 | } |
3342 | _drbd_bm_set_bits(mdev, s, e); | 3426 | _drbd_bm_set_bits(mdev, s, e); |
3343 | } | 3427 | } |
@@ -3347,14 +3431,14 @@ recv_bm_rle_bits(struct drbd_conf *mdev, | |||
3347 | have, bits, look_ahead, | 3431 | have, bits, look_ahead, |
3348 | (unsigned int)(bs.cur.b - p->code), | 3432 | (unsigned int)(bs.cur.b - p->code), |
3349 | (unsigned int)bs.buf_len); | 3433 | (unsigned int)bs.buf_len); |
3350 | return FAILED; | 3434 | return -EIO; |
3351 | } | 3435 | } |
3352 | look_ahead >>= bits; | 3436 | look_ahead >>= bits; |
3353 | have -= bits; | 3437 | have -= bits; |
3354 | 3438 | ||
3355 | bits = bitstream_get_bits(&bs, &tmp, 64 - have); | 3439 | bits = bitstream_get_bits(&bs, &tmp, 64 - have); |
3356 | if (bits < 0) | 3440 | if (bits < 0) |
3357 | return FAILED; | 3441 | return -EIO; |
3358 | look_ahead |= tmp << have; | 3442 | look_ahead |= tmp << have; |
3359 | have += bits; | 3443 | have += bits; |
3360 | } | 3444 | } |
@@ -3362,10 +3446,16 @@ recv_bm_rle_bits(struct drbd_conf *mdev, | |||
3362 | c->bit_offset = s; | 3446 | c->bit_offset = s; |
3363 | bm_xfer_ctx_bit_to_word_offset(c); | 3447 | bm_xfer_ctx_bit_to_word_offset(c); |
3364 | 3448 | ||
3365 | return (s == c->bm_bits) ? DONE : OK; | 3449 | return (s != c->bm_bits); |
3366 | } | 3450 | } |
3367 | 3451 | ||
3368 | static enum receive_bitmap_ret | 3452 | /** |
3453 | * decode_bitmap_c | ||
3454 | * | ||
3455 | * Return 0 when done, 1 when another iteration is needed, and a negative error | ||
3456 | * code upon failure. | ||
3457 | */ | ||
3458 | static int | ||
3369 | decode_bitmap_c(struct drbd_conf *mdev, | 3459 | decode_bitmap_c(struct drbd_conf *mdev, |
3370 | struct p_compressed_bm *p, | 3460 | struct p_compressed_bm *p, |
3371 | struct bm_xfer_ctx *c) | 3461 | struct bm_xfer_ctx *c) |
@@ -3379,7 +3469,7 @@ decode_bitmap_c(struct drbd_conf *mdev, | |||
3379 | 3469 | ||
3380 | dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); | 3470 | dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); |
3381 | drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); | 3471 | drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); |
3382 | return FAILED; | 3472 | return -EIO; |
3383 | } | 3473 | } |
3384 | 3474 | ||
3385 | void INFO_bm_xfer_stats(struct drbd_conf *mdev, | 3475 | void INFO_bm_xfer_stats(struct drbd_conf *mdev, |
@@ -3428,13 +3518,13 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3428 | { | 3518 | { |
3429 | struct bm_xfer_ctx c; | 3519 | struct bm_xfer_ctx c; |
3430 | void *buffer; | 3520 | void *buffer; |
3431 | enum receive_bitmap_ret ret; | 3521 | int err; |
3432 | int ok = FALSE; | 3522 | int ok = false; |
3433 | struct p_header80 *h = &mdev->data.rbuf.header.h80; | 3523 | struct p_header80 *h = &mdev->data.rbuf.header.h80; |
3434 | 3524 | ||
3435 | wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); | 3525 | drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED); |
3436 | 3526 | /* you are supposed to send additional out-of-sync information | |
3437 | drbd_bm_lock(mdev, "receive bitmap"); | 3527 | * if you actually set bits during this phase */ |
3438 | 3528 | ||
3439 | /* maybe we should use some per thread scratch page, | 3529 | /* maybe we should use some per thread scratch page, |
3440 | * and allocate that during initial device creation? */ | 3530 | * and allocate that during initial device creation? */ |
@@ -3449,9 +3539,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3449 | .bm_words = drbd_bm_words(mdev), | 3539 | .bm_words = drbd_bm_words(mdev), |
3450 | }; | 3540 | }; |
3451 | 3541 | ||
3452 | do { | 3542 | for(;;) { |
3453 | if (cmd == P_BITMAP) { | 3543 | if (cmd == P_BITMAP) { |
3454 | ret = receive_bitmap_plain(mdev, data_size, buffer, &c); | 3544 | err = receive_bitmap_plain(mdev, data_size, buffer, &c); |
3455 | } else if (cmd == P_COMPRESSED_BITMAP) { | 3545 | } else if (cmd == P_COMPRESSED_BITMAP) { |
3456 | /* MAYBE: sanity check that we speak proto >= 90, | 3546 | /* MAYBE: sanity check that we speak proto >= 90, |
3457 | * and the feature is enabled! */ | 3547 | * and the feature is enabled! */ |
@@ -3468,9 +3558,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3468 | goto out; | 3558 | goto out; |
3469 | if (data_size <= (sizeof(*p) - sizeof(p->head))) { | 3559 | if (data_size <= (sizeof(*p) - sizeof(p->head))) { |
3470 | dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); | 3560 | dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); |
3471 | return FAILED; | 3561 | goto out; |
3472 | } | 3562 | } |
3473 | ret = decode_bitmap_c(mdev, p, &c); | 3563 | err = decode_bitmap_c(mdev, p, &c); |
3474 | } else { | 3564 | } else { |
3475 | dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd); | 3565 | dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd); |
3476 | goto out; | 3566 | goto out; |
@@ -3479,24 +3569,26 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3479 | c.packets[cmd == P_BITMAP]++; | 3569 | c.packets[cmd == P_BITMAP]++; |
3480 | c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size; | 3570 | c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size; |
3481 | 3571 | ||
3482 | if (ret != OK) | 3572 | if (err <= 0) { |
3573 | if (err < 0) | ||
3574 | goto out; | ||
3483 | break; | 3575 | break; |
3484 | 3576 | } | |
3485 | if (!drbd_recv_header(mdev, &cmd, &data_size)) | 3577 | if (!drbd_recv_header(mdev, &cmd, &data_size)) |
3486 | goto out; | 3578 | goto out; |
3487 | } while (ret == OK); | 3579 | } |
3488 | if (ret == FAILED) | ||
3489 | goto out; | ||
3490 | 3580 | ||
3491 | INFO_bm_xfer_stats(mdev, "receive", &c); | 3581 | INFO_bm_xfer_stats(mdev, "receive", &c); |
3492 | 3582 | ||
3493 | if (mdev->state.conn == C_WF_BITMAP_T) { | 3583 | if (mdev->state.conn == C_WF_BITMAP_T) { |
3584 | enum drbd_state_rv rv; | ||
3585 | |||
3494 | ok = !drbd_send_bitmap(mdev); | 3586 | ok = !drbd_send_bitmap(mdev); |
3495 | if (!ok) | 3587 | if (!ok) |
3496 | goto out; | 3588 | goto out; |
3497 | /* Omit CS_ORDERED with this state transition to avoid deadlocks. */ | 3589 | /* Omit CS_ORDERED with this state transition to avoid deadlocks. */ |
3498 | ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); | 3590 | rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); |
3499 | D_ASSERT(ok == SS_SUCCESS); | 3591 | D_ASSERT(rv == SS_SUCCESS); |
3500 | } else if (mdev->state.conn != C_WF_BITMAP_S) { | 3592 | } else if (mdev->state.conn != C_WF_BITMAP_S) { |
3501 | /* admin may have requested C_DISCONNECTING, | 3593 | /* admin may have requested C_DISCONNECTING, |
3502 | * other threads may have noticed network errors */ | 3594 | * other threads may have noticed network errors */ |
@@ -3504,7 +3596,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3504 | drbd_conn_str(mdev->state.conn)); | 3596 | drbd_conn_str(mdev->state.conn)); |
3505 | } | 3597 | } |
3506 | 3598 | ||
3507 | ok = TRUE; | 3599 | ok = true; |
3508 | out: | 3600 | out: |
3509 | drbd_bm_unlock(mdev); | 3601 | drbd_bm_unlock(mdev); |
3510 | if (ok && mdev->state.conn == C_WF_BITMAP_S) | 3602 | if (ok && mdev->state.conn == C_WF_BITMAP_S) |
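
Review note: receive_bitmap() drops its local OK/FAILED/DONE tri-state in favor of the plain integer convention used by the rest of the receiver: a negative err is a hard failure, zero means the bitmap transfer is complete, positive means more packets are expected. A condensed sketch of the resulting control flow (locking and the c.packets/c.bytes accounting elided):

    for (;;) {
        if (cmd == P_BITMAP)
            err = receive_bitmap_plain(mdev, data_size, buffer, &c);
        else if (cmd == P_COMPRESSED_BITMAP)
            err = decode_bitmap_c(mdev, p, &c);
        else
            goto out;                    /* unexpected command */

        if (err < 0)                     /* hard failure */
            goto out;
        if (err == 0)                    /* transfer complete */
            break;
        /* err > 0: another bitmap packet is expected */
        if (!drbd_recv_header(mdev, &cmd, &data_size))
            goto out;
    }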
@@ -3538,7 +3630,26 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, u | |||
3538 | * with the data requests being unplugged */ | 3630 | * with the data requests being unplugged */ |
3539 | drbd_tcp_quickack(mdev->data.socket); | 3631 | drbd_tcp_quickack(mdev->data.socket); |
3540 | 3632 | ||
3541 | return TRUE; | 3633 | return true; |
3634 | } | ||
3635 | |||
3636 | static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | ||
3637 | { | ||
3638 | struct p_block_desc *p = &mdev->data.rbuf.block_desc; | ||
3639 | |||
3640 | switch (mdev->state.conn) { | ||
3641 | case C_WF_SYNC_UUID: | ||
3642 | case C_WF_BITMAP_T: | ||
3643 | case C_BEHIND: | ||
3644 | break; | ||
3645 | default: | ||
3646 | dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", | ||
3647 | drbd_conn_str(mdev->state.conn)); | ||
3648 | } | ||
3649 | |||
3650 | drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); | ||
3651 | |||
3652 | return true; | ||
3542 | } | 3653 | } |
3543 | 3654 | ||
3544 | typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive); | 3655 | typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive); |
@@ -3571,6 +3682,7 @@ static struct data_cmd drbd_cmd_handler[] = { | |||
3571 | [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, | 3682 | [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, |
3572 | [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, | 3683 | [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, |
3573 | [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, | 3684 | [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, |
3685 | [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, | ||
3574 | /* anything missing from this table is in | 3686 | /* anything missing from this table is in |
3575 | * the asender_tbl, see get_asender_cmd */ | 3687 | * the asender_tbl, see get_asender_cmd */ |
3576 | [P_MAX_CMD] = { 0, 0, NULL }, | 3688 | [P_MAX_CMD] = { 0, 0, NULL }, |
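
receive_out_of_sync() is the receiving half of the new Ahead/Behind mode: the peer sends bare P_OUT_OF_SYNC block descriptors instead of write data, and the receiver only records the range in its bitmap via drbd_set_out_of_sync() for a later resync, warning if the connection state is not one where that is expected. It plugs into the same table-driven dispatch as the other data-socket packets; judging by the initializers, each entry carries a payload flag, the fixed packet size that drbdd() length-checks before the handler runs, and the handler itself (the field names below are inferred from the initializers, not quoted from the header):

    struct data_cmd {
        int expect_payload;      /* nonzero: variable payload follows */
        size_t pkt_size;         /* fixed part, verified by drbdd() */
        drbd_cmd_handler_f function;
    };

    [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },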
@@ -3610,7 +3722,8 @@ static void drbdd(struct drbd_conf *mdev) | |||
3610 | if (shs) { | 3722 | if (shs) { |
3611 | rv = drbd_recv(mdev, &header->h80.payload, shs); | 3723 | rv = drbd_recv(mdev, &header->h80.payload, shs); |
3612 | if (unlikely(rv != shs)) { | 3724 | if (unlikely(rv != shs)) { |
3613 | dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); | 3725 | if (!signal_pending(current)) |
3726 | dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv); | ||
3614 | goto err_out; | 3727 | goto err_out; |
3615 | } | 3728 | } |
3616 | } | 3729 | } |
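
The short-read diagnostics here, and in drbd_do_handshake()/drbd_do_auth() below, are downgraded from dev_err to dev_warn and skipped entirely when a signal is pending: a pending signal usually means the receiver thread is being told to shut down, so the truncated read is expected rather than a peer problem. The recurring pattern:

    rv = drbd_recv(mdev, buf, size);
    if (rv != size) {
        if (!signal_pending(current))    /* stay quiet during teardown */
            dev_warn(DEV, "short read: rv=%d\n", rv);
        goto err_out;
    }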
@@ -3682,9 +3795,6 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3682 | 3795 | ||
3683 | if (mdev->state.conn == C_STANDALONE) | 3796 | if (mdev->state.conn == C_STANDALONE) |
3684 | return; | 3797 | return; |
3685 | if (mdev->state.conn >= C_WF_CONNECTION) | ||
3686 | dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n", | ||
3687 | drbd_conn_str(mdev->state.conn)); | ||
3688 | 3798 | ||
3689 | /* asender does not clean up anything. it must not interfere, either */ | 3799 | /* asender does not clean up anything. it must not interfere, either */ |
3690 | drbd_thread_stop(&mdev->asender); | 3800 | drbd_thread_stop(&mdev->asender); |
@@ -3713,6 +3823,8 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3713 | atomic_set(&mdev->rs_pending_cnt, 0); | 3823 | atomic_set(&mdev->rs_pending_cnt, 0); |
3714 | wake_up(&mdev->misc_wait); | 3824 | wake_up(&mdev->misc_wait); |
3715 | 3825 | ||
3826 | del_timer(&mdev->request_timer); | ||
3827 | |||
3716 | /* make sure syncer is stopped and w_resume_next_sg queued */ | 3828 | /* make sure syncer is stopped and w_resume_next_sg queued */ |
3717 | del_timer_sync(&mdev->resync_timer); | 3829 | del_timer_sync(&mdev->resync_timer); |
3718 | resync_timer_fn((unsigned long)mdev); | 3830 | resync_timer_fn((unsigned long)mdev); |
@@ -3758,13 +3870,6 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3758 | if (os.conn == C_DISCONNECTING) { | 3870 | if (os.conn == C_DISCONNECTING) { |
3759 | wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0); | 3871 | wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0); |
3760 | 3872 | ||
3761 | if (!is_susp(mdev->state)) { | ||
3762 | /* we must not free the tl_hash | ||
3763 | * while application io is still on the fly */ | ||
3764 | wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); | ||
3765 | drbd_free_tl_hash(mdev); | ||
3766 | } | ||
3767 | |||
3768 | crypto_free_hash(mdev->cram_hmac_tfm); | 3873 | crypto_free_hash(mdev->cram_hmac_tfm); |
3769 | mdev->cram_hmac_tfm = NULL; | 3874 | mdev->cram_hmac_tfm = NULL; |
3770 | 3875 | ||
@@ -3773,6 +3878,10 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3773 | drbd_request_state(mdev, NS(conn, C_STANDALONE)); | 3878 | drbd_request_state(mdev, NS(conn, C_STANDALONE)); |
3774 | } | 3879 | } |
3775 | 3880 | ||
3881 | /* serialize with bitmap writeout triggered by the state change, | ||
3882 | * if any. */ | ||
3883 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); | ||
3884 | |||
3776 | /* tcp_close and release of sendpage pages can be deferred. I don't | 3885 | /* tcp_close and release of sendpage pages can be deferred. I don't |
3777 | * want to use SO_LINGER, because apparently it can be deferred for | 3886 | * want to use SO_LINGER, because apparently it can be deferred for |
3778 | * more than 20 seconds (longest time I checked). | 3887 | * more than 20 seconds (longest time I checked). |
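
Several teardown fixes meet in drbd_disconnect(): the stale "expected < WFConnection" assertion is gone (the function is now legitimately reached in other states), the new per-device request timer is stopped, and the function waits for any bitmap writeout that the state change itself may have kicked off, so bitmap I/O cannot race with the socket teardown. The tl_hash release moved out of this path entirely, since the hash must not be freed while application I/O may still reference it. The new synchronization point, in isolation:

    /* serialize with bitmap writeout triggered by the state change, if any */
    wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));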
@@ -3873,7 +3982,8 @@ static int drbd_do_handshake(struct drbd_conf *mdev) | |||
3873 | rv = drbd_recv(mdev, &p->head.payload, expect); | 3982 | rv = drbd_recv(mdev, &p->head.payload, expect); |
3874 | 3983 | ||
3875 | if (rv != expect) { | 3984 | if (rv != expect) { |
3876 | dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv); | 3985 | if (!signal_pending(current)) |
3986 | dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv); | ||
3877 | return 0; | 3987 | return 0; |
3878 | } | 3988 | } |
3879 | 3989 | ||
@@ -3975,7 +4085,8 @@ static int drbd_do_auth(struct drbd_conf *mdev) | |||
3975 | rv = drbd_recv(mdev, peers_ch, length); | 4085 | rv = drbd_recv(mdev, peers_ch, length); |
3976 | 4086 | ||
3977 | if (rv != length) { | 4087 | if (rv != length) { |
3978 | dev_err(DEV, "short read AuthChallenge: l=%u\n", rv); | 4088 | if (!signal_pending(current)) |
4089 | dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv); | ||
3979 | rv = 0; | 4090 | rv = 0; |
3980 | goto fail; | 4091 | goto fail; |
3981 | } | 4092 | } |
@@ -4022,7 +4133,8 @@ static int drbd_do_auth(struct drbd_conf *mdev) | |||
4022 | rv = drbd_recv(mdev, response , resp_size); | 4133 | rv = drbd_recv(mdev, response , resp_size); |
4023 | 4134 | ||
4024 | if (rv != resp_size) { | 4135 | if (rv != resp_size) { |
4025 | dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv); | 4136 | if (!signal_pending(current)) |
4137 | dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv); | ||
4026 | rv = 0; | 4138 | rv = 0; |
4027 | goto fail; | 4139 | goto fail; |
4028 | } | 4140 | } |
@@ -4074,8 +4186,7 @@ int drbdd_init(struct drbd_thread *thi) | |||
4074 | h = drbd_connect(mdev); | 4186 | h = drbd_connect(mdev); |
4075 | if (h == 0) { | 4187 | if (h == 0) { |
4076 | drbd_disconnect(mdev); | 4188 | drbd_disconnect(mdev); |
4077 | __set_current_state(TASK_INTERRUPTIBLE); | 4189 | schedule_timeout_interruptible(HZ); |
4078 | schedule_timeout(HZ); | ||
4079 | } | 4190 | } |
4080 | if (h == -1) { | 4191 | if (h == -1) { |
4081 | dev_warn(DEV, "Discarding network configuration.\n"); | 4192 | dev_warn(DEV, "Discarding network configuration.\n"); |
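
Minor cleanup: schedule_timeout_interruptible() is the stock kernel helper that folds the two removed lines into one. For reference, it is defined in kernel/timer.c essentially as:

    signed long __sched schedule_timeout_interruptible(signed long timeout)
    {
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
    }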
@@ -4113,7 +4224,7 @@ static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h) | |||
4113 | } | 4224 | } |
4114 | wake_up(&mdev->state_wait); | 4225 | wake_up(&mdev->state_wait); |
4115 | 4226 | ||
4116 | return TRUE; | 4227 | return true; |
4117 | } | 4228 | } |
4118 | 4229 | ||
4119 | static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h) | 4230 | static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4129,7 +4240,7 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4129 | if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) | 4240 | if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) |
4130 | wake_up(&mdev->misc_wait); | 4241 | wake_up(&mdev->misc_wait); |
4131 | 4242 | ||
4132 | return TRUE; | 4243 | return true; |
4133 | } | 4244 | } |
4134 | 4245 | ||
4135 | static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) | 4246 | static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4152,7 +4263,7 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) | |||
4152 | dec_rs_pending(mdev); | 4263 | dec_rs_pending(mdev); |
4153 | atomic_add(blksize >> 9, &mdev->rs_sect_in); | 4264 | atomic_add(blksize >> 9, &mdev->rs_sect_in); |
4154 | 4265 | ||
4155 | return TRUE; | 4266 | return true; |
4156 | } | 4267 | } |
4157 | 4268 | ||
4158 | /* when we receive the ACK for a write request, | 4269 | /* when we receive the ACK for a write request, |
@@ -4176,8 +4287,6 @@ static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev, | |||
4176 | return req; | 4287 | return req; |
4177 | } | 4288 | } |
4178 | } | 4289 | } |
4179 | dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n", | ||
4180 | (void *)(unsigned long)id, (unsigned long long)sector); | ||
4181 | return NULL; | 4290 | return NULL; |
4182 | } | 4291 | } |
4183 | 4292 | ||
@@ -4195,15 +4304,17 @@ static int validate_req_change_req_state(struct drbd_conf *mdev, | |||
4195 | req = validator(mdev, id, sector); | 4304 | req = validator(mdev, id, sector); |
4196 | if (unlikely(!req)) { | 4305 | if (unlikely(!req)) { |
4197 | spin_unlock_irq(&mdev->req_lock); | 4306 | spin_unlock_irq(&mdev->req_lock); |
4198 | dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func); | 4307 | |
4199 | return FALSE; | 4308 | dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func, |
4309 | (void *)(unsigned long)id, (unsigned long long)sector); | ||
4310 | return false; | ||
4200 | } | 4311 | } |
4201 | __req_mod(req, what, &m); | 4312 | __req_mod(req, what, &m); |
4202 | spin_unlock_irq(&mdev->req_lock); | 4313 | spin_unlock_irq(&mdev->req_lock); |
4203 | 4314 | ||
4204 | if (m.bio) | 4315 | if (m.bio) |
4205 | complete_master_bio(mdev, &m); | 4316 | complete_master_bio(mdev, &m); |
4206 | return TRUE; | 4317 | return true; |
4207 | } | 4318 | } |
4208 | 4319 | ||
4209 | static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) | 4320 | static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) |
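
With the dev_err removed from _ack_id_to_req(), the "request not found" report now lives in the one caller that still treats it as an error, and it prints the offending block_id/sector instead of the vague "corrupt block_id/sector pair". The helper's shape is unchanged: look up under req_lock, apply the state transition, complete the master bio outside the lock. A sketch, where validator is one of the *_id_to_req lookup functions:

    spin_lock_irq(&mdev->req_lock);
    req = validator(mdev, id, sector);
    if (unlikely(!req)) {
        spin_unlock_irq(&mdev->req_lock);
        dev_err(DEV, "%s: failed to find req ...\n", func);
        return false;
    }
    __req_mod(req, what, &m);            /* may hand back a finished bio */
    spin_unlock_irq(&mdev->req_lock);
    if (m.bio)
        complete_master_bio(mdev, &m);   /* never under req_lock */
    return true;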
@@ -4218,7 +4329,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4218 | if (is_syncer_block_id(p->block_id)) { | 4329 | if (is_syncer_block_id(p->block_id)) { |
4219 | drbd_set_in_sync(mdev, sector, blksize); | 4330 | drbd_set_in_sync(mdev, sector, blksize); |
4220 | dec_rs_pending(mdev); | 4331 | dec_rs_pending(mdev); |
4221 | return TRUE; | 4332 | return true; |
4222 | } | 4333 | } |
4223 | switch (be16_to_cpu(h->command)) { | 4334 | switch (be16_to_cpu(h->command)) { |
4224 | case P_RS_WRITE_ACK: | 4335 | case P_RS_WRITE_ACK: |
@@ -4239,7 +4350,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4239 | break; | 4350 | break; |
4240 | default: | 4351 | default: |
4241 | D_ASSERT(0); | 4352 | D_ASSERT(0); |
4242 | return FALSE; | 4353 | return false; |
4243 | } | 4354 | } |
4244 | 4355 | ||
4245 | return validate_req_change_req_state(mdev, p->block_id, sector, | 4356 | return validate_req_change_req_state(mdev, p->block_id, sector, |
@@ -4250,20 +4361,44 @@ static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4250 | { | 4361 | { |
4251 | struct p_block_ack *p = (struct p_block_ack *)h; | 4362 | struct p_block_ack *p = (struct p_block_ack *)h; |
4252 | sector_t sector = be64_to_cpu(p->sector); | 4363 | sector_t sector = be64_to_cpu(p->sector); |
4253 | 4364 | int size = be32_to_cpu(p->blksize); | |
4254 | if (__ratelimit(&drbd_ratelimit_state)) | 4365 | struct drbd_request *req; |
4255 | dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n"); | 4366 | struct bio_and_error m; |
4256 | 4367 | ||
4257 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); | 4368 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); |
4258 | 4369 | ||
4259 | if (is_syncer_block_id(p->block_id)) { | 4370 | if (is_syncer_block_id(p->block_id)) { |
4260 | int size = be32_to_cpu(p->blksize); | ||
4261 | dec_rs_pending(mdev); | 4371 | dec_rs_pending(mdev); |
4262 | drbd_rs_failed_io(mdev, sector, size); | 4372 | drbd_rs_failed_io(mdev, sector, size); |
4263 | return TRUE; | 4373 | return true; |
4264 | } | 4374 | } |
4265 | return validate_req_change_req_state(mdev, p->block_id, sector, | 4375 | |
4266 | _ack_id_to_req, __func__ , neg_acked); | 4376 | spin_lock_irq(&mdev->req_lock); |
4377 | req = _ack_id_to_req(mdev, p->block_id, sector); | ||
4378 | if (!req) { | ||
4379 | spin_unlock_irq(&mdev->req_lock); | ||
4380 | if (mdev->net_conf->wire_protocol == DRBD_PROT_A || | ||
4381 | mdev->net_conf->wire_protocol == DRBD_PROT_B) { | ||
4382 | /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs. | ||
4383 | The master bio might already be completed, therefore the | ||
4384 | request is no longer in the collision hash. | ||
4385 | => Do not try to validate block_id as request. */ | ||
4386 | /* In Protocol B we might already have got a P_RECV_ACK | ||
4387 | but then get a P_NEG_ACK afterwards. */ | ||
4388 | drbd_set_out_of_sync(mdev, sector, size); | ||
4389 | return true; | ||
4390 | } else { | ||
4391 | dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__, | ||
4392 | (void *)(unsigned long)p->block_id, (unsigned long long)sector); | ||
4393 | return false; | ||
4394 | } | ||
4395 | } | ||
4396 | __req_mod(req, neg_acked, &m); | ||
4397 | spin_unlock_irq(&mdev->req_lock); | ||
4398 | |||
4399 | if (m.bio) | ||
4400 | complete_master_bio(mdev, &m); | ||
4401 | return true; | ||
4267 | } | 4402 | } |
4268 | 4403 | ||
4269 | static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h) | 4404 | static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h) |
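
got_NegAck() stops going through validate_req_change_req_state() because a failed lookup is no longer automatically an error: protocol A has no P_WRITE_ACKs at all, and under protocol B a P_RECV_ACK may already have completed the request, so by the time the P_NEG_ACK arrives the request can legitimately have left the collision hash. The correct reaction then is to mark the range out of sync and let the next resync repair it; only protocol C still treats the missing request as a corrupt packet. Roughly (locking elided):

    req = _ack_id_to_req(mdev, p->block_id, sector);   /* under req_lock */
    if (!req) {
        if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
            mdev->net_conf->wire_protocol == DRBD_PROT_B) {
            drbd_set_out_of_sync(mdev, sector, size);  /* resync later */
            return true;
        }
        return false;        /* protocol C: really a corrupt packet */
    }
    __req_mod(req, neg_acked, &m);                     /* normal path */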
@@ -4294,11 +4429,20 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h) | |||
4294 | 4429 | ||
4295 | if (get_ldev_if_state(mdev, D_FAILED)) { | 4430 | if (get_ldev_if_state(mdev, D_FAILED)) { |
4296 | drbd_rs_complete_io(mdev, sector); | 4431 | drbd_rs_complete_io(mdev, sector); |
4297 | drbd_rs_failed_io(mdev, sector, size); | 4432 | switch (be16_to_cpu(h->command)) { |
4433 | case P_NEG_RS_DREPLY: | ||
4434 | drbd_rs_failed_io(mdev, sector, size); | ||
4435 | case P_RS_CANCEL: | ||
4436 | break; | ||
4437 | default: | ||
4438 | D_ASSERT(0); | ||
4439 | put_ldev(mdev); | ||
4440 | return false; | ||
4441 | } | ||
4298 | put_ldev(mdev); | 4442 | put_ldev(mdev); |
4299 | } | 4443 | } |
4300 | 4444 | ||
4301 | return TRUE; | 4445 | return true; |
4302 | } | 4446 | } |
4303 | 4447 | ||
4304 | static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) | 4448 | static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) |
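
got_NegRSDReply() now also serves the new P_RS_CANCEL packet (registered in the asender table below). Both complete the in-flight resync read via drbd_rs_complete_io(), but a cancel must not count the sectors as failed, hence the deliberate fallthrough: only P_NEG_RS_DREPLY executes drbd_rs_failed_io() before both cases break out.

    switch (be16_to_cpu(h->command)) {
    case P_NEG_RS_DREPLY:
        drbd_rs_failed_io(mdev, sector, size);
        /* fall through */
    case P_RS_CANCEL:
        break;               /* cancel: no failed-io accounting */
    default:
        /* protocol violation */
        ...
    }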
@@ -4307,7 +4451,14 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4307 | 4451 | ||
4308 | tl_release(mdev, p->barrier, be32_to_cpu(p->set_size)); | 4452 | tl_release(mdev, p->barrier, be32_to_cpu(p->set_size)); |
4309 | 4453 | ||
4310 | return TRUE; | 4454 | if (mdev->state.conn == C_AHEAD && |
4455 | atomic_read(&mdev->ap_in_flight) == 0 && | ||
4456 | !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) { | ||
4457 | mdev->start_resync_timer.expires = jiffies + HZ; | ||
4458 | add_timer(&mdev->start_resync_timer); | ||
4459 | } | ||
4460 | |||
4461 | return true; | ||
4311 | } | 4462 | } |
4312 | 4463 | ||
4313 | static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) | 4464 | static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) |
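
This is where a congested node finds its way back: while in C_AHEAD, writes are only announced as out of sync, and ap_in_flight tracks mirrored write data still on the wire. A barrier ack arriving with ap_in_flight == 0 means the send pipe has drained, so a one-shot timer, armed exactly once per epoch thanks to test_and_set_bit() on AHEAD_TO_SYNC_SOURCE, schedules the transition back to a resync source about a second later. The condition, annotated:

    if (mdev->state.conn == C_AHEAD &&
        atomic_read(&mdev->ap_in_flight) == 0 &&       /* pipe drained */
        !test_and_set_bit(AHEAD_TO_SYNC_SOURCE,
                          &mdev->current_epoch->flags)) { /* arm once */
        mdev->start_resync_timer.expires = jiffies + HZ;  /* ~1 s grace */
        add_timer(&mdev->start_resync_timer);
    }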
@@ -4328,12 +4479,18 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) | |||
4328 | ov_oos_print(mdev); | 4479 | ov_oos_print(mdev); |
4329 | 4480 | ||
4330 | if (!get_ldev(mdev)) | 4481 | if (!get_ldev(mdev)) |
4331 | return TRUE; | 4482 | return true; |
4332 | 4483 | ||
4333 | drbd_rs_complete_io(mdev, sector); | 4484 | drbd_rs_complete_io(mdev, sector); |
4334 | dec_rs_pending(mdev); | 4485 | dec_rs_pending(mdev); |
4335 | 4486 | ||
4336 | if (--mdev->ov_left == 0) { | 4487 | --mdev->ov_left; |
4488 | |||
4489 | /* let's advance progress step marks only for every other megabyte */ | ||
4490 | if ((mdev->ov_left & 0x200) == 0x200) | ||
4491 | drbd_advance_rs_marks(mdev, mdev->ov_left); | ||
4492 | |||
4493 | if (mdev->ov_left == 0) { | ||
4337 | w = kmalloc(sizeof(*w), GFP_NOIO); | 4494 | w = kmalloc(sizeof(*w), GFP_NOIO); |
4338 | if (w) { | 4495 | if (w) { |
4339 | w->cb = w_ov_finished; | 4496 | w->cb = w_ov_finished; |
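
Online-verify replies now also feed the resync progress marks. ov_left counts remaining BM_BLOCK_SIZE (4 KiB) blocks, so bit 9 of the counter changes value every 0x200 = 512 blocks, i.e. every 2 MiB of verified data, which is what the "every other megabyte" comment refers to:

    /* ov_left is in 4 KiB bitmap-block units:
     *   0x200 blocks * 4 KiB/block = 2 MiB between step marks */
    if ((mdev->ov_left & 0x200) == 0x200)
        drbd_advance_rs_marks(mdev, mdev->ov_left);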
@@ -4345,12 +4502,12 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) | |||
4345 | } | 4502 | } |
4346 | } | 4503 | } |
4347 | put_ldev(mdev); | 4504 | put_ldev(mdev); |
4348 | return TRUE; | 4505 | return true; |
4349 | } | 4506 | } |
4350 | 4507 | ||
4351 | static int got_skip(struct drbd_conf *mdev, struct p_header80 *h) | 4508 | static int got_skip(struct drbd_conf *mdev, struct p_header80 *h) |
4352 | { | 4509 | { |
4353 | return TRUE; | 4510 | return true; |
4354 | } | 4511 | } |
4355 | 4512 | ||
4356 | struct asender_cmd { | 4513 | struct asender_cmd { |
@@ -4378,6 +4535,7 @@ static struct asender_cmd *get_asender_cmd(int cmd) | |||
4378 | [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, | 4535 | [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, |
4379 | [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, | 4536 | [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, |
4380 | [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, | 4537 | [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, |
4538 | [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply}, | ||
4381 | [P_MAX_CMD] = { 0, NULL }, | 4539 | [P_MAX_CMD] = { 0, NULL }, |
4382 | }; | 4540 | }; |
4383 | if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) | 4541 | if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) |
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index ad3fc6228f2..5c0c8be1bb0 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
@@ -140,9 +140,14 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev, | |||
140 | struct hlist_node *n; | 140 | struct hlist_node *n; |
141 | struct hlist_head *slot; | 141 | struct hlist_head *slot; |
142 | 142 | ||
143 | /* before we can signal completion to the upper layers, | 143 | /* Before we can signal completion to the upper layers, |
144 | * we may need to close the current epoch */ | 144 | * we may need to close the current epoch. |
145 | * We can skip this, if this request has not even been sent, because we | ||
146 | * did not have a fully established connection yet/anymore, during | ||
147 | * bitmap exchange, or while we are C_AHEAD due to congestion policy. | ||
148 | */ | ||
145 | if (mdev->state.conn >= C_CONNECTED && | 149 | if (mdev->state.conn >= C_CONNECTED && |
150 | (s & RQ_NET_SENT) != 0 && | ||
146 | req->epoch == mdev->newest_tle->br_number) | 151 | req->epoch == mdev->newest_tle->br_number) |
147 | queue_barrier(mdev); | 152 | queue_barrier(mdev); |
148 | 153 | ||
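
The epoch-close condition gains a third clause: a write that was never actually sent (no fully established connection, bitmap exchange still running, or C_AHEAD because of the congestion policy) must not queue a barrier when it completes locally. All three conditions in one place, with s caching req->rq_state as in the hunk above:

    if (mdev->state.conn >= C_CONNECTED &&         /* link established */
        (s & RQ_NET_SENT) != 0 &&                  /* really went out */
        req->epoch == mdev->newest_tle->br_number) /* current epoch */
        queue_barrier(mdev);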
@@ -440,7 +445,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
440 | req->rq_state |= RQ_LOCAL_COMPLETED; | 445 | req->rq_state |= RQ_LOCAL_COMPLETED; |
441 | req->rq_state &= ~RQ_LOCAL_PENDING; | 446 | req->rq_state &= ~RQ_LOCAL_PENDING; |
442 | 447 | ||
443 | __drbd_chk_io_error(mdev, FALSE); | 448 | __drbd_chk_io_error(mdev, false); |
444 | _req_may_be_done_not_susp(req, m); | 449 | _req_may_be_done_not_susp(req, m); |
445 | put_ldev(mdev); | 450 | put_ldev(mdev); |
446 | break; | 451 | break; |
@@ -461,7 +466,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
461 | 466 | ||
462 | D_ASSERT(!(req->rq_state & RQ_NET_MASK)); | 467 | D_ASSERT(!(req->rq_state & RQ_NET_MASK)); |
463 | 468 | ||
464 | __drbd_chk_io_error(mdev, FALSE); | 469 | __drbd_chk_io_error(mdev, false); |
465 | put_ldev(mdev); | 470 | put_ldev(mdev); |
466 | 471 | ||
467 | /* no point in retrying if there is no good remote data, | 472 | /* no point in retrying if there is no good remote data, |
@@ -545,6 +550,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
545 | 550 | ||
546 | break; | 551 | break; |
547 | 552 | ||
553 | case queue_for_send_oos: | ||
554 | req->rq_state |= RQ_NET_QUEUED; | ||
555 | req->w.cb = w_send_oos; | ||
556 | drbd_queue_work(&mdev->data.work, &req->w); | ||
557 | break; | ||
558 | |||
559 | case oos_handed_to_network: | ||
560 | /* actually the same */ | ||
548 | case send_canceled: | 561 | case send_canceled: |
549 | /* treat it the same */ | 562 | /* treat it the same */ |
550 | case send_failed: | 563 | case send_failed: |
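
Two new request events support the Ahead/Behind mode: queue_for_send_oos hands a write to the worker as a bare out-of-sync notification (w_send_oos) instead of the data itself, and oos_handed_to_network then finishes it like a canceled send, since no ack will ever come back for it. The producing side, as wired up in drbd_make_request_common() further down; judging by its use as a condition, drbd_set_out_of_sync() returns the number of bits it newly set, so nothing is queued when the range was already dirty:

    if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
        _req_mod(req, queue_for_send_oos);    /* worker runs w_send_oos */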
@@ -558,6 +571,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
558 | 571 | ||
559 | case handed_over_to_network: | 572 | case handed_over_to_network: |
560 | /* assert something? */ | 573 | /* assert something? */ |
574 | if (bio_data_dir(req->master_bio) == WRITE) | ||
575 | atomic_add(req->size>>9, &mdev->ap_in_flight); | ||
576 | |||
561 | if (bio_data_dir(req->master_bio) == WRITE && | 577 | if (bio_data_dir(req->master_bio) == WRITE && |
562 | mdev->net_conf->wire_protocol == DRBD_PROT_A) { | 578 | mdev->net_conf->wire_protocol == DRBD_PROT_A) { |
563 | /* this is what is dangerous about protocol A: | 579 | /* this is what is dangerous about protocol A: |
@@ -591,6 +607,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
591 | dec_ap_pending(mdev); | 607 | dec_ap_pending(mdev); |
592 | req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); | 608 | req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); |
593 | req->rq_state |= RQ_NET_DONE; | 609 | req->rq_state |= RQ_NET_DONE; |
610 | if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE) | ||
611 | atomic_sub(req->size>>9, &mdev->ap_in_flight); | ||
612 | |||
594 | /* if it is still queued, we may not complete it here. | 613 | /* if it is still queued, we may not complete it here. |
595 | * it will be canceled soon. */ | 614 | * it will be canceled soon. */ |
596 | if (!(req->rq_state & RQ_NET_QUEUED)) | 615 | if (!(req->rq_state & RQ_NET_QUEUED)) |
@@ -628,14 +647,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
628 | req->rq_state |= RQ_NET_OK; | 647 | req->rq_state |= RQ_NET_OK; |
629 | D_ASSERT(req->rq_state & RQ_NET_PENDING); | 648 | D_ASSERT(req->rq_state & RQ_NET_PENDING); |
630 | dec_ap_pending(mdev); | 649 | dec_ap_pending(mdev); |
650 | atomic_sub(req->size>>9, &mdev->ap_in_flight); | ||
631 | req->rq_state &= ~RQ_NET_PENDING; | 651 | req->rq_state &= ~RQ_NET_PENDING; |
632 | _req_may_be_done_not_susp(req, m); | 652 | _req_may_be_done_not_susp(req, m); |
633 | break; | 653 | break; |
634 | 654 | ||
635 | case neg_acked: | 655 | case neg_acked: |
636 | /* assert something? */ | 656 | /* assert something? */ |
637 | if (req->rq_state & RQ_NET_PENDING) | 657 | if (req->rq_state & RQ_NET_PENDING) { |
638 | dec_ap_pending(mdev); | 658 | dec_ap_pending(mdev); |
659 | atomic_sub(req->size>>9, &mdev->ap_in_flight); | ||
660 | } | ||
639 | req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); | 661 | req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); |
640 | 662 | ||
641 | req->rq_state |= RQ_NET_DONE; | 663 | req->rq_state |= RQ_NET_DONE; |
@@ -690,8 +712,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
690 | dev_err(DEV, "FIXME (barrier_acked but pending)\n"); | 712 | dev_err(DEV, "FIXME (barrier_acked but pending)\n"); |
691 | list_move(&req->tl_requests, &mdev->out_of_sequence_requests); | 713 | list_move(&req->tl_requests, &mdev->out_of_sequence_requests); |
692 | } | 714 | } |
693 | D_ASSERT(req->rq_state & RQ_NET_SENT); | 715 | if ((req->rq_state & RQ_NET_MASK) != 0) { |
694 | req->rq_state |= RQ_NET_DONE; | 716 | req->rq_state |= RQ_NET_DONE; |
717 | if (mdev->net_conf->wire_protocol == DRBD_PROT_A) | ||
718 | atomic_sub(req->size>>9, &mdev->ap_in_flight); | ||
719 | } | ||
695 | _req_may_be_done(req, m); /* Allowed while state.susp */ | 720 | _req_may_be_done(req, m); /* Allowed while state.susp */ |
696 | break; | 721 | break; |
697 | 722 | ||
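
ap_in_flight is the sector-granular counter behind the congestion-fill check and the drain detection in got_BarrierAck(): every path that puts write data on the wire adds req->size>>9 sectors, and every terminal event (ack, neg-ack, net_done, and, for protocol A, which gets no write acks, the barrier ack) subtracts them again, so the counter reads zero exactly when no mirrored write data is outstanding:

    /* handed_over_to_network, writes only: */
    atomic_add(req->size >> 9, &mdev->ap_in_flight);

    /* on the matching ack / neg-ack / net-done / barrier-ack path: */
    atomic_sub(req->size >> 9, &mdev->ap_in_flight);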
@@ -738,14 +763,14 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s | |||
738 | return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr); | 763 | return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr); |
739 | } | 764 | } |
740 | 765 | ||
741 | static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) | 766 | static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) |
742 | { | 767 | { |
743 | const int rw = bio_rw(bio); | 768 | const int rw = bio_rw(bio); |
744 | const int size = bio->bi_size; | 769 | const int size = bio->bi_size; |
745 | const sector_t sector = bio->bi_sector; | 770 | const sector_t sector = bio->bi_sector; |
746 | struct drbd_tl_epoch *b = NULL; | 771 | struct drbd_tl_epoch *b = NULL; |
747 | struct drbd_request *req; | 772 | struct drbd_request *req; |
748 | int local, remote; | 773 | int local, remote, send_oos = 0; |
749 | int err = -EIO; | 774 | int err = -EIO; |
750 | int ret = 0; | 775 | int ret = 0; |
751 | 776 | ||
@@ -759,6 +784,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) | |||
759 | bio_endio(bio, -ENOMEM); | 784 | bio_endio(bio, -ENOMEM); |
760 | return 0; | 785 | return 0; |
761 | } | 786 | } |
787 | req->start_time = start_time; | ||
762 | 788 | ||
763 | local = get_ldev(mdev); | 789 | local = get_ldev(mdev); |
764 | if (!local) { | 790 | if (!local) { |
@@ -808,9 +834,9 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) | |||
808 | drbd_al_begin_io(mdev, sector); | 834 | drbd_al_begin_io(mdev, sector); |
809 | } | 835 | } |
810 | 836 | ||
811 | remote = remote && (mdev->state.pdsk == D_UP_TO_DATE || | 837 | remote = remote && drbd_should_do_remote(mdev->state); |
812 | (mdev->state.pdsk == D_INCONSISTENT && | 838 | send_oos = rw == WRITE && drbd_should_send_oos(mdev->state); |
813 | mdev->state.conn >= C_CONNECTED)); | 839 | D_ASSERT(!(remote && send_oos)); |
814 | 840 | ||
815 | if (!(local || remote) && !is_susp(mdev->state)) { | 841 | if (!(local || remote) && !is_susp(mdev->state)) { |
816 | if (__ratelimit(&drbd_ratelimit_state)) | 842 | if (__ratelimit(&drbd_ratelimit_state)) |
@@ -824,7 +850,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) | |||
824 | * but there is a race between testing the bit and pointer outside the | 850 | * but there is a race between testing the bit and pointer outside the |
825 | * spinlock, and grabbing the spinlock. | 851 | * spinlock, and grabbing the spinlock. |
826 | * if we lost that race, we retry. */ | 852 | * if we lost that race, we retry. */ |
827 | if (rw == WRITE && remote && | 853 | if (rw == WRITE && (remote || send_oos) && |
828 | mdev->unused_spare_tle == NULL && | 854 | mdev->unused_spare_tle == NULL && |
829 | test_bit(CREATE_BARRIER, &mdev->flags)) { | 855 | test_bit(CREATE_BARRIER, &mdev->flags)) { |
830 | allocate_barrier: | 856 | allocate_barrier: |
@@ -842,18 +868,19 @@ allocate_barrier: | |||
842 | if (is_susp(mdev->state)) { | 868 | if (is_susp(mdev->state)) { |
843 | /* If we got suspended, use the retry mechanism of | 869 | /* If we got suspended, use the retry mechanism of |
844 | generic_make_request() to restart processing of this | 870 | generic_make_request() to restart processing of this |
845 | bio. In the next call to drbd_make_request_26 | 871 | bio. In the next call to drbd_make_request |
846 | we sleep in inc_ap_bio() */ | 872 | we sleep in inc_ap_bio() */ |
847 | ret = 1; | 873 | ret = 1; |
848 | spin_unlock_irq(&mdev->req_lock); | 874 | spin_unlock_irq(&mdev->req_lock); |
849 | goto fail_free_complete; | 875 | goto fail_free_complete; |
850 | } | 876 | } |
851 | 877 | ||
852 | if (remote) { | 878 | if (remote || send_oos) { |
853 | remote = (mdev->state.pdsk == D_UP_TO_DATE || | 879 | remote = drbd_should_do_remote(mdev->state); |
854 | (mdev->state.pdsk == D_INCONSISTENT && | 880 | send_oos = rw == WRITE && drbd_should_send_oos(mdev->state); |
855 | mdev->state.conn >= C_CONNECTED)); | 881 | D_ASSERT(!(remote && send_oos)); |
856 | if (!remote) | 882 | |
883 | if (!(remote || send_oos)) | ||
857 | dev_warn(DEV, "lost connection while grabbing the req_lock!\n"); | 884 | dev_warn(DEV, "lost connection while grabbing the req_lock!\n"); |
858 | if (!(local || remote)) { | 885 | if (!(local || remote)) { |
859 | dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); | 886 | dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); |
@@ -866,7 +893,7 @@ allocate_barrier: | |||
866 | mdev->unused_spare_tle = b; | 893 | mdev->unused_spare_tle = b; |
867 | b = NULL; | 894 | b = NULL; |
868 | } | 895 | } |
869 | if (rw == WRITE && remote && | 896 | if (rw == WRITE && (remote || send_oos) && |
870 | mdev->unused_spare_tle == NULL && | 897 | mdev->unused_spare_tle == NULL && |
871 | test_bit(CREATE_BARRIER, &mdev->flags)) { | 898 | test_bit(CREATE_BARRIER, &mdev->flags)) { |
872 | /* someone closed the current epoch | 899 | /* someone closed the current epoch |
@@ -889,7 +916,7 @@ allocate_barrier: | |||
889 | * barrier packet. To get the write ordering right, we only have to | 916 | * barrier packet. To get the write ordering right, we only have to |
890 | * make sure that, if this is a write request and it triggered a | 917 | * make sure that, if this is a write request and it triggered a |
891 | * barrier packet, this request is queued within the same spinlock. */ | 918 | * barrier packet, this request is queued within the same spinlock. */ |
892 | if (remote && mdev->unused_spare_tle && | 919 | if ((remote || send_oos) && mdev->unused_spare_tle && |
893 | test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) { | 920 | test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) { |
894 | _tl_add_barrier(mdev, mdev->unused_spare_tle); | 921 | _tl_add_barrier(mdev, mdev->unused_spare_tle); |
895 | mdev->unused_spare_tle = NULL; | 922 | mdev->unused_spare_tle = NULL; |
@@ -937,6 +964,34 @@ allocate_barrier: | |||
937 | ? queue_for_net_write | 964 | ? queue_for_net_write |
938 | : queue_for_net_read); | 965 | : queue_for_net_read); |
939 | } | 966 | } |
967 | if (send_oos && drbd_set_out_of_sync(mdev, sector, size)) | ||
968 | _req_mod(req, queue_for_send_oos); | ||
969 | |||
970 | if (remote && | ||
971 | mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) { | ||
972 | int congested = 0; | ||
973 | |||
974 | if (mdev->net_conf->cong_fill && | ||
975 | atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) { | ||
976 | dev_info(DEV, "Congestion-fill threshold reached\n"); | ||
977 | congested = 1; | ||
978 | } | ||
979 | |||
980 | if (mdev->act_log->used >= mdev->net_conf->cong_extents) { | ||
981 | dev_info(DEV, "Congestion-extents threshold reached\n"); | ||
982 | congested = 1; | ||
983 | } | ||
984 | |||
985 | if (congested) { | ||
986 | queue_barrier(mdev); /* last barrier, after mirrored writes */ | ||
987 | |||
988 | if (mdev->net_conf->on_congestion == OC_PULL_AHEAD) | ||
989 | _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL); | ||
990 | else /*mdev->net_conf->on_congestion == OC_DISCONNECT */ | ||
991 | _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL); | ||
992 | } | ||
993 | } | ||
994 | |||
940 | spin_unlock_irq(&mdev->req_lock); | 995 | spin_unlock_irq(&mdev->req_lock); |
941 | kfree(b); /* if someone else has beaten us to it... */ | 996 | kfree(b); /* if someone else has beaten us to it... */ |
942 | 997 | ||
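
The congestion policy proper: for proto >= 96, when on_congestion is anything but OC_BLOCK, a write that finds either threshold exceeded (sectors in flight above cong_fill, or activity-log extents in use above cong_extents) closes the epoch with one final barrier and then either pulls ahead (C_AHEAD: keep writing locally, mirror only out-of-sync information) or disconnects. The decision, condensed:

    struct net_conf *nc = mdev->net_conf;    /* shorthand for the sketch */
    int congested =
        (nc->cong_fill &&
         atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) ||
        mdev->act_log->used >= nc->cong_extents;

    if (congested) {
        queue_barrier(mdev);   /* last barrier, after the mirrored writes */
        if (nc->on_congestion == OC_PULL_AHEAD)
            _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
        else                   /* OC_DISCONNECT */
            _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
    }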
@@ -949,9 +1004,9 @@ allocate_barrier: | |||
949 | * stable storage, and this is a WRITE, we may not even submit | 1004 | * stable storage, and this is a WRITE, we may not even submit |
950 | * this bio. */ | 1005 | * this bio. */ |
951 | if (get_ldev(mdev)) { | 1006 | if (get_ldev(mdev)) { |
952 | if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR | 1007 | if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR |
953 | : rw == READ ? DRBD_FAULT_DT_RD | 1008 | : rw == READ ? DRBD_FAULT_DT_RD |
954 | : DRBD_FAULT_DT_RA)) | 1009 | : DRBD_FAULT_DT_RA)) |
955 | bio_endio(req->private_bio, -EIO); | 1010 | bio_endio(req->private_bio, -EIO); |
956 | else | 1011 | else |
957 | generic_make_request(req->private_bio); | 1012 | generic_make_request(req->private_bio); |
@@ -1018,16 +1073,19 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write) | |||
1018 | return 0; | 1073 | return 0; |
1019 | } | 1074 | } |
1020 | 1075 | ||
1021 | int drbd_make_request_26(struct request_queue *q, struct bio *bio) | 1076 | int drbd_make_request(struct request_queue *q, struct bio *bio) |
1022 | { | 1077 | { |
1023 | unsigned int s_enr, e_enr; | 1078 | unsigned int s_enr, e_enr; |
1024 | struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; | 1079 | struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; |
1080 | unsigned long start_time; | ||
1025 | 1081 | ||
1026 | if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) { | 1082 | if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) { |
1027 | bio_endio(bio, -EPERM); | 1083 | bio_endio(bio, -EPERM); |
1028 | return 0; | 1084 | return 0; |
1029 | } | 1085 | } |
1030 | 1086 | ||
1087 | start_time = jiffies; | ||
1088 | |||
1031 | /* | 1089 | /* |
1032 | * what we "blindly" assume: | 1090 | * what we "blindly" assume: |
1033 | */ | 1091 | */ |
@@ -1042,12 +1100,12 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1042 | 1100 | ||
1043 | if (likely(s_enr == e_enr)) { | 1101 | if (likely(s_enr == e_enr)) { |
1044 | inc_ap_bio(mdev, 1); | 1102 | inc_ap_bio(mdev, 1); |
1045 | return drbd_make_request_common(mdev, bio); | 1103 | return drbd_make_request_common(mdev, bio, start_time); |
1046 | } | 1104 | } |
1047 | 1105 | ||
1048 | /* can this bio be split generically? | 1106 | /* can this bio be split generically? |
1049 | * Maybe add our own split-arbitrary-bios function. */ | 1107 | * Maybe add our own split-arbitrary-bios function. */ |
1050 | if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_SEGMENT_SIZE) { | 1108 | if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) { |
1051 | /* rather error out here than BUG in bio_split */ | 1109 | /* rather error out here than BUG in bio_split */ |
1052 | dev_err(DEV, "bio would need to, but cannot, be split: " | 1110 | dev_err(DEV, "bio would need to, but cannot, be split: " |
1053 | "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n", | 1111 | "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n", |
@@ -1069,11 +1127,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1069 | const int sps = 1 << HT_SHIFT; /* sectors per slot */ | 1127 | const int sps = 1 << HT_SHIFT; /* sectors per slot */ |
1070 | const int mask = sps - 1; | 1128 | const int mask = sps - 1; |
1071 | const sector_t first_sectors = sps - (sect & mask); | 1129 | const sector_t first_sectors = sps - (sect & mask); |
1072 | bp = bio_split(bio, | 1130 | bp = bio_split(bio, first_sectors); |
1073 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) | ||
1074 | bio_split_pool, | ||
1075 | #endif | ||
1076 | first_sectors); | ||
1077 | 1131 | ||
1078 | /* we need to get a "reference count" (ap_bio_cnt) | 1132 | /* we need to get a "reference count" (ap_bio_cnt) |
1079 | * to avoid races with the disconnect/reconnect/suspend code. | 1133 | * to avoid races with the disconnect/reconnect/suspend code. |
@@ -1084,10 +1138,10 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1084 | 1138 | ||
1085 | D_ASSERT(e_enr == s_enr + 1); | 1139 | D_ASSERT(e_enr == s_enr + 1); |
1086 | 1140 | ||
1087 | while (drbd_make_request_common(mdev, &bp->bio1)) | 1141 | while (drbd_make_request_common(mdev, &bp->bio1, start_time)) |
1088 | inc_ap_bio(mdev, 1); | 1142 | inc_ap_bio(mdev, 1); |
1089 | 1143 | ||
1090 | while (drbd_make_request_common(mdev, &bp->bio2)) | 1144 | while (drbd_make_request_common(mdev, &bp->bio2, start_time)) |
1091 | inc_ap_bio(mdev, 1); | 1145 | inc_ap_bio(mdev, 1); |
1092 | 1146 | ||
1093 | dec_ap_bio(mdev); | 1147 | dec_ap_bio(mdev); |
@@ -1098,7 +1152,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1098 | } | 1152 | } |
1099 | 1153 | ||
1100 | /* This is called by bio_add_page(). With this function we reduce | 1154 | /* This is called by bio_add_page(). With this function we reduce |
1101 | * the number of BIOs that span over multiple DRBD_MAX_SEGMENT_SIZEs | 1155 | * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs |
1102 | * units (was AL_EXTENTs). | 1156 | * units (was AL_EXTENTs). |
1103 | * | 1157 | * |
1104 | * we do the calculation within the lower 32bit of the byte offsets, | 1158 | * we do the calculation within the lower 32bit of the byte offsets, |
@@ -1108,7 +1162,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1108 | * As long as the BIO is empty we have to allow at least one bvec, | 1162 | * As long as the BIO is empty we have to allow at least one bvec, |
1109 | * regardless of size and offset. so the resulting bio may still | 1163 | * regardless of size and offset. so the resulting bio may still |
1110 | * cross extent boundaries. those are dealt with (bio_split) in | 1164 | * cross extent boundaries. those are dealt with (bio_split) in |
1111 | * drbd_make_request_26. | 1165 | * drbd_make_request. |
1112 | */ | 1166 | */ |
1113 | int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec) | 1167 | int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec) |
1114 | { | 1168 | { |
@@ -1118,8 +1172,8 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct | |||
1118 | unsigned int bio_size = bvm->bi_size; | 1172 | unsigned int bio_size = bvm->bi_size; |
1119 | int limit, backing_limit; | 1173 | int limit, backing_limit; |
1120 | 1174 | ||
1121 | limit = DRBD_MAX_SEGMENT_SIZE | 1175 | limit = DRBD_MAX_BIO_SIZE |
1122 | - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size); | 1176 | - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size); |
1123 | if (limit < 0) | 1177 | if (limit < 0) |
1124 | limit = 0; | 1178 | limit = 0; |
1125 | if (bio_size == 0) { | 1179 | if (bio_size == 0) { |
@@ -1136,3 +1190,42 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct | |||
1136 | } | 1190 | } |
1137 | return limit; | 1191 | return limit; |
1138 | } | 1192 | } |
1193 | |||
1194 | void request_timer_fn(unsigned long data) | ||
1195 | { | ||
1196 | struct drbd_conf *mdev = (struct drbd_conf *) data; | ||
1197 | struct drbd_request *req; /* oldest request */ | ||
1198 | struct list_head *le; | ||
1199 | unsigned long et = 0; /* effective timeout = ko_count * timeout */ | ||
1200 | |||
1201 | if (get_net_conf(mdev)) { | ||
1202 | et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count; | ||
1203 | put_net_conf(mdev); | ||
1204 | } | ||
1205 | if (!et || mdev->state.conn < C_WF_REPORT_PARAMS) | ||
1206 | return; /* Recurring timer stopped */ | ||
1207 | |||
1208 | spin_lock_irq(&mdev->req_lock); | ||
1209 | le = &mdev->oldest_tle->requests; | ||
1210 | if (list_empty(le)) { | ||
1211 | spin_unlock_irq(&mdev->req_lock); | ||
1212 | mod_timer(&mdev->request_timer, jiffies + et); | ||
1213 | return; | ||
1214 | } | ||
1215 | |||
1216 | le = le->prev; | ||
1217 | req = list_entry(le, struct drbd_request, tl_requests); | ||
1218 | if (time_is_before_eq_jiffies(req->start_time + et)) { | ||
1219 | if (req->rq_state & RQ_NET_PENDING) { | ||
1220 | dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n"); | ||
1221 | _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL); | ||
1222 | } else { | ||
1223 | dev_warn(DEV, "Local backing block device frozen?\n"); | ||
1224 | mod_timer(&mdev->request_timer, jiffies + et); | ||
1225 | } | ||
1226 | } else { | ||
1227 | mod_timer(&mdev->request_timer, req->start_time + et); | ||
1228 | } | ||
1229 | |||
1230 | spin_unlock_irq(&mdev->req_lock); | ||
1231 | } | ||
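
request_timer_fn() implements the new ko-count based watchdog. net_conf->timeout is stored in tenths of a second, so the effective limit is timeout * HZ/10 * ko_count jiffies; if ko_count is configured to zero, et becomes zero and the timer simply stops (the early return above). Only the oldest pending request is inspected: if it is over age and still waiting on the peer (RQ_NET_PENDING), the connection is declared C_TIMEOUT; otherwise the local backing device is suspected frozen and only a warning is logged. A worked example of the arithmetic:

    /* timeout = 60 (i.e. 6.0 s), ko_count = 7:
     *   et = 60 * HZ/10 * 7  ==  42 s worth of jiffies;
     * the timer then re-arms itself at req->start_time + et */
    et = mdev->net_conf->timeout * HZ/10 * mdev->net_conf->ko_count;

The timer must be armed once when the connection comes up, presumably via mod_timer(&mdev->request_timer, ...) near the connection setup; that call site is outside this excerpt.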
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index ab2bd09d54b..32e2c3e6a81 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h | |||
@@ -82,14 +82,16 @@ enum drbd_req_event { | |||
82 | to_be_submitted, | 82 | to_be_submitted, |
83 | 83 | ||
84 | /* XXX yes, now I am inconsistent... | 84 | /* XXX yes, now I am inconsistent... |
85 | * these two are not "events" but "actions" | 85 | * these are not "events" but "actions" |
86 | * oh, well... */ | 86 | * oh, well... */ |
87 | queue_for_net_write, | 87 | queue_for_net_write, |
88 | queue_for_net_read, | 88 | queue_for_net_read, |
89 | queue_for_send_oos, | ||
89 | 90 | ||
90 | send_canceled, | 91 | send_canceled, |
91 | send_failed, | 92 | send_failed, |
92 | handed_over_to_network, | 93 | handed_over_to_network, |
94 | oos_handed_to_network, | ||
93 | connection_lost_while_pending, | 95 | connection_lost_while_pending, |
94 | read_retry_remote_canceled, | 96 | read_retry_remote_canceled, |
95 | recv_acked_by_peer, | 97 | recv_acked_by_peer, |
@@ -289,7 +291,6 @@ static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev, | |||
289 | req->epoch = 0; | 291 | req->epoch = 0; |
290 | req->sector = bio_src->bi_sector; | 292 | req->sector = bio_src->bi_sector; |
291 | req->size = bio_src->bi_size; | 293 | req->size = bio_src->bi_size; |
292 | req->start_time = jiffies; | ||
293 | INIT_HLIST_NODE(&req->colision); | 294 | INIT_HLIST_NODE(&req->colision); |
294 | INIT_LIST_HEAD(&req->tl_requests); | 295 | INIT_LIST_HEAD(&req->tl_requests); |
295 | INIT_LIST_HEAD(&req->w.list); | 296 | INIT_LIST_HEAD(&req->w.list); |
@@ -321,6 +322,7 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
321 | struct bio_and_error *m); | 322 | struct bio_and_error *m); |
322 | extern void complete_master_bio(struct drbd_conf *mdev, | 323 | extern void complete_master_bio(struct drbd_conf *mdev, |
323 | struct bio_and_error *m); | 324 | struct bio_and_error *m); |
325 | extern void request_timer_fn(unsigned long data); | ||
324 | 326 | ||
325 | /* use this if you don't want to deal with calling complete_master_bio() | 327 | /* use this if you don't want to deal with calling complete_master_bio() |
326 | * outside the spinlock, e.g. when walking some list on cleanup. */ | 328 | * outside the spinlock, e.g. when walking some list on cleanup. */ |
@@ -338,23 +340,43 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what) | |||
338 | return rv; | 340 | return rv; |
339 | } | 341 | } |
340 | 342 | ||
341 | /* completion of master bio is outside of spinlock. | 343 | /* completion of master bio is outside of our spinlock. |
342 | * If you need it irqsave, do it your self! | 344 | * We still may or may not be inside some irqs disabled section |
343 | * Which means: don't use from bio endio callback. */ | 345 | * of the lower level driver completion callback, so we need to |
346 | * spin_lock_irqsave here. */ | ||
344 | static inline int req_mod(struct drbd_request *req, | 347 | static inline int req_mod(struct drbd_request *req, |
345 | enum drbd_req_event what) | 348 | enum drbd_req_event what) |
346 | { | 349 | { |
350 | unsigned long flags; | ||
347 | struct drbd_conf *mdev = req->mdev; | 351 | struct drbd_conf *mdev = req->mdev; |
348 | struct bio_and_error m; | 352 | struct bio_and_error m; |
349 | int rv; | 353 | int rv; |
350 | 354 | ||
351 | spin_lock_irq(&mdev->req_lock); | 355 | spin_lock_irqsave(&mdev->req_lock, flags); |
352 | rv = __req_mod(req, what, &m); | 356 | rv = __req_mod(req, what, &m); |
353 | spin_unlock_irq(&mdev->req_lock); | 357 | spin_unlock_irqrestore(&mdev->req_lock, flags); |
354 | 358 | ||
355 | if (m.bio) | 359 | if (m.bio) |
356 | complete_master_bio(mdev, &m); | 360 | complete_master_bio(mdev, &m); |
357 | 361 | ||
358 | return rv; | 362 | return rv; |
359 | } | 363 | } |
364 | |||
365 | static inline bool drbd_should_do_remote(union drbd_state s) | ||
366 | { | ||
367 | return s.pdsk == D_UP_TO_DATE || | ||
368 | (s.pdsk >= D_INCONSISTENT && | ||
369 | s.conn >= C_WF_BITMAP_T && | ||
370 | s.conn < C_AHEAD); | ||
371 | /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T. | ||
372 | That is equivalent since before 96 IO was frozen in the C_WF_BITMAP* | ||
373 | states. */ | ||
374 | } | ||
375 | static inline bool drbd_should_send_oos(union drbd_state s) | ||
376 | { | ||
377 | return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S; | ||
378 | /* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary | ||
379 | since we enter state C_AHEAD only if proto >= 96 */ | ||
380 | } | ||
381 | |||
360 | #endif | 382 | #endif |
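
The two new inline predicates centralize the "what does the peer get for this write" decision that drbd_make_request_common() previously open-coded, and they are disjoint by construction (hence the D_ASSERT at the call sites): drbd_should_do_remote() covers the states where the data itself is mirrored, drbd_should_send_oos() covers C_AHEAD and C_WF_BITMAP_S, where only the out-of-sync information is sent. Typical call site, as used in drbd_req.c above:

    remote   = drbd_should_do_remote(mdev->state);
    send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
    D_ASSERT(!(remote && send_oos));     /* never both at once */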
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c index 85179e1fb50..c44a2a60277 100644 --- a/drivers/block/drbd/drbd_strings.c +++ b/drivers/block/drbd/drbd_strings.c | |||
@@ -48,6 +48,8 @@ static const char *drbd_conn_s_names[] = { | |||
48 | [C_PAUSED_SYNC_T] = "PausedSyncT", | 48 | [C_PAUSED_SYNC_T] = "PausedSyncT", |
49 | [C_VERIFY_S] = "VerifyS", | 49 | [C_VERIFY_S] = "VerifyS", |
50 | [C_VERIFY_T] = "VerifyT", | 50 | [C_VERIFY_T] = "VerifyT", |
51 | [C_AHEAD] = "Ahead", | ||
52 | [C_BEHIND] = "Behind", | ||
51 | }; | 53 | }; |
52 | 54 | ||
53 | static const char *drbd_role_s_names[] = { | 55 | static const char *drbd_role_s_names[] = { |
@@ -92,7 +94,7 @@ static const char *drbd_state_sw_errors[] = { | |||
92 | const char *drbd_conn_str(enum drbd_conns s) | 94 | const char *drbd_conn_str(enum drbd_conns s) |
93 | { | 95 | { |
94 | /* enums are unsigned... */ | 96 | /* enums are unsigned... */ |
95 | return s > C_PAUSED_SYNC_T ? "TOO_LARGE" : drbd_conn_s_names[s]; | 97 | return s > C_BEHIND ? "TOO_LARGE" : drbd_conn_s_names[s]; |
96 | } | 98 | } |
97 | 99 | ||
98 | const char *drbd_role_str(enum drbd_role s) | 100 | const char *drbd_role_str(enum drbd_role s) |
@@ -105,7 +107,7 @@ const char *drbd_disk_str(enum drbd_disk_state s) | |||
105 | return s > D_UP_TO_DATE ? "TOO_LARGE" : drbd_disk_s_names[s]; | 107 | return s > D_UP_TO_DATE ? "TOO_LARGE" : drbd_disk_s_names[s]; |
106 | } | 108 | } |
107 | 109 | ||
108 | const char *drbd_set_st_err_str(enum drbd_state_ret_codes err) | 110 | const char *drbd_set_st_err_str(enum drbd_state_rv err) |
109 | { | 111 | { |
110 | return err <= SS_AFTER_LAST_ERROR ? "TOO_SMALL" : | 112 | return err <= SS_AFTER_LAST_ERROR ? "TOO_SMALL" : |
111 | err > SS_TWO_PRIMARIES ? "TOO_LARGE" | 113 | err > SS_TWO_PRIMARIES ? "TOO_LARGE" |
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index e027446590d..f7e6c92f8d0 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c | |||
@@ -39,18 +39,17 @@ | |||
39 | #include "drbd_req.h" | 39 | #include "drbd_req.h" |
40 | 40 | ||
41 | static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel); | 41 | static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel); |
42 | static int w_make_resync_request(struct drbd_conf *mdev, | ||
43 | struct drbd_work *w, int cancel); | ||
42 | 44 | ||
43 | 45 | ||
44 | 46 | ||
45 | /* defined here: | 47 | /* endio handlers: |
46 | drbd_md_io_complete | 48 | * drbd_md_io_complete (defined here) |
47 | drbd_endio_sec | 49 | * drbd_endio_pri (defined here) |
48 | drbd_endio_pri | 50 | * drbd_endio_sec (defined here) |
49 | 51 | * bm_async_io_complete (defined in drbd_bitmap.c) | |
50 | * more endio handlers: | 52 | * |
51 | atodb_endio in drbd_actlog.c | ||
52 | drbd_bm_async_io_complete in drbd_bitmap.c | ||
53 | |||
54 | * For all these callbacks, note the following: | 53 | * For all these callbacks, note the following: |
55 | * The callbacks will be called in irq context by the IDE drivers, | 54 | * The callbacks will be called in irq context by the IDE drivers, |
56 | * and in Softirqs/Tasklets/BH context by the SCSI drivers. | 55 | * and in Softirqs/Tasklets/BH context by the SCSI drivers. |
@@ -94,7 +93,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local) | |||
94 | if (list_empty(&mdev->read_ee)) | 93 | if (list_empty(&mdev->read_ee)) |
95 | wake_up(&mdev->ee_wait); | 94 | wake_up(&mdev->ee_wait); |
96 | if (test_bit(__EE_WAS_ERROR, &e->flags)) | 95 | if (test_bit(__EE_WAS_ERROR, &e->flags)) |
97 | __drbd_chk_io_error(mdev, FALSE); | 96 | __drbd_chk_io_error(mdev, false); |
98 | spin_unlock_irqrestore(&mdev->req_lock, flags); | 97 | spin_unlock_irqrestore(&mdev->req_lock, flags); |
99 | 98 | ||
100 | drbd_queue_work(&mdev->data.work, &e->w); | 99 | drbd_queue_work(&mdev->data.work, &e->w); |
@@ -137,7 +136,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo | |||
137 | : list_empty(&mdev->active_ee); | 136 | : list_empty(&mdev->active_ee); |
138 | 137 | ||
139 | if (test_bit(__EE_WAS_ERROR, &e->flags)) | 138 | if (test_bit(__EE_WAS_ERROR, &e->flags)) |
140 | __drbd_chk_io_error(mdev, FALSE); | 139 | __drbd_chk_io_error(mdev, false); |
141 | spin_unlock_irqrestore(&mdev->req_lock, flags); | 140 | spin_unlock_irqrestore(&mdev->req_lock, flags); |
142 | 141 | ||
143 | if (is_syncer_req) | 142 | if (is_syncer_req) |
@@ -163,14 +162,15 @@ void drbd_endio_sec(struct bio *bio, int error) | |||
163 | int uptodate = bio_flagged(bio, BIO_UPTODATE); | 162 | int uptodate = bio_flagged(bio, BIO_UPTODATE); |
164 | int is_write = bio_data_dir(bio) == WRITE; | 163 | int is_write = bio_data_dir(bio) == WRITE; |
165 | 164 | ||
166 | if (error) | 165 | if (error && __ratelimit(&drbd_ratelimit_state)) |
167 | dev_warn(DEV, "%s: error=%d s=%llus\n", | 166 | dev_warn(DEV, "%s: error=%d s=%llus\n", |
168 | is_write ? "write" : "read", error, | 167 | is_write ? "write" : "read", error, |
169 | (unsigned long long)e->sector); | 168 | (unsigned long long)e->sector); |
170 | if (!error && !uptodate) { | 169 | if (!error && !uptodate) { |
171 | dev_warn(DEV, "%s: setting error to -EIO s=%llus\n", | 170 | if (__ratelimit(&drbd_ratelimit_state)) |
172 | is_write ? "write" : "read", | 171 | dev_warn(DEV, "%s: setting error to -EIO s=%llus\n", |
173 | (unsigned long long)e->sector); | 172 | is_write ? "write" : "read", |
173 | (unsigned long long)e->sector); | ||
174 | /* strange behavior of some lower level drivers... | 174 | /* strange behavior of some lower level drivers... |
175 | * fail the request by clearing the uptodate flag, | 175 | * fail the request by clearing the uptodate flag, |
176 | * but do not return any error?! */ | 176 | * but do not return any error?! */ |
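
Both secondary-endio complaints are now ratelimited: a dying backing device can fail thousands of bios per second, and one unthrottled dev_warn per failure is enough to stall the box in printk. __ratelimit() on the shared drbd_ratelimit_state lets a burst through and silently drops the rest:

    if (error && __ratelimit(&drbd_ratelimit_state))
        dev_warn(DEV, "%s: error=%d s=%llus\n",
                 is_write ? "write" : "read", error,
                 (unsigned long long)e->sector);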
@@ -250,13 +250,6 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
250 | return w_send_read_req(mdev, w, 0); | 250 | return w_send_read_req(mdev, w, 0); |
251 | } | 251 | } |
252 | 252 | ||
253 | int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | ||
254 | { | ||
255 | ERR_IF(cancel) return 1; | ||
256 | dev_err(DEV, "resync inactive, but callback triggered??\n"); | ||
257 | return 1; /* Simply ignore this! */ | ||
258 | } | ||
259 | |||
260 | void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest) | 253 | void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest) |
261 | { | 254 | { |
262 | struct hash_desc desc; | 255 | struct hash_desc desc; |
@@ -355,7 +348,7 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size) | |||
355 | if (!get_ldev(mdev)) | 348 | if (!get_ldev(mdev)) |
356 | return -EIO; | 349 | return -EIO; |
357 | 350 | ||
358 | if (drbd_rs_should_slow_down(mdev)) | 351 | if (drbd_rs_should_slow_down(mdev, sector)) |
359 | goto defer; | 352 | goto defer; |
360 | 353 | ||
361 | /* GFP_TRY, because if there is no memory available right now, this may | 354 | /* GFP_TRY, because if there is no memory available right now, this may |
@@ -373,9 +366,10 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size) | |||
373 | if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0) | 366 | if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0) |
374 | return 0; | 367 | return 0; |
375 | 368 | ||
376 | /* drbd_submit_ee currently fails for one reason only: | 369 | /* If it failed because of ENOMEM, retry should help. If it failed |
377 | * not being able to allocate enough bios. | 370 | * because bio_add_page failed (probably broken lower level driver), |
378 | * Is dropping the connection going to help? */ | 371 | * retry may or may not help. |
372 | * If it does not, you may need to force disconnect. */ | ||
379 | spin_lock_irq(&mdev->req_lock); | 373 | spin_lock_irq(&mdev->req_lock); |
380 | list_del(&e->w.list); | 374 | list_del(&e->w.list); |
381 | spin_unlock_irq(&mdev->req_lock); | 375 | spin_unlock_irq(&mdev->req_lock); |
@@ -386,26 +380,25 @@ defer: | |||
386 | return -EAGAIN; | 380 | return -EAGAIN; |
387 | } | 381 | } |
388 | 382 | ||
389 | void resync_timer_fn(unsigned long data) | 383 | int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel) |
390 | { | 384 | { |
391 | struct drbd_conf *mdev = (struct drbd_conf *) data; | ||
392 | int queue; | ||
393 | |||
394 | queue = 1; | ||
395 | switch (mdev->state.conn) { | 385 | switch (mdev->state.conn) { |
396 | case C_VERIFY_S: | 386 | case C_VERIFY_S: |
397 | mdev->resync_work.cb = w_make_ov_request; | 387 | w_make_ov_request(mdev, w, cancel); |
398 | break; | 388 | break; |
399 | case C_SYNC_TARGET: | 389 | case C_SYNC_TARGET: |
400 | mdev->resync_work.cb = w_make_resync_request; | 390 | w_make_resync_request(mdev, w, cancel); |
401 | break; | 391 | break; |
402 | default: | ||
403 | queue = 0; | ||
404 | mdev->resync_work.cb = w_resync_inactive; | ||
405 | } | 392 | } |
406 | 393 | ||
407 | /* harmless race: list_empty outside data.work.q_lock */ | 394 | return 1; |
408 | if (list_empty(&mdev->resync_work.list) && queue) | 395 | } |
396 | |||
397 | void resync_timer_fn(unsigned long data) | ||
398 | { | ||
399 | struct drbd_conf *mdev = (struct drbd_conf *) data; | ||
400 | |||
401 | if (list_empty(&mdev->resync_work.list)) | ||
409 | drbd_queue_work(&mdev->data.work, &mdev->resync_work); | 402 | drbd_queue_work(&mdev->data.work, &mdev->resync_work); |
410 | } | 403 | } |
411 | 404 | ||
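
The timer path is restructured: instead of resync_timer_fn() rewriting resync_work.cb to one of three callbacks (which needed the w_resync_inactive placeholder deleted above, and could race with a state change between arming the timer and the work running), the timer now always queues the same work item, and the new w_resync_timer() dispatches on the connection state at the moment the worker actually executes. Schematically:

    /* timer context: only (re)queue the work item */
    if (list_empty(&mdev->resync_work.list))
        drbd_queue_work(&mdev->data.work, &mdev->resync_work);

    /* worker context (w_resync_timer): decide on the state *now* */
    switch (mdev->state.conn) {
    case C_VERIFY_S:
        w_make_ov_request(mdev, w, cancel);
        break;
    case C_SYNC_TARGET:
        w_make_resync_request(mdev, w, cancel);
        break;
    default:
        break;     /* resync not active: silently nothing to do */
    }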
@@ -438,7 +431,7 @@ static void fifo_add_val(struct fifo_buffer *fb, int value) | |||
438 | fb->values[i] += value; | 431 | fb->values[i] += value; |
439 | } | 432 | } |
440 | 433 | ||
441 | int drbd_rs_controller(struct drbd_conf *mdev) | 434 | static int drbd_rs_controller(struct drbd_conf *mdev) |
442 | { | 435 | { |
443 | unsigned int sect_in; /* Number of sectors that came in since the last turn */ | 436 | unsigned int sect_in; /* Number of sectors that came in since the last turn */ |
444 | unsigned int want; /* The number of sectors we want in the proxy */ | 437 | unsigned int want; /* The number of sectors we want in the proxy */ |
@@ -492,29 +485,36 @@ int drbd_rs_controller(struct drbd_conf *mdev) | |||
492 | return req_sect; | 485 | return req_sect; |
493 | } | 486 | } |
494 | 487 | ||
495 | int w_make_resync_request(struct drbd_conf *mdev, | 488 | static int drbd_rs_number_requests(struct drbd_conf *mdev) |
496 | struct drbd_work *w, int cancel) | 489 | { |
490 | int number; | ||
491 | if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */ | ||
492 | number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9); | ||
493 | mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; | ||
494 | } else { | ||
495 | mdev->c_sync_rate = mdev->sync_conf.rate; | ||
496 | number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); | ||
497 | } | ||
498 | |||
499 | /* ignore the number of pending requests; the resync controller should | ||
500 | * throttle down to the incoming reply rate soon enough anyway. */ | ||
501 | return number; | ||
502 | } | ||
503 | |||
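For the fixed-rate branch of drbd_rs_number_requests() the units work out cleanly, assuming drbd's usual constants (SLEEP_TIME = HZ/10, BM_BLOCK_SIZE = 4 KiB, sync_conf.rate in KiB/s). A quick standalone check:

    #include <stdio.h>

    #define HZ            250        /* assumed config value; it cancels out below */
    #define SLEEP_TIME    (HZ / 10)  /* resync timer period: 100 ms */
    #define BM_BLOCK_SIZE 4096       /* one bitmap bit covers 4 KiB */

    int main(void)
    {
        int rate_kib_s = 10240;  /* configured resync rate: 10 MiB/s */

        /* requests issued per 100 ms tick, as in drbd_rs_number_requests() */
        int number = SLEEP_TIME * rate_kib_s / ((BM_BLOCK_SIZE / 1024) * HZ);

        /* each request covers one 4 KiB block; ten ticks per second */
        int achieved = number * (BM_BLOCK_SIZE / 1024) * 10;

        printf("%d requests per tick -> %d KiB/s\n", number, achieved);
        return 0;  /* prints: 256 requests per tick -> 10240 KiB/s */
    }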
504 | static int w_make_resync_request(struct drbd_conf *mdev, | ||
505 | struct drbd_work *w, int cancel) | ||
497 | { | 506 | { |
498 | unsigned long bit; | 507 | unsigned long bit; |
499 | sector_t sector; | 508 | sector_t sector; |
500 | const sector_t capacity = drbd_get_capacity(mdev->this_bdev); | 509 | const sector_t capacity = drbd_get_capacity(mdev->this_bdev); |
501 | int max_segment_size; | 510 | int max_bio_size; |
502 | int number, rollback_i, size, pe, mx; | 511 | int number, rollback_i, size; |
503 | int align, queued, sndbuf; | 512 | int align, queued, sndbuf; |
504 | int i = 0; | 513 | int i = 0; |
505 | 514 | ||
506 | if (unlikely(cancel)) | 515 | if (unlikely(cancel)) |
507 | return 1; | 516 | return 1; |
508 | 517 | ||
509 | if (unlikely(mdev->state.conn < C_CONNECTED)) { | ||
510 | dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected"); | ||
511 | return 0; | ||
512 | } | ||
513 | |||
514 | if (mdev->state.conn != C_SYNC_TARGET) | ||
515 | dev_err(DEV, "%s in w_make_resync_request\n", | ||
516 | drbd_conn_str(mdev->state.conn)); | ||
517 | |||
518 | if (mdev->rs_total == 0) { | 518 | if (mdev->rs_total == 0) { |
519 | /* empty resync? */ | 519 | /* empty resync? */ |
520 | drbd_resync_finished(mdev); | 520 | drbd_resync_finished(mdev); |
@@ -527,49 +527,19 @@ int w_make_resync_request(struct drbd_conf *mdev, | |||
527 | to continue resync with a broken disk makes no sense at | 527 | to continue resync with a broken disk makes no sense at |
528 | all */ | 528 | all */ |
529 | dev_err(DEV, "Disk broke down during resync!\n"); | 529 | dev_err(DEV, "Disk broke down during resync!\n"); |
530 | mdev->resync_work.cb = w_resync_inactive; | ||
531 | return 1; | 530 | return 1; |
532 | } | 531 | } |
533 | 532 | ||
534 | /* starting with drbd 8.3.8, we can handle multi-bio EEs, | 533 | /* starting with drbd 8.3.8, we can handle multi-bio EEs, |
535 | * if it should be necessary */ | 534 | * if it should be necessary */ |
536 | max_segment_size = | 535 | max_bio_size = |
537 | mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) : | 536 | mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 : |
538 | mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE; | 537 | mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE; |
539 | 538 | ||
540 | if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */ | 539 | number = drbd_rs_number_requests(mdev); |
541 | number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9); | 540 | if (number == 0) |
542 | mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; | ||
543 | } else { | ||
544 | mdev->c_sync_rate = mdev->sync_conf.rate; | ||
545 | number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); | ||
546 | } | ||
547 | |||
548 | /* Throttle resync on lower level disk activity, which may also be | ||
549 | * caused by application IO on Primary/SyncTarget. | ||
550 | * Keep this after the call to drbd_rs_controller, as that assumes | ||
551 | * to be called as precisely as possible every SLEEP_TIME, | ||
552 | * and would be confused otherwise. */ | ||
553 | if (drbd_rs_should_slow_down(mdev)) | ||
554 | goto requeue; | 541 | goto requeue; |
555 | 542 | ||
556 | mutex_lock(&mdev->data.mutex); | ||
557 | if (mdev->data.socket) | ||
558 | mx = mdev->data.socket->sk->sk_rcvbuf / sizeof(struct p_block_req); | ||
559 | else | ||
560 | mx = 1; | ||
561 | mutex_unlock(&mdev->data.mutex); | ||
562 | |||
563 | /* For resync rates >160MB/sec, allow more pending RS requests */ | ||
564 | if (number > mx) | ||
565 | mx = number; | ||
566 | |||
567 | /* Limit the number of pending RS requests to no more than the peer's receive buffer */ | ||
568 | pe = atomic_read(&mdev->rs_pending_cnt); | ||
569 | if ((pe + number) > mx) { | ||
570 | number = mx - pe; | ||
571 | } | ||
572 | |||
573 | for (i = 0; i < number; i++) { | 543 | for (i = 0; i < number; i++) { |
574 | /* Stop generating RS requests, when half of the send buffer is filled */ | 544 | /* Stop generating RS requests, when half of the send buffer is filled */ |
575 | mutex_lock(&mdev->data.mutex); | 545 | mutex_lock(&mdev->data.mutex); |
@@ -588,16 +558,16 @@ next_sector: | |||
588 | size = BM_BLOCK_SIZE; | 558 | size = BM_BLOCK_SIZE; |
589 | bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo); | 559 | bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo); |
590 | 560 | ||
591 | if (bit == -1UL) { | 561 | if (bit == DRBD_END_OF_BITMAP) { |
592 | mdev->bm_resync_fo = drbd_bm_bits(mdev); | 562 | mdev->bm_resync_fo = drbd_bm_bits(mdev); |
593 | mdev->resync_work.cb = w_resync_inactive; | ||
594 | put_ldev(mdev); | 563 | put_ldev(mdev); |
595 | return 1; | 564 | return 1; |
596 | } | 565 | } |
597 | 566 | ||
598 | sector = BM_BIT_TO_SECT(bit); | 567 | sector = BM_BIT_TO_SECT(bit); |
599 | 568 | ||
600 | if (drbd_try_rs_begin_io(mdev, sector)) { | 569 | if (drbd_rs_should_slow_down(mdev, sector) || |
570 | drbd_try_rs_begin_io(mdev, sector)) { | ||
601 | mdev->bm_resync_fo = bit; | 571 | mdev->bm_resync_fo = bit; |
602 | goto requeue; | 572 | goto requeue; |
603 | } | 573 | } |
@@ -608,7 +578,7 @@ next_sector: | |||
608 | goto next_sector; | 578 | goto next_sector; |
609 | } | 579 | } |
610 | 580 | ||
611 | #if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE | 581 | #if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE |
612 | /* try to find some adjacent bits. | 582 | /* try to find some adjacent bits. |
613 | * we stop if we have already the maximum req size. | 583 | * we stop if we have already the maximum req size. |
614 | * | 584 | * |
@@ -618,7 +588,7 @@ next_sector: | |||
618 | align = 1; | 588 | align = 1; |
619 | rollback_i = i; | 589 | rollback_i = i; |
620 | for (;;) { | 590 | for (;;) { |
621 | if (size + BM_BLOCK_SIZE > max_segment_size) | 591 | if (size + BM_BLOCK_SIZE > max_bio_size) |
622 | break; | 592 | break; |
623 | 593 | ||
624 | /* Always be aligned */ | 594 | /* Always be aligned */ |
@@ -685,7 +655,6 @@ next_sector: | |||
685 | * resync data block, and the last bit is cleared. | 655 | * resync data block, and the last bit is cleared. |
686 | * until then resync "work" is "inactive" ... | 656 | * until then resync "work" is "inactive" ... |
687 | */ | 657 | */ |
688 | mdev->resync_work.cb = w_resync_inactive; | ||
689 | put_ldev(mdev); | 658 | put_ldev(mdev); |
690 | return 1; | 659 | return 1; |
691 | } | 660 | } |
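The merge loop above (compiled in when DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE) grows a request across adjacent dirty bitmap bits, stopping once the size cap is reached or the start sector would lose its natural alignment. A simplified, power-of-two-only model of that rule (drbd itself also permits unaligned linear growth; helper names are invented):

    #include <stdio.h>

    #define BM_BLOCK_SIZE 4096
    #define MAX_BIO_SIZE  (128 * 1024)  /* assumed cap, stands in for max_bio_size */

    /* Double the request while the additional bitmap bits are dirty, the cap
     * holds, and the start sector stays naturally aligned for the new size. */
    static int grow_request(unsigned long sector, const int *dirty, int nbits)
    {
        int size = BM_BLOCK_SIZE;

        for (;;) {
            int next = size * 2;
            int ok = next <= MAX_BIO_SIZE &&
                     (sector & (unsigned long)(next / 512 - 1)) == 0;
            for (int b = size / BM_BLOCK_SIZE; ok && b < next / BM_BLOCK_SIZE; b++)
                ok = b < nbits && dirty[b];
            if (!ok)
                break;
            size = next;
        }
        return size;
    }

    int main(void)
    {
        int dirty[8] = { 1, 1, 1, 1, 0, 1, 1, 1 };
        /* start at sector 64 (a 32 KiB boundary): merges up to 16 KiB here,
         * then stops at the clean bit */
        printf("request size: %d bytes\n", grow_request(64, dirty, 8));
        return 0;
    }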
@@ -706,27 +675,18 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca | |||
706 | if (unlikely(cancel)) | 675 | if (unlikely(cancel)) |
707 | return 1; | 676 | return 1; |
708 | 677 | ||
709 | if (unlikely(mdev->state.conn < C_CONNECTED)) { | 678 | number = drbd_rs_number_requests(mdev); |
710 | dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected"); | ||
711 | return 0; | ||
712 | } | ||
713 | |||
714 | number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ); | ||
715 | if (atomic_read(&mdev->rs_pending_cnt) > number) | ||
716 | goto requeue; | ||
717 | |||
718 | number -= atomic_read(&mdev->rs_pending_cnt); | ||
719 | 679 | ||
720 | sector = mdev->ov_position; | 680 | sector = mdev->ov_position; |
721 | for (i = 0; i < number; i++) { | 681 | for (i = 0; i < number; i++) { |
722 | if (sector >= capacity) { | 682 | if (sector >= capacity) { |
723 | mdev->resync_work.cb = w_resync_inactive; | ||
724 | return 1; | 683 | return 1; |
725 | } | 684 | } |
726 | 685 | ||
727 | size = BM_BLOCK_SIZE; | 686 | size = BM_BLOCK_SIZE; |
728 | 687 | ||
729 | if (drbd_try_rs_begin_io(mdev, sector)) { | 688 | if (drbd_rs_should_slow_down(mdev, sector) || |
689 | drbd_try_rs_begin_io(mdev, sector)) { | ||
730 | mdev->ov_position = sector; | 690 | mdev->ov_position = sector; |
731 | goto requeue; | 691 | goto requeue; |
732 | } | 692 | } |
@@ -744,11 +704,33 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca | |||
744 | mdev->ov_position = sector; | 704 | mdev->ov_position = sector; |
745 | 705 | ||
746 | requeue: | 706 | requeue: |
707 | mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9)); | ||
747 | mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME); | 708 | mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME); |
748 | return 1; | 709 | return 1; |
749 | } | 710 | } |
750 | 711 | ||
751 | 712 | ||
713 | void start_resync_timer_fn(unsigned long data) | ||
714 | { | ||
715 | struct drbd_conf *mdev = (struct drbd_conf *) data; | ||
716 | |||
717 | drbd_queue_work(&mdev->data.work, &mdev->start_resync_work); | ||
718 | } | ||
719 | |||
720 | int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | ||
721 | { | ||
722 | if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) { | ||
723 | dev_warn(DEV, "w_start_resync later...\n"); | ||
724 | mdev->start_resync_timer.expires = jiffies + HZ/10; | ||
725 | add_timer(&mdev->start_resync_timer); | ||
726 | return 1; | ||
727 | } | ||
728 | |||
729 | drbd_start_resync(mdev, C_SYNC_SOURCE); | ||
730 | clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags); | ||
731 | return 1; | ||
732 | } | ||
733 | |||
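w_start_resync above defers the actual start while acks are still outstanding, re-arming start_resync_timer at HZ/10 rather than blocking the worker thread. The retry shape, modeled in plain C (names invented for illustration):

    #include <stdio.h>

    /* Model of w_start_resync(): while acks are outstanding, re-arm a short
     * timer and retry instead of blocking the worker thread. */
    struct dev { int unacked; int rs_pending; };

    static int try_start_resync(struct dev *d)
    {
        if (d->unacked || d->rs_pending) {
            printf("w_start_resync later... (unacked=%d, pending=%d)\n",
                   d->unacked, d->rs_pending);
            return 0;  /* caller re-arms the 100 ms start_resync_timer */
        }
        puts("drbd_start_resync(C_SYNC_SOURCE)");
        return 1;
    }

    int main(void)
    {
        struct dev d = { .unacked = 2, .rs_pending = 0 };
        while (!try_start_resync(&d))
            d.unacked--;  /* stands in for acks arriving between ticks */
        return 0;
    }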
752 | int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | 734 | int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel) |
753 | { | 735 | { |
754 | kfree(w); | 736 | kfree(w); |
@@ -782,6 +764,7 @@ int drbd_resync_finished(struct drbd_conf *mdev) | |||
782 | union drbd_state os, ns; | 764 | union drbd_state os, ns; |
783 | struct drbd_work *w; | 765 | struct drbd_work *w; |
784 | char *khelper_cmd = NULL; | 766 | char *khelper_cmd = NULL; |
767 | int verify_done = 0; | ||
785 | 768 | ||
786 | /* Remove all elements from the resync LRU. Since future actions | 769 | /* Remove all elements from the resync LRU. Since future actions |
787 | * might set bits in the (main) bitmap, then the entries in the | 770 | * might set bits in the (main) bitmap, then the entries in the |
@@ -792,8 +775,7 @@ int drbd_resync_finished(struct drbd_conf *mdev) | |||
792 | * queue (or even the read operations for those packets | 775 | * queue (or even the read operations for those packets |
793 | * is not finished by now). Retry in 100ms. */ | 776 | * is not finished by now). Retry in 100ms. */ |
794 | 777 | ||
795 | __set_current_state(TASK_INTERRUPTIBLE); | 778 | schedule_timeout_interruptible(HZ / 10); |
796 | schedule_timeout(HZ / 10); | ||
797 | w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); | 779 | w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); |
798 | if (w) { | 780 | if (w) { |
799 | w->cb = w_resync_finished; | 781 | w->cb = w_resync_finished; |
@@ -818,6 +800,8 @@ int drbd_resync_finished(struct drbd_conf *mdev) | |||
818 | spin_lock_irq(&mdev->req_lock); | 800 | spin_lock_irq(&mdev->req_lock); |
819 | os = mdev->state; | 801 | os = mdev->state; |
820 | 802 | ||
803 | verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T); | ||
804 | |||
821 | /* This protects us against multiple calls (that can happen in the presence | 805 | /* This protects us against multiple calls (that can happen in the presence |
822 | of application IO), and against connectivity loss just before we arrive here. */ | 806 | of application IO), and against connectivity loss just before we arrive here. */ |
823 | if (os.conn <= C_CONNECTED) | 807 | if (os.conn <= C_CONNECTED) |
@@ -827,8 +811,7 @@ int drbd_resync_finished(struct drbd_conf *mdev) | |||
827 | ns.conn = C_CONNECTED; | 811 | ns.conn = C_CONNECTED; |
828 | 812 | ||
829 | dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n", | 813 | dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n", |
830 | (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) ? | 814 | verify_done ? "Online verify " : "Resync", |
831 | "Online verify " : "Resync", | ||
832 | dt + mdev->rs_paused, mdev->rs_paused, dbdt); | 815 | dt + mdev->rs_paused, mdev->rs_paused, dbdt); |
833 | 816 | ||
834 | n_oos = drbd_bm_total_weight(mdev); | 817 | n_oos = drbd_bm_total_weight(mdev); |
@@ -886,14 +869,18 @@ int drbd_resync_finished(struct drbd_conf *mdev) | |||
886 | } | 869 | } |
887 | } | 870 | } |
888 | 871 | ||
889 | drbd_uuid_set_bm(mdev, 0UL); | 872 | if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) { |
890 | 873 | /* for verify runs, we don't update uuids here, | |
891 | if (mdev->p_uuid) { | 874 | * so there would be nothing to report. */ |
892 | /* Now the two UUID sets are equal, update what we | 875 | drbd_uuid_set_bm(mdev, 0UL); |
893 | * know of the peer. */ | 876 | drbd_print_uuids(mdev, "updated UUIDs"); |
894 | int i; | 877 | if (mdev->p_uuid) { |
895 | for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++) | 878 | /* Now the two UUID sets are equal, update what we |
896 | mdev->p_uuid[i] = mdev->ldev->md.uuid[i]; | 879 | * know of the peer. */ |
880 | int i; | ||
881 | for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++) | ||
882 | mdev->p_uuid[i] = mdev->ldev->md.uuid[i]; | ||
883 | } | ||
897 | } | 884 | } |
898 | } | 885 | } |
899 | 886 | ||
@@ -905,15 +892,11 @@ out: | |||
905 | mdev->rs_total = 0; | 892 | mdev->rs_total = 0; |
906 | mdev->rs_failed = 0; | 893 | mdev->rs_failed = 0; |
907 | mdev->rs_paused = 0; | 894 | mdev->rs_paused = 0; |
908 | mdev->ov_start_sector = 0; | 895 | if (verify_done) |
896 | mdev->ov_start_sector = 0; | ||
909 | 897 | ||
910 | drbd_md_sync(mdev); | 898 | drbd_md_sync(mdev); |
911 | 899 | ||
912 | if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) { | ||
913 | dev_info(DEV, "Writing the whole bitmap\n"); | ||
914 | drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); | ||
915 | } | ||
916 | |||
917 | if (khelper_cmd) | 900 | if (khelper_cmd) |
918 | drbd_khelper(mdev, khelper_cmd); | 901 | drbd_khelper(mdev, khelper_cmd); |
919 | 902 | ||
@@ -994,7 +977,9 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
994 | put_ldev(mdev); | 977 | put_ldev(mdev); |
995 | } | 978 | } |
996 | 979 | ||
997 | if (likely((e->flags & EE_WAS_ERROR) == 0)) { | 980 | if (mdev->state.conn == C_AHEAD) { |
981 | ok = drbd_send_ack(mdev, P_RS_CANCEL, e); | ||
982 | } else if (likely((e->flags & EE_WAS_ERROR) == 0)) { | ||
998 | if (likely(mdev->state.pdsk >= D_INCONSISTENT)) { | 983 | if (likely(mdev->state.pdsk >= D_INCONSISTENT)) { |
999 | inc_rs_pending(mdev); | 984 | inc_rs_pending(mdev); |
1000 | ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e); | 985 | ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e); |
@@ -1096,25 +1081,27 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1096 | if (unlikely(cancel)) | 1081 | if (unlikely(cancel)) |
1097 | goto out; | 1082 | goto out; |
1098 | 1083 | ||
1099 | if (unlikely((e->flags & EE_WAS_ERROR) != 0)) | ||
1100 | goto out; | ||
1101 | |||
1102 | digest_size = crypto_hash_digestsize(mdev->verify_tfm); | 1084 | digest_size = crypto_hash_digestsize(mdev->verify_tfm); |
1103 | /* FIXME if this allocation fails, online verify will not terminate! */ | ||
1104 | digest = kmalloc(digest_size, GFP_NOIO); | 1085 | digest = kmalloc(digest_size, GFP_NOIO); |
1105 | if (digest) { | 1086 | if (!digest) { |
1106 | drbd_csum_ee(mdev, mdev->verify_tfm, e, digest); | 1087 | ok = 0; /* terminate the connection in case the allocation failed */ |
1107 | inc_rs_pending(mdev); | 1088 | goto out; |
1108 | ok = drbd_send_drequest_csum(mdev, e->sector, e->size, | ||
1109 | digest, digest_size, P_OV_REPLY); | ||
1110 | if (!ok) | ||
1111 | dec_rs_pending(mdev); | ||
1112 | kfree(digest); | ||
1113 | } | 1089 | } |
1114 | 1090 | ||
1091 | if (likely(!(e->flags & EE_WAS_ERROR))) | ||
1092 | drbd_csum_ee(mdev, mdev->verify_tfm, e, digest); | ||
1093 | else | ||
1094 | memset(digest, 0, digest_size); | ||
1095 | |||
1096 | inc_rs_pending(mdev); | ||
1097 | ok = drbd_send_drequest_csum(mdev, e->sector, e->size, | ||
1098 | digest, digest_size, P_OV_REPLY); | ||
1099 | if (!ok) | ||
1100 | dec_rs_pending(mdev); | ||
1101 | kfree(digest); | ||
1102 | |||
1115 | out: | 1103 | out: |
1116 | drbd_free_ee(mdev, e); | 1104 | drbd_free_ee(mdev, e); |
1117 | |||
1118 | dec_unacked(mdev); | 1105 | dec_unacked(mdev); |
1119 | 1106 | ||
1120 | return ok; | 1107 | return ok; |
@@ -1129,7 +1116,6 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size) | |||
1129 | mdev->ov_last_oos_size = size>>9; | 1116 | mdev->ov_last_oos_size = size>>9; |
1130 | } | 1117 | } |
1131 | drbd_set_out_of_sync(mdev, sector, size); | 1118 | drbd_set_out_of_sync(mdev, sector, size); |
1132 | set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags); | ||
1133 | } | 1119 | } |
1134 | 1120 | ||
1135 | int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | 1121 | int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) |
@@ -1165,10 +1151,6 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1165 | eq = !memcmp(digest, di->digest, digest_size); | 1151 | eq = !memcmp(digest, di->digest, digest_size); |
1166 | kfree(digest); | 1152 | kfree(digest); |
1167 | } | 1153 | } |
1168 | } else { | ||
1169 | ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e); | ||
1170 | if (__ratelimit(&drbd_ratelimit_state)) | ||
1171 | dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n"); | ||
1172 | } | 1154 | } |
1173 | 1155 | ||
1174 | dec_unacked(mdev); | 1156 | dec_unacked(mdev); |
@@ -1182,7 +1164,13 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1182 | 1164 | ||
1183 | drbd_free_ee(mdev, e); | 1165 | drbd_free_ee(mdev, e); |
1184 | 1166 | ||
1185 | if (--mdev->ov_left == 0) { | 1167 | --mdev->ov_left; |
1168 | |||
1169 | /* let's advance progress step marks only for every other megabyte */ | ||
1170 | if ((mdev->ov_left & 0x200) == 0x200) | ||
1171 | drbd_advance_rs_marks(mdev, mdev->ov_left); | ||
1172 | |||
1173 | if (mdev->ov_left == 0) { | ||
1186 | ov_oos_print(mdev); | 1174 | ov_oos_print(mdev); |
1187 | drbd_resync_finished(mdev); | 1175 | drbd_resync_finished(mdev); |
1188 | } | 1176 | } |
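The 0x200 test above is a cheap rate limiter: assuming BM_BLOCK_SIZE is 4 KiB, ov_left counts 4 KiB blocks, so bit 9 flips once per 512 blocks, i.e. once per 2 MiB of verified data, which is what the "every other megabyte" comment refers to. A standalone check:

    #include <stdio.h>

    int main(void)
    {
        /* ov_left counts 4 KiB blocks; 0x200 blocks = 512 * 4 KiB = 2 MiB */
        printf("window = %d KiB\n", 0x200 * 4);

        /* counting down 4096 blocks (16 MiB), bit 9 changes 8 times */
        int flips = 0;
        for (unsigned long left = 4096; left > 0; left--)
            if ((left & 0x200) != ((left - 1) & 0x200))
                flips++;
        printf("bit 9 flipped %d times over 16 MiB\n", flips);
        return 0;
    }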
@@ -1235,6 +1223,22 @@ int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1235 | return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE); | 1223 | return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE); |
1236 | } | 1224 | } |
1237 | 1225 | ||
1226 | int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | ||
1227 | { | ||
1228 | struct drbd_request *req = container_of(w, struct drbd_request, w); | ||
1229 | int ok; | ||
1230 | |||
1231 | if (unlikely(cancel)) { | ||
1232 | req_mod(req, send_canceled); | ||
1233 | return 1; | ||
1234 | } | ||
1235 | |||
1236 | ok = drbd_send_oos(mdev, req); | ||
1237 | req_mod(req, oos_handed_to_network); | ||
1238 | |||
1239 | return ok; | ||
1240 | } | ||
1241 | |||
1238 | /** | 1242 | /** |
1239 | * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request | 1243 | * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request |
1240 | * @mdev: DRBD device. | 1244 | * @mdev: DRBD device. |
@@ -1430,6 +1434,17 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na) | |||
1430 | return retcode; | 1434 | return retcode; |
1431 | } | 1435 | } |
1432 | 1436 | ||
1437 | void drbd_rs_controller_reset(struct drbd_conf *mdev) | ||
1438 | { | ||
1439 | atomic_set(&mdev->rs_sect_in, 0); | ||
1440 | atomic_set(&mdev->rs_sect_ev, 0); | ||
1441 | mdev->rs_in_flight = 0; | ||
1442 | mdev->rs_planed = 0; | ||
1443 | spin_lock(&mdev->peer_seq_lock); | ||
1444 | fifo_set(&mdev->rs_plan_s, 0); | ||
1445 | spin_unlock(&mdev->peer_seq_lock); | ||
1446 | } | ||
1447 | |||
1433 | /** | 1448 | /** |
1434 | * drbd_start_resync() - Start the resync process | 1449 | * drbd_start_resync() - Start the resync process |
1435 | * @mdev: DRBD device. | 1450 | * @mdev: DRBD device. |
@@ -1443,13 +1458,18 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
1443 | union drbd_state ns; | 1458 | union drbd_state ns; |
1444 | int r; | 1459 | int r; |
1445 | 1460 | ||
1446 | if (mdev->state.conn >= C_SYNC_SOURCE) { | 1461 | if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) { |
1447 | dev_err(DEV, "Resync already running!\n"); | 1462 | dev_err(DEV, "Resync already running!\n"); |
1448 | return; | 1463 | return; |
1449 | } | 1464 | } |
1450 | 1465 | ||
1451 | /* In case a previous resync run was aborted by an IO error/detach on the peer. */ | 1466 | if (mdev->state.conn < C_AHEAD) { |
1452 | drbd_rs_cancel_all(mdev); | 1467 | /* In case a previous resync run was aborted by an IO error/detach on the peer. */ |
1468 | drbd_rs_cancel_all(mdev); | ||
1469 | /* This should be done when we abort the resync. We definitely do not | ||
1470 | want to have this for connections going back and forth between | ||
1471 | Ahead/Behind and SyncSource/SyncTarget */ | ||
1472 | } | ||
1453 | 1473 | ||
1454 | if (side == C_SYNC_TARGET) { | 1474 | if (side == C_SYNC_TARGET) { |
1455 | /* Since application IO was locked out during C_WF_BITMAP_T and | 1475 | /* Since application IO was locked out during C_WF_BITMAP_T and |
@@ -1463,6 +1483,20 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
1463 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 1483 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
1464 | return; | 1484 | return; |
1465 | } | 1485 | } |
1486 | } else /* C_SYNC_SOURCE */ { | ||
1487 | r = drbd_khelper(mdev, "before-resync-source"); | ||
1488 | r = (r >> 8) & 0xff; | ||
1489 | if (r > 0) { | ||
1490 | if (r == 3) { | ||
1491 | dev_info(DEV, "before-resync-source handler returned %d, " | ||
1492 | "ignoring. Old userland tools?\n", r); | ||
1493 | } else { | ||
1494 | dev_info(DEV, "before-resync-source handler returned %d, " | ||
1495 | "dropping connection.\n", r); | ||
1496 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | ||
1497 | return; | ||
1498 | } | ||
1499 | } | ||
1466 | } | 1500 | } |
1467 | 1501 | ||
1468 | drbd_state_lock(mdev); | 1502 | drbd_state_lock(mdev); |
@@ -1472,18 +1506,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
1472 | return; | 1506 | return; |
1473 | } | 1507 | } |
1474 | 1508 | ||
1475 | if (side == C_SYNC_TARGET) { | ||
1476 | mdev->bm_resync_fo = 0; | ||
1477 | } else /* side == C_SYNC_SOURCE */ { | ||
1478 | u64 uuid; | ||
1479 | |||
1480 | get_random_bytes(&uuid, sizeof(u64)); | ||
1481 | drbd_uuid_set(mdev, UI_BITMAP, uuid); | ||
1482 | drbd_send_sync_uuid(mdev, uuid); | ||
1483 | |||
1484 | D_ASSERT(mdev->state.disk == D_UP_TO_DATE); | ||
1485 | } | ||
1486 | |||
1487 | write_lock_irq(&global_state_lock); | 1509 | write_lock_irq(&global_state_lock); |
1488 | ns = mdev->state; | 1510 | ns = mdev->state; |
1489 | 1511 | ||
@@ -1521,13 +1543,24 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
1521 | _drbd_pause_after(mdev); | 1543 | _drbd_pause_after(mdev); |
1522 | } | 1544 | } |
1523 | write_unlock_irq(&global_state_lock); | 1545 | write_unlock_irq(&global_state_lock); |
1524 | put_ldev(mdev); | ||
1525 | 1546 | ||
1526 | if (r == SS_SUCCESS) { | 1547 | if (r == SS_SUCCESS) { |
1527 | dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", | 1548 | dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", |
1528 | drbd_conn_str(ns.conn), | 1549 | drbd_conn_str(ns.conn), |
1529 | (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10), | 1550 | (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10), |
1530 | (unsigned long) mdev->rs_total); | 1551 | (unsigned long) mdev->rs_total); |
1552 | if (side == C_SYNC_TARGET) | ||
1553 | mdev->bm_resync_fo = 0; | ||
1554 | |||
1555 | /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid | ||
1556 | * with w_send_oos, or the sync target will get confused as to | ||
1557 | * how many bits to resync. We cannot always do that, because for an | ||
1558 | * empty resync and protocol < 95, we need to do it here, as we call | ||
1559 | * drbd_resync_finished from here in that case. | ||
1560 | * We call drbd_gen_and_send_sync_uuid here for protocol < 96, | ||
1561 | * and from after_state_ch otherwise. */ | ||
1562 | if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96) | ||
1563 | drbd_gen_and_send_sync_uuid(mdev); | ||
1531 | 1564 | ||
1532 | if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) { | 1565 | if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) { |
1533 | /* This still has a race (about when exactly the peers | 1566 | /* This still has a race (about when exactly the peers |
@@ -1547,13 +1580,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
1547 | drbd_resync_finished(mdev); | 1580 | drbd_resync_finished(mdev); |
1548 | } | 1581 | } |
1549 | 1582 | ||
1550 | atomic_set(&mdev->rs_sect_in, 0); | 1583 | drbd_rs_controller_reset(mdev); |
1551 | atomic_set(&mdev->rs_sect_ev, 0); | ||
1552 | mdev->rs_in_flight = 0; | ||
1553 | mdev->rs_planed = 0; | ||
1554 | spin_lock(&mdev->peer_seq_lock); | ||
1555 | fifo_set(&mdev->rs_plan_s, 0); | ||
1556 | spin_unlock(&mdev->peer_seq_lock); | ||
1557 | /* ns.conn may already be != mdev->state.conn, | 1584 | /* ns.conn may already be != mdev->state.conn, |
1558 | * we may have been paused in between, or become paused until | 1585 | * we may have been paused in between, or become paused until |
1559 | * the timer triggers. | 1586 | * the timer triggers. |
@@ -1563,6 +1590,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
1563 | 1590 | ||
1564 | drbd_md_sync(mdev); | 1591 | drbd_md_sync(mdev); |
1565 | } | 1592 | } |
1593 | put_ldev(mdev); | ||
1566 | drbd_state_unlock(mdev); | 1594 | drbd_state_unlock(mdev); |
1567 | } | 1595 | } |
1568 | 1596 | ||
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h index 53586fa5ae1..151f1a37478 100644 --- a/drivers/block/drbd/drbd_wrappers.h +++ b/drivers/block/drbd/drbd_wrappers.h | |||
@@ -39,7 +39,7 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev, | |||
39 | return; | 39 | return; |
40 | } | 40 | } |
41 | 41 | ||
42 | if (FAULT_ACTIVE(mdev, fault_type)) | 42 | if (drbd_insert_fault(mdev, fault_type)) |
43 | bio_endio(bio, -EIO); | 43 | bio_endio(bio, -EIO); |
44 | else | 44 | else |
45 | generic_make_request(bio); | 45 | generic_make_request(bio); |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 0f17ad8585d..b03771d4787 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/cpu.h> | 28 | #include <linux/cpu.h> |
29 | #include <linux/completion.h> | 29 | #include <linux/completion.h> |
30 | #include <linux/mutex.h> | 30 | #include <linux/mutex.h> |
31 | #include <linux/syscore_ops.h> | ||
31 | 32 | ||
32 | #include <trace/events/power.h> | 33 | #include <trace/events/power.h> |
33 | 34 | ||
@@ -1340,35 +1341,31 @@ out: | |||
1340 | } | 1341 | } |
1341 | EXPORT_SYMBOL(cpufreq_get); | 1342 | EXPORT_SYMBOL(cpufreq_get); |
1342 | 1343 | ||
1344 | static struct sysdev_driver cpufreq_sysdev_driver = { | ||
1345 | .add = cpufreq_add_dev, | ||
1346 | .remove = cpufreq_remove_dev, | ||
1347 | }; | ||
1348 | |||
1343 | 1349 | ||
1344 | /** | 1350 | /** |
1345 | * cpufreq_suspend - let the low level driver prepare for suspend | 1351 | * cpufreq_bp_suspend - Prepare the boot CPU for system suspend. |
1352 | * | ||
1353 | * This function is only executed for the boot processor. The other CPUs | ||
1354 | * have been put offline by means of CPU hotplug. | ||
1346 | */ | 1355 | */ |
1347 | 1356 | static int cpufreq_bp_suspend(void) | |
1348 | static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) | ||
1349 | { | 1357 | { |
1350 | int ret = 0; | 1358 | int ret = 0; |
1351 | 1359 | ||
1352 | int cpu = sysdev->id; | 1360 | int cpu = smp_processor_id(); |
1353 | struct cpufreq_policy *cpu_policy; | 1361 | struct cpufreq_policy *cpu_policy; |
1354 | 1362 | ||
1355 | dprintk("suspending cpu %u\n", cpu); | 1363 | dprintk("suspending cpu %u\n", cpu); |
1356 | 1364 | ||
1357 | if (!cpu_online(cpu)) | 1365 | /* If there's no policy for the boot CPU, we have nothing to do. */ |
1358 | return 0; | ||
1359 | |||
1360 | /* we may be lax here as interrupts are off. Nonetheless | ||
1361 | * we need to grab the correct cpu policy, as to check | ||
1362 | * whether we really run on this CPU. | ||
1363 | */ | ||
1364 | |||
1365 | cpu_policy = cpufreq_cpu_get(cpu); | 1366 | cpu_policy = cpufreq_cpu_get(cpu); |
1366 | if (!cpu_policy) | 1367 | if (!cpu_policy) |
1367 | return -EINVAL; | 1368 | return 0; |
1368 | |||
1369 | /* only handle each CPU group once */ | ||
1370 | if (unlikely(cpu_policy->cpu != cpu)) | ||
1371 | goto out; | ||
1372 | 1369 | ||
1373 | if (cpufreq_driver->suspend) { | 1370 | if (cpufreq_driver->suspend) { |
1374 | ret = cpufreq_driver->suspend(cpu_policy); | 1371 | ret = cpufreq_driver->suspend(cpu_policy); |
@@ -1377,13 +1374,12 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) | |||
1377 | "step on CPU %u\n", cpu_policy->cpu); | 1374 | "step on CPU %u\n", cpu_policy->cpu); |
1378 | } | 1375 | } |
1379 | 1376 | ||
1380 | out: | ||
1381 | cpufreq_cpu_put(cpu_policy); | 1377 | cpufreq_cpu_put(cpu_policy); |
1382 | return ret; | 1378 | return ret; |
1383 | } | 1379 | } |
1384 | 1380 | ||
1385 | /** | 1381 | /** |
1386 | * cpufreq_resume - restore proper CPU frequency handling after resume | 1382 | * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU. |
1387 | * | 1383 | * |
1388 | * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) | 1384 | * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) |
1389 | * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are | 1385 | * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are |
@@ -1391,31 +1387,23 @@ out: | |||
1391 | * what we believe it to be. This is a bit later than when it | 1387 | * what we believe it to be. This is a bit later than when it |
1392 | * should be, but nonetheless it's better than calling | 1388 | * should be, but nonetheless it's better than calling |
1393 | * cpufreq_driver->get() here which might re-enable interrupts... | 1389 | * cpufreq_driver->get() here which might re-enable interrupts... |
1390 | * | ||
1391 | * This function is only executed for the boot CPU. The other CPUs have not | ||
1392 | * been turned on yet. | ||
1394 | */ | 1393 | */ |
1395 | static int cpufreq_resume(struct sys_device *sysdev) | 1394 | static void cpufreq_bp_resume(void) |
1396 | { | 1395 | { |
1397 | int ret = 0; | 1396 | int ret = 0; |
1398 | 1397 | ||
1399 | int cpu = sysdev->id; | 1398 | int cpu = smp_processor_id(); |
1400 | struct cpufreq_policy *cpu_policy; | 1399 | struct cpufreq_policy *cpu_policy; |
1401 | 1400 | ||
1402 | dprintk("resuming cpu %u\n", cpu); | 1401 | dprintk("resuming cpu %u\n", cpu); |
1403 | 1402 | ||
1404 | if (!cpu_online(cpu)) | 1403 | /* If there's no policy for the boot CPU, we have nothing to do. */ |
1405 | return 0; | ||
1406 | |||
1407 | /* we may be lax here as interrupts are off. Nonetheless | ||
1408 | * we need to grab the correct cpu policy, as to check | ||
1409 | * whether we really run on this CPU. | ||
1410 | */ | ||
1411 | |||
1412 | cpu_policy = cpufreq_cpu_get(cpu); | 1404 | cpu_policy = cpufreq_cpu_get(cpu); |
1413 | if (!cpu_policy) | 1405 | if (!cpu_policy) |
1414 | return -EINVAL; | 1406 | return; |
1415 | |||
1416 | /* only handle each CPU group once */ | ||
1417 | if (unlikely(cpu_policy->cpu != cpu)) | ||
1418 | goto fail; | ||
1419 | 1407 | ||
1420 | if (cpufreq_driver->resume) { | 1408 | if (cpufreq_driver->resume) { |
1421 | ret = cpufreq_driver->resume(cpu_policy); | 1409 | ret = cpufreq_driver->resume(cpu_policy); |
@@ -1430,14 +1418,11 @@ static int cpufreq_resume(struct sys_device *sysdev) | |||
1430 | 1418 | ||
1431 | fail: | 1419 | fail: |
1432 | cpufreq_cpu_put(cpu_policy); | 1420 | cpufreq_cpu_put(cpu_policy); |
1433 | return ret; | ||
1434 | } | 1421 | } |
1435 | 1422 | ||
1436 | static struct sysdev_driver cpufreq_sysdev_driver = { | 1423 | static struct syscore_ops cpufreq_syscore_ops = { |
1437 | .add = cpufreq_add_dev, | 1424 | .suspend = cpufreq_bp_suspend, |
1438 | .remove = cpufreq_remove_dev, | 1425 | .resume = cpufreq_bp_resume, |
1439 | .suspend = cpufreq_suspend, | ||
1440 | .resume = cpufreq_resume, | ||
1441 | }; | 1426 | }; |
1442 | 1427 | ||
1443 | 1428 | ||
@@ -2002,6 +1987,7 @@ static int __init cpufreq_core_init(void) | |||
2002 | cpufreq_global_kobject = kobject_create_and_add("cpufreq", | 1987 | cpufreq_global_kobject = kobject_create_and_add("cpufreq", |
2003 | &cpu_sysdev_class.kset.kobj); | 1988 | &cpu_sysdev_class.kset.kobj); |
2004 | BUG_ON(!cpufreq_global_kobject); | 1989 | BUG_ON(!cpufreq_global_kobject); |
1990 | register_syscore_ops(&cpufreq_syscore_ops); | ||
2005 | 1991 | ||
2006 | return 0; | 1992 | return 0; |
2007 | } | 1993 | } |
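The cpufreq conversion above swaps the per-sysdev suspend/resume callbacks for syscore_ops, which the PM core runs once, on the boot CPU, with interrupts disabled. A kernel-context sketch of the same registration pattern (placeholder "foo" driver; compiles only in-tree):

    #include <linux/init.h>
    #include <linux/syscore_ops.h>

    /* Runs late in suspend, on the boot CPU only, with interrupts off. */
    static int foo_bp_suspend(void)
    {
        /* quiesce boot-CPU state here, e.g. via smp_processor_id() */
        return 0;  /* non-zero aborts the suspend */
    }

    /* No return value: syscore resume is not allowed to fail. */
    static void foo_bp_resume(void)
    {
        /* restore boot-CPU state here */
    }

    static struct syscore_ops foo_syscore_ops = {
        .suspend = foo_bp_suspend,
        .resume  = foo_bp_resume,
    };

    static int __init foo_init(void)
    {
        register_syscore_ops(&foo_syscore_ops);
        return 0;
    }
    core_initcall(foo_init);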
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c index 33fc685cb38..3525ad91877 100644 --- a/drivers/gpio/adp5588-gpio.c +++ b/drivers/gpio/adp5588-gpio.c | |||
@@ -289,10 +289,10 @@ static int adp5588_irq_setup(struct adp5588_gpio *dev) | |||
289 | 289 | ||
290 | for (gpio = 0; gpio < dev->gpio_chip.ngpio; gpio++) { | 290 | for (gpio = 0; gpio < dev->gpio_chip.ngpio; gpio++) { |
291 | int irq = gpio + dev->irq_base; | 291 | int irq = gpio + dev->irq_base; |
292 | set_irq_chip_data(irq, dev); | 292 | irq_set_chip_data(irq, dev); |
293 | set_irq_chip_and_handler(irq, &adp5588_irq_chip, | 293 | irq_set_chip_and_handler(irq, &adp5588_irq_chip, |
294 | handle_level_irq); | 294 | handle_level_irq); |
295 | set_irq_nested_thread(irq, 1); | 295 | irq_set_nested_thread(irq, 1); |
296 | #ifdef CONFIG_ARM | 296 | #ifdef CONFIG_ARM |
297 | /* | 297 | /* |
298 | * ARM needs us to explicitly flag the IRQ as VALID, | 298 | * ARM needs us to explicitly flag the IRQ as VALID, |
@@ -300,7 +300,7 @@ static int adp5588_irq_setup(struct adp5588_gpio *dev) | |||
300 | */ | 300 | */ |
301 | set_irq_flags(irq, IRQF_VALID); | 301 | set_irq_flags(irq, IRQF_VALID); |
302 | #else | 302 | #else |
303 | set_irq_noprobe(irq); | 303 | irq_set_noprobe(irq); |
304 | #endif | 304 | #endif |
305 | } | 305 | } |
306 | 306 | ||
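This file and the other GPIO expanders below pick up the genirq setter renames (set_irq_* becomes irq_set_*, and set/get_irq_data become irq_set/get_handler_data); the semantics are unchanged. A kernel-context sketch of the renamed per-pin setup loop ("my_irq_chip" and "priv" are placeholders for a driver's chip and state):

    #include <linux/irq.h>

    static void foo_setup_irqs(struct irq_chip *my_irq_chip, void *priv,
                               unsigned int irq_base, unsigned int ngpio)
    {
        unsigned int irq;

        for (irq = irq_base; irq < irq_base + ngpio; irq++) {
            irq_set_chip_data(irq, priv);              /* was set_irq_chip_data() */
            irq_set_chip_and_handler(irq, my_irq_chip, /* was set_irq_chip_and_handler() */
                                     handle_level_irq);
            irq_set_nested_thread(irq, 1);             /* was set_irq_nested_thread() */
    #ifdef CONFIG_ARM
            set_irq_flags(irq, IRQF_VALID);            /* ARM-specific, not renamed */
    #else
            irq_set_noprobe(irq);                      /* was set_irq_noprobe() */
    #endif
        }
    }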
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 649550e2cae..36a2974815b 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -1656,51 +1656,6 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip) | |||
1656 | chip->get | 1656 | chip->get |
1657 | ? (chip->get(chip, i) ? "hi" : "lo") | 1657 | ? (chip->get(chip, i) ? "hi" : "lo") |
1658 | : "? "); | 1658 | : "? "); |
1659 | |||
1660 | if (!is_out) { | ||
1661 | int irq = gpio_to_irq(gpio); | ||
1662 | struct irq_desc *desc = irq_to_desc(irq); | ||
1663 | |||
1664 | /* This races with request_irq(), set_irq_type(), | ||
1665 | * and set_irq_wake() ... but those are "rare". | ||
1666 | * | ||
1667 | * More significantly, trigger type flags aren't | ||
1668 | * currently maintained by genirq. | ||
1669 | */ | ||
1670 | if (irq >= 0 && desc->action) { | ||
1671 | char *trigger; | ||
1672 | |||
1673 | switch (desc->status & IRQ_TYPE_SENSE_MASK) { | ||
1674 | case IRQ_TYPE_NONE: | ||
1675 | trigger = "(default)"; | ||
1676 | break; | ||
1677 | case IRQ_TYPE_EDGE_FALLING: | ||
1678 | trigger = "edge-falling"; | ||
1679 | break; | ||
1680 | case IRQ_TYPE_EDGE_RISING: | ||
1681 | trigger = "edge-rising"; | ||
1682 | break; | ||
1683 | case IRQ_TYPE_EDGE_BOTH: | ||
1684 | trigger = "edge-both"; | ||
1685 | break; | ||
1686 | case IRQ_TYPE_LEVEL_HIGH: | ||
1687 | trigger = "level-high"; | ||
1688 | break; | ||
1689 | case IRQ_TYPE_LEVEL_LOW: | ||
1690 | trigger = "level-low"; | ||
1691 | break; | ||
1692 | default: | ||
1693 | trigger = "?trigger?"; | ||
1694 | break; | ||
1695 | } | ||
1696 | |||
1697 | seq_printf(s, " irq-%d %s%s", | ||
1698 | irq, trigger, | ||
1699 | (desc->status & IRQ_WAKEUP) | ||
1700 | ? " wakeup" : ""); | ||
1701 | } | ||
1702 | } | ||
1703 | |||
1704 | seq_printf(s, "\n"); | 1659 | seq_printf(s, "\n"); |
1705 | } | 1660 | } |
1706 | } | 1661 | } |
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c index 9e1d01f0071..ad6951edc16 100644 --- a/drivers/gpio/max732x.c +++ b/drivers/gpio/max732x.c | |||
@@ -470,14 +470,14 @@ static int max732x_irq_setup(struct max732x_chip *chip, | |||
470 | if (!(chip->dir_input & (1 << lvl))) | 470 | if (!(chip->dir_input & (1 << lvl))) |
471 | continue; | 471 | continue; |
472 | 472 | ||
473 | set_irq_chip_data(irq, chip); | 473 | irq_set_chip_data(irq, chip); |
474 | set_irq_chip_and_handler(irq, &max732x_irq_chip, | 474 | irq_set_chip_and_handler(irq, &max732x_irq_chip, |
475 | handle_edge_irq); | 475 | handle_edge_irq); |
476 | set_irq_nested_thread(irq, 1); | 476 | irq_set_nested_thread(irq, 1); |
477 | #ifdef CONFIG_ARM | 477 | #ifdef CONFIG_ARM |
478 | set_irq_flags(irq, IRQF_VALID); | 478 | set_irq_flags(irq, IRQF_VALID); |
479 | #else | 479 | #else |
480 | set_irq_noprobe(irq); | 480 | irq_set_noprobe(irq); |
481 | #endif | 481 | #endif |
482 | } | 482 | } |
483 | 483 | ||
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index 2fc25dec7cf..583e9259207 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c | |||
@@ -395,13 +395,13 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, | |||
395 | for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) { | 395 | for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) { |
396 | int irq = lvl + chip->irq_base; | 396 | int irq = lvl + chip->irq_base; |
397 | 397 | ||
398 | set_irq_chip_data(irq, chip); | 398 | irq_set_chip_data(irq, chip); |
399 | set_irq_chip_and_handler(irq, &pca953x_irq_chip, | 399 | irq_set_chip_and_handler(irq, &pca953x_irq_chip, |
400 | handle_edge_irq); | 400 | handle_edge_irq); |
401 | #ifdef CONFIG_ARM | 401 | #ifdef CONFIG_ARM |
402 | set_irq_flags(irq, IRQF_VALID); | 402 | set_irq_flags(irq, IRQF_VALID); |
403 | #else | 403 | #else |
404 | set_irq_noprobe(irq); | 404 | irq_set_noprobe(irq); |
405 | #endif | 405 | #endif |
406 | } | 406 | } |
407 | 407 | ||
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c index 838ddbdf90c..6fcb28cdd86 100644 --- a/drivers/gpio/pl061.c +++ b/drivers/gpio/pl061.c | |||
@@ -210,7 +210,7 @@ static struct irq_chip pl061_irqchip = { | |||
210 | 210 | ||
211 | static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) | 211 | static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) |
212 | { | 212 | { |
213 | struct list_head *chip_list = get_irq_data(irq); | 213 | struct list_head *chip_list = irq_get_handler_data(irq); |
214 | struct list_head *ptr; | 214 | struct list_head *ptr; |
215 | struct pl061_gpio *chip; | 215 | struct pl061_gpio *chip; |
216 | 216 | ||
@@ -294,7 +294,7 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id) | |||
294 | ret = -ENODEV; | 294 | ret = -ENODEV; |
295 | goto iounmap; | 295 | goto iounmap; |
296 | } | 296 | } |
297 | set_irq_chained_handler(irq, pl061_irq_handler); | 297 | irq_set_chained_handler(irq, pl061_irq_handler); |
298 | if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */ | 298 | if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */ |
299 | chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL); | 299 | chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL); |
300 | if (chip_list == NULL) { | 300 | if (chip_list == NULL) { |
@@ -303,9 +303,9 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id) | |||
303 | goto iounmap; | 303 | goto iounmap; |
304 | } | 304 | } |
305 | INIT_LIST_HEAD(chip_list); | 305 | INIT_LIST_HEAD(chip_list); |
306 | set_irq_data(irq, chip_list); | 306 | irq_set_handler_data(irq, chip_list); |
307 | } else | 307 | } else |
308 | chip_list = get_irq_data(irq); | 308 | chip_list = irq_get_handler_data(irq); |
309 | list_add(&chip->list, chip_list); | 309 | list_add(&chip->list, chip_list); |
310 | 310 | ||
311 | for (i = 0; i < PL061_GPIO_NR; i++) { | 311 | for (i = 0; i < PL061_GPIO_NR; i++) { |
@@ -315,10 +315,10 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id) | |||
315 | else | 315 | else |
316 | pl061_direction_input(&chip->gc, i); | 316 | pl061_direction_input(&chip->gc, i); |
317 | 317 | ||
318 | set_irq_chip(i+chip->irq_base, &pl061_irqchip); | 318 | irq_set_chip_and_handler(i + chip->irq_base, &pl061_irqchip, |
319 | set_irq_handler(i+chip->irq_base, handle_simple_irq); | 319 | handle_simple_irq); |
320 | set_irq_flags(i+chip->irq_base, IRQF_VALID); | 320 | set_irq_flags(i+chip->irq_base, IRQF_VALID); |
321 | set_irq_chip_data(i+chip->irq_base, chip); | 321 | irq_set_chip_data(i + chip->irq_base, chip); |
322 | } | 322 | } |
323 | 323 | ||
324 | return 0; | 324 | return 0; |
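pl061 also renames the chained-handler plumbing: the per-parent pointer is stashed with irq_set_handler_data() and read back in the demux handler with irq_get_handler_data(). A kernel-context sketch of that pairing (placeholder names, not the driver's code):

    #include <linux/irq.h>
    #include <linux/list.h>

    /* The demux handler reads back the pointer stashed at setup time. */
    static void foo_demux(unsigned int irq, struct irq_desc *desc)
    {
        struct list_head *chips = irq_get_handler_data(irq);  /* was get_irq_data() */

        /* ... walk 'chips', ack the parent, dispatch child interrupts ... */
    }

    static void foo_attach(unsigned int parent_irq, struct list_head *chips)
    {
        irq_set_handler_data(parent_irq, chips);        /* was set_irq_data() */
        irq_set_chained_handler(parent_irq, foo_demux); /* was set_irq_chained_handler() */
    }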
diff --git a/drivers/gpio/stmpe-gpio.c b/drivers/gpio/stmpe-gpio.c index eb2901f8ab5..4c980b57332 100644 --- a/drivers/gpio/stmpe-gpio.c +++ b/drivers/gpio/stmpe-gpio.c | |||
@@ -254,14 +254,14 @@ static int __devinit stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio) | |||
254 | int irq; | 254 | int irq; |
255 | 255 | ||
256 | for (irq = base; irq < base + stmpe_gpio->chip.ngpio; irq++) { | 256 | for (irq = base; irq < base + stmpe_gpio->chip.ngpio; irq++) { |
257 | set_irq_chip_data(irq, stmpe_gpio); | 257 | irq_set_chip_data(irq, stmpe_gpio); |
258 | set_irq_chip_and_handler(irq, &stmpe_gpio_irq_chip, | 258 | irq_set_chip_and_handler(irq, &stmpe_gpio_irq_chip, |
259 | handle_simple_irq); | 259 | handle_simple_irq); |
260 | set_irq_nested_thread(irq, 1); | 260 | irq_set_nested_thread(irq, 1); |
261 | #ifdef CONFIG_ARM | 261 | #ifdef CONFIG_ARM |
262 | set_irq_flags(irq, IRQF_VALID); | 262 | set_irq_flags(irq, IRQF_VALID); |
263 | #else | 263 | #else |
264 | set_irq_noprobe(irq); | 264 | irq_set_noprobe(irq); |
265 | #endif | 265 | #endif |
266 | } | 266 | } |
267 | 267 | ||
@@ -277,8 +277,8 @@ static void stmpe_gpio_irq_remove(struct stmpe_gpio *stmpe_gpio) | |||
277 | #ifdef CONFIG_ARM | 277 | #ifdef CONFIG_ARM |
278 | set_irq_flags(irq, 0); | 278 | set_irq_flags(irq, 0); |
279 | #endif | 279 | #endif |
280 | set_irq_chip_and_handler(irq, NULL, NULL); | 280 | irq_set_chip_and_handler(irq, NULL, NULL); |
281 | set_irq_chip_data(irq, NULL); | 281 | irq_set_chip_data(irq, NULL); |
282 | } | 282 | } |
283 | } | 283 | } |
284 | 284 | ||
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c index d2f874c3d3d..a4f73534394 100644 --- a/drivers/gpio/sx150x.c +++ b/drivers/gpio/sx150x.c | |||
@@ -551,12 +551,12 @@ static int sx150x_install_irq_chip(struct sx150x_chip *chip, | |||
551 | 551 | ||
552 | for (n = 0; n < chip->dev_cfg->ngpios; ++n) { | 552 | for (n = 0; n < chip->dev_cfg->ngpios; ++n) { |
553 | irq = irq_base + n; | 553 | irq = irq_base + n; |
554 | set_irq_chip_and_handler(irq, &chip->irq_chip, handle_edge_irq); | 554 | irq_set_chip_and_handler(irq, &chip->irq_chip, handle_edge_irq); |
555 | set_irq_nested_thread(irq, 1); | 555 | irq_set_nested_thread(irq, 1); |
556 | #ifdef CONFIG_ARM | 556 | #ifdef CONFIG_ARM |
557 | set_irq_flags(irq, IRQF_VALID); | 557 | set_irq_flags(irq, IRQF_VALID); |
558 | #else | 558 | #else |
559 | set_irq_noprobe(irq); | 559 | irq_set_noprobe(irq); |
560 | #endif | 560 | #endif |
561 | } | 561 | } |
562 | 562 | ||
@@ -583,8 +583,7 @@ static void sx150x_remove_irq_chip(struct sx150x_chip *chip) | |||
583 | 583 | ||
584 | for (n = 0; n < chip->dev_cfg->ngpios; ++n) { | 584 | for (n = 0; n < chip->dev_cfg->ngpios; ++n) { |
585 | irq = chip->irq_base + n; | 585 | irq = chip->irq_base + n; |
586 | set_irq_handler(irq, NULL); | 586 | irq_set_chip_and_handler(irq, NULL, NULL); |
587 | set_irq_chip(irq, NULL); | ||
588 | } | 587 | } |
589 | } | 588 | } |
590 | 589 | ||
diff --git a/drivers/gpio/tc3589x-gpio.c b/drivers/gpio/tc3589x-gpio.c index 27200af1a59..2a82e8999a4 100644 --- a/drivers/gpio/tc3589x-gpio.c +++ b/drivers/gpio/tc3589x-gpio.c | |||
@@ -239,14 +239,14 @@ static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio) | |||
239 | int irq; | 239 | int irq; |
240 | 240 | ||
241 | for (irq = base; irq < base + tc3589x_gpio->chip.ngpio; irq++) { | 241 | for (irq = base; irq < base + tc3589x_gpio->chip.ngpio; irq++) { |
242 | set_irq_chip_data(irq, tc3589x_gpio); | 242 | irq_set_chip_data(irq, tc3589x_gpio); |
243 | set_irq_chip_and_handler(irq, &tc3589x_gpio_irq_chip, | 243 | irq_set_chip_and_handler(irq, &tc3589x_gpio_irq_chip, |
244 | handle_simple_irq); | 244 | handle_simple_irq); |
245 | set_irq_nested_thread(irq, 1); | 245 | irq_set_nested_thread(irq, 1); |
246 | #ifdef CONFIG_ARM | 246 | #ifdef CONFIG_ARM |
247 | set_irq_flags(irq, IRQF_VALID); | 247 | set_irq_flags(irq, IRQF_VALID); |
248 | #else | 248 | #else |
249 | set_irq_noprobe(irq); | 249 | irq_set_noprobe(irq); |
250 | #endif | 250 | #endif |
251 | } | 251 | } |
252 | 252 | ||
@@ -262,8 +262,8 @@ static void tc3589x_gpio_irq_remove(struct tc3589x_gpio *tc3589x_gpio) | |||
262 | #ifdef CONFIG_ARM | 262 | #ifdef CONFIG_ARM |
263 | set_irq_flags(irq, 0); | 263 | set_irq_flags(irq, 0); |
264 | #endif | 264 | #endif |
265 | set_irq_chip_and_handler(irq, NULL, NULL); | 265 | irq_set_chip_and_handler(irq, NULL, NULL); |
266 | set_irq_chip_data(irq, NULL); | 266 | irq_set_chip_data(irq, NULL); |
267 | } | 267 | } |
268 | } | 268 | } |
269 | 269 | ||
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c index ffcd815b8b8..edbe1eae531 100644 --- a/drivers/gpio/timbgpio.c +++ b/drivers/gpio/timbgpio.c | |||
@@ -196,7 +196,7 @@ out: | |||
196 | 196 | ||
197 | static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) | 197 | static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) |
198 | { | 198 | { |
199 | struct timbgpio *tgpio = get_irq_data(irq); | 199 | struct timbgpio *tgpio = irq_get_handler_data(irq); |
200 | unsigned long ipr; | 200 | unsigned long ipr; |
201 | int offset; | 201 | int offset; |
202 | 202 | ||
@@ -292,16 +292,16 @@ static int __devinit timbgpio_probe(struct platform_device *pdev) | |||
292 | return 0; | 292 | return 0; |
293 | 293 | ||
294 | for (i = 0; i < pdata->nr_pins; i++) { | 294 | for (i = 0; i < pdata->nr_pins; i++) { |
295 | set_irq_chip_and_handler_name(tgpio->irq_base + i, | 295 | irq_set_chip_and_handler_name(tgpio->irq_base + i, |
296 | &timbgpio_irqchip, handle_simple_irq, "mux"); | 296 | &timbgpio_irqchip, handle_simple_irq, "mux"); |
297 | set_irq_chip_data(tgpio->irq_base + i, tgpio); | 297 | irq_set_chip_data(tgpio->irq_base + i, tgpio); |
298 | #ifdef CONFIG_ARM | 298 | #ifdef CONFIG_ARM |
299 | set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE); | 299 | set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE); |
300 | #endif | 300 | #endif |
301 | } | 301 | } |
302 | 302 | ||
303 | set_irq_data(irq, tgpio); | 303 | irq_set_handler_data(irq, tgpio); |
304 | set_irq_chained_handler(irq, timbgpio_irq); | 304 | irq_set_chained_handler(irq, timbgpio_irq); |
305 | 305 | ||
306 | return 0; | 306 | return 0; |
307 | 307 | ||
@@ -327,12 +327,12 @@ static int __devexit timbgpio_remove(struct platform_device *pdev) | |||
327 | if (irq >= 0 && tgpio->irq_base > 0) { | 327 | if (irq >= 0 && tgpio->irq_base > 0) { |
328 | int i; | 328 | int i; |
329 | for (i = 0; i < tgpio->gpio.ngpio; i++) { | 329 | for (i = 0; i < tgpio->gpio.ngpio; i++) { |
330 | set_irq_chip(tgpio->irq_base + i, NULL); | 330 | irq_set_chip(tgpio->irq_base + i, NULL); |
331 | set_irq_chip_data(tgpio->irq_base + i, NULL); | 331 | irq_set_chip_data(tgpio->irq_base + i, NULL); |
332 | } | 332 | } |
333 | 333 | ||
334 | set_irq_handler(irq, NULL); | 334 | irq_set_handler(irq, NULL); |
335 | set_irq_data(irq, NULL); | 335 | irq_set_handler_data(irq, NULL); |
336 | } | 336 | } |
337 | 337 | ||
338 | err = gpiochip_remove(&tgpio->gpio); | 338 | err = gpiochip_remove(&tgpio->gpio); |
diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c index cffa3bd7ad3..a365be040b3 100644 --- a/drivers/gpio/vr41xx_giu.c +++ b/drivers/gpio/vr41xx_giu.c | |||
@@ -238,13 +238,13 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, | |||
238 | break; | 238 | break; |
239 | } | 239 | } |
240 | } | 240 | } |
241 | set_irq_chip_and_handler(GIU_IRQ(pin), | 241 | irq_set_chip_and_handler(GIU_IRQ(pin), |
242 | &giuint_low_irq_chip, | 242 | &giuint_low_irq_chip, |
243 | handle_edge_irq); | 243 | handle_edge_irq); |
244 | } else { | 244 | } else { |
245 | giu_clear(GIUINTTYPL, mask); | 245 | giu_clear(GIUINTTYPL, mask); |
246 | giu_clear(GIUINTHTSELL, mask); | 246 | giu_clear(GIUINTHTSELL, mask); |
247 | set_irq_chip_and_handler(GIU_IRQ(pin), | 247 | irq_set_chip_and_handler(GIU_IRQ(pin), |
248 | &giuint_low_irq_chip, | 248 | &giuint_low_irq_chip, |
249 | handle_level_irq); | 249 | handle_level_irq); |
250 | } | 250 | } |
@@ -273,13 +273,13 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, | |||
273 | break; | 273 | break; |
274 | } | 274 | } |
275 | } | 275 | } |
276 | set_irq_chip_and_handler(GIU_IRQ(pin), | 276 | irq_set_chip_and_handler(GIU_IRQ(pin), |
277 | &giuint_high_irq_chip, | 277 | &giuint_high_irq_chip, |
278 | handle_edge_irq); | 278 | handle_edge_irq); |
279 | } else { | 279 | } else { |
280 | giu_clear(GIUINTTYPH, mask); | 280 | giu_clear(GIUINTTYPH, mask); |
281 | giu_clear(GIUINTHTSELH, mask); | 281 | giu_clear(GIUINTHTSELH, mask); |
282 | set_irq_chip_and_handler(GIU_IRQ(pin), | 282 | irq_set_chip_and_handler(GIU_IRQ(pin), |
283 | &giuint_high_irq_chip, | 283 | &giuint_high_irq_chip, |
284 | handle_level_irq); | 284 | handle_level_irq); |
285 | } | 285 | } |
@@ -539,9 +539,9 @@ static int __devinit giu_probe(struct platform_device *pdev) | |||
539 | chip = &giuint_high_irq_chip; | 539 | chip = &giuint_high_irq_chip; |
540 | 540 | ||
541 | if (trigger & (1 << pin)) | 541 | if (trigger & (1 << pin)) |
542 | set_irq_chip_and_handler(i, chip, handle_edge_irq); | 542 | irq_set_chip_and_handler(i, chip, handle_edge_irq); |
543 | else | 543 | else |
544 | set_irq_chip_and_handler(i, chip, handle_level_irq); | 544 | irq_set_chip_and_handler(i, chip, handle_level_irq); |
545 | 545 | ||
546 | } | 546 | } |
547 | 547 | ||
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 4c95b5fd9df..799e1490cf2 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -1073,6 +1073,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data, | |||
1073 | uint32_t __user *encoder_id; | 1073 | uint32_t __user *encoder_id; |
1074 | struct drm_mode_group *mode_group; | 1074 | struct drm_mode_group *mode_group; |
1075 | 1075 | ||
1076 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
1077 | return -EINVAL; | ||
1078 | |||
1076 | mutex_lock(&dev->mode_config.mutex); | 1079 | mutex_lock(&dev->mode_config.mutex); |
1077 | 1080 | ||
1078 | /* | 1081 | /* |
@@ -1244,6 +1247,9 @@ int drm_mode_getcrtc(struct drm_device *dev, | |||
1244 | struct drm_mode_object *obj; | 1247 | struct drm_mode_object *obj; |
1245 | int ret = 0; | 1248 | int ret = 0; |
1246 | 1249 | ||
1250 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
1251 | return -EINVAL; | ||
1252 | |||
1247 | mutex_lock(&dev->mode_config.mutex); | 1253 | mutex_lock(&dev->mode_config.mutex); |
1248 | 1254 | ||
1249 | obj = drm_mode_object_find(dev, crtc_resp->crtc_id, | 1255 | obj = drm_mode_object_find(dev, crtc_resp->crtc_id, |
@@ -1312,6 +1318,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, | |||
1312 | uint64_t __user *prop_values; | 1318 | uint64_t __user *prop_values; |
1313 | uint32_t __user *encoder_ptr; | 1319 | uint32_t __user *encoder_ptr; |
1314 | 1320 | ||
1321 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
1322 | return -EINVAL; | ||
1323 | |||
1315 | memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); | 1324 | memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); |
1316 | 1325 | ||
1317 | DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); | 1326 | DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); |
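Each KMS ioctl in drm_crtc.c gains the same three-line guard: on a driver without DRIVER_MODESET the mode_config state is never initialized, so bailing out with -EINVAL up front avoids dereferencing it. The recurring shape, sketched with a placeholder ioctl:

    #include "drmP.h"  /* DRM core header of this era */

    int foo_mode_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
    {
        /* refuse modeset ioctls early on non-KMS drivers */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
            return -EINVAL;

        mutex_lock(&dev->mode_config.mutex);
        /* ... look up mode objects and service the request ... */
        mutex_unlock(&dev->mode_config.mutex);
        return 0;
    }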
@@ -1431,6 +1440,9 @@ int drm_mode_getencoder(struct drm_device *dev, void *data, | |||
1431 | struct drm_encoder *encoder; | 1440 | struct drm_encoder *encoder; |
1432 | int ret = 0; | 1441 | int ret = 0; |
1433 | 1442 | ||
1443 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
1444 | return -EINVAL; | ||
1445 | |||
1434 | mutex_lock(&dev->mode_config.mutex); | 1446 | mutex_lock(&dev->mode_config.mutex); |
1435 | obj = drm_mode_object_find(dev, enc_resp->encoder_id, | 1447 | obj = drm_mode_object_find(dev, enc_resp->encoder_id, |
1436 | DRM_MODE_OBJECT_ENCODER); | 1448 | DRM_MODE_OBJECT_ENCODER); |
@@ -1486,6 +1498,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
1486 | int ret = 0; | 1498 | int ret = 0; |
1487 | int i; | 1499 | int i; |
1488 | 1500 | ||
1501 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
1502 | return -EINVAL; | ||
1503 | |||
1489 | mutex_lock(&dev->mode_config.mutex); | 1504 | mutex_lock(&dev->mode_config.mutex); |
1490 | obj = drm_mode_object_find(dev, crtc_req->crtc_id, | 1505 | obj = drm_mode_object_find(dev, crtc_req->crtc_id, |
1491 | DRM_MODE_OBJECT_CRTC); | 1506 | DRM_MODE_OBJECT_CRTC); |
@@ -1603,6 +1618,9 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, | |||
1603 | struct drm_crtc *crtc; | 1618 | struct drm_crtc *crtc; |
1604 | int ret = 0; | 1619 | int ret = 0; |
1605 | 1620 | ||
1621 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
1622 | return -EINVAL; | ||
1623 | |||
1606 | if (!req->flags) { | 1624 | if (!req->flags) { |
1607 | DRM_ERROR("no operation set\n"); | 1625 | DRM_ERROR("no operation set\n"); |
1608 | return -EINVAL; | 1626 | return -EINVAL; |
@@ -1667,6 +1685,9 @@ int drm_mode_addfb(struct drm_device *dev, | |||
1667 | struct drm_framebuffer *fb; | 1685 | struct drm_framebuffer *fb; |
1668 | int ret = 0; | 1686 | int ret = 0; |
1669 | 1687 | ||
1688 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
1689 | return -EINVAL; | ||
1690 | |||
1670 | if ((config->min_width > r->width) || (r->width > config->max_width)) { | 1691 | if ((config->min_width > r->width) || (r->width > config->max_width)) { |
1671 | DRM_ERROR("mode new framebuffer width not within limits\n"); | 1692 | DRM_ERROR("mode new framebuffer width not within limits\n"); |
1672 | return -EINVAL; | 1693 | return -EINVAL; |
@@ -1724,6 +1745,9 @@ int drm_mode_rmfb(struct drm_device *dev, | |||
1724 | int ret = 0; | 1745 | int ret = 0; |
1725 | int found = 0; | 1746 | int found = 0; |
1726 | 1747 | ||
1748 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
1749 | return -EINVAL; | ||
1750 | |||
1727 | mutex_lock(&dev->mode_config.mutex); | 1751 | mutex_lock(&dev->mode_config.mutex); |
1728 | obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); | 1752 | obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); |
1729 | /* TODO check that we really get a framebuffer back. */ | 1753 | /* TODO check that we really get a framebuffer back. */ |
@@ -1780,6 +1804,9 @@ int drm_mode_getfb(struct drm_device *dev, | |||
1780 | struct drm_framebuffer *fb; | 1804 | struct drm_framebuffer *fb; |
1781 | int ret = 0; | 1805 | int ret = 0; |
1782 | 1806 | ||
1807 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
1808 | return -EINVAL; | ||
1809 | |||
1783 | mutex_lock(&dev->mode_config.mutex); | 1810 | mutex_lock(&dev->mode_config.mutex); |
1784 | obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); | 1811 | obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); |
1785 | if (!obj) { | 1812 | if (!obj) { |
@@ -1813,6 +1840,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, | |||
1813 | int num_clips; | 1840 | int num_clips; |
1814 | int ret = 0; | 1841 | int ret = 0; |
1815 | 1842 | ||
1843 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
1844 | return -EINVAL; | ||
1845 | |||
1816 | mutex_lock(&dev->mode_config.mutex); | 1846 | mutex_lock(&dev->mode_config.mutex); |
1817 | obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); | 1847 | obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); |
1818 | if (!obj) { | 1848 | if (!obj) { |
@@ -1996,6 +2026,9 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev, | |||
1996 | struct drm_mode_modeinfo *umode = &mode_cmd->mode; | 2026 | struct drm_mode_modeinfo *umode = &mode_cmd->mode; |
1997 | int ret = 0; | 2027 | int ret = 0; |
1998 | 2028 | ||
2029 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
2030 | return -EINVAL; | ||
2031 | |||
1999 | mutex_lock(&dev->mode_config.mutex); | 2032 | mutex_lock(&dev->mode_config.mutex); |
2000 | 2033 | ||
2001 | obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); | 2034 | obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); |
@@ -2042,6 +2075,9 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev, | |||
2042 | struct drm_mode_modeinfo *umode = &mode_cmd->mode; | 2075 | struct drm_mode_modeinfo *umode = &mode_cmd->mode; |
2043 | int ret = 0; | 2076 | int ret = 0; |
2044 | 2077 | ||
2078 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
2079 | return -EINVAL; | ||
2080 | |||
2045 | mutex_lock(&dev->mode_config.mutex); | 2081 | mutex_lock(&dev->mode_config.mutex); |
2046 | 2082 | ||
2047 | obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); | 2083 | obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); |
@@ -2211,6 +2247,9 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, | |||
2211 | uint64_t __user *values_ptr; | 2247 | uint64_t __user *values_ptr; |
2212 | uint32_t __user *blob_length_ptr; | 2248 | uint32_t __user *blob_length_ptr; |
2213 | 2249 | ||
2250 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
2251 | return -EINVAL; | ||
2252 | |||
2214 | mutex_lock(&dev->mode_config.mutex); | 2253 | mutex_lock(&dev->mode_config.mutex); |
2215 | obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); | 2254 | obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); |
2216 | if (!obj) { | 2255 | if (!obj) { |
@@ -2333,6 +2372,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, | |||
2333 | int ret = 0; | 2372 | int ret = 0; |
2334 | void *blob_ptr; | 2373 | void *blob_ptr; |
2335 | 2374 | ||
2375 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
2376 | return -EINVAL; | ||
2377 | |||
2336 | mutex_lock(&dev->mode_config.mutex); | 2378 | mutex_lock(&dev->mode_config.mutex); |
2337 | obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); | 2379 | obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); |
2338 | if (!obj) { | 2380 | if (!obj) { |
@@ -2393,6 +2435,9 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev, | |||
2393 | int ret = -EINVAL; | 2435 | int ret = -EINVAL; |
2394 | int i; | 2436 | int i; |
2395 | 2437 | ||
2438 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
2439 | return -EINVAL; | ||
2440 | |||
2396 | mutex_lock(&dev->mode_config.mutex); | 2441 | mutex_lock(&dev->mode_config.mutex); |
2397 | 2442 | ||
2398 | obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); | 2443 | obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); |
@@ -2509,6 +2554,9 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, | |||
2509 | int size; | 2554 | int size; |
2510 | int ret = 0; | 2555 | int ret = 0; |
2511 | 2556 | ||
2557 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
2558 | return -EINVAL; | ||
2559 | |||
2512 | mutex_lock(&dev->mode_config.mutex); | 2560 | mutex_lock(&dev->mode_config.mutex); |
2513 | obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); | 2561 | obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); |
2514 | if (!obj) { | 2562 | if (!obj) { |
@@ -2560,6 +2608,9 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev, | |||
2560 | int size; | 2608 | int size; |
2561 | int ret = 0; | 2609 | int ret = 0; |
2562 | 2610 | ||
2611 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
2612 | return -EINVAL; | ||
2613 | |||
2563 | mutex_lock(&dev->mode_config.mutex); | 2614 | mutex_lock(&dev->mode_config.mutex); |
2564 | obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); | 2615 | obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); |
2565 | if (!obj) { | 2616 | if (!obj) { |
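Every hunk in this file adds the same two-line guard, so a minimal sketch of the resulting handler shape may help (drm_mode_example_ioctl is a hypothetical name standing in for any of the ioctls above):

    /* DRIVER_MODESET is only set by KMS drivers; on a UMS-only driver the
     * mode_config state walked below was never initialised, so the ioctl
     * must bail out before touching it. */
    int drm_mode_example_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv)
    {
            if (!drm_core_check_feature(dev, DRIVER_MODESET))
                    return -EINVAL;     /* reject KMS ioctls on UMS drivers */

            mutex_lock(&dev->mode_config.mutex);
            /* ... safe to look up and use mode objects here ... */
            mutex_unlock(&dev->mode_config.mutex);
            return 0;
    }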
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 57ce27c9a74..74e4ff57801 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -499,11 +499,12 @@ EXPORT_SYMBOL(drm_gem_vm_open); | |||
499 | void drm_gem_vm_close(struct vm_area_struct *vma) | 499 | void drm_gem_vm_close(struct vm_area_struct *vma) |
500 | { | 500 | { |
501 | struct drm_gem_object *obj = vma->vm_private_data; | 501 | struct drm_gem_object *obj = vma->vm_private_data; |
502 | struct drm_device *dev = obj->dev; | ||
502 | 503 | ||
503 | mutex_lock(&obj->dev->struct_mutex); | 504 | mutex_lock(&dev->struct_mutex); |
504 | drm_vm_close_locked(vma); | 505 | drm_vm_close_locked(vma); |
505 | drm_gem_object_unreference(obj); | 506 | drm_gem_object_unreference(obj); |
506 | mutex_unlock(&obj->dev->struct_mutex); | 507 | mutex_unlock(&dev->struct_mutex); |
507 | } | 508 | } |
508 | EXPORT_SYMBOL(drm_gem_vm_close); | 509 | EXPORT_SYMBOL(drm_gem_vm_close); |
509 | 510 | ||
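The new local in drm_gem_vm_close() is not cosmetic: drm_gem_object_unreference() may drop the last reference and free obj, after which the old unlock path dereferenced freed memory through obj->dev. A condensed view of the fixed ordering:

    struct drm_device *dev = obj->dev;   /* read while obj is known live */

    mutex_lock(&dev->struct_mutex);
    drm_gem_object_unreference(obj);     /* may free obj entirely */
    mutex_unlock(&dev->struct_mutex);    /* must not dereference obj here */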
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 7f6912a1676..904d7e9c8e4 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c | |||
@@ -280,6 +280,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
280 | if (dev->driver->dumb_create) | 280 | if (dev->driver->dumb_create) |
281 | req->value = 1; | 281 | req->value = 1; |
282 | break; | 282 | break; |
283 | case DRM_CAP_VBLANK_HIGH_CRTC: | ||
284 | req->value = 1; | ||
285 | break; | ||
283 | default: | 286 | default: |
284 | return -EINVAL; | 287 | return -EINVAL; |
285 | } | 288 | } |
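Userspace can probe the new capability through the existing GET_CAP ioctl before attempting high-CRTC vblank waits. A hedged sketch using libdrm's drmIoctl() wrapper, with error handling trimmed:

    #include <xf86drm.h>
    #include <drm/drm.h>

    static int supports_vblank_high_crtc(int fd)
    {
            struct drm_get_cap cap = { .capability = DRM_CAP_VBLANK_HIGH_CRTC };

            if (drmIoctl(fd, DRM_IOCTL_GET_CAP, &cap))
                    return 0;   /* older kernels hit the default: case, -EINVAL */
            return cap.value == 1;
    }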
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index a34ef97d3c8..741457bd1c4 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -1125,7 +1125,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
1125 | { | 1125 | { |
1126 | union drm_wait_vblank *vblwait = data; | 1126 | union drm_wait_vblank *vblwait = data; |
1127 | int ret = 0; | 1127 | int ret = 0; |
1128 | unsigned int flags, seq, crtc; | 1128 | unsigned int flags, seq, crtc, high_crtc; |
1129 | 1129 | ||
1130 | if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled)) | 1130 | if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled)) |
1131 | return -EINVAL; | 1131 | return -EINVAL; |
@@ -1134,16 +1134,21 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
1134 | return -EINVAL; | 1134 | return -EINVAL; |
1135 | 1135 | ||
1136 | if (vblwait->request.type & | 1136 | if (vblwait->request.type & |
1137 | ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { | 1137 | ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK | |
1138 | _DRM_VBLANK_HIGH_CRTC_MASK)) { | ||
1138 | DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", | 1139 | DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", |
1139 | vblwait->request.type, | 1140 | vblwait->request.type, |
1140 | (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); | 1141 | (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK | |
1142 | _DRM_VBLANK_HIGH_CRTC_MASK)); | ||
1141 | return -EINVAL; | 1143 | return -EINVAL; |
1142 | } | 1144 | } |
1143 | 1145 | ||
1144 | flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; | 1146 | flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; |
1145 | crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; | 1147 | high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); |
1146 | 1148 | if (high_crtc) | |
1149 | crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT; | ||
1150 | else | ||
1151 | crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; | ||
1147 | if (crtc >= dev->num_crtcs) | 1152 | if (crtc >= dev->num_crtcs) |
1148 | return -EINVAL; | 1153 | return -EINVAL; |
1149 | 1154 | ||
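With the mask accepted, a wait on any CRTC beyond the legacy primary/secondary pair is encoded in the high bits of request.type. A sketch for CRTC index 2, assuming the _DRM_VBLANK_HIGH_CRTC_* definitions from the matching drm.h change (not part of this hunk) and an open DRM fd:

    union drm_wait_vblank vbl = { 0 };

    vbl.request.type = _DRM_VBLANK_RELATIVE |
                       (2 << _DRM_VBLANK_HIGH_CRTC_SHIFT);  /* CRTC 2 */
    vbl.request.sequence = 1;                               /* next vblank */
    drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);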
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 09e0327fc6c..87c8e29465e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -892,7 +892,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
892 | seq_printf(m, "Render p-state limit: %d\n", | 892 | seq_printf(m, "Render p-state limit: %d\n", |
893 | rp_state_limits & 0xff); | 893 | rp_state_limits & 0xff); |
894 | seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> | 894 | seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> |
895 | GEN6_CAGF_SHIFT) * 100); | 895 | GEN6_CAGF_SHIFT) * 50); |
896 | seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & | 896 | seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & |
897 | GEN6_CURICONT_MASK); | 897 | GEN6_CURICONT_MASK); |
898 | seq_printf(m, "RP CUR UP: %dus\n", rpcurup & | 898 | seq_printf(m, "RP CUR UP: %dus\n", rpcurup & |
@@ -908,15 +908,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
908 | 908 | ||
909 | max_freq = (rp_state_cap & 0xff0000) >> 16; | 909 | max_freq = (rp_state_cap & 0xff0000) >> 16; |
910 | seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", | 910 | seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", |
911 | max_freq * 100); | 911 | max_freq * 50); |
912 | 912 | ||
913 | max_freq = (rp_state_cap & 0xff00) >> 8; | 913 | max_freq = (rp_state_cap & 0xff00) >> 8; |
914 | seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", | 914 | seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", |
915 | max_freq * 100); | 915 | max_freq * 50); |
916 | 916 | ||
917 | max_freq = rp_state_cap & 0xff; | 917 | max_freq = rp_state_cap & 0xff; |
918 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", | 918 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", |
919 | max_freq * 100); | 919 | max_freq * 50); |
920 | 920 | ||
921 | __gen6_gt_force_wake_put(dev_priv); | 921 | __gen6_gt_force_wake_put(dev_priv); |
922 | } else { | 922 | } else { |
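The scaling change matters because these GEN6 ratio fields count in 50 MHz steps, not 100 MHz, so the old multiplier doubled every frequency shown in debugfs. A worked example:

    /* raw RP0 ratio 22  ->  22 * 50 = 1100 MHz
     * old code       ->  22 * 100 = 2200 MHz (reported twice the truth) */

The same 50 MHz scaling is applied to the overclocking message in the intel_display.c hunk further down.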
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c4c2855d002..7ce3f353af3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -224,7 +224,7 @@ i915_gem_dumb_create(struct drm_file *file, | |||
224 | struct drm_mode_create_dumb *args) | 224 | struct drm_mode_create_dumb *args) |
225 | { | 225 | { |
226 | /* have to work out size/pitch and return them */ | 226 | /* have to work out size/pitch and return them */ |
227 | args->pitch = ALIGN(args->width & ((args->bpp + 1) / 8), 64); | 227 | args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); |
228 | args->size = args->pitch * args->height; | 228 | args->size = args->pitch * args->height; |
229 | return i915_gem_create(file, dev, | 229 | return i915_gem_create(file, dev, |
230 | args->size, &args->handle); | 230 | args->size, &args->handle); |
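The one-character fixes here are load-bearing: & should have been *, and (bpp + 1) / 8 under-rounds the bytes per pixel. A worked comparison for a 1024-wide, 32 bpp dumb buffer, assuming the kernel's ALIGN macro (round up to the given power of two):

    /* old: ALIGN(1024 & ((32 + 1) / 8), 64) = ALIGN(1024 & 4, 64) = 0
     * new: ALIGN(1024 * ((32 + 7) / 8), 64) = ALIGN(4096, 64)     = 4096 */

The old expression produced a zero pitch, and hence a zero-sized allocation, for common widths.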
@@ -1356,9 +1356,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) | |||
1356 | if (!obj->fault_mappable) | 1356 | if (!obj->fault_mappable) |
1357 | return; | 1357 | return; |
1358 | 1358 | ||
1359 | unmap_mapping_range(obj->base.dev->dev_mapping, | 1359 | if (obj->base.dev->dev_mapping) |
1360 | (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, | 1360 | unmap_mapping_range(obj->base.dev->dev_mapping, |
1361 | obj->base.size, 1); | 1361 | (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, |
1362 | obj->base.size, 1); | ||
1362 | 1363 | ||
1363 | obj->fault_mappable = false; | 1364 | obj->fault_mappable = false; |
1364 | } | 1365 | } |
@@ -1796,8 +1797,10 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) | |||
1796 | return; | 1797 | return; |
1797 | 1798 | ||
1798 | spin_lock(&file_priv->mm.lock); | 1799 | spin_lock(&file_priv->mm.lock); |
1799 | list_del(&request->client_list); | 1800 | if (request->file_priv) { |
1800 | request->file_priv = NULL; | 1801 | list_del(&request->client_list); |
1802 | request->file_priv = NULL; | ||
1803 | } | ||
1801 | spin_unlock(&file_priv->mm.lock); | 1804 | spin_unlock(&file_priv->mm.lock); |
1802 | } | 1805 | } |
1803 | 1806 | ||
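The new test appears to close a race: the unlocked file_priv check at the top of this helper can pass just before some other path detaches the request from its client, so the pointer has to be re-validated once mm.lock is held. A hedged distillation of the resulting shape:

    if (!request->file_priv)                 /* unlocked fast path (racy) */
            return;

    spin_lock(&file_priv->mm.lock);
    if (request->file_priv) {                /* still attached to this client? */
            list_del(&request->client_list);
            request->file_priv = NULL;
    }
    spin_unlock(&file_priv->mm.lock);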
@@ -2217,13 +2220,18 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring, | |||
2217 | { | 2220 | { |
2218 | int ret; | 2221 | int ret; |
2219 | 2222 | ||
2223 | if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0) | ||
2224 | return 0; | ||
2225 | |||
2220 | trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains); | 2226 | trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains); |
2221 | 2227 | ||
2222 | ret = ring->flush(ring, invalidate_domains, flush_domains); | 2228 | ret = ring->flush(ring, invalidate_domains, flush_domains); |
2223 | if (ret) | 2229 | if (ret) |
2224 | return ret; | 2230 | return ret; |
2225 | 2231 | ||
2226 | i915_gem_process_flushing_list(ring, flush_domains); | 2232 | if (flush_domains & I915_GEM_GPU_DOMAINS) |
2233 | i915_gem_process_flushing_list(ring, flush_domains); | ||
2234 | |||
2227 | return 0; | 2235 | return 0; |
2228 | } | 2236 | } |
2229 | 2237 | ||
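Hoisting the empty-mask test into i915_gem_flush_ring() makes a flush request with no GPU domains a guaranteed no-op at the single entry point, which is why the matching early-return checks disappear from bsd_ring_flush(), gen6_ring_flush() and blt_ring_flush() in the intel_ringbuffer.c hunks further down. Caller-side sketch:

    ret = i915_gem_flush_ring(ring, 0, 0);   /* returns 0, emits nothing */
    ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);  /* real flush */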
@@ -2579,8 +2587,23 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | |||
2579 | reg = &dev_priv->fence_regs[obj->fence_reg]; | 2587 | reg = &dev_priv->fence_regs[obj->fence_reg]; |
2580 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); | 2588 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); |
2581 | 2589 | ||
2582 | if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) | 2590 | if (obj->tiling_changed) { |
2583 | pipelined = NULL; | 2591 | ret = i915_gem_object_flush_fence(obj, pipelined); |
2592 | if (ret) | ||
2593 | return ret; | ||
2594 | |||
2595 | if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) | ||
2596 | pipelined = NULL; | ||
2597 | |||
2598 | if (pipelined) { | ||
2599 | reg->setup_seqno = | ||
2600 | i915_gem_next_request_seqno(pipelined); | ||
2601 | obj->last_fenced_seqno = reg->setup_seqno; | ||
2602 | obj->last_fenced_ring = pipelined; | ||
2603 | } | ||
2604 | |||
2605 | goto update; | ||
2606 | } | ||
2584 | 2607 | ||
2585 | if (!pipelined) { | 2608 | if (!pipelined) { |
2586 | if (reg->setup_seqno) { | 2609 | if (reg->setup_seqno) { |
@@ -2599,31 +2622,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | |||
2599 | ret = i915_gem_object_flush_fence(obj, pipelined); | 2622 | ret = i915_gem_object_flush_fence(obj, pipelined); |
2600 | if (ret) | 2623 | if (ret) |
2601 | return ret; | 2624 | return ret; |
2602 | } else if (obj->tiling_changed) { | ||
2603 | if (obj->fenced_gpu_access) { | ||
2604 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | ||
2605 | ret = i915_gem_flush_ring(obj->ring, | ||
2606 | 0, obj->base.write_domain); | ||
2607 | if (ret) | ||
2608 | return ret; | ||
2609 | } | ||
2610 | |||
2611 | obj->fenced_gpu_access = false; | ||
2612 | } | ||
2613 | } | ||
2614 | |||
2615 | if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) | ||
2616 | pipelined = NULL; | ||
2617 | BUG_ON(!pipelined && reg->setup_seqno); | ||
2618 | |||
2619 | if (obj->tiling_changed) { | ||
2620 | if (pipelined) { | ||
2621 | reg->setup_seqno = | ||
2622 | i915_gem_next_request_seqno(pipelined); | ||
2623 | obj->last_fenced_seqno = reg->setup_seqno; | ||
2624 | obj->last_fenced_ring = pipelined; | ||
2625 | } | ||
2626 | goto update; | ||
2627 | } | 2625 | } |
2628 | 2626 | ||
2629 | return 0; | 2627 | return 0; |
@@ -3606,6 +3604,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) | |||
3606 | return; | 3604 | return; |
3607 | } | 3605 | } |
3608 | 3606 | ||
3607 | trace_i915_gem_object_destroy(obj); | ||
3608 | |||
3609 | if (obj->base.map_list.map) | 3609 | if (obj->base.map_list.map) |
3610 | i915_gem_free_mmap_offset(obj); | 3610 | i915_gem_free_mmap_offset(obj); |
3611 | 3611 | ||
@@ -3615,8 +3615,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) | |||
3615 | kfree(obj->page_cpu_valid); | 3615 | kfree(obj->page_cpu_valid); |
3616 | kfree(obj->bit_17); | 3616 | kfree(obj->bit_17); |
3617 | kfree(obj); | 3617 | kfree(obj); |
3618 | |||
3619 | trace_i915_gem_object_destroy(obj); | ||
3620 | } | 3618 | } |
3621 | 3619 | ||
3622 | void i915_gem_free_object(struct drm_gem_object *gem_obj) | 3620 | void i915_gem_free_object(struct drm_gem_object *gem_obj) |
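Moving the tracepoint fixes an ordering bug rather than just tidying: the old placement fired trace_i915_gem_object_destroy(obj) after kfree(obj), handing the tracer a pointer into freed memory. The safe shape is simply:

    trace_i915_gem_object_destroy(obj);   /* obj must still be valid here */
    /* ... tear down mmap offset, free page arrays ... */
    kfree(obj);                           /* obj is dead; no further use */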
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 7ff7f933ddf..20a4cc5b818 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -367,6 +367,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
367 | uint32_t __iomem *reloc_entry; | 367 | uint32_t __iomem *reloc_entry; |
368 | void __iomem *reloc_page; | 368 | void __iomem *reloc_page; |
369 | 369 | ||
370 | /* We can't wait for rendering with pagefaults disabled */ | ||
371 | if (obj->active && in_atomic()) | ||
372 | return -EFAULT; | ||
373 | |||
370 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 374 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); |
371 | if (ret) | 375 | if (ret) |
372 | return ret; | 376 | return ret; |
@@ -440,15 +444,24 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, | |||
440 | struct list_head *objects) | 444 | struct list_head *objects) |
441 | { | 445 | { |
442 | struct drm_i915_gem_object *obj; | 446 | struct drm_i915_gem_object *obj; |
443 | int ret; | 447 | int ret = 0; |
444 | 448 | ||
449 | /* This is the fast path and we cannot handle a pagefault whilst | ||
450 | * holding the struct mutex lest the user pass in the relocations | ||
451 | * contained within a mmaped bo. For in such a case, the page | ||
452 | * fault handler would call i915_gem_fault() and we would try to | ||
453 | * acquire the struct mutex again. Obviously this is bad and so | ||
454 | * lockdep complains vehemently. | ||
455 | */ | ||
456 | pagefault_disable(); | ||
445 | list_for_each_entry(obj, objects, exec_list) { | 457 | list_for_each_entry(obj, objects, exec_list) { |
446 | ret = i915_gem_execbuffer_relocate_object(obj, eb); | 458 | ret = i915_gem_execbuffer_relocate_object(obj, eb); |
447 | if (ret) | 459 | if (ret) |
448 | return ret; | 460 | break; |
449 | } | 461 | } |
462 | pagefault_enable(); | ||
450 | 463 | ||
451 | return 0; | 464 | return ret; |
452 | } | 465 | } |
453 | 466 | ||
454 | static int | 467 | static int |
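The disable/enable pair is the standard kernel idiom for "try the fast path atomically, fall back if it would fault". A generic hedged sketch of the pattern; fallback_slow_path is a hypothetical helper standing in for the driver's slow relocation path that can fault safely once the mutex is dropped:

    pagefault_disable();
    ret = __copy_from_user_inatomic(dst, user_ptr, len); /* never faults in */
    pagefault_enable();
    if (ret)                                  /* nonzero: bytes not copied */
            ret = fallback_slow_path(dst, user_ptr, len);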
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3106c0dc838..432fc04c6bf 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1516,9 +1516,10 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, | |||
1516 | 1516 | ||
1517 | reg = PIPECONF(pipe); | 1517 | reg = PIPECONF(pipe); |
1518 | val = I915_READ(reg); | 1518 | val = I915_READ(reg); |
1519 | val |= PIPECONF_ENABLE; | 1519 | if (val & PIPECONF_ENABLE) |
1520 | I915_WRITE(reg, val); | 1520 | return; |
1521 | POSTING_READ(reg); | 1521 | |
1522 | I915_WRITE(reg, val | PIPECONF_ENABLE); | ||
1522 | intel_wait_for_vblank(dev_priv->dev, pipe); | 1523 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1523 | } | 1524 | } |
1524 | 1525 | ||
@@ -1552,9 +1553,10 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, | |||
1552 | 1553 | ||
1553 | reg = PIPECONF(pipe); | 1554 | reg = PIPECONF(pipe); |
1554 | val = I915_READ(reg); | 1555 | val = I915_READ(reg); |
1555 | val &= ~PIPECONF_ENABLE; | 1556 | if ((val & PIPECONF_ENABLE) == 0) |
1556 | I915_WRITE(reg, val); | 1557 | return; |
1557 | POSTING_READ(reg); | 1558 | |
1559 | I915_WRITE(reg, val & ~PIPECONF_ENABLE); | ||
1558 | intel_wait_for_pipe_off(dev_priv->dev, pipe); | 1560 | intel_wait_for_pipe_off(dev_priv->dev, pipe); |
1559 | } | 1561 | } |
1560 | 1562 | ||
@@ -1577,9 +1579,10 @@ static void intel_enable_plane(struct drm_i915_private *dev_priv, | |||
1577 | 1579 | ||
1578 | reg = DSPCNTR(plane); | 1580 | reg = DSPCNTR(plane); |
1579 | val = I915_READ(reg); | 1581 | val = I915_READ(reg); |
1580 | val |= DISPLAY_PLANE_ENABLE; | 1582 | if (val & DISPLAY_PLANE_ENABLE) |
1581 | I915_WRITE(reg, val); | 1583 | return; |
1582 | POSTING_READ(reg); | 1584 | |
1585 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); | ||
1583 | intel_wait_for_vblank(dev_priv->dev, pipe); | 1586 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1584 | } | 1587 | } |
1585 | 1588 | ||
@@ -1610,9 +1613,10 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv, | |||
1610 | 1613 | ||
1611 | reg = DSPCNTR(plane); | 1614 | reg = DSPCNTR(plane); |
1612 | val = I915_READ(reg); | 1615 | val = I915_READ(reg); |
1613 | val &= ~DISPLAY_PLANE_ENABLE; | 1616 | if ((val & DISPLAY_PLANE_ENABLE) == 0) |
1614 | I915_WRITE(reg, val); | 1617 | return; |
1615 | POSTING_READ(reg); | 1618 | |
1619 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); | ||
1616 | intel_flush_display_plane(dev_priv, plane); | 1620 | intel_flush_display_plane(dev_priv, plane); |
1617 | intel_wait_for_vblank(dev_priv->dev, pipe); | 1621 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1618 | } | 1622 | } |
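All four pipe/plane helpers above are converted to the same idempotent shape, so callers need not track whether the hardware is already in the requested state. A hedged distillation of the enable path, with ENABLE_BIT standing in for PIPECONF_ENABLE or DISPLAY_PLANE_ENABLE (the disable path negates the test):

    val = I915_READ(reg);
    if (val & ENABLE_BIT)
            return;                          /* already enabled: nothing to do */

    I915_WRITE(reg, val | ENABLE_BIT);
    intel_wait_for_vblank(dev_priv->dev, pipe);  /* the wait reads the hardware,
                                                  * covering what the dropped
                                                  * POSTING_READ used to do */

Presumably the same reasoning lets the standalone POSTING_READ calls before intel_wait_for_vblank() be removed in the FBC and PLL hunks below.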
@@ -1769,7 +1773,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1769 | return; | 1773 | return; |
1770 | 1774 | ||
1771 | I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); | 1775 | I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); |
1772 | POSTING_READ(DPFC_CONTROL); | ||
1773 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1776 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1774 | } | 1777 | } |
1775 | 1778 | ||
@@ -1861,7 +1864,6 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1861 | return; | 1864 | return; |
1862 | 1865 | ||
1863 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); | 1866 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); |
1864 | POSTING_READ(ILK_DPFC_CONTROL); | ||
1865 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1867 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1866 | } | 1868 | } |
1867 | 1869 | ||
@@ -3883,10 +3885,7 @@ static bool g4x_compute_srwm(struct drm_device *dev, | |||
3883 | display, cursor); | 3885 | display, cursor); |
3884 | } | 3886 | } |
3885 | 3887 | ||
3886 | static inline bool single_plane_enabled(unsigned int mask) | 3888 | #define single_plane_enabled(mask) is_power_of_2(mask) |
3887 | { | ||
3888 | return mask && (mask & -mask) == 0; | ||
3889 | } | ||
3890 | 3889 | ||
3891 | static void g4x_update_wm(struct drm_device *dev) | 3890 | static void g4x_update_wm(struct drm_device *dev) |
3892 | { | 3891 | { |
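The macro swap fixes a real predicate bug as well as shortening the code: for any non-zero mask, mask & -mask isolates the lowest set bit and is therefore never zero, so the old helper returned false even when exactly one plane was enabled. The correct single-bit test masks with mask - 1 instead, which is what is_power_of_2() from <linux/log2.h> does:

    /* mask = 0x2 (one plane):  0x2 & -0x2 = 0x2, != 0  -> old helper: false
     *                          0x2 & 0x1  == 0         -> is_power_of_2: true */
    static inline bool is_power_of_2(unsigned long n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }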
@@ -5777,7 +5776,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) | |||
5777 | 5776 | ||
5778 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | 5777 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
5779 | I915_WRITE(dpll_reg, dpll); | 5778 | I915_WRITE(dpll_reg, dpll); |
5780 | POSTING_READ(dpll_reg); | ||
5781 | intel_wait_for_vblank(dev, pipe); | 5779 | intel_wait_for_vblank(dev, pipe); |
5782 | 5780 | ||
5783 | dpll = I915_READ(dpll_reg); | 5781 | dpll = I915_READ(dpll_reg); |
@@ -5821,7 +5819,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
5821 | 5819 | ||
5822 | dpll |= DISPLAY_RATE_SELECT_FPA1; | 5820 | dpll |= DISPLAY_RATE_SELECT_FPA1; |
5823 | I915_WRITE(dpll_reg, dpll); | 5821 | I915_WRITE(dpll_reg, dpll); |
5824 | dpll = I915_READ(dpll_reg); | ||
5825 | intel_wait_for_vblank(dev, pipe); | 5822 | intel_wait_for_vblank(dev, pipe); |
5826 | dpll = I915_READ(dpll_reg); | 5823 | dpll = I915_READ(dpll_reg); |
5827 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) | 5824 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) |
@@ -6933,7 +6930,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
6933 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | 6930 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); |
6934 | if (pcu_mbox & (1<<31)) { /* OC supported */ | 6931 | if (pcu_mbox & (1<<31)) { /* OC supported */ |
6935 | max_freq = pcu_mbox & 0xff; | 6932 | max_freq = pcu_mbox & 0xff; |
6936 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 100); | 6933 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); |
6937 | } | 6934 | } |
6938 | 6935 | ||
6939 | /* In units of 100MHz */ | 6936 | /* In units of 100MHz */ |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d29e33f815d..0daefca5cbb 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1957,9 +1957,9 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1957 | DP_NO_AUX_HANDSHAKE_LINK_TRAINING; | 1957 | DP_NO_AUX_HANDSHAKE_LINK_TRAINING; |
1958 | } else { | 1958 | } else { |
1959 | /* if this fails, presume the device is a ghost */ | 1959 | /* if this fails, presume the device is a ghost */ |
1960 | DRM_ERROR("failed to retrieve link info\n"); | 1960 | DRM_INFO("failed to retrieve link info, disabling eDP\n"); |
1961 | intel_dp_destroy(&intel_connector->base); | ||
1962 | intel_dp_encoder_destroy(&intel_dp->base.base); | 1961 | intel_dp_encoder_destroy(&intel_dp->base.base); |
1962 | intel_dp_destroy(&intel_connector->base); | ||
1963 | return; | 1963 | return; |
1964 | } | 1964 | } |
1965 | } | 1965 | } |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 789c47801ba..e9e6f71418a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -65,62 +65,60 @@ render_ring_flush(struct intel_ring_buffer *ring, | |||
65 | u32 cmd; | 65 | u32 cmd; |
66 | int ret; | 66 | int ret; |
67 | 67 | ||
68 | if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { | 68 | /* |
69 | * read/write caches: | ||
70 | * | ||
71 | * I915_GEM_DOMAIN_RENDER is always invalidated, but is | ||
72 | * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is | ||
73 | * also flushed at 2d versus 3d pipeline switches. | ||
74 | * | ||
75 | * read-only caches: | ||
76 | * | ||
77 | * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if | ||
78 | * MI_READ_FLUSH is set, and is always flushed on 965. | ||
79 | * | ||
80 | * I915_GEM_DOMAIN_COMMAND may not exist? | ||
81 | * | ||
82 | * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is | ||
83 | * invalidated when MI_EXE_FLUSH is set. | ||
84 | * | ||
85 | * I915_GEM_DOMAIN_VERTEX, which exists on 965, is | ||
86 | * invalidated with every MI_FLUSH. | ||
87 | * | ||
88 | * TLBs: | ||
89 | * | ||
90 | * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND | ||
91 | * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and | ||
92 | * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER | ||
93 | * are flushed at any MI_FLUSH. | ||
94 | */ | ||
95 | |||
96 | cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; | ||
97 | if ((invalidate_domains|flush_domains) & | ||
98 | I915_GEM_DOMAIN_RENDER) | ||
99 | cmd &= ~MI_NO_WRITE_FLUSH; | ||
100 | if (INTEL_INFO(dev)->gen < 4) { | ||
69 | /* | 101 | /* |
70 | * read/write caches: | 102 | * On the 965, the sampler cache always gets flushed |
71 | * | 103 | * and this bit is reserved. |
72 | * I915_GEM_DOMAIN_RENDER is always invalidated, but is | ||
73 | * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is | ||
74 | * also flushed at 2d versus 3d pipeline switches. | ||
75 | * | ||
76 | * read-only caches: | ||
77 | * | ||
78 | * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if | ||
79 | * MI_READ_FLUSH is set, and is always flushed on 965. | ||
80 | * | ||
81 | * I915_GEM_DOMAIN_COMMAND may not exist? | ||
82 | * | ||
83 | * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is | ||
84 | * invalidated when MI_EXE_FLUSH is set. | ||
85 | * | ||
86 | * I915_GEM_DOMAIN_VERTEX, which exists on 965, is | ||
87 | * invalidated with every MI_FLUSH. | ||
88 | * | ||
89 | * TLBs: | ||
90 | * | ||
91 | * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND | ||
92 | * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and | ||
93 | * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER | ||
94 | * are flushed at any MI_FLUSH. | ||
95 | */ | 104 | */ |
105 | if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) | ||
106 | cmd |= MI_READ_FLUSH; | ||
107 | } | ||
108 | if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) | ||
109 | cmd |= MI_EXE_FLUSH; | ||
96 | 110 | ||
97 | cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; | 111 | if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && |
98 | if ((invalidate_domains|flush_domains) & | 112 | (IS_G4X(dev) || IS_GEN5(dev))) |
99 | I915_GEM_DOMAIN_RENDER) | 113 | cmd |= MI_INVALIDATE_ISP; |
100 | cmd &= ~MI_NO_WRITE_FLUSH; | ||
101 | if (INTEL_INFO(dev)->gen < 4) { | ||
102 | /* | ||
103 | * On the 965, the sampler cache always gets flushed | ||
104 | * and this bit is reserved. | ||
105 | */ | ||
106 | if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) | ||
107 | cmd |= MI_READ_FLUSH; | ||
108 | } | ||
109 | if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) | ||
110 | cmd |= MI_EXE_FLUSH; | ||
111 | |||
112 | if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && | ||
113 | (IS_G4X(dev) || IS_GEN5(dev))) | ||
114 | cmd |= MI_INVALIDATE_ISP; | ||
115 | 114 | ||
116 | ret = intel_ring_begin(ring, 2); | 115 | ret = intel_ring_begin(ring, 2); |
117 | if (ret) | 116 | if (ret) |
118 | return ret; | 117 | return ret; |
119 | 118 | ||
120 | intel_ring_emit(ring, cmd); | 119 | intel_ring_emit(ring, cmd); |
121 | intel_ring_emit(ring, MI_NOOP); | 120 | intel_ring_emit(ring, MI_NOOP); |
122 | intel_ring_advance(ring); | 121 | intel_ring_advance(ring); |
123 | } | ||
124 | 122 | ||
125 | return 0; | 123 | return 0; |
126 | } | 124 | } |
@@ -568,9 +566,6 @@ bsd_ring_flush(struct intel_ring_buffer *ring, | |||
568 | { | 566 | { |
569 | int ret; | 567 | int ret; |
570 | 568 | ||
571 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) | ||
572 | return 0; | ||
573 | |||
574 | ret = intel_ring_begin(ring, 2); | 569 | ret = intel_ring_begin(ring, 2); |
575 | if (ret) | 570 | if (ret) |
576 | return ret; | 571 | return ret; |
@@ -1056,9 +1051,6 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, | |||
1056 | uint32_t cmd; | 1051 | uint32_t cmd; |
1057 | int ret; | 1052 | int ret; |
1058 | 1053 | ||
1059 | if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0) | ||
1060 | return 0; | ||
1061 | |||
1062 | ret = intel_ring_begin(ring, 4); | 1054 | ret = intel_ring_begin(ring, 4); |
1063 | if (ret) | 1055 | if (ret) |
1064 | return ret; | 1056 | return ret; |
@@ -1230,9 +1222,6 @@ static int blt_ring_flush(struct intel_ring_buffer *ring, | |||
1230 | uint32_t cmd; | 1222 | uint32_t cmd; |
1231 | int ret; | 1223 | int ret; |
1232 | 1224 | ||
1233 | if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0) | ||
1234 | return 0; | ||
1235 | |||
1236 | ret = blt_ring_begin(ring, 4); | 1225 | ret = blt_ring_begin(ring, 4); |
1237 | if (ret) | 1226 | if (ret) |
1238 | return ret; | 1227 | return ret; |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 3cd3234ba0a..10e41af6b02 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -957,7 +957,11 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
957 | /* adjust pixel clock as needed */ | 957 | /* adjust pixel clock as needed */ |
958 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); | 958 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); |
959 | 959 | ||
960 | if (ASIC_IS_AVIVO(rdev)) | 960 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
961 | /* TV seems to prefer the legacy algo on some boards */ | ||
962 | radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | ||
963 | &ref_div, &post_div); | ||
964 | else if (ASIC_IS_AVIVO(rdev)) | ||
961 | radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | 965 | radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, |
962 | &ref_div, &post_div); | 966 | &ref_div, &post_div); |
963 | else | 967 | else |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index cf7c8d5b4ec..cf602e2d071 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -448,7 +448,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
448 | 448 | ||
449 | bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) | 449 | bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) |
450 | { | 450 | { |
451 | int edid_info; | 451 | int edid_info, size; |
452 | struct edid *edid; | 452 | struct edid *edid; |
453 | unsigned char *raw; | 453 | unsigned char *raw; |
454 | edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); | 454 | edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); |
@@ -456,11 +456,12 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) | |||
456 | return false; | 456 | return false; |
457 | 457 | ||
458 | raw = rdev->bios + edid_info; | 458 | raw = rdev->bios + edid_info; |
459 | edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL); | 459 | size = EDID_LENGTH * (raw[0x7e] + 1); |
460 | edid = kmalloc(size, GFP_KERNEL); | ||
460 | if (edid == NULL) | 461 | if (edid == NULL) |
461 | return false; | 462 | return false; |
462 | 463 | ||
463 | memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1)); | 464 | memcpy((unsigned char *)edid, raw, size); |
464 | 465 | ||
465 | if (!drm_edid_is_valid(edid)) { | 466 | if (!drm_edid_is_valid(edid)) { |
466 | kfree(edid); | 467 | kfree(edid); |
@@ -468,6 +469,7 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) | |||
468 | } | 469 | } |
469 | 470 | ||
470 | rdev->mode_info.bios_hardcoded_edid = edid; | 471 | rdev->mode_info.bios_hardcoded_edid = edid; |
472 | rdev->mode_info.bios_hardcoded_edid_size = size; | ||
471 | return true; | 473 | return true; |
472 | } | 474 | } |
473 | 475 | ||
@@ -475,8 +477,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) | |||
475 | struct edid * | 477 | struct edid * |
476 | radeon_bios_get_hardcoded_edid(struct radeon_device *rdev) | 478 | radeon_bios_get_hardcoded_edid(struct radeon_device *rdev) |
477 | { | 479 | { |
478 | if (rdev->mode_info.bios_hardcoded_edid) | 480 | struct edid *edid; |
479 | return rdev->mode_info.bios_hardcoded_edid; | 481 | |
482 | if (rdev->mode_info.bios_hardcoded_edid) { | ||
483 | edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); | ||
484 | if (edid) { | ||
485 | memcpy((unsigned char *)edid, | ||
486 | (unsigned char *)rdev->mode_info.bios_hardcoded_edid, | ||
487 | rdev->mode_info.bios_hardcoded_edid_size); | ||
488 | return edid; | ||
489 | } | ||
490 | } | ||
480 | return NULL; | 491 | return NULL; |
481 | } | 492 | } |
482 | 493 | ||
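Returning a fresh copy matters because callers of the EDID helpers own and eventually kfree() what they get back; handing out the cached BIOS pointer would let the first caller free it for everyone. The kmalloc+memcpy pair above is equivalent to kmemdup(), so a more compact sketch of the same logic would be:

    struct edid *edid = NULL;

    if (rdev->mode_info.bios_hardcoded_edid)
            edid = kmemdup(rdev->mode_info.bios_hardcoded_edid,
                           rdev->mode_info.bios_hardcoded_edid_size,
                           GFP_KERNEL);
    return edid;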
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 28c7961cd19..2ef6d513506 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -633,6 +633,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector, | |||
633 | static enum drm_connector_status | 633 | static enum drm_connector_status |
634 | radeon_vga_detect(struct drm_connector *connector, bool force) | 634 | radeon_vga_detect(struct drm_connector *connector, bool force) |
635 | { | 635 | { |
636 | struct drm_device *dev = connector->dev; | ||
637 | struct radeon_device *rdev = dev->dev_private; | ||
636 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 638 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
637 | struct drm_encoder *encoder; | 639 | struct drm_encoder *encoder; |
638 | struct drm_encoder_helper_funcs *encoder_funcs; | 640 | struct drm_encoder_helper_funcs *encoder_funcs; |
@@ -683,6 +685,17 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
683 | 685 | ||
684 | if (ret == connector_status_connected) | 686 | if (ret == connector_status_connected) |
685 | ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); | 687 | ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); |
688 | |||
689 | /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the | ||
690 | * vbios to deal with KVMs. If we have one and are not able to detect a monitor | ||
691 | * by other means, assume the CRT is connected and use that EDID. | ||
692 | */ | ||
693 | if ((!rdev->is_atom_bios) && | ||
694 | (ret == connector_status_disconnected) && | ||
695 | rdev->mode_info.bios_hardcoded_edid_size) { | ||
696 | ret = connector_status_connected; | ||
697 | } | ||
698 | |||
686 | radeon_connector_update_scratch_regs(connector, ret); | 699 | radeon_connector_update_scratch_regs(connector, ret); |
687 | return ret; | 700 | return ret; |
688 | } | 701 | } |
@@ -794,6 +807,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector) | |||
794 | static enum drm_connector_status | 807 | static enum drm_connector_status |
795 | radeon_dvi_detect(struct drm_connector *connector, bool force) | 808 | radeon_dvi_detect(struct drm_connector *connector, bool force) |
796 | { | 809 | { |
810 | struct drm_device *dev = connector->dev; | ||
811 | struct radeon_device *rdev = dev->dev_private; | ||
797 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 812 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
798 | struct drm_encoder *encoder = NULL; | 813 | struct drm_encoder *encoder = NULL; |
799 | struct drm_encoder_helper_funcs *encoder_funcs; | 814 | struct drm_encoder_helper_funcs *encoder_funcs; |
@@ -833,8 +848,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
833 | * you don't really know what's connected to which port as both are digital. | 848 | * you don't really know what's connected to which port as both are digital. |
834 | */ | 849 | */ |
835 | if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { | 850 | if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { |
836 | struct drm_device *dev = connector->dev; | ||
837 | struct radeon_device *rdev = dev->dev_private; | ||
838 | struct drm_connector *list_connector; | 851 | struct drm_connector *list_connector; |
839 | struct radeon_connector *list_radeon_connector; | 852 | struct radeon_connector *list_radeon_connector; |
840 | list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { | 853 | list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { |
@@ -899,6 +912,19 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
899 | ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); | 912 | ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); |
900 | } | 913 | } |
901 | 914 | ||
915 | /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the | ||
916 | * vbios to deal with KVMs. If we have one and are not able to detect a monitor | ||
917 | * by other means, assume the DFP is connected and use that EDID. In most | ||
918 | * cases the DVI port is actually a virtual KVM port connected to the service | ||
919 | * processor. | ||
920 | */ | ||
921 | if ((!rdev->is_atom_bios) && | ||
922 | (ret == connector_status_disconnected) && | ||
923 | rdev->mode_info.bios_hardcoded_edid_size) { | ||
924 | radeon_connector->use_digital = true; | ||
925 | ret = connector_status_connected; | ||
926 | } | ||
927 | |||
902 | out: | 928 | out: |
903 | /* updated in get modes as well since we need to know if it's analog or digital */ | 929 | /* updated in get modes as well since we need to know if it's analog or digital */ |
904 | radeon_connector_update_scratch_regs(connector, ret); | 930 | radeon_connector_update_scratch_regs(connector, ret); |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index e4582814bb7..9c57538231d 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -239,6 +239,7 @@ struct radeon_mode_info { | |||
239 | struct drm_property *underscan_vborder_property; | 239 | struct drm_property *underscan_vborder_property; |
240 | /* hardcoded DFP edid from BIOS */ | 240 | /* hardcoded DFP edid from BIOS */ |
241 | struct edid *bios_hardcoded_edid; | 241 | struct edid *bios_hardcoded_edid; |
242 | int bios_hardcoded_edid_size; | ||
242 | 243 | ||
243 | /* pointer to fbdev info structure */ | 244 | /* pointer to fbdev info structure */ |
244 | struct radeon_fbdev *rfbdev; | 245 | struct radeon_fbdev *rfbdev; |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 2aed03bde4b..08de669e025 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -365,12 +365,14 @@ static ssize_t radeon_set_pm_profile(struct device *dev, | |||
365 | else if (strncmp("high", buf, strlen("high")) == 0) | 365 | else if (strncmp("high", buf, strlen("high")) == 0) |
366 | rdev->pm.profile = PM_PROFILE_HIGH; | 366 | rdev->pm.profile = PM_PROFILE_HIGH; |
367 | else { | 367 | else { |
368 | DRM_ERROR("invalid power profile!\n"); | 368 | count = -EINVAL; |
369 | goto fail; | 369 | goto fail; |
370 | } | 370 | } |
371 | radeon_pm_update_profile(rdev); | 371 | radeon_pm_update_profile(rdev); |
372 | radeon_pm_set_clocks(rdev); | 372 | radeon_pm_set_clocks(rdev); |
373 | } | 373 | } else |
374 | count = -EINVAL; | ||
375 | |||
374 | fail: | 376 | fail: |
375 | mutex_unlock(&rdev->pm.mutex); | 377 | mutex_unlock(&rdev->pm.mutex); |
376 | 378 | ||
@@ -413,7 +415,7 @@ static ssize_t radeon_set_pm_method(struct device *dev, | |||
413 | mutex_unlock(&rdev->pm.mutex); | 415 | mutex_unlock(&rdev->pm.mutex); |
414 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); | 416 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); |
415 | } else { | 417 | } else { |
416 | DRM_ERROR("invalid power method!\n"); | 418 | count = -EINVAL; |
417 | goto fail; | 419 | goto fail; |
418 | } | 420 | } |
419 | radeon_pm_compute_clocks(rdev); | 421 | radeon_pm_compute_clocks(rdev); |
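Replacing the DRM_ERROR spam with count = -EINVAL works because a sysfs ->store() callback reports status through its ssize_t return value: returning the byte count signals success, while a negative errno makes the writer's write(2) fail. A hedged caller's-eye sketch (the exact sysfs path depends on the card):

    /* write(2) on .../power_profile with an unrecognised keyword: */
    ssize_t n = write(fd, "bogus", 5);
    /* before: n == 5, silent success plus a DRM_ERROR line in dmesg
     * after:  n == -1 with errno == EINVAL                           */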
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 81131eda554..060ef632787 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -315,11 +315,22 @@ config SENSORS_F71805F | |||
315 | will be called f71805f. | 315 | will be called f71805f. |
316 | 316 | ||
317 | config SENSORS_F71882FG | 317 | config SENSORS_F71882FG |
318 | tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000" | 318 | tristate "Fintek F71882FG and compatibles" |
319 | help | 319 | help |
320 | If you say yes here you get support for hardware monitoring | 320 | If you say yes here you get support for hardware monitoring |
321 | features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG, | 321 | features of many Fintek Super-I/O (LPC) chips. The currently |
322 | F71889FG and F8000 Super-I/O chips. | 322 | supported chips are: |
323 | F71808E | ||
324 | F71858FG | ||
325 | F71862FG | ||
326 | F71863FG | ||
327 | F71869F/E | ||
328 | F71882FG | ||
329 | F71883FG | ||
330 | F71889FG/ED/A | ||
331 | F8000 | ||
332 | F81801U | ||
333 | F81865F | ||
323 | 334 | ||
324 | This driver can also be built as a module. If so, the module | 335 | This driver can also be built as a module. If so, the module |
325 | will be called f71882fg. | 336 | will be called f71882fg. |
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index a4d430ee7e2..ca07a32447c 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c | |||
@@ -54,7 +54,9 @@ | |||
54 | #define SIO_F71882_ID 0x0541 /* Chipset ID */ | 54 | #define SIO_F71882_ID 0x0541 /* Chipset ID */ |
55 | #define SIO_F71889_ID 0x0723 /* Chipset ID */ | 55 | #define SIO_F71889_ID 0x0723 /* Chipset ID */ |
56 | #define SIO_F71889E_ID 0x0909 /* Chipset ID */ | 56 | #define SIO_F71889E_ID 0x0909 /* Chipset ID */ |
57 | #define SIO_F71889A_ID 0x1005 /* Chipset ID */ | ||
57 | #define SIO_F8000_ID 0x0581 /* Chipset ID */ | 58 | #define SIO_F8000_ID 0x0581 /* Chipset ID */ |
59 | #define SIO_F81865_ID 0x0704 /* Chipset ID */ | ||
58 | 60 | ||
59 | #define REGION_LENGTH 8 | 61 | #define REGION_LENGTH 8 |
60 | #define ADDR_REG_OFFSET 5 | 62 | #define ADDR_REG_OFFSET 5 |
@@ -106,7 +108,7 @@ module_param(force_id, ushort, 0); | |||
106 | MODULE_PARM_DESC(force_id, "Override the detected device ID"); | 108 | MODULE_PARM_DESC(force_id, "Override the detected device ID"); |
107 | 109 | ||
108 | enum chips { f71808e, f71858fg, f71862fg, f71869, f71882fg, f71889fg, | 110 | enum chips { f71808e, f71858fg, f71862fg, f71869, f71882fg, f71889fg, |
109 | f71889ed, f8000 }; | 111 | f71889ed, f71889a, f8000, f81865f }; |
110 | 112 | ||
111 | static const char *f71882fg_names[] = { | 113 | static const char *f71882fg_names[] = { |
112 | "f71808e", | 114 | "f71808e", |
@@ -114,42 +116,76 @@ static const char *f71882fg_names[] = { | |||
114 | "f71862fg", | 116 | "f71862fg", |
115 | "f71869", /* Both f71869f and f71869e, reg. compatible and same id */ | 117 | "f71869", /* Both f71869f and f71869e, reg. compatible and same id */ |
116 | "f71882fg", | 118 | "f71882fg", |
117 | "f71889fg", | 119 | "f71889fg", /* f81801u too, same id */ |
118 | "f71889ed", | 120 | "f71889ed", |
121 | "f71889a", | ||
119 | "f8000", | 122 | "f8000", |
123 | "f81865f", | ||
120 | }; | 124 | }; |
121 | 125 | ||
122 | static const char f71882fg_has_in[8][F71882FG_MAX_INS] = { | 126 | static const char f71882fg_has_in[][F71882FG_MAX_INS] = { |
123 | { 1, 1, 1, 1, 1, 1, 0, 1, 1 }, /* f71808e */ | 127 | [f71808e] = { 1, 1, 1, 1, 1, 1, 0, 1, 1 }, |
124 | { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f71858fg */ | 128 | [f71858fg] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, |
125 | { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71862fg */ | 129 | [f71862fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, |
126 | { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71869 */ | 130 | [f71869] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, |
127 | { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71882fg */ | 131 | [f71882fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, |
128 | { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889fg */ | 132 | [f71889fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, |
129 | { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889ed */ | 133 | [f71889ed] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, |
130 | { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f8000 */ | 134 | [f71889a] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, |
135 | [f8000] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, | ||
136 | [f81865f] = { 1, 1, 1, 1, 1, 1, 1, 0, 0 }, | ||
131 | }; | 137 | }; |
132 | 138 | ||
133 | static const char f71882fg_has_in1_alarm[8] = { | 139 | static const char f71882fg_has_in1_alarm[] = { |
134 | 0, /* f71808e */ | 140 | [f71808e] = 0, |
135 | 0, /* f71858fg */ | 141 | [f71858fg] = 0, |
136 | 0, /* f71862fg */ | 142 | [f71862fg] = 0, |
137 | 0, /* f71869 */ | 143 | [f71869] = 0, |
138 | 1, /* f71882fg */ | 144 | [f71882fg] = 1, |
139 | 1, /* f71889fg */ | 145 | [f71889fg] = 1, |
140 | 1, /* f71889ed */ | 146 | [f71889ed] = 1, |
141 | 0, /* f8000 */ | 147 | [f71889a] = 1, |
148 | [f8000] = 0, | ||
149 | [f81865f] = 1, | ||
142 | }; | 150 | }; |
143 | 151 | ||
144 | static const char f71882fg_has_beep[8] = { | 152 | static const char f71882fg_has_beep[] = { |
145 | 0, /* f71808e */ | 153 | [f71808e] = 0, |
146 | 0, /* f71858fg */ | 154 | [f71858fg] = 0, |
147 | 1, /* f71862fg */ | 155 | [f71862fg] = 1, |
148 | 1, /* f71869 */ | 156 | [f71869] = 1, |
149 | 1, /* f71882fg */ | 157 | [f71882fg] = 1, |
150 | 1, /* f71889fg */ | 158 | [f71889fg] = 1, |
151 | 1, /* f71889ed */ | 159 | [f71889ed] = 1, |
152 | 0, /* f8000 */ | 160 | [f71889a] = 1, |
161 | [f8000] = 0, | ||
162 | [f81865f] = 1, | ||
163 | }; | ||
164 | |||
165 | static const char f71882fg_nr_fans[] = { | ||
166 | [f71808e] = 3, | ||
167 | [f71858fg] = 3, | ||
168 | [f71862fg] = 3, | ||
169 | [f71869] = 3, | ||
170 | [f71882fg] = 4, | ||
171 | [f71889fg] = 3, | ||
172 | [f71889ed] = 3, | ||
173 | [f71889a] = 3, | ||
174 | [f8000] = 3, | ||
175 | [f81865f] = 2, | ||
176 | }; | ||
177 | |||
178 | static const char f71882fg_nr_temps[] = { | ||
179 | [f71808e] = 2, | ||
180 | [f71858fg] = 3, | ||
181 | [f71862fg] = 3, | ||
182 | [f71869] = 3, | ||
183 | [f71882fg] = 3, | ||
184 | [f71889fg] = 3, | ||
185 | [f71889ed] = 3, | ||
186 | [f71889a] = 3, | ||
187 | [f8000] = 3, | ||
188 | [f81865f] = 2, | ||
153 | }; | 189 | }; |
154 | 190 | ||
155 | static struct platform_device *f71882fg_pdev; | 191 | static struct platform_device *f71882fg_pdev; |
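Indexing these tables with C99 designated initializers is what makes inserting f71889a and f81865f into the middle of enum chips safe: each row stays attached to its chip constant no matter how the enum is reordered, where positional initializers would have silently shifted every later row. The two new count tables also replace the scattered type ternaries with a plain lookup, as the probe/remove hunks below use:

    int nr_fans  = f71882fg_nr_fans[data->type];    /* e.g. [f81865f] = 2 */
    int nr_temps = f71882fg_nr_temps[data->type];   /* e.g. [f71808e] = 2 */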
@@ -1071,9 +1107,9 @@ static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr) | |||
1071 | static struct f71882fg_data *f71882fg_update_device(struct device *dev) | 1107 | static struct f71882fg_data *f71882fg_update_device(struct device *dev) |
1072 | { | 1108 | { |
1073 | struct f71882fg_data *data = dev_get_drvdata(dev); | 1109 | struct f71882fg_data *data = dev_get_drvdata(dev); |
1110 | int nr_fans = f71882fg_nr_fans[data->type]; | ||
1111 | int nr_temps = f71882fg_nr_temps[data->type]; | ||
1074 | int nr, reg, point; | 1112 | int nr, reg, point; |
1075 | int nr_fans = (data->type == f71882fg) ? 4 : 3; | ||
1076 | int nr_temps = (data->type == f71808e) ? 2 : 3; | ||
1077 | 1113 | ||
1078 | mutex_lock(&data->update_lock); | 1114 | mutex_lock(&data->update_lock); |
1079 | 1115 | ||
@@ -2042,8 +2078,9 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) | |||
2042 | { | 2078 | { |
2043 | struct f71882fg_data *data; | 2079 | struct f71882fg_data *data; |
2044 | struct f71882fg_sio_data *sio_data = pdev->dev.platform_data; | 2080 | struct f71882fg_sio_data *sio_data = pdev->dev.platform_data; |
2045 | int err, i, nr_fans = (sio_data->type == f71882fg) ? 4 : 3; | 2081 | int nr_fans = f71882fg_nr_fans[sio_data->type]; |
2046 | int nr_temps = (sio_data->type == f71808e) ? 2 : 3; | 2082 | int nr_temps = f71882fg_nr_temps[sio_data->type]; |
2083 | int err, i; | ||
2047 | u8 start_reg, reg; | 2084 | u8 start_reg, reg; |
2048 | 2085 | ||
2049 | data = kzalloc(sizeof(struct f71882fg_data), GFP_KERNEL); | 2086 | data = kzalloc(sizeof(struct f71882fg_data), GFP_KERNEL); |
@@ -2138,6 +2175,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) | |||
2138 | /* Fall through to select correct fan/pwm reg bank! */ | 2175 | /* Fall through to select correct fan/pwm reg bank! */ |
2139 | case f71889fg: | 2176 | case f71889fg: |
2140 | case f71889ed: | 2177 | case f71889ed: |
2178 | case f71889a: | ||
2141 | reg = f71882fg_read8(data, F71882FG_REG_FAN_FAULT_T); | 2179 | reg = f71882fg_read8(data, F71882FG_REG_FAN_FAULT_T); |
2142 | if (reg & F71882FG_FAN_NEG_TEMP_EN) | 2180 | if (reg & F71882FG_FAN_NEG_TEMP_EN) |
2143 | data->auto_point_temp_signed = 1; | 2181 | data->auto_point_temp_signed = 1; |
@@ -2163,16 +2201,12 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) | |||
2163 | case f71862fg: | 2201 | case f71862fg: |
2164 | err = (data->pwm_enable & 0x15) != 0x15; | 2202 | err = (data->pwm_enable & 0x15) != 0x15; |
2165 | break; | 2203 | break; |
2166 | case f71808e: | ||
2167 | case f71869: | ||
2168 | case f71882fg: | ||
2169 | case f71889fg: | ||
2170 | case f71889ed: | ||
2171 | err = 0; | ||
2172 | break; | ||
2173 | case f8000: | 2204 | case f8000: |
2174 | err = data->pwm_enable & 0x20; | 2205 | err = data->pwm_enable & 0x20; |
2175 | break; | 2206 | break; |
2207 | default: | ||
2208 | err = 0; | ||
2209 | break; | ||
2176 | } | 2210 | } |
2177 | if (err) { | 2211 | if (err) { |
2178 | dev_err(&pdev->dev, | 2212 | dev_err(&pdev->dev, |
@@ -2199,6 +2233,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) | |||
2199 | case f71869: | 2233 | case f71869: |
2200 | case f71889fg: | 2234 | case f71889fg: |
2201 | case f71889ed: | 2235 | case f71889ed: |
2236 | case f71889a: | ||
2202 | for (i = 0; i < nr_fans; i++) { | 2237 | for (i = 0; i < nr_fans; i++) { |
2203 | data->pwm_auto_point_mapping[i] = | 2238 | data->pwm_auto_point_mapping[i] = |
2204 | f71882fg_read8(data, | 2239 | f71882fg_read8(data, |
@@ -2276,8 +2311,9 @@ exit_free: | |||
2276 | static int f71882fg_remove(struct platform_device *pdev) | 2311 | static int f71882fg_remove(struct platform_device *pdev) |
2277 | { | 2312 | { |
2278 | struct f71882fg_data *data = platform_get_drvdata(pdev); | 2313 | struct f71882fg_data *data = platform_get_drvdata(pdev); |
2279 | int i, nr_fans = (data->type == f71882fg) ? 4 : 3; | 2314 | int nr_fans = f71882fg_nr_fans[data->type]; |
2280 | int nr_temps = (data->type == f71808e) ? 2 : 3; | 2315 | int nr_temps = f71882fg_nr_temps[data->type]; |
2316 | int i; | ||
2281 | u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); | 2317 | u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); |
2282 | 2318 | ||
2283 | if (data->hwmon_dev) | 2319 | if (data->hwmon_dev) |
@@ -2406,9 +2442,15 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, | |||
2406 | case SIO_F71889E_ID: | 2442 | case SIO_F71889E_ID: |
2407 | sio_data->type = f71889ed; | 2443 | sio_data->type = f71889ed; |
2408 | break; | 2444 | break; |
2445 | case SIO_F71889A_ID: | ||
2446 | sio_data->type = f71889a; | ||
2447 | break; | ||
2409 | case SIO_F8000_ID: | 2448 | case SIO_F8000_ID: |
2410 | sio_data->type = f8000; | 2449 | sio_data->type = f8000; |
2411 | break; | 2450 | break; |
2451 | case SIO_F81865_ID: | ||
2452 | sio_data->type = f81865f; | ||
2453 | break; | ||
2412 | default: | 2454 | default: |
2413 | pr_info("Unsupported Fintek device: %04x\n", | 2455 | pr_info("Unsupported Fintek device: %04x\n", |
2414 | (unsigned int)devid); | 2456 | (unsigned int)devid); |
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c index 6474512f49b..edfb92e4173 100644 --- a/drivers/hwmon/pmbus_core.c +++ b/drivers/hwmon/pmbus_core.c | |||
@@ -752,7 +752,7 @@ static void pmbus_add_boolean_cmp(struct pmbus_data *data, | |||
752 | static void pmbus_add_sensor(struct pmbus_data *data, | 752 | static void pmbus_add_sensor(struct pmbus_data *data, |
753 | const char *name, const char *type, int seq, | 753 | const char *name, const char *type, int seq, |
754 | int page, int reg, enum pmbus_sensor_classes class, | 754 | int page, int reg, enum pmbus_sensor_classes class, |
755 | bool update) | 755 | bool update, bool readonly) |
756 | { | 756 | { |
757 | struct pmbus_sensor *sensor; | 757 | struct pmbus_sensor *sensor; |
758 | 758 | ||
@@ -765,7 +765,7 @@ static void pmbus_add_sensor(struct pmbus_data *data, | |||
765 | sensor->reg = reg; | 765 | sensor->reg = reg; |
766 | sensor->class = class; | 766 | sensor->class = class; |
767 | sensor->update = update; | 767 | sensor->update = update; |
768 | if (update) | 768 | if (readonly) |
769 | PMBUS_ADD_GET_ATTR(data, sensor->name, sensor, | 769 | PMBUS_ADD_GET_ATTR(data, sensor->name, sensor, |
770 | data->num_sensors); | 770 | data->num_sensors); |
771 | else | 771 | else |
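The extra parameter untangles two things the old code conflated: update appears to say whether the cached value must be re-read from the chip on access, while readonly says whether the sysfs attribute gets a setter. A live input reading wants both true; a user-writable limit wants both false, as the call-site hunks below show:

    /* live reading: refreshed on each access, no setter */
    pmbus_add_sensor(data, "in", "input", in_index, 0,
                     PMBUS_READ_VIN, PSC_VOLTAGE_IN, true, true);
    /* warn limit: cached, user-writable */
    pmbus_add_sensor(data, "in", "min", in_index, 0,
                     PMBUS_VIN_UV_WARN_LIMIT, PSC_VOLTAGE_IN, false, false);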
@@ -916,14 +916,14 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
916 | 916 | ||
917 | i0 = data->num_sensors; | 917 | i0 = data->num_sensors; |
918 | pmbus_add_label(data, "in", in_index, "vin", 0); | 918 | pmbus_add_label(data, "in", in_index, "vin", 0); |
919 | pmbus_add_sensor(data, "in", "input", in_index, | 919 | pmbus_add_sensor(data, "in", "input", in_index, 0, |
920 | 0, PMBUS_READ_VIN, PSC_VOLTAGE_IN, true); | 920 | PMBUS_READ_VIN, PSC_VOLTAGE_IN, true, true); |
921 | if (pmbus_check_word_register(client, 0, | 921 | if (pmbus_check_word_register(client, 0, |
922 | PMBUS_VIN_UV_WARN_LIMIT)) { | 922 | PMBUS_VIN_UV_WARN_LIMIT)) { |
923 | i1 = data->num_sensors; | 923 | i1 = data->num_sensors; |
924 | pmbus_add_sensor(data, "in", "min", in_index, | 924 | pmbus_add_sensor(data, "in", "min", in_index, |
925 | 0, PMBUS_VIN_UV_WARN_LIMIT, | 925 | 0, PMBUS_VIN_UV_WARN_LIMIT, |
926 | PSC_VOLTAGE_IN, false); | 926 | PSC_VOLTAGE_IN, false, false); |
927 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { | 927 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { |
928 | pmbus_add_boolean_reg(data, "in", "min_alarm", | 928 | pmbus_add_boolean_reg(data, "in", "min_alarm", |
929 | in_index, | 929 | in_index, |
@@ -937,7 +937,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
937 | i1 = data->num_sensors; | 937 | i1 = data->num_sensors; |
938 | pmbus_add_sensor(data, "in", "lcrit", in_index, | 938 | pmbus_add_sensor(data, "in", "lcrit", in_index, |
939 | 0, PMBUS_VIN_UV_FAULT_LIMIT, | 939 | 0, PMBUS_VIN_UV_FAULT_LIMIT, |
940 | PSC_VOLTAGE_IN, false); | 940 | PSC_VOLTAGE_IN, false, false); |
941 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { | 941 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { |
942 | pmbus_add_boolean_reg(data, "in", "lcrit_alarm", | 942 | pmbus_add_boolean_reg(data, "in", "lcrit_alarm", |
943 | in_index, | 943 | in_index, |
@@ -951,7 +951,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
951 | i1 = data->num_sensors; | 951 | i1 = data->num_sensors; |
952 | pmbus_add_sensor(data, "in", "max", in_index, | 952 | pmbus_add_sensor(data, "in", "max", in_index, |
953 | 0, PMBUS_VIN_OV_WARN_LIMIT, | 953 | 0, PMBUS_VIN_OV_WARN_LIMIT, |
954 | PSC_VOLTAGE_IN, false); | 954 | PSC_VOLTAGE_IN, false, false); |
955 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { | 955 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { |
956 | pmbus_add_boolean_reg(data, "in", "max_alarm", | 956 | pmbus_add_boolean_reg(data, "in", "max_alarm", |
957 | in_index, | 957 | in_index, |
@@ -965,7 +965,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
965 | i1 = data->num_sensors; | 965 | i1 = data->num_sensors; |
966 | pmbus_add_sensor(data, "in", "crit", in_index, | 966 | pmbus_add_sensor(data, "in", "crit", in_index, |
967 | 0, PMBUS_VIN_OV_FAULT_LIMIT, | 967 | 0, PMBUS_VIN_OV_FAULT_LIMIT, |
968 | PSC_VOLTAGE_IN, false); | 968 | PSC_VOLTAGE_IN, false, false); |
969 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { | 969 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { |
970 | pmbus_add_boolean_reg(data, "in", "crit_alarm", | 970 | pmbus_add_boolean_reg(data, "in", "crit_alarm", |
971 | in_index, | 971 | in_index, |
@@ -988,7 +988,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
988 | if (info->func[0] & PMBUS_HAVE_VCAP) { | 988 | if (info->func[0] & PMBUS_HAVE_VCAP) { |
989 | pmbus_add_label(data, "in", in_index, "vcap", 0); | 989 | pmbus_add_label(data, "in", in_index, "vcap", 0); |
990 | pmbus_add_sensor(data, "in", "input", in_index, 0, | 990 | pmbus_add_sensor(data, "in", "input", in_index, 0, |
991 | PMBUS_READ_VCAP, PSC_VOLTAGE_IN, true); | 991 | PMBUS_READ_VCAP, PSC_VOLTAGE_IN, true, true); |
992 | in_index++; | 992 | in_index++; |
993 | } | 993 | } |
994 | 994 | ||
@@ -1004,13 +1004,13 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1004 | i0 = data->num_sensors; | 1004 | i0 = data->num_sensors; |
1005 | pmbus_add_label(data, "in", in_index, "vout", page + 1); | 1005 | pmbus_add_label(data, "in", in_index, "vout", page + 1); |
1006 | pmbus_add_sensor(data, "in", "input", in_index, page, | 1006 | pmbus_add_sensor(data, "in", "input", in_index, page, |
1007 | PMBUS_READ_VOUT, PSC_VOLTAGE_OUT, true); | 1007 | PMBUS_READ_VOUT, PSC_VOLTAGE_OUT, true, true); |
1008 | if (pmbus_check_word_register(client, page, | 1008 | if (pmbus_check_word_register(client, page, |
1009 | PMBUS_VOUT_UV_WARN_LIMIT)) { | 1009 | PMBUS_VOUT_UV_WARN_LIMIT)) { |
1010 | i1 = data->num_sensors; | 1010 | i1 = data->num_sensors; |
1011 | pmbus_add_sensor(data, "in", "min", in_index, page, | 1011 | pmbus_add_sensor(data, "in", "min", in_index, page, |
1012 | PMBUS_VOUT_UV_WARN_LIMIT, | 1012 | PMBUS_VOUT_UV_WARN_LIMIT, |
1013 | PSC_VOLTAGE_OUT, false); | 1013 | PSC_VOLTAGE_OUT, false, false); |
1014 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { | 1014 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { |
1015 | pmbus_add_boolean_reg(data, "in", "min_alarm", | 1015 | pmbus_add_boolean_reg(data, "in", "min_alarm", |
1016 | in_index, | 1016 | in_index, |
@@ -1025,7 +1025,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1025 | i1 = data->num_sensors; | 1025 | i1 = data->num_sensors; |
1026 | pmbus_add_sensor(data, "in", "lcrit", in_index, page, | 1026 | pmbus_add_sensor(data, "in", "lcrit", in_index, page, |
1027 | PMBUS_VOUT_UV_FAULT_LIMIT, | 1027 | PMBUS_VOUT_UV_FAULT_LIMIT, |
1028 | PSC_VOLTAGE_OUT, false); | 1028 | PSC_VOLTAGE_OUT, false, false); |
1029 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { | 1029 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { |
1030 | pmbus_add_boolean_reg(data, "in", "lcrit_alarm", | 1030 | pmbus_add_boolean_reg(data, "in", "lcrit_alarm", |
1031 | in_index, | 1031 | in_index, |
@@ -1040,7 +1040,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1040 | i1 = data->num_sensors; | 1040 | i1 = data->num_sensors; |
1041 | pmbus_add_sensor(data, "in", "max", in_index, page, | 1041 | pmbus_add_sensor(data, "in", "max", in_index, page, |
1042 | PMBUS_VOUT_OV_WARN_LIMIT, | 1042 | PMBUS_VOUT_OV_WARN_LIMIT, |
1043 | PSC_VOLTAGE_OUT, false); | 1043 | PSC_VOLTAGE_OUT, false, false); |
1044 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { | 1044 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { |
1045 | pmbus_add_boolean_reg(data, "in", "max_alarm", | 1045 | pmbus_add_boolean_reg(data, "in", "max_alarm", |
1046 | in_index, | 1046 | in_index, |
@@ -1055,7 +1055,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1055 | i1 = data->num_sensors; | 1055 | i1 = data->num_sensors; |
1056 | pmbus_add_sensor(data, "in", "crit", in_index, page, | 1056 | pmbus_add_sensor(data, "in", "crit", in_index, page, |
1057 | PMBUS_VOUT_OV_FAULT_LIMIT, | 1057 | PMBUS_VOUT_OV_FAULT_LIMIT, |
1058 | PSC_VOLTAGE_OUT, false); | 1058 | PSC_VOLTAGE_OUT, false, false); |
1059 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { | 1059 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { |
1060 | pmbus_add_boolean_reg(data, "in", "crit_alarm", | 1060 | pmbus_add_boolean_reg(data, "in", "crit_alarm", |
1061 | in_index, | 1061 | in_index, |
@@ -1088,14 +1088,14 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1088 | if (info->func[0] & PMBUS_HAVE_IIN) { | 1088 | if (info->func[0] & PMBUS_HAVE_IIN) { |
1089 | i0 = data->num_sensors; | 1089 | i0 = data->num_sensors; |
1090 | pmbus_add_label(data, "curr", in_index, "iin", 0); | 1090 | pmbus_add_label(data, "curr", in_index, "iin", 0); |
1091 | pmbus_add_sensor(data, "curr", "input", in_index, | 1091 | pmbus_add_sensor(data, "curr", "input", in_index, 0, |
1092 | 0, PMBUS_READ_IIN, PSC_CURRENT_IN, true); | 1092 | PMBUS_READ_IIN, PSC_CURRENT_IN, true, true); |
1093 | if (pmbus_check_word_register(client, 0, | 1093 | if (pmbus_check_word_register(client, 0, |
1094 | PMBUS_IIN_OC_WARN_LIMIT)) { | 1094 | PMBUS_IIN_OC_WARN_LIMIT)) { |
1095 | i1 = data->num_sensors; | 1095 | i1 = data->num_sensors; |
1096 | pmbus_add_sensor(data, "curr", "max", in_index, | 1096 | pmbus_add_sensor(data, "curr", "max", in_index, |
1097 | 0, PMBUS_IIN_OC_WARN_LIMIT, | 1097 | 0, PMBUS_IIN_OC_WARN_LIMIT, |
1098 | PSC_CURRENT_IN, false); | 1098 | PSC_CURRENT_IN, false, false); |
1099 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { | 1099 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { |
1100 | pmbus_add_boolean_reg(data, "curr", "max_alarm", | 1100 | pmbus_add_boolean_reg(data, "curr", "max_alarm", |
1101 | in_index, | 1101 | in_index, |
@@ -1108,7 +1108,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1108 | i1 = data->num_sensors; | 1108 | i1 = data->num_sensors; |
1109 | pmbus_add_sensor(data, "curr", "crit", in_index, | 1109 | pmbus_add_sensor(data, "curr", "crit", in_index, |
1110 | 0, PMBUS_IIN_OC_FAULT_LIMIT, | 1110 | 0, PMBUS_IIN_OC_FAULT_LIMIT, |
1111 | PSC_CURRENT_IN, false); | 1111 | PSC_CURRENT_IN, false, false); |
1112 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) | 1112 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) |
1113 | pmbus_add_boolean_reg(data, "curr", | 1113 | pmbus_add_boolean_reg(data, "curr", |
1114 | "crit_alarm", | 1114 | "crit_alarm", |
@@ -1131,13 +1131,13 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1131 | i0 = data->num_sensors; | 1131 | i0 = data->num_sensors; |
1132 | pmbus_add_label(data, "curr", in_index, "iout", page + 1); | 1132 | pmbus_add_label(data, "curr", in_index, "iout", page + 1); |
1133 | pmbus_add_sensor(data, "curr", "input", in_index, page, | 1133 | pmbus_add_sensor(data, "curr", "input", in_index, page, |
1134 | PMBUS_READ_IOUT, PSC_CURRENT_OUT, true); | 1134 | PMBUS_READ_IOUT, PSC_CURRENT_OUT, true, true); |
1135 | if (pmbus_check_word_register(client, page, | 1135 | if (pmbus_check_word_register(client, page, |
1136 | PMBUS_IOUT_OC_WARN_LIMIT)) { | 1136 | PMBUS_IOUT_OC_WARN_LIMIT)) { |
1137 | i1 = data->num_sensors; | 1137 | i1 = data->num_sensors; |
1138 | pmbus_add_sensor(data, "curr", "max", in_index, page, | 1138 | pmbus_add_sensor(data, "curr", "max", in_index, page, |
1139 | PMBUS_IOUT_OC_WARN_LIMIT, | 1139 | PMBUS_IOUT_OC_WARN_LIMIT, |
1140 | PSC_CURRENT_OUT, false); | 1140 | PSC_CURRENT_OUT, false, false); |
1141 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { | 1141 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { |
1142 | pmbus_add_boolean_reg(data, "curr", "max_alarm", | 1142 | pmbus_add_boolean_reg(data, "curr", "max_alarm", |
1143 | in_index, | 1143 | in_index, |
@@ -1151,7 +1151,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1151 | i1 = data->num_sensors; | 1151 | i1 = data->num_sensors; |
1152 | pmbus_add_sensor(data, "curr", "lcrit", in_index, page, | 1152 | pmbus_add_sensor(data, "curr", "lcrit", in_index, page, |
1153 | PMBUS_IOUT_UC_FAULT_LIMIT, | 1153 | PMBUS_IOUT_UC_FAULT_LIMIT, |
1154 | PSC_CURRENT_OUT, false); | 1154 | PSC_CURRENT_OUT, false, false); |
1155 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { | 1155 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { |
1156 | pmbus_add_boolean_reg(data, "curr", | 1156 | pmbus_add_boolean_reg(data, "curr", |
1157 | "lcrit_alarm", | 1157 | "lcrit_alarm", |
@@ -1166,7 +1166,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1166 | i1 = data->num_sensors; | 1166 | i1 = data->num_sensors; |
1167 | pmbus_add_sensor(data, "curr", "crit", in_index, page, | 1167 | pmbus_add_sensor(data, "curr", "crit", in_index, page, |
1168 | PMBUS_IOUT_OC_FAULT_LIMIT, | 1168 | PMBUS_IOUT_OC_FAULT_LIMIT, |
1169 | PSC_CURRENT_OUT, false); | 1169 | PSC_CURRENT_OUT, false, false); |
1170 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { | 1170 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { |
1171 | pmbus_add_boolean_reg(data, "curr", | 1171 | pmbus_add_boolean_reg(data, "curr", |
1172 | "crit_alarm", | 1172 | "crit_alarm", |
@@ -1199,13 +1199,13 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1199 | i0 = data->num_sensors; | 1199 | i0 = data->num_sensors; |
1200 | pmbus_add_label(data, "power", in_index, "pin", 0); | 1200 | pmbus_add_label(data, "power", in_index, "pin", 0); |
1201 | pmbus_add_sensor(data, "power", "input", in_index, | 1201 | pmbus_add_sensor(data, "power", "input", in_index, |
1202 | 0, PMBUS_READ_PIN, PSC_POWER, true); | 1202 | 0, PMBUS_READ_PIN, PSC_POWER, true, true); |
1203 | if (pmbus_check_word_register(client, 0, | 1203 | if (pmbus_check_word_register(client, 0, |
1204 | PMBUS_PIN_OP_WARN_LIMIT)) { | 1204 | PMBUS_PIN_OP_WARN_LIMIT)) { |
1205 | i1 = data->num_sensors; | 1205 | i1 = data->num_sensors; |
1206 | pmbus_add_sensor(data, "power", "max", in_index, | 1206 | pmbus_add_sensor(data, "power", "max", in_index, |
1207 | 0, PMBUS_PIN_OP_WARN_LIMIT, PSC_POWER, | 1207 | 0, PMBUS_PIN_OP_WARN_LIMIT, PSC_POWER, |
1208 | false); | 1208 | false, false); |
1209 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) | 1209 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) |
1210 | pmbus_add_boolean_reg(data, "power", | 1210 | pmbus_add_boolean_reg(data, "power", |
1211 | "alarm", | 1211 | "alarm", |
@@ -1228,7 +1228,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1228 | i0 = data->num_sensors; | 1228 | i0 = data->num_sensors; |
1229 | pmbus_add_label(data, "power", in_index, "pout", page + 1); | 1229 | pmbus_add_label(data, "power", in_index, "pout", page + 1); |
1230 | pmbus_add_sensor(data, "power", "input", in_index, page, | 1230 | pmbus_add_sensor(data, "power", "input", in_index, page, |
1231 | PMBUS_READ_POUT, PSC_POWER, true); | 1231 | PMBUS_READ_POUT, PSC_POWER, true, true); |
1232 | /* | 1232 | /* |
1233 | * Per hwmon sysfs API, power_cap is to be used to limit output | 1233 | * Per hwmon sysfs API, power_cap is to be used to limit output |
1234 | * power. | 1234 | * power. |
@@ -1241,7 +1241,8 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1241 | if (pmbus_check_word_register(client, page, PMBUS_POUT_MAX)) { | 1241 | if (pmbus_check_word_register(client, page, PMBUS_POUT_MAX)) { |
1242 | i1 = data->num_sensors; | 1242 | i1 = data->num_sensors; |
1243 | pmbus_add_sensor(data, "power", "cap", in_index, page, | 1243 | pmbus_add_sensor(data, "power", "cap", in_index, page, |
1244 | PMBUS_POUT_MAX, PSC_POWER, false); | 1244 | PMBUS_POUT_MAX, PSC_POWER, |
1245 | false, false); | ||
1245 | need_alarm = true; | 1246 | need_alarm = true; |
1246 | } | 1247 | } |
1247 | if (pmbus_check_word_register(client, page, | 1248 | if (pmbus_check_word_register(client, page, |
@@ -1249,7 +1250,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1249 | i1 = data->num_sensors; | 1250 | i1 = data->num_sensors; |
1250 | pmbus_add_sensor(data, "power", "max", in_index, page, | 1251 | pmbus_add_sensor(data, "power", "max", in_index, page, |
1251 | PMBUS_POUT_OP_WARN_LIMIT, PSC_POWER, | 1252 | PMBUS_POUT_OP_WARN_LIMIT, PSC_POWER, |
1252 | false); | 1253 | false, false); |
1253 | need_alarm = true; | 1254 | need_alarm = true; |
1254 | } | 1255 | } |
1255 | if (need_alarm && (info->func[page] & PMBUS_HAVE_STATUS_IOUT)) | 1256 | if (need_alarm && (info->func[page] & PMBUS_HAVE_STATUS_IOUT)) |
@@ -1264,7 +1265,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1264 | i1 = data->num_sensors; | 1265 | i1 = data->num_sensors; |
1265 | pmbus_add_sensor(data, "power", "crit", in_index, page, | 1266 | pmbus_add_sensor(data, "power", "crit", in_index, page, |
1266 | PMBUS_POUT_OP_FAULT_LIMIT, PSC_POWER, | 1267 | PMBUS_POUT_OP_FAULT_LIMIT, PSC_POWER, |
1267 | false); | 1268 | false, false); |
1268 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) | 1269 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) |
1269 | pmbus_add_boolean_reg(data, "power", | 1270 | pmbus_add_boolean_reg(data, "power", |
1270 | "crit_alarm", | 1271 | "crit_alarm", |
@@ -1302,7 +1303,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1302 | i0 = data->num_sensors; | 1303 | i0 = data->num_sensors; |
1303 | pmbus_add_sensor(data, "temp", "input", in_index, page, | 1304 | pmbus_add_sensor(data, "temp", "input", in_index, page, |
1304 | pmbus_temp_registers[t], | 1305 | pmbus_temp_registers[t], |
1305 | PSC_TEMPERATURE, true); | 1306 | PSC_TEMPERATURE, true, true); |
1306 | 1307 | ||
1307 | /* | 1308 | /* |
1308 | * PMBus provides only one status register for TEMP1-3. | 1309 | * PMBus provides only one status register for TEMP1-3. |
@@ -1323,7 +1324,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1323 | i1 = data->num_sensors; | 1324 | i1 = data->num_sensors; |
1324 | pmbus_add_sensor(data, "temp", "min", in_index, | 1325 | pmbus_add_sensor(data, "temp", "min", in_index, |
1325 | page, PMBUS_UT_WARN_LIMIT, | 1326 | page, PMBUS_UT_WARN_LIMIT, |
1326 | PSC_TEMPERATURE, true); | 1327 | PSC_TEMPERATURE, true, false); |
1327 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { | 1328 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { |
1328 | pmbus_add_boolean_cmp(data, "temp", | 1329 | pmbus_add_boolean_cmp(data, "temp", |
1329 | "min_alarm", in_index, i1, i0, | 1330 | "min_alarm", in_index, i1, i0, |
@@ -1338,7 +1339,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1338 | pmbus_add_sensor(data, "temp", "lcrit", | 1339 | pmbus_add_sensor(data, "temp", "lcrit", |
1339 | in_index, page, | 1340 | in_index, page, |
1340 | PMBUS_UT_FAULT_LIMIT, | 1341 | PMBUS_UT_FAULT_LIMIT, |
1341 | PSC_TEMPERATURE, true); | 1342 | PSC_TEMPERATURE, true, false); |
1342 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { | 1343 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { |
1343 | pmbus_add_boolean_cmp(data, "temp", | 1344 | pmbus_add_boolean_cmp(data, "temp", |
1344 | "lcrit_alarm", in_index, i1, i0, | 1345 | "lcrit_alarm", in_index, i1, i0, |
@@ -1352,7 +1353,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1352 | i1 = data->num_sensors; | 1353 | i1 = data->num_sensors; |
1353 | pmbus_add_sensor(data, "temp", "max", in_index, | 1354 | pmbus_add_sensor(data, "temp", "max", in_index, |
1354 | page, PMBUS_OT_WARN_LIMIT, | 1355 | page, PMBUS_OT_WARN_LIMIT, |
1355 | PSC_TEMPERATURE, true); | 1356 | PSC_TEMPERATURE, true, false); |
1356 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { | 1357 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { |
1357 | pmbus_add_boolean_cmp(data, "temp", | 1358 | pmbus_add_boolean_cmp(data, "temp", |
1358 | "max_alarm", in_index, i0, i1, | 1359 | "max_alarm", in_index, i0, i1, |
@@ -1366,7 +1367,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1366 | i1 = data->num_sensors; | 1367 | i1 = data->num_sensors; |
1367 | pmbus_add_sensor(data, "temp", "crit", in_index, | 1368 | pmbus_add_sensor(data, "temp", "crit", in_index, |
1368 | page, PMBUS_OT_FAULT_LIMIT, | 1369 | page, PMBUS_OT_FAULT_LIMIT, |
1369 | PSC_TEMPERATURE, true); | 1370 | PSC_TEMPERATURE, true, false); |
1370 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { | 1371 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { |
1371 | pmbus_add_boolean_cmp(data, "temp", | 1372 | pmbus_add_boolean_cmp(data, "temp", |
1372 | "crit_alarm", in_index, i0, i1, | 1373 | "crit_alarm", in_index, i0, i1, |
@@ -1421,7 +1422,8 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1421 | 1422 | ||
1422 | i0 = data->num_sensors; | 1423 | i0 = data->num_sensors; |
1423 | pmbus_add_sensor(data, "fan", "input", in_index, page, | 1424 | pmbus_add_sensor(data, "fan", "input", in_index, page, |
1424 | pmbus_fan_registers[f], PSC_FAN, true); | 1425 | pmbus_fan_registers[f], PSC_FAN, true, |
1426 | true); | ||
1425 | 1427 | ||
1426 | /* | 1428 | /* |
1427 | * Each fan status register covers multiple fans, | 1429 | * Each fan status register covers multiple fans, |
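The long run of pmbus_add_sensor() hunks above all serve one change: "update" (re-read the register on every access) and "readonly" (no writable sysfs attribute) used to be a single flag, and temperature limits break that assumption by being volatile yet writable. A toy model of the split, assuming only that read-only attributes get 0444 and writable ones 0644 permissions:

    #include <stdbool.h>

    /* Not the driver's real types; just the two now-independent axes.
     * Measured inputs (READ_VIN etc.):   update = true,  readonly = true.
     * Voltage/current/power limits:      update = false, readonly = false.
     * Temperature limits:                update = true,  readonly = false. */
    struct pmbus_flags_sketch {
            bool update;    /* volatile: refresh from hardware on each read */
            bool readonly;  /* attribute has no store() method */
    };

    static unsigned int sysfs_mode(struct pmbus_flags_sketch f)
    {
            return f.readonly ? 0444 : 0644;
    }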
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig index eb4af28f856..1f29bab6b3e 100644 --- a/drivers/hwspinlock/Kconfig +++ b/drivers/hwspinlock/Kconfig | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | config HWSPINLOCK | 5 | config HWSPINLOCK |
6 | tristate "Generic Hardware Spinlock framework" | 6 | tristate "Generic Hardware Spinlock framework" |
7 | depends on ARCH_OMAP4 | ||
7 | help | 8 | help |
8 | Say y here to support the generic hardware spinlock framework. | 9 | Say y here to support the generic hardware spinlock framework. |
9 | You only need to enable this if you have hardware spinlock module | 10 | You only need to enable this if you have hardware spinlock module |
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index f4077840d3a..0e406d73b2c 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
@@ -440,6 +440,7 @@ void do_ide_request(struct request_queue *q) | |||
440 | struct ide_host *host = hwif->host; | 440 | struct ide_host *host = hwif->host; |
441 | struct request *rq = NULL; | 441 | struct request *rq = NULL; |
442 | ide_startstop_t startstop; | 442 | ide_startstop_t startstop; |
443 | unsigned long queue_run_ms = 3; /* old plug delay */ | ||
443 | 444 | ||
444 | spin_unlock_irq(q->queue_lock); | 445 | spin_unlock_irq(q->queue_lock); |
445 | 446 | ||
@@ -459,6 +460,9 @@ repeat: | |||
459 | prev_port = hwif->host->cur_port; | 460 | prev_port = hwif->host->cur_port; |
460 | if (drive->dev_flags & IDE_DFLAG_SLEEPING && | 461 | if (drive->dev_flags & IDE_DFLAG_SLEEPING && |
461 | time_after(drive->sleep, jiffies)) { | 462 | time_after(drive->sleep, jiffies)) { |
463 | unsigned long left = drive->sleep - jiffies; | ||
464 | |||
465 | queue_run_ms = jiffies_to_msecs(left + 1); | ||
462 | ide_unlock_port(hwif); | 466 | ide_unlock_port(hwif); |
463 | goto plug_device; | 467 | goto plug_device; |
464 | } | 468 | } |
@@ -547,8 +551,10 @@ plug_device: | |||
547 | plug_device_2: | 551 | plug_device_2: |
548 | spin_lock_irq(q->queue_lock); | 552 | spin_lock_irq(q->queue_lock); |
549 | 553 | ||
550 | if (rq) | 554 | if (rq) { |
551 | blk_requeue_request(q, rq); | 555 | blk_requeue_request(q, rq); |
556 | blk_delay_queue(q, queue_run_ms); | ||
557 | } | ||
552 | } | 558 | } |
553 | 559 | ||
554 | void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) | 560 | void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) |
@@ -562,6 +568,10 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) | |||
562 | blk_requeue_request(q, rq); | 568 | blk_requeue_request(q, rq); |
563 | 569 | ||
564 | spin_unlock_irqrestore(q->queue_lock, flags); | 570 | spin_unlock_irqrestore(q->queue_lock, flags); |
571 | |||
572 | /* Use 3ms as that was the old plug delay */ | ||
573 | if (rq) | ||
574 | blk_delay_queue(q, 3); | ||
565 | } | 575 | } |
566 | 576 | ||
567 | static int drive_is_ready(ide_drive_t *drive) | 577 | static int drive_is_ready(ide_drive_t *drive) |
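Both ide-io hunks implement the same rule from the on-stack plugging removal: a driver that requeues a request must itself arrange for the queue to run again, since there is no longer an implicit unplug timer. A sketch of the pattern, using the real blk_requeue_request()/blk_delay_queue() API of this kernel but invented surrounding names:

    #include <linux/blkdev.h>

    /* Requeue under the queue lock, then schedule a delayed queue run;
     * 3 ms mirrors the old implicit plug delay. */
    static void requeue_and_delay(struct request_queue *q, struct request *rq,
                                  unsigned long delay_ms)
    {
            unsigned long flags;

            spin_lock_irqsave(q->queue_lock, flags);
            blk_requeue_request(q, rq);
            spin_unlock_irqrestore(q->queue_lock, flags);

            blk_delay_queue(q, delay_ms);   /* re-run the queue later */
    }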
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index c7a6213c699..fbe1973f77b 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -625,7 +625,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | |||
625 | 625 | ||
626 | err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, | 626 | err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, |
627 | !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), | 627 | !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), |
628 | MLX4_PROTOCOL_IB); | 628 | MLX4_PROT_IB_IPV6); |
629 | if (err) | 629 | if (err) |
630 | return err; | 630 | return err; |
631 | 631 | ||
@@ -636,7 +636,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | |||
636 | return 0; | 636 | return 0; |
637 | 637 | ||
638 | err_add: | 638 | err_add: |
639 | mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB); | 639 | mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6); |
640 | return err; | 640 | return err; |
641 | } | 641 | } |
642 | 642 | ||
@@ -666,7 +666,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | |||
666 | struct mlx4_ib_gid_entry *ge; | 666 | struct mlx4_ib_gid_entry *ge; |
667 | 667 | ||
668 | err = mlx4_multicast_detach(mdev->dev, | 668 | err = mlx4_multicast_detach(mdev->dev, |
669 | &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB); | 669 | &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6); |
670 | if (err) | 670 | if (err) |
671 | return err; | 671 | return err; |
672 | 672 | ||
@@ -721,7 +721,6 @@ static int init_node_data(struct mlx4_ib_dev *dev) | |||
721 | if (err) | 721 | if (err) |
722 | goto out; | 722 | goto out; |
723 | 723 | ||
724 | dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32)); | ||
725 | memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); | 724 | memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); |
726 | 725 | ||
727 | out: | 726 | out: |
@@ -954,7 +953,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event | |||
954 | mlx4_foreach_ib_transport_port(port, ibdev->dev) { | 953 | mlx4_foreach_ib_transport_port(port, ibdev->dev) { |
955 | oldnd = iboe->netdevs[port - 1]; | 954 | oldnd = iboe->netdevs[port - 1]; |
956 | iboe->netdevs[port - 1] = | 955 | iboe->netdevs[port - 1] = |
957 | mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port); | 956 | mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); |
958 | if (oldnd != iboe->netdevs[port - 1]) { | 957 | if (oldnd != iboe->netdevs[port - 1]) { |
959 | if (iboe->netdevs[port - 1]) | 958 | if (iboe->netdevs[port - 1]) |
960 | netdev_added(ibdev, port); | 959 | netdev_added(ibdev, port); |
@@ -1207,7 +1206,7 @@ static struct mlx4_interface mlx4_ib_interface = { | |||
1207 | .add = mlx4_ib_add, | 1206 | .add = mlx4_ib_add, |
1208 | .remove = mlx4_ib_remove, | 1207 | .remove = mlx4_ib_remove, |
1209 | .event = mlx4_ib_event, | 1208 | .event = mlx4_ib_event, |
1210 | .protocol = MLX4_PROTOCOL_IB | 1209 | .protocol = MLX4_PROT_IB_IPV6 |
1211 | }; | 1210 | }; |
1212 | 1211 | ||
1213 | static int __init mlx4_ib_init(void) | 1212 | static int __init mlx4_ib_init(void) |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index ef3291551bc..cfa3a2b2223 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -1116,7 +1116,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi | |||
1116 | return rc; | 1116 | return rc; |
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | if (netif_is_bond_slave(netdev)) | 1119 | if (netif_is_bond_slave(nesvnic->netdev)) |
1120 | netdev = nesvnic->netdev->master; | 1120 | netdev = nesvnic->netdev->master; |
1121 | else | 1121 | else |
1122 | netdev = nesvnic->netdev; | 1122 | netdev = nesvnic->netdev; |
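The nes fix is subtle: at that point the local netdev variable has not been assigned yet, so the bonding test must run on nesvnic->netdev. The intended resolution, isolated below; netdev->master is the bonding-master field of this era, later replaced by the upper-device helpers:

    #include <linux/netdevice.h>

    /* Use the bonding master for neighbour/ARP lookups when the port is
     * enslaved; otherwise use the port's own net_device. */
    static struct net_device *neigh_dev(struct net_device *port_dev)
    {
            if (netif_is_bond_slave(port_dev))
                    return port_dev->master;
            return port_dev;
    }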
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 112ec55f293..434fd800cd2 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig | |||
@@ -641,7 +641,7 @@ config TOUCHSCREEN_TOUCHIT213 | |||
641 | 641 | ||
642 | config TOUCHSCREEN_TSC2005 | 642 | config TOUCHSCREEN_TSC2005 |
643 | tristate "TSC2005 based touchscreens" | 643 | tristate "TSC2005 based touchscreens" |
644 | depends on SPI_MASTER | 644 | depends on SPI_MASTER && GENERIC_HARDIRQS |
645 | help | 645 | help |
646 | Say Y here if you have a TSC2005 based touchscreen. | 646 | Say Y here if you have a TSC2005 based touchscreen. |
647 | 647 | ||
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c index 87420616efa..cbf0ff32267 100644 --- a/drivers/input/touchscreen/tsc2005.c +++ b/drivers/input/touchscreen/tsc2005.c | |||
@@ -358,7 +358,7 @@ static void __tsc2005_enable(struct tsc2005 *ts) | |||
358 | if (ts->esd_timeout && ts->set_reset) { | 358 | if (ts->esd_timeout && ts->set_reset) { |
359 | ts->last_valid_interrupt = jiffies; | 359 | ts->last_valid_interrupt = jiffies; |
360 | schedule_delayed_work(&ts->esd_work, | 360 | schedule_delayed_work(&ts->esd_work, |
361 | round_jiffies(jiffies + | 361 | round_jiffies_relative( |
362 | msecs_to_jiffies(ts->esd_timeout))); | 362 | msecs_to_jiffies(ts->esd_timeout))); |
363 | } | 363 | } |
364 | 364 | ||
@@ -477,7 +477,14 @@ static void tsc2005_esd_work(struct work_struct *work) | |||
477 | int error; | 477 | int error; |
478 | u16 r; | 478 | u16 r; |
479 | 479 | ||
480 | mutex_lock(&ts->mutex); | 480 | if (!mutex_trylock(&ts->mutex)) { |
481 | /* | ||
482 | * If the mutex is taken, it means that disable or enable is in | ||
483 | * progress. In that case just reschedule the work. If the work | ||
484 | * is not needed, it will be canceled by disable. | ||
485 | */ | ||
486 | goto reschedule; | ||
487 | } | ||
481 | 488 | ||
482 | if (time_is_after_jiffies(ts->last_valid_interrupt + | 489 | if (time_is_after_jiffies(ts->last_valid_interrupt + |
483 | msecs_to_jiffies(ts->esd_timeout))) | 490 | msecs_to_jiffies(ts->esd_timeout))) |
@@ -510,11 +517,12 @@ static void tsc2005_esd_work(struct work_struct *work) | |||
510 | tsc2005_start_scan(ts); | 517 | tsc2005_start_scan(ts); |
511 | 518 | ||
512 | out: | 519 | out: |
520 | mutex_unlock(&ts->mutex); | ||
521 | reschedule: | ||
513 | /* re-arm the watchdog */ | 522 | /* re-arm the watchdog */ |
514 | schedule_delayed_work(&ts->esd_work, | 523 | schedule_delayed_work(&ts->esd_work, |
515 | round_jiffies(jiffies + | 524 | round_jiffies_relative( |
516 | msecs_to_jiffies(ts->esd_timeout))); | 525 | msecs_to_jiffies(ts->esd_timeout))); |
517 | mutex_unlock(&ts->mutex); | ||
518 | } | 526 | } |
519 | 527 | ||
520 | static int tsc2005_open(struct input_dev *input) | 528 | static int tsc2005_open(struct input_dev *input) |
@@ -663,7 +671,7 @@ static int __devinit tsc2005_probe(struct spi_device *spi) | |||
663 | goto err_remove_sysfs; | 671 | goto err_remove_sysfs; |
664 | } | 672 | } |
665 | 673 | ||
666 | set_irq_wake(spi->irq, 1); | 674 | irq_set_irq_wake(spi->irq, 1); |
667 | return 0; | 675 | return 0; |
668 | 676 | ||
669 | err_remove_sysfs: | 677 | err_remove_sysfs: |
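Three fixes ride in the tsc2005 hunks: round_jiffies_relative() replaces round_jiffies() because the delay handed to schedule_delayed_work() is relative, not an absolute jiffies value; the ESD worker now uses mutex_trylock() so it cannot deadlock against enable/disable paths that hold the mutex while cancelling it; and set_irq_wake() follows the genirq rename to irq_set_irq_wake(). The trylock-or-reschedule watchdog shape, with invented names:

    #include <linux/jiffies.h>
    #include <linux/mutex.h>
    #include <linux/timer.h>
    #include <linux/workqueue.h>

    struct esd_dev {                        /* illustrative, not the driver's */
            struct mutex lock;
            struct delayed_work esd_work;
            unsigned int esd_timeout_ms;
    };

    static void check_hw(struct esd_dev *dev) { /* hardware poke elided */ }

    static void esd_watchdog_work(struct work_struct *work)
    {
            struct esd_dev *dev =
                    container_of(work, struct esd_dev, esd_work.work);

            if (!mutex_trylock(&dev->lock))
                    goto reschedule;        /* enable/disable in progress */

            check_hw(dev);
            mutex_unlock(&dev->lock);
    reschedule:
            schedule_delayed_work(&dev->esd_work,
                    round_jiffies_relative(msecs_to_jiffies(dev->esd_timeout_ms)));
    }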
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index c41eb6180c9..4bebae73334 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c | |||
@@ -231,6 +231,26 @@ void led_trigger_event(struct led_trigger *trigger, | |||
231 | } | 231 | } |
232 | EXPORT_SYMBOL_GPL(led_trigger_event); | 232 | EXPORT_SYMBOL_GPL(led_trigger_event); |
233 | 233 | ||
234 | void led_trigger_blink(struct led_trigger *trigger, | ||
235 | unsigned long *delay_on, | ||
236 | unsigned long *delay_off) | ||
237 | { | ||
238 | struct list_head *entry; | ||
239 | |||
240 | if (!trigger) | ||
241 | return; | ||
242 | |||
243 | read_lock(&trigger->leddev_list_lock); | ||
244 | list_for_each(entry, &trigger->led_cdevs) { | ||
245 | struct led_classdev *led_cdev; | ||
246 | |||
247 | led_cdev = list_entry(entry, struct led_classdev, trig_list); | ||
248 | led_blink_set(led_cdev, delay_on, delay_off); | ||
249 | } | ||
250 | read_unlock(&trigger->leddev_list_lock); | ||
251 | } | ||
252 | EXPORT_SYMBOL_GPL(led_trigger_blink); | ||
253 | |||
234 | void led_trigger_register_simple(const char *name, struct led_trigger **tp) | 254 | void led_trigger_register_simple(const char *name, struct led_trigger **tp) |
235 | { | 255 | { |
236 | struct led_trigger *trigger; | 256 | struct led_trigger *trigger; |
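led_trigger_blink() gives trigger owners a blinking counterpart to led_trigger_event(): it walks every LED bound to the trigger and hands the delays to led_blink_set(). A plausible caller, assuming a trigger already obtained via led_trigger_register_simple():

    #include <linux/leds.h>

    static struct led_trigger *my_trigger;  /* set up elsewhere */

    /* Blink all LEDs on the trigger at roughly 2 Hz. The delays are in
     * milliseconds and are in/out parameters: the LED core may round
     * them to what the underlying hardware supports. */
    static void my_signal_activity(void)
    {
            unsigned long delay_on = 250, delay_off = 250;

            led_trigger_blink(my_trigger, &delay_on, &delay_off);
    }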
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 98d9ec85e0e..8420129fc5e 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig | |||
@@ -327,4 +327,10 @@ config DM_UEVENT | |||
327 | ---help--- | 327 | ---help--- |
328 | Generate udev events for DM events. | 328 | Generate udev events for DM events. |
329 | 329 | ||
330 | config DM_FLAKEY | ||
331 | tristate "Flakey target (EXPERIMENTAL)" | ||
332 | depends on BLK_DEV_DM && EXPERIMENTAL | ||
333 | ---help--- | ||
334 | A target that intermittently fails I/O for debugging purposes. | ||
335 | |||
330 | endif # MD | 336 | endif # MD |
diff --git a/drivers/md/Makefile b/drivers/md/Makefile index d0138606c2e..448838b1f92 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile | |||
@@ -29,6 +29,7 @@ obj-$(CONFIG_BLK_DEV_MD) += md-mod.o | |||
29 | obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o | 29 | obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o |
30 | obj-$(CONFIG_DM_CRYPT) += dm-crypt.o | 30 | obj-$(CONFIG_DM_CRYPT) += dm-crypt.o |
31 | obj-$(CONFIG_DM_DELAY) += dm-delay.o | 31 | obj-$(CONFIG_DM_DELAY) += dm-delay.o |
32 | obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o | ||
32 | obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o | 33 | obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o |
33 | obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o | 34 | obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o |
34 | obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o | 35 | obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 2c62c1169f7..c8827ffd85b 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -1324,20 +1324,29 @@ static int crypt_setkey_allcpus(struct crypt_config *cc) | |||
1324 | 1324 | ||
1325 | static int crypt_set_key(struct crypt_config *cc, char *key) | 1325 | static int crypt_set_key(struct crypt_config *cc, char *key) |
1326 | { | 1326 | { |
1327 | int r = -EINVAL; | ||
1328 | int key_string_len = strlen(key); | ||
1329 | |||
1327 | /* The key size may not be changed. */ | 1330 | /* The key size may not be changed. */ |
1328 | if (cc->key_size != (strlen(key) >> 1)) | 1331 | if (cc->key_size != (key_string_len >> 1)) |
1329 | return -EINVAL; | 1332 | goto out; |
1330 | 1333 | ||
1331 | /* Hyphen (which gives a key_size of zero) means there is no key. */ | 1334 | /* Hyphen (which gives a key_size of zero) means there is no key. */ |
1332 | if (!cc->key_size && strcmp(key, "-")) | 1335 | if (!cc->key_size && strcmp(key, "-")) |
1333 | return -EINVAL; | 1336 | goto out; |
1334 | 1337 | ||
1335 | if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0) | 1338 | if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0) |
1336 | return -EINVAL; | 1339 | goto out; |
1337 | 1340 | ||
1338 | set_bit(DM_CRYPT_KEY_VALID, &cc->flags); | 1341 | set_bit(DM_CRYPT_KEY_VALID, &cc->flags); |
1339 | 1342 | ||
1340 | return crypt_setkey_allcpus(cc); | 1343 | r = crypt_setkey_allcpus(cc); |
1344 | |||
1345 | out: | ||
1346 | /* Hex key string not needed after here, so wipe it. */ | ||
1347 | memset(key, '0', key_string_len); | ||
1348 | |||
1349 | return r; | ||
1341 | } | 1350 | } |
1342 | 1351 | ||
1343 | static int crypt_wipe_key(struct crypt_config *cc) | 1352 | static int crypt_wipe_key(struct crypt_config *cc) |
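The crypt_set_key() rework routes every exit, error or success, through one point so the caller's hex key string is always overwritten before return. The same parse-then-wipe idiom in miniature; parse_hex_key() is a stand-in, and a hardened variant would use a wipe the compiler cannot elide (memzero_explicit() in later kernels):

    #include <string.h>

    int parse_hex_key(const char *hex, unsigned char *out, size_t out_len);

    static int consume_key_string(char *key, unsigned char *out, size_t out_len)
    {
            size_t len = strlen(key);
            int r = parse_hex_key(key, out, out_len);

            /* Wipe on every path, exactly as crypt_set_key() now does. */
            memset(key, '0', len);
            return r;
    }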
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c new file mode 100644 index 00000000000..ea790623c30 --- /dev/null +++ b/drivers/md/dm-flakey.c | |||
@@ -0,0 +1,212 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 Sistina Software (UK) Limited. | ||
3 | * Copyright (C) 2004, 2010 Red Hat, Inc. All rights reserved. | ||
4 | * | ||
5 | * This file is released under the GPL. | ||
6 | */ | ||
7 | |||
8 | #include <linux/device-mapper.h> | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/blkdev.h> | ||
13 | #include <linux/bio.h> | ||
14 | #include <linux/slab.h> | ||
15 | |||
16 | #define DM_MSG_PREFIX "flakey" | ||
17 | |||
18 | /* | ||
19 | * Flakey: Used for testing only, simulates intermittent, | ||
20 | * catastrophic device failure. | ||
21 | */ | ||
22 | struct flakey_c { | ||
23 | struct dm_dev *dev; | ||
24 | unsigned long start_time; | ||
25 | sector_t start; | ||
26 | unsigned up_interval; | ||
27 | unsigned down_interval; | ||
28 | }; | ||
29 | |||
30 | /* | ||
31 | * Construct a flakey mapping: <dev_path> <offset> <up interval> <down interval> | ||
32 | */ | ||
33 | static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) | ||
34 | { | ||
35 | struct flakey_c *fc; | ||
36 | unsigned long long tmp; | ||
37 | |||
38 | if (argc != 4) { | ||
39 | ti->error = "dm-flakey: Invalid argument count"; | ||
40 | return -EINVAL; | ||
41 | } | ||
42 | |||
43 | fc = kmalloc(sizeof(*fc), GFP_KERNEL); | ||
44 | if (!fc) { | ||
45 | ti->error = "dm-flakey: Cannot allocate flakey context"; | ||
46 | return -ENOMEM; | ||
47 | } | ||
48 | fc->start_time = jiffies; | ||
49 | |||
50 | if (sscanf(argv[1], "%llu", &tmp) != 1) { | ||
51 | ti->error = "dm-flakey: Invalid device sector"; | ||
52 | goto bad; | ||
53 | } | ||
54 | fc->start = tmp; | ||
55 | |||
56 | if (sscanf(argv[2], "%u", &fc->up_interval) != 1) { | ||
57 | ti->error = "dm-flakey: Invalid up interval"; | ||
58 | goto bad; | ||
59 | } | ||
60 | |||
61 | if (sscanf(argv[3], "%u", &fc->down_interval) != 1) { | ||
62 | ti->error = "dm-flakey: Invalid down interval"; | ||
63 | goto bad; | ||
64 | } | ||
65 | |||
66 | if (!(fc->up_interval + fc->down_interval)) { | ||
67 | ti->error = "dm-flakey: Total (up + down) interval is zero"; | ||
68 | goto bad; | ||
69 | } | ||
70 | |||
71 | if (fc->up_interval + fc->down_interval < fc->up_interval) { | ||
72 | ti->error = "dm-flakey: Interval overflow"; | ||
73 | goto bad; | ||
74 | } | ||
75 | |||
76 | if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &fc->dev)) { | ||
77 | ti->error = "dm-flakey: Device lookup failed"; | ||
78 | goto bad; | ||
79 | } | ||
80 | |||
81 | ti->num_flush_requests = 1; | ||
82 | ti->private = fc; | ||
83 | return 0; | ||
84 | |||
85 | bad: | ||
86 | kfree(fc); | ||
87 | return -EINVAL; | ||
88 | } | ||
89 | |||
90 | static void flakey_dtr(struct dm_target *ti) | ||
91 | { | ||
92 | struct flakey_c *fc = ti->private; | ||
93 | |||
94 | dm_put_device(ti, fc->dev); | ||
95 | kfree(fc); | ||
96 | } | ||
97 | |||
98 | static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector) | ||
99 | { | ||
100 | struct flakey_c *fc = ti->private; | ||
101 | |||
102 | return fc->start + (bi_sector - ti->begin); | ||
103 | } | ||
104 | |||
105 | static void flakey_map_bio(struct dm_target *ti, struct bio *bio) | ||
106 | { | ||
107 | struct flakey_c *fc = ti->private; | ||
108 | |||
109 | bio->bi_bdev = fc->dev->bdev; | ||
110 | if (bio_sectors(bio)) | ||
111 | bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); | ||
112 | } | ||
113 | |||
114 | static int flakey_map(struct dm_target *ti, struct bio *bio, | ||
115 | union map_info *map_context) | ||
116 | { | ||
117 | struct flakey_c *fc = ti->private; | ||
118 | unsigned elapsed; | ||
119 | |||
120 | /* Are we alive ? */ | ||
121 | elapsed = (jiffies - fc->start_time) / HZ; | ||
122 | if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) | ||
123 | return -EIO; | ||
124 | |||
125 | flakey_map_bio(ti, bio); | ||
126 | |||
127 | return DM_MAPIO_REMAPPED; | ||
128 | } | ||
129 | |||
130 | static int flakey_status(struct dm_target *ti, status_type_t type, | ||
131 | char *result, unsigned int maxlen) | ||
132 | { | ||
133 | struct flakey_c *fc = ti->private; | ||
134 | |||
135 | switch (type) { | ||
136 | case STATUSTYPE_INFO: | ||
137 | result[0] = '\0'; | ||
138 | break; | ||
139 | |||
140 | case STATUSTYPE_TABLE: | ||
141 | snprintf(result, maxlen, "%s %llu %u %u", fc->dev->name, | ||
142 | (unsigned long long)fc->start, fc->up_interval, | ||
143 | fc->down_interval); | ||
144 | break; | ||
145 | } | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) | ||
150 | { | ||
151 | struct flakey_c *fc = ti->private; | ||
152 | |||
153 | return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg); | ||
154 | } | ||
155 | |||
156 | static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm, | ||
157 | struct bio_vec *biovec, int max_size) | ||
158 | { | ||
159 | struct flakey_c *fc = ti->private; | ||
160 | struct request_queue *q = bdev_get_queue(fc->dev->bdev); | ||
161 | |||
162 | if (!q->merge_bvec_fn) | ||
163 | return max_size; | ||
164 | |||
165 | bvm->bi_bdev = fc->dev->bdev; | ||
166 | bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector); | ||
167 | |||
168 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); | ||
169 | } | ||
170 | |||
171 | static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) | ||
172 | { | ||
173 | struct flakey_c *fc = ti->private; | ||
174 | |||
175 | return fn(ti, fc->dev, fc->start, ti->len, data); | ||
176 | } | ||
177 | |||
178 | static struct target_type flakey_target = { | ||
179 | .name = "flakey", | ||
180 | .version = {1, 1, 0}, | ||
181 | .module = THIS_MODULE, | ||
182 | .ctr = flakey_ctr, | ||
183 | .dtr = flakey_dtr, | ||
184 | .map = flakey_map, | ||
185 | .status = flakey_status, | ||
186 | .ioctl = flakey_ioctl, | ||
187 | .merge = flakey_merge, | ||
188 | .iterate_devices = flakey_iterate_devices, | ||
189 | }; | ||
190 | |||
191 | static int __init dm_flakey_init(void) | ||
192 | { | ||
193 | int r = dm_register_target(&flakey_target); | ||
194 | |||
195 | if (r < 0) | ||
196 | DMERR("register failed %d", r); | ||
197 | |||
198 | return r; | ||
199 | } | ||
200 | |||
201 | static void __exit dm_flakey_exit(void) | ||
202 | { | ||
203 | dm_unregister_target(&flakey_target); | ||
204 | } | ||
205 | |||
206 | /* Module hooks */ | ||
207 | module_init(dm_flakey_init); | ||
208 | module_exit(dm_flakey_exit); | ||
209 | |||
210 | MODULE_DESCRIPTION(DM_NAME " flakey target"); | ||
211 | MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); | ||
212 | MODULE_LICENSE("GPL"); | ||
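The heart of the new target is the modular-arithmetic window in flakey_map(): elapsed seconds since table load are folded into an up+down cycle, and any bio landing in the down portion fails with -EIO. The predicate in isolation, with a worked example:

    #include <stdbool.h>

    /* With up = 30 and down = 10, seconds 0-29 of every 40-second cycle
     * succeed and seconds 30-39 fail. */
    static bool flakey_is_down(unsigned int elapsed,
                               unsigned int up, unsigned int down)
    {
            return (elapsed % (up + down)) >= up;
    }

Per the constructor comment, a table line of the form "<dev_path> <offset> <up interval> <down interval>" (for example "/dev/sdb1 0 30 10", values chosen for illustration) would therefore give 30 seconds of service followed by 10 seconds of errors, repeating.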
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 6d12775a106..4cacdad2270 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c | |||
@@ -1501,14 +1501,10 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user) | |||
1501 | return r; | 1501 | return r; |
1502 | } | 1502 | } |
1503 | 1503 | ||
1504 | static void free_params(struct dm_ioctl *param) | ||
1505 | { | ||
1506 | vfree(param); | ||
1507 | } | ||
1508 | |||
1509 | static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param) | 1504 | static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param) |
1510 | { | 1505 | { |
1511 | struct dm_ioctl tmp, *dmi; | 1506 | struct dm_ioctl tmp, *dmi; |
1507 | int secure_data; | ||
1512 | 1508 | ||
1513 | if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data))) | 1509 | if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data))) |
1514 | return -EFAULT; | 1510 | return -EFAULT; |
@@ -1516,17 +1512,30 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param) | |||
1516 | if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data))) | 1512 | if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data))) |
1517 | return -EINVAL; | 1513 | return -EINVAL; |
1518 | 1514 | ||
1515 | secure_data = tmp.flags & DM_SECURE_DATA_FLAG; | ||
1516 | |||
1519 | dmi = vmalloc(tmp.data_size); | 1517 | dmi = vmalloc(tmp.data_size); |
1520 | if (!dmi) | 1518 | if (!dmi) { |
1519 | if (secure_data && clear_user(user, tmp.data_size)) | ||
1520 | return -EFAULT; | ||
1521 | return -ENOMEM; | 1521 | return -ENOMEM; |
1522 | |||
1523 | if (copy_from_user(dmi, user, tmp.data_size)) { | ||
1524 | vfree(dmi); | ||
1525 | return -EFAULT; | ||
1526 | } | 1522 | } |
1527 | 1523 | ||
1524 | if (copy_from_user(dmi, user, tmp.data_size)) | ||
1525 | goto bad; | ||
1526 | |||
1527 | /* Wipe the user buffer so we do not return it to userspace */ | ||
1528 | if (secure_data && clear_user(user, tmp.data_size)) | ||
1529 | goto bad; | ||
1530 | |||
1528 | *param = dmi; | 1531 | *param = dmi; |
1529 | return 0; | 1532 | return 0; |
1533 | |||
1534 | bad: | ||
1535 | if (secure_data) | ||
1536 | memset(dmi, 0, tmp.data_size); | ||
1537 | vfree(dmi); | ||
1538 | return -EFAULT; | ||
1530 | } | 1539 | } |
1531 | 1540 | ||
1532 | static int validate_params(uint cmd, struct dm_ioctl *param) | 1541 | static int validate_params(uint cmd, struct dm_ioctl *param) |
@@ -1534,6 +1543,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param) | |||
1534 | /* Always clear this flag */ | 1543 | /* Always clear this flag */ |
1535 | param->flags &= ~DM_BUFFER_FULL_FLAG; | 1544 | param->flags &= ~DM_BUFFER_FULL_FLAG; |
1536 | param->flags &= ~DM_UEVENT_GENERATED_FLAG; | 1545 | param->flags &= ~DM_UEVENT_GENERATED_FLAG; |
1546 | param->flags &= ~DM_SECURE_DATA_FLAG; | ||
1537 | 1547 | ||
1538 | /* Ignores parameters */ | 1548 | /* Ignores parameters */ |
1539 | if (cmd == DM_REMOVE_ALL_CMD || | 1549 | if (cmd == DM_REMOVE_ALL_CMD || |
@@ -1561,10 +1571,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param) | |||
1561 | static int ctl_ioctl(uint command, struct dm_ioctl __user *user) | 1571 | static int ctl_ioctl(uint command, struct dm_ioctl __user *user) |
1562 | { | 1572 | { |
1563 | int r = 0; | 1573 | int r = 0; |
1574 | int wipe_buffer; | ||
1564 | unsigned int cmd; | 1575 | unsigned int cmd; |
1565 | struct dm_ioctl *uninitialized_var(param); | 1576 | struct dm_ioctl *uninitialized_var(param); |
1566 | ioctl_fn fn = NULL; | 1577 | ioctl_fn fn = NULL; |
1567 | size_t param_size; | 1578 | size_t input_param_size; |
1568 | 1579 | ||
1569 | /* only root can play with this */ | 1580 | /* only root can play with this */ |
1570 | if (!capable(CAP_SYS_ADMIN)) | 1581 | if (!capable(CAP_SYS_ADMIN)) |
@@ -1611,13 +1622,15 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user) | |||
1611 | if (r) | 1622 | if (r) |
1612 | return r; | 1623 | return r; |
1613 | 1624 | ||
1625 | input_param_size = param->data_size; | ||
1626 | wipe_buffer = param->flags & DM_SECURE_DATA_FLAG; | ||
1627 | |||
1614 | r = validate_params(cmd, param); | 1628 | r = validate_params(cmd, param); |
1615 | if (r) | 1629 | if (r) |
1616 | goto out; | 1630 | goto out; |
1617 | 1631 | ||
1618 | param_size = param->data_size; | ||
1619 | param->data_size = sizeof(*param); | 1632 | param->data_size = sizeof(*param); |
1620 | r = fn(param, param_size); | 1633 | r = fn(param, input_param_size); |
1621 | 1634 | ||
1622 | /* | 1635 | /* |
1623 | * Copy the results back to userland. | 1636 | * Copy the results back to userland. |
@@ -1625,8 +1638,11 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user) | |||
1625 | if (!r && copy_to_user(user, param, param->data_size)) | 1638 | if (!r && copy_to_user(user, param, param->data_size)) |
1626 | r = -EFAULT; | 1639 | r = -EFAULT; |
1627 | 1640 | ||
1628 | out: | 1641 | out: |
1629 | free_params(param); | 1642 | if (wipe_buffer) |
1643 | memset(param, 0, input_param_size); | ||
1644 | |||
1645 | vfree(param); | ||
1630 | return r; | 1646 | return r; |
1631 | } | 1647 | } |
1632 | 1648 | ||
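DM_SECURE_DATA_FLAG obliges the ioctl path to scrub both copies of the payload: clear_user() wipes the userspace buffer once it has been copied in (or even when vmalloc fails), and memset() wipes the vmalloc'ed kernel copy on every exit. A compressed sketch of copy_params()'s contract, with invented names:

    #include <linux/string.h>
    #include <linux/uaccess.h>
    #include <linux/vmalloc.h>

    /* Copy in a possibly sensitive payload, leaving no stray copy behind
     * on any path. 'secure' mirrors DM_SECURE_DATA_FLAG. */
    static void *copy_in_secure(void __user *user, size_t size, bool secure,
                                int *err)
    {
            void *buf = vmalloc(size);

            if (!buf) {
                    *err = (secure && clear_user(user, size)) ? -EFAULT : -ENOMEM;
                    return NULL;
            }
            if (copy_from_user(buf, user, size) ||
                (secure && clear_user(user, size))) {
                    if (secure)
                            memset(buf, 0, size);   /* wipe the kernel copy */
                    vfree(buf);
                    *err = -EFAULT;
                    return NULL;
            }
            *err = 0;
            return buf;
    }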
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 57968eb382c..a1f32188967 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c | |||
@@ -543,7 +543,7 @@ static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti, | |||
543 | return -EINVAL; | 543 | return -EINVAL; |
544 | } | 544 | } |
545 | 545 | ||
546 | r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &dev); | 546 | r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev); |
547 | if (r) | 547 | if (r) |
548 | return r; | 548 | return r; |
549 | 549 | ||
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 4b0b63c290a..a550a057d99 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -844,8 +844,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, | |||
844 | { | 844 | { |
845 | /* target parameters */ | 845 | /* target parameters */ |
846 | static struct param _params[] = { | 846 | static struct param _params[] = { |
847 | {1, 1024, "invalid number of priority groups"}, | 847 | {0, 1024, "invalid number of priority groups"}, |
848 | {1, 1024, "invalid initial priority group number"}, | 848 | {0, 1024, "invalid initial priority group number"}, |
849 | }; | 849 | }; |
850 | 850 | ||
851 | int r; | 851 | int r; |
@@ -879,6 +879,13 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, | |||
879 | if (r) | 879 | if (r) |
880 | goto bad; | 880 | goto bad; |
881 | 881 | ||
882 | if ((!m->nr_priority_groups && next_pg_num) || | ||
883 | (m->nr_priority_groups && !next_pg_num)) { | ||
884 | ti->error = "invalid initial priority group"; | ||
885 | r = -EINVAL; | ||
886 | goto bad; | ||
887 | } | ||
888 | |||
882 | /* parse the priority groups */ | 889 | /* parse the priority groups */ |
883 | while (as.argc) { | 890 | while (as.argc) { |
884 | struct priority_group *pg; | 891 | struct priority_group *pg; |
@@ -1065,7 +1072,7 @@ out: | |||
1065 | static int action_dev(struct multipath *m, struct dm_dev *dev, | 1072 | static int action_dev(struct multipath *m, struct dm_dev *dev, |
1066 | action_fn action) | 1073 | action_fn action) |
1067 | { | 1074 | { |
1068 | int r = 0; | 1075 | int r = -EINVAL; |
1069 | struct pgpath *pgpath; | 1076 | struct pgpath *pgpath; |
1070 | struct priority_group *pg; | 1077 | struct priority_group *pg; |
1071 | 1078 | ||
@@ -1415,7 +1422,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type, | |||
1415 | else if (m->current_pg) | 1422 | else if (m->current_pg) |
1416 | pg_num = m->current_pg->pg_num; | 1423 | pg_num = m->current_pg->pg_num; |
1417 | else | 1424 | else |
1418 | pg_num = 1; | 1425 | pg_num = (m->nr_priority_groups ? 1 : 0); |
1419 | 1426 | ||
1420 | DMEMIT("%u ", pg_num); | 1427 | DMEMIT("%u ", pg_num); |
1421 | 1428 | ||
@@ -1669,7 +1676,7 @@ out: | |||
1669 | *---------------------------------------------------------------*/ | 1676 | *---------------------------------------------------------------*/ |
1670 | static struct target_type multipath_target = { | 1677 | static struct target_type multipath_target = { |
1671 | .name = "multipath", | 1678 | .name = "multipath", |
1672 | .version = {1, 2, 0}, | 1679 | .version = {1, 3, 0}, |
1673 | .module = THIS_MODULE, | 1680 | .module = THIS_MODULE, |
1674 | .ctr = multipath_ctr, | 1681 | .ctr = multipath_ctr, |
1675 | .dtr = multipath_dtr, | 1682 | .dtr = multipath_dtr, |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index fdde53cd12b..a2d330942cb 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -1080,7 +1080,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1080 | argv++; | 1080 | argv++; |
1081 | argc--; | 1081 | argc--; |
1082 | 1082 | ||
1083 | r = dm_get_device(ti, cow_path, FMODE_READ | FMODE_WRITE, &s->cow); | 1083 | r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow); |
1084 | if (r) { | 1084 | if (r) { |
1085 | ti->error = "Cannot get COW device"; | 1085 | ti->error = "Cannot get COW device"; |
1086 | goto bad_cow; | 1086 | goto bad_cow; |
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index dddfa14f298..3d80cf0c152 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c | |||
@@ -396,9 +396,29 @@ static void stripe_io_hints(struct dm_target *ti, | |||
396 | blk_limits_io_opt(limits, chunk_size * sc->stripes); | 396 | blk_limits_io_opt(limits, chunk_size * sc->stripes); |
397 | } | 397 | } |
398 | 398 | ||
399 | static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm, | ||
400 | struct bio_vec *biovec, int max_size) | ||
401 | { | ||
402 | struct stripe_c *sc = ti->private; | ||
403 | sector_t bvm_sector = bvm->bi_sector; | ||
404 | uint32_t stripe; | ||
405 | struct request_queue *q; | ||
406 | |||
407 | stripe_map_sector(sc, bvm_sector, &stripe, &bvm_sector); | ||
408 | |||
409 | q = bdev_get_queue(sc->stripe[stripe].dev->bdev); | ||
410 | if (!q->merge_bvec_fn) | ||
411 | return max_size; | ||
412 | |||
413 | bvm->bi_bdev = sc->stripe[stripe].dev->bdev; | ||
414 | bvm->bi_sector = sc->stripe[stripe].physical_start + bvm_sector; | ||
415 | |||
416 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); | ||
417 | } | ||
418 | |||
399 | static struct target_type stripe_target = { | 419 | static struct target_type stripe_target = { |
400 | .name = "striped", | 420 | .name = "striped", |
401 | .version = {1, 3, 1}, | 421 | .version = {1, 4, 0}, |
402 | .module = THIS_MODULE, | 422 | .module = THIS_MODULE, |
403 | .ctr = stripe_ctr, | 423 | .ctr = stripe_ctr, |
404 | .dtr = stripe_dtr, | 424 | .dtr = stripe_dtr, |
@@ -407,6 +427,7 @@ static struct target_type stripe_target = { | |||
407 | .status = stripe_status, | 427 | .status = stripe_status, |
408 | .iterate_devices = stripe_iterate_devices, | 428 | .iterate_devices = stripe_iterate_devices, |
409 | .io_hints = stripe_io_hints, | 429 | .io_hints = stripe_io_hints, |
430 | .merge = stripe_merge, | ||
410 | }; | 431 | }; |
411 | 432 | ||
412 | int __init dm_stripe_init(void) | 433 | int __init dm_stripe_init(void) |
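stripe_merge() follows the same contract as flakey_merge() above: remap the proposed sector onto the backing device, then defer to that device's own merge_bvec_fn if it has one, never allowing more than the lower layer would. The reusable skeleton, with a hypothetical remap callback standing in for stripe_map_sector():

    #include <linux/blkdev.h>
    #include <linux/kernel.h>       /* min() */

    /* Generic .merge pass-through for a remapping target. */
    static int passthrough_merge(struct block_device *bdev,
                                 sector_t (*remap)(sector_t sector),
                                 struct bvec_merge_data *bvm,
                                 struct bio_vec *biovec, int max_size)
    {
            struct request_queue *q = bdev_get_queue(bdev);

            if (!q->merge_bvec_fn)
                    return max_size;        /* lower layer imposes no limit */

            bvm->bi_bdev = bdev;
            bvm->bi_sector = remap(bvm->bi_sector);
            return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
    }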
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c index 9be6a830f1d..ac0e42b47b2 100644 --- a/drivers/media/rc/ite-cir.c +++ b/drivers/media/rc/ite-cir.c | |||
@@ -187,7 +187,7 @@ static void ite_decode_bytes(struct ite_dev *dev, const u8 * data, int | |||
187 | sample_period = dev->params.sample_period; | 187 | sample_period = dev->params.sample_period; |
188 | ldata = (unsigned long *)data; | 188 | ldata = (unsigned long *)data; |
189 | size = length << 3; | 189 | size = length << 3; |
190 | next_one = generic_find_next_le_bit(ldata, size, 0); | 190 | next_one = find_next_bit_le(ldata, size, 0); |
191 | if (next_one > 0) { | 191 | if (next_one > 0) { |
192 | ev.pulse = true; | 192 | ev.pulse = true; |
193 | ev.duration = | 193 | ev.duration = |
@@ -196,14 +196,14 @@ static void ite_decode_bytes(struct ite_dev *dev, const u8 * data, int | |||
196 | } | 196 | } |
197 | 197 | ||
198 | while (next_one < size) { | 198 | while (next_one < size) { |
199 | next_zero = generic_find_next_zero_le_bit(ldata, size, next_one + 1); | 199 | next_zero = find_next_zero_bit_le(ldata, size, next_one + 1); |
200 | ev.pulse = false; | 200 | ev.pulse = false; |
201 | ev.duration = ITE_BITS_TO_NS(next_zero - next_one, sample_period); | 201 | ev.duration = ITE_BITS_TO_NS(next_zero - next_one, sample_period); |
202 | ir_raw_event_store_with_filter(dev->rdev, &ev); | 202 | ir_raw_event_store_with_filter(dev->rdev, &ev); |
203 | 203 | ||
204 | if (next_zero < size) { | 204 | if (next_zero < size) { |
205 | next_one = | 205 | next_one = |
206 | generic_find_next_le_bit(ldata, | 206 | find_next_bit_le(ldata, |
207 | size, | 207 | size, |
208 | next_zero + 1); | 208 | next_zero + 1); |
209 | ev.pulse = true; | 209 | ev.pulse = true; |
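The ite-cir hunks are a mechanical rename to the consolidated little-endian bitops (find_next_bit_le()/find_next_zero_bit_le() replacing the generic_* spellings); the decode loop is otherwise unchanged. Its alternation reduced to a skeleton, with emit() standing in for ir_raw_event_store_with_filter() and durations left in bits (note that in this hardware a set bit marks a space, a clear bit a pulse):

    #include <linux/bitops.h>

    /* Walk a little-endian sample bitmap, emitting alternating runs. */
    static void decode_samples(const unsigned long *ldata, unsigned int size,
                               void (*emit)(bool pulse, unsigned int bits))
    {
            unsigned int next_one = find_next_bit_le(ldata, size, 0);
            unsigned int next_zero;

            if (next_one > 0)
                    emit(true, next_one);           /* leading pulse run */

            while (next_one < size) {
                    next_zero = find_next_zero_bit_le(ldata, size, next_one + 1);
                    emit(false, next_zero - next_one);
                    if (next_zero < size) {
                            next_one = find_next_bit_le(ldata, size,
                                                        next_zero + 1);
                            emit(true, next_one - next_zero);
                    } else {
                            next_one = next_zero;   /* terminate */
                    }
            }
    }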
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig index 4ce5c8dffb6..cc0997a0517 100644 --- a/drivers/memstick/host/Kconfig +++ b/drivers/memstick/host/Kconfig | |||
@@ -30,3 +30,15 @@ config MEMSTICK_JMICRON_38X | |||
30 | 30 | ||
31 | To compile this driver as a module, choose M here: the | 31 | To compile this driver as a module, choose M here: the |
32 | module will be called jmb38x_ms. | 32 | module will be called jmb38x_ms. |
33 | |||
34 | config MEMSTICK_R592 | ||
35 | tristate "Ricoh R5C592 MemoryStick interface support (EXPERIMENTAL)" | ||
36 | depends on EXPERIMENTAL && PCI | ||
37 | |||
38 | help | ||
39 | Say Y here if you want to be able to access MemoryStick cards with | ||
40 | the Ricoh R5C592 MemoryStick card reader (which is part of a | ||
41 | 5-in-1 multifunction reader). | ||
42 | |||
43 | To compile this driver as a module, choose M here: the module will | ||
44 | be called r592. | ||
diff --git a/drivers/memstick/host/Makefile b/drivers/memstick/host/Makefile index a1815e9dd01..31ba8d378e4 100644 --- a/drivers/memstick/host/Makefile +++ b/drivers/memstick/host/Makefile | |||
@@ -4,3 +4,4 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_MEMSTICK_TIFM_MS) += tifm_ms.o | 5 | obj-$(CONFIG_MEMSTICK_TIFM_MS) += tifm_ms.o |
6 | obj-$(CONFIG_MEMSTICK_JMICRON_38X) += jmb38x_ms.o | 6 | obj-$(CONFIG_MEMSTICK_JMICRON_38X) += jmb38x_ms.o |
7 | obj-$(CONFIG_MEMSTICK_R592) += r592.o | ||
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c new file mode 100644 index 00000000000..767406c9529 --- /dev/null +++ b/drivers/memstick/host/r592.c | |||
@@ -0,0 +1,908 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 - Maxim Levitsky | ||
3 | * driver for Ricoh memstick readers | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/freezer.h> | ||
13 | #include <linux/jiffies.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/pci_ids.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/kthread.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/highmem.h> | ||
22 | #include <asm/byteorder.h> | ||
23 | #include <linux/swab.h> | ||
24 | #include "r592.h" | ||
25 | |||
26 | static int enable_dma = 1; | ||
27 | static int debug; | ||
28 | |||
29 | static const char *tpc_names[] = { | ||
30 | "MS_TPC_READ_MG_STATUS", | ||
31 | "MS_TPC_READ_LONG_DATA", | ||
32 | "MS_TPC_READ_SHORT_DATA", | ||
33 | "MS_TPC_READ_REG", | ||
34 | "MS_TPC_READ_QUAD_DATA", | ||
35 | "INVALID", | ||
36 | "MS_TPC_GET_INT", | ||
37 | "MS_TPC_SET_RW_REG_ADRS", | ||
38 | "MS_TPC_EX_SET_CMD", | ||
39 | "MS_TPC_WRITE_QUAD_DATA", | ||
40 | "MS_TPC_WRITE_REG", | ||
41 | "MS_TPC_WRITE_SHORT_DATA", | ||
42 | "MS_TPC_WRITE_LONG_DATA", | ||
43 | "MS_TPC_SET_CMD", | ||
44 | }; | ||
45 | |||
46 | /** | ||
47 | * memstick_debug_get_tpc_name - debug helper that returns string for | ||
48 | * a TPC number | ||
49 | */ | ||
50 | const char *memstick_debug_get_tpc_name(int tpc) | ||
51 | { | ||
52 | return tpc_names[tpc-1]; | ||
53 | } | ||
54 | EXPORT_SYMBOL(memstick_debug_get_tpc_name); | ||
55 | |||
56 | |||
57 | /* Read a register */ | ||
58 | static inline u32 r592_read_reg(struct r592_device *dev, int address) | ||
59 | { | ||
60 | u32 value = readl(dev->mmio + address); | ||
61 | dbg_reg("reg #%02d == 0x%08x", address, value); | ||
62 | return value; | ||
63 | } | ||
64 | |||
65 | /* Write a register */ | ||
66 | static inline void r592_write_reg(struct r592_device *dev, | ||
67 | int address, u32 value) | ||
68 | { | ||
69 | dbg_reg("reg #%02d <- 0x%08x", address, value); | ||
70 | writel(value, dev->mmio + address); | ||
71 | } | ||
72 | |||
73 | /* Reads a big endian DWORD register */ | ||
74 | static inline u32 r592_read_reg_raw_be(struct r592_device *dev, int address) | ||
75 | { | ||
76 | u32 value = __raw_readl(dev->mmio + address); | ||
77 | dbg_reg("reg #%02d == 0x%08x", address, value); | ||
78 | return be32_to_cpu(value); | ||
79 | } | ||
80 | |||
81 | /* Writes a big endian DWORD register */ | ||
82 | static inline void r592_write_reg_raw_be(struct r592_device *dev, | ||
83 | int address, u32 value) | ||
84 | { | ||
85 | dbg_reg("reg #%02d <- 0x%08x", address, value); | ||
86 | __raw_writel(cpu_to_be32(value), dev->mmio + address); | ||
87 | } | ||
88 | |||
89 | /* Set specific bits in a register (little endian) */ | ||
90 | static inline void r592_set_reg_mask(struct r592_device *dev, | ||
91 | int address, u32 mask) | ||
92 | { | ||
93 | u32 reg = readl(dev->mmio + address); | ||
94 | dbg_reg("reg #%02d |= 0x%08x (old = 0x%08x)", address, mask, reg); | ||
95 | writel(reg | mask, dev->mmio + address); | ||
96 | } | ||
97 | |||
98 | /* Clear specific bits in a register (little endian) */ | ||
99 | static inline void r592_clear_reg_mask(struct r592_device *dev, | ||
100 | int address, u32 mask) | ||
101 | { | ||
102 | u32 reg = readl(dev->mmio + address); | ||
103 | dbg_reg("reg #%02d &= 0x%08x (old = 0x%08x, mask = 0x%08x)", | ||
104 | address, ~mask, reg, mask); | ||
105 | writel(reg & ~mask, dev->mmio + address); | ||
106 | } | ||
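/*
 * Illustrative sketch (not from the original source): the accessors
 * above compose into simple read-modify-write sequences, e.g. driving
 * the activity LED bit in R592_REG_MSC:
 *
 *	r592_set_reg_mask(dev, R592_REG_MSC, R592_REG_MSC_LED);
 *	...
 *	r592_clear_reg_mask(dev, R592_REG_MSC, R592_REG_MSC_LED);
 *
 * The second call is exactly how r592_execute_tpc() turns the LED off.
 */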
107 | |||
108 | |||
109 | /* Wait for status bits while checking for errors */ | ||
110 | static int r592_wait_status(struct r592_device *dev, u32 mask, u32 wanted_mask) | ||
111 | { | ||
112 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); | ||
113 | u32 reg = r592_read_reg(dev, R592_STATUS); | ||
114 | |||
115 | if ((reg & mask) == wanted_mask) | ||
116 | return 0; | ||
117 | |||
118 | while (time_before(jiffies, timeout)) { | ||
119 | |||
120 | reg = r592_read_reg(dev, R592_STATUS); | ||
121 | |||
122 | if ((reg & mask) == wanted_mask) | ||
123 | return 0; | ||
124 | |||
125 | if (reg & (R592_STATUS_SEND_ERR | R592_STATUS_RECV_ERR)) | ||
126 | return -EIO; | ||
127 | |||
128 | cpu_relax(); | ||
129 | } | ||
130 | return -ETIME; | ||
131 | } | ||
132 | |||
133 | |||
134 | /* Enable/disable device */ | ||
135 | static int r592_enable_device(struct r592_device *dev, bool enable) | ||
136 | { | ||
137 | dbg("%sabling the device", enable ? "en" : "dis"); | ||
138 | |||
139 | if (enable) { | ||
140 | |||
141 | /* Power up the card */ | ||
142 | r592_write_reg(dev, R592_POWER, R592_POWER_0 | R592_POWER_1); | ||
143 | |||
144 | /* Perform a reset */ | ||
145 | r592_set_reg_mask(dev, R592_IO, R592_IO_RESET); | ||
146 | |||
147 | msleep(100); | ||
148 | } else | ||
149 | /* Power down the card */ | ||
150 | r592_write_reg(dev, R592_POWER, 0); | ||
151 | |||
152 | return 0; | ||
153 | } | ||
154 | |||
155 | /* Set serial/parallel mode */ | ||
156 | static int r592_set_mode(struct r592_device *dev, bool parallel_mode) | ||
157 | { | ||
158 | if (!parallel_mode) { | ||
159 | dbg("switching to serial mode"); | ||
160 | |||
161 | /* Set serial mode */ | ||
162 | r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_SERIAL); | ||
163 | |||
164 | r592_clear_reg_mask(dev, R592_POWER, R592_POWER_20); | ||
165 | |||
166 | } else { | ||
167 | dbg("switching to parallel mode"); | ||
168 | |||
169 | /* This setting should be set _before_ sending the switch TPC */ | ||
170 | r592_set_reg_mask(dev, R592_POWER, R592_POWER_20); | ||
171 | |||
172 | r592_clear_reg_mask(dev, R592_IO, | ||
173 | R592_IO_SERIAL1 | R592_IO_SERIAL2); | ||
174 | |||
175 | /* Set the parallel mode now */ | ||
176 | r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_PARALLEL); | ||
177 | } | ||
178 | |||
179 | dev->parallel_mode = parallel_mode; | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | /* Perform a controller reset without powering down the card */ | ||
184 | static void r592_host_reset(struct r592_device *dev) | ||
185 | { | ||
186 | r592_set_reg_mask(dev, R592_IO, R592_IO_RESET); | ||
187 | msleep(100); | ||
188 | r592_set_mode(dev, dev->parallel_mode); | ||
189 | } | ||
190 | |||
191 | /* Disable all hardware interrupts */ | ||
192 | static void r592_clear_interrupts(struct r592_device *dev) | ||
193 | { | ||
194 | /* Disable & ACK all interrupts */ | ||
195 | r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_ACK_MASK); | ||
196 | r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_EN_MASK); | ||
197 | } | ||
198 | |||
199 | /* Tests if there is a CRC error */ | ||
200 | static int r592_test_io_error(struct r592_device *dev) | ||
201 | { | ||
202 | if (!(r592_read_reg(dev, R592_STATUS) & | ||
203 | (R592_STATUS_SEND_ERR | R592_STATUS_RECV_ERR))) | ||
204 | return 0; | ||
205 | |||
206 | return -EIO; | ||
207 | } | ||
208 | |||
209 | /* Ensure that FIFO is ready for use */ | ||
210 | static int r592_test_fifo_empty(struct r592_device *dev) | ||
211 | { | ||
212 | if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY) | ||
213 | return 0; | ||
214 | |||
215 | dbg("FIFO not ready, trying to reset the device"); | ||
216 | r592_host_reset(dev); | ||
217 | |||
218 | if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY) | ||
219 | return 0; | ||
220 | |||
221 | message("FIFO still not ready, giving up"); | ||
222 | return -EIO; | ||
223 | } | ||
224 | |||
225 | /* Activates the DMA transfer to/from the FIFO */ | ||
226 | static void r592_start_dma(struct r592_device *dev, bool is_write) | ||
227 | { | ||
228 | unsigned long flags; | ||
229 | u32 reg; | ||
230 | spin_lock_irqsave(&dev->irq_lock, flags); | ||
231 | |||
232 | /* Ack interrupts (just in case) + enable them */ | ||
233 | r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK); | ||
234 | r592_set_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK); | ||
235 | |||
236 | /* Set DMA address */ | ||
237 | r592_write_reg(dev, R592_FIFO_DMA, sg_dma_address(&dev->req->sg)); | ||
238 | |||
239 | /* Enable the DMA */ | ||
240 | reg = r592_read_reg(dev, R592_FIFO_DMA_SETTINGS); | ||
241 | reg |= R592_FIFO_DMA_SETTINGS_EN; | ||
242 | |||
243 | if (!is_write) | ||
244 | reg |= R592_FIFO_DMA_SETTINGS_DIR; | ||
245 | else | ||
246 | reg &= ~R592_FIFO_DMA_SETTINGS_DIR; | ||
247 | r592_write_reg(dev, R592_FIFO_DMA_SETTINGS, reg); | ||
248 | |||
249 | spin_unlock_irqrestore(&dev->irq_lock, flags); | ||
250 | } | ||
251 | |||
252 | /* Cleans up DMA related settings */ | ||
253 | static void r592_stop_dma(struct r592_device *dev, int error) | ||
254 | { | ||
255 | r592_clear_reg_mask(dev, R592_FIFO_DMA_SETTINGS, | ||
256 | R592_FIFO_DMA_SETTINGS_EN); | ||
257 | |||
258 | /* This is only a precaution */ | ||
259 | r592_write_reg(dev, R592_FIFO_DMA, | ||
260 | dev->dummy_dma_page_physical_address); | ||
261 | |||
262 | r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK); | ||
263 | r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK); | ||
264 | dev->dma_error = error; | ||
265 | } | ||
266 | |||
267 | /* Test if hardware supports DMA */ | ||
268 | static void r592_check_dma(struct r592_device *dev) | ||
269 | { | ||
270 | dev->dma_capable = enable_dma && | ||
271 | (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) & | ||
272 | R592_FIFO_DMA_SETTINGS_CAP); | ||
273 | } | ||
274 | |||
275 | /* Transfers fifo contents in/out using DMA */ | ||
276 | static int r592_transfer_fifo_dma(struct r592_device *dev) | ||
277 | { | ||
278 | int len, sg_count; | ||
279 | bool is_write; | ||
280 | |||
281 | if (!dev->dma_capable || !dev->req->long_data) | ||
282 | return -EINVAL; | ||
283 | |||
284 | len = dev->req->sg.length; | ||
285 | is_write = dev->req->data_dir == WRITE; | ||
286 | |||
287 | if (len != R592_LFIFO_SIZE) | ||
288 | return -EINVAL; | ||
289 | |||
290 | dbg_verbose("doing dma transfer"); | ||
291 | |||
292 | dev->dma_error = 0; | ||
293 | INIT_COMPLETION(dev->dma_done); | ||
294 | |||
295 | /* TODO: hidden assumption about nents being always 1 */ | ||
296 | sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ? | ||
297 | PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); | ||
298 | |||
299 | if (sg_count != 1 || | ||
300 | (sg_dma_len(&dev->req->sg) < dev->req->sg.length)) { | ||
301 | message("problem in dma_map_sg"); | ||
302 | return -EIO; | ||
303 | } | ||
304 | |||
305 | r592_start_dma(dev, is_write); | ||
306 | |||
307 | /* Wait for DMA completion */ | ||
308 | if (!wait_for_completion_timeout( | ||
309 | &dev->dma_done, msecs_to_jiffies(1000))) { | ||
310 | message("DMA timeout"); | ||
311 | r592_stop_dma(dev, -ETIMEDOUT); | ||
312 | } | ||
313 | |||
314 | dma_unmap_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ? | ||
315 | PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); | ||
316 | |||
317 | |||
318 | return dev->dma_error; | ||
319 | } | ||
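/*
 * Note (added for clarity): callers treat -EINVAL from this function as
 * "DMA not applicable" and fall back to PIO, as r592_execute_tpc() does:
 *
 *	error = r592_transfer_fifo_dma(dev);
 *	if (error == -EINVAL)
 *		error = r592_transfer_fifo_pio(dev);
 */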
320 | |||
321 | /* | ||
322 | * Writes the FIFO in 4 byte chunks. | ||
323 | * If the length isn't 4 byte aligned, the rest of the data is put | ||
324 | * into a spill fifo to be written later. | ||
325 | * Use r592_flush_fifo_write to flush that fifo after the last | ||
326 | * write. | ||
327 | */ | ||
328 | static void r592_write_fifo_pio(struct r592_device *dev, | ||
329 | unsigned char *buffer, int len) | ||
330 | { | ||
331 | /* flush spill from former write */ | ||
332 | if (!kfifo_is_empty(&dev->pio_fifo)) { | ||
333 | |||
334 | u8 tmp[4] = {0}; | ||
335 | int copy_len = kfifo_in(&dev->pio_fifo, buffer, len); | ||
336 | |||
337 | if (!kfifo_is_full(&dev->pio_fifo)) | ||
338 | return; | ||
339 | len -= copy_len; | ||
340 | buffer += copy_len; | ||
341 | |||
342 | copy_len = kfifo_out(&dev->pio_fifo, tmp, 4); | ||
343 | WARN_ON(copy_len != 4); | ||
344 | r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)tmp); | ||
345 | } | ||
346 | |||
347 | WARN_ON(!kfifo_is_empty(&dev->pio_fifo)); | ||
348 | |||
349 | /* write full dwords */ | ||
350 | while (len >= 4) { | ||
351 | r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer); | ||
352 | buffer += 4; | ||
353 | len -= 4; | ||
354 | } | ||
355 | |||
356 | /* put remaining bytes to the spill */ | ||
357 | if (len) | ||
358 | kfifo_in(&dev->pio_fifo, buffer, len); | ||
359 | } | ||
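/*
 * Worked example (illustrative, assuming an initially empty spill fifo):
 * r592_write_fifo_pio(dev, buf, 6) writes buf[0..3] as one big endian
 * dword to R592_FIFO_PIO and leaves buf[4..5] in dev->pio_fifo; a later
 * r592_flush_fifo_write() zero-pads them and writes the final dword.
 */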
360 | |||
361 | /* Flushes the temporary FIFO used to make aligned DWORD writes */ | ||
362 | static void r592_flush_fifo_write(struct r592_device *dev) | ||
363 | { | ||
364 | u8 buffer[4] = { 0 }; | ||
365 | int len; | ||
366 | |||
367 | if (kfifo_is_empty(&dev->pio_fifo)) | ||
368 | return; | ||
369 | |||
370 | len = kfifo_out(&dev->pio_fifo, buffer, 4); | ||
371 | r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer); | ||
372 | } | ||
373 | |||
374 | /* | ||
375 | * Reads the fifo in 4 byte chunks. | ||
376 | * If the input doesn't fit the buffer, the leftover bytes of the last | ||
377 | * dword are kept in the spill buffer; on the final read just throw them away. | ||
378 | */ | ||
379 | static void r592_read_fifo_pio(struct r592_device *dev, | ||
380 | unsigned char *buffer, int len) | ||
381 | { | ||
382 | u8 tmp[4]; | ||
383 | |||
384 | /* Read from last spill */ | ||
385 | if (!kfifo_is_empty(&dev->pio_fifo)) { | ||
386 | int bytes_copied = | ||
387 | kfifo_out(&dev->pio_fifo, buffer, min(4, len)); | ||
388 | buffer += bytes_copied; | ||
389 | len -= bytes_copied; | ||
390 | |||
391 | if (!kfifo_is_empty(&dev->pio_fifo)) | ||
392 | return; | ||
393 | } | ||
394 | |||
395 | /* Reads dwords from FIFO */ | ||
396 | while (len >= 4) { | ||
397 | *(u32 *)buffer = r592_read_reg_raw_be(dev, R592_FIFO_PIO); | ||
398 | buffer += 4; | ||
399 | len -= 4; | ||
400 | } | ||
401 | |||
402 | if (len) { | ||
403 | *(u32 *)tmp = r592_read_reg_raw_be(dev, R592_FIFO_PIO); | ||
404 | kfifo_in(&dev->pio_fifo, tmp, 4); | ||
405 | len -= kfifo_out(&dev->pio_fifo, buffer, len); | ||
406 | } | ||
407 | |||
408 | WARN_ON(len); | ||
409 | return; | ||
410 | } | ||
411 | |||
412 | /* Transfers actual data using PIO. */ | ||
413 | static int r592_transfer_fifo_pio(struct r592_device *dev) | ||
414 | { | ||
415 | unsigned long flags; | ||
416 | |||
417 | bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS; | ||
418 | struct sg_mapping_iter miter; | ||
419 | |||
420 | kfifo_reset(&dev->pio_fifo); | ||
421 | |||
422 | if (!dev->req->long_data) { | ||
423 | if (is_write) { | ||
424 | r592_write_fifo_pio(dev, dev->req->data, | ||
425 | dev->req->data_len); | ||
426 | r592_flush_fifo_write(dev); | ||
427 | } else | ||
428 | r592_read_fifo_pio(dev, dev->req->data, | ||
429 | dev->req->data_len); | ||
430 | return 0; | ||
431 | } | ||
432 | |||
433 | local_irq_save(flags); | ||
434 | sg_miter_start(&miter, &dev->req->sg, 1, SG_MITER_ATOMIC | | ||
435 | (is_write ? SG_MITER_FROM_SG : SG_MITER_TO_SG)); | ||
436 | |||
437 | /* Do the fifo<->memory transfer */ | ||
438 | while (sg_miter_next(&miter)) | ||
439 | if (is_write) | ||
440 | r592_write_fifo_pio(dev, miter.addr, miter.length); | ||
441 | else | ||
442 | r592_read_fifo_pio(dev, miter.addr, miter.length); | ||
443 | |||
444 | |||
445 | /* Write the last few non-aligned bytes */ | ||
446 | if (is_write) | ||
447 | r592_flush_fifo_write(dev); | ||
448 | |||
449 | sg_miter_stop(&miter); | ||
450 | local_irq_restore(flags); | ||
451 | return 0; | ||
452 | } | ||
453 | |||
454 | /* Executes one TPC (data is read/written from small or large fifo) */ | ||
455 | static void r592_execute_tpc(struct r592_device *dev) | ||
456 | { | ||
457 | bool is_write; | ||
458 | int len, error; | ||
459 | u32 status, reg; | ||
460 | |||
461 | if (!dev->req) { | ||
462 | message("BUG: tpc execution without request!"); | ||
463 | return; | ||
464 | } | ||
465 | is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS; | ||
466 | len = dev->req->long_data ? | ||
467 | dev->req->sg.length : dev->req->data_len; | ||
468 | |||
469 | /* Ensure that FIFO can hold the input data */ | ||
470 | if (len > R592_LFIFO_SIZE) { | ||
471 | message("IO: hardware doesn't support TPCs longer that 512"); | ||
472 | error = -ENOSYS; | ||
473 | goto out; | ||
474 | } | ||
475 | |||
476 | if (!(r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_PRSNT)) { | ||
477 | dbg("IO: refusing to send TPC because card is absent"); | ||
478 | error = -ENODEV; | ||
479 | goto out; | ||
480 | } | ||
481 | |||
482 | dbg("IO: executing %s LEN=%d", | ||
483 | memstick_debug_get_tpc_name(dev->req->tpc), len); | ||
484 | |||
485 | /* Set IO direction */ | ||
486 | if (is_write) | ||
487 | r592_set_reg_mask(dev, R592_IO, R592_IO_DIRECTION); | ||
488 | else | ||
489 | r592_clear_reg_mask(dev, R592_IO, R592_IO_DIRECTION); | ||
490 | |||
491 | |||
492 | error = r592_test_fifo_empty(dev); | ||
493 | if (error) | ||
494 | goto out; | ||
495 | |||
496 | /* Transfer write data */ | ||
497 | if (is_write) { | ||
498 | error = r592_transfer_fifo_dma(dev); | ||
499 | if (error == -EINVAL) | ||
500 | error = r592_transfer_fifo_pio(dev); | ||
501 | } | ||
502 | |||
503 | if (error) | ||
504 | goto out; | ||
505 | |||
506 | /* Trigger the TPC */ | ||
507 | reg = (len << R592_TPC_EXEC_LEN_SHIFT) | | ||
508 | (dev->req->tpc << R592_TPC_EXEC_TPC_SHIFT) | | ||
509 | R592_TPC_EXEC_BIG_FIFO; | ||
510 | |||
511 | r592_write_reg(dev, R592_TPC_EXEC, reg); | ||
512 | |||
513 | /* Wait for TPC completion */ | ||
514 | status = R592_STATUS_RDY; | ||
515 | if (dev->req->need_card_int) | ||
516 | status |= R592_STATUS_CED; | ||
517 | |||
518 | error = r592_wait_status(dev, status, status); | ||
519 | if (error) { | ||
520 | message("card didn't respond"); | ||
521 | goto out; | ||
522 | } | ||
523 | |||
524 | /* Test IO errors */ | ||
525 | error = r592_test_io_error(dev); | ||
526 | if (error) { | ||
527 | dbg("IO error"); | ||
528 | goto out; | ||
529 | } | ||
530 | |||
531 | /* Read data from FIFO */ | ||
532 | if (!is_write) { | ||
533 | error = r592_transfer_fifo_dma(dev); | ||
534 | if (error == -EINVAL) | ||
535 | error = r592_transfer_fifo_pio(dev); | ||
536 | } | ||
537 | |||
538 | /* Read the INT reg. This could be shortened with shifts, but this way | ||
539 | it's more readable */ | ||
540 | if (dev->parallel_mode && dev->req->need_card_int) { | ||
541 | |||
542 | dev->req->int_reg = 0; | ||
543 | status = r592_read_reg(dev, R592_STATUS); | ||
544 | |||
545 | if (status & R592_STATUS_P_CMDNACK) | ||
546 | dev->req->int_reg |= MEMSTICK_INT_CMDNAK; | ||
547 | if (status & R592_STATUS_P_BREQ) | ||
548 | dev->req->int_reg |= MEMSTICK_INT_BREQ; | ||
549 | if (status & R592_STATUS_P_INTERR) | ||
550 | dev->req->int_reg |= MEMSTICK_INT_ERR; | ||
551 | if (status & R592_STATUS_P_CED) | ||
552 | dev->req->int_reg |= MEMSTICK_INT_CED; | ||
553 | } | ||
554 | |||
555 | if (error) | ||
556 | dbg("FIFO read error"); | ||
557 | out: | ||
558 | dev->req->error = error; | ||
559 | r592_clear_reg_mask(dev, R592_REG_MSC, R592_REG_MSC_LED); | ||
560 | return; | ||
561 | } | ||
562 | |||
563 | /* Main request processing thread */ | ||
564 | static int r592_process_thread(void *data) | ||
565 | { | ||
566 | int error; | ||
567 | struct r592_device *dev = (struct r592_device *)data; | ||
568 | unsigned long flags; | ||
569 | |||
570 | while (!kthread_should_stop()) { | ||
571 | spin_lock_irqsave(&dev->io_thread_lock, flags); | ||
572 | set_current_state(TASK_INTERRUPTIBLE); | ||
573 | error = memstick_next_req(dev->host, &dev->req); | ||
574 | spin_unlock_irqrestore(&dev->io_thread_lock, flags); | ||
575 | |||
576 | if (error) { | ||
577 | if (error == -ENXIO || error == -EAGAIN) { | ||
578 | dbg_verbose("IO: done IO, sleeping"); | ||
579 | } else { | ||
580 | dbg("IO: unknown error from " | ||
581 | "memstick_next_req %d", error); | ||
582 | } | ||
583 | |||
584 | if (kthread_should_stop()) | ||
585 | set_current_state(TASK_RUNNING); | ||
586 | |||
587 | schedule(); | ||
588 | } else { | ||
589 | set_current_state(TASK_RUNNING); | ||
590 | r592_execute_tpc(dev); | ||
591 | } | ||
592 | } | ||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | /* Reprogram chip to detect change in card state */ | ||
597 | /* e.g. if a card is detected, arm it to detect removal, and vice versa */ | ||
598 | static void r592_update_card_detect(struct r592_device *dev) | ||
599 | { | ||
600 | u32 reg = r592_read_reg(dev, R592_REG_MSC); | ||
601 | bool card_detected = reg & R592_REG_MSC_PRSNT; | ||
602 | |||
603 | dbg("update card detect. card state: %s", card_detected ? | ||
604 | "present" : "absent"); | ||
605 | |||
606 | reg &= ~((R592_REG_MSC_IRQ_REMOVE | R592_REG_MSC_IRQ_INSERT) << 16); | ||
607 | |||
608 | if (card_detected) | ||
609 | reg |= (R592_REG_MSC_IRQ_REMOVE << 16); | ||
610 | else | ||
611 | reg |= (R592_REG_MSC_IRQ_INSERT << 16); | ||
612 | |||
613 | r592_write_reg(dev, R592_REG_MSC, reg); | ||
614 | } | ||
615 | |||
616 | /* Timer routine that fires a short debounce delay after the last card detection event */ | ||
617 | static void r592_detect_timer(unsigned long data) | ||
618 | { | ||
619 | struct r592_device *dev = (struct r592_device *)data; | ||
620 | r592_update_card_detect(dev); | ||
621 | memstick_detect_change(dev->host); | ||
622 | } | ||
623 | |||
624 | /* Interrupt handler */ | ||
625 | static irqreturn_t r592_irq(int irq, void *data) | ||
626 | { | ||
627 | struct r592_device *dev = (struct r592_device *)data; | ||
628 | irqreturn_t ret = IRQ_NONE; | ||
629 | u32 reg; | ||
630 | u16 irq_enable, irq_status; | ||
631 | unsigned long flags; | ||
632 | int error; | ||
633 | |||
634 | spin_lock_irqsave(&dev->irq_lock, flags); | ||
635 | |||
636 | reg = r592_read_reg(dev, R592_REG_MSC); | ||
637 | irq_enable = reg >> 16; | ||
638 | irq_status = reg & 0xFFFF; | ||
639 | |||
640 | /* Ack the interrupts */ | ||
641 | reg &= ~irq_status; | ||
642 | r592_write_reg(dev, R592_REG_MSC, reg); | ||
643 | |||
644 | /* Get the IRQ status minus bits that aren't enabled */ | ||
645 | irq_status &= irq_enable; | ||
646 | |||
647 | /* Due to a limitation of the memstick core, we don't look at the bits | ||
648 | that indicate whether the card was removed/inserted or is present */ | ||
649 | if (irq_status & (R592_REG_MSC_IRQ_INSERT | R592_REG_MSC_IRQ_REMOVE)) { | ||
650 | |||
651 | bool card_was_added = irq_status & R592_REG_MSC_IRQ_INSERT; | ||
652 | ret = IRQ_HANDLED; | ||
653 | |||
654 | message("IRQ: card %s", card_was_added ? "added" : "removed"); | ||
655 | |||
656 | mod_timer(&dev->detect_timer, | ||
657 | jiffies + msecs_to_jiffies(card_was_added ? 500 : 50)); | ||
658 | } | ||
659 | |||
660 | if (irq_status & | ||
661 | (R592_REG_MSC_FIFO_DMA_DONE | R592_REG_MSC_FIFO_DMA_ERR)) { | ||
662 | ret = IRQ_HANDLED; | ||
663 | |||
664 | if (irq_status & R592_REG_MSC_FIFO_DMA_ERR) { | ||
665 | message("IRQ: DMA error"); | ||
666 | error = -EIO; | ||
667 | } else { | ||
668 | dbg_verbose("IRQ: dma done"); | ||
669 | error = 0; | ||
670 | } | ||
671 | |||
672 | r592_stop_dma(dev, error); | ||
673 | complete(&dev->dma_done); | ||
674 | } | ||
675 | |||
676 | spin_unlock_irqrestore(&dev->irq_lock, flags); | ||
677 | return ret; | ||
678 | } | ||
679 | |||
680 | /* External interface: set settings */ | ||
681 | static int r592_set_param(struct memstick_host *host, | ||
682 | enum memstick_param param, int value) | ||
683 | { | ||
684 | struct r592_device *dev = memstick_priv(host); | ||
685 | |||
686 | switch (param) { | ||
687 | case MEMSTICK_POWER: | ||
688 | switch (value) { | ||
689 | case MEMSTICK_POWER_ON: | ||
690 | return r592_enable_device(dev, true); | ||
691 | case MEMSTICK_POWER_OFF: | ||
692 | return r592_enable_device(dev, false); | ||
693 | default: | ||
694 | return -EINVAL; | ||
695 | } | ||
696 | case MEMSTICK_INTERFACE: | ||
697 | switch (value) { | ||
698 | case MEMSTICK_SERIAL: | ||
699 | return r592_set_mode(dev, 0); | ||
700 | case MEMSTICK_PAR4: | ||
701 | return r592_set_mode(dev, 1); | ||
702 | default: | ||
703 | return -EINVAL; | ||
704 | } | ||
705 | default: | ||
706 | return -EINVAL; | ||
707 | } | ||
708 | } | ||
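/*
 * Illustrative call sequence (an assumption about the memstick core, not
 * part of this file): on card power-up the core is expected to call
 *
 *	host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON);
 *	host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
 *
 * and may later request MEMSTICK_PAR4 since this host sets
 * MEMSTICK_CAP_PAR4.
 */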
709 | |||
710 | /* External interface: submit requests */ | ||
711 | static void r592_submit_req(struct memstick_host *host) | ||
712 | { | ||
713 | struct r592_device *dev = memstick_priv(host); | ||
714 | unsigned long flags; | ||
715 | |||
716 | if (dev->req) | ||
717 | return; | ||
718 | |||
719 | spin_lock_irqsave(&dev->io_thread_lock, flags); | ||
720 | if (wake_up_process(dev->io_thread)) | ||
721 | dbg_verbose("IO thread woken to process requests"); | ||
722 | spin_unlock_irqrestore(&dev->io_thread_lock, flags); | ||
723 | } | ||
724 | |||
725 | static const struct pci_device_id r592_pci_id_tbl[] = { | ||
726 | |||
727 | { PCI_VDEVICE(RICOH, 0x0592), }, | ||
728 | { }, | ||
729 | }; | ||
730 | |||
731 | /* Main entry */ | ||
732 | static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
733 | { | ||
734 | int error = -ENOMEM; | ||
735 | struct memstick_host *host; | ||
736 | struct r592_device *dev; | ||
737 | |||
738 | /* Allocate memory */ | ||
739 | host = memstick_alloc_host(sizeof(struct r592_device), &pdev->dev); | ||
740 | if (!host) | ||
741 | goto error1; | ||
742 | |||
743 | dev = memstick_priv(host); | ||
744 | dev->host = host; | ||
745 | dev->pci_dev = pdev; | ||
746 | pci_set_drvdata(pdev, dev); | ||
747 | |||
748 | /* pci initialization */ | ||
749 | error = pci_enable_device(pdev); | ||
750 | if (error) | ||
751 | goto error2; | ||
752 | |||
753 | pci_set_master(pdev); | ||
754 | error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
755 | if (error) | ||
756 | goto error3; | ||
757 | |||
758 | error = pci_request_regions(pdev, DRV_NAME); | ||
759 | if (error) | ||
760 | goto error3; | ||
761 | |||
762 | dev->mmio = pci_ioremap_bar(pdev, 0); | ||
763 | if (!dev->mmio) | ||
764 | goto error4; | ||
765 | |||
766 | dev->irq = pdev->irq; | ||
767 | spin_lock_init(&dev->irq_lock); | ||
768 | spin_lock_init(&dev->io_thread_lock); | ||
769 | init_completion(&dev->dma_done); | ||
770 | INIT_KFIFO(dev->pio_fifo); | ||
771 | setup_timer(&dev->detect_timer, | ||
772 | r592_detect_timer, (unsigned long)dev); | ||
773 | |||
774 | /* Host initialization */ | ||
775 | host->caps = MEMSTICK_CAP_PAR4; | ||
776 | host->request = r592_submit_req; | ||
777 | host->set_param = r592_set_param; | ||
778 | r592_check_dma(dev); | ||
779 | |||
780 | dev->io_thread = kthread_run(r592_process_thread, dev, "r592_io"); | ||
781 | if (IS_ERR(dev->io_thread)) { | ||
782 | error = PTR_ERR(dev->io_thread); | ||
783 | goto error5; | ||
784 | } | ||
785 | |||
786 | /* This is just a precaution, so don't fail */ | ||
787 | dev->dummy_dma_page = pci_alloc_consistent(pdev, PAGE_SIZE, | ||
788 | &dev->dummy_dma_page_physical_address); | ||
789 | r592_stop_dma(dev, 0); | ||
790 | |||
791 | if (request_irq(dev->irq, &r592_irq, IRQF_SHARED, | ||
792 | DRV_NAME, dev)) | ||
793 | goto error6; | ||
794 | |||
795 | r592_update_card_detect(dev); | ||
796 | if (memstick_add_host(host)) | ||
797 | goto error7; | ||
798 | |||
799 | message("driver succesfully loaded"); | ||
800 | return 0; | ||
801 | error7: | ||
802 | free_irq(dev->irq, dev); | ||
803 | error6: | ||
804 | if (dev->dummy_dma_page) | ||
805 | pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page, | ||
806 | dev->dummy_dma_page_physical_address); | ||
807 | |||
808 | kthread_stop(dev->io_thread); | ||
809 | error5: | ||
810 | iounmap(dev->mmio); | ||
811 | error4: | ||
812 | pci_release_regions(pdev); | ||
813 | error3: | ||
814 | pci_disable_device(pdev); | ||
815 | error2: | ||
816 | memstick_free_host(host); | ||
817 | error1: | ||
818 | return error; | ||
819 | } | ||
820 | |||
821 | static void r592_remove(struct pci_dev *pdev) | ||
822 | { | ||
823 | int error = 0; | ||
824 | struct r592_device *dev = pci_get_drvdata(pdev); | ||
825 | |||
826 | /* Stop the processing thread. | ||
827 | That ensures that we won't take any more requests */ | ||
828 | kthread_stop(dev->io_thread); | ||
829 | |||
830 | r592_enable_device(dev, false); | ||
831 | |||
832 | while (!error && dev->req) { | ||
833 | dev->req->error = -ETIME; | ||
834 | error = memstick_next_req(dev->host, &dev->req); | ||
835 | } | ||
836 | memstick_remove_host(dev->host); | ||
837 | |||
838 | free_irq(dev->irq, dev); | ||
839 | iounmap(dev->mmio); | ||
840 | pci_release_regions(pdev); | ||
841 | pci_disable_device(pdev); | ||
842 | memstick_free_host(dev->host); | ||
843 | |||
844 | if (dev->dummy_dma_page) | ||
845 | pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page, | ||
846 | dev->dummy_dma_page_physical_address); | ||
847 | } | ||
848 | |||
849 | #ifdef CONFIG_PM | ||
850 | static int r592_suspend(struct device *core_dev) | ||
851 | { | ||
852 | struct pci_dev *pdev = to_pci_dev(core_dev); | ||
853 | struct r592_device *dev = pci_get_drvdata(pdev); | ||
854 | |||
855 | r592_clear_interrupts(dev); | ||
856 | memstick_suspend_host(dev->host); | ||
857 | del_timer_sync(&dev->detect_timer); | ||
858 | return 0; | ||
859 | } | ||
860 | |||
861 | static int r592_resume(struct device *core_dev) | ||
862 | { | ||
863 | struct pci_dev *pdev = to_pci_dev(core_dev); | ||
864 | struct r592_device *dev = pci_get_drvdata(pdev); | ||
865 | |||
866 | r592_clear_interrupts(dev); | ||
867 | r592_enable_device(dev, false); | ||
868 | memstick_resume_host(dev->host); | ||
869 | r592_update_card_detect(dev); | ||
870 | return 0; | ||
871 | } | ||
872 | |||
873 | SIMPLE_DEV_PM_OPS(r592_pm_ops, r592_suspend, r592_resume); | ||
874 | #endif | ||
875 | |||
876 | MODULE_DEVICE_TABLE(pci, r592_pci_id_tbl); | ||
877 | |||
878 | static struct pci_driver r592_pci_driver = { | ||
879 | .name = DRV_NAME, | ||
880 | .id_table = r592_pci_id_tbl, | ||
881 | .probe = r592_probe, | ||
882 | .remove = r592_remove, | ||
883 | #ifdef CONFIG_PM | ||
884 | .driver.pm = &r592_pm_ops, | ||
885 | #endif | ||
886 | }; | ||
887 | |||
888 | static __init int r592_module_init(void) | ||
889 | { | ||
890 | return pci_register_driver(&r592_pci_driver); | ||
891 | } | ||
892 | |||
893 | static void __exit r592_module_exit(void) | ||
894 | { | ||
895 | pci_unregister_driver(&r592_pci_driver); | ||
896 | } | ||
897 | |||
898 | module_init(r592_module_init); | ||
899 | module_exit(r592_module_exit); | ||
900 | |||
901 | module_param(enable_dma, bool, S_IRUGO); | ||
902 | MODULE_PARM_DESC(enable_dma, "Enable use of DMA (default)"); | ||
903 | module_param(debug, int, S_IRUGO | S_IWUSR); | ||
904 | MODULE_PARM_DESC(debug, "Debug level (0-3)"); | ||
905 | |||
906 | MODULE_LICENSE("GPL"); | ||
907 | MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); | ||
908 | MODULE_DESCRIPTION("Ricoh R5C592 Memstick/Memstick PRO card reader driver"); | ||
diff --git a/drivers/memstick/host/r592.h b/drivers/memstick/host/r592.h new file mode 100644 index 00000000000..eee264e6028 --- /dev/null +++ b/drivers/memstick/host/r592.h | |||
@@ -0,0 +1,175 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 - Maxim Levitsky | ||
3 | * driver for Ricoh memstick readers | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #ifndef R592_H | ||
#define R592_H
11 | |||
12 | #include <linux/memstick.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/workqueue.h> | ||
16 | #include <linux/kfifo.h> | ||
17 | #include <linux/ctype.h> | ||
18 | |||
19 | /* writing to this reg (TPC number, length) triggers TPC execution */ | ||
20 | #define R592_TPC_EXEC 0x00 | ||
21 | #define R592_TPC_EXEC_LEN_SHIFT 16 /* Bits 16..25 are TPC len */ | ||
22 | #define R592_TPC_EXEC_BIG_FIFO (1 << 26) /* If bit 26 is set, large fifo is used (reg 48) */ | ||
23 | #define R592_TPC_EXEC_TPC_SHIFT 28 /* Bits 28..31 are the TPC number */ | ||
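/*
 * Example (illustrative): a 512 byte MS_TPC_READ_LONG_DATA through the
 * large fifo is started by writing
 *
 *	(512 << R592_TPC_EXEC_LEN_SHIFT) |
 *	(MS_TPC_READ_LONG_DATA << R592_TPC_EXEC_TPC_SHIFT) |
 *	R592_TPC_EXEC_BIG_FIFO
 *
 * to R592_TPC_EXEC, which is what r592_execute_tpc() does.
 */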
24 | |||
25 | |||
26 | /* Window for small TPC fifo (big endian) */ | ||
27 | /* reads and writes are always done in 8 byte chunks */ | ||
28 | /* Not used in the driver, because the large fifo does a better job */ | ||
29 | #define R592_SFIFO 0x08 | ||
30 | |||
31 | |||
32 | /* Status register (ms int, small fifo, IO) */ | ||
33 | #define R592_STATUS 0x10 | ||
34 | /* Parallel INT bits */ | ||
35 | #define R592_STATUS_P_CMDNACK (1 << 16) /* INT reg: NACK (parallel mode) */ | ||
36 | #define R592_STATUS_P_BREQ (1 << 17) /* INT reg: card ready (parallel mode) */ | ||
37 | #define R592_STATUS_P_INTERR (1 << 18) /* INT reg: int error (parallel mode) */ | ||
38 | #define R592_STATUS_P_CED (1 << 19) /* INT reg: command done (parallel mode) */ | ||
39 | |||
40 | /* Fifo status */ | ||
41 | #define R592_STATUS_SFIFO_FULL (1 << 20) /* Small Fifo almost full (last chunk is written) */ | ||
42 | #define R592_STATUS_SFIFO_EMPTY (1 << 21) /* Small Fifo empty */ | ||
43 | |||
44 | /* Error detection via CRC */ | ||
45 | #define R592_STATUS_SEND_ERR (1 << 24) /* Send failed */ | ||
46 | #define R592_STATUS_RECV_ERR (1 << 25) /* Receive failed */ | ||
47 | |||
48 | /* Card state */ | ||
49 | #define R592_STATUS_RDY (1 << 28) /* RDY signal received */ | ||
50 | #define R592_STATUS_CED (1 << 29) /* INT: Command done (serial mode) */ | ||
51 | #define R592_STATUS_SFIFO_INPUT (1 << 30) /* Small fifo received data */ | ||
52 | |||
53 | #define R592_SFIFO_SIZE 32 /* total size of small fifo is 32 bytes */ | ||
54 | #define R592_SFIFO_PACKET 8 /* packet size of small fifo */ | ||
55 | |||
56 | /* IO control */ | ||
57 | #define R592_IO 0x18 | ||
58 | #define R592_IO_16 (1 << 16) /* Set by default, can be cleared */ | ||
59 | #define R592_IO_18 (1 << 18) /* Set by default, can be cleared */ | ||
60 | #define R592_IO_SERIAL1 (1 << 20) /* Set by default, can be cleared, (cleared on parallel) */ | ||
61 | #define R592_IO_22 (1 << 22) /* Set by default, can be cleared */ | ||
62 | #define R592_IO_DIRECTION (1 << 24) /* TPC direction (1 write 0 read) */ | ||
63 | #define R592_IO_26 (1 << 26) /* Set by default, can be cleared */ | ||
64 | #define R592_IO_SERIAL2 (1 << 30) /* Set by default, can be cleared (cleared on parallel), serial doesn't work if unset */ | ||
65 | #define R592_IO_RESET (1 << 31) /* Reset, sets defaults */ | ||
66 | |||
67 | |||
68 | /* Turns hardware on/off */ | ||
69 | #define R592_POWER 0x20 /* bits 0-7 writeable */ | ||
70 | #define R592_POWER_0 (1 << 0) /* set on start, cleared on stop - must be set */ | ||
71 | #define R592_POWER_1 (1 << 1) /* set on start, cleared on stop - must be set */ | ||
72 | #define R592_POWER_3 (1 << 3) /* must be clear */ | ||
73 | #define R592_POWER_20 (1 << 5) /* set before switching to parallel */ | ||
74 | |||
75 | /* IO mode*/ | ||
76 | #define R592_IO_MODE 0x24 | ||
77 | #define R592_IO_MODE_SERIAL 1 | ||
78 | #define R592_IO_MODE_PARALLEL 3 | ||
79 | |||
80 | |||
81 | /* IRQ, card detection, large fifo (first word irq status, second enable) */ | ||
82 | /* IRQs are ACKed by clearing the bits */ | ||
83 | #define R592_REG_MSC 0x28 | ||
84 | #define R592_REG_MSC_PRSNT (1 << 1) /* card present (only status) */ | ||
85 | #define R592_REG_MSC_IRQ_INSERT (1 << 8) /* detect insert / card inserted */ | ||
86 | #define R592_REG_MSC_IRQ_REMOVE (1 << 9) /* detect removal / card removed */ | ||
87 | #define R592_REG_MSC_FIFO_EMPTY (1 << 10) /* fifo is empty */ | ||
88 | #define R592_REG_MSC_FIFO_DMA_DONE (1 << 11) /* dma enable / dma done */ | ||
89 | |||
90 | #define R592_REG_MSC_FIFO_USER_ORN (1 << 12) /* set if software reads empty fifo (if R592_REG_MSC_FIFO_EMPTY is set) */ | ||
91 | #define R592_REG_MSC_FIFO_MISMATH (1 << 13) /* set if amount of data in fifo doesn't match amount in TPC */ | ||
92 | #define R592_REG_MSC_FIFO_DMA_ERR (1 << 14) /* IO failure */ | ||
93 | #define R592_REG_MSC_LED (1 << 15) /* clear to turn led off (only status) */ | ||
94 | |||
95 | #define DMA_IRQ_ACK_MASK \ | ||
96 | (R592_REG_MSC_FIFO_DMA_DONE | R592_REG_MSC_FIFO_DMA_ERR) | ||
97 | |||
98 | #define DMA_IRQ_EN_MASK (DMA_IRQ_ACK_MASK << 16) | ||
99 | |||
100 | #define IRQ_ALL_ACK_MASK 0x00007F00 | ||
101 | #define IRQ_ALL_EN_MASK (IRQ_ALL_ACK_MASK << 16) | ||
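/*
 * Sketch (illustrative): the low word of R592_REG_MSC holds the IRQ
 * status bits, the high word the matching enable bits, so a handler
 * splits them as
 *
 *	reg = r592_read_reg(dev, R592_REG_MSC);
 *	irq_enable = reg >> 16;
 *	irq_status = reg & 0xFFFF;
 *
 * and DMA interrupts are enabled with
 *	r592_set_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK);
 */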
102 | |||
103 | /* DMA address for large FIFO read/writes */ | ||
104 | #define R592_FIFO_DMA 0x2C | ||
105 | |||
106 | /* PIO access to large FIFO (512 bytes) (big endian) */ | ||
107 | #define R592_FIFO_PIO 0x30 | ||
108 | #define R592_LFIFO_SIZE 512 /* large fifo size */ | ||
109 | |||
110 | |||
111 | /* large FIFO DMA settings */ | ||
112 | #define R592_FIFO_DMA_SETTINGS 0x34 | ||
113 | #define R592_FIFO_DMA_SETTINGS_EN (1 << 0) /* DMA enabled */ | ||
114 | #define R592_FIFO_DMA_SETTINGS_DIR (1 << 1) /* Dma direction (1 read, 0 write) */ | ||
115 | #define R592_FIFO_DMA_SETTINGS_CAP (1 << 24) /* DMA is available */ | ||
116 | |||
117 | /* Maybe just a delay */ | ||
118 | /* Bits 17..19 are just a number */ | ||
119 | /* When bit 16 is set, the hardware waits and then sets bit 20 */ | ||
120 | /* The time to wait is about 50 spins * 2 ^ (bits 17..19) */ | ||
121 | /* It seems possible to just ignore this */ | ||
122 | /* Probably a debug register */ | ||
123 | #define R592_REG38 0x38 | ||
124 | #define R592_REG38_CHANGE (1 << 16) /* Start bit */ | ||
125 | #define R592_REG38_DONE (1 << 20) /* HW set this after the delay */ | ||
126 | #define R592_REG38_SHIFT 17 | ||
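/*
 * Worked example of the delay formula above (illustrative): with bits
 * 17..19 holding 3, the wait after setting R592_REG38_CHANGE is about
 * 50 * 2^3 = 400 spins, after which the hardware sets R592_REG38_DONE.
 */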
127 | |||
128 | /* Debug register, written (0xABCDEF00) when error happens - not used */ | ||
129 | #define R592_REG_3C 0x3C | ||
130 | |||
131 | struct r592_device { | ||
132 | struct pci_dev *pci_dev; | ||
133 | struct memstick_host *host; /* host backpointer */ | ||
134 | struct memstick_request *req; /* current request */ | ||
135 | |||
136 | /* Registers, IRQ */ | ||
137 | void __iomem *mmio; | ||
138 | int irq; | ||
139 | spinlock_t irq_lock; | ||
140 | spinlock_t io_thread_lock; | ||
141 | struct timer_list detect_timer; | ||
142 | |||
143 | struct task_struct *io_thread; | ||
144 | bool parallel_mode; | ||
145 | |||
146 | DECLARE_KFIFO(pio_fifo, u8, sizeof(u32)); | ||
147 | |||
148 | /* DMA area */ | ||
149 | int dma_capable; | ||
150 | int dma_error; | ||
151 | struct completion dma_done; | ||
152 | void *dummy_dma_page; | ||
153 | dma_addr_t dummy_dma_page_physical_address; | ||
154 | |||
155 | }; | ||
156 | |||
157 | #define DRV_NAME "r592" | ||
158 | |||
159 | |||
160 | #define message(format, ...) \ | ||
161 | printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__) | ||
162 | |||
163 | #define __dbg(level, format, ...) \ | ||
164 | do { \ | ||
165 | if (debug >= level) \ | ||
166 | printk(KERN_DEBUG DRV_NAME \ | ||
167 | ": " format "\n", ## __VA_ARGS__); \ | ||
168 | } while (0) | ||
169 | |||
170 | |||
171 | #define dbg(format, ...) __dbg(1, format, ## __VA_ARGS__) | ||
172 | #define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__) | ||
173 | #define dbg_reg(format, ...) __dbg(3, format, ## __VA_ARGS__) | ||
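/*
 * Usage sketch (illustrative): with the module parameter debug=2, dbg()
 * and dbg_verbose() print, while dbg_reg() (per-register traces, level 3)
 * stays silent:
 *
 *	dbg("IO: executing %s LEN=%d", name, len);	printed
 *	dbg_reg("reg #%02d == 0x%08x", addr, val);	suppressed
 */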
174 | |||
175 | #endif | ||
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c index 9c511c1604a..011cb6ce861 100644 --- a/drivers/mfd/88pm860x-core.c +++ b/drivers/mfd/88pm860x-core.c | |||
@@ -416,7 +416,6 @@ static int __devinit device_irq_init(struct pm860x_chip *chip, | |||
416 | : chip->companion; | 416 | : chip->companion; |
417 | unsigned char status_buf[INT_STATUS_NUM]; | 417 | unsigned char status_buf[INT_STATUS_NUM]; |
418 | unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; | 418 | unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; |
419 | struct irq_desc *desc; | ||
420 | int i, data, mask, ret = -EINVAL; | 419 | int i, data, mask, ret = -EINVAL; |
421 | int __irq; | 420 | int __irq; |
422 | 421 | ||
@@ -468,19 +467,17 @@ static int __devinit device_irq_init(struct pm860x_chip *chip, | |||
468 | if (!chip->core_irq) | 467 | if (!chip->core_irq) |
469 | goto out; | 468 | goto out; |
470 | 469 | ||
471 | desc = irq_to_desc(chip->core_irq); | ||
472 | |||
473 | /* register IRQ by genirq */ | 470 | /* register IRQ by genirq */ |
474 | for (i = 0; i < ARRAY_SIZE(pm860x_irqs); i++) { | 471 | for (i = 0; i < ARRAY_SIZE(pm860x_irqs); i++) { |
475 | __irq = i + chip->irq_base; | 472 | __irq = i + chip->irq_base; |
476 | set_irq_chip_data(__irq, chip); | 473 | irq_set_chip_data(__irq, chip); |
477 | set_irq_chip_and_handler(__irq, &pm860x_irq_chip, | 474 | irq_set_chip_and_handler(__irq, &pm860x_irq_chip, |
478 | handle_edge_irq); | 475 | handle_edge_irq); |
479 | set_irq_nested_thread(__irq, 1); | 476 | irq_set_nested_thread(__irq, 1); |
480 | #ifdef CONFIG_ARM | 477 | #ifdef CONFIG_ARM |
481 | set_irq_flags(__irq, IRQF_VALID); | 478 | set_irq_flags(__irq, IRQF_VALID); |
482 | #else | 479 | #else |
483 | set_irq_noprobe(__irq); | 480 | irq_set_noprobe(__irq); |
484 | #endif | 481 | #endif |
485 | } | 482 | } |
486 | 483 | ||
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index a9a1af49281..e986f91fff9 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
@@ -133,6 +133,7 @@ config TPS6105X | |||
133 | tristate "TPS61050/61052 Boost Converters" | 133 | tristate "TPS61050/61052 Boost Converters" |
134 | depends on I2C | 134 | depends on I2C |
135 | select REGULATOR | 135 | select REGULATOR |
136 | select MFD_CORE | ||
136 | select REGULATOR_FIXED_VOLTAGE | 137 | select REGULATOR_FIXED_VOLTAGE |
137 | help | 138 | help |
138 | This option enables a driver for the TP61050/TPS61052 | 139 | This option enables a driver for the TP61050/TPS61052 |
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 47f5709f382..ef489f25340 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile | |||
@@ -63,7 +63,7 @@ obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o | |||
63 | obj-$(CONFIG_PMIC_DA903X) += da903x.o | 63 | obj-$(CONFIG_PMIC_DA903X) += da903x.o |
64 | max8925-objs := max8925-core.o max8925-i2c.o | 64 | max8925-objs := max8925-core.o max8925-i2c.o |
65 | obj-$(CONFIG_MFD_MAX8925) += max8925.o | 65 | obj-$(CONFIG_MFD_MAX8925) += max8925.o |
66 | obj-$(CONFIG_MFD_MAX8997) += max8997.o | 66 | obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o |
67 | obj-$(CONFIG_MFD_MAX8998) += max8998.o max8998-irq.o | 67 | obj-$(CONFIG_MFD_MAX8998) += max8998.o max8998-irq.o |
68 | 68 | ||
69 | pcf50633-objs := pcf50633-core.o pcf50633-irq.o | 69 | pcf50633-objs := pcf50633-core.o pcf50633-irq.o |
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c index c12d0428522..ff86acf3e6b 100644 --- a/drivers/mfd/ab3550-core.c +++ b/drivers/mfd/ab3550-core.c | |||
@@ -668,7 +668,7 @@ static int ab3550_startup_irq_enabled(struct device *dev, unsigned int irq) | |||
668 | struct ab3550_platform_data *plf_data; | 668 | struct ab3550_platform_data *plf_data; |
669 | bool val; | 669 | bool val; |
670 | 670 | ||
671 | ab = get_irq_chip_data(irq); | 671 | ab = irq_get_chip_data(irq); |
672 | plf_data = ab->i2c_client[0]->dev.platform_data; | 672 | plf_data = ab->i2c_client[0]->dev.platform_data; |
673 | irq -= plf_data->irq.base; | 673 | irq -= plf_data->irq.base; |
674 | val = ((ab->startup_events[irq / 8] & BIT(irq % 8)) != 0); | 674 | val = ((ab->startup_events[irq / 8] & BIT(irq % 8)) != 0); |
@@ -1296,14 +1296,14 @@ static int __init ab3550_probe(struct i2c_client *client, | |||
1296 | unsigned int irq; | 1296 | unsigned int irq; |
1297 | 1297 | ||
1298 | irq = ab3550_plf_data->irq.base + i; | 1298 | irq = ab3550_plf_data->irq.base + i; |
1299 | set_irq_chip_data(irq, ab); | 1299 | irq_set_chip_data(irq, ab); |
1300 | set_irq_chip_and_handler(irq, &ab3550_irq_chip, | 1300 | irq_set_chip_and_handler(irq, &ab3550_irq_chip, |
1301 | handle_simple_irq); | 1301 | handle_simple_irq); |
1302 | set_irq_nested_thread(irq, 1); | 1302 | irq_set_nested_thread(irq, 1); |
1303 | #ifdef CONFIG_ARM | 1303 | #ifdef CONFIG_ARM |
1304 | set_irq_flags(irq, IRQF_VALID); | 1304 | set_irq_flags(irq, IRQF_VALID); |
1305 | #else | 1305 | #else |
1306 | set_irq_noprobe(irq); | 1306 | irq_set_noprobe(irq); |
1307 | #endif | 1307 | #endif |
1308 | } | 1308 | } |
1309 | 1309 | ||
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index 6e185b272d0..62e33e2258d 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c | |||
@@ -334,14 +334,14 @@ static int ab8500_irq_init(struct ab8500 *ab8500) | |||
334 | int irq; | 334 | int irq; |
335 | 335 | ||
336 | for (irq = base; irq < base + AB8500_NR_IRQS; irq++) { | 336 | for (irq = base; irq < base + AB8500_NR_IRQS; irq++) { |
337 | set_irq_chip_data(irq, ab8500); | 337 | irq_set_chip_data(irq, ab8500); |
338 | set_irq_chip_and_handler(irq, &ab8500_irq_chip, | 338 | irq_set_chip_and_handler(irq, &ab8500_irq_chip, |
339 | handle_simple_irq); | 339 | handle_simple_irq); |
340 | set_irq_nested_thread(irq, 1); | 340 | irq_set_nested_thread(irq, 1); |
341 | #ifdef CONFIG_ARM | 341 | #ifdef CONFIG_ARM |
342 | set_irq_flags(irq, IRQF_VALID); | 342 | set_irq_flags(irq, IRQF_VALID); |
343 | #else | 343 | #else |
344 | set_irq_noprobe(irq); | 344 | irq_set_noprobe(irq); |
345 | #endif | 345 | #endif |
346 | } | 346 | } |
347 | 347 | ||
@@ -357,8 +357,8 @@ static void ab8500_irq_remove(struct ab8500 *ab8500) | |||
357 | #ifdef CONFIG_ARM | 357 | #ifdef CONFIG_ARM |
358 | set_irq_flags(irq, 0); | 358 | set_irq_flags(irq, 0); |
359 | #endif | 359 | #endif |
360 | set_irq_chip_and_handler(irq, NULL, NULL); | 360 | irq_set_chip_and_handler(irq, NULL, NULL); |
361 | set_irq_chip_data(irq, NULL); | 361 | irq_set_chip_data(irq, NULL); |
362 | } | 362 | } |
363 | } | 363 | } |
364 | 364 | ||
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 0241f08fc00..d4a851c6b5b 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c | |||
@@ -139,13 +139,12 @@ static void asic3_irq_flip_edge(struct asic3 *asic, | |||
139 | 139 | ||
140 | static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) | 140 | static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) |
141 | { | 141 | { |
142 | struct asic3 *asic = irq_desc_get_handler_data(desc); | ||
143 | struct irq_data *data = irq_desc_get_irq_data(desc); | ||
142 | int iter, i; | 144 | int iter, i; |
143 | unsigned long flags; | 145 | unsigned long flags; |
144 | struct asic3 *asic; | ||
145 | |||
146 | desc->irq_data.chip->irq_ack(&desc->irq_data); | ||
147 | 146 | ||
148 | asic = get_irq_data(irq); | 147 | data->chip->irq_ack(data); |
149 | 148 | ||
150 | for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { | 149 | for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { |
151 | u32 status; | 150 | u32 status; |
@@ -188,8 +187,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
188 | irqnr = asic->irq_base + | 187 | irqnr = asic->irq_base + |
189 | (ASIC3_GPIOS_PER_BANK * bank) | 188 | (ASIC3_GPIOS_PER_BANK * bank) |
190 | + i; | 189 | + i; |
191 | desc = irq_to_desc(irqnr); | 190 | generic_handle_irq(irqnr); |
192 | desc->handle_irq(irqnr, desc); | ||
193 | if (asic->irq_bothedge[bank] & bit) | 191 | if (asic->irq_bothedge[bank] & bit) |
194 | asic3_irq_flip_edge(asic, base, | 192 | asic3_irq_flip_edge(asic, base, |
195 | bit); | 193 | bit); |
@@ -200,11 +198,8 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
200 | /* Handle remaining IRQs in the status register */ | 198 | /* Handle remaining IRQs in the status register */ |
201 | for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) { | 199 | for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) { |
202 | /* They start at bit 4 and go up */ | 200 | /* They start at bit 4 and go up */ |
203 | if (status & (1 << (i - ASIC3_NUM_GPIOS + 4))) { | 201 | if (status & (1 << (i - ASIC3_NUM_GPIOS + 4))) |
204 | desc = irq_to_desc(asic->irq_base + i); | 202 | generic_handle_irq(asic->irq_base + i); |
205 | desc->handle_irq(asic->irq_base + i, | ||
206 | desc); | ||
207 | } | ||
208 | } | 203 | } |
209 | } | 204 | } |
210 | 205 | ||
@@ -393,21 +388,21 @@ static int __init asic3_irq_probe(struct platform_device *pdev) | |||
393 | 388 | ||
394 | for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) { | 389 | for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) { |
395 | if (irq < asic->irq_base + ASIC3_NUM_GPIOS) | 390 | if (irq < asic->irq_base + ASIC3_NUM_GPIOS) |
396 | set_irq_chip(irq, &asic3_gpio_irq_chip); | 391 | irq_set_chip(irq, &asic3_gpio_irq_chip); |
397 | else | 392 | else |
398 | set_irq_chip(irq, &asic3_irq_chip); | 393 | irq_set_chip(irq, &asic3_irq_chip); |
399 | 394 | ||
400 | set_irq_chip_data(irq, asic); | 395 | irq_set_chip_data(irq, asic); |
401 | set_irq_handler(irq, handle_level_irq); | 396 | irq_set_handler(irq, handle_level_irq); |
402 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 397 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
403 | } | 398 | } |
404 | 399 | ||
405 | asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK), | 400 | asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK), |
406 | ASIC3_INTMASK_GINTMASK); | 401 | ASIC3_INTMASK_GINTMASK); |
407 | 402 | ||
408 | set_irq_chained_handler(asic->irq_nr, asic3_irq_demux); | 403 | irq_set_chained_handler(asic->irq_nr, asic3_irq_demux); |
409 | set_irq_type(asic->irq_nr, IRQ_TYPE_EDGE_RISING); | 404 | irq_set_irq_type(asic->irq_nr, IRQ_TYPE_EDGE_RISING); |
410 | set_irq_data(asic->irq_nr, asic); | 405 | irq_set_handler_data(asic->irq_nr, asic); |
411 | 406 | ||
412 | return 0; | 407 | return 0; |
413 | } | 408 | } |
@@ -421,11 +416,10 @@ static void asic3_irq_remove(struct platform_device *pdev) | |||
421 | 416 | ||
422 | for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) { | 417 | for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) { |
423 | set_irq_flags(irq, 0); | 418 | set_irq_flags(irq, 0); |
424 | set_irq_handler(irq, NULL); | 419 | irq_set_chip_and_handler(irq, NULL, NULL); |
425 | set_irq_chip(irq, NULL); | 420 | irq_set_chip_data(irq, NULL); |
426 | set_irq_chip_data(irq, NULL); | ||
427 | } | 421 | } |
428 | set_irq_chained_handler(asic->irq_nr, NULL); | 422 | irq_set_chained_handler(asic->irq_nr, NULL); |
429 | } | 423 | } |
430 | 424 | ||
431 | /* GPIOs */ | 425 | /* GPIOs */ |
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c index 886a0687106..155fa040788 100644 --- a/drivers/mfd/cs5535-mfd.c +++ b/drivers/mfd/cs5535-mfd.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/mfd/core.h> | 27 | #include <linux/mfd/core.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
30 | #include <asm/olpc.h> | ||
30 | 31 | ||
31 | #define DRV_NAME "cs5535-mfd" | 32 | #define DRV_NAME "cs5535-mfd" |
32 | 33 | ||
@@ -111,6 +112,20 @@ static __devinitdata struct mfd_cell cs5535_mfd_cells[] = { | |||
111 | }, | 112 | }, |
112 | }; | 113 | }; |
113 | 114 | ||
115 | #ifdef CONFIG_OLPC | ||
116 | static void __devinit cs5535_clone_olpc_cells(void) | ||
117 | { | ||
118 | const char *acpi_clones[] = { "olpc-xo1-pm-acpi", "olpc-xo1-sci-acpi" }; | ||
119 | |||
120 | if (!machine_is_olpc()) | ||
121 | return; | ||
122 | |||
123 | mfd_clone_cell("cs5535-acpi", acpi_clones, ARRAY_SIZE(acpi_clones)); | ||
124 | } | ||
125 | #else | ||
126 | static void cs5535_clone_olpc_cells(void) { } | ||
127 | #endif | ||
128 | |||
114 | static int __devinit cs5535_mfd_probe(struct pci_dev *pdev, | 129 | static int __devinit cs5535_mfd_probe(struct pci_dev *pdev, |
115 | const struct pci_device_id *id) | 130 | const struct pci_device_id *id) |
116 | { | 131 | { |
@@ -139,6 +154,7 @@ static int __devinit cs5535_mfd_probe(struct pci_dev *pdev, | |||
139 | dev_err(&pdev->dev, "MFD add devices failed: %d\n", err); | 154 | dev_err(&pdev->dev, "MFD add devices failed: %d\n", err); |
140 | goto err_disable; | 155 | goto err_disable; |
141 | } | 156 | } |
157 | cs5535_clone_olpc_cells(); | ||
142 | 158 | ||
143 | dev_info(&pdev->dev, "%zu devices registered.\n", | 159 | dev_info(&pdev->dev, "%zu devices registered.\n", |
144 | ARRAY_SIZE(cs5535_mfd_cells)); | 160 | ARRAY_SIZE(cs5535_mfd_cells)); |
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 9e2d8dd5f9e..f2f4029e21a 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c | |||
@@ -162,6 +162,7 @@ static void pcap_unmask_irq(struct irq_data *d) | |||
162 | 162 | ||
163 | static struct irq_chip pcap_irq_chip = { | 163 | static struct irq_chip pcap_irq_chip = { |
164 | .name = "pcap", | 164 | .name = "pcap", |
165 | .irq_disable = pcap_mask_irq, | ||
165 | .irq_mask = pcap_mask_irq, | 166 | .irq_mask = pcap_mask_irq, |
166 | .irq_unmask = pcap_unmask_irq, | 167 | .irq_unmask = pcap_unmask_irq, |
167 | }; | 168 | }; |
@@ -196,17 +197,8 @@ static void pcap_isr_work(struct work_struct *work) | |||
196 | local_irq_disable(); | 197 | local_irq_disable(); |
197 | service = isr & ~msr; | 198 | service = isr & ~msr; |
198 | for (irq = pcap->irq_base; service; service >>= 1, irq++) { | 199 | for (irq = pcap->irq_base; service; service >>= 1, irq++) { |
199 | if (service & 1) { | 200 | if (service & 1) |
200 | struct irq_desc *desc = irq_to_desc(irq); | 201 | generic_handle_irq(irq); |
201 | |||
202 | if (WARN(!desc, "Invalid PCAP IRQ %d\n", irq)) | ||
203 | break; | ||
204 | |||
205 | if (desc->status & IRQ_DISABLED) | ||
206 | note_interrupt(irq, desc, IRQ_NONE); | ||
207 | else | ||
208 | desc->handle_irq(irq, desc); | ||
209 | } | ||
210 | } | 202 | } |
211 | local_irq_enable(); | 203 | local_irq_enable(); |
212 | ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr); | 204 | ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr); |
@@ -215,7 +207,7 @@ static void pcap_isr_work(struct work_struct *work) | |||
215 | 207 | ||
216 | static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) | 208 | static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) |
217 | { | 209 | { |
218 | struct pcap_chip *pcap = get_irq_data(irq); | 210 | struct pcap_chip *pcap = irq_get_handler_data(irq); |
219 | 211 | ||
220 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 212 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
221 | queue_work(pcap->workqueue, &pcap->isr_work); | 213 | queue_work(pcap->workqueue, &pcap->isr_work); |
@@ -419,7 +411,7 @@ static int __devexit ezx_pcap_remove(struct spi_device *spi) | |||
419 | 411 | ||
420 | /* cleanup irqchip */ | 412 | /* cleanup irqchip */ |
421 | for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) | 413 | for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) |
422 | set_irq_chip_and_handler(i, NULL, NULL); | 414 | irq_set_chip_and_handler(i, NULL, NULL); |
423 | 415 | ||
424 | destroy_workqueue(pcap->workqueue); | 416 | destroy_workqueue(pcap->workqueue); |
425 | 417 | ||
@@ -476,12 +468,12 @@ static int __devinit ezx_pcap_probe(struct spi_device *spi) | |||
476 | 468 | ||
477 | /* setup irq chip */ | 469 | /* setup irq chip */ |
478 | for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) { | 470 | for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) { |
479 | set_irq_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq); | 471 | irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq); |
480 | set_irq_chip_data(i, pcap); | 472 | irq_set_chip_data(i, pcap); |
481 | #ifdef CONFIG_ARM | 473 | #ifdef CONFIG_ARM |
482 | set_irq_flags(i, IRQF_VALID); | 474 | set_irq_flags(i, IRQF_VALID); |
483 | #else | 475 | #else |
484 | set_irq_noprobe(i); | 476 | irq_set_noprobe(i); |
485 | #endif | 477 | #endif |
486 | } | 478 | } |
487 | 479 | ||
@@ -490,10 +482,10 @@ static int __devinit ezx_pcap_probe(struct spi_device *spi) | |||
490 | ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER); | 482 | ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER); |
491 | pcap->msr = PCAP_MASK_ALL_INTERRUPT; | 483 | pcap->msr = PCAP_MASK_ALL_INTERRUPT; |
492 | 484 | ||
493 | set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING); | 485 | irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING); |
494 | set_irq_data(spi->irq, pcap); | 486 | irq_set_handler_data(spi->irq, pcap); |
495 | set_irq_chained_handler(spi->irq, pcap_irq_handler); | 487 | irq_set_chained_handler(spi->irq, pcap_irq_handler); |
496 | set_irq_wake(spi->irq, 1); | 488 | irq_set_irq_wake(spi->irq, 1); |
497 | 489 | ||
498 | /* ADC */ | 490 | /* ADC */ |
499 | adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ? | 491 | adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ? |
@@ -522,7 +514,7 @@ remove_subdevs: | |||
522 | free_irq(adc_irq, pcap); | 514 | free_irq(adc_irq, pcap); |
523 | free_irqchip: | 515 | free_irqchip: |
524 | for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) | 516 | for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) |
525 | set_irq_chip_and_handler(i, NULL, NULL); | 517 | irq_set_chip_and_handler(i, NULL, NULL); |
526 | /* destroy_workqueue: */ | 518 | /* destroy_workqueue: */ |
527 | destroy_workqueue(pcap->workqueue); | 519 | destroy_workqueue(pcap->workqueue); |
528 | free_pcap: | 520 | free_pcap: |
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c index d00b6d1a69e..bbaec0ccba8 100644 --- a/drivers/mfd/htc-egpio.c +++ b/drivers/mfd/htc-egpio.c | |||
@@ -100,7 +100,7 @@ static struct irq_chip egpio_muxed_chip = { | |||
100 | 100 | ||
101 | static void egpio_handler(unsigned int irq, struct irq_desc *desc) | 101 | static void egpio_handler(unsigned int irq, struct irq_desc *desc) |
102 | { | 102 | { |
103 | struct egpio_info *ei = get_irq_data(irq); | 103 | struct egpio_info *ei = irq_desc_get_handler_data(desc); |
104 | int irqpin; | 104 | int irqpin; |
105 | 105 | ||
106 | /* Read current pins. */ | 106 | /* Read current pins. */ |
@@ -113,9 +113,7 @@ static void egpio_handler(unsigned int irq, struct irq_desc *desc) | |||
113 | for_each_set_bit(irqpin, &readval, ei->nirqs) { | 113 | for_each_set_bit(irqpin, &readval, ei->nirqs) { |
114 | /* Run irq handler */ | 114 | /* Run irq handler */ |
115 | pr_debug("got IRQ %d\n", irqpin); | 115 | pr_debug("got IRQ %d\n", irqpin); |
116 | irq = ei->irq_start + irqpin; | 116 | generic_handle_irq(ei->irq_start + irqpin); |
117 | desc = irq_to_desc(irq); | ||
118 | desc->handle_irq(irq, desc); | ||
119 | } | 117 | } |
120 | } | 118 | } |
121 | 119 | ||
@@ -346,14 +344,14 @@ static int __init egpio_probe(struct platform_device *pdev) | |||
346 | ei->ack_write = 0; | 344 | ei->ack_write = 0; |
347 | irq_end = ei->irq_start + ei->nirqs; | 345 | irq_end = ei->irq_start + ei->nirqs; |
348 | for (irq = ei->irq_start; irq < irq_end; irq++) { | 346 | for (irq = ei->irq_start; irq < irq_end; irq++) { |
349 | set_irq_chip(irq, &egpio_muxed_chip); | 347 | irq_set_chip_and_handler(irq, &egpio_muxed_chip, |
350 | set_irq_chip_data(irq, ei); | 348 | handle_simple_irq); |
351 | set_irq_handler(irq, handle_simple_irq); | 349 | irq_set_chip_data(irq, ei); |
352 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 350 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
353 | } | 351 | } |
354 | set_irq_type(ei->chained_irq, IRQ_TYPE_EDGE_RISING); | 352 | irq_set_irq_type(ei->chained_irq, IRQ_TYPE_EDGE_RISING); |
355 | set_irq_data(ei->chained_irq, ei); | 353 | irq_set_handler_data(ei->chained_irq, ei); |
356 | set_irq_chained_handler(ei->chained_irq, egpio_handler); | 354 | irq_set_chained_handler(ei->chained_irq, egpio_handler); |
357 | ack_irqs(ei); | 355 | ack_irqs(ei); |
358 | 356 | ||
359 | device_init_wakeup(&pdev->dev, 1); | 357 | device_init_wakeup(&pdev->dev, 1); |
@@ -375,11 +373,10 @@ static int __exit egpio_remove(struct platform_device *pdev) | |||
375 | if (ei->chained_irq) { | 373 | if (ei->chained_irq) { |
376 | irq_end = ei->irq_start + ei->nirqs; | 374 | irq_end = ei->irq_start + ei->nirqs; |
377 | for (irq = ei->irq_start; irq < irq_end; irq++) { | 375 | for (irq = ei->irq_start; irq < irq_end; irq++) { |
378 | set_irq_chip(irq, NULL); | 376 | irq_set_chip_and_handler(irq, NULL, NULL); |
379 | set_irq_handler(irq, NULL); | ||
380 | set_irq_flags(irq, 0); | 377 | set_irq_flags(irq, 0); |
381 | } | 378 | } |
382 | set_irq_chained_handler(ei->chained_irq, NULL); | 379 | irq_set_chained_handler(ei->chained_irq, NULL); |
383 | device_init_wakeup(&pdev->dev, 0); | 380 | device_init_wakeup(&pdev->dev, 0); |
384 | } | 381 | } |
385 | iounmap(ei->base_addr); | 382 | iounmap(ei->base_addr); |
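
egpio_handler now pulls its driver state straight from the descriptor and hands dispatch to generic_handle_irq() instead of open-coding the irq_to_desc()/desc->handle_irq() dance. A self-contained sketch of that demux shape; struct my_mux and my_read_pending() are hypothetical stand-ins for the driver's own state and MMIO status read:

    #include <linux/irq.h>
    #include <linux/bitops.h>

    struct my_mux {                 /* illustrative driver state */
            unsigned int irq_start;
            unsigned int nirqs;
    };

    static unsigned long my_read_pending(struct my_mux *mux)
    {
            return 0;               /* stand-in for the MMIO status read */
    }

    static void my_mux_handler(unsigned int irq, struct irq_desc *desc)
    {
            /* handler data now comes from the descriptor itself ... */
            struct my_mux *mux = irq_desc_get_handler_data(desc);
            unsigned long pending = my_read_pending(mux);
            int bit;

            /* ... and genirq invokes the right flow handler per sub-IRQ */
            for_each_set_bit(bit, &pending, mux->nirqs)
                    generic_handle_irq(mux->irq_start + bit);
    }
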
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c index 296ad1562f6..d55065cc324 100644 --- a/drivers/mfd/htc-i2cpld.c +++ b/drivers/mfd/htc-i2cpld.c | |||
@@ -58,6 +58,7 @@ struct htcpld_chip { | |||
58 | uint irq_start; | 58 | uint irq_start; |
59 | int nirqs; | 59 | int nirqs; |
60 | 60 | ||
61 | unsigned int flow_type; | ||
61 | /* | 62 | /* |
62 | * Work structure to allow for setting values outside of any | 63 | * Work structure to allow for setting values outside of any |
63 | * possible interrupt context | 64 | * possible interrupt context |
@@ -97,12 +98,7 @@ static void htcpld_unmask(struct irq_data *data) | |||
97 | 98 | ||
98 | static int htcpld_set_type(struct irq_data *data, unsigned int flags) | 99 | static int htcpld_set_type(struct irq_data *data, unsigned int flags) |
99 | { | 100 | { |
100 | struct irq_desc *d = irq_to_desc(data->irq); | 101 | struct htcpld_chip *chip = irq_data_get_irq_chip_data(data); |
101 | |||
102 | if (!d) { | ||
103 | pr_err("HTCPLD invalid IRQ: %d\n", data->irq); | ||
104 | return -EINVAL; | ||
105 | } | ||
106 | 102 | ||
107 | if (flags & ~IRQ_TYPE_SENSE_MASK) | 103 | if (flags & ~IRQ_TYPE_SENSE_MASK) |
108 | return -EINVAL; | 104 | return -EINVAL; |
@@ -111,9 +107,7 @@ static int htcpld_set_type(struct irq_data *data, unsigned int flags) | |||
111 | if (flags & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)) | 107 | if (flags & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)) |
112 | return -EINVAL; | 108 | return -EINVAL; |
113 | 109 | ||
114 | d->status &= ~IRQ_TYPE_SENSE_MASK; | 110 | chip->flow_type = flags; |
115 | d->status |= flags; | ||
116 | |||
117 | return 0; | 111 | return 0; |
118 | } | 112 | } |
119 | 113 | ||
@@ -135,7 +129,6 @@ static irqreturn_t htcpld_handler(int irq, void *dev) | |||
135 | unsigned int i; | 129 | unsigned int i; |
136 | unsigned long flags; | 130 | unsigned long flags; |
137 | int irqpin; | 131 | int irqpin; |
138 | struct irq_desc *desc; | ||
139 | 132 | ||
140 | if (!htcpld) { | 133 | if (!htcpld) { |
141 | pr_debug("htcpld is null in ISR\n"); | 134 | pr_debug("htcpld is null in ISR\n"); |
@@ -195,23 +188,19 @@ static irqreturn_t htcpld_handler(int irq, void *dev) | |||
195 | * associated interrupts. | 188 | * associated interrupts. |
196 | */ | 189 | */ |
197 | for (irqpin = 0; irqpin < chip->nirqs; irqpin++) { | 190 | for (irqpin = 0; irqpin < chip->nirqs; irqpin++) { |
198 | unsigned oldb, newb; | 191 | unsigned oldb, newb, type = chip->flow_type; |
199 | int flags; | ||
200 | 192 | ||
201 | irq = chip->irq_start + irqpin; | 193 | irq = chip->irq_start + irqpin; |
202 | desc = irq_to_desc(irq); | ||
203 | flags = desc->status; | ||
204 | 194 | ||
205 | /* Run the IRQ handler, but only if the bit value | 195 | /* Run the IRQ handler, but only if the bit value |
206 | * changed, and the proper flags are set */ | 196 | * changed, and the proper flags are set */ |
207 | oldb = (old_val >> irqpin) & 1; | 197 | oldb = (old_val >> irqpin) & 1; |
208 | newb = (uval >> irqpin) & 1; | 198 | newb = (uval >> irqpin) & 1; |
209 | 199 | ||
210 | if ((!oldb && newb && (flags & IRQ_TYPE_EDGE_RISING)) || | 200 | if ((!oldb && newb && (type & IRQ_TYPE_EDGE_RISING)) || |
211 | (oldb && !newb && | 201 | (oldb && !newb && (type & IRQ_TYPE_EDGE_FALLING))) { |
212 | (flags & IRQ_TYPE_EDGE_FALLING))) { | ||
213 | pr_debug("fire IRQ %d\n", irqpin); | 202 | pr_debug("fire IRQ %d\n", irqpin); |
214 | desc->handle_irq(irq, desc); | 203 | generic_handle_irq(irq); |
215 | } | 204 | } |
216 | } | 205 | } |
217 | } | 206 | } |
@@ -359,13 +348,13 @@ static int __devinit htcpld_setup_chip_irq( | |||
359 | /* Setup irq handlers */ | 348 | /* Setup irq handlers */ |
360 | irq_end = chip->irq_start + chip->nirqs; | 349 | irq_end = chip->irq_start + chip->nirqs; |
361 | for (irq = chip->irq_start; irq < irq_end; irq++) { | 350 | for (irq = chip->irq_start; irq < irq_end; irq++) { |
362 | set_irq_chip(irq, &htcpld_muxed_chip); | 351 | irq_set_chip_and_handler(irq, &htcpld_muxed_chip, |
363 | set_irq_chip_data(irq, chip); | 352 | handle_simple_irq); |
364 | set_irq_handler(irq, handle_simple_irq); | 353 | irq_set_chip_data(irq, chip); |
365 | #ifdef CONFIG_ARM | 354 | #ifdef CONFIG_ARM |
366 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 355 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
367 | #else | 356 | #else |
368 | set_irq_probe(irq); | 357 | irq_set_probe(irq); |
369 | #endif | 358 | #endif |
370 | } | 359 | } |
371 | 360 | ||
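
The htc-i2cpld change stops writing trigger flags into irq_desc->status and instead caches them in driver state reached through the chip data pointer, which the handler thread later consults. The same idea in isolation, with hypothetical names:

    #include <linux/irq.h>

    struct my_pld {
            unsigned int flow_type;         /* cached trigger setting */
    };

    static int my_set_type(struct irq_data *data, unsigned int flags)
    {
            struct my_pld *pld = irq_data_get_irq_chip_data(data);

            if (flags & ~IRQ_TYPE_SENSE_MASK)
                    return -EINVAL;
            /* a polled PLD like this cannot do level triggers */
            if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
                    return -EINVAL;

            pld->flow_type = flags;
            return 0;
    }
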
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index aa518b9beaf..a0bd0cf05af 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c | |||
@@ -112,7 +112,7 @@ static struct irq_chip jz4740_adc_irq_chip = { | |||
112 | 112 | ||
113 | static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc) | 113 | static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc) |
114 | { | 114 | { |
115 | struct jz4740_adc *adc = get_irq_desc_data(desc); | 115 | struct jz4740_adc *adc = irq_desc_get_handler_data(desc); |
116 | uint8_t status; | 116 | uint8_t status; |
117 | unsigned int i; | 117 | unsigned int i; |
118 | 118 | ||
@@ -310,13 +310,13 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev) | |||
310 | platform_set_drvdata(pdev, adc); | 310 | platform_set_drvdata(pdev, adc); |
311 | 311 | ||
312 | for (irq = adc->irq_base; irq < adc->irq_base + 5; ++irq) { | 312 | for (irq = adc->irq_base; irq < adc->irq_base + 5; ++irq) { |
313 | set_irq_chip_data(irq, adc); | 313 | irq_set_chip_data(irq, adc); |
314 | set_irq_chip_and_handler(irq, &jz4740_adc_irq_chip, | 314 | irq_set_chip_and_handler(irq, &jz4740_adc_irq_chip, |
315 | handle_level_irq); | 315 | handle_level_irq); |
316 | } | 316 | } |
317 | 317 | ||
318 | set_irq_data(adc->irq, adc); | 318 | irq_set_handler_data(adc->irq, adc); |
319 | set_irq_chained_handler(adc->irq, jz4740_adc_irq_demux); | 319 | irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux); |
320 | 320 | ||
321 | writeb(0x00, adc->base + JZ_REG_ADC_ENABLE); | 321 | writeb(0x00, adc->base + JZ_REG_ADC_ENABLE); |
322 | writeb(0xff, adc->base + JZ_REG_ADC_CTRL); | 322 | writeb(0xff, adc->base + JZ_REG_ADC_CTRL); |
@@ -347,8 +347,8 @@ static int __devexit jz4740_adc_remove(struct platform_device *pdev) | |||
347 | 347 | ||
348 | mfd_remove_devices(&pdev->dev); | 348 | mfd_remove_devices(&pdev->dev); |
349 | 349 | ||
350 | set_irq_data(adc->irq, NULL); | 350 | irq_set_handler_data(adc->irq, NULL); |
351 | set_irq_chained_handler(adc->irq, NULL); | 351 | irq_set_chained_handler(adc->irq, NULL); |
352 | 352 | ||
353 | iounmap(adc->base); | 353 | iounmap(adc->base); |
354 | release_mem_region(adc->mem->start, resource_size(adc->mem)); | 354 | release_mem_region(adc->mem->start, resource_size(adc->mem)); |
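
jz4740-adc keeps probe and remove symmetric under the new API: each irq_set_handler_data()/irq_set_chained_handler() pair from probe is undone with NULL in remove. Condensed, with placeholder names:

    #include <linux/irq.h>

    static void my_demux(unsigned int irq, struct irq_desc *desc);  /* as above */

    static void my_attach(unsigned int parent_irq, void *priv)
    {
            irq_set_handler_data(parent_irq, priv);
            irq_set_chained_handler(parent_irq, my_demux);
    }

    static void my_detach(unsigned int parent_irq)
    {
            irq_set_handler_data(parent_irq, NULL);
            irq_set_chained_handler(parent_irq, NULL);  /* reverts to handle_bad_irq */
    }
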
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index 0e998dc4e7d..58cc5fdde01 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c | |||
@@ -517,7 +517,6 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq, | |||
517 | struct max8925_platform_data *pdata) | 517 | struct max8925_platform_data *pdata) |
518 | { | 518 | { |
519 | unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; | 519 | unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; |
520 | struct irq_desc *desc; | ||
521 | int i, ret; | 520 | int i, ret; |
522 | int __irq; | 521 | int __irq; |
523 | 522 | ||
@@ -544,19 +543,18 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq, | |||
544 | mutex_init(&chip->irq_lock); | 543 | mutex_init(&chip->irq_lock); |
545 | chip->core_irq = irq; | 544 | chip->core_irq = irq; |
546 | chip->irq_base = pdata->irq_base; | 545 | chip->irq_base = pdata->irq_base; |
547 | desc = irq_to_desc(chip->core_irq); | ||
548 | 546 | ||
549 | /* register with genirq */ | 547 | /* register with genirq */ |
550 | for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { | 548 | for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { |
551 | __irq = i + chip->irq_base; | 549 | __irq = i + chip->irq_base; |
552 | set_irq_chip_data(__irq, chip); | 550 | irq_set_chip_data(__irq, chip); |
553 | set_irq_chip_and_handler(__irq, &max8925_irq_chip, | 551 | irq_set_chip_and_handler(__irq, &max8925_irq_chip, |
554 | handle_edge_irq); | 552 | handle_edge_irq); |
555 | set_irq_nested_thread(__irq, 1); | 553 | irq_set_nested_thread(__irq, 1); |
556 | #ifdef CONFIG_ARM | 554 | #ifdef CONFIG_ARM |
557 | set_irq_flags(__irq, IRQF_VALID); | 555 | set_irq_flags(__irq, IRQF_VALID); |
558 | #else | 556 | #else |
559 | set_irq_noprobe(__irq); | 557 | irq_set_noprobe(__irq); |
560 | #endif | 558 | #endif |
561 | } | 559 | } |
562 | if (!irq) { | 560 | if (!irq) { |
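
max8925 (and stmpe, tc3589x, wm831x, wm8350, wm8994 below) all register their sub-IRQs with the same loop: chip data first, then chip and flow handler, then the nested-thread mark, with the usual ARM/noprobe split. One sketch covers them all; the names are placeholders:

    #include <linux/irq.h>

    static void my_register_irqs(void *dev, unsigned int base, unsigned int n,
                                 struct irq_chip *chip)
    {
            unsigned int irq;

            for (irq = base; irq < base + n; irq++) {
                    irq_set_chip_data(irq, dev);    /* callbacks reach dev */
                    irq_set_chip_and_handler(irq, chip, handle_edge_irq);
                    irq_set_nested_thread(irq, 1);  /* fired from a thread */
    #ifdef CONFIG_ARM
                    set_irq_flags(irq, IRQF_VALID);
    #else
                    irq_set_noprobe(irq);
    #endif
            }
    }
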
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c new file mode 100644 index 00000000000..638bf7e4d3b --- /dev/null +++ b/drivers/mfd/max8997-irq.c | |||
@@ -0,0 +1,377 @@ | |||
1 | /* | ||
2 | * max8997-irq.c - Interrupt controller support for MAX8997 | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics Co.Ltd | ||
5 | * MyungJoo Ham <myungjoo.ham@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | * This driver is based on max8998-irq.c | ||
22 | */ | ||
23 | |||
24 | #include <linux/err.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/mfd/max8997.h> | ||
28 | #include <linux/mfd/max8997-private.h> | ||
29 | |||
30 | static const u8 max8997_mask_reg[] = { | ||
31 | [PMIC_INT1] = MAX8997_REG_INT1MSK, | ||
32 | [PMIC_INT2] = MAX8997_REG_INT2MSK, | ||
33 | [PMIC_INT3] = MAX8997_REG_INT3MSK, | ||
34 | [PMIC_INT4] = MAX8997_REG_INT4MSK, | ||
35 | [FUEL_GAUGE] = MAX8997_REG_INVALID, | ||
36 | [MUIC_INT1] = MAX8997_MUIC_REG_INTMASK1, | ||
37 | [MUIC_INT2] = MAX8997_MUIC_REG_INTMASK2, | ||
38 | [MUIC_INT3] = MAX8997_MUIC_REG_INTMASK3, | ||
39 | [GPIO_LOW] = MAX8997_REG_INVALID, | ||
40 | [GPIO_HI] = MAX8997_REG_INVALID, | ||
41 | [FLASH_STATUS] = MAX8997_REG_INVALID, | ||
42 | }; | ||
43 | |||
44 | static struct i2c_client *get_i2c(struct max8997_dev *max8997, | ||
45 | enum max8997_irq_source src) | ||
46 | { | ||
47 | switch (src) { | ||
48 | case PMIC_INT1 ... PMIC_INT4: | ||
49 | return max8997->i2c; | ||
50 | case FUEL_GAUGE: | ||
51 | return NULL; | ||
52 | case MUIC_INT1 ... MUIC_INT3: | ||
53 | return max8997->muic; | ||
54 | case GPIO_LOW ... GPIO_HI: | ||
55 | return max8997->i2c; | ||
56 | case FLASH_STATUS: | ||
57 | return max8997->i2c; | ||
58 | default: | ||
59 | return ERR_PTR(-EINVAL); | ||
60 | } | ||
61 | |||
62 | return ERR_PTR(-EINVAL); | ||
63 | } | ||
64 | |||
65 | struct max8997_irq_data { | ||
66 | int mask; | ||
67 | enum max8997_irq_source group; | ||
68 | }; | ||
69 | |||
70 | #define DECLARE_IRQ(idx, _group, _mask) \ | ||
71 | [(idx)] = { .group = (_group), .mask = (_mask) } | ||
72 | static const struct max8997_irq_data max8997_irqs[] = { | ||
73 | DECLARE_IRQ(MAX8997_PMICIRQ_PWRONR, PMIC_INT1, 1 << 0), | ||
74 | DECLARE_IRQ(MAX8997_PMICIRQ_PWRONF, PMIC_INT1, 1 << 1), | ||
75 | DECLARE_IRQ(MAX8997_PMICIRQ_PWRON1SEC, PMIC_INT1, 1 << 3), | ||
76 | DECLARE_IRQ(MAX8997_PMICIRQ_JIGONR, PMIC_INT1, 1 << 4), | ||
77 | DECLARE_IRQ(MAX8997_PMICIRQ_JIGONF, PMIC_INT1, 1 << 5), | ||
78 | DECLARE_IRQ(MAX8997_PMICIRQ_LOWBAT2, PMIC_INT1, 1 << 6), | ||
79 | DECLARE_IRQ(MAX8997_PMICIRQ_LOWBAT1, PMIC_INT1, 1 << 7), | ||
80 | |||
81 | DECLARE_IRQ(MAX8997_PMICIRQ_JIGR, PMIC_INT2, 1 << 0), | ||
82 | DECLARE_IRQ(MAX8997_PMICIRQ_JIGF, PMIC_INT2, 1 << 1), | ||
83 | DECLARE_IRQ(MAX8997_PMICIRQ_MR, PMIC_INT2, 1 << 2), | ||
84 | DECLARE_IRQ(MAX8997_PMICIRQ_DVS1OK, PMIC_INT2, 1 << 3), | ||
85 | DECLARE_IRQ(MAX8997_PMICIRQ_DVS2OK, PMIC_INT2, 1 << 4), | ||
86 | DECLARE_IRQ(MAX8997_PMICIRQ_DVS3OK, PMIC_INT2, 1 << 5), | ||
87 | DECLARE_IRQ(MAX8997_PMICIRQ_DVS4OK, PMIC_INT2, 1 << 6), | ||
88 | |||
89 | DECLARE_IRQ(MAX8997_PMICIRQ_CHGINS, PMIC_INT3, 1 << 0), | ||
90 | DECLARE_IRQ(MAX8997_PMICIRQ_CHGRM, PMIC_INT3, 1 << 1), | ||
91 | DECLARE_IRQ(MAX8997_PMICIRQ_DCINOVP, PMIC_INT3, 1 << 2), | ||
92 | DECLARE_IRQ(MAX8997_PMICIRQ_TOPOFFR, PMIC_INT3, 1 << 3), | ||
93 | DECLARE_IRQ(MAX8997_PMICIRQ_CHGRSTF, PMIC_INT3, 1 << 5), | ||
94 | DECLARE_IRQ(MAX8997_PMICIRQ_MBCHGTMEXPD, PMIC_INT3, 1 << 7), | ||
95 | |||
96 | DECLARE_IRQ(MAX8997_PMICIRQ_RTC60S, PMIC_INT4, 1 << 0), | ||
97 | DECLARE_IRQ(MAX8997_PMICIRQ_RTCA1, PMIC_INT4, 1 << 1), | ||
98 | DECLARE_IRQ(MAX8997_PMICIRQ_RTCA2, PMIC_INT4, 1 << 2), | ||
99 | DECLARE_IRQ(MAX8997_PMICIRQ_SMPL_INT, PMIC_INT4, 1 << 3), | ||
100 | DECLARE_IRQ(MAX8997_PMICIRQ_RTC1S, PMIC_INT4, 1 << 4), | ||
101 | DECLARE_IRQ(MAX8997_PMICIRQ_WTSR, PMIC_INT4, 1 << 5), | ||
102 | |||
103 | DECLARE_IRQ(MAX8997_MUICIRQ_ADCError, MUIC_INT1, 1 << 2), | ||
104 | DECLARE_IRQ(MAX8997_MUICIRQ_ADCLow, MUIC_INT1, 1 << 1), | ||
105 | DECLARE_IRQ(MAX8997_MUICIRQ_ADC, MUIC_INT1, 1 << 0), | ||
106 | |||
107 | DECLARE_IRQ(MAX8997_MUICIRQ_VBVolt, MUIC_INT2, 1 << 4), | ||
108 | DECLARE_IRQ(MAX8997_MUICIRQ_DBChg, MUIC_INT2, 1 << 3), | ||
109 | DECLARE_IRQ(MAX8997_MUICIRQ_DCDTmr, MUIC_INT2, 1 << 2), | ||
110 | DECLARE_IRQ(MAX8997_MUICIRQ_ChgDetRun, MUIC_INT2, 1 << 1), | ||
111 | DECLARE_IRQ(MAX8997_MUICIRQ_ChgTyp, MUIC_INT2, 1 << 0), | ||
112 | |||
113 | DECLARE_IRQ(MAX8997_MUICIRQ_OVP, MUIC_INT3, 1 << 2), | ||
114 | }; | ||
115 | |||
116 | static void max8997_irq_lock(struct irq_data *data) | ||
117 | { | ||
118 | struct max8997_dev *max8997 = irq_get_chip_data(data->irq); | ||
119 | |||
120 | mutex_lock(&max8997->irqlock); | ||
121 | } | ||
122 | |||
123 | static void max8997_irq_sync_unlock(struct irq_data *data) | ||
124 | { | ||
125 | struct max8997_dev *max8997 = irq_get_chip_data(data->irq); | ||
126 | int i; | ||
127 | |||
128 | for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) { | ||
129 | u8 mask_reg = max8997_mask_reg[i]; | ||
130 | struct i2c_client *i2c = get_i2c(max8997, i); | ||
131 | |||
132 | if (mask_reg == MAX8997_REG_INVALID || | ||
133 | IS_ERR_OR_NULL(i2c)) | ||
134 | continue; | ||
135 | max8997->irq_masks_cache[i] = max8997->irq_masks_cur[i]; | ||
136 | |||
137 | max8997_write_reg(i2c, max8997_mask_reg[i], | ||
138 | max8997->irq_masks_cur[i]); | ||
139 | } | ||
140 | |||
141 | mutex_unlock(&max8997->irqlock); | ||
142 | } | ||
143 | |||
144 | static inline const struct max8997_irq_data * | ||
145 | irq_to_max8997_irq(struct max8997_dev *max8997, int irq) | ||
146 | { | ||
147 | return &max8997_irqs[irq - max8997->irq_base]; | ||
148 | } | ||
149 | |||
150 | static void max8997_irq_mask(struct irq_data *data) | ||
151 | { | ||
152 | struct max8997_dev *max8997 = irq_get_chip_data(data->irq); | ||
153 | const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997, | ||
154 | data->irq); | ||
155 | |||
156 | max8997->irq_masks_cur[irq_data->group] |= irq_data->mask; | ||
157 | } | ||
158 | |||
159 | static void max8997_irq_unmask(struct irq_data *data) | ||
160 | { | ||
161 | struct max8997_dev *max8997 = irq_get_chip_data(data->irq); | ||
162 | const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997, | ||
163 | data->irq); | ||
164 | |||
165 | max8997->irq_masks_cur[irq_data->group] &= ~irq_data->mask; | ||
166 | } | ||
167 | |||
168 | static struct irq_chip max8997_irq_chip = { | ||
169 | .name = "max8997", | ||
170 | .irq_bus_lock = max8997_irq_lock, | ||
171 | .irq_bus_sync_unlock = max8997_irq_sync_unlock, | ||
172 | .irq_mask = max8997_irq_mask, | ||
173 | .irq_unmask = max8997_irq_unmask, | ||
174 | }; | ||
175 | |||
176 | #define MAX8997_IRQSRC_PMIC (1 << 1) | ||
177 | #define MAX8997_IRQSRC_FUELGAUGE (1 << 2) | ||
178 | #define MAX8997_IRQSRC_MUIC (1 << 3) | ||
179 | #define MAX8997_IRQSRC_GPIO (1 << 4) | ||
180 | #define MAX8997_IRQSRC_FLASH (1 << 5) | ||
181 | static irqreturn_t max8997_irq_thread(int irq, void *data) | ||
182 | { | ||
183 | struct max8997_dev *max8997 = data; | ||
184 | u8 irq_reg[MAX8997_IRQ_GROUP_NR] = {}; | ||
185 | u8 irq_src; | ||
186 | int ret; | ||
187 | int i; | ||
188 | |||
189 | ret = max8997_read_reg(max8997->i2c, MAX8997_REG_INTSRC, &irq_src); | ||
190 | if (ret < 0) { | ||
191 | dev_err(max8997->dev, "Failed to read interrupt source: %d\n", | ||
192 | ret); | ||
193 | return IRQ_NONE; | ||
194 | } | ||
195 | |||
196 | if (irq_src & MAX8997_IRQSRC_PMIC) { | ||
197 | /* PMIC INT1 ~ INT4 */ | ||
198 | max8997_bulk_read(max8997->i2c, MAX8997_REG_INT1, 4, | ||
199 | &irq_reg[PMIC_INT1]); | ||
200 | } | ||
201 | if (irq_src & MAX8997_IRQSRC_FUELGAUGE) { | ||
202 | /* | ||
203 | * TODO: FUEL GAUGE | ||
204 | * | ||
205 | * This is to be supported by the Max17042 driver. When | ||
206 | * an interrupt occurs here, it should be relayed to the | ||
207 | * connected Max17042 device (probably via platform data). | ||
208 | * However, the Max17042 driver has no interrupt handling | ||
209 | * yet. Its IRQ code should work both as a stand-alone | ||
210 | * device and as a Max8997-dependent device. Because that | ||
211 | * support is not ready on the Max17042 side and is not | ||
212 | * critical to operating the Max8997, it is left out of | ||
213 | * the initial releases. | ||
214 | */ | ||
215 | irq_reg[FUEL_GAUGE] = 0; | ||
216 | } | ||
217 | if (irq_src & MAX8997_IRQSRC_MUIC) { | ||
218 | /* MUIC INT1 ~ INT3 */ | ||
219 | max8997_bulk_read(max8997->muic, MAX8997_MUIC_REG_INT1, 3, | ||
220 | &irq_reg[MUIC_INT1]); | ||
221 | } | ||
222 | if (irq_src & MAX8997_IRQSRC_GPIO) { | ||
223 | /* GPIO Interrupt */ | ||
224 | u8 gpio_info[MAX8997_NUM_GPIO]; | ||
225 | |||
226 | irq_reg[GPIO_LOW] = 0; | ||
227 | irq_reg[GPIO_HI] = 0; | ||
228 | |||
229 | max8997_bulk_read(max8997->i2c, MAX8997_REG_GPIOCNTL1, | ||
230 | MAX8997_NUM_GPIO, gpio_info); | ||
231 | for (i = 0; i < MAX8997_NUM_GPIO; i++) { | ||
232 | bool interrupt = false; | ||
233 | |||
234 | switch (gpio_info[i] & MAX8997_GPIO_INT_MASK) { | ||
235 | case MAX8997_GPIO_INT_BOTH: | ||
236 | if (max8997->gpio_status[i] != gpio_info[i]) | ||
237 | interrupt = true; | ||
238 | break; | ||
239 | case MAX8997_GPIO_INT_RISE: | ||
240 | if ((max8997->gpio_status[i] != gpio_info[i]) && | ||
241 | (gpio_info[i] & MAX8997_GPIO_DATA_MASK)) | ||
242 | interrupt = true; | ||
243 | break; | ||
244 | case MAX8997_GPIO_INT_FALL: | ||
245 | if ((max8997->gpio_status[i] != gpio_info[i]) && | ||
246 | !(gpio_info[i] & MAX8997_GPIO_DATA_MASK)) | ||
247 | interrupt = true; | ||
248 | break; | ||
249 | default: | ||
250 | break; | ||
251 | } | ||
252 | |||
253 | if (interrupt) { | ||
254 | if (i < 8) | ||
255 | irq_reg[GPIO_LOW] |= (1 << i); | ||
256 | else | ||
257 | irq_reg[GPIO_HI] |= (1 << (i - 8)); | ||
258 | } | ||
259 | |||
260 | } | ||
261 | } | ||
262 | if (irq_src & MAX8997_IRQSRC_FLASH) { | ||
263 | /* Flash Status Interrupt */ | ||
264 | ret = max8997_read_reg(max8997->i2c, MAX8997_REG_FLASHSTATUS, | ||
265 | &irq_reg[FLASH_STATUS]); | ||
266 | } | ||
267 | |||
268 | /* Apply masking */ | ||
269 | for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) | ||
270 | irq_reg[i] &= ~max8997->irq_masks_cur[i]; | ||
271 | |||
272 | /* Report */ | ||
273 | for (i = 0; i < MAX8997_IRQ_NR; i++) { | ||
274 | if (irq_reg[max8997_irqs[i].group] & max8997_irqs[i].mask) | ||
275 | handle_nested_irq(max8997->irq_base + i); | ||
276 | } | ||
277 | |||
278 | return IRQ_HANDLED; | ||
279 | } | ||
280 | |||
281 | int max8997_irq_resume(struct max8997_dev *max8997) | ||
282 | { | ||
283 | if (max8997->irq && max8997->irq_base) | ||
284 | max8997_irq_thread(max8997->irq_base, max8997); | ||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | int max8997_irq_init(struct max8997_dev *max8997) | ||
289 | { | ||
290 | int i; | ||
291 | int cur_irq; | ||
292 | int ret; | ||
293 | u8 val; | ||
294 | |||
295 | if (!max8997->irq) { | ||
296 | dev_warn(max8997->dev, "No interrupt specified.\n"); | ||
297 | max8997->irq_base = 0; | ||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | if (!max8997->irq_base) { | ||
302 | dev_err(max8997->dev, "No interrupt base specified.\n"); | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | mutex_init(&max8997->irqlock); | ||
307 | |||
308 | /* Mask individual interrupt sources */ | ||
309 | for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) { | ||
310 | struct i2c_client *i2c; | ||
311 | |||
312 | max8997->irq_masks_cur[i] = 0xff; | ||
313 | max8997->irq_masks_cache[i] = 0xff; | ||
314 | i2c = get_i2c(max8997, i); | ||
315 | |||
316 | if (IS_ERR_OR_NULL(i2c)) | ||
317 | continue; | ||
318 | if (max8997_mask_reg[i] == MAX8997_REG_INVALID) | ||
319 | continue; | ||
320 | |||
321 | max8997_write_reg(i2c, max8997_mask_reg[i], 0xff); | ||
322 | } | ||
323 | |||
324 | for (i = 0; i < MAX8997_NUM_GPIO; i++) { | ||
325 | max8997_read_reg(max8997->i2c, | ||
326 | MAX8997_REG_GPIOCNTL1 + i, | ||
327 | &val); | ||
328 | max8997->gpio_status[i] = (val & MAX8997_GPIO_DATA_MASK) ? | ||
329 | true : false; | ||
330 | } | ||
331 | |||
332 | /* Register with genirq */ | ||
333 | for (i = 0; i < MAX8997_IRQ_NR; i++) { | ||
334 | cur_irq = i + max8997->irq_base; | ||
335 | irq_set_chip_data(cur_irq, max8997); | ||
336 | irq_set_chip_and_handler(cur_irq, &max8997_irq_chip, | ||
337 | handle_edge_irq); | ||
338 | irq_set_nested_thread(cur_irq, 1); | ||
339 | #ifdef CONFIG_ARM | ||
340 | set_irq_flags(cur_irq, IRQF_VALID); | ||
341 | #else | ||
342 | irq_set_noprobe(cur_irq); | ||
343 | #endif | ||
344 | } | ||
345 | |||
346 | ret = request_threaded_irq(max8997->irq, NULL, max8997_irq_thread, | ||
347 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | ||
348 | "max8997-irq", max8997); | ||
349 | |||
350 | if (ret) { | ||
351 | dev_err(max8997->dev, "Failed to request IRQ %d: %d\n", | ||
352 | max8997->irq, ret); | ||
353 | return ret; | ||
354 | } | ||
355 | |||
356 | if (!max8997->ono) | ||
357 | return 0; | ||
358 | |||
359 | ret = request_threaded_irq(max8997->ono, NULL, max8997_irq_thread, | ||
360 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | | ||
361 | IRQF_ONESHOT, "max8997-ono", max8997); | ||
362 | |||
363 | if (ret) | ||
364 | dev_err(max8997->dev, "Failed to request ono-IRQ %d: %d\n", | ||
365 | max8997->ono, ret); | ||
366 | |||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | void max8997_irq_exit(struct max8997_dev *max8997) | ||
371 | { | ||
372 | if (max8997->ono) | ||
373 | free_irq(max8997->ono, max8997); | ||
374 | |||
375 | if (max8997->irq) | ||
376 | free_irq(max8997->irq, max8997); | ||
377 | } | ||
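
The new max8997 thread follows the usual PMIC shape: read the hardware status, apply the cached mask state, then dispatch each pending source with handle_nested_irq(). A condensed sketch; my_irqs, MY_GROUPS, and the elided I2C reads are illustrative stand-ins:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    #define MY_GROUPS       4
    #define MY_NR_IRQS      16

    struct my_irq_data { int group; u8 mask; };
    static const struct my_irq_data my_irqs[MY_NR_IRQS];   /* stand-in table */

    struct my_dev {
            unsigned int irq_base;
            u8 irq_masks_cur[MY_GROUPS];
    };

    static irqreturn_t my_irq_thread(int irq, void *data)
    {
            struct my_dev *dev = data;
            u8 pending[MY_GROUPS] = {};
            int i;

            /* 1. per-group status registers would be read here (I2C) */

            /* 2. drop sources the cached mask state says are masked */
            for (i = 0; i < MY_GROUPS; i++)
                    pending[i] &= ~dev->irq_masks_cur[i];

            /* 3. kick each pending source as a nested IRQ, since the
             *    sub-handlers were marked with irq_set_nested_thread() */
            for (i = 0; i < MY_NR_IRQS; i++)
                    if (pending[my_irqs[i].group] & my_irqs[i].mask)
                            handle_nested_irq(dev->irq_base + i);

            return IRQ_HANDLED;
    }
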
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c index 3903e1fbb33..5919710dc9e 100644 --- a/drivers/mfd/max8998-irq.c +++ b/drivers/mfd/max8998-irq.c | |||
@@ -224,14 +224,14 @@ int max8998_irq_init(struct max8998_dev *max8998) | |||
224 | /* register with genirq */ | 224 | /* register with genirq */ |
225 | for (i = 0; i < MAX8998_IRQ_NR; i++) { | 225 | for (i = 0; i < MAX8998_IRQ_NR; i++) { |
226 | cur_irq = i + max8998->irq_base; | 226 | cur_irq = i + max8998->irq_base; |
227 | set_irq_chip_data(cur_irq, max8998); | 227 | irq_set_chip_data(cur_irq, max8998); |
228 | set_irq_chip_and_handler(cur_irq, &max8998_irq_chip, | 228 | irq_set_chip_and_handler(cur_irq, &max8998_irq_chip, |
229 | handle_edge_irq); | 229 | handle_edge_irq); |
230 | set_irq_nested_thread(cur_irq, 1); | 230 | irq_set_nested_thread(cur_irq, 1); |
231 | #ifdef CONFIG_ARM | 231 | #ifdef CONFIG_ARM |
232 | set_irq_flags(cur_irq, IRQF_VALID); | 232 | set_irq_flags(cur_irq, IRQF_VALID); |
233 | #else | 233 | #else |
234 | set_irq_noprobe(cur_irq); | 234 | irq_set_noprobe(cur_irq); |
235 | #endif | 235 | #endif |
236 | } | 236 | } |
237 | 237 | ||
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c index c00214257da..9ec7570f5b8 100644 --- a/drivers/mfd/max8998.c +++ b/drivers/mfd/max8998.c | |||
@@ -209,7 +209,7 @@ static int max8998_suspend(struct device *dev) | |||
209 | struct max8998_dev *max8998 = i2c_get_clientdata(i2c); | 209 | struct max8998_dev *max8998 = i2c_get_clientdata(i2c); |
210 | 210 | ||
211 | if (max8998->wakeup) | 211 | if (max8998->wakeup) |
212 | set_irq_wake(max8998->irq, 1); | 212 | irq_set_irq_wake(max8998->irq, 1); |
213 | return 0; | 213 | return 0; |
214 | } | 214 | } |
215 | 215 | ||
@@ -219,7 +219,7 @@ static int max8998_resume(struct device *dev) | |||
219 | struct max8998_dev *max8998 = i2c_get_clientdata(i2c); | 219 | struct max8998_dev *max8998 = i2c_get_clientdata(i2c); |
220 | 220 | ||
221 | if (max8998->wakeup) | 221 | if (max8998->wakeup) |
222 | set_irq_wake(max8998->irq, 0); | 222 | irq_set_irq_wake(max8998->irq, 0); |
223 | /* | 223 | /* |
224 | * In LP3974, if IRQ registers are not "read & clear" | 224 | * In LP3974, if IRQ registers are not "read & clear" |
225 | * when it's set during sleep, the interrupt becomes | 225 | * when it's set during sleep, the interrupt becomes |
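
max8998 arms its interrupt as a wake source on the way into suspend and balances it on resume; irq_set_irq_wake() calls must pair up, since the core keeps a wake-enable count. The pattern in isolation, with a hypothetical device struct:

    #include <linux/irq.h>

    struct my_pmic {                /* illustrative */
            int irq;
            bool wakeup;
    };

    static int my_suspend(struct my_pmic *pmic)
    {
            if (pmic->wakeup)
                    irq_set_irq_wake(pmic->irq, 1);  /* arm as wake source */
            return 0;
    }

    static int my_resume(struct my_pmic *pmic)
    {
            if (pmic->wakeup)
                    irq_set_irq_wake(pmic->irq, 0);  /* balance the arm */
            return 0;
    }
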
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 79eda0264fb..d01574d9887 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c | |||
@@ -184,16 +184,12 @@ void mfd_remove_devices(struct device *parent) | |||
184 | } | 184 | } |
185 | EXPORT_SYMBOL(mfd_remove_devices); | 185 | EXPORT_SYMBOL(mfd_remove_devices); |
186 | 186 | ||
187 | static int add_shared_platform_device(const char *cell, const char *name) | 187 | int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones) |
188 | { | 188 | { |
189 | struct mfd_cell cell_entry; | 189 | struct mfd_cell cell_entry; |
190 | struct device *dev; | 190 | struct device *dev; |
191 | struct platform_device *pdev; | 191 | struct platform_device *pdev; |
192 | int err; | 192 | int i; |
193 | |||
194 | /* check if we've already registered a device (don't fail if we have) */ | ||
195 | if (bus_find_device_by_name(&platform_bus_type, NULL, name)) | ||
196 | return 0; | ||
197 | 193 | ||
198 | /* fetch the parent cell's device (should already be registered!) */ | 194 | /* fetch the parent cell's device (should already be registered!) */ |
199 | dev = bus_find_device_by_name(&platform_bus_type, NULL, cell); | 195 | dev = bus_find_device_by_name(&platform_bus_type, NULL, cell); |
@@ -206,44 +202,17 @@ static int add_shared_platform_device(const char *cell, const char *name) | |||
206 | 202 | ||
207 | WARN_ON(!cell_entry.enable); | 203 | WARN_ON(!cell_entry.enable); |
208 | 204 | ||
209 | cell_entry.name = name; | 205 | for (i = 0; i < n_clones; i++) { |
210 | err = mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0); | 206 | cell_entry.name = clones[i]; |
211 | if (err) | 207 | /* don't give up if a single call fails; just report error */ |
212 | dev_err(dev, "MFD add devices failed: %d\n", err); | 208 | if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0)) |
213 | return err; | 209 | dev_err(dev, "failed to create platform device '%s'\n", |
214 | } | 210 | clones[i]); |
215 | 211 | } | |
216 | int mfd_shared_platform_driver_register(struct platform_driver *drv, | ||
217 | const char *cellname) | ||
218 | { | ||
219 | int err; | ||
220 | |||
221 | err = add_shared_platform_device(cellname, drv->driver.name); | ||
222 | if (err) | ||
223 | printk(KERN_ERR "failed to add platform device %s\n", | ||
224 | drv->driver.name); | ||
225 | |||
226 | err = platform_driver_register(drv); | ||
227 | if (err) | ||
228 | printk(KERN_ERR "failed to add platform driver %s\n", | ||
229 | drv->driver.name); | ||
230 | |||
231 | return err; | ||
232 | } | ||
233 | EXPORT_SYMBOL(mfd_shared_platform_driver_register); | ||
234 | |||
235 | void mfd_shared_platform_driver_unregister(struct platform_driver *drv) | ||
236 | { | ||
237 | struct device *dev; | ||
238 | |||
239 | dev = bus_find_device_by_name(&platform_bus_type, NULL, | ||
240 | drv->driver.name); | ||
241 | if (dev) | ||
242 | platform_device_unregister(to_platform_device(dev)); | ||
243 | 212 | ||
244 | platform_driver_unregister(drv); | 213 | return 0; |
245 | } | 214 | } |
246 | EXPORT_SYMBOL(mfd_shared_platform_driver_unregister); | 215 | EXPORT_SYMBOL(mfd_clone_cell); |
247 | 216 | ||
248 | MODULE_LICENSE("GPL"); | 217 | MODULE_LICENSE("GPL"); |
249 | MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov"); | 218 | MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov"); |
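
mfd_clone_cell() replaces the shared-platform-driver register/unregister pair: it stamps out one platform device per clone name under an already-registered parent cell, and a failed clone is reported rather than fatal. A hypothetical caller (all names illustrative):

    #include <linux/kernel.h>
    #include <linux/mfd/core.h>

    static const char *my_clones[] = {
            "my-func-a",
            "my-func-b",
    };

    static int my_register_clones(void)
    {
            /* the parent cell's device must already exist on the bus */
            return mfd_clone_cell("my-parent-cell", my_clones,
                                  ARRAY_SIZE(my_clones));
    }
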
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index c1306ed43e3..c7687f6a78a 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c | |||
@@ -356,7 +356,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client) | |||
356 | return 0; | 356 | return 0; |
357 | } | 357 | } |
358 | 358 | ||
359 | static struct i2c_device_id pcf50633_id_table[] = { | 359 | static const struct i2c_device_id pcf50633_id_table[] = { |
360 | {"pcf50633", 0x73}, | 360 | {"pcf50633", 0x73}, |
361 | {/* end of list */} | 361 | {/* end of list */} |
362 | }; | 362 | }; |
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c index 193c940225b..10dbe6374a8 100644 --- a/drivers/mfd/rdc321x-southbridge.c +++ b/drivers/mfd/rdc321x-southbridge.c | |||
@@ -97,6 +97,7 @@ static DEFINE_PCI_DEVICE_TABLE(rdc321x_sb_table) = { | |||
97 | { PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) }, | 97 | { PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) }, |
98 | {} | 98 | {} |
99 | }; | 99 | }; |
100 | MODULE_DEVICE_TABLE(pci, rdc321x_sb_table); | ||
100 | 101 | ||
101 | static struct pci_driver rdc321x_sb_driver = { | 102 | static struct pci_driver rdc321x_sb_driver = { |
102 | .name = "RDC321x Southbridge", | 103 | .name = "RDC321x Southbridge", |
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index 3e5732b58c4..7ab7746631d 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c | |||
@@ -762,14 +762,14 @@ static int __devinit stmpe_irq_init(struct stmpe *stmpe) | |||
762 | int irq; | 762 | int irq; |
763 | 763 | ||
764 | for (irq = base; irq < base + num_irqs; irq++) { | 764 | for (irq = base; irq < base + num_irqs; irq++) { |
765 | set_irq_chip_data(irq, stmpe); | 765 | irq_set_chip_data(irq, stmpe); |
766 | set_irq_chip_and_handler(irq, &stmpe_irq_chip, | 766 | irq_set_chip_and_handler(irq, &stmpe_irq_chip, |
767 | handle_edge_irq); | 767 | handle_edge_irq); |
768 | set_irq_nested_thread(irq, 1); | 768 | irq_set_nested_thread(irq, 1); |
769 | #ifdef CONFIG_ARM | 769 | #ifdef CONFIG_ARM |
770 | set_irq_flags(irq, IRQF_VALID); | 770 | set_irq_flags(irq, IRQF_VALID); |
771 | #else | 771 | #else |
772 | set_irq_noprobe(irq); | 772 | irq_set_noprobe(irq); |
773 | #endif | 773 | #endif |
774 | } | 774 | } |
775 | 775 | ||
@@ -786,8 +786,8 @@ static void stmpe_irq_remove(struct stmpe *stmpe) | |||
786 | #ifdef CONFIG_ARM | 786 | #ifdef CONFIG_ARM |
787 | set_irq_flags(irq, 0); | 787 | set_irq_flags(irq, 0); |
788 | #endif | 788 | #endif |
789 | set_irq_chip_and_handler(irq, NULL, NULL); | 789 | irq_set_chip_and_handler(irq, NULL, NULL); |
790 | set_irq_chip_data(irq, NULL); | 790 | irq_set_chip_data(irq, NULL); |
791 | } | 791 | } |
792 | } | 792 | } |
793 | 793 | ||
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c index af57fc706a4..42830e69296 100644 --- a/drivers/mfd/t7l66xb.c +++ b/drivers/mfd/t7l66xb.c | |||
@@ -186,7 +186,7 @@ static struct mfd_cell t7l66xb_cells[] = { | |||
186 | /* Handle the T7L66XB interrupt mux */ | 186 | /* Handle the T7L66XB interrupt mux */ |
187 | static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc) | 187 | static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc) |
188 | { | 188 | { |
189 | struct t7l66xb *t7l66xb = get_irq_data(irq); | 189 | struct t7l66xb *t7l66xb = irq_get_handler_data(irq); |
190 | unsigned int isr; | 190 | unsigned int isr; |
191 | unsigned int i, irq_base; | 191 | unsigned int i, irq_base; |
192 | 192 | ||
@@ -243,17 +243,16 @@ static void t7l66xb_attach_irq(struct platform_device *dev) | |||
243 | irq_base = t7l66xb->irq_base; | 243 | irq_base = t7l66xb->irq_base; |
244 | 244 | ||
245 | for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { | 245 | for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { |
246 | set_irq_chip(irq, &t7l66xb_chip); | 246 | irq_set_chip_and_handler(irq, &t7l66xb_chip, handle_level_irq); |
247 | set_irq_chip_data(irq, t7l66xb); | 247 | irq_set_chip_data(irq, t7l66xb); |
248 | set_irq_handler(irq, handle_level_irq); | ||
249 | #ifdef CONFIG_ARM | 248 | #ifdef CONFIG_ARM |
250 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 249 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
251 | #endif | 250 | #endif |
252 | } | 251 | } |
253 | 252 | ||
254 | set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING); | 253 | irq_set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING); |
255 | set_irq_data(t7l66xb->irq, t7l66xb); | 254 | irq_set_handler_data(t7l66xb->irq, t7l66xb); |
256 | set_irq_chained_handler(t7l66xb->irq, t7l66xb_irq); | 255 | irq_set_chained_handler(t7l66xb->irq, t7l66xb_irq); |
257 | } | 256 | } |
258 | 257 | ||
259 | static void t7l66xb_detach_irq(struct platform_device *dev) | 258 | static void t7l66xb_detach_irq(struct platform_device *dev) |
@@ -263,15 +262,15 @@ static void t7l66xb_detach_irq(struct platform_device *dev) | |||
263 | 262 | ||
264 | irq_base = t7l66xb->irq_base; | 263 | irq_base = t7l66xb->irq_base; |
265 | 264 | ||
266 | set_irq_chained_handler(t7l66xb->irq, NULL); | 265 | irq_set_chained_handler(t7l66xb->irq, NULL); |
267 | set_irq_data(t7l66xb->irq, NULL); | 266 | irq_set_handler_data(t7l66xb->irq, NULL); |
268 | 267 | ||
269 | for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { | 268 | for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { |
270 | #ifdef CONFIG_ARM | 269 | #ifdef CONFIG_ARM |
271 | set_irq_flags(irq, 0); | 270 | set_irq_flags(irq, 0); |
272 | #endif | 271 | #endif |
273 | set_irq_chip(irq, NULL); | 272 | irq_set_chip(irq, NULL); |
274 | set_irq_chip_data(irq, NULL); | 273 | irq_set_chip_data(irq, NULL); |
275 | } | 274 | } |
276 | } | 275 | } |
277 | 276 | ||
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c index 729dbeed2ce..c27e515b072 100644 --- a/drivers/mfd/tc3589x.c +++ b/drivers/mfd/tc3589x.c | |||
@@ -192,14 +192,14 @@ static int tc3589x_irq_init(struct tc3589x *tc3589x) | |||
192 | int irq; | 192 | int irq; |
193 | 193 | ||
194 | for (irq = base; irq < base + TC3589x_NR_INTERNAL_IRQS; irq++) { | 194 | for (irq = base; irq < base + TC3589x_NR_INTERNAL_IRQS; irq++) { |
195 | set_irq_chip_data(irq, tc3589x); | 195 | irq_set_chip_data(irq, tc3589x); |
196 | set_irq_chip_and_handler(irq, &dummy_irq_chip, | 196 | irq_set_chip_and_handler(irq, &dummy_irq_chip, |
197 | handle_edge_irq); | 197 | handle_edge_irq); |
198 | set_irq_nested_thread(irq, 1); | 198 | irq_set_nested_thread(irq, 1); |
199 | #ifdef CONFIG_ARM | 199 | #ifdef CONFIG_ARM |
200 | set_irq_flags(irq, IRQF_VALID); | 200 | set_irq_flags(irq, IRQF_VALID); |
201 | #else | 201 | #else |
202 | set_irq_noprobe(irq); | 202 | irq_set_noprobe(irq); |
203 | #endif | 203 | #endif |
204 | } | 204 | } |
205 | 205 | ||
@@ -215,8 +215,8 @@ static void tc3589x_irq_remove(struct tc3589x *tc3589x) | |||
215 | #ifdef CONFIG_ARM | 215 | #ifdef CONFIG_ARM |
216 | set_irq_flags(irq, 0); | 216 | set_irq_flags(irq, 0); |
217 | #endif | 217 | #endif |
218 | set_irq_chip_and_handler(irq, NULL, NULL); | 218 | irq_set_chip_and_handler(irq, NULL, NULL); |
219 | set_irq_chip_data(irq, NULL); | 219 | irq_set_chip_data(irq, NULL); |
220 | } | 220 | } |
221 | } | 221 | } |
222 | 222 | ||
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c index 3d62ded86a8..fc53ce28760 100644 --- a/drivers/mfd/tc6393xb.c +++ b/drivers/mfd/tc6393xb.c | |||
@@ -513,7 +513,7 @@ static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base) | |||
513 | static void | 513 | static void |
514 | tc6393xb_irq(unsigned int irq, struct irq_desc *desc) | 514 | tc6393xb_irq(unsigned int irq, struct irq_desc *desc) |
515 | { | 515 | { |
516 | struct tc6393xb *tc6393xb = get_irq_data(irq); | 516 | struct tc6393xb *tc6393xb = irq_get_handler_data(irq); |
517 | unsigned int isr; | 517 | unsigned int isr; |
518 | unsigned int i, irq_base; | 518 | unsigned int i, irq_base; |
519 | 519 | ||
@@ -572,15 +572,14 @@ static void tc6393xb_attach_irq(struct platform_device *dev) | |||
572 | irq_base = tc6393xb->irq_base; | 572 | irq_base = tc6393xb->irq_base; |
573 | 573 | ||
574 | for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { | 574 | for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { |
575 | set_irq_chip(irq, &tc6393xb_chip); | 575 | irq_set_chip_and_handler(irq, &tc6393xb_chip, handle_edge_irq); |
576 | set_irq_chip_data(irq, tc6393xb); | 576 | irq_set_chip_data(irq, tc6393xb); |
577 | set_irq_handler(irq, handle_edge_irq); | ||
578 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 577 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
579 | } | 578 | } |
580 | 579 | ||
581 | set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING); | 580 | irq_set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING); |
582 | set_irq_data(tc6393xb->irq, tc6393xb); | 581 | irq_set_handler_data(tc6393xb->irq, tc6393xb); |
583 | set_irq_chained_handler(tc6393xb->irq, tc6393xb_irq); | 582 | irq_set_chained_handler(tc6393xb->irq, tc6393xb_irq); |
584 | } | 583 | } |
585 | 584 | ||
586 | static void tc6393xb_detach_irq(struct platform_device *dev) | 585 | static void tc6393xb_detach_irq(struct platform_device *dev) |
@@ -588,15 +587,15 @@ static void tc6393xb_detach_irq(struct platform_device *dev) | |||
588 | struct tc6393xb *tc6393xb = platform_get_drvdata(dev); | 587 | struct tc6393xb *tc6393xb = platform_get_drvdata(dev); |
589 | unsigned int irq, irq_base; | 588 | unsigned int irq, irq_base; |
590 | 589 | ||
591 | set_irq_chained_handler(tc6393xb->irq, NULL); | 590 | irq_set_chained_handler(tc6393xb->irq, NULL); |
592 | set_irq_data(tc6393xb->irq, NULL); | 591 | irq_set_handler_data(tc6393xb->irq, NULL); |
593 | 592 | ||
594 | irq_base = tc6393xb->irq_base; | 593 | irq_base = tc6393xb->irq_base; |
595 | 594 | ||
596 | for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { | 595 | for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { |
597 | set_irq_flags(irq, 0); | 596 | set_irq_flags(irq, 0); |
598 | set_irq_chip(irq, NULL); | 597 | irq_set_chip(irq, NULL); |
599 | set_irq_chip_data(irq, NULL); | 598 | irq_set_chip_data(irq, NULL); |
600 | } | 599 | } |
601 | } | 600 | } |
602 | 601 | ||
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index 0aa9186aec1..b600808690c 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c | |||
@@ -422,10 +422,10 @@ static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq, | |||
422 | 422 | ||
423 | for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) { | 423 | for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) { |
424 | int __irq = i + tps6586x->irq_base; | 424 | int __irq = i + tps6586x->irq_base; |
425 | set_irq_chip_data(__irq, tps6586x); | 425 | irq_set_chip_data(__irq, tps6586x); |
426 | set_irq_chip_and_handler(__irq, &tps6586x->irq_chip, | 426 | irq_set_chip_and_handler(__irq, &tps6586x->irq_chip, |
427 | handle_simple_irq); | 427 | handle_simple_irq); |
428 | set_irq_nested_thread(__irq, 1); | 428 | irq_set_nested_thread(__irq, 1); |
429 | #ifdef CONFIG_ARM | 429 | #ifdef CONFIG_ARM |
430 | set_irq_flags(__irq, IRQF_VALID); | 430 | set_irq_flags(__irq, IRQF_VALID); |
431 | #endif | 431 | #endif |
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 63a30e88908..8a7ee3139b8 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c | |||
@@ -320,24 +320,8 @@ static int twl4030_irq_thread(void *data) | |||
320 | for (module_irq = twl4030_irq_base; | 320 | for (module_irq = twl4030_irq_base; |
321 | pih_isr; | 321 | pih_isr; |
322 | pih_isr >>= 1, module_irq++) { | 322 | pih_isr >>= 1, module_irq++) { |
323 | if (pih_isr & 0x1) { | 323 | if (pih_isr & 0x1) |
324 | struct irq_desc *d = irq_to_desc(module_irq); | 324 | generic_handle_irq(module_irq); |
325 | |||
326 | if (!d) { | ||
327 | pr_err("twl4030: Invalid SIH IRQ: %d\n", | ||
328 | module_irq); | ||
329 | return -EINVAL; | ||
330 | } | ||
331 | |||
332 | /* These can't be masked ... always warn | ||
333 | * if we get any surprises. | ||
334 | */ | ||
335 | if (d->status & IRQ_DISABLED) | ||
336 | note_interrupt(module_irq, d, | ||
337 | IRQ_NONE); | ||
338 | else | ||
339 | d->handle_irq(module_irq, d); | ||
340 | } | ||
341 | } | 325 | } |
342 | local_irq_enable(); | 326 | local_irq_enable(); |
343 | 327 | ||
@@ -470,7 +454,7 @@ static inline void activate_irq(int irq) | |||
470 | set_irq_flags(irq, IRQF_VALID); | 454 | set_irq_flags(irq, IRQF_VALID); |
471 | #else | 455 | #else |
472 | /* same effect on other architectures */ | 456 | /* same effect on other architectures */ |
473 | set_irq_noprobe(irq); | 457 | irq_set_noprobe(irq); |
474 | #endif | 458 | #endif |
475 | } | 459 | } |
476 | 460 | ||
@@ -560,24 +544,18 @@ static void twl4030_sih_do_edge(struct work_struct *work) | |||
560 | /* Modify only the bits we know must change */ | 544 | /* Modify only the bits we know must change */ |
561 | while (edge_change) { | 545 | while (edge_change) { |
562 | int i = fls(edge_change) - 1; | 546 | int i = fls(edge_change) - 1; |
563 | struct irq_desc *d = irq_to_desc(i + agent->irq_base); | 547 | struct irq_data *idata = irq_get_irq_data(i + agent->irq_base); |
564 | int byte = 1 + (i >> 2); | 548 | int byte = 1 + (i >> 2); |
565 | int off = (i & 0x3) * 2; | 549 | int off = (i & 0x3) * 2; |
566 | 550 | unsigned int type; | |
567 | if (!d) { | ||
568 | pr_err("twl4030: Invalid IRQ: %d\n", | ||
569 | i + agent->irq_base); | ||
570 | return; | ||
571 | } | ||
572 | 551 | ||
573 | bytes[byte] &= ~(0x03 << off); | 552 | bytes[byte] &= ~(0x03 << off); |
574 | 553 | ||
575 | raw_spin_lock_irq(&d->lock); | 554 | type = irqd_get_trigger_type(idata); |
576 | if (d->status & IRQ_TYPE_EDGE_RISING) | 555 | if (type & IRQ_TYPE_EDGE_RISING) |
577 | bytes[byte] |= BIT(off + 1); | 556 | bytes[byte] |= BIT(off + 1); |
578 | if (d->status & IRQ_TYPE_EDGE_FALLING) | 557 | if (type & IRQ_TYPE_EDGE_FALLING) |
579 | bytes[byte] |= BIT(off + 0); | 558 | bytes[byte] |= BIT(off + 0); |
580 | raw_spin_unlock_irq(&d->lock); | ||
581 | 559 | ||
582 | edge_change &= ~BIT(i); | 560 | edge_change &= ~BIT(i); |
583 | } | 561 | } |
@@ -626,21 +604,13 @@ static void twl4030_sih_unmask(struct irq_data *data) | |||
626 | static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger) | 604 | static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger) |
627 | { | 605 | { |
628 | struct sih_agent *sih = irq_data_get_irq_chip_data(data); | 606 | struct sih_agent *sih = irq_data_get_irq_chip_data(data); |
629 | struct irq_desc *desc = irq_to_desc(data->irq); | ||
630 | unsigned long flags; | 607 | unsigned long flags; |
631 | 608 | ||
632 | if (!desc) { | ||
633 | pr_err("twl4030: Invalid IRQ: %d\n", data->irq); | ||
634 | return -EINVAL; | ||
635 | } | ||
636 | |||
637 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) | 609 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) |
638 | return -EINVAL; | 610 | return -EINVAL; |
639 | 611 | ||
640 | spin_lock_irqsave(&sih_agent_lock, flags); | 612 | spin_lock_irqsave(&sih_agent_lock, flags); |
641 | if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) { | 613 | if (irqd_get_trigger_type(data) != trigger) { |
642 | desc->status &= ~IRQ_TYPE_SENSE_MASK; | ||
643 | desc->status |= trigger; | ||
644 | sih->edge_change |= BIT(data->irq - sih->irq_base); | 614 | sih->edge_change |= BIT(data->irq - sih->irq_base); |
645 | queue_work(wq, &sih->edge_work); | 615 | queue_work(wq, &sih->edge_work); |
646 | } | 616 | } |
@@ -680,7 +650,7 @@ static inline int sih_read_isr(const struct sih *sih) | |||
680 | */ | 650 | */ |
681 | static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc) | 651 | static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc) |
682 | { | 652 | { |
683 | struct sih_agent *agent = get_irq_data(irq); | 653 | struct sih_agent *agent = irq_get_handler_data(irq); |
684 | const struct sih *sih = agent->sih; | 654 | const struct sih *sih = agent->sih; |
685 | int isr; | 655 | int isr; |
686 | 656 | ||
@@ -754,9 +724,9 @@ int twl4030_sih_setup(int module) | |||
754 | for (i = 0; i < sih->bits; i++) { | 724 | for (i = 0; i < sih->bits; i++) { |
755 | irq = irq_base + i; | 725 | irq = irq_base + i; |
756 | 726 | ||
757 | set_irq_chip_and_handler(irq, &twl4030_sih_irq_chip, | 727 | irq_set_chip_and_handler(irq, &twl4030_sih_irq_chip, |
758 | handle_edge_irq); | 728 | handle_edge_irq); |
759 | set_irq_chip_data(irq, agent); | 729 | irq_set_chip_data(irq, agent); |
760 | activate_irq(irq); | 730 | activate_irq(irq); |
761 | } | 731 | } |
762 | 732 | ||
@@ -765,8 +735,8 @@ int twl4030_sih_setup(int module) | |||
765 | 735 | ||
766 | /* replace generic PIH handler (handle_simple_irq) */ | 736 | /* replace generic PIH handler (handle_simple_irq) */ |
767 | irq = sih_mod + twl4030_irq_base; | 737 | irq = sih_mod + twl4030_irq_base; |
768 | set_irq_data(irq, agent); | 738 | irq_set_handler_data(irq, agent); |
769 | set_irq_chained_handler(irq, handle_twl4030_sih); | 739 | irq_set_chained_handler(irq, handle_twl4030_sih); |
770 | 740 | ||
771 | pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name, | 741 | pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name, |
772 | irq, irq_base, twl4030_irq_next - 1); | 742 | irq, irq_base, twl4030_irq_next - 1); |
@@ -815,8 +785,8 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) | |||
815 | twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; | 785 | twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; |
816 | 786 | ||
817 | for (i = irq_base; i < irq_end; i++) { | 787 | for (i = irq_base; i < irq_end; i++) { |
818 | set_irq_chip_and_handler(i, &twl4030_irq_chip, | 788 | irq_set_chip_and_handler(i, &twl4030_irq_chip, |
819 | handle_simple_irq); | 789 | handle_simple_irq); |
820 | activate_irq(i); | 790 | activate_irq(i); |
821 | } | 791 | } |
822 | twl4030_irq_next = i; | 792 | twl4030_irq_next = i; |
@@ -856,7 +826,7 @@ fail_rqirq: | |||
856 | /* clean up twl4030_sih_setup */ | 826 | /* clean up twl4030_sih_setup */ |
857 | fail: | 827 | fail: |
858 | for (i = irq_base; i < irq_end; i++) | 828 | for (i = irq_base; i < irq_end; i++) |
859 | set_irq_chip_and_handler(i, NULL, NULL); | 829 | irq_set_chip_and_handler(i, NULL, NULL); |
860 | destroy_workqueue(wq); | 830 | destroy_workqueue(wq); |
861 | wq = NULL; | 831 | wq = NULL; |
862 | return status; | 832 | return status; |
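
The twl4030 hunks show the sanctioned way to read trigger settings now that desc->status is off limits: fetch the irq_data and use irqd_get_trigger_type(), with no descriptor lock required. Minimal form:

    #include <linux/irq.h>

    static bool my_wants_rising(unsigned int irq)
    {
            struct irq_data *idata = irq_get_irq_data(irq);

            /* trigger bits live in irq_data state now */
            return irqd_get_trigger_type(idata) & IRQ_TYPE_EDGE_RISING;
    }
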
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c index 4082ed73613..fa937052fba 100644 --- a/drivers/mfd/twl6030-irq.c +++ b/drivers/mfd/twl6030-irq.c | |||
@@ -140,22 +140,7 @@ static int twl6030_irq_thread(void *data) | |||
140 | if (sts.int_sts & 0x1) { | 140 | if (sts.int_sts & 0x1) { |
141 | int module_irq = twl6030_irq_base + | 141 | int module_irq = twl6030_irq_base + |
142 | twl6030_interrupt_mapping[i]; | 142 | twl6030_interrupt_mapping[i]; |
143 | struct irq_desc *d = irq_to_desc(module_irq); | 143 | generic_handle_irq(module_irq); |
144 | |||
145 | if (!d) { | ||
146 | pr_err("twl6030: Invalid SIH IRQ: %d\n", | ||
147 | module_irq); | ||
148 | return -EINVAL; | ||
149 | } | ||
150 | |||
151 | /* These can't be masked ... always warn | ||
152 | * if we get any surprises. | ||
153 | */ | ||
154 | if (d->status & IRQ_DISABLED) | ||
155 | note_interrupt(module_irq, d, | ||
156 | IRQ_NONE); | ||
157 | else | ||
158 | d->handle_irq(module_irq, d); | ||
159 | 144 | ||
160 | } | 145 | } |
161 | local_irq_enable(); | 146 | local_irq_enable(); |
@@ -198,7 +183,7 @@ static inline void activate_irq(int irq) | |||
198 | set_irq_flags(irq, IRQF_VALID); | 183 | set_irq_flags(irq, IRQF_VALID); |
199 | #else | 184 | #else |
200 | /* same effect on other architectures */ | 185 | /* same effect on other architectures */ |
201 | set_irq_noprobe(irq); | 186 | irq_set_noprobe(irq); |
202 | #endif | 187 | #endif |
203 | } | 188 | } |
204 | 189 | ||
@@ -335,8 +320,8 @@ int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) | |||
335 | twl6030_irq_chip.irq_set_type = NULL; | 320 | twl6030_irq_chip.irq_set_type = NULL; |
336 | 321 | ||
337 | for (i = irq_base; i < irq_end; i++) { | 322 | for (i = irq_base; i < irq_end; i++) { |
338 | set_irq_chip_and_handler(i, &twl6030_irq_chip, | 323 | irq_set_chip_and_handler(i, &twl6030_irq_chip, |
339 | handle_simple_irq); | 324 | handle_simple_irq); |
340 | activate_irq(i); | 325 | activate_irq(i); |
341 | } | 326 | } |
342 | 327 | ||
@@ -365,7 +350,7 @@ fail_irq: | |||
365 | 350 | ||
366 | fail_kthread: | 351 | fail_kthread: |
367 | for (i = irq_base; i < irq_end; i++) | 352 | for (i = irq_base; i < irq_end; i++) |
368 | set_irq_chip_and_handler(i, NULL, NULL); | 353 | irq_set_chip_and_handler(i, NULL, NULL); |
369 | return status; | 354 | return status; |
370 | } | 355 | } |
371 | 356 | ||
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c index f76f6c79804..04914f2836c 100644 --- a/drivers/mfd/wl1273-core.c +++ b/drivers/mfd/wl1273-core.c | |||
@@ -25,7 +25,7 @@ | |||
25 | 25 | ||
26 | #define DRIVER_DESC "WL1273 FM Radio Core" | 26 | #define DRIVER_DESC "WL1273 FM Radio Core" |
27 | 27 | ||
28 | static struct i2c_device_id wl1273_driver_id_table[] = { | 28 | static const struct i2c_device_id wl1273_driver_id_table[] = { |
29 | { WL1273_FM_DRIVER_NAME, 0 }, | 29 | { WL1273_FM_DRIVER_NAME, 0 }, |
30 | { } | 30 | { } |
31 | }; | 31 | }; |
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index a5cd17e18d0..23e66af89de 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c | |||
@@ -553,17 +553,17 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq) | |||
553 | for (cur_irq = wm831x->irq_base; | 553 | for (cur_irq = wm831x->irq_base; |
554 | cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base; | 554 | cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base; |
555 | cur_irq++) { | 555 | cur_irq++) { |
556 | set_irq_chip_data(cur_irq, wm831x); | 556 | irq_set_chip_data(cur_irq, wm831x); |
557 | set_irq_chip_and_handler(cur_irq, &wm831x_irq_chip, | 557 | irq_set_chip_and_handler(cur_irq, &wm831x_irq_chip, |
558 | handle_edge_irq); | 558 | handle_edge_irq); |
559 | set_irq_nested_thread(cur_irq, 1); | 559 | irq_set_nested_thread(cur_irq, 1); |
560 | 560 | ||
561 | /* ARM needs us to explicitly flag the IRQ as valid | 561 | /* ARM needs us to explicitly flag the IRQ as valid |
562 | * and will set them noprobe when we do so. */ | 562 | * and will set them noprobe when we do so. */ |
563 | #ifdef CONFIG_ARM | 563 | #ifdef CONFIG_ARM |
564 | set_irq_flags(cur_irq, IRQF_VALID); | 564 | set_irq_flags(cur_irq, IRQF_VALID); |
565 | #else | 565 | #else |
566 | set_irq_noprobe(cur_irq); | 566 | irq_set_noprobe(cur_irq); |
567 | #endif | 567 | #endif |
568 | } | 568 | } |
569 | 569 | ||
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c index 5839966ebd8..ed4b22a167b 100644 --- a/drivers/mfd/wm8350-irq.c +++ b/drivers/mfd/wm8350-irq.c | |||
@@ -518,17 +518,17 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq, | |||
518 | for (cur_irq = wm8350->irq_base; | 518 | for (cur_irq = wm8350->irq_base; |
519 | cur_irq < ARRAY_SIZE(wm8350_irqs) + wm8350->irq_base; | 519 | cur_irq < ARRAY_SIZE(wm8350_irqs) + wm8350->irq_base; |
520 | cur_irq++) { | 520 | cur_irq++) { |
521 | set_irq_chip_data(cur_irq, wm8350); | 521 | irq_set_chip_data(cur_irq, wm8350); |
522 | set_irq_chip_and_handler(cur_irq, &wm8350_irq_chip, | 522 | irq_set_chip_and_handler(cur_irq, &wm8350_irq_chip, |
523 | handle_edge_irq); | 523 | handle_edge_irq); |
524 | set_irq_nested_thread(cur_irq, 1); | 524 | irq_set_nested_thread(cur_irq, 1); |
525 | 525 | ||
526 | /* ARM needs us to explicitly flag the IRQ as valid | 526 | /* ARM needs us to explicitly flag the IRQ as valid |
527 | * and will set them noprobe when we do so. */ | 527 | * and will set them noprobe when we do so. */ |
528 | #ifdef CONFIG_ARM | 528 | #ifdef CONFIG_ARM |
529 | set_irq_flags(cur_irq, IRQF_VALID); | 529 | set_irq_flags(cur_irq, IRQF_VALID); |
530 | #else | 530 | #else |
531 | set_irq_noprobe(cur_irq); | 531 | irq_set_noprobe(cur_irq); |
532 | #endif | 532 | #endif |
533 | } | 533 | } |
534 | 534 | ||
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c index 1e3bf4a2ff8..71c6e8f9aed 100644 --- a/drivers/mfd/wm8994-irq.c +++ b/drivers/mfd/wm8994-irq.c | |||
@@ -278,17 +278,17 @@ int wm8994_irq_init(struct wm8994 *wm8994) | |||
278 | for (cur_irq = wm8994->irq_base; | 278 | for (cur_irq = wm8994->irq_base; |
279 | cur_irq < ARRAY_SIZE(wm8994_irqs) + wm8994->irq_base; | 279 | cur_irq < ARRAY_SIZE(wm8994_irqs) + wm8994->irq_base; |
280 | cur_irq++) { | 280 | cur_irq++) { |
281 | set_irq_chip_data(cur_irq, wm8994); | 281 | irq_set_chip_data(cur_irq, wm8994); |
282 | set_irq_chip_and_handler(cur_irq, &wm8994_irq_chip, | 282 | irq_set_chip_and_handler(cur_irq, &wm8994_irq_chip, |
283 | handle_edge_irq); | 283 | handle_edge_irq); |
284 | set_irq_nested_thread(cur_irq, 1); | 284 | irq_set_nested_thread(cur_irq, 1); |
285 | 285 | ||
286 | /* ARM needs us to explicitly flag the IRQ as valid | 286 | /* ARM needs us to explicitly flag the IRQ as valid |
287 | * and will set them noprobe when we do so. */ | 287 | * and will set them noprobe when we do so. */ |
288 | #ifdef CONFIG_ARM | 288 | #ifdef CONFIG_ARM |
289 | set_irq_flags(cur_irq, IRQF_VALID); | 289 | set_irq_flags(cur_irq, IRQF_VALID); |
290 | #else | 290 | #else |
291 | set_irq_noprobe(cur_irq); | 291 | irq_set_noprobe(cur_irq); |
292 | #endif | 292 | #endif |
293 | } | 293 | } |
294 | 294 | ||
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 59c118c19a9..27dc463097f 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c | |||
@@ -988,7 +988,7 @@ static void kgdbts_run_tests(void) | |||
988 | 988 | ||
989 | static int kgdbts_option_setup(char *opt) | 989 | static int kgdbts_option_setup(char *opt) |
990 | { | 990 | { |
991 | if (strlen(opt) > MAX_CONFIG_LEN) { | 991 | if (strlen(opt) >= MAX_CONFIG_LEN) { |
992 | printk(KERN_ERR "kgdbts: config string too long\n"); | 992 | printk(KERN_ERR "kgdbts: config string too long\n"); |
993 | return -ENOSPC; | 993 | return -ENOSPC; |
994 | } | 994 | } |
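
The kgdbts check is an off-by-one fix: config[] holds MAX_CONFIG_LEN bytes, so a string whose strlen() equals MAX_CONFIG_LEN would need one more byte for its NUL terminator. A standalone userspace illustration of the corrected bound:

    #include <string.h>

    #define MAX_CONFIG_LEN 40

    static char config[MAX_CONFIG_LEN];

    /* A string of length MAX_CONFIG_LEN needs MAX_CONFIG_LEN + 1 bytes
     * once the terminator is counted, so '>' lets a one-byte overflow
     * through and '>=' is the correct bound. */
    static int option_setup(const char *opt)
    {
            if (strlen(opt) >= MAX_CONFIG_LEN)
                    return -1;      /* would not fit with its NUL */
            strcpy(config, opt);
            return 0;
    }
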
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 77414702cb0..b4567c35a32 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig | |||
@@ -33,14 +33,6 @@ config MTD_TESTS | |||
33 | should normally be compiled as kernel modules. The modules perform | 33 | should normally be compiled as kernel modules. The modules perform |
34 | various checks and verifications when loaded. | 34 | various checks and verifications when loaded. |
35 | 35 | ||
36 | config MTD_CONCAT | ||
37 | tristate "MTD concatenating support" | ||
38 | help | ||
39 | Support for concatenating several MTD devices into a single | ||
40 | (virtual) one. This allows you to have -for example- a JFFS(2) | ||
41 | file system spanning multiple physical flash chips. If unsure, | ||
42 | say 'Y'. | ||
43 | |||
44 | config MTD_PARTITIONS | 36 | config MTD_PARTITIONS |
45 | bool "MTD partitioning support" | 37 | bool "MTD partitioning support" |
46 | help | 38 | help |
@@ -333,6 +325,16 @@ config MTD_OOPS | |||
333 | To use, add console=ttyMTDx to the kernel command line, | 325 | To use, add console=ttyMTDx to the kernel command line, |
334 | where x is the MTD device number to use. | 326 | where x is the MTD device number to use. |
335 | 327 | ||
328 | config MTD_SWAP | ||
329 | tristate "Swap on MTD device support" | ||
330 | depends on MTD && SWAP | ||
331 | select MTD_BLKDEVS | ||
332 | help | ||
333 | Provides a volatile block device driver on top of an MTD | ||
334 | partition, suitable for swapping. The mapping of written blocks | ||
335 | is not saved. The driver provides wear leveling by storing an | ||
336 | erase counter in the OOB area. | ||
337 | |||
336 | source "drivers/mtd/chips/Kconfig" | 338 | source "drivers/mtd/chips/Kconfig" |
337 | 339 | ||
338 | source "drivers/mtd/maps/Kconfig" | 340 | source "drivers/mtd/maps/Kconfig" |
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index d4e7f25b1eb..d578095fb25 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile | |||
@@ -4,11 +4,10 @@ | |||
4 | 4 | ||
5 | # Core functionality. | 5 | # Core functionality. |
6 | obj-$(CONFIG_MTD) += mtd.o | 6 | obj-$(CONFIG_MTD) += mtd.o |
7 | mtd-y := mtdcore.o mtdsuper.o | 7 | mtd-y := mtdcore.o mtdsuper.o mtdconcat.o |
8 | mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o | 8 | mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o |
9 | mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o | 9 | mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o |
10 | 10 | ||
11 | obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o | ||
12 | obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o | 11 | obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o |
13 | obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o | 12 | obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o |
14 | obj-$(CONFIG_MTD_AFS_PARTS) += afs.o | 13 | obj-$(CONFIG_MTD_AFS_PARTS) += afs.o |
@@ -26,6 +25,7 @@ obj-$(CONFIG_RFD_FTL) += rfd_ftl.o | |||
26 | obj-$(CONFIG_SSFDC) += ssfdc.o | 25 | obj-$(CONFIG_SSFDC) += ssfdc.o |
27 | obj-$(CONFIG_SM_FTL) += sm_ftl.o | 26 | obj-$(CONFIG_SM_FTL) += sm_ftl.o |
28 | obj-$(CONFIG_MTD_OOPS) += mtdoops.o | 27 | obj-$(CONFIG_MTD_OOPS) += mtdoops.o |
28 | obj-$(CONFIG_MTD_SWAP) += mtdswap.o | ||
29 | 29 | ||
30 | nftl-objs := nftlcore.o nftlmount.o | 30 | nftl-objs := nftlcore.o nftlmount.o |
31 | inftl-objs := inftlcore.o inftlmount.o | 31 | inftl-objs := inftlcore.o inftlmount.o |
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 4aaa88f8ab5..092aef11120 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c | |||
@@ -455,7 +455,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) | |||
455 | mtd->flags = MTD_CAP_NORFLASH; | 455 | mtd->flags = MTD_CAP_NORFLASH; |
456 | mtd->name = map->name; | 456 | mtd->name = map->name; |
457 | mtd->writesize = 1; | 457 | mtd->writesize = 1; |
458 | mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize; | 458 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; |
459 | 459 | ||
460 | mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; | 460 | mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; |
461 | 461 | ||
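This writebufsize fix, repeated below for the 0002 and 0020 command sets, accounts for chip interleaving: MaxBufWriteSize is the log2 of a single chip's internal write buffer, but when several chips answer in parallel on a wide bus the buffer seen at map level is that size multiplied by the interleave factor. A worked sketch with plausible (not chip-specific) numbers:

	/* Two x16 chips interleaved on a 32-bit bus, each reporting a
	 * 32-byte internal write buffer (MaxBufWriteSize = 5). */
	unsigned int max_buf_write_size = 5;	/* log2 of per-chip buffer */
	unsigned int interleave = 2;		/* chips written in parallel */

	unsigned int old_size = 1 << max_buf_write_size;	/* 32: one chip only */
	unsigned int new_size = interleave << max_buf_write_size; /* 64: whole bank */

Underreporting writebufsize matters to consumers that align their writes to it.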
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index f072fcfde04..f9a5331e944 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c | |||
@@ -349,6 +349,7 @@ static struct cfi_fixup cfi_fixup_table[] = { | |||
349 | { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, | 349 | { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, |
350 | #ifdef AMD_BOOTLOC_BUG | 350 | #ifdef AMD_BOOTLOC_BUG |
351 | { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, | 351 | { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, |
352 | { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock }, | ||
352 | { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, | 353 | { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, |
353 | #endif | 354 | #endif |
354 | { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, | 355 | { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, |
@@ -440,7 +441,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | |||
440 | mtd->flags = MTD_CAP_NORFLASH; | 441 | mtd->flags = MTD_CAP_NORFLASH; |
441 | mtd->name = map->name; | 442 | mtd->name = map->name; |
442 | mtd->writesize = 1; | 443 | mtd->writesize = 1; |
443 | mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize; | 444 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; |
444 | 445 | ||
445 | DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n", | 446 | DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n", |
446 | __func__, mtd->writebufsize); | 447 | __func__, mtd->writebufsize); |
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index c04b7658abe..ed56ad3884f 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c | |||
@@ -238,7 +238,7 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map) | |||
238 | mtd->resume = cfi_staa_resume; | 238 | mtd->resume = cfi_staa_resume; |
239 | mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; | 239 | mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; |
240 | mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ | 240 | mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ |
241 | mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize; | 241 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; |
242 | map->fldrv = &cfi_staa_chipdrv; | 242 | map->fldrv = &cfi_staa_chipdrv; |
243 | __module_get(THIS_MODULE); | 243 | __module_get(THIS_MODULE); |
244 | mtd->name = map->name; | 244 | mtd->name = map->name; |
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index e4eba6cc1b2..3fb981d4bb5 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
@@ -655,7 +655,8 @@ static const struct spi_device_id m25p_ids[] = { | |||
655 | { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, | 655 | { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, |
656 | { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, | 656 | { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, |
657 | 657 | ||
658 | /* EON -- en25pxx */ | 658 | /* EON -- en25xxx */ |
659 | { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, | ||
659 | { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, | 660 | { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, |
660 | { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, | 661 | { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, |
661 | 662 | ||
@@ -728,6 +729,8 @@ static const struct spi_device_id m25p_ids[] = { | |||
728 | { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, | 729 | { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, |
729 | { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, | 730 | { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, |
730 | 731 | ||
732 | { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) }, | ||
733 | |||
731 | /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ | 734 | /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ |
732 | { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, | 735 | { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, |
733 | { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, | 736 | { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, |
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c index 26a6e809013..1483e18971c 100644 --- a/drivers/mtd/devices/mtdram.c +++ b/drivers/mtd/devices/mtdram.c | |||
@@ -121,6 +121,7 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, | |||
121 | mtd->flags = MTD_CAP_RAM; | 121 | mtd->flags = MTD_CAP_RAM; |
122 | mtd->size = size; | 122 | mtd->size = size; |
123 | mtd->writesize = 1; | 123 | mtd->writesize = 1; |
124 | mtd->writebufsize = 64; /* Mimic CFI NOR flashes */ | ||
124 | mtd->erasesize = MTDRAM_ERASE_SIZE; | 125 | mtd->erasesize = MTDRAM_ERASE_SIZE; |
125 | mtd->priv = mapped_address; | 126 | mtd->priv = mapped_address; |
126 | 127 | ||
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 52393282eaf..8d28fa02a5a 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c | |||
@@ -117,6 +117,7 @@ static void unregister_devices(void) | |||
117 | list_for_each_entry_safe(this, safe, &phram_list, list) { | 117 | list_for_each_entry_safe(this, safe, &phram_list, list) { |
118 | del_mtd_device(&this->mtd); | 118 | del_mtd_device(&this->mtd); |
119 | iounmap(this->mtd.priv); | 119 | iounmap(this->mtd.priv); |
120 | kfree(this->mtd.name); | ||
120 | kfree(this); | 121 | kfree(this); |
121 | } | 122 | } |
122 | } | 123 | } |
@@ -275,6 +276,8 @@ static int phram_setup(const char *val, struct kernel_param *kp) | |||
275 | ret = register_device(name, start, len); | 276 | ret = register_device(name, start, len); |
276 | if (!ret) | 277 | if (!ret) |
277 | pr_info("%s device: %#x at %#x\n", name, len, start); | 278 | pr_info("%s device: %#x at %#x\n", name, len, start); |
279 | else | ||
280 | kfree(name); | ||
278 | 281 | ||
279 | return ret; | 282 | return ret; |
280 | } | 283 | } |
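Both phram fixes above close leaks of the allocated device name: once register_device() succeeds the string lives on as this->mtd.name (hence the new kfree(this->mtd.name) in unregister_devices()), but on failure the caller still owns it and must free it. A hedged sketch of that ownership rule (the allocation call is illustrative):

	char *name = kstrdup(token, GFP_KERNEL);	/* caller owns name */
	ret = register_device(name, start, len);
	if (!ret)
		pr_info("%s registered\n", name);	/* device owns it now */
	else
		kfree(name);				/* still ours: free it */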
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 5d37d315fa9..44b1f46458c 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
@@ -114,7 +114,7 @@ config MTD_SUN_UFLASH | |||
114 | 114 | ||
115 | config MTD_SC520CDP | 115 | config MTD_SC520CDP |
116 | tristate "CFI Flash device mapped on AMD SC520 CDP" | 116 | tristate "CFI Flash device mapped on AMD SC520 CDP" |
117 | depends on X86 && MTD_CFI && MTD_CONCAT | 117 | depends on X86 && MTD_CFI |
118 | help | 118 | help |
119 | The SC520 CDP board has two banks of CFI-compliant chips and one | 119 | The SC520 CDP board has two banks of CFI-compliant chips and one |
120 | Dual-in-line JEDEC chip. This 'mapping' driver supports that | 120 | Dual-in-line JEDEC chip. This 'mapping' driver supports that |
@@ -262,7 +262,7 @@ config MTD_BCM963XX | |||
262 | 262 | ||
263 | config MTD_DILNETPC | 263 | config MTD_DILNETPC |
264 | tristate "CFI Flash device mapped on DIL/Net PC" | 264 | tristate "CFI Flash device mapped on DIL/Net PC" |
265 | depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN | 265 | depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN |
266 | help | 266 | help |
267 | MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP". | 267 | MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP". |
268 | For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm> | 268 | For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm> |
@@ -552,4 +552,13 @@ config MTD_PISMO | |||
552 | 552 | ||
553 | When built as a module, it will be called pismo.ko | 553 | When built as a module, it will be called pismo.ko |
554 | 554 | ||
555 | config MTD_LATCH_ADDR | ||
556 | tristate "Latch-assisted Flash Chip Support" | ||
557 | depends on MTD_COMPLEX_MAPPINGS | ||
558 | help | ||
559 | Map driver which allows flashes to be partially physically addressed, | ||
560 | with the upper address lines set by board-specific code. | ||
561 | |||
562 | If compiled as a module, it will be called latch-addr-flash. | ||
563 | |||
555 | endmenu | 564 | endmenu |
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index c7869c7a6b1..08533bd5cba 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile | |||
@@ -59,3 +59,4 @@ obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o | |||
59 | obj-$(CONFIG_MTD_VMU) += vmu-flash.o | 59 | obj-$(CONFIG_MTD_VMU) += vmu-flash.o |
60 | obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o | 60 | obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o |
61 | obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o | 61 | obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o |
62 | obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o | ||
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c index c09f4f57093..e5f645b775a 100644 --- a/drivers/mtd/maps/ceiva.c +++ b/drivers/mtd/maps/ceiva.c | |||
@@ -194,16 +194,10 @@ static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info | |||
194 | * We detected multiple devices. Concatenate | 194 | * We detected multiple devices. Concatenate |
195 | * them together. | 195 | * them together. |
196 | */ | 196 | */ |
197 | #ifdef CONFIG_MTD_CONCAT | ||
198 | *rmtd = mtd_concat_create(subdev, found, | 197 | *rmtd = mtd_concat_create(subdev, found, |
199 | "clps flash"); | 198 | "clps flash"); |
200 | if (*rmtd == NULL) | 199 | if (*rmtd == NULL) |
201 | ret = -ENXIO; | 200 | ret = -ENXIO; |
202 | #else | ||
203 | printk(KERN_ERR "clps flash: multiple devices " | ||
204 | "found but MTD concat support disabled.\n"); | ||
205 | ret = -ENXIO; | ||
206 | #endif | ||
207 | } | 201 | } |
208 | } | 202 | } |
209 | 203 | ||
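Since mtdconcat is now linked into the MTD core unconditionally (see the Makefile change above), this hunk and the matching ones in integrator-flash, physmap, physmap_of, and sa1100-flash below can drop the CONFIG_MTD_CONCAT guards and their "concat support disabled" fallbacks. The resulting pattern, sketched with illustrative names:

	struct mtd_info *subdev[2];	/* individually probed chips */
	struct mtd_info *cmtd;

	cmtd = mtd_concat_create(subdev, 2, "example flash");
	if (cmtd == NULL)
		return -ENXIO;
	/* ... use cmtd as one large device; on teardown: */
	if (cmtd != subdev[0])		/* only destroy a real concat */
		mtd_concat_destroy(cmtd);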
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c index 2aac41bde8b..e22ff5adbbf 100644 --- a/drivers/mtd/maps/integrator-flash.c +++ b/drivers/mtd/maps/integrator-flash.c | |||
@@ -202,7 +202,6 @@ static int armflash_probe(struct platform_device *dev) | |||
202 | if (info->nr_subdev == 1) | 202 | if (info->nr_subdev == 1) |
203 | info->mtd = info->subdev[0].mtd; | 203 | info->mtd = info->subdev[0].mtd; |
204 | else if (info->nr_subdev > 1) { | 204 | else if (info->nr_subdev > 1) { |
205 | #ifdef CONFIG_MTD_CONCAT | ||
206 | struct mtd_info *cdev[info->nr_subdev]; | 205 | struct mtd_info *cdev[info->nr_subdev]; |
207 | 206 | ||
208 | /* | 207 | /* |
@@ -215,11 +214,6 @@ static int armflash_probe(struct platform_device *dev) | |||
215 | dev_name(&dev->dev)); | 214 | dev_name(&dev->dev)); |
216 | if (info->mtd == NULL) | 215 | if (info->mtd == NULL) |
217 | err = -ENXIO; | 216 | err = -ENXIO; |
218 | #else | ||
219 | printk(KERN_ERR "armflash: multiple devices found but " | ||
220 | "MTD concat support disabled.\n"); | ||
221 | err = -ENXIO; | ||
222 | #endif | ||
223 | } | 217 | } |
224 | 218 | ||
225 | if (err < 0) | 219 | if (err < 0) |
@@ -244,10 +238,8 @@ static int armflash_probe(struct platform_device *dev) | |||
244 | cleanup: | 238 | cleanup: |
245 | if (info->mtd) { | 239 | if (info->mtd) { |
246 | del_mtd_partitions(info->mtd); | 240 | del_mtd_partitions(info->mtd); |
247 | #ifdef CONFIG_MTD_CONCAT | ||
248 | if (info->mtd != info->subdev[0].mtd) | 241 | if (info->mtd != info->subdev[0].mtd) |
249 | mtd_concat_destroy(info->mtd); | 242 | mtd_concat_destroy(info->mtd); |
250 | #endif | ||
251 | } | 243 | } |
252 | kfree(info->parts); | 244 | kfree(info->parts); |
253 | subdev_err: | 245 | subdev_err: |
@@ -272,10 +264,8 @@ static int armflash_remove(struct platform_device *dev) | |||
272 | if (info) { | 264 | if (info) { |
273 | if (info->mtd) { | 265 | if (info->mtd) { |
274 | del_mtd_partitions(info->mtd); | 266 | del_mtd_partitions(info->mtd); |
275 | #ifdef CONFIG_MTD_CONCAT | ||
276 | if (info->mtd != info->subdev[0].mtd) | 267 | if (info->mtd != info->subdev[0].mtd) |
277 | mtd_concat_destroy(info->mtd); | 268 | mtd_concat_destroy(info->mtd); |
278 | #endif | ||
279 | } | 269 | } |
280 | kfree(info->parts); | 270 | kfree(info->parts); |
281 | 271 | ||
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c new file mode 100644 index 00000000000..ee254808533 --- /dev/null +++ b/drivers/mtd/maps/latch-addr-flash.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * Interface for NOR flash driver whose high address lines are latched | ||
3 | * | ||
4 | * Copyright © 2000 Nicolas Pitre <nico@cam.org> | ||
5 | * Copyright © 2005-2008 Analog Devices Inc. | ||
6 | * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com> | ||
7 | * | ||
8 | * This file is licensed under the terms of the GNU General Public License | ||
9 | * version 2. This program is licensed "as is" without any warranty of any | ||
10 | * kind, whether express or implied. | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/mtd/mtd.h> | ||
17 | #include <linux/mtd/map.h> | ||
18 | #include <linux/mtd/partitions.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/mtd/latch-addr-flash.h> | ||
21 | #include <linux/slab.h> | ||
22 | |||
23 | #define DRIVER_NAME "latch-addr-flash" | ||
24 | |||
25 | struct latch_addr_flash_info { | ||
26 | struct mtd_info *mtd; | ||
27 | struct map_info map; | ||
28 | struct resource *res; | ||
29 | |||
30 | void (*set_window)(unsigned long offset, void *data); | ||
31 | void *data; | ||
32 | |||
33 | /* cache; could be found out of res */ | ||
34 | unsigned long win_mask; | ||
35 | |||
36 | int nr_parts; | ||
37 | struct mtd_partition *parts; | ||
38 | |||
39 | spinlock_t lock; | ||
40 | }; | ||
41 | |||
42 | static map_word lf_read(struct map_info *map, unsigned long ofs) | ||
43 | { | ||
44 | struct latch_addr_flash_info *info; | ||
45 | map_word datum; | ||
46 | |||
47 | info = (struct latch_addr_flash_info *)map->map_priv_1; | ||
48 | |||
49 | spin_lock(&info->lock); | ||
50 | |||
51 | info->set_window(ofs, info->data); | ||
52 | datum = inline_map_read(map, info->win_mask & ofs); | ||
53 | |||
54 | spin_unlock(&info->lock); | ||
55 | |||
56 | return datum; | ||
57 | } | ||
58 | |||
59 | static void lf_write(struct map_info *map, map_word datum, unsigned long ofs) | ||
60 | { | ||
61 | struct latch_addr_flash_info *info; | ||
62 | |||
63 | info = (struct latch_addr_flash_info *)map->map_priv_1; | ||
64 | |||
65 | spin_lock(&info->lock); | ||
66 | |||
67 | info->set_window(ofs, info->data); | ||
68 | inline_map_write(map, datum, info->win_mask & ofs); | ||
69 | |||
70 | spin_unlock(&info->lock); | ||
71 | } | ||
72 | |||
73 | static void lf_copy_from(struct map_info *map, void *to, | ||
74 | unsigned long from, ssize_t len) | ||
75 | { | ||
76 | struct latch_addr_flash_info *info = | ||
77 | (struct latch_addr_flash_info *) map->map_priv_1; | ||
78 | unsigned n; | ||
79 | |||
80 | while (len > 0) { | ||
81 | n = info->win_mask + 1 - (from & info->win_mask); | ||
82 | if (n > len) | ||
83 | n = len; | ||
84 | |||
85 | spin_lock(&info->lock); | ||
86 | |||
87 | info->set_window(from, info->data); | ||
88 | memcpy_fromio(to, map->virt + (from & info->win_mask), n); | ||
89 | |||
90 | spin_unlock(&info->lock); | ||
91 | |||
92 | to += n; | ||
93 | from += n; | ||
94 | len -= n; | ||
95 | } | ||
96 | } | ||
97 | |||
98 | static char *rom_probe_types[] = { "cfi_probe", NULL }; | ||
99 | |||
100 | static char *part_probe_types[] = { "cmdlinepart", NULL }; | ||
101 | |||
102 | static int latch_addr_flash_remove(struct platform_device *dev) | ||
103 | { | ||
104 | struct latch_addr_flash_info *info; | ||
105 | struct latch_addr_flash_data *latch_addr_data; | ||
106 | |||
107 | info = platform_get_drvdata(dev); | ||
108 | if (info == NULL) | ||
109 | return 0; | ||
110 | platform_set_drvdata(dev, NULL); | ||
111 | |||
112 | latch_addr_data = dev->dev.platform_data; | ||
113 | |||
114 | if (info->mtd != NULL) { | ||
115 | if (mtd_has_partitions()) { | ||
116 | if (info->nr_parts) { | ||
117 | del_mtd_partitions(info->mtd); | ||
118 | kfree(info->parts); | ||
119 | } else if (latch_addr_data->nr_parts) { | ||
120 | del_mtd_partitions(info->mtd); | ||
121 | } else { | ||
122 | del_mtd_device(info->mtd); | ||
123 | } | ||
124 | } else { | ||
125 | del_mtd_device(info->mtd); | ||
126 | } | ||
127 | map_destroy(info->mtd); | ||
128 | } | ||
129 | |||
130 | if (info->map.virt != NULL) | ||
131 | iounmap(info->map.virt); | ||
132 | |||
133 | if (info->res != NULL) | ||
134 | release_mem_region(info->res->start, resource_size(info->res)); | ||
135 | |||
136 | kfree(info); | ||
137 | |||
138 | if (latch_addr_data->done) | ||
139 | latch_addr_data->done(latch_addr_data->data); | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static int __devinit latch_addr_flash_probe(struct platform_device *dev) | ||
145 | { | ||
146 | struct latch_addr_flash_data *latch_addr_data; | ||
147 | struct latch_addr_flash_info *info; | ||
148 | resource_size_t win_base = dev->resource->start; | ||
149 | resource_size_t win_size = resource_size(dev->resource); | ||
150 | char **probe_type; | ||
151 | int chipsel; | ||
152 | int err; | ||
153 | |||
154 | latch_addr_data = dev->dev.platform_data; | ||
155 | if (latch_addr_data == NULL) | ||
156 | return -ENODEV; | ||
157 | |||
158 | pr_notice("latch-addr platform flash device: %#llx byte " | ||
159 | "window at %#.8llx\n", | ||
160 | (unsigned long long)win_size, (unsigned long long)win_base); | ||
161 | |||
162 | chipsel = dev->id; | ||
163 | |||
164 | if (latch_addr_data->init) { | ||
165 | err = latch_addr_data->init(latch_addr_data->data, chipsel); | ||
166 | if (err != 0) | ||
167 | return err; | ||
168 | } | ||
169 | |||
170 | info = kzalloc(sizeof(struct latch_addr_flash_info), GFP_KERNEL); | ||
171 | if (info == NULL) { | ||
172 | err = -ENOMEM; | ||
173 | goto done; | ||
174 | } | ||
175 | |||
176 | platform_set_drvdata(dev, info); | ||
177 | |||
178 | info->res = request_mem_region(win_base, win_size, DRIVER_NAME); | ||
179 | if (info->res == NULL) { | ||
180 | dev_err(&dev->dev, "Could not reserve memory region\n"); | ||
181 | err = -EBUSY; | ||
182 | goto free_info; | ||
183 | } | ||
184 | |||
185 | info->map.name = DRIVER_NAME; | ||
186 | info->map.size = latch_addr_data->size; | ||
187 | info->map.bankwidth = latch_addr_data->width; | ||
188 | |||
189 | info->map.phys = NO_XIP; | ||
190 | info->map.virt = ioremap(win_base, win_size); | ||
191 | if (!info->map.virt) { | ||
192 | err = -ENOMEM; | ||
193 | goto free_res; | ||
194 | } | ||
195 | |||
196 | info->map.map_priv_1 = (unsigned long)info; | ||
197 | |||
198 | info->map.read = lf_read; | ||
199 | info->map.copy_from = lf_copy_from; | ||
200 | info->map.write = lf_write; | ||
201 | info->set_window = latch_addr_data->set_window; | ||
202 | info->data = latch_addr_data->data; | ||
203 | info->win_mask = win_size - 1; | ||
204 | |||
205 | spin_lock_init(&info->lock); | ||
206 | |||
207 | for (probe_type = rom_probe_types; !info->mtd && *probe_type; | ||
208 | probe_type++) | ||
209 | info->mtd = do_map_probe(*probe_type, &info->map); | ||
210 | |||
211 | if (info->mtd == NULL) { | ||
212 | dev_err(&dev->dev, "map_probe failed\n"); | ||
213 | err = -ENODEV; | ||
214 | goto iounmap; | ||
215 | } | ||
216 | info->mtd->owner = THIS_MODULE; | ||
217 | |||
218 | if (mtd_has_partitions()) { | ||
219 | |||
220 | err = parse_mtd_partitions(info->mtd, | ||
221 | (const char **)part_probe_types, | ||
222 | &info->parts, 0); | ||
223 | if (err > 0) { | ||
224 | add_mtd_partitions(info->mtd, info->parts, err); | ||
225 | return 0; | ||
226 | } | ||
227 | if (latch_addr_data->nr_parts) { | ||
228 | pr_notice("Using latch-addr-flash partition information\n"); | ||
229 | add_mtd_partitions(info->mtd, latch_addr_data->parts, | ||
230 | latch_addr_data->nr_parts); | ||
231 | return 0; | ||
232 | } | ||
233 | } | ||
234 | add_mtd_device(info->mtd); | ||
235 | return 0; | ||
236 | |||
237 | iounmap: | ||
238 | iounmap(info->map.virt); | ||
239 | free_res: | ||
240 | release_mem_region(info->res->start, resource_size(info->res)); | ||
241 | free_info: | ||
242 | kfree(info); | ||
243 | done: | ||
244 | if (latch_addr_data->done) | ||
245 | latch_addr_data->done(latch_addr_data->data); | ||
246 | return err; | ||
247 | } | ||
248 | |||
249 | static struct platform_driver latch_addr_flash_driver = { | ||
250 | .probe = latch_addr_flash_probe, | ||
251 | .remove = __devexit_p(latch_addr_flash_remove), | ||
252 | .driver = { | ||
253 | .name = DRIVER_NAME, | ||
254 | }, | ||
255 | }; | ||
256 | |||
257 | static int __init latch_addr_flash_init(void) | ||
258 | { | ||
259 | return platform_driver_register(&latch_addr_flash_driver); | ||
260 | } | ||
261 | module_init(latch_addr_flash_init); | ||
262 | |||
263 | static void __exit latch_addr_flash_exit(void) | ||
264 | { | ||
265 | platform_driver_unregister(&latch_addr_flash_driver); | ||
266 | } | ||
267 | module_exit(latch_addr_flash_exit); | ||
268 | |||
269 | MODULE_AUTHOR("David Griego <dgriego@mvista.com>"); | ||
270 | MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with " | ||
271 | "upper address lines set by board-specific code"); | ||
272 | MODULE_LICENSE("GPL v2"); | ||
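A board using this driver supplies the window-switching hook through struct latch_addr_flash_data (fields as consumed by the probe above). The latch register write below is hypothetical board code, shown only to make the set_window contract concrete:

	/* Hypothetical board wiring for latch-addr-flash; the register
	 * name and shift are made up for illustration. */
	static void board_set_window(unsigned long offset, void *data)
	{
		/* Drive the latched upper address lines from the bits
		 * above the mapped window. */
		__raw_writew(offset >> BOARD_WINDOW_SHIFT, BOARD_LATCH_REG);
	}

	static struct latch_addr_flash_data board_flash_data = {
		.width		= 2,		/* bank width in bytes */
		.size		= SZ_64M,	/* full chip; only a window is mapped */
		.set_window	= board_set_window,
		.data		= NULL,
	};

lf_read(), lf_write(), and lf_copy_from() then take the driver's spinlock, reprogram the latch for the target offset, and access offset & win_mask inside the small ioremapped window.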
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index 4c18b98a311..7522df4f71f 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c | |||
@@ -59,10 +59,8 @@ static int physmap_flash_remove(struct platform_device *dev) | |||
59 | #else | 59 | #else |
60 | del_mtd_device(info->cmtd); | 60 | del_mtd_device(info->cmtd); |
61 | #endif | 61 | #endif |
62 | #ifdef CONFIG_MTD_CONCAT | ||
63 | if (info->cmtd != info->mtd[0]) | 62 | if (info->cmtd != info->mtd[0]) |
64 | mtd_concat_destroy(info->cmtd); | 63 | mtd_concat_destroy(info->cmtd); |
65 | #endif | ||
66 | } | 64 | } |
67 | 65 | ||
68 | for (i = 0; i < MAX_RESOURCES; i++) { | 66 | for (i = 0; i < MAX_RESOURCES; i++) { |
@@ -159,15 +157,9 @@ static int physmap_flash_probe(struct platform_device *dev) | |||
159 | /* | 157 | /* |
160 | * We detected multiple devices. Concatenate them together. | 158 | * We detected multiple devices. Concatenate them together. |
161 | */ | 159 | */ |
162 | #ifdef CONFIG_MTD_CONCAT | ||
163 | info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev)); | 160 | info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev)); |
164 | if (info->cmtd == NULL) | 161 | if (info->cmtd == NULL) |
165 | err = -ENXIO; | 162 | err = -ENXIO; |
166 | #else | ||
167 | printk(KERN_ERR "physmap-flash: multiple devices " | ||
168 | "found but MTD concat support disabled.\n"); | ||
169 | err = -ENXIO; | ||
170 | #endif | ||
171 | } | 163 | } |
172 | if (err) | 164 | if (err) |
173 | goto err_out; | 165 | goto err_out; |
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 3db0cb083d3..bd483f0c57e 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c | |||
@@ -104,12 +104,10 @@ static int of_flash_remove(struct platform_device *dev) | |||
104 | return 0; | 104 | return 0; |
105 | dev_set_drvdata(&dev->dev, NULL); | 105 | dev_set_drvdata(&dev->dev, NULL); |
106 | 106 | ||
107 | #ifdef CONFIG_MTD_CONCAT | ||
108 | if (info->cmtd != info->list[0].mtd) { | 107 | if (info->cmtd != info->list[0].mtd) { |
109 | del_mtd_device(info->cmtd); | 108 | del_mtd_device(info->cmtd); |
110 | mtd_concat_destroy(info->cmtd); | 109 | mtd_concat_destroy(info->cmtd); |
111 | } | 110 | } |
112 | #endif | ||
113 | 111 | ||
114 | if (info->cmtd) { | 112 | if (info->cmtd) { |
115 | if (OF_FLASH_PARTS(info)) { | 113 | if (OF_FLASH_PARTS(info)) { |
@@ -337,16 +335,10 @@ static int __devinit of_flash_probe(struct platform_device *dev) | |||
337 | /* | 335 | /* |
338 | * We detected multiple devices. Concatenate them together. | 336 | * We detected multiple devices. Concatenate them together. |
339 | */ | 337 | */ |
340 | #ifdef CONFIG_MTD_CONCAT | ||
341 | info->cmtd = mtd_concat_create(mtd_list, info->list_size, | 338 | info->cmtd = mtd_concat_create(mtd_list, info->list_size, |
342 | dev_name(&dev->dev)); | 339 | dev_name(&dev->dev)); |
343 | if (info->cmtd == NULL) | 340 | if (info->cmtd == NULL) |
344 | err = -ENXIO; | 341 | err = -ENXIO; |
345 | #else | ||
346 | printk(KERN_ERR "physmap_of: multiple devices " | ||
347 | "found but MTD concat support disabled.\n"); | ||
348 | err = -ENXIO; | ||
349 | #endif | ||
350 | } | 342 | } |
351 | if (err) | 343 | if (err) |
352 | goto err_out; | 344 | goto err_out; |
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index f3af87e08ec..da875908ea8 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c | |||
@@ -232,10 +232,8 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla | |||
232 | else | 232 | else |
233 | del_mtd_partitions(info->mtd); | 233 | del_mtd_partitions(info->mtd); |
234 | #endif | 234 | #endif |
235 | #ifdef CONFIG_MTD_CONCAT | ||
236 | if (info->mtd != info->subdev[0].mtd) | 235 | if (info->mtd != info->subdev[0].mtd) |
237 | mtd_concat_destroy(info->mtd); | 236 | mtd_concat_destroy(info->mtd); |
238 | #endif | ||
239 | } | 237 | } |
240 | 238 | ||
241 | kfree(info->parts); | 239 | kfree(info->parts); |
@@ -321,7 +319,6 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat) | |||
321 | info->mtd = info->subdev[0].mtd; | 319 | info->mtd = info->subdev[0].mtd; |
322 | ret = 0; | 320 | ret = 0; |
323 | } else if (info->num_subdev > 1) { | 321 | } else if (info->num_subdev > 1) { |
324 | #ifdef CONFIG_MTD_CONCAT | ||
325 | struct mtd_info *cdev[nr]; | 322 | struct mtd_info *cdev[nr]; |
326 | /* | 323 | /* |
327 | * We detected multiple devices. Concatenate them together. | 324 | * We detected multiple devices. Concatenate them together. |
@@ -333,11 +330,6 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat) | |||
333 | plat->name); | 330 | plat->name); |
334 | if (info->mtd == NULL) | 331 | if (info->mtd == NULL) |
335 | ret = -ENXIO; | 332 | ret = -ENXIO; |
336 | #else | ||
337 | printk(KERN_ERR "SA1100 flash: multiple devices " | ||
338 | "found but MTD concat support disabled.\n"); | ||
339 | ret = -ENXIO; | ||
340 | #endif | ||
341 | } | 333 | } |
342 | 334 | ||
343 | if (ret == 0) | 335 | if (ret == 0) |
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c index e2147bf11c8..e02dfa9d4dd 100644 --- a/drivers/mtd/maps/ts5500_flash.c +++ b/drivers/mtd/maps/ts5500_flash.c | |||
@@ -94,7 +94,6 @@ static int __init init_ts5500_map(void) | |||
94 | return 0; | 94 | return 0; |
95 | 95 | ||
96 | err1: | 96 | err1: |
97 | map_destroy(mymtd); | ||
98 | iounmap(ts5500_map.virt); | 97 | iounmap(ts5500_map.virt); |
99 | err2: | 98 | err2: |
100 | return rc; | 99 | return rc; |
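The removed map_destroy() call sat on an error path that is only reached when do_map_probe() has failed, i.e. while mymtd is still NULL, so it could only ever dereference a null pointer. A sketch of the goto-ladder convention the fix restores (identifier names approximate):

	ts5500_map.virt = ioremap_nocache(win_base, win_size);
	if (!ts5500_map.virt)
		goto err2;			/* nothing mapped yet */

	mymtd = do_map_probe("jedec_probe", &ts5500_map);
	if (!mymtd)
		goto err1;			/* probe failed: no mtd to destroy */
	/* ... */
err1:
	iounmap(ts5500_map.virt);		/* undo only what succeeded */
err2:
	return rc;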
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index e0a2373bf0e..a534e1f0c34 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -40,7 +40,7 @@ | |||
40 | static LIST_HEAD(blktrans_majors); | 40 | static LIST_HEAD(blktrans_majors); |
41 | static DEFINE_MUTEX(blktrans_ref_mutex); | 41 | static DEFINE_MUTEX(blktrans_ref_mutex); |
42 | 42 | ||
43 | void blktrans_dev_release(struct kref *kref) | 43 | static void blktrans_dev_release(struct kref *kref) |
44 | { | 44 | { |
45 | struct mtd_blktrans_dev *dev = | 45 | struct mtd_blktrans_dev *dev = |
46 | container_of(kref, struct mtd_blktrans_dev, ref); | 46 | container_of(kref, struct mtd_blktrans_dev, ref); |
@@ -67,7 +67,7 @@ unlock: | |||
67 | return dev; | 67 | return dev; |
68 | } | 68 | } |
69 | 69 | ||
70 | void blktrans_dev_put(struct mtd_blktrans_dev *dev) | 70 | static void blktrans_dev_put(struct mtd_blktrans_dev *dev) |
71 | { | 71 | { |
72 | mutex_lock(&blktrans_ref_mutex); | 72 | mutex_lock(&blktrans_ref_mutex); |
73 | kref_put(&dev->ref, blktrans_dev_release); | 73 | kref_put(&dev->ref, blktrans_dev_release); |
@@ -119,18 +119,43 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
119 | } | 119 | } |
120 | } | 120 | } |
121 | 121 | ||
122 | int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev) | ||
123 | { | ||
124 | if (kthread_should_stop()) | ||
125 | return 1; | ||
126 | |||
127 | return dev->bg_stop; | ||
128 | } | ||
129 | EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background); | ||
130 | |||
122 | static int mtd_blktrans_thread(void *arg) | 131 | static int mtd_blktrans_thread(void *arg) |
123 | { | 132 | { |
124 | struct mtd_blktrans_dev *dev = arg; | 133 | struct mtd_blktrans_dev *dev = arg; |
134 | struct mtd_blktrans_ops *tr = dev->tr; | ||
125 | struct request_queue *rq = dev->rq; | 135 | struct request_queue *rq = dev->rq; |
126 | struct request *req = NULL; | 136 | struct request *req = NULL; |
137 | int background_done = 0; | ||
127 | 138 | ||
128 | spin_lock_irq(rq->queue_lock); | 139 | spin_lock_irq(rq->queue_lock); |
129 | 140 | ||
130 | while (!kthread_should_stop()) { | 141 | while (!kthread_should_stop()) { |
131 | int res; | 142 | int res; |
132 | 143 | ||
144 | dev->bg_stop = false; | ||
133 | if (!req && !(req = blk_fetch_request(rq))) { | 145 | if (!req && !(req = blk_fetch_request(rq))) { |
146 | if (tr->background && !background_done) { | ||
147 | spin_unlock_irq(rq->queue_lock); | ||
148 | mutex_lock(&dev->lock); | ||
149 | tr->background(dev); | ||
150 | mutex_unlock(&dev->lock); | ||
151 | spin_lock_irq(rq->queue_lock); | ||
152 | /* | ||
153 | * Do background processing just once per idle | ||
154 | * period. | ||
155 | */ | ||
156 | background_done = !dev->bg_stop; | ||
157 | continue; | ||
158 | } | ||
134 | set_current_state(TASK_INTERRUPTIBLE); | 159 | set_current_state(TASK_INTERRUPTIBLE); |
135 | 160 | ||
136 | if (kthread_should_stop()) | 161 | if (kthread_should_stop()) |
@@ -152,6 +177,8 @@ static int mtd_blktrans_thread(void *arg) | |||
152 | 177 | ||
153 | if (!__blk_end_request_cur(req, res)) | 178 | if (!__blk_end_request_cur(req, res)) |
154 | req = NULL; | 179 | req = NULL; |
180 | |||
181 | background_done = 0; | ||
155 | } | 182 | } |
156 | 183 | ||
157 | if (req) | 184 | if (req) |
@@ -172,8 +199,10 @@ static void mtd_blktrans_request(struct request_queue *rq) | |||
172 | if (!dev) | 199 | if (!dev) |
173 | while ((req = blk_fetch_request(rq)) != NULL) | 200 | while ((req = blk_fetch_request(rq)) != NULL) |
174 | __blk_end_request_all(req, -ENODEV); | 201 | __blk_end_request_all(req, -ENODEV); |
175 | else | 202 | else { |
203 | dev->bg_stop = true; | ||
176 | wake_up_process(dev->thread); | 204 | wake_up_process(dev->thread); |
205 | } | ||
177 | } | 206 | } |
178 | 207 | ||
179 | static int blktrans_open(struct block_device *bdev, fmode_t mode) | 208 | static int blktrans_open(struct block_device *bdev, fmode_t mode) |
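A translation layer opts into this idle-time processing by providing a background() method; because mtd_blktrans_request() sets bg_stop before waking the thread, a long-running callback should poll mtd_blktrans_cease_background() so incoming I/O can interrupt it. A hedged sketch (the helper names are hypothetical):

	static void example_background(struct mtd_blktrans_dev *dev)
	{
		/* Called with dev->lock held, from the blktrans thread. */
		while (more_housekeeping_to_do(dev)) {	/* hypothetical */
			if (mtd_blktrans_cease_background(dev))
				return;		/* new request or thread stop */
			do_one_housekeeping_step(dev);	/* hypothetical */
		}
	}

mtdswap, added later in this series, is the intended first user: its background callback runs garbage collection while the swap device is idle.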
@@ -379,9 +408,10 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) | |||
379 | new->rq->queuedata = new; | 408 | new->rq->queuedata = new; |
380 | blk_queue_logical_block_size(new->rq, tr->blksize); | 409 | blk_queue_logical_block_size(new->rq, tr->blksize); |
381 | 410 | ||
382 | if (tr->discard) | 411 | if (tr->discard) { |
383 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, | 412 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq); |
384 | new->rq); | 413 | new->rq->limits.max_discard_sectors = UINT_MAX; |
414 | } | ||
385 | 415 | ||
386 | gd->queue = new->rq; | 416 | gd->queue = new->rq; |
387 | 417 | ||
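Setting QUEUE_FLAG_DISCARD alone is not enough: with limits.max_discard_sectors left at 0 the block layer never actually issues discards, so the new UINT_MAX assignment is what makes tr->discard reachable. Roughly how such a request is then routed in this file's do_blktrans_request() (paraphrased sketch):

	if (req->cmd_flags & REQ_DISCARD)
		return tr->discard(dev, block, nsect);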
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index 5f5777bd3f7..5060e608ea5 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c | |||
@@ -750,6 +750,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
750 | struct mtd_concat *concat; | 750 | struct mtd_concat *concat; |
751 | uint32_t max_erasesize, curr_erasesize; | 751 | uint32_t max_erasesize, curr_erasesize; |
752 | int num_erase_region; | 752 | int num_erase_region; |
753 | int max_writebufsize = 0; | ||
753 | 754 | ||
754 | printk(KERN_NOTICE "Concatenating MTD devices:\n"); | 755 | printk(KERN_NOTICE "Concatenating MTD devices:\n"); |
755 | for (i = 0; i < num_devs; i++) | 756 | for (i = 0; i < num_devs; i++) |
@@ -776,7 +777,12 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
776 | concat->mtd.size = subdev[0]->size; | 777 | concat->mtd.size = subdev[0]->size; |
777 | concat->mtd.erasesize = subdev[0]->erasesize; | 778 | concat->mtd.erasesize = subdev[0]->erasesize; |
778 | concat->mtd.writesize = subdev[0]->writesize; | 779 | concat->mtd.writesize = subdev[0]->writesize; |
779 | concat->mtd.writebufsize = subdev[0]->writebufsize; | 780 | |
781 | for (i = 0; i < num_devs; i++) | ||
782 | if (max_writebufsize < subdev[i]->writebufsize) | ||
783 | max_writebufsize = subdev[i]->writebufsize; | ||
784 | concat->mtd.writebufsize = max_writebufsize; | ||
785 | |||
780 | concat->mtd.subpage_sft = subdev[0]->subpage_sft; | 786 | concat->mtd.subpage_sft = subdev[0]->subpage_sft; |
781 | concat->mtd.oobsize = subdev[0]->oobsize; | 787 | concat->mtd.oobsize = subdev[0]->oobsize; |
782 | concat->mtd.oobavail = subdev[0]->oobavail; | 788 | concat->mtd.oobavail = subdev[0]->oobavail; |
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 527cebf58da..da69bc8a5a7 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -43,7 +43,7 @@ | |||
43 | * backing device capabilities for non-mappable devices (such as NAND flash) | 43 | * backing device capabilities for non-mappable devices (such as NAND flash) |
44 | * - permits private mappings, copies are taken of the data | 44 | * - permits private mappings, copies are taken of the data |
45 | */ | 45 | */ |
46 | struct backing_dev_info mtd_bdi_unmappable = { | 46 | static struct backing_dev_info mtd_bdi_unmappable = { |
47 | .capabilities = BDI_CAP_MAP_COPY, | 47 | .capabilities = BDI_CAP_MAP_COPY, |
48 | }; | 48 | }; |
49 | 49 | ||
@@ -52,7 +52,7 @@ struct backing_dev_info mtd_bdi_unmappable = { | |||
52 | * - permits private mappings, copies are taken of the data | 52 | * - permits private mappings, copies are taken of the data |
53 | * - permits non-writable shared mappings | 53 | * - permits non-writable shared mappings |
54 | */ | 54 | */ |
55 | struct backing_dev_info mtd_bdi_ro_mappable = { | 55 | static struct backing_dev_info mtd_bdi_ro_mappable = { |
56 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | 56 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | |
57 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), | 57 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), |
58 | }; | 58 | }; |
@@ -62,7 +62,7 @@ struct backing_dev_info mtd_bdi_ro_mappable = { | |||
62 | * - permits private mappings, copies are taken of the data | 62 | * - permits private mappings, copies are taken of the data |
63 | * - permits non-writable shared mappings | 63 | * - permits non-writable shared mappings |
64 | */ | 64 | */ |
65 | struct backing_dev_info mtd_bdi_rw_mappable = { | 65 | static struct backing_dev_info mtd_bdi_rw_mappable = { |
66 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | 66 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | |
67 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | | 67 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | |
68 | BDI_CAP_WRITE_MAP), | 68 | BDI_CAP_WRITE_MAP), |
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c new file mode 100644 index 00000000000..237913c5c92 --- /dev/null +++ b/drivers/mtd/mtdswap.c | |||
@@ -0,0 +1,1587 @@ | |||
1 | /* | ||
2 | * Swap block device support for MTDs | ||
3 | * Turns an MTD device into a swap device with block wear leveling | ||
4 | * | ||
5 | * Copyright © 2007,2011 Nokia Corporation. All rights reserved. | ||
6 | * | ||
7 | * Authors: Jarkko Lavinen <jarkko.lavinen@nokia.com> | ||
8 | * | ||
9 | * Based on Richard Purdie's earlier implementation in 2007. Background | ||
10 | * support and lock-less operation written by Adrian Hunter. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * version 2 as published by the Free Software Foundation. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, but | ||
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
19 | * General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
24 | * 02110-1301 USA | ||
25 | */ | ||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/mtd/mtd.h> | ||
30 | #include <linux/mtd/blktrans.h> | ||
31 | #include <linux/rbtree.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/vmalloc.h> | ||
35 | #include <linux/genhd.h> | ||
36 | #include <linux/swap.h> | ||
37 | #include <linux/debugfs.h> | ||
38 | #include <linux/seq_file.h> | ||
39 | #include <linux/device.h> | ||
40 | #include <linux/math64.h> | ||
41 | |||
42 | #define MTDSWAP_PREFIX "mtdswap" | ||
43 | |||
44 | /* | ||
45 | * The number of free eraseblocks when GC should stop | ||
46 | */ | ||
47 | #define CLEAN_BLOCK_THRESHOLD 20 | ||
48 | |||
49 | /* | ||
50 | * Number of free eraseblocks below which GC can also collect low frag | ||
51 | * blocks. | ||
52 | */ | ||
53 | #define LOW_FRAG_GC_TRESHOLD 5 | ||
54 | |||
55 | /* | ||
56 | * Wear level cost amortization. We want to do wear leveling in the background | ||
57 | * without disturbing GC too much. This is done by defining a max GC frequency. | ||
58 | * A frequency value of 6 means 1/6 of the GC passes will pick an erase block | ||
59 | * based on the biggest wear difference rather than the biggest dirtiness. | ||
60 | * | ||
61 | * The lower freq2 should be chosen so that the maximum erase difference | ||
62 | * decreases even if a malicious application deliberately tries to make | ||
63 | * erase differences large. | ||
64 | */ | ||
65 | #define MAX_ERASE_DIFF 4000 | ||
66 | #define COLLECT_NONDIRTY_BASE MAX_ERASE_DIFF | ||
67 | #define COLLECT_NONDIRTY_FREQ1 6 | ||
68 | #define COLLECT_NONDIRTY_FREQ2 4 | ||
69 | |||
70 | #define PAGE_UNDEF UINT_MAX | ||
71 | #define BLOCK_UNDEF UINT_MAX | ||
72 | #define BLOCK_ERROR (UINT_MAX - 1) | ||
73 | #define BLOCK_MAX (UINT_MAX - 2) | ||
74 | |||
75 | #define EBLOCK_BAD (1 << 0) | ||
76 | #define EBLOCK_NOMAGIC (1 << 1) | ||
77 | #define EBLOCK_BITFLIP (1 << 2) | ||
78 | #define EBLOCK_FAILED (1 << 3) | ||
79 | #define EBLOCK_READERR (1 << 4) | ||
80 | #define EBLOCK_IDX_SHIFT 5 | ||
81 | |||
82 | struct swap_eb { | ||
83 | struct rb_node rb; | ||
84 | struct rb_root *root; | ||
85 | |||
86 | unsigned int flags; | ||
87 | unsigned int active_count; | ||
88 | unsigned int erase_count; | ||
89 | unsigned int pad; /* speeds up pointer decrement */ | ||
90 | }; | ||
91 | |||
92 | #define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \ | ||
93 | rb)->erase_count) | ||
94 | #define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \ | ||
95 | rb)->erase_count) | ||
96 | |||
97 | struct mtdswap_tree { | ||
98 | struct rb_root root; | ||
99 | unsigned int count; | ||
100 | }; | ||
101 | |||
102 | enum { | ||
103 | MTDSWAP_CLEAN, | ||
104 | MTDSWAP_USED, | ||
105 | MTDSWAP_LOWFRAG, | ||
106 | MTDSWAP_HIFRAG, | ||
107 | MTDSWAP_DIRTY, | ||
108 | MTDSWAP_BITFLIP, | ||
109 | MTDSWAP_FAILING, | ||
110 | MTDSWAP_TREE_CNT, | ||
111 | }; | ||
112 | |||
113 | struct mtdswap_dev { | ||
114 | struct mtd_blktrans_dev *mbd_dev; | ||
115 | struct mtd_info *mtd; | ||
116 | struct device *dev; | ||
117 | |||
118 | unsigned int *page_data; | ||
119 | unsigned int *revmap; | ||
120 | |||
121 | unsigned int eblks; | ||
122 | unsigned int spare_eblks; | ||
123 | unsigned int pages_per_eblk; | ||
124 | unsigned int max_erase_count; | ||
125 | struct swap_eb *eb_data; | ||
126 | |||
127 | struct mtdswap_tree trees[MTDSWAP_TREE_CNT]; | ||
128 | |||
129 | unsigned long long sect_read_count; | ||
130 | unsigned long long sect_write_count; | ||
131 | unsigned long long mtd_write_count; | ||
132 | unsigned long long mtd_read_count; | ||
133 | unsigned long long discard_count; | ||
134 | unsigned long long discard_page_count; | ||
135 | |||
136 | unsigned int curr_write_pos; | ||
137 | struct swap_eb *curr_write; | ||
138 | |||
139 | char *page_buf; | ||
140 | char *oob_buf; | ||
141 | |||
142 | struct dentry *debugfs_root; | ||
143 | }; | ||
144 | |||
145 | struct mtdswap_oobdata { | ||
146 | __le16 magic; | ||
147 | __le32 count; | ||
148 | } __attribute__((packed)); | ||
149 | |||
150 | #define MTDSWAP_MAGIC_CLEAN 0x2095 | ||
151 | #define MTDSWAP_MAGIC_DIRTY (MTDSWAP_MAGIC_CLEAN + 1) | ||
152 | #define MTDSWAP_TYPE_CLEAN 0 | ||
153 | #define MTDSWAP_TYPE_DIRTY 1 | ||
154 | #define MTDSWAP_OOBSIZE sizeof(struct mtdswap_oobdata) | ||
155 | |||
156 | #define MTDSWAP_ERASE_RETRIES 3 /* Before marking erase block bad */ | ||
157 | #define MTDSWAP_IO_RETRIES 3 | ||
158 | |||
159 | enum { | ||
160 | MTDSWAP_SCANNED_CLEAN, | ||
161 | MTDSWAP_SCANNED_DIRTY, | ||
162 | MTDSWAP_SCANNED_BITFLIP, | ||
163 | MTDSWAP_SCANNED_BAD, | ||
164 | }; | ||
165 | |||
166 | /* | ||
167 | * In the worst case mtdswap_writesect() has allocated the last clean | ||
168 | * page from the current block and is then pre-empted by the GC | ||
169 | * thread. The thread can consume a full erase block when moving a | ||
170 | * block. | ||
171 | */ | ||
172 | #define MIN_SPARE_EBLOCKS 2 | ||
173 | #define MIN_ERASE_BLOCKS (MIN_SPARE_EBLOCKS + 1) | ||
174 | |||
175 | #define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root) | ||
176 | #define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL) | ||
177 | #define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name)) | ||
178 | #define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count) | ||
179 | |||
180 | #define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv) | ||
181 | |||
182 | static char partitions[128] = ""; | ||
183 | module_param_string(partitions, partitions, sizeof(partitions), 0444); | ||
184 | MODULE_PARM_DESC(partitions, "MTD partition numbers to use as swap " | ||
185 | "partitions, e.g. partitions=\"1,3,5\""); | ||
186 | |||
187 | static unsigned int spare_eblocks = 10; | ||
188 | module_param(spare_eblocks, uint, 0444); | ||
189 | MODULE_PARM_DESC(spare_eblocks, "Percentage of spare erase blocks for " | ||
190 | "garbage collection (default 10%)"); | ||
191 | |||
192 | static bool header; /* false */ | ||
193 | module_param(header, bool, 0444); | ||
194 | MODULE_PARM_DESC(header, | ||
195 | "Include builtin swap header (default 0, without header)"); | ||
196 | |||
197 | static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background); | ||
198 | |||
199 | static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb) | ||
200 | { | ||
201 | return (loff_t)(eb - d->eb_data) * d->mtd->erasesize; | ||
202 | } | ||
203 | |||
204 | static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb) | ||
205 | { | ||
206 | unsigned int oldidx; | ||
207 | struct mtdswap_tree *tp; | ||
208 | |||
209 | if (eb->root) { | ||
210 | tp = container_of(eb->root, struct mtdswap_tree, root); | ||
211 | oldidx = tp - &d->trees[0]; | ||
212 | |||
213 | d->trees[oldidx].count--; | ||
214 | rb_erase(&eb->rb, eb->root); | ||
215 | } | ||
216 | } | ||
217 | |||
218 | static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb) | ||
219 | { | ||
220 | struct rb_node **p, *parent = NULL; | ||
221 | struct swap_eb *cur; | ||
222 | |||
223 | p = &root->rb_node; | ||
224 | while (*p) { | ||
225 | parent = *p; | ||
226 | cur = rb_entry(parent, struct swap_eb, rb); | ||
227 | if (eb->erase_count > cur->erase_count) | ||
228 | p = &(*p)->rb_right; | ||
229 | else | ||
230 | p = &(*p)->rb_left; | ||
231 | } | ||
232 | |||
233 | rb_link_node(&eb->rb, parent, p); | ||
234 | rb_insert_color(&eb->rb, root); | ||
235 | } | ||
236 | |||
237 | static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx) | ||
238 | { | ||
239 | struct rb_root *root; | ||
240 | |||
241 | if (eb->root == &d->trees[idx].root) | ||
242 | return; | ||
243 | |||
244 | mtdswap_eb_detach(d, eb); | ||
245 | root = &d->trees[idx].root; | ||
246 | __mtdswap_rb_add(root, eb); | ||
247 | eb->root = root; | ||
248 | d->trees[idx].count++; | ||
249 | } | ||
250 | |||
251 | static struct rb_node *mtdswap_rb_index(struct rb_root *root, unsigned int idx) | ||
252 | { | ||
253 | struct rb_node *p; | ||
254 | unsigned int i; | ||
255 | |||
256 | p = rb_first(root); | ||
257 | i = 0; | ||
258 | while (i < idx && p) { | ||
259 | p = rb_next(p); | ||
260 | i++; | ||
261 | } | ||
262 | |||
263 | return p; | ||
264 | } | ||
265 | |||
266 | static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb) | ||
267 | { | ||
268 | int ret; | ||
269 | loff_t offset; | ||
270 | |||
271 | d->spare_eblks--; | ||
272 | eb->flags |= EBLOCK_BAD; | ||
273 | mtdswap_eb_detach(d, eb); | ||
274 | eb->root = NULL; | ||
275 | |||
276 | /* badblocks not supported */ | ||
277 | if (!d->mtd->block_markbad) | ||
278 | return 1; | ||
279 | |||
280 | offset = mtdswap_eb_offset(d, eb); | ||
281 | dev_warn(d->dev, "Marking bad block at %08llx\n", offset); | ||
282 | ret = d->mtd->block_markbad(d->mtd, offset); | ||
283 | |||
284 | if (ret) { | ||
285 | dev_warn(d->dev, "Mark block bad failed for block at %08llx " | ||
286 | "error %d\n", offset, ret); | ||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | return 1; | ||
291 | |||
292 | } | ||
293 | |||
294 | static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb) | ||
295 | { | ||
296 | unsigned int marked = eb->flags & EBLOCK_FAILED; | ||
297 | struct swap_eb *curr_write = d->curr_write; | ||
298 | |||
299 | eb->flags |= EBLOCK_FAILED; | ||
300 | if (curr_write == eb) { | ||
301 | d->curr_write = NULL; | ||
302 | |||
303 | if (!marked && d->curr_write_pos != 0) { | ||
304 | mtdswap_rb_add(d, eb, MTDSWAP_FAILING); | ||
305 | return 0; | ||
306 | } | ||
307 | } | ||
308 | |||
309 | return mtdswap_handle_badblock(d, eb); | ||
310 | } | ||
311 | |||
312 | static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from, | ||
313 | struct mtd_oob_ops *ops) | ||
314 | { | ||
315 | int ret = d->mtd->read_oob(d->mtd, from, ops); | ||
316 | |||
317 | if (ret == -EUCLEAN) | ||
318 | return ret; | ||
319 | |||
320 | if (ret) { | ||
321 | dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n", | ||
322 | ret, from); | ||
323 | return ret; | ||
324 | } | ||
325 | |||
326 | if (ops->oobretlen < ops->ooblen) { | ||
327 | dev_warn(d->dev, "Read OOB return short read (%zd bytes not " | ||
328 | "%zd) for block at %08llx\n", | ||
329 | ops->oobretlen, ops->ooblen, from); | ||
330 | return -EIO; | ||
331 | } | ||
332 | |||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb) | ||
337 | { | ||
338 | struct mtdswap_oobdata *data, *data2; | ||
339 | int ret; | ||
340 | loff_t offset; | ||
341 | struct mtd_oob_ops ops; | ||
342 | |||
343 | offset = mtdswap_eb_offset(d, eb); | ||
344 | |||
345 | /* Check first if the block is bad. */ | ||
346 | if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset)) | ||
347 | return MTDSWAP_SCANNED_BAD; | ||
348 | |||
349 | ops.ooblen = 2 * d->mtd->ecclayout->oobavail; | ||
350 | ops.oobbuf = d->oob_buf; | ||
351 | ops.ooboffs = 0; | ||
352 | ops.datbuf = NULL; | ||
353 | ops.mode = MTD_OOB_AUTO; | ||
354 | |||
355 | ret = mtdswap_read_oob(d, offset, &ops); | ||
356 | |||
357 | if (ret && ret != -EUCLEAN) | ||
358 | return ret; | ||
359 | |||
360 | data = (struct mtdswap_oobdata *)d->oob_buf; | ||
361 | data2 = (struct mtdswap_oobdata *) | ||
362 | (d->oob_buf + d->mtd->ecclayout->oobavail); | ||
363 | |||
364 | if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) { | ||
365 | eb->erase_count = le32_to_cpu(data->count); | ||
366 | if (ret == -EUCLEAN) | ||
367 | ret = MTDSWAP_SCANNED_BITFLIP; | ||
368 | else { | ||
369 | if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY) | ||
370 | ret = MTDSWAP_SCANNED_DIRTY; | ||
371 | else | ||
372 | ret = MTDSWAP_SCANNED_CLEAN; | ||
373 | } | ||
374 | } else { | ||
375 | eb->flags |= EBLOCK_NOMAGIC; | ||
376 | ret = MTDSWAP_SCANNED_DIRTY; | ||
377 | } | ||
378 | |||
379 | return ret; | ||
380 | } | ||
381 | |||
382 | static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb, | ||
383 | u16 marker) | ||
384 | { | ||
385 | struct mtdswap_oobdata n; | ||
386 | int ret; | ||
387 | loff_t offset; | ||
388 | struct mtd_oob_ops ops; | ||
389 | |||
390 | ops.ooboffs = 0; | ||
391 | ops.oobbuf = (uint8_t *)&n; | ||
392 | ops.mode = MTD_OOB_AUTO; | ||
393 | ops.datbuf = NULL; | ||
394 | |||
395 | if (marker == MTDSWAP_TYPE_CLEAN) { | ||
396 | n.magic = cpu_to_le16(MTDSWAP_MAGIC_CLEAN); | ||
397 | n.count = cpu_to_le32(eb->erase_count); | ||
398 | ops.ooblen = MTDSWAP_OOBSIZE; | ||
399 | offset = mtdswap_eb_offset(d, eb); | ||
400 | } else { | ||
401 | n.magic = cpu_to_le16(MTDSWAP_MAGIC_DIRTY); | ||
402 | ops.ooblen = sizeof(n.magic); | ||
403 | offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize; | ||
404 | } | ||
405 | |||
406 | ret = d->mtd->write_oob(d->mtd, offset, &ops); | ||
407 | |||
408 | if (ret) { | ||
409 | dev_warn(d->dev, "Write OOB failed for block at %08llx " | ||
410 | "error %d\n", offset, ret); | ||
411 | if (ret == -EIO || ret == -EBADMSG) | ||
412 | mtdswap_handle_write_error(d, eb); | ||
413 | return ret; | ||
414 | } | ||
415 | |||
416 | if (ops.oobretlen != ops.ooblen) { | ||
417 | dev_warn(d->dev, "Short OOB write for block at %08llx: " | ||
418 | "%zd not %zd\n", | ||
419 | offset, ops.oobretlen, ops.ooblen); | ||
420 | return -EIO; | ||
421 | } | ||
422 | |||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | /* | ||
427 | * Are there any erase blocks without MAGIC_CLEAN header, presumably | ||
428 | * because power was cut off after erase but before header write? We | ||
429 | * need to guesstimate the erase count. | ||
430 | */ | ||
431 | static void mtdswap_check_counts(struct mtdswap_dev *d) | ||
432 | { | ||
433 | struct rb_root hist_root = RB_ROOT; | ||
434 | struct rb_node *medrb; | ||
435 | struct swap_eb *eb; | ||
436 | unsigned int i, cnt, median; | ||
437 | |||
438 | cnt = 0; | ||
439 | for (i = 0; i < d->eblks; i++) { | ||
440 | eb = d->eb_data + i; | ||
441 | |||
442 | if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR)) | ||
443 | continue; | ||
444 | |||
445 | __mtdswap_rb_add(&hist_root, eb); | ||
446 | cnt++; | ||
447 | } | ||
448 | |||
449 | if (cnt == 0) | ||
450 | return; | ||
451 | |||
452 | medrb = mtdswap_rb_index(&hist_root, cnt / 2); | ||
453 | median = rb_entry(medrb, struct swap_eb, rb)->erase_count; | ||
454 | |||
455 | d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root); | ||
456 | |||
457 | for (i = 0; i < d->eblks; i++) { | ||
458 | eb = d->eb_data + i; | ||
459 | |||
460 | if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR)) | ||
461 | eb->erase_count = median; | ||
462 | |||
463 | if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR)) | ||
464 | continue; | ||
465 | |||
466 | rb_erase(&eb->rb, &hist_root); | ||
467 | } | ||
468 | } | ||
469 | |||
470 | static void mtdswap_scan_eblks(struct mtdswap_dev *d) | ||
471 | { | ||
472 | int status; | ||
473 | unsigned int i, idx; | ||
474 | struct swap_eb *eb; | ||
475 | |||
476 | for (i = 0; i < d->eblks; i++) { | ||
477 | eb = d->eb_data + i; | ||
478 | |||
479 | status = mtdswap_read_markers(d, eb); | ||
480 | if (status < 0) | ||
481 | eb->flags |= EBLOCK_READERR; | ||
482 | else if (status == MTDSWAP_SCANNED_BAD) { | ||
483 | eb->flags |= EBLOCK_BAD; | ||
484 | continue; | ||
485 | } | ||
486 | |||
487 | switch (status) { | ||
488 | case MTDSWAP_SCANNED_CLEAN: | ||
489 | idx = MTDSWAP_CLEAN; | ||
490 | break; | ||
491 | case MTDSWAP_SCANNED_DIRTY: | ||
492 | case MTDSWAP_SCANNED_BITFLIP: | ||
493 | idx = MTDSWAP_DIRTY; | ||
494 | break; | ||
495 | default: | ||
496 | idx = MTDSWAP_FAILING; | ||
497 | } | ||
498 | |||
499 | eb->flags |= (idx << EBLOCK_IDX_SHIFT); | ||
500 | } | ||
501 | |||
502 | mtdswap_check_counts(d); | ||
503 | |||
504 | for (i = 0; i < d->eblks; i++) { | ||
505 | eb = d->eb_data + i; | ||
506 | |||
507 | if (eb->flags & EBLOCK_BAD) | ||
508 | continue; | ||
509 | |||
510 | idx = eb->flags >> EBLOCK_IDX_SHIFT; | ||
511 | mtdswap_rb_add(d, eb, idx); | ||
512 | } | ||
513 | } | ||
514 | |||
515 | /* | ||
516 | * Place the eraseblock into the tree corresponding to the number of | ||
517 | * active blocks it contains. | ||
518 | */ | ||
519 | static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb) | ||
520 | { | ||
521 | unsigned int weight = eb->active_count; | ||
522 | unsigned int maxweight = d->pages_per_eblk; | ||
523 | |||
524 | if (eb == d->curr_write) | ||
525 | return; | ||
526 | |||
527 | if (eb->flags & EBLOCK_BITFLIP) | ||
528 | mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); | ||
529 | else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED)) | ||
530 | mtdswap_rb_add(d, eb, MTDSWAP_FAILING); | ||
531 | else if (weight == maxweight) | ||
532 | mtdswap_rb_add(d, eb, MTDSWAP_USED); | ||
533 | else if (weight == 0) | ||
534 | mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); | ||
535 | else if (weight > (maxweight/2)) | ||
536 | mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG); | ||
537 | else | ||
538 | mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG); | ||
539 | } | ||
540 | |||
541 | |||
542 | static void mtdswap_erase_callback(struct erase_info *done) | ||
543 | { | ||
544 | wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; | ||
545 | wake_up(wait_q); | ||
546 | } | ||
547 | |||
548 | static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb) | ||
549 | { | ||
550 | struct mtd_info *mtd = d->mtd; | ||
551 | struct erase_info erase; | ||
552 | wait_queue_head_t wq; | ||
553 | unsigned int retries = 0; | ||
554 | int ret; | ||
555 | |||
556 | eb->erase_count++; | ||
557 | if (eb->erase_count > d->max_erase_count) | ||
558 | d->max_erase_count = eb->erase_count; | ||
559 | |||
560 | retry: | ||
561 | init_waitqueue_head(&wq); | ||
562 | memset(&erase, 0, sizeof(struct erase_info)); | ||
563 | |||
564 | erase.mtd = mtd; | ||
565 | erase.callback = mtdswap_erase_callback; | ||
566 | erase.addr = mtdswap_eb_offset(d, eb); | ||
567 | erase.len = mtd->erasesize; | ||
568 | erase.priv = (u_long)&wq; | ||
569 | |||
570 | ret = mtd->erase(mtd, &erase); | ||
571 | if (ret) { | ||
572 | if (retries++ < MTDSWAP_ERASE_RETRIES) { | ||
573 | dev_warn(d->dev, | ||
574 | "erase of erase block %#llx on %s failed", | ||
575 | erase.addr, mtd->name); | ||
576 | yield(); | ||
577 | goto retry; | ||
578 | } | ||
579 | |||
580 | dev_err(d->dev, "Cannot erase erase block %#llx on %s\n", | ||
581 | erase.addr, mtd->name); | ||
582 | |||
583 | mtdswap_handle_badblock(d, eb); | ||
584 | return -EIO; | ||
585 | } | ||
586 | |||
587 | ret = wait_event_interruptible(wq, erase.state == MTD_ERASE_DONE || | ||
588 | erase.state == MTD_ERASE_FAILED); | ||
589 | if (ret) { | ||
590 | dev_err(d->dev, "Interrupted erase block %#llx erasure on %s", | ||
591 | erase.addr, mtd->name); | ||
592 | return -EINTR; | ||
593 | } | ||
594 | |||
595 | if (erase.state == MTD_ERASE_FAILED) { | ||
596 | if (retries++ < MTDSWAP_ERASE_RETRIES) { | ||
597 | dev_warn(d->dev, | ||
598 | "erase of erase block %#llx on %s failed", | ||
599 | erase.addr, mtd->name); | ||
600 | yield(); | ||
601 | goto retry; | ||
602 | } | ||
603 | |||
604 | mtdswap_handle_badblock(d, eb); | ||
605 | return -EIO; | ||
606 | } | ||
607 | |||
608 | return 0; | ||
609 | } | ||
610 | |||
611 | static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page, | ||
612 | unsigned int *block) | ||
613 | { | ||
614 | int ret; | ||
615 | struct swap_eb *old_eb = d->curr_write; | ||
616 | struct rb_root *clean_root; | ||
617 | struct swap_eb *eb; | ||
618 | |||
619 | if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) { | ||
620 | do { | ||
621 | if (TREE_EMPTY(d, CLEAN)) | ||
622 | return -ENOSPC; | ||
623 | |||
624 | clean_root = TREE_ROOT(d, CLEAN); | ||
625 | eb = rb_entry(rb_first(clean_root), struct swap_eb, rb); | ||
626 | rb_erase(&eb->rb, clean_root); | ||
627 | eb->root = NULL; | ||
628 | TREE_COUNT(d, CLEAN)--; | ||
629 | |||
630 | ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY); | ||
631 | } while (ret == -EIO || ret == -EBADMSG); | ||
632 | |||
633 | if (ret) | ||
634 | return ret; | ||
635 | |||
636 | d->curr_write_pos = 0; | ||
637 | d->curr_write = eb; | ||
638 | if (old_eb) | ||
639 | mtdswap_store_eb(d, old_eb); | ||
640 | } | ||
641 | |||
642 | *block = (d->curr_write - d->eb_data) * d->pages_per_eblk + | ||
643 | d->curr_write_pos; | ||
644 | |||
645 | d->curr_write->active_count++; | ||
646 | d->revmap[*block] = page; | ||
647 | d->curr_write_pos++; | ||
648 | |||
649 | return 0; | ||
650 | } | ||
651 | |||
652 | static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d) | ||
653 | { | ||
654 | return TREE_COUNT(d, CLEAN) * d->pages_per_eblk + | ||
655 | d->pages_per_eblk - d->curr_write_pos; | ||
656 | } | ||
657 | |||
658 | static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d) | ||
659 | { | ||
660 | return mtdswap_free_page_cnt(d) > d->pages_per_eblk; | ||
661 | } | ||
662 | |||
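A quick worked example of this accounting, with illustrative geometry rather than real device values: three clean blocks of 64 pages each plus a current write block with 20 pages already used give 3 * 64 + (64 - 20) = 236 free pages, and writes are only accepted while more than one whole erase block's worth remains:

    /* illustrative numbers; real values come from the device geometry */
    static unsigned int example_free_pages(void)
    {
            unsigned int clean_blocks = 3, pages_per_eblk = 64, curr_write_pos = 20;

            /* mirrors mtdswap_free_page_cnt(): 3 * 64 + 44 = 236 */
            return clean_blocks * pages_per_eblk +
                   pages_per_eblk - curr_write_pos;
    }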
663 | static int mtdswap_write_block(struct mtdswap_dev *d, char *buf, | ||
664 | unsigned int page, unsigned int *bp, int gc_context) | ||
665 | { | ||
666 | struct mtd_info *mtd = d->mtd; | ||
667 | struct swap_eb *eb; | ||
668 | size_t retlen; | ||
669 | loff_t writepos; | ||
670 | int ret; | ||
671 | |||
672 | retry: | ||
673 | if (!gc_context) | ||
674 | while (!mtdswap_enough_free_pages(d)) | ||
675 | if (mtdswap_gc(d, 0) > 0) | ||
676 | return -ENOSPC; | ||
677 | |||
678 | ret = mtdswap_map_free_block(d, page, bp); | ||
679 | eb = d->eb_data + (*bp / d->pages_per_eblk); | ||
680 | |||
681 | if (ret == -EIO || ret == -EBADMSG) { | ||
682 | d->curr_write = NULL; | ||
683 | eb->active_count--; | ||
684 | d->revmap[*bp] = PAGE_UNDEF; | ||
685 | goto retry; | ||
686 | } | ||
687 | |||
688 | if (ret < 0) | ||
689 | return ret; | ||
690 | |||
691 | writepos = (loff_t)*bp << PAGE_SHIFT; | ||
692 | ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf); | ||
693 | if (ret == -EIO || ret == -EBADMSG) { | ||
694 | d->curr_write_pos--; | ||
695 | eb->active_count--; | ||
696 | d->revmap[*bp] = PAGE_UNDEF; | ||
697 | mtdswap_handle_write_error(d, eb); | ||
698 | goto retry; | ||
699 | } | ||
700 | |||
701 | if (ret < 0) { | ||
702 | dev_err(d->dev, "Write to MTD device failed: %d (%zd written)", | ||
703 | ret, retlen); | ||
704 | goto err; | ||
705 | } | ||
706 | |||
707 | if (retlen != PAGE_SIZE) { | ||
708 | dev_err(d->dev, "Short write to MTD device: %zd written", | ||
709 | retlen); | ||
710 | ret = -EIO; | ||
711 | goto err; | ||
712 | } | ||
713 | |||
714 | return ret; | ||
715 | |||
716 | err: | ||
717 | d->curr_write_pos--; | ||
718 | eb->active_count--; | ||
719 | d->revmap[*bp] = PAGE_UNDEF; | ||
720 | |||
721 | return ret; | ||
722 | } | ||
723 | |||
724 | static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock, | ||
725 | unsigned int *newblock) | ||
726 | { | ||
727 | struct mtd_info *mtd = d->mtd; | ||
728 | struct swap_eb *eb, *oldeb; | ||
729 | int ret; | ||
730 | size_t retlen; | ||
731 | unsigned int page, retries; | ||
732 | loff_t readpos; | ||
733 | |||
734 | page = d->revmap[oldblock]; | ||
735 | readpos = (loff_t) oldblock << PAGE_SHIFT; | ||
736 | retries = 0; | ||
737 | |||
738 | retry: | ||
739 | ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf); | ||
740 | |||
741 | if (ret < 0 && ret != -EUCLEAN) { | ||
742 | oldeb = d->eb_data + oldblock / d->pages_per_eblk; | ||
743 | oldeb->flags |= EBLOCK_READERR; | ||
744 | |||
745 | dev_err(d->dev, "Read error: %d (block %u)\n", ret, | ||
746 | oldblock); | ||
747 | retries++; | ||
748 | if (retries < MTDSWAP_IO_RETRIES) | ||
749 | goto retry; | ||
750 | |||
751 | goto read_error; | ||
752 | } | ||
753 | |||
754 | if (retlen != PAGE_SIZE) { | ||
755 | dev_err(d->dev, "Short read: %zd (block %u)\n", retlen, | ||
756 | oldblock); | ||
757 | ret = -EIO; | ||
758 | goto read_error; | ||
759 | } | ||
760 | |||
761 | ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1); | ||
762 | if (ret < 0) { | ||
763 | d->page_data[page] = BLOCK_ERROR; | ||
764 | dev_err(d->dev, "Write error: %d\n", ret); | ||
765 | return ret; | ||
766 | } | ||
767 | |||
768 | eb = d->eb_data + *newblock / d->pages_per_eblk; | ||
769 | d->page_data[page] = *newblock; | ||
770 | d->revmap[oldblock] = PAGE_UNDEF; | ||
771 | eb = d->eb_data + oldblock / d->pages_per_eblk; | ||
772 | eb->active_count--; | ||
773 | |||
774 | return 0; | ||
775 | |||
776 | read_error: | ||
777 | d->page_data[page] = BLOCK_ERROR; | ||
778 | d->revmap[oldblock] = PAGE_UNDEF; | ||
779 | return ret; | ||
780 | } | ||
781 | |||
782 | static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb) | ||
783 | { | ||
784 | unsigned int i, block, eblk_base, newblock; | ||
785 | int ret, errcode; | ||
786 | |||
787 | errcode = 0; | ||
788 | eblk_base = (eb - d->eb_data) * d->pages_per_eblk; | ||
789 | |||
790 | for (i = 0; i < d->pages_per_eblk; i++) { | ||
791 | if (d->spare_eblks < MIN_SPARE_EBLOCKS) | ||
792 | return -ENOSPC; | ||
793 | |||
794 | block = eblk_base + i; | ||
795 | if (d->revmap[block] == PAGE_UNDEF) | ||
796 | continue; | ||
797 | |||
798 | ret = mtdswap_move_block(d, block, &newblock); | ||
799 | if (ret < 0 && !errcode) | ||
800 | errcode = ret; | ||
801 | } | ||
802 | |||
803 | return errcode; | ||
804 | } | ||
805 | |||
806 | static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d) | ||
807 | { | ||
808 | int idx, stopat; | ||
809 | |||
810 | if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD) | ||
811 | stopat = MTDSWAP_LOWFRAG; | ||
812 | else | ||
813 | stopat = MTDSWAP_HIFRAG; | ||
814 | |||
815 | for (idx = MTDSWAP_BITFLIP; idx >= stopat; idx--) | ||
816 | if (d->trees[idx].root.rb_node != NULL) | ||
817 | return idx; | ||
818 | |||
819 | return -1; | ||
820 | } | ||
821 | |||
822 | static int mtdswap_wlfreq(unsigned int maxdiff) | ||
823 | { | ||
824 | unsigned int h, x, y, dist, base; | ||
825 | |||
826 | /* | ||
827 | * Calculate linear ramp down from f1 to f2 when maxdiff goes from | ||
828 | * MAX_ERASE_DIFF to MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE. Similar | ||
829 | * to a triangle with height f1 - f2 and width COLLECT_NONDIRTY_BASE. | ||
830 | */ | ||
831 | |||
832 | dist = maxdiff - MAX_ERASE_DIFF; | ||
833 | if (dist > COLLECT_NONDIRTY_BASE) | ||
834 | dist = COLLECT_NONDIRTY_BASE; | ||
835 | |||
836 | /* | ||
837 | * Modelling the slope as a right-angled triangle with base | ||
838 | * COLLECT_NONDIRTY_BASE and height freq1 - freq2. The ratio y/x is | ||
839 | * equal to the ratio h/base. | ||
840 | */ | ||
841 | h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2; | ||
842 | base = COLLECT_NONDIRTY_BASE; | ||
843 | |||
844 | x = base - dist; | ||
845 | y = (x * h + base / 2) / base; | ||
846 | |||
847 | return COLLECT_NONDIRTY_FREQ2 + y; | ||
848 | } | ||
849 | |||
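The function thus interpolates linearly between the two collection frequencies. A worked sketch with illustrative constants (the real COLLECT_NONDIRTY_* and MAX_ERASE_DIFF values are defined earlier in this file and may differ):

    /* f1 = 6, f2 = 4, base = 50 are made-up values for illustration */
    unsigned int f1 = 6, f2 = 4, base = 50, dist, y;

    for (dist = 0; dist <= base; dist += 25) {
            y = ((base - dist) * (f1 - f2) + base / 2) / base;
            /* dist =  0 -> f2 + y = 6, wear-leveling picks least frequent
             * dist = 25 -> f2 + y = 5
             * dist = 50 -> f2 + y = 4, wear-leveling picks most frequent */
    }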
850 | static int mtdswap_choose_wl_tree(struct mtdswap_dev *d) | ||
851 | { | ||
852 | static unsigned int pick_cnt; | ||
853 | unsigned int i, idx = -1, wear, max; | ||
854 | struct rb_root *root; | ||
855 | |||
856 | max = 0; | ||
857 | for (i = 0; i <= MTDSWAP_DIRTY; i++) { | ||
858 | root = &d->trees[i].root; | ||
859 | if (root->rb_node == NULL) | ||
860 | continue; | ||
861 | |||
862 | wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root); | ||
863 | if (wear > max) { | ||
864 | max = wear; | ||
865 | idx = i; | ||
866 | } | ||
867 | } | ||
868 | |||
869 | if (max > MAX_ERASE_DIFF && pick_cnt >= mtdswap_wlfreq(max) - 1) { | ||
870 | pick_cnt = 0; | ||
871 | return idx; | ||
872 | } | ||
873 | |||
874 | pick_cnt++; | ||
875 | return -1; | ||
876 | } | ||
877 | |||
878 | static int mtdswap_choose_gc_tree(struct mtdswap_dev *d, | ||
879 | unsigned int background) | ||
880 | { | ||
881 | int idx; | ||
882 | |||
883 | if (TREE_NONEMPTY(d, FAILING) && | ||
884 | (background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY)))) | ||
885 | return MTDSWAP_FAILING; | ||
886 | |||
887 | idx = mtdswap_choose_wl_tree(d); | ||
888 | if (idx >= MTDSWAP_CLEAN) | ||
889 | return idx; | ||
890 | |||
891 | return __mtdswap_choose_gc_tree(d); | ||
892 | } | ||
893 | |||
894 | static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d, | ||
895 | unsigned int background) | ||
896 | { | ||
897 | struct rb_root *rp = NULL; | ||
898 | struct swap_eb *eb = NULL; | ||
899 | int idx; | ||
900 | |||
901 | if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD && | ||
902 | TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING)) | ||
903 | return NULL; | ||
904 | |||
905 | idx = mtdswap_choose_gc_tree(d, background); | ||
906 | if (idx < 0) | ||
907 | return NULL; | ||
908 | |||
909 | rp = &d->trees[idx].root; | ||
910 | eb = rb_entry(rb_first(rp), struct swap_eb, rb); | ||
911 | |||
912 | rb_erase(&eb->rb, rp); | ||
913 | eb->root = NULL; | ||
914 | d->trees[idx].count--; | ||
915 | return eb; | ||
916 | } | ||
917 | |||
918 | static unsigned int mtdswap_test_patt(unsigned int i) | ||
919 | { | ||
920 | return i % 2 ? 0x55555555 : 0xAAAAAAAA; | ||
921 | } | ||
922 | |||
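Across the two passes of the block test below, every page sees both checkerboard phases, so each cell is exercised in both bit states with inverted neighbours. For example:

    /* pass 0 writes 0xAAAAAAAA, 0x55555555, 0xAAAAAAAA, ... to pages 0, 1, 2
     * pass 1 writes 0x55555555, 0xAAAAAAAA, 0x55555555, ... to the same pages */
    unsigned int pass, page, patt;

    for (pass = 0; pass < 2; pass++)
            for (page = 0; page < 4; page++)
                    patt = mtdswap_test_patt(pass + page);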
923 | static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d, | ||
924 | struct swap_eb *eb) | ||
925 | { | ||
926 | struct mtd_info *mtd = d->mtd; | ||
927 | unsigned int test, i, j, patt, mtd_pages; | ||
928 | loff_t base, pos; | ||
929 | unsigned int *p1 = (unsigned int *)d->page_buf; | ||
930 | unsigned char *p2 = (unsigned char *)d->oob_buf; | ||
931 | struct mtd_oob_ops ops; | ||
932 | int ret; | ||
933 | |||
934 | ops.mode = MTD_OOB_AUTO; | ||
935 | ops.len = mtd->writesize; | ||
936 | ops.ooblen = mtd->ecclayout->oobavail; | ||
937 | ops.ooboffs = 0; | ||
938 | ops.datbuf = d->page_buf; | ||
939 | ops.oobbuf = d->oob_buf; | ||
940 | base = mtdswap_eb_offset(d, eb); | ||
941 | mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize; | ||
942 | |||
943 | for (test = 0; test < 2; test++) { | ||
944 | pos = base; | ||
945 | for (i = 0; i < mtd_pages; i++) { | ||
946 | patt = mtdswap_test_patt(test + i); | ||
947 | memset(d->page_buf, patt, mtd->writesize); | ||
948 | memset(d->oob_buf, patt, mtd->ecclayout->oobavail); | ||
949 | ret = mtd->write_oob(mtd, pos, &ops); | ||
950 | if (ret) | ||
951 | goto error; | ||
952 | |||
953 | pos += mtd->writesize; | ||
954 | } | ||
955 | |||
956 | pos = base; | ||
957 | for (i = 0; i < mtd_pages; i++) { | ||
958 | ret = mtd->read_oob(mtd, pos, &ops); | ||
959 | if (ret) | ||
960 | goto error; | ||
961 | |||
962 | patt = mtdswap_test_patt(test + i); | ||
963 | for (j = 0; j < mtd->writesize/sizeof(int); j++) | ||
964 | if (p1[j] != patt) | ||
965 | goto error; | ||
966 | |||
967 | for (j = 0; j < mtd->ecclayout->oobavail; j++) | ||
968 | if (p2[j] != (unsigned char)patt) | ||
969 | goto error; | ||
970 | |||
971 | pos += mtd->writesize; | ||
972 | } | ||
973 | |||
974 | ret = mtdswap_erase_block(d, eb); | ||
975 | if (ret) | ||
976 | goto error; | ||
977 | } | ||
978 | |||
979 | eb->flags &= ~EBLOCK_READERR; | ||
980 | return 1; | ||
981 | |||
982 | error: | ||
983 | mtdswap_handle_badblock(d, eb); | ||
984 | return 0; | ||
985 | } | ||
986 | |||
987 | static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background) | ||
988 | { | ||
989 | struct swap_eb *eb; | ||
990 | int ret; | ||
991 | |||
992 | if (d->spare_eblks < MIN_SPARE_EBLOCKS) | ||
993 | return 1; | ||
994 | |||
995 | eb = mtdswap_pick_gc_eblk(d, background); | ||
996 | if (!eb) | ||
997 | return 1; | ||
998 | |||
999 | ret = mtdswap_gc_eblock(d, eb); | ||
1000 | if (ret == -ENOSPC) | ||
1001 | return 1; | ||
1002 | |||
1003 | if (eb->flags & EBLOCK_FAILED) { | ||
1004 | mtdswap_handle_badblock(d, eb); | ||
1005 | return 0; | ||
1006 | } | ||
1007 | |||
1008 | eb->flags &= ~EBLOCK_BITFLIP; | ||
1009 | ret = mtdswap_erase_block(d, eb); | ||
1010 | if ((eb->flags & EBLOCK_READERR) && | ||
1011 | (ret || !mtdswap_eblk_passes(d, eb))) | ||
1012 | return 0; | ||
1013 | |||
1014 | if (ret == 0) | ||
1015 | ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN); | ||
1016 | |||
1017 | if (ret == 0) | ||
1018 | mtdswap_rb_add(d, eb, MTDSWAP_CLEAN); | ||
1019 | else if (ret != -EIO && ret != -EBADMSG) | ||
1020 | mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); | ||
1021 | |||
1022 | return 0; | ||
1023 | } | ||
1024 | |||
1025 | static void mtdswap_background(struct mtd_blktrans_dev *dev) | ||
1026 | { | ||
1027 | struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); | ||
1028 | int ret; | ||
1029 | |||
1030 | while (1) { | ||
1031 | ret = mtdswap_gc(d, 1); | ||
1032 | if (ret || mtd_blktrans_cease_background(dev)) | ||
1033 | return; | ||
1034 | } | ||
1035 | } | ||
1036 | |||
1037 | static void mtdswap_cleanup(struct mtdswap_dev *d) | ||
1038 | { | ||
1039 | vfree(d->eb_data); | ||
1040 | vfree(d->revmap); | ||
1041 | vfree(d->page_data); | ||
1042 | kfree(d->oob_buf); | ||
1043 | kfree(d->page_buf); | ||
1044 | } | ||
1045 | |||
1046 | static int mtdswap_flush(struct mtd_blktrans_dev *dev) | ||
1047 | { | ||
1048 | struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); | ||
1049 | |||
1050 | if (d->mtd->sync) | ||
1051 | d->mtd->sync(d->mtd); | ||
1052 | return 0; | ||
1053 | } | ||
1054 | |||
1055 | static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size) | ||
1056 | { | ||
1057 | loff_t offset; | ||
1058 | unsigned int badcnt; | ||
1059 | |||
1060 | badcnt = 0; | ||
1061 | |||
1062 | if (mtd->block_isbad) | ||
1063 | for (offset = 0; offset < size; offset += mtd->erasesize) | ||
1064 | if (mtd->block_isbad(mtd, offset)) | ||
1065 | badcnt++; | ||
1066 | |||
1067 | return badcnt; | ||
1068 | } | ||
1069 | |||
1070 | static int mtdswap_writesect(struct mtd_blktrans_dev *dev, | ||
1071 | unsigned long page, char *buf) | ||
1072 | { | ||
1073 | struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); | ||
1074 | unsigned int newblock, mapped; | ||
1075 | struct swap_eb *eb; | ||
1076 | int ret; | ||
1077 | |||
1078 | d->sect_write_count++; | ||
1079 | |||
1080 | if (d->spare_eblks < MIN_SPARE_EBLOCKS) | ||
1081 | return -ENOSPC; | ||
1082 | |||
1083 | if (header) { | ||
1084 | /* Ignore writes to the header page */ | ||
1085 | if (unlikely(page == 0)) | ||
1086 | return 0; | ||
1087 | |||
1088 | page--; | ||
1089 | } | ||
1090 | |||
1091 | mapped = d->page_data[page]; | ||
1092 | if (mapped <= BLOCK_MAX) { | ||
1093 | eb = d->eb_data + (mapped / d->pages_per_eblk); | ||
1094 | eb->active_count--; | ||
1095 | mtdswap_store_eb(d, eb); | ||
1096 | d->page_data[page] = BLOCK_UNDEF; | ||
1097 | d->revmap[mapped] = PAGE_UNDEF; | ||
1098 | } | ||
1099 | |||
1100 | ret = mtdswap_write_block(d, buf, page, &newblock, 0); | ||
1101 | d->mtd_write_count++; | ||
1102 | |||
1103 | if (ret < 0) | ||
1104 | return ret; | ||
1105 | |||
1106 | eb = d->eb_data + (newblock / d->pages_per_eblk); | ||
1107 | d->page_data[page] = newblock; | ||
1108 | |||
1109 | return 0; | ||
1110 | } | ||
1111 | |||
1112 | /* Provide a dummy swap header for the kernel */ | ||
1113 | static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf) | ||
1114 | { | ||
1115 | union swap_header *hd = (union swap_header *)(buf); | ||
1116 | |||
1117 | memset(buf, 0, PAGE_SIZE - 10); | ||
1118 | |||
1119 | hd->info.version = 1; | ||
1120 | hd->info.last_page = d->mbd_dev->size - 1; | ||
1121 | hd->info.nr_badpages = 0; | ||
1122 | |||
1123 | memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10); | ||
1124 | |||
1125 | return 0; | ||
1126 | } | ||
1127 | |||
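For reference, the layout written here is what the kernel's swapon path checks for: a first page whose last 10 bytes carry the "SWAPSPACE2" magic. A sketch of the corresponding check (hypothetical helper, not part of this patch):

    #include <string.h>

    /* true if the first page carries a version-2 swap signature */
    static int looks_like_swap(const char *page, unsigned long page_size)
    {
            return memcmp(page + page_size - 10, "SWAPSPACE2", 10) == 0;
    }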
1128 | static int mtdswap_readsect(struct mtd_blktrans_dev *dev, | ||
1129 | unsigned long page, char *buf) | ||
1130 | { | ||
1131 | struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); | ||
1132 | struct mtd_info *mtd = d->mtd; | ||
1133 | unsigned int realblock, retries; | ||
1134 | loff_t readpos; | ||
1135 | struct swap_eb *eb; | ||
1136 | size_t retlen; | ||
1137 | int ret; | ||
1138 | |||
1139 | d->sect_read_count++; | ||
1140 | |||
1141 | if (header) { | ||
1142 | if (unlikely(page == 0)) | ||
1143 | return mtdswap_auto_header(d, buf); | ||
1144 | |||
1145 | page--; | ||
1146 | } | ||
1147 | |||
1148 | realblock = d->page_data[page]; | ||
1149 | if (realblock > BLOCK_MAX) { | ||
1150 | memset(buf, 0x0, PAGE_SIZE); | ||
1151 | if (realblock == BLOCK_UNDEF) | ||
1152 | return 0; | ||
1153 | else | ||
1154 | return -EIO; | ||
1155 | } | ||
1156 | |||
1157 | eb = d->eb_data + (realblock / d->pages_per_eblk); | ||
1158 | BUG_ON(d->revmap[realblock] == PAGE_UNDEF); | ||
1159 | |||
1160 | readpos = (loff_t)realblock << PAGE_SHIFT; | ||
1161 | retries = 0; | ||
1162 | |||
1163 | retry: | ||
1164 | ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf); | ||
1165 | |||
1166 | d->mtd_read_count++; | ||
1167 | if (ret == -EUCLEAN) { | ||
1168 | eb->flags |= EBLOCK_BITFLIP; | ||
1169 | mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); | ||
1170 | ret = 0; | ||
1171 | } | ||
1172 | |||
1173 | if (ret < 0) { | ||
1174 | dev_err(d->dev, "Read error %d\n", ret); | ||
1175 | eb->flags |= EBLOCK_READERR; | ||
1176 | mtdswap_rb_add(d, eb, MTDSWAP_FAILING); | ||
1177 | retries++; | ||
1178 | if (retries < MTDSWAP_IO_RETRIES) | ||
1179 | goto retry; | ||
1180 | |||
1181 | return ret; | ||
1182 | } | ||
1183 | |||
1184 | if (retlen != PAGE_SIZE) { | ||
1185 | dev_err(d->dev, "Short read %zd\n", retlen); | ||
1186 | return -EIO; | ||
1187 | } | ||
1188 | |||
1189 | return 0; | ||
1190 | } | ||
1191 | |||
1192 | static int mtdswap_discard(struct mtd_blktrans_dev *dev, unsigned long first, | ||
1193 | unsigned nr_pages) | ||
1194 | { | ||
1195 | struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); | ||
1196 | unsigned long page; | ||
1197 | struct swap_eb *eb; | ||
1198 | unsigned int mapped; | ||
1199 | |||
1200 | d->discard_count++; | ||
1201 | |||
1202 | for (page = first; page < first + nr_pages; page++) { | ||
1203 | mapped = d->page_data[page]; | ||
1204 | if (mapped <= BLOCK_MAX) { | ||
1205 | eb = d->eb_data + (mapped / d->pages_per_eblk); | ||
1206 | eb->active_count--; | ||
1207 | mtdswap_store_eb(d, eb); | ||
1208 | d->page_data[page] = BLOCK_UNDEF; | ||
1209 | d->revmap[mapped] = PAGE_UNDEF; | ||
1210 | d->discard_page_count++; | ||
1211 | } else if (mapped == BLOCK_ERROR) { | ||
1212 | d->page_data[page] = BLOCK_UNDEF; | ||
1213 | d->discard_page_count++; | ||
1214 | } | ||
1215 | } | ||
1216 | |||
1217 | return 0; | ||
1218 | } | ||
1219 | |||
1220 | static int mtdswap_show(struct seq_file *s, void *data) | ||
1221 | { | ||
1222 | struct mtdswap_dev *d = (struct mtdswap_dev *) s->private; | ||
1223 | unsigned long sum; | ||
1224 | unsigned int count[MTDSWAP_TREE_CNT]; | ||
1225 | unsigned int min[MTDSWAP_TREE_CNT]; | ||
1226 | unsigned int max[MTDSWAP_TREE_CNT]; | ||
1227 | unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages; | ||
1228 | uint64_t use_size; | ||
1229 | char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip", | ||
1230 | "failing"}; | ||
1231 | |||
1232 | mutex_lock(&d->mbd_dev->lock); | ||
1233 | |||
1234 | for (i = 0; i < MTDSWAP_TREE_CNT; i++) { | ||
1235 | struct rb_root *root = &d->trees[i].root; | ||
1236 | |||
1237 | if (root->rb_node) { | ||
1238 | count[i] = d->trees[i].count; | ||
1239 | min[i] = rb_entry(rb_first(root), struct swap_eb, | ||
1240 | rb)->erase_count; | ||
1241 | max[i] = rb_entry(rb_last(root), struct swap_eb, | ||
1242 | rb)->erase_count; | ||
1243 | } else | ||
1244 | count[i] = 0; | ||
1245 | } | ||
1246 | |||
1247 | if (d->curr_write) { | ||
1248 | cw = 1; | ||
1249 | cwp = d->curr_write_pos; | ||
1250 | cwecount = d->curr_write->erase_count; | ||
1251 | } | ||
1252 | |||
1253 | sum = 0; | ||
1254 | for (i = 0; i < d->eblks; i++) | ||
1255 | sum += d->eb_data[i].erase_count; | ||
1256 | |||
1257 | use_size = (uint64_t)d->eblks * d->mtd->erasesize; | ||
1258 | bb_cnt = mtdswap_badblocks(d->mtd, use_size); | ||
1259 | |||
1260 | mapped = 0; | ||
1261 | pages = d->mbd_dev->size; | ||
1262 | for (i = 0; i < pages; i++) | ||
1263 | if (d->page_data[i] != BLOCK_UNDEF) | ||
1264 | mapped++; | ||
1265 | |||
1266 | mutex_unlock(&d->mbd_dev->lock); | ||
1267 | |||
1268 | for (i = 0; i < MTDSWAP_TREE_CNT; i++) { | ||
1269 | if (!count[i]) | ||
1270 | continue; | ||
1271 | |||
1272 | if (min[i] != max[i]) | ||
1273 | seq_printf(s, "%s:\t%5d erase blocks, erased min %d, " | ||
1274 | "max %d times\n", | ||
1275 | name[i], count[i], min[i], max[i]); | ||
1276 | else | ||
1277 | seq_printf(s, "%s:\t%5d erase blocks, all erased %d " | ||
1278 | "times\n", name[i], count[i], min[i]); | ||
1279 | } | ||
1280 | |||
1281 | if (bb_cnt) | ||
1282 | seq_printf(s, "bad:\t%5u erase blocks\n", bb_cnt); | ||
1283 | |||
1284 | if (cw) | ||
1285 | seq_printf(s, "current erase block: %u pages used, %u free, " | ||
1286 | "erased %u times\n", | ||
1287 | cwp, d->pages_per_eblk - cwp, cwecount); | ||
1288 | |||
1289 | seq_printf(s, "total erasures: %lu\n", sum); | ||
1290 | |||
1291 | seq_printf(s, "\n"); | ||
1292 | |||
1293 | seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count); | ||
1294 | seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count); | ||
1295 | seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count); | ||
1296 | seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count); | ||
1297 | seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count); | ||
1298 | seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count); | ||
1299 | |||
1300 | seq_printf(s, "\n"); | ||
1301 | seq_printf(s, "total pages: %u\n", pages); | ||
1302 | seq_printf(s, "pages mapped: %u\n", mapped); | ||
1303 | |||
1304 | return 0; | ||
1305 | } | ||
1306 | |||
1307 | static int mtdswap_open(struct inode *inode, struct file *file) | ||
1308 | { | ||
1309 | return single_open(file, mtdswap_show, inode->i_private); | ||
1310 | } | ||
1311 | |||
1312 | static const struct file_operations mtdswap_fops = { | ||
1313 | .open = mtdswap_open, | ||
1314 | .read = seq_read, | ||
1315 | .llseek = seq_lseek, | ||
1316 | .release = single_release, | ||
1317 | }; | ||
1318 | |||
1319 | static int mtdswap_add_debugfs(struct mtdswap_dev *d) | ||
1320 | { | ||
1321 | struct gendisk *gd = d->mbd_dev->disk; | ||
1322 | struct device *dev = disk_to_dev(gd); | ||
1323 | |||
1324 | struct dentry *root; | ||
1325 | struct dentry *dent; | ||
1326 | |||
1327 | root = debugfs_create_dir(gd->disk_name, NULL); | ||
1328 | if (IS_ERR(root)) | ||
1329 | return 0; | ||
1330 | |||
1331 | if (!root) { | ||
1332 | dev_err(dev, "failed to initialize debugfs\n"); | ||
1333 | return -1; | ||
1334 | } | ||
1335 | |||
1336 | d->debugfs_root = root; | ||
1337 | |||
1338 | dent = debugfs_create_file("stats", S_IRUSR, root, d, | ||
1339 | &mtdswap_fops); | ||
1340 | if (!dent) { | ||
1341 | dev_err(d->dev, "debugfs_create_file failed\n"); | ||
1342 | debugfs_remove_recursive(root); | ||
1343 | d->debugfs_root = NULL; | ||
1344 | return -1; | ||
1345 | } | ||
1346 | |||
1347 | return 0; | ||
1348 | } | ||
1349 | |||
1350 | static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks, | ||
1351 | unsigned int spare_cnt) | ||
1352 | { | ||
1353 | struct mtd_info *mtd = d->mbd_dev->mtd; | ||
1354 | unsigned int i, eblk_bytes, pages, blocks; | ||
1355 | int ret = -ENOMEM; | ||
1356 | |||
1357 | d->mtd = mtd; | ||
1358 | d->eblks = eblocks; | ||
1359 | d->spare_eblks = spare_cnt; | ||
1360 | d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT; | ||
1361 | |||
1362 | pages = d->mbd_dev->size; | ||
1363 | blocks = eblocks * d->pages_per_eblk; | ||
1364 | |||
1365 | for (i = 0; i < MTDSWAP_TREE_CNT; i++) | ||
1366 | d->trees[i].root = RB_ROOT; | ||
1367 | |||
1368 | d->page_data = vmalloc(sizeof(int)*pages); | ||
1369 | if (!d->page_data) | ||
1370 | goto page_data_fail; | ||
1371 | |||
1372 | d->revmap = vmalloc(sizeof(int)*blocks); | ||
1373 | if (!d->revmap) | ||
1374 | goto revmap_fail; | ||
1375 | |||
1376 | eblk_bytes = sizeof(struct swap_eb)*d->eblks; | ||
1377 | d->eb_data = vmalloc(eblk_bytes); | ||
1378 | if (!d->eb_data) | ||
1379 | goto eb_data_fail; | ||
1380 | |||
1381 | memset(d->eb_data, 0, eblk_bytes); | ||
1382 | for (i = 0; i < pages; i++) | ||
1383 | d->page_data[i] = BLOCK_UNDEF; | ||
1384 | |||
1385 | for (i = 0; i < blocks; i++) | ||
1386 | d->revmap[i] = PAGE_UNDEF; | ||
1387 | |||
1388 | d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
1389 | if (!d->page_buf) | ||
1390 | goto page_buf_fail; | ||
1391 | |||
1392 | d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL); | ||
1393 | if (!d->oob_buf) | ||
1394 | goto oob_buf_fail; | ||
1395 | |||
1396 | mtdswap_scan_eblks(d); | ||
1397 | |||
1398 | return 0; | ||
1399 | |||
1400 | oob_buf_fail: | ||
1401 | kfree(d->page_buf); | ||
1402 | page_buf_fail: | ||
1403 | vfree(d->eb_data); | ||
1404 | eb_data_fail: | ||
1405 | vfree(d->revmap); | ||
1406 | revmap_fail: | ||
1407 | vfree(d->page_data); | ||
1408 | page_data_fail: | ||
1409 | printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret); | ||
1410 | return ret; | ||
1411 | } | ||
1412 | |||
1413 | static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | ||
1414 | { | ||
1415 | struct mtdswap_dev *d; | ||
1416 | struct mtd_blktrans_dev *mbd_dev; | ||
1417 | char *parts; | ||
1418 | char *this_opt; | ||
1419 | unsigned long part; | ||
1420 | unsigned int eblocks, eavailable, bad_blocks, spare_cnt; | ||
1421 | uint64_t swap_size, use_size, size_limit; | ||
1422 | struct nand_ecclayout *oinfo; | ||
1423 | int ret; | ||
1424 | |||
1425 | parts = &partitions[0]; | ||
1426 | if (!*parts) | ||
1427 | return; | ||
1428 | |||
1429 | while ((this_opt = strsep(&parts, ",")) != NULL) { | ||
1430 | if (strict_strtoul(this_opt, 0, &part) < 0) | ||
1431 | return; | ||
1432 | |||
1433 | if (mtd->index == part) | ||
1434 | break; | ||
1435 | } | ||
1436 | |||
1437 | if (mtd->index != part) | ||
1438 | return; | ||
1439 | |||
1440 | if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) { | ||
1441 | printk(KERN_ERR "%s: Erase size %u not multiple of PAGE_SIZE " | ||
1442 | "%lu\n", MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE); | ||
1443 | return; | ||
1444 | } | ||
1445 | |||
1446 | if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) { | ||
1447 | printk(KERN_ERR "%s: PAGE_SIZE %lu not multiple of write size" | ||
1448 | " %u\n", MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize); | ||
1449 | return; | ||
1450 | } | ||
1451 | |||
1452 | oinfo = mtd->ecclayout; | ||
1453 | if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) { | ||
1454 | printk(KERN_ERR "%s: Not enough free bytes in OOB, " | ||
1455 | "%d available, %lu needed.\n", | ||
1456 | MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE); | ||
1457 | return; | ||
1458 | } | ||
1459 | |||
1460 | if (spare_eblocks > 100) | ||
1461 | spare_eblocks = 100; | ||
1462 | |||
1463 | use_size = mtd->size; | ||
1464 | size_limit = (uint64_t) BLOCK_MAX * PAGE_SIZE; | ||
1465 | |||
1466 | if (mtd->size > size_limit) { | ||
1467 | printk(KERN_WARNING "%s: Device too large. Limiting size to " | ||
1468 | "%llu bytes\n", MTDSWAP_PREFIX, size_limit); | ||
1469 | use_size = size_limit; | ||
1470 | } | ||
1471 | |||
1472 | eblocks = mtd_div_by_eb(use_size, mtd); | ||
1473 | use_size = eblocks * mtd->erasesize; | ||
1474 | bad_blocks = mtdswap_badblocks(mtd, use_size); | ||
1475 | eavailable = eblocks - bad_blocks; | ||
1476 | |||
1477 | if (eavailable < MIN_ERASE_BLOCKS) { | ||
1478 | printk(KERN_ERR "%s: Not enough erase blocks. %u available, " | ||
1479 | "%d needed\n", MTDSWAP_PREFIX, eavailable, | ||
1480 | MIN_ERASE_BLOCKS); | ||
1481 | return; | ||
1482 | } | ||
1483 | |||
1484 | spare_cnt = div_u64((uint64_t)eavailable * spare_eblocks, 100); | ||
1485 | |||
1486 | if (spare_cnt < MIN_SPARE_EBLOCKS) | ||
1487 | spare_cnt = MIN_SPARE_EBLOCKS; | ||
1488 | |||
1489 | if (spare_cnt > eavailable - 1) | ||
1490 | spare_cnt = eavailable - 1; | ||
1491 | |||
1492 | swap_size = (uint64_t)(eavailable - spare_cnt) * mtd->erasesize + | ||
1493 | (header ? PAGE_SIZE : 0); | ||
1494 | |||
1495 | printk(KERN_INFO "%s: Enabling MTD swap on device %lu, size %llu KB, " | ||
1496 | "%u spare, %u bad blocks\n", | ||
1497 | MTDSWAP_PREFIX, part, swap_size / 1024, spare_cnt, bad_blocks); | ||
1498 | |||
1499 | d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL); | ||
1500 | if (!d) | ||
1501 | return; | ||
1502 | |||
1503 | mbd_dev = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL); | ||
1504 | if (!mbd_dev) { | ||
1505 | kfree(d); | ||
1506 | return; | ||
1507 | } | ||
1508 | |||
1509 | d->mbd_dev = mbd_dev; | ||
1510 | mbd_dev->priv = d; | ||
1511 | |||
1512 | mbd_dev->mtd = mtd; | ||
1513 | mbd_dev->devnum = mtd->index; | ||
1514 | mbd_dev->size = swap_size >> PAGE_SHIFT; | ||
1515 | mbd_dev->tr = tr; | ||
1516 | |||
1517 | if (!(mtd->flags & MTD_WRITEABLE)) | ||
1518 | mbd_dev->readonly = 1; | ||
1519 | |||
1520 | if (mtdswap_init(d, eblocks, spare_cnt) < 0) | ||
1521 | goto init_failed; | ||
1522 | |||
1523 | if (add_mtd_blktrans_dev(mbd_dev) < 0) | ||
1524 | goto cleanup; | ||
1525 | |||
1526 | d->dev = disk_to_dev(mbd_dev->disk); | ||
1527 | |||
1528 | ret = mtdswap_add_debugfs(d); | ||
1529 | if (ret < 0) | ||
1530 | goto debugfs_failed; | ||
1531 | |||
1532 | return; | ||
1533 | |||
1534 | debugfs_failed: | ||
1535 | del_mtd_blktrans_dev(mbd_dev); | ||
1536 | |||
1537 | cleanup: | ||
1538 | mtdswap_cleanup(d); | ||
1539 | |||
1540 | init_failed: | ||
1541 | kfree(mbd_dev); | ||
1542 | kfree(d); | ||
1543 | } | ||
1544 | |||
1545 | static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev) | ||
1546 | { | ||
1547 | struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); | ||
1548 | |||
1549 | debugfs_remove_recursive(d->debugfs_root); | ||
1550 | del_mtd_blktrans_dev(dev); | ||
1551 | mtdswap_cleanup(d); | ||
1552 | kfree(d); | ||
1553 | } | ||
1554 | |||
1555 | static struct mtd_blktrans_ops mtdswap_ops = { | ||
1556 | .name = "mtdswap", | ||
1557 | .major = 0, | ||
1558 | .part_bits = 0, | ||
1559 | .blksize = PAGE_SIZE, | ||
1560 | .flush = mtdswap_flush, | ||
1561 | .readsect = mtdswap_readsect, | ||
1562 | .writesect = mtdswap_writesect, | ||
1563 | .discard = mtdswap_discard, | ||
1564 | .background = mtdswap_background, | ||
1565 | .add_mtd = mtdswap_add_mtd, | ||
1566 | .remove_dev = mtdswap_remove_dev, | ||
1567 | .owner = THIS_MODULE, | ||
1568 | }; | ||
1569 | |||
1570 | static int __init mtdswap_modinit(void) | ||
1571 | { | ||
1572 | return register_mtd_blktrans(&mtdswap_ops); | ||
1573 | } | ||
1574 | |||
1575 | static void __exit mtdswap_modexit(void) | ||
1576 | { | ||
1577 | deregister_mtd_blktrans(&mtdswap_ops); | ||
1578 | } | ||
1579 | |||
1580 | module_init(mtdswap_modinit); | ||
1581 | module_exit(mtdswap_modexit); | ||
1582 | |||
1583 | |||
1584 | MODULE_LICENSE("GPL"); | ||
1585 | MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>"); | ||
1586 | MODULE_DESCRIPTION("Block device access to an MTD suitable for use as " | ||
1587 | "swap space"); | ||
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 4f6c06f1632..a92054e945e 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -31,6 +31,21 @@ config MTD_NAND_VERIFY_WRITE | |||
31 | device thinks the write was successful, a bit could have been | 31 | device thinks the write was successful, a bit could have been |
32 | flipped accidentally due to device wear or something else. | 32 | flipped accidentally due to device wear or something else. |
33 | 33 | ||
34 | config MTD_NAND_BCH | ||
35 | tristate | ||
36 | select BCH | ||
37 | depends on MTD_NAND_ECC_BCH | ||
38 | default MTD_NAND | ||
39 | |||
40 | config MTD_NAND_ECC_BCH | ||
41 | bool "Support software BCH ECC" | ||
42 | default n | ||
43 | help | ||
44 | This enables support for software BCH error correction. Binary BCH | ||
45 | codes are more powerful and more CPU-intensive than traditional Hamming | ||
46 | ECC codes. They are used with NAND devices requiring more than 1 bit | ||
47 | of error correction. | ||
48 | |||
34 | config MTD_SM_COMMON | 49 | config MTD_SM_COMMON |
35 | tristate | 50 | tristate |
36 | default n | 51 | default n |
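As a usage sketch, a board driver would opt into the new mode before nand_scan_tail() roughly as follows. NAND_ECC_SOFT_BCH and the 4-bit-per-512-byte default come from the nand_base.c hunk later in this patch; any other size/bytes pairing must be one that nand_bch_init() accepts:

    /* sketch only: a board driver enabling software BCH ECC */
    chip->ecc.mode  = NAND_ECC_SOFT_BCH;
    chip->ecc.size  = 512; /* ECC step size in bytes */
    chip->ecc.bytes = 7;   /* 7 ECC bytes -> 4-bit correction per step */
    /* leaving size/bytes at 0 selects this same default on large-page chips */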
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 8ad6faec72c..5745d831168 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_MTD_NAND) += nand.o | 5 | obj-$(CONFIG_MTD_NAND) += nand.o |
6 | obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o | 6 | obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o |
7 | obj-$(CONFIG_MTD_NAND_BCH) += nand_bch.o | ||
7 | obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o | 8 | obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o |
8 | obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o | 9 | obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o |
9 | 10 | ||
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index ccce0f03b5d..6fae04b3fc6 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c | |||
@@ -48,6 +48,9 @@ | |||
48 | #define no_ecc 0 | 48 | #define no_ecc 0 |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | static int use_dma = 1; | ||
52 | module_param(use_dma, int, 0); | ||
53 | |||
51 | static int on_flash_bbt = 0; | 54 | static int on_flash_bbt = 0; |
52 | module_param(on_flash_bbt, int, 0); | 55 | module_param(on_flash_bbt, int, 0); |
53 | 56 | ||
@@ -89,11 +92,20 @@ struct atmel_nand_host { | |||
89 | struct nand_chip nand_chip; | 92 | struct nand_chip nand_chip; |
90 | struct mtd_info mtd; | 93 | struct mtd_info mtd; |
91 | void __iomem *io_base; | 94 | void __iomem *io_base; |
95 | dma_addr_t io_phys; | ||
92 | struct atmel_nand_data *board; | 96 | struct atmel_nand_data *board; |
93 | struct device *dev; | 97 | struct device *dev; |
94 | void __iomem *ecc; | 98 | void __iomem *ecc; |
99 | |||
100 | struct completion comp; | ||
101 | struct dma_chan *dma_chan; | ||
95 | }; | 102 | }; |
96 | 103 | ||
104 | static int cpu_has_dma(void) | ||
105 | { | ||
106 | return cpu_is_at91sam9rl() || cpu_is_at91sam9g45(); | ||
107 | } | ||
108 | |||
97 | /* | 109 | /* |
98 | * Enable NAND. | 110 | * Enable NAND. |
99 | */ | 111 | */ |
@@ -150,7 +162,7 @@ static int atmel_nand_device_ready(struct mtd_info *mtd) | |||
150 | /* | 162 | /* |
151 | * Minimal-overhead PIO for data access. | 163 | * Minimal-overhead PIO for data access. |
152 | */ | 164 | */ |
153 | static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) | 165 | static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len) |
154 | { | 166 | { |
155 | struct nand_chip *nand_chip = mtd->priv; | 167 | struct nand_chip *nand_chip = mtd->priv; |
156 | 168 | ||
@@ -164,7 +176,7 @@ static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len) | |||
164 | __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2); | 176 | __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2); |
165 | } | 177 | } |
166 | 178 | ||
167 | static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) | 179 | static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len) |
168 | { | 180 | { |
169 | struct nand_chip *nand_chip = mtd->priv; | 181 | struct nand_chip *nand_chip = mtd->priv; |
170 | 182 | ||
@@ -178,6 +190,121 @@ static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len) | |||
178 | __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2); | 190 | __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2); |
179 | } | 191 | } |
180 | 192 | ||
193 | static void dma_complete_func(void *completion) | ||
194 | { | ||
195 | complete(completion); | ||
196 | } | ||
197 | |||
198 | static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len, | ||
199 | int is_read) | ||
200 | { | ||
201 | struct dma_device *dma_dev; | ||
202 | enum dma_ctrl_flags flags; | ||
203 | dma_addr_t dma_src_addr, dma_dst_addr, phys_addr; | ||
204 | struct dma_async_tx_descriptor *tx = NULL; | ||
205 | dma_cookie_t cookie; | ||
206 | struct nand_chip *chip = mtd->priv; | ||
207 | struct atmel_nand_host *host = chip->priv; | ||
208 | void *p = buf; | ||
209 | int err = -EIO; | ||
210 | enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | ||
211 | |||
212 | if (buf >= high_memory) { | ||
213 | struct page *pg; | ||
214 | |||
215 | if (((size_t)buf & PAGE_MASK) != | ||
216 | ((size_t)(buf + len - 1) & PAGE_MASK)) { | ||
217 | dev_warn(host->dev, "Buffer does not fit in one page\n"); | ||
218 | goto err_buf; | ||
219 | } | ||
220 | |||
221 | pg = vmalloc_to_page(buf); | ||
222 | if (!pg) { | ||
223 | dev_err(host->dev, "Failed to vmalloc_to_page\n"); | ||
224 | goto err_buf; | ||
225 | } | ||
226 | p = page_address(pg) + ((size_t)buf & ~PAGE_MASK); | ||
227 | } | ||
228 | |||
229 | dma_dev = host->dma_chan->device; | ||
230 | |||
231 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | | ||
232 | DMA_COMPL_SKIP_DEST_UNMAP; | ||
233 | |||
234 | phys_addr = dma_map_single(dma_dev->dev, p, len, dir); | ||
235 | if (dma_mapping_error(dma_dev->dev, phys_addr)) { | ||
236 | dev_err(host->dev, "Failed to dma_map_single\n"); | ||
237 | goto err_buf; | ||
238 | } | ||
239 | |||
240 | if (is_read) { | ||
241 | dma_src_addr = host->io_phys; | ||
242 | dma_dst_addr = phys_addr; | ||
243 | } else { | ||
244 | dma_src_addr = phys_addr; | ||
245 | dma_dst_addr = host->io_phys; | ||
246 | } | ||
247 | |||
248 | tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr, | ||
249 | dma_src_addr, len, flags); | ||
250 | if (!tx) { | ||
251 | dev_err(host->dev, "Failed to prepare DMA memcpy\n"); | ||
252 | goto err_dma; | ||
253 | } | ||
254 | |||
255 | init_completion(&host->comp); | ||
256 | tx->callback = dma_complete_func; | ||
257 | tx->callback_param = &host->comp; | ||
258 | |||
259 | cookie = tx->tx_submit(tx); | ||
260 | if (dma_submit_error(cookie)) { | ||
261 | dev_err(host->dev, "Failed to do DMA tx_submit\n"); | ||
262 | goto err_dma; | ||
263 | } | ||
264 | |||
265 | dma_async_issue_pending(host->dma_chan); | ||
266 | wait_for_completion(&host->comp); | ||
267 | |||
268 | err = 0; | ||
269 | |||
270 | err_dma: | ||
271 | dma_unmap_single(dma_dev->dev, phys_addr, len, dir); | ||
272 | err_buf: | ||
273 | if (err != 0) | ||
274 | dev_warn(host->dev, "Fall back to CPU I/O\n"); | ||
275 | return err; | ||
276 | } | ||
277 | |||
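The high_memory check above exists because vmalloc'ed buffers are only virtually contiguous: the DMA engine needs the backing page, and a span crossing a page boundary has no guaranteed physical contiguity, hence the bail-out to PIO. Condensed, the translation is:

    /* kernel context assumed; mirrors the logic in atmel_nand_dma_op() */
    struct page *pg = vmalloc_to_page(buf);
    void *p = page_address(pg) + ((size_t)buf & ~PAGE_MASK);
    /* p is safe to dma_map_single() only while [buf, buf + len) stays
     * within this single page */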
278 | static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) | ||
279 | { | ||
280 | struct nand_chip *chip = mtd->priv; | ||
281 | struct atmel_nand_host *host = chip->priv; | ||
282 | |||
283 | if (use_dma && len >= mtd->oobsize) | ||
284 | if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) | ||
285 | return; | ||
286 | |||
287 | if (host->board->bus_width_16) | ||
288 | atmel_read_buf16(mtd, buf, len); | ||
289 | else | ||
290 | atmel_read_buf8(mtd, buf, len); | ||
291 | } | ||
292 | |||
293 | static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) | ||
294 | { | ||
295 | struct nand_chip *chip = mtd->priv; | ||
296 | struct atmel_nand_host *host = chip->priv; | ||
297 | |||
298 | if (use_dma && len >= mtd->oobsize) | ||
299 | if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) | ||
300 | return; | ||
301 | |||
302 | if (host->board->bus_width_16) | ||
303 | atmel_write_buf16(mtd, buf, len); | ||
304 | else | ||
305 | atmel_write_buf8(mtd, buf, len); | ||
306 | } | ||
307 | |||
181 | /* | 308 | /* |
182 | * Calculate HW ECC | 309 | * Calculate HW ECC |
183 | * | 310 | * |
@@ -398,6 +525,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
398 | return -ENOMEM; | 525 | return -ENOMEM; |
399 | } | 526 | } |
400 | 527 | ||
528 | host->io_phys = (dma_addr_t)mem->start; | ||
529 | |||
401 | host->io_base = ioremap(mem->start, mem->end - mem->start + 1); | 530 | host->io_base = ioremap(mem->start, mem->end - mem->start + 1); |
402 | if (host->io_base == NULL) { | 531 | if (host->io_base == NULL) { |
403 | printk(KERN_ERR "atmel_nand: ioremap failed\n"); | 532 | printk(KERN_ERR "atmel_nand: ioremap failed\n"); |
@@ -448,14 +577,11 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
448 | 577 | ||
449 | nand_chip->chip_delay = 20; /* 20us command delay time */ | 578 | nand_chip->chip_delay = 20; /* 20us command delay time */ |
450 | 579 | ||
451 | if (host->board->bus_width_16) { /* 16-bit bus width */ | 580 | if (host->board->bus_width_16) /* 16-bit bus width */ |
452 | nand_chip->options |= NAND_BUSWIDTH_16; | 581 | nand_chip->options |= NAND_BUSWIDTH_16; |
453 | nand_chip->read_buf = atmel_read_buf16; | 582 | |
454 | nand_chip->write_buf = atmel_write_buf16; | 583 | nand_chip->read_buf = atmel_read_buf; |
455 | } else { | 584 | nand_chip->write_buf = atmel_write_buf; |
456 | nand_chip->read_buf = atmel_read_buf; | ||
457 | nand_chip->write_buf = atmel_write_buf; | ||
458 | } | ||
459 | 585 | ||
460 | platform_set_drvdata(pdev, host); | 586 | platform_set_drvdata(pdev, host); |
461 | atmel_nand_enable(host); | 587 | atmel_nand_enable(host); |
@@ -473,6 +599,22 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
473 | nand_chip->options |= NAND_USE_FLASH_BBT; | 599 | nand_chip->options |= NAND_USE_FLASH_BBT; |
474 | } | 600 | } |
475 | 601 | ||
602 | if (cpu_has_dma() && use_dma) { | ||
603 | dma_cap_mask_t mask; | ||
604 | |||
605 | dma_cap_zero(mask); | ||
606 | dma_cap_set(DMA_MEMCPY, mask); | ||
607 | host->dma_chan = dma_request_channel(mask, 0, NULL); | ||
608 | if (!host->dma_chan) { | ||
609 | dev_err(host->dev, "Failed to request DMA channel\n"); | ||
610 | use_dma = 0; | ||
611 | } | ||
612 | } | ||
613 | if (use_dma) | ||
614 | dev_info(host->dev, "Using DMA for NAND access.\n"); | ||
615 | else | ||
616 | dev_info(host->dev, "No DMA support for NAND access.\n"); | ||
617 | |||
476 | /* first scan to find the device and get the page size */ | 618 | /* first scan to find the device and get the page size */ |
477 | if (nand_scan_ident(mtd, 1, NULL)) { | 619 | if (nand_scan_ident(mtd, 1, NULL)) { |
478 | res = -ENXIO; | 620 | res = -ENXIO; |
@@ -555,6 +697,8 @@ err_scan_ident: | |||
555 | err_no_card: | 697 | err_no_card: |
556 | atmel_nand_disable(host); | 698 | atmel_nand_disable(host); |
557 | platform_set_drvdata(pdev, NULL); | 699 | platform_set_drvdata(pdev, NULL); |
700 | if (host->dma_chan) | ||
701 | dma_release_channel(host->dma_chan); | ||
558 | if (host->ecc) | 702 | if (host->ecc) |
559 | iounmap(host->ecc); | 703 | iounmap(host->ecc); |
560 | err_ecc_ioremap: | 704 | err_ecc_ioremap: |
@@ -578,6 +722,10 @@ static int __exit atmel_nand_remove(struct platform_device *pdev) | |||
578 | 722 | ||
579 | if (host->ecc) | 723 | if (host->ecc) |
580 | iounmap(host->ecc); | 724 | iounmap(host->ecc); |
725 | |||
726 | if (host->dma_chan) | ||
727 | dma_release_channel(host->dma_chan); | ||
728 | |||
581 | iounmap(host->io_base); | 729 | iounmap(host->io_base); |
582 | kfree(host); | 730 | kfree(host); |
583 | 731 | ||
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index a90fde3ede2..aff3468867a 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c | |||
@@ -37,9 +37,6 @@ | |||
37 | #include <mach/nand.h> | 37 | #include <mach/nand.h> |
38 | #include <mach/aemif.h> | 38 | #include <mach/aemif.h> |
39 | 39 | ||
40 | #include <asm/mach-types.h> | ||
41 | |||
42 | |||
43 | /* | 40 | /* |
44 | * This is a device driver for the NAND flash controller found on the | 41 | * This is a device driver for the NAND flash controller found on the |
45 | * various DaVinci family chips. It handles up to four SoC chipselects, | 42 | * various DaVinci family chips. It handles up to four SoC chipselects, |
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index c2f95437e5e..0b81b5b499d 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ b/drivers/mtd/nand/mpc5121_nfc.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/clk.h> | 29 | #include <linux/clk.h> |
30 | #include <linux/gfp.h> | 30 | #include <linux/gfp.h> |
31 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
32 | #include <linux/err.h> | ||
32 | #include <linux/init.h> | 33 | #include <linux/init.h> |
33 | #include <linux/interrupt.h> | 34 | #include <linux/interrupt.h> |
34 | #include <linux/io.h> | 35 | #include <linux/io.h> |
@@ -757,9 +758,9 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op) | |||
757 | 758 | ||
758 | /* Enable NFC clock */ | 759 | /* Enable NFC clock */ |
759 | prv->clk = clk_get(dev, "nfc_clk"); | 760 | prv->clk = clk_get(dev, "nfc_clk"); |
760 | if (!prv->clk) { | 761 | if (IS_ERR(prv->clk)) { |
761 | dev_err(dev, "Unable to acquire NFC clock!\n"); | 762 | dev_err(dev, "Unable to acquire NFC clock!\n"); |
762 | retval = -ENODEV; | 763 | retval = PTR_ERR(prv->clk); |
763 | goto error; | 764 | goto error; |
764 | } | 765 | } |
765 | 766 | ||
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 5ae1d9ee2cf..42a95fb4150 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c | |||
@@ -211,6 +211,31 @@ static struct nand_ecclayout nandv2_hw_eccoob_largepage = { | |||
211 | } | 211 | } |
212 | }; | 212 | }; |
213 | 213 | ||
214 | /* OOB description for 4096 byte pages with 128 byte OOB */ | ||
215 | static struct nand_ecclayout nandv2_hw_eccoob_4k = { | ||
216 | .eccbytes = 8 * 9, | ||
217 | .eccpos = { | ||
218 | 7, 8, 9, 10, 11, 12, 13, 14, 15, | ||
219 | 23, 24, 25, 26, 27, 28, 29, 30, 31, | ||
220 | 39, 40, 41, 42, 43, 44, 45, 46, 47, | ||
221 | 55, 56, 57, 58, 59, 60, 61, 62, 63, | ||
222 | 71, 72, 73, 74, 75, 76, 77, 78, 79, | ||
223 | 87, 88, 89, 90, 91, 92, 93, 94, 95, | ||
224 | 103, 104, 105, 106, 107, 108, 109, 110, 111, | ||
225 | 119, 120, 121, 122, 123, 124, 125, 126, 127, | ||
226 | }, | ||
227 | .oobfree = { | ||
228 | {.offset = 2, .length = 4}, | ||
229 | {.offset = 16, .length = 7}, | ||
230 | {.offset = 32, .length = 7}, | ||
231 | {.offset = 48, .length = 7}, | ||
232 | {.offset = 64, .length = 7}, | ||
233 | {.offset = 80, .length = 7}, | ||
234 | {.offset = 96, .length = 7}, | ||
235 | {.offset = 112, .length = 7}, | ||
236 | } | ||
237 | }; | ||
238 | |||
214 | #ifdef CONFIG_MTD_PARTITIONS | 239 | #ifdef CONFIG_MTD_PARTITIONS |
215 | static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; | 240 | static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; |
216 | #endif | 241 | #endif |
@@ -641,9 +666,9 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) | |||
641 | 666 | ||
642 | n = min(n, len); | 667 | n = min(n, len); |
643 | 668 | ||
644 | memcpy(buf, host->data_buf + col, len); | 669 | memcpy(buf, host->data_buf + col, n); |
645 | 670 | ||
646 | host->buf_start += len; | 671 | host->buf_start += n; |
647 | } | 672 | } |
648 | 673 | ||
649 | /* Used by the upper layer to verify the data in NAND Flash | 674 | /* Used by the upper layer to verify the data in NAND Flash |
@@ -1185,6 +1210,8 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
1185 | 1210 | ||
1186 | if (mtd->writesize == 2048) | 1211 | if (mtd->writesize == 2048) |
1187 | this->ecc.layout = oob_largepage; | 1212 | this->ecc.layout = oob_largepage; |
1213 | if (nfc_is_v21() && mtd->writesize == 4096) | ||
1214 | this->ecc.layout = &nandv2_hw_eccoob_4k; | ||
1188 | 1215 | ||
1189 | /* second phase scan */ | 1216 | /* second phase scan */ |
1190 | if (nand_scan_tail(mtd)) { | 1217 | if (nand_scan_tail(mtd)) { |
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index a9c6ce74576..85cfc061d41 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/mtd/mtd.h> | 42 | #include <linux/mtd/mtd.h> |
43 | #include <linux/mtd/nand.h> | 43 | #include <linux/mtd/nand.h> |
44 | #include <linux/mtd/nand_ecc.h> | 44 | #include <linux/mtd/nand_ecc.h> |
45 | #include <linux/mtd/nand_bch.h> | ||
45 | #include <linux/interrupt.h> | 46 | #include <linux/interrupt.h> |
46 | #include <linux/bitops.h> | 47 | #include <linux/bitops.h> |
47 | #include <linux/leds.h> | 48 | #include <linux/leds.h> |
@@ -2377,7 +2378,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, | |||
2377 | return -EINVAL; | 2378 | return -EINVAL; |
2378 | } | 2379 | } |
2379 | 2380 | ||
2380 | /* Do not allow reads past end of device */ | 2381 | /* Do not allow write past end of device */ |
2381 | if (unlikely(to >= mtd->size || | 2382 | if (unlikely(to >= mtd->size || |
2382 | ops->ooboffs + ops->ooblen > | 2383 | ops->ooboffs + ops->ooblen > |
2383 | ((mtd->size >> chip->page_shift) - | 2384 | ((mtd->size >> chip->page_shift) - |
@@ -3248,7 +3249,7 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3248 | /* | 3249 | /* |
3249 | * If no default placement scheme is given, select an appropriate one | 3250 | * If no default placement scheme is given, select an appropriate one |
3250 | */ | 3251 | */ |
3251 | if (!chip->ecc.layout) { | 3252 | if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) { |
3252 | switch (mtd->oobsize) { | 3253 | switch (mtd->oobsize) { |
3253 | case 8: | 3254 | case 8: |
3254 | chip->ecc.layout = &nand_oob_8; | 3255 | chip->ecc.layout = &nand_oob_8; |
@@ -3351,6 +3352,40 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3351 | chip->ecc.bytes = 3; | 3352 | chip->ecc.bytes = 3; |
3352 | break; | 3353 | break; |
3353 | 3354 | ||
3355 | case NAND_ECC_SOFT_BCH: | ||
3356 | if (!mtd_nand_has_bch()) { | ||
3357 | printk(KERN_WARNING "CONFIG_MTD_NAND_ECC_BCH not enabled\n"); | ||
3358 | BUG(); | ||
3359 | } | ||
3360 | chip->ecc.calculate = nand_bch_calculate_ecc; | ||
3361 | chip->ecc.correct = nand_bch_correct_data; | ||
3362 | chip->ecc.read_page = nand_read_page_swecc; | ||
3363 | chip->ecc.read_subpage = nand_read_subpage; | ||
3364 | chip->ecc.write_page = nand_write_page_swecc; | ||
3365 | chip->ecc.read_page_raw = nand_read_page_raw; | ||
3366 | chip->ecc.write_page_raw = nand_write_page_raw; | ||
3367 | chip->ecc.read_oob = nand_read_oob_std; | ||
3368 | chip->ecc.write_oob = nand_write_oob_std; | ||
3369 | /* | ||
3370 | * Board driver should supply ecc.size and ecc.bytes values to | ||
3371 | * select how many bits are correctable; see nand_bch_init() | ||
3372 | * for details. | ||
3373 | * Otherwise, default to 4 bits for large page devices | ||
3374 | */ | ||
3375 | if (!chip->ecc.size && (mtd->oobsize >= 64)) { | ||
3376 | chip->ecc.size = 512; | ||
3377 | chip->ecc.bytes = 7; | ||
3378 | } | ||
3379 | chip->ecc.priv = nand_bch_init(mtd, | ||
3380 | chip->ecc.size, | ||
3381 | chip->ecc.bytes, | ||
3382 | &chip->ecc.layout); | ||
3383 | if (!chip->ecc.priv) { | ||
3384 | printk(KERN_WARNING "BCH ECC initialization failed!\n"); | ||
3385 | BUG(); | ||
3386 | } | ||
3387 | break; | ||
3388 | |||
3354 | case NAND_ECC_NONE: | 3389 | case NAND_ECC_NONE: |
3355 | printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. " | 3390 | printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. " |
3356 | "This is not recommended !!\n"); | 3391 | "This is not recommended !!\n"); |
@@ -3501,6 +3536,9 @@ void nand_release(struct mtd_info *mtd) | |||
3501 | { | 3536 | { |
3502 | struct nand_chip *chip = mtd->priv; | 3537 | struct nand_chip *chip = mtd->priv; |
3503 | 3538 | ||
3539 | if (chip->ecc.mode == NAND_ECC_SOFT_BCH) | ||
3540 | nand_bch_free((struct nand_bch_control *)chip->ecc.priv); | ||
3541 | |||
3504 | #ifdef CONFIG_MTD_PARTITIONS | 3542 | #ifdef CONFIG_MTD_PARTITIONS |
3505 | /* Deregister partitions */ | 3543 | /* Deregister partitions */ |
3506 | del_mtd_partitions(mtd); | 3544 | del_mtd_partitions(mtd); |
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 6ebd869993a..a1e8b30078d 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c | |||
@@ -1101,12 +1101,16 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) | |||
1101 | static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd) | 1101 | static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd) |
1102 | { | 1102 | { |
1103 | struct nand_chip *this = mtd->priv; | 1103 | struct nand_chip *this = mtd->priv; |
1104 | u32 pattern_len = bd->len; | 1104 | u32 pattern_len; |
1105 | u32 bits = bd->options & NAND_BBT_NRBITS_MSK; | 1105 | u32 bits; |
1106 | u32 table_size; | 1106 | u32 table_size; |
1107 | 1107 | ||
1108 | if (!bd) | 1108 | if (!bd) |
1109 | return; | 1109 | return; |
1110 | |||
1111 | pattern_len = bd->len; | ||
1112 | bits = bd->options & NAND_BBT_NRBITS_MSK; | ||
1113 | |||
1110 | BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) && | 1114 | BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) && |
1111 | !(this->options & NAND_USE_FLASH_BBT)); | 1115 | !(this->options & NAND_USE_FLASH_BBT)); |
1112 | BUG_ON(!bits); | 1116 | BUG_ON(!bits); |
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c new file mode 100644 index 00000000000..0f931e75711 --- /dev/null +++ b/drivers/mtd/nand/nand_bch.c | |||
@@ -0,0 +1,243 @@ | |||
1 | /* | ||
2 | * This file provides ECC correction for more than 1 bit per block of data, | ||
3 | * using binary BCH codes. It relies on the generic BCH library lib/bch.c. | ||
4 | * | ||
5 | * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com> | ||
6 | * | ||
7 | * This file is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the | ||
9 | * Free Software Foundation; either version 2 or (at your option) any | ||
10 | * later version. | ||
11 | * | ||
12 | * This file is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
15 | * for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this file; if not, write to the Free Software Foundation, Inc., | ||
19 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/types.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/bitops.h> | ||
27 | #include <linux/mtd/mtd.h> | ||
28 | #include <linux/mtd/nand.h> | ||
29 | #include <linux/mtd/nand_bch.h> | ||
30 | #include <linux/bch.h> | ||
31 | |||
32 | /** | ||
33 | * struct nand_bch_control - private NAND BCH control structure | ||
34 | * @bch: BCH control structure | ||
35 | * @ecclayout: private ecc layout for this BCH configuration | ||
36 | * @errloc: error location array | ||
37 | * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid | ||
38 | */ | ||
39 | struct nand_bch_control { | ||
40 | struct bch_control *bch; | ||
41 | struct nand_ecclayout ecclayout; | ||
42 | unsigned int *errloc; | ||
43 | unsigned char *eccmask; | ||
44 | }; | ||
45 | |||
46 | /** | ||
47 | * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block | ||
48 | * @mtd: MTD block structure | ||
49 | * @buf: input buffer with raw data | ||
50 | * @code: output buffer with ECC | ||
51 | */ | ||
52 | int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, | ||
53 | unsigned char *code) | ||
54 | { | ||
55 | const struct nand_chip *chip = mtd->priv; | ||
56 | struct nand_bch_control *nbc = chip->ecc.priv; | ||
57 | unsigned int i; | ||
58 | |||
59 | memset(code, 0, chip->ecc.bytes); | ||
60 | encode_bch(nbc->bch, buf, chip->ecc.size, code); | ||
61 | |||
62 | /* apply mask so that an erased page is a valid codeword */ | ||
63 | for (i = 0; i < chip->ecc.bytes; i++) | ||
64 | code[i] ^= nbc->eccmask[i]; | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | EXPORT_SYMBOL(nand_bch_calculate_ecc); | ||
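The eccmask applied above rests on a simple XOR identity: nand_bch_init() computes the ECC of an all-0xff block once, inverts it, and stores it, so XORing every freshly computed ECC with the mask turns an erased page's ECC into all-0xff bytes, matching its erased OOB area. A one-byte illustration (the value 0x5a is arbitrary):

    unsigned char e      = 0x5a;     /* stand-in: one ECC byte of an erased block */
    unsigned char mask   = e ^ 0xff; /* what nand_bch_init() keeps in eccmask     */
    unsigned char stored = e ^ mask; /* what nand_bch_calculate_ecc() emits       */
    /* stored == 0xff for any e, so an erased page remains a valid codeword */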
69 | |||
70 | /** | ||
71 | * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s) | ||
72 | * @mtd: MTD block structure | ||
73 | * @buf: raw data read from the chip | ||
74 | * @read_ecc: ECC from the chip | ||
75 | * @calc_ecc: the ECC calculated from raw data | ||
76 | * | ||
77 | * Detect and correct bit errors for a data byte block | ||
78 | */ | ||
79 | int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, | ||
80 | unsigned char *read_ecc, unsigned char *calc_ecc) | ||
81 | { | ||
82 | const struct nand_chip *chip = mtd->priv; | ||
83 | struct nand_bch_control *nbc = chip->ecc.priv; | ||
84 | unsigned int *errloc = nbc->errloc; | ||
85 | int i, count; | ||
86 | |||
87 | count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc, | ||
88 | NULL, errloc); | ||
89 | if (count > 0) { | ||
90 | for (i = 0; i < count; i++) { | ||
91 | if (errloc[i] < (chip->ecc.size*8)) | ||
92 | /* error is located in data, correct it */ | ||
93 | buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); | ||
94 | /* else error in ecc, no action needed */ | ||
95 | |||
96 | DEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n", | ||
97 | __func__, errloc[i]); | ||
98 | } | ||
99 | } else if (count < 0) { | ||
100 | printk(KERN_ERR "ecc unrecoverable error\n"); | ||
101 | count = -1; | ||
102 | } | ||
103 | return count; | ||
104 | } | ||
105 | EXPORT_SYMBOL(nand_bch_correct_data); | ||
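decode_bch() reports each error as a bit offset into the codeword, and the correction above is plain shift-and-mask arithmetic. A worked example with a hypothetical offset:

    unsigned char buf[512];        /* one 512-byte block = 4096 data bits */
    unsigned int errloc = 1037;    /* hypothetical reported bit offset    */

    if (errloc < 512 * 8)          /* lands in data, not in the ECC bytes */
            buf[errloc >> 3] ^= 1 << (errloc & 7);  /* flips byte 129, bit 5 */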
106 | |||
107 | /** | ||
108 | * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction | ||
109 | * @mtd: MTD block structure | ||
110 | * @eccsize: ecc block size in bytes | ||
111 | * @eccbytes: ecc length in bytes | ||
112 | * @ecclayout: output default layout | ||
113 | * | ||
114 | * Returns: | ||
115 | * a pointer to a new NAND BCH control structure, or NULL upon failure | ||
116 | * | ||
117 | * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes | ||
118 | * are used to compute BCH parameters m (Galois field order) and t (error | ||
119 | * correction capability). @eccbytes should be equal to the number of bytes | ||
120 | * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8. | ||
121 | * | ||
122 | * Example: to configure 4 bit correction per 512 bytes, you should pass | ||
123 | * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8) | ||
124 | * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits) | ||
125 | */ | ||
126 | struct nand_bch_control * | ||
127 | nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes, | ||
128 | struct nand_ecclayout **ecclayout) | ||
129 | { | ||
130 | unsigned int m, t, eccsteps, i; | ||
131 | struct nand_ecclayout *layout; | ||
132 | struct nand_bch_control *nbc = NULL; | ||
133 | unsigned char *erased_page; | ||
134 | |||
135 | if (!eccsize || !eccbytes) { | ||
136 | printk(KERN_WARNING "ecc parameters not supplied\n"); | ||
137 | goto fail; | ||
138 | } | ||
139 | |||
140 | m = fls(1+8*eccsize); | ||
141 | t = (eccbytes*8)/m; | ||
142 | |||
143 | nbc = kzalloc(sizeof(*nbc), GFP_KERNEL); | ||
144 | if (!nbc) | ||
145 | goto fail; | ||
146 | |||
147 | nbc->bch = init_bch(m, t, 0); | ||
148 | if (!nbc->bch) | ||
149 | goto fail; | ||
150 | |||
151 | /* verify that eccbytes has the expected value */ | ||
152 | if (nbc->bch->ecc_bytes != eccbytes) { | ||
153 | printk(KERN_WARNING "invalid eccbytes %u, should be %u\n", | ||
154 | eccbytes, nbc->bch->ecc_bytes); | ||
155 | goto fail; | ||
156 | } | ||
157 | |||
158 | eccsteps = mtd->writesize/eccsize; | ||
159 | |||
160 | /* if no ecc placement scheme was provided, build one */ | ||
161 | if (!*ecclayout) { | ||
162 | |||
163 | /* handle large page devices only */ | ||
164 | if (mtd->oobsize < 64) { | ||
165 | printk(KERN_WARNING "must provide an oob scheme for " | ||
166 | "oobsize %d\n", mtd->oobsize); | ||
167 | goto fail; | ||
168 | } | ||
169 | |||
170 | layout = &nbc->ecclayout; | ||
171 | layout->eccbytes = eccsteps*eccbytes; | ||
172 | |||
173 | /* reserve 2 bytes for bad block marker */ | ||
174 | if (layout->eccbytes+2 > mtd->oobsize) { | ||
175 | printk(KERN_WARNING "no suitable oob scheme available " | ||
176 | "for oobsize %d eccbytes %u\n", mtd->oobsize, | ||
177 | eccbytes); | ||
178 | goto fail; | ||
179 | } | ||
180 | /* put ecc bytes at oob tail */ | ||
181 | for (i = 0; i < layout->eccbytes; i++) | ||
182 | layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i; | ||
183 | |||
184 | layout->oobfree[0].offset = 2; | ||
185 | layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes; | ||
186 | |||
187 | *ecclayout = layout; | ||
188 | } | ||
189 | |||
190 | /* sanity checks */ | ||
191 | if (8*(eccsize+eccbytes) >= (1 << m)) { | ||
192 | printk(KERN_WARNING "eccsize %u is too large\n", eccsize); | ||
193 | goto fail; | ||
194 | } | ||
195 | if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) { | ||
196 | printk(KERN_WARNING "invalid ecc layout\n"); | ||
197 | goto fail; | ||
198 | } | ||
199 | |||
200 | nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL); | ||
201 | nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL); | ||
202 | if (!nbc->eccmask || !nbc->errloc) | ||
203 | goto fail; | ||
204 | /* | ||
205 | * compute and store the inverted ecc of an erased ecc block | ||
206 | */ | ||
207 | erased_page = kmalloc(eccsize, GFP_KERNEL); | ||
208 | if (!erased_page) | ||
209 | goto fail; | ||
210 | |||
211 | memset(erased_page, 0xff, eccsize); | ||
212 | memset(nbc->eccmask, 0, eccbytes); | ||
213 | encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask); | ||
214 | kfree(erased_page); | ||
215 | |||
216 | for (i = 0; i < eccbytes; i++) | ||
217 | nbc->eccmask[i] ^= 0xff; | ||
218 | |||
219 | return nbc; | ||
220 | fail: | ||
221 | nand_bch_free(nbc); | ||
222 | return NULL; | ||
223 | } | ||
224 | EXPORT_SYMBOL(nand_bch_init); | ||
225 | |||
226 | /** | ||
227 | * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources | ||
228 | * @nbc: NAND BCH control structure | ||
229 | */ | ||
230 | void nand_bch_free(struct nand_bch_control *nbc) | ||
231 | { | ||
232 | if (nbc) { | ||
233 | free_bch(nbc->bch); | ||
234 | kfree(nbc->errloc); | ||
235 | kfree(nbc->eccmask); | ||
236 | kfree(nbc); | ||
237 | } | ||
238 | } | ||
239 | EXPORT_SYMBOL(nand_bch_free); | ||
240 | |||
241 | MODULE_LICENSE("GPL"); | ||
242 | MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>"); | ||
243 | MODULE_DESCRIPTION("NAND software BCH ECC support"); | ||
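The kerneldoc example for nand_bch_init() can be checked against the two lines that derive the code parameters; for eccsize=512 and eccbytes=7 they give m=13 and t=4, which init_bch() then validates by confirming that m*t bits round up to exactly 7 bytes:

    unsigned int eccsize = 512, eccbytes = 7;
    unsigned int m = fls(1 + 8 * eccsize);   /* fls(4097) = 13 */
    unsigned int t = (eccbytes * 8) / m;     /* 56 / 13   = 4  */
    /* nand_bch_init() then requires nbc->bch->ecc_bytes == eccbytes */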
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index a5aa99f014b..213181be0d9 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/string.h> | 34 | #include <linux/string.h> |
35 | #include <linux/mtd/mtd.h> | 35 | #include <linux/mtd/mtd.h> |
36 | #include <linux/mtd/nand.h> | 36 | #include <linux/mtd/nand.h> |
37 | #include <linux/mtd/nand_bch.h> | ||
37 | #include <linux/mtd/partitions.h> | 38 | #include <linux/mtd/partitions.h> |
38 | #include <linux/delay.h> | 39 | #include <linux/delay.h> |
39 | #include <linux/list.h> | 40 | #include <linux/list.h> |
@@ -108,6 +109,7 @@ static unsigned int rptwear = 0; | |||
108 | static unsigned int overridesize = 0; | 109 | static unsigned int overridesize = 0; |
109 | static char *cache_file = NULL; | 110 | static char *cache_file = NULL; |
110 | static unsigned int bbt; | 111 | static unsigned int bbt; |
112 | static unsigned int bch; | ||
111 | 113 | ||
112 | module_param(first_id_byte, uint, 0400); | 114 | module_param(first_id_byte, uint, 0400); |
113 | module_param(second_id_byte, uint, 0400); | 115 | module_param(second_id_byte, uint, 0400); |
@@ -132,6 +134,7 @@ module_param(rptwear, uint, 0400); | |||
132 | module_param(overridesize, uint, 0400); | 134 | module_param(overridesize, uint, 0400); |
133 | module_param(cache_file, charp, 0400); | 135 | module_param(cache_file, charp, 0400); |
134 | module_param(bbt, uint, 0400); | 136 | module_param(bbt, uint, 0400); |
137 | module_param(bch, uint, 0400); | ||
135 | 138 | ||
136 | MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)"); | 139 | MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)"); |
137 | MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); | 140 | MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); |
@@ -165,6 +168,8 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I | |||
165 | " e.g. 5 means a size of 32 erase blocks"); | 168 | " e.g. 5 means a size of 32 erase blocks"); |
166 | MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory"); | 169 | MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory"); |
167 | MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area"); | 170 | MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area"); |
171 | MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should " | ||
172 | "be correctable in 512-byte blocks"); | ||
168 | 173 | ||
169 | /* The largest possible page size */ | 174 | /* The largest possible page size */ |
170 | #define NS_LARGEST_PAGE_SIZE 4096 | 175 | #define NS_LARGEST_PAGE_SIZE 4096 |
@@ -2309,7 +2314,43 @@ static int __init ns_init_module(void) | |||
2309 | if ((retval = parse_gravepages()) != 0) | 2314 | if ((retval = parse_gravepages()) != 0) |
2310 | goto error; | 2315 | goto error; |
2311 | 2316 | ||
2312 | if ((retval = nand_scan(nsmtd, 1)) != 0) { | 2317 | retval = nand_scan_ident(nsmtd, 1, NULL); |
2318 | if (retval) { | ||
2319 | NS_ERR("cannot scan NAND Simulator device\n"); | ||
2320 | if (retval > 0) | ||
2321 | retval = -ENXIO; | ||
2322 | goto error; | ||
2323 | } | ||
2324 | |||
2325 | if (bch) { | ||
2326 | unsigned int eccsteps, eccbytes; | ||
2327 | if (!mtd_nand_has_bch()) { | ||
2328 | NS_ERR("BCH ECC support is disabled\n"); | ||
2329 | retval = -EINVAL; | ||
2330 | goto error; | ||
2331 | } | ||
2332 | /* use 512-byte ecc blocks */ | ||
2333 | eccsteps = nsmtd->writesize/512; | ||
2334 | eccbytes = (bch*13+7)/8; | ||
2335 | /* do not bother supporting small page devices */ | ||
2336 | if ((nsmtd->oobsize < 64) || !eccsteps) { | ||
2337 | NS_ERR("bch not available on small page devices\n"); | ||
2338 | retval = -EINVAL; | ||
2339 | goto error; | ||
2340 | } | ||
2341 | if ((eccbytes*eccsteps+2) > nsmtd->oobsize) { | ||
2342 | NS_ERR("invalid bch value %u\n", bch); | ||
2343 | retval = -EINVAL; | ||
2344 | goto error; | ||
2345 | } | ||
2346 | chip->ecc.mode = NAND_ECC_SOFT_BCH; | ||
2347 | chip->ecc.size = 512; | ||
2348 | chip->ecc.bytes = eccbytes; | ||
2349 | NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size); | ||
2350 | } | ||
2351 | |||
2352 | retval = nand_scan_tail(nsmtd); | ||
2353 | if (retval) { | ||
2313 | NS_ERR("can't register NAND Simulator\n"); | 2354 | NS_ERR("can't register NAND Simulator\n"); |
2314 | if (retval > 0) | 2355 | if (retval > 0) |
2315 | retval = -ENXIO; | 2356 | retval = -ENXIO; |
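The eccbytes computation above follows from the BCH parameters for 512-byte blocks (m = 13): each correctable bit costs 13 ECC bits, and the "+7)/8" rounds up to whole bytes. Worked numbers for a hypothetical 2048+64 page loaded with bch=4:

    unsigned int bch      = 4;                   /* correctable bits per block */
    unsigned int eccbytes = (bch * 13 + 7) / 8;  /* (52 + 7) / 8 = 7 bytes     */
    unsigned int eccsteps = 2048 / 512;          /* 4 ECC blocks per page      */
    /* oob fit: 7 * 4 + 2 = 30 <= 64, so ECC plus bad-block marker fit */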
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 7b8f1fffc52..da9a351c9d7 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -668,6 +668,8 @@ static void gen_true_ecc(u8 *ecc_buf) | |||
668 | * | 668 | * |
669 | * This function compares two ECC's and indicates if there is an error. | 669 | * This function compares two ECC's and indicates if there is an error. |
670 | * If the error can be corrected it will be corrected to the buffer. | 670 | * If the error can be corrected it will be corrected to the buffer. |
671 | * If there is no error, %0 is returned. If there is an error but it | ||
672 | * was corrected, %1 is returned. Otherwise, %-1 is returned. | ||
671 | */ | 673 | */ |
672 | static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ | 674 | static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ |
673 | u8 *ecc_data2, /* read from register */ | 675 | u8 *ecc_data2, /* read from register */ |
@@ -773,7 +775,7 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ | |||
773 | 775 | ||
774 | page_data[find_byte] ^= (1 << find_bit); | 776 | page_data[find_byte] ^= (1 << find_bit); |
775 | 777 | ||
776 | return 0; | 778 | return 1; |
777 | default: | 779 | default: |
778 | if (isEccFF) { | 780 | if (isEccFF) { |
779 | if (ecc_data2[0] == 0 && | 781 | if (ecc_data2[0] == 0 && |
@@ -794,8 +796,11 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ | |||
794 | * @calc_ecc: ecc read from HW ECC registers | 796 | * @calc_ecc: ecc read from HW ECC registers |
795 | * | 797 | * |
796 | * Compares the ecc read from nand spare area with ECC registers values | 798 | * Compares the ecc read from nand spare area with ECC registers values |
797 | * and if ECC's mismached, it will call 'omap_compare_ecc' for error detection | 799 | * and if the ECCs mismatch, it will call 'omap_compare_ecc' for error |
798 | * and correction. | 800 | * detection and correction. If there are no errors, %0 is returned. If |
801 | * there were errors and all of the errors were corrected, the number of | ||
802 | * corrected errors is returned. If uncorrectable errors exist, %-1 is | ||
803 | * returned. | ||
799 | */ | 804 | */ |
800 | static int omap_correct_data(struct mtd_info *mtd, u_char *dat, | 805 | static int omap_correct_data(struct mtd_info *mtd, u_char *dat, |
801 | u_char *read_ecc, u_char *calc_ecc) | 806 | u_char *read_ecc, u_char *calc_ecc) |
@@ -803,6 +808,7 @@ static int omap_correct_data(struct mtd_info *mtd, u_char *dat, | |||
803 | struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, | 808 | struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, |
804 | mtd); | 809 | mtd); |
805 | int blockCnt = 0, i = 0, ret = 0; | 810 | int blockCnt = 0, i = 0, ret = 0; |
811 | int stat = 0; | ||
806 | 812 | ||
807 | /* Ex NAND_ECC_HW12_2048 */ | 813 | /* Ex NAND_ECC_HW12_2048 */ |
808 | if ((info->nand.ecc.mode == NAND_ECC_HW) && | 814 | if ((info->nand.ecc.mode == NAND_ECC_HW) && |
@@ -816,12 +822,14 @@ static int omap_correct_data(struct mtd_info *mtd, u_char *dat, | |||
816 | ret = omap_compare_ecc(read_ecc, calc_ecc, dat); | 822 | ret = omap_compare_ecc(read_ecc, calc_ecc, dat); |
817 | if (ret < 0) | 823 | if (ret < 0) |
818 | return ret; | 824 | return ret; |
825 | /* keep track of the number of corrected errors */ | ||
826 | stat += ret; | ||
819 | } | 827 | } |
820 | read_ecc += 3; | 828 | read_ecc += 3; |
821 | calc_ecc += 3; | 829 | calc_ecc += 3; |
822 | dat += 512; | 830 | dat += 512; |
823 | } | 831 | } |
824 | return 0; | 832 | return stat; |
825 | } | 833 | } |
826 | 834 | ||
827 | /** | 835 | /** |
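Returning stat instead of 0 brings omap_correct_data() in line with the MTD convention that ecc.correct() reports the number of corrected bitflips, or a negative value for unrecoverable data. A sketch of how nand_base consumes that value (simplified from nand_read_page_hwecc):

    int stat = chip->ecc.correct(mtd, dat, read_ecc, calc_ecc);
    if (stat < 0)
            mtd->ecc_stats.failed++;           /* uncorrectable block    */
    else
            mtd->ecc_stats.corrected += stat;  /* 0..N repaired bitflips */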
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index ea2c288df3f..ab7f4c33ced 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #include <plat/pxa3xx_nand.h> | 27 | #include <plat/pxa3xx_nand.h> |
28 | 28 | ||
29 | #define CHIP_DELAY_TIMEOUT (2 * HZ/10) | 29 | #define CHIP_DELAY_TIMEOUT (2 * HZ/10) |
30 | #define NAND_STOP_DELAY (2 * HZ/50) | ||
31 | #define PAGE_CHUNK_SIZE (2048) | ||
30 | 32 | ||
31 | /* registers and bit definitions */ | 33 | /* registers and bit definitions */ |
32 | #define NDCR (0x00) /* Control register */ | 34 | #define NDCR (0x00) /* Control register */ |
@@ -52,16 +54,18 @@ | |||
52 | #define NDCR_ND_MODE (0x3 << 21) | 54 | #define NDCR_ND_MODE (0x3 << 21) |
53 | #define NDCR_NAND_MODE (0x0) | 55 | #define NDCR_NAND_MODE (0x0) |
54 | #define NDCR_CLR_PG_CNT (0x1 << 20) | 56 | #define NDCR_CLR_PG_CNT (0x1 << 20) |
55 | #define NDCR_CLR_ECC (0x1 << 19) | 57 | #define NDCR_STOP_ON_UNCOR (0x1 << 19) |
56 | #define NDCR_RD_ID_CNT_MASK (0x7 << 16) | 58 | #define NDCR_RD_ID_CNT_MASK (0x7 << 16) |
57 | #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK) | 59 | #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK) |
58 | 60 | ||
59 | #define NDCR_RA_START (0x1 << 15) | 61 | #define NDCR_RA_START (0x1 << 15) |
60 | #define NDCR_PG_PER_BLK (0x1 << 14) | 62 | #define NDCR_PG_PER_BLK (0x1 << 14) |
61 | #define NDCR_ND_ARB_EN (0x1 << 12) | 63 | #define NDCR_ND_ARB_EN (0x1 << 12) |
64 | #define NDCR_INT_MASK (0xFFF) | ||
62 | 65 | ||
63 | #define NDSR_MASK (0xfff) | 66 | #define NDSR_MASK (0xfff) |
64 | #define NDSR_RDY (0x1 << 11) | 67 | #define NDSR_RDY (0x1 << 12) |
68 | #define NDSR_FLASH_RDY (0x1 << 11) | ||
65 | #define NDSR_CS0_PAGED (0x1 << 10) | 69 | #define NDSR_CS0_PAGED (0x1 << 10) |
66 | #define NDSR_CS1_PAGED (0x1 << 9) | 70 | #define NDSR_CS1_PAGED (0x1 << 9) |
67 | #define NDSR_CS0_CMDD (0x1 << 8) | 71 | #define NDSR_CS0_CMDD (0x1 << 8) |
@@ -74,6 +78,7 @@ | |||
74 | #define NDSR_RDDREQ (0x1 << 1) | 78 | #define NDSR_RDDREQ (0x1 << 1) |
75 | #define NDSR_WRCMDREQ (0x1) | 79 | #define NDSR_WRCMDREQ (0x1) |
76 | 80 | ||
81 | #define NDCB0_ST_ROW_EN (0x1 << 26) | ||
77 | #define NDCB0_AUTO_RS (0x1 << 25) | 82 | #define NDCB0_AUTO_RS (0x1 << 25) |
78 | #define NDCB0_CSEL (0x1 << 24) | 83 | #define NDCB0_CSEL (0x1 << 24) |
79 | #define NDCB0_CMD_TYPE_MASK (0x7 << 21) | 84 | #define NDCB0_CMD_TYPE_MASK (0x7 << 21) |
@@ -104,18 +109,21 @@ enum { | |||
104 | }; | 109 | }; |
105 | 110 | ||
106 | enum { | 111 | enum { |
107 | STATE_READY = 0, | 112 | STATE_IDLE = 0, |
108 | STATE_CMD_HANDLE, | 113 | STATE_CMD_HANDLE, |
109 | STATE_DMA_READING, | 114 | STATE_DMA_READING, |
110 | STATE_DMA_WRITING, | 115 | STATE_DMA_WRITING, |
111 | STATE_DMA_DONE, | 116 | STATE_DMA_DONE, |
112 | STATE_PIO_READING, | 117 | STATE_PIO_READING, |
113 | STATE_PIO_WRITING, | 118 | STATE_PIO_WRITING, |
119 | STATE_CMD_DONE, | ||
120 | STATE_READY, | ||
114 | }; | 121 | }; |
115 | 122 | ||
116 | struct pxa3xx_nand_info { | 123 | struct pxa3xx_nand_info { |
117 | struct nand_chip nand_chip; | 124 | struct nand_chip nand_chip; |
118 | 125 | ||
126 | struct nand_hw_control controller; | ||
119 | struct platform_device *pdev; | 127 | struct platform_device *pdev; |
120 | struct pxa3xx_nand_cmdset *cmdset; | 128 | struct pxa3xx_nand_cmdset *cmdset; |
121 | 129 | ||
@@ -126,6 +134,7 @@ struct pxa3xx_nand_info { | |||
126 | unsigned int buf_start; | 134 | unsigned int buf_start; |
127 | unsigned int buf_count; | 135 | unsigned int buf_count; |
128 | 136 | ||
137 | struct mtd_info *mtd; | ||
129 | /* DMA information */ | 138 | /* DMA information */ |
130 | int drcmr_dat; | 139 | int drcmr_dat; |
131 | int drcmr_cmd; | 140 | int drcmr_cmd; |
@@ -149,6 +158,7 @@ struct pxa3xx_nand_info { | |||
149 | 158 | ||
150 | int use_ecc; /* use HW ECC ? */ | 159 | int use_ecc; /* use HW ECC ? */ |
151 | int use_dma; /* use DMA ? */ | 160 | int use_dma; /* use DMA ? */ |
161 | int is_ready; | ||
152 | 162 | ||
153 | unsigned int page_size; /* page size of attached chip */ | 163 | unsigned int page_size; /* page size of attached chip */ |
154 | unsigned int data_size; /* data size in FIFO */ | 164 | unsigned int data_size; /* data size in FIFO */ |
@@ -201,20 +211,22 @@ static struct pxa3xx_nand_timing timing[] = { | |||
201 | }; | 211 | }; |
202 | 212 | ||
203 | static struct pxa3xx_nand_flash builtin_flash_types[] = { | 213 | static struct pxa3xx_nand_flash builtin_flash_types[] = { |
204 | { 0, 0, 2048, 8, 8, 0, &default_cmdset, &timing[0] }, | 214 | { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] }, |
205 | { 0x46ec, 32, 512, 16, 16, 4096, &default_cmdset, &timing[1] }, | 215 | { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] }, |
206 | { 0xdaec, 64, 2048, 8, 8, 2048, &default_cmdset, &timing[1] }, | 216 | { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] }, |
207 | { 0xd7ec, 128, 4096, 8, 8, 8192, &default_cmdset, &timing[1] }, | 217 | { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] }, |
208 | { 0xa12c, 64, 2048, 8, 8, 1024, &default_cmdset, &timing[2] }, | 218 | { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] }, |
209 | { 0xb12c, 64, 2048, 16, 16, 1024, &default_cmdset, &timing[2] }, | 219 | { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] }, |
210 | { 0xdc2c, 64, 2048, 8, 8, 4096, &default_cmdset, &timing[2] }, | 220 | { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] }, |
211 | { 0xcc2c, 64, 2048, 16, 16, 4096, &default_cmdset, &timing[2] }, | 221 | { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] }, |
212 | { 0xba20, 64, 2048, 16, 16, 2048, &default_cmdset, &timing[3] }, | 222 | { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] }, |
213 | }; | 223 | }; |
214 | 224 | ||
215 | /* Define a default flash type setting, used only for flash detection */ | 225 | /* Define a default flash type setting, used only for flash detection */ |
216 | #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) | 226 | #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) |
217 | 227 | ||
228 | const char *mtd_names[] = {"pxa3xx_nand-0", NULL}; | ||
229 | |||
218 | #define NDTR0_tCH(c) (min((c), 7) << 19) | 230 | #define NDTR0_tCH(c) (min((c), 7) << 19) |
219 | #define NDTR0_tCS(c) (min((c), 7) << 16) | 231 | #define NDTR0_tCS(c) (min((c), 7) << 16) |
220 | #define NDTR0_tWH(c) (min((c), 7) << 11) | 232 | #define NDTR0_tWH(c) (min((c), 7) << 11) |
@@ -252,25 +264,6 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, | |||
252 | nand_writel(info, NDTR1CS0, ndtr1); | 264 | nand_writel(info, NDTR1CS0, ndtr1); |
253 | } | 265 | } |
254 | 266 | ||
255 | #define WAIT_EVENT_TIMEOUT 10 | ||
256 | |||
257 | static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event) | ||
258 | { | ||
259 | int timeout = WAIT_EVENT_TIMEOUT; | ||
260 | uint32_t ndsr; | ||
261 | |||
262 | while (timeout--) { | ||
263 | ndsr = nand_readl(info, NDSR) & NDSR_MASK; | ||
264 | if (ndsr & event) { | ||
265 | nand_writel(info, NDSR, ndsr); | ||
266 | return 0; | ||
267 | } | ||
268 | udelay(10); | ||
269 | } | ||
270 | |||
271 | return -ETIMEDOUT; | ||
272 | } | ||
273 | |||
274 | static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) | 267 | static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) |
275 | { | 268 | { |
276 | int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; | 269 | int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; |
@@ -291,69 +284,45 @@ static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) | |||
291 | } | 284 | } |
292 | } | 285 | } |
293 | 286 | ||
294 | static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info, | 287 | /** |
295 | uint16_t cmd, int column, int page_addr) | 288 | * NOTE: ND_RUN must be set first, and the command |
289 | * buffer written only afterwards; otherwise it does not work. | ||
290 | * We enable all the interrupts at the same time, and | ||
291 | * let pxa3xx_nand_irq handle all the logic. | ||
292 | */ | ||
293 | static void pxa3xx_nand_start(struct pxa3xx_nand_info *info) | ||
296 | { | 294 | { |
297 | const struct pxa3xx_nand_cmdset *cmdset = info->cmdset; | 295 | uint32_t ndcr; |
298 | pxa3xx_set_datasize(info); | ||
299 | |||
300 | /* generate values for NDCBx registers */ | ||
301 | info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); | ||
302 | info->ndcb1 = 0; | ||
303 | info->ndcb2 = 0; | ||
304 | info->ndcb0 |= NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles); | ||
305 | |||
306 | if (info->col_addr_cycles == 2) { | ||
307 | /* large block, 2 cycles for column address | ||
308 | * row address starts from 3rd cycle | ||
309 | */ | ||
310 | info->ndcb1 |= page_addr << 16; | ||
311 | if (info->row_addr_cycles == 3) | ||
312 | info->ndcb2 = (page_addr >> 16) & 0xff; | ||
313 | } else | ||
314 | /* small block, 1 cycles for column address | ||
315 | * row address starts from 2nd cycle | ||
316 | */ | ||
317 | info->ndcb1 = page_addr << 8; | ||
318 | |||
319 | if (cmd == cmdset->program) | ||
320 | info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS; | ||
321 | 296 | ||
322 | return 0; | 297 | ndcr = info->reg_ndcr; |
323 | } | 298 | ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; |
299 | ndcr |= info->use_dma ? NDCR_DMA_EN : 0; | ||
300 | ndcr |= NDCR_ND_RUN; | ||
324 | 301 | ||
325 | static int prepare_erase_cmd(struct pxa3xx_nand_info *info, | 302 | /* clear status bits and run */ |
326 | uint16_t cmd, int page_addr) | 303 | nand_writel(info, NDCR, 0); |
327 | { | 304 | nand_writel(info, NDSR, NDSR_MASK); |
328 | info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); | 305 | nand_writel(info, NDCR, ndcr); |
329 | info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3); | ||
330 | info->ndcb1 = page_addr; | ||
331 | info->ndcb2 = 0; | ||
332 | return 0; | ||
333 | } | 306 | } |
334 | 307 | ||
335 | static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd) | 308 | static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info) |
336 | { | 309 | { |
337 | const struct pxa3xx_nand_cmdset *cmdset = info->cmdset; | 310 | uint32_t ndcr; |
338 | 311 | int timeout = NAND_STOP_DELAY; | |
339 | info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); | ||
340 | info->ndcb1 = 0; | ||
341 | info->ndcb2 = 0; | ||
342 | 312 | ||
343 | info->oob_size = 0; | 313 | /* wait for the RUN bit in NDCR to become 0 */ |
344 | if (cmd == cmdset->read_id) { | 314 | ndcr = nand_readl(info, NDCR); |
345 | info->ndcb0 |= NDCB0_CMD_TYPE(3); | 315 | while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) { |
346 | info->data_size = 8; | 316 | ndcr = nand_readl(info, NDCR); |
347 | } else if (cmd == cmdset->read_status) { | 317 | udelay(1); |
348 | info->ndcb0 |= NDCB0_CMD_TYPE(4); | 318 | } |
349 | info->data_size = 8; | ||
350 | } else if (cmd == cmdset->reset || cmd == cmdset->lock || | ||
351 | cmd == cmdset->unlock) { | ||
352 | info->ndcb0 |= NDCB0_CMD_TYPE(5); | ||
353 | } else | ||
354 | return -EINVAL; | ||
355 | 319 | ||
356 | return 0; | 320 | if (timeout <= 0) { |
321 | ndcr &= ~NDCR_ND_RUN; | ||
322 | nand_writel(info, NDCR, ndcr); | ||
323 | } | ||
324 | /* clear status bits */ | ||
325 | nand_writel(info, NDSR, NDSR_MASK); | ||
357 | } | 326 | } |
358 | 327 | ||
359 | static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) | 328 | static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) |
@@ -372,39 +341,8 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) | |||
372 | nand_writel(info, NDCR, ndcr | int_mask); | 341 | nand_writel(info, NDCR, ndcr | int_mask); |
373 | } | 342 | } |
374 | 343 | ||
375 | /* NOTE: it is a must to set ND_RUN firstly, then write command buffer | 344 | static void handle_data_pio(struct pxa3xx_nand_info *info) |
376 | * otherwise, it does not work | ||
377 | */ | ||
378 | static int write_cmd(struct pxa3xx_nand_info *info) | ||
379 | { | 345 | { |
380 | uint32_t ndcr; | ||
381 | |||
382 | /* clear status bits and run */ | ||
383 | nand_writel(info, NDSR, NDSR_MASK); | ||
384 | |||
385 | ndcr = info->reg_ndcr; | ||
386 | |||
387 | ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; | ||
388 | ndcr |= info->use_dma ? NDCR_DMA_EN : 0; | ||
389 | ndcr |= NDCR_ND_RUN; | ||
390 | |||
391 | nand_writel(info, NDCR, ndcr); | ||
392 | |||
393 | if (wait_for_event(info, NDSR_WRCMDREQ)) { | ||
394 | printk(KERN_ERR "timed out writing command\n"); | ||
395 | return -ETIMEDOUT; | ||
396 | } | ||
397 | |||
398 | nand_writel(info, NDCB0, info->ndcb0); | ||
399 | nand_writel(info, NDCB0, info->ndcb1); | ||
400 | nand_writel(info, NDCB0, info->ndcb2); | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | static int handle_data_pio(struct pxa3xx_nand_info *info) | ||
405 | { | ||
406 | int ret, timeout = CHIP_DELAY_TIMEOUT; | ||
407 | |||
408 | switch (info->state) { | 346 | switch (info->state) { |
409 | case STATE_PIO_WRITING: | 347 | case STATE_PIO_WRITING: |
410 | __raw_writesl(info->mmio_base + NDDB, info->data_buff, | 348 | __raw_writesl(info->mmio_base + NDDB, info->data_buff, |
@@ -412,14 +350,6 @@ static int handle_data_pio(struct pxa3xx_nand_info *info) | |||
412 | if (info->oob_size > 0) | 350 | if (info->oob_size > 0) |
413 | __raw_writesl(info->mmio_base + NDDB, info->oob_buff, | 351 | __raw_writesl(info->mmio_base + NDDB, info->oob_buff, |
414 | DIV_ROUND_UP(info->oob_size, 4)); | 352 | DIV_ROUND_UP(info->oob_size, 4)); |
415 | |||
416 | enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); | ||
417 | |||
418 | ret = wait_for_completion_timeout(&info->cmd_complete, timeout); | ||
419 | if (!ret) { | ||
420 | printk(KERN_ERR "program command time out\n"); | ||
421 | return -1; | ||
422 | } | ||
423 | break; | 353 | break; |
424 | case STATE_PIO_READING: | 354 | case STATE_PIO_READING: |
425 | __raw_readsl(info->mmio_base + NDDB, info->data_buff, | 355 | __raw_readsl(info->mmio_base + NDDB, info->data_buff, |
@@ -431,14 +361,11 @@ static int handle_data_pio(struct pxa3xx_nand_info *info) | |||
431 | default: | 361 | default: |
432 | printk(KERN_ERR "%s: invalid state %d\n", __func__, | 362 | printk(KERN_ERR "%s: invalid state %d\n", __func__, |
433 | info->state); | 363 | info->state); |
434 | return -EINVAL; | 364 | BUG(); |
435 | } | 365 | } |
436 | |||
437 | info->state = STATE_READY; | ||
438 | return 0; | ||
439 | } | 366 | } |
440 | 367 | ||
441 | static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out) | 368 | static void start_data_dma(struct pxa3xx_nand_info *info) |
442 | { | 369 | { |
443 | struct pxa_dma_desc *desc = info->data_desc; | 370 | struct pxa_dma_desc *desc = info->data_desc; |
444 | int dma_len = ALIGN(info->data_size + info->oob_size, 32); | 371 | int dma_len = ALIGN(info->data_size + info->oob_size, 32); |
@@ -446,14 +373,21 @@ static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out) | |||
446 | desc->ddadr = DDADR_STOP; | 373 | desc->ddadr = DDADR_STOP; |
447 | desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len; | 374 | desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len; |
448 | 375 | ||
449 | if (dir_out) { | 376 | switch (info->state) { |
377 | case STATE_DMA_WRITING: | ||
450 | desc->dsadr = info->data_buff_phys; | 378 | desc->dsadr = info->data_buff_phys; |
451 | desc->dtadr = info->mmio_phys + NDDB; | 379 | desc->dtadr = info->mmio_phys + NDDB; |
452 | desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG; | 380 | desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG; |
453 | } else { | 381 | break; |
382 | case STATE_DMA_READING: | ||
454 | desc->dtadr = info->data_buff_phys; | 383 | desc->dtadr = info->data_buff_phys; |
455 | desc->dsadr = info->mmio_phys + NDDB; | 384 | desc->dsadr = info->mmio_phys + NDDB; |
456 | desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; | 385 | desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; |
386 | break; | ||
387 | default: | ||
388 | printk(KERN_ERR "%s: invalid state %d\n", __func__, | ||
389 | info->state); | ||
390 | BUG(); | ||
457 | } | 391 | } |
458 | 392 | ||
459 | DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch; | 393 | DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch; |
@@ -471,93 +405,62 @@ static void pxa3xx_nand_data_dma_irq(int channel, void *data) | |||
471 | 405 | ||
472 | if (dcsr & DCSR_BUSERR) { | 406 | if (dcsr & DCSR_BUSERR) { |
473 | info->retcode = ERR_DMABUSERR; | 407 | info->retcode = ERR_DMABUSERR; |
474 | complete(&info->cmd_complete); | ||
475 | } | 408 | } |
476 | 409 | ||
477 | if (info->state == STATE_DMA_WRITING) { | 410 | info->state = STATE_DMA_DONE; |
478 | info->state = STATE_DMA_DONE; | 411 | enable_int(info, NDCR_INT_MASK); |
479 | enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); | 412 | nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ); |
480 | } else { | ||
481 | info->state = STATE_READY; | ||
482 | complete(&info->cmd_complete); | ||
483 | } | ||
484 | } | 413 | } |
485 | 414 | ||
486 | static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) | 415 | static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) |
487 | { | 416 | { |
488 | struct pxa3xx_nand_info *info = devid; | 417 | struct pxa3xx_nand_info *info = devid; |
489 | unsigned int status; | 418 | unsigned int status, is_completed = 0; |
490 | 419 | ||
491 | status = nand_readl(info, NDSR); | 420 | status = nand_readl(info, NDSR); |
492 | 421 | ||
493 | if (status & (NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR)) { | 422 | if (status & NDSR_DBERR) |
494 | if (status & NDSR_DBERR) | 423 | info->retcode = ERR_DBERR; |
495 | info->retcode = ERR_DBERR; | 424 | if (status & NDSR_SBERR) |
496 | else if (status & NDSR_SBERR) | 425 | info->retcode = ERR_SBERR; |
497 | info->retcode = ERR_SBERR; | 426 | if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) { |
498 | 427 | /* whether use dma to transfer data */ | |
499 | disable_int(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR); | ||
500 | |||
501 | if (info->use_dma) { | ||
502 | info->state = STATE_DMA_READING; | ||
503 | start_data_dma(info, 0); | ||
504 | } else { | ||
505 | info->state = STATE_PIO_READING; | ||
506 | complete(&info->cmd_complete); | ||
507 | } | ||
508 | } else if (status & NDSR_WRDREQ) { | ||
509 | disable_int(info, NDSR_WRDREQ); | ||
510 | if (info->use_dma) { | 428 | if (info->use_dma) { |
511 | info->state = STATE_DMA_WRITING; | 429 | disable_int(info, NDCR_INT_MASK); |
512 | start_data_dma(info, 1); | 430 | info->state = (status & NDSR_RDDREQ) ? |
431 | STATE_DMA_READING : STATE_DMA_WRITING; | ||
432 | start_data_dma(info); | ||
433 | goto NORMAL_IRQ_EXIT; | ||
513 | } else { | 434 | } else { |
514 | info->state = STATE_PIO_WRITING; | 435 | info->state = (status & NDSR_RDDREQ) ? |
515 | complete(&info->cmd_complete); | 436 | STATE_PIO_READING : STATE_PIO_WRITING; |
437 | handle_data_pio(info); | ||
516 | } | 438 | } |
517 | } else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) { | ||
518 | if (status & NDSR_CS0_BBD) | ||
519 | info->retcode = ERR_BBERR; | ||
520 | |||
521 | disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); | ||
522 | info->state = STATE_READY; | ||
523 | complete(&info->cmd_complete); | ||
524 | } | 439 | } |
525 | nand_writel(info, NDSR, status); | 440 | if (status & NDSR_CS0_CMDD) { |
526 | return IRQ_HANDLED; | 441 | info->state = STATE_CMD_DONE; |
527 | } | 442 | is_completed = 1; |
528 | |||
529 | static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event) | ||
530 | { | ||
531 | uint32_t ndcr; | ||
532 | int ret, timeout = CHIP_DELAY_TIMEOUT; | ||
533 | |||
534 | if (write_cmd(info)) { | ||
535 | info->retcode = ERR_SENDCMD; | ||
536 | goto fail_stop; | ||
537 | } | 443 | } |
538 | 444 | if (status & NDSR_FLASH_RDY) { | |
539 | info->state = STATE_CMD_HANDLE; | 445 | info->is_ready = 1; |
540 | 446 | info->state = STATE_READY; | |
541 | enable_int(info, event); | ||
542 | |||
543 | ret = wait_for_completion_timeout(&info->cmd_complete, timeout); | ||
544 | if (!ret) { | ||
545 | printk(KERN_ERR "command execution timed out\n"); | ||
546 | info->retcode = ERR_SENDCMD; | ||
547 | goto fail_stop; | ||
548 | } | 447 | } |
549 | 448 | ||
550 | if (info->use_dma == 0 && info->data_size > 0) | 449 | if (status & NDSR_WRCMDREQ) { |
551 | if (handle_data_pio(info)) | 450 | nand_writel(info, NDSR, NDSR_WRCMDREQ); |
552 | goto fail_stop; | 451 | status &= ~NDSR_WRCMDREQ; |
553 | 452 | info->state = STATE_CMD_HANDLE; | |
554 | return 0; | 453 | nand_writel(info, NDCB0, info->ndcb0); |
454 | nand_writel(info, NDCB0, info->ndcb1); | ||
455 | nand_writel(info, NDCB0, info->ndcb2); | ||
456 | } | ||
555 | 457 | ||
556 | fail_stop: | 458 | /* clear NDSR to let the controller exit the IRQ */ |
557 | ndcr = nand_readl(info, NDCR); | 459 | nand_writel(info, NDSR, status); |
558 | nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN); | 460 | if (is_completed) |
559 | udelay(10); | 461 | complete(&info->cmd_complete); |
560 | return -ETIMEDOUT; | 462 | NORMAL_IRQ_EXIT: |
463 | return IRQ_HANDLED; | ||
561 | } | 464 | } |
562 | 465 | ||
563 | static int pxa3xx_nand_dev_ready(struct mtd_info *mtd) | 466 | static int pxa3xx_nand_dev_ready(struct mtd_info *mtd) |
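The rewritten handler turns command execution into an interrupt-driven state machine: ND_RUN is set first, the three command words are pushed only once the controller raises WRCMDREQ, the data phase is served by PIO or DMA on RDDREQ/WRDREQ, and CS0_CMDD finally wakes the waiter. A condensed sketch of one command's life (not compilable in isolation; helpers as in the driver):

    pxa3xx_nand_start(info);                /* NDCR_ND_RUN must be set first */
    /* IRQ, NDSR_WRCMDREQ: only now is it safe to push the command words */
    nand_writel(info, NDCB0, info->ndcb0);
    nand_writel(info, NDCB0, info->ndcb1);
    nand_writel(info, NDCB0, info->ndcb2);
    /* IRQ, NDSR_RDDREQ/NDSR_WRDREQ: handle_data_pio() or start_data_dma() */
    /* IRQ, NDSR_CS0_CMDD: complete(&info->cmd_complete) wakes cmdfunc()   */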
@@ -574,125 +477,218 @@ static inline int is_buf_blank(uint8_t *buf, size_t len) | |||
574 | return 1; | 477 | return 1; |
575 | } | 478 | } |
576 | 479 | ||
577 | static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, | 480 | static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, |
578 | int column, int page_addr) | 481 | uint16_t column, int page_addr) |
579 | { | 482 | { |
580 | struct pxa3xx_nand_info *info = mtd->priv; | 483 | uint16_t cmd; |
581 | const struct pxa3xx_nand_cmdset *cmdset = info->cmdset; | 484 | int addr_cycle, exec_cmd, ndcb0; |
582 | int ret; | 485 | struct mtd_info *mtd = info->mtd; |
486 | |||
487 | ndcb0 = 0; | ||
488 | addr_cycle = 0; | ||
489 | exec_cmd = 1; | ||
490 | |||
491 | /* reset data and oob column pointers to handle data */ | ||
492 | info->buf_start = 0; | ||
493 | info->buf_count = 0; | ||
494 | info->oob_size = 0; | ||
495 | info->use_ecc = 0; | ||
496 | info->is_ready = 0; | ||
497 | info->retcode = ERR_NONE; | ||
583 | 498 | ||
584 | info->use_dma = (use_dma) ? 1 : 0; | 499 | switch (command) { |
585 | info->use_ecc = 0; | 500 | case NAND_CMD_READ0: |
586 | info->data_size = 0; | 501 | case NAND_CMD_PAGEPROG: |
587 | info->state = STATE_READY; | 502 | info->use_ecc = 1; |
503 | case NAND_CMD_READOOB: | ||
504 | pxa3xx_set_datasize(info); | ||
505 | break; | ||
506 | case NAND_CMD_SEQIN: | ||
507 | exec_cmd = 0; | ||
508 | break; | ||
509 | default: | ||
510 | info->ndcb1 = 0; | ||
511 | info->ndcb2 = 0; | ||
512 | break; | ||
513 | } | ||
588 | 514 | ||
589 | init_completion(&info->cmd_complete); | 515 | info->ndcb0 = ndcb0; |
516 | addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles | ||
517 | + info->col_addr_cycles); | ||
590 | 518 | ||
591 | switch (command) { | 519 | switch (command) { |
592 | case NAND_CMD_READOOB: | 520 | case NAND_CMD_READOOB: |
593 | /* disable HW ECC to get all the OOB data */ | 521 | case NAND_CMD_READ0: |
594 | info->buf_count = mtd->writesize + mtd->oobsize; | 522 | cmd = info->cmdset->read1; |
595 | info->buf_start = mtd->writesize + column; | 523 | if (command == NAND_CMD_READOOB) |
596 | memset(info->data_buff, 0xFF, info->buf_count); | 524 | info->buf_start = mtd->writesize + column; |
525 | else | ||
526 | info->buf_start = column; | ||
597 | 527 | ||
598 | if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr)) | 528 | if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) |
599 | break; | 529 | info->ndcb0 |= NDCB0_CMD_TYPE(0) |
530 | | addr_cycle | ||
531 | | (cmd & NDCB0_CMD1_MASK); | ||
532 | else | ||
533 | info->ndcb0 |= NDCB0_CMD_TYPE(0) | ||
534 | | NDCB0_DBC | ||
535 | | addr_cycle | ||
536 | | cmd; | ||
600 | 537 | ||
601 | pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR); | 538 | case NAND_CMD_SEQIN: |
539 | /* small page addr setting */ | ||
540 | if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) { | ||
541 | info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) | ||
542 | | (column & 0xFF); | ||
602 | 543 | ||
603 | /* We only are OOB, so if the data has error, does not matter */ | 544 | info->ndcb2 = 0; |
604 | if (info->retcode == ERR_DBERR) | 545 | } else { |
605 | info->retcode = ERR_NONE; | 546 | info->ndcb1 = ((page_addr & 0xFFFF) << 16) |
606 | break; | 547 | | (column & 0xFFFF); |
548 | |||
549 | if (page_addr & 0xFF0000) | ||
550 | info->ndcb2 = (page_addr & 0xFF0000) >> 16; | ||
551 | else | ||
552 | info->ndcb2 = 0; | ||
553 | } | ||
607 | 554 | ||
608 | case NAND_CMD_READ0: | ||
609 | info->use_ecc = 1; | ||
610 | info->retcode = ERR_NONE; | ||
611 | info->buf_start = column; | ||
612 | info->buf_count = mtd->writesize + mtd->oobsize; | 555 | info->buf_count = mtd->writesize + mtd->oobsize; |
613 | memset(info->data_buff, 0xFF, info->buf_count); | 556 | memset(info->data_buff, 0xFF, info->buf_count); |
614 | 557 | ||
615 | if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr)) | 558 | break; |
559 | |||
560 | case NAND_CMD_PAGEPROG: | ||
561 | if (is_buf_blank(info->data_buff, | ||
562 | (mtd->writesize + mtd->oobsize))) { | ||
563 | exec_cmd = 0; | ||
616 | break; | 564 | break; |
565 | } | ||
617 | 566 | ||
618 | pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR); | 567 | cmd = info->cmdset->program; |
568 | info->ndcb0 |= NDCB0_CMD_TYPE(0x1) | ||
569 | | NDCB0_AUTO_RS | ||
570 | | NDCB0_ST_ROW_EN | ||
571 | | NDCB0_DBC | ||
572 | | cmd | ||
573 | | addr_cycle; | ||
574 | break; | ||
619 | 575 | ||
620 | if (info->retcode == ERR_DBERR) { | 576 | case NAND_CMD_READID: |
621 | /* for blank page (all 0xff), HW will calculate its ECC as | 577 | cmd = info->cmdset->read_id; |
622 | * 0, which is different from the ECC information within | 578 | info->buf_count = info->read_id_bytes; |
623 | * OOB, ignore such double bit errors | 579 | info->ndcb0 |= NDCB0_CMD_TYPE(3) |
624 | */ | 580 | | NDCB0_ADDR_CYC(1) |
625 | if (is_buf_blank(info->data_buff, mtd->writesize)) | 581 | | cmd; |
626 | info->retcode = ERR_NONE; | 582 | |
627 | } | 583 | info->data_size = 8; |
628 | break; | 584 | break; |
629 | case NAND_CMD_SEQIN: | 585 | case NAND_CMD_STATUS: |
630 | info->buf_start = column; | 586 | cmd = info->cmdset->read_status; |
631 | info->buf_count = mtd->writesize + mtd->oobsize; | 587 | info->buf_count = 1; |
632 | memset(info->data_buff, 0xff, info->buf_count); | 588 | info->ndcb0 |= NDCB0_CMD_TYPE(4) |
589 | | NDCB0_ADDR_CYC(1) | ||
590 | | cmd; | ||
633 | 591 | ||
634 | /* save column/page_addr for next CMD_PAGEPROG */ | 592 | info->data_size = 8; |
635 | info->seqin_column = column; | ||
636 | info->seqin_page_addr = page_addr; | ||
637 | break; | 593 | break; |
638 | case NAND_CMD_PAGEPROG: | ||
639 | info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1; | ||
640 | 594 | ||
641 | if (prepare_read_prog_cmd(info, cmdset->program, | 595 | case NAND_CMD_ERASE1: |
642 | info->seqin_column, info->seqin_page_addr)) | 596 | cmd = info->cmdset->erase; |
643 | break; | 597 | info->ndcb0 |= NDCB0_CMD_TYPE(2) |
598 | | NDCB0_AUTO_RS | ||
599 | | NDCB0_ADDR_CYC(3) | ||
600 | | NDCB0_DBC | ||
601 | | cmd; | ||
602 | info->ndcb1 = page_addr; | ||
603 | info->ndcb2 = 0; | ||
644 | 604 | ||
645 | pxa3xx_nand_do_cmd(info, NDSR_WRDREQ); | ||
646 | break; | 605 | break; |
647 | case NAND_CMD_ERASE1: | 606 | case NAND_CMD_RESET: |
648 | if (prepare_erase_cmd(info, cmdset->erase, page_addr)) | 607 | cmd = info->cmdset->reset; |
649 | break; | 608 | info->ndcb0 |= NDCB0_CMD_TYPE(5) |
609 | | cmd; | ||
650 | 610 | ||
651 | pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); | ||
652 | break; | 611 | break; |
612 | |||
653 | case NAND_CMD_ERASE2: | 613 | case NAND_CMD_ERASE2: |
614 | exec_cmd = 0; | ||
654 | break; | 615 | break; |
655 | case NAND_CMD_READID: | ||
656 | case NAND_CMD_STATUS: | ||
657 | info->use_dma = 0; /* force PIO read */ | ||
658 | info->buf_start = 0; | ||
659 | info->buf_count = (command == NAND_CMD_READID) ? | ||
660 | info->read_id_bytes : 1; | ||
661 | |||
662 | if (prepare_other_cmd(info, (command == NAND_CMD_READID) ? | ||
663 | cmdset->read_id : cmdset->read_status)) | ||
664 | break; | ||
665 | 616 | ||
666 | pxa3xx_nand_do_cmd(info, NDSR_RDDREQ); | 617 | default: |
618 | exec_cmd = 0; | ||
619 | printk(KERN_ERR "pxa3xx-nand: non-supported" | ||
620 | " command %x\n", command); | ||
667 | break; | 621 | break; |
668 | case NAND_CMD_RESET: | 622 | } |
669 | if (prepare_other_cmd(info, cmdset->reset)) | ||
670 | break; | ||
671 | 623 | ||
672 | ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD); | 624 | return exec_cmd; |
673 | if (ret == 0) { | 625 | } |
674 | int timeout = 2; | ||
675 | uint32_t ndcr; | ||
676 | 626 | ||
677 | while (timeout--) { | 627 | static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, |
678 | if (nand_readl(info, NDSR) & NDSR_RDY) | 628 | int column, int page_addr) |
679 | break; | 629 | { |
680 | msleep(10); | 630 | struct pxa3xx_nand_info *info = mtd->priv; |
681 | } | 631 | int ret, exec_cmd; |
682 | 632 | ||
683 | ndcr = nand_readl(info, NDCR); | 633 | /* |
684 | nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN); | 634 | * if this is an x16 device, then convert the input |
635 | * "byte" address into a "word" address appropriate | ||
636 | * for indexing a word-oriented device | ||
637 | */ | ||
638 | if (info->reg_ndcr & NDCR_DWIDTH_M) | ||
639 | column /= 2; | ||
640 | |||
641 | exec_cmd = prepare_command_pool(info, command, column, page_addr); | ||
642 | if (exec_cmd) { | ||
643 | init_completion(&info->cmd_complete); | ||
644 | pxa3xx_nand_start(info); | ||
645 | |||
646 | ret = wait_for_completion_timeout(&info->cmd_complete, | ||
647 | CHIP_DELAY_TIMEOUT); | ||
648 | if (!ret) { | ||
649 | printk(KERN_ERR "Wait time out!!!\n"); | ||
650 | /* Stop State Machine for next command cycle */ | ||
651 | pxa3xx_nand_stop(info); | ||
685 | } | 652 | } |
686 | break; | 653 | info->state = STATE_IDLE; |
687 | default: | ||
688 | printk(KERN_ERR "non-supported command.\n"); | ||
689 | break; | ||
690 | } | 654 | } |
655 | } | ||
656 | |||
657 | static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, | ||
658 | struct nand_chip *chip, const uint8_t *buf) | ||
659 | { | ||
660 | chip->write_buf(mtd, buf, mtd->writesize); | ||
661 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
662 | } | ||
691 | 663 | ||
692 | if (info->retcode == ERR_DBERR) { | 664 | static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, |
693 | printk(KERN_ERR "double bit error @ page %08x\n", page_addr); | 665 | struct nand_chip *chip, uint8_t *buf, int page) |
694 | info->retcode = ERR_NONE; | 666 | { |
667 | struct pxa3xx_nand_info *info = mtd->priv; | ||
668 | |||
669 | chip->read_buf(mtd, buf, mtd->writesize); | ||
670 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
671 | |||
672 | if (info->retcode == ERR_SBERR) { | ||
673 | switch (info->use_ecc) { | ||
674 | case 1: | ||
675 | mtd->ecc_stats.corrected++; | ||
676 | break; | ||
677 | case 0: | ||
678 | default: | ||
679 | break; | ||
680 | } | ||
681 | } else if (info->retcode == ERR_DBERR) { | ||
682 | /* | ||
683 | * for blank page (all 0xff), HW will calculate its ECC as | ||
684 | * 0, which is different from the ECC information within | ||
685 | * OOB, ignore such double bit errors | ||
686 | */ | ||
687 | if (is_buf_blank(buf, mtd->writesize)) | ||
688 | mtd->ecc_stats.failed++; | ||
695 | } | 689 | } |
690 | |||
691 | return 0; | ||
696 | } | 692 | } |
697 | 693 | ||
698 | static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) | 694 | static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) |
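The NDCB1/NDCB2 packing in prepare_command_pool() splits a large-page address into a 16-bit column, the low 16 page bits in NDCB1, and any page bits above bit 16 in NDCB2. A worked example with a hypothetical page number:

    int      page_addr = 0x12345;  /* hypothetical page number */
    uint16_t column    = 0;
    uint32_t ndcb1 = ((page_addr & 0xFFFF) << 16) | (column & 0xFFFF);
                                                  /* = 0x23450000 */
    uint32_t ndcb2 = (page_addr & 0xFF0000) >> 16;      /* = 0x01 */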
@@ -769,73 +765,12 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this) | |||
769 | return 0; | 765 | return 0; |
770 | } | 766 | } |
771 | 767 | ||
772 | static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode) | ||
773 | { | ||
774 | return; | ||
775 | } | ||
776 | |||
777 | static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd, | ||
778 | const uint8_t *dat, uint8_t *ecc_code) | ||
779 | { | ||
780 | return 0; | ||
781 | } | ||
782 | |||
783 | static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd, | ||
784 | uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc) | ||
785 | { | ||
786 | struct pxa3xx_nand_info *info = mtd->priv; | ||
787 | /* | ||
788 | * Any error include ERR_SEND_CMD, ERR_DBERR, ERR_BUSERR, we | ||
789 | * consider it as a ecc error which will tell the caller the | ||
790 | * read fail We have distinguish all the errors, but the | ||
791 | * nand_read_ecc only check this function return value | ||
792 | * | ||
793 | * Corrected (single-bit) errors must also be noted. | ||
794 | */ | ||
795 | if (info->retcode == ERR_SBERR) | ||
796 | return 1; | ||
797 | else if (info->retcode != ERR_NONE) | ||
798 | return -1; | ||
799 | |||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | static int __readid(struct pxa3xx_nand_info *info, uint32_t *id) | ||
804 | { | ||
805 | const struct pxa3xx_nand_cmdset *cmdset = info->cmdset; | ||
806 | uint32_t ndcr; | ||
807 | uint8_t id_buff[8]; | ||
808 | |||
809 | if (prepare_other_cmd(info, cmdset->read_id)) { | ||
810 | printk(KERN_ERR "failed to prepare command\n"); | ||
811 | return -EINVAL; | ||
812 | } | ||
813 | |||
814 | /* Send command */ | ||
815 | if (write_cmd(info)) | ||
816 | goto fail_timeout; | ||
817 | |||
818 | /* Wait for CMDDM(command done successfully) */ | ||
819 | if (wait_for_event(info, NDSR_RDDREQ)) | ||
820 | goto fail_timeout; | ||
821 | |||
822 | __raw_readsl(info->mmio_base + NDDB, id_buff, 2); | ||
823 | *id = id_buff[0] | (id_buff[1] << 8); | ||
824 | return 0; | ||
825 | |||
826 | fail_timeout: | ||
827 | ndcr = nand_readl(info, NDCR); | ||
828 | nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN); | ||
829 | udelay(10); | ||
830 | return -ETIMEDOUT; | ||
831 | } | ||
832 | |||
833 | static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, | 768 | static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, |
834 | const struct pxa3xx_nand_flash *f) | 769 | const struct pxa3xx_nand_flash *f) |
835 | { | 770 | { |
836 | struct platform_device *pdev = info->pdev; | 771 | struct platform_device *pdev = info->pdev; |
837 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; | 772 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; |
838 | uint32_t ndcr = 0x00000FFF; /* disable all interrupts */ | 773 | uint32_t ndcr = 0x0; /* enable all interrupts */ |
839 | 774 | ||
840 | if (f->page_size != 2048 && f->page_size != 512) | 775 | if (f->page_size != 2048 && f->page_size != 512) |
841 | return -EINVAL; | 776 | return -EINVAL; |
@@ -844,9 +779,8 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, | |||
844 | return -EINVAL; | 779 | return -EINVAL; |
845 | 780 | ||
846 | /* calculate flash information */ | 781 | /* calculate flash information */ |
847 | info->cmdset = f->cmdset; | 782 | info->cmdset = &default_cmdset; |
848 | info->page_size = f->page_size; | 783 | info->page_size = f->page_size; |
849 | info->oob_buff = info->data_buff + f->page_size; | ||
850 | info->read_id_bytes = (f->page_size == 2048) ? 4 : 2; | 784 | info->read_id_bytes = (f->page_size == 2048) ? 4 : 2; |
851 | 785 | ||
852 | /* calculate addressing information */ | 786 | /* calculate addressing information */ |
@@ -876,87 +810,18 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, | |||
876 | static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) | 810 | static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) |
877 | { | 811 | { |
878 | uint32_t ndcr = nand_readl(info, NDCR); | 812 | uint32_t ndcr = nand_readl(info, NDCR); |
879 | struct nand_flash_dev *type = NULL; | ||
880 | uint32_t id = -1, page_per_block, num_blocks; | ||
881 | int i; | ||
882 | |||
883 | page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32; | ||
884 | info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512; | 813 | info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512; |
885 | /* set info fields needed to __readid */ | 814 | /* set info fields needed to read id */ |
886 | info->read_id_bytes = (info->page_size == 2048) ? 4 : 2; | 815 | info->read_id_bytes = (info->page_size == 2048) ? 4 : 2; |
887 | info->reg_ndcr = ndcr; | 816 | info->reg_ndcr = ndcr; |
888 | info->cmdset = &default_cmdset; | 817 | info->cmdset = &default_cmdset; |
889 | 818 | ||
890 | if (__readid(info, &id)) | ||
891 | return -ENODEV; | ||
892 | |||
893 | /* Lookup the flash id */ | ||
894 | id = (id >> 8) & 0xff; /* device id is byte 2 */ | ||
895 | for (i = 0; nand_flash_ids[i].name != NULL; i++) { | ||
896 | if (id == nand_flash_ids[i].id) { | ||
897 | type = &nand_flash_ids[i]; | ||
898 | break; | ||
899 | } | ||
900 | } | ||
901 | |||
902 | if (!type) | ||
903 | return -ENODEV; | ||
904 | |||
905 | /* fill the missing flash information */ | ||
906 | i = __ffs(page_per_block * info->page_size); | ||
907 | num_blocks = type->chipsize << (20 - i); | ||
908 | |||
909 | /* calculate addressing information */ | ||
910 | info->col_addr_cycles = (info->page_size == 2048) ? 2 : 1; | ||
911 | |||
912 | if (num_blocks * page_per_block > 65536) | ||
913 | info->row_addr_cycles = 3; | ||
914 | else | ||
915 | info->row_addr_cycles = 2; | ||
916 | |||
917 | info->ndtr0cs0 = nand_readl(info, NDTR0CS0); | 819 | info->ndtr0cs0 = nand_readl(info, NDTR0CS0); |
918 | info->ndtr1cs0 = nand_readl(info, NDTR1CS0); | 820 | info->ndtr1cs0 = nand_readl(info, NDTR1CS0); |
919 | 821 | ||
920 | return 0; | 822 | return 0; |
921 | } | 823 | } |
922 | 824 | ||
923 | static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info, | ||
924 | const struct pxa3xx_nand_platform_data *pdata) | ||
925 | { | ||
926 | const struct pxa3xx_nand_flash *f; | ||
927 | uint32_t id = -1; | ||
928 | int i; | ||
929 | |||
930 | if (pdata->keep_config) | ||
931 | if (pxa3xx_nand_detect_config(info) == 0) | ||
932 | return 0; | ||
933 | |||
934 | /* we use default timing to detect id */ | ||
935 | f = DEFAULT_FLASH_TYPE; | ||
936 | pxa3xx_nand_config_flash(info, f); | ||
937 | if (__readid(info, &id)) | ||
938 | goto fail_detect; | ||
939 | |||
940 | for (i=0; i<ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; i++) { | ||
941 | /* we first choose the flash definition from platfrom */ | ||
942 | if (i < pdata->num_flash) | ||
943 | f = pdata->flash + i; | ||
944 | else | ||
945 | f = &builtin_flash_types[i - pdata->num_flash + 1]; | ||
946 | if (f->chip_id == id) { | ||
947 | dev_info(&info->pdev->dev, "detect chip id: 0x%x\n", id); | ||
948 | pxa3xx_nand_config_flash(info, f); | ||
949 | return 0; | ||
950 | } | ||
951 | } | ||
952 | |||
953 | dev_warn(&info->pdev->dev, | ||
954 | "failed to detect configured nand flash; found %04x instead of\n", | ||
955 | id); | ||
956 | fail_detect: | ||
957 | return -ENODEV; | ||
958 | } | ||
959 | |||
960 | /* the maximum possible buffer size for large page with OOB data | 825 | /* the maximum possible buffer size for large page with OOB data |
961 | * is: 2048 + 64 = 2112 bytes, allocate a page here for both the | 826 | * is: 2048 + 64 = 2112 bytes, allocate a page here for both the |
962 | * data buffer and the DMA descriptor | 827 | * data buffer and the DMA descriptor |
@@ -998,82 +863,144 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info) | |||
998 | return 0; | 863 | return 0; |
999 | } | 864 | } |
1000 | 865 | ||
1001 | static struct nand_ecclayout hw_smallpage_ecclayout = { | 866 | static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info) |
1002 | .eccbytes = 6, | 867 | { |
1003 | .eccpos = {8, 9, 10, 11, 12, 13 }, | 868 | struct mtd_info *mtd = info->mtd; |
1004 | .oobfree = { {2, 6} } | 869 | struct nand_chip *chip = mtd->priv; |
1005 | }; | ||
1006 | 870 | ||
1007 | static struct nand_ecclayout hw_largepage_ecclayout = { | 871 | /* use the common timing to make a try */ |
1008 | .eccbytes = 24, | 872 | pxa3xx_nand_config_flash(info, &builtin_flash_types[0]); |
1009 | .eccpos = { | 873 | chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0); |
1010 | 40, 41, 42, 43, 44, 45, 46, 47, | 874 | if (info->is_ready) |
1011 | 48, 49, 50, 51, 52, 53, 54, 55, | 875 | return 1; |
1012 | 56, 57, 58, 59, 60, 61, 62, 63}, | 876 | else |
1013 | .oobfree = { {2, 38} } | 877 | return 0; |
1014 | }; | 878 | } |
1015 | 879 | ||
1016 | static void pxa3xx_nand_init_mtd(struct mtd_info *mtd, | 880 | static int pxa3xx_nand_scan(struct mtd_info *mtd) |
1017 | struct pxa3xx_nand_info *info) | ||
1018 | { | 881 | { |
1019 | struct nand_chip *this = &info->nand_chip; | 882 | struct pxa3xx_nand_info *info = mtd->priv; |
1020 | 883 | struct platform_device *pdev = info->pdev; | |
1021 | this->options = (info->reg_ndcr & NDCR_DWIDTH_C) ? NAND_BUSWIDTH_16: 0; | 884 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; |
1022 | 885 | struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} }; | |
1023 | this->waitfunc = pxa3xx_nand_waitfunc; | 886 | const struct pxa3xx_nand_flash *f = NULL; |
1024 | this->select_chip = pxa3xx_nand_select_chip; | 887 | struct nand_chip *chip = mtd->priv; |
1025 | this->dev_ready = pxa3xx_nand_dev_ready; | 888 | uint32_t id = -1; |
1026 | this->cmdfunc = pxa3xx_nand_cmdfunc; | 889 | uint64_t chipsize; |
1027 | this->read_word = pxa3xx_nand_read_word; | 890 | int i, ret, num; |
1028 | this->read_byte = pxa3xx_nand_read_byte; | 891 | |
1029 | this->read_buf = pxa3xx_nand_read_buf; | 892 | if (pdata->keep_config && !pxa3xx_nand_detect_config(info)) |
1030 | this->write_buf = pxa3xx_nand_write_buf; | 893 | goto KEEP_CONFIG; |
1031 | this->verify_buf = pxa3xx_nand_verify_buf; | 894 | |
1032 | 895 | ret = pxa3xx_nand_sensing(info); | |
1033 | this->ecc.mode = NAND_ECC_HW; | 896 | if (!ret) { |
1034 | this->ecc.hwctl = pxa3xx_nand_ecc_hwctl; | 897 | kfree(mtd); |
1035 | this->ecc.calculate = pxa3xx_nand_ecc_calculate; | 898 | info->mtd = NULL; |
1036 | this->ecc.correct = pxa3xx_nand_ecc_correct; | 899 | printk(KERN_INFO "There is no nand chip on cs 0!\n"); |
1037 | this->ecc.size = info->page_size; | 900 | |
1038 | 901 | return -EINVAL; | |
1039 | if (info->page_size == 2048) | 902 | } |
1040 | this->ecc.layout = &hw_largepage_ecclayout; | 903 | |
904 | chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0); | ||
905 | id = *((uint16_t *)(info->data_buff)); | ||
906 | if (id != 0) | ||
907 | printk(KERN_INFO "Detect a flash id %x\n", id); | ||
908 | else { | ||
909 | kfree(mtd); | ||
910 | info->mtd = NULL; | ||
911 | printk(KERN_WARNING "Read ID 0; the timing may be set wrong\n"); | ||
912 | |||
913 | return -EINVAL; | ||
914 | } | ||
915 | |||
916 | num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; | ||
917 | for (i = 0; i < num; i++) { | ||
918 | if (i < pdata->num_flash) | ||
919 | f = pdata->flash + i; | ||
920 | else | ||
921 | f = &builtin_flash_types[i - pdata->num_flash + 1]; | ||
922 | |||
923 | /* find the chip in the default list */ | ||
924 | if (f->chip_id == id) | ||
925 | break; | ||
926 | } | ||
927 | |||
928 | if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) { | ||
929 | kfree(mtd); | ||
930 | info->mtd = NULL; | ||
931 | printk(KERN_ERR "ERROR: flash not defined\n"); | ||
932 | |||
933 | return -EINVAL; | ||
934 | } | ||
935 | |||
936 | pxa3xx_nand_config_flash(info, f); | ||
937 | pxa3xx_flash_ids[0].name = f->name; | ||
938 | pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff; | ||
939 | pxa3xx_flash_ids[0].pagesize = f->page_size; | ||
940 | chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size; | ||
941 | pxa3xx_flash_ids[0].chipsize = chipsize >> 20; | ||
942 | pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block; | ||
943 | if (f->flash_width == 16) | ||
944 | pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16; | ||
945 | KEEP_CONFIG: | ||
946 | if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids)) | ||
947 | return -ENODEV; | ||
948 | /* calculate addressing information */ | ||
949 | info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1; | ||
950 | info->oob_buff = info->data_buff + mtd->writesize; | ||
951 | if ((mtd->size >> chip->page_shift) > 65536) | ||
952 | info->row_addr_cycles = 3; | ||
1041 | else | 953 | else |
1042 | this->ecc.layout = &hw_smallpage_ecclayout; | 954 | info->row_addr_cycles = 2; |
955 | mtd->name = mtd_names[0]; | ||
956 | chip->ecc.mode = NAND_ECC_HW; | ||
957 | chip->ecc.size = f->page_size; | ||
958 | |||
959 | chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0; | ||
960 | chip->options |= NAND_NO_AUTOINCR; | ||
961 | chip->options |= NAND_NO_READRDY; | ||
1043 | 962 | ||
1044 | this->chip_delay = 25; | 963 | return nand_scan_tail(mtd); |
1045 | } | 964 | } |
1046 | 965 | ||
1047 | static int pxa3xx_nand_probe(struct platform_device *pdev) | 966 | static |
967 | struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev) | ||
1048 | { | 968 | { |
1049 | struct pxa3xx_nand_platform_data *pdata; | ||
1050 | struct pxa3xx_nand_info *info; | 969 | struct pxa3xx_nand_info *info; |
1051 | struct nand_chip *this; | 970 | struct nand_chip *chip; |
1052 | struct mtd_info *mtd; | 971 | struct mtd_info *mtd; |
1053 | struct resource *r; | 972 | struct resource *r; |
1054 | int ret = 0, irq; | 973 | int ret, irq; |
1055 | |||
1056 | pdata = pdev->dev.platform_data; | ||
1057 | |||
1058 | if (!pdata) { | ||
1059 | dev_err(&pdev->dev, "no platform data defined\n"); | ||
1060 | return -ENODEV; | ||
1061 | } | ||
1062 | 974 | ||
1063 | mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info), | 975 | mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info), |
1064 | GFP_KERNEL); | 976 | GFP_KERNEL); |
1065 | if (!mtd) { | 977 | if (!mtd) { |
1066 | dev_err(&pdev->dev, "failed to allocate memory\n"); | 978 | dev_err(&pdev->dev, "failed to allocate memory\n"); |
1067 | return -ENOMEM; | 979 | return NULL; |
1068 | } | 980 | } |
1069 | 981 | ||
1070 | info = (struct pxa3xx_nand_info *)(&mtd[1]); | 982 | info = (struct pxa3xx_nand_info *)(&mtd[1]); |
983 | chip = (struct nand_chip *)(&mtd[1]); | ||
1071 | info->pdev = pdev; | 984 | info->pdev = pdev; |
1072 | 985 | info->mtd = mtd; | |
1073 | this = &info->nand_chip; | ||
1074 | mtd->priv = info; | 986 | mtd->priv = info; |
1075 | mtd->owner = THIS_MODULE; | 987 | mtd->owner = THIS_MODULE; |
1076 | 988 | ||
989 | chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; | ||
990 | chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; | ||
991 | chip->controller = &info->controller; | ||
992 | chip->waitfunc = pxa3xx_nand_waitfunc; | ||
993 | chip->select_chip = pxa3xx_nand_select_chip; | ||
994 | chip->dev_ready = pxa3xx_nand_dev_ready; | ||
995 | chip->cmdfunc = pxa3xx_nand_cmdfunc; | ||
996 | chip->read_word = pxa3xx_nand_read_word; | ||
997 | chip->read_byte = pxa3xx_nand_read_byte; | ||
998 | chip->read_buf = pxa3xx_nand_read_buf; | ||
999 | chip->write_buf = pxa3xx_nand_write_buf; | ||
1000 | chip->verify_buf = pxa3xx_nand_verify_buf; | ||
1001 | |||
1002 | spin_lock_init(&chip->controller->lock); | ||
1003 | init_waitqueue_head(&chip->controller->wq); | ||
1077 | info->clk = clk_get(&pdev->dev, NULL); | 1004 | info->clk = clk_get(&pdev->dev, NULL); |
1078 | if (IS_ERR(info->clk)) { | 1005 | if (IS_ERR(info->clk)) { |
1079 | dev_err(&pdev->dev, "failed to get nand clock\n"); | 1006 | dev_err(&pdev->dev, "failed to get nand clock\n"); |
@@ -1141,43 +1068,12 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) | |||
1141 | goto fail_free_buf; | 1068 | goto fail_free_buf; |
1142 | } | 1069 | } |
1143 | 1070 | ||
1144 | ret = pxa3xx_nand_detect_flash(info, pdata); | 1071 | platform_set_drvdata(pdev, info); |
1145 | if (ret) { | ||
1146 | dev_err(&pdev->dev, "failed to detect flash\n"); | ||
1147 | ret = -ENODEV; | ||
1148 | goto fail_free_irq; | ||
1149 | } | ||
1150 | |||
1151 | pxa3xx_nand_init_mtd(mtd, info); | ||
1152 | |||
1153 | platform_set_drvdata(pdev, mtd); | ||
1154 | |||
1155 | if (nand_scan(mtd, 1)) { | ||
1156 | dev_err(&pdev->dev, "failed to scan nand\n"); | ||
1157 | ret = -ENXIO; | ||
1158 | goto fail_free_irq; | ||
1159 | } | ||
1160 | |||
1161 | #ifdef CONFIG_MTD_PARTITIONS | ||
1162 | if (mtd_has_cmdlinepart()) { | ||
1163 | static const char *probes[] = { "cmdlinepart", NULL }; | ||
1164 | struct mtd_partition *parts; | ||
1165 | int nr_parts; | ||
1166 | |||
1167 | nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0); | ||
1168 | |||
1169 | if (nr_parts) | ||
1170 | return add_mtd_partitions(mtd, parts, nr_parts); | ||
1171 | } | ||
1172 | 1072 | ||
1173 | return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); | 1073 | return info; |
1174 | #else | ||
1175 | return 0; | ||
1176 | #endif | ||
1177 | 1074 | ||
1178 | fail_free_irq: | ||
1179 | free_irq(irq, info); | ||
1180 | fail_free_buf: | 1075 | fail_free_buf: |
1076 | free_irq(irq, info); | ||
1181 | if (use_dma) { | 1077 | if (use_dma) { |
1182 | pxa_free_dma(info->data_dma_ch); | 1078 | pxa_free_dma(info->data_dma_ch); |
1183 | dma_free_coherent(&pdev->dev, info->data_buff_size, | 1079 | dma_free_coherent(&pdev->dev, info->data_buff_size, |
@@ -1193,22 +1089,18 @@ fail_put_clk: | |||
1193 | clk_put(info->clk); | 1089 | clk_put(info->clk); |
1194 | fail_free_mtd: | 1090 | fail_free_mtd: |
1195 | kfree(mtd); | 1091 | kfree(mtd); |
1196 | return ret; | 1092 | return NULL; |
1197 | } | 1093 | } |
1198 | 1094 | ||
1199 | static int pxa3xx_nand_remove(struct platform_device *pdev) | 1095 | static int pxa3xx_nand_remove(struct platform_device *pdev) |
1200 | { | 1096 | { |
1201 | struct mtd_info *mtd = platform_get_drvdata(pdev); | 1097 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); |
1202 | struct pxa3xx_nand_info *info = mtd->priv; | 1098 | struct mtd_info *mtd = info->mtd; |
1203 | struct resource *r; | 1099 | struct resource *r; |
1204 | int irq; | 1100 | int irq; |
1205 | 1101 | ||
1206 | platform_set_drvdata(pdev, NULL); | 1102 | platform_set_drvdata(pdev, NULL); |
1207 | 1103 | ||
1208 | del_mtd_device(mtd); | ||
1209 | #ifdef CONFIG_MTD_PARTITIONS | ||
1210 | del_mtd_partitions(mtd); | ||
1211 | #endif | ||
1212 | irq = platform_get_irq(pdev, 0); | 1104 | irq = platform_get_irq(pdev, 0); |
1213 | if (irq >= 0) | 1105 | if (irq >= 0) |
1214 | free_irq(irq, info); | 1106 | free_irq(irq, info); |
@@ -1226,17 +1118,62 @@ static int pxa3xx_nand_remove(struct platform_device *pdev) | |||
1226 | clk_disable(info->clk); | 1118 | clk_disable(info->clk); |
1227 | clk_put(info->clk); | 1119 | clk_put(info->clk); |
1228 | 1120 | ||
1229 | kfree(mtd); | 1121 | if (mtd) { |
1122 | del_mtd_device(mtd); | ||
1123 | #ifdef CONFIG_MTD_PARTITIONS | ||
1124 | del_mtd_partitions(mtd); | ||
1125 | #endif | ||
1126 | kfree(mtd); | ||
1127 | } | ||
1230 | return 0; | 1128 | return 0; |
1231 | } | 1129 | } |
1232 | 1130 | ||
1131 | static int pxa3xx_nand_probe(struct platform_device *pdev) | ||
1132 | { | ||
1133 | struct pxa3xx_nand_platform_data *pdata; | ||
1134 | struct pxa3xx_nand_info *info; | ||
1135 | |||
1136 | pdata = pdev->dev.platform_data; | ||
1137 | if (!pdata) { | ||
1138 | dev_err(&pdev->dev, "no platform data defined\n"); | ||
1139 | return -ENODEV; | ||
1140 | } | ||
1141 | |||
1142 | info = alloc_nand_resource(pdev); | ||
1143 | if (info == NULL) | ||
1144 | return -ENOMEM; | ||
1145 | |||
1146 | if (pxa3xx_nand_scan(info->mtd)) { | ||
1147 | dev_err(&pdev->dev, "failed to scan nand\n"); | ||
1148 | pxa3xx_nand_remove(pdev); | ||
1149 | return -ENODEV; | ||
1150 | } | ||
1151 | |||
1152 | #ifdef CONFIG_MTD_PARTITIONS | ||
1153 | if (mtd_has_cmdlinepart()) { | ||
1154 | const char *probes[] = { "cmdlinepart", NULL }; | ||
1155 | struct mtd_partition *parts; | ||
1156 | int nr_parts; | ||
1157 | |||
1158 | nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0); | ||
1159 | |||
1160 | if (nr_parts) | ||
1161 | return add_mtd_partitions(info->mtd, parts, nr_parts); | ||
1162 | } | ||
1163 | |||
1164 | return add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts); | ||
1165 | #else | ||
1166 | return 0; | ||
1167 | #endif | ||
1168 | } | ||
1169 | |||
1233 | #ifdef CONFIG_PM | 1170 | #ifdef CONFIG_PM |
1234 | static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) | 1171 | static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) |
1235 | { | 1172 | { |
1236 | struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev); | 1173 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); |
1237 | struct pxa3xx_nand_info *info = mtd->priv; | 1174 | struct mtd_info *mtd = info->mtd; |
1238 | 1175 | ||
1239 | if (info->state != STATE_READY) { | 1176 | if (info->state) { |
1240 | dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); | 1177 | dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); |
1241 | return -EAGAIN; | 1178 | return -EAGAIN; |
1242 | } | 1179 | } |
@@ -1246,8 +1183,8 @@ static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) | |||
1246 | 1183 | ||
1247 | static int pxa3xx_nand_resume(struct platform_device *pdev) | 1184 | static int pxa3xx_nand_resume(struct platform_device *pdev) |
1248 | { | 1185 | { |
1249 | struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev); | 1186 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); |
1250 | struct pxa3xx_nand_info *info = mtd->priv; | 1187 | struct mtd_info *mtd = info->mtd; |
1251 | 1188 | ||
1252 | nand_writel(info, NDTR0CS0, info->ndtr0cs0); | 1189 | nand_writel(info, NDTR0CS0, info->ndtr0cs0); |
1253 | nand_writel(info, NDTR1CS0, info->ndtr1cs0); | 1190 | nand_writel(info, NDTR1CS0, info->ndtr1cs0); |
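The pxa3xx rework above replaces the driver's single nand_scan() call with the split nand_scan_ident()/nand_scan_tail() sequence, so controller-specific setup (ECC mode, bus-width options, addressing cycles) can run between chip identification and the final scan. A minimal sketch of that pattern (the ECC and option choices below are illustrative, not the driver's exact settings):

    /* Sketch: split the NAND scan so per-chip setup can run in between. */
    static int split_scan_sketch(struct mtd_info *mtd)
    {
            struct nand_chip *chip = mtd->priv;

            /* Phase 1: read the chip ID and fill in the geometry. */
            if (nand_scan_ident(mtd, 1, NULL))
                    return -ENODEV;

            /* Controller-specific fixups go here. */
            chip->ecc.mode = NAND_ECC_HW;
            chip->options |= NAND_NO_AUTOINCR | NAND_NO_READRDY;

            /* Phase 2: finish the scan with the adjusted settings. */
            return nand_scan_tail(mtd);
    }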
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 14a49abe057..f591f615d3f 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c | |||
@@ -629,6 +629,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
629 | { | 629 | { |
630 | struct omap_onenand_platform_data *pdata; | 630 | struct omap_onenand_platform_data *pdata; |
631 | struct omap2_onenand *c; | 631 | struct omap2_onenand *c; |
632 | struct onenand_chip *this; | ||
632 | int r; | 633 | int r; |
633 | 634 | ||
634 | pdata = pdev->dev.platform_data; | 635 | pdata = pdev->dev.platform_data; |
@@ -726,9 +727,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
726 | 727 | ||
727 | c->mtd.dev.parent = &pdev->dev; | 728 | c->mtd.dev.parent = &pdev->dev; |
728 | 729 | ||
730 | this = &c->onenand; | ||
729 | if (c->dma_channel >= 0) { | 731 | if (c->dma_channel >= 0) { |
730 | struct onenand_chip *this = &c->onenand; | ||
731 | |||
732 | this->wait = omap2_onenand_wait; | 732 | this->wait = omap2_onenand_wait; |
733 | if (cpu_is_omap34xx()) { | 733 | if (cpu_is_omap34xx()) { |
734 | this->read_bufferram = omap3_onenand_read_bufferram; | 734 | this->read_bufferram = omap3_onenand_read_bufferram; |
@@ -749,6 +749,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
749 | c->onenand.disable = omap2_onenand_disable; | 749 | c->onenand.disable = omap2_onenand_disable; |
750 | } | 750 | } |
751 | 751 | ||
752 | if (pdata->skip_initial_unlocking) | ||
753 | this->options |= ONENAND_SKIP_INITIAL_UNLOCKING; | ||
754 | |||
752 | if ((r = onenand_scan(&c->mtd, 1)) < 0) | 755 | if ((r = onenand_scan(&c->mtd, 1)) < 0) |
753 | goto err_release_regulator; | 756 | goto err_release_regulator; |
754 | 757 | ||
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index bac41caa8df..56a8b2005bd 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c | |||
@@ -1132,6 +1132,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from, | |||
1132 | onenand_update_bufferram(mtd, from, !ret); | 1132 | onenand_update_bufferram(mtd, from, !ret); |
1133 | if (ret == -EBADMSG) | 1133 | if (ret == -EBADMSG) |
1134 | ret = 0; | 1134 | ret = 0; |
1135 | if (ret) | ||
1136 | break; | ||
1135 | } | 1137 | } |
1136 | 1138 | ||
1137 | this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen); | 1139 | this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen); |
@@ -1646,11 +1648,10 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, | |||
1646 | int ret = 0; | 1648 | int ret = 0; |
1647 | int thislen, column; | 1649 | int thislen, column; |
1648 | 1650 | ||
1651 | column = addr & (this->writesize - 1); | ||
1652 | |||
1649 | while (len != 0) { | 1653 | while (len != 0) { |
1650 | thislen = min_t(int, this->writesize, len); | 1654 | thislen = min_t(int, this->writesize - column, len); |
1651 | column = addr & (this->writesize - 1); | ||
1652 | if (column + thislen > this->writesize) | ||
1653 | thislen = this->writesize - column; | ||
1654 | 1655 | ||
1655 | this->command(mtd, ONENAND_CMD_READ, addr, this->writesize); | 1656 | this->command(mtd, ONENAND_CMD_READ, addr, this->writesize); |
1656 | 1657 | ||
@@ -1664,12 +1665,13 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, | |||
1664 | 1665 | ||
1665 | this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize); | 1666 | this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize); |
1666 | 1667 | ||
1667 | if (memcmp(buf, this->verify_buf, thislen)) | 1668 | if (memcmp(buf, this->verify_buf + column, thislen)) |
1668 | return -EBADMSG; | 1669 | return -EBADMSG; |
1669 | 1670 | ||
1670 | len -= thislen; | 1671 | len -= thislen; |
1671 | buf += thislen; | 1672 | buf += thislen; |
1672 | addr += thislen; | 1673 | addr += thislen; |
1674 | column = 0; | ||
1673 | } | 1675 | } |
1674 | 1676 | ||
1675 | return 0; | 1677 | return 0; |
@@ -4083,7 +4085,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) | |||
4083 | mtd->writebufsize = mtd->writesize; | 4085 | mtd->writebufsize = mtd->writesize; |
4084 | 4086 | ||
4085 | /* Unlock whole block */ | 4087 | /* Unlock whole block */ |
4086 | this->unlock_all(mtd); | 4088 | if (!(this->options & ONENAND_SKIP_INITIAL_UNLOCKING)) |
4089 | this->unlock_all(mtd); | ||
4087 | 4090 | ||
4088 | ret = this->scan_bbt(mtd); | 4091 | ret = this->scan_bbt(mtd); |
4089 | if ((!FLEXONENAND(this)) || ret) | 4092 | if ((!FLEXONENAND(this)) || ret) |
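The onenand_verify() fix above handles writes that start mid-page: the column offset is computed once from the start address, the first chunk is compared at that offset inside the page buffer, and every later chunk starts at column 0. A sketch of the loop, with the device read reduced to a placeholder and kernel headers assumed:

    /* Sketch: verify a buffer against page-sized device reads when the
     * start address is not page aligned. */
    static int verify_sketch(const u_char *buf, loff_t addr, size_t len,
                             int writesize, const u_char *page_buf)
    {
            int column = addr & (writesize - 1);

            while (len != 0) {
                    int thislen = min_t(int, writesize - column, len);

                    /* ... read the page containing 'addr' into page_buf ... */
                    if (memcmp(buf, page_buf + column, thislen))
                            return -EBADMSG;

                    len -= thislen;
                    buf += thislen;
                    addr += thislen;
                    column = 0;     /* later pages compare from offset 0 */
            }
            return 0;
    }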
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index ac0d6a8613b..2b0daae4018 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c | |||
@@ -64,12 +64,16 @@ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) | |||
64 | SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET); | 64 | SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET); |
65 | 65 | ||
66 | char *vendor = kmalloc(vendor_len, GFP_KERNEL); | 66 | char *vendor = kmalloc(vendor_len, GFP_KERNEL); |
67 | if (!vendor) | ||
68 | goto error1; | ||
67 | memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len); | 69 | memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len); |
68 | vendor[vendor_len] = 0; | 70 | vendor[vendor_len] = 0; |
69 | 71 | ||
70 | /* Initialize sysfs attributes */ | 72 | /* Initialize sysfs attributes */ |
71 | vendor_attribute = | 73 | vendor_attribute = |
72 | kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL); | 74 | kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL); |
75 | if (!vendor_attribute) | ||
76 | goto error2; | ||
73 | 77 | ||
74 | sysfs_attr_init(&vendor_attribute->dev_attr.attr); | 78 | sysfs_attr_init(&vendor_attribute->dev_attr.attr); |
75 | 79 | ||
@@ -83,12 +87,24 @@ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) | |||
83 | /* Create array of pointers to the attributes */ | 87 | /* Create array of pointers to the attributes */ |
84 | attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1), | 88 | attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1), |
85 | GFP_KERNEL); | 89 | GFP_KERNEL); |
90 | if (!attributes) | ||
91 | goto error3; | ||
86 | attributes[0] = &vendor_attribute->dev_attr.attr; | 92 | attributes[0] = &vendor_attribute->dev_attr.attr; |
87 | 93 | ||
88 | /* Finally create the attribute group */ | 94 | /* Finally create the attribute group */ |
89 | attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); | 95 | attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); |
96 | if (!attr_group) | ||
97 | goto error4; | ||
90 | attr_group->attrs = attributes; | 98 | attr_group->attrs = attributes; |
91 | return attr_group; | 99 | return attr_group; |
100 | error4: | ||
101 | kfree(attributes); | ||
102 | error3: | ||
103 | kfree(vendor_attribute); | ||
104 | error2: | ||
105 | kfree(vendor); | ||
106 | error1: | ||
107 | return NULL; | ||
92 | } | 108 | } |
93 | 109 | ||
94 | void sm_delete_sysfs_attributes(struct sm_ftl *ftl) | 110 | void sm_delete_sysfs_attributes(struct sm_ftl *ftl) |
@@ -1178,6 +1194,8 @@ static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
1178 | } | 1194 | } |
1179 | 1195 | ||
1180 | ftl->disk_attributes = sm_create_sysfs_attributes(ftl); | 1196 | ftl->disk_attributes = sm_create_sysfs_attributes(ftl); |
1197 | if (!ftl->disk_attributes) | ||
1198 | goto error6; | ||
1181 | trans->disk_attributes = ftl->disk_attributes; | 1199 | trans->disk_attributes = ftl->disk_attributes; |
1182 | 1200 | ||
1183 | sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d", | 1201 | sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d", |
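sm_create_sysfs_attributes() previously dereferenced every allocation unchecked; the change above adds the standard kernel unwinding idiom, where each failure jumps to a label that frees everything allocated before it, in reverse order. A generic sketch of the idiom (the struct and sizes are placeholders):

    struct ctx { char *a, *b; };

    static struct ctx *setup_sketch(void)
    {
            struct ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

            if (!ctx)
                    goto err_ctx;
            ctx->a = kmalloc(16, GFP_KERNEL);
            if (!ctx->a)
                    goto err_a;
            ctx->b = kmalloc(16, GFP_KERNEL);
            if (!ctx->b)
                    goto err_b;
            return ctx;             /* success: everything allocated */
    err_b:
            kfree(ctx->a);
    err_a:
            kfree(ctx);
    err_ctx:
            return NULL;            /* each label frees what precedes it */
    }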
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c index 161feeb7b8b..627d4e2466a 100644 --- a/drivers/mtd/tests/mtd_speedtest.c +++ b/drivers/mtd/tests/mtd_speedtest.c | |||
@@ -16,7 +16,7 @@ | |||
16 | * | 16 | * |
17 | * Test read and write speed of a MTD device. | 17 | * Test read and write speed of a MTD device. |
18 | * | 18 | * |
19 | * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> | 19 | * Author: Adrian Hunter <adrian.hunter@nokia.com> |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
@@ -33,6 +33,11 @@ static int dev; | |||
33 | module_param(dev, int, S_IRUGO); | 33 | module_param(dev, int, S_IRUGO); |
34 | MODULE_PARM_DESC(dev, "MTD device number to use"); | 34 | MODULE_PARM_DESC(dev, "MTD device number to use"); |
35 | 35 | ||
36 | static int count; | ||
37 | module_param(count, int, S_IRUGO); | ||
38 | MODULE_PARM_DESC(count, "Maximum number of eraseblocks to use " | ||
39 | "(0 means use all)"); | ||
40 | |||
36 | static struct mtd_info *mtd; | 41 | static struct mtd_info *mtd; |
37 | static unsigned char *iobuf; | 42 | static unsigned char *iobuf; |
38 | static unsigned char *bbt; | 43 | static unsigned char *bbt; |
@@ -89,6 +94,33 @@ static int erase_eraseblock(int ebnum) | |||
89 | return 0; | 94 | return 0; |
90 | } | 95 | } |
91 | 96 | ||
97 | static int multiblock_erase(int ebnum, int blocks) | ||
98 | { | ||
99 | int err; | ||
100 | struct erase_info ei; | ||
101 | loff_t addr = ebnum * mtd->erasesize; | ||
102 | |||
103 | memset(&ei, 0, sizeof(struct erase_info)); | ||
104 | ei.mtd = mtd; | ||
105 | ei.addr = addr; | ||
106 | ei.len = mtd->erasesize * blocks; | ||
107 | |||
108 | err = mtd->erase(mtd, &ei); | ||
109 | if (err) { | ||
110 | printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n", | ||
111 | err, ebnum, blocks); | ||
112 | return err; | ||
113 | } | ||
114 | |||
115 | if (ei.state == MTD_ERASE_FAILED) { | ||
116 | printk(PRINT_PREF "some erase error occurred at EB %d," | ||
117 | "blocks %d\n", ebnum, blocks); | ||
118 | return -EIO; | ||
119 | } | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | |||
92 | static int erase_whole_device(void) | 124 | static int erase_whole_device(void) |
93 | { | 125 | { |
94 | int err; | 126 | int err; |
@@ -282,13 +314,16 @@ static inline void stop_timing(void) | |||
282 | 314 | ||
283 | static long calc_speed(void) | 315 | static long calc_speed(void) |
284 | { | 316 | { |
285 | long ms, k, speed; | 317 | uint64_t k; |
318 | long ms; | ||
286 | 319 | ||
287 | ms = (finish.tv_sec - start.tv_sec) * 1000 + | 320 | ms = (finish.tv_sec - start.tv_sec) * 1000 + |
288 | (finish.tv_usec - start.tv_usec) / 1000; | 321 | (finish.tv_usec - start.tv_usec) / 1000; |
289 | k = goodebcnt * mtd->erasesize / 1024; | 322 | if (ms == 0) |
290 | speed = (k * 1000) / ms; | 323 | return 0; |
291 | return speed; | 324 | k = goodebcnt * (mtd->erasesize / 1024) * 1000; |
325 | do_div(k, ms); | ||
326 | return k; | ||
292 | } | 327 | } |
293 | 328 | ||
294 | static int scan_for_bad_eraseblocks(void) | 329 | static int scan_for_bad_eraseblocks(void) |
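calc_speed() switches to do_div() because a plain 64-bit division by a runtime divisor does not link on 32-bit kernels; do_div() divides a u64 in place by a 32-bit divisor and returns the remainder. The shape of the calculation, as a sketch:

    #include <asm/div64.h>

    /* Sketch: KiB/s from a 64-bit byte count over a millisecond interval. */
    static long kib_per_sec(uint64_t bytes, long ms)
    {
            uint64_t k = (bytes / 1024) * 1000;     /* /1024 is a shift */

            if (ms == 0)
                    return 0;       /* guard against very fast runs */
            do_div(k, ms);          /* modifies k in place */
            return k;
    }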
@@ -320,13 +355,16 @@ out: | |||
320 | 355 | ||
321 | static int __init mtd_speedtest_init(void) | 356 | static int __init mtd_speedtest_init(void) |
322 | { | 357 | { |
323 | int err, i; | 358 | int err, i, blocks, j, k; |
324 | long speed; | 359 | long speed; |
325 | uint64_t tmp; | 360 | uint64_t tmp; |
326 | 361 | ||
327 | printk(KERN_INFO "\n"); | 362 | printk(KERN_INFO "\n"); |
328 | printk(KERN_INFO "=================================================\n"); | 363 | printk(KERN_INFO "=================================================\n"); |
329 | printk(PRINT_PREF "MTD device: %d\n", dev); | 364 | if (count) |
365 | printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count); | ||
366 | else | ||
367 | printk(PRINT_PREF "MTD device: %d\n", dev); | ||
330 | 368 | ||
331 | mtd = get_mtd_device(NULL, dev); | 369 | mtd = get_mtd_device(NULL, dev); |
332 | if (IS_ERR(mtd)) { | 370 | if (IS_ERR(mtd)) { |
@@ -353,6 +391,9 @@ static int __init mtd_speedtest_init(void) | |||
353 | (unsigned long long)mtd->size, mtd->erasesize, | 391 | (unsigned long long)mtd->size, mtd->erasesize, |
354 | pgsize, ebcnt, pgcnt, mtd->oobsize); | 392 | pgsize, ebcnt, pgcnt, mtd->oobsize); |
355 | 393 | ||
394 | if (count > 0 && count < ebcnt) | ||
395 | ebcnt = count; | ||
396 | |||
356 | err = -ENOMEM; | 397 | err = -ENOMEM; |
357 | iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); | 398 | iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); |
358 | if (!iobuf) { | 399 | if (!iobuf) { |
@@ -484,6 +525,31 @@ static int __init mtd_speedtest_init(void) | |||
484 | speed = calc_speed(); | 525 | speed = calc_speed(); |
485 | printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed); | 526 | printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed); |
486 | 527 | ||
528 | /* Multi-block erase all eraseblocks */ | ||
529 | for (k = 1; k < 7; k++) { | ||
530 | blocks = 1 << k; | ||
531 | printk(PRINT_PREF "Testing %dx multi-block erase speed\n", | ||
532 | blocks); | ||
533 | start_timing(); | ||
534 | for (i = 0; i < ebcnt; ) { | ||
535 | for (j = 0; j < blocks && (i + j) < ebcnt; j++) | ||
536 | if (bbt[i + j]) | ||
537 | break; | ||
538 | if (j < 1) { | ||
539 | i++; | ||
540 | continue; | ||
541 | } | ||
542 | err = multiblock_erase(i, j); | ||
543 | if (err) | ||
544 | goto out; | ||
545 | cond_resched(); | ||
546 | i += j; | ||
547 | } | ||
548 | stop_timing(); | ||
549 | speed = calc_speed(); | ||
550 | printk(PRINT_PREF "%dx multi-block erase speed is %ld KiB/s\n", | ||
551 | blocks, speed); | ||
552 | } | ||
487 | printk(PRINT_PREF "finished\n"); | 553 | printk(PRINT_PREF "finished\n"); |
488 | out: | 554 | out: |
489 | kfree(iobuf); | 555 | kfree(iobuf); |
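The multi-block erase test batches runs of contiguous good eraseblocks, stopping each batch at the first bad block recorded in the bbt array. The loop's logic, restated as an annotated sketch (multiblock_erase() is the helper added above):

    static int erase_good_runs(unsigned char *bbt, int ebcnt, int blocks)
    {
            int i, j, err;

            for (i = 0; i < ebcnt; ) {
                    /* measure the run of good blocks starting at i */
                    for (j = 0; j < blocks && (i + j) < ebcnt; j++)
                            if (bbt[i + j])
                                    break;
                    if (j < 1) {    /* block i itself is bad: skip it */
                            i++;
                            continue;
                    }
                    err = multiblock_erase(i, j);
                    if (err)
                            return err;
                    i += j;         /* advance past the erased run */
            }
            return 0;
    }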
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c index 11204e8aab5..334eae53a3d 100644 --- a/drivers/mtd/tests/mtd_subpagetest.c +++ b/drivers/mtd/tests/mtd_subpagetest.c | |||
@@ -394,6 +394,11 @@ static int __init mtd_subpagetest_init(void) | |||
394 | } | 394 | } |
395 | 395 | ||
396 | subpgsize = mtd->writesize >> mtd->subpage_sft; | 396 | subpgsize = mtd->writesize >> mtd->subpage_sft; |
397 | tmp = mtd->size; | ||
398 | do_div(tmp, mtd->erasesize); | ||
399 | ebcnt = tmp; | ||
400 | pgcnt = mtd->erasesize / mtd->writesize; | ||
401 | |||
397 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | 402 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " |
398 | "page size %u, subpage size %u, count of eraseblocks %u, " | 403 | "page size %u, subpage size %u, count of eraseblocks %u, " |
399 | "pages per eraseblock %u, OOB size %u\n", | 404 | "pages per eraseblock %u, OOB size %u\n", |
@@ -413,11 +418,6 @@ static int __init mtd_subpagetest_init(void) | |||
413 | goto out; | 418 | goto out; |
414 | } | 419 | } |
415 | 420 | ||
416 | tmp = mtd->size; | ||
417 | do_div(tmp, mtd->erasesize); | ||
418 | ebcnt = tmp; | ||
419 | pgcnt = mtd->erasesize / mtd->writesize; | ||
420 | |||
421 | err = scan_for_bad_eraseblocks(); | 421 | err = scan_for_bad_eraseblocks(); |
422 | if (err) | 422 | if (err) |
423 | goto out; | 423 | goto out; |
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index f142cc21e45..deaa8bc16cf 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c | |||
@@ -711,14 +711,14 @@ static int __devinit a2065_init_one(struct zorro_dev *z, | |||
711 | return -EBUSY; | 711 | return -EBUSY; |
712 | r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM"); | 712 | r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM"); |
713 | if (!r2) { | 713 | if (!r2) { |
714 | release_resource(r1); | 714 | release_mem_region(base_addr, sizeof(struct lance_regs)); |
715 | return -EBUSY; | 715 | return -EBUSY; |
716 | } | 716 | } |
717 | 717 | ||
718 | dev = alloc_etherdev(sizeof(struct lance_private)); | 718 | dev = alloc_etherdev(sizeof(struct lance_private)); |
719 | if (dev == NULL) { | 719 | if (dev == NULL) { |
720 | release_resource(r1); | 720 | release_mem_region(base_addr, sizeof(struct lance_regs)); |
721 | release_resource(r2); | 721 | release_mem_region(mem_start, A2065_RAM_SIZE); |
722 | return -ENOMEM; | 722 | return -ENOMEM; |
723 | } | 723 | } |
724 | 724 | ||
@@ -764,8 +764,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z, | |||
764 | 764 | ||
765 | err = register_netdev(dev); | 765 | err = register_netdev(dev); |
766 | if (err) { | 766 | if (err) { |
767 | release_resource(r1); | 767 | release_mem_region(base_addr, sizeof(struct lance_regs)); |
768 | release_resource(r2); | 768 | release_mem_region(mem_start, A2065_RAM_SIZE); |
769 | free_netdev(dev); | 769 | free_netdev(dev); |
770 | return err; | 770 | return err; |
771 | } | 771 | } |
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index 7ca0eded256..b7f45cd756a 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c | |||
@@ -182,14 +182,14 @@ static int __devinit ariadne_init_one(struct zorro_dev *z, | |||
182 | return -EBUSY; | 182 | return -EBUSY; |
183 | r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM"); | 183 | r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM"); |
184 | if (!r2) { | 184 | if (!r2) { |
185 | release_resource(r1); | 185 | release_mem_region(base_addr, sizeof(struct Am79C960)); |
186 | return -EBUSY; | 186 | return -EBUSY; |
187 | } | 187 | } |
188 | 188 | ||
189 | dev = alloc_etherdev(sizeof(struct ariadne_private)); | 189 | dev = alloc_etherdev(sizeof(struct ariadne_private)); |
190 | if (dev == NULL) { | 190 | if (dev == NULL) { |
191 | release_resource(r1); | 191 | release_mem_region(base_addr, sizeof(struct Am79C960)); |
192 | release_resource(r2); | 192 | release_mem_region(mem_start, ARIADNE_RAM_SIZE); |
193 | return -ENOMEM; | 193 | return -ENOMEM; |
194 | } | 194 | } |
195 | 195 | ||
@@ -213,8 +213,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z, | |||
213 | 213 | ||
214 | err = register_netdev(dev); | 214 | err = register_netdev(dev); |
215 | if (err) { | 215 | if (err) { |
216 | release_resource(r1); | 216 | release_mem_region(base_addr, sizeof(struct Am79C960)); |
217 | release_resource(r2); | 217 | release_mem_region(mem_start, ARIADNE_RAM_SIZE); |
218 | free_netdev(dev); | 218 | free_netdev(dev); |
219 | return err; | 219 | return err; |
220 | } | 220 | } |
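Both Amiga Zorro drivers shared the same error-path bug: regions claimed with request_mem_region() were released with release_resource(), which unlinks the resource but never frees the structure that request_mem_region() allocated. The correct pairing, as a minimal sketch:

    /* Sketch: request_mem_region() must be undone with
     * release_mem_region() on the same base and size. */
    static int claim_region_sketch(unsigned long base, unsigned long size)
    {
            struct resource *r = request_mem_region(base, size, "example");

            if (!r)
                    return -EBUSY;

            /* ... use the region ... */

            release_mem_region(base, size);
            return 0;
    }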
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 338bea147c6..16d6fe95469 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1482,21 +1482,16 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | |||
1482 | { | 1482 | { |
1483 | struct sk_buff *skb = *pskb; | 1483 | struct sk_buff *skb = *pskb; |
1484 | struct slave *slave; | 1484 | struct slave *slave; |
1485 | struct net_device *bond_dev; | ||
1486 | struct bonding *bond; | 1485 | struct bonding *bond; |
1487 | 1486 | ||
1488 | slave = bond_slave_get_rcu(skb->dev); | ||
1489 | bond_dev = ACCESS_ONCE(slave->dev->master); | ||
1490 | if (unlikely(!bond_dev)) | ||
1491 | return RX_HANDLER_PASS; | ||
1492 | |||
1493 | skb = skb_share_check(skb, GFP_ATOMIC); | 1487 | skb = skb_share_check(skb, GFP_ATOMIC); |
1494 | if (unlikely(!skb)) | 1488 | if (unlikely(!skb)) |
1495 | return RX_HANDLER_CONSUMED; | 1489 | return RX_HANDLER_CONSUMED; |
1496 | 1490 | ||
1497 | *pskb = skb; | 1491 | *pskb = skb; |
1498 | 1492 | ||
1499 | bond = netdev_priv(bond_dev); | 1493 | slave = bond_slave_get_rcu(skb->dev); |
1494 | bond = slave->bond; | ||
1500 | 1495 | ||
1501 | if (bond->params.arp_interval) | 1496 | if (bond->params.arp_interval) |
1502 | slave->dev->last_rx = jiffies; | 1497 | slave->dev->last_rx = jiffies; |
@@ -1505,10 +1500,10 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | |||
1505 | return RX_HANDLER_EXACT; | 1500 | return RX_HANDLER_EXACT; |
1506 | } | 1501 | } |
1507 | 1502 | ||
1508 | skb->dev = bond_dev; | 1503 | skb->dev = bond->dev; |
1509 | 1504 | ||
1510 | if (bond->params.mode == BOND_MODE_ALB && | 1505 | if (bond->params.mode == BOND_MODE_ALB && |
1511 | bond_dev->priv_flags & IFF_BRIDGE_PORT && | 1506 | bond->dev->priv_flags & IFF_BRIDGE_PORT && |
1512 | skb->pkt_type == PACKET_HOST) { | 1507 | skb->pkt_type == PACKET_HOST) { |
1513 | 1508 | ||
1514 | if (unlikely(skb_cow_head(skb, | 1509 | if (unlikely(skb_cow_head(skb, |
@@ -1516,7 +1511,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | |||
1516 | kfree_skb(skb); | 1511 | kfree_skb(skb); |
1517 | return RX_HANDLER_CONSUMED; | 1512 | return RX_HANDLER_CONSUMED; |
1518 | } | 1513 | } |
1519 | memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN); | 1514 | memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN); |
1520 | } | 1515 | } |
1521 | 1516 | ||
1522 | return RX_HANDLER_ANOTHER; | 1517 | return RX_HANDLER_ANOTHER; |
@@ -1698,20 +1693,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1698 | pr_debug("Error %d calling netdev_set_bond_master\n", res); | 1693 | pr_debug("Error %d calling netdev_set_bond_master\n", res); |
1699 | goto err_restore_mac; | 1694 | goto err_restore_mac; |
1700 | } | 1695 | } |
1701 | res = netdev_rx_handler_register(slave_dev, bond_handle_frame, | ||
1702 | new_slave); | ||
1703 | if (res) { | ||
1704 | pr_debug("Error %d calling netdev_rx_handler_register\n", res); | ||
1705 | goto err_unset_master; | ||
1706 | } | ||
1707 | 1696 | ||
1708 | /* open the slave since the application closed it */ | 1697 | /* open the slave since the application closed it */ |
1709 | res = dev_open(slave_dev); | 1698 | res = dev_open(slave_dev); |
1710 | if (res) { | 1699 | if (res) { |
1711 | pr_debug("Opening slave %s failed\n", slave_dev->name); | 1700 | pr_debug("Opening slave %s failed\n", slave_dev->name); |
1712 | goto err_unreg_rxhandler; | 1701 | goto err_unset_master; |
1713 | } | 1702 | } |
1714 | 1703 | ||
1704 | new_slave->bond = bond; | ||
1715 | new_slave->dev = slave_dev; | 1705 | new_slave->dev = slave_dev; |
1716 | slave_dev->priv_flags |= IFF_BONDING; | 1706 | slave_dev->priv_flags |= IFF_BONDING; |
1717 | 1707 | ||
@@ -1907,6 +1897,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1907 | if (res) | 1897 | if (res) |
1908 | goto err_close; | 1898 | goto err_close; |
1909 | 1899 | ||
1900 | res = netdev_rx_handler_register(slave_dev, bond_handle_frame, | ||
1901 | new_slave); | ||
1902 | if (res) { | ||
1903 | pr_debug("Error %d calling netdev_rx_handler_register\n", res); | ||
1904 | goto err_dest_symlinks; | ||
1905 | } | ||
1906 | |||
1910 | pr_info("%s: enslaving %s as a%s interface with a%s link.\n", | 1907 | pr_info("%s: enslaving %s as a%s interface with a%s link.\n", |
1911 | bond_dev->name, slave_dev->name, | 1908 | bond_dev->name, slave_dev->name, |
1912 | bond_is_active_slave(new_slave) ? "n active" : " backup", | 1909 | bond_is_active_slave(new_slave) ? "n active" : " backup", |
@@ -1916,13 +1913,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1916 | return 0; | 1913 | return 0; |
1917 | 1914 | ||
1918 | /* Undo stages on error */ | 1915 | /* Undo stages on error */ |
1916 | err_dest_symlinks: | ||
1917 | bond_destroy_slave_symlinks(bond_dev, slave_dev); | ||
1918 | |||
1919 | err_close: | 1919 | err_close: |
1920 | dev_close(slave_dev); | 1920 | dev_close(slave_dev); |
1921 | 1921 | ||
1922 | err_unreg_rxhandler: | ||
1923 | netdev_rx_handler_unregister(slave_dev); | ||
1924 | synchronize_net(); | ||
1925 | |||
1926 | err_unset_master: | 1922 | err_unset_master: |
1927 | netdev_set_bond_master(slave_dev, NULL); | 1923 | netdev_set_bond_master(slave_dev, NULL); |
1928 | 1924 | ||
@@ -1988,6 +1984,14 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1988 | return -EINVAL; | 1984 | return -EINVAL; |
1989 | } | 1985 | } |
1990 | 1986 | ||
1987 | /* unregister rx_handler early so bond_handle_frame wouldn't be called | ||
1988 | * for this slave anymore. | ||
1989 | */ | ||
1990 | netdev_rx_handler_unregister(slave_dev); | ||
1991 | write_unlock_bh(&bond->lock); | ||
1992 | synchronize_net(); | ||
1993 | write_lock_bh(&bond->lock); | ||
1994 | |||
1991 | if (!bond->params.fail_over_mac) { | 1995 | if (!bond->params.fail_over_mac) { |
1992 | if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && | 1996 | if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && |
1993 | bond->slave_cnt > 1) | 1997 | bond->slave_cnt > 1) |
@@ -2104,8 +2108,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) | |||
2104 | netif_addr_unlock_bh(bond_dev); | 2108 | netif_addr_unlock_bh(bond_dev); |
2105 | } | 2109 | } |
2106 | 2110 | ||
2107 | netdev_rx_handler_unregister(slave_dev); | ||
2108 | synchronize_net(); | ||
2109 | netdev_set_bond_master(slave_dev, NULL); | 2111 | netdev_set_bond_master(slave_dev, NULL); |
2110 | 2112 | ||
2111 | slave_disable_netpoll(slave); | 2113 | slave_disable_netpoll(slave); |
@@ -2186,6 +2188,12 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2186 | */ | 2188 | */ |
2187 | write_unlock_bh(&bond->lock); | 2189 | write_unlock_bh(&bond->lock); |
2188 | 2190 | ||
2191 | /* unregister rx_handler early so bond_handle_frame wouldn't | ||
2192 | * be called for this slave anymore. | ||
2193 | */ | ||
2194 | netdev_rx_handler_unregister(slave_dev); | ||
2195 | synchronize_net(); | ||
2196 | |||
2189 | if (bond_is_lb(bond)) { | 2197 | if (bond_is_lb(bond)) { |
2190 | /* must be called only after the slave | 2198 | /* must be called only after the slave |
2191 | * has been detached from the list | 2199 | * has been detached from the list |
@@ -2217,8 +2225,6 @@ static int bond_release_all(struct net_device *bond_dev) | |||
2217 | netif_addr_unlock_bh(bond_dev); | 2225 | netif_addr_unlock_bh(bond_dev); |
2218 | } | 2226 | } |
2219 | 2227 | ||
2220 | netdev_rx_handler_unregister(slave_dev); | ||
2221 | synchronize_net(); | ||
2222 | netdev_set_bond_master(slave_dev, NULL); | 2228 | netdev_set_bond_master(slave_dev, NULL); |
2223 | 2229 | ||
2224 | slave_disable_netpoll(slave); | 2230 | slave_disable_netpoll(slave); |
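The bonding changes reorder the rx_handler lifecycle: registration becomes the last step of enslaving, so bond_handle_frame() can rely on slave->bond being set, and unregistration becomes the first step of release. Because synchronize_net() sleeps, the release path must drop the bond lock around it. The release-side ordering, sketched (the locking mirrors the hunks above):

    static void detach_slave_sketch(struct bonding *bond,
                                    struct net_device *slave_dev)
    {
            /* Stop delivery first: no new bond_handle_frame() calls. */
            netdev_rx_handler_unregister(slave_dev);

            /* synchronize_net() may sleep, so release the lock around it. */
            write_unlock_bh(&bond->lock);
            synchronize_net();      /* wait out in-flight rx handlers */
            write_lock_bh(&bond->lock);

            /* From here no CPU is still inside the handler for this slave. */
    }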
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 6b26962fd0e..90736cb4d97 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -187,6 +187,7 @@ struct slave { | |||
187 | struct net_device *dev; /* first - useful for panic debug */ | 187 | struct net_device *dev; /* first - useful for panic debug */ |
188 | struct slave *next; | 188 | struct slave *next; |
189 | struct slave *prev; | 189 | struct slave *prev; |
190 | struct bonding *bond; /* our master */ | ||
190 | int delay; | 191 | int delay; |
191 | unsigned long jiffies; | 192 | unsigned long jiffies; |
192 | unsigned long last_arp_rx; | 193 | unsigned long last_arp_rx; |
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c index e92b2b6cd8c..ae47f23ba93 100644 --- a/drivers/net/davinci_cpdma.c +++ b/drivers/net/davinci_cpdma.c | |||
@@ -76,6 +76,7 @@ struct cpdma_desc { | |||
76 | 76 | ||
77 | struct cpdma_desc_pool { | 77 | struct cpdma_desc_pool { |
78 | u32 phys; | 78 | u32 phys; |
79 | u32 hw_addr; | ||
79 | void __iomem *iomap; /* ioremap map */ | 80 | void __iomem *iomap; /* ioremap map */ |
80 | void *cpumap; /* dma_alloc map */ | 81 | void *cpumap; /* dma_alloc map */ |
81 | int desc_size, mem_size; | 82 | int desc_size, mem_size; |
@@ -137,7 +138,8 @@ struct cpdma_chan { | |||
137 | * abstract out these details | 138 | * abstract out these details |
138 | */ | 139 | */ |
139 | static struct cpdma_desc_pool * | 140 | static struct cpdma_desc_pool * |
140 | cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align) | 141 | cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, |
142 | int size, int align) | ||
141 | { | 143 | { |
142 | int bitmap_size; | 144 | int bitmap_size; |
143 | struct cpdma_desc_pool *pool; | 145 | struct cpdma_desc_pool *pool; |
@@ -161,10 +163,12 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align) | |||
161 | if (phys) { | 163 | if (phys) { |
162 | pool->phys = phys; | 164 | pool->phys = phys; |
163 | pool->iomap = ioremap(phys, size); | 165 | pool->iomap = ioremap(phys, size); |
166 | pool->hw_addr = hw_addr; | ||
164 | } else { | 167 | } else { |
165 | pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, | 168 | pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, |
166 | GFP_KERNEL); | 169 | GFP_KERNEL); |
167 | pool->iomap = (void __force __iomem *)pool->cpumap; | 170 | pool->iomap = (void __force __iomem *)pool->cpumap; |
171 | pool->hw_addr = pool->phys; | ||
168 | } | 172 | } |
169 | 173 | ||
170 | if (pool->iomap) | 174 | if (pool->iomap) |
@@ -201,14 +205,14 @@ static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, | |||
201 | { | 205 | { |
202 | if (!desc) | 206 | if (!desc) |
203 | return 0; | 207 | return 0; |
204 | return pool->phys + (__force dma_addr_t)desc - | 208 | return pool->hw_addr + (__force dma_addr_t)desc - |
205 | (__force dma_addr_t)pool->iomap; | 209 | (__force dma_addr_t)pool->iomap; |
206 | } | 210 | } |
207 | 211 | ||
208 | static inline struct cpdma_desc __iomem * | 212 | static inline struct cpdma_desc __iomem * |
209 | desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) | 213 | desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) |
210 | { | 214 | { |
211 | return dma ? pool->iomap + dma - pool->phys : NULL; | 215 | return dma ? pool->iomap + dma - pool->hw_addr : NULL; |
212 | } | 216 | } |
213 | 217 | ||
214 | static struct cpdma_desc __iomem * | 218 | static struct cpdma_desc __iomem * |
@@ -260,6 +264,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) | |||
260 | 264 | ||
261 | ctlr->pool = cpdma_desc_pool_create(ctlr->dev, | 265 | ctlr->pool = cpdma_desc_pool_create(ctlr->dev, |
262 | ctlr->params.desc_mem_phys, | 266 | ctlr->params.desc_mem_phys, |
267 | ctlr->params.desc_hw_addr, | ||
263 | ctlr->params.desc_mem_size, | 268 | ctlr->params.desc_mem_size, |
264 | ctlr->params.desc_align); | 269 | ctlr->params.desc_align); |
265 | if (!ctlr->pool) { | 270 | if (!ctlr->pool) { |
diff --git a/drivers/net/davinci_cpdma.h b/drivers/net/davinci_cpdma.h index 868e50ebde4..afa19a0c0d8 100644 --- a/drivers/net/davinci_cpdma.h +++ b/drivers/net/davinci_cpdma.h | |||
@@ -33,6 +33,7 @@ struct cpdma_params { | |||
33 | bool has_soft_reset; | 33 | bool has_soft_reset; |
34 | int min_packet_size; | 34 | int min_packet_size; |
35 | u32 desc_mem_phys; | 35 | u32 desc_mem_phys; |
36 | u32 desc_hw_addr; | ||
36 | int desc_mem_size; | 37 | int desc_mem_size; |
37 | int desc_align; | 38 | int desc_align; |
38 | 39 | ||
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 082d6ea6992..baca6bfcb08 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c | |||
@@ -1854,10 +1854,13 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) | |||
1854 | dma_params.rxcp = priv->emac_base + 0x660; | 1854 | dma_params.rxcp = priv->emac_base + 0x660; |
1855 | dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS; | 1855 | dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS; |
1856 | dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE; | 1856 | dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE; |
1857 | dma_params.desc_mem_phys = hw_ram_addr; | 1857 | dma_params.desc_hw_addr = hw_ram_addr; |
1858 | dma_params.desc_mem_size = pdata->ctrl_ram_size; | 1858 | dma_params.desc_mem_size = pdata->ctrl_ram_size; |
1859 | dma_params.desc_align = 16; | 1859 | dma_params.desc_align = 16; |
1860 | 1860 | ||
1861 | dma_params.desc_mem_phys = pdata->no_bd_ram ? 0 : | ||
1862 | (u32 __force)res->start + pdata->ctrl_ram_offset; | ||
1863 | |||
1861 | priv->dma = cpdma_ctlr_create(&dma_params); | 1864 | priv->dma = cpdma_ctlr_create(&dma_params); |
1862 | if (!priv->dma) { | 1865 | if (!priv->dma) { |
1863 | dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n"); | 1866 | dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n"); |
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c index 3a4277f6fac..116cae334da 100644 --- a/drivers/net/mlx4/alloc.c +++ b/drivers/net/mlx4/alloc.c | |||
@@ -62,6 +62,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap) | |||
62 | } else | 62 | } else |
63 | obj = -1; | 63 | obj = -1; |
64 | 64 | ||
65 | if (obj != -1) | ||
66 | --bitmap->avail; | ||
67 | |||
65 | spin_unlock(&bitmap->lock); | 68 | spin_unlock(&bitmap->lock); |
66 | 69 | ||
67 | return obj; | 70 | return obj; |
@@ -101,11 +104,19 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) | |||
101 | } else | 104 | } else |
102 | obj = -1; | 105 | obj = -1; |
103 | 106 | ||
107 | if (obj != -1) | ||
108 | bitmap->avail -= cnt; | ||
109 | |||
104 | spin_unlock(&bitmap->lock); | 110 | spin_unlock(&bitmap->lock); |
105 | 111 | ||
106 | return obj; | 112 | return obj; |
107 | } | 113 | } |
108 | 114 | ||
115 | u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap) | ||
116 | { | ||
117 | return bitmap->avail; | ||
118 | } | ||
119 | |||
109 | void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) | 120 | void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) |
110 | { | 121 | { |
111 | obj &= bitmap->max + bitmap->reserved_top - 1; | 122 | obj &= bitmap->max + bitmap->reserved_top - 1; |
@@ -115,6 +126,7 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) | |||
115 | bitmap->last = min(bitmap->last, obj); | 126 | bitmap->last = min(bitmap->last, obj); |
116 | bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) | 127 | bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) |
117 | & bitmap->mask; | 128 | & bitmap->mask; |
129 | bitmap->avail += cnt; | ||
118 | spin_unlock(&bitmap->lock); | 130 | spin_unlock(&bitmap->lock); |
119 | } | 131 | } |
120 | 132 | ||
@@ -130,6 +142,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, | |||
130 | bitmap->max = num - reserved_top; | 142 | bitmap->max = num - reserved_top; |
131 | bitmap->mask = mask; | 143 | bitmap->mask = mask; |
132 | bitmap->reserved_top = reserved_top; | 144 | bitmap->reserved_top = reserved_top; |
145 | bitmap->avail = num - reserved_top - reserved_bot; | ||
133 | spin_lock_init(&bitmap->lock); | 146 | spin_lock_init(&bitmap->lock); |
134 | bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * | 147 | bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * |
135 | sizeof (long), GFP_KERNEL); | 148 | sizeof (long), GFP_KERNEL); |
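The mlx4 bitmap allocator gains an avail counter so callers can query the number of free entries without scanning the bitmap; every allocation, range allocation, and free adjusts it under the same spinlock that protects the bitmap. A sketch of the single-entry path (struct example_bitmap is a stand-in for the real mlx4_bitmap):

    struct example_bitmap {
            spinlock_t      lock;
            unsigned long   *table;
            u32             max;
            u32             avail;  /* free entries, kept under lock */
    };

    static u32 alloc_one_sketch(struct example_bitmap *bm)
    {
            u32 obj;

            spin_lock(&bm->lock);
            obj = find_first_zero_bit(bm->table, bm->max);
            if (obj < bm->max) {
                    set_bit(obj, bm->table);
                    --bm->avail;            /* mirror the allocation */
            } else {
                    obj = -1;
            }
            spin_unlock(&bm->lock);
            return obj;
    }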
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c index 7cd34e9c7c7..bd8ef9f2fa7 100644 --- a/drivers/net/mlx4/cq.c +++ b/drivers/net/mlx4/cq.c | |||
@@ -198,7 +198,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | |||
198 | u64 mtt_addr; | 198 | u64 mtt_addr; |
199 | int err; | 199 | int err; |
200 | 200 | ||
201 | if (vector >= dev->caps.num_comp_vectors) | 201 | if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool) |
202 | return -EINVAL; | 202 | return -EINVAL; |
203 | 203 | ||
204 | cq->vector = vector; | 204 | cq->vector = vector; |
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c index 21786ad4455..ec4b6d047fe 100644 --- a/drivers/net/mlx4/en_cq.c +++ b/drivers/net/mlx4/en_cq.c | |||
@@ -51,13 +51,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, | |||
51 | int err; | 51 | int err; |
52 | 52 | ||
53 | cq->size = entries; | 53 | cq->size = entries; |
54 | if (mode == RX) { | 54 | if (mode == RX) |
55 | cq->buf_size = cq->size * sizeof(struct mlx4_cqe); | 55 | cq->buf_size = cq->size * sizeof(struct mlx4_cqe); |
56 | cq->vector = ring % mdev->dev->caps.num_comp_vectors; | 56 | else |
57 | } else { | ||
58 | cq->buf_size = sizeof(struct mlx4_cqe); | 57 | cq->buf_size = sizeof(struct mlx4_cqe); |
59 | cq->vector = 0; | ||
60 | } | ||
61 | 58 | ||
62 | cq->ring = ring; | 59 | cq->ring = ring; |
63 | cq->is_tx = mode; | 60 | cq->is_tx = mode; |
@@ -80,7 +77,8 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, | |||
80 | int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | 77 | int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) |
81 | { | 78 | { |
82 | struct mlx4_en_dev *mdev = priv->mdev; | 79 | struct mlx4_en_dev *mdev = priv->mdev; |
83 | int err; | 80 | int err = 0; |
81 | char name[25]; | ||
84 | 82 | ||
85 | cq->dev = mdev->pndev[priv->port]; | 83 | cq->dev = mdev->pndev[priv->port]; |
86 | cq->mcq.set_ci_db = cq->wqres.db.db; | 84 | cq->mcq.set_ci_db = cq->wqres.db.db; |
@@ -89,6 +87,29 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | |||
89 | *cq->mcq.arm_db = 0; | 87 | *cq->mcq.arm_db = 0; |
90 | memset(cq->buf, 0, cq->buf_size); | 88 | memset(cq->buf, 0, cq->buf_size); |
91 | 89 | ||
90 | if (cq->is_tx == RX) { | ||
91 | if (mdev->dev->caps.comp_pool) { | ||
92 | if (!cq->vector) { | ||
93 | sprintf(name , "%s-rx-%d", priv->dev->name, cq->ring); | ||
94 | if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) { | ||
95 | cq->vector = (cq->ring + 1 + priv->port) % | ||
96 | mdev->dev->caps.num_comp_vectors; | ||
97 | mlx4_warn(mdev, "Failed assigning an EQ to " | ||
98 | "%s-rx-%d, falling back to legacy EQs\n", | ||
99 | priv->dev->name, cq->ring); | ||
100 | } | ||
101 | } | ||
102 | } else { | ||
103 | cq->vector = (cq->ring + 1 + priv->port) % | ||
104 | mdev->dev->caps.num_comp_vectors; | ||
105 | } | ||
106 | } else { | ||
107 | if (!cq->vector || !mdev->dev->caps.comp_pool) { | ||
108 | /* Fall back to the legacy pool in case of error */ | ||
109 | cq->vector = 0; | ||
110 | } | ||
111 | } | ||
112 | |||
92 | if (!cq->is_tx) | 113 | if (!cq->is_tx) |
93 | cq->size = priv->rx_ring[cq->ring].actual_size; | 114 | cq->size = priv->rx_ring[cq->ring].actual_size; |
94 | 115 | ||
@@ -112,12 +133,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | |||
112 | return 0; | 133 | return 0; |
113 | } | 134 | } |
114 | 135 | ||
115 | void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) | 136 | void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, |
137 | bool reserve_vectors) | ||
116 | { | 138 | { |
117 | struct mlx4_en_dev *mdev = priv->mdev; | 139 | struct mlx4_en_dev *mdev = priv->mdev; |
118 | 140 | ||
119 | mlx4_en_unmap_buffer(&cq->wqres.buf); | 141 | mlx4_en_unmap_buffer(&cq->wqres.buf); |
120 | mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); | 142 | mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); |
143 | if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors) | ||
144 | mlx4_release_eq(priv->mdev->dev, cq->vector); | ||
121 | cq->buf_size = 0; | 145 | cq->buf_size = 0; |
122 | cq->buf = NULL; | 146 | cq->buf = NULL; |
123 | } | 147 | } |
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c index 056152b3ff5..d54b7abf022 100644 --- a/drivers/net/mlx4/en_ethtool.c +++ b/drivers/net/mlx4/en_ethtool.c | |||
@@ -45,7 +45,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) | |||
45 | struct mlx4_en_priv *priv = netdev_priv(dev); | 45 | struct mlx4_en_priv *priv = netdev_priv(dev); |
46 | struct mlx4_en_dev *mdev = priv->mdev; | 46 | struct mlx4_en_dev *mdev = priv->mdev; |
47 | 47 | ||
48 | sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id); | 48 | strncpy(drvinfo->driver, DRV_NAME, 32); |
49 | strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32); | 49 | strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32); |
50 | sprintf(drvinfo->fw_version, "%d.%d.%d", | 50 | sprintf(drvinfo->fw_version, "%d.%d.%d", |
51 | (u16) (mdev->dev->caps.fw_ver >> 32), | 51 | (u16) (mdev->dev->caps.fw_ver >> 32), |
@@ -131,8 +131,65 @@ static void mlx4_en_set_msglevel(struct net_device *dev, u32 val) | |||
131 | static void mlx4_en_get_wol(struct net_device *netdev, | 131 | static void mlx4_en_get_wol(struct net_device *netdev, |
132 | struct ethtool_wolinfo *wol) | 132 | struct ethtool_wolinfo *wol) |
133 | { | 133 | { |
134 | wol->supported = 0; | 134 | struct mlx4_en_priv *priv = netdev_priv(netdev); |
135 | wol->wolopts = 0; | 135 | int err = 0; |
136 | u64 config = 0; | ||
137 | |||
138 | if (!priv->mdev->dev->caps.wol) { | ||
139 | wol->supported = 0; | ||
140 | wol->wolopts = 0; | ||
141 | return; | ||
142 | } | ||
143 | |||
144 | err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); | ||
145 | if (err) { | ||
146 | en_err(priv, "Failed to get WoL information\n"); | ||
147 | return; | ||
148 | } | ||
149 | |||
150 | if (config & MLX4_EN_WOL_MAGIC) | ||
151 | wol->supported = WAKE_MAGIC; | ||
152 | else | ||
153 | wol->supported = 0; | ||
154 | |||
155 | if (config & MLX4_EN_WOL_ENABLED) | ||
156 | wol->wolopts = WAKE_MAGIC; | ||
157 | else | ||
158 | wol->wolopts = 0; | ||
159 | } | ||
160 | |||
161 | static int mlx4_en_set_wol(struct net_device *netdev, | ||
162 | struct ethtool_wolinfo *wol) | ||
163 | { | ||
164 | struct mlx4_en_priv *priv = netdev_priv(netdev); | ||
165 | u64 config = 0; | ||
166 | int err = 0; | ||
167 | |||
168 | if (!priv->mdev->dev->caps.wol) | ||
169 | return -EOPNOTSUPP; | ||
170 | |||
171 | if (wol->supported & ~WAKE_MAGIC) | ||
172 | return -EINVAL; | ||
173 | |||
174 | err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); | ||
175 | if (err) { | ||
176 | en_err(priv, "Failed to get WoL info, unable to modify\n"); | ||
177 | return err; | ||
178 | } | ||
179 | |||
180 | if (wol->wolopts & WAKE_MAGIC) { | ||
181 | config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED | | ||
182 | MLX4_EN_WOL_MAGIC; | ||
183 | } else { | ||
184 | config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC); | ||
185 | config |= MLX4_EN_WOL_DO_MODIFY; | ||
186 | } | ||
187 | |||
188 | err = mlx4_wol_write(priv->mdev->dev, config, priv->port); | ||
189 | if (err) | ||
190 | en_err(priv, "Failed to set WoL information\n"); | ||
191 | |||
192 | return err; | ||
136 | } | 193 | } |
137 | 194 | ||
138 | static int mlx4_en_get_sset_count(struct net_device *dev, int sset) | 195 | static int mlx4_en_get_sset_count(struct net_device *dev, int sset) |
@@ -388,7 +445,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev, | |||
388 | mlx4_en_stop_port(dev); | 445 | mlx4_en_stop_port(dev); |
389 | } | 446 | } |
390 | 447 | ||
391 | mlx4_en_free_resources(priv); | 448 | mlx4_en_free_resources(priv, true); |
392 | 449 | ||
393 | priv->prof->tx_ring_size = tx_size; | 450 | priv->prof->tx_ring_size = tx_size; |
394 | priv->prof->rx_ring_size = rx_size; | 451 | priv->prof->rx_ring_size = rx_size; |
@@ -442,6 +499,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { | |||
442 | .get_ethtool_stats = mlx4_en_get_ethtool_stats, | 499 | .get_ethtool_stats = mlx4_en_get_ethtool_stats, |
443 | .self_test = mlx4_en_self_test, | 500 | .self_test = mlx4_en_self_test, |
444 | .get_wol = mlx4_en_get_wol, | 501 | .get_wol = mlx4_en_get_wol, |
502 | .set_wol = mlx4_en_set_wol, | ||
445 | .get_msglevel = mlx4_en_get_msglevel, | 503 | .get_msglevel = mlx4_en_get_msglevel, |
446 | .set_msglevel = mlx4_en_set_msglevel, | 504 | .set_msglevel = mlx4_en_set_msglevel, |
447 | .get_coalesce = mlx4_en_get_coalesce, | 505 | .get_coalesce = mlx4_en_get_coalesce, |
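mlx4_en gains full ethtool Wake-on-LAN support: get_wol reports WAKE_MAGIC from the firmware config word, and set_wol performs a read-modify-write of that word. The read-modify-write core, condensed into a sketch (mlx4_wol_read/mlx4_wol_write and the MLX4_EN_WOL_* flags are the ones used in the hunk above):

    static int wol_set_sketch(struct mlx4_dev *dev, int port, bool enable)
    {
            u64 config;
            int err = mlx4_wol_read(dev, &config, port);

            if (err)
                    return err;
            if (enable) {
                    config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
                              MLX4_EN_WOL_MAGIC;
            } else {
                    config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
                    config |= MLX4_EN_WOL_DO_MODIFY;
            }
            return mlx4_wol_write(dev, config, port);
    }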
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c index 1ff6ca6466e..9317b61a75b 100644 --- a/drivers/net/mlx4/en_main.c +++ b/drivers/net/mlx4/en_main.c | |||
@@ -241,16 +241,18 @@ static void *mlx4_en_add(struct mlx4_dev *dev) | |||
241 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) | 241 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) |
242 | mdev->port_cnt++; | 242 | mdev->port_cnt++; |
243 | 243 | ||
244 | /* If we did not receive an explicit number of Rx rings, default to | ||
245 | * the number of completion vectors populated by the mlx4_core */ | ||
246 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { | 244 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { |
247 | mlx4_info(mdev, "Using %d tx rings for port:%d\n", | 245 | if (!dev->caps.comp_pool) { |
248 | mdev->profile.prof[i].tx_ring_num, i); | 246 | mdev->profile.prof[i].rx_ring_num = |
249 | mdev->profile.prof[i].rx_ring_num = min_t(int, | 247 | rounddown_pow_of_two(max_t(int, MIN_RX_RINGS, |
250 | roundup_pow_of_two(dev->caps.num_comp_vectors), | 248 | min_t(int, |
251 | MAX_RX_RINGS); | 249 | dev->caps.num_comp_vectors, |
252 | mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n", | 250 | MAX_RX_RINGS))); |
253 | mdev->profile.prof[i].rx_ring_num, i); | 251 | } else { |
252 | mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two( | ||
253 | min_t(int, dev->caps.comp_pool/ | ||
254 | dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1)); | ||
255 | } | ||
254 | } | 256 | } |
255 | 257 | ||
256 | /* Create our own workqueue for reset/multicast tasks | 258 | /* Create our own workqueue for reset/multicast tasks |
@@ -294,7 +296,7 @@ static struct mlx4_interface mlx4_en_interface = { | |||
294 | .remove = mlx4_en_remove, | 296 | .remove = mlx4_en_remove, |
295 | .event = mlx4_en_event, | 297 | .event = mlx4_en_event, |
296 | .get_dev = mlx4_en_get_netdev, | 298 | .get_dev = mlx4_en_get_netdev, |
297 | .protocol = MLX4_PROTOCOL_EN, | 299 | .protocol = MLX4_PROT_ETH, |
298 | }; | 300 | }; |
299 | 301 | ||
300 | static int __init mlx4_en_init(void) | 302 | static int __init mlx4_en_init(void) |
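
The reworked RX-ring sizing in mlx4_en_add() above picks a power-of-two ring count from either the shared completion vectors (legacy interrupts) or the per-port slice of the new vector pool, keeping one vector spare. A runnable restatement follows; the MIN_RX_RINGS/MAX_RX_RINGS/MAX_MSIX_P_PORT values (4, 16, 17) are assumed from the driver headers of this era.

/* Runnable sketch of the default RX-ring computation above. */
#include <stdio.h>

#define MIN_RX_RINGS    4
#define MAX_RX_RINGS    16
#define MAX_MSIX_P_PORT 17

static int rounddown_pow_of_two(int n)
{
	int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

static int default_rx_rings(int comp_pool, int num_comp_vectors, int num_ports)
{
	if (!comp_pool)	/* legacy: share the per-device completion vectors */
		return rounddown_pow_of_two(max_int(MIN_RX_RINGS,
				min_int(num_comp_vectors, MAX_RX_RINGS)));
	/* pooled: split the extra vectors between ports, keep one spare */
	return rounddown_pow_of_two(min_int(comp_pool / num_ports - 1,
					    MAX_MSIX_P_PORT - 1));
}

int main(void)
{
	printf("legacy, 6 vectors:  %d rings\n", default_rx_rings(0, 6, 2));
	printf("pooled, 32 vectors: %d rings\n", default_rx_rings(32, 4, 2));
	return 0;
}
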
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 897f576b8b1..5762ebde445 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -156,9 +156,8 @@ static void mlx4_en_do_set_mac(struct work_struct *work) | |||
156 | mutex_lock(&mdev->state_lock); | 156 | mutex_lock(&mdev->state_lock); |
157 | if (priv->port_up) { | 157 | if (priv->port_up) { |
158 | /* Remove old MAC and insert the new one */ | 158 | /* Remove old MAC and insert the new one */ |
159 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | 159 | err = mlx4_replace_mac(mdev->dev, priv->port, |
160 | err = mlx4_register_mac(mdev->dev, priv->port, | 160 | priv->base_qpn, priv->mac, 0); |
161 | priv->mac, &priv->mac_index); | ||
162 | if (err) | 161 | if (err) |
163 | en_err(priv, "Failed changing HW MAC address\n"); | 162 | en_err(priv, "Failed changing HW MAC address\n"); |
164 | } else | 163 | } else |
@@ -214,6 +213,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
214 | struct mlx4_en_dev *mdev = priv->mdev; | 213 | struct mlx4_en_dev *mdev = priv->mdev; |
215 | struct net_device *dev = priv->dev; | 214 | struct net_device *dev = priv->dev; |
216 | u64 mcast_addr = 0; | 215 | u64 mcast_addr = 0; |
216 | u8 mc_list[16] = {0}; | ||
217 | int err; | 217 | int err; |
218 | 218 | ||
219 | mutex_lock(&mdev->state_lock); | 219 | mutex_lock(&mdev->state_lock); |
@@ -239,8 +239,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
239 | priv->flags |= MLX4_EN_FLAG_PROMISC; | 239 | priv->flags |= MLX4_EN_FLAG_PROMISC; |
240 | 240 | ||
241 | /* Enable promiscuous mode */ | 241 | /* Enable promiscuous mode */ |
242 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, | 242 | if (!mdev->dev->caps.vep_uc_steering) |
243 | priv->base_qpn, 1); | 243 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, |
244 | priv->base_qpn, 1); | ||
245 | else | ||
246 | err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn, | ||
247 | priv->port); | ||
244 | if (err) | 248 | if (err) |
245 | en_err(priv, "Failed enabling " | 249 | en_err(priv, "Failed enabling " |
246 | "promiscous mode\n"); | 250 | "promiscous mode\n"); |
@@ -252,10 +256,21 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
252 | en_err(priv, "Failed disabling " | 256 | en_err(priv, "Failed disabling " |
253 | "multicast filter\n"); | 257 | "multicast filter\n"); |
254 | 258 | ||
255 | /* Disable port VLAN filter */ | 259 | /* Add the default qp number as multicast promisc */ |
256 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); | 260 | if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { |
257 | if (err) | 261 | err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, |
258 | en_err(priv, "Failed disabling VLAN filter\n"); | 262 | priv->port); |
263 | if (err) | ||
264 | en_err(priv, "Failed entering multicast promisc mode\n"); | ||
265 | priv->flags |= MLX4_EN_FLAG_MC_PROMISC; | ||
266 | } | ||
267 | |||
268 | if (priv->vlgrp) { | ||
269 | /* Disable port VLAN filter */ | ||
270 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); | ||
271 | if (err) | ||
272 | en_err(priv, "Failed disabling VLAN filter\n"); | ||
273 | } | ||
259 | } | 274 | } |
260 | goto out; | 275 | goto out; |
261 | } | 276 | } |
@@ -270,11 +285,24 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
270 | priv->flags &= ~MLX4_EN_FLAG_PROMISC; | 285 | priv->flags &= ~MLX4_EN_FLAG_PROMISC; |
271 | 286 | ||
272 | /* Disable promiscuous mode */ | 287 | /* Disable promiscuous mode */ |
273 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, | 288 | if (!mdev->dev->caps.vep_uc_steering) |
274 | priv->base_qpn, 0); | 289 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, |
290 | priv->base_qpn, 0); | ||
291 | else | ||
292 | err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, | ||
293 | priv->port); | ||
275 | if (err) | 294 | if (err) |
276 | en_err(priv, "Failed disabling promiscous mode\n"); | 295 | en_err(priv, "Failed disabling promiscous mode\n"); |
277 | 296 | ||
297 | /* Disable Multicast promisc */ | ||
298 | if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { | ||
299 | err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, | ||
300 | priv->port); | ||
301 | if (err) | ||
302 | en_err(priv, "Failed disabling multicast promiscuous mode\n"); | ||
303 | priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; | ||
304 | } | ||
305 | |||
278 | /* Enable port VLAN filter */ | 306 | /* Enable port VLAN filter */ |
279 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); | 307 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); |
280 | if (err) | 308 | if (err) |
@@ -287,14 +315,38 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
287 | 0, MLX4_MCAST_DISABLE); | 315 | 0, MLX4_MCAST_DISABLE); |
288 | if (err) | 316 | if (err) |
289 | en_err(priv, "Failed disabling multicast filter\n"); | 317 | en_err(priv, "Failed disabling multicast filter\n"); |
318 | |||
319 | /* Add the default qp number as multicast promisc */ | ||
320 | if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { | ||
321 | err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, | ||
322 | priv->port); | ||
323 | if (err) | ||
324 | en_err(priv, "Failed entering multicast promisc mode\n"); | ||
325 | priv->flags |= MLX4_EN_FLAG_MC_PROMISC; | ||
326 | } | ||
290 | } else { | 327 | } else { |
291 | int i; | 328 | int i; |
329 | /* Disable Multicast promisc */ | ||
330 | if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { | ||
331 | err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, | ||
332 | priv->port); | ||
333 | if (err) | ||
334 | en_err(priv, "Failed disabling multicast promiscuous mode\n"); | ||
335 | priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; | ||
336 | } | ||
292 | 337 | ||
293 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | 338 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, |
294 | 0, MLX4_MCAST_DISABLE); | 339 | 0, MLX4_MCAST_DISABLE); |
295 | if (err) | 340 | if (err) |
296 | en_err(priv, "Failed disabling multicast filter\n"); | 341 | en_err(priv, "Failed disabling multicast filter\n"); |
297 | 342 | ||
343 | /* Detach our qp from all the multicast addresses */ | ||
344 | for (i = 0; i < priv->mc_addrs_cnt; i++) { | ||
345 | memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); | ||
346 | mc_list[5] = priv->port; | ||
347 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, | ||
348 | mc_list, MLX4_PROT_ETH); | ||
349 | } | ||
298 | /* Flush mcast filter and init it with broadcast address */ | 350 | /* Flush mcast filter and init it with broadcast address */ |
299 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, | 351 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, |
300 | 1, MLX4_MCAST_CONFIG); | 352 | 1, MLX4_MCAST_CONFIG); |
@@ -307,6 +359,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
307 | for (i = 0; i < priv->mc_addrs_cnt; i++) { | 359 | for (i = 0; i < priv->mc_addrs_cnt; i++) { |
308 | mcast_addr = | 360 | mcast_addr = |
309 | mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); | 361 | mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); |
362 | memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); | ||
363 | mc_list[5] = priv->port; | ||
364 | mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, | ||
365 | mc_list, 0, MLX4_PROT_ETH); | ||
310 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, | 366 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, |
311 | mcast_addr, 0, MLX4_MCAST_CONFIG); | 367 | mcast_addr, 0, MLX4_MCAST_CONFIG); |
312 | } | 368 | } |
@@ -314,8 +370,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
314 | 0, MLX4_MCAST_ENABLE); | 370 | 0, MLX4_MCAST_ENABLE); |
315 | if (err) | 371 | if (err) |
316 | en_err(priv, "Failed enabling multicast filter\n"); | 372 | en_err(priv, "Failed enabling multicast filter\n"); |
317 | |||
318 | mlx4_en_clear_list(dev); | ||
319 | } | 373 | } |
320 | out: | 374 | out: |
321 | mutex_unlock(&mdev->state_lock); | 375 | mutex_unlock(&mdev->state_lock); |
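
The new mlx4_multicast_attach()/detach() calls above pass a 16-byte GID-style buffer in which byte 5 carries the port number and bytes 10..15 carry the Ethernet MAC; the layout is inferred from the memcpy and assignments in the hunks. Embedding the port apparently lets per-port entries share one steering table. A small sketch of that buffer construction:

/* Userspace sketch of the mc_list layout used by the attach/detach
 * calls above; the byte positions mirror the patch. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

static void build_mc_list(uint8_t mc_list[16], const uint8_t *mac, uint8_t port)
{
	memset(mc_list, 0, 16);
	mc_list[5] = port;                   /* steering entry port */
	memcpy(&mc_list[10], mac, ETH_ALEN); /* multicast MAC */
}

int main(void)
{
	uint8_t bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint8_t mc_list[16];
	int i;

	build_mc_list(mc_list, bcast, 1);
	for (i = 0; i < 16; i++)
		printf("%02x%c", mc_list[i], i == 15 ? '\n' : ':');
	return 0;
}
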
@@ -417,7 +471,6 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) | |||
417 | unsigned long avg_pkt_size; | 471 | unsigned long avg_pkt_size; |
418 | unsigned long rx_packets; | 472 | unsigned long rx_packets; |
419 | unsigned long rx_bytes; | 473 | unsigned long rx_bytes; |
420 | unsigned long rx_byte_diff; | ||
421 | unsigned long tx_packets; | 474 | unsigned long tx_packets; |
422 | unsigned long tx_pkt_diff; | 475 | unsigned long tx_pkt_diff; |
423 | unsigned long rx_pkt_diff; | 476 | unsigned long rx_pkt_diff; |
@@ -441,25 +494,20 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) | |||
441 | rx_pkt_diff = ((unsigned long) (rx_packets - | 494 | rx_pkt_diff = ((unsigned long) (rx_packets - |
442 | priv->last_moder_packets)); | 495 | priv->last_moder_packets)); |
443 | packets = max(tx_pkt_diff, rx_pkt_diff); | 496 | packets = max(tx_pkt_diff, rx_pkt_diff); |
444 | rx_byte_diff = rx_bytes - priv->last_moder_bytes; | ||
445 | rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1; | ||
446 | rate = packets * HZ / period; | 497 | rate = packets * HZ / period; |
447 | avg_pkt_size = packets ? ((unsigned long) (rx_bytes - | 498 | avg_pkt_size = packets ? ((unsigned long) (rx_bytes - |
448 | priv->last_moder_bytes)) / packets : 0; | 499 | priv->last_moder_bytes)) / packets : 0; |
449 | 500 | ||
450 | /* Apply auto-moderation only when packet rate exceeds a rate that | 501 | /* Apply auto-moderation only when packet rate exceeds a rate that |
451 | * it matters */ | 502 | * it matters */ |
452 | if (rate > MLX4_EN_RX_RATE_THRESH) { | 503 | if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { |
453 | /* If tx and rx packet rates are not balanced, assume that | 504 | /* If tx and rx packet rates are not balanced, assume that |
454 | * traffic is mainly BW bound and apply maximum moderation. | 505 | * traffic is mainly BW bound and apply maximum moderation. |
455 | * Otherwise, moderate according to packet rate */ | 506 | * Otherwise, moderate according to packet rate */ |
456 | if (2 * tx_pkt_diff > 3 * rx_pkt_diff && | 507 | if (2 * tx_pkt_diff > 3 * rx_pkt_diff || |
457 | rx_pkt_diff / rx_byte_diff < | 508 | 2 * rx_pkt_diff > 3 * tx_pkt_diff) { |
458 | MLX4_EN_SMALL_PKT_SIZE) | ||
459 | moder_time = priv->rx_usecs_low; | ||
460 | else if (2 * rx_pkt_diff > 3 * tx_pkt_diff) | ||
461 | moder_time = priv->rx_usecs_high; | 509 | moder_time = priv->rx_usecs_high; |
462 | else { | 510 | } else { |
463 | if (rate < priv->pkt_rate_low) | 511 | if (rate < priv->pkt_rate_low) |
464 | moder_time = priv->rx_usecs_low; | 512 | moder_time = priv->rx_usecs_low; |
465 | else if (rate > priv->pkt_rate_high) | 513 | else if (rate > priv->pkt_rate_high) |
@@ -471,9 +519,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) | |||
471 | priv->rx_usecs_low; | 519 | priv->rx_usecs_low; |
472 | } | 520 | } |
473 | } else { | 521 | } else { |
474 | /* When packet rate is low, use default moderation rather than | 522 | moder_time = priv->rx_usecs_low; |
475 | * 0 to prevent interrupt storms if traffic suddenly increases */ | ||
476 | moder_time = priv->rx_usecs; | ||
477 | } | 523 | } |
478 | 524 | ||
479 | en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", | 525 | en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", |
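
The rewritten heuristic above now also requires a minimum average packet size, treats a heavy tx/rx imbalance in either direction as bandwidth-bound traffic (maximum moderation), and falls back to the low moderation value rather than the default when the rate is below the threshold. A condensed, runnable restatement; the threshold numbers are placeholders, and the middle interpolation branch is assumed from the surrounding context lines:

/* Sketch of the new interrupt-moderation decision above. */
#include <stdio.h>

#define RATE_THRESH    400000
#define AVG_PKT_SMALL  256
#define USECS_LOW      16
#define USECS_HIGH     128
#define PKT_RATE_LOW   400000
#define PKT_RATE_HIGH  450000

static unsigned long moder_time(unsigned long rate, unsigned long avg_pkt_size,
				unsigned long tx_pkt_diff,
				unsigned long rx_pkt_diff)
{
	if (rate <= RATE_THRESH || avg_pkt_size <= AVG_PKT_SMALL)
		return USECS_LOW;	/* low rate or tiny packets */
	if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
	    2 * rx_pkt_diff > 3 * tx_pkt_diff)
		return USECS_HIGH;	/* unbalanced: assume BW bound */
	if (rate < PKT_RATE_LOW)
		return USECS_LOW;
	if (rate > PKT_RATE_HIGH)
		return USECS_HIGH;
	/* assumed: linear interpolation between the two extremes */
	return (rate - PKT_RATE_LOW) * (USECS_HIGH - USECS_LOW) /
	       (PKT_RATE_HIGH - PKT_RATE_LOW) + USECS_LOW;
}

int main(void)
{
	printf("idle link:      %lu usec\n", moder_time(1000, 1500, 10, 10));
	printf("bw bound:       %lu usec\n", moder_time(500000, 1500, 90, 10));
	printf("balanced burst: %lu usec\n", moder_time(425000, 1500, 50, 50));
	return 0;
}
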
@@ -565,6 +611,8 @@ int mlx4_en_start_port(struct net_device *dev) | |||
565 | int err = 0; | 611 | int err = 0; |
566 | int i; | 612 | int i; |
567 | int j; | 613 | int j; |
614 | u8 mc_list[16] = {0}; | ||
615 | char name[32]; | ||
568 | 616 | ||
569 | if (priv->port_up) { | 617 | if (priv->port_up) { |
570 | en_dbg(DRV, priv, "start port called while port already up\n"); | 618 | en_dbg(DRV, priv, "start port called while port already up\n"); |
@@ -603,16 +651,35 @@ int mlx4_en_start_port(struct net_device *dev) | |||
603 | ++rx_index; | 651 | ++rx_index; |
604 | } | 652 | } |
605 | 653 | ||
654 | /* Set port mac number */ | ||
655 | en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); | ||
656 | err = mlx4_register_mac(mdev->dev, priv->port, | ||
657 | priv->mac, &priv->base_qpn, 0); | ||
658 | if (err) { | ||
659 | en_err(priv, "Failed setting port mac\n"); | ||
660 | goto cq_err; | ||
661 | } | ||
662 | mdev->mac_removed[priv->port] = 0; | ||
663 | |||
606 | err = mlx4_en_config_rss_steer(priv); | 664 | err = mlx4_en_config_rss_steer(priv); |
607 | if (err) { | 665 | if (err) { |
608 | en_err(priv, "Failed configuring rss steering\n"); | 666 | en_err(priv, "Failed configuring rss steering\n"); |
609 | goto cq_err; | 667 | goto mac_err; |
610 | } | 668 | } |
611 | 669 | ||
670 | if (mdev->dev->caps.comp_pool && !priv->tx_vector) { | ||
671 | sprintf(name, "%s-tx", priv->dev->name); | ||
672 | if (mlx4_assign_eq(mdev->dev, name, &priv->tx_vector)) { | ||
673 | mlx4_warn(mdev, "Failed assigning an EQ to " | ||
674 | "%s-tx, falling back to legacy " | ||
675 | "EQs\n", priv->dev->name); | ||
676 | } | ||
677 | } | ||
612 | /* Configure tx cq's and rings */ | 678 | /* Configure tx cq's and rings */ |
613 | for (i = 0; i < priv->tx_ring_num; i++) { | 679 | for (i = 0; i < priv->tx_ring_num; i++) { |
614 | /* Configure cq */ | 680 | /* Configure cq */ |
615 | cq = &priv->tx_cq[i]; | 681 | cq = &priv->tx_cq[i]; |
682 | cq->vector = priv->tx_vector; | ||
616 | err = mlx4_en_activate_cq(priv, cq); | 683 | err = mlx4_en_activate_cq(priv, cq); |
617 | if (err) { | 684 | if (err) { |
618 | en_err(priv, "Failed allocating Tx CQ\n"); | 685 | en_err(priv, "Failed allocating Tx CQ\n"); |
@@ -659,24 +726,22 @@ int mlx4_en_start_port(struct net_device *dev) | |||
659 | en_err(priv, "Failed setting default qp numbers\n"); | 726 | en_err(priv, "Failed setting default qp numbers\n"); |
660 | goto tx_err; | 727 | goto tx_err; |
661 | } | 728 | } |
662 | /* Set port mac number */ | ||
663 | en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); | ||
664 | err = mlx4_register_mac(mdev->dev, priv->port, | ||
665 | priv->mac, &priv->mac_index); | ||
666 | if (err) { | ||
667 | en_err(priv, "Failed setting port mac\n"); | ||
668 | goto tx_err; | ||
669 | } | ||
670 | mdev->mac_removed[priv->port] = 0; | ||
671 | 729 | ||
672 | /* Init port */ | 730 | /* Init port */ |
673 | en_dbg(HW, priv, "Initializing port\n"); | 731 | en_dbg(HW, priv, "Initializing port\n"); |
674 | err = mlx4_INIT_PORT(mdev->dev, priv->port); | 732 | err = mlx4_INIT_PORT(mdev->dev, priv->port); |
675 | if (err) { | 733 | if (err) { |
676 | en_err(priv, "Failed Initializing port\n"); | 734 | en_err(priv, "Failed Initializing port\n"); |
677 | goto mac_err; | 735 | goto tx_err; |
678 | } | 736 | } |
679 | 737 | ||
738 | /* Attach rx QP to broadcast address */ | ||
739 | memset(&mc_list[10], 0xff, ETH_ALEN); | ||
740 | mc_list[5] = priv->port; | ||
741 | if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, | ||
742 | 0, MLX4_PROT_ETH)) | ||
743 | mlx4_warn(mdev, "Failed Attaching Broadcast\n"); | ||
744 | |||
680 | /* Schedule multicast task to populate multicast list */ | 745 | /* Schedule multicast task to populate multicast list */ |
681 | queue_work(mdev->workqueue, &priv->mcast_task); | 746 | queue_work(mdev->workqueue, &priv->mcast_task); |
682 | 747 | ||
@@ -684,8 +749,6 @@ int mlx4_en_start_port(struct net_device *dev) | |||
684 | netif_tx_start_all_queues(dev); | 749 | netif_tx_start_all_queues(dev); |
685 | return 0; | 750 | return 0; |
686 | 751 | ||
687 | mac_err: | ||
688 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | ||
689 | tx_err: | 752 | tx_err: |
690 | while (tx_index--) { | 753 | while (tx_index--) { |
691 | mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); | 754 | mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); |
@@ -693,6 +756,8 @@ tx_err: | |||
693 | } | 756 | } |
694 | 757 | ||
695 | mlx4_en_release_rss_steer(priv); | 758 | mlx4_en_release_rss_steer(priv); |
759 | mac_err: | ||
760 | mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); | ||
696 | cq_err: | 761 | cq_err: |
697 | while (rx_index--) | 762 | while (rx_index--) |
698 | mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); | 763 | mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); |
@@ -708,6 +773,7 @@ void mlx4_en_stop_port(struct net_device *dev) | |||
708 | struct mlx4_en_priv *priv = netdev_priv(dev); | 773 | struct mlx4_en_priv *priv = netdev_priv(dev); |
709 | struct mlx4_en_dev *mdev = priv->mdev; | 774 | struct mlx4_en_dev *mdev = priv->mdev; |
710 | int i; | 775 | int i; |
776 | u8 mc_list[16] = {0}; | ||
711 | 777 | ||
712 | if (!priv->port_up) { | 778 | if (!priv->port_up) { |
713 | en_dbg(DRV, priv, "stop port called while port already down\n"); | 779 | en_dbg(DRV, priv, "stop port called while port already down\n"); |
@@ -722,8 +788,23 @@ void mlx4_en_stop_port(struct net_device *dev) | |||
722 | /* Set port as not active */ | 788 | /* Set port as not active */ |
723 | priv->port_up = false; | 789 | priv->port_up = false; |
724 | 790 | ||
791 | /* Detach all multicast addresses */ | ||
792 | memset(&mc_list[10], 0xff, ETH_ALEN); | ||
793 | mc_list[5] = priv->port; | ||
794 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, | ||
795 | MLX4_PROT_ETH); | ||
796 | for (i = 0; i < priv->mc_addrs_cnt; i++) { | ||
797 | memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); | ||
798 | mc_list[5] = priv->port; | ||
799 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, | ||
800 | mc_list, MLX4_PROT_ETH); | ||
801 | } | ||
802 | mlx4_en_clear_list(dev); | ||
803 | /* Flush multicast filter */ | ||
804 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); | ||
805 | |||
725 | /* Unregister Mac address for the port */ | 806 | /* Unregister Mac address for the port */ |
726 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | 807 | mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); |
727 | mdev->mac_removed[priv->port] = 1; | 808 | mdev->mac_removed[priv->port] = 1; |
728 | 809 | ||
729 | /* Free TX Rings */ | 810 | /* Free TX Rings */ |
@@ -801,7 +882,6 @@ static int mlx4_en_open(struct net_device *dev) | |||
801 | priv->rx_ring[i].packets = 0; | 882 | priv->rx_ring[i].packets = 0; |
802 | } | 883 | } |
803 | 884 | ||
804 | mlx4_en_set_default_moderation(priv); | ||
805 | err = mlx4_en_start_port(dev); | 885 | err = mlx4_en_start_port(dev); |
806 | if (err) | 886 | if (err) |
807 | en_err(priv, "Failed starting port:%d\n", priv->port); | 887 | en_err(priv, "Failed starting port:%d\n", priv->port); |
@@ -828,7 +908,7 @@ static int mlx4_en_close(struct net_device *dev) | |||
828 | return 0; | 908 | return 0; |
829 | } | 909 | } |
830 | 910 | ||
831 | void mlx4_en_free_resources(struct mlx4_en_priv *priv) | 911 | void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors) |
832 | { | 912 | { |
833 | int i; | 913 | int i; |
834 | 914 | ||
@@ -836,14 +916,14 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv) | |||
836 | if (priv->tx_ring[i].tx_info) | 916 | if (priv->tx_ring[i].tx_info) |
837 | mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); | 917 | mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); |
838 | if (priv->tx_cq[i].buf) | 918 | if (priv->tx_cq[i].buf) |
839 | mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); | 919 | mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors); |
840 | } | 920 | } |
841 | 921 | ||
842 | for (i = 0; i < priv->rx_ring_num; i++) { | 922 | for (i = 0; i < priv->rx_ring_num; i++) { |
843 | if (priv->rx_ring[i].rx_info) | 923 | if (priv->rx_ring[i].rx_info) |
844 | mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); | 924 | mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); |
845 | if (priv->rx_cq[i].buf) | 925 | if (priv->rx_cq[i].buf) |
846 | mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); | 926 | mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors); |
847 | } | 927 | } |
848 | } | 928 | } |
849 | 929 | ||
@@ -851,6 +931,13 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | |||
851 | { | 931 | { |
852 | struct mlx4_en_port_profile *prof = priv->prof; | 932 | struct mlx4_en_port_profile *prof = priv->prof; |
853 | int i; | 933 | int i; |
934 | int base_tx_qpn, err; | ||
935 | |||
936 | err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn); | ||
937 | if (err) { | ||
938 | en_err(priv, "failed reserving range for TX rings\n"); | ||
939 | return err; | ||
940 | } | ||
854 | 941 | ||
855 | /* Create tx Rings */ | 942 | /* Create tx Rings */ |
856 | for (i = 0; i < priv->tx_ring_num; i++) { | 943 | for (i = 0; i < priv->tx_ring_num; i++) { |
@@ -858,7 +945,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | |||
858 | prof->tx_ring_size, i, TX)) | 945 | prof->tx_ring_size, i, TX)) |
859 | goto err; | 946 | goto err; |
860 | 947 | ||
861 | if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], | 948 | if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i, |
862 | prof->tx_ring_size, TXBB_SIZE)) | 949 | prof->tx_ring_size, TXBB_SIZE)) |
863 | goto err; | 950 | goto err; |
864 | } | 951 | } |
@@ -878,6 +965,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | |||
878 | 965 | ||
879 | err: | 966 | err: |
880 | en_err(priv, "Failed to allocate NIC resources\n"); | 967 | en_err(priv, "Failed to allocate NIC resources\n"); |
968 | mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num); | ||
881 | return -ENOMEM; | 969 | return -ENOMEM; |
882 | } | 970 | } |
883 | 971 | ||
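
mlx4_en_alloc_resources() now reserves one contiguous, 256-aligned block of QP numbers up front and hands ring i the number base + i, instead of reserving per ring. The toy allocator below only illustrates what such a reservation provides; the allocator itself is hypothetical, and only the alignment and base + i usage mirror the patch.

/* Sketch of a qp_reserve_range(count, align) style reservation. */
#include <stdio.h>

static int next_free = 1000;	/* pretend allocator cursor */

static int qp_reserve_range(int cnt, int align, int *base)
{
	int start = (next_free + align - 1) & ~(align - 1);

	*base = start;
	next_free = start + cnt;
	return 0;		/* the real call can fail, e.g. -ENOMEM */
}

int main(void)
{
	int base, i;

	if (qp_reserve_range(8, 256, &base))
		return 1;
	for (i = 0; i < 8; i++)
		printf("tx ring %d -> qpn %d\n", i, base + i);
	return 0;
}
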
@@ -905,7 +993,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev) | |||
905 | mdev->pndev[priv->port] = NULL; | 993 | mdev->pndev[priv->port] = NULL; |
906 | mutex_unlock(&mdev->state_lock); | 994 | mutex_unlock(&mdev->state_lock); |
907 | 995 | ||
908 | mlx4_en_free_resources(priv); | 996 | mlx4_en_free_resources(priv, false); |
909 | free_netdev(dev); | 997 | free_netdev(dev); |
910 | } | 998 | } |
911 | 999 | ||
@@ -932,7 +1020,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) | |||
932 | en_dbg(DRV, priv, "Change MTU called with card down!?\n"); | 1020 | en_dbg(DRV, priv, "Change MTU called with card down!?\n"); |
933 | } else { | 1021 | } else { |
934 | mlx4_en_stop_port(dev); | 1022 | mlx4_en_stop_port(dev); |
935 | mlx4_en_set_default_moderation(priv); | ||
936 | err = mlx4_en_start_port(dev); | 1023 | err = mlx4_en_start_port(dev); |
937 | if (err) { | 1024 | if (err) { |
938 | en_err(priv, "Failed restarting port:%d\n", | 1025 | en_err(priv, "Failed restarting port:%d\n", |
@@ -1079,7 +1166,25 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
1079 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); | 1166 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); |
1080 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); | 1167 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); |
1081 | 1168 | ||
1169 | /* Configure port */ | ||
1170 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | ||
1171 | MLX4_EN_MIN_MTU, | ||
1172 | 0, 0, 0, 0); | ||
1173 | if (err) { | ||
1174 | en_err(priv, "Failed setting port general configurations " | ||
1175 | "for port %d, with error %d\n", priv->port, err); | ||
1176 | goto out; | ||
1177 | } | ||
1178 | |||
1179 | /* Init port */ | ||
1180 | en_warn(priv, "Initializing port\n"); | ||
1181 | err = mlx4_INIT_PORT(mdev->dev, priv->port); | ||
1182 | if (err) { | ||
1183 | en_err(priv, "Failed Initializing port\n"); | ||
1184 | goto out; | ||
1185 | } | ||
1082 | priv->registered = 1; | 1186 | priv->registered = 1; |
1187 | mlx4_en_set_default_moderation(priv); | ||
1083 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); | 1188 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); |
1084 | return 0; | 1189 | return 0; |
1085 | 1190 | ||
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c index 7f5a3221e0c..f2a4f5dd313 100644 --- a/drivers/net/mlx4/en_port.c +++ b/drivers/net/mlx4/en_port.c | |||
@@ -119,6 +119,10 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | |||
119 | struct mlx4_set_port_rqp_calc_context *context; | 119 | struct mlx4_set_port_rqp_calc_context *context; |
120 | int err; | 120 | int err; |
121 | u32 in_mod; | 121 | u32 in_mod; |
122 | u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT; | ||
123 | |||
124 | if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering) | ||
125 | return 0; | ||
122 | 126 | ||
123 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 127 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
124 | if (IS_ERR(mailbox)) | 128 | if (IS_ERR(mailbox)) |
@@ -127,8 +131,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | |||
127 | memset(context, 0, sizeof *context); | 131 | memset(context, 0, sizeof *context); |
128 | 132 | ||
129 | context->base_qpn = cpu_to_be32(base_qpn); | 133 | context->base_qpn = cpu_to_be32(base_qpn); |
130 | context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn); | 134 | context->n_mac = 0x7; |
131 | context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn); | 135 | context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | |
136 | base_qpn); | ||
137 | context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | | ||
138 | base_qpn); | ||
132 | context->intra_no_vlan = 0; | 139 | context->intra_no_vlan = 0; |
133 | context->no_vlan = MLX4_NO_VLAN_IDX; | 140 | context->no_vlan = MLX4_NO_VLAN_IDX; |
134 | context->intra_vlan_miss = 0; | 141 | context->intra_vlan_miss = 0; |
@@ -206,7 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
206 | } | 213 | } |
207 | stats->tx_packets = 0; | 214 | stats->tx_packets = 0; |
208 | stats->tx_bytes = 0; | 215 | stats->tx_bytes = 0; |
209 | for (i = 0; i <= priv->tx_ring_num; i++) { | 216 | for (i = 0; i < priv->tx_ring_num; i++) { |
210 | stats->tx_packets += priv->tx_ring[i].packets; | 217 | stats->tx_packets += priv->tx_ring[i].packets; |
211 | stats->tx_bytes += priv->tx_ring[i].bytes; | 218 | stats->tx_bytes += priv->tx_ring[i].bytes; |
212 | } | 219 | } |
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h index 092e814b198..e3d73e41c56 100644 --- a/drivers/net/mlx4/en_port.h +++ b/drivers/net/mlx4/en_port.h | |||
@@ -36,8 +36,8 @@ | |||
36 | 36 | ||
37 | 37 | ||
38 | #define SET_PORT_GEN_ALL_VALID 0x7 | 38 | #define SET_PORT_GEN_ALL_VALID 0x7 |
39 | #define SET_PORT_PROMISC_EN_SHIFT 31 | 39 | #define SET_PORT_PROMISC_SHIFT 31 |
40 | #define SET_PORT_PROMISC_MODE_SHIFT 30 | 40 | #define SET_PORT_MC_PROMISC_SHIFT 30 |
41 | 41 | ||
42 | enum { | 42 | enum { |
43 | MLX4_CMD_SET_VLAN_FLTR = 0x47, | 43 | MLX4_CMD_SET_VLAN_FLTR = 0x47, |
@@ -45,6 +45,12 @@ enum { | |||
45 | MLX4_CMD_DUMP_ETH_STATS = 0x49, | 45 | MLX4_CMD_DUMP_ETH_STATS = 0x49, |
46 | }; | 46 | }; |
47 | 47 | ||
48 | enum { | ||
49 | MCAST_DIRECT_ONLY = 0, | ||
50 | MCAST_DIRECT = 1, | ||
51 | MCAST_DEFAULT = 2 | ||
52 | }; | ||
53 | |||
48 | struct mlx4_set_port_general_context { | 54 | struct mlx4_set_port_general_context { |
49 | u8 reserved[3]; | 55 | u8 reserved[3]; |
50 | u8 flags; | 56 | u8 flags; |
@@ -60,14 +66,17 @@ struct mlx4_set_port_general_context { | |||
60 | 66 | ||
61 | struct mlx4_set_port_rqp_calc_context { | 67 | struct mlx4_set_port_rqp_calc_context { |
62 | __be32 base_qpn; | 68 | __be32 base_qpn; |
63 | __be32 flags; | 69 | u8 reserved; |
64 | u8 reserved[3]; | 70 | u8 n_mac; |
71 | u8 n_vlan; | ||
72 | u8 n_prio; | ||
73 | u8 reserved2[3]; | ||
65 | u8 mac_miss; | 74 | u8 mac_miss; |
66 | u8 intra_no_vlan; | 75 | u8 intra_no_vlan; |
67 | u8 no_vlan; | 76 | u8 no_vlan; |
68 | u8 intra_vlan_miss; | 77 | u8 intra_vlan_miss; |
69 | u8 vlan_miss; | 78 | u8 vlan_miss; |
70 | u8 reserved2[3]; | 79 | u8 reserved3[3]; |
71 | u8 no_vlan_prio; | 80 | u8 no_vlan_prio; |
72 | __be32 promisc; | 81 | __be32 promisc; |
73 | __be32 mcast; | 82 | __be32 mcast; |
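
With the renamed shifts above, the SET_PORT context packs the unicast promisc enable at bit 31 and the multicast promisc mode at bit 30, each OR'ed with the base QP number in the low bits (the driver then stores the words big-endian via cpu_to_be32). A small sketch of the packing, using the enum and shift values from this hunk:

/* Userspace sketch of the SET_PORT promisc/mcast word packing above. */
#include <stdio.h>
#include <stdint.h>

#define SET_PORT_PROMISC_SHIFT    31
#define SET_PORT_MC_PROMISC_SHIFT 30

enum { MCAST_DIRECT_ONLY = 0, MCAST_DIRECT = 1, MCAST_DEFAULT = 2 };

int main(void)
{
	uint32_t base_qpn = 0x200;
	uint32_t promisc = 1;               /* unicast promisc enable */
	uint32_t m_promisc = MCAST_DEFAULT; /* multicast promisc mode */
	uint32_t promisc_word = promisc << SET_PORT_PROMISC_SHIFT | base_qpn;
	uint32_t mcast_word = m_promisc << SET_PORT_MC_PROMISC_SHIFT | base_qpn;

	printf("promisc word: %#010x\n", (unsigned)promisc_word);
	printf("mcast word:   %#010x\n", (unsigned)mcast_word);
	return 0;
}
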
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index 570f2508fb3..05998ee297c 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c | |||
@@ -845,16 +845,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) | |||
845 | } | 845 | } |
846 | 846 | ||
847 | /* Configure RSS indirection qp */ | 847 | /* Configure RSS indirection qp */ |
848 | err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn); | ||
849 | if (err) { | ||
850 | en_err(priv, "Failed to reserve range for RSS " | ||
851 | "indirection qp\n"); | ||
852 | goto rss_err; | ||
853 | } | ||
854 | err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); | 848 | err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); |
855 | if (err) { | 849 | if (err) { |
856 | en_err(priv, "Failed to allocate RSS indirection QP\n"); | 850 | en_err(priv, "Failed to allocate RSS indirection QP\n"); |
857 | goto reserve_err; | 851 | goto rss_err; |
858 | } | 852 | } |
859 | rss_map->indir_qp.event = mlx4_en_sqp_event; | 853 | rss_map->indir_qp.event = mlx4_en_sqp_event; |
860 | mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, | 854 | mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, |
@@ -881,8 +875,6 @@ indir_err: | |||
881 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); | 875 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); |
882 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); | 876 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); |
883 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); | 877 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); |
884 | reserve_err: | ||
885 | mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1); | ||
886 | rss_err: | 878 | rss_err: |
887 | for (i = 0; i < good_qps; i++) { | 879 | for (i = 0; i < good_qps; i++) { |
888 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], | 880 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], |
@@ -904,7 +896,6 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv) | |||
904 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); | 896 | MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); |
905 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); | 897 | mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); |
906 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); | 898 | mlx4_qp_free(mdev->dev, &rss_map->indir_qp); |
907 | mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1); | ||
908 | 899 | ||
909 | for (i = 0; i < priv->rx_ring_num; i++) { | 900 | for (i = 0; i < priv->rx_ring_num; i++) { |
910 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], | 901 | mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], |
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index a680cd4a5ab..01feb8fd42a 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c | |||
@@ -44,6 +44,7 @@ | |||
44 | 44 | ||
45 | enum { | 45 | enum { |
46 | MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ | 46 | MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ |
47 | MAX_BF = 256, | ||
47 | }; | 48 | }; |
48 | 49 | ||
49 | static int inline_thold __read_mostly = MAX_INLINE; | 50 | static int inline_thold __read_mostly = MAX_INLINE; |
@@ -52,7 +53,7 @@ module_param_named(inline_thold, inline_thold, int, 0444); | |||
52 | MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); | 53 | MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); |
53 | 54 | ||
54 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | 55 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, |
55 | struct mlx4_en_tx_ring *ring, u32 size, | 56 | struct mlx4_en_tx_ring *ring, int qpn, u32 size, |
56 | u16 stride) | 57 | u16 stride) |
57 | { | 58 | { |
58 | struct mlx4_en_dev *mdev = priv->mdev; | 59 | struct mlx4_en_dev *mdev = priv->mdev; |
@@ -103,23 +104,25 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | |||
103 | "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, | 104 | "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, |
104 | ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); | 105 | ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); |
105 | 106 | ||
106 | err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn); | 107 | ring->qpn = qpn; |
107 | if (err) { | ||
108 | en_err(priv, "Failed reserving qp for tx ring.\n"); | ||
109 | goto err_map; | ||
110 | } | ||
111 | |||
112 | err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); | 108 | err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); |
113 | if (err) { | 109 | if (err) { |
114 | en_err(priv, "Failed allocating qp %d\n", ring->qpn); | 110 | en_err(priv, "Failed allocating qp %d\n", ring->qpn); |
115 | goto err_reserve; | 111 | goto err_map; |
116 | } | 112 | } |
117 | ring->qp.event = mlx4_en_sqp_event; | 113 | ring->qp.event = mlx4_en_sqp_event; |
118 | 114 | ||
115 | err = mlx4_bf_alloc(mdev->dev, &ring->bf); | ||
116 | if (err) { | ||
117 | en_dbg(DRV, priv, "working without blueflame (%d)", err); | ||
118 | ring->bf.uar = &mdev->priv_uar; | ||
119 | ring->bf.uar->map = mdev->uar_map; | ||
120 | ring->bf_enabled = false; | ||
121 | } else | ||
122 | ring->bf_enabled = true; | ||
123 | |||
119 | return 0; | 124 | return 0; |
120 | 125 | ||
121 | err_reserve: | ||
122 | mlx4_qp_release_range(mdev->dev, ring->qpn, 1); | ||
123 | err_map: | 126 | err_map: |
124 | mlx4_en_unmap_buffer(&ring->wqres.buf); | 127 | mlx4_en_unmap_buffer(&ring->wqres.buf); |
125 | err_hwq_res: | 128 | err_hwq_res: |
@@ -139,6 +142,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, | |||
139 | struct mlx4_en_dev *mdev = priv->mdev; | 142 | struct mlx4_en_dev *mdev = priv->mdev; |
140 | en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); | 143 | en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); |
141 | 144 | ||
145 | if (ring->bf_enabled) | ||
146 | mlx4_bf_free(mdev->dev, &ring->bf); | ||
142 | mlx4_qp_remove(mdev->dev, &ring->qp); | 147 | mlx4_qp_remove(mdev->dev, &ring->qp); |
143 | mlx4_qp_free(mdev->dev, &ring->qp); | 148 | mlx4_qp_free(mdev->dev, &ring->qp); |
144 | mlx4_qp_release_range(mdev->dev, ring->qpn, 1); | 149 | mlx4_qp_release_range(mdev->dev, ring->qpn, 1); |
@@ -171,6 +176,8 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | |||
171 | 176 | ||
172 | mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, | 177 | mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, |
173 | ring->cqn, &ring->context); | 178 | ring->cqn, &ring->context); |
179 | if (ring->bf_enabled) | ||
180 | ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); | ||
174 | 181 | ||
175 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, | 182 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, |
176 | &ring->qp, &ring->qp_state); | 183 | &ring->qp, &ring->qp_state); |
@@ -591,6 +598,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
591 | return skb_tx_hash(dev, skb); | 598 | return skb_tx_hash(dev, skb); |
592 | } | 599 | } |
593 | 600 | ||
601 | static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt) | ||
602 | { | ||
603 | __iowrite64_copy(dst, src, bytecnt / 8); | ||
604 | } | ||
605 | |||
594 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | 606 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) |
595 | { | 607 | { |
596 | struct mlx4_en_priv *priv = netdev_priv(dev); | 608 | struct mlx4_en_priv *priv = netdev_priv(dev); |
@@ -609,12 +621,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
609 | int desc_size; | 621 | int desc_size; |
610 | int real_size; | 622 | int real_size; |
611 | dma_addr_t dma; | 623 | dma_addr_t dma; |
612 | u32 index; | 624 | u32 index, bf_index; |
613 | __be32 op_own; | 625 | __be32 op_own; |
614 | u16 vlan_tag = 0; | 626 | u16 vlan_tag = 0; |
615 | int i; | 627 | int i; |
616 | int lso_header_size; | 628 | int lso_header_size; |
617 | void *fragptr; | 629 | void *fragptr; |
630 | bool bounce = false; | ||
618 | 631 | ||
619 | if (!priv->port_up) | 632 | if (!priv->port_up) |
620 | goto tx_drop; | 633 | goto tx_drop; |
@@ -657,13 +670,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
657 | 670 | ||
658 | /* Packet is good - grab an index and transmit it */ | 671 | /* Packet is good - grab an index and transmit it */ |
659 | index = ring->prod & ring->size_mask; | 672 | index = ring->prod & ring->size_mask; |
673 | bf_index = ring->prod; | ||
660 | 674 | ||
661 | /* See if we have enough space for whole descriptor TXBB for setting | 675 | /* See if we have enough space for whole descriptor TXBB for setting |
662 | * SW ownership on next descriptor; if not, use a bounce buffer. */ | 676 | * SW ownership on next descriptor; if not, use a bounce buffer. */ |
663 | if (likely(index + nr_txbb <= ring->size)) | 677 | if (likely(index + nr_txbb <= ring->size)) |
664 | tx_desc = ring->buf + index * TXBB_SIZE; | 678 | tx_desc = ring->buf + index * TXBB_SIZE; |
665 | else | 679 | else { |
666 | tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; | 680 | tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; |
681 | bounce = true; | ||
682 | } | ||
667 | 683 | ||
668 | /* Save skb in tx_info ring */ | 684 | /* Save skb in tx_info ring */ |
669 | tx_info = &ring->tx_info[index]; | 685 | tx_info = &ring->tx_info[index]; |
@@ -768,21 +784,37 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
768 | ring->prod += nr_txbb; | 784 | ring->prod += nr_txbb; |
769 | 785 | ||
770 | /* If we used a bounce buffer then copy descriptor back into place */ | 786 | /* If we used a bounce buffer then copy descriptor back into place */ |
771 | if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf) | 787 | if (bounce) |
772 | tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); | 788 | tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); |
773 | 789 | ||
774 | /* Run destructor before passing skb to HW */ | 790 | /* Run destructor before passing skb to HW */ |
775 | if (likely(!skb_shared(skb))) | 791 | if (likely(!skb_shared(skb))) |
776 | skb_orphan(skb); | 792 | skb_orphan(skb); |
777 | 793 | ||
778 | /* Ensure new descriptor hits memory | 794 | if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
779 | * before setting ownership of this descriptor to HW */ | 795 | *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn; |
780 | wmb(); | 796 | op_own |= htonl((bf_index & 0xffff) << 8); |
781 | tx_desc->ctrl.owner_opcode = op_own; | 797 | /* Ensure new descriptor hits memory
798 | * before setting ownership of this descriptor to HW */ | ||
799 | wmb(); | ||
800 | tx_desc->ctrl.owner_opcode = op_own; | ||
782 | 801 | ||
783 | /* Ring doorbell! */ | 802 | wmb(); |
784 | wmb(); | 803 | |
785 | writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL); | 804 | mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl, |
805 | desc_size); | ||
806 | |||
807 | wmb(); | ||
808 | |||
809 | ring->bf.offset ^= ring->bf.buf_size; | ||
810 | } else { | ||
811 | /* Ensure new descriptor hits memory | ||
812 | * before setting ownership of this descriptor to HW */ | ||
813 | wmb(); | ||
814 | tx_desc->ctrl.owner_opcode = op_own; | ||
815 | wmb(); | ||
816 | writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); | ||
817 | } | ||
786 | 818 | ||
787 | /* Poll CQ here */ | 819 | /* Poll CQ here */ |
788 | mlx4_en_xmit_poll(priv, tx_ind); | 820 | mlx4_en_xmit_poll(priv, tx_ind); |
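
The BlueFlame path added above avoids the separate doorbell write for small sends: when the descriptor fits (desc_size <= MAX_BF, no bounce buffer, no VLAN tag), it is copied whole into a write-combining doorbell page with 64-bit stores, and the two BF half-buffers are ping-ponged via offset ^= buf_size. The model below uses plain memory as a stand-in for the WC BAR; buffer size and descriptor contents are assumptions.

/* Userspace model of the BlueFlame post above. */
#include <stdio.h>
#include <stdint.h>

#define BF_BUF_SIZE 256

static uint64_t bf_page[2 * BF_BUF_SIZE / 8];	/* stand-in for the WC BAR */
static unsigned int bf_offset;

/* models __iowrite64_copy(): copy the descriptor with 64-bit stores */
static void bf_copy(uint64_t *dst, const uint64_t *src, unsigned int bytecnt)
{
	unsigned int i;

	for (i = 0; i < bytecnt / 8; i++)
		dst[i] = src[i];
}

static void bf_post(const uint64_t *desc, unsigned int desc_size)
{
	bf_copy(bf_page + bf_offset / 8, desc, desc_size);
	bf_offset ^= BF_BUF_SIZE;	/* ping-pong between the two halves */
}

int main(void)
{
	uint64_t desc[8] = { 0x1111, 0x2222 };	/* fake 64-byte descriptor */

	bf_post(desc, sizeof(desc));
	printf("offset after 1st post: %u\n", bf_offset);	/* 256 */
	bf_post(desc, sizeof(desc));
	printf("offset after 2nd post: %u\n", bf_offset);	/* 0 */
	return 0;
}
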
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index 552d0fce6f6..506cfd0372e 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c | |||
@@ -42,7 +42,7 @@ | |||
42 | #include "fw.h" | 42 | #include "fw.h" |
43 | 43 | ||
44 | enum { | 44 | enum { |
45 | MLX4_IRQNAME_SIZE = 64 | 45 | MLX4_IRQNAME_SIZE = 32 |
46 | }; | 46 | }; |
47 | 47 | ||
48 | enum { | 48 | enum { |
@@ -317,8 +317,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev) | |||
317 | * we need to map, take the difference of highest index and | 317 | * we need to map, take the difference of highest index and |
318 | * the lowest index we'll use and add 1. | 318 | * the lowest index we'll use and add 1. |
319 | */ | 319 | */ |
320 | return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - | 320 | return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs + |
321 | dev->caps.reserved_eqs / 4 + 1; | 321 | dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1; |
322 | } | 322 | } |
323 | 323 | ||
324 | static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) | 324 | static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) |
@@ -496,16 +496,32 @@ static void mlx4_free_eq(struct mlx4_dev *dev, | |||
496 | static void mlx4_free_irqs(struct mlx4_dev *dev) | 496 | static void mlx4_free_irqs(struct mlx4_dev *dev) |
497 | { | 497 | { |
498 | struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; | 498 | struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; |
499 | int i; | 499 | struct mlx4_priv *priv = mlx4_priv(dev); |
500 | int i, vec; | ||
500 | 501 | ||
501 | if (eq_table->have_irq) | 502 | if (eq_table->have_irq) |
502 | free_irq(dev->pdev->irq, dev); | 503 | free_irq(dev->pdev->irq, dev); |
504 | |||
503 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) | 505 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) |
504 | if (eq_table->eq[i].have_irq) { | 506 | if (eq_table->eq[i].have_irq) { |
505 | free_irq(eq_table->eq[i].irq, eq_table->eq + i); | 507 | free_irq(eq_table->eq[i].irq, eq_table->eq + i); |
506 | eq_table->eq[i].have_irq = 0; | 508 | eq_table->eq[i].have_irq = 0; |
507 | } | 509 | } |
508 | 510 | ||
511 | for (i = 0; i < dev->caps.comp_pool; i++) { | ||
512 | /* | ||
513 | * Free the assigned IRQs. All bits should already be | ||
514 | * zero, but validate before freeing. | ||
515 | */ | ||
516 | if (priv->msix_ctl.pool_bm & 1ULL << i) { | ||
517 | /* no locking needed here */ | ||
518 | vec = dev->caps.num_comp_vectors + 1 + i; | ||
519 | free_irq(priv->eq_table.eq[vec].irq, | ||
520 | &priv->eq_table.eq[vec]); | ||
521 | } | ||
522 | } | ||
523 | |||
524 | |||
509 | kfree(eq_table->irq_names); | 525 | kfree(eq_table->irq_names); |
510 | } | 526 | } |
511 | 527 | ||
@@ -578,7 +594,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
578 | (priv->eq_table.inta_pin < 32 ? 4 : 0); | 594 | (priv->eq_table.inta_pin < 32 ? 4 : 0); |
579 | 595 | ||
580 | priv->eq_table.irq_names = | 596 | priv->eq_table.irq_names = |
581 | kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), | 597 | kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + |
598 | dev->caps.comp_pool), | ||
582 | GFP_KERNEL); | 599 | GFP_KERNEL); |
583 | if (!priv->eq_table.irq_names) { | 600 | if (!priv->eq_table.irq_names) { |
584 | err = -ENOMEM; | 601 | err = -ENOMEM; |
@@ -601,6 +618,22 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
601 | if (err) | 618 | if (err) |
602 | goto err_out_comp; | 619 | goto err_out_comp; |
603 | 620 | ||
621 | /* If the additional completion vector pool size is 0, this loop will not run */ | ||
622 | for (i = dev->caps.num_comp_vectors + 1; | ||
623 | i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) { | ||
624 | |||
625 | err = mlx4_create_eq(dev, dev->caps.num_cqs - | ||
626 | dev->caps.reserved_cqs + | ||
627 | MLX4_NUM_SPARE_EQE, | ||
628 | (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, | ||
629 | &priv->eq_table.eq[i]); | ||
630 | if (err) { | ||
631 | --i; | ||
632 | goto err_out_unmap; | ||
633 | } | ||
634 | } | ||
635 | |||
636 | |||
604 | if (dev->flags & MLX4_FLAG_MSI_X) { | 637 | if (dev->flags & MLX4_FLAG_MSI_X) { |
605 | const char *eq_name; | 638 | const char *eq_name; |
606 | 639 | ||
@@ -686,7 +719,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) | |||
686 | 719 | ||
687 | mlx4_free_irqs(dev); | 720 | mlx4_free_irqs(dev); |
688 | 721 | ||
689 | for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) | 722 | for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) |
690 | mlx4_free_eq(dev, &priv->eq_table.eq[i]); | 723 | mlx4_free_eq(dev, &priv->eq_table.eq[i]); |
691 | 724 | ||
692 | mlx4_unmap_clr_int(dev); | 725 | mlx4_unmap_clr_int(dev); |
@@ -743,3 +776,65 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) | |||
743 | return err; | 776 | return err; |
744 | } | 777 | } |
745 | EXPORT_SYMBOL(mlx4_test_interrupts); | 778 | EXPORT_SYMBOL(mlx4_test_interrupts); |
779 | |||
780 | int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector) | ||
781 | { | ||
782 | |||
783 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
784 | int vec = 0, err = 0, i; | ||
785 | |||
786 | spin_lock(&priv->msix_ctl.pool_lock); | ||
787 | for (i = 0; !vec && i < dev->caps.comp_pool; i++) { | ||
788 | if (~priv->msix_ctl.pool_bm & 1ULL << i) { | ||
789 | priv->msix_ctl.pool_bm |= 1ULL << i; | ||
790 | vec = dev->caps.num_comp_vectors + 1 + i; | ||
791 | snprintf(priv->eq_table.irq_names + | ||
792 | vec * MLX4_IRQNAME_SIZE, | ||
793 | MLX4_IRQNAME_SIZE, "%s", name); | ||
794 | err = request_irq(priv->eq_table.eq[vec].irq, | ||
795 | mlx4_msi_x_interrupt, 0, | ||
796 | &priv->eq_table.irq_names[vec<<5], | ||
797 | priv->eq_table.eq + vec); | ||
798 | if (err) { | ||
799 | /* clear the bit we just set by flipping it back */ | ||
800 | priv->msix_ctl.pool_bm ^= 1ULL << i; | ||
801 | vec = 0; | ||
802 | continue; | ||
803 | /* don't break; keep trying the remaining pool entries */ | ||
804 | } | ||
805 | eq_set_ci(&priv->eq_table.eq[vec], 1); | ||
806 | } | ||
807 | } | ||
808 | spin_unlock(&priv->msix_ctl.pool_lock); | ||
809 | |||
810 | if (vec) { | ||
811 | *vector = vec; | ||
812 | } else { | ||
813 | *vector = 0; | ||
814 | err = (i == dev->caps.comp_pool) ? -ENOSPC : err; | ||
815 | } | ||
816 | return err; | ||
817 | } | ||
818 | EXPORT_SYMBOL(mlx4_assign_eq); | ||
819 | |||
820 | void mlx4_release_eq(struct mlx4_dev *dev, int vec) | ||
821 | { | ||
822 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
823 | /* bitmap index */ | ||
824 | int i = vec - dev->caps.num_comp_vectors - 1; | ||
825 | |||
826 | if (likely(i >= 0)) { | ||
827 | /* Sanity check: make sure we're not trying to free IRQs | ||
828 | belonging to a legacy EQ */ | ||
829 | spin_lock(&priv->msix_ctl.pool_lock); | ||
830 | if (priv->msix_ctl.pool_bm & 1ULL << i) { | ||
831 | free_irq(priv->eq_table.eq[vec].irq, | ||
832 | &priv->eq_table.eq[vec]); | ||
833 | priv->msix_ctl.pool_bm &= ~(1ULL << i); | ||
834 | } | ||
835 | spin_unlock(&priv->msix_ctl.pool_lock); | ||
836 | } | ||
837 | |||
838 | } | ||
839 | EXPORT_SYMBOL(mlx4_release_eq); | ||
840 | |||
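
The new mlx4_assign_eq()/mlx4_release_eq() pair above manages the extra completion vectors with a u64 bitmap: bit i set means pool vector i is taken, and pool vectors are numbered after the legacy ones. The runnable model below mirrors that allocation logic; the constants are assumptions, and the real driver additionally holds msix_ctl.pool_lock around both operations and requests/frees the IRQ.

/* Model of the msix_ctl pool bitmap used above. */
#include <stdio.h>
#include <stdint.h>

#define LEGACY_VECTORS 4	/* num_comp_vectors + 1, assumed */
#define COMP_POOL      8

static uint64_t pool_bm;

static int assign_eq(int *vector)
{
	int i;

	for (i = 0; i < COMP_POOL; i++) {
		if (!(pool_bm & 1ULL << i)) {
			pool_bm |= 1ULL << i;
			*vector = LEGACY_VECTORS + i;
			return 0;
		}
	}
	*vector = 0;
	return -1;		/* pool exhausted: -ENOSPC in the driver */
}

static void release_eq(int vec)
{
	int i = vec - LEGACY_VECTORS;	/* bitmap index */

	if (i >= 0 && (pool_bm & 1ULL << i))
		pool_bm &= ~(1ULL << i);
}

int main(void)
{
	int v1, v2;

	assign_eq(&v1);
	assign_eq(&v2);
	printf("got vectors %d and %d, bm=%#llx\n", v1, v2,
	       (unsigned long long)pool_bm);
	release_eq(v1);
	printf("after release, bm=%#llx\n", (unsigned long long)pool_bm);
	return 0;
}
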
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index 5de1db89783..67a209ba939 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c | |||
@@ -274,8 +274,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
274 | dev_cap->stat_rate_support = stat_rate; | 274 | dev_cap->stat_rate_support = stat_rate; |
275 | MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET); | 275 | MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET); |
276 | dev_cap->udp_rss = field & 0x1; | 276 | dev_cap->udp_rss = field & 0x1; |
277 | dev_cap->vep_uc_steering = field & 0x2; | ||
278 | dev_cap->vep_mc_steering = field & 0x4; | ||
277 | MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET); | 279 | MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET); |
278 | dev_cap->loopback_support = field & 0x1; | 280 | dev_cap->loopback_support = field & 0x1; |
281 | dev_cap->wol = field & 0x40; | ||
279 | MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); | 282 | MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); |
280 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); | 283 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); |
281 | dev_cap->reserved_uars = field >> 4; | 284 | dev_cap->reserved_uars = field >> 4; |
@@ -737,6 +740,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) | |||
737 | #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) | 740 | #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) |
738 | #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) | 741 | #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) |
739 | #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) | 742 | #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) |
743 | #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) | ||
740 | #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) | 744 | #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) |
741 | #define INIT_HCA_TPT_OFFSET 0x0f0 | 745 | #define INIT_HCA_TPT_OFFSET 0x0f0 |
742 | #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) | 746 | #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) |
@@ -797,6 +801,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) | |||
797 | MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); | 801 | MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); |
798 | MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); | 802 | MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); |
799 | MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); | 803 | MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); |
804 | if (dev->caps.vep_mc_steering) | ||
805 | MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET); | ||
800 | MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); | 806 | MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); |
801 | 807 | ||
802 | /* TPT attributes */ | 808 | /* TPT attributes */ |
@@ -908,3 +914,22 @@ int mlx4_NOP(struct mlx4_dev *dev) | |||
908 | /* Input modifier of 0x1f means "finish as soon as possible." */ | 914 | /* Input modifier of 0x1f means "finish as soon as possible." */ |
909 | return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); | 915 | return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); |
910 | } | 916 | } |
917 | |||
918 | #define MLX4_WOL_SETUP_MODE (5 << 28) | ||
919 | int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) | ||
920 | { | ||
921 | u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; | ||
922 | |||
923 | return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, | ||
924 | MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A); | ||
925 | } | ||
926 | EXPORT_SYMBOL_GPL(mlx4_wol_read); | ||
927 | |||
928 | int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) | ||
929 | { | ||
930 | u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; | ||
931 | |||
932 | return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, | ||
933 | MLX4_CMD_TIME_CLASS_A); | ||
934 | } | ||
935 | EXPORT_SYMBOL_GPL(mlx4_wol_write); | ||
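
The two WoL helpers added above reuse the MOD_STAT_CFG command: the setup mode sits in the high bits of the input modifier and the port number at bits 8..15, with op_mod 0x3 selecting a read and 0x1 a write. A one-liner sketch of the packing:

/* Sketch of the MOD_STAT_CFG input-modifier packing used above. */
#include <stdio.h>
#include <stdint.h>

#define MLX4_WOL_SETUP_MODE (5U << 28)

int main(void)
{
	int port = 2;
	uint32_t in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	printf("in_mod for port %d: %#010x\n", port, (unsigned)in_mod);
	return 0;
}
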
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h index 65cc72eb899..88003ebc618 100644 --- a/drivers/net/mlx4/fw.h +++ b/drivers/net/mlx4/fw.h | |||
@@ -80,6 +80,9 @@ struct mlx4_dev_cap { | |||
80 | u16 stat_rate_support; | 80 | u16 stat_rate_support; |
81 | int udp_rss; | 81 | int udp_rss; |
82 | int loopback_support; | 82 | int loopback_support; |
83 | int vep_uc_steering; | ||
84 | int vep_mc_steering; | ||
85 | int wol; | ||
83 | u32 flags; | 86 | u32 flags; |
84 | int reserved_uars; | 87 | int reserved_uars; |
85 | int uar_size; | 88 | int uar_size; |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index c83501122d7..62fa7eec5f0 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/pci.h> | 39 | #include <linux/pci.h> |
40 | #include <linux/dma-mapping.h> | 40 | #include <linux/dma-mapping.h> |
41 | #include <linux/slab.h> | 41 | #include <linux/slab.h> |
42 | #include <linux/io-mapping.h> | ||
42 | 43 | ||
43 | #include <linux/mlx4/device.h> | 44 | #include <linux/mlx4/device.h> |
44 | #include <linux/mlx4/doorbell.h> | 45 | #include <linux/mlx4/doorbell.h> |
@@ -227,6 +228,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
227 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; | 228 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; |
228 | dev->caps.udp_rss = dev_cap->udp_rss; | 229 | dev->caps.udp_rss = dev_cap->udp_rss; |
229 | dev->caps.loopback_support = dev_cap->loopback_support; | 230 | dev->caps.loopback_support = dev_cap->loopback_support; |
231 | dev->caps.vep_uc_steering = dev_cap->vep_uc_steering; | ||
232 | dev->caps.vep_mc_steering = dev_cap->vep_mc_steering; | ||
233 | dev->caps.wol = dev_cap->wol; | ||
230 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; | 234 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; |
231 | 235 | ||
232 | dev->caps.log_num_macs = log_num_mac; | 236 | dev->caps.log_num_macs = log_num_mac; |
@@ -718,8 +722,31 @@ static void mlx4_free_icms(struct mlx4_dev *dev) | |||
718 | mlx4_free_icm(dev, priv->fw.aux_icm, 0); | 722 | mlx4_free_icm(dev, priv->fw.aux_icm, 0); |
719 | } | 723 | } |
720 | 724 | ||
725 | static int map_bf_area(struct mlx4_dev *dev) | ||
726 | { | ||
727 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
728 | resource_size_t bf_start; | ||
729 | resource_size_t bf_len; | ||
730 | int err = 0; | ||
731 | |||
732 | bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT); | ||
733 | bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT); | ||
734 | priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); | ||
735 | if (!priv->bf_mapping) | ||
736 | err = -ENOMEM; | ||
737 | |||
738 | return err; | ||
739 | } | ||
740 | |||
741 | static void unmap_bf_area(struct mlx4_dev *dev) | ||
742 | { | ||
743 | if (mlx4_priv(dev)->bf_mapping) | ||
744 | io_mapping_free(mlx4_priv(dev)->bf_mapping); | ||
745 | } | ||
746 | |||
721 | static void mlx4_close_hca(struct mlx4_dev *dev) | 747 | static void mlx4_close_hca(struct mlx4_dev *dev) |
722 | { | 748 | { |
749 | unmap_bf_area(dev); | ||
723 | mlx4_CLOSE_HCA(dev, 0); | 750 | mlx4_CLOSE_HCA(dev, 0); |
724 | mlx4_free_icms(dev); | 751 | mlx4_free_icms(dev); |
725 | mlx4_UNMAP_FA(dev); | 752 | mlx4_UNMAP_FA(dev); |
@@ -772,6 +799,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
772 | goto err_stop_fw; | 799 | goto err_stop_fw; |
773 | } | 800 | } |
774 | 801 | ||
802 | if (map_bf_area(dev)) | ||
803 | mlx4_dbg(dev, "Failed to map blue flame area\n"); | ||
804 | |||
775 | init_hca.log_uar_sz = ilog2(dev->caps.num_uars); | 805 | init_hca.log_uar_sz = ilog2(dev->caps.num_uars); |
776 | 806 | ||
777 | err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); | 807 | err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); |
@@ -802,6 +832,7 @@ err_free_icm: | |||
802 | mlx4_free_icms(dev); | 832 | mlx4_free_icms(dev); |
803 | 833 | ||
804 | err_stop_fw: | 834 | err_stop_fw: |
835 | unmap_bf_area(dev); | ||
805 | mlx4_UNMAP_FA(dev); | 836 | mlx4_UNMAP_FA(dev); |
806 | mlx4_free_icm(dev, priv->fw.fw_icm, 0); | 837 | mlx4_free_icm(dev, priv->fw.fw_icm, 0); |
807 | 838 | ||
@@ -969,13 +1000,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) | |||
969 | { | 1000 | { |
970 | struct mlx4_priv *priv = mlx4_priv(dev); | 1001 | struct mlx4_priv *priv = mlx4_priv(dev); |
971 | struct msix_entry *entries; | 1002 | struct msix_entry *entries; |
972 | int nreq; | 1003 | int nreq = min_t(int, dev->caps.num_ports * |
1004 | min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT) | ||
1005 | + MSIX_LEGACY_SZ, MAX_MSIX); | ||
973 | int err; | 1006 | int err; |
974 | int i; | 1007 | int i; |
975 | 1008 | ||
976 | if (msi_x) { | 1009 | if (msi_x) { |
977 | nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, | 1010 | nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, |
978 | num_possible_cpus() + 1); | 1011 | nreq); |
979 | entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); | 1012 | entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); |
980 | if (!entries) | 1013 | if (!entries) |
981 | goto no_msi; | 1014 | goto no_msi; |
@@ -998,7 +1031,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) | |||
998 | goto no_msi; | 1031 | goto no_msi; |
999 | } | 1032 | } |
1000 | 1033 | ||
1001 | dev->caps.num_comp_vectors = nreq - 1; | 1034 | if (nreq < |
1035 | MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) { | ||
1036 | /* Working in legacy mode, all EQs shared */ | ||
1037 | dev->caps.comp_pool = 0; | ||
1038 | dev->caps.num_comp_vectors = nreq - 1; | ||
1039 | } else { | ||
1040 | dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; | ||
1041 | dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; | ||
1042 | } | ||
1002 | for (i = 0; i < nreq; ++i) | 1043 | for (i = 0; i < nreq; ++i) |
1003 | priv->eq_table.eq[i].irq = entries[i].vector; | 1044 | priv->eq_table.eq[i].irq = entries[i].vector; |
1004 | 1045 | ||
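
The new sizing bounds the vector request by a per-port budget plus a fixed legacy set, then splits the grant into shared legacy vectors and a per-consumer completion pool. A worked example of the same arithmetic (the MAX_MSIX_P_PORT, MIN_MSIX_P_PORT, MSIX_LEGACY_SZ, and MAX_MSIX values are assumptions chosen for illustration, not quoted from this patch):

```c
/* Sketch of the new MSI-X sizing; constants are assumed values. */
#include <stdio.h>

#define MAX_MSIX_P_PORT 8
#define MIN_MSIX_P_PORT 2
#define MSIX_LEGACY_SZ  4
#define MAX_MSIX        64

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int num_ports = 2, num_online_cpus = 16;
	int num_eqs = 128, reserved_eqs = 8;

	int nreq = min_int(num_ports *
			   min_int(num_online_cpus + 1, MAX_MSIX_P_PORT)
			   + MSIX_LEGACY_SZ, MAX_MSIX);
	nreq = min_int(num_eqs - reserved_eqs, nreq);

	if (nreq < MSIX_LEGACY_SZ + num_ports * MIN_MSIX_P_PORT)
		printf("legacy mode: comp_pool=0, num_comp_vectors=%d\n",
		       nreq - 1);
	else	/* here: comp_pool=16, num_comp_vectors=3 */
		printf("pooled mode: comp_pool=%d, num_comp_vectors=%d\n",
		       nreq - MSIX_LEGACY_SZ, MSIX_LEGACY_SZ - 1);
	return 0;
}
```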
@@ -1010,6 +1051,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) | |||
1010 | 1051 | ||
1011 | no_msi: | 1052 | no_msi: |
1012 | dev->caps.num_comp_vectors = 1; | 1053 | dev->caps.num_comp_vectors = 1; |
1054 | dev->caps.comp_pool = 0; | ||
1013 | 1055 | ||
1014 | for (i = 0; i < 2; ++i) | 1056 | for (i = 0; i < 2; ++i) |
1015 | priv->eq_table.eq[i].irq = dev->pdev->irq; | 1057 | priv->eq_table.eq[i].irq = dev->pdev->irq; |
@@ -1049,6 +1091,59 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info) | |||
1049 | device_remove_file(&info->dev->pdev->dev, &info->port_attr); | 1091 | device_remove_file(&info->dev->pdev->dev, &info->port_attr); |
1050 | } | 1092 | } |
1051 | 1093 | ||
1094 | static int mlx4_init_steering(struct mlx4_dev *dev) | ||
1095 | { | ||
1096 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
1097 | int num_entries = dev->caps.num_ports; | ||
1098 | int i, j; | ||
1099 | |||
1100 | priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); | ||
1101 | if (!priv->steer) | ||
1102 | return -ENOMEM; | ||
1103 | |||
1104 | for (i = 0; i < num_entries; i++) { | ||
1105 | for (j = 0; j < MLX4_NUM_STEERS; j++) { | ||
1106 | INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); | ||
1107 | INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); | ||
1108 | } | ||
1109 | INIT_LIST_HEAD(&priv->steer[i].high_prios); | ||
1110 | } | ||
1111 | return 0; | ||
1112 | } | ||
1113 | |||
1114 | static void mlx4_clear_steering(struct mlx4_dev *dev) | ||
1115 | { | ||
1116 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
1117 | struct mlx4_steer_index *entry, *tmp_entry; | ||
1118 | struct mlx4_promisc_qp *pqp, *tmp_pqp; | ||
1119 | int num_entries = dev->caps.num_ports; | ||
1120 | int i, j; | ||
1121 | |||
1122 | for (i = 0; i < num_entries; i++) { | ||
1123 | for (j = 0; j < MLX4_NUM_STEERS; j++) { | ||
1124 | list_for_each_entry_safe(pqp, tmp_pqp, | ||
1125 | &priv->steer[i].promisc_qps[j], | ||
1126 | list) { | ||
1127 | list_del(&pqp->list); | ||
1128 | kfree(pqp); | ||
1129 | } | ||
1130 | list_for_each_entry_safe(entry, tmp_entry, | ||
1131 | &priv->steer[i].steer_entries[j], | ||
1132 | list) { | ||
1133 | list_del(&entry->list); | ||
1134 | list_for_each_entry_safe(pqp, tmp_pqp, | ||
1135 | &entry->duplicates, | ||
1136 | list) { | ||
1137 | list_del(&pqp->list); | ||
1138 | kfree(pqp); | ||
1139 | } | ||
1140 | kfree(entry); | ||
1141 | } | ||
1142 | } | ||
1143 | } | ||
1144 | kfree(priv->steer); | ||
1145 | } | ||
1146 | |||
1052 | static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | 1147 | static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) |
1053 | { | 1148 | { |
1054 | struct mlx4_priv *priv; | 1149 | struct mlx4_priv *priv; |
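
mlx4_clear_steering() above frees nodes while iterating, which is why it uses the *_safe list iterators throughout. A userspace analogue of the same lookahead-then-free pattern, with plain pointers standing in for the kernel list helpers:

```c
/* Minimal analogue of the delete-while-walking pattern that
 * list_for_each_entry_safe() makes safe: save the successor
 * before freeing the current node. */
#include <stdlib.h>

struct node { struct node *next; };

static void clear_list(struct node **head)
{
	struct node *cur = *head, *tmp;

	while (cur) {
		tmp = cur->next;   /* the *_safe iterator's lookahead */
		free(cur);
		cur = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->next = head;
		head = n;
	}
	clear_list(&head);
	return 0;
}
```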
@@ -1130,6 +1225,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1130 | INIT_LIST_HEAD(&priv->pgdir_list); | 1225 | INIT_LIST_HEAD(&priv->pgdir_list); |
1131 | mutex_init(&priv->pgdir_mutex); | 1226 | mutex_init(&priv->pgdir_mutex); |
1132 | 1227 | ||
1228 | pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id); | ||
1229 | |||
1230 | INIT_LIST_HEAD(&priv->bf_list); | ||
1231 | mutex_init(&priv->bf_mutex); | ||
1232 | |||
1133 | /* | 1233 | /* |
1134 | * Now reset the HCA before we touch the PCI capabilities or | 1234 | * Now reset the HCA before we touch the PCI capabilities or |
1135 | * attempt a firmware command, since a boot ROM may have left | 1235 | * attempt a firmware command, since a boot ROM may have left |
@@ -1154,8 +1254,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1154 | if (err) | 1254 | if (err) |
1155 | goto err_close; | 1255 | goto err_close; |
1156 | 1256 | ||
1257 | priv->msix_ctl.pool_bm = 0; | ||
1258 | spin_lock_init(&priv->msix_ctl.pool_lock); | ||
1259 | |||
1157 | mlx4_enable_msi_x(dev); | 1260 | mlx4_enable_msi_x(dev); |
1158 | 1261 | ||
1262 | err = mlx4_init_steering(dev); | ||
1263 | if (err) | ||
1264 | goto err_free_eq; | ||
1265 | |||
1159 | err = mlx4_setup_hca(dev); | 1266 | err = mlx4_setup_hca(dev); |
1160 | if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { | 1267 | if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { |
1161 | dev->flags &= ~MLX4_FLAG_MSI_X; | 1268 | dev->flags &= ~MLX4_FLAG_MSI_X; |
@@ -1164,7 +1271,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1164 | } | 1271 | } |
1165 | 1272 | ||
1166 | if (err) | 1273 | if (err) |
1167 | goto err_free_eq; | 1274 | goto err_steer; |
1168 | 1275 | ||
1169 | for (port = 1; port <= dev->caps.num_ports; port++) { | 1276 | for (port = 1; port <= dev->caps.num_ports; port++) { |
1170 | err = mlx4_init_port_info(dev, port); | 1277 | err = mlx4_init_port_info(dev, port); |
@@ -1197,6 +1304,9 @@ err_port: | |||
1197 | mlx4_cleanup_pd_table(dev); | 1304 | mlx4_cleanup_pd_table(dev); |
1198 | mlx4_cleanup_uar_table(dev); | 1305 | mlx4_cleanup_uar_table(dev); |
1199 | 1306 | ||
1307 | err_steer: | ||
1308 | mlx4_clear_steering(dev); | ||
1309 | |||
1200 | err_free_eq: | 1310 | err_free_eq: |
1201 | mlx4_free_eq_table(dev); | 1311 | mlx4_free_eq_table(dev); |
1202 | 1312 | ||
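
The reordered error labels matter: unwinding must mirror setup in reverse, so a failure after mlx4_init_steering() now falls through mlx4_clear_steering() before the EQ table is freed. A compile-able skeleton of the goto-unwind idiom the patch extends (the step/undo names are invented):

```c
/* Skeleton of the probe-time unwind idiom: each err_* label undoes
 * exactly the steps that succeeded, in reverse order. */
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }   /* pretend this step fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int probe(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto err_a;
	err = step_c();
	if (err)
		goto err_b;	/* like goto err_steer in the patch */
	return 0;

err_b:
	undo_b();
err_a:
	undo_a();
out:
	return err;
}

int main(void) { return probe() ? 1 : 0; }
```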
@@ -1256,6 +1366,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) | |||
1256 | iounmap(priv->kar); | 1366 | iounmap(priv->kar); |
1257 | mlx4_uar_free(dev, &priv->driver_uar); | 1367 | mlx4_uar_free(dev, &priv->driver_uar); |
1258 | mlx4_cleanup_uar_table(dev); | 1368 | mlx4_cleanup_uar_table(dev); |
1369 | mlx4_clear_steering(dev); | ||
1259 | mlx4_free_eq_table(dev); | 1370 | mlx4_free_eq_table(dev); |
1260 | mlx4_close_hca(dev); | 1371 | mlx4_close_hca(dev); |
1261 | mlx4_cmd_cleanup(dev); | 1372 | mlx4_cmd_cleanup(dev); |
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c index 79cf42db2ea..e71372aa9cc 100644 --- a/drivers/net/mlx4/mcg.c +++ b/drivers/net/mlx4/mcg.c | |||
@@ -32,6 +32,7 @@ | |||
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/string.h> | 34 | #include <linux/string.h> |
35 | #include <linux/etherdevice.h> | ||
35 | 36 | ||
36 | #include <linux/mlx4/cmd.h> | 37 | #include <linux/mlx4/cmd.h> |
37 | 38 | ||
@@ -40,38 +41,40 @@ | |||
40 | #define MGM_QPN_MASK 0x00FFFFFF | 41 | #define MGM_QPN_MASK 0x00FFFFFF |
41 | #define MGM_BLCK_LB_BIT 30 | 42 | #define MGM_BLCK_LB_BIT 30 |
42 | 43 | ||
43 | struct mlx4_mgm { | ||
44 | __be32 next_gid_index; | ||
45 | __be32 members_count; | ||
46 | u32 reserved[2]; | ||
47 | u8 gid[16]; | ||
48 | __be32 qp[MLX4_QP_PER_MGM]; | ||
49 | }; | ||
50 | |||
51 | static const u8 zero_gid[16]; /* automatically initialized to 0 */ | 44 | static const u8 zero_gid[16]; /* automatically initialized to 0 */ |
52 | 45 | ||
53 | static int mlx4_READ_MCG(struct mlx4_dev *dev, int index, | 46 | static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, |
54 | struct mlx4_cmd_mailbox *mailbox) | 47 | struct mlx4_cmd_mailbox *mailbox) |
55 | { | 48 | { |
56 | return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, | 49 | return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, |
57 | MLX4_CMD_TIME_CLASS_A); | 50 | MLX4_CMD_TIME_CLASS_A); |
58 | } | 51 | } |
59 | 52 | ||
60 | static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index, | 53 | static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, |
61 | struct mlx4_cmd_mailbox *mailbox) | 54 | struct mlx4_cmd_mailbox *mailbox) |
62 | { | 55 | { |
63 | return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, | 56 | return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, |
64 | MLX4_CMD_TIME_CLASS_A); | 57 | MLX4_CMD_TIME_CLASS_A); |
65 | } | 58 | } |
66 | 59 | ||
67 | static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | 60 | static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer, |
68 | u16 *hash) | 61 | struct mlx4_cmd_mailbox *mailbox) |
62 | { | ||
63 | u32 in_mod; | ||
64 | |||
65 | in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1; | ||
66 | return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, | ||
67 | MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A); | ||
68 | } | ||
69 | |||
70 | static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, | ||
71 | u16 *hash, u8 op_mod) | ||
69 | { | 72 | { |
70 | u64 imm; | 73 | u64 imm; |
71 | int err; | 74 | int err; |
72 | 75 | ||
73 | err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH, | 76 | err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, |
74 | MLX4_CMD_TIME_CLASS_A); | 77 | MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A); |
75 | 78 | ||
76 | if (!err) | 79 | if (!err) |
77 | *hash = imm; | 80 | *hash = imm; |
@@ -79,6 +82,457 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox | |||
79 | return err; | 82 | return err; |
80 | } | 83 | } |
81 | 84 | ||
85 | static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num, | ||
86 | enum mlx4_steer_type steer, | ||
87 | u32 qpn) | ||
88 | { | ||
89 | struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
90 | struct mlx4_promisc_qp *pqp; | ||
91 | |||
92 | list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { | ||
93 | if (pqp->qpn == qpn) | ||
94 | return pqp; | ||
95 | } | ||
96 | /* not found */ | ||
97 | return NULL; | ||
98 | } | ||
99 | |||
100 | /* | ||
101 | * Add new entry to steering data structure. | ||
102 | * All promisc QPs should be added as well | ||
103 | */ | ||
104 | static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
105 | enum mlx4_steer_type steer, | ||
106 | unsigned int index, u32 qpn) | ||
107 | { | ||
108 | struct mlx4_steer *s_steer; | ||
109 | struct mlx4_cmd_mailbox *mailbox; | ||
110 | struct mlx4_mgm *mgm; | ||
111 | u32 members_count; | ||
112 | struct mlx4_steer_index *new_entry; | ||
113 | struct mlx4_promisc_qp *pqp; | ||
114 | struct mlx4_promisc_qp *dqp = NULL; | ||
115 | u32 prot; | ||
116 | int err; | ||
117 | u8 pf_num; | ||
118 | |||
119 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
120 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
121 | new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); | ||
122 | if (!new_entry) | ||
123 | return -ENOMEM; | ||
124 | |||
125 | INIT_LIST_HEAD(&new_entry->duplicates); | ||
126 | new_entry->index = index; | ||
127 | list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); | ||
128 | |||
129 | /* If the given qpn is also a promisc qp, | ||
130 | * it should be inserted into the duplicates list | ||
131 | */ | ||
132 | pqp = get_promisc_qp(dev, pf_num, steer, qpn); | ||
133 | if (pqp) { | ||
134 | dqp = kmalloc(sizeof *dqp, GFP_KERNEL); | ||
135 | if (!dqp) { | ||
136 | err = -ENOMEM; | ||
137 | goto out_alloc; | ||
138 | } | ||
139 | dqp->qpn = qpn; | ||
140 | list_add_tail(&dqp->list, &new_entry->duplicates); | ||
141 | } | ||
142 | |||
143 | /* if no promisc qps for this vep, we are done */ | ||
144 | if (list_empty(&s_steer->promisc_qps[steer])) | ||
145 | return 0; | ||
146 | |||
147 | /* now need to add all the promisc qps to the new | ||
148 | * steering entry, as they should also receive the packets | ||
149 | * destined to this address */ | ||
150 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
151 | if (IS_ERR(mailbox)) { | ||
152 | err = -ENOMEM; | ||
153 | goto out_alloc; | ||
154 | } | ||
155 | mgm = mailbox->buf; | ||
156 | |||
157 | err = mlx4_READ_ENTRY(dev, index, mailbox); | ||
158 | if (err) | ||
159 | goto out_mailbox; | ||
160 | |||
161 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
162 | prot = be32_to_cpu(mgm->members_count) >> 30; | ||
163 | list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { | ||
164 | /* don't add already existing qpn */ | ||
165 | if (pqp->qpn == qpn) | ||
166 | continue; | ||
167 | if (members_count == MLX4_QP_PER_MGM) { | ||
168 | /* out of space */ | ||
169 | err = -ENOMEM; | ||
170 | goto out_mailbox; | ||
171 | } | ||
172 | |||
173 | /* add the qpn */ | ||
174 | mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); | ||
175 | } | ||
176 | /* update the qps count and update the entry with all the promisc qps*/ | ||
177 | mgm->members_count = cpu_to_be32(members_count | (prot << 30)); | ||
178 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); | ||
179 | |||
180 | out_mailbox: | ||
181 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
182 | if (!err) | ||
183 | return 0; | ||
184 | out_alloc: | ||
185 | if (dqp) { | ||
186 | list_del(&dqp->list); | ||
187 | kfree(dqp); | ||
188 | } | ||
189 | list_del(&new_entry->list); | ||
190 | kfree(new_entry); | ||
191 | return err; | ||
192 | } | ||
193 | |||
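
new_steering_entry() and the helpers that follow all index the per-function steering array through the same `(vep_num << 1) | (port - 1)` packing on dual-port devices. A worked illustration of that mapping:

```c
/* Worked example of the pf_num index used by the steering helpers:
 * one slot per VEP on single-port HCAs, two (one per port) otherwise. */
#include <stdio.h>
#include <stdint.h>

static uint8_t pf_num(int num_ports, uint8_t vep_num, uint8_t port)
{
	return (num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
}

int main(void)
{
	/* dual-port device: vep 0 -> slots 0/1, vep 1 -> slots 2/3 */
	printf("%u %u %u %u\n",
	       pf_num(2, 0, 1), pf_num(2, 0, 2),
	       pf_num(2, 1, 1), pf_num(2, 1, 2));   /* prints: 0 1 2 3 */
	return 0;
}
```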
194 | /* update the data structures with existing steering entry */ | ||
195 | static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
196 | enum mlx4_steer_type steer, | ||
197 | unsigned int index, u32 qpn) | ||
198 | { | ||
199 | struct mlx4_steer *s_steer; | ||
200 | struct mlx4_steer_index *tmp_entry, *entry = NULL; | ||
201 | struct mlx4_promisc_qp *pqp; | ||
202 | struct mlx4_promisc_qp *dqp; | ||
203 | u8 pf_num; | ||
204 | |||
205 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
206 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
207 | |||
208 | pqp = get_promisc_qp(dev, pf_num, steer, qpn); | ||
209 | if (!pqp) | ||
210 | return 0; /* nothing to do */ | ||
211 | |||
212 | list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { | ||
213 | if (tmp_entry->index == index) { | ||
214 | entry = tmp_entry; | ||
215 | break; | ||
216 | } | ||
217 | } | ||
218 | if (unlikely(!entry)) { | ||
219 | mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); | ||
220 | return -EINVAL; | ||
221 | } | ||
222 | |||
223 | /* the given qpn is listed as a promisc qpn | ||
224 | * we need to add it as a duplicate to this entry | ||
225 | * for future reference */ | ||
226 | list_for_each_entry(dqp, &entry->duplicates, list) { | ||
227 | if (qpn == dqp->qpn) | ||
228 | return 0; /* qp is already duplicated */ | ||
229 | } | ||
230 | |||
231 | /* add the qp as a duplicate on this index */ | ||
232 | dqp = kmalloc(sizeof *dqp, GFP_KERNEL); | ||
233 | if (!dqp) | ||
234 | return -ENOMEM; | ||
235 | dqp->qpn = qpn; | ||
236 | list_add_tail(&dqp->list, &entry->duplicates); | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | /* Check whether a qpn is a duplicate on steering entry | ||
242 | * If so, it should not be removed from mgm */ | ||
243 | static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
244 | enum mlx4_steer_type steer, | ||
245 | unsigned int index, u32 qpn) | ||
246 | { | ||
247 | struct mlx4_steer *s_steer; | ||
248 | struct mlx4_steer_index *tmp_entry, *entry = NULL; | ||
249 | struct mlx4_promisc_qp *dqp, *tmp_dqp; | ||
250 | u8 pf_num; | ||
251 | |||
252 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
253 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
254 | |||
255 | /* if qp is not promisc, it cannot be duplicated */ | ||
256 | if (!get_promisc_qp(dev, pf_num, steer, qpn)) | ||
257 | return false; | ||
258 | |||
259 | /* The qp is a promisc qp so it is a duplicate on this index | ||
260 | * Find the index entry, and remove the duplicate */ | ||
261 | list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { | ||
262 | if (tmp_entry->index == index) { | ||
263 | entry = tmp_entry; | ||
264 | break; | ||
265 | } | ||
266 | } | ||
267 | if (unlikely(!entry)) { | ||
268 | mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); | ||
269 | return false; | ||
270 | } | ||
271 | list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { | ||
272 | if (dqp->qpn == qpn) { | ||
273 | list_del(&dqp->list); | ||
274 | kfree(dqp); | ||
275 | } | ||
276 | } | ||
277 | return true; | ||
278 | } | ||
279 | |||
280 | /* If a steering entry contains only promisc QPs, it can be removed. */ | ||
281 | static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
282 | enum mlx4_steer_type steer, | ||
283 | unsigned int index, u32 tqpn) | ||
284 | { | ||
285 | struct mlx4_steer *s_steer; | ||
286 | struct mlx4_cmd_mailbox *mailbox; | ||
287 | struct mlx4_mgm *mgm; | ||
288 | struct mlx4_steer_index *entry = NULL, *tmp_entry; | ||
289 | u32 qpn; | ||
290 | u32 members_count; | ||
291 | bool ret = false; | ||
292 | int i; | ||
293 | u8 pf_num; | ||
294 | |||
295 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
296 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
297 | |||
298 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
299 | if (IS_ERR(mailbox)) | ||
300 | return false; | ||
301 | mgm = mailbox->buf; | ||
302 | |||
303 | if (mlx4_READ_ENTRY(dev, index, mailbox)) | ||
304 | goto out; | ||
305 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
306 | for (i = 0; i < members_count; i++) { | ||
307 | qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; | ||
308 | if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) { | ||
309 | /* the qp is not promisc, the entry can't be removed */ | ||
310 | goto out; | ||
311 | } | ||
312 | } | ||
313 | /* All the qps currently registered for this entry are promiscuous; | ||
314 | * check for duplicates */ | ||
315 | ret = true; | ||
316 | list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { | ||
317 | if (entry->index == index) { | ||
318 | if (list_empty(&entry->duplicates)) { | ||
319 | list_del(&entry->list); | ||
320 | kfree(entry); | ||
321 | } else { | ||
322 | /* This entry contains duplicates so it shouldn't be removed */ | ||
323 | ret = false; | ||
324 | goto out; | ||
325 | } | ||
326 | } | ||
327 | } | ||
328 | |||
329 | out: | ||
330 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
331 | return ret; | ||
332 | } | ||
333 | |||
334 | static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
335 | enum mlx4_steer_type steer, u32 qpn) | ||
336 | { | ||
337 | struct mlx4_steer *s_steer; | ||
338 | struct mlx4_cmd_mailbox *mailbox; | ||
339 | struct mlx4_mgm *mgm; | ||
340 | struct mlx4_steer_index *entry; | ||
341 | struct mlx4_promisc_qp *pqp; | ||
342 | struct mlx4_promisc_qp *dqp; | ||
343 | u32 members_count; | ||
344 | u32 prot; | ||
345 | int i; | ||
346 | bool found; | ||
347 | int last_index; | ||
348 | int err; | ||
349 | u8 pf_num; | ||
350 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
351 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
352 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
353 | |||
354 | mutex_lock(&priv->mcg_table.mutex); | ||
355 | |||
356 | if (get_promisc_qp(dev, pf_num, steer, qpn)) { | ||
357 | err = 0; /* Nothing to do, already exists */ | ||
358 | goto out_mutex; | ||
359 | } | ||
360 | |||
361 | pqp = kmalloc(sizeof *pqp, GFP_KERNEL); | ||
362 | if (!pqp) { | ||
363 | err = -ENOMEM; | ||
364 | goto out_mutex; | ||
365 | } | ||
366 | pqp->qpn = qpn; | ||
367 | |||
368 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
369 | if (IS_ERR(mailbox)) { | ||
370 | err = -ENOMEM; | ||
371 | goto out_alloc; | ||
372 | } | ||
373 | mgm = mailbox->buf; | ||
374 | |||
375 | /* the promisc qp needs to be added for each one of the steering | ||
376 | * entries; if it already exists there, it needs to be added as | ||
377 | * a duplicate for this entry */ | ||
378 | list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { | ||
379 | err = mlx4_READ_ENTRY(dev, entry->index, mailbox); | ||
380 | if (err) | ||
381 | goto out_mailbox; | ||
382 | |||
383 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
384 | prot = be32_to_cpu(mgm->members_count) >> 30; | ||
385 | found = false; | ||
386 | for (i = 0; i < members_count; i++) { | ||
387 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { | ||
388 | /* Entry already exists, add to duplicates */ | ||
389 | dqp = kmalloc(sizeof *dqp, GFP_KERNEL); | ||
390 | if (!dqp) { | ||
391 | err = -ENOMEM; goto out_mailbox; } | ||
392 | dqp->qpn = qpn; | ||
393 | list_add_tail(&dqp->list, &entry->duplicates); | ||
394 | found = true; | ||
395 | } | ||
396 | } | ||
397 | if (!found) { | ||
398 | /* Need to add the qpn to mgm */ | ||
399 | if (members_count == MLX4_QP_PER_MGM) { | ||
400 | /* entry is full */ | ||
401 | err = -ENOMEM; | ||
402 | goto out_mailbox; | ||
403 | } | ||
404 | mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK); | ||
405 | mgm->members_count = cpu_to_be32(members_count | (prot << 30)); | ||
406 | err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); | ||
407 | if (err) | ||
408 | goto out_mailbox; | ||
409 | } | ||
410 | last_index = entry->index; | ||
411 | } | ||
412 | |||
413 | /* add the new qpn to list of promisc qps */ | ||
414 | list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); | ||
415 | /* now need to add all the promisc qps to default entry */ | ||
416 | memset(mgm, 0, sizeof *mgm); | ||
417 | members_count = 0; | ||
418 | list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) | ||
419 | mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); | ||
420 | mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); | ||
421 | |||
422 | err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox); | ||
423 | if (err) | ||
424 | goto out_list; | ||
425 | |||
426 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
427 | mutex_unlock(&priv->mcg_table.mutex); | ||
428 | return 0; | ||
429 | |||
430 | out_list: | ||
431 | list_del(&pqp->list); | ||
432 | out_mailbox: | ||
433 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
434 | out_alloc: | ||
435 | kfree(pqp); | ||
436 | out_mutex: | ||
437 | mutex_unlock(&priv->mcg_table.mutex); | ||
438 | return err; | ||
439 | } | ||
440 | |||
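
add_promisc_qp(), like the rest of mcg.c, packs the MGM members_count word as protocol in bits 30-31 and the member count in the low 24 bits, stored big-endian on the wire. A standalone encode/decode sketch (the numeric value of MLX4_PROT_ETH is an assumption here, and htonl/ntohl stand in for cpu_to_be32/be32_to_cpu):

```c
/* Encode/decode of the MGM members_count word as used in mcg.c:
 * low 24 bits = member count, bits 30-31 = protocol. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define MLX4_PROT_ETH 1U   /* assumed value for illustration */

int main(void)
{
	uint32_t wire = htonl(5U | (MLX4_PROT_ETH << 30)); /* 5 members, ETH */

	uint32_t host  = ntohl(wire);
	uint32_t count = host & 0xffffff;
	uint32_t prot  = host >> 30;

	printf("members=%u prot=%u\n", count, prot);  /* members=5 prot=1 */
	return 0;
}
```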
441 | static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, | ||
442 | enum mlx4_steer_type steer, u32 qpn) | ||
443 | { | ||
444 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
445 | struct mlx4_steer *s_steer; | ||
446 | struct mlx4_cmd_mailbox *mailbox; | ||
447 | struct mlx4_mgm *mgm; | ||
448 | struct mlx4_steer_index *entry; | ||
449 | struct mlx4_promisc_qp *pqp; | ||
450 | struct mlx4_promisc_qp *dqp; | ||
451 | u32 members_count; | ||
452 | bool found; | ||
453 | bool back_to_list = false; | ||
454 | int loc, i; | ||
455 | int err; | ||
456 | u8 pf_num; | ||
457 | |||
458 | pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); | ||
459 | s_steer = &mlx4_priv(dev)->steer[pf_num]; | ||
460 | mutex_lock(&priv->mcg_table.mutex); | ||
461 | |||
462 | pqp = get_promisc_qp(dev, pf_num, steer, qpn); | ||
463 | if (unlikely(!pqp)) { | ||
464 | mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn); | ||
465 | /* nothing to do */ | ||
466 | err = 0; | ||
467 | goto out_mutex; | ||
468 | } | ||
469 | |||
470 | /* remove from the list of promisc qps */ | ||
471 | list_del(&pqp->list); | ||
472 | /* defer kfree(pqp): it is relinked below if we must back out */ | ||
473 | |||
474 | /* set the default entry not to include the removed one */ | ||
475 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
476 | if (IS_ERR(mailbox)) { | ||
477 | err = -ENOMEM; | ||
478 | back_to_list = true; | ||
479 | goto out_list; | ||
480 | } | ||
481 | mgm = mailbox->buf; | ||
482 | members_count = 0; | ||
483 | list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) | ||
484 | mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); | ||
485 | mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); | ||
486 | |||
487 | err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox); | ||
488 | if (err) | ||
489 | goto out_mailbox; | ||
490 | |||
491 | /* remove the qp from all the steering entries*/ | ||
492 | list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { | ||
493 | found = false; | ||
494 | list_for_each_entry(dqp, &entry->duplicates, list) { | ||
495 | if (dqp->qpn == qpn) { | ||
496 | found = true; | ||
497 | break; | ||
498 | } | ||
499 | } | ||
500 | if (found) { | ||
501 | /* a duplicate, no need to change the mgm, | ||
502 | * only update the duplicates list */ | ||
503 | list_del(&dqp->list); | ||
504 | kfree(dqp); | ||
505 | } else { | ||
506 | err = mlx4_READ_ENTRY(dev, entry->index, mailbox); | ||
507 | if (err) | ||
508 | goto out_mailbox; | ||
509 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | ||
510 | for (loc = -1, i = 0; i < members_count; ++i) | ||
511 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) | ||
512 | loc = i; | ||
513 | |||
514 | mgm->members_count = cpu_to_be32(--members_count | | ||
515 | (MLX4_PROT_ETH << 30)); | ||
516 | mgm->qp[loc] = mgm->qp[i - 1]; | ||
517 | mgm->qp[i - 1] = 0; | ||
518 | |||
519 | err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); | ||
520 | if (err) | ||
521 | goto out_mailbox; | ||
522 | } | ||
523 | |||
524 | } | ||
525 | |||
526 | out_mailbox: | ||
527 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
528 | out_list: | ||
529 | if (back_to_list) list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); | ||
530 | else kfree(pqp); | ||
531 | out_mutex: | ||
532 | mutex_unlock(&priv->mcg_table.mutex); | ||
533 | return err; | ||
534 | } | ||
535 | |||
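
remove_promisc_qp() deletes a QP from the MGM member array with the same constant-time idiom as the detach path: overwrite the victim with the last member rather than shifting the tail. In isolation:

```c
/* The constant-time removal idiom used on the MGM qp[] array:
 * overwrite the victim with the last member, then shrink the count. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t qp[8] = {10, 11, 12, 13, 14};
	int members = 5, loc = 1;	/* remove qp[1] == 11 */

	qp[loc] = qp[members - 1];	/* mgm->qp[loc] = mgm->qp[i - 1] */
	qp[members - 1] = 0;
	members--;

	for (int i = 0; i < members; i++)
		printf("%u ", qp[i]);	/* prints: 10 14 12 13 */
	printf("\n");
	return 0;
}
```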
82 | /* | 536 | /* |
83 | * Caller must hold MCG table semaphore. gid and mgm parameters must | 537 | * Caller must hold MCG table semaphore. gid and mgm parameters must |
84 | * be properly aligned for command interface. | 538 | * be properly aligned for command interface. |
@@ -94,15 +548,17 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox | |||
94 | * If no AMGM exists for given gid, *index = -1, *prev = index of last | 548 | * If no AMGM exists for given gid, *index = -1, *prev = index of last |
95 | * entry in hash chain and *mgm holds end of hash chain. | 549 | * entry in hash chain and *mgm holds end of hash chain. |
96 | */ | 550 | */ |
97 | static int find_mgm(struct mlx4_dev *dev, | 551 | static int find_entry(struct mlx4_dev *dev, u8 port, |
98 | u8 *gid, enum mlx4_protocol protocol, | 552 | u8 *gid, enum mlx4_protocol prot, |
99 | struct mlx4_cmd_mailbox *mgm_mailbox, | 553 | enum mlx4_steer_type steer, |
100 | u16 *hash, int *prev, int *index) | 554 | struct mlx4_cmd_mailbox *mgm_mailbox, |
555 | u16 *hash, int *prev, int *index) | ||
101 | { | 556 | { |
102 | struct mlx4_cmd_mailbox *mailbox; | 557 | struct mlx4_cmd_mailbox *mailbox; |
103 | struct mlx4_mgm *mgm = mgm_mailbox->buf; | 558 | struct mlx4_mgm *mgm = mgm_mailbox->buf; |
104 | u8 *mgid; | 559 | u8 *mgid; |
105 | int err; | 560 | int err; |
561 | u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0; | ||
106 | 562 | ||
107 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 563 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
108 | if (IS_ERR(mailbox)) | 564 | if (IS_ERR(mailbox)) |
@@ -111,7 +567,7 @@ static int find_mgm(struct mlx4_dev *dev, | |||
111 | 567 | ||
112 | memcpy(mgid, gid, 16); | 568 | memcpy(mgid, gid, 16); |
113 | 569 | ||
114 | err = mlx4_MGID_HASH(dev, mailbox, hash); | 570 | err = mlx4_GID_HASH(dev, mailbox, hash, op_mod); |
115 | mlx4_free_cmd_mailbox(dev, mailbox); | 571 | mlx4_free_cmd_mailbox(dev, mailbox); |
116 | if (err) | 572 | if (err) |
117 | return err; | 573 | return err; |
@@ -123,11 +579,11 @@ static int find_mgm(struct mlx4_dev *dev, | |||
123 | *prev = -1; | 579 | *prev = -1; |
124 | 580 | ||
125 | do { | 581 | do { |
126 | err = mlx4_READ_MCG(dev, *index, mgm_mailbox); | 582 | err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); |
127 | if (err) | 583 | if (err) |
128 | return err; | 584 | return err; |
129 | 585 | ||
130 | if (!memcmp(mgm->gid, zero_gid, 16)) { | 586 | if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { |
131 | if (*index != *hash) { | 587 | if (*index != *hash) { |
132 | mlx4_err(dev, "Found zero MGID in AMGM.\n"); | 588 | mlx4_err(dev, "Found zero MGID in AMGM.\n"); |
133 | err = -EINVAL; | 589 | err = -EINVAL; |
@@ -136,7 +592,7 @@ static int find_mgm(struct mlx4_dev *dev, | |||
136 | } | 592 | } |
137 | 593 | ||
138 | if (!memcmp(mgm->gid, gid, 16) && | 594 | if (!memcmp(mgm->gid, gid, 16) && |
139 | be32_to_cpu(mgm->members_count) >> 30 == protocol) | 595 | be32_to_cpu(mgm->members_count) >> 30 == prot) |
140 | return err; | 596 | return err; |
141 | 597 | ||
142 | *prev = *index; | 598 | *prev = *index; |
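
find_entry() keeps the original lookup shape: hash to an MGM bucket, then follow the AMGM overflow chain through next_gid_index (stored shifted left by 6) until it wraps to zero. A sketch with an array standing in for the device table:

```c
/* Sketch of the hash-then-chain lookup in find_entry(): the bucket
 * index comes from the GID hash; overflow entries are chained
 * through next_gid_index, which is stored shifted left by 6. */
#include <stdio.h>
#include <stdint.h>

struct entry { uint32_t next_gid_index; int key; };

static int find(const struct entry *tbl, int hash, int key)
{
	int index = hash;

	do {
		if (tbl[index].key == key)
			return index;
		index = tbl[index].next_gid_index >> 6;  /* follow chain */
	} while (index);
	return -1;
}

int main(void)
{
	struct entry tbl[8] = {{0}};

	tbl[2] = (struct entry){ .next_gid_index = 5 << 6, .key = 100 };
	tbl[5] = (struct entry){ .next_gid_index = 0,      .key = 200 };
	printf("%d\n", find(tbl, 2, 200));  /* prints 5 */
	return 0;
}
```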
@@ -147,8 +603,9 @@ static int find_mgm(struct mlx4_dev *dev, | |||
147 | return err; | 603 | return err; |
148 | } | 604 | } |
149 | 605 | ||
150 | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 606 | int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
151 | int block_mcast_loopback, enum mlx4_protocol protocol) | 607 | int block_mcast_loopback, enum mlx4_protocol prot, |
608 | enum mlx4_steer_type steer) | ||
152 | { | 609 | { |
153 | struct mlx4_priv *priv = mlx4_priv(dev); | 610 | struct mlx4_priv *priv = mlx4_priv(dev); |
154 | struct mlx4_cmd_mailbox *mailbox; | 611 | struct mlx4_cmd_mailbox *mailbox; |
@@ -159,6 +616,8 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
159 | int link = 0; | 616 | int link = 0; |
160 | int i; | 617 | int i; |
161 | int err; | 618 | int err; |
619 | u8 port = gid[5]; | ||
620 | u8 new_entry = 0; | ||
162 | 621 | ||
163 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 622 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
164 | if (IS_ERR(mailbox)) | 623 | if (IS_ERR(mailbox)) |
@@ -166,14 +625,16 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
166 | mgm = mailbox->buf; | 625 | mgm = mailbox->buf; |
167 | 626 | ||
168 | mutex_lock(&priv->mcg_table.mutex); | 627 | mutex_lock(&priv->mcg_table.mutex); |
169 | 628 | err = find_entry(dev, port, gid, prot, steer, | |
170 | err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); | 629 | mailbox, &hash, &prev, &index); |
171 | if (err) | 630 | if (err) |
172 | goto out; | 631 | goto out; |
173 | 632 | ||
174 | if (index != -1) { | 633 | if (index != -1) { |
175 | if (!memcmp(mgm->gid, zero_gid, 16)) | 634 | if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { |
635 | new_entry = 1; | ||
176 | memcpy(mgm->gid, gid, 16); | 636 | memcpy(mgm->gid, gid, 16); |
637 | } | ||
177 | } else { | 638 | } else { |
178 | link = 1; | 639 | link = 1; |
179 | 640 | ||
@@ -209,26 +670,34 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
209 | else | 670 | else |
210 | mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); | 671 | mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); |
211 | 672 | ||
212 | mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30); | 673 | mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); |
213 | 674 | ||
214 | err = mlx4_WRITE_MCG(dev, index, mailbox); | 675 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); |
215 | if (err) | 676 | if (err) |
216 | goto out; | 677 | goto out; |
217 | 678 | ||
218 | if (!link) | 679 | if (!link) |
219 | goto out; | 680 | goto out; |
220 | 681 | ||
221 | err = mlx4_READ_MCG(dev, prev, mailbox); | 682 | err = mlx4_READ_ENTRY(dev, prev, mailbox); |
222 | if (err) | 683 | if (err) |
223 | goto out; | 684 | goto out; |
224 | 685 | ||
225 | mgm->next_gid_index = cpu_to_be32(index << 6); | 686 | mgm->next_gid_index = cpu_to_be32(index << 6); |
226 | 687 | ||
227 | err = mlx4_WRITE_MCG(dev, prev, mailbox); | 688 | err = mlx4_WRITE_ENTRY(dev, prev, mailbox); |
228 | if (err) | 689 | if (err) |
229 | goto out; | 690 | goto out; |
230 | 691 | ||
231 | out: | 692 | out: |
693 | if (prot == MLX4_PROT_ETH) { | ||
694 | /* manage the steering entry for promisc mode */ | ||
695 | if (new_entry) | ||
696 | new_steering_entry(dev, 0, port, steer, index, qp->qpn); | ||
697 | else | ||
698 | existing_steering_entry(dev, 0, port, steer, | ||
699 | index, qp->qpn); | ||
700 | } | ||
232 | if (err && link && index != -1) { | 701 | if (err && link && index != -1) { |
233 | if (index < dev->caps.num_mgms) | 702 | if (index < dev->caps.num_mgms) |
234 | mlx4_warn(dev, "Got AMGM index %d < %d", | 703 | mlx4_warn(dev, "Got AMGM index %d < %d", |
@@ -242,10 +711,9 @@ out: | |||
242 | mlx4_free_cmd_mailbox(dev, mailbox); | 711 | mlx4_free_cmd_mailbox(dev, mailbox); |
243 | return err; | 712 | return err; |
244 | } | 713 | } |
245 | EXPORT_SYMBOL_GPL(mlx4_multicast_attach); | ||
246 | 714 | ||
247 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 715 | int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
248 | enum mlx4_protocol protocol) | 716 | enum mlx4_protocol prot, enum mlx4_steer_type steer) |
249 | { | 717 | { |
250 | struct mlx4_priv *priv = mlx4_priv(dev); | 718 | struct mlx4_priv *priv = mlx4_priv(dev); |
251 | struct mlx4_cmd_mailbox *mailbox; | 719 | struct mlx4_cmd_mailbox *mailbox; |
@@ -255,6 +723,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
255 | int prev, index; | 723 | int prev, index; |
256 | int i, loc; | 724 | int i, loc; |
257 | int err; | 725 | int err; |
726 | u8 port = gid[5]; | ||
727 | bool removed_entry = false; | ||
258 | 728 | ||
259 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 729 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
260 | if (IS_ERR(mailbox)) | 730 | if (IS_ERR(mailbox)) |
@@ -263,7 +733,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
263 | 733 | ||
264 | mutex_lock(&priv->mcg_table.mutex); | 734 | mutex_lock(&priv->mcg_table.mutex); |
265 | 735 | ||
266 | err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); | 736 | err = find_entry(dev, port, gid, prot, steer, |
737 | mailbox, &hash, &prev, &index); | ||
267 | if (err) | 738 | if (err) |
268 | goto out; | 739 | goto out; |
269 | 740 | ||
@@ -273,6 +744,11 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
273 | goto out; | 744 | goto out; |
274 | } | 745 | } |
275 | 746 | ||
747 | /* if this pq is also a promisc qp, it shouldn't be removed */ | ||
748 | if (prot == MLX4_PROT_ETH && | ||
749 | check_duplicate_entry(dev, 0, port, steer, index, qp->qpn)) | ||
750 | goto out; | ||
751 | |||
276 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; | 752 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; |
277 | for (loc = -1, i = 0; i < members_count; ++i) | 753 | for (loc = -1, i = 0; i < members_count; ++i) |
278 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) | 754 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) |
@@ -285,26 +761,31 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
285 | } | 761 | } |
286 | 762 | ||
287 | 763 | ||
288 | mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30); | 764 | mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); |
289 | mgm->qp[loc] = mgm->qp[i - 1]; | 765 | mgm->qp[loc] = mgm->qp[i - 1]; |
290 | mgm->qp[i - 1] = 0; | 766 | mgm->qp[i - 1] = 0; |
291 | 767 | ||
292 | if (i != 1) { | 768 | if (prot == MLX4_PROT_ETH) |
293 | err = mlx4_WRITE_MCG(dev, index, mailbox); | 769 | removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn); |
770 | if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) { | ||
771 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); | ||
294 | goto out; | 772 | goto out; |
295 | } | 773 | } |
296 | 774 | ||
775 | /* We are going to delete the entry, members count should be 0 */ | ||
776 | mgm->members_count = cpu_to_be32((u32) prot << 30); | ||
777 | |||
297 | if (prev == -1) { | 778 | if (prev == -1) { |
298 | /* Remove entry from MGM */ | 779 | /* Remove entry from MGM */ |
299 | int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; | 780 | int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; |
300 | if (amgm_index) { | 781 | if (amgm_index) { |
301 | err = mlx4_READ_MCG(dev, amgm_index, mailbox); | 782 | err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); |
302 | if (err) | 783 | if (err) |
303 | goto out; | 784 | goto out; |
304 | } else | 785 | } else |
305 | memset(mgm->gid, 0, 16); | 786 | memset(mgm->gid, 0, 16); |
306 | 787 | ||
307 | err = mlx4_WRITE_MCG(dev, index, mailbox); | 788 | err = mlx4_WRITE_ENTRY(dev, index, mailbox); |
308 | if (err) | 789 | if (err) |
309 | goto out; | 790 | goto out; |
310 | 791 | ||
@@ -319,13 +800,13 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
319 | } else { | 800 | } else { |
320 | /* Remove entry from AMGM */ | 801 | /* Remove entry from AMGM */ |
321 | int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; | 802 | int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; |
322 | err = mlx4_READ_MCG(dev, prev, mailbox); | 803 | err = mlx4_READ_ENTRY(dev, prev, mailbox); |
323 | if (err) | 804 | if (err) |
324 | goto out; | 805 | goto out; |
325 | 806 | ||
326 | mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); | 807 | mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); |
327 | 808 | ||
328 | err = mlx4_WRITE_MCG(dev, prev, mailbox); | 809 | err = mlx4_WRITE_ENTRY(dev, prev, mailbox); |
329 | if (err) | 810 | if (err) |
330 | goto out; | 811 | goto out; |
331 | 812 | ||
@@ -343,8 +824,85 @@ out: | |||
343 | mlx4_free_cmd_mailbox(dev, mailbox); | 824 | mlx4_free_cmd_mailbox(dev, mailbox); |
344 | return err; | 825 | return err; |
345 | } | 826 | } |
827 | |||
828 | |||
829 | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
830 | int block_mcast_loopback, enum mlx4_protocol prot) | ||
831 | { | ||
832 | enum mlx4_steer_type steer; | ||
833 | |||
834 | steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER; | ||
835 | |||
836 | if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering) | ||
837 | return 0; | ||
838 | |||
839 | if (prot == MLX4_PROT_ETH) | ||
840 | gid[7] |= (steer << 1); | ||
841 | |||
842 | return mlx4_qp_attach_common(dev, qp, gid, | ||
843 | block_mcast_loopback, prot, | ||
844 | steer); | ||
845 | } | ||
846 | EXPORT_SYMBOL_GPL(mlx4_multicast_attach); | ||
847 | |||
848 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
849 | enum mlx4_protocol prot) | ||
850 | { | ||
851 | enum mlx4_steer_type steer; | ||
852 | |||
853 | steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER; | ||
854 | |||
855 | if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering) | ||
856 | return 0; | ||
857 | |||
858 | if (prot == MLX4_PROT_ETH) { | ||
859 | gid[7] |= (steer << 1); | ||
860 | } | ||
861 | |||
862 | return mlx4_qp_detach_common(dev, qp, gid, prot, steer); | ||
863 | } | ||
346 | EXPORT_SYMBOL_GPL(mlx4_multicast_detach); | 864 | EXPORT_SYMBOL_GPL(mlx4_multicast_detach); |
347 | 865 | ||
866 | |||
867 | int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) | ||
868 | { | ||
869 | if (!dev->caps.vep_mc_steering) | ||
870 | return 0; | ||
871 | |||
872 | |||
873 | return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn); | ||
874 | } | ||
875 | EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); | ||
876 | |||
877 | int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) | ||
878 | { | ||
879 | if (!dev->caps.vep_mc_steering) | ||
880 | return 0; | ||
881 | |||
882 | |||
883 | return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn); | ||
884 | } | ||
885 | EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); | ||
886 | |||
887 | int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) | ||
888 | { | ||
889 | if (!dev->caps.vep_mc_steering) | ||
890 | return 0; | ||
891 | |||
892 | |||
893 | return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn); | ||
894 | } | ||
895 | EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); | ||
896 | |||
897 | int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) | ||
898 | { | ||
899 | if (!dev->caps.vep_mc_steering) | ||
900 | return 0; | ||
901 | |||
902 | return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn); | ||
903 | } | ||
904 | EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); | ||
905 | |||
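
All of the Ethernet steering entry points above, plus mlx4_uc_steer_add() in port.c further down, agree on one 16-byte GID layout: MAC in bytes 10-15 (big-endian), port in byte 5, steer type shifted into byte 7. A sketch of that construction (MLX4_UC_STEER's numeric value is an assumption for the example):

```c
/* Sketch of the Ethernet steering GID layout used by the attach and
 * promisc helpers in this patch. */
#include <stdio.h>
#include <stdint.h>

#define MLX4_UC_STEER 1   /* assumed value */
#define ETH_ALEN 6

int main(void)
{
	uint8_t gid[16] = {0};
	uint64_t mac = 0x001122334455ULL & 0xffffffffffffULL;
	uint8_t port = 1;

	/* equivalent of mac = cpu_to_be64(mac << 16); memcpy(&gid[10], ...) */
	for (int i = 0; i < ETH_ALEN; i++)
		gid[10 + i] = (uint8_t)(mac >> (8 * (ETH_ALEN - 1 - i)));
	gid[5] = port;
	gid[7] = MLX4_UC_STEER << 1;

	for (int i = 0; i < 16; i++)
		printf("%02x", gid[i]);   /* MAC lands in the tail bytes */
	printf("\n");
	return 0;
}
```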
348 | int mlx4_init_mcg_table(struct mlx4_dev *dev) | 906 | int mlx4_init_mcg_table(struct mlx4_dev *dev) |
349 | { | 907 | { |
350 | struct mlx4_priv *priv = mlx4_priv(dev); | 908 | struct mlx4_priv *priv = mlx4_priv(dev); |
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 0da5bb7285b..c1e0e5f1bcd 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h | |||
@@ -105,6 +105,7 @@ struct mlx4_bitmap { | |||
105 | u32 max; | 105 | u32 max; |
106 | u32 reserved_top; | 106 | u32 reserved_top; |
107 | u32 mask; | 107 | u32 mask; |
108 | u32 avail; | ||
108 | spinlock_t lock; | 109 | spinlock_t lock; |
109 | unsigned long *table; | 110 | unsigned long *table; |
110 | }; | 111 | }; |
@@ -162,6 +163,27 @@ struct mlx4_fw { | |||
162 | u8 catas_bar; | 163 | u8 catas_bar; |
163 | }; | 164 | }; |
164 | 165 | ||
166 | #define MGM_QPN_MASK 0x00FFFFFF | ||
167 | #define MGM_BLCK_LB_BIT 30 | ||
168 | |||
169 | struct mlx4_promisc_qp { | ||
170 | struct list_head list; | ||
171 | u32 qpn; | ||
172 | }; | ||
173 | |||
174 | struct mlx4_steer_index { | ||
175 | struct list_head list; | ||
176 | unsigned int index; | ||
177 | struct list_head duplicates; | ||
178 | }; | ||
179 | |||
180 | struct mlx4_mgm { | ||
181 | __be32 next_gid_index; | ||
182 | __be32 members_count; | ||
183 | u32 reserved[2]; | ||
184 | u8 gid[16]; | ||
185 | __be32 qp[MLX4_QP_PER_MGM]; | ||
186 | }; | ||
165 | struct mlx4_cmd { | 187 | struct mlx4_cmd { |
166 | struct pci_pool *pool; | 188 | struct pci_pool *pool; |
167 | void __iomem *hcr; | 189 | void __iomem *hcr; |
@@ -265,6 +287,10 @@ struct mlx4_vlan_table { | |||
265 | int max; | 287 | int max; |
266 | }; | 288 | }; |
267 | 289 | ||
290 | struct mlx4_mac_entry { | ||
291 | u64 mac; | ||
292 | }; | ||
293 | |||
268 | struct mlx4_port_info { | 294 | struct mlx4_port_info { |
269 | struct mlx4_dev *dev; | 295 | struct mlx4_dev *dev; |
270 | int port; | 296 | int port; |
@@ -272,7 +298,9 @@ struct mlx4_port_info { | |||
272 | struct device_attribute port_attr; | 298 | struct device_attribute port_attr; |
273 | enum mlx4_port_type tmp_type; | 299 | enum mlx4_port_type tmp_type; |
274 | struct mlx4_mac_table mac_table; | 300 | struct mlx4_mac_table mac_table; |
301 | struct radix_tree_root mac_tree; | ||
275 | struct mlx4_vlan_table vlan_table; | 302 | struct mlx4_vlan_table vlan_table; |
303 | int base_qpn; | ||
276 | }; | 304 | }; |
277 | 305 | ||
278 | struct mlx4_sense { | 306 | struct mlx4_sense { |
@@ -282,6 +310,17 @@ struct mlx4_sense { | |||
282 | struct delayed_work sense_poll; | 310 | struct delayed_work sense_poll; |
283 | }; | 311 | }; |
284 | 312 | ||
313 | struct mlx4_msix_ctl { | ||
314 | u64 pool_bm; | ||
315 | spinlock_t pool_lock; | ||
316 | }; | ||
317 | |||
318 | struct mlx4_steer { | ||
319 | struct list_head promisc_qps[MLX4_NUM_STEERS]; | ||
320 | struct list_head steer_entries[MLX4_NUM_STEERS]; | ||
321 | struct list_head high_prios; | ||
322 | }; | ||
323 | |||
285 | struct mlx4_priv { | 324 | struct mlx4_priv { |
286 | struct mlx4_dev dev; | 325 | struct mlx4_dev dev; |
287 | 326 | ||
@@ -313,6 +352,11 @@ struct mlx4_priv { | |||
313 | struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; | 352 | struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; |
314 | struct mlx4_sense sense; | 353 | struct mlx4_sense sense; |
315 | struct mutex port_mutex; | 354 | struct mutex port_mutex; |
355 | struct mlx4_msix_ctl msix_ctl; | ||
356 | struct mlx4_steer *steer; | ||
357 | struct list_head bf_list; | ||
358 | struct mutex bf_mutex; | ||
359 | struct io_mapping *bf_mapping; | ||
316 | }; | 360 | }; |
317 | 361 | ||
318 | static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) | 362 | static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) |
@@ -328,6 +372,7 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); | |||
328 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); | 372 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); |
329 | u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); | 373 | u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); |
330 | void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); | 374 | void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); |
375 | u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap); | ||
331 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, | 376 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, |
332 | u32 reserved_bot, u32 reserved_top); | 377 | u32 reserved_bot, u32 reserved_top); |
333 | void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); | 378 | void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); |
@@ -403,4 +448,9 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); | |||
403 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); | 448 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); |
404 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); | 449 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); |
405 | 450 | ||
451 | int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
452 | enum mlx4_protocol prot, enum mlx4_steer_type steer); | ||
453 | int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | ||
454 | int block_mcast_loopback, enum mlx4_protocol prot, | ||
455 | enum mlx4_steer_type steer); | ||
406 | #endif /* MLX4_H */ | 456 | #endif /* MLX4_H */ |
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h index dfed6a07c2d..e30f6099c0d 100644 --- a/drivers/net/mlx4/mlx4_en.h +++ b/drivers/net/mlx4/mlx4_en.h | |||
@@ -49,8 +49,8 @@ | |||
49 | #include "en_port.h" | 49 | #include "en_port.h" |
50 | 50 | ||
51 | #define DRV_NAME "mlx4_en" | 51 | #define DRV_NAME "mlx4_en" |
52 | #define DRV_VERSION "1.5.1.6" | 52 | #define DRV_VERSION "1.5.4.1" |
53 | #define DRV_RELDATE "August 2010" | 53 | #define DRV_RELDATE "March 2011" |
54 | 54 | ||
55 | #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) | 55 | #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) |
56 | 56 | ||
@@ -62,6 +62,7 @@ | |||
62 | #define MLX4_EN_PAGE_SHIFT 12 | 62 | #define MLX4_EN_PAGE_SHIFT 12 |
63 | #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) | 63 | #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) |
64 | #define MAX_RX_RINGS 16 | 64 | #define MAX_RX_RINGS 16 |
65 | #define MIN_RX_RINGS 4 | ||
65 | #define TXBB_SIZE 64 | 66 | #define TXBB_SIZE 64 |
66 | #define HEADROOM (2048 / TXBB_SIZE + 1) | 67 | #define HEADROOM (2048 / TXBB_SIZE + 1) |
67 | #define STAMP_STRIDE 64 | 68 | #define STAMP_STRIDE 64 |
@@ -124,6 +125,7 @@ enum { | |||
124 | #define MLX4_EN_RX_SIZE_THRESH 1024 | 125 | #define MLX4_EN_RX_SIZE_THRESH 1024 |
125 | #define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) | 126 | #define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) |
126 | #define MLX4_EN_SAMPLE_INTERVAL 0 | 127 | #define MLX4_EN_SAMPLE_INTERVAL 0 |
128 | #define MLX4_EN_AVG_PKT_SMALL 256 | ||
127 | 129 | ||
128 | #define MLX4_EN_AUTO_CONF 0xffff | 130 | #define MLX4_EN_AUTO_CONF 0xffff |
129 | 131 | ||
@@ -214,6 +216,9 @@ struct mlx4_en_tx_desc { | |||
214 | 216 | ||
215 | #define MLX4_EN_USE_SRQ 0x01000000 | 217 | #define MLX4_EN_USE_SRQ 0x01000000 |
216 | 218 | ||
219 | #define MLX4_EN_CX3_LOW_ID 0x1000 | ||
220 | #define MLX4_EN_CX3_HIGH_ID 0x1005 | ||
221 | |||
217 | struct mlx4_en_rx_alloc { | 222 | struct mlx4_en_rx_alloc { |
218 | struct page *page; | 223 | struct page *page; |
219 | u16 offset; | 224 | u16 offset; |
@@ -243,6 +248,8 @@ struct mlx4_en_tx_ring { | |||
243 | unsigned long bytes; | 248 | unsigned long bytes; |
244 | unsigned long packets; | 249 | unsigned long packets; |
245 | spinlock_t comp_lock; | 250 | spinlock_t comp_lock; |
251 | struct mlx4_bf bf; | ||
252 | bool bf_enabled; | ||
246 | }; | 253 | }; |
247 | 254 | ||
248 | struct mlx4_en_rx_desc { | 255 | struct mlx4_en_rx_desc { |
@@ -453,6 +460,7 @@ struct mlx4_en_priv { | |||
453 | struct mlx4_en_rss_map rss_map; | 460 | struct mlx4_en_rss_map rss_map; |
454 | u32 flags; | 461 | u32 flags; |
455 | #define MLX4_EN_FLAG_PROMISC 0x1 | 462 | #define MLX4_EN_FLAG_PROMISC 0x1 |
463 | #define MLX4_EN_FLAG_MC_PROMISC 0x2 | ||
456 | u32 tx_ring_num; | 464 | u32 tx_ring_num; |
457 | u32 rx_ring_num; | 465 | u32 rx_ring_num; |
458 | u32 rx_skb_size; | 466 | u32 rx_skb_size; |
@@ -461,6 +469,7 @@ struct mlx4_en_priv { | |||
461 | u16 log_rx_info; | 469 | u16 log_rx_info; |
462 | 470 | ||
463 | struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; | 471 | struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; |
472 | int tx_vector; | ||
464 | struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; | 473 | struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; |
465 | struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; | 474 | struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; |
466 | struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; | 475 | struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; |
@@ -476,6 +485,13 @@ struct mlx4_en_priv { | |||
476 | int mc_addrs_cnt; | 485 | int mc_addrs_cnt; |
477 | struct mlx4_en_stat_out_mbox hw_stats; | 486 | struct mlx4_en_stat_out_mbox hw_stats; |
478 | int vids[128]; | 487 | int vids[128]; |
488 | bool wol; | ||
489 | }; | ||
490 | |||
491 | enum mlx4_en_wol { | ||
492 | MLX4_EN_WOL_MAGIC = (1ULL << 61), | ||
493 | MLX4_EN_WOL_ENABLED = (1ULL << 62), | ||
494 | MLX4_EN_WOL_DO_MODIFY = (1ULL << 63), | ||
479 | }; | 495 | }; |
480 | 496 | ||
481 | 497 | ||
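
The new mlx4_en_wol bits sit at the top of a 64-bit configuration word, so driver code can combine and test them with plain bit arithmetic; a minimal illustration:

```c
/* Minimal use of the new WOL flag bits from this patch. */
#include <stdio.h>
#include <stdint.h>

#define MLX4_EN_WOL_MAGIC     (1ULL << 61)
#define MLX4_EN_WOL_ENABLED   (1ULL << 62)
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)

int main(void)
{
	uint64_t config = 0;

	config |= MLX4_EN_WOL_MAGIC | MLX4_EN_WOL_ENABLED |
		  MLX4_EN_WOL_DO_MODIFY;

	if (config & MLX4_EN_WOL_ENABLED)
		printf("wake-on-LAN armed (magic packet: %s)\n",
		       (config & MLX4_EN_WOL_MAGIC) ? "yes" : "no");
	return 0;
}
```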
@@ -486,12 +502,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
486 | int mlx4_en_start_port(struct net_device *dev); | 502 | int mlx4_en_start_port(struct net_device *dev); |
487 | void mlx4_en_stop_port(struct net_device *dev); | 503 | void mlx4_en_stop_port(struct net_device *dev); |
488 | 504 | ||
489 | void mlx4_en_free_resources(struct mlx4_en_priv *priv); | 505 | void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors); |
490 | int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); | 506 | int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); |
491 | 507 | ||
492 | int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, | 508 | int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, |
493 | int entries, int ring, enum cq_type mode); | 509 | int entries, int ring, enum cq_type mode); |
494 | void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | 510 | void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, |
511 | bool reserve_vectors); | ||
495 | int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | 512 | int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); |
496 | void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | 513 | void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); |
497 | int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | 514 | int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); |
@@ -503,7 +520,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); | |||
503 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); | 520 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); |
504 | 521 | ||
505 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, | 522 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, |
506 | u32 size, u16 stride); | 523 | int qpn, u32 size, u16 stride); |
507 | void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); | 524 | void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); |
508 | int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | 525 | int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, |
509 | struct mlx4_en_tx_ring *ring, | 526 | struct mlx4_en_tx_ring *ring, |
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c index c4988d6bd5b..1286b886dce 100644 --- a/drivers/net/mlx4/pd.c +++ b/drivers/net/mlx4/pd.c | |||
@@ -32,12 +32,17 @@ | |||
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
35 | #include <linux/io-mapping.h> | ||
35 | 36 | ||
36 | #include <asm/page.h> | 37 | #include <asm/page.h> |
37 | 38 | ||
38 | #include "mlx4.h" | 39 | #include "mlx4.h" |
39 | #include "icm.h" | 40 | #include "icm.h" |
40 | 41 | ||
42 | enum { | ||
43 | MLX4_NUM_RESERVED_UARS = 8 | ||
44 | }; | ||
45 | |||
41 | int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) | 46 | int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) |
42 | { | 47 | { |
43 | struct mlx4_priv *priv = mlx4_priv(dev); | 48 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -77,6 +82,7 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) | |||
77 | return -ENOMEM; | 82 | return -ENOMEM; |
78 | 83 | ||
79 | uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; | 84 | uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; |
85 | uar->map = NULL; | ||
80 | 86 | ||
81 | return 0; | 87 | return 0; |
82 | } | 88 | } |
@@ -88,6 +94,102 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar) | |||
88 | } | 94 | } |
89 | EXPORT_SYMBOL_GPL(mlx4_uar_free); | 95 | EXPORT_SYMBOL_GPL(mlx4_uar_free); |
90 | 96 | ||
97 | int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf) | ||
98 | { | ||
99 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
100 | struct mlx4_uar *uar; | ||
101 | int err = 0; | ||
102 | int idx; | ||
103 | |||
104 | if (!priv->bf_mapping) | ||
105 | return -ENOMEM; | ||
106 | |||
107 | mutex_lock(&priv->bf_mutex); | ||
108 | if (!list_empty(&priv->bf_list)) | ||
109 | uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list); | ||
110 | else { | ||
111 | if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) { | ||
112 | err = -ENOMEM; | ||
113 | goto out; | ||
114 | } | ||
115 | uar = kmalloc(sizeof *uar, GFP_KERNEL); | ||
116 | if (!uar) { | ||
117 | err = -ENOMEM; | ||
118 | goto out; | ||
119 | } | ||
120 | err = mlx4_uar_alloc(dev, uar); | ||
121 | if (err) | ||
122 | goto free_kmalloc; | ||
123 | |||
124 | uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE); | ||
125 | if (!uar->map) { | ||
126 | err = -ENOMEM; | ||
127 | goto free_uar; | ||
128 | } | ||
129 | |||
130 | uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); | ||
131 | if (!uar->bf_map) { | ||
132 | err = -ENOMEM; | ||
133 | goto unmap_uar; | ||
134 | } | ||
135 | uar->free_bf_bmap = 0; | ||
136 | list_add(&uar->bf_list, &priv->bf_list); | ||
137 | } | ||
138 | |||
139 | bf->uar = uar; | ||
140 | idx = ffz(uar->free_bf_bmap); | ||
141 | uar->free_bf_bmap |= 1 << idx; | ||
142 | bf->uar = uar; | ||
143 | bf->offset = 0; | ||
144 | bf->buf_size = dev->caps.bf_reg_size / 2; | ||
145 | bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size; | ||
146 | if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1) | ||
147 | list_del_init(&uar->bf_list); | ||
148 | |||
149 | goto out; | ||
150 | |||
151 | unmap_uar: | ||
152 | bf->uar = NULL; | ||
153 | iounmap(uar->map); | ||
154 | |||
155 | free_uar: | ||
156 | mlx4_uar_free(dev, uar); | ||
157 | |||
158 | free_kmalloc: | ||
159 | kfree(uar); | ||
160 | |||
161 | out: | ||
162 | mutex_unlock(&priv->bf_mutex); | ||
163 | return err; | ||
164 | } | ||
165 | EXPORT_SYMBOL_GPL(mlx4_bf_alloc); | ||
166 | |||
167 | void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf) | ||
168 | { | ||
169 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
170 | int idx; | ||
171 | |||
172 | if (!bf->uar || !bf->uar->bf_map) | ||
173 | return; | ||
174 | |||
175 | mutex_lock(&priv->bf_mutex); | ||
176 | idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size; | ||
177 | bf->uar->free_bf_bmap &= ~(1 << idx); | ||
178 | if (!bf->uar->free_bf_bmap) { | ||
179 | if (!list_empty(&bf->uar->bf_list)) | ||
180 | list_del(&bf->uar->bf_list); | ||
181 | |||
182 | io_mapping_unmap(bf->uar->bf_map); | ||
183 | iounmap(bf->uar->map); | ||
184 | mlx4_uar_free(dev, bf->uar); | ||
185 | kfree(bf->uar); | ||
186 | } else if (list_empty(&bf->uar->bf_list)) | ||
187 | list_add(&bf->uar->bf_list, &priv->bf_list); | ||
188 | |||
189 | mutex_unlock(&priv->bf_mutex); | ||
190 | } | ||
191 | EXPORT_SYMBOL_GPL(mlx4_bf_free); | ||
192 | |||
91 | int mlx4_init_uar_table(struct mlx4_dev *dev) | 193 | int mlx4_init_uar_table(struct mlx4_dev *dev) |
92 | { | 194 | { |
93 | if (dev->caps.num_uars <= 128) { | 195 | if (dev->caps.num_uars <= 128) { |
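The BlueFlame allocator added above hands out sub-page doorbell slots from a shared UAR: ffz() picks the first clear bit in free_bf_bmap, that bit is set to claim the slot, and a page whose bitmap is full is unlinked from bf_list so the next allocation starts a fresh UAR. A minimal user-space sketch of that bitmap accounting follows; the four-slots-per-page value is an assumption for illustration, where the driver derives it from dev->caps.bf_regs_per_page:

#include <stdio.h>

#define BF_REGS_PER_PAGE 4	/* assumed; the driver uses dev->caps.bf_regs_per_page */

/* first-zero-bit helper, mirroring the kernel's ffz() for a small map */
static int first_zero_bit(unsigned long map)
{
	int i;

	for (i = 0; i < BF_REGS_PER_PAGE; i++)
		if (!(map & (1UL << i)))
			return i;
	return -1;
}

int main(void)
{
	unsigned long free_bf_bmap = 0;
	int idx;

	while ((idx = first_zero_bit(free_bf_bmap)) >= 0) {
		free_bf_bmap |= 1UL << idx;	/* claim the slot */
		printf("allocated BF slot %d, bmap=0x%lx\n", idx, free_bf_bmap);
		if (free_bf_bmap == (1UL << BF_REGS_PER_PAGE) - 1)
			printf("page full: driver would list_del_init() it\n");
	}
	return 0;
}

Keeping partially-used pages on bf_list keeps allocations packed, so a UAR is only torn down in mlx4_bf_free() once its bitmap drains back to zero.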
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c index 451339559bd..eca7d8596f8 100644 --- a/drivers/net/mlx4/port.c +++ b/drivers/net/mlx4/port.c | |||
@@ -90,12 +90,79 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, | |||
90 | return err; | 90 | return err; |
91 | } | 91 | } |
92 | 92 | ||
93 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) | 93 | static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, |
94 | u64 mac, int *qpn, u8 reserve) | ||
94 | { | 95 | { |
95 | struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; | 96 | struct mlx4_qp qp; |
97 | u8 gid[16] = {0}; | ||
98 | int err; | ||
99 | |||
100 | if (reserve) { | ||
101 | err = mlx4_qp_reserve_range(dev, 1, 1, qpn); | ||
102 | if (err) { | ||
103 | mlx4_err(dev, "Failed to reserve qp for mac registration\n"); | ||
104 | return err; | ||
105 | } | ||
106 | } | ||
107 | qp.qpn = *qpn; | ||
108 | |||
109 | mac &= 0xffffffffffffULL; | ||
110 | mac = cpu_to_be64(mac << 16); | ||
111 | memcpy(&gid[10], &mac, ETH_ALEN); | ||
112 | gid[5] = port; | ||
113 | gid[7] = MLX4_UC_STEER << 1; | ||
114 | |||
115 | err = mlx4_qp_attach_common(dev, &qp, gid, 0, | ||
116 | MLX4_PROT_ETH, MLX4_UC_STEER); | ||
117 | if (err && reserve) | ||
118 | mlx4_qp_release_range(dev, *qpn, 1); | ||
119 | |||
120 | return err; | ||
121 | } | ||
122 | |||
123 | static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, | ||
124 | u64 mac, int qpn, u8 free) | ||
125 | { | ||
126 | struct mlx4_qp qp; | ||
127 | u8 gid[16] = {0}; | ||
128 | |||
129 | qp.qpn = qpn; | ||
130 | mac &= 0xffffffffffffULL; | ||
131 | mac = cpu_to_be64(mac << 16); | ||
132 | memcpy(&gid[10], &mac, ETH_ALEN); | ||
133 | gid[5] = port; | ||
134 | gid[7] = MLX4_UC_STEER << 1; | ||
135 | |||
136 | mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER); | ||
137 | if (free) | ||
138 | mlx4_qp_release_range(dev, qpn, 1); | ||
139 | } | ||
140 | |||
141 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap) | ||
142 | { | ||
143 | struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; | ||
144 | struct mlx4_mac_table *table = &info->mac_table; | ||
145 | struct mlx4_mac_entry *entry; | ||
96 | int i, err = 0; | 146 | int i, err = 0; |
97 | int free = -1; | 147 | int free = -1; |
98 | 148 | ||
149 | if (dev->caps.vep_uc_steering) { | ||
150 | err = mlx4_uc_steer_add(dev, port, mac, qpn, 1); | ||
151 | if (!err) { | ||
152 | entry = kmalloc(sizeof *entry, GFP_KERNEL); | ||
153 | if (!entry) { | ||
154 | mlx4_uc_steer_release(dev, port, mac, *qpn, 1); | ||
155 | return -ENOMEM; | ||
156 | } | ||
157 | entry->mac = mac; | ||
158 | err = radix_tree_insert(&info->mac_tree, *qpn, entry); | ||
159 | if (err) { | ||
160 | mlx4_uc_steer_release(dev, port, mac, *qpn, 1); | ||
161 | return err; | ||
162 | } | ||
163 | } else | ||
164 | return err; | ||
165 | } | ||
99 | mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); | 166 | mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); |
100 | mutex_lock(&table->mutex); | 167 | mutex_lock(&table->mutex); |
101 | for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { | 168 | for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { |
@@ -106,7 +173,6 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) | |||
106 | 173 | ||
107 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { | 174 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { |
108 | /* MAC already registered, increase reference count */ | 175 |
109 | *index = i; | ||
110 | ++table->refs[i]; | 176 | ++table->refs[i]; |
111 | goto out; | 177 | goto out; |
112 | } | 178 | } |
@@ -137,7 +203,8 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) | |||
137 | goto out; | 203 | goto out; |
138 | } | 204 | } |
139 | 205 | ||
140 | *index = free; | 206 | if (!dev->caps.vep_uc_steering) |
207 | *qpn = info->base_qpn + free; | ||
141 | ++table->total; | 208 | ++table->total; |
142 | out: | 209 | out: |
143 | mutex_unlock(&table->mutex); | 210 | mutex_unlock(&table->mutex); |
@@ -145,20 +212,52 @@ out: | |||
145 | } | 212 | } |
146 | EXPORT_SYMBOL_GPL(mlx4_register_mac); | 213 | EXPORT_SYMBOL_GPL(mlx4_register_mac); |
147 | 214 | ||
148 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index) | 215 | static int validate_index(struct mlx4_dev *dev, |
216 | struct mlx4_mac_table *table, int index) | ||
149 | { | 217 | { |
150 | struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; | 218 | int err = 0; |
151 | 219 | ||
152 | mutex_lock(&table->mutex); | 220 | if (index < 0 || index >= table->max || !table->entries[index]) { |
153 | if (!table->refs[index]) { | 221 | mlx4_warn(dev, "No valid MAC entry for the given index\n"); |
154 | mlx4_warn(dev, "No MAC entry for index %d\n", index); | 222 | err = -EINVAL; |
155 | goto out; | ||
156 | } | 223 | } |
157 | if (--table->refs[index]) { | 224 | return err; |
158 | mlx4_warn(dev, "Have more references for index %d," | 225 | } |
159 | "no need to modify MAC table\n", index); | 226 | |
160 | goto out; | 227 | static int find_index(struct mlx4_dev *dev, |
228 | struct mlx4_mac_table *table, u64 mac) | ||
229 | { | ||
230 | int i; | ||
231 | for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { | ||
232 | if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) | ||
233 | return i; | ||
161 | } | 234 | } |
235 | /* MAC not found */ | ||
236 | return -EINVAL; | ||
237 | } | ||
238 | |||
239 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn) | ||
240 | { | ||
241 | struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; | ||
242 | struct mlx4_mac_table *table = &info->mac_table; | ||
243 | int index = qpn - info->base_qpn; | ||
244 | struct mlx4_mac_entry *entry; | ||
245 | |||
246 | if (dev->caps.vep_uc_steering) { | ||
247 | entry = radix_tree_lookup(&info->mac_tree, qpn); | ||
248 | if (entry) { | ||
249 | mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1); | ||
250 | radix_tree_delete(&info->mac_tree, qpn); | ||
251 | index = find_index(dev, table, entry->mac); | ||
252 | kfree(entry); | ||
253 | } | ||
254 | } | ||
255 | |||
256 | mutex_lock(&table->mutex); | ||
257 | |||
258 | if (validate_index(dev, table, index)) | ||
259 | goto out; | ||
260 | |||
162 | table->entries[index] = 0; | 261 | table->entries[index] = 0; |
163 | mlx4_set_port_mac_table(dev, port, table->entries); | 262 | mlx4_set_port_mac_table(dev, port, table->entries); |
164 | --table->total; | 263 | --table->total; |
@@ -167,6 +266,44 @@ out: | |||
167 | } | 266 | } |
168 | EXPORT_SYMBOL_GPL(mlx4_unregister_mac); | 267 | EXPORT_SYMBOL_GPL(mlx4_unregister_mac); |
169 | 268 | ||
269 | int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap) | ||
270 | { | ||
271 | struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; | ||
272 | struct mlx4_mac_table *table = &info->mac_table; | ||
273 | int index = qpn - info->base_qpn; | ||
274 | struct mlx4_mac_entry *entry; | ||
275 | int err; | ||
276 | |||
277 | if (dev->caps.vep_uc_steering) { | ||
278 | entry = radix_tree_lookup(&info->mac_tree, qpn); | ||
279 | if (!entry) | ||
280 | return -EINVAL; | ||
281 | index = find_index(dev, table, entry->mac); | ||
282 | mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0); | ||
283 | entry->mac = new_mac; | ||
284 | err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0); | ||
285 | if (err || index < 0) | ||
286 | return err; | ||
287 | } | ||
288 | |||
289 | mutex_lock(&table->mutex); | ||
290 | |||
291 | err = validate_index(dev, table, index); | ||
292 | if (err) | ||
293 | goto out; | ||
294 | |||
295 | table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); | ||
296 | |||
297 | err = mlx4_set_port_mac_table(dev, port, table->entries); | ||
298 | if (unlikely(err)) { | ||
299 | mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac); | ||
300 | table->entries[index] = 0; | ||
301 | } | ||
302 | out: | ||
303 | mutex_unlock(&table->mutex); | ||
304 | return err; | ||
305 | } | ||
306 | EXPORT_SYMBOL_GPL(mlx4_replace_mac); | ||
170 | static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, | 307 | static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, |
171 | __be32 *entries) | 308 | __be32 *entries) |
172 | { | 309 | { |
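The unicast steering helpers above encode the MAC into a 16-byte GID before attaching it to the QP: the address is masked to 48 bits, then byte-swapped so its six bytes land in gid[10..15], with the port number in gid[5] and the steering type in gid[7]. A standalone sketch of that layout, assuming MLX4_UC_STEER is 1 (the real value comes from the mlx4 headers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN	6
#define MLX4_UC_STEER	1	/* assumed value; the driver takes it from mlx4.h */

/* big-endian store of a 64-bit value, standing in for cpu_to_be64() */
static void store_be64(uint8_t *dst, uint64_t v)
{
	int i;

	for (i = 0; i < 8; i++)
		dst[i] = (uint8_t)(v >> (56 - 8 * i));
}

static void build_uc_steer_gid(uint8_t gid[16], uint8_t port, uint64_t mac)
{
	uint8_t be[8];

	memset(gid, 0, 16);
	mac &= 0xffffffffffffULL;	/* keep the 48 MAC bits */
	store_be64(be, mac << 16);	/* MAC now occupies be[0..5] */
	memcpy(&gid[10], be, ETH_ALEN);	/* ...and lands in gid[10..15] */
	gid[5] = port;
	gid[7] = MLX4_UC_STEER << 1;
}

int main(void)
{
	uint8_t gid[16];
	int i;

	build_uc_steer_gid(gid, 1, 0x0002c9030012ULL);
	for (i = 0; i < 16; i++)
		printf("%02x%c", gid[i], i == 15 ? '\n' : ':');
	return 0;
}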
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c index e749f82865f..b967647d0c7 100644 --- a/drivers/net/mlx4/profile.c +++ b/drivers/net/mlx4/profile.c | |||
@@ -107,9 +107,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, | |||
107 | profile[MLX4_RES_AUXC].num = request->num_qp; | 107 | profile[MLX4_RES_AUXC].num = request->num_qp; |
108 | profile[MLX4_RES_SRQ].num = request->num_srq; | 108 | profile[MLX4_RES_SRQ].num = request->num_srq; |
109 | profile[MLX4_RES_CQ].num = request->num_cq; | 109 | profile[MLX4_RES_CQ].num = request->num_cq; |
110 | profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, | 110 | profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); |
111 | dev_cap->reserved_eqs + | ||
112 | num_possible_cpus() + 1); | ||
113 | profile[MLX4_RES_DMPT].num = request->num_mpt; | 111 | profile[MLX4_RES_DMPT].num = request->num_mpt; |
114 | profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; | 112 | profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; |
115 | profile[MLX4_RES_MTT].num = request->num_mtt; | 113 | profile[MLX4_RES_MTT].num = request->num_mtt; |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index a7f2eed9a08..1f4e8680a96 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -3645,6 +3645,7 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp) | |||
3645 | dma_free_coherent(&pdev->dev, bytes, | 3645 | dma_free_coherent(&pdev->dev, bytes, |
3646 | ss->fw_stats, ss->fw_stats_bus); | 3646 | ss->fw_stats, ss->fw_stats_bus); |
3647 | ss->fw_stats = NULL; | 3647 | ss->fw_stats = NULL; |
3648 | netif_napi_del(&ss->napi); | ||
3648 | } | 3649 | } |
3649 | } | 3650 | } |
3650 | kfree(mgp->ss); | 3651 | kfree(mgp->ss); |
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 8c66e22c3a0..50986840c99 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -2441,7 +2441,7 @@ static struct pci_error_handlers pch_gbe_err_handler = { | |||
2441 | .resume = pch_gbe_io_resume | 2441 | .resume = pch_gbe_io_resume |
2442 | }; | 2442 | }; |
2443 | 2443 | ||
2444 | static struct pci_driver pch_gbe_pcidev = { | 2444 | static struct pci_driver pch_gbe_driver = { |
2445 | .name = KBUILD_MODNAME, | 2445 | .name = KBUILD_MODNAME, |
2446 | .id_table = pch_gbe_pcidev_id, | 2446 | .id_table = pch_gbe_pcidev_id, |
2447 | .probe = pch_gbe_probe, | 2447 | .probe = pch_gbe_probe, |
@@ -2458,7 +2458,7 @@ static int __init pch_gbe_init_module(void) | |||
2458 | { | 2458 | { |
2459 | int ret; | 2459 | int ret; |
2460 | 2460 | ||
2461 | ret = pci_register_driver(&pch_gbe_pcidev); | 2461 | ret = pci_register_driver(&pch_gbe_driver); |
2462 | if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) { | 2462 | if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) { |
2463 | if (copybreak == 0) { | 2463 | if (copybreak == 0) { |
2464 | pr_info("copybreak disabled\n"); | 2464 | pr_info("copybreak disabled\n"); |
@@ -2472,7 +2472,7 @@ static int __init pch_gbe_init_module(void) | |||
2472 | 2472 | ||
2473 | static void __exit pch_gbe_exit_module(void) | 2473 | static void __exit pch_gbe_exit_module(void) |
2474 | { | 2474 | { |
2475 | pci_unregister_driver(&pch_gbe_pcidev); | 2475 | pci_unregister_driver(&pch_gbe_driver); |
2476 | } | 2476 | } |
2477 | 2477 | ||
2478 | module_init(pch_gbe_init_module); | 2478 | module_init(pch_gbe_init_module); |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index b8bd936374f..d890679e4c4 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -1054,6 +1054,7 @@ static int efx_init_io(struct efx_nic *efx) | |||
1054 | { | 1054 | { |
1055 | struct pci_dev *pci_dev = efx->pci_dev; | 1055 | struct pci_dev *pci_dev = efx->pci_dev; |
1056 | dma_addr_t dma_mask = efx->type->max_dma_mask; | 1056 | dma_addr_t dma_mask = efx->type->max_dma_mask; |
1057 | bool use_wc; | ||
1057 | int rc; | 1058 | int rc; |
1058 | 1059 | ||
1059 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); | 1060 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); |
@@ -1104,8 +1105,21 @@ static int efx_init_io(struct efx_nic *efx) | |||
1104 | rc = -EIO; | 1105 | rc = -EIO; |
1105 | goto fail3; | 1106 | goto fail3; |
1106 | } | 1107 | } |
1107 | efx->membase = ioremap_wc(efx->membase_phys, | 1108 | |
1108 | efx->type->mem_map_size); | 1109 | /* bug22643: If SR-IOV is enabled then tx push over a write combined |
1110 | * mapping is unsafe. We need to disable write combining in this case. | ||
1111 | * MSI is unsupported when SR-IOV is enabled, and the firmware will | ||
1112 | * have removed the MSI capability. So write combining is safe if | ||
1113 | * there is an MSI capability. | ||
1114 | */ | ||
1115 | use_wc = (!EFX_WORKAROUND_22643(efx) || | ||
1116 | pci_find_capability(pci_dev, PCI_CAP_ID_MSI)); | ||
1117 | if (use_wc) | ||
1118 | efx->membase = ioremap_wc(efx->membase_phys, | ||
1119 | efx->type->mem_map_size); | ||
1120 | else | ||
1121 | efx->membase = ioremap_nocache(efx->membase_phys, | ||
1122 | efx->type->mem_map_size); | ||
1109 | if (!efx->membase) { | 1123 | if (!efx->membase) { |
1110 | netif_err(efx, probe, efx->net_dev, | 1124 | netif_err(efx, probe, efx->net_dev, |
1111 | "could not map memory BAR at %llx+%x\n", | 1125 | "could not map memory BAR at %llx+%x\n", |
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index e4dd3a7f304..99ff11400ce 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h | |||
@@ -38,6 +38,8 @@ | |||
38 | #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS | 38 | #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS |
39 | /* Legacy interrupt storm when interrupt fifo fills */ | 39 | /* Legacy interrupt storm when interrupt fifo fills */ |
40 | #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA | 40 | #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA |
41 | /* Write combining and sriov=enabled are incompatible */ | ||
42 | #define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA | ||
41 | 43 | ||
42 | /* Spurious parity errors in TSORT buffers */ | 44 | /* Spurious parity errors in TSORT buffers */ |
43 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A | 45 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A |
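The workaround above rests on an indirect test: the firmware removes the MSI capability when SR-IOV is enabled, and TX push over a write-combined mapping is only unsafe with SR-IOV, so finding MSI is proof that write combining is safe. A sketch of that decision with stand-in predicates (both helpers are illustrative, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in predicates; in the driver these are EFX_WORKAROUND_22643(efx)
 * and pci_find_capability(pci_dev, PCI_CAP_ID_MSI). */
static bool workaround_22643_applies(void) { return true; }	/* Siena NIC */
static bool has_msi_capability(void)       { return false; }	/* stripped: SR-IOV on */

int main(void)
{
	/* WC is safe unless the workaround applies and MSI has been removed */
	bool use_wc = !workaround_22643_applies() || has_msi_capability();

	printf("mapping BAR with %s\n",
	       use_wc ? "ioremap_wc (write-combined)"
		      : "ioremap_nocache (uncached)");
	return 0;
}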
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index bc86f4b6ecc..727874d9deb 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
@@ -49,6 +49,8 @@ | |||
49 | 49 | ||
50 | struct smsc95xx_priv { | 50 | struct smsc95xx_priv { |
51 | u32 mac_cr; | 51 | u32 mac_cr; |
52 | u32 hash_hi; | ||
53 | u32 hash_lo; | ||
52 | spinlock_t mac_cr_lock; | 54 | spinlock_t mac_cr_lock; |
53 | bool use_tx_csum; | 55 | bool use_tx_csum; |
54 | bool use_rx_csum; | 56 | bool use_rx_csum; |
@@ -370,10 +372,11 @@ static void smsc95xx_set_multicast(struct net_device *netdev) | |||
370 | { | 372 | { |
371 | struct usbnet *dev = netdev_priv(netdev); | 373 | struct usbnet *dev = netdev_priv(netdev); |
372 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); | 374 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); |
373 | u32 hash_hi = 0; | ||
374 | u32 hash_lo = 0; | ||
375 | unsigned long flags; | 375 | unsigned long flags; |
376 | 376 | ||
377 | pdata->hash_hi = 0; | ||
378 | pdata->hash_lo = 0; | ||
379 | |||
377 | spin_lock_irqsave(&pdata->mac_cr_lock, flags); | 380 | spin_lock_irqsave(&pdata->mac_cr_lock, flags); |
378 | 381 | ||
379 | if (dev->net->flags & IFF_PROMISC) { | 382 | if (dev->net->flags & IFF_PROMISC) { |
@@ -394,13 +397,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev) | |||
394 | u32 bitnum = smsc95xx_hash(ha->addr); | 397 | u32 bitnum = smsc95xx_hash(ha->addr); |
395 | u32 mask = 0x01 << (bitnum & 0x1F); | 398 | u32 mask = 0x01 << (bitnum & 0x1F); |
396 | if (bitnum & 0x20) | 399 | if (bitnum & 0x20) |
397 | hash_hi |= mask; | 400 | pdata->hash_hi |= mask; |
398 | else | 401 | else |
399 | hash_lo |= mask; | 402 | pdata->hash_lo |= mask; |
400 | } | 403 | } |
401 | 404 | ||
402 | netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n", | 405 | netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n", |
403 | hash_hi, hash_lo); | 406 | pdata->hash_hi, pdata->hash_lo); |
404 | } else { | 407 | } else { |
405 | netif_dbg(dev, drv, dev->net, "receive own packets only\n"); | 408 | netif_dbg(dev, drv, dev->net, "receive own packets only\n"); |
406 | pdata->mac_cr &= | 409 | pdata->mac_cr &= |
@@ -410,8 +413,8 @@ static void smsc95xx_set_multicast(struct net_device *netdev) | |||
410 | spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); | 413 | spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); |
411 | 414 | ||
412 | /* Initiate async writes, as we can't wait for completion here */ | 415 | /* Initiate async writes, as we can't wait for completion here */ |
413 | smsc95xx_write_reg_async(dev, HASHH, &hash_hi); | 416 | smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi); |
414 | smsc95xx_write_reg_async(dev, HASHL, &hash_lo); | 417 | smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo); |
415 | smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); | 418 | smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); |
416 | } | 419 | } |
417 | 420 | ||
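Moving hash_hi/hash_lo into smsc95xx_priv matters because the register writes are asynchronous: with stack variables, the URBs could complete after smsc95xx_set_multicast() had returned and read freed stack memory. The bit placement itself is simple; bit 5 of the 6-bit hash selects the high or low filter word and bits 0..4 select the bit within it, as in this sketch:

#include <stdint.h>
#include <stdio.h>

struct hash_filter {
	uint32_t hash_hi;
	uint32_t hash_lo;
};

/* bitnum is the 6-bit hash of a multicast MAC (hash function omitted) */
static void set_hash_bit(struct hash_filter *f, uint32_t bitnum)
{
	uint32_t mask = 1u << (bitnum & 0x1F);	/* bit inside the word */

	if (bitnum & 0x20)			/* bit 5 selects the word */
		f->hash_hi |= mask;
	else
		f->hash_lo |= mask;
}

int main(void)
{
	struct hash_filter f = { 0, 0 };

	set_hash_bit(&f, 0x21);	/* -> HASHH bit 1 */
	set_hash_bit(&f, 0x05);	/* -> HASHL bit 5 */
	printf("HASHH=0x%08X, HASHL=0x%08X\n", f.hash_hi, f.hash_lo);
	return 0;
}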
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 115f162c617..524825720a0 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -2160,6 +2160,8 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) | |||
2160 | if (!ath_drain_all_txq(sc, false)) | 2160 | if (!ath_drain_all_txq(sc, false)) |
2161 | ath_reset(sc, false); | 2161 | ath_reset(sc, false); |
2162 | 2162 | ||
2163 | ieee80211_wake_queues(hw); | ||
2164 | |||
2163 | out: | 2165 | out: |
2164 | ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); | 2166 | ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); |
2165 | mutex_unlock(&sc->mutex); | 2167 | mutex_unlock(&sc->mutex); |
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 960d717ca7c..a3241cd089b 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c | |||
@@ -1328,7 +1328,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, | |||
1328 | 1328 | ||
1329 | hdr = (struct ieee80211_hdr *)skb->data; | 1329 | hdr = (struct ieee80211_hdr *)skb->data; |
1330 | fc = hdr->frame_control; | 1330 | fc = hdr->frame_control; |
1331 | for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { | 1331 | for (i = 0; i < sc->hw->max_rates; i++) { |
1332 | struct ieee80211_tx_rate *rate = &tx_info->status.rates[i]; | 1332 | struct ieee80211_tx_rate *rate = &tx_info->status.rates[i]; |
1333 | if (!rate->count) | 1333 | if (!rate->count) |
1334 | break; | 1334 | break; |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index ef22096d40c..26734e53b37 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -1725,8 +1725,8 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, | |||
1725 | u8 tidno; | 1725 | u8 tidno; |
1726 | 1726 | ||
1727 | spin_lock_bh(&txctl->txq->axq_lock); | 1727 | spin_lock_bh(&txctl->txq->axq_lock); |
1728 | 1728 | if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && | |
1729 | if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) { | 1729 | ieee80211_is_data_qos(hdr->frame_control)) { |
1730 | tidno = ieee80211_get_qos_ctl(hdr)[0] & | 1730 | tidno = ieee80211_get_qos_ctl(hdr)[0] & |
1731 | IEEE80211_QOS_CTL_TID_MASK; | 1731 | IEEE80211_QOS_CTL_TID_MASK; |
1732 | tid = ATH_AN_2_TID(txctl->an, tidno); | 1732 | tid = ATH_AN_2_TID(txctl->an, tidno); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index 2003c1d4295..08ccb9496f7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c | |||
@@ -2265,7 +2265,7 @@ signed long iwlagn_wait_notification(struct iwl_priv *priv, | |||
2265 | int ret; | 2265 | int ret; |
2266 | 2266 | ||
2267 | ret = wait_event_timeout(priv->_agn.notif_waitq, | 2267 | ret = wait_event_timeout(priv->_agn.notif_waitq, |
2268 | &wait_entry->triggered, | 2268 | wait_entry->triggered, |
2269 | timeout); | 2269 | timeout); |
2270 | 2270 | ||
2271 | spin_lock_bh(&priv->_agn.notif_wait_lock); | 2271 | spin_lock_bh(&priv->_agn.notif_wait_lock); |
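The one-character fix above matters because wait_event_timeout() re-evaluates its second argument as a boolean condition on every wakeup: passing &wait_entry->triggered tested the field's address, which is never zero, so the wait could never actually block on the flag's value. A user-space sketch of the difference:

#include <stdbool.h>
#include <stdio.h>

struct notif_wait { bool triggered; };

int main(void)
{
	struct notif_wait w = { .triggered = false };
	bool *cond = &w.triggered;

	/* Buggy condition: the address of the flag is never NULL, so a
	 * wait built on it is satisfied immediately. */
	if (cond)
		printf("address-of condition: always true\n");

	/* Correct condition: the flag's value, the thing the notification
	 * handler actually sets. */
	if (!w.triggered)
		printf("value condition: still waiting\n");
	return 0;
}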
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 581dc9f1027..321b18b5913 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -3009,14 +3009,17 @@ static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw) | |||
3009 | 3009 | ||
3010 | mutex_lock(&priv->mutex); | 3010 | mutex_lock(&priv->mutex); |
3011 | 3011 | ||
3012 | if (!priv->_agn.offchan_tx_skb) | 3012 | if (!priv->_agn.offchan_tx_skb) { |
3013 | return -EINVAL; | 3013 | ret = -EINVAL; |
3014 | goto unlock; | ||
3015 | } | ||
3014 | 3016 | ||
3015 | priv->_agn.offchan_tx_skb = NULL; | 3017 | priv->_agn.offchan_tx_skb = NULL; |
3016 | 3018 | ||
3017 | ret = iwl_scan_cancel_timeout(priv, 200); | 3019 | ret = iwl_scan_cancel_timeout(priv, 200); |
3018 | if (ret) | 3020 | if (ret) |
3019 | ret = -EIO; | 3021 | ret = -EIO; |
3022 | unlock: | ||
3020 | mutex_unlock(&priv->mutex); | 3023 | mutex_unlock(&priv->mutex); |
3021 | 3024 | ||
3022 | return ret; | 3025 | return ret; |
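The early return above left priv->mutex held, deadlocking the next caller; the fix funnels the error path through a single unlock label. The same pattern, sketched with a pthread mutex standing in for the kernel mutex API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_work(int input_ok)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!input_ok) {
		ret = -1;	/* was "return -1;", which leaked the lock */
		goto unlock;
	}
	/* ... work performed under the lock ... */
unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("bad input -> %d, lock released either way\n", do_work(0));
	return 0;
}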
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c index 09fae2f0ea0..736bbb9bd1d 100644 --- a/drivers/net/wireless/orinoco/cfg.c +++ b/drivers/net/wireless/orinoco/cfg.c | |||
@@ -153,6 +153,9 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev, | |||
153 | priv->scan_request = request; | 153 | priv->scan_request = request; |
154 | 154 | ||
155 | err = orinoco_hw_trigger_scan(priv, request->ssids); | 155 | err = orinoco_hw_trigger_scan(priv, request->ssids); |
156 | /* On error we aren't processing the request */ | ||
157 | if (err) | ||
158 | priv->scan_request = NULL; | ||
156 | 159 | ||
157 | return err; | 160 | return err; |
158 | } | 161 | } |
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c index f3d396e7544..62c6b2b37db 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/orinoco/main.c | |||
@@ -1376,13 +1376,13 @@ static void orinoco_process_scan_results(struct work_struct *work) | |||
1376 | 1376 | ||
1377 | spin_lock_irqsave(&priv->scan_lock, flags); | 1377 | spin_lock_irqsave(&priv->scan_lock, flags); |
1378 | list_for_each_entry_safe(sd, temp, &priv->scan_list, list) { | 1378 | list_for_each_entry_safe(sd, temp, &priv->scan_list, list) { |
1379 | spin_unlock_irqrestore(&priv->scan_lock, flags); | ||
1380 | 1379 | ||
1381 | buf = sd->buf; | 1380 | buf = sd->buf; |
1382 | len = sd->len; | 1381 | len = sd->len; |
1383 | type = sd->type; | 1382 | type = sd->type; |
1384 | 1383 | ||
1385 | list_del(&sd->list); | 1384 | list_del(&sd->list); |
1385 | spin_unlock_irqrestore(&priv->scan_lock, flags); | ||
1386 | kfree(sd); | 1386 | kfree(sd); |
1387 | 1387 | ||
1388 | if (len > 0) { | 1388 | if (len > 0) { |
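The orinoco fix above narrows a race: the spinlock must stay held until the entry is unlinked, otherwise a concurrent writer can modify the list while the walker still holds a pointer into it. The corrected ordering, unlink under the lock and then drop it for the heavy work, sketched with a mutex and a plain singly linked list:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct scan_data { struct scan_data *next; int len; };

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
static struct scan_data *scan_list;

static void process_scan_results(void)
{
	struct scan_data *sd;

	pthread_mutex_lock(&scan_lock);
	while ((sd = scan_list) != NULL) {
		int len = sd->len;		/* copy out what we need */

		scan_list = sd->next;		/* unlink while still locked */
		pthread_mutex_unlock(&scan_lock);

		printf("processing result, len=%d\n", len);
		free(sd);			/* heavy work outside the lock */

		pthread_mutex_lock(&scan_lock);	/* retake for the next pass */
	}
	pthread_mutex_unlock(&scan_lock);
}

int main(void)
{
	struct scan_data *sd = calloc(1, sizeof(*sd));

	sd->len = 42;
	scan_list = sd;
	process_scan_results();
	return 0;
}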
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index f1a92144996..4e368657a83 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c | |||
@@ -719,6 +719,7 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
719 | { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, | 719 | { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, |
720 | { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, | 720 | { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, |
721 | { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, | 721 | { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, |
722 | { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) }, | ||
722 | /* AzureWave */ | 723 | /* AzureWave */ |
723 | { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, | 724 | { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, |
724 | { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, | 725 | { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, |
@@ -913,7 +914,6 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
913 | { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, | 914 | { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, |
914 | { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, | 915 | { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, |
915 | { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, | 916 | { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, |
916 | { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) }, | ||
917 | /* AzureWave */ | 917 | /* AzureWave */ |
918 | { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, | 918 | { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, |
919 | { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, | 919 | { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, |
@@ -937,6 +937,8 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
937 | { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, | 937 | { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, |
938 | { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, | 938 | { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, |
939 | { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) }, | 939 | { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) }, |
940 | /* Edimax */ | ||
941 | { USB_DEVICE(0x7392, 0x4085), USB_DEVICE_DATA(&rt2800usb_ops) }, | ||
940 | /* Encore */ | 942 | /* Encore */ |
941 | { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, | 943 | { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, |
942 | /* Gemtek */ | 944 | /* Gemtek */ |
@@ -961,6 +963,7 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
961 | { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, | 963 | { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, |
962 | { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) }, | 964 | { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) }, |
963 | /* Planex */ | 965 | /* Planex */ |
966 | { USB_DEVICE(0x2019, 0x5201), USB_DEVICE_DATA(&rt2800usb_ops) }, | ||
964 | { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, | 967 | { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, |
965 | /* Qcom */ | 968 | /* Qcom */ |
966 | { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, | 969 | { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, |
@@ -972,6 +975,8 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
972 | /* Sweex */ | 975 | /* Sweex */ |
973 | { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, | 976 | { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, |
974 | { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, | 977 | { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, |
978 | /* Toshiba */ | ||
979 | { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) }, | ||
975 | /* Zyxel */ | 980 | /* Zyxel */ |
976 | { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) }, | 981 | { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) }, |
977 | #endif | 982 | #endif |
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index 4f92cba6810..f74a8701c67 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c | |||
@@ -410,8 +410,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw) | |||
410 | 410 | ||
411 | if (!efuse_shadow_update_chk(hw)) { | 411 | if (!efuse_shadow_update_chk(hw)) { |
412 | efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); | 412 | efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); |
413 | memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], | 413 | memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], |
414 | (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], | 414 | &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], |
415 | rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); | 415 | rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); |
416 | 416 | ||
417 | RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, | 417 | RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, |
@@ -446,9 +446,9 @@ bool efuse_shadow_update(struct ieee80211_hw *hw) | |||
446 | 446 | ||
447 | if (word_en != 0x0F) { | 447 | if (word_en != 0x0F) { |
448 | u8 tmpdata[8]; | 448 | u8 tmpdata[8]; |
449 | memcpy((void *)tmpdata, | 449 | memcpy(tmpdata, |
450 | (void *)(&rtlefuse-> | 450 | &rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base], |
451 | efuse_map[EFUSE_MODIFY_MAP][base]), 8); | 451 | 8); |
452 | RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, | 452 | RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, |
453 | ("U-efuse\n"), tmpdata, 8); | 453 | ("U-efuse\n"), tmpdata, 8); |
454 | 454 | ||
@@ -465,8 +465,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw) | |||
465 | efuse_power_switch(hw, true, false); | 465 | efuse_power_switch(hw, true, false); |
466 | efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); | 466 | efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); |
467 | 467 | ||
468 | memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], | 468 | memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], |
469 | (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], | 469 | &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], |
470 | rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); | 470 | rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); |
471 | 471 | ||
472 | RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n")); | 472 | RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n")); |
@@ -479,13 +479,12 @@ void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw) | |||
479 | struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); | 479 | struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); |
480 | 480 | ||
481 | if (rtlefuse->autoload_failflag == true) { | 481 | if (rtlefuse->autoload_failflag == true) { |
482 | memset((void *)(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]), 128, | 482 | memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF, 128); |
483 | 0xFF); | ||
484 | } else | 483 | } else |
485 | efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); | 484 | efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); |
486 | 485 | ||
487 | memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], | 486 | memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], |
488 | (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], | 487 | &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], |
489 | rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); | 488 | rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); |
490 | 489 | ||
491 | } | 490 | } |
@@ -694,8 +693,8 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data) | |||
694 | if (offset > 15) | 693 | if (offset > 15) |
695 | return false; | 694 | return false; |
696 | 695 | ||
697 | memset((void *)data, PGPKT_DATA_SIZE * sizeof(u8), 0xff); | 696 | memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8)); |
698 | memset((void *)tmpdata, PGPKT_DATA_SIZE * sizeof(u8), 0xff); | 697 | memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8)); |
699 | 698 | ||
700 | while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) { | 699 | while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) { |
701 | if (readstate & PG_STATE_HEADER) { | 700 | if (readstate & PG_STATE_HEADER) { |
@@ -862,7 +861,7 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr, | |||
862 | 861 | ||
863 | tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); | 862 | tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); |
864 | 863 | ||
865 | memset((void *)originaldata, 8 * sizeof(u8), 0xff); | 864 | memset(originaldata, 0xff, 8 * sizeof(u8)); |
866 | 865 | ||
867 | if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) { | 866 | if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) { |
868 | badworden = efuse_word_enable_data_write(hw, | 867 | badworden = efuse_word_enable_data_write(hw, |
@@ -917,7 +916,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, | |||
917 | target_pkt.offset = offset; | 916 | target_pkt.offset = offset; |
918 | target_pkt.word_en = word_en; | 917 | target_pkt.word_en = word_en; |
919 | 918 | ||
920 | memset((void *)target_pkt.data, 8 * sizeof(u8), 0xFF); | 919 | memset(target_pkt.data, 0xFF, 8 * sizeof(u8)); |
921 | 920 | ||
922 | efuse_word_enable_data_read(word_en, data, target_pkt.data); | 921 | efuse_word_enable_data_read(word_en, data, target_pkt.data); |
923 | target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en); | 922 | target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en); |
@@ -1022,7 +1021,7 @@ static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw, | |||
1022 | u8 badworden = 0x0F; | 1021 | u8 badworden = 0x0F; |
1023 | u8 tmpdata[8]; | 1022 | u8 tmpdata[8]; |
1024 | 1023 | ||
1025 | memset((void *)tmpdata, PGPKT_DATA_SIZE, 0xff); | 1024 | memset(tmpdata, 0xff, PGPKT_DATA_SIZE); |
1026 | RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, | 1025 | RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, |
1027 | ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr)); | 1026 | ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr)); |
1028 | 1027 | ||
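Every efuse change above corrects the same transposed-argument bug: memset() takes (dst, value, size), so memset(tmpdata, PGPKT_DATA_SIZE, 0xff) filled 0xff bytes with the value 8, overrunning an 8-byte buffer instead of initializing it to 0xff. A minimal demonstration of the right call:

#include <stdio.h>
#include <string.h>

#define PGPKT_DATA_SIZE 8

int main(void)
{
	unsigned char tmpdata[PGPKT_DATA_SIZE];

	/* Wrong (the removed code, arguments transposed): this would write
	 * 0xff bytes of the value 8, far past the end of tmpdata[]:
	 *
	 *	memset(tmpdata, PGPKT_DATA_SIZE, 0xff);
	 */

	/* Right: memset(dst, value, size). */
	memset(tmpdata, 0xff, PGPKT_DATA_SIZE);

	printf("tmpdata[0] = 0x%02x\n", tmpdata[0]);	/* 0xff */
	return 0;
}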
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 81e80489a05..58236e6d092 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -60,6 +60,7 @@ static struct usb_device_id usb_ids[] = { | |||
60 | { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, | 60 | { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, |
61 | { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, | 61 | { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, |
62 | { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 }, | 62 | { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 }, |
63 | { USB_DEVICE(0x157e, 0x3207), .driver_info = DEVICE_ZD1211 }, | ||
63 | { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 }, | 64 | { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 }, |
64 | { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, | 65 | { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, |
65 | /* ZD1211B */ | 66 | /* ZD1211B */ |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 4789f8e8bf7..a4115f1afe1 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include <linux/iova.h> | 36 | #include <linux/iova.h> |
37 | #include <linux/iommu.h> | 37 | #include <linux/iommu.h> |
38 | #include <linux/intel-iommu.h> | 38 | #include <linux/intel-iommu.h> |
39 | #include <linux/sysdev.h> | 39 | #include <linux/syscore_ops.h> |
40 | #include <linux/tboot.h> | 40 | #include <linux/tboot.h> |
41 | #include <linux/dmi.h> | 41 | #include <linux/dmi.h> |
42 | #include <asm/cacheflush.h> | 42 | #include <asm/cacheflush.h> |
@@ -3135,7 +3135,7 @@ static void iommu_flush_all(void) | |||
3135 | } | 3135 | } |
3136 | } | 3136 | } |
3137 | 3137 | ||
3138 | static int iommu_suspend(struct sys_device *dev, pm_message_t state) | 3138 | static int iommu_suspend(void) |
3139 | { | 3139 | { |
3140 | struct dmar_drhd_unit *drhd; | 3140 | struct dmar_drhd_unit *drhd; |
3141 | struct intel_iommu *iommu = NULL; | 3141 | struct intel_iommu *iommu = NULL; |
@@ -3175,7 +3175,7 @@ nomem: | |||
3175 | return -ENOMEM; | 3175 | return -ENOMEM; |
3176 | } | 3176 | } |
3177 | 3177 | ||
3178 | static int iommu_resume(struct sys_device *dev) | 3178 | static void iommu_resume(void) |
3179 | { | 3179 | { |
3180 | struct dmar_drhd_unit *drhd; | 3180 | struct dmar_drhd_unit *drhd; |
3181 | struct intel_iommu *iommu = NULL; | 3181 | struct intel_iommu *iommu = NULL; |
@@ -3183,7 +3183,7 @@ static int iommu_resume(struct sys_device *dev) | |||
3183 | 3183 | ||
3184 | if (init_iommu_hw()) { | 3184 | if (init_iommu_hw()) { |
3185 | WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); | 3185 | WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); |
3186 | return -EIO; | 3186 | return; |
3187 | } | 3187 | } |
3188 | 3188 | ||
3189 | for_each_active_iommu(iommu, drhd) { | 3189 | for_each_active_iommu(iommu, drhd) { |
@@ -3204,40 +3204,20 @@ static int iommu_resume(struct sys_device *dev) | |||
3204 | 3204 | ||
3205 | for_each_active_iommu(iommu, drhd) | 3205 | for_each_active_iommu(iommu, drhd) |
3206 | kfree(iommu->iommu_state); | 3206 | kfree(iommu->iommu_state); |
3207 | |||
3208 | return 0; | ||
3209 | } | 3207 | } |
3210 | 3208 | ||
3211 | static struct sysdev_class iommu_sysclass = { | 3209 | static struct syscore_ops iommu_syscore_ops = { |
3212 | .name = "iommu", | ||
3213 | .resume = iommu_resume, | 3210 | .resume = iommu_resume, |
3214 | .suspend = iommu_suspend, | 3211 | .suspend = iommu_suspend, |
3215 | }; | 3212 | }; |
3216 | 3213 | ||
3217 | static struct sys_device device_iommu = { | 3214 | static void __init init_iommu_pm_ops(void) |
3218 | .cls = &iommu_sysclass, | ||
3219 | }; | ||
3220 | |||
3221 | static int __init init_iommu_sysfs(void) | ||
3222 | { | 3215 | { |
3223 | int error; | 3216 | register_syscore_ops(&iommu_syscore_ops); |
3224 | |||
3225 | error = sysdev_class_register(&iommu_sysclass); | ||
3226 | if (error) | ||
3227 | return error; | ||
3228 | |||
3229 | error = sysdev_register(&device_iommu); | ||
3230 | if (error) | ||
3231 | sysdev_class_unregister(&iommu_sysclass); | ||
3232 | |||
3233 | return error; | ||
3234 | } | 3217 | } |
3235 | 3218 | ||
3236 | #else | 3219 | #else |
3237 | static int __init init_iommu_sysfs(void) | 3220 | static inline void init_iommu_pm_ops(void) {} |
3238 | { | ||
3239 | return 0; | ||
3240 | } | ||
3241 | #endif /* CONFIG_PM */ | 3221 | #endif /* CONFIG_PM */ |
3242 | 3222 | ||
3243 | /* | 3223 | /* |
@@ -3320,7 +3300,7 @@ int __init intel_iommu_init(void) | |||
3320 | #endif | 3300 | #endif |
3321 | dma_ops = &intel_dma_ops; | 3301 | dma_ops = &intel_dma_ops; |
3322 | 3302 | ||
3323 | init_iommu_sysfs(); | 3303 | init_iommu_pm_ops(); |
3324 | 3304 | ||
3325 | register_iommu(&intel_iommu_ops); | 3305 | register_iommu(&intel_iommu_ops); |
3326 | 3306 | ||
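The intel-iommu conversion above swaps a sysdev class plus device pair for a single struct syscore_ops: suspend returns an int and runs late with interrupts off, resume returns void, and register_syscore_ops() cannot fail, which is why all the registration error handling disappears. A skeleton of the pattern; this is kernel code and builds only against the kernel headers:

/* Skeleton of the syscore_ops pattern the patch converts to. */
#include <linux/init.h>
#include <linux/syscore_ops.h>

static int my_suspend(void)
{
	/* save hardware state; called late, with interrupts disabled */
	return 0;
}

static void my_resume(void)
{
	/* restore hardware state; void return -- failures can only be logged */
}

static struct syscore_ops my_syscore_ops = {
	.suspend = my_suspend,
	.resume  = my_resume,
};

static void __init my_init_pm(void)
{
	register_syscore_ops(&my_syscore_ops);	/* returns void, cannot fail */
}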
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b714d787bdd..2472e7177b4 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -740,6 +740,12 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
740 | 740 | ||
741 | if (!__pci_complete_power_transition(dev, state)) | 741 | if (!__pci_complete_power_transition(dev, state)) |
742 | error = 0; | 742 | error = 0; |
743 | /* | ||
744 | * When aspm_policy is "powersave" this call ensures | ||
745 | * that ASPM is configured. | ||
746 | */ | ||
747 | if (!error && dev->bus->self) | ||
748 | pcie_aspm_powersave_config_link(dev->bus->self); | ||
743 | 749 | ||
744 | return error; | 750 | return error; |
745 | } | 751 | } |
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 3188cd96b33..eee09f756ec 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -69,6 +69,7 @@ struct pcie_link_state { | |||
69 | }; | 69 | }; |
70 | 70 | ||
71 | static int aspm_disabled, aspm_force, aspm_clear_state; | 71 | static int aspm_disabled, aspm_force, aspm_clear_state; |
72 | static bool aspm_support_enabled = true; | ||
72 | static DEFINE_MUTEX(aspm_lock); | 73 | static DEFINE_MUTEX(aspm_lock); |
73 | static LIST_HEAD(link_list); | 74 | static LIST_HEAD(link_list); |
74 | 75 | ||
@@ -707,6 +708,28 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev) | |||
707 | up_read(&pci_bus_sem); | 708 | up_read(&pci_bus_sem); |
708 | } | 709 | } |
709 | 710 | ||
711 | void pcie_aspm_powersave_config_link(struct pci_dev *pdev) | ||
712 | { | ||
713 | struct pcie_link_state *link = pdev->link_state; | ||
714 | |||
715 | if (aspm_disabled || !pci_is_pcie(pdev) || !link) | ||
716 | return; | ||
717 | |||
718 | if (aspm_policy != POLICY_POWERSAVE) | ||
719 | return; | ||
720 | |||
721 | if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && | ||
722 | (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) | ||
723 | return; | ||
724 | |||
725 | down_read(&pci_bus_sem); | ||
726 | mutex_lock(&aspm_lock); | ||
727 | pcie_config_aspm_path(link); | ||
728 | pcie_set_clkpm(link, policy_to_clkpm_state(link)); | ||
729 | mutex_unlock(&aspm_lock); | ||
730 | up_read(&pci_bus_sem); | ||
731 | } | ||
732 | |||
710 | /* | 733 | /* |
711 | * pci_disable_link_state - disable pci device's link state, so the link will | 734 | * pci_disable_link_state - disable pci device's link state, so the link will |
712 | * never enter specific states | 735 | * never enter specific states |
@@ -747,6 +770,8 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) | |||
747 | int i; | 770 | int i; |
748 | struct pcie_link_state *link; | 771 | struct pcie_link_state *link; |
749 | 772 | ||
773 | if (aspm_disabled) | ||
774 | return -EPERM; | ||
750 | for (i = 0; i < ARRAY_SIZE(policy_str); i++) | 775 | for (i = 0; i < ARRAY_SIZE(policy_str); i++) |
751 | if (!strncmp(val, policy_str[i], strlen(policy_str[i]))) | 776 | if (!strncmp(val, policy_str[i], strlen(policy_str[i]))) |
752 | break; | 777 | break; |
@@ -801,6 +826,8 @@ static ssize_t link_state_store(struct device *dev, | |||
801 | struct pcie_link_state *link, *root = pdev->link_state->root; | 826 | struct pcie_link_state *link, *root = pdev->link_state->root; |
802 | u32 val = buf[0] - '0', state = 0; | 827 | u32 val = buf[0] - '0', state = 0; |
803 | 828 | ||
829 | if (aspm_disabled) | ||
830 | return -EPERM; | ||
804 | if (n < 1 || val > 3) | 831 | if (n < 1 || val > 3) |
805 | return -EINVAL; | 832 | return -EINVAL; |
806 | 833 | ||
@@ -896,6 +923,7 @@ static int __init pcie_aspm_disable(char *str) | |||
896 | { | 923 | { |
897 | if (!strcmp(str, "off")) { | 924 | if (!strcmp(str, "off")) { |
898 | aspm_disabled = 1; | 925 | aspm_disabled = 1; |
926 | aspm_support_enabled = false; | ||
899 | printk(KERN_INFO "PCIe ASPM is disabled\n"); | 927 | printk(KERN_INFO "PCIe ASPM is disabled\n"); |
900 | } else if (!strcmp(str, "force")) { | 928 | } else if (!strcmp(str, "force")) { |
901 | aspm_force = 1; | 929 | aspm_force = 1; |
@@ -930,3 +958,8 @@ int pcie_aspm_enabled(void) | |||
930 | } | 958 | } |
931 | EXPORT_SYMBOL(pcie_aspm_enabled); | 959 | EXPORT_SYMBOL(pcie_aspm_enabled); |
932 | 960 | ||
961 | bool pcie_aspm_support_enabled(void) | ||
962 | { | ||
963 | return aspm_support_enabled; | ||
964 | } | ||
965 | EXPORT_SYMBOL(pcie_aspm_support_enabled); | ||
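Taken together, the ASPM changes make the pcie_aspm=off boot parameter final: it clears both the control flag and the new support flag (so _OSC no longer advertises ASPM support), and later policy writes through the module parameter or per-device sysfs fail with -EPERM. A user-space sketch of that gating; the -1 return stands in for -EPERM:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;

/* mirrors the pcie_aspm= early-parameter handling */
static void parse_pcie_aspm(const char *str)
{
	if (!strcmp(str, "off")) {
		aspm_disabled = 1;
		aspm_support_enabled = false;	/* _OSC stops advertising ASPM */
	} else if (!strcmp(str, "force")) {
		aspm_force = 1;
	}
}

/* mirrors the new guard in pcie_aspm_set_policy()/link_state_store() */
static int set_policy(const char *val)
{
	if (aspm_disabled)
		return -1;	/* stands in for -EPERM */
	printf("policy set to %s\n", val);
	return 0;
}

int main(void)
{
	parse_pcie_aspm("off");
	printf("set_policy -> %d\n", set_policy("powersave"));
	return 0;
}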
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 5130d0d2239..595654a1a6a 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/pcieport_if.h> | 16 | #include <linux/pcieport_if.h> |
17 | #include <linux/aer.h> | 17 | #include <linux/aer.h> |
18 | #include <linux/pci-aspm.h> | ||
19 | 18 | ||
20 | #include "../pci.h" | 19 | #include "../pci.h" |
21 | #include "portdrv.h" | 20 | #include "portdrv.h" |
@@ -356,10 +355,8 @@ int pcie_port_device_register(struct pci_dev *dev) | |||
356 | 355 | ||
357 | /* Get and check PCI Express port services */ | 356 | /* Get and check PCI Express port services */ |
358 | capabilities = get_port_device_capability(dev); | 357 | capabilities = get_port_device_capability(dev); |
359 | if (!capabilities) { | 358 | if (!capabilities) |
360 | pcie_no_aspm(); | ||
361 | return 0; | 359 | return 0; |
362 | } | ||
363 | 360 | ||
364 | pci_set_master(dev); | 361 | pci_set_master(dev); |
365 | /* | 362 | /* |
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 61bf5d72413..52a462fc6b8 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig | |||
@@ -117,10 +117,24 @@ config BATTERY_BQ20Z75 | |||
117 | 117 | ||
118 | config BATTERY_BQ27x00 | 118 | config BATTERY_BQ27x00 |
119 | tristate "BQ27x00 battery driver" | 119 | tristate "BQ27x00 battery driver" |
120 | help | ||
121 | Say Y here to enable support for batteries with BQ27x00 (I2C/HDQ) chips. | ||
122 | |||
123 | config BATTERY_BQ27X00_I2C | ||
124 | bool "BQ27200/BQ27500 support" | ||
125 | depends on BATTERY_BQ27x00 | ||
120 | depends on I2C | 126 | depends on I2C |
127 | default y | ||
121 | help | 128 | help |
122 | Say Y here to enable support for batteries with BQ27x00 (I2C) chips. | 129 | Say Y here to enable support for batteries with BQ27x00 (I2C) chips. |
123 | 130 | ||
131 | config BATTERY_BQ27X00_PLATFORM | ||
132 | bool "BQ27000 support" | ||
133 | depends on BATTERY_BQ27x00 | ||
134 | default y | ||
135 | help | ||
136 | Say Y here to enable support for batteries with BQ27000 (HDQ) chips. | ||
137 | |||
124 | config BATTERY_DA9030 | 138 | config BATTERY_DA9030 |
125 | tristate "DA9030 battery driver" | 139 | tristate "DA9030 battery driver" |
126 | depends on PMIC_DA903X | 140 | depends on PMIC_DA903X |
diff --git a/drivers/power/bq20z75.c b/drivers/power/bq20z75.c index 492da27e1a4..506585e31a5 100644 --- a/drivers/power/bq20z75.c +++ b/drivers/power/bq20z75.c | |||
@@ -25,6 +25,10 @@ | |||
25 | #include <linux/power_supply.h> | 25 | #include <linux/power_supply.h> |
26 | #include <linux/i2c.h> | 26 | #include <linux/i2c.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/gpio.h> | ||
30 | |||
31 | #include <linux/power/bq20z75.h> | ||
28 | 32 | ||
29 | enum { | 33 | enum { |
30 | REG_MANUFACTURER_DATA, | 34 | REG_MANUFACTURER_DATA, |
@@ -38,11 +42,22 @@ enum { | |||
38 | REG_CYCLE_COUNT, | 42 | REG_CYCLE_COUNT, |
39 | REG_SERIAL_NUMBER, | 43 | REG_SERIAL_NUMBER, |
40 | REG_REMAINING_CAPACITY, | 44 | REG_REMAINING_CAPACITY, |
45 | REG_REMAINING_CAPACITY_CHARGE, | ||
41 | REG_FULL_CHARGE_CAPACITY, | 46 | REG_FULL_CHARGE_CAPACITY, |
47 | REG_FULL_CHARGE_CAPACITY_CHARGE, | ||
42 | REG_DESIGN_CAPACITY, | 48 | REG_DESIGN_CAPACITY, |
49 | REG_DESIGN_CAPACITY_CHARGE, | ||
43 | REG_DESIGN_VOLTAGE, | 50 | REG_DESIGN_VOLTAGE, |
44 | }; | 51 | }; |
45 | 52 | ||
53 | /* Battery Mode defines */ | ||
54 | #define BATTERY_MODE_OFFSET 0x03 | ||
55 | #define BATTERY_MODE_MASK 0x8000 | ||
56 | enum bq20z75_battery_mode { | ||
57 | BATTERY_MODE_AMPS, | ||
58 | BATTERY_MODE_WATTS | ||
59 | }; | ||
60 | |||
46 | /* manufacturer access defines */ | 61 | /* manufacturer access defines */ |
47 | #define MANUFACTURER_ACCESS_STATUS 0x0006 | 62 | #define MANUFACTURER_ACCESS_STATUS 0x0006 |
48 | #define MANUFACTURER_ACCESS_SLEEP 0x0011 | 63 | #define MANUFACTURER_ACCESS_SLEEP 0x0011 |
@@ -78,8 +93,12 @@ static const struct bq20z75_device_data { | |||
78 | BQ20Z75_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100), | 93 | BQ20Z75_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100), |
79 | [REG_REMAINING_CAPACITY] = | 94 | [REG_REMAINING_CAPACITY] = |
80 | BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535), | 95 | BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535), |
96 | [REG_REMAINING_CAPACITY_CHARGE] = | ||
97 | BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_NOW, 0x0F, 0, 65535), | ||
81 | [REG_FULL_CHARGE_CAPACITY] = | 98 | [REG_FULL_CHARGE_CAPACITY] = |
82 | BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535), | 99 | BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535), |
100 | [REG_FULL_CHARGE_CAPACITY_CHARGE] = | ||
101 | BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_FULL, 0x10, 0, 65535), | ||
83 | [REG_TIME_TO_EMPTY] = | 102 | [REG_TIME_TO_EMPTY] = |
84 | BQ20Z75_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0, | 103 | BQ20Z75_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0, |
85 | 65535), | 104 | 65535), |
@@ -93,6 +112,9 @@ static const struct bq20z75_device_data { | |||
93 | [REG_DESIGN_CAPACITY] = | 112 | [REG_DESIGN_CAPACITY] = |
94 | BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 0x18, 0, | 113 | BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 0x18, 0, |
95 | 65535), | 114 | 65535), |
115 | [REG_DESIGN_CAPACITY_CHARGE] = | ||
116 | BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 0x18, 0, | ||
117 | 65535), | ||
96 | [REG_DESIGN_VOLTAGE] = | 118 | [REG_DESIGN_VOLTAGE] = |
97 | BQ20Z75_DATA(POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 0x19, 0, | 119 | BQ20Z75_DATA(POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 0x19, 0, |
98 | 65535), | 120 | 65535), |
@@ -117,39 +139,72 @@ static enum power_supply_property bq20z75_properties[] = { | |||
117 | POWER_SUPPLY_PROP_ENERGY_NOW, | 139 | POWER_SUPPLY_PROP_ENERGY_NOW, |
118 | POWER_SUPPLY_PROP_ENERGY_FULL, | 140 | POWER_SUPPLY_PROP_ENERGY_FULL, |
119 | POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, | 141 | POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, |
142 | POWER_SUPPLY_PROP_CHARGE_NOW, | ||
143 | POWER_SUPPLY_PROP_CHARGE_FULL, | ||
144 | POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, | ||
120 | }; | 145 | }; |
121 | 146 | ||
122 | struct bq20z75_info { | 147 | struct bq20z75_info { |
123 | struct i2c_client *client; | 148 | struct i2c_client *client; |
124 | struct power_supply power_supply; | 149 | struct power_supply power_supply; |
150 | struct bq20z75_platform_data *pdata; | ||
151 | bool is_present; | ||
152 | bool gpio_detect; | ||
153 | bool enable_detection; | ||
154 | int irq; | ||
125 | }; | 155 | }; |
126 | 156 | ||
127 | static int bq20z75_read_word_data(struct i2c_client *client, u8 address) | 157 | static int bq20z75_read_word_data(struct i2c_client *client, u8 address) |
128 | { | 158 | { |
129 | s32 ret; | 159 | struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client); |
160 | s32 ret = 0; | ||
161 | int retries = 1; | ||
162 | |||
163 | if (bq20z75_device->pdata) | ||
164 | retries = max(bq20z75_device->pdata->i2c_retry_count + 1, 1); | ||
165 | |||
166 | while (retries > 0) { | ||
167 | ret = i2c_smbus_read_word_data(client, address); | ||
168 | if (ret >= 0) | ||
169 | break; | ||
170 | retries--; | ||
171 | } | ||
130 | 172 | ||
131 | ret = i2c_smbus_read_word_data(client, address); | ||
132 | if (ret < 0) { | 173 | if (ret < 0) { |
133 | dev_err(&client->dev, | 174 | dev_dbg(&client->dev, |
134 | "%s: i2c read at address 0x%x failed\n", | 175 | "%s: i2c read at address 0x%x failed\n", |
135 | __func__, address); | 176 | __func__, address); |
136 | return ret; | 177 | return ret; |
137 | } | 178 | } |
179 | |||
138 | return le16_to_cpu(ret); | 180 | return le16_to_cpu(ret); |
139 | } | 181 | } |
140 | 182 | ||
141 | static int bq20z75_write_word_data(struct i2c_client *client, u8 address, | 183 | static int bq20z75_write_word_data(struct i2c_client *client, u8 address, |
142 | u16 value) | 184 | u16 value) |
143 | { | 185 | { |
144 | s32 ret; | 186 | struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client); |
187 | s32 ret = 0; | ||
188 | int retries = 1; | ||
189 | |||
190 | if (bq20z75_device->pdata) | ||
191 | retries = max(bq20z75_device->pdata->i2c_retry_count + 1, 1); | ||
192 | |||
193 | while (retries > 0) { | ||
194 | ret = i2c_smbus_write_word_data(client, address, | ||
195 | le16_to_cpu(value)); | ||
196 | if (ret >= 0) | ||
197 | break; | ||
198 | retries--; | ||
199 | } | ||
145 | 200 | ||
146 | ret = i2c_smbus_write_word_data(client, address, le16_to_cpu(value)); | ||
147 | if (ret < 0) { | 201 | if (ret < 0) { |
148 | dev_err(&client->dev, | 202 | dev_dbg(&client->dev, |
149 | "%s: i2c write to address 0x%x failed\n", | 203 | "%s: i2c write to address 0x%x failed\n", |
150 | __func__, address); | 204 | __func__, address); |
151 | return ret; | 205 | return ret; |
152 | } | 206 | } |
207 | |||
153 | return 0; | 208 | return 0; |
154 | } | 209 | } |
155 | 210 | ||
@@ -158,6 +213,19 @@ static int bq20z75_get_battery_presence_and_health( | |||
158 | union power_supply_propval *val) | 213 | union power_supply_propval *val) |
159 | { | 214 | { |
160 | s32 ret; | 215 | s32 ret; |
216 | struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client); | ||
217 | |||
218 | if (psp == POWER_SUPPLY_PROP_PRESENT && | ||
219 | bq20z75_device->gpio_detect) { | ||
220 | ret = gpio_get_value( | ||
221 | bq20z75_device->pdata->battery_detect); | ||
222 | if (ret == bq20z75_device->pdata->battery_detect_present) | ||
223 | val->intval = 1; | ||
224 | else | ||
225 | val->intval = 0; | ||
226 | bq20z75_device->is_present = val->intval; | ||
227 | return ret; | ||
228 | } | ||
161 | 229 | ||
162 | /* Write to ManufacturerAccess with | 230 | /* Write to ManufacturerAccess with |
163 | * ManufacturerAccess command and then | 231 | * ManufacturerAccess command and then |
@@ -165,9 +233,11 @@ static int bq20z75_get_battery_presence_and_health( | |||
165 | ret = bq20z75_write_word_data(client, | 233 | ret = bq20z75_write_word_data(client, |
166 | bq20z75_data[REG_MANUFACTURER_DATA].addr, | 234 | bq20z75_data[REG_MANUFACTURER_DATA].addr, |
167 | MANUFACTURER_ACCESS_STATUS); | 235 | MANUFACTURER_ACCESS_STATUS); |
168 | if (ret < 0) | 236 | if (ret < 0) { |
237 | if (psp == POWER_SUPPLY_PROP_PRESENT) | ||
238 | val->intval = 0; /* battery removed */ | ||
169 | return ret; | 239 | return ret; |
170 | 240 | } | |
171 | 241 | ||
172 | ret = bq20z75_read_word_data(client, | 242 | ret = bq20z75_read_word_data(client, |
173 | bq20z75_data[REG_MANUFACTURER_DATA].addr); | 243 | bq20z75_data[REG_MANUFACTURER_DATA].addr); |
@@ -248,30 +318,39 @@ static void bq20z75_unit_adjustment(struct i2c_client *client, | |||
248 | { | 318 | { |
249 | #define BASE_UNIT_CONVERSION 1000 | 319 | #define BASE_UNIT_CONVERSION 1000 |
250 | #define BATTERY_MODE_CAP_MULT_WATT (10 * BASE_UNIT_CONVERSION) | 320 | #define BATTERY_MODE_CAP_MULT_WATT (10 * BASE_UNIT_CONVERSION) |
251 | #define TIME_UNIT_CONVERSION 600 | 321 | #define TIME_UNIT_CONVERSION 60 |
252 | #define TEMP_KELVIN_TO_CELCIUS 2731 | 322 | #define TEMP_KELVIN_TO_CELSIUS 2731 |
253 | switch (psp) { | 323 | switch (psp) { |
254 | case POWER_SUPPLY_PROP_ENERGY_NOW: | 324 | case POWER_SUPPLY_PROP_ENERGY_NOW: |
255 | case POWER_SUPPLY_PROP_ENERGY_FULL: | 325 | case POWER_SUPPLY_PROP_ENERGY_FULL: |
256 | case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: | 326 | case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: |
327 | /* bq20z75 provides energy in units of 10mWh. | ||
328 | * Convert to µWh | ||
329 | */ | ||
257 | val->intval *= BATTERY_MODE_CAP_MULT_WATT; | 330 | val->intval *= BATTERY_MODE_CAP_MULT_WATT; |
258 | break; | 331 | break; |
259 | 332 | ||
260 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: | 333 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: |
261 | case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: | 334 | case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: |
262 | case POWER_SUPPLY_PROP_CURRENT_NOW: | 335 | case POWER_SUPPLY_PROP_CURRENT_NOW: |
336 | case POWER_SUPPLY_PROP_CHARGE_NOW: | ||
337 | case POWER_SUPPLY_PROP_CHARGE_FULL: | ||
338 | case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: | ||
263 | val->intval *= BASE_UNIT_CONVERSION; | 339 | val->intval *= BASE_UNIT_CONVERSION; |
264 | break; | 340 | break; |
265 | 341 | ||
266 | case POWER_SUPPLY_PROP_TEMP: | 342 | case POWER_SUPPLY_PROP_TEMP: |
267 | /* bq20z75 provides battery tempreture in 0.1°K | 343 | /* bq20z75 provides battery temperature in 0.1K |
268 | * so convert it to 0.1°C */ | 344 | * so convert it to 0.1°C |
269 | val->intval -= TEMP_KELVIN_TO_CELCIUS; | 345 | */ |
270 | val->intval *= 10; | 346 | val->intval -= TEMP_KELVIN_TO_CELSIUS; |
271 | break; | 347 | break; |
272 | 348 | ||
273 | case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: | 349 | case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: |
274 | case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: | 350 | case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: |
351 | /* bq20z75 provides time to empty and time to full in minutes. | ||
352 | * Convert to seconds | ||
353 | */ | ||
275 | val->intval *= TIME_UNIT_CONVERSION; | 354 | val->intval *= TIME_UNIT_CONVERSION; |
276 | break; | 355 | break; |
277 | 356 | ||
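The conversions above line the gauge's native units up with what the power-supply class expects: energy arrives in 10 mWh units (×10000 to µWh), voltage/current/charge in mV/mA/mAh (×1000), temperature in 0.1 K (subtract 2731 for 0.1 °C), and the time registers in minutes. Two of these are the actual bug fixes in this hunk: the old time factor of 600 reported durations ten times too long, and the old extra ×10 on temperature overstated it, since the value is already in tenths after the subtraction. A tiny self-checking example with made-up register values:

    #include <assert.h>

    int main(void)
    {
            assert(512 * 10 * 1000 == 5120000); /* 10 mWh units -> uWh */
            assert(2981 - 2731 == 250);         /* 0.1 K -> 0.1 degC (25.0 degC) */
            assert(90 * 60 == 5400);            /* minutes -> seconds */
            return 0;
    }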
@@ -281,11 +360,44 @@ static void bq20z75_unit_adjustment(struct i2c_client *client, | |||
281 | } | 360 | } |
282 | } | 361 | } |
283 | 362 | ||
363 | static enum bq20z75_battery_mode | ||
364 | bq20z75_set_battery_mode(struct i2c_client *client, | ||
365 | enum bq20z75_battery_mode mode) | ||
366 | { | ||
367 | int ret, original_val; | ||
368 | |||
369 | original_val = bq20z75_read_word_data(client, BATTERY_MODE_OFFSET); | ||
370 | if (original_val < 0) | ||
371 | return original_val; | ||
372 | |||
373 | if ((original_val & BATTERY_MODE_MASK) == mode) | ||
374 | return mode; | ||
375 | |||
376 | if (mode == BATTERY_MODE_AMPS) | ||
377 | ret = original_val & ~BATTERY_MODE_MASK; | ||
378 | else | ||
379 | ret = original_val | BATTERY_MODE_MASK; | ||
380 | |||
381 | ret = bq20z75_write_word_data(client, BATTERY_MODE_OFFSET, ret); | ||
382 | if (ret < 0) | ||
383 | return ret; | ||
384 | |||
385 | return original_val & BATTERY_MODE_MASK; | ||
386 | } | ||
387 | |||
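bq20z75_set_battery_mode() is a read-modify-write with memory: it reads BatteryMode, flips the capacity-units bit only if it differs from the requested mode, and returns the mode that was previously active so the caller can restore it afterwards. The general save/flip/restore idiom, with hypothetical reg_read()/reg_write() accessors:

    /* Save/flip/restore one mode bit; reg_read()/reg_write() are
     * hypothetical stand-ins for the driver's word accessors. */
    static int set_mode_bit(int mask, int want_set,
                            int (*reg_read)(void), int (*reg_write)(int))
    {
            int old = reg_read();

            if (old < 0)
                    return old;
            if (!!(old & mask) == !!want_set)
                    return old & mask;      /* nothing to do */
            if (reg_write(want_set ? (old | mask) : (old & ~mask)) < 0)
                    return -1;
            return old & mask;      /* previous mode, for the restore */
    }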
284 | static int bq20z75_get_battery_capacity(struct i2c_client *client, | 388 | static int bq20z75_get_battery_capacity(struct i2c_client *client, |
285 | int reg_offset, enum power_supply_property psp, | 389 | int reg_offset, enum power_supply_property psp, |
286 | union power_supply_propval *val) | 390 | union power_supply_propval *val) |
287 | { | 391 | { |
288 | s32 ret; | 392 | s32 ret; |
393 | enum bq20z75_battery_mode mode = BATTERY_MODE_WATTS; | ||
394 | |||
395 | if (power_supply_is_amp_property(psp)) | ||
396 | mode = BATTERY_MODE_AMPS; | ||
397 | |||
398 | mode = bq20z75_set_battery_mode(client, mode); | ||
399 | if (mode < 0) | ||
400 | return mode; | ||
289 | 401 | ||
290 | ret = bq20z75_read_word_data(client, bq20z75_data[reg_offset].addr); | 402 | ret = bq20z75_read_word_data(client, bq20z75_data[reg_offset].addr); |
291 | if (ret < 0) | 403 | if (ret < 0) |
@@ -298,6 +410,10 @@ static int bq20z75_get_battery_capacity(struct i2c_client *client, | |||
298 | } else | 410 | } else |
299 | val->intval = ret; | 411 | val->intval = ret; |
300 | 412 | ||
413 | ret = bq20z75_set_battery_mode(client, mode); | ||
414 | if (ret < 0) | ||
415 | return ret; | ||
416 | |||
301 | return 0; | 417 | return 0; |
302 | } | 418 | } |
303 | 419 | ||
@@ -318,12 +434,25 @@ static int bq20z75_get_battery_serial_number(struct i2c_client *client, | |||
318 | return 0; | 434 | return 0; |
319 | } | 435 | } |
320 | 436 | ||
437 | static int bq20z75_get_property_index(struct i2c_client *client, | ||
438 | enum power_supply_property psp) | ||
439 | { | ||
440 | int count; | ||
441 | for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++) | ||
442 | if (psp == bq20z75_data[count].psp) | ||
443 | return count; | ||
444 | |||
445 | dev_warn(&client->dev, | ||
446 | "%s: Invalid Property - %d\n", __func__, psp); | ||
447 | |||
448 | return -EINVAL; | ||
449 | } | ||
450 | |||
321 | static int bq20z75_get_property(struct power_supply *psy, | 451 | static int bq20z75_get_property(struct power_supply *psy, |
322 | enum power_supply_property psp, | 452 | enum power_supply_property psp, |
323 | union power_supply_propval *val) | 453 | union power_supply_propval *val) |
324 | { | 454 | { |
325 | int count; | 455 | int ret = 0; |
326 | int ret; | ||
327 | struct bq20z75_info *bq20z75_device = container_of(psy, | 456 | struct bq20z75_info *bq20z75_device = container_of(psy, |
328 | struct bq20z75_info, power_supply); | 457 | struct bq20z75_info, power_supply); |
329 | struct i2c_client *client = bq20z75_device->client; | 458 | struct i2c_client *client = bq20z75_device->client; |
@@ -332,8 +461,8 @@ static int bq20z75_get_property(struct power_supply *psy, | |||
332 | case POWER_SUPPLY_PROP_PRESENT: | 461 | case POWER_SUPPLY_PROP_PRESENT: |
333 | case POWER_SUPPLY_PROP_HEALTH: | 462 | case POWER_SUPPLY_PROP_HEALTH: |
334 | ret = bq20z75_get_battery_presence_and_health(client, psp, val); | 463 | ret = bq20z75_get_battery_presence_and_health(client, psp, val); |
335 | if (ret) | 464 | if (psp == POWER_SUPPLY_PROP_PRESENT) |
336 | return ret; | 465 | return 0; |
337 | break; | 466 | break; |
338 | 467 | ||
339 | case POWER_SUPPLY_PROP_TECHNOLOGY: | 468 | case POWER_SUPPLY_PROP_TECHNOLOGY: |
@@ -343,22 +472,19 @@ static int bq20z75_get_property(struct power_supply *psy, | |||
343 | case POWER_SUPPLY_PROP_ENERGY_NOW: | 472 | case POWER_SUPPLY_PROP_ENERGY_NOW: |
344 | case POWER_SUPPLY_PROP_ENERGY_FULL: | 473 | case POWER_SUPPLY_PROP_ENERGY_FULL: |
345 | case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: | 474 | case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: |
475 | case POWER_SUPPLY_PROP_CHARGE_NOW: | ||
476 | case POWER_SUPPLY_PROP_CHARGE_FULL: | ||
477 | case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: | ||
346 | case POWER_SUPPLY_PROP_CAPACITY: | 478 | case POWER_SUPPLY_PROP_CAPACITY: |
347 | for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++) { | 479 | ret = bq20z75_get_property_index(client, psp); |
348 | if (psp == bq20z75_data[count].psp) | 480 | if (ret < 0) |
349 | break; | 481 | break; |
350 | } | ||
351 | |||
352 | ret = bq20z75_get_battery_capacity(client, count, psp, val); | ||
353 | if (ret) | ||
354 | return ret; | ||
355 | 482 | ||
483 | ret = bq20z75_get_battery_capacity(client, ret, psp, val); | ||
356 | break; | 484 | break; |
357 | 485 | ||
358 | case POWER_SUPPLY_PROP_SERIAL_NUMBER: | 486 | case POWER_SUPPLY_PROP_SERIAL_NUMBER: |
359 | ret = bq20z75_get_battery_serial_number(client, val); | 487 | ret = bq20z75_get_battery_serial_number(client, val); |
360 | if (ret) | ||
361 | return ret; | ||
362 | break; | 488 | break; |
363 | 489 | ||
364 | case POWER_SUPPLY_PROP_STATUS: | 490 | case POWER_SUPPLY_PROP_STATUS: |
@@ -369,15 +495,11 @@ static int bq20z75_get_property(struct power_supply *psy, | |||
369 | case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: | 495 | case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: |
370 | case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: | 496 | case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: |
371 | case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: | 497 | case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: |
372 | for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++) { | 498 | ret = bq20z75_get_property_index(client, psp); |
373 | if (psp == bq20z75_data[count].psp) | 499 | if (ret < 0) |
374 | break; | 500 | break; |
375 | } | ||
376 | |||
377 | ret = bq20z75_get_battery_property(client, count, psp, val); | ||
378 | if (ret) | ||
379 | return ret; | ||
380 | 501 | ||
502 | ret = bq20z75_get_battery_property(client, ret, psp, val); | ||
381 | break; | 503 | break; |
382 | 504 | ||
383 | default: | 505 | default: |
@@ -386,26 +508,58 @@ static int bq20z75_get_property(struct power_supply *psy, | |||
386 | return -EINVAL; | 508 | return -EINVAL; |
387 | } | 509 | } |
388 | 510 | ||
389 | /* Convert units to match requirements for power supply class */ | 511 | if (!bq20z75_device->enable_detection) |
390 | bq20z75_unit_adjustment(client, psp, val); | 512 | goto done; |
513 | |||
514 | if (!bq20z75_device->gpio_detect && | ||
515 | bq20z75_device->is_present != (ret >= 0)) { | ||
516 | bq20z75_device->is_present = (ret >= 0); | ||
517 | power_supply_changed(&bq20z75_device->power_supply); | ||
518 | } | ||
519 | |||
520 | done: | ||
521 | if (!ret) { | ||
522 | /* Convert units to match requirements for power supply class */ | ||
523 | bq20z75_unit_adjustment(client, psp, val); | ||
524 | } | ||
391 | 525 | ||
392 | dev_dbg(&client->dev, | 526 | dev_dbg(&client->dev, |
393 | "%s: property = %d, value = %d\n", __func__, psp, val->intval); | 527 | "%s: property = %d, value = %x\n", __func__, psp, val->intval); |
528 | |||
529 | if (ret && bq20z75_device->is_present) | ||
530 | return ret; | ||
531 | |||
532 | /* battery not present, so return NODATA for properties */ | ||
533 | if (ret) | ||
534 | return -ENODATA; | ||
394 | 535 | ||
395 | return 0; | 536 | return 0; |
396 | } | 537 | } |
397 | 538 | ||
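The tail of bq20z75_get_property() now encodes a presence policy rather than passing raw I2C status through: once enable_detection is set, a read result doubles as a liveness probe, updating is_present (and firing power_supply_changed() on a transition) when no detect GPIO exists, and errors from an absent battery are reported as -ENODATA so user space sees empty attributes instead of I/O errors. Condensed:

    #include <errno.h>

    /* ret: raw bus status, present: cached presence flag */
    static int filter_result(int ret, int present)
    {
            if (!ret)
                    return 0;       /* good read */
            if (present)
                    return ret;     /* genuine error, battery is there */
            return -ENODATA;        /* battery absent: report "no data" */
    }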
398 | static int bq20z75_probe(struct i2c_client *client, | 539 | static irqreturn_t bq20z75_irq(int irq, void *devid) |
540 | { | ||
541 | struct power_supply *battery = devid; | ||
542 | |||
543 | power_supply_changed(battery); | ||
544 | |||
545 | return IRQ_HANDLED; | ||
546 | } | ||
547 | |||
548 | static int __devinit bq20z75_probe(struct i2c_client *client, | ||
399 | const struct i2c_device_id *id) | 549 | const struct i2c_device_id *id) |
400 | { | 550 | { |
401 | struct bq20z75_info *bq20z75_device; | 551 | struct bq20z75_info *bq20z75_device; |
552 | struct bq20z75_platform_data *pdata = client->dev.platform_data; | ||
402 | int rc; | 553 | int rc; |
554 | int irq; | ||
403 | 555 | ||
404 | bq20z75_device = kzalloc(sizeof(struct bq20z75_info), GFP_KERNEL); | 556 | bq20z75_device = kzalloc(sizeof(struct bq20z75_info), GFP_KERNEL); |
405 | if (!bq20z75_device) | 557 | if (!bq20z75_device) |
406 | return -ENOMEM; | 558 | return -ENOMEM; |
407 | 559 | ||
408 | bq20z75_device->client = client; | 560 | bq20z75_device->client = client; |
561 | bq20z75_device->enable_detection = false; | ||
562 | bq20z75_device->gpio_detect = false; | ||
409 | bq20z75_device->power_supply.name = "battery"; | 563 | bq20z75_device->power_supply.name = "battery"; |
410 | bq20z75_device->power_supply.type = POWER_SUPPLY_TYPE_BATTERY; | 564 | bq20z75_device->power_supply.type = POWER_SUPPLY_TYPE_BATTERY; |
411 | bq20z75_device->power_supply.properties = bq20z75_properties; | 565 | bq20z75_device->power_supply.properties = bq20z75_properties; |
@@ -413,26 +567,86 @@ static int bq20z75_probe(struct i2c_client *client, | |||
413 | ARRAY_SIZE(bq20z75_properties); | 567 | ARRAY_SIZE(bq20z75_properties); |
414 | bq20z75_device->power_supply.get_property = bq20z75_get_property; | 568 | bq20z75_device->power_supply.get_property = bq20z75_get_property; |
415 | 569 | ||
570 | if (pdata) { | ||
571 | bq20z75_device->gpio_detect = | ||
572 | gpio_is_valid(pdata->battery_detect); | ||
573 | bq20z75_device->pdata = pdata; | ||
574 | } | ||
575 | |||
416 | i2c_set_clientdata(client, bq20z75_device); | 576 | i2c_set_clientdata(client, bq20z75_device); |
417 | 577 | ||
578 | if (!bq20z75_device->gpio_detect) | ||
579 | goto skip_gpio; | ||
580 | |||
581 | rc = gpio_request(pdata->battery_detect, dev_name(&client->dev)); | ||
582 | if (rc) { | ||
583 | dev_warn(&client->dev, "Failed to request gpio: %d\n", rc); | ||
584 | bq20z75_device->gpio_detect = false; | ||
585 | goto skip_gpio; | ||
586 | } | ||
587 | |||
588 | rc = gpio_direction_input(pdata->battery_detect); | ||
589 | if (rc) { | ||
590 | dev_warn(&client->dev, "Failed to get gpio as input: %d\n", rc); | ||
591 | gpio_free(pdata->battery_detect); | ||
592 | bq20z75_device->gpio_detect = false; | ||
593 | goto skip_gpio; | ||
594 | } | ||
595 | |||
596 | irq = gpio_to_irq(pdata->battery_detect); | ||
597 | if (irq <= 0) { | ||
598 | dev_warn(&client->dev, "Failed to get gpio as irq: %d\n", irq); | ||
599 | gpio_free(pdata->battery_detect); | ||
600 | bq20z75_device->gpio_detect = false; | ||
601 | goto skip_gpio; | ||
602 | } | ||
603 | |||
604 | rc = request_irq(irq, bq20z75_irq, | ||
605 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | ||
606 | dev_name(&client->dev), &bq20z75_device->power_supply); | ||
607 | if (rc) { | ||
608 | dev_warn(&client->dev, "Failed to request irq: %d\n", rc); | ||
609 | gpio_free(pdata->battery_detect); | ||
610 | bq20z75_device->gpio_detect = false; | ||
611 | goto skip_gpio; | ||
612 | } | ||
613 | |||
614 | bq20z75_device->irq = irq; | ||
615 | |||
616 | skip_gpio: | ||
617 | |||
418 | rc = power_supply_register(&client->dev, &bq20z75_device->power_supply); | 618 | rc = power_supply_register(&client->dev, &bq20z75_device->power_supply); |
419 | if (rc) { | 619 | if (rc) { |
420 | dev_err(&client->dev, | 620 | dev_err(&client->dev, |
421 | "%s: Failed to register power supply\n", __func__); | 621 | "%s: Failed to register power supply\n", __func__); |
422 | kfree(bq20z75_device); | 622 | goto exit_psupply; |
423 | return rc; | ||
424 | } | 623 | } |
425 | 624 | ||
426 | dev_info(&client->dev, | 625 | dev_info(&client->dev, |
427 | "%s: battery gas gauge device registered\n", client->name); | 626 | "%s: battery gas gauge device registered\n", client->name); |
428 | 627 | ||
429 | return 0; | 628 | return 0; |
629 | |||
630 | exit_psupply: | ||
631 | if (bq20z75_device->irq) | ||
632 | free_irq(bq20z75_device->irq, &bq20z75_device->power_supply); | ||
633 | if (bq20z75_device->gpio_detect) | ||
634 | gpio_free(pdata->battery_detect); | ||
635 | |||
636 | kfree(bq20z75_device); | ||
637 | |||
638 | return rc; | ||
430 | } | 639 | } |
431 | 640 | ||
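Everything GPIO-related in probe is best-effort: if requesting the pin, switching it to input, mapping it to an IRQ, or requesting the IRQ fails, the code logs a warning, releases whatever it already took, clears gpio_detect and jumps to skip_gpio, so the gauge still registers and presence falls back to the I2C probe path. Only power_supply_register() failure aborts, and its unwind frees the IRQ and GPIO conditionally. The degrade-gracefully shape in isolation (try_gpio()/try_irq()/release_gpio() are hypothetical helpers):

    int try_gpio(void);
    int try_irq(void);
    void release_gpio(void);

    /* Optional feature setup: failures disable the feature,
     * they never fail the caller. */
    static void setup_detect(int *have_detect)
    {
            *have_detect = 0;
            if (try_gpio() < 0)
                    return;                 /* run without detection */
            if (try_irq() < 0) {
                    release_gpio();         /* undo the partial setup */
                    return;
            }
            *have_detect = 1;
    }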
432 | static int bq20z75_remove(struct i2c_client *client) | 641 | static int __devexit bq20z75_remove(struct i2c_client *client) |
433 | { | 642 | { |
434 | struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client); | 643 | struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client); |
435 | 644 | ||
645 | if (bq20z75_device->irq) | ||
646 | free_irq(bq20z75_device->irq, &bq20z75_device->power_supply); | ||
647 | if (bq20z75_device->gpio_detect) | ||
648 | gpio_free(bq20z75_device->pdata->battery_detect); | ||
649 | |||
436 | power_supply_unregister(&bq20z75_device->power_supply); | 650 | power_supply_unregister(&bq20z75_device->power_supply); |
437 | kfree(bq20z75_device); | 651 | kfree(bq20z75_device); |
438 | bq20z75_device = NULL; | 652 | bq20z75_device = NULL; |
@@ -444,13 +658,14 @@ static int bq20z75_remove(struct i2c_client *client) | |||
444 | static int bq20z75_suspend(struct i2c_client *client, | 658 | static int bq20z75_suspend(struct i2c_client *client, |
445 | pm_message_t state) | 659 | pm_message_t state) |
446 | { | 660 | { |
661 | struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client); | ||
447 | s32 ret; | 662 | s32 ret; |
448 | 663 | ||
449 | /* write to manufacturer access with sleep command */ | 664 | /* write to manufacturer access with sleep command */ |
450 | ret = bq20z75_write_word_data(client, | 665 | ret = bq20z75_write_word_data(client, |
451 | bq20z75_data[REG_MANUFACTURER_DATA].addr, | 666 | bq20z75_data[REG_MANUFACTURER_DATA].addr, |
452 | MANUFACTURER_ACCESS_SLEEP); | 667 | MANUFACTURER_ACCESS_SLEEP); |
453 | if (ret < 0) | 668 | if (bq20z75_device->is_present && ret < 0) |
454 | return ret; | 669 | return ret; |
455 | 670 | ||
456 | return 0; | 671 | return 0; |
@@ -465,10 +680,11 @@ static const struct i2c_device_id bq20z75_id[] = { | |||
465 | { "bq20z75", 0 }, | 680 | { "bq20z75", 0 }, |
466 | {} | 681 | {} |
467 | }; | 682 | }; |
683 | MODULE_DEVICE_TABLE(i2c, bq20z75_id); | ||
468 | 684 | ||
469 | static struct i2c_driver bq20z75_battery_driver = { | 685 | static struct i2c_driver bq20z75_battery_driver = { |
470 | .probe = bq20z75_probe, | 686 | .probe = bq20z75_probe, |
471 | .remove = bq20z75_remove, | 687 | .remove = __devexit_p(bq20z75_remove), |
472 | .suspend = bq20z75_suspend, | 688 | .suspend = bq20z75_suspend, |
473 | .resume = bq20z75_resume, | 689 | .resume = bq20z75_resume, |
474 | .id_table = bq20z75_id, | 690 | .id_table = bq20z75_id, |
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c index eff0273d403..59e68dbd028 100644 --- a/drivers/power/bq27x00_battery.c +++ b/drivers/power/bq27x00_battery.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it> | 4 | * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it> |
5 | * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it> | 5 | * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it> |
6 | * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de> | ||
6 | * | 7 | * |
7 | * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. | 8 | * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. |
8 | * | 9 | * |
@@ -15,6 +16,13 @@ | |||
15 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | 16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. |
16 | * | 17 | * |
17 | */ | 18 | */ |
19 | |||
20 | /* | ||
21 | * Datasheets: | ||
22 | * http://focus.ti.com/docs/prod/folders/print/bq27000.html | ||
23 | * http://focus.ti.com/docs/prod/folders/print/bq27500.html | ||
24 | */ | ||
25 | |||
18 | #include <linux/module.h> | 26 | #include <linux/module.h> |
19 | #include <linux/param.h> | 27 | #include <linux/param.h> |
20 | #include <linux/jiffies.h> | 28 | #include <linux/jiffies.h> |
@@ -27,7 +35,9 @@ | |||
27 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
28 | #include <asm/unaligned.h> | 36 | #include <asm/unaligned.h> |
29 | 37 | ||
30 | #define DRIVER_VERSION "1.1.0" | 38 | #include <linux/power/bq27x00_battery.h> |
39 | |||
40 | #define DRIVER_VERSION "1.2.0" | ||
31 | 41 | ||
32 | #define BQ27x00_REG_TEMP 0x06 | 42 | #define BQ27x00_REG_TEMP 0x06 |
33 | #define BQ27x00_REG_VOLT 0x08 | 43 | #define BQ27x00_REG_VOLT 0x08 |
@@ -36,36 +46,59 @@ | |||
36 | #define BQ27x00_REG_TTE 0x16 | 46 | #define BQ27x00_REG_TTE 0x16 |
37 | #define BQ27x00_REG_TTF 0x18 | 47 | #define BQ27x00_REG_TTF 0x18 |
38 | #define BQ27x00_REG_TTECP 0x26 | 48 | #define BQ27x00_REG_TTECP 0x26 |
49 | #define BQ27x00_REG_NAC 0x0C /* Nominal available capacity */ | ||
50 | #define BQ27x00_REG_LMD 0x12 /* Last measured discharge */ | ||
51 | #define BQ27x00_REG_CYCT 0x2A /* Cycle count total */ | ||
52 | #define BQ27x00_REG_AE 0x22 /* Available energy */ | ||
39 | 53 | ||
40 | #define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */ | 54 | #define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */ |
55 | #define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */ | ||
41 | #define BQ27000_FLAG_CHGS BIT(7) | 56 | #define BQ27000_FLAG_CHGS BIT(7) |
57 | #define BQ27000_FLAG_FC BIT(5) | ||
42 | 58 | ||
43 | #define BQ27500_REG_SOC 0x2c | 59 | #define BQ27500_REG_SOC 0x2C |
60 | #define BQ27500_REG_DCAP 0x3C /* Design capacity */ | ||
44 | #define BQ27500_FLAG_DSC BIT(0) | 61 | #define BQ27500_FLAG_DSC BIT(0) |
45 | #define BQ27500_FLAG_FC BIT(9) | 62 | #define BQ27500_FLAG_FC BIT(9) |
46 | 63 | ||
47 | /* If the system has several batteries we need a different name for each | 64 | #define BQ27000_RS 20 /* Resistor sense */ |
48 | * of them... | ||
49 | */ | ||
50 | static DEFINE_IDR(battery_id); | ||
51 | static DEFINE_MUTEX(battery_mutex); | ||
52 | 65 | ||
53 | struct bq27x00_device_info; | 66 | struct bq27x00_device_info; |
54 | struct bq27x00_access_methods { | 67 | struct bq27x00_access_methods { |
55 | int (*read)(u8 reg, int *rt_value, int b_single, | 68 | int (*read)(struct bq27x00_device_info *di, u8 reg, bool single); |
56 | struct bq27x00_device_info *di); | ||
57 | }; | 69 | }; |
58 | 70 | ||
59 | enum bq27x00_chip { BQ27000, BQ27500 }; | 71 | enum bq27x00_chip { BQ27000, BQ27500 }; |
60 | 72 | ||
73 | struct bq27x00_reg_cache { | ||
74 | int temperature; | ||
75 | int time_to_empty; | ||
76 | int time_to_empty_avg; | ||
77 | int time_to_full; | ||
78 | int charge_full; | ||
79 | int charge_counter; | ||
80 | int capacity; | ||
81 | int flags; | ||
82 | |||
83 | int current_now; | ||
84 | }; | ||
85 | |||
61 | struct bq27x00_device_info { | 86 | struct bq27x00_device_info { |
62 | struct device *dev; | 87 | struct device *dev; |
63 | int id; | 88 | int id; |
64 | struct bq27x00_access_methods *bus; | ||
65 | struct power_supply bat; | ||
66 | enum bq27x00_chip chip; | 89 | enum bq27x00_chip chip; |
67 | 90 | ||
68 | struct i2c_client *client; | 91 | struct bq27x00_reg_cache cache; |
92 | int charge_design_full; | ||
93 | |||
94 | unsigned long last_update; | ||
95 | struct delayed_work work; | ||
96 | |||
97 | struct power_supply bat; | ||
98 | |||
99 | struct bq27x00_access_methods bus; | ||
100 | |||
101 | struct mutex lock; | ||
69 | }; | 102 | }; |
70 | 103 | ||
71 | static enum power_supply_property bq27x00_battery_props[] = { | 104 | static enum power_supply_property bq27x00_battery_props[] = { |
@@ -78,164 +111,328 @@ static enum power_supply_property bq27x00_battery_props[] = { | |||
78 | POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, | 111 | POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, |
79 | POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, | 112 | POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, |
80 | POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, | 113 | POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, |
114 | POWER_SUPPLY_PROP_TECHNOLOGY, | ||
115 | POWER_SUPPLY_PROP_CHARGE_FULL, | ||
116 | POWER_SUPPLY_PROP_CHARGE_NOW, | ||
117 | POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, | ||
118 | POWER_SUPPLY_PROP_CHARGE_COUNTER, | ||
119 | POWER_SUPPLY_PROP_ENERGY_NOW, | ||
81 | }; | 120 | }; |
82 | 121 | ||
122 | static unsigned int poll_interval = 360; | ||
123 | module_param(poll_interval, uint, 0644); | ||
124 | MODULE_PARM_DESC(poll_interval, "battery poll interval in seconds - " \ | ||
125 | "0 disables polling"); | ||
126 | |||
83 | /* | 127 | /* |
84 | * Common code for BQ27x00 devices | 128 | * Common code for BQ27x00 devices |
85 | */ | 129 | */ |
86 | 130 | ||
87 | static int bq27x00_read(u8 reg, int *rt_value, int b_single, | 131 | static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg, |
88 | struct bq27x00_device_info *di) | 132 | bool single) |
89 | { | 133 | { |
90 | return di->bus->read(reg, rt_value, b_single, di); | 134 | return di->bus.read(di, reg, single); |
91 | } | 135 | } |
92 | 136 | ||
93 | /* | 137 | /* |
94 | * Return the battery temperature in tenths of degree Celsius | 138 | * Return the battery Relative State-of-Charge |
95 | * Or < 0 if something fails. | 139 | * Or < 0 if something fails. |
96 | */ | 140 | */ |
97 | static int bq27x00_battery_temperature(struct bq27x00_device_info *di) | 141 | static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di) |
98 | { | 142 | { |
99 | int ret; | 143 | int rsoc; |
100 | int temp = 0; | ||
101 | 144 | ||
102 | ret = bq27x00_read(BQ27x00_REG_TEMP, &temp, 0, di); | 145 | if (di->chip == BQ27500) |
103 | if (ret) { | 146 | rsoc = bq27x00_read(di, BQ27500_REG_SOC, false); |
104 | dev_err(di->dev, "error reading temperature\n"); | 147 | else |
105 | return ret; | 148 | rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true); |
149 | |||
150 | if (rsoc < 0) | ||
151 | dev_err(di->dev, "error reading relative State-of-Charge\n"); | ||
152 | |||
153 | return rsoc; | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * Return a battery charge value in µAh | ||
158 | * Or < 0 if something fails. | ||
159 | */ | ||
160 | static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg) | ||
161 | { | ||
162 | int charge; | ||
163 | |||
164 | charge = bq27x00_read(di, reg, false); | ||
165 | if (charge < 0) { | ||
166 | dev_err(di->dev, "error reading nominal available capacity\n"); | ||
167 | return charge; | ||
106 | } | 168 | } |
107 | 169 | ||
108 | if (di->chip == BQ27500) | 170 | if (di->chip == BQ27500) |
109 | return temp - 2731; | 171 | charge *= 1000; |
110 | else | 172 | else |
111 | return ((temp >> 2) - 273) * 10; | 173 | charge = charge * 3570 / BQ27000_RS; |
174 | |||
175 | return charge; | ||
112 | } | 176 | } |
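The scaling in bq27x00_battery_read_charge() follows the two parts' native units: the bq27500 reports charge directly in mAh, so ×1000 yields µAh, while the bq27000 counts in units of 3.57 µVh across the sense resistor, so the count is multiplied by 3570 and divided by BQ27000_RS (20, per the define above), i.e. 178.5 µAh per LSB. Multiplying before dividing keeps that half-µAh of precision in integer math. Worked numbers:

    #include <assert.h>

    int main(void)
    {
            /* bq27000: 1000 counts * 3570 / 20 = 178500 uAh */
            assert(1000 * 3570 / 20 == 178500);
            /* bq27500: 1200 mAh -> 1200000 uAh */
            assert(1200 * 1000 == 1200000);
            return 0;
    }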
113 | 177 | ||
114 | /* | 178 | /* |
115 | * Return the battery Voltage in milivolts | 179 | * Return the battery Nominal available capacity in µAh |
116 | * Or < 0 if something fails. | 180 | * Or < 0 if something fails. |
117 | */ | 181 | */ |
118 | static int bq27x00_battery_voltage(struct bq27x00_device_info *di) | 182 | static inline int bq27x00_battery_read_nac(struct bq27x00_device_info *di) |
119 | { | 183 | { |
120 | int ret; | 184 | return bq27x00_battery_read_charge(di, BQ27x00_REG_NAC); |
121 | int volt = 0; | 185 | } |
122 | 186 | ||
123 | ret = bq27x00_read(BQ27x00_REG_VOLT, &volt, 0, di); | 187 | /* |
124 | if (ret) { | 188 | * Return the battery Last measured discharge in µAh |
125 | dev_err(di->dev, "error reading voltage\n"); | 189 | * Or < 0 if something fails. |
126 | return ret; | 190 | */ |
191 | static inline int bq27x00_battery_read_lmd(struct bq27x00_device_info *di) | ||
192 | { | ||
193 | return bq27x00_battery_read_charge(di, BQ27x00_REG_LMD); | ||
194 | } | ||
195 | |||
196 | /* | ||
197 | * Return the battery Initial last measured discharge in µAh | ||
198 | * Or < 0 if something fails. | ||
199 | */ | ||
200 | static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di) | ||
201 | { | ||
202 | int ilmd; | ||
203 | |||
204 | if (di->chip == BQ27500) | ||
205 | ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false); | ||
206 | else | ||
207 | ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true); | ||
208 | |||
209 | if (ilmd < 0) { | ||
210 | dev_err(di->dev, "error reading initial last measured discharge\n"); | ||
211 | return ilmd; | ||
127 | } | 212 | } |
128 | 213 | ||
129 | return volt * 1000; | 214 | if (di->chip == BQ27500) |
215 | ilmd *= 1000; | ||
216 | else | ||
217 | ilmd = ilmd * 256 * 3570 / BQ27000_RS; | ||
218 | |||
219 | return ilmd; | ||
130 | } | 220 | } |
131 | 221 | ||
132 | /* | 222 | /* |
133 | * Return the battery average current | 223 | * Return the battery Cycle count total |
134 | * Note that current can be negative signed as well | 224 | * Or < 0 if something fails. |
135 | * Or 0 if something fails. | ||
136 | */ | 225 | */ |
137 | static int bq27x00_battery_current(struct bq27x00_device_info *di) | 226 | static int bq27x00_battery_read_cyct(struct bq27x00_device_info *di) |
138 | { | 227 | { |
139 | int ret; | 228 | int cyct; |
140 | int curr = 0; | ||
141 | int flags = 0; | ||
142 | 229 | ||
143 | ret = bq27x00_read(BQ27x00_REG_AI, &curr, 0, di); | 230 | cyct = bq27x00_read(di, BQ27x00_REG_CYCT, false); |
144 | if (ret) { | 231 | if (cyct < 0) |
145 | dev_err(di->dev, "error reading current\n"); | 232 | dev_err(di->dev, "error reading cycle count total\n"); |
146 | return 0; | 233 | |
234 | return cyct; | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * Read a time register. | ||
239 | * Return < 0 if something fails. | ||
240 | */ | ||
241 | static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg) | ||
242 | { | ||
243 | int tval; | ||
244 | |||
245 | tval = bq27x00_read(di, reg, false); | ||
246 | if (tval < 0) { | ||
247 | dev_err(di->dev, "error reading register %02x: %d\n", reg, tval); | ||
248 | return tval; | ||
147 | } | 249 | } |
148 | 250 | ||
149 | if (di->chip == BQ27500) { | 251 | if (tval == 65535) |
150 | /* bq27500 returns signed value */ | 252 | return -ENODATA; |
151 | curr = (int)(s16)curr; | 253 | |
152 | } else { | 254 | return tval * 60; |
153 | ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di); | 255 | } |
154 | if (ret < 0) { | 256 | |
155 | dev_err(di->dev, "error reading flags\n"); | 257 | static void bq27x00_update(struct bq27x00_device_info *di) |
156 | return 0; | 258 | { |
157 | } | 259 | struct bq27x00_reg_cache cache = {0, }; |
158 | if (flags & BQ27000_FLAG_CHGS) { | 260 | bool is_bq27500 = di->chip == BQ27500; |
159 | dev_dbg(di->dev, "negative current!\n"); | 261 | |
160 | curr = -curr; | 262 | cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500); |
161 | } | 263 | if (cache.flags >= 0) { |
264 | cache.capacity = bq27x00_battery_read_rsoc(di); | ||
265 | cache.temperature = bq27x00_read(di, BQ27x00_REG_TEMP, false); | ||
266 | cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE); | ||
267 | cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP); | ||
268 | cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF); | ||
269 | cache.charge_full = bq27x00_battery_read_lmd(di); | ||
270 | cache.charge_counter = bq27x00_battery_read_cyct(di); | ||
271 | |||
272 | if (!is_bq27500) | ||
273 | cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false); | ||
274 | |||
275 | /* We only have to read charge design full once */ | ||
276 | if (di->charge_design_full <= 0) | ||
277 | di->charge_design_full = bq27x00_battery_read_ilmd(di); | ||
278 | } | ||
279 | |||
280 | /* Ignore current_now which is a snapshot of the current battery state | ||
281 | * and is likely to be different even between two consecutive reads */ | ||
282 | if (memcmp(&di->cache, &cache, sizeof(cache) - sizeof(int)) != 0) { | ||
283 | di->cache = cache; | ||
284 | power_supply_changed(&di->bat); | ||
162 | } | 285 | } |
163 | 286 | ||
164 | return curr * 1000; | 287 | di->last_update = jiffies; |
288 | } | ||
289 | |||
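bq27x00_update() gathers the whole register set into a stack-local bq27x00_reg_cache and memcmp()s it against the saved copy to decide whether user space should be notified. current_now is deliberately the last member: comparing sizeof(cache) - sizeof(int) bytes excludes it, because instantaneous current differs between any two reads and would otherwise fire a notification on every poll. The trick assumes the volatile field stays last and the struct has no surprising padding. In isolation:

    #include <string.h>

    struct snapshot {
            int capacity;
            int flags;
            int current_now;        /* volatile sample: must stay last */
    };

    /* Compare everything except the trailing volatile member. */
    static int stable_fields_changed(const struct snapshot *a,
                                     const struct snapshot *b)
    {
            return memcmp(a, b, sizeof(*a) - sizeof(int)) != 0;
    }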
290 | static void bq27x00_battery_poll(struct work_struct *work) | ||
291 | { | ||
292 | struct bq27x00_device_info *di = | ||
293 | container_of(work, struct bq27x00_device_info, work.work); | ||
294 | |||
295 | bq27x00_update(di); | ||
296 | |||
297 | if (poll_interval > 0) { | ||
298 | /* The timer does not have to be accurate. */ | ||
299 | set_timer_slack(&di->work.timer, poll_interval * HZ / 4); | ||
300 | schedule_delayed_work(&di->work, poll_interval * HZ); | ||
301 | } | ||
165 | } | 302 | } |
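The poll worker reschedules itself every poll_interval seconds and, via set_timer_slack(), tells the timer subsystem the deadline may slip by up to a quarter of the interval so the wakeup can be coalesced with others. Because the parameter is declared with mode 0644, it can be changed at run time through /sys/module/bq27x00_battery/parameters/poll_interval, and 0 stops the rescheduling entirely. The slack arithmetic, spelled out:

    /* A poll scheduled for t may fire anywhere in [t, t + slack];
     * slack = interval / 4 matches the set_timer_slack() call above.
     * With the default 360 s and HZ=100 (an assumed config), that is
     * 36000 jiffies of delay with up to 9000 jiffies (90 s) of slack. */
    static unsigned long poll_slack(unsigned long interval_jiffies)
    {
            return interval_jiffies / 4;
    }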
166 | 303 | ||
304 | |||
167 | /* | 305 | /* |
168 | * Return the battery Relative State-of-Charge | 306 | * Return the battery temperature in tenths of degree Celsius |
169 | * Or < 0 if something fails. | 307 | * Or < 0 if something fails. |
170 | */ | 308 | */ |
171 | static int bq27x00_battery_rsoc(struct bq27x00_device_info *di) | 309 | static int bq27x00_battery_temperature(struct bq27x00_device_info *di, |
310 | union power_supply_propval *val) | ||
172 | { | 311 | { |
173 | int ret; | 312 | if (di->cache.temperature < 0) |
174 | int rsoc = 0; | 313 | return di->cache.temperature; |
175 | 314 | ||
176 | if (di->chip == BQ27500) | 315 | if (di->chip == BQ27500) |
177 | ret = bq27x00_read(BQ27500_REG_SOC, &rsoc, 0, di); | 316 | val->intval = di->cache.temperature - 2731; |
178 | else | 317 | else |
179 | ret = bq27x00_read(BQ27000_REG_RSOC, &rsoc, 1, di); | 318 | val->intval = ((di->cache.temperature * 5) - 5463) / 2; |
180 | if (ret) { | 319 | |
181 | dev_err(di->dev, "error reading relative State-of-Charge\n"); | 320 | return 0; |
182 | return ret; | 321 | } |
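Both branches land in 0.1 °C: the bq27500 already reports 0.1 K, so subtracting 2731 suffices, while the bq27000's TEMP register counts 0.25 K steps. The old expression ((temp >> 2) - 273) * 10 threw away the fractional kelvin before scaling; the new ((temp * 5) - 5463) / 2 is the same conversion done without truncation, since raw × 0.25 K equals raw × 2.5 in 0.1 K units and 273.15 K is 2731.5 of them. Checking with a made-up reading:

    #include <assert.h>

    int main(void)
    {
            int raw = 1193; /* 1193 * 0.25 K = 298.25 K = 25.1 degC */

            assert((raw * 5 - 5463) / 2 == 251);    /* new: 25.1 degC */
            assert(((raw >> 2) - 273) * 10 == 250); /* old: truncated */
            return 0;
    }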
322 | |||
323 | /* | ||
324 | * Return the battery average current in µA | ||
325 | * Note that current can be negative signed as well | ||
326 | * Or 0 if something fails. | ||
327 | */ | ||
328 | static int bq27x00_battery_current(struct bq27x00_device_info *di, | ||
329 | union power_supply_propval *val) | ||
330 | { | ||
331 | int curr; | ||
332 | |||
333 | if (di->chip == BQ27500) | ||
334 | curr = bq27x00_read(di, BQ27x00_REG_AI, false); | ||
335 | else | ||
336 | curr = di->cache.current_now; | ||
337 | |||
338 | if (curr < 0) | ||
339 | return curr; | ||
340 | |||
341 | if (di->chip == BQ27500) { | ||
342 | /* bq27500 returns signed value */ | ||
343 | val->intval = (int)((s16)curr) * 1000; | ||
344 | } else { | ||
345 | if (di->cache.flags & BQ27000_FLAG_CHGS) { | ||
346 | dev_dbg(di->dev, "negative current!\n"); | ||
347 | curr = -curr; | ||
348 | } | ||
349 | |||
350 | val->intval = curr * 3570 / BQ27000_RS; | ||
183 | } | 351 | } |
184 | 352 | ||
185 | return rsoc; | 353 | return 0; |
186 | } | 354 | } |
187 | 355 | ||
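Current handling differs per chip: the bq27500's AI register is a signed 16-bit mA value, hence the cast through s16 before scaling to µA, while the bq27000 reports an unsigned magnitude whose direction lives in the CHGS flag, scaled by the same 3570 / BQ27000_RS factor as the charge registers. Sign-extension check with an invented raw value:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            int curr = 0xFF38;      /* signed 16-bit: -200 mA */

            assert((int)(int16_t)curr * 1000 == -200000);   /* uA */
            assert(200 * 3570 / 20 == 35700);  /* bq27000 magnitude */
            return 0;
    }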
188 | static int bq27x00_battery_status(struct bq27x00_device_info *di, | 356 | static int bq27x00_battery_status(struct bq27x00_device_info *di, |
189 | union power_supply_propval *val) | 357 | union power_supply_propval *val) |
190 | { | 358 | { |
191 | int flags = 0; | ||
192 | int status; | 359 | int status; |
193 | int ret; | ||
194 | |||
195 | ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di); | ||
196 | if (ret < 0) { | ||
197 | dev_err(di->dev, "error reading flags\n"); | ||
198 | return ret; | ||
199 | } | ||
200 | 360 | ||
201 | if (di->chip == BQ27500) { | 361 | if (di->chip == BQ27500) { |
202 | if (flags & BQ27500_FLAG_FC) | 362 | if (di->cache.flags & BQ27500_FLAG_FC) |
203 | status = POWER_SUPPLY_STATUS_FULL; | 363 | status = POWER_SUPPLY_STATUS_FULL; |
204 | else if (flags & BQ27500_FLAG_DSC) | 364 | else if (di->cache.flags & BQ27500_FLAG_DSC) |
205 | status = POWER_SUPPLY_STATUS_DISCHARGING; | 365 | status = POWER_SUPPLY_STATUS_DISCHARGING; |
206 | else | 366 | else |
207 | status = POWER_SUPPLY_STATUS_CHARGING; | 367 | status = POWER_SUPPLY_STATUS_CHARGING; |
208 | } else { | 368 | } else { |
209 | if (flags & BQ27000_FLAG_CHGS) | 369 | if (di->cache.flags & BQ27000_FLAG_FC) |
370 | status = POWER_SUPPLY_STATUS_FULL; | ||
371 | else if (di->cache.flags & BQ27000_FLAG_CHGS) | ||
210 | status = POWER_SUPPLY_STATUS_CHARGING; | 372 | status = POWER_SUPPLY_STATUS_CHARGING; |
373 | else if (power_supply_am_i_supplied(&di->bat)) | ||
374 | status = POWER_SUPPLY_STATUS_NOT_CHARGING; | ||
211 | else | 375 | else |
212 | status = POWER_SUPPLY_STATUS_DISCHARGING; | 376 | status = POWER_SUPPLY_STATUS_DISCHARGING; |
213 | } | 377 | } |
214 | 378 | ||
215 | val->intval = status; | 379 | val->intval = status; |
380 | |||
216 | return 0; | 381 | return 0; |
217 | } | 382 | } |
218 | 383 | ||
219 | /* | 384 | /* |
220 | * Read a time register. | 385 | * Return the battery Voltage in millivolts |
221 | * Return < 0 if something fails. | 386 | * Or < 0 if something fails. |
222 | */ | 387 | */ |
223 | static int bq27x00_battery_time(struct bq27x00_device_info *di, int reg, | 388 | static int bq27x00_battery_voltage(struct bq27x00_device_info *di, |
224 | union power_supply_propval *val) | 389 | union power_supply_propval *val) |
225 | { | 390 | { |
226 | int tval = 0; | 391 | int volt; |
227 | int ret; | ||
228 | 392 | ||
229 | ret = bq27x00_read(reg, &tval, 0, di); | 393 | volt = bq27x00_read(di, BQ27x00_REG_VOLT, false); |
230 | if (ret) { | 394 | if (volt < 0) |
231 | dev_err(di->dev, "error reading register %02x\n", reg); | 395 | return volt; |
232 | return ret; | 396 | |
397 | val->intval = volt * 1000; | ||
398 | |||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | /* | ||
403 | * Return the battery Available energy in µWh | ||
404 | * Or < 0 if something fails. | ||
405 | */ | ||
406 | static int bq27x00_battery_energy(struct bq27x00_device_info *di, | ||
407 | union power_supply_propval *val) | ||
408 | { | ||
409 | int ae; | ||
410 | |||
411 | ae = bq27x00_read(di, BQ27x00_REG_AE, false); | ||
412 | if (ae < 0) { | ||
413 | dev_err(di->dev, "error reading available energy\n"); | ||
414 | return ae; | ||
233 | } | 415 | } |
234 | 416 | ||
235 | if (tval == 65535) | 417 | if (di->chip == BQ27500) |
236 | return -ENODATA; | 418 | ae *= 1000; |
419 | else | ||
420 | ae = ae * 29200 / BQ27000_RS; | ||
421 | |||
422 | val->intval = ae; | ||
423 | |||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | |||
428 | static int bq27x00_simple_value(int value, | ||
429 | union power_supply_propval *val) | ||
430 | { | ||
431 | if (value < 0) | ||
432 | return value; | ||
433 | |||
434 | val->intval = value; | ||
237 | 435 | ||
238 | val->intval = tval * 60; | ||
239 | return 0; | 436 | return 0; |
240 | } | 437 | } |
241 | 438 | ||
@@ -249,33 +446,61 @@ static int bq27x00_battery_get_property(struct power_supply *psy, | |||
249 | int ret = 0; | 446 | int ret = 0; |
250 | struct bq27x00_device_info *di = to_bq27x00_device_info(psy); | 447 | struct bq27x00_device_info *di = to_bq27x00_device_info(psy); |
251 | 448 | ||
449 | mutex_lock(&di->lock); | ||
450 | if (time_is_before_jiffies(di->last_update + 5 * HZ)) { | ||
451 | cancel_delayed_work_sync(&di->work); | ||
452 | bq27x00_battery_poll(&di->work.work); | ||
453 | } | ||
454 | mutex_unlock(&di->lock); | ||
455 | |||
456 | if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0) | ||
457 | return -ENODEV; | ||
458 | |||
252 | switch (psp) { | 459 | switch (psp) { |
253 | case POWER_SUPPLY_PROP_STATUS: | 460 | case POWER_SUPPLY_PROP_STATUS: |
254 | ret = bq27x00_battery_status(di, val); | 461 | ret = bq27x00_battery_status(di, val); |
255 | break; | 462 | break; |
256 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: | 463 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: |
464 | ret = bq27x00_battery_voltage(di, val); | ||
465 | break; | ||
257 | case POWER_SUPPLY_PROP_PRESENT: | 466 | case POWER_SUPPLY_PROP_PRESENT: |
258 | val->intval = bq27x00_battery_voltage(di); | 467 | val->intval = di->cache.flags < 0 ? 0 : 1; |
259 | if (psp == POWER_SUPPLY_PROP_PRESENT) | ||
260 | val->intval = val->intval <= 0 ? 0 : 1; | ||
261 | break; | 468 | break; |
262 | case POWER_SUPPLY_PROP_CURRENT_NOW: | 469 | case POWER_SUPPLY_PROP_CURRENT_NOW: |
263 | val->intval = bq27x00_battery_current(di); | 470 | ret = bq27x00_battery_current(di, val); |
264 | break; | 471 | break; |
265 | case POWER_SUPPLY_PROP_CAPACITY: | 472 | case POWER_SUPPLY_PROP_CAPACITY: |
266 | val->intval = bq27x00_battery_rsoc(di); | 473 | ret = bq27x00_simple_value(di->cache.capacity, val); |
267 | break; | 474 | break; |
268 | case POWER_SUPPLY_PROP_TEMP: | 475 | case POWER_SUPPLY_PROP_TEMP: |
269 | val->intval = bq27x00_battery_temperature(di); | 476 | ret = bq27x00_battery_temperature(di, val); |
270 | break; | 477 | break; |
271 | case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: | 478 | case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: |
272 | ret = bq27x00_battery_time(di, BQ27x00_REG_TTE, val); | 479 | ret = bq27x00_simple_value(di->cache.time_to_empty, val); |
273 | break; | 480 | break; |
274 | case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: | 481 | case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: |
275 | ret = bq27x00_battery_time(di, BQ27x00_REG_TTECP, val); | 482 | ret = bq27x00_simple_value(di->cache.time_to_empty_avg, val); |
276 | break; | 483 | break; |
277 | case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: | 484 | case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: |
278 | ret = bq27x00_battery_time(di, BQ27x00_REG_TTF, val); | 485 | ret = bq27x00_simple_value(di->cache.time_to_full, val); |
486 | break; | ||
487 | case POWER_SUPPLY_PROP_TECHNOLOGY: | ||
488 | val->intval = POWER_SUPPLY_TECHNOLOGY_LION; | ||
489 | break; | ||
490 | case POWER_SUPPLY_PROP_CHARGE_NOW: | ||
491 | ret = bq27x00_simple_value(bq27x00_battery_read_nac(di), val); | ||
492 | break; | ||
493 | case POWER_SUPPLY_PROP_CHARGE_FULL: | ||
494 | ret = bq27x00_simple_value(di->cache.charge_full, val); | ||
495 | break; | ||
496 | case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: | ||
497 | ret = bq27x00_simple_value(di->charge_design_full, val); | ||
498 | break; | ||
499 | case POWER_SUPPLY_PROP_CHARGE_COUNTER: | ||
500 | ret = bq27x00_simple_value(di->cache.charge_counter, val); | ||
501 | break; | ||
502 | case POWER_SUPPLY_PROP_ENERGY_NOW: | ||
503 | ret = bq27x00_battery_energy(di, val); | ||
279 | break; | 504 | break; |
280 | default: | 505 | default: |
281 | return -EINVAL; | 506 | return -EINVAL; |
@@ -284,56 +509,91 @@ static int bq27x00_battery_get_property(struct power_supply *psy, | |||
284 | return ret; | 509 | return ret; |
285 | } | 510 | } |
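Every property read is now fronted by a staleness gate: if the cache is older than five seconds, the pending delayed work is cancelled and the poll runs synchronously under di->lock, so a burst of sysfs reads costs one bus sweep instead of one per attribute; time_is_before_jiffies() keeps the comparison safe across jiffies wraparound. The gate in isolation:

    /* Refresh at most once per MAX_AGE ticks; unsigned subtraction
     * keeps the comparison correct across counter wraparound. */
    #define MAX_AGE 5

    static void maybe_refresh(unsigned long now, unsigned long *last,
                              void (*refresh)(void))
    {
            if (now - *last >= MAX_AGE) {
                    refresh();
                    *last = now;
            }
    }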
286 | 511 | ||
287 | static void bq27x00_powersupply_init(struct bq27x00_device_info *di) | 512 | static void bq27x00_external_power_changed(struct power_supply *psy) |
288 | { | 513 | { |
514 | struct bq27x00_device_info *di = to_bq27x00_device_info(psy); | ||
515 | |||
516 | cancel_delayed_work_sync(&di->work); | ||
517 | schedule_delayed_work(&di->work, 0); | ||
518 | } | ||
519 | |||
520 | static int bq27x00_powersupply_init(struct bq27x00_device_info *di) | ||
521 | { | ||
522 | int ret; | ||
523 | |||
289 | di->bat.type = POWER_SUPPLY_TYPE_BATTERY; | 524 | di->bat.type = POWER_SUPPLY_TYPE_BATTERY; |
290 | di->bat.properties = bq27x00_battery_props; | 525 | di->bat.properties = bq27x00_battery_props; |
291 | di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props); | 526 | di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props); |
292 | di->bat.get_property = bq27x00_battery_get_property; | 527 | di->bat.get_property = bq27x00_battery_get_property; |
293 | di->bat.external_power_changed = NULL; | 528 | di->bat.external_power_changed = bq27x00_external_power_changed; |
529 | |||
530 | INIT_DELAYED_WORK(&di->work, bq27x00_battery_poll); | ||
531 | mutex_init(&di->lock); | ||
532 | |||
533 | ret = power_supply_register(di->dev, &di->bat); | ||
534 | if (ret) { | ||
535 | dev_err(di->dev, "failed to register battery: %d\n", ret); | ||
536 | return ret; | ||
537 | } | ||
538 | |||
539 | dev_info(di->dev, "support ver. %s enabled\n", DRIVER_VERSION); | ||
540 | |||
541 | bq27x00_update(di); | ||
542 | |||
543 | return 0; | ||
294 | } | 544 | } |
295 | 545 | ||
296 | /* | 546 | static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di) |
297 | * i2c specific code | 547 | { |
548 | cancel_delayed_work_sync(&di->work); | ||
549 | |||
550 | power_supply_unregister(&di->bat); | ||
551 | |||
552 | mutex_destroy(&di->lock); | ||
553 | } | ||
554 | |||
555 | |||
556 | /* i2c specific code */ | ||
557 | #ifdef CONFIG_BATTERY_BQ27X00_I2C | ||
558 | |||
559 | /* If the system has several batteries we need a different name for each | ||
560 | * of them... | ||
298 | */ | 561 | */ |
562 | static DEFINE_IDR(battery_id); | ||
563 | static DEFINE_MUTEX(battery_mutex); | ||
299 | 564 | ||
300 | static int bq27x00_read_i2c(u8 reg, int *rt_value, int b_single, | 565 | static int bq27x00_read_i2c(struct bq27x00_device_info *di, u8 reg, bool single) |
301 | struct bq27x00_device_info *di) | ||
302 | { | 566 | { |
303 | struct i2c_client *client = di->client; | 567 | struct i2c_client *client = to_i2c_client(di->dev); |
304 | struct i2c_msg msg[1]; | 568 | struct i2c_msg msg[2]; |
305 | unsigned char data[2]; | 569 | unsigned char data[2]; |
306 | int err; | 570 | int ret; |
307 | 571 | ||
308 | if (!client->adapter) | 572 | if (!client->adapter) |
309 | return -ENODEV; | 573 | return -ENODEV; |
310 | 574 | ||
311 | msg->addr = client->addr; | 575 | msg[0].addr = client->addr; |
312 | msg->flags = 0; | 576 | msg[0].flags = 0; |
313 | msg->len = 1; | 577 | msg[0].buf = &reg; |
314 | msg->buf = data; | 578 | msg[0].len = sizeof(reg); |
315 | 579 | msg[1].addr = client->addr; | |
316 | data[0] = reg; | 580 | msg[1].flags = I2C_M_RD; |
317 | err = i2c_transfer(client->adapter, msg, 1); | 581 | msg[1].buf = data; |
582 | if (single) | ||
583 | msg[1].len = 1; | ||
584 | else | ||
585 | msg[1].len = 2; | ||
318 | 586 | ||
319 | if (err >= 0) { | 587 | ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); |
320 | if (!b_single) | 588 | if (ret < 0) |
321 | msg->len = 2; | 589 | return ret; |
322 | else | ||
323 | msg->len = 1; | ||
324 | 590 | ||
325 | msg->flags = I2C_M_RD; | 591 | if (!single) |
326 | err = i2c_transfer(client->adapter, msg, 1); | 592 | ret = get_unaligned_le16(data); |
327 | if (err >= 0) { | 593 | else |
328 | if (!b_single) | 594 | ret = data[0]; |
329 | *rt_value = get_unaligned_le16(data); | ||
330 | else | ||
331 | *rt_value = data[0]; | ||
332 | 595 | ||
333 | return 0; | 596 | return ret; |
334 | } | ||
335 | } | ||
336 | return err; | ||
337 | } | 597 | } |
338 | 598 | ||
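The old read helper issued two independent i2c_transfer() calls, one to write the register index and one to read the data, leaving a window where another transaction could land between them. The rewrite packs both into a single i2c_transfer() of two messages, which the adapter executes under one repeated start; the word registers are little-endian, hence get_unaligned_le16(). A kernel-style sketch of the message pair (assumes <linux/i2c.h>; 0x55 is only an example address):

    static int read_word(struct i2c_adapter *adap, u8 reg, u8 data[2])
    {
            struct i2c_msg msgs[2] = {
                    { .addr = 0x55, .flags = 0,        .len = 1, .buf = &reg },
                    { .addr = 0x55, .flags = I2C_M_RD, .len = 2, .buf = data },
            };

            /* one call, repeated start in between: no interleaving */
            return i2c_transfer(adap, msgs, 2) == 2 ? 0 : -EIO;
    }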
339 | static int bq27x00_battery_probe(struct i2c_client *client, | 599 | static int bq27x00_battery_probe(struct i2c_client *client, |
@@ -341,7 +601,6 @@ static int bq27x00_battery_probe(struct i2c_client *client, | |||
341 | { | 601 | { |
342 | char *name; | 602 | char *name; |
343 | struct bq27x00_device_info *di; | 603 | struct bq27x00_device_info *di; |
344 | struct bq27x00_access_methods *bus; | ||
345 | int num; | 604 | int num; |
346 | int retval = 0; | 605 | int retval = 0; |
347 | 606 | ||
@@ -368,38 +627,20 @@ static int bq27x00_battery_probe(struct i2c_client *client, | |||
368 | retval = -ENOMEM; | 627 | retval = -ENOMEM; |
369 | goto batt_failed_2; | 628 | goto batt_failed_2; |
370 | } | 629 | } |
630 | |||
371 | di->id = num; | 631 | di->id = num; |
632 | di->dev = &client->dev; | ||
372 | di->chip = id->driver_data; | 633 | di->chip = id->driver_data; |
634 | di->bat.name = name; | ||
635 | di->bus.read = &bq27x00_read_i2c; | ||
373 | 636 | ||
374 | bus = kzalloc(sizeof(*bus), GFP_KERNEL); | 637 | if (bq27x00_powersupply_init(di)) |
375 | if (!bus) { | ||
376 | dev_err(&client->dev, "failed to allocate access method " | ||
377 | "data\n"); | ||
378 | retval = -ENOMEM; | ||
379 | goto batt_failed_3; | 638 | goto batt_failed_3; |
380 | } | ||
381 | 639 | ||
382 | i2c_set_clientdata(client, di); | 640 | i2c_set_clientdata(client, di); |
383 | di->dev = &client->dev; | ||
384 | di->bat.name = name; | ||
385 | bus->read = &bq27x00_read_i2c; | ||
386 | di->bus = bus; | ||
387 | di->client = client; | ||
388 | |||
389 | bq27x00_powersupply_init(di); | ||
390 | |||
391 | retval = power_supply_register(&client->dev, &di->bat); | ||
392 | if (retval) { | ||
393 | dev_err(&client->dev, "failed to register battery\n"); | ||
394 | goto batt_failed_4; | ||
395 | } | ||
396 | |||
397 | dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION); | ||
398 | 641 | ||
399 | return 0; | 642 | return 0; |
400 | 643 | ||
401 | batt_failed_4: | ||
402 | kfree(bus); | ||
403 | batt_failed_3: | 644 | batt_failed_3: |
404 | kfree(di); | 645 | kfree(di); |
405 | batt_failed_2: | 646 | batt_failed_2: |
@@ -416,9 +657,8 @@ static int bq27x00_battery_remove(struct i2c_client *client) | |||
416 | { | 657 | { |
417 | struct bq27x00_device_info *di = i2c_get_clientdata(client); | 658 | struct bq27x00_device_info *di = i2c_get_clientdata(client); |
418 | 659 | ||
419 | power_supply_unregister(&di->bat); | 660 | bq27x00_powersupply_unregister(di); |
420 | 661 | ||
421 | kfree(di->bus); | ||
422 | kfree(di->bat.name); | 662 | kfree(di->bat.name); |
423 | 663 | ||
424 | mutex_lock(&battery_mutex); | 664 | mutex_lock(&battery_mutex); |
@@ -430,15 +670,12 @@ static int bq27x00_battery_remove(struct i2c_client *client) | |||
430 | return 0; | 670 | return 0; |
431 | } | 671 | } |
432 | 672 | ||
433 | /* | ||
434 | * Module stuff | ||
435 | */ | ||
436 | |||
437 | static const struct i2c_device_id bq27x00_id[] = { | 673 | static const struct i2c_device_id bq27x00_id[] = { |
438 | { "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */ | 674 | { "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */ |
439 | { "bq27500", BQ27500 }, | 675 | { "bq27500", BQ27500 }, |
440 | {}, | 676 | {}, |
441 | }; | 677 | }; |
678 | MODULE_DEVICE_TABLE(i2c, bq27x00_id); | ||
442 | 679 | ||
443 | static struct i2c_driver bq27x00_battery_driver = { | 680 | static struct i2c_driver bq27x00_battery_driver = { |
444 | .driver = { | 681 | .driver = { |
@@ -449,13 +686,164 @@ static struct i2c_driver bq27x00_battery_driver = { | |||
449 | .id_table = bq27x00_id, | 686 | .id_table = bq27x00_id, |
450 | }; | 687 | }; |
451 | 688 | ||
689 | static inline int bq27x00_battery_i2c_init(void) | ||
690 | { | ||
691 | int ret = i2c_add_driver(&bq27x00_battery_driver); | ||
692 | if (ret) | ||
693 | printk(KERN_ERR "Unable to register BQ27x00 i2c driver\n"); | ||
694 | |||
695 | return ret; | ||
696 | } | ||
697 | |||
698 | static inline void bq27x00_battery_i2c_exit(void) | ||
699 | { | ||
700 | i2c_del_driver(&bq27x00_battery_driver); | ||
701 | } | ||
702 | |||
703 | #else | ||
704 | |||
705 | static inline int bq27x00_battery_i2c_init(void) { return 0; } | ||
706 | static inline void bq27x00_battery_i2c_exit(void) {}; | ||
707 | |||
708 | #endif | ||
709 | |||
710 | /* platform specific code */ | ||
711 | #ifdef CONFIG_BATTERY_BQ27X00_PLATFORM | ||
712 | |||
713 | static int bq27000_read_platform(struct bq27x00_device_info *di, u8 reg, | ||
714 | bool single) | ||
715 | { | ||
716 | struct device *dev = di->dev; | ||
717 | struct bq27000_platform_data *pdata = dev->platform_data; | ||
718 | unsigned int timeout = 3; | ||
719 | int upper, lower; | ||
720 | int temp; | ||
721 | |||
722 | if (!single) { | ||
723 | /* Make sure the value has not changed in between reading the | ||
724 | * lower and the upper part */ | ||
725 | upper = pdata->read(dev, reg + 1); | ||
726 | do { | ||
727 | temp = upper; | ||
728 | if (upper < 0) | ||
729 | return upper; | ||
730 | |||
731 | lower = pdata->read(dev, reg); | ||
732 | if (lower < 0) | ||
733 | return lower; | ||
734 | |||
735 | upper = pdata->read(dev, reg + 1); | ||
736 | } while (temp != upper && --timeout); | ||
737 | |||
738 | if (timeout == 0) | ||
739 | return -EIO; | ||
740 | |||
741 | return (upper << 8) | lower; | ||
742 | } | ||
743 | |||
744 | return pdata->read(dev, reg); | ||
745 | } | ||
746 | |||
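bq27000_read_platform() guards against torn 16-bit reads: the HDQ-style pdata->read() callback returns one byte at a time, so the gauge could update a word between the two byte fetches. Reading the upper byte, then the lower, then the upper again, and retrying (three attempts) until the two upper reads agree guarantees a consistent pair; persistent disagreement becomes -EIO. The same loop reduced to its core, with a hypothetical read8() accessor:

    /* read8() returns 0..255 or < 0 on error. */
    static int read16_consistent(int (*read8)(int reg), int reg)
    {
            int tries = 3;
            int hi, hi2, lo;

            hi2 = read8(reg + 1);
            do {
                    hi = hi2;
                    if (hi < 0)
                            return hi;
                    lo = read8(reg);
                    if (lo < 0)
                            return lo;
                    hi2 = read8(reg + 1);   /* re-check the high byte */
            } while (hi != hi2 && --tries);

            return tries ? (hi << 8) | lo : -1; /* -1: never stabilized */
    }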
747 | static int __devinit bq27000_battery_probe(struct platform_device *pdev) | ||
748 | { | ||
749 | struct bq27x00_device_info *di; | ||
750 | struct bq27000_platform_data *pdata = pdev->dev.platform_data; | ||
751 | int ret; | ||
752 | |||
753 | if (!pdata) { | ||
754 | dev_err(&pdev->dev, "no platform_data supplied\n"); | ||
755 | return -EINVAL; | ||
756 | } | ||
757 | |||
758 | if (!pdata->read) { | ||
759 | dev_err(&pdev->dev, "no hdq read callback supplied\n"); | ||
760 | return -EINVAL; | ||
761 | } | ||
762 | |||
763 | di = kzalloc(sizeof(*di), GFP_KERNEL); | ||
764 | if (!di) { | ||
765 | dev_err(&pdev->dev, "failed to allocate device info data\n"); | ||
766 | return -ENOMEM; | ||
767 | } | ||
768 | |||
769 | platform_set_drvdata(pdev, di); | ||
770 | |||
771 | di->dev = &pdev->dev; | ||
772 | di->chip = BQ27000; | ||
773 | |||
774 | di->bat.name = pdata->name ?: dev_name(&pdev->dev); | ||
775 | di->bus.read = &bq27000_read_platform; | ||
776 | |||
777 | ret = bq27x00_powersupply_init(di); | ||
778 | if (ret) | ||
779 | goto err_free; | ||
780 | |||
781 | return 0; | ||
782 | |||
783 | err_free: | ||
784 | platform_set_drvdata(pdev, NULL); | ||
785 | kfree(di); | ||
786 | |||
787 | return ret; | ||
788 | } | ||
789 | |||
790 | static int __devexit bq27000_battery_remove(struct platform_device *pdev) | ||
791 | { | ||
792 | struct bq27x00_device_info *di = platform_get_drvdata(pdev); | ||
793 | |||
794 | bq27x00_powersupply_unregister(di); | ||
795 | |||
796 | platform_set_drvdata(pdev, NULL); | ||
797 | kfree(di); | ||
798 | |||
799 | return 0; | ||
800 | } | ||
801 | |||
802 | static struct platform_driver bq27000_battery_driver = { | ||
803 | .probe = bq27000_battery_probe, | ||
804 | .remove = __devexit_p(bq27000_battery_remove), | ||
805 | .driver = { | ||
806 | .name = "bq27000-battery", | ||
807 | .owner = THIS_MODULE, | ||
808 | }, | ||
809 | }; | ||
810 | |||
811 | static inline int bq27x00_battery_platform_init(void) | ||
812 | { | ||
813 | int ret = platform_driver_register(&bq27000_battery_driver); | ||
814 | if (ret) | ||
815 | printk(KERN_ERR "Unable to register BQ27000 platform driver\n"); | ||
816 | |||
817 | return ret; | ||
818 | } | ||
819 | |||
820 | static inline void bq27x00_battery_platform_exit(void) | ||
821 | { | ||
822 | platform_driver_unregister(&bq27000_battery_driver); | ||
823 | } | ||
824 | |||
825 | #else | ||
826 | |||
827 | static inline int bq27x00_battery_platform_init(void) { return 0; } | ||
828 | static inline void bq27x00_battery_platform_exit(void) {}; | ||
829 | |||
830 | #endif | ||
831 | |||
832 | /* | ||
833 | * Module stuff | ||
834 | */ | ||
835 | |||
452 | static int __init bq27x00_battery_init(void) | 836 | static int __init bq27x00_battery_init(void) |
453 | { | 837 | { |
454 | int ret; | 838 | int ret; |
455 | 839 | ||
456 | ret = i2c_add_driver(&bq27x00_battery_driver); | 840 | ret = bq27x00_battery_i2c_init(); |
457 | if (ret) | 841 | if (ret) |
458 | printk(KERN_ERR "Unable to register BQ27x00 driver\n"); | 842 | return ret; |
843 | |||
844 | ret = bq27x00_battery_platform_init(); | ||
845 | if (ret) | ||
846 | bq27x00_battery_i2c_exit(); | ||
459 | 847 | ||
460 | return ret; | 848 | return ret; |
461 | } | 849 | } |
@@ -463,7 +851,8 @@ module_init(bq27x00_battery_init); | |||
463 | 851 | ||
464 | static void __exit bq27x00_battery_exit(void) | 852 | static void __exit bq27x00_battery_exit(void) |
465 | { | 853 | { |
466 | i2c_del_driver(&bq27x00_battery_driver); | 854 | bq27x00_battery_platform_exit(); |
855 | bq27x00_battery_i2c_exit(); | ||
467 | } | 856 | } |
468 | module_exit(bq27x00_battery_exit); | 857 | module_exit(bq27x00_battery_exit); |
469 | 858 | ||
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c index 6957e8af644..4d2dc4fa288 100644 --- a/drivers/power/ds2782_battery.c +++ b/drivers/power/ds2782_battery.c | |||
@@ -393,6 +393,7 @@ static const struct i2c_device_id ds278x_id[] = { | |||
393 | {"ds2786", DS2786}, | 393 | {"ds2786", DS2786}, |
394 | {}, | 394 | {}, |
395 | }; | 395 | }; |
396 | MODULE_DEVICE_TABLE(i2c, ds278x_id); | ||
396 | 397 | ||
397 | static struct i2c_driver ds278x_battery_driver = { | 398 | static struct i2c_driver ds278x_battery_driver = { |
398 | .driver = { | 399 | .driver = { |
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 970f7335d3a..329b46b2327 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c | |||
@@ -171,6 +171,8 @@ int power_supply_register(struct device *parent, struct power_supply *psy) | |||
171 | dev_set_drvdata(dev, psy); | 171 | dev_set_drvdata(dev, psy); |
172 | psy->dev = dev; | 172 | psy->dev = dev; |
173 | 173 | ||
174 | INIT_WORK(&psy->changed_work, power_supply_changed_work); | ||
175 | |||
174 | rc = kobject_set_name(&dev->kobj, "%s", psy->name); | 176 | rc = kobject_set_name(&dev->kobj, "%s", psy->name); |
175 | if (rc) | 177 | if (rc) |
176 | goto kobject_set_name_failed; | 178 | goto kobject_set_name_failed; |
@@ -179,8 +181,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy) | |||
179 | if (rc) | 181 | if (rc) |
180 | goto device_add_failed; | 182 | goto device_add_failed; |
181 | 183 | ||
182 | INIT_WORK(&psy->changed_work, power_supply_changed_work); | ||
183 | |||
184 | rc = power_supply_create_triggers(psy); | 184 | rc = power_supply_create_triggers(psy); |
185 | if (rc) | 185 | if (rc) |
186 | goto create_triggers_failed; | 186 | goto create_triggers_failed; |
diff --git a/drivers/power/power_supply_leds.c b/drivers/power/power_supply_leds.c index 031a554837f..da25eb94e5c 100644 --- a/drivers/power/power_supply_leds.c +++ b/drivers/power/power_supply_leds.c | |||
@@ -21,6 +21,8 @@ | |||
21 | static void power_supply_update_bat_leds(struct power_supply *psy) | 21 | static void power_supply_update_bat_leds(struct power_supply *psy) |
22 | { | 22 | { |
23 | union power_supply_propval status; | 23 | union power_supply_propval status; |
24 | unsigned long delay_on = 0; | ||
25 | unsigned long delay_off = 0; | ||
24 | 26 | ||
25 | if (psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status)) | 27 | if (psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status)) |
26 | return; | 28 | return; |
@@ -32,16 +34,22 @@ static void power_supply_update_bat_leds(struct power_supply *psy) | |||
32 | led_trigger_event(psy->charging_full_trig, LED_FULL); | 34 | led_trigger_event(psy->charging_full_trig, LED_FULL); |
33 | led_trigger_event(psy->charging_trig, LED_OFF); | 35 | led_trigger_event(psy->charging_trig, LED_OFF); |
34 | led_trigger_event(psy->full_trig, LED_FULL); | 36 | led_trigger_event(psy->full_trig, LED_FULL); |
37 | led_trigger_event(psy->charging_blink_full_solid_trig, | ||
38 | LED_FULL); | ||
35 | break; | 39 | break; |
36 | case POWER_SUPPLY_STATUS_CHARGING: | 40 | case POWER_SUPPLY_STATUS_CHARGING: |
37 | led_trigger_event(psy->charging_full_trig, LED_FULL); | 41 | led_trigger_event(psy->charging_full_trig, LED_FULL); |
38 | led_trigger_event(psy->charging_trig, LED_FULL); | 42 | led_trigger_event(psy->charging_trig, LED_FULL); |
39 | led_trigger_event(psy->full_trig, LED_OFF); | 43 | led_trigger_event(psy->full_trig, LED_OFF); |
44 | led_trigger_blink(psy->charging_blink_full_solid_trig, | ||
45 | &delay_on, &delay_off); | ||
40 | break; | 46 | break; |
41 | default: | 47 | default: |
42 | led_trigger_event(psy->charging_full_trig, LED_OFF); | 48 | led_trigger_event(psy->charging_full_trig, LED_OFF); |
43 | led_trigger_event(psy->charging_trig, LED_OFF); | 49 | led_trigger_event(psy->charging_trig, LED_OFF); |
44 | led_trigger_event(psy->full_trig, LED_OFF); | 50 | led_trigger_event(psy->full_trig, LED_OFF); |
51 | led_trigger_event(psy->charging_blink_full_solid_trig, | ||
52 | LED_OFF); | ||
45 | break; | 53 | break; |
46 | } | 54 | } |
47 | } | 55 | } |
@@ -64,15 +72,24 @@ static int power_supply_create_bat_triggers(struct power_supply *psy) | |||
64 | if (!psy->full_trig_name) | 72 | if (!psy->full_trig_name) |
65 | goto full_failed; | 73 | goto full_failed; |
66 | 74 | ||
75 | psy->charging_blink_full_solid_trig_name = kasprintf(GFP_KERNEL, | ||
76 | "%s-charging-blink-full-solid", psy->name); | ||
77 | if (!psy->charging_blink_full_solid_trig_name) | ||
78 | goto charging_blink_full_solid_failed; | ||
79 | |||
67 | led_trigger_register_simple(psy->charging_full_trig_name, | 80 | led_trigger_register_simple(psy->charging_full_trig_name, |
68 | &psy->charging_full_trig); | 81 | &psy->charging_full_trig); |
69 | led_trigger_register_simple(psy->charging_trig_name, | 82 | led_trigger_register_simple(psy->charging_trig_name, |
70 | &psy->charging_trig); | 83 | &psy->charging_trig); |
71 | led_trigger_register_simple(psy->full_trig_name, | 84 | led_trigger_register_simple(psy->full_trig_name, |
72 | &psy->full_trig); | 85 | &psy->full_trig); |
86 | led_trigger_register_simple(psy->charging_blink_full_solid_trig_name, | ||
87 | &psy->charging_blink_full_solid_trig); | ||
73 | 88 | ||
74 | goto success; | 89 | goto success; |
75 | 90 | ||
91 | charging_blink_full_solid_failed: | ||
92 | kfree(psy->full_trig_name); | ||
76 | full_failed: | 93 | full_failed: |
77 | kfree(psy->charging_trig_name); | 94 | kfree(psy->charging_trig_name); |
78 | charging_failed: | 95 | charging_failed: |
@@ -88,6 +105,8 @@ static void power_supply_remove_bat_triggers(struct power_supply *psy) | |||
88 | led_trigger_unregister_simple(psy->charging_full_trig); | 105 | led_trigger_unregister_simple(psy->charging_full_trig); |
89 | led_trigger_unregister_simple(psy->charging_trig); | 106 | led_trigger_unregister_simple(psy->charging_trig); |
90 | led_trigger_unregister_simple(psy->full_trig); | 107 | led_trigger_unregister_simple(psy->full_trig); |
108 | led_trigger_unregister_simple(psy->charging_blink_full_solid_trig); | ||
109 | kfree(psy->charging_blink_full_solid_trig_name); | ||
91 | kfree(psy->full_trig_name); | 110 | kfree(psy->full_trig_name); |
92 | kfree(psy->charging_trig_name); | 111 | kfree(psy->charging_trig_name); |
93 | kfree(psy->charging_full_trig_name); | 112 | kfree(psy->charging_full_trig_name); |
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index cd1f90754a3..605514afc29 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c | |||
@@ -270,7 +270,7 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
270 | attr = &power_supply_attrs[psy->properties[j]]; | 270 | attr = &power_supply_attrs[psy->properties[j]]; |
271 | 271 | ||
272 | ret = power_supply_show_property(dev, attr, prop_buf); | 272 | ret = power_supply_show_property(dev, attr, prop_buf); |
273 | if (ret == -ENODEV) { | 273 | if (ret == -ENODEV || ret == -ENODATA) { |
274 | /* When a battery is absent, we expect -ENODEV. Don't abort; | 274 | /* When a battery is absent, we expect -ENODEV. Don't abort; |
275 | send the uevent with at least the PRESENT=0 property */ | 275 | send the uevent with at least the PRESENT=0 property */ |
276 | ret = 0; | 276 | ret = 0; |
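Accepting -ENODATA alongside -ENODEV keeps one unreadable property from suppressing the entire uevent. A sketch of the tolerant per-property loop (the loop skeleton is illustrative; only the error check is from the patch):

	for (j = 0; j < psy->num_properties; j++) {
		attr = &power_supply_attrs[psy->properties[j]];

		ret = power_supply_show_property(dev, attr, prop_buf);
		if (ret == -ENODEV || ret == -ENODATA) {
			ret = 0;	/* skip this property, keep the event */
			continue;
		}
		if (ret < 0)
			goto out;	/* any other error still aborts */
		/* append KEY=value to the uevent environment */
	}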
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c index 4255f2358b1..d36c289aaef 100644 --- a/drivers/power/s3c_adc_battery.c +++ b/drivers/power/s3c_adc_battery.c | |||
@@ -406,8 +406,8 @@ static int s3c_adc_bat_resume(struct platform_device *pdev) | |||
406 | return 0; | 406 | return 0; |
407 | } | 407 | } |
408 | #else | 408 | #else |
409 | #define s3c_adc_battery_suspend NULL | 409 | #define s3c_adc_bat_suspend NULL |
410 | #define s3c_adc_battery_resume NULL | 410 | #define s3c_adc_bat_resume NULL |
411 | #endif | 411 | #endif |
412 | 412 | ||
413 | static struct platform_driver s3c_adc_bat_driver = { | 413 | static struct platform_driver s3c_adc_bat_driver = { |
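The s3c_adc_battery fix is purely a naming bug: the !CONFIG_PM stubs must spell the exact identifiers the platform_driver initializer references, otherwise only non-PM configs fail to build. The corrected pattern, sketched (callback signatures follow the legacy platform-device PM style used in this file; other driver fields omitted):

	#ifdef CONFIG_PM
	static int s3c_adc_bat_suspend(struct platform_device *pdev,
				       pm_message_t state);
	static int s3c_adc_bat_resume(struct platform_device *pdev);
	#else
	/* Must match the names referenced by s3c_adc_bat_driver below. */
	#define s3c_adc_bat_suspend NULL
	#define s3c_adc_bat_resume  NULL
	#endif

	static struct platform_driver s3c_adc_bat_driver = {
		.suspend = s3c_adc_bat_suspend,
		.resume	 = s3c_adc_bat_resume,
	};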
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c index ff1f42398a2..92c16e1677b 100644 --- a/drivers/power/twl4030_charger.c +++ b/drivers/power/twl4030_charger.c | |||
@@ -71,8 +71,11 @@ struct twl4030_bci { | |||
71 | struct power_supply usb; | 71 | struct power_supply usb; |
72 | struct otg_transceiver *transceiver; | 72 | struct otg_transceiver *transceiver; |
73 | struct notifier_block otg_nb; | 73 | struct notifier_block otg_nb; |
74 | struct work_struct work; | ||
74 | int irq_chg; | 75 | int irq_chg; |
75 | int irq_bci; | 76 | int irq_bci; |
77 | |||
78 | unsigned long event; | ||
76 | }; | 79 | }; |
77 | 80 | ||
78 | /* | 81 | /* |
@@ -258,14 +261,11 @@ static irqreturn_t twl4030_bci_interrupt(int irq, void *arg) | |||
258 | return IRQ_HANDLED; | 261 | return IRQ_HANDLED; |
259 | } | 262 | } |
260 | 263 | ||
261 | static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val, | 264 | static void twl4030_bci_usb_work(struct work_struct *data) |
262 | void *priv) | ||
263 | { | 265 | { |
264 | struct twl4030_bci *bci = container_of(nb, struct twl4030_bci, otg_nb); | 266 | struct twl4030_bci *bci = container_of(data, struct twl4030_bci, work); |
265 | 267 | ||
266 | dev_dbg(bci->dev, "OTG notify %lu\n", val); | 268 | switch (bci->event) { |
267 | |||
268 | switch (val) { | ||
269 | case USB_EVENT_VBUS: | 269 | case USB_EVENT_VBUS: |
270 | case USB_EVENT_CHARGER: | 270 | case USB_EVENT_CHARGER: |
271 | twl4030_charger_enable_usb(bci, true); | 271 | twl4030_charger_enable_usb(bci, true); |
@@ -274,6 +274,17 @@ static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val, | |||
274 | twl4030_charger_enable_usb(bci, false); | 274 | twl4030_charger_enable_usb(bci, false); |
275 | break; | 275 | break; |
276 | } | 276 | } |
277 | } | ||
278 | |||
279 | static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val, | ||
280 | void *priv) | ||
281 | { | ||
282 | struct twl4030_bci *bci = container_of(nb, struct twl4030_bci, otg_nb); | ||
283 | |||
284 | dev_dbg(bci->dev, "OTG notify %lu\n", val); | ||
285 | |||
286 | bci->event = val; | ||
287 | schedule_work(&bci->work); | ||
277 | 288 | ||
278 | return NOTIFY_OK; | 289 | return NOTIFY_OK; |
279 | } | 290 | } |
@@ -466,6 +477,8 @@ static int __init twl4030_bci_probe(struct platform_device *pdev) | |||
466 | goto fail_bci_irq; | 477 | goto fail_bci_irq; |
467 | } | 478 | } |
468 | 479 | ||
480 | INIT_WORK(&bci->work, twl4030_bci_usb_work); | ||
481 | |||
469 | bci->transceiver = otg_get_transceiver(); | 482 | bci->transceiver = otg_get_transceiver(); |
470 | if (bci->transceiver != NULL) { | 483 | if (bci->transceiver != NULL) { |
471 | bci->otg_nb.notifier_call = twl4030_bci_usb_ncb; | 484 | bci->otg_nb.notifier_call = twl4030_bci_usb_ncb; |
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c index e5ed52d7193..2a9ab89f83b 100644 --- a/drivers/power/z2_battery.c +++ b/drivers/power/z2_battery.c | |||
@@ -134,6 +134,8 @@ static int z2_batt_ps_init(struct z2_charger *charger, int props) | |||
134 | enum power_supply_property *prop; | 134 | enum power_supply_property *prop; |
135 | struct z2_battery_info *info = charger->info; | 135 | struct z2_battery_info *info = charger->info; |
136 | 136 | ||
137 | if (info->charge_gpio >= 0) | ||
138 | props++; /* POWER_SUPPLY_PROP_STATUS */ | ||
137 | if (info->batt_tech >= 0) | 139 | if (info->batt_tech >= 0) |
138 | props++; /* POWER_SUPPLY_PROP_TECHNOLOGY */ | 140 | props++; /* POWER_SUPPLY_PROP_TECHNOLOGY */ |
139 | if (info->batt_I2C_reg >= 0) | 141 | if (info->batt_I2C_reg >= 0) |
@@ -293,6 +295,7 @@ static const struct i2c_device_id z2_batt_id[] = { | |||
293 | { "aer915", 0 }, | 295 | { "aer915", 0 }, |
294 | { } | 296 | { } |
295 | }; | 297 | }; |
298 | MODULE_DEVICE_TABLE(i2c, z2_batt_id); | ||
296 | 299 | ||
297 | static struct i2c_driver z2_batt_driver = { | 300 | static struct i2c_driver z2_batt_driver = { |
298 | .driver = { | 301 | .driver = { |
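The z2_battery fix adds the STATUS property to the count when a charge GPIO is wired up: every optional resource must bump the count used to size the property array, or the final entries are written past the allocation. A sketch of the sizing idiom (the base count and allocation call are illustrative):

	int props = 1;			/* POWER_SUPPLY_PROP_PRESENT, always reported */
	enum power_supply_property *prop;

	if (info->charge_gpio >= 0)
		props++;		/* POWER_SUPPLY_PROP_STATUS */
	if (info->batt_tech >= 0)
		props++;		/* POWER_SUPPLY_PROP_TECHNOLOGY */

	prop = kzalloc(props * sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return -ENOMEM;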
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index de75f67f4cc..b9f29e0d429 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig | |||
@@ -126,7 +126,7 @@ config REGULATOR_MAX8998 | |||
126 | and S5PC1XX chips to control VCC_CORE and VCC_USIM voltages. | 126 | and S5PC1XX chips to control VCC_CORE and VCC_USIM voltages. |
127 | 127 | ||
128 | config REGULATOR_TWL4030 | 128 | config REGULATOR_TWL4030 |
129 | bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC" | 129 | bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 PMIC" |
130 | depends on TWL4030_CORE | 130 | depends on TWL4030_CORE |
131 | help | 131 | help |
132 | This driver supports the voltage regulators provided by | 132 | This driver supports the voltage regulators provided by |
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index 2dec589a890..b1d77946e9c 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c | |||
@@ -206,29 +206,6 @@ static int ab3100_enable_regulator(struct regulator_dev *reg) | |||
206 | return err; | 206 | return err; |
207 | } | 207 | } |
208 | 208 | ||
209 | /* Per-regulator power on delay from spec */ | ||
210 | switch (abreg->regreg) { | ||
211 | case AB3100_LDO_A: /* Fallthrough */ | ||
212 | case AB3100_LDO_C: /* Fallthrough */ | ||
213 | case AB3100_LDO_D: /* Fallthrough */ | ||
214 | case AB3100_LDO_E: /* Fallthrough */ | ||
215 | case AB3100_LDO_H: /* Fallthrough */ | ||
216 | case AB3100_LDO_K: | ||
217 | udelay(200); | ||
218 | break; | ||
219 | case AB3100_LDO_F: | ||
220 | udelay(600); | ||
221 | break; | ||
222 | case AB3100_LDO_G: | ||
223 | udelay(400); | ||
224 | break; | ||
225 | case AB3100_BUCK: | ||
226 | mdelay(1); | ||
227 | break; | ||
228 | default: | ||
229 | break; | ||
230 | } | ||
231 | |||
232 | return 0; | 209 | return 0; |
233 | } | 210 | } |
234 | 211 | ||
@@ -450,11 +427,37 @@ static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg) | |||
450 | return abreg->plfdata->external_voltage; | 427 | return abreg->plfdata->external_voltage; |
451 | } | 428 | } |
452 | 429 | ||
430 | static int ab3100_enable_time_regulator(struct regulator_dev *reg) | ||
431 | { | ||
432 | struct ab3100_regulator *abreg = reg->reg_data; | ||
433 | |||
434 | /* Per-regulator power on delay from spec */ | ||
435 | switch (abreg->regreg) { | ||
436 | case AB3100_LDO_A: /* Fallthrough */ | ||
437 | case AB3100_LDO_C: /* Fallthrough */ | ||
438 | case AB3100_LDO_D: /* Fallthrough */ | ||
439 | case AB3100_LDO_E: /* Fallthrough */ | ||
440 | case AB3100_LDO_H: /* Fallthrough */ | ||
441 | case AB3100_LDO_K: | ||
442 | return 200; | ||
443 | case AB3100_LDO_F: | ||
444 | return 600; | ||
445 | case AB3100_LDO_G: | ||
446 | return 400; | ||
447 | case AB3100_BUCK: | ||
448 | return 1000; | ||
449 | default: | ||
450 | break; | ||
451 | } | ||
452 | return 0; | ||
453 | } | ||
454 | |||
453 | static struct regulator_ops regulator_ops_fixed = { | 455 | static struct regulator_ops regulator_ops_fixed = { |
454 | .enable = ab3100_enable_regulator, | 456 | .enable = ab3100_enable_regulator, |
455 | .disable = ab3100_disable_regulator, | 457 | .disable = ab3100_disable_regulator, |
456 | .is_enabled = ab3100_is_enabled_regulator, | 458 | .is_enabled = ab3100_is_enabled_regulator, |
457 | .get_voltage = ab3100_get_voltage_regulator, | 459 | .get_voltage = ab3100_get_voltage_regulator, |
460 | .enable_time = ab3100_enable_time_regulator, | ||
458 | }; | 461 | }; |
459 | 462 | ||
460 | static struct regulator_ops regulator_ops_variable = { | 463 | static struct regulator_ops regulator_ops_variable = { |
@@ -464,6 +467,7 @@ static struct regulator_ops regulator_ops_variable = { | |||
464 | .get_voltage = ab3100_get_voltage_regulator, | 467 | .get_voltage = ab3100_get_voltage_regulator, |
465 | .set_voltage = ab3100_set_voltage_regulator, | 468 | .set_voltage = ab3100_set_voltage_regulator, |
466 | .list_voltage = ab3100_list_voltage_regulator, | 469 | .list_voltage = ab3100_list_voltage_regulator, |
470 | .enable_time = ab3100_enable_time_regulator, | ||
467 | }; | 471 | }; |
468 | 472 | ||
469 | static struct regulator_ops regulator_ops_variable_sleepable = { | 473 | static struct regulator_ops regulator_ops_variable_sleepable = { |
@@ -474,6 +478,7 @@ static struct regulator_ops regulator_ops_variable_sleepable = { | |||
474 | .set_voltage = ab3100_set_voltage_regulator, | 478 | .set_voltage = ab3100_set_voltage_regulator, |
475 | .set_suspend_voltage = ab3100_set_suspend_voltage_regulator, | 479 | .set_suspend_voltage = ab3100_set_suspend_voltage_regulator, |
476 | .list_voltage = ab3100_list_voltage_regulator, | 480 | .list_voltage = ab3100_list_voltage_regulator, |
481 | .enable_time = ab3100_enable_time_regulator, | ||
477 | }; | 482 | }; |
478 | 483 | ||
479 | /* | 484 | /* |
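Rather than busy-waiting inside .enable, ab3100 now reports each regulator's power-on delay through the .enable_time op, which returns microseconds (so the buck's former mdelay(1) becomes a return of 1000), and lets the regulator core own the waiting policy. A simplified sketch of how a core might consume the op (assumption: this is illustrative, not the exact core implementation):

	/* Simplified consumer of .enable_time; illustrative only. */
	static int do_enable(struct regulator_dev *rdev)
	{
		int ret, delay = 0;

		if (rdev->desc->ops->enable_time)
			delay = rdev->desc->ops->enable_time(rdev);	/* in us */

		ret = rdev->desc->ops->enable(rdev);
		if (ret < 0)
			return ret;

		if (delay >= 1000)
			mdelay(delay / 1000);
		udelay(delay % 1000);
		return 0;
	}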
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c index d9a052c53ae..02f3c2333c8 100644 --- a/drivers/regulator/ab8500.c +++ b/drivers/regulator/ab8500.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * AB8500 peripheral regulators | 9 | * AB8500 peripheral regulators |
10 | * | 10 | * |
11 | * AB8500 supports the following regulators: | 11 | * AB8500 supports the following regulators: |
12 | * VAUX1/2/3, VINTCORE, VTVOUT, VAUDIO, VAMIC1/2, VDMIC, VANA | 12 | * VAUX1/2/3, VINTCORE, VTVOUT, VUSB, VAUDIO, VAMIC1/2, VDMIC, VANA |
13 | */ | 13 | */ |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
@@ -38,6 +38,7 @@ | |||
38 | * @voltage_mask: mask to control regulator voltage | 38 | * @voltage_mask: mask to control regulator voltage |
39 | * @voltages: supported voltage table | 39 | * @voltages: supported voltage table |
40 | * @voltages_len: number of supported voltages for the regulator | 40 | * @voltages_len: number of supported voltages for the regulator |
41 | * @delay: startup/set voltage delay in us | ||
41 | */ | 42 | */ |
42 | struct ab8500_regulator_info { | 43 | struct ab8500_regulator_info { |
43 | struct device *dev; | 44 | struct device *dev; |
@@ -55,6 +56,7 @@ struct ab8500_regulator_info { | |||
55 | u8 voltage_mask; | 56 | u8 voltage_mask; |
56 | int const *voltages; | 57 | int const *voltages; |
57 | int voltages_len; | 58 | int voltages_len; |
59 | unsigned int delay; | ||
58 | }; | 60 | }; |
59 | 61 | ||
60 | /* voltage tables for the vauxn/vintcore supplies */ | 62 | /* voltage tables for the vauxn/vintcore supplies */ |
@@ -290,6 +292,29 @@ static int ab8500_regulator_set_voltage(struct regulator_dev *rdev, | |||
290 | return ret; | 292 | return ret; |
291 | } | 293 | } |
292 | 294 | ||
295 | static int ab8500_regulator_enable_time(struct regulator_dev *rdev) | ||
296 | { | ||
297 | struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); | ||
298 | |||
299 | return info->delay; | ||
300 | } | ||
301 | |||
302 | static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev, | ||
303 | unsigned int old_sel, | ||
304 | unsigned int new_sel) | ||
305 | { | ||
306 | struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); | ||
307 | int ret; | ||
308 | |||
309 | /* If the regulator isn't on, it won't take time here */ | ||
310 | ret = ab8500_regulator_is_enabled(rdev); | ||
311 | if (ret < 0) | ||
312 | return ret; | ||
313 | if (!ret) | ||
314 | return 0; | ||
315 | return info->delay; | ||
316 | } | ||
317 | |||
293 | static struct regulator_ops ab8500_regulator_ops = { | 318 | static struct regulator_ops ab8500_regulator_ops = { |
294 | .enable = ab8500_regulator_enable, | 319 | .enable = ab8500_regulator_enable, |
295 | .disable = ab8500_regulator_disable, | 320 | .disable = ab8500_regulator_disable, |
@@ -297,6 +322,8 @@ static struct regulator_ops ab8500_regulator_ops = { | |||
297 | .get_voltage = ab8500_regulator_get_voltage, | 322 | .get_voltage = ab8500_regulator_get_voltage, |
298 | .set_voltage = ab8500_regulator_set_voltage, | 323 | .set_voltage = ab8500_regulator_set_voltage, |
299 | .list_voltage = ab8500_list_voltage, | 324 | .list_voltage = ab8500_list_voltage, |
325 | .enable_time = ab8500_regulator_enable_time, | ||
326 | .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel, | ||
300 | }; | 327 | }; |
301 | 328 | ||
302 | static int ab8500_fixed_get_voltage(struct regulator_dev *rdev) | 329 | static int ab8500_fixed_get_voltage(struct regulator_dev *rdev) |
@@ -317,6 +344,8 @@ static struct regulator_ops ab8500_regulator_fixed_ops = { | |||
317 | .is_enabled = ab8500_regulator_is_enabled, | 344 | .is_enabled = ab8500_regulator_is_enabled, |
318 | .get_voltage = ab8500_fixed_get_voltage, | 345 | .get_voltage = ab8500_fixed_get_voltage, |
319 | .list_voltage = ab8500_list_voltage, | 346 | .list_voltage = ab8500_list_voltage, |
347 | .enable_time = ab8500_regulator_enable_time, | ||
348 | .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel, | ||
320 | }; | 349 | }; |
321 | 350 | ||
322 | static struct ab8500_regulator_info | 351 | static struct ab8500_regulator_info |
@@ -426,12 +455,28 @@ static struct ab8500_regulator_info | |||
426 | .owner = THIS_MODULE, | 455 | .owner = THIS_MODULE, |
427 | .n_voltages = 1, | 456 | .n_voltages = 1, |
428 | }, | 457 | }, |
458 | .delay = 10000, | ||
429 | .fixed_uV = 2000000, | 459 | .fixed_uV = 2000000, |
430 | .update_bank = 0x03, | 460 | .update_bank = 0x03, |
431 | .update_reg = 0x80, | 461 | .update_reg = 0x80, |
432 | .update_mask = 0x82, | 462 | .update_mask = 0x82, |
433 | .update_val_enable = 0x02, | 463 | .update_val_enable = 0x02, |
434 | }, | 464 | }, |
465 | [AB8500_LDO_USB] = { | ||
466 | .desc = { | ||
467 | .name = "LDO-USB", | ||
468 | .ops = &ab8500_regulator_fixed_ops, | ||
469 | .type = REGULATOR_VOLTAGE, | ||
470 | .id = AB8500_LDO_USB, | ||
471 | .owner = THIS_MODULE, | ||
472 | .n_voltages = 1, | ||
473 | }, | ||
474 | .fixed_uV = 3300000, | ||
475 | .update_bank = 0x03, | ||
476 | .update_reg = 0x82, | ||
477 | .update_mask = 0x03, | ||
478 | .update_val_enable = 0x01, | ||
479 | }, | ||
435 | [AB8500_LDO_AUDIO] = { | 480 | [AB8500_LDO_AUDIO] = { |
436 | .desc = { | 481 | .desc = { |
437 | .name = "LDO-AUDIO", | 482 | .name = "LDO-AUDIO", |
@@ -511,6 +556,186 @@ static struct ab8500_regulator_info | |||
511 | 556 | ||
512 | }; | 557 | }; |
513 | 558 | ||
559 | struct ab8500_reg_init { | ||
560 | u8 bank; | ||
561 | u8 addr; | ||
562 | u8 mask; | ||
563 | }; | ||
564 | |||
565 | #define REG_INIT(_id, _bank, _addr, _mask) \ | ||
566 | [_id] = { \ | ||
567 | .bank = _bank, \ | ||
568 | .addr = _addr, \ | ||
569 | .mask = _mask, \ | ||
570 | } | ||
571 | |||
572 | static struct ab8500_reg_init ab8500_reg_init[] = { | ||
573 | /* | ||
574 | * 0x30, VanaRequestCtrl | ||
575 | * 0x0C, VpllRequestCtrl | ||
576 | * 0xc0, VextSupply1RequestCtrl | ||
577 | */ | ||
578 | REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xfc), | ||
579 | /* | ||
580 | * 0x03, VextSupply2RequestCtrl | ||
581 | * 0x0c, VextSupply3RequestCtrl | ||
582 | * 0x30, Vaux1RequestCtrl | ||
583 | * 0xc0, Vaux2RequestCtrl | ||
584 | */ | ||
585 | REG_INIT(AB8500_REGUREQUESTCTRL3, 0x03, 0x05, 0xff), | ||
586 | /* | ||
587 | * 0x03, Vaux3RequestCtrl | ||
588 | * 0x04, SwHPReq | ||
589 | */ | ||
590 | REG_INIT(AB8500_REGUREQUESTCTRL4, 0x03, 0x06, 0x07), | ||
591 | /* | ||
592 | * 0x08, VanaSysClkReq1HPValid | ||
593 | * 0x20, Vaux1SysClkReq1HPValid | ||
594 | * 0x40, Vaux2SysClkReq1HPValid | ||
595 | * 0x80, Vaux3SysClkReq1HPValid | ||
596 | */ | ||
597 | REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xe8), | ||
598 | /* | ||
599 | * 0x10, VextSupply1SysClkReq1HPValid | ||
600 | * 0x20, VextSupply2SysClkReq1HPValid | ||
601 | * 0x40, VextSupply3SysClkReq1HPValid | ||
602 | */ | ||
603 | REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x70), | ||
604 | /* | ||
605 | * 0x08, VanaHwHPReq1Valid | ||
606 | * 0x20, Vaux1HwHPReq1Valid | ||
607 | * 0x40, Vaux2HwHPReq1Valid | ||
608 | * 0x80, Vaux3HwHPReq1Valid | ||
609 | */ | ||
610 | REG_INIT(AB8500_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xe8), | ||
611 | /* | ||
612 | * 0x01, VextSupply1HwHPReq1Valid | ||
613 | * 0x02, VextSupply2HwHPReq1Valid | ||
614 | * 0x04, VextSupply3HwHPReq1Valid | ||
615 | */ | ||
616 | REG_INIT(AB8500_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x07), | ||
617 | /* | ||
618 | * 0x08, VanaHwHPReq2Valid | ||
619 | * 0x20, Vaux1HwHPReq2Valid | ||
620 | * 0x40, Vaux2HwHPReq2Valid | ||
621 | * 0x80, Vaux3HwHPReq2Valid | ||
622 | */ | ||
623 | REG_INIT(AB8500_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xe8), | ||
624 | /* | ||
625 | * 0x01, VextSupply1HwHPReq2Valid | ||
626 | * 0x02, VextSupply2HwHPReq2Valid | ||
627 | * 0x04, VextSupply3HwHPReq2Valid | ||
628 | */ | ||
629 | REG_INIT(AB8500_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x07), | ||
630 | /* | ||
631 | * 0x20, VanaSwHPReqValid | ||
632 | * 0x80, Vaux1SwHPReqValid | ||
633 | */ | ||
634 | REG_INIT(AB8500_REGUSWHPREQVALID1, 0x03, 0x0d, 0xa0), | ||
635 | /* | ||
636 | * 0x01, Vaux2SwHPReqValid | ||
637 | * 0x02, Vaux3SwHPReqValid | ||
638 | * 0x04, VextSupply1SwHPReqValid | ||
639 | * 0x08, VextSupply2SwHPReqValid | ||
640 | * 0x10, VextSupply3SwHPReqValid | ||
641 | */ | ||
642 | REG_INIT(AB8500_REGUSWHPREQVALID2, 0x03, 0x0e, 0x1f), | ||
643 | /* | ||
644 | * 0x02, SysClkReq2Valid1 | ||
645 | * ... | ||
646 | * 0x80, SysClkReq8Valid1 | ||
647 | */ | ||
648 | REG_INIT(AB8500_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe), | ||
649 | /* | ||
650 | * 0x02, SysClkReq2Valid2 | ||
651 | * ... | ||
652 | * 0x80, SysClkReq8Valid2 | ||
653 | */ | ||
654 | REG_INIT(AB8500_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe), | ||
655 | /* | ||
656 | * 0x02, VTVoutEna | ||
657 | * 0x04, Vintcore12Ena | ||
658 | * 0x38, Vintcore12Sel | ||
659 | * 0x40, Vintcore12LP | ||
660 | * 0x80, VTVoutLP | ||
661 | */ | ||
662 | REG_INIT(AB8500_REGUMISC1, 0x03, 0x80, 0xfe), | ||
663 | /* | ||
664 | * 0x02, VaudioEna | ||
665 | * 0x04, VdmicEna | ||
666 | * 0x08, Vamic1Ena | ||
667 | * 0x10, Vamic2Ena | ||
668 | */ | ||
669 | REG_INIT(AB8500_VAUDIOSUPPLY, 0x03, 0x83, 0x1e), | ||
670 | /* | ||
671 | * 0x01, Vamic1_dzout | ||
672 | * 0x02, Vamic2_dzout | ||
673 | */ | ||
674 | REG_INIT(AB8500_REGUCTRL1VAMIC, 0x03, 0x84, 0x03), | ||
675 | /* | ||
676 | * 0x0c, VanaRegu | ||
677 | * 0x03, VpllRegu | ||
678 | */ | ||
679 | REG_INIT(AB8500_VPLLVANAREGU, 0x04, 0x06, 0x0f), | ||
680 | /* | ||
681 | * 0x01, VrefDDREna | ||
682 | * 0x02, VrefDDRSleepMode | ||
683 | */ | ||
684 | REG_INIT(AB8500_VREFDDR, 0x04, 0x07, 0x03), | ||
685 | /* | ||
686 | * 0x03, VextSupply1Regu | ||
687 | * 0x0c, VextSupply2Regu | ||
688 | * 0x30, VextSupply3Regu | ||
689 | * 0x40, ExtSupply2Bypass | ||
690 | * 0x80, ExtSupply3Bypass | ||
691 | */ | ||
692 | REG_INIT(AB8500_EXTSUPPLYREGU, 0x04, 0x08, 0xff), | ||
693 | /* | ||
694 | * 0x03, Vaux1Regu | ||
695 | * 0x0c, Vaux2Regu | ||
696 | */ | ||
697 | REG_INIT(AB8500_VAUX12REGU, 0x04, 0x09, 0x0f), | ||
698 | /* | ||
699 | * 0x03, Vaux3Regu | ||
700 | */ | ||
701 | REG_INIT(AB8500_VRF1VAUX3REGU, 0x04, 0x0a, 0x03), | ||
702 | /* | ||
703 | * 0x3f, Vsmps1Sel1 | ||
704 | */ | ||
705 | REG_INIT(AB8500_VSMPS1SEL1, 0x04, 0x13, 0x3f), | ||
706 | /* | ||
707 | * 0x0f, Vaux1Sel | ||
708 | */ | ||
709 | REG_INIT(AB8500_VAUX1SEL, 0x04, 0x1f, 0x0f), | ||
710 | /* | ||
711 | * 0x0f, Vaux2Sel | ||
712 | */ | ||
713 | REG_INIT(AB8500_VAUX2SEL, 0x04, 0x20, 0x0f), | ||
714 | /* | ||
715 | * 0x07, Vaux3Sel | ||
716 | */ | ||
717 | REG_INIT(AB8500_VRF1VAUX3SEL, 0x04, 0x21, 0x07), | ||
718 | /* | ||
719 | * 0x01, VextSupply12LP | ||
720 | */ | ||
721 | REG_INIT(AB8500_REGUCTRL2SPARE, 0x04, 0x22, 0x01), | ||
722 | /* | ||
723 | * 0x04, Vaux1Disch | ||
724 | * 0x08, Vaux2Disch | ||
725 | * 0x10, Vaux3Disch | ||
726 | * 0x20, Vintcore12Disch | ||
727 | * 0x40, VTVoutDisch | ||
728 | * 0x80, VaudioDisch | ||
729 | */ | ||
730 | REG_INIT(AB8500_REGUCTRLDISCH, 0x04, 0x43, 0xfc), | ||
731 | /* | ||
732 | * 0x02, VanaDisch | ||
733 | * 0x04, VdmicPullDownEna | ||
734 | * 0x10, VdmicDisch | ||
735 | */ | ||
736 | REG_INIT(AB8500_REGUCTRLDISCH2, 0x04, 0x44, 0x16), | ||
737 | }; | ||
738 | |||
514 | static __devinit int ab8500_regulator_probe(struct platform_device *pdev) | 739 | static __devinit int ab8500_regulator_probe(struct platform_device *pdev) |
515 | { | 740 | { |
516 | struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); | 741 | struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); |
@@ -529,10 +754,51 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev) | |||
529 | 754 | ||
530 | /* make sure the platform data has the correct size */ | 755 | /* make sure the platform data has the correct size */ |
531 | if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) { | 756 | if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) { |
532 | dev_err(&pdev->dev, "platform configuration error\n"); | 757 | dev_err(&pdev->dev, "Configuration error: size mismatch.\n"); |
533 | return -EINVAL; | 758 | return -EINVAL; |
534 | } | 759 | } |
535 | 760 | ||
761 | /* initialize registers */ | ||
762 | for (i = 0; i < pdata->num_regulator_reg_init; i++) { | ||
763 | int id; | ||
764 | u8 value; | ||
765 | |||
766 | id = pdata->regulator_reg_init[i].id; | ||
767 | value = pdata->regulator_reg_init[i].value; | ||
768 | |||
769 | /* check for configuration errors */ | ||
770 | if (id >= AB8500_NUM_REGULATOR_REGISTERS) { | ||
771 | dev_err(&pdev->dev, | ||
772 | "Configuration error: id outside range.\n"); | ||
773 | return -EINVAL; | ||
774 | } | ||
775 | if (value & ~ab8500_reg_init[id].mask) { | ||
776 | dev_err(&pdev->dev, | ||
777 | "Configuration error: value outside mask.\n"); | ||
778 | return -EINVAL; | ||
779 | } | ||
780 | |||
781 | /* initialize register */ | ||
782 | err = abx500_mask_and_set_register_interruptible(&pdev->dev, | ||
783 | ab8500_reg_init[id].bank, | ||
784 | ab8500_reg_init[id].addr, | ||
785 | ab8500_reg_init[id].mask, | ||
786 | value); | ||
787 | if (err < 0) { | ||
788 | dev_err(&pdev->dev, | ||
789 | "Failed to initialize 0x%02x, 0x%02x.\n", | ||
790 | ab8500_reg_init[id].bank, | ||
791 | ab8500_reg_init[id].addr); | ||
792 | return err; | ||
793 | } | ||
794 | dev_vdbg(&pdev->dev, | ||
795 | " init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n", | ||
796 | ab8500_reg_init[id].bank, | ||
797 | ab8500_reg_init[id].addr, | ||
798 | ab8500_reg_init[id].mask, | ||
799 | value); | ||
800 | } | ||
801 | |||
536 | /* register all regulators */ | 802 | /* register all regulators */ |
537 | for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { | 803 | for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { |
538 | struct ab8500_regulator_info *info = NULL; | 804 | struct ab8500_regulator_info *info = NULL; |
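With the REG_INIT table in place, board code can hand the driver an array of {id, value} pairs; probe rejects ids outside AB8500_NUM_REGULATOR_REGISTERS and values with bits outside the register's writable mask before committing anything to hardware. A sketch of the board side (struct and field names inferred from the probe loop; the value is illustrative, not a real board setting):

	static struct ab8500_regulator_reg_init board_reg_init[] = {
		{
			.id	= AB8500_REGUCTRLDISCH,
			.value	= 0x04,		/* e.g. set Vaux1Disch */
		},
	};

	static struct ab8500_platform_data board_pdata = {
		.regulator_reg_init	= board_reg_init,
		.num_regulator_reg_init	= ARRAY_SIZE(board_reg_init),
	};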
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 9fa20957847..3ffc6979d16 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -1629,6 +1629,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, | |||
1629 | int min_uV, int max_uV) | 1629 | int min_uV, int max_uV) |
1630 | { | 1630 | { |
1631 | int ret; | 1631 | int ret; |
1632 | int delay = 0; | ||
1632 | unsigned int selector; | 1633 | unsigned int selector; |
1633 | 1634 | ||
1634 | trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV); | 1635 | trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV); |
@@ -1662,6 +1663,22 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, | |||
1662 | } | 1663 | } |
1663 | } | 1664 | } |
1664 | 1665 | ||
1666 | /* | ||
1667 | * If we can't obtain the old selector there is not enough | ||
1668 | * info to call set_voltage_time_sel(). | ||
1669 | */ | ||
1670 | if (rdev->desc->ops->set_voltage_time_sel && | ||
1671 | rdev->desc->ops->get_voltage_sel) { | ||
1672 | unsigned int old_selector = 0; | ||
1673 | |||
1674 | ret = rdev->desc->ops->get_voltage_sel(rdev); | ||
1675 | if (ret < 0) | ||
1676 | return ret; | ||
1677 | old_selector = ret; | ||
1678 | delay = rdev->desc->ops->set_voltage_time_sel(rdev, | ||
1679 | old_selector, selector); | ||
1680 | } | ||
1681 | |||
1665 | if (best_val != INT_MAX) { | 1682 | if (best_val != INT_MAX) { |
1666 | ret = rdev->desc->ops->set_voltage_sel(rdev, selector); | 1683 | ret = rdev->desc->ops->set_voltage_sel(rdev, selector); |
1667 | selector = best_val; | 1684 | selector = best_val; |
@@ -1672,6 +1689,14 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, | |||
1672 | ret = -EINVAL; | 1689 | ret = -EINVAL; |
1673 | } | 1690 | } |
1674 | 1691 | ||
1692 | /* Insert any necessary delays */ | ||
1693 | if (delay >= 1000) { | ||
1694 | mdelay(delay / 1000); | ||
1695 | udelay(delay % 1000); | ||
1696 | } else if (delay) { | ||
1697 | udelay(delay); | ||
1698 | } | ||
1699 | |||
1675 | if (ret == 0) | 1700 | if (ret == 0) |
1676 | _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, | 1701 | _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, |
1677 | NULL); | 1702 | NULL); |
@@ -1740,6 +1765,51 @@ out: | |||
1740 | EXPORT_SYMBOL_GPL(regulator_set_voltage); | 1765 | EXPORT_SYMBOL_GPL(regulator_set_voltage); |
1741 | 1766 | ||
1742 | /** | 1767 | /** |
1768 | * regulator_set_voltage_time - get rise/fall time | ||
1769 | * @regulator: regulator source | ||
1770 | * @old_uV: starting voltage in microvolts | ||
1771 | * @new_uV: target voltage in microvolts | ||
1772 | * | ||
1773 | * Provided with the starting and ending voltage, this function attempts to | ||
1774 | * calculate the time in microseconds required to rise or fall to this new | ||
1775 | * voltage. | ||
1776 | */ | ||
1777 | int regulator_set_voltage_time(struct regulator *regulator, | ||
1778 | int old_uV, int new_uV) | ||
1779 | { | ||
1780 | struct regulator_dev *rdev = regulator->rdev; | ||
1781 | struct regulator_ops *ops = rdev->desc->ops; | ||
1782 | int old_sel = -1; | ||
1783 | int new_sel = -1; | ||
1784 | int voltage; | ||
1785 | int i; | ||
1786 | |||
1787 | /* Currently requires operations to do this */ | ||
1788 | if (!ops->list_voltage || !ops->set_voltage_time_sel | ||
1789 | || !rdev->desc->n_voltages) | ||
1790 | return -EINVAL; | ||
1791 | |||
1792 | for (i = 0; i < rdev->desc->n_voltages; i++) { | ||
1793 | /* We only look for exact voltage matches here */ | ||
1794 | voltage = regulator_list_voltage(regulator, i); | ||
1795 | if (voltage < 0) | ||
1796 | return -EINVAL; | ||
1797 | if (voltage == 0) | ||
1798 | continue; | ||
1799 | if (voltage == old_uV) | ||
1800 | old_sel = i; | ||
1801 | if (voltage == new_uV) | ||
1802 | new_sel = i; | ||
1803 | } | ||
1804 | |||
1805 | if (old_sel < 0 || new_sel < 0) | ||
1806 | return -EINVAL; | ||
1807 | |||
1808 | return ops->set_voltage_time_sel(rdev, old_sel, new_sel); | ||
1809 | } | ||
1810 | EXPORT_SYMBOL_GPL(regulator_set_voltage_time); | ||
1811 | |||
1812 | /** | ||
1743 | * regulator_sync_voltage - re-apply last regulator output voltage | 1813 | * regulator_sync_voltage - re-apply last regulator output voltage |
1744 | * @regulator: regulator source | 1814 | * @regulator: regulator source |
1745 | * | 1815 | * |
@@ -2565,8 +2635,11 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, | |||
2565 | init_data->consumer_supplies[i].dev, | 2635 | init_data->consumer_supplies[i].dev, |
2566 | init_data->consumer_supplies[i].dev_name, | 2636 | init_data->consumer_supplies[i].dev_name, |
2567 | init_data->consumer_supplies[i].supply); | 2637 | init_data->consumer_supplies[i].supply); |
2568 | if (ret < 0) | 2638 | if (ret < 0) { |
2639 | dev_err(dev, "Failed to set supply %s\n", | ||
2640 | init_data->consumer_supplies[i].supply); | ||
2569 | goto unset_supplies; | 2641 | goto unset_supplies; |
2642 | } | ||
2570 | } | 2643 | } |
2571 | 2644 | ||
2572 | list_add(&rdev->list, ®ulator_list); | 2645 | list_add(&rdev->list, ®ulator_list); |
@@ -2653,6 +2726,47 @@ out: | |||
2653 | EXPORT_SYMBOL_GPL(regulator_suspend_prepare); | 2726 | EXPORT_SYMBOL_GPL(regulator_suspend_prepare); |
2654 | 2727 | ||
2655 | /** | 2728 | /** |
2729 | * regulator_suspend_finish - resume regulators from system wide suspend | ||
2730 | * | ||
2731 | * Turn on regulators that might be turned off by regulator_suspend_prepare | ||
2732 | * and that should be turned on according to the regulators properties. | ||
2733 | */ | ||
2734 | int regulator_suspend_finish(void) | ||
2735 | { | ||
2736 | struct regulator_dev *rdev; | ||
2737 | int ret = 0, error; | ||
2738 | |||
2739 | mutex_lock(®ulator_list_mutex); | ||
2740 | list_for_each_entry(rdev, ®ulator_list, list) { | ||
2741 | struct regulator_ops *ops = rdev->desc->ops; | ||
2742 | |||
2743 | mutex_lock(&rdev->mutex); | ||
2744 | if ((rdev->use_count > 0 || rdev->constraints->always_on) && | ||
2745 | ops->enable) { | ||
2746 | error = ops->enable(rdev); | ||
2747 | if (error) | ||
2748 | ret = error; | ||
2749 | } else { | ||
2750 | if (!has_full_constraints) | ||
2751 | goto unlock; | ||
2752 | if (!ops->disable) | ||
2753 | goto unlock; | ||
2754 | if (ops->is_enabled && !ops->is_enabled(rdev)) | ||
2755 | goto unlock; | ||
2756 | |||
2757 | error = ops->disable(rdev); | ||
2758 | if (error) | ||
2759 | ret = error; | ||
2760 | } | ||
2761 | unlock: | ||
2762 | mutex_unlock(&rdev->mutex); | ||
2763 | } | ||
2764 | mutex_unlock(®ulator_list_mutex); | ||
2765 | return ret; | ||
2766 | } | ||
2767 | EXPORT_SYMBOL_GPL(regulator_suspend_finish); | ||
2768 | |||
2769 | /** | ||
2656 | * regulator_has_full_constraints - the system has fully specified constraints | 2770 | * regulator_has_full_constraints - the system has fully specified constraints |
2657 | * | 2771 | * |
2658 | * Calling this function will cause the regulator API to disable all | 2772 | * Calling this function will cause the regulator API to disable all |
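regulator_set_voltage_time() gives consumers advance knowledge of a transition's duration, useful for example when a cpufreq driver must fold regulator ramp time into its reported transition latency; as the lookup loop above shows, it only succeeds for exact list_voltage() matches. A sketch of consumer usage (the DVFS framing and the "vdd_cpu" name are illustrative):

	/* Illustrative: budget regulator ramp time into DVFS latency. */
	int ramp_us;

	ramp_us = regulator_set_voltage_time(vdd_cpu, 1100000, 1300000);
	if (ramp_us < 0)
		ramp_us = 0;	/* driver lacks the ops; assume no extra delay */

	latency_ns += (u64)ramp_us * NSEC_PER_USEC;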
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c index 01ef7e9903b..77e0cfb30b2 100644 --- a/drivers/regulator/max8997.c +++ b/drivers/regulator/max8997.c | |||
@@ -1185,6 +1185,7 @@ static const struct platform_device_id max8997_pmic_id[] = { | |||
1185 | { "max8997-pmic", 0}, | 1185 | { "max8997-pmic", 0}, |
1186 | { }, | 1186 | { }, |
1187 | }; | 1187 | }; |
1188 | MODULE_DEVICE_TABLE(platform, max8997_pmic_id); | ||
1188 | 1189 | ||
1189 | static struct platform_driver max8997_pmic_driver = { | 1190 | static struct platform_driver max8997_pmic_driver = { |
1190 | .driver = { | 1191 | .driver = { |
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c index 0ec49ca527a..43410266f99 100644 --- a/drivers/regulator/max8998.c +++ b/drivers/regulator/max8998.c | |||
@@ -887,6 +887,7 @@ static const struct platform_device_id max8998_pmic_id[] = { | |||
887 | { "lp3974-pmic", TYPE_LP3974 }, | 887 | { "lp3974-pmic", TYPE_LP3974 }, |
888 | { } | 888 | { } |
889 | }; | 889 | }; |
890 | MODULE_DEVICE_TABLE(platform, max8998_pmic_id); | ||
890 | 891 | ||
891 | static struct platform_driver max8998_pmic_driver = { | 892 | static struct platform_driver max8998_pmic_driver = { |
892 | .driver = { | 893 | .driver = { |
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c index 176a6be5a8c..9166aa0a9df 100644 --- a/drivers/regulator/tps6524x-regulator.c +++ b/drivers/regulator/tps6524x-regulator.c | |||
@@ -596,7 +596,7 @@ static struct regulator_ops regulator_ops = { | |||
596 | .get_current_limit = get_current_limit, | 596 | .get_current_limit = get_current_limit, |
597 | }; | 597 | }; |
598 | 598 | ||
599 | static int __devexit pmic_remove(struct spi_device *spi) | 599 | static int pmic_remove(struct spi_device *spi) |
600 | { | 600 | { |
601 | struct tps6524x *hw = spi_get_drvdata(spi); | 601 | struct tps6524x *hw = spi_get_drvdata(spi); |
602 | int i; | 602 | int i; |
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index 06df898842c..e93453b1b97 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c | |||
@@ -565,9 +565,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev) | |||
565 | } | 565 | } |
566 | 566 | ||
567 | irq = platform_get_irq_byname(pdev, "UV"); | 567 | irq = platform_get_irq_byname(pdev, "UV"); |
568 | ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq, | 568 | ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq, |
569 | IRQF_TRIGGER_RISING, dcdc->name, | 569 | IRQF_TRIGGER_RISING, dcdc->name, dcdc); |
570 | dcdc); | ||
571 | if (ret != 0) { | 570 | if (ret != 0) { |
572 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", | 571 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", |
573 | irq, ret); | 572 | irq, ret); |
@@ -575,9 +574,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev) | |||
575 | } | 574 | } |
576 | 575 | ||
577 | irq = platform_get_irq_byname(pdev, "HC"); | 576 | irq = platform_get_irq_byname(pdev, "HC"); |
578 | ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_oc_irq, | 577 | ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq, |
579 | IRQF_TRIGGER_RISING, dcdc->name, | 578 | IRQF_TRIGGER_RISING, dcdc->name, dcdc); |
580 | dcdc); | ||
581 | if (ret != 0) { | 579 | if (ret != 0) { |
582 | dev_err(&pdev->dev, "Failed to request HC IRQ %d: %d\n", | 580 | dev_err(&pdev->dev, "Failed to request HC IRQ %d: %d\n", |
583 | irq, ret); | 581 | irq, ret); |
@@ -589,7 +587,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev) | |||
589 | return 0; | 587 | return 0; |
590 | 588 | ||
591 | err_uv: | 589 | err_uv: |
592 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); | 590 | free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); |
593 | err_regulator: | 591 | err_regulator: |
594 | regulator_unregister(dcdc->regulator); | 592 | regulator_unregister(dcdc->regulator); |
595 | err: | 593 | err: |
@@ -606,8 +604,8 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev) | |||
606 | 604 | ||
607 | platform_set_drvdata(pdev, NULL); | 605 | platform_set_drvdata(pdev, NULL); |
608 | 606 | ||
609 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc); | 607 | free_irq(platform_get_irq_byname(pdev, "HC"), dcdc); |
610 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); | 608 | free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); |
611 | regulator_unregister(dcdc->regulator); | 609 | regulator_unregister(dcdc->regulator); |
612 | if (dcdc->dvs_gpio) | 610 | if (dcdc->dvs_gpio) |
613 | gpio_free(dcdc->dvs_gpio); | 611 | gpio_free(dcdc->dvs_gpio); |
@@ -756,9 +754,8 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev) | |||
756 | } | 754 | } |
757 | 755 | ||
758 | irq = platform_get_irq_byname(pdev, "UV"); | 756 | irq = platform_get_irq_byname(pdev, "UV"); |
759 | ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq, | 757 | ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq, |
760 | IRQF_TRIGGER_RISING, dcdc->name, | 758 | IRQF_TRIGGER_RISING, dcdc->name, dcdc); |
761 | dcdc); | ||
762 | if (ret != 0) { | 759 | if (ret != 0) { |
763 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", | 760 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", |
764 | irq, ret); | 761 | irq, ret); |
@@ -783,7 +780,7 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev) | |||
783 | 780 | ||
784 | platform_set_drvdata(pdev, NULL); | 781 | platform_set_drvdata(pdev, NULL); |
785 | 782 | ||
786 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); | 783 | free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); |
787 | regulator_unregister(dcdc->regulator); | 784 | regulator_unregister(dcdc->regulator); |
788 | kfree(dcdc); | 785 | kfree(dcdc); |
789 | 786 | ||
@@ -885,9 +882,9 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev) | |||
885 | } | 882 | } |
886 | 883 | ||
887 | irq = platform_get_irq_byname(pdev, "UV"); | 884 | irq = platform_get_irq_byname(pdev, "UV"); |
888 | ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq, | 885 | ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq, |
889 | IRQF_TRIGGER_RISING, dcdc->name, | 886 | IRQF_TRIGGER_RISING, dcdc->name, |
890 | dcdc); | 887 | dcdc); |
891 | if (ret != 0) { | 888 | if (ret != 0) { |
892 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", | 889 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", |
893 | irq, ret); | 890 | irq, ret); |
@@ -908,11 +905,10 @@ err: | |||
908 | static __devexit int wm831x_boostp_remove(struct platform_device *pdev) | 905 | static __devexit int wm831x_boostp_remove(struct platform_device *pdev) |
909 | { | 906 | { |
910 | struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); | 907 | struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); |
911 | struct wm831x *wm831x = dcdc->wm831x; | ||
912 | 908 | ||
913 | platform_set_drvdata(pdev, NULL); | 909 | platform_set_drvdata(pdev, NULL); |
914 | 910 | ||
915 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); | 911 | free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); |
916 | regulator_unregister(dcdc->regulator); | 912 | regulator_unregister(dcdc->regulator); |
917 | kfree(dcdc); | 913 | kfree(dcdc); |
918 | 914 | ||
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c index 6c446cd6ad5..01f27c7f423 100644 --- a/drivers/regulator/wm831x-isink.c +++ b/drivers/regulator/wm831x-isink.c | |||
@@ -198,9 +198,8 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev) | |||
198 | } | 198 | } |
199 | 199 | ||
200 | irq = platform_get_irq(pdev, 0); | 200 | irq = platform_get_irq(pdev, 0); |
201 | ret = wm831x_request_irq(wm831x, irq, wm831x_isink_irq, | 201 | ret = request_threaded_irq(irq, NULL, wm831x_isink_irq, |
202 | IRQF_TRIGGER_RISING, isink->name, | 202 | IRQF_TRIGGER_RISING, isink->name, isink); |
203 | isink); | ||
204 | if (ret != 0) { | 203 | if (ret != 0) { |
205 | dev_err(&pdev->dev, "Failed to request ISINK IRQ %d: %d\n", | 204 | dev_err(&pdev->dev, "Failed to request ISINK IRQ %d: %d\n", |
206 | irq, ret); | 205 | irq, ret); |
@@ -221,11 +220,10 @@ err: | |||
221 | static __devexit int wm831x_isink_remove(struct platform_device *pdev) | 220 | static __devexit int wm831x_isink_remove(struct platform_device *pdev) |
222 | { | 221 | { |
223 | struct wm831x_isink *isink = platform_get_drvdata(pdev); | 222 | struct wm831x_isink *isink = platform_get_drvdata(pdev); |
224 | struct wm831x *wm831x = isink->wm831x; | ||
225 | 223 | ||
226 | platform_set_drvdata(pdev, NULL); | 224 | platform_set_drvdata(pdev, NULL); |
227 | 225 | ||
228 | wm831x_free_irq(wm831x, platform_get_irq(pdev, 0), isink); | 226 | free_irq(platform_get_irq(pdev, 0), isink); |
229 | 227 | ||
230 | regulator_unregister(isink->regulator); | 228 | regulator_unregister(isink->regulator); |
231 | kfree(isink); | 229 | kfree(isink); |
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c index c94fc5b7cd5..2220cf8defb 100644 --- a/drivers/regulator/wm831x-ldo.c +++ b/drivers/regulator/wm831x-ldo.c | |||
@@ -354,9 +354,9 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev) | |||
354 | } | 354 | } |
355 | 355 | ||
356 | irq = platform_get_irq_byname(pdev, "UV"); | 356 | irq = platform_get_irq_byname(pdev, "UV"); |
357 | ret = wm831x_request_irq(wm831x, irq, wm831x_ldo_uv_irq, | 357 | ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq, |
358 | IRQF_TRIGGER_RISING, ldo->name, | 358 | IRQF_TRIGGER_RISING, ldo->name, |
359 | ldo); | 359 | ldo); |
360 | if (ret != 0) { | 360 | if (ret != 0) { |
361 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", | 361 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", |
362 | irq, ret); | 362 | irq, ret); |
@@ -377,11 +377,10 @@ err: | |||
377 | static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev) | 377 | static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev) |
378 | { | 378 | { |
379 | struct wm831x_ldo *ldo = platform_get_drvdata(pdev); | 379 | struct wm831x_ldo *ldo = platform_get_drvdata(pdev); |
380 | struct wm831x *wm831x = ldo->wm831x; | ||
381 | 380 | ||
382 | platform_set_drvdata(pdev, NULL); | 381 | platform_set_drvdata(pdev, NULL); |
383 | 382 | ||
384 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo); | 383 | free_irq(platform_get_irq_byname(pdev, "UV"), ldo); |
385 | regulator_unregister(ldo->regulator); | 384 | regulator_unregister(ldo->regulator); |
386 | kfree(ldo); | 385 | kfree(ldo); |
387 | 386 | ||
@@ -619,9 +618,8 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev) | |||
619 | } | 618 | } |
620 | 619 | ||
621 | irq = platform_get_irq_byname(pdev, "UV"); | 620 | irq = platform_get_irq_byname(pdev, "UV"); |
622 | ret = wm831x_request_irq(wm831x, irq, wm831x_ldo_uv_irq, | 621 | ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq, |
623 | IRQF_TRIGGER_RISING, ldo->name, | 622 | IRQF_TRIGGER_RISING, ldo->name, ldo); |
624 | ldo); | ||
625 | if (ret != 0) { | 623 | if (ret != 0) { |
626 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", | 624 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", |
627 | irq, ret); | 625 | irq, ret); |
@@ -642,9 +640,8 @@ err: | |||
642 | static __devexit int wm831x_aldo_remove(struct platform_device *pdev) | 640 | static __devexit int wm831x_aldo_remove(struct platform_device *pdev) |
643 | { | 641 | { |
644 | struct wm831x_ldo *ldo = platform_get_drvdata(pdev); | 642 | struct wm831x_ldo *ldo = platform_get_drvdata(pdev); |
645 | struct wm831x *wm831x = ldo->wm831x; | ||
646 | 643 | ||
647 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo); | 644 | free_irq(platform_get_irq_byname(pdev, "UV"), ldo); |
648 | regulator_unregister(ldo->regulator); | 645 | regulator_unregister(ldo->regulator); |
649 | kfree(ldo); | 646 | kfree(ldo); |
650 | 647 | ||
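The wm831x conversions swap the MFD-private wm831x_request_irq()/wm831x_free_irq() wrappers for the generic threaded-IRQ API. With a NULL primary handler, the driver callback runs in a kernel thread and may therefore sleep in the register I/O these handlers perform; the matching release is a plain free_irq() on the same IRQ number. The converted pattern in isolation:

	irq = platform_get_irq_byname(pdev, "UV");
	ret = request_threaded_irq(irq, NULL /* no hard handler */,
				   wm831x_dcdc_uv_irq,
				   IRQF_TRIGGER_RISING, dcdc->name, dcdc);
	if (ret != 0)
		dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
			irq, ret);

	/* and on the remove path: */
	free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);

As a side effect, the remove functions no longer need the struct wm831x pointer, which is why the local wm831x variables disappear above.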
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 379d8592bc6..459f2cbe80f 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -3982,8 +3982,10 @@ out_err: | |||
3982 | } | 3982 | } |
3983 | 3983 | ||
3984 | static struct ccw_driver dasd_eckd_driver = { | 3984 | static struct ccw_driver dasd_eckd_driver = { |
3985 | .name = "dasd-eckd", | 3985 | .driver = { |
3986 | .owner = THIS_MODULE, | 3986 | .name = "dasd-eckd", |
3987 | .owner = THIS_MODULE, | ||
3988 | }, | ||
3987 | .ids = dasd_eckd_ids, | 3989 | .ids = dasd_eckd_ids, |
3988 | .probe = dasd_eckd_probe, | 3990 | .probe = dasd_eckd_probe, |
3989 | .remove = dasd_generic_remove, | 3991 | .remove = dasd_generic_remove, |
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index be89b3a893d..4b71b116486 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c | |||
@@ -65,8 +65,10 @@ dasd_fba_set_online(struct ccw_device *cdev) | |||
65 | } | 65 | } |
66 | 66 | ||
67 | static struct ccw_driver dasd_fba_driver = { | 67 | static struct ccw_driver dasd_fba_driver = { |
68 | .name = "dasd-fba", | 68 | .driver = { |
69 | .owner = THIS_MODULE, | 69 | .name = "dasd-fba", |
70 | .owner = THIS_MODULE, | ||
71 | }, | ||
70 | .ids = dasd_fba_ids, | 72 | .ids = dasd_fba_ids, |
71 | .probe = dasd_fba_probe, | 73 | .probe = dasd_fba_probe, |
72 | .remove = dasd_generic_remove, | 74 | .remove = dasd_generic_remove, |
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 3fb4335d491..694464c65fc 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
@@ -764,8 +764,10 @@ static struct ccw_device_id raw3215_id[] = { | |||
764 | }; | 764 | }; |
765 | 765 | ||
766 | static struct ccw_driver raw3215_ccw_driver = { | 766 | static struct ccw_driver raw3215_ccw_driver = { |
767 | .name = "3215", | 767 | .driver = { |
768 | .owner = THIS_MODULE, | 768 | .name = "3215", |
769 | .owner = THIS_MODULE, | ||
770 | }, | ||
769 | .ids = raw3215_id, | 771 | .ids = raw3215_id, |
770 | .probe = &raw3215_probe, | 772 | .probe = &raw3215_probe, |
771 | .remove = &raw3215_remove, | 773 | .remove = &raw3215_remove, |
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 96ba2fd1c8a..4c023761946 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c | |||
@@ -1388,8 +1388,10 @@ static struct ccw_device_id raw3270_id[] = { | |||
1388 | }; | 1388 | }; |
1389 | 1389 | ||
1390 | static struct ccw_driver raw3270_ccw_driver = { | 1390 | static struct ccw_driver raw3270_ccw_driver = { |
1391 | .name = "3270", | 1391 | .driver = { |
1392 | .owner = THIS_MODULE, | 1392 | .name = "3270", |
1393 | .owner = THIS_MODULE, | ||
1394 | }, | ||
1393 | .ids = raw3270_id, | 1395 | .ids = raw3270_id, |
1394 | .probe = &raw3270_probe, | 1396 | .probe = &raw3270_probe, |
1395 | .remove = &raw3270_remove, | 1397 | .remove = &raw3270_remove, |
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index c26511171ff..9eff2df70dd 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c | |||
@@ -1320,8 +1320,10 @@ tape_34xx_online(struct ccw_device *cdev) | |||
1320 | } | 1320 | } |
1321 | 1321 | ||
1322 | static struct ccw_driver tape_34xx_driver = { | 1322 | static struct ccw_driver tape_34xx_driver = { |
1323 | .name = "tape_34xx", | 1323 | .driver = { |
1324 | .owner = THIS_MODULE, | 1324 | .name = "tape_34xx", |
1325 | .owner = THIS_MODULE, | ||
1326 | }, | ||
1325 | .ids = tape_34xx_ids, | 1327 | .ids = tape_34xx_ids, |
1326 | .probe = tape_generic_probe, | 1328 | .probe = tape_generic_probe, |
1327 | .remove = tape_generic_remove, | 1329 | .remove = tape_generic_remove, |
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index de2e99e0a71..b98dcbd1671 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
@@ -1761,8 +1761,10 @@ tape_3590_online(struct ccw_device *cdev) | |||
1761 | } | 1761 | } |
1762 | 1762 | ||
1763 | static struct ccw_driver tape_3590_driver = { | 1763 | static struct ccw_driver tape_3590_driver = { |
1764 | .name = "tape_3590", | 1764 | .driver = { |
1765 | .owner = THIS_MODULE, | 1765 | .name = "tape_3590", |
1766 | .owner = THIS_MODULE, | ||
1767 | }, | ||
1766 | .ids = tape_3590_ids, | 1768 | .ids = tape_3590_ids, |
1767 | .probe = tape_generic_probe, | 1769 | .probe = tape_generic_probe, |
1768 | .remove = tape_generic_remove, | 1770 | .remove = tape_generic_remove, |
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index caef1757341..f6b00c3df42 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c | |||
@@ -64,8 +64,10 @@ static int ur_set_offline(struct ccw_device *cdev); | |||
64 | static int ur_pm_suspend(struct ccw_device *cdev); | 64 | static int ur_pm_suspend(struct ccw_device *cdev); |
65 | 65 | ||
66 | static struct ccw_driver ur_driver = { | 66 | static struct ccw_driver ur_driver = { |
67 | .name = "vmur", | 67 | .driver = { |
68 | .owner = THIS_MODULE, | 68 | .name = "vmur", |
69 | .owner = THIS_MODULE, | ||
70 | }, | ||
69 | .ids = ur_ids, | 71 | .ids = ur_ids, |
70 | .probe = ur_probe, | 72 | .probe = ur_probe, |
71 | .remove = ur_remove, | 73 | .remove = ur_remove, |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 2864581d8ec..5c567414c4b 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -428,7 +428,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const | |||
428 | gdev = to_ccwgroupdev(dev); | 428 | gdev = to_ccwgroupdev(dev); |
429 | gdrv = to_ccwgroupdrv(dev->driver); | 429 | gdrv = to_ccwgroupdrv(dev->driver); |
430 | 430 | ||
431 | if (!try_module_get(gdrv->owner)) | 431 | if (!try_module_get(gdrv->driver.owner)) |
432 | return -EINVAL; | 432 | return -EINVAL; |
433 | 433 | ||
434 | ret = strict_strtoul(buf, 0, &value); | 434 | ret = strict_strtoul(buf, 0, &value); |
@@ -442,7 +442,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const | |||
442 | else | 442 | else |
443 | ret = -EINVAL; | 443 | ret = -EINVAL; |
444 | out: | 444 | out: |
445 | module_put(gdrv->owner); | 445 | module_put(gdrv->driver.owner); |
446 | return (ret == 0) ? count : ret; | 446 | return (ret == 0) ? count : ret; |
447 | } | 447 | } |
448 | 448 | ||
@@ -616,8 +616,6 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver) | |||
616 | { | 616 | { |
617 | /* register our new driver with the core */ | 617 | /* register our new driver with the core */ |
618 | cdriver->driver.bus = &ccwgroup_bus_type; | 618 | cdriver->driver.bus = &ccwgroup_bus_type; |
619 | cdriver->driver.name = cdriver->name; | ||
620 | cdriver->driver.owner = cdriver->owner; | ||
621 | 619 | ||
622 | return driver_register(&cdriver->driver); | 620 | return driver_register(&cdriver->driver); |
623 | } | 621 | } |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e50b12163af..df14c51f653 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -127,7 +127,7 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
127 | return ret; | 127 | return ret; |
128 | } | 128 | } |
129 | 129 | ||
130 | struct bus_type ccw_bus_type; | 130 | static struct bus_type ccw_bus_type; |
131 | 131 | ||
132 | static void io_subchannel_irq(struct subchannel *); | 132 | static void io_subchannel_irq(struct subchannel *); |
133 | static int io_subchannel_probe(struct subchannel *); | 133 | static int io_subchannel_probe(struct subchannel *); |
@@ -547,7 +547,7 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, | |||
547 | if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) | 547 | if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) |
548 | return -EAGAIN; | 548 | return -EAGAIN; |
549 | 549 | ||
550 | if (cdev->drv && !try_module_get(cdev->drv->owner)) { | 550 | if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) { |
551 | atomic_set(&cdev->private->onoff, 0); | 551 | atomic_set(&cdev->private->onoff, 0); |
552 | return -EINVAL; | 552 | return -EINVAL; |
553 | } | 553 | } |
@@ -573,7 +573,7 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, | |||
573 | } | 573 | } |
574 | out: | 574 | out: |
575 | if (cdev->drv) | 575 | if (cdev->drv) |
576 | module_put(cdev->drv->owner); | 576 | module_put(cdev->drv->driver.owner); |
577 | atomic_set(&cdev->private->onoff, 0); | 577 | atomic_set(&cdev->private->onoff, 0); |
578 | return (ret < 0) ? ret : count; | 578 | return (ret < 0) ? ret : count; |
579 | } | 579 | } |
@@ -1970,7 +1970,7 @@ static const struct dev_pm_ops ccw_pm_ops = { | |||
1970 | .restore = ccw_device_pm_restore, | 1970 | .restore = ccw_device_pm_restore, |
1971 | }; | 1971 | }; |
1972 | 1972 | ||
1973 | struct bus_type ccw_bus_type = { | 1973 | static struct bus_type ccw_bus_type = { |
1974 | .name = "ccw", | 1974 | .name = "ccw", |
1975 | .match = ccw_bus_match, | 1975 | .match = ccw_bus_match, |
1976 | .uevent = ccw_uevent, | 1976 | .uevent = ccw_uevent, |
@@ -1993,8 +1993,6 @@ int ccw_driver_register(struct ccw_driver *cdriver) | |||
1993 | struct device_driver *drv = &cdriver->driver; | 1993 | struct device_driver *drv = &cdriver->driver; |
1994 | 1994 | ||
1995 | drv->bus = &ccw_bus_type; | 1995 | drv->bus = &ccw_bus_type; |
1996 | drv->name = cdriver->name; | ||
1997 | drv->owner = cdriver->owner; | ||
1998 | 1996 | ||
1999 | return driver_register(drv); | 1997 | return driver_register(drv); |
2000 | } | 1998 | } |
@@ -2112,5 +2110,4 @@ EXPORT_SYMBOL(ccw_device_set_offline); | |||
2112 | EXPORT_SYMBOL(ccw_driver_register); | 2110 | EXPORT_SYMBOL(ccw_driver_register); |
2113 | EXPORT_SYMBOL(ccw_driver_unregister); | 2111 | EXPORT_SYMBOL(ccw_driver_unregister); |
2114 | EXPORT_SYMBOL(get_ccwdev_by_busid); | 2112 | EXPORT_SYMBOL(get_ccwdev_by_busid); |
2115 | EXPORT_SYMBOL(ccw_bus_type); | ||
2116 | EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); | 2113 | EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 379de2d1ec4..7e297c7bb5f 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
@@ -133,7 +133,6 @@ void ccw_device_set_notoper(struct ccw_device *cdev); | |||
133 | /* qdio needs this. */ | 133 | /* qdio needs this. */ |
134 | void ccw_device_set_timeout(struct ccw_device *, int); | 134 | void ccw_device_set_timeout(struct ccw_device *, int); |
135 | extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); | 135 | extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); |
136 | extern struct bus_type ccw_bus_type; | ||
137 | 136 | ||
138 | /* Channel measurement facility related */ | 137 | /* Channel measurement facility related */ |
139 | void retry_set_schib(struct ccw_device *cdev); | 138 | void retry_set_schib(struct ccw_device *cdev); |
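The device.c and device.h hunks above make ccw_bus_type static, drop the top-level .name/.owner fields of struct ccw_driver in favor of the embedded struct device_driver, and route module references through cdev->drv->driver.owner. A minimal sketch of the declaration pattern the rest of this diff converts drivers to, assuming kernel headers; example_ids, example_probe and the device type are hypothetical names used only for illustration:

    /* Hypothetical ccw driver after this series: name and owner live in
     * the embedded struct device_driver, not in struct ccw_driver itself. */
    static int example_probe(struct ccw_device *cdev);

    static struct ccw_device_id example_ids[] = {
            { CCW_DEVICE(0x3088, 0x61) },   /* hypothetical device type */
            { /* end of list */ },
    };

    static struct ccw_driver example_ccw_driver = {
            .driver = {
                    .owner = THIS_MODULE,   /* formerly .owner at top level */
                    .name  = "example",     /* formerly .name at top level */
            },
            .ids   = example_ids,
            .probe = example_probe,
    };

ccw_driver_register() no longer copies name and owner into the inner driver, which is why every ccw and ccwgroup driver below (claw, ctcm, lcs, qeth, zfcp) gains the same .driver = { ... } block.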
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 5640c89cd9d..479c665e9e7 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -1508,7 +1508,8 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, | |||
1508 | 1508 | ||
1509 | if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) | 1509 | if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) |
1510 | return -EBUSY; | 1510 | return -EBUSY; |
1511 | 1511 | if (!count) | |
1512 | return 0; | ||
1512 | if (callflags & QDIO_FLAG_SYNC_INPUT) | 1513 | if (callflags & QDIO_FLAG_SYNC_INPUT) |
1513 | return handle_inbound(irq_ptr->input_qs[q_nr], | 1514 | return handle_inbound(irq_ptr->input_qs[q_nr], |
1514 | callflags, bufnr, count); | 1515 | callflags, bufnr, count); |
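The do_QDIO() hunk above adds an explicit zero-count fast path: a request for no buffers now returns success before reaching the inbound/outbound handlers. A minimal sketch of what callers can rely on, assuming kernel qdio headers; qdio_example() is a hypothetical caller:

    /* Hypothetical caller: count == 0 is now a successful no-op inside
     * do_QDIO(), so no caller-side guard is needed. */
    static int qdio_example(struct ccw_device *cdev, int q_nr,
                            unsigned int bufnr, unsigned int count)
    {
            return do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, q_nr, bufnr, count);
    }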
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index ce3a5c13ce0..9feb62febb3 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -264,8 +264,10 @@ static struct device *claw_root_dev; | |||
264 | /* ccwgroup table */ | 264 | /* ccwgroup table */ |
265 | 265 | ||
266 | static struct ccwgroup_driver claw_group_driver = { | 266 | static struct ccwgroup_driver claw_group_driver = { |
267 | .owner = THIS_MODULE, | 267 | .driver = { |
268 | .name = "claw", | 268 | .owner = THIS_MODULE, |
269 | .name = "claw", | ||
270 | }, | ||
269 | .max_slaves = 2, | 271 | .max_slaves = 2, |
270 | .driver_id = 0xC3D3C1E6, | 272 | .driver_id = 0xC3D3C1E6, |
271 | .probe = claw_probe, | 273 | .probe = claw_probe, |
@@ -282,8 +284,10 @@ static struct ccw_device_id claw_ids[] = { | |||
282 | MODULE_DEVICE_TABLE(ccw, claw_ids); | 284 | MODULE_DEVICE_TABLE(ccw, claw_ids); |
283 | 285 | ||
284 | static struct ccw_driver claw_ccw_driver = { | 286 | static struct ccw_driver claw_ccw_driver = { |
285 | .owner = THIS_MODULE, | 287 | .driver = { |
286 | .name = "claw", | 288 | .owner = THIS_MODULE, |
289 | .name = "claw", | ||
290 | }, | ||
287 | .ids = claw_ids, | 291 | .ids = claw_ids, |
288 | .probe = ccwgroup_probe_ccwdev, | 292 | .probe = ccwgroup_probe_ccwdev, |
289 | .remove = ccwgroup_remove_ccwdev, | 293 | .remove = ccwgroup_remove_ccwdev, |
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 4c284598592..c189296763a 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
@@ -1764,16 +1764,20 @@ static struct ccw_device_id ctcm_ids[] = { | |||
1764 | MODULE_DEVICE_TABLE(ccw, ctcm_ids); | 1764 | MODULE_DEVICE_TABLE(ccw, ctcm_ids); |
1765 | 1765 | ||
1766 | static struct ccw_driver ctcm_ccw_driver = { | 1766 | static struct ccw_driver ctcm_ccw_driver = { |
1767 | .owner = THIS_MODULE, | 1767 | .driver = { |
1768 | .name = "ctcm", | 1768 | .owner = THIS_MODULE, |
1769 | .name = "ctcm", | ||
1770 | }, | ||
1769 | .ids = ctcm_ids, | 1771 | .ids = ctcm_ids, |
1770 | .probe = ccwgroup_probe_ccwdev, | 1772 | .probe = ccwgroup_probe_ccwdev, |
1771 | .remove = ccwgroup_remove_ccwdev, | 1773 | .remove = ccwgroup_remove_ccwdev, |
1772 | }; | 1774 | }; |
1773 | 1775 | ||
1774 | static struct ccwgroup_driver ctcm_group_driver = { | 1776 | static struct ccwgroup_driver ctcm_group_driver = { |
1775 | .owner = THIS_MODULE, | 1777 | .driver = { |
1776 | .name = CTC_DRIVER_NAME, | 1778 | .owner = THIS_MODULE, |
1779 | .name = CTC_DRIVER_NAME, | ||
1780 | }, | ||
1777 | .max_slaves = 2, | 1781 | .max_slaves = 2, |
1778 | .driver_id = 0xC3E3C3D4, /* CTCM */ | 1782 | .driver_id = 0xC3E3C3D4, /* CTCM */ |
1779 | .probe = ctcm_probe_device, | 1783 | .probe = ctcm_probe_device, |
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 30b2a820e67..7fbc4adbb6d 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -2396,8 +2396,10 @@ static struct ccw_device_id lcs_ids[] = { | |||
2396 | MODULE_DEVICE_TABLE(ccw, lcs_ids); | 2396 | MODULE_DEVICE_TABLE(ccw, lcs_ids); |
2397 | 2397 | ||
2398 | static struct ccw_driver lcs_ccw_driver = { | 2398 | static struct ccw_driver lcs_ccw_driver = { |
2399 | .owner = THIS_MODULE, | 2399 | .driver = { |
2400 | .name = "lcs", | 2400 | .owner = THIS_MODULE, |
2401 | .name = "lcs", | ||
2402 | }, | ||
2401 | .ids = lcs_ids, | 2403 | .ids = lcs_ids, |
2402 | .probe = ccwgroup_probe_ccwdev, | 2404 | .probe = ccwgroup_probe_ccwdev, |
2403 | .remove = ccwgroup_remove_ccwdev, | 2405 | .remove = ccwgroup_remove_ccwdev, |
@@ -2407,8 +2409,10 @@ static struct ccw_driver lcs_ccw_driver = { | |||
2407 | * LCS ccwgroup driver registration | 2409 | * LCS ccwgroup driver registration |
2408 | */ | 2410 | */ |
2409 | static struct ccwgroup_driver lcs_group_driver = { | 2411 | static struct ccwgroup_driver lcs_group_driver = { |
2410 | .owner = THIS_MODULE, | 2412 | .driver = { |
2411 | .name = "lcs", | 2413 | .owner = THIS_MODULE, |
2414 | .name = "lcs", | ||
2415 | }, | ||
2412 | .max_slaves = 2, | 2416 | .max_slaves = 2, |
2413 | .driver_id = 0xD3C3E2, | 2417 | .driver_id = 0xD3C3E2, |
2414 | .probe = lcs_probe_device, | 2418 | .probe = lcs_probe_device, |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 25eef304bd4..10a3a3b4dd3 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -3902,7 +3902,9 @@ static struct ccw_device_id qeth_ids[] = { | |||
3902 | MODULE_DEVICE_TABLE(ccw, qeth_ids); | 3902 | MODULE_DEVICE_TABLE(ccw, qeth_ids); |
3903 | 3903 | ||
3904 | static struct ccw_driver qeth_ccw_driver = { | 3904 | static struct ccw_driver qeth_ccw_driver = { |
3905 | .name = "qeth", | 3905 | .driver = { |
3906 | .name = "qeth", | ||
3907 | }, | ||
3906 | .ids = qeth_ids, | 3908 | .ids = qeth_ids, |
3907 | .probe = ccwgroup_probe_ccwdev, | 3909 | .probe = ccwgroup_probe_ccwdev, |
3908 | .remove = ccwgroup_remove_ccwdev, | 3910 | .remove = ccwgroup_remove_ccwdev, |
@@ -4428,8 +4430,10 @@ static int qeth_core_restore(struct ccwgroup_device *gdev) | |||
4428 | } | 4430 | } |
4429 | 4431 | ||
4430 | static struct ccwgroup_driver qeth_core_ccwgroup_driver = { | 4432 | static struct ccwgroup_driver qeth_core_ccwgroup_driver = { |
4431 | .owner = THIS_MODULE, | 4433 | .driver = { |
4432 | .name = "qeth", | 4434 | .owner = THIS_MODULE, |
4435 | .name = "qeth", | ||
4436 | }, | ||
4433 | .driver_id = 0xD8C5E3C8, | 4437 | .driver_id = 0xD8C5E3C8, |
4434 | .probe = qeth_core_probe_device, | 4438 | .probe = qeth_core_probe_device, |
4435 | .remove = qeth_core_remove_device, | 4439 | .remove = qeth_core_remove_device, |
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 4f7852dd30c..e8b7cee6204 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c | |||
@@ -251,8 +251,10 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev) | |||
251 | } | 251 | } |
252 | 252 | ||
253 | struct ccw_driver zfcp_ccw_driver = { | 253 | struct ccw_driver zfcp_ccw_driver = { |
254 | .owner = THIS_MODULE, | 254 | .driver = { |
255 | .name = "zfcp", | 255 | .owner = THIS_MODULE, |
256 | .name = "zfcp", | ||
257 | }, | ||
256 | .ids = zfcp_ccw_device_id, | 258 | .ids = zfcp_ccw_device_id, |
257 | .probe = zfcp_ccw_probe, | 259 | .probe = zfcp_ccw_probe, |
258 | .remove = zfcp_ccw_remove, | 260 | .remove = zfcp_ccw_remove, |
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile index 92df4d6b614..1bd9fd18f7f 100644 --- a/drivers/scsi/aacraid/Makefile +++ b/drivers/scsi/aacraid/Makefile | |||
@@ -3,6 +3,6 @@ | |||
3 | obj-$(CONFIG_SCSI_AACRAID) := aacraid.o | 3 | obj-$(CONFIG_SCSI_AACRAID) := aacraid.o |
4 | 4 | ||
5 | aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \ | 5 | aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \ |
6 | dpcsup.o rx.o sa.o rkt.o nark.o | 6 | dpcsup.o rx.o sa.o rkt.o nark.o src.o |
7 | 7 | ||
8 | ccflags-y := -Idrivers/scsi | 8 | ccflags-y := -Idrivers/scsi |
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 7df2dd1d2c6..118ce83a737 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c | |||
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -1486,7 +1487,9 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1486 | dev->a_ops.adapter_write = aac_write_block; | 1487 | dev->a_ops.adapter_write = aac_write_block; |
1487 | } | 1488 | } |
1488 | dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; | 1489 | dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; |
1489 | if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { | 1490 | if (dev->adapter_info.options & AAC_OPT_NEW_COMM_TYPE1) |
1491 | dev->adapter_info.options |= AAC_OPT_NEW_COMM; | ||
1492 | if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { | ||
1490 | /* | 1493 | /* |
1491 | * Worst case size that could cause sg overflow when | 1494 | * Worst case size that could cause sg overflow when |
1492 | * we break up SG elements that are larger than 64KB. | 1495 | * we break up SG elements that are larger than 64KB. |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 4dbcc055ac7..29ab00016b7 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
@@ -12,7 +12,7 @@ | |||
12 | *----------------------------------------------------------------------------*/ | 12 | *----------------------------------------------------------------------------*/ |
13 | 13 | ||
14 | #ifndef AAC_DRIVER_BUILD | 14 | #ifndef AAC_DRIVER_BUILD |
15 | # define AAC_DRIVER_BUILD 26400 | 15 | # define AAC_DRIVER_BUILD 28000 |
16 | # define AAC_DRIVER_BRANCH "-ms" | 16 | # define AAC_DRIVER_BRANCH "-ms" |
17 | #endif | 17 | #endif |
18 | #define MAXIMUM_NUM_CONTAINERS 32 | 18 | #define MAXIMUM_NUM_CONTAINERS 32 |
@@ -277,6 +277,16 @@ enum aac_queue_types { | |||
277 | 277 | ||
278 | #define FsaNormal 1 | 278 | #define FsaNormal 1 |
279 | 279 | ||
280 | /* transport FIB header (PMC) */ | ||
281 | struct aac_fib_xporthdr { | ||
282 | u64 HostAddress; /* FIB host address w/o xport header */ | ||
283 | u32 Size; /* FIB size excluding xport header */ | ||
284 | u32 Handle; /* driver handle to reference the FIB */ | ||
285 | u64 Reserved[2]; | ||
286 | }; | ||
287 | |||
288 | #define ALIGN32 32 | ||
289 | |||
280 | /* | 290 | /* |
281 | * Define the FIB. The FIB is the where all the requested data and | 291 | * Define the FIB. The FIB is the where all the requested data and |
282 | * command information are put to the application on the FSA adapter. | 292 | * command information are put to the application on the FSA adapter. |
@@ -394,7 +404,9 @@ enum fib_xfer_state { | |||
394 | AdapterMicroFib = (1<<17), | 404 | AdapterMicroFib = (1<<17), |
395 | BIOSFibPath = (1<<18), | 405 | BIOSFibPath = (1<<18), |
396 | FastResponseCapable = (1<<19), | 406 | FastResponseCapable = (1<<19), |
397 | ApiFib = (1<<20) // Its an API Fib. | 407 | ApiFib = (1<<20), /* Its an API Fib */ |
408 | /* PMC NEW COMM: There is no more AIF data pending */ | ||
409 | NoMoreAifDataAvailable = (1<<21) | ||
398 | }; | 410 | }; |
399 | 411 | ||
400 | /* | 412 | /* |
@@ -404,6 +416,7 @@ enum fib_xfer_state { | |||
404 | 416 | ||
405 | #define ADAPTER_INIT_STRUCT_REVISION 3 | 417 | #define ADAPTER_INIT_STRUCT_REVISION 3 |
406 | #define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science | 418 | #define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science |
419 | #define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */ | ||
407 | 420 | ||
408 | struct aac_init | 421 | struct aac_init |
409 | { | 422 | { |
@@ -428,9 +441,15 @@ struct aac_init | |||
428 | #define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001 | 441 | #define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001 |
429 | #define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010 | 442 | #define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010 |
430 | #define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020 | 443 | #define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020 |
444 | #define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000041 | ||
431 | __le32 MaxIoCommands; /* max outstanding commands */ | 445 | __le32 MaxIoCommands; /* max outstanding commands */ |
432 | __le32 MaxIoSize; /* largest I/O command */ | 446 | __le32 MaxIoSize; /* largest I/O command */ |
433 | __le32 MaxFibSize; /* largest FIB to adapter */ | 447 | __le32 MaxFibSize; /* largest FIB to adapter */ |
448 | /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */ | ||
449 | __le32 MaxNumAif; /* max number of aif */ | ||
450 | /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */ | ||
451 | __le32 HostRRQ_AddrLow; | ||
452 | __le32 HostRRQ_AddrHigh; /* Host RRQ (response queue) for SRC */ | ||
434 | }; | 453 | }; |
435 | 454 | ||
436 | enum aac_log_level { | 455 | enum aac_log_level { |
@@ -685,7 +704,7 @@ struct rx_inbound { | |||
685 | #define OutboundDoorbellReg MUnit.ODR | 704 | #define OutboundDoorbellReg MUnit.ODR |
686 | 705 | ||
687 | struct rx_registers { | 706 | struct rx_registers { |
688 | struct rx_mu_registers MUnit; /* 1300h - 1344h */ | 707 | struct rx_mu_registers MUnit; /* 1300h - 1347h */ |
689 | __le32 reserved1[2]; /* 1348h - 134ch */ | 708 | __le32 reserved1[2]; /* 1348h - 134ch */ |
690 | struct rx_inbound IndexRegs; | 709 | struct rx_inbound IndexRegs; |
691 | }; | 710 | }; |
@@ -703,7 +722,7 @@ struct rx_registers { | |||
703 | #define rkt_inbound rx_inbound | 722 | #define rkt_inbound rx_inbound |
704 | 723 | ||
705 | struct rkt_registers { | 724 | struct rkt_registers { |
706 | struct rkt_mu_registers MUnit; /* 1300h - 1344h */ | 725 | struct rkt_mu_registers MUnit; /* 1300h - 1347h */ |
707 | __le32 reserved1[1006]; /* 1348h - 22fch */ | 726 | __le32 reserved1[1006]; /* 1348h - 22fch */ |
708 | struct rkt_inbound IndexRegs; /* 2300h - */ | 727 | struct rkt_inbound IndexRegs; /* 2300h - */ |
709 | }; | 728 | }; |
@@ -713,6 +732,44 @@ struct rkt_registers { | |||
713 | #define rkt_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rkt->CSR)) | 732 | #define rkt_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rkt->CSR)) |
714 | #define rkt_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rkt->CSR)) | 733 | #define rkt_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rkt->CSR)) |
715 | 734 | ||
735 | /* | ||
736 | * PMC SRC message unit registers | ||
737 | */ | ||
738 | |||
739 | #define src_inbound rx_inbound | ||
740 | |||
741 | struct src_mu_registers { | ||
742 | /* PCI*| Name */ | ||
743 | __le32 reserved0[8]; /* 00h | Reserved */ | ||
744 | __le32 IDR; /* 20h | Inbound Doorbell Register */ | ||
745 | __le32 IISR; /* 24h | Inbound Int. Status Register */ | ||
746 | __le32 reserved1[3]; /* 28h | Reserved */ | ||
747 | __le32 OIMR; /* 34h | Outbound Int. Mask Register */ | ||
748 | __le32 reserved2[25]; /* 38h | Reserved */ | ||
749 | __le32 ODR_R; /* 9ch | Outbound Doorbell Read */ | ||
750 | __le32 ODR_C; /* a0h | Outbound Doorbell Clear */ | ||
751 | __le32 reserved3[6]; /* a4h | Reserved */ | ||
752 | __le32 OMR; /* bch | Outbound Message Register */ | ||
753 | __le32 IQ_L; /* c0h | Inbound Queue (Low address) */ | ||
754 | __le32 IQ_H; /* c4h | Inbound Queue (High address) */ | ||
755 | }; | ||
756 | |||
757 | struct src_registers { | ||
758 | struct src_mu_registers MUnit; /* 00h - c7h */ | ||
759 | __le32 reserved1[130790]; /* c8h - 7fc5fh */ | ||
760 | struct src_inbound IndexRegs; /* 7fc60h */ | ||
761 | }; | ||
762 | |||
763 | #define src_readb(AEP, CSR) readb(&((AEP)->regs.src.bar0->CSR)) | ||
764 | #define src_readl(AEP, CSR) readl(&((AEP)->regs.src.bar0->CSR)) | ||
765 | #define src_writeb(AEP, CSR, value) writeb(value, \ | ||
766 | &((AEP)->regs.src.bar0->CSR)) | ||
767 | #define src_writel(AEP, CSR, value) writel(value, \ | ||
768 | &((AEP)->regs.src.bar0->CSR)) | ||
769 | |||
770 | #define SRC_ODR_SHIFT 12 | ||
771 | #define SRC_IDR_SHIFT 9 | ||
772 | |||
716 | typedef void (*fib_callback)(void *ctxt, struct fib *fibctx); | 773 | typedef void (*fib_callback)(void *ctxt, struct fib *fibctx); |
717 | 774 | ||
718 | struct aac_fib_context { | 775 | struct aac_fib_context { |
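The src_readl()/src_writel() accessors and shift constants above underpin the new SRC interrupt path added in src.c later in this diff. A short sketch of the read-then-clear doorbell idiom, assuming dev->regs.src.bar0 is already mapped; src_doorbell_example() is a hypothetical helper:

    /* Read the outbound doorbell (ODR_R), dispatch, then acknowledge the
     * bits that were seen via the write-one-to-clear register (ODR_C). */
    static void src_doorbell_example(struct aac_dev *dev)
    {
            u32 bellbits = src_readl(dev, MUnit.ODR_R);

            if (bellbits & PmDoorBellResponseSent) {
                    /* response queue (host RRQ) entries are pending */
            } else if ((bellbits >> SRC_ODR_SHIFT) & DoorBellAifPending) {
                    /* legacy doorbell meanings sit above SRC_ODR_SHIFT */
            }
            src_writel(dev, MUnit.ODR_C, bellbits);
    }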
@@ -879,6 +936,7 @@ struct aac_supplement_adapter_info | |||
879 | #define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001) | 936 | #define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001) |
880 | #define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002) | 937 | #define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002) |
881 | #define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004) | 938 | #define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004) |
939 | #define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000) | ||
882 | #define AAC_SIS_VERSION_V3 3 | 940 | #define AAC_SIS_VERSION_V3 3 |
883 | #define AAC_SIS_SLOT_UNKNOWN 0xFF | 941 | #define AAC_SIS_SLOT_UNKNOWN 0xFF |
884 | 942 | ||
@@ -940,6 +998,7 @@ struct aac_bus_info_response { | |||
940 | #define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16) | 998 | #define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16) |
941 | #define AAC_OPT_NEW_COMM cpu_to_le32(1<<17) | 999 | #define AAC_OPT_NEW_COMM cpu_to_le32(1<<17) |
942 | #define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18) | 1000 | #define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18) |
1001 | #define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28) | ||
943 | 1002 | ||
944 | struct aac_dev | 1003 | struct aac_dev |
945 | { | 1004 | { |
@@ -952,6 +1011,7 @@ struct aac_dev | |||
952 | */ | 1011 | */ |
953 | unsigned max_fib_size; | 1012 | unsigned max_fib_size; |
954 | unsigned sg_tablesize; | 1013 | unsigned sg_tablesize; |
1014 | unsigned max_num_aif; | ||
955 | 1015 | ||
956 | /* | 1016 | /* |
957 | * Map for 128 fib objects (64k) | 1017 | * Map for 128 fib objects (64k) |
@@ -980,10 +1040,21 @@ struct aac_dev | |||
980 | struct adapter_ops a_ops; | 1040 | struct adapter_ops a_ops; |
981 | unsigned long fsrev; /* Main driver's revision number */ | 1041 | unsigned long fsrev; /* Main driver's revision number */ |
982 | 1042 | ||
983 | unsigned base_size; /* Size of mapped in region */ | 1043 | unsigned long dbg_base; /* address of UART |
1044 | * debug buffer */ | ||
1045 | |||
1046 | unsigned base_size, dbg_size; /* Size of | ||
1047 | * mapped in region */ | ||
1048 | |||
984 | struct aac_init *init; /* Holds initialization info to communicate with adapter */ | 1049 | struct aac_init *init; /* Holds initialization info to communicate with adapter */ |
985 | dma_addr_t init_pa; /* Holds physical address of the init struct */ | 1050 | dma_addr_t init_pa; /* Holds physical address of the init struct */ |
986 | 1051 | ||
1052 | u32 *host_rrq; /* response queue | ||
1053 | * if AAC_COMM_MESSAGE_TYPE1 */ | ||
1054 | |||
1055 | dma_addr_t host_rrq_pa; /* phys. address */ | ||
1056 | u32 host_rrq_idx; /* index into rrq buffer */ | ||
1057 | |||
987 | struct pci_dev *pdev; /* Our PCI interface */ | 1058 | struct pci_dev *pdev; /* Our PCI interface */ |
988 | void * printfbuf; /* pointer to buffer used for printf's from the adapter */ | 1059 | void * printfbuf; /* pointer to buffer used for printf's from the adapter */ |
989 | void * comm_addr; /* Base address of Comm area */ | 1060 | void * comm_addr; /* Base address of Comm area */ |
@@ -1003,14 +1074,20 @@ struct aac_dev | |||
1003 | */ | 1074 | */ |
1004 | #ifndef AAC_MIN_FOOTPRINT_SIZE | 1075 | #ifndef AAC_MIN_FOOTPRINT_SIZE |
1005 | # define AAC_MIN_FOOTPRINT_SIZE 8192 | 1076 | # define AAC_MIN_FOOTPRINT_SIZE 8192 |
1077 | # define AAC_MIN_SRC_BAR0_SIZE 0x400000 | ||
1078 | # define AAC_MIN_SRC_BAR1_SIZE 0x800 | ||
1006 | #endif | 1079 | #endif |
1007 | union | 1080 | union |
1008 | { | 1081 | { |
1009 | struct sa_registers __iomem *sa; | 1082 | struct sa_registers __iomem *sa; |
1010 | struct rx_registers __iomem *rx; | 1083 | struct rx_registers __iomem *rx; |
1011 | struct rkt_registers __iomem *rkt; | 1084 | struct rkt_registers __iomem *rkt; |
1085 | struct { | ||
1086 | struct src_registers __iomem *bar0; | ||
1087 | char __iomem *bar1; | ||
1088 | } src; | ||
1012 | } regs; | 1089 | } regs; |
1013 | volatile void __iomem *base; | 1090 | volatile void __iomem *base, *dbg_base_mapped; |
1014 | volatile struct rx_inbound __iomem *IndexRegs; | 1091 | volatile struct rx_inbound __iomem *IndexRegs; |
1015 | u32 OIMR; /* Mask Register Cache */ | 1092 | u32 OIMR; /* Mask Register Cache */ |
1016 | /* | 1093 | /* |
@@ -1031,9 +1108,8 @@ struct aac_dev | |||
1031 | u8 comm_interface; | 1108 | u8 comm_interface; |
1032 | # define AAC_COMM_PRODUCER 0 | 1109 | # define AAC_COMM_PRODUCER 0 |
1033 | # define AAC_COMM_MESSAGE 1 | 1110 | # define AAC_COMM_MESSAGE 1 |
1034 | /* macro side-effects BEWARE */ | 1111 | # define AAC_COMM_MESSAGE_TYPE1 3 |
1035 | # define raw_io_interface \ | 1112 | u8 raw_io_interface; |
1036 | init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) | ||
1037 | u8 raw_io_64; | 1113 | u8 raw_io_64; |
1038 | u8 printf_enabled; | 1114 | u8 printf_enabled; |
1039 | u8 in_reset; | 1115 | u8 in_reset; |
@@ -1789,6 +1865,10 @@ extern struct aac_common aac_config; | |||
1789 | #define DoorBellAdapterNormCmdNotFull (1<<3) /* Adapter -> Host */ | 1865 | #define DoorBellAdapterNormCmdNotFull (1<<3) /* Adapter -> Host */ |
1790 | #define DoorBellAdapterNormRespNotFull (1<<4) /* Adapter -> Host */ | 1866 | #define DoorBellAdapterNormRespNotFull (1<<4) /* Adapter -> Host */ |
1791 | #define DoorBellPrintfReady (1<<5) /* Adapter -> Host */ | 1867 | #define DoorBellPrintfReady (1<<5) /* Adapter -> Host */ |
1868 | #define DoorBellAifPending (1<<6) /* Adapter -> Host */ | ||
1869 | |||
1870 | /* PMC specific outbound doorbell bits */ | ||
1871 | #define PmDoorBellResponseSent (1<<1) /* Adapter -> Host */ | ||
1792 | 1872 | ||
1793 | /* | 1873 | /* |
1794 | * For FIB communication, we need all of the following things | 1874 | * For FIB communication, we need all of the following things |
@@ -1831,6 +1911,9 @@ extern struct aac_common aac_config; | |||
1831 | #define AifReqAPIJobUpdate 109 /* Update a job report from the API */ | 1911 | #define AifReqAPIJobUpdate 109 /* Update a job report from the API */ |
1832 | #define AifReqAPIJobFinish 110 /* Finish a job from the API */ | 1912 | #define AifReqAPIJobFinish 110 /* Finish a job from the API */ |
1833 | 1913 | ||
1914 | /* PMC NEW COMM: Request the event data */ | ||
1915 | #define AifReqEvent 200 | ||
1916 | |||
1834 | /* | 1917 | /* |
1835 | * Adapter Initiated FIB command structures. Start with the adapter | 1918 | * Adapter Initiated FIB command structures. Start with the adapter |
1836 | * initiated FIBs that really come from the adapter, and get responded | 1919 | * initiated FIBs that really come from the adapter, and get responded |
@@ -1886,10 +1969,13 @@ int aac_rx_init(struct aac_dev *dev); | |||
1886 | int aac_rkt_init(struct aac_dev *dev); | 1969 | int aac_rkt_init(struct aac_dev *dev); |
1887 | int aac_nark_init(struct aac_dev *dev); | 1970 | int aac_nark_init(struct aac_dev *dev); |
1888 | int aac_sa_init(struct aac_dev *dev); | 1971 | int aac_sa_init(struct aac_dev *dev); |
1972 | int aac_src_init(struct aac_dev *dev); | ||
1889 | int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify); | 1973 | int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify); |
1890 | unsigned int aac_response_normal(struct aac_queue * q); | 1974 | unsigned int aac_response_normal(struct aac_queue * q); |
1891 | unsigned int aac_command_normal(struct aac_queue * q); | 1975 | unsigned int aac_command_normal(struct aac_queue * q); |
1892 | unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); | 1976 | unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index, |
1977 | int isAif, int isFastResponse, | ||
1978 | struct hw_fib *aif_fib); | ||
1893 | int aac_reset_adapter(struct aac_dev * dev, int forced); | 1979 | int aac_reset_adapter(struct aac_dev * dev, int forced); |
1894 | int aac_check_health(struct aac_dev * dev); | 1980 | int aac_check_health(struct aac_dev * dev); |
1895 | int aac_command_thread(void *data); | 1981 | int aac_command_thread(void *data); |
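The widened aac_intr_normal() prototype above makes explicit what the old interface packed into the low bits of a single index word. A sketch of the mapping, matching the decode visible in rx.c and dpcsup.c later in this diff:

    /* Old packed index (rx message interrupt)   New explicit arguments
     *   index & 2                 AIF       ->  isAif = 1
     *   index == 0xFFFFFFFE       special   ->  filtered by the caller
     *   index & 1                 fast resp ->  isFastResponse = 1
     *   index >> 2                fib slot  ->  index
     * aif_fib lets the SRC path hand in an already-copied hw_fib; the
     * old code always copied out of the shared sa register window. */
    unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
                                 int isAif, int isFastResponse,
                                 struct hw_fib *aif_fib);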
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 645ddd9d9b9..8a0b3303317 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c | |||
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index a7261486ccd..7ac8fdb5577 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c | |||
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -52,12 +53,16 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co | |||
52 | unsigned long size, align; | 53 | unsigned long size, align; |
53 | const unsigned long fibsize = 4096; | 54 | const unsigned long fibsize = 4096; |
54 | const unsigned long printfbufsiz = 256; | 55 | const unsigned long printfbufsiz = 256; |
56 | unsigned long host_rrq_size = 0; | ||
55 | struct aac_init *init; | 57 | struct aac_init *init; |
56 | dma_addr_t phys; | 58 | dma_addr_t phys; |
57 | unsigned long aac_max_hostphysmempages; | 59 | unsigned long aac_max_hostphysmempages; |
58 | 60 | ||
59 | size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz; | 61 | if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) |
60 | 62 | host_rrq_size = (dev->scsi_host_ptr->can_queue | |
63 | + AAC_NUM_MGT_FIB) * sizeof(u32); | ||
64 | size = fibsize + sizeof(struct aac_init) + commsize + | ||
65 | commalign + printfbufsiz + host_rrq_size; | ||
61 | 66 | ||
62 | base = pci_alloc_consistent(dev->pdev, size, &phys); | 67 | base = pci_alloc_consistent(dev->pdev, size, &phys); |
63 | 68 | ||
@@ -70,8 +75,14 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co | |||
70 | dev->comm_phys = phys; | 75 | dev->comm_phys = phys; |
71 | dev->comm_size = size; | 76 | dev->comm_size = size; |
72 | 77 | ||
73 | dev->init = (struct aac_init *)(base + fibsize); | 78 | if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { |
74 | dev->init_pa = phys + fibsize; | 79 | dev->host_rrq = (u32 *)(base + fibsize); |
80 | dev->host_rrq_pa = phys + fibsize; | ||
81 | memset(dev->host_rrq, 0, host_rrq_size); | ||
82 | } | ||
83 | |||
84 | dev->init = (struct aac_init *)(base + fibsize + host_rrq_size); | ||
85 | dev->init_pa = phys + fibsize + host_rrq_size; | ||
75 | 86 | ||
76 | init = dev->init; | 87 | init = dev->init; |
77 | 88 | ||
@@ -106,8 +117,13 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co | |||
106 | 117 | ||
107 | init->InitFlags = 0; | 118 | init->InitFlags = 0; |
108 | if (dev->comm_interface == AAC_COMM_MESSAGE) { | 119 | if (dev->comm_interface == AAC_COMM_MESSAGE) { |
109 | init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); | 120 | init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); |
110 | dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); | 121 | dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); |
122 | } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { | ||
123 | init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6); | ||
124 | init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_TYPE1_SUPPORTED); | ||
125 | dprintk((KERN_WARNING | ||
126 | "aacraid: New Comm Interface type1 enabled\n")); | ||
111 | } | 127 | } |
112 | init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | | 128 | init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | |
113 | INITFLAGS_DRIVER_SUPPORTS_PM); | 129 | INITFLAGS_DRIVER_SUPPORTS_PM); |
@@ -115,11 +131,18 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co | |||
115 | init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); | 131 | init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); |
116 | init->MaxFibSize = cpu_to_le32(dev->max_fib_size); | 132 | init->MaxFibSize = cpu_to_le32(dev->max_fib_size); |
117 | 133 | ||
134 | init->MaxNumAif = cpu_to_le32(dev->max_num_aif); | ||
135 | init->HostRRQ_AddrHigh = (u32)((u64)dev->host_rrq_pa >> 32); | ||
136 | init->HostRRQ_AddrLow = (u32)(dev->host_rrq_pa & 0xffffffff); | ||
137 | |||
138 | |||
118 | /* | 139 | /* |
119 | * Increment the base address by the amount already used | 140 | * Increment the base address by the amount already used |
120 | */ | 141 | */ |
121 | base = base + fibsize + sizeof(struct aac_init); | 142 | base = base + fibsize + host_rrq_size + sizeof(struct aac_init); |
122 | phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init)); | 143 | phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size + |
144 | sizeof(struct aac_init)); | ||
145 | |||
123 | /* | 146 | /* |
124 | * Align the beginning of Headers to commalign | 147 | * Align the beginning of Headers to commalign |
125 | */ | 148 | */ |
@@ -314,15 +337,22 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) | |||
314 | - sizeof(struct aac_write) + sizeof(struct sgentry)) | 337 | - sizeof(struct aac_write) + sizeof(struct sgentry)) |
315 | / sizeof(struct sgentry); | 338 | / sizeof(struct sgentry); |
316 | dev->comm_interface = AAC_COMM_PRODUCER; | 339 | dev->comm_interface = AAC_COMM_PRODUCER; |
317 | dev->raw_io_64 = 0; | 340 | dev->raw_io_interface = dev->raw_io_64 = 0; |
341 | |||
318 | if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, | 342 | if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, |
319 | 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && | 343 | 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && |
320 | (status[0] == 0x00000001)) { | 344 | (status[0] == 0x00000001)) { |
321 | if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64)) | 345 | if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64)) |
322 | dev->raw_io_64 = 1; | 346 | dev->raw_io_64 = 1; |
323 | if (dev->a_ops.adapter_comm && | 347 | if (dev->a_ops.adapter_comm) { |
324 | (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) | 348 | if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1)) { |
325 | dev->comm_interface = AAC_COMM_MESSAGE; | 349 | dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; |
350 | dev->raw_io_interface = 1; | ||
351 | } else if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)) { | ||
352 | dev->comm_interface = AAC_COMM_MESSAGE; | ||
353 | dev->raw_io_interface = 1; | ||
354 | } | ||
355 | } | ||
326 | if ((dev->comm_interface == AAC_COMM_MESSAGE) && | 356 | if ((dev->comm_interface == AAC_COMM_MESSAGE) && |
327 | (status[2] > dev->base_size)) { | 357 | (status[2] > dev->base_size)) { |
328 | aac_adapter_ioremap(dev, 0); | 358 | aac_adapter_ioremap(dev, 0); |
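The GET_ADAPTER_PROPERTIES hunk above tests the new TYPE1 bit before the old message-interface bit, and both paths now set the explicit raw_io_interface flag that replaces the old raw_io_interface macro (flagged in aacraid.h as having side effects). The option bits involved, per the definitions earlier in this diff:

    /*
     * status[1] option bits consulted during interface selection:
     *   AAC_OPT_NEW_COMM        = 1 << 17   message interface (rx/rkt)
     *   AAC_OPT_NEW_COMM_64     = 1 << 18   64-bit raw I/O
     *   AAC_OPT_NEW_COMM_TYPE1  = 1 << 28   PMC SRC RRQ interface (new)
     * TYPE1 is checked first, so a controller advertising both lands on
     * AAC_COMM_MESSAGE_TYPE1.
     */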
@@ -350,10 +380,12 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) | |||
350 | * status[3] & 0xFFFF maximum number FIBs outstanding | 380 | * status[3] & 0xFFFF maximum number FIBs outstanding |
351 | */ | 381 | */ |
352 | host->max_sectors = (status[1] >> 16) << 1; | 382 | host->max_sectors = (status[1] >> 16) << 1; |
353 | dev->max_fib_size = status[1] & 0xFFFF; | 383 | /* Multiple of 32 for PMC */ |
384 | dev->max_fib_size = status[1] & 0xFFE0; | ||
354 | host->sg_tablesize = status[2] >> 16; | 385 | host->sg_tablesize = status[2] >> 16; |
355 | dev->sg_tablesize = status[2] & 0xFFFF; | 386 | dev->sg_tablesize = status[2] & 0xFFFF; |
356 | host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB; | 387 | host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB; |
388 | dev->max_num_aif = status[4] & 0xFFFF; | ||
357 | /* | 389 | /* |
358 | * NOTE: | 390 | * NOTE: |
359 | * All these overrides are based on a fixed internal | 391 | * All these overrides are based on a fixed internal |
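One subtle change in the hunk above: max_fib_size is now masked with 0xFFE0 rather than 0xFFFF, rounding the adapter-reported FIB size down to a multiple of 32 to satisfy the PMC alignment rules. A worked example:

    /* Suppose the adapter reports status[1] & 0xFFFF == 0x1234 (4660).
     *   old: 0x1234 & 0xFFFF = 0x1234        arbitrary size kept
     *   new: 0x1234 & 0xFFE0 = 0x1220        4640 = 145 * 32
     */
    dev->max_fib_size = status[1] & 0xFFE0; /* multiple of 32 for PMC */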
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 060ac4bd5a1..dd7ad3ba2da 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -63,9 +64,11 @@ static int fib_map_alloc(struct aac_dev *dev) | |||
63 | "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n", | 64 | "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n", |
64 | dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue, | 65 | dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue, |
65 | AAC_NUM_MGT_FIB, &dev->hw_fib_pa)); | 66 | AAC_NUM_MGT_FIB, &dev->hw_fib_pa)); |
66 | if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size | 67 | dev->hw_fib_va = pci_alloc_consistent(dev->pdev, |
67 | * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), | 68 | (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) |
68 | &dev->hw_fib_pa))==NULL) | 69 | * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1), |
70 | &dev->hw_fib_pa); | ||
71 | if (dev->hw_fib_va == NULL) | ||
69 | return -ENOMEM; | 72 | return -ENOMEM; |
70 | return 0; | 73 | return 0; |
71 | } | 74 | } |
@@ -110,9 +113,22 @@ int aac_fib_setup(struct aac_dev * dev) | |||
110 | if (i<0) | 113 | if (i<0) |
111 | return -ENOMEM; | 114 | return -ENOMEM; |
112 | 115 | ||
116 | /* 32 byte alignment for PMC */ | ||
117 | hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1); | ||
118 | dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + | ||
119 | (hw_fib_pa - dev->hw_fib_pa)); | ||
120 | dev->hw_fib_pa = hw_fib_pa; | ||
121 | memset(dev->hw_fib_va, 0, | ||
122 | (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) * | ||
123 | (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); | ||
124 | |||
125 | /* add Xport header */ | ||
126 | dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + | ||
127 | sizeof(struct aac_fib_xporthdr)); | ||
128 | dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr); | ||
129 | |||
113 | hw_fib = dev->hw_fib_va; | 130 | hw_fib = dev->hw_fib_va; |
114 | hw_fib_pa = dev->hw_fib_pa; | 131 | hw_fib_pa = dev->hw_fib_pa; |
115 | memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); | ||
116 | /* | 132 | /* |
117 | * Initialise the fibs | 133 | * Initialise the fibs |
118 | */ | 134 | */ |
@@ -129,8 +145,10 @@ int aac_fib_setup(struct aac_dev * dev) | |||
129 | hw_fib->header.XferState = cpu_to_le32(0xffffffff); | 145 | hw_fib->header.XferState = cpu_to_le32(0xffffffff); |
130 | hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); | 146 | hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); |
131 | fibptr->hw_fib_pa = hw_fib_pa; | 147 | fibptr->hw_fib_pa = hw_fib_pa; |
132 | hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size); | 148 | hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + |
133 | hw_fib_pa = hw_fib_pa + dev->max_fib_size; | 149 | dev->max_fib_size + sizeof(struct aac_fib_xporthdr)); |
150 | hw_fib_pa = hw_fib_pa + | ||
151 | dev->max_fib_size + sizeof(struct aac_fib_xporthdr); | ||
134 | } | 152 | } |
135 | /* | 153 | /* |
136 | * Add the fib chain to the free list | 154 | * Add the fib chain to the free list |
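The fib_map_alloc()/aac_fib_setup() hunks above grow the FIB pool: each slot gains a struct aac_fib_xporthdr and the pool base is forced to 32-byte alignment. A sketch of the resulting pool, following the arithmetic in the hunks:

    /*
     * FIB pool after this patch (ALIGN32 = 32, defined earlier):
     *
     *   pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
     *
     *   pa -> [ xporthdr | hw_fib ][ xporthdr | hw_fib ] ...
     *         slot stride = dev->max_fib_size + sizeof(struct aac_fib_xporthdr)
     *
     * dev->hw_fib_va/hw_fib_pa are then advanced past the first header,
     * so the rest of the driver still sees a plain hw_fib at each slot.
     */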
@@ -664,9 +682,14 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) | |||
664 | unsigned long nointr = 0; | 682 | unsigned long nointr = 0; |
665 | unsigned long qflags; | 683 | unsigned long qflags; |
666 | 684 | ||
685 | if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { | ||
686 | kfree(hw_fib); | ||
687 | return 0; | ||
688 | } | ||
689 | |||
667 | if (hw_fib->header.XferState == 0) { | 690 | if (hw_fib->header.XferState == 0) { |
668 | if (dev->comm_interface == AAC_COMM_MESSAGE) | 691 | if (dev->comm_interface == AAC_COMM_MESSAGE) |
669 | kfree (hw_fib); | 692 | kfree(hw_fib); |
670 | return 0; | 693 | return 0; |
671 | } | 694 | } |
672 | /* | 695 | /* |
@@ -674,7 +697,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) | |||
674 | */ | 697 | */ |
675 | if (hw_fib->header.StructType != FIB_MAGIC) { | 698 | if (hw_fib->header.StructType != FIB_MAGIC) { |
676 | if (dev->comm_interface == AAC_COMM_MESSAGE) | 699 | if (dev->comm_interface == AAC_COMM_MESSAGE) |
677 | kfree (hw_fib); | 700 | kfree(hw_fib); |
678 | return -EINVAL; | 701 | return -EINVAL; |
679 | } | 702 | } |
680 | /* | 703 | /* |
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index 9c7408fe8c7..f0c66a80ad1 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c | |||
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -228,6 +229,48 @@ unsigned int aac_command_normal(struct aac_queue *q) | |||
228 | return 0; | 229 | return 0; |
229 | } | 230 | } |
230 | 231 | ||
232 | /* | ||
233 | * | ||
234 | * aac_aif_callback | ||
235 | * @context: the context set in the fib - here it is scsi cmd | ||
236 | * @fibptr: pointer to the fib | ||
237 | * | ||
238 | * Handles the AIFs - new method (SRC) | ||
239 | * | ||
240 | */ | ||
241 | |||
242 | static void aac_aif_callback(void *context, struct fib * fibptr) | ||
243 | { | ||
244 | struct fib *fibctx; | ||
245 | struct aac_dev *dev; | ||
246 | struct aac_aifcmd *cmd; | ||
247 | int status; | ||
248 | |||
249 | fibctx = (struct fib *)context; | ||
250 | BUG_ON(fibptr == NULL); | ||
251 | dev = fibptr->dev; | ||
252 | |||
253 | if (fibptr->hw_fib_va->header.XferState & | ||
254 | cpu_to_le32(NoMoreAifDataAvailable)) { | ||
255 | aac_fib_complete(fibptr); | ||
256 | aac_fib_free(fibptr); | ||
257 | return; | ||
258 | } | ||
259 | |||
260 | aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va); | ||
261 | |||
262 | aac_fib_init(fibctx); | ||
263 | cmd = (struct aac_aifcmd *) fib_data(fibctx); | ||
264 | cmd->command = cpu_to_le32(AifReqEvent); | ||
265 | |||
266 | status = aac_fib_send(AifRequest, | ||
267 | fibctx, | ||
268 | sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), | ||
269 | FsaNormal, | ||
270 | 0, 1, | ||
271 | (fib_callback)aac_aif_callback, fibctx); | ||
272 | } | ||
273 | |||
231 | 274 | ||
232 | /** | 275 | /** |
233 | * aac_intr_normal - Handle command replies | 276 | * aac_intr_normal - Handle command replies |
@@ -238,19 +281,17 @@ unsigned int aac_command_normal(struct aac_queue *q) | |||
238 | * know there is a response on our normal priority queue. We will pull off | 281 | * know there is a response on our normal priority queue. We will pull off |
239 | * all QE there are and wake up all the waiters before exiting. | 282 | * all QE there are and wake up all the waiters before exiting. |
240 | */ | 283 | */ |
241 | 284 | unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, | |
242 | unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) | 285 | int isAif, int isFastResponse, struct hw_fib *aif_fib) |
243 | { | 286 | { |
244 | unsigned long mflags; | 287 | unsigned long mflags; |
245 | dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); | 288 | dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); |
246 | if ((index & 0x00000002L)) { | 289 | if (isAif == 1) { /* AIF - common */ |
247 | struct hw_fib * hw_fib; | 290 | struct hw_fib * hw_fib; |
248 | struct fib * fib; | 291 | struct fib * fib; |
249 | struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; | 292 | struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; |
250 | unsigned long flags; | 293 | unsigned long flags; |
251 | 294 | ||
252 | if (index == 0xFFFFFFFEL) /* Special Case */ | ||
253 | return 0; /* Do nothing */ | ||
254 | /* | 295 | /* |
255 | * Allocate a FIB. For non queued stuff we can just use | 296 | * Allocate a FIB. For non queued stuff we can just use |
256 | * the stack so we are happy. We need a fib object in order to | 297 | * the stack so we are happy. We need a fib object in order to |
@@ -263,8 +304,13 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) | |||
263 | kfree (fib); | 304 | kfree (fib); |
264 | return 1; | 305 | return 1; |
265 | } | 306 | } |
266 | memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + | 307 | if (aif_fib != NULL) { |
267 | (index & ~0x00000002L)), sizeof(struct hw_fib)); | 308 | memcpy(hw_fib, aif_fib, sizeof(struct hw_fib)); |
309 | } else { | ||
310 | memcpy(hw_fib, | ||
311 | (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + | ||
312 | index), sizeof(struct hw_fib)); | ||
313 | } | ||
268 | INIT_LIST_HEAD(&fib->fiblink); | 314 | INIT_LIST_HEAD(&fib->fiblink); |
269 | fib->type = FSAFS_NTC_FIB_CONTEXT; | 315 | fib->type = FSAFS_NTC_FIB_CONTEXT; |
270 | fib->size = sizeof(struct fib); | 316 | fib->size = sizeof(struct fib); |
@@ -277,9 +323,26 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) | |||
277 | wake_up_interruptible(&q->cmdready); | 323 | wake_up_interruptible(&q->cmdready); |
278 | spin_unlock_irqrestore(q->lock, flags); | 324 | spin_unlock_irqrestore(q->lock, flags); |
279 | return 1; | 325 | return 1; |
326 | } else if (isAif == 2) { /* AIF - new (SRC) */ | ||
327 | struct fib *fibctx; | ||
328 | struct aac_aifcmd *cmd; | ||
329 | |||
330 | fibctx = aac_fib_alloc(dev); | ||
331 | if (!fibctx) | ||
332 | return 1; | ||
333 | aac_fib_init(fibctx); | ||
334 | |||
335 | cmd = (struct aac_aifcmd *) fib_data(fibctx); | ||
336 | cmd->command = cpu_to_le32(AifReqEvent); | ||
337 | |||
338 | return aac_fib_send(AifRequest, | ||
339 | fibctx, | ||
340 | sizeof(struct hw_fib)-sizeof(struct aac_fibhdr), | ||
341 | FsaNormal, | ||
342 | 0, 1, | ||
343 | (fib_callback)aac_aif_callback, fibctx); | ||
280 | } else { | 344 | } else { |
281 | int fast = index & 0x01; | 345 | struct fib *fib = &dev->fibs[index]; |
282 | struct fib * fib = &dev->fibs[index >> 2]; | ||
283 | struct hw_fib * hwfib = fib->hw_fib_va; | 346 | struct hw_fib * hwfib = fib->hw_fib_va; |
284 | 347 | ||
285 | /* | 348 | /* |
@@ -298,7 +361,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) | |||
298 | return 0; | 361 | return 0; |
299 | } | 362 | } |
300 | 363 | ||
301 | if (fast) { | 364 | if (isFastResponse) { |
302 | /* | 365 | /* |
303 | * Doctor the fib | 366 | * Doctor the fib |
304 | */ | 367 | */ |
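On SRC controllers AIFs are no longer pulled from a shared-memory queue: the isAif == 2 branch above starts a request cycle in which the host sends an AifRequest FIB and aac_aif_callback() re-arms it until the adapter flags NoMoreAifDataAvailable. A condensed view of that cycle, following the code above:

    /*
     * doorbell: DoorBellAifPending
     *   -> aac_intr_normal(dev, 0, 2, 0, NULL)          start the cycle
     *      -> aac_fib_send(AifRequest, ..., aac_aif_callback)
     *         -> adapter completes the FIB with AIF payload
     *            -> aac_aif_callback():
     *                 XferState has NoMoreAifDataAvailable?  stop.
     *                 else deliver via aac_intr_normal(dev, 0, 1, 0,
     *                 fibptr->hw_fib_va) and resend the AifRequest.
     */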
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 2c93d9496d6..4ff26521d75 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -54,7 +55,7 @@ | |||
54 | 55 | ||
55 | #include "aacraid.h" | 56 | #include "aacraid.h" |
56 | 57 | ||
57 | #define AAC_DRIVER_VERSION "1.1-5" | 58 | #define AAC_DRIVER_VERSION "1.1-7" |
58 | #ifndef AAC_DRIVER_BRANCH | 59 | #ifndef AAC_DRIVER_BRANCH |
59 | #define AAC_DRIVER_BRANCH "" | 60 | #define AAC_DRIVER_BRANCH "" |
60 | #endif | 61 | #endif |
@@ -161,6 +162,7 @@ static const struct pci_device_id aac_pci_tbl[] __devinitdata = { | |||
161 | { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */ | 162 | { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */ |
162 | { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */ | 163 | { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */ |
163 | { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */ | 164 | { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */ |
165 | { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Catch All */ | ||
164 | { 0,} | 166 | { 0,} |
165 | }; | 167 | }; |
166 | MODULE_DEVICE_TABLE(pci, aac_pci_tbl); | 168 | MODULE_DEVICE_TABLE(pci, aac_pci_tbl); |
@@ -235,7 +237,8 @@ static struct aac_driver_ident aac_drivers[] = { | |||
235 | { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */ | 237 | { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */ |
236 | { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ | 238 | { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ |
237 | { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ | 239 | { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ |
238 | { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */ | 240 | { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ |
241 | { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Catch All */ | ||
239 | }; | 242 | }; |
240 | 243 | ||
241 | /** | 244 | /** |
@@ -653,8 +656,10 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) | |||
653 | * This adapter needs a blind reset, only do so for Adapters that | 656 | * This adapter needs a blind reset, only do so for Adapters that |
654 | * support a register, instead of a commanded, reset. | 657 | * support a register, instead of a commanded, reset. |
655 | */ | 658 | */ |
656 | if ((aac->supplement_adapter_info.SupportedOptions2 & | 659 | if (((aac->supplement_adapter_info.SupportedOptions2 & |
657 | AAC_OPTION_MU_RESET) && | 660 | AAC_OPTION_MU_RESET) || |
661 | (aac->supplement_adapter_info.SupportedOptions2 & | ||
662 | AAC_OPTION_DOORBELL_RESET)) && | ||
658 | aac_check_reset && | 663 | aac_check_reset && |
659 | ((aac_check_reset != 1) || | 664 | ((aac_check_reset != 1) || |
660 | !(aac->supplement_adapter_info.SupportedOptions2 & | 665 | !(aac->supplement_adapter_info.SupportedOptions2 & |
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c index c55f7c862f0..f397d21a0c0 100644 --- a/drivers/scsi/aacraid/nark.c +++ b/drivers/scsi/aacraid/nark.c | |||
@@ -4,7 +4,8 @@ | |||
4 | * based on the old aacraid driver that is.. | 4 | * based on the old aacraid driver that is.. |
5 | * Adaptec aacraid device driver for Linux. | 5 | * Adaptec aacraid device driver for Linux. |
6 | * | 6 | * |
7 | * Copyright (c) 2006-2007 Adaptec, Inc. (aacraid@adaptec.com) | 7 | * Copyright (c) 2000-2010 Adaptec, Inc. |
8 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License as published by | 11 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c index 16d8db55002..be44de92429 100644 --- a/drivers/scsi/aacraid/rkt.c +++ b/drivers/scsi/aacraid/rkt.c | |||
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index 84d77fd86e5..ce530f113fd 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c | |||
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -84,15 +85,35 @@ static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id) | |||
84 | 85 | ||
85 | static irqreturn_t aac_rx_intr_message(int irq, void *dev_id) | 86 | static irqreturn_t aac_rx_intr_message(int irq, void *dev_id) |
86 | { | 87 | { |
88 | int isAif, isFastResponse, isSpecial; | ||
87 | struct aac_dev *dev = dev_id; | 89 | struct aac_dev *dev = dev_id; |
88 | u32 Index = rx_readl(dev, MUnit.OutboundQueue); | 90 | u32 Index = rx_readl(dev, MUnit.OutboundQueue); |
89 | if (unlikely(Index == 0xFFFFFFFFL)) | 91 | if (unlikely(Index == 0xFFFFFFFFL)) |
90 | Index = rx_readl(dev, MUnit.OutboundQueue); | 92 | Index = rx_readl(dev, MUnit.OutboundQueue); |
91 | if (likely(Index != 0xFFFFFFFFL)) { | 93 | if (likely(Index != 0xFFFFFFFFL)) { |
92 | do { | 94 | do { |
93 | if (unlikely(aac_intr_normal(dev, Index))) { | 95 | isAif = isFastResponse = isSpecial = 0; |
94 | rx_writel(dev, MUnit.OutboundQueue, Index); | 96 | if (Index & 0x00000002L) { |
95 | rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); | 97 | isAif = 1; |
98 | if (Index == 0xFFFFFFFEL) | ||
99 | isSpecial = 1; | ||
100 | Index &= ~0x00000002L; | ||
101 | } else { | ||
102 | if (Index & 0x00000001L) | ||
103 | isFastResponse = 1; | ||
104 | Index >>= 2; | ||
105 | } | ||
106 | if (!isSpecial) { | ||
107 | if (unlikely(aac_intr_normal(dev, | ||
108 | Index, isAif, | ||
109 | isFastResponse, NULL))) { | ||
110 | rx_writel(dev, | ||
111 | MUnit.OutboundQueue, | ||
112 | Index); | ||
113 | rx_writel(dev, | ||
114 | MUnit.ODR, | ||
115 | DoorBellAdapterNormRespReady); | ||
116 | } | ||
96 | } | 117 | } |
97 | Index = rx_readl(dev, MUnit.OutboundQueue); | 118 | Index = rx_readl(dev, MUnit.OutboundQueue); |
98 | } while (Index != 0xFFFFFFFFL); | 119 | } while (Index != 0xFFFFFFFFL); |
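The rewritten rx message interrupt above now decodes the outbound-queue word explicitly before calling the widened aac_intr_normal(). The bit layout it assumes:

    /*
     * Outbound queue word, as decoded by aac_rx_intr_message():
     *   bit 1 set          -> AIF (isAif = 1)
     *     value 0xFFFFFFFE -> special marker: acknowledged, not delivered
     *   bit 1 clear:
     *     bit 0 set        -> fast response (isFastResponse = 1)
     *     value >> 2       -> index into dev->fibs[]
     */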
@@ -631,6 +652,10 @@ int _aac_rx_init(struct aac_dev *dev) | |||
631 | name, instance); | 652 | name, instance); |
632 | goto error_iounmap; | 653 | goto error_iounmap; |
633 | } | 654 | } |
655 | dev->dbg_base = dev->scsi_host_ptr->base; | ||
656 | dev->dbg_base_mapped = dev->base; | ||
657 | dev->dbg_size = dev->base_size; | ||
658 | |||
634 | aac_adapter_enable_int(dev); | 659 | aac_adapter_enable_int(dev); |
635 | /* | 660 | /* |
636 | * Tell the adapter that all is configured, and it can | 661 | * Tell the adapter that all is configured, and it can |
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c index 622c21c68e6..e5d4457121e 100644 --- a/drivers/scsi/aacraid/sa.c +++ b/drivers/scsi/aacraid/sa.c | |||
@@ -5,7 +5,8 @@ | |||
5 | * based on the old aacraid driver that is.. | 5 | * based on the old aacraid driver that is.. |
6 | * Adaptec aacraid device driver for Linux. | 6 | * Adaptec aacraid device driver for Linux. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) | 8 | * Copyright (c) 2000-2010 Adaptec, Inc. |
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -391,6 +392,10 @@ int aac_sa_init(struct aac_dev *dev) | |||
391 | name, instance); | 392 | name, instance); |
392 | goto error_iounmap; | 393 | goto error_iounmap; |
393 | } | 394 | } |
395 | dev->dbg_base = dev->scsi_host_ptr->base; | ||
396 | dev->dbg_base_mapped = dev->base; | ||
397 | dev->dbg_size = dev->base_size; | ||
398 | |||
394 | aac_adapter_enable_int(dev); | 399 | aac_adapter_enable_int(dev); |
395 | 400 | ||
396 | /* | 401 | /* |
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c new file mode 100644 index 00000000000..c2049466060 --- /dev/null +++ b/drivers/scsi/aacraid/src.c | |||
@@ -0,0 +1,594 @@ | |||
1 | /* | ||
2 | * Adaptec AAC series RAID controller driver | ||
3 | * (c) Copyright 2001 Red Hat Inc. | ||
4 | * | ||
5 | * based on the old aacraid driver that is.. | ||
6 | * Adaptec aacraid device driver for Linux. | ||
7 | * | ||
8 | * Copyright (c) 2000-2010 Adaptec, Inc. | ||
9 | * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2, or (at your option) | ||
14 | * any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; see the file COPYING. If not, write to | ||
23 | * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | * | ||
25 | * Module Name: | ||
26 | * src.c | ||
27 | * | ||
28 | * Abstract: Hardware Device Interface for PMC SRC based controllers | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/types.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/slab.h> | ||
38 | #include <linux/blkdev.h> | ||
39 | #include <linux/delay.h> | ||
40 | #include <linux/version.h> | ||
41 | #include <linux/completion.h> | ||
42 | #include <linux/time.h> | ||
43 | #include <linux/interrupt.h> | ||
44 | #include <scsi/scsi_host.h> | ||
45 | |||
46 | #include "aacraid.h" | ||
47 | |||
48 | static irqreturn_t aac_src_intr_message(int irq, void *dev_id) | ||
49 | { | ||
50 | struct aac_dev *dev = dev_id; | ||
51 | unsigned long bellbits, bellbits_shifted; | ||
52 | int our_interrupt = 0; | ||
53 | int isFastResponse; | ||
54 | u32 index, handle; | ||
55 | |||
56 | bellbits = src_readl(dev, MUnit.ODR_R); | ||
57 | if (bellbits & PmDoorBellResponseSent) { | ||
58 | bellbits = PmDoorBellResponseSent; | ||
59 | /* handle async. status */ | ||
60 | our_interrupt = 1; | ||
61 | index = dev->host_rrq_idx; | ||
62 | if (dev->host_rrq[index] == 0) { | ||
63 | u32 old_index = index; | ||
64 | /* adjust index */ | ||
65 | do { | ||
66 | index++; | ||
67 | if (index == dev->scsi_host_ptr->can_queue + | ||
68 | AAC_NUM_MGT_FIB) | ||
69 | index = 0; | ||
70 | if (dev->host_rrq[index] != 0) | ||
71 | break; | ||
72 | } while (index != old_index); | ||
73 | dev->host_rrq_idx = index; | ||
74 | } | ||
75 | for (;;) { | ||
76 | isFastResponse = 0; | ||
77 | /* remove toggle bit (31) */ | ||
78 | handle = (dev->host_rrq[index] & 0x7fffffff); | ||
79 | /* check fast response bit (30) */ | ||
80 | if (handle & 0x40000000) | ||
81 | isFastResponse = 1; | ||
82 | handle &= 0x0000ffff; | ||
83 | if (handle == 0) | ||
84 | break; | ||
85 | |||
86 | aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL); | ||
87 | |||
88 | dev->host_rrq[index++] = 0; | ||
89 | if (index == dev->scsi_host_ptr->can_queue + | ||
90 | AAC_NUM_MGT_FIB) | ||
91 | index = 0; | ||
92 | dev->host_rrq_idx = index; | ||
93 | } | ||
94 | } else { | ||
95 | bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); | ||
96 | if (bellbits_shifted & DoorBellAifPending) { | ||
97 | our_interrupt = 1; | ||
98 | /* handle AIF */ | ||
99 | aac_intr_normal(dev, 0, 2, 0, NULL); | ||
100 | } | ||
101 | } | ||
102 | |||
103 | if (our_interrupt) { | ||
104 | src_writel(dev, MUnit.ODR_C, bellbits); | ||
105 | return IRQ_HANDLED; | ||
106 | } | ||
107 | return IRQ_NONE; | ||
108 | } | ||
109 | |||
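The response path above walks the host RRQ ring, decoding each 32-bit entry in place. The entry layout, inferred from the masks used in aac_src_intr_message and restated as an illustrative helper:

#include <linux/types.h>

/*
 * host_rrq entry layout (as consumed above):
 *   bit 31      toggle bit, always stripped
 *   bit 30      fast-response flag
 *   bits 15..0  FIB handle biased by one, so 0 marks an empty slot
 */
static int src_decode_rrq_entry(u32 entry, u32 *fib_index, int *isFastResponse)
{
	u32 handle = entry & 0x7fffffff;	/* remove toggle bit (31) */

	*isFastResponse = (handle & 0x40000000) ? 1 : 0;
	handle &= 0x0000ffff;
	if (handle == 0)
		return 0;			/* nothing queued in this slot */
	*fib_index = handle - 1;		/* undo the +1 bias */
	return 1;
}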
110 | /** | ||
111 | * aac_src_disable_interrupt - Disable interrupts | ||
112 | * @dev: Adapter | ||
113 | */ | ||
114 | |||
115 | static void aac_src_disable_interrupt(struct aac_dev *dev) | ||
116 | { | ||
117 | src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); | ||
118 | } | ||
119 | |||
120 | /** | ||
121 | * aac_src_enable_interrupt_message - Enable interrupts | ||
122 | * @dev: Adapter | ||
123 | */ | ||
124 | |||
125 | static void aac_src_enable_interrupt_message(struct aac_dev *dev) | ||
126 | { | ||
127 | src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8); | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * src_sync_cmd - send a command and wait | ||
132 | * @dev: Adapter | ||
133 | * @command: Command to execute | ||
134 | * @p1: first parameter | ||
135 | * @ret: adapter status | ||
136 | * | ||
137 | * This routine will send a synchronous command to the adapter and wait | ||
138 | * for its completion. | ||
139 | */ | ||
140 | |||
141 | static int src_sync_cmd(struct aac_dev *dev, u32 command, | ||
142 | u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, | ||
143 | u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) | ||
144 | { | ||
145 | unsigned long start; | ||
146 | int ok; | ||
147 | |||
148 | /* | ||
149 | * Write the command into Mailbox 0 | ||
150 | */ | ||
151 | writel(command, &dev->IndexRegs->Mailbox[0]); | ||
152 | /* | ||
153 | * Write the parameters into Mailboxes 1 - 6 | ||
154 | */ | ||
155 | writel(p1, &dev->IndexRegs->Mailbox[1]); | ||
156 | writel(p2, &dev->IndexRegs->Mailbox[2]); | ||
157 | writel(p3, &dev->IndexRegs->Mailbox[3]); | ||
158 | writel(p4, &dev->IndexRegs->Mailbox[4]); | ||
159 | |||
160 | /* | ||
161 | * Clear the synch command doorbell to start on a clean slate. | ||
162 | */ | ||
163 | src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); | ||
164 | |||
165 | /* | ||
166 | * Disable doorbell interrupts | ||
167 | */ | ||
168 | src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff); | ||
169 | |||
170 | /* | ||
171 | * Force the completion of the mask register write before issuing | ||
172 | * the interrupt. | ||
173 | */ | ||
174 | src_readl(dev, MUnit.OIMR); | ||
175 | |||
176 | /* | ||
177 | * Signal that there is a new synch command | ||
178 | */ | ||
179 | src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT); | ||
180 | |||
181 | ok = 0; | ||
182 | start = jiffies; | ||
183 | |||
184 | /* | ||
185 | * Wait up to 30 seconds | ||
186 | */ | ||
187 | while (time_before(jiffies, start+30*HZ)) { | ||
188 | /* Delay 5 microseconds to let Mon960 get info. */ | ||
189 | udelay(5); | ||
190 | |||
191 | /* Mon960 will set doorbell0 bit | ||
192 | * when it has completed the command | ||
193 | */ | ||
194 | if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) { | ||
195 | /* Clear the doorbell */ | ||
196 | src_writel(dev, | ||
197 | MUnit.ODR_C, | ||
198 | OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); | ||
199 | ok = 1; | ||
200 | break; | ||
201 | } | ||
202 | |||
203 | /* Yield the processor in case we are slow */ | ||
204 | msleep(1); | ||
205 | } | ||
206 | if (unlikely(ok != 1)) { | ||
207 | /* Restore interrupt mask even though we timed out */ | ||
208 | aac_adapter_enable_int(dev); | ||
209 | return -ETIMEDOUT; | ||
210 | } | ||
211 | |||
212 | /* Pull the synch status from Mailbox 0 */ | ||
213 | if (status) | ||
214 | *status = readl(&dev->IndexRegs->Mailbox[0]); | ||
215 | if (r1) | ||
216 | *r1 = readl(&dev->IndexRegs->Mailbox[1]); | ||
217 | if (r2) | ||
218 | *r2 = readl(&dev->IndexRegs->Mailbox[2]); | ||
219 | if (r3) | ||
220 | *r3 = readl(&dev->IndexRegs->Mailbox[3]); | ||
221 | if (r4) | ||
222 | *r4 = readl(&dev->IndexRegs->Mailbox[4]); | ||
223 | |||
224 | /* Clear the synch command doorbell */ | ||
225 | src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); | ||
226 | |||
227 | /* Restore interrupt mask */ | ||
228 | aac_adapter_enable_int(dev); | ||
229 | return 0; | ||
230 | |||
231 | } | ||
232 | |||
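The 30-second wait in src_sync_cmd is the standard jiffies polling idiom: record a start time, re-read the doorbell, and sleep briefly between reads. The same loop separated from the mailbox details, as a generic sketch rather than driver code:

static int src_wait_doorbell(struct aac_dev *dev, u32 bit,
			     unsigned long timeout)	/* timeout in jiffies */
{
	unsigned long start = jiffies;

	while (time_before(jiffies, start + timeout)) {
		udelay(5);		/* give firmware time to post status */
		if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & bit) {
			/* acknowledge the doorbell before returning */
			src_writel(dev, MUnit.ODR_C, bit << SRC_ODR_SHIFT);
			return 0;
		}
		msleep(1);		/* yield the processor while waiting */
	}
	return -ETIMEDOUT;
}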
233 | /** | ||
234 | * aac_src_interrupt_adapter - interrupt adapter | ||
235 | * @dev: Adapter | ||
236 | * | ||
237 | * Send an interrupt to the i960 and breakpoint it. | ||
238 | */ | ||
239 | |||
240 | static void aac_src_interrupt_adapter(struct aac_dev *dev) | ||
241 | { | ||
242 | src_sync_cmd(dev, BREAKPOINT_REQUEST, | ||
243 | 0, 0, 0, 0, 0, 0, | ||
244 | NULL, NULL, NULL, NULL, NULL); | ||
245 | } | ||
246 | |||
247 | /** | ||
248 | * aac_src_notify_adapter - send an event to the adapter | ||
249 | * @dev: Adapter | ||
250 | * @event: Event to send | ||
251 | * | ||
252 | * Notify the i960 that something it probably cares about has | ||
253 | * happened. | ||
254 | */ | ||
255 | |||
256 | static void aac_src_notify_adapter(struct aac_dev *dev, u32 event) | ||
257 | { | ||
258 | switch (event) { | ||
259 | |||
260 | case AdapNormCmdQue: | ||
261 | src_writel(dev, MUnit.ODR_C, | ||
262 | INBOUNDDOORBELL_1 << SRC_ODR_SHIFT); | ||
263 | break; | ||
264 | case HostNormRespNotFull: | ||
265 | src_writel(dev, MUnit.ODR_C, | ||
266 | INBOUNDDOORBELL_4 << SRC_ODR_SHIFT); | ||
267 | break; | ||
268 | case AdapNormRespQue: | ||
269 | src_writel(dev, MUnit.ODR_C, | ||
270 | INBOUNDDOORBELL_2 << SRC_ODR_SHIFT); | ||
271 | break; | ||
272 | case HostNormCmdNotFull: | ||
273 | src_writel(dev, MUnit.ODR_C, | ||
274 | INBOUNDDOORBELL_3 << SRC_ODR_SHIFT); | ||
275 | break; | ||
276 | case FastIo: | ||
277 | src_writel(dev, MUnit.ODR_C, | ||
278 | INBOUNDDOORBELL_6 << SRC_ODR_SHIFT); | ||
279 | break; | ||
280 | case AdapPrintfDone: | ||
281 | src_writel(dev, MUnit.ODR_C, | ||
282 | INBOUNDDOORBELL_5 << SRC_ODR_SHIFT); | ||
283 | break; | ||
284 | default: | ||
285 | BUG(); | ||
286 | break; | ||
287 | } | ||
288 | } | ||
289 | |||
290 | /** | ||
291 | * aac_src_start_adapter - activate adapter | ||
292 | * @dev: Adapter | ||
293 | * | ||
294 | * Start up processing on an i960 based AAC adapter | ||
295 | */ | ||
296 | |||
297 | static void aac_src_start_adapter(struct aac_dev *dev) | ||
298 | { | ||
299 | struct aac_init *init; | ||
300 | |||
301 | init = dev->init; | ||
302 | init->HostElapsedSeconds = cpu_to_le32(get_seconds()); | ||
303 | |||
304 | /* We can only use a 32 bit address here */ | ||
305 | src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, | ||
306 | 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); | ||
307 | } | ||
308 | |||
309 | /** | ||
310 | * aac_src_check_health | ||
311 | * @dev: device to check if healthy | ||
312 | * | ||
313 | * Will attempt to determine if the specified adapter is alive and | ||
314 | * capable of handling requests, returning 0 if alive. | ||
315 | */ | ||
316 | static int aac_src_check_health(struct aac_dev *dev) | ||
317 | { | ||
318 | u32 status = src_readl(dev, MUnit.OMR); | ||
319 | |||
320 | /* | ||
321 | * Check to see if the board failed any self tests. | ||
322 | */ | ||
323 | if (unlikely(status & SELF_TEST_FAILED)) | ||
324 | return -1; | ||
325 | |||
326 | /* | ||
327 | * Check to see if the board panic'd. | ||
328 | */ | ||
329 | if (unlikely(status & KERNEL_PANIC)) | ||
330 | return (status >> 16) & 0xFF; | ||
331 | /* | ||
332 | * Wait for the adapter to be up and running. | ||
333 | */ | ||
334 | if (unlikely(!(status & KERNEL_UP_AND_RUNNING))) | ||
335 | return -3; | ||
336 | /* | ||
337 | * Everything is OK | ||
338 | */ | ||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | /** | ||
343 | * aac_src_deliver_message | ||
344 | * @fib: fib to issue | ||
345 | * | ||
346 | * Will send a fib, returning 0 if successful. | ||
347 | */ | ||
348 | static int aac_src_deliver_message(struct fib *fib) | ||
349 | { | ||
350 | struct aac_dev *dev = fib->dev; | ||
351 | struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; | ||
352 | unsigned long qflags; | ||
353 | u32 fibsize; | ||
354 | u64 address; | ||
355 | struct aac_fib_xporthdr *pFibX; | ||
356 | |||
357 | spin_lock_irqsave(q->lock, qflags); | ||
358 | q->numpending++; | ||
359 | spin_unlock_irqrestore(q->lock, qflags); | ||
360 | |||
361 | /* Calculate the value for the fibsize field */ | ||
362 | fibsize = (sizeof(struct aac_fib_xporthdr) + | ||
363 | fib->hw_fib_va->header.Size + 127) / 128 - 1; | ||
364 | if (fibsize > (ALIGN32 - 1)) | ||
365 | fibsize = ALIGN32 - 1; | ||
366 | |||
367 | /* Fill XPORT header */ | ||
368 | pFibX = (struct aac_fib_xporthdr *) | ||
369 | ((unsigned char *)fib->hw_fib_va - | ||
370 | sizeof(struct aac_fib_xporthdr)); | ||
371 | pFibX->Handle = fib->hw_fib_va->header.SenderData + 1; | ||
372 | pFibX->HostAddress = fib->hw_fib_pa; | ||
373 | pFibX->Size = fib->hw_fib_va->header.Size; | ||
374 | address = fib->hw_fib_pa - (u64)sizeof(struct aac_fib_xporthdr); | ||
375 | |||
376 | src_writel(dev, MUnit.IQ_H, (u32)(address >> 32)); | ||
377 | src_writel(dev, MUnit.IQ_L, (u32)(address & 0xffffffff) + fibsize); | ||
378 | return 0; | ||
379 | } | ||
380 | |||
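aac_src_deliver_message packs the transport-header-relative FIB size into the low bits of the bus address before writing the two IQ registers: IQ_H takes bits 63..32 and IQ_L the low word plus the size. A sketch of that encoding, assuming ALIGN32 is 32 (five low bits available) and that the transport-header address is at least 128-byte aligned, which is what makes the driver's addition equivalent to an OR:

#include <linux/types.h>

static u64 src_encode_iq(u64 xport_pa, u32 total_bytes)	/* total_bytes > 0 */
{
	/* size in 128-byte units, minus one, so 0 means a single unit */
	u32 fibsize = (total_bytes + 127) / 128 - 1;

	if (fibsize > 31)		/* clamp to ALIGN32 - 1 */
		fibsize = 31;
	return xport_pa + fibsize;	/* low bits of the PA are clear */
}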
381 | /** | ||
382 | * aac_src_ioremap | ||
383 | * @size: mapping resize request | ||
384 | * | ||
385 | */ | ||
386 | static int aac_src_ioremap(struct aac_dev *dev, u32 size) | ||
387 | { | ||
388 | if (!size) { | ||
389 | iounmap(dev->regs.src.bar0); | ||
390 | dev->regs.src.bar0 = NULL; | ||
391 | iounmap(dev->base); | ||
392 | dev->base = NULL; | ||
393 | return 0; | ||
394 | } | ||
395 | dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2), | ||
396 | AAC_MIN_SRC_BAR1_SIZE); | ||
397 | dev->base = NULL; | ||
398 | if (dev->regs.src.bar1 == NULL) | ||
399 | return -1; | ||
400 | dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, | ||
401 | size); | ||
402 | if (dev->base == NULL) { | ||
403 | iounmap(dev->regs.src.bar1); | ||
404 | dev->regs.src.bar1 = NULL; | ||
405 | return -1; | ||
406 | } | ||
407 | dev->IndexRegs = &((struct src_registers __iomem *) | ||
408 | dev->base)->IndexRegs; | ||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | static int aac_src_restart_adapter(struct aac_dev *dev, int bled) | ||
413 | { | ||
414 | u32 var, reset_mask; | ||
415 | |||
416 | if (bled >= 0) { | ||
417 | if (bled) | ||
418 | printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", | ||
419 | dev->name, dev->id, bled); | ||
420 | bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, | ||
421 | 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL); | ||
422 | if (bled || (var != 0x00000001)) | ||
423 | bled = -EINVAL; | ||
424 | if (dev->supplement_adapter_info.SupportedOptions2 & | ||
425 | AAC_OPTION_DOORBELL_RESET) { | ||
426 | src_writel(dev, MUnit.IDR, reset_mask); | ||
427 | msleep(5000); /* Delay 5 seconds */ | ||
428 | } | ||
429 | } | ||
430 | |||
431 | if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) | ||
432 | return -ENODEV; | ||
433 | |||
434 | if (startup_timeout < 300) | ||
435 | startup_timeout = 300; | ||
436 | |||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * aac_src_select_comm - Select communications method | ||
442 | * @dev: Adapter | ||
443 | * @comm: communications method | ||
444 | */ | ||
445 | int aac_src_select_comm(struct aac_dev *dev, int comm) | ||
446 | { | ||
447 | switch (comm) { | ||
448 | case AAC_COMM_MESSAGE: | ||
449 | dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; | ||
450 | dev->a_ops.adapter_intr = aac_src_intr_message; | ||
451 | dev->a_ops.adapter_deliver = aac_src_deliver_message; | ||
452 | break; | ||
453 | default: | ||
454 | return 1; | ||
455 | } | ||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | /** | ||
460 | * aac_src_init - initialize a Cardinal Frey Bar card | ||
461 | * @dev: device to configure | ||
462 | * | ||
463 | */ | ||
464 | |||
465 | int aac_src_init(struct aac_dev *dev) | ||
466 | { | ||
467 | unsigned long start; | ||
468 | unsigned long status; | ||
469 | int restart = 0; | ||
470 | int instance = dev->id; | ||
471 | const char *name = dev->name; | ||
472 | |||
473 | dev->a_ops.adapter_ioremap = aac_src_ioremap; | ||
474 | dev->a_ops.adapter_comm = aac_src_select_comm; | ||
475 | |||
476 | dev->base_size = AAC_MIN_SRC_BAR0_SIZE; | ||
477 | if (aac_adapter_ioremap(dev, dev->base_size)) { | ||
478 | printk(KERN_WARNING "%s: unable to map adapter.\n", name); | ||
479 | goto error_iounmap; | ||
480 | } | ||
481 | |||
482 | /* Failure to reset here is an option ... */ | ||
483 | dev->a_ops.adapter_sync_cmd = src_sync_cmd; | ||
484 | dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; | ||
485 | if ((aac_reset_devices || reset_devices) && | ||
486 | !aac_src_restart_adapter(dev, 0)) | ||
487 | ++restart; | ||
488 | /* | ||
489 | * Check to see if the board panic'd while booting. | ||
490 | */ | ||
491 | status = src_readl(dev, MUnit.OMR); | ||
492 | if (status & KERNEL_PANIC) { | ||
493 | if (aac_src_restart_adapter(dev, aac_src_check_health(dev))) | ||
494 | goto error_iounmap; | ||
495 | ++restart; | ||
496 | } | ||
497 | /* | ||
498 | * Check to see if the board failed any self tests. | ||
499 | */ | ||
500 | status = src_readl(dev, MUnit.OMR); | ||
501 | if (status & SELF_TEST_FAILED) { | ||
502 | printk(KERN_ERR "%s%d: adapter self-test failed.\n", | ||
503 | dev->name, instance); | ||
504 | goto error_iounmap; | ||
505 | } | ||
506 | /* | ||
507 | * Check to see if the monitor panic'd while booting. | ||
508 | */ | ||
509 | if (status & MONITOR_PANIC) { | ||
510 | printk(KERN_ERR "%s%d: adapter monitor panic.\n", | ||
511 | dev->name, instance); | ||
512 | goto error_iounmap; | ||
513 | } | ||
514 | start = jiffies; | ||
515 | /* | ||
516 | * Wait for the adapter to be up and running. Wait up to 3 minutes | ||
517 | */ | ||
518 | while (!((status = src_readl(dev, MUnit.OMR)) & | ||
519 | KERNEL_UP_AND_RUNNING)) { | ||
520 | if ((restart && | ||
521 | (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || | ||
522 | time_after(jiffies, start+HZ*startup_timeout)) { | ||
523 | printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", | ||
524 | dev->name, instance, status); | ||
525 | goto error_iounmap; | ||
526 | } | ||
527 | if (!restart && | ||
528 | ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || | ||
529 | time_after(jiffies, start + HZ * | ||
530 | ((startup_timeout > 60) | ||
531 | ? (startup_timeout - 60) | ||
532 | : (startup_timeout / 2))))) { | ||
533 | if (likely(!aac_src_restart_adapter(dev, | ||
534 | aac_src_check_health(dev)))) | ||
535 | start = jiffies; | ||
536 | ++restart; | ||
537 | } | ||
538 | msleep(1); | ||
539 | } | ||
540 | if (restart && aac_commit) | ||
541 | aac_commit = 1; | ||
542 | /* | ||
543 | * Fill in the common function dispatch table. | ||
544 | */ | ||
545 | dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; | ||
546 | dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; | ||
547 | dev->a_ops.adapter_notify = aac_src_notify_adapter; | ||
548 | dev->a_ops.adapter_sync_cmd = src_sync_cmd; | ||
549 | dev->a_ops.adapter_check_health = aac_src_check_health; | ||
550 | dev->a_ops.adapter_restart = aac_src_restart_adapter; | ||
551 | |||
552 | /* | ||
553 | * First clear out all interrupts. Then enable the ones that we | ||
554 | * can handle. | ||
555 | */ | ||
556 | aac_adapter_comm(dev, AAC_COMM_MESSAGE); | ||
557 | aac_adapter_disable_int(dev); | ||
558 | src_writel(dev, MUnit.ODR_C, 0xffffffff); | ||
559 | aac_adapter_enable_int(dev); | ||
560 | |||
561 | if (aac_init_adapter(dev) == NULL) | ||
562 | goto error_iounmap; | ||
563 | if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1) | ||
564 | goto error_iounmap; | ||
565 | |||
566 | dev->msi = aac_msi && !pci_enable_msi(dev->pdev); | ||
567 | |||
568 | if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, | ||
569 | IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { | ||
570 | |||
571 | if (dev->msi) | ||
572 | pci_disable_msi(dev->pdev); | ||
573 | |||
574 | printk(KERN_ERR "%s%d: Interrupt unavailable.\n", | ||
575 | name, instance); | ||
576 | goto error_iounmap; | ||
577 | } | ||
578 | dev->dbg_base = pci_resource_start(dev->pdev, 2); | ||
579 | dev->dbg_base_mapped = dev->regs.src.bar1; | ||
580 | dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE; | ||
581 | |||
582 | aac_adapter_enable_int(dev); | ||
583 | /* | ||
584 | * Tell the adapter that all is configured, and it can | ||
585 | * start accepting requests | ||
586 | */ | ||
587 | aac_src_start_adapter(dev); | ||
588 | |||
589 | return 0; | ||
590 | |||
591 | error_iounmap: | ||
592 | |||
593 | return -1; | ||
594 | } | ||
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index df2fc09ba47..b6d350ac428 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h | |||
@@ -62,7 +62,7 @@ | |||
62 | #include "bnx2fc_constants.h" | 62 | #include "bnx2fc_constants.h" |
63 | 63 | ||
64 | #define BNX2FC_NAME "bnx2fc" | 64 | #define BNX2FC_NAME "bnx2fc" |
65 | #define BNX2FC_VERSION "1.0.0" | 65 | #define BNX2FC_VERSION "1.0.1" |
66 | 66 | ||
67 | #define PFX "bnx2fc: " | 67 | #define PFX "bnx2fc: " |
68 | 68 | ||
@@ -84,9 +84,15 @@ | |||
84 | #define BNX2FC_NUM_MAX_SESS 128 | 84 | #define BNX2FC_NUM_MAX_SESS 128 |
85 | #define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) | 85 | #define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) |
86 | 86 | ||
87 | #define BNX2FC_MAX_OUTSTANDING_CMNDS 4096 | 87 | #define BNX2FC_MAX_OUTSTANDING_CMNDS 2048 |
88 | #define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS | ||
89 | #define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE | ||
88 | #define BNX2FC_MIN_PAYLOAD 256 | 90 | #define BNX2FC_MIN_PAYLOAD 256 |
89 | #define BNX2FC_MAX_PAYLOAD 2048 | 91 | #define BNX2FC_MAX_PAYLOAD 2048 |
92 | #define BNX2FC_MFS \ | ||
93 | (BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header)) | ||
94 | #define BNX2FC_MINI_JUMBO_MTU 2500 | ||
95 | |||
90 | 96 | ||
91 | #define BNX2FC_RQ_BUF_SZ 256 | 97 | #define BNX2FC_RQ_BUF_SZ 256 |
92 | #define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ)) | 98 | #define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ)) |
@@ -98,7 +104,8 @@ | |||
98 | #define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) | 104 | #define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) |
99 | #define BNX2FC_5771X_DB_PAGE_SIZE 128 | 105 | #define BNX2FC_5771X_DB_PAGE_SIZE 128 |
100 | 106 | ||
101 | #define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS | 107 | #define BNX2FC_MAX_TASKS \ |
108 | (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS) | ||
102 | #define BNX2FC_TASK_SIZE 128 | 109 | #define BNX2FC_TASK_SIZE 128 |
103 | #define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) | 110 | #define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) |
104 | #define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE) | 111 | #define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE) |
@@ -112,10 +119,10 @@ | |||
112 | #define BNX2FC_WRITE (1 << 0) | 119 | #define BNX2FC_WRITE (1 << 0) |
113 | 120 | ||
114 | #define BNX2FC_MIN_XID 0 | 121 | #define BNX2FC_MIN_XID 0 |
115 | #define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1) | 122 | #define BNX2FC_MAX_XID \ |
116 | #define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS) | 123 | (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1) |
117 | #define FCOE_MAX_XID \ | 124 | #define FCOE_MIN_XID (BNX2FC_MAX_XID + 1) |
118 | (BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256)) | 125 | #define FCOE_MAX_XID (FCOE_MIN_XID + 4095) |
119 | #define BNX2FC_MAX_LUN 0xFFFF | 126 | #define BNX2FC_MAX_LUN 0xFFFF |
120 | #define BNX2FC_MAX_FCP_TGT 256 | 127 | #define BNX2FC_MAX_FCP_TGT 256 |
121 | #define BNX2FC_MAX_CMD_LEN 16 | 128 | #define BNX2FC_MAX_CMD_LEN 16 |
@@ -125,7 +132,6 @@ | |||
125 | 132 | ||
126 | #define BNX2FC_WAIT_CNT 120 | 133 | #define BNX2FC_WAIT_CNT 120 |
127 | #define BNX2FC_FW_TIMEOUT (3 * HZ) | 134 | #define BNX2FC_FW_TIMEOUT (3 * HZ) |
128 | |||
129 | #define PORT_MAX 2 | 135 | #define PORT_MAX 2 |
130 | 136 | ||
131 | #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) | 137 | #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) |
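With the constants above, the XID space becomes fixed instead of CPU-count dependent: 2048 SCSI XIDs plus 2048 ELS/TM XIDs give bnx2fc 0..4095, and the FCoE initiator gets the 4096 XIDs after that (the old FCOE_MAX_XID scaled with nr_cpu_ids). Illustrative compile-time checks of that arithmetic, assuming the macros from the hunk are in scope; the BUILD_BUG_ONs are not part of the driver:

#include <linux/bug.h>

static inline void bnx2fc_check_xid_layout(void)
{
	BUILD_BUG_ON(BNX2FC_MAX_XID != 4095);	/* 2048 + 2048 - 1 */
	BUILD_BUG_ON(FCOE_MIN_XID != 4096);	/* starts right after */
	BUILD_BUG_ON(FCOE_MAX_XID != 8191);	/* 4096 more, CPU-independent */
}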
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index e476e875307..e2e647509a7 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
@@ -21,7 +21,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); | |||
21 | 21 | ||
22 | #define DRV_MODULE_NAME "bnx2fc" | 22 | #define DRV_MODULE_NAME "bnx2fc" |
23 | #define DRV_MODULE_VERSION BNX2FC_VERSION | 23 | #define DRV_MODULE_VERSION BNX2FC_VERSION |
24 | #define DRV_MODULE_RELDATE "Jan 25, 2011" | 24 | #define DRV_MODULE_RELDATE "Mar 17, 2011" |
25 | 25 | ||
26 | 26 | ||
27 | static char version[] __devinitdata = | 27 | static char version[] __devinitdata = |
@@ -437,17 +437,16 @@ static int bnx2fc_l2_rcv_thread(void *arg) | |||
437 | set_current_state(TASK_INTERRUPTIBLE); | 437 | set_current_state(TASK_INTERRUPTIBLE); |
438 | while (!kthread_should_stop()) { | 438 | while (!kthread_should_stop()) { |
439 | schedule(); | 439 | schedule(); |
440 | set_current_state(TASK_RUNNING); | ||
441 | spin_lock_bh(&bg->fcoe_rx_list.lock); | 440 | spin_lock_bh(&bg->fcoe_rx_list.lock); |
442 | while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { | 441 | while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { |
443 | spin_unlock_bh(&bg->fcoe_rx_list.lock); | 442 | spin_unlock_bh(&bg->fcoe_rx_list.lock); |
444 | bnx2fc_recv_frame(skb); | 443 | bnx2fc_recv_frame(skb); |
445 | spin_lock_bh(&bg->fcoe_rx_list.lock); | 444 | spin_lock_bh(&bg->fcoe_rx_list.lock); |
446 | } | 445 | } |
446 | __set_current_state(TASK_INTERRUPTIBLE); | ||
447 | spin_unlock_bh(&bg->fcoe_rx_list.lock); | 447 | spin_unlock_bh(&bg->fcoe_rx_list.lock); |
448 | set_current_state(TASK_INTERRUPTIBLE); | ||
449 | } | 448 | } |
450 | set_current_state(TASK_RUNNING); | 449 | __set_current_state(TASK_RUNNING); |
451 | return 0; | 450 | return 0; |
452 | } | 451 | } |
453 | 452 | ||
@@ -569,7 +568,6 @@ int bnx2fc_percpu_io_thread(void *arg) | |||
569 | set_current_state(TASK_INTERRUPTIBLE); | 568 | set_current_state(TASK_INTERRUPTIBLE); |
570 | while (!kthread_should_stop()) { | 569 | while (!kthread_should_stop()) { |
571 | schedule(); | 570 | schedule(); |
572 | set_current_state(TASK_RUNNING); | ||
573 | spin_lock_bh(&p->fp_work_lock); | 571 | spin_lock_bh(&p->fp_work_lock); |
574 | while (!list_empty(&p->work_list)) { | 572 | while (!list_empty(&p->work_list)) { |
575 | list_splice_init(&p->work_list, &work_list); | 573 | list_splice_init(&p->work_list, &work_list); |
@@ -583,10 +581,10 @@ int bnx2fc_percpu_io_thread(void *arg) | |||
583 | 581 | ||
584 | spin_lock_bh(&p->fp_work_lock); | 582 | spin_lock_bh(&p->fp_work_lock); |
585 | } | 583 | } |
584 | __set_current_state(TASK_INTERRUPTIBLE); | ||
586 | spin_unlock_bh(&p->fp_work_lock); | 585 | spin_unlock_bh(&p->fp_work_lock); |
587 | set_current_state(TASK_INTERRUPTIBLE); | ||
588 | } | 586 | } |
589 | set_current_state(TASK_RUNNING); | 587 | __set_current_state(TASK_RUNNING); |
590 | 588 | ||
591 | return 0; | 589 | return 0; |
592 | } | 590 | } |
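Both thread hunks apply the same fix: the thread must declare TASK_INTERRUPTIBLE while still holding the queue lock, so a producer that enqueues after the final emptiness check also sees the sleeping state and its wake-up is not lost. The resulting idiom, reduced to a generic consumer; the context struct and process_one() are placeholders, not driver names:

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_ctx {
	spinlock_t lock;
	struct list_head work;
};

static void process_one(struct my_ctx *ctx);	/* placeholder worker */

static int consumer_thread(void *arg)
{
	struct my_ctx *ctx = arg;

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();			/* sleep until woken */
		spin_lock_bh(&ctx->lock);
		while (!list_empty(&ctx->work)) {
			spin_unlock_bh(&ctx->lock);
			process_one(ctx);
			spin_lock_bh(&ctx->lock);
		}
		/* declare intent to sleep before dropping the lock, so a
		 * producer that enqueues right now will also wake us */
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&ctx->lock);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}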
@@ -661,31 +659,6 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) | |||
661 | return 0; | 659 | return 0; |
662 | } | 660 | } |
663 | 661 | ||
664 | static int bnx2fc_mfs_update(struct fc_lport *lport) | ||
665 | { | ||
666 | struct fcoe_port *port = lport_priv(lport); | ||
667 | struct bnx2fc_hba *hba = port->priv; | ||
668 | struct net_device *netdev = hba->netdev; | ||
669 | u32 mfs; | ||
670 | u32 max_mfs; | ||
671 | |||
672 | mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + | ||
673 | sizeof(struct fcoe_crc_eof)); | ||
674 | max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header); | ||
675 | BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs); | ||
676 | if (mfs > max_mfs) | ||
677 | mfs = max_mfs; | ||
678 | |||
679 | /* Adjust mfs to be a multiple of 256 bytes */ | ||
680 | mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) * | ||
681 | BNX2FC_MIN_PAYLOAD); | ||
682 | mfs = mfs + sizeof(struct fc_frame_header); | ||
683 | |||
684 | BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs); | ||
685 | if (fc_set_mfs(lport, mfs)) | ||
686 | return -EINVAL; | ||
687 | return 0; | ||
688 | } | ||
689 | static void bnx2fc_link_speed_update(struct fc_lport *lport) | 662 | static void bnx2fc_link_speed_update(struct fc_lport *lport) |
690 | { | 663 | { |
691 | struct fcoe_port *port = lport_priv(lport); | 664 | struct fcoe_port *port = lport_priv(lport); |
@@ -754,7 +727,7 @@ static int bnx2fc_net_config(struct fc_lport *lport) | |||
754 | !hba->phys_dev->ethtool_ops->get_pauseparam) | 727 | !hba->phys_dev->ethtool_ops->get_pauseparam) |
755 | return -EOPNOTSUPP; | 728 | return -EOPNOTSUPP; |
756 | 729 | ||
757 | if (bnx2fc_mfs_update(lport)) | 730 | if (fc_set_mfs(lport, BNX2FC_MFS)) |
758 | return -EINVAL; | 731 | return -EINVAL; |
759 | 732 | ||
760 | skb_queue_head_init(&port->fcoe_pending_queue); | 733 | skb_queue_head_init(&port->fcoe_pending_queue); |
@@ -825,14 +798,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event) | |||
825 | if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) | 798 | if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) |
826 | printk(KERN_ERR "indicate_netevent: "\ | 799 | printk(KERN_ERR "indicate_netevent: "\ |
827 | "adapter is not UP!!\n"); | 800 | "adapter is not UP!!\n"); |
828 | /* fall thru to update mfs if MTU has changed */ | ||
829 | case NETDEV_CHANGEMTU: | ||
830 | BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n"); | ||
831 | bnx2fc_mfs_update(lport); | ||
832 | mutex_lock(&lport->lp_mutex); | ||
833 | list_for_each_entry(vport, &lport->vports, list) | ||
834 | bnx2fc_mfs_update(vport); | ||
835 | mutex_unlock(&lport->lp_mutex); | ||
836 | break; | 801 | break; |
837 | 802 | ||
838 | case NETDEV_DOWN: | 803 | case NETDEV_DOWN: |
@@ -1095,13 +1060,6 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba) | |||
1095 | struct netdev_hw_addr *ha; | 1060 | struct netdev_hw_addr *ha; |
1096 | int sel_san_mac = 0; | 1061 | int sel_san_mac = 0; |
1097 | 1062 | ||
1098 | /* Do not support for bonding device */ | ||
1099 | if ((netdev->priv_flags & IFF_MASTER_ALB) || | ||
1100 | (netdev->priv_flags & IFF_SLAVE_INACTIVE) || | ||
1101 | (netdev->priv_flags & IFF_MASTER_8023AD)) { | ||
1102 | return -EOPNOTSUPP; | ||
1103 | } | ||
1104 | |||
1105 | /* setup Source MAC Address */ | 1063 | /* setup Source MAC Address */ |
1106 | rcu_read_lock(); | 1064 | rcu_read_lock(); |
1107 | for_each_dev_addr(physdev, ha) { | 1065 | for_each_dev_addr(physdev, ha) { |
@@ -1432,16 +1390,9 @@ static int bnx2fc_destroy(struct net_device *netdev) | |||
1432 | struct net_device *phys_dev; | 1390 | struct net_device *phys_dev; |
1433 | int rc = 0; | 1391 | int rc = 0; |
1434 | 1392 | ||
1435 | if (!rtnl_trylock()) | 1393 | rtnl_lock(); |
1436 | return restart_syscall(); | ||
1437 | 1394 | ||
1438 | mutex_lock(&bnx2fc_dev_lock); | 1395 | mutex_lock(&bnx2fc_dev_lock); |
1439 | #ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE | ||
1440 | if (THIS_MODULE->state != MODULE_STATE_LIVE) { | ||
1441 | rc = -ENODEV; | ||
1442 | goto netdev_err; | ||
1443 | } | ||
1444 | #endif | ||
1445 | /* obtain physical netdev */ | 1396 | /* obtain physical netdev */ |
1446 | if (netdev->priv_flags & IFF_802_1Q_VLAN) | 1397 | if (netdev->priv_flags & IFF_802_1Q_VLAN) |
1447 | phys_dev = vlan_dev_real_dev(netdev); | 1398 | phys_dev = vlan_dev_real_dev(netdev); |
@@ -1805,18 +1756,10 @@ static int bnx2fc_disable(struct net_device *netdev) | |||
1805 | struct ethtool_drvinfo drvinfo; | 1756 | struct ethtool_drvinfo drvinfo; |
1806 | int rc = 0; | 1757 | int rc = 0; |
1807 | 1758 | ||
1808 | if (!rtnl_trylock()) { | 1759 | rtnl_lock(); |
1809 | printk(KERN_ERR PFX "retrying for rtnl_lock\n"); | ||
1810 | return -EIO; | ||
1811 | } | ||
1812 | 1760 | ||
1813 | mutex_lock(&bnx2fc_dev_lock); | 1761 | mutex_lock(&bnx2fc_dev_lock); |
1814 | 1762 | ||
1815 | if (THIS_MODULE->state != MODULE_STATE_LIVE) { | ||
1816 | rc = -ENODEV; | ||
1817 | goto nodev; | ||
1818 | } | ||
1819 | |||
1820 | /* obtain physical netdev */ | 1763 | /* obtain physical netdev */ |
1821 | if (netdev->priv_flags & IFF_802_1Q_VLAN) | 1764 | if (netdev->priv_flags & IFF_802_1Q_VLAN) |
1822 | phys_dev = vlan_dev_real_dev(netdev); | 1765 | phys_dev = vlan_dev_real_dev(netdev); |
@@ -1867,19 +1810,11 @@ static int bnx2fc_enable(struct net_device *netdev) | |||
1867 | struct ethtool_drvinfo drvinfo; | 1810 | struct ethtool_drvinfo drvinfo; |
1868 | int rc = 0; | 1811 | int rc = 0; |
1869 | 1812 | ||
1870 | if (!rtnl_trylock()) { | 1813 | rtnl_lock(); |
1871 | printk(KERN_ERR PFX "retrying for rtnl_lock\n"); | ||
1872 | return -EIO; | ||
1873 | } | ||
1874 | 1814 | ||
1875 | BNX2FC_MISC_DBG("Entered %s\n", __func__); | 1815 | BNX2FC_MISC_DBG("Entered %s\n", __func__); |
1876 | mutex_lock(&bnx2fc_dev_lock); | 1816 | mutex_lock(&bnx2fc_dev_lock); |
1877 | 1817 | ||
1878 | if (THIS_MODULE->state != MODULE_STATE_LIVE) { | ||
1879 | rc = -ENODEV; | ||
1880 | goto nodev; | ||
1881 | } | ||
1882 | |||
1883 | /* obtain physical netdev */ | 1818 | /* obtain physical netdev */ |
1884 | if (netdev->priv_flags & IFF_802_1Q_VLAN) | 1819 | if (netdev->priv_flags & IFF_802_1Q_VLAN) |
1885 | phys_dev = vlan_dev_real_dev(netdev); | 1820 | phys_dev = vlan_dev_real_dev(netdev); |
@@ -1942,18 +1877,9 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) | |||
1942 | return -EIO; | 1877 | return -EIO; |
1943 | } | 1878 | } |
1944 | 1879 | ||
1945 | if (!rtnl_trylock()) { | 1880 | rtnl_lock(); |
1946 | printk(KERN_ERR "trying for rtnl_lock\n"); | ||
1947 | return -EIO; | ||
1948 | } | ||
1949 | mutex_lock(&bnx2fc_dev_lock); | ||
1950 | 1881 | ||
1951 | #ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE | 1882 | mutex_lock(&bnx2fc_dev_lock); |
1952 | if (THIS_MODULE->state != MODULE_STATE_LIVE) { | ||
1953 | rc = -ENODEV; | ||
1954 | goto mod_err; | ||
1955 | } | ||
1956 | #endif | ||
1957 | 1883 | ||
1958 | if (!try_module_get(THIS_MODULE)) { | 1884 | if (!try_module_get(THIS_MODULE)) { |
1959 | rc = -EINVAL; | 1885 | rc = -EINVAL; |
@@ -2506,7 +2432,7 @@ static struct scsi_host_template bnx2fc_shost_template = { | |||
2506 | .change_queue_type = fc_change_queue_type, | 2432 | .change_queue_type = fc_change_queue_type, |
2507 | .this_id = -1, | 2433 | .this_id = -1, |
2508 | .cmd_per_lun = 3, | 2434 | .cmd_per_lun = 3, |
2509 | .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2), | 2435 | .can_queue = BNX2FC_CAN_QUEUE, |
2510 | .use_clustering = ENABLE_CLUSTERING, | 2436 | .use_clustering = ENABLE_CLUSTERING, |
2511 | .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, | 2437 | .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, |
2512 | .max_sectors = 512, | 2438 | .max_sectors = 512, |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 4f409683674..1b680e288c5 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c | |||
@@ -87,7 +87,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) | |||
87 | fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; | 87 | fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; |
88 | fcoe_init1.task_list_pbl_addr_hi = | 88 | fcoe_init1.task_list_pbl_addr_hi = |
89 | (u32) ((u64) hba->task_ctx_bd_dma >> 32); | 89 | (u32) ((u64) hba->task_ctx_bd_dma >> 32); |
90 | fcoe_init1.mtu = hba->netdev->mtu; | 90 | fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU; |
91 | 91 | ||
92 | fcoe_init1.flags = (PAGE_SHIFT << | 92 | fcoe_init1.flags = (PAGE_SHIFT << |
93 | FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); | 93 | FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); |
@@ -590,7 +590,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
590 | 590 | ||
591 | num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; | 591 | num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; |
592 | 592 | ||
593 | spin_lock_bh(&tgt->tgt_lock); | ||
593 | rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); | 594 | rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); |
595 | spin_unlock_bh(&tgt->tgt_lock); | ||
596 | |||
594 | if (rq_data) { | 597 | if (rq_data) { |
595 | buf = rq_data; | 598 | buf = rq_data; |
596 | } else { | 599 | } else { |
@@ -603,8 +606,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
603 | } | 606 | } |
604 | 607 | ||
605 | for (i = 0; i < num_rq; i++) { | 608 | for (i = 0; i < num_rq; i++) { |
609 | spin_lock_bh(&tgt->tgt_lock); | ||
606 | rq_data = (unsigned char *) | 610 | rq_data = (unsigned char *) |
607 | bnx2fc_get_next_rqe(tgt, 1); | 611 | bnx2fc_get_next_rqe(tgt, 1); |
612 | spin_unlock_bh(&tgt->tgt_lock); | ||
608 | len = BNX2FC_RQ_BUF_SZ; | 613 | len = BNX2FC_RQ_BUF_SZ; |
609 | memcpy(buf1, rq_data, len); | 614 | memcpy(buf1, rq_data, len); |
610 | buf1 += len; | 615 | buf1 += len; |
@@ -615,13 +620,15 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
615 | 620 | ||
616 | if (buf != rq_data) | 621 | if (buf != rq_data) |
617 | kfree(buf); | 622 | kfree(buf); |
623 | spin_lock_bh(&tgt->tgt_lock); | ||
618 | bnx2fc_return_rqe(tgt, num_rq); | 624 | bnx2fc_return_rqe(tgt, num_rq); |
625 | spin_unlock_bh(&tgt->tgt_lock); | ||
619 | break; | 626 | break; |
620 | 627 | ||
621 | case FCOE_ERROR_DETECTION_CQE_TYPE: | 628 | case FCOE_ERROR_DETECTION_CQE_TYPE: |
622 | /* | 629 | /* |
623 | *In case of error reporting CQE a single RQ entry | 630 | * In case of error reporting CQE a single RQ entry |
624 | * is consumes. | 631 | * is consumed. |
625 | */ | 632 | */ |
626 | spin_lock_bh(&tgt->tgt_lock); | 633 | spin_lock_bh(&tgt->tgt_lock); |
627 | num_rq = 1; | 634 | num_rq = 1; |
@@ -705,6 +712,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
705 | * In case of warning reporting CQE a single RQ entry | 712 | * In case of warning reporting CQE a single RQ entry |
706 | * is consumed. | 713 | * is consumed. |
707 | */ | 714 | */ |
715 | spin_lock_bh(&tgt->tgt_lock); | ||
708 | num_rq = 1; | 716 | num_rq = 1; |
709 | err_entry = (struct fcoe_err_report_entry *) | 717 | err_entry = (struct fcoe_err_report_entry *) |
710 | bnx2fc_get_next_rqe(tgt, 1); | 718 | bnx2fc_get_next_rqe(tgt, 1); |
@@ -717,6 +725,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
717 | err_entry->tx_buf_off, err_entry->rx_buf_off); | 725 | err_entry->tx_buf_off, err_entry->rx_buf_off); |
718 | 726 | ||
719 | bnx2fc_return_rqe(tgt, 1); | 727 | bnx2fc_return_rqe(tgt, 1); |
728 | spin_unlock_bh(&tgt->tgt_lock); | ||
720 | break; | 729 | break; |
721 | 730 | ||
722 | default: | 731 | default: |
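The unsolicited-frame hunks narrow tgt_lock to the RQ index updates themselves; the potentially large buffer copies between get and return run unlocked. A wrapper expressing that scope, illustrative only:

static void *bnx2fc_get_rqe_locked(struct bnx2fc_rport *tgt, u8 num_rqe)
{
	void *rqe;

	spin_lock_bh(&tgt->tgt_lock);	/* protects only the RQ consumer index */
	rqe = bnx2fc_get_next_rqe(tgt, num_rqe);
	spin_unlock_bh(&tgt->tgt_lock);
	return rqe;
}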
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 0f1dd23730d..d3fc302c241 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c | |||
@@ -11,6 +11,9 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include "bnx2fc.h" | 13 | #include "bnx2fc.h" |
14 | |||
15 | #define RESERVE_FREE_LIST_INDEX num_possible_cpus() | ||
16 | |||
14 | static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, | 17 | static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, |
15 | int bd_index); | 18 | int bd_index); |
16 | static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); | 19 | static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); |
@@ -242,8 +245,9 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, | |||
242 | u32 mem_size; | 245 | u32 mem_size; |
243 | u16 xid; | 246 | u16 xid; |
244 | int i; | 247 | int i; |
245 | int num_ios; | 248 | int num_ios, num_pri_ios; |
246 | size_t bd_tbl_sz; | 249 | size_t bd_tbl_sz; |
250 | int arr_sz = num_possible_cpus() + 1; | ||
247 | 251 | ||
248 | if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { | 252 | if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { |
249 | printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ | 253 | printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ |
@@ -263,14 +267,14 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, | |||
263 | } | 267 | } |
264 | 268 | ||
265 | cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * | 269 | cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * |
266 | num_possible_cpus(), GFP_KERNEL); | 270 | arr_sz, GFP_KERNEL); |
267 | if (!cmgr->free_list) { | 271 | if (!cmgr->free_list) { |
268 | printk(KERN_ERR PFX "failed to alloc free_list\n"); | 272 | printk(KERN_ERR PFX "failed to alloc free_list\n"); |
269 | goto mem_err; | 273 | goto mem_err; |
270 | } | 274 | } |
271 | 275 | ||
272 | cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * | 276 | cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * |
273 | num_possible_cpus(), GFP_KERNEL); | 277 | arr_sz, GFP_KERNEL); |
274 | if (!cmgr->free_list_lock) { | 278 | if (!cmgr->free_list_lock) { |
275 | printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); | 279 | printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); |
276 | goto mem_err; | 280 | goto mem_err; |
@@ -279,13 +283,18 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, | |||
279 | cmgr->hba = hba; | 283 | cmgr->hba = hba; |
280 | cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); | 284 | cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); |
281 | 285 | ||
282 | for (i = 0; i < num_possible_cpus(); i++) { | 286 | for (i = 0; i < arr_sz; i++) { |
283 | INIT_LIST_HEAD(&cmgr->free_list[i]); | 287 | INIT_LIST_HEAD(&cmgr->free_list[i]); |
284 | spin_lock_init(&cmgr->free_list_lock[i]); | 288 | spin_lock_init(&cmgr->free_list_lock[i]); |
285 | } | 289 | } |
286 | 290 | ||
287 | /* Pre-allocated pool of bnx2fc_cmds */ | 291 | /* |
292 | * Pre-allocated pool of bnx2fc_cmds. | ||
293 | * Last entry in the free list array is the free list | ||
294 | * of slow path requests. | ||
295 | */ | ||
288 | xid = BNX2FC_MIN_XID; | 296 | xid = BNX2FC_MIN_XID; |
297 | num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS; | ||
289 | for (i = 0; i < num_ios; i++) { | 298 | for (i = 0; i < num_ios; i++) { |
290 | io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); | 299 | io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); |
291 | 300 | ||
@@ -298,11 +307,13 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, | |||
298 | INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); | 307 | INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); |
299 | 308 | ||
300 | io_req->xid = xid++; | 309 | io_req->xid = xid++; |
301 | if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS) | 310 | if (i < num_pri_ios) |
302 | printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n", | 311 | list_add_tail(&io_req->link, |
303 | io_req->xid); | 312 | &cmgr->free_list[io_req->xid % |
304 | list_add_tail(&io_req->link, | 313 | num_possible_cpus()]); |
305 | &cmgr->free_list[io_req->xid % num_possible_cpus()]); | 314 | else |
315 | list_add_tail(&io_req->link, | ||
316 | &cmgr->free_list[num_possible_cpus()]); | ||
306 | io_req++; | 317 | io_req++; |
307 | } | 318 | } |
308 | 319 | ||
@@ -389,7 +400,7 @@ free_cmd_pool: | |||
389 | if (!cmgr->free_list) | 400 | if (!cmgr->free_list) |
390 | goto free_cmgr; | 401 | goto free_cmgr; |
391 | 402 | ||
392 | for (i = 0; i < num_possible_cpus(); i++) { | 403 | for (i = 0; i < num_possible_cpus() + 1; i++) { |
393 | struct list_head *list; | 404 | struct list_head *list; |
394 | struct list_head *tmp; | 405 | struct list_head *tmp; |
395 | 406 | ||
@@ -413,6 +424,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) | |||
413 | struct bnx2fc_cmd *io_req; | 424 | struct bnx2fc_cmd *io_req; |
414 | struct list_head *listp; | 425 | struct list_head *listp; |
415 | struct io_bdt *bd_tbl; | 426 | struct io_bdt *bd_tbl; |
427 | int index = RESERVE_FREE_LIST_INDEX; | ||
416 | u32 max_sqes; | 428 | u32 max_sqes; |
417 | u16 xid; | 429 | u16 xid; |
418 | 430 | ||
@@ -432,26 +444,26 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) | |||
432 | * NOTE: Free list insertions and deletions are protected with | 444 | * NOTE: Free list insertions and deletions are protected with |
433 | * cmgr lock | 445 | * cmgr lock |
434 | */ | 446 | */ |
435 | spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 447 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); |
436 | if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) || | 448 | if ((list_empty(&(cmd_mgr->free_list[index]))) || |
437 | (tgt->num_active_ios.counter >= max_sqes)) { | 449 | (tgt->num_active_ios.counter >= max_sqes)) { |
438 | BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " | 450 | BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " |
439 | "ios(%d):sqes(%d)\n", | 451 | "ios(%d):sqes(%d)\n", |
440 | tgt->num_active_ios.counter, tgt->max_sqes); | 452 | tgt->num_active_ios.counter, tgt->max_sqes); |
441 | if (list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) | 453 | if (list_empty(&(cmd_mgr->free_list[index]))) |
442 | printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); | 454 | printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); |
443 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 455 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
444 | return NULL; | 456 | return NULL; |
445 | } | 457 | } |
446 | 458 | ||
447 | listp = (struct list_head *) | 459 | listp = (struct list_head *) |
448 | cmd_mgr->free_list[smp_processor_id()].next; | 460 | cmd_mgr->free_list[index].next; |
449 | list_del_init(listp); | 461 | list_del_init(listp); |
450 | io_req = (struct bnx2fc_cmd *) listp; | 462 | io_req = (struct bnx2fc_cmd *) listp; |
451 | xid = io_req->xid; | 463 | xid = io_req->xid; |
452 | cmd_mgr->cmds[xid] = io_req; | 464 | cmd_mgr->cmds[xid] = io_req; |
453 | atomic_inc(&tgt->num_active_ios); | 465 | atomic_inc(&tgt->num_active_ios); |
454 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 466 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
455 | 467 | ||
456 | INIT_LIST_HEAD(&io_req->link); | 468 | INIT_LIST_HEAD(&io_req->link); |
457 | 469 | ||
@@ -479,27 +491,30 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) | |||
479 | struct io_bdt *bd_tbl; | 491 | struct io_bdt *bd_tbl; |
480 | u32 max_sqes; | 492 | u32 max_sqes; |
481 | u16 xid; | 493 | u16 xid; |
494 | int index = get_cpu(); | ||
482 | 495 | ||
483 | max_sqes = BNX2FC_SCSI_MAX_SQES; | 496 | max_sqes = BNX2FC_SCSI_MAX_SQES; |
484 | /* | 497 | /* |
485 | * NOTE: Free list insertions and deletions are protected with | 498 | * NOTE: Free list insertions and deletions are protected with |
486 | * cmgr lock | 499 | * cmgr lock |
487 | */ | 500 | */ |
488 | spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 501 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); |
489 | if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) || | 502 | if ((list_empty(&cmd_mgr->free_list[index])) || |
490 | (tgt->num_active_ios.counter >= max_sqes)) { | 503 | (tgt->num_active_ios.counter >= max_sqes)) { |
491 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 504 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
505 | put_cpu(); | ||
492 | return NULL; | 506 | return NULL; |
493 | } | 507 | } |
494 | 508 | ||
495 | listp = (struct list_head *) | 509 | listp = (struct list_head *) |
496 | cmd_mgr->free_list[smp_processor_id()].next; | 510 | cmd_mgr->free_list[index].next; |
497 | list_del_init(listp); | 511 | list_del_init(listp); |
498 | io_req = (struct bnx2fc_cmd *) listp; | 512 | io_req = (struct bnx2fc_cmd *) listp; |
499 | xid = io_req->xid; | 513 | xid = io_req->xid; |
500 | cmd_mgr->cmds[xid] = io_req; | 514 | cmd_mgr->cmds[xid] = io_req; |
501 | atomic_inc(&tgt->num_active_ios); | 515 | atomic_inc(&tgt->num_active_ios); |
502 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 516 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
517 | put_cpu(); | ||
503 | 518 | ||
504 | INIT_LIST_HEAD(&io_req->link); | 519 | INIT_LIST_HEAD(&io_req->link); |
505 | 520 | ||
@@ -522,8 +537,15 @@ void bnx2fc_cmd_release(struct kref *ref) | |||
522 | struct bnx2fc_cmd *io_req = container_of(ref, | 537 | struct bnx2fc_cmd *io_req = container_of(ref, |
523 | struct bnx2fc_cmd, refcount); | 538 | struct bnx2fc_cmd, refcount); |
524 | struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; | 539 | struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; |
540 | int index; | ||
541 | |||
542 | if (io_req->cmd_type == BNX2FC_SCSI_CMD) | ||
543 | index = io_req->xid % num_possible_cpus(); | ||
544 | else | ||
545 | index = RESERVE_FREE_LIST_INDEX; | ||
525 | 546 | ||
526 | spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 547 | |
548 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); | ||
527 | if (io_req->cmd_type != BNX2FC_SCSI_CMD) | 549 | if (io_req->cmd_type != BNX2FC_SCSI_CMD) |
528 | bnx2fc_free_mp_resc(io_req); | 550 | bnx2fc_free_mp_resc(io_req); |
529 | cmd_mgr->cmds[io_req->xid] = NULL; | 551 | cmd_mgr->cmds[io_req->xid] = NULL; |
@@ -531,9 +553,10 @@ void bnx2fc_cmd_release(struct kref *ref) | |||
531 | list_del_init(&io_req->link); | 553 | list_del_init(&io_req->link); |
532 | /* Add it to the free list */ | 554 | /* Add it to the free list */ |
533 | list_add(&io_req->link, | 555 | list_add(&io_req->link, |
534 | &cmd_mgr->free_list[smp_processor_id()]); | 556 | &cmd_mgr->free_list[index]); |
535 | atomic_dec(&io_req->tgt->num_active_ios); | 557 | atomic_dec(&io_req->tgt->num_active_ios); |
536 | spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); | 558 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
559 | |||
537 | } | 560 | } |
538 | 561 | ||
539 | static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) | 562 | static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) |
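The command-manager changes add one extra free list beyond the per-CPU ones: fast-path SCSI commands hash by XID across the first num_possible_cpus() lists, while slow-path ELS/TM requests live in the final, reserved slot. The index rule condensed from the hunks above (the helper name is illustrative; struct bnx2fc_cmd comes from bnx2fc.h):

#include <linux/cpumask.h>

static int bnx2fc_free_list_index(struct bnx2fc_cmd *io_req)
{
	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		return io_req->xid % num_possible_cpus();
	return num_possible_cpus();	/* RESERVE_FREE_LIST_INDEX */
}

On the fast path, allocation additionally pins the CPU with get_cpu()/put_cpu() so the chosen list cannot change under preemption between the lock and the unlock.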
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 7ea93af6026..7cc05e4e82d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c | |||
@@ -304,10 +304,8 @@ static void bnx2fc_upload_session(struct fcoe_port *port, | |||
304 | " not sent to FW\n"); | 304 | " not sent to FW\n"); |
305 | 305 | ||
306 | /* Free session resources */ | 306 | /* Free session resources */ |
307 | spin_lock_bh(&tgt->cq_lock); | ||
308 | bnx2fc_free_session_resc(hba, tgt); | 307 | bnx2fc_free_session_resc(hba, tgt); |
309 | bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); | 308 | bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); |
310 | spin_unlock_bh(&tgt->cq_lock); | ||
311 | } | 309 | } |
312 | 310 | ||
313 | static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, | 311 | static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, |
@@ -830,11 +828,13 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, | |||
830 | tgt->rq = NULL; | 828 | tgt->rq = NULL; |
831 | } | 829 | } |
832 | /* Free CQ */ | 830 | /* Free CQ */ |
831 | spin_lock_bh(&tgt->cq_lock); | ||
833 | if (tgt->cq) { | 832 | if (tgt->cq) { |
834 | dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, | 833 | dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, |
835 | tgt->cq, tgt->cq_dma); | 834 | tgt->cq, tgt->cq_dma); |
836 | tgt->cq = NULL; | 835 | tgt->cq = NULL; |
837 | } | 836 | } |
837 | spin_unlock_bh(&tgt->cq_lock); | ||
838 | /* Free SQ */ | 838 | /* Free SQ */ |
839 | if (tgt->sq) { | 839 | if (tgt->sq) { |
840 | dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, | 840 | dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, |
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 8eeb39ffa37..e98ae33f129 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c | |||
@@ -132,14 +132,25 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv) | |||
132 | if (page_count(sg_page(sg)) >= 1 && !recv) | 132 | if (page_count(sg_page(sg)) >= 1 && !recv) |
133 | return; | 133 | return; |
134 | 134 | ||
135 | segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); | 135 | if (recv) { |
136 | segment->atomic_mapped = true; | ||
137 | segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); | ||
138 | } else { | ||
139 | segment->atomic_mapped = false; | ||
140 | /* the xmit path can sleep with the page mapped so use kmap */ | ||
141 | segment->sg_mapped = kmap(sg_page(sg)); | ||
142 | } | ||
143 | |||
136 | segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; | 144 | segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; |
137 | } | 145 | } |
138 | 146 | ||
139 | void iscsi_tcp_segment_unmap(struct iscsi_segment *segment) | 147 | void iscsi_tcp_segment_unmap(struct iscsi_segment *segment) |
140 | { | 148 | { |
141 | if (segment->sg_mapped) { | 149 | if (segment->sg_mapped) { |
142 | kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); | 150 | if (segment->atomic_mapped) |
151 | kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); | ||
152 | else | ||
153 | kunmap(sg_page(segment->sg)); | ||
143 | segment->sg_mapped = NULL; | 154 | segment->sg_mapped = NULL; |
144 | segment->data = NULL; | 155 | segment->data = NULL; |
145 | } | 156 | } |
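The segment-mapping fix keys off the direction: receive runs in softirq context and keeps the atomic kmap, while the transmit path can sleep with the page mapped and must use kmap(). The atomic_mapped flag records which unmap to use later. The rule restated as a sketch, not the library's API:

#include <linux/highmem.h>
#include <linux/scatterlist.h>

static void *map_segment_page(struct scatterlist *sg, bool recv,
			      bool *atomic_mapped)
{
	if (recv) {
		*atomic_mapped = true;	/* softirq path: may not sleep */
		return kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
	}
	*atomic_mapped = false;		/* xmit may sleep while mapped */
	return kmap(sg_page(sg));
}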
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index 14de249917f..88928f00aa2 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | #/******************************************************************* | 1 | #/******************************************************************* |
2 | # * This file is part of the Emulex Linux Device Driver for * | 2 | # * This file is part of the Emulex Linux Device Driver for * |
3 | # * Fibre Channel Host Bus Adapters. * | 3 | # * Fibre Channel Host Bus Adapters. * |
4 | # * Copyright (C) 2004-2006 Emulex. All rights reserved. * | 4 | # * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | # * EMULEX and SLI are trademarks of Emulex. * | 5 | # * EMULEX and SLI are trademarks of Emulex. * |
6 | # * www.emulex.com * | 6 | # * www.emulex.com * |
7 | # * * | 7 | # * * |
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index b64c6da870d..60e98a62f30 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -539,6 +539,8 @@ struct lpfc_hba { | |||
539 | (struct lpfc_hba *, uint32_t); | 539 | (struct lpfc_hba *, uint32_t); |
540 | int (*lpfc_hba_down_link) | 540 | int (*lpfc_hba_down_link) |
541 | (struct lpfc_hba *, uint32_t); | 541 | (struct lpfc_hba *, uint32_t); |
542 | int (*lpfc_selective_reset) | ||
543 | (struct lpfc_hba *); | ||
542 | 544 | ||
543 | /* SLI4 specific HBA data structure */ | 545 | /* SLI4 specific HBA data structure */ |
544 | struct lpfc_sli4_hba sli4_hba; | 546 | struct lpfc_sli4_hba sli4_hba; |
@@ -895,7 +897,18 @@ lpfc_worker_wake_up(struct lpfc_hba *phba) | |||
895 | return; | 897 | return; |
896 | } | 898 | } |
897 | 899 | ||
898 | static inline void | 900 | static inline int |
901 | lpfc_readl(void __iomem *addr, uint32_t *data) | ||
902 | { | ||
903 | uint32_t temp; | ||
904 | temp = readl(addr); | ||
905 | if (temp == 0xffffffff) | ||
906 | return -EIO; | ||
907 | *data = temp; | ||
908 | return 0; | ||
909 | } | ||
910 | |||
911 | static inline int | ||
899 | lpfc_sli_read_hs(struct lpfc_hba *phba) | 912 | lpfc_sli_read_hs(struct lpfc_hba *phba) |
900 | { | 913 | { |
901 | /* | 914 | /* |
@@ -904,15 +917,17 @@ lpfc_sli_read_hs(struct lpfc_hba *phba) | |||
904 | */ | 917 | */ |
905 | phba->sli.slistat.err_attn_event++; | 918 | phba->sli.slistat.err_attn_event++; |
906 | 919 | ||
907 | /* Save status info */ | 920 | /* Save status info and check for unplug error */ |
908 | phba->work_hs = readl(phba->HSregaddr); | 921 | if (lpfc_readl(phba->HSregaddr, &phba->work_hs) || |
909 | phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); | 922 | lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) || |
910 | phba->work_status[1] = readl(phba->MBslimaddr + 0xac); | 923 | lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) { |
924 | return -EIO; | ||
925 | } | ||
911 | 926 | ||
912 | /* Clear chip Host Attention error bit */ | 927 | /* Clear chip Host Attention error bit */ |
913 | writel(HA_ERATT, phba->HAregaddr); | 928 | writel(HA_ERATT, phba->HAregaddr); |
914 | readl(phba->HAregaddr); /* flush */ | 929 | readl(phba->HAregaddr); /* flush */ |
915 | phba->pport->stopped = 1; | 930 | phba->pport->stopped = 1; |
916 | 931 | ||
917 | return; | 932 | return 0; |
918 | } | 933 | } |
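lpfc_readl exists to catch surprise removal: a dead PCI device returns all-ones on MMIO reads, so any 0xffffffff becomes -EIO and callers unwind instead of acting on garbage. The conversion pattern the following lpfc_attr.c and lpfc_bsg.c hunks apply, wrapped here in an invented function name for illustration:

static int lpfc_example_enable_ring_int(struct lpfc_hba *phba)
{
	uint32_t creg_val;

	if (lpfc_readl(phba->HCregaddr, &creg_val))
		return -EIO;		/* adapter gone: read was all-ones */
	creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
	writel(creg_val, phba->HCregaddr);
	readl(phba->HCregaddr);		/* flush the posted write */
	return 0;
}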
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index e7c020df12f..4e0faa00b96 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -685,7 +685,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) | |||
685 | * -EIO reset not configured or error posting the event | 685 | * -EIO reset not configured or error posting the event |
686 | * zero for success | 686 | * zero for success |
687 | **/ | 687 | **/ |
688 | static int | 688 | int |
689 | lpfc_selective_reset(struct lpfc_hba *phba) | 689 | lpfc_selective_reset(struct lpfc_hba *phba) |
690 | { | 690 | { |
691 | struct completion online_compl; | 691 | struct completion online_compl; |
@@ -746,7 +746,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr, | |||
746 | int status = -EINVAL; | 746 | int status = -EINVAL; |
747 | 747 | ||
748 | if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) | 748 | if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) |
749 | status = lpfc_selective_reset(phba); | 749 | status = phba->lpfc_selective_reset(phba); |
750 | 750 | ||
751 | if (status == 0) | 751 | if (status == 0) |
752 | return strlen(buf); | 752 | return strlen(buf); |
@@ -1224,7 +1224,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, | |||
1224 | if (val & ENABLE_FCP_RING_POLLING) { | 1224 | if (val & ENABLE_FCP_RING_POLLING) { |
1225 | if ((val & DISABLE_FCP_RING_INT) && | 1225 | if ((val & DISABLE_FCP_RING_INT) && |
1226 | !(old_val & DISABLE_FCP_RING_INT)) { | 1226 | !(old_val & DISABLE_FCP_RING_INT)) { |
1227 | creg_val = readl(phba->HCregaddr); | 1227 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
1228 | spin_unlock_irq(&phba->hbalock); | ||
1229 | return -EINVAL; | ||
1230 | } | ||
1228 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); | 1231 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); |
1229 | writel(creg_val, phba->HCregaddr); | 1232 | writel(creg_val, phba->HCregaddr); |
1230 | readl(phba->HCregaddr); /* flush */ | 1233 | readl(phba->HCregaddr); /* flush */ |
@@ -1242,7 +1245,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, | |||
1242 | spin_unlock_irq(&phba->hbalock); | 1245 | spin_unlock_irq(&phba->hbalock); |
1243 | del_timer(&phba->fcp_poll_timer); | 1246 | del_timer(&phba->fcp_poll_timer); |
1244 | spin_lock_irq(&phba->hbalock); | 1247 | spin_lock_irq(&phba->hbalock); |
1245 | creg_val = readl(phba->HCregaddr); | 1248 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
1249 | spin_unlock_irq(&phba->hbalock); | ||
1250 | return -EINVAL; | ||
1251 | } | ||
1246 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 1252 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
1247 | writel(creg_val, phba->HCregaddr); | 1253 | writel(creg_val, phba->HCregaddr); |
1248 | readl(phba->HCregaddr); /* flush */ | 1254 | readl(phba->HCregaddr); /* flush */ |
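Both lpfc_poll_store hunks follow the same discipline: a failed register read while hbalock is held must drop the lock on the way out, otherwise the -EINVAL path would leave the HBA spinlock held forever. Condensed into a hedged sketch (demo_ring_int_enable is hypothetical):

    /* Hypothetical condensation of the poll_store error path. */
    static int demo_ring_int_enable(struct lpfc_hba *phba)
    {
        uint32_t creg_val;

        spin_lock_irq(&phba->hbalock);
        if (lpfc_readl(phba->HCregaddr, &creg_val)) {
            spin_unlock_irq(&phba->hbalock);   /* never return locked */
            return -EINVAL;
        }
        creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
        writel(creg_val, phba->HCregaddr);
        readl(phba->HCregaddr);                /* flush posted write */
        spin_unlock_irq(&phba->hbalock);
        return 0;
    }
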
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 0dd43bb9161..793b9f1131f 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2009-2010 Emulex. All rights reserved. * | 4 | * Copyright (C) 2009-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -348,7 +348,10 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) | |||
348 | dd_data->context_un.iocb.bmp = bmp; | 348 | dd_data->context_un.iocb.bmp = bmp; |
349 | 349 | ||
350 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 350 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
351 | creg_val = readl(phba->HCregaddr); | 351 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
352 | rc = -EIO; | ||
353 | goto free_cmdiocbq; | ||
354 | } | ||
352 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 355 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
353 | writel(creg_val, phba->HCregaddr); | 356 | writel(creg_val, phba->HCregaddr); |
354 | readl(phba->HCregaddr); /* flush */ | 357 | readl(phba->HCregaddr); /* flush */ |
@@ -599,7 +602,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
599 | dd_data->context_un.iocb.ndlp = ndlp; | 602 | dd_data->context_un.iocb.ndlp = ndlp; |
600 | 603 | ||
601 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 604 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
602 | creg_val = readl(phba->HCregaddr); | 605 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
606 | rc = -EIO; | ||
607 | goto linkdown_err; | ||
608 | } | ||
603 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 609 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
604 | writel(creg_val, phba->HCregaddr); | 610 | writel(creg_val, phba->HCregaddr); |
605 | readl(phba->HCregaddr); /* flush */ | 611 | readl(phba->HCregaddr); /* flush */ |
@@ -613,6 +619,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
613 | else | 619 | else |
614 | rc = -EIO; | 620 | rc = -EIO; |
615 | 621 | ||
622 | linkdown_err: | ||
616 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | 623 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, |
617 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | 624 | job->request_payload.sg_cnt, DMA_TO_DEVICE); |
618 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | 625 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, |
@@ -1357,7 +1364,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, | |||
1357 | dd_data->context_un.iocb.ndlp = ndlp; | 1364 | dd_data->context_un.iocb.ndlp = ndlp; |
1358 | 1365 | ||
1359 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 1366 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
1360 | creg_val = readl(phba->HCregaddr); | 1367 | if (lpfc_readl(phba->HCregaddr, &creg_val)) { |
1368 | rc = -IOCB_ERROR; | ||
1369 | goto issue_ct_rsp_exit; | ||
1370 | } | ||
1361 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 1371 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
1362 | writel(creg_val, phba->HCregaddr); | 1372 | writel(creg_val, phba->HCregaddr); |
1363 | readl(phba->HCregaddr); /* flush */ | 1373 | readl(phba->HCregaddr); /* flush */ |
@@ -2479,16 +2489,18 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) | |||
2479 | 2489 | ||
2480 | from = (uint8_t *)dd_data->context_un.mbox.mb; | 2490 | from = (uint8_t *)dd_data->context_un.mbox.mb; |
2481 | job = dd_data->context_un.mbox.set_job; | 2491 | job = dd_data->context_un.mbox.set_job; |
2482 | size = job->reply_payload.payload_len; | 2492 | if (job) { |
2483 | job->reply->reply_payload_rcv_len = | 2493 | size = job->reply_payload.payload_len; |
2484 | sg_copy_from_buffer(job->reply_payload.sg_list, | 2494 | job->reply->reply_payload_rcv_len = |
2485 | job->reply_payload.sg_cnt, | 2495 | sg_copy_from_buffer(job->reply_payload.sg_list, |
2486 | from, size); | 2496 | job->reply_payload.sg_cnt, |
2487 | job->reply->result = 0; | 2497 | from, size); |
2498 | job->reply->result = 0; | ||
2488 | 2499 | ||
2500 | job->dd_data = NULL; | ||
2501 | job->job_done(job); | ||
2502 | } | ||
2489 | dd_data->context_un.mbox.set_job = NULL; | 2503 | dd_data->context_un.mbox.set_job = NULL; |
2490 | job->dd_data = NULL; | ||
2491 | job->job_done(job); | ||
2492 | /* need to hold the lock until we call job done to hold off | 2504 | /* need to hold the lock until we call job done to hold off |
2493 | * the timeout handler returning to the midlayer while | 2505 | * the timeout handler returning to the midlayer while |
2494 | * we are still processing the job | 2506 |
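The lpfc_bsg_wake_mbox_wait rework closes a completion-versus-timeout race: when a bsg request times out, the timeout handler detaches the job, so the mailbox completion must test the saved pointer before copying the reply or calling job_done(). The shape of the fix, reduced to a hypothetical sketch:

    /* Hypothetical reduction of the race fix: the timeout path
     * clears ->set_job, so only a surviving pointer is touched. */
    struct demo_mbox_ctx {
        struct fc_bsg_job *set_job;        /* NULL once timed out */
    };

    static void demo_mbox_complete(struct demo_mbox_ctx *ctx)
    {
        struct fc_bsg_job *job = ctx->set_job;

        if (job) {
            job->dd_data = NULL;           /* detach driver context */
            job->job_done(job);            /* wake the submitter */
        }
        ctx->set_job = NULL;               /* idempotent either way */
    }
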
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 3d40023f480..f0b332f4eed 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2010 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -254,8 +254,8 @@ uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); | |||
254 | void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, | 254 | void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, |
255 | uint32_t); | 255 | uint32_t); |
256 | void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); | 256 | void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); |
257 | 257 | int lpfc_selective_reset(struct lpfc_hba *); | |
258 | void lpfc_reset_barrier(struct lpfc_hba * phba); | 258 | void lpfc_reset_barrier(struct lpfc_hba *); |
259 | int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); | 259 | int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); |
260 | int lpfc_sli_brdkill(struct lpfc_hba *); | 260 | int lpfc_sli_brdkill(struct lpfc_hba *); |
261 | int lpfc_sli_brdreset(struct lpfc_hba *); | 261 | int lpfc_sli_brdreset(struct lpfc_hba *); |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 8e28edf9801..735028fedda 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -89,7 +89,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport) | |||
89 | return 0; | 89 | return 0; |
90 | 90 | ||
91 | /* Read the HBA Host Attention Register */ | 91 | /* Read the HBA Host Attention Register */ |
92 | ha_copy = readl(phba->HAregaddr); | 92 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
93 | return 1; | ||
93 | 94 | ||
94 | if (!(ha_copy & HA_LATT)) | 95 | if (!(ha_copy & HA_LATT)) |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 94ae37c5111..95f11ed7946 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -1344,7 +1344,7 @@ typedef struct { /* FireFly BIU registers */ | |||
1344 | #define HS_FFER1 0x80000000 /* Bit 31 */ | 1344 | #define HS_FFER1 0x80000000 /* Bit 31 */ |
1345 | #define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ | 1345 | #define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ |
1346 | #define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ | 1346 | #define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ |
1347 | 1347 | #define UNPLUG_ERR 0x00000001 /* Indicate pci hot unplug */ | |
1348 | /* Host Control Register */ | 1348 | /* Host Control Register */ |
1349 | 1349 | ||
1350 | #define HC_REG_OFFSET 12 /* Byte offset from register base address */ | 1350 | #define HC_REG_OFFSET 12 /* Byte offset from register base address */ |
@@ -1713,6 +1713,17 @@ struct lpfc_pde6 { | |||
1713 | #define pde6_apptagval_WORD word2 | 1713 | #define pde6_apptagval_WORD word2 |
1714 | }; | 1714 | }; |
1715 | 1715 | ||
1716 | struct lpfc_pde7 { | ||
1717 | uint32_t word0; | ||
1718 | #define pde7_type_SHIFT 24 | ||
1719 | #define pde7_type_MASK 0x000000ff | ||
1720 | #define pde7_type_WORD word0 | ||
1721 | #define pde7_rsvd0_SHIFT 0 | ||
1722 | #define pde7_rsvd0_MASK 0x00ffffff | ||
1723 | #define pde7_rsvd0_WORD word0 | ||
1724 | uint32_t addrHigh; | ||
1725 | uint32_t addrLow; | ||
1726 | }; | ||
1716 | 1727 | ||
1717 | /* Structure for MB Command LOAD_SM and DOWN_LOAD */ | 1728 | /* Structure for MB Command LOAD_SM and DOWN_LOAD */ |
1718 | 1729 | ||
@@ -3621,7 +3632,7 @@ typedef struct _IOCB { /* IOCB structure */ | |||
3621 | ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ | 3632 | ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ |
3622 | QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ | 3633 | QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ |
3623 | struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ | 3634 | struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ |
3624 | struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */ | 3635 | struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */ |
3625 | uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ | 3636 | uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ |
3626 | } un; | 3637 | } un; |
3627 | union { | 3638 | union { |
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index c7178d60c7b..8433ac0d9fb 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -215,7 +215,7 @@ struct lpfc_sli4_flags { | |||
215 | #define lpfc_fip_flag_WORD word0 | 215 | #define lpfc_fip_flag_WORD word0 |
216 | }; | 216 | }; |
217 | 217 | ||
218 | struct sli4_bls_acc { | 218 | struct sli4_bls_rsp { |
219 | uint32_t word0_rsvd; /* Word0 must be reserved */ | 219 | uint32_t word0_rsvd; /* Word0 must be reserved */ |
220 | uint32_t word1; | 220 | uint32_t word1; |
221 | #define lpfc_abts_orig_SHIFT 0 | 221 | #define lpfc_abts_orig_SHIFT 0 |
@@ -231,6 +231,16 @@ struct sli4_bls_acc { | |||
231 | #define lpfc_abts_oxid_MASK 0x0000FFFF | 231 | #define lpfc_abts_oxid_MASK 0x0000FFFF |
232 | #define lpfc_abts_oxid_WORD word2 | 232 | #define lpfc_abts_oxid_WORD word2 |
233 | uint32_t word3; | 233 | uint32_t word3; |
234 | #define lpfc_vndr_code_SHIFT 0 | ||
235 | #define lpfc_vndr_code_MASK 0x000000FF | ||
236 | #define lpfc_vndr_code_WORD word3 | ||
237 | #define lpfc_rsn_expln_SHIFT 8 | ||
238 | #define lpfc_rsn_expln_MASK 0x000000FF | ||
239 | #define lpfc_rsn_expln_WORD word3 | ||
240 | #define lpfc_rsn_code_SHIFT 16 | ||
241 | #define lpfc_rsn_code_MASK 0x000000FF | ||
242 | #define lpfc_rsn_code_WORD word3 | ||
243 | |||
234 | uint32_t word4; | 244 | uint32_t word4; |
235 | uint32_t word5_rsvd; /* Word5 must be reserved */ | 245 | uint32_t word5_rsvd; /* Word5 must be reserved */ |
236 | }; | 246 | }; |
@@ -711,21 +721,27 @@ struct lpfc_sli4_cfg_mhdr { | |||
711 | union lpfc_sli4_cfg_shdr { | 721 | union lpfc_sli4_cfg_shdr { |
712 | struct { | 722 | struct { |
713 | uint32_t word6; | 723 | uint32_t word6; |
714 | #define lpfc_mbox_hdr_opcode_SHIFT 0 | 724 | #define lpfc_mbox_hdr_opcode_SHIFT 0 |
715 | #define lpfc_mbox_hdr_opcode_MASK 0x000000FF | 725 | #define lpfc_mbox_hdr_opcode_MASK 0x000000FF |
716 | #define lpfc_mbox_hdr_opcode_WORD word6 | 726 | #define lpfc_mbox_hdr_opcode_WORD word6 |
717 | #define lpfc_mbox_hdr_subsystem_SHIFT 8 | 727 | #define lpfc_mbox_hdr_subsystem_SHIFT 8 |
718 | #define lpfc_mbox_hdr_subsystem_MASK 0x000000FF | 728 | #define lpfc_mbox_hdr_subsystem_MASK 0x000000FF |
719 | #define lpfc_mbox_hdr_subsystem_WORD word6 | 729 | #define lpfc_mbox_hdr_subsystem_WORD word6 |
720 | #define lpfc_mbox_hdr_port_number_SHIFT 16 | 730 | #define lpfc_mbox_hdr_port_number_SHIFT 16 |
721 | #define lpfc_mbox_hdr_port_number_MASK 0x000000FF | 731 | #define lpfc_mbox_hdr_port_number_MASK 0x000000FF |
722 | #define lpfc_mbox_hdr_port_number_WORD word6 | 732 | #define lpfc_mbox_hdr_port_number_WORD word6 |
723 | #define lpfc_mbox_hdr_domain_SHIFT 24 | 733 | #define lpfc_mbox_hdr_domain_SHIFT 24 |
724 | #define lpfc_mbox_hdr_domain_MASK 0x000000FF | 734 | #define lpfc_mbox_hdr_domain_MASK 0x000000FF |
725 | #define lpfc_mbox_hdr_domain_WORD word6 | 735 | #define lpfc_mbox_hdr_domain_WORD word6 |
726 | uint32_t timeout; | 736 | uint32_t timeout; |
727 | uint32_t request_length; | 737 | uint32_t request_length; |
728 | uint32_t reserved9; | 738 | uint32_t word9; |
739 | #define lpfc_mbox_hdr_version_SHIFT 0 | ||
740 | #define lpfc_mbox_hdr_version_MASK 0x000000FF | ||
741 | #define lpfc_mbox_hdr_version_WORD word9 | ||
742 | #define LPFC_Q_CREATE_VERSION_2 2 | ||
743 | #define LPFC_Q_CREATE_VERSION_1 1 | ||
744 | #define LPFC_Q_CREATE_VERSION_0 0 | ||
729 | } request; | 745 | } request; |
730 | struct { | 746 | struct { |
731 | uint32_t word6; | 747 | uint32_t word6; |
@@ -917,9 +933,12 @@ struct cq_context { | |||
917 | #define LPFC_CQ_CNT_512 0x1 | 933 | #define LPFC_CQ_CNT_512 0x1 |
918 | #define LPFC_CQ_CNT_1024 0x2 | 934 | #define LPFC_CQ_CNT_1024 0x2 |
919 | uint32_t word1; | 935 | uint32_t word1; |
920 | #define lpfc_cq_eq_id_SHIFT 22 | 936 | #define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */ |
921 | #define lpfc_cq_eq_id_MASK 0x000000FF | 937 | #define lpfc_cq_eq_id_MASK 0x000000FF |
922 | #define lpfc_cq_eq_id_WORD word1 | 938 | #define lpfc_cq_eq_id_WORD word1 |
939 | #define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */ | ||
940 | #define lpfc_cq_eq_id_2_MASK 0x0000FFFF | ||
941 | #define lpfc_cq_eq_id_2_WORD word1 | ||
923 | uint32_t reserved0; | 942 | uint32_t reserved0; |
924 | uint32_t reserved1; | 943 | uint32_t reserved1; |
925 | }; | 944 | }; |
@@ -929,6 +948,9 @@ struct lpfc_mbx_cq_create { | |||
929 | union { | 948 | union { |
930 | struct { | 949 | struct { |
931 | uint32_t word0; | 950 | uint32_t word0; |
951 | #define lpfc_mbx_cq_create_page_size_SHIFT 16 /* Version 2 Only */ | ||
952 | #define lpfc_mbx_cq_create_page_size_MASK 0x000000FF | ||
953 | #define lpfc_mbx_cq_create_page_size_WORD word0 | ||
932 | #define lpfc_mbx_cq_create_num_pages_SHIFT 0 | 954 | #define lpfc_mbx_cq_create_num_pages_SHIFT 0 |
933 | #define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF | 955 | #define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF |
934 | #define lpfc_mbx_cq_create_num_pages_WORD word0 | 956 | #define lpfc_mbx_cq_create_num_pages_WORD word0 |
@@ -969,7 +991,7 @@ struct wq_context { | |||
969 | struct lpfc_mbx_wq_create { | 991 | struct lpfc_mbx_wq_create { |
970 | struct mbox_header header; | 992 | struct mbox_header header; |
971 | union { | 993 | union { |
972 | struct { | 994 | struct { /* Version 0 Request */ |
973 | uint32_t word0; | 995 | uint32_t word0; |
974 | #define lpfc_mbx_wq_create_num_pages_SHIFT 0 | 996 | #define lpfc_mbx_wq_create_num_pages_SHIFT 0 |
975 | #define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF | 997 | #define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF |
@@ -979,6 +1001,23 @@ struct lpfc_mbx_wq_create { | |||
979 | #define lpfc_mbx_wq_create_cq_id_WORD word0 | 1001 | #define lpfc_mbx_wq_create_cq_id_WORD word0 |
980 | struct dma_address page[LPFC_MAX_WQ_PAGE]; | 1002 | struct dma_address page[LPFC_MAX_WQ_PAGE]; |
981 | } request; | 1003 | } request; |
1004 | struct { /* Version 1 Request */ | ||
1005 | uint32_t word0; /* Word 0 is the same as in v0 */ | ||
1006 | uint32_t word1; | ||
1007 | #define lpfc_mbx_wq_create_page_size_SHIFT 0 | ||
1008 | #define lpfc_mbx_wq_create_page_size_MASK 0x000000FF | ||
1009 | #define lpfc_mbx_wq_create_page_size_WORD word1 | ||
1010 | #define lpfc_mbx_wq_create_wqe_size_SHIFT 8 | ||
1011 | #define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F | ||
1012 | #define lpfc_mbx_wq_create_wqe_size_WORD word1 | ||
1013 | #define LPFC_WQ_WQE_SIZE_64 0x5 | ||
1014 | #define LPFC_WQ_WQE_SIZE_128 0x6 | ||
1015 | #define lpfc_mbx_wq_create_wqe_count_SHIFT 16 | ||
1016 | #define lpfc_mbx_wq_create_wqe_count_MASK 0x0000FFFF | ||
1017 | #define lpfc_mbx_wq_create_wqe_count_WORD word1 | ||
1018 | uint32_t word2; | ||
1019 | struct dma_address page[LPFC_MAX_WQ_PAGE-1]; | ||
1020 | } request_1; | ||
982 | struct { | 1021 | struct { |
983 | uint32_t word0; | 1022 | uint32_t word0; |
984 | #define lpfc_mbx_wq_create_q_id_SHIFT 0 | 1023 | #define lpfc_mbx_wq_create_q_id_SHIFT 0 |
@@ -1007,13 +1046,22 @@ struct lpfc_mbx_wq_destroy { | |||
1007 | #define LPFC_DATA_BUF_SIZE 2048 | 1046 | #define LPFC_DATA_BUF_SIZE 2048 |
1008 | struct rq_context { | 1047 | struct rq_context { |
1009 | uint32_t word0; | 1048 | uint32_t word0; |
1010 | #define lpfc_rq_context_rq_size_SHIFT 16 | 1049 | #define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ |
1011 | #define lpfc_rq_context_rq_size_MASK 0x0000000F | 1050 | #define lpfc_rq_context_rqe_count_MASK 0x0000000F |
1012 | #define lpfc_rq_context_rq_size_WORD word0 | 1051 | #define lpfc_rq_context_rqe_count_WORD word0 |
1013 | #define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ | 1052 | #define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ |
1014 | #define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ | 1053 | #define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ |
1015 | #define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ | 1054 | #define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ |
1016 | #define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ | 1055 | #define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ |
1056 | #define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */ | ||
1057 | #define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF | ||
1058 | #define lpfc_rq_context_rqe_count_1_WORD word0 | ||
1059 | #define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */ | ||
1060 | #define lpfc_rq_context_rqe_size_MASK 0x0000000F | ||
1061 | #define lpfc_rq_context_rqe_size_WORD word0 | ||
1062 | #define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ | ||
1063 | #define lpfc_rq_context_page_size_MASK 0x000000FF | ||
1064 | #define lpfc_rq_context_page_size_WORD word0 | ||
1017 | uint32_t reserved1; | 1065 | uint32_t reserved1; |
1018 | uint32_t word2; | 1066 | uint32_t word2; |
1019 | #define lpfc_rq_context_cq_id_SHIFT 16 | 1067 | #define lpfc_rq_context_cq_id_SHIFT 16 |
@@ -1022,7 +1070,7 @@ struct rq_context { | |||
1022 | #define lpfc_rq_context_buf_size_SHIFT 0 | 1070 | #define lpfc_rq_context_buf_size_SHIFT 0 |
1023 | #define lpfc_rq_context_buf_size_MASK 0x0000FFFF | 1071 | #define lpfc_rq_context_buf_size_MASK 0x0000FFFF |
1024 | #define lpfc_rq_context_buf_size_WORD word2 | 1072 | #define lpfc_rq_context_buf_size_WORD word2 |
1025 | uint32_t reserved3; | 1073 | uint32_t buffer_size; /* Version 1 Only */ |
1026 | }; | 1074 | }; |
1027 | 1075 | ||
1028 | struct lpfc_mbx_rq_create { | 1076 | struct lpfc_mbx_rq_create { |
@@ -1062,16 +1110,16 @@ struct lpfc_mbx_rq_destroy { | |||
1062 | 1110 | ||
1063 | struct mq_context { | 1111 | struct mq_context { |
1064 | uint32_t word0; | 1112 | uint32_t word0; |
1065 | #define lpfc_mq_context_cq_id_SHIFT 22 | 1113 | #define lpfc_mq_context_cq_id_SHIFT 22 /* Version 0 Only */ |
1066 | #define lpfc_mq_context_cq_id_MASK 0x000003FF | 1114 | #define lpfc_mq_context_cq_id_MASK 0x000003FF |
1067 | #define lpfc_mq_context_cq_id_WORD word0 | 1115 | #define lpfc_mq_context_cq_id_WORD word0 |
1068 | #define lpfc_mq_context_count_SHIFT 16 | 1116 | #define lpfc_mq_context_ring_size_SHIFT 16 |
1069 | #define lpfc_mq_context_count_MASK 0x0000000F | 1117 | #define lpfc_mq_context_ring_size_MASK 0x0000000F |
1070 | #define lpfc_mq_context_count_WORD word0 | 1118 | #define lpfc_mq_context_ring_size_WORD word0 |
1071 | #define LPFC_MQ_CNT_16 0x5 | 1119 | #define LPFC_MQ_RING_SIZE_16 0x5 |
1072 | #define LPFC_MQ_CNT_32 0x6 | 1120 | #define LPFC_MQ_RING_SIZE_32 0x6 |
1073 | #define LPFC_MQ_CNT_64 0x7 | 1121 | #define LPFC_MQ_RING_SIZE_64 0x7 |
1074 | #define LPFC_MQ_CNT_128 0x8 | 1122 | #define LPFC_MQ_RING_SIZE_128 0x8 |
1075 | uint32_t word1; | 1123 | uint32_t word1; |
1076 | #define lpfc_mq_context_valid_SHIFT 31 | 1124 | #define lpfc_mq_context_valid_SHIFT 31 |
1077 | #define lpfc_mq_context_valid_MASK 0x00000001 | 1125 | #define lpfc_mq_context_valid_MASK 0x00000001 |
@@ -1105,9 +1153,12 @@ struct lpfc_mbx_mq_create_ext { | |||
1105 | union { | 1153 | union { |
1106 | struct { | 1154 | struct { |
1107 | uint32_t word0; | 1155 | uint32_t word0; |
1108 | #define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 | 1156 | #define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 |
1109 | #define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF | 1157 | #define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF |
1110 | #define lpfc_mbx_mq_create_ext_num_pages_WORD word0 | 1158 | #define lpfc_mbx_mq_create_ext_num_pages_WORD word0 |
1159 | #define lpfc_mbx_mq_create_ext_cq_id_SHIFT 16 /* Version 1 Only */ | ||
1160 | #define lpfc_mbx_mq_create_ext_cq_id_MASK 0x0000FFFF | ||
1161 | #define lpfc_mbx_mq_create_ext_cq_id_WORD word0 | ||
1111 | uint32_t async_evt_bmap; | 1162 | uint32_t async_evt_bmap; |
1112 | #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK | 1163 | #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK |
1113 | #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 | 1164 | #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 |
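Turning reserved9 into a version field is what makes the v1/v2 queue-create layouts above negotiable per mailbox command: the issuer stamps word9 of the common config header before posting the command. A hedged sketch (demo_stamp_version is hypothetical; the real selection happens in the queue-create routines of lpfc_sli.c):

    /* Hypothetical: advertise the v2 request layout in the header. */
    static void demo_stamp_version(union lpfc_sli4_cfg_shdr *shdr)
    {
        bf_set(lpfc_mbox_hdr_version, &shdr->request,
               LPFC_Q_CREATE_VERSION_2);       /* formerly reserved9 */
    }
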
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 35665cfb568..e6ebe516cfb 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2010 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -507,7 +507,10 @@ lpfc_config_port_post(struct lpfc_hba *phba) | |||
507 | phba->hba_flag &= ~HBA_ERATT_HANDLED; | 507 | phba->hba_flag &= ~HBA_ERATT_HANDLED; |
508 | 508 | ||
509 | /* Enable appropriate host interrupts */ | 509 | /* Enable appropriate host interrupts */ |
510 | status = readl(phba->HCregaddr); | 510 | if (lpfc_readl(phba->HCregaddr, &status)) { |
511 | spin_unlock_irq(&phba->hbalock); | ||
512 | return -EIO; | ||
513 | } | ||
511 | status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; | 514 | status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; |
512 | if (psli->num_rings > 0) | 515 | if (psli->num_rings > 0) |
513 | status |= HC_R0INT_ENA; | 516 | status |= HC_R0INT_ENA; |
@@ -1222,7 +1225,10 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) | |||
1222 | /* Wait for the ER1 bit to clear.*/ | 1225 | /* Wait for the ER1 bit to clear.*/ |
1223 | while (phba->work_hs & HS_FFER1) { | 1226 | while (phba->work_hs & HS_FFER1) { |
1224 | msleep(100); | 1227 | msleep(100); |
1225 | phba->work_hs = readl(phba->HSregaddr); | 1228 | if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { |
1229 | phba->work_hs = UNPLUG_ERR ; | ||
1230 | break; | ||
1231 | } | ||
1226 | /* If driver is unloading let the worker thread continue */ | 1232 | /* If driver is unloading let the worker thread continue */ |
1227 | if (phba->pport->load_flag & FC_UNLOADING) { | 1233 | if (phba->pport->load_flag & FC_UNLOADING) { |
1228 | phba->work_hs = 0; | 1234 | phba->work_hs = 0; |
@@ -4474,6 +4480,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | |||
4474 | { | 4480 | { |
4475 | phba->lpfc_hba_init_link = lpfc_hba_init_link; | 4481 | phba->lpfc_hba_init_link = lpfc_hba_init_link; |
4476 | phba->lpfc_hba_down_link = lpfc_hba_down_link; | 4482 | phba->lpfc_hba_down_link = lpfc_hba_down_link; |
4483 | phba->lpfc_selective_reset = lpfc_selective_reset; | ||
4477 | switch (dev_grp) { | 4484 | switch (dev_grp) { |
4478 | case LPFC_PCI_DEV_LP: | 4485 | case LPFC_PCI_DEV_LP: |
4479 | phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; | 4486 | phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; |
@@ -5385,13 +5392,16 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) | |||
5385 | int i, port_error = 0; | 5392 | int i, port_error = 0; |
5386 | uint32_t if_type; | 5393 | uint32_t if_type; |
5387 | 5394 | ||
5395 | memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); | ||
5396 | memset(®_data, 0, sizeof(reg_data)); | ||
5388 | if (!phba->sli4_hba.PSMPHRregaddr) | 5397 | if (!phba->sli4_hba.PSMPHRregaddr) |
5389 | return -ENODEV; | 5398 | return -ENODEV; |
5390 | 5399 | ||
5391 | /* Wait up to 30 seconds for the SLI Port POST done and ready */ | 5400 | /* Wait up to 30 seconds for the SLI Port POST done and ready */ |
5392 | for (i = 0; i < 3000; i++) { | 5401 | for (i = 0; i < 3000; i++) { |
5393 | portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr); | 5402 | if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, |
5394 | if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) { | 5403 | &portsmphr_reg.word0) || |
5404 | (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { | ||
5395 | /* Port has a fatal POST error, break out */ | 5405 | /* Port has a fatal POST error, break out */ |
5396 | port_error = -ENODEV; | 5406 | port_error = -ENODEV; |
5397 | break; | 5407 | break; |
@@ -5472,9 +5482,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) | |||
5472 | break; | 5482 | break; |
5473 | case LPFC_SLI_INTF_IF_TYPE_2: | 5483 | case LPFC_SLI_INTF_IF_TYPE_2: |
5474 | /* Final checks. The port status should be clean. */ | 5484 | /* Final checks. The port status should be clean. */ |
5475 | reg_data.word0 = | 5485 | if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, |
5476 | readl(phba->sli4_hba.u.if_type2.STATUSregaddr); | 5486 | ®_data.word0) || |
5477 | if (bf_get(lpfc_sliport_status_err, ®_data)) { | 5487 | bf_get(lpfc_sliport_status_err, ®_data)) { |
5478 | phba->work_status[0] = | 5488 | phba->work_status[0] = |
5479 | readl(phba->sli4_hba.u.if_type2. | 5489 | readl(phba->sli4_hba.u.if_type2. |
5480 | ERR1regaddr); | 5490 | ERR1regaddr); |
@@ -6760,9 +6770,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) | |||
6760 | * the loop again. | 6770 | * the loop again. |
6761 | */ | 6771 | */ |
6762 | for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { | 6772 | for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { |
6763 | reg_data.word0 = | 6773 | if (lpfc_readl(phba->sli4_hba.u.if_type2. |
6764 | readl(phba->sli4_hba.u.if_type2. | 6774 | STATUSregaddr, ®_data.word0)) { |
6765 | STATUSregaddr); | 6775 | rc = -ENODEV; |
6776 | break; | ||
6777 | } | ||
6766 | if (bf_get(lpfc_sliport_status_rdy, ®_data)) | 6778 | if (bf_get(lpfc_sliport_status_rdy, ®_data)) |
6767 | break; | 6779 | break; |
6768 | if (bf_get(lpfc_sliport_status_rn, ®_data)) { | 6780 | if (bf_get(lpfc_sliport_status_rn, ®_data)) { |
@@ -6783,8 +6795,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) | |||
6783 | } | 6795 | } |
6784 | 6796 | ||
6785 | /* Detect any port errors. */ | 6797 | /* Detect any port errors. */ |
6786 | reg_data.word0 = readl(phba->sli4_hba.u.if_type2. | 6798 | if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, |
6787 | STATUSregaddr); | 6799 | ®_data.word0)) { |
6800 | rc = -ENODEV; | ||
6801 | break; | ||
6802 | } | ||
6788 | if ((bf_get(lpfc_sliport_status_err, ®_data)) || | 6803 | if ((bf_get(lpfc_sliport_status_err, ®_data)) || |
6789 | (rdy_chk >= 1000)) { | 6804 | (rdy_chk >= 1000)) { |
6790 | phba->work_status[0] = readl( | 6805 | phba->work_status[0] = readl( |
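lpfc_pci_function_reset now treats an unreadable STATUS register as terminal (-ENODEV) instead of one more reason to keep polling. The loop shape used across these hunks, hedged to a hypothetical helper that separates "not ready yet" from "device gone":

    /* Hypothetical bounded readiness poll with unplug detection. */
    static int demo_wait_port_ready(struct lpfc_hba *phba)
    {
        struct lpfc_register reg;
        int i;

        for (i = 0; i < 1000; i++) {
            if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
                           &reg.word0))
                return -ENODEV;               /* unplugged: stop now */
            if (bf_get(lpfc_sliport_status_rdy, &reg))
                return 0;                     /* port came up */
            msleep(10);                       /* not ready yet */
        }
        return -ETIMEDOUT;                    /* bounded wait */
    }
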
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index bf34178b80b..2b962b020cf 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -1514,10 +1514,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1514 | struct scatterlist *sgpe = NULL; /* s/g prot entry */ | 1514 | struct scatterlist *sgpe = NULL; /* s/g prot entry */ |
1515 | struct lpfc_pde5 *pde5 = NULL; | 1515 | struct lpfc_pde5 *pde5 = NULL; |
1516 | struct lpfc_pde6 *pde6 = NULL; | 1516 | struct lpfc_pde6 *pde6 = NULL; |
1517 | struct ulp_bde64 *prot_bde = NULL; | 1517 | struct lpfc_pde7 *pde7 = NULL; |
1518 | dma_addr_t dataphysaddr, protphysaddr; | 1518 | dma_addr_t dataphysaddr, protphysaddr; |
1519 | unsigned short curr_data = 0, curr_prot = 0; | 1519 | unsigned short curr_data = 0, curr_prot = 0; |
1520 | unsigned int split_offset, protgroup_len; | 1520 | unsigned int split_offset; |
1521 | unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; | ||
1521 | unsigned int protgrp_blks, protgrp_bytes; | 1522 | unsigned int protgrp_blks, protgrp_bytes; |
1522 | unsigned int remainder, subtotal; | 1523 | unsigned int remainder, subtotal; |
1523 | int status; | 1524 | int status; |
@@ -1585,23 +1586,33 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1585 | bpl++; | 1586 | bpl++; |
1586 | 1587 | ||
1587 | /* setup the first BDE that points to protection buffer */ | 1588 | /* setup the first BDE that points to protection buffer */ |
1588 | prot_bde = (struct ulp_bde64 *) bpl; | 1589 | protphysaddr = sg_dma_address(sgpe) + protgroup_offset; |
1589 | protphysaddr = sg_dma_address(sgpe); | 1590 | protgroup_len = sg_dma_len(sgpe) - protgroup_offset; |
1590 | prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); | ||
1591 | prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); | ||
1592 | protgroup_len = sg_dma_len(sgpe); | ||
1593 | 1591 | ||
1594 | /* must be integer multiple of the DIF block length */ | 1592 | /* must be integer multiple of the DIF block length */ |
1595 | BUG_ON(protgroup_len % 8); | 1593 | BUG_ON(protgroup_len % 8); |
1596 | 1594 | ||
1595 | pde7 = (struct lpfc_pde7 *) bpl; | ||
1596 | memset(pde7, 0, sizeof(struct lpfc_pde7)); | ||
1597 | bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); | ||
1598 | |||
1599 | pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); | ||
1600 | pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); | ||
1601 | |||
1597 | protgrp_blks = protgroup_len / 8; | 1602 | protgrp_blks = protgroup_len / 8; |
1598 | protgrp_bytes = protgrp_blks * blksize; | 1603 | protgrp_bytes = protgrp_blks * blksize; |
1599 | 1604 | ||
1600 | prot_bde->tus.f.bdeSize = protgroup_len; | 1605 | /* check if this pde is crossing the 4K boundary; if so split */ |
1601 | prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR; | 1606 | if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { |
1602 | prot_bde->tus.w = le32_to_cpu(bpl->tus.w); | 1607 | protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); |
1608 | protgroup_offset += protgroup_remainder; | ||
1609 | protgrp_blks = protgroup_remainder / 8; | ||
1610 | protgrp_bytes = protgrp_blks * blksize; | ||
1611 | } else { | ||
1612 | protgroup_offset = 0; | ||
1613 | curr_prot++; | ||
1614 | } | ||
1603 | 1615 | ||
1604 | curr_prot++; | ||
1605 | num_bde++; | 1616 | num_bde++; |
1606 | 1617 | ||
1607 | /* setup BDE's for data blocks associated with DIF data */ | 1618 | /* setup BDE's for data blocks associated with DIF data */ |
@@ -1653,6 +1664,13 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, | |||
1653 | 1664 | ||
1654 | } | 1665 | } |
1655 | 1666 | ||
1667 | if (protgroup_offset) { | ||
1668 | /* update the reference tag */ | ||
1669 | reftag += protgrp_blks; | ||
1670 | bpl++; | ||
1671 | continue; | ||
1672 | } | ||
1673 | |||
1656 | /* are we done ? */ | 1674 | /* are we done ? */ |
1657 | if (curr_prot == protcnt) { | 1675 | if (curr_prot == protcnt) { |
1658 | alldone = 1; | 1676 | alldone = 1; |
@@ -1675,6 +1693,7 @@ out: | |||
1675 | 1693 | ||
1676 | return num_bde; | 1694 | return num_bde; |
1677 | } | 1695 | } |
1696 | |||
1678 | /* | 1697 | /* |
1679 | * Given a SCSI command that supports DIF, determine composition of protection | 1698 | * Given a SCSI command that supports DIF, determine composition of protection |
1680 | * groups involved in setting up buffer lists | 1699 | * groups involved in setting up buffer lists |
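The BlockGuard rework above is clearest as arithmetic: a PDE7 protection segment must not cross a 4 KiB boundary, so when (addrLow & 0xfff) + protgroup_len exceeds 0x1000 only the bytes up to the boundary are described in this pass, and protgroup_offset carries the remainder into the next loop iteration. A worked sketch (demo_first_chunk is hypothetical; DIF costs 8 bytes of protection data per block):

    /* Hypothetical: protection bytes that fit before the next 4 KiB
     * boundary, plus the span of user data those bytes cover. */
    static unsigned int demo_first_chunk(u32 addr_low, unsigned int prot_len,
                                         unsigned int blksize,
                                         unsigned int *data_bytes)
    {
        unsigned int room = 0x1000 - (addr_low & 0xfff);
        unsigned int chunk = (prot_len > room) ? room : prot_len;

        *data_bytes = (chunk / 8) * blksize;   /* 8 B of DIF per block */
        return chunk;
    }

For example, with addr_low ending in 0xfa0 and 0x100 bytes of protection data, only 0x60 bytes (12 blocks) fit before the boundary; the remaining 0xa0 bytes open the next PDE7.
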
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2ee0374a990..4746dcd756d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -3477,7 +3477,8 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) | |||
3477 | int retval = 0; | 3477 | int retval = 0; |
3478 | 3478 | ||
3479 | /* Read the HBA Host Status Register */ | 3479 | /* Read the HBA Host Status Register */ |
3480 | status = readl(phba->HSregaddr); | 3480 | if (lpfc_readl(phba->HSregaddr, &status)) |
3481 | return 1; | ||
3481 | 3482 | ||
3482 | /* | 3483 | /* |
3483 | * Check status register every 100ms for 5 retries, then every | 3484 | * Check status register every 100ms for 5 retries, then every |
@@ -3502,7 +3503,10 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) | |||
3502 | lpfc_sli_brdrestart(phba); | 3503 | lpfc_sli_brdrestart(phba); |
3503 | } | 3504 | } |
3504 | /* Read the HBA Host Status Register */ | 3505 | /* Read the HBA Host Status Register */ |
3505 | status = readl(phba->HSregaddr); | 3506 | if (lpfc_readl(phba->HSregaddr, &status)) { |
3507 | retval = 1; | ||
3508 | break; | ||
3509 | } | ||
3506 | } | 3510 | } |
3507 | 3511 | ||
3508 | /* Check to see if any errors occurred during init */ | 3512 | /* Check to see if any errors occurred during init */ |
@@ -3584,7 +3588,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) | |||
3584 | uint32_t __iomem *resp_buf; | 3588 | uint32_t __iomem *resp_buf; |
3585 | uint32_t __iomem *mbox_buf; | 3589 | uint32_t __iomem *mbox_buf; |
3586 | volatile uint32_t mbox; | 3590 | volatile uint32_t mbox; |
3587 | uint32_t hc_copy; | 3591 | uint32_t hc_copy, ha_copy, resp_data; |
3588 | int i; | 3592 | int i; |
3589 | uint8_t hdrtype; | 3593 | uint8_t hdrtype; |
3590 | 3594 | ||
@@ -3601,12 +3605,15 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) | |||
3601 | resp_buf = phba->MBslimaddr; | 3605 | resp_buf = phba->MBslimaddr; |
3602 | 3606 | ||
3603 | /* Disable the error attention */ | 3607 | /* Disable the error attention */ |
3604 | hc_copy = readl(phba->HCregaddr); | 3608 | if (lpfc_readl(phba->HCregaddr, &hc_copy)) |
3609 | return; | ||
3605 | writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); | 3610 | writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); |
3606 | readl(phba->HCregaddr); /* flush */ | 3611 | readl(phba->HCregaddr); /* flush */ |
3607 | phba->link_flag |= LS_IGNORE_ERATT; | 3612 | phba->link_flag |= LS_IGNORE_ERATT; |
3608 | 3613 | ||
3609 | if (readl(phba->HAregaddr) & HA_ERATT) { | 3614 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
3615 | return; | ||
3616 | if (ha_copy & HA_ERATT) { | ||
3610 | /* Clear Chip error bit */ | 3617 | /* Clear Chip error bit */ |
3611 | writel(HA_ERATT, phba->HAregaddr); | 3618 | writel(HA_ERATT, phba->HAregaddr); |
3612 | phba->pport->stopped = 1; | 3619 | phba->pport->stopped = 1; |
@@ -3620,11 +3627,18 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) | |||
3620 | mbox_buf = phba->MBslimaddr; | 3627 | mbox_buf = phba->MBslimaddr; |
3621 | writel(mbox, mbox_buf); | 3628 | writel(mbox, mbox_buf); |
3622 | 3629 | ||
3623 | for (i = 0; | 3630 | for (i = 0; i < 50; i++) { |
3624 | readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) | 3631 | if (lpfc_readl((resp_buf + 1), &resp_data)) |
3625 | mdelay(1); | 3632 | return; |
3626 | 3633 | if (resp_data != ~(BARRIER_TEST_PATTERN)) | |
3627 | if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { | 3634 | mdelay(1); |
3635 | else | ||
3636 | break; | ||
3637 | } | ||
3638 | resp_data = 0; | ||
3639 | if (lpfc_readl((resp_buf + 1), &resp_data)) | ||
3640 | return; | ||
3641 | if (resp_data != ~(BARRIER_TEST_PATTERN)) { | ||
3628 | if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || | 3642 | if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || |
3629 | phba->pport->stopped) | 3643 | phba->pport->stopped) |
3630 | goto restore_hc; | 3644 | goto restore_hc; |
@@ -3633,13 +3647,26 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) | |||
3633 | } | 3647 | } |
3634 | 3648 | ||
3635 | ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; | 3649 | ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; |
3636 | for (i = 0; readl(resp_buf) != mbox && i < 500; i++) | 3650 | resp_data = 0; |
3637 | mdelay(1); | 3651 | for (i = 0; i < 500; i++) { |
3652 | if (lpfc_readl(resp_buf, &resp_data)) | ||
3653 | return; | ||
3654 | if (resp_data != mbox) | ||
3655 | mdelay(1); | ||
3656 | else | ||
3657 | break; | ||
3658 | } | ||
3638 | 3659 | ||
3639 | clear_errat: | 3660 | clear_errat: |
3640 | 3661 | ||
3641 | while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) | 3662 | while (++i < 500) { |
3642 | mdelay(1); | 3663 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
3664 | return; | ||
3665 | if (!(ha_copy & HA_ERATT)) | ||
3666 | mdelay(1); | ||
3667 | else | ||
3668 | break; | ||
3669 | } | ||
3643 | 3670 | ||
3644 | if (readl(phba->HAregaddr) & HA_ERATT) { | 3671 | if (readl(phba->HAregaddr) & HA_ERATT) { |
3645 | writel(HA_ERATT, phba->HAregaddr); | 3672 | writel(HA_ERATT, phba->HAregaddr); |
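The reset-barrier rewrite above replaces readl() calls buried in loop conditions with explicit bounded loops, so a dead adapter ends the wait at the first all-ones read instead of burning the full retry budget on garbage. The common skeleton, as a hypothetical helper:

    /* Hypothetical skeleton of the converted barrier polls. */
    static int demo_poll_mmio(void __iomem *addr, u32 want, int tries)
    {
        u32 val;

        while (tries--) {
            if (lpfc_readl(addr, &val))
                return -EIO;                  /* adapter unplugged */
            if (val == want)
                return 0;                     /* pattern observed */
            mdelay(1);                        /* busy-wait, as before */
        }
        return -ETIMEDOUT;
    }
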
@@ -3686,7 +3713,11 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) | |||
3686 | 3713 | ||
3687 | /* Disable the error attention */ | 3714 | /* Disable the error attention */ |
3688 | spin_lock_irq(&phba->hbalock); | 3715 | spin_lock_irq(&phba->hbalock); |
3689 | status = readl(phba->HCregaddr); | 3716 | if (lpfc_readl(phba->HCregaddr, &status)) { |
3717 | spin_unlock_irq(&phba->hbalock); | ||
3718 | mempool_free(pmb, phba->mbox_mem_pool); | ||
3719 | return 1; | ||
3720 | } | ||
3690 | status &= ~HC_ERINT_ENA; | 3721 | status &= ~HC_ERINT_ENA; |
3691 | writel(status, phba->HCregaddr); | 3722 | writel(status, phba->HCregaddr); |
3692 | readl(phba->HCregaddr); /* flush */ | 3723 | readl(phba->HCregaddr); /* flush */ |
@@ -3720,11 +3751,12 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) | |||
3720 | * 3 seconds we still set HBA_ERROR state because the status of the | 3751 | * 3 seconds we still set HBA_ERROR state because the status of the |
3721 | * board is now undefined. | 3752 | * board is now undefined. |
3722 | */ | 3753 | */ |
3723 | ha_copy = readl(phba->HAregaddr); | 3754 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
3724 | 3755 | return 1; | |
3725 | while ((i++ < 30) && !(ha_copy & HA_ERATT)) { | 3756 | while ((i++ < 30) && !(ha_copy & HA_ERATT)) { |
3726 | mdelay(100); | 3757 | mdelay(100); |
3727 | ha_copy = readl(phba->HAregaddr); | 3758 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
3759 | return 1; | ||
3728 | } | 3760 | } |
3729 | 3761 | ||
3730 | del_timer_sync(&psli->mbox_tmo); | 3762 | del_timer_sync(&psli->mbox_tmo); |
@@ -4018,7 +4050,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) | |||
4018 | uint32_t status, i = 0; | 4050 | uint32_t status, i = 0; |
4019 | 4051 | ||
4020 | /* Read the HBA Host Status Register */ | 4052 | /* Read the HBA Host Status Register */ |
4021 | status = readl(phba->HSregaddr); | 4053 | if (lpfc_readl(phba->HSregaddr, &status)) |
4054 | return -EIO; | ||
4022 | 4055 | ||
4023 | /* Check status register to see what current state is */ | 4056 | /* Check status register to see what current state is */ |
4024 | i = 0; | 4057 | i = 0; |
@@ -4073,7 +4106,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) | |||
4073 | lpfc_sli_brdrestart(phba); | 4106 | lpfc_sli_brdrestart(phba); |
4074 | } | 4107 | } |
4075 | /* Read the HBA Host Status Register */ | 4108 | /* Read the HBA Host Status Register */ |
4076 | status = readl(phba->HSregaddr); | 4109 | if (lpfc_readl(phba->HSregaddr, &status)) |
4110 | return -EIO; | ||
4077 | } | 4111 | } |
4078 | 4112 | ||
4079 | /* Check to see if any errors occurred during init */ | 4113 | /* Check to see if any errors occurred during init */ |
@@ -5136,7 +5170,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
5136 | MAILBOX_t *mb; | 5170 | MAILBOX_t *mb; |
5137 | struct lpfc_sli *psli = &phba->sli; | 5171 | struct lpfc_sli *psli = &phba->sli; |
5138 | uint32_t status, evtctr; | 5172 | uint32_t status, evtctr; |
5139 | uint32_t ha_copy; | 5173 | uint32_t ha_copy, hc_copy; |
5140 | int i; | 5174 | int i; |
5141 | unsigned long timeout; | 5175 | unsigned long timeout; |
5142 | unsigned long drvr_flag = 0; | 5176 | unsigned long drvr_flag = 0; |
@@ -5202,15 +5236,17 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
5202 | goto out_not_finished; | 5236 | goto out_not_finished; |
5203 | } | 5237 | } |
5204 | 5238 | ||
5205 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && | 5239 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { |
5206 | !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { | 5240 | if (lpfc_readl(phba->HCregaddr, &hc_copy) || |
5207 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 5241 | !(hc_copy & HC_MBINT_ENA)) { |
5208 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 5242 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
5243 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
5209 | "(%d):2528 Mailbox command x%x cannot " | 5244 | "(%d):2528 Mailbox command x%x cannot " |
5210 | "issue Data: x%x x%x\n", | 5245 | "issue Data: x%x x%x\n", |
5211 | pmbox->vport ? pmbox->vport->vpi : 0, | 5246 | pmbox->vport ? pmbox->vport->vpi : 0, |
5212 | pmbox->u.mb.mbxCommand, psli->sli_flag, flag); | 5247 | pmbox->u.mb.mbxCommand, psli->sli_flag, flag); |
5213 | goto out_not_finished; | 5248 | goto out_not_finished; |
5249 | } | ||
5214 | } | 5250 | } |
5215 | 5251 | ||
5216 | if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { | 5252 | if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { |
@@ -5408,11 +5444,19 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
5408 | word0 = le32_to_cpu(word0); | 5444 | word0 = le32_to_cpu(word0); |
5409 | } else { | 5445 | } else { |
5410 | /* First read mbox status word */ | 5446 | /* First read mbox status word */ |
5411 | word0 = readl(phba->MBslimaddr); | 5447 | if (lpfc_readl(phba->MBslimaddr, &word0)) { |
5448 | spin_unlock_irqrestore(&phba->hbalock, | ||
5449 | drvr_flag); | ||
5450 | goto out_not_finished; | ||
5451 | } | ||
5412 | } | 5452 | } |
5413 | 5453 | ||
5414 | /* Read the HBA Host Attention Register */ | 5454 | /* Read the HBA Host Attention Register */ |
5415 | ha_copy = readl(phba->HAregaddr); | 5455 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) { |
5456 | spin_unlock_irqrestore(&phba->hbalock, | ||
5457 | drvr_flag); | ||
5458 | goto out_not_finished; | ||
5459 | } | ||
5416 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, | 5460 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, |
5417 | mb->mbxCommand) * | 5461 | mb->mbxCommand) * |
5418 | 1000) + jiffies; | 5462 | 1000) + jiffies; |
@@ -5463,7 +5507,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
5463 | word0 = readl(phba->MBslimaddr); | 5507 | word0 = readl(phba->MBslimaddr); |
5464 | } | 5508 | } |
5465 | /* Read the HBA Host Attention Register */ | 5509 | /* Read the HBA Host Attention Register */ |
5466 | ha_copy = readl(phba->HAregaddr); | 5510 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) { |
5511 | spin_unlock_irqrestore(&phba->hbalock, | ||
5512 | drvr_flag); | ||
5513 | goto out_not_finished; | ||
5514 | } | ||
5467 | } | 5515 | } |
5468 | 5516 | ||
5469 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { | 5517 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
@@ -6263,7 +6311,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, | |||
6263 | bf_set(lpfc_sli4_sge_last, sgl, 1); | 6311 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
6264 | else | 6312 | else |
6265 | bf_set(lpfc_sli4_sge_last, sgl, 0); | 6313 | bf_set(lpfc_sli4_sge_last, sgl, 0); |
6266 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
6267 | /* swap the size field back to the cpu so we | 6314 | /* swap the size field back to the cpu so we |
6268 | * can assign it to the sgl. | 6315 | * can assign it to the sgl. |
6269 | */ | 6316 | */ |
@@ -6283,6 +6330,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, | |||
6283 | bf_set(lpfc_sli4_sge_offset, sgl, offset); | 6330 | bf_set(lpfc_sli4_sge_offset, sgl, offset); |
6284 | offset += bde.tus.f.bdeSize; | 6331 | offset += bde.tus.f.bdeSize; |
6285 | } | 6332 | } |
6333 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
6286 | bpl++; | 6334 | bpl++; |
6287 | sgl++; | 6335 | sgl++; |
6288 | } | 6336 | } |
@@ -6528,9 +6576,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6528 | numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / | 6576 | numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / |
6529 | sizeof(struct ulp_bde64); | 6577 | sizeof(struct ulp_bde64); |
6530 | for (i = 0; i < numBdes; i++) { | 6578 | for (i = 0; i < numBdes; i++) { |
6531 | if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64) | ||
6532 | break; | ||
6533 | bde.tus.w = le32_to_cpu(bpl[i].tus.w); | 6579 | bde.tus.w = le32_to_cpu(bpl[i].tus.w); |
6580 | if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) | ||
6581 | break; | ||
6534 | xmit_len += bde.tus.f.bdeSize; | 6582 | xmit_len += bde.tus.f.bdeSize; |
6535 | } | 6583 | } |
6536 | /* word3 iocb=IO_TAG wqe=request_payload_len */ | 6584 | /* word3 iocb=IO_TAG wqe=request_payload_len */ |
@@ -6620,15 +6668,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6620 | xritag = 0; | 6668 | xritag = 0; |
6621 | break; | 6669 | break; |
6622 | case CMD_XMIT_BLS_RSP64_CX: | 6670 | case CMD_XMIT_BLS_RSP64_CX: |
6623 | /* As BLS ABTS-ACC WQE is very different from other WQEs, | 6671 | /* As BLS ABTS RSP WQE is very different from other WQEs, |
6624 | * we re-construct this WQE here based on information in | 6672 | * we re-construct this WQE here based on information in |
6625 | * iocbq from scratch. | 6673 | * iocbq from scratch. |
6626 | */ | 6674 | */ |
6627 | memset(wqe, 0, sizeof(union lpfc_wqe)); | 6675 | memset(wqe, 0, sizeof(union lpfc_wqe)); |
6628 | /* OX_ID is invariable to who sent ABTS to CT exchange */ | 6676 | /* OX_ID is invariable to who sent ABTS to CT exchange */ |
6629 | bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, | 6677 | bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, |
6630 | bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc)); | 6678 | bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); |
6631 | if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) == | 6679 | if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == |
6632 | LPFC_ABTS_UNSOL_INT) { | 6680 | LPFC_ABTS_UNSOL_INT) { |
6633 | /* ABTS sent by initiator to CT exchange, the | 6681 | /* ABTS sent by initiator to CT exchange, the |
6634 | * RX_ID field will be filled with the newly | 6682 | * RX_ID field will be filled with the newly |
@@ -6642,7 +6690,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6642 | * RX_ID from ABTS. | 6690 | * RX_ID from ABTS. |
6643 | */ | 6691 | */ |
6644 | bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, | 6692 | bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, |
6645 | bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc)); | 6693 | bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); |
6646 | } | 6694 | } |
6647 | bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); | 6695 | bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); |
6648 | bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); | 6696 | bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); |
@@ -6653,6 +6701,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6653 | LPFC_WQE_LENLOC_NONE); | 6701 | LPFC_WQE_LENLOC_NONE); |
6654 | /* Overwrite the pre-set command type with OTHER_COMMAND */ | 6702 | /* Overwrite the pre-set command type with OTHER_COMMAND */ |
6655 | command_type = OTHER_COMMAND; | 6703 | command_type = OTHER_COMMAND; |
6704 | if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { | ||
6705 | bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, | ||
6706 | bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); | ||
6707 | bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, | ||
6708 | bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); | ||
6709 | bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, | ||
6710 | bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); | ||
6711 | } | ||
6712 | |||
6656 | break; | 6713 | break; |
6657 | case CMD_XRI_ABORTED_CX: | 6714 | case CMD_XRI_ABORTED_CX: |
6658 | case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ | 6715 | case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ |
@@ -6701,7 +6758,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, | |||
6701 | 6758 | ||
6702 | if (piocb->sli4_xritag == NO_XRI) { | 6759 | if (piocb->sli4_xritag == NO_XRI) { |
6703 | if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || | 6760 | if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || |
6704 | piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) | 6761 | piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || |
6762 | piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX) | ||
6705 | sglq = NULL; | 6763 | sglq = NULL; |
6706 | else { | 6764 | else { |
6707 | if (pring->txq_cnt) { | 6765 | if (pring->txq_cnt) { |
@@ -8194,7 +8252,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | |||
8194 | piocb->iocb_flag &= ~LPFC_IO_WAKE; | 8252 | piocb->iocb_flag &= ~LPFC_IO_WAKE; |
8195 | 8253 | ||
8196 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 8254 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
8197 | creg_val = readl(phba->HCregaddr); | 8255 | if (lpfc_readl(phba->HCregaddr, &creg_val)) |
8256 | return IOCB_ERROR; | ||
8198 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | 8257 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
8199 | writel(creg_val, phba->HCregaddr); | 8258 | writel(creg_val, phba->HCregaddr); |
8200 | readl(phba->HCregaddr); /* flush */ | 8259 | readl(phba->HCregaddr); /* flush */ |
@@ -8236,7 +8295,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | |||
8236 | } | 8295 | } |
8237 | 8296 | ||
8238 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 8297 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
8239 | creg_val = readl(phba->HCregaddr); | 8298 | if (lpfc_readl(phba->HCregaddr, &creg_val)) |
8299 | return IOCB_ERROR; | ||
8240 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); | 8300 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); |
8241 | writel(creg_val, phba->HCregaddr); | 8301 | writel(creg_val, phba->HCregaddr); |
8242 | readl(phba->HCregaddr); /* flush */ | 8302 | readl(phba->HCregaddr); /* flush */ |
@@ -8387,10 +8447,13 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) | |||
8387 | uint32_t ha_copy; | 8447 | uint32_t ha_copy; |
8388 | 8448 | ||
8389 | /* Read chip Host Attention (HA) register */ | 8449 | /* Read chip Host Attention (HA) register */ |
8390 | ha_copy = readl(phba->HAregaddr); | 8450 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
8451 | goto unplug_err; | ||
8452 | |||
8391 | if (ha_copy & HA_ERATT) { | 8453 | if (ha_copy & HA_ERATT) { |
8392 | /* Read host status register to retrieve error event */ | 8454 | /* Read host status register to retrieve error event */ |
8393 | lpfc_sli_read_hs(phba); | 8455 | if (lpfc_sli_read_hs(phba)) |
8456 | goto unplug_err; | ||
8394 | 8457 | ||
8395 | /* Check if there is a deferred error condition is active */ | 8458 | /* Check if there is a deferred error condition is active */ |
8396 | if ((HS_FFER1 & phba->work_hs) && | 8459 | if ((HS_FFER1 & phba->work_hs) && |
@@ -8409,6 +8472,15 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) | |||
8409 | return 1; | 8472 | return 1; |
8410 | } | 8473 | } |
8411 | return 0; | 8474 | return 0; |
8475 | |||
8476 | unplug_err: | ||
8477 | /* Set the driver HS work bitmap */ | ||
8478 | phba->work_hs |= UNPLUG_ERR; | ||
8479 | /* Set the driver HA work bitmap */ | ||
8480 | phba->work_ha |= HA_ERATT; | ||
8481 | /* Indicate polling handles this ERATT */ | ||
8482 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
8483 | return 1; | ||
8412 | } | 8484 | } |
8413 | 8485 | ||
8414 | /** | 8486 | /** |
@@ -8436,8 +8508,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) | |||
8436 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); | 8508 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
8437 | switch (if_type) { | 8509 | switch (if_type) { |
8438 | case LPFC_SLI_INTF_IF_TYPE_0: | 8510 | case LPFC_SLI_INTF_IF_TYPE_0: |
8439 | uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); | 8511 | if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, |
8440 | uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); | 8512 | &uerr_sta_lo) || |
8513 | lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, | ||
8514 | &uerr_sta_hi)) { | ||
8515 | phba->work_hs |= UNPLUG_ERR; | ||
8516 | phba->work_ha |= HA_ERATT; | ||
8517 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
8518 | return 1; | ||
8519 | } | ||
8441 | if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || | 8520 | if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || |
8442 | (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { | 8521 | (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { |
8443 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 8522 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
@@ -8456,9 +8535,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) | |||
8456 | } | 8535 | } |
8457 | break; | 8536 | break; |
8458 | case LPFC_SLI_INTF_IF_TYPE_2: | 8537 | case LPFC_SLI_INTF_IF_TYPE_2: |
8459 | portstat_reg.word0 = | 8538 | if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, |
8460 | readl(phba->sli4_hba.u.if_type2.STATUSregaddr); | 8539 | &portstat_reg.word0) || |
8461 | portsmphr = readl(phba->sli4_hba.PSMPHRregaddr); | 8540 | lpfc_readl(phba->sli4_hba.PSMPHRregaddr, |
8541 | &portsmphr)) { | ||
8542 | phba->work_hs |= UNPLUG_ERR; | ||
8543 | phba->work_ha |= HA_ERATT; | ||
8544 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
8545 | return 1; | ||
8546 | } | ||
8462 | if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { | 8547 | if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { |
8463 | phba->work_status[0] = | 8548 | phba->work_status[0] = |
8464 | readl(phba->sli4_hba.u.if_type2.ERR1regaddr); | 8549 | readl(phba->sli4_hba.u.if_type2.ERR1regaddr); |
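The three unplug exits above (the unplug_err label and the two if_type branches) repeat the same bookkeeping. A hypothetical helper, not present in this diff, shows the pattern in one place:

	/* Hypothetical factoring of the repeated unplug bookkeeping. */
	static int
	lpfc_mark_unplug_eratt(struct lpfc_hba *phba)
	{
		phba->work_hs |= UNPLUG_ERR;		/* record why: device unplugged */
		phba->work_ha |= HA_ERATT;		/* raise error attention */
		phba->hba_flag |= HBA_ERATT_HANDLED;	/* polling path owns this ERATT */
		return 1;				/* error attention present */
	}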
@@ -8639,7 +8724,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8639 | return IRQ_NONE; | 8724 | return IRQ_NONE; |
8640 | /* Need to read HA REG for slow-path events */ | 8725 | /* Need to read HA REG for slow-path events */ |
8641 | spin_lock_irqsave(&phba->hbalock, iflag); | 8726 | spin_lock_irqsave(&phba->hbalock, iflag); |
8642 | ha_copy = readl(phba->HAregaddr); | 8727 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
8728 | goto unplug_error; | ||
8643 | /* If somebody is waiting to handle an eratt don't process it | 8729 | /* If somebody is waiting to handle an eratt don't process it |
8644 | * here. The brdkill function will do this. | 8730 | * here. The brdkill function will do this. |
8645 | */ | 8731 | */ |
@@ -8665,7 +8751,9 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8665 | } | 8751 | } |
8666 | 8752 | ||
8667 | /* Clear up only attention source related to slow-path */ | 8753 | /* Clear up only attention source related to slow-path */ |
8668 | hc_copy = readl(phba->HCregaddr); | 8754 | if (lpfc_readl(phba->HCregaddr, &hc_copy)) |
8755 | goto unplug_error; | ||
8756 | |||
8669 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | | 8757 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | |
8670 | HC_LAINT_ENA | HC_ERINT_ENA), | 8758 | HC_LAINT_ENA | HC_ERINT_ENA), |
8671 | phba->HCregaddr); | 8759 | phba->HCregaddr); |
@@ -8688,7 +8776,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8688 | */ | 8776 | */ |
8689 | spin_lock_irqsave(&phba->hbalock, iflag); | 8777 | spin_lock_irqsave(&phba->hbalock, iflag); |
8690 | phba->sli.sli_flag &= ~LPFC_PROCESS_LA; | 8778 | phba->sli.sli_flag &= ~LPFC_PROCESS_LA; |
8691 | control = readl(phba->HCregaddr); | 8779 | if (lpfc_readl(phba->HCregaddr, &control)) |
8780 | goto unplug_error; | ||
8692 | control &= ~HC_LAINT_ENA; | 8781 | control &= ~HC_LAINT_ENA; |
8693 | writel(control, phba->HCregaddr); | 8782 | writel(control, phba->HCregaddr); |
8694 | readl(phba->HCregaddr); /* flush */ | 8783 | readl(phba->HCregaddr); /* flush */ |
@@ -8708,7 +8797,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8708 | status >>= (4*LPFC_ELS_RING); | 8797 | status >>= (4*LPFC_ELS_RING); |
8709 | if (status & HA_RXMASK) { | 8798 | if (status & HA_RXMASK) { |
8710 | spin_lock_irqsave(&phba->hbalock, iflag); | 8799 | spin_lock_irqsave(&phba->hbalock, iflag); |
8711 | control = readl(phba->HCregaddr); | 8800 | if (lpfc_readl(phba->HCregaddr, &control)) |
8801 | goto unplug_error; | ||
8712 | 8802 | ||
8713 | lpfc_debugfs_slow_ring_trc(phba, | 8803 | lpfc_debugfs_slow_ring_trc(phba, |
8714 | "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", | 8804 | "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", |
@@ -8741,7 +8831,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) | |||
8741 | } | 8831 | } |
8742 | spin_lock_irqsave(&phba->hbalock, iflag); | 8832 | spin_lock_irqsave(&phba->hbalock, iflag); |
8743 | if (work_ha_copy & HA_ERATT) { | 8833 | if (work_ha_copy & HA_ERATT) { |
8744 | lpfc_sli_read_hs(phba); | 8834 | if (lpfc_sli_read_hs(phba)) |
8835 | goto unplug_error; | ||
8745 | /* | 8836 | /* |
8746 | * Check if a deferred error condition | 8837 | * Check if a deferred error condition |
8747 | * is active | 8838 | * is active |
@@ -8872,6 +8963,9 @@ send_current_mbox: | |||
8872 | lpfc_worker_wake_up(phba); | 8963 | lpfc_worker_wake_up(phba); |
8873 | } | 8964 | } |
8874 | return IRQ_HANDLED; | 8965 | return IRQ_HANDLED; |
8966 | unplug_error: | ||
8967 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
8968 | return IRQ_HANDLED; | ||
8875 | 8969 | ||
8876 | } /* lpfc_sli_sp_intr_handler */ | 8970 | } /* lpfc_sli_sp_intr_handler */ |
8877 | 8971 | ||
@@ -8919,7 +9013,8 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) | |||
8919 | if (lpfc_intr_state_check(phba)) | 9013 | if (lpfc_intr_state_check(phba)) |
8920 | return IRQ_NONE; | 9014 | return IRQ_NONE; |
8921 | /* Need to read HA REG for FCP ring and other ring events */ | 9015 | /* Need to read HA REG for FCP ring and other ring events */ |
8922 | ha_copy = readl(phba->HAregaddr); | 9016 | if (lpfc_readl(phba->HAregaddr, &ha_copy)) |
9017 | return IRQ_HANDLED; | ||
8923 | /* Clear up only attention source related to fast-path */ | 9018 | /* Clear up only attention source related to fast-path */ |
8924 | spin_lock_irqsave(&phba->hbalock, iflag); | 9019 | spin_lock_irqsave(&phba->hbalock, iflag); |
8925 | /* | 9020 | /* |
@@ -9004,7 +9099,11 @@ lpfc_sli_intr_handler(int irq, void *dev_id) | |||
9004 | return IRQ_NONE; | 9099 | return IRQ_NONE; |
9005 | 9100 | ||
9006 | spin_lock(&phba->hbalock); | 9101 | spin_lock(&phba->hbalock); |
9007 | phba->ha_copy = readl(phba->HAregaddr); | 9102 | if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { |
9103 | spin_unlock(&phba->hbalock); | ||
9104 | return IRQ_HANDLED; | ||
9105 | } | ||
9106 | |||
9008 | if (unlikely(!phba->ha_copy)) { | 9107 | if (unlikely(!phba->ha_copy)) { |
9009 | spin_unlock(&phba->hbalock); | 9108 | spin_unlock(&phba->hbalock); |
9010 | return IRQ_NONE; | 9109 | return IRQ_NONE; |
@@ -9026,7 +9125,10 @@ lpfc_sli_intr_handler(int irq, void *dev_id) | |||
9026 | } | 9125 | } |
9027 | 9126 | ||
9028 | /* Clear attention sources except link and error attentions */ | 9127 | /* Clear attention sources except link and error attentions */ |
9029 | hc_copy = readl(phba->HCregaddr); | 9128 | if (lpfc_readl(phba->HCregaddr, &hc_copy)) { |
9129 | spin_unlock(&phba->hbalock); | ||
9130 | return IRQ_HANDLED; | ||
9131 | } | ||
9030 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA | 9132 | writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA |
9031 | | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), | 9133 | | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), |
9032 | phba->HCregaddr); | 9134 | phba->HCregaddr); |
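Note the locking discipline the new exits above follow: a bail-out taken while hbalock is held drops the lock before returning, and the handler returns IRQ_HANDLED rather than IRQ_NONE, presumably so a shared interrupt line is not disabled as unclaimed when the adapter has simply gone away. Distilled from the hunk (identifiers as in the diff):

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		/* Unreadable device: unlock, claim the IRQ, bail. */
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}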
@@ -10403,7 +10505,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
10403 | if (!phba->sli4_hba.pc_sli4_params.supported) | 10505 | if (!phba->sli4_hba.pc_sli4_params.supported) |
10404 | hw_page_size = SLI4_PAGE_SIZE; | 10506 | hw_page_size = SLI4_PAGE_SIZE; |
10405 | 10507 | ||
10406 | |||
10407 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 10508 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
10408 | if (!mbox) | 10509 | if (!mbox) |
10409 | return -ENOMEM; | 10510 | return -ENOMEM; |
@@ -10413,11 +10514,22 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
10413 | LPFC_MBOX_OPCODE_CQ_CREATE, | 10514 | LPFC_MBOX_OPCODE_CQ_CREATE, |
10414 | length, LPFC_SLI4_MBX_EMBED); | 10515 | length, LPFC_SLI4_MBX_EMBED); |
10415 | cq_create = &mbox->u.mqe.un.cq_create; | 10516 | cq_create = &mbox->u.mqe.un.cq_create; |
10517 | shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; | ||
10416 | bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, | 10518 | bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, |
10417 | cq->page_count); | 10519 | cq->page_count); |
10418 | bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); | 10520 | bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); |
10419 | bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); | 10521 | bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); |
10420 | bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); | 10522 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
10523 | phba->sli4_hba.pc_sli4_params.cqv); | ||
10524 | if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { | ||
10525 | bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, | ||
10526 | (PAGE_SIZE/SLI4_PAGE_SIZE)); | ||
10527 | bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, | ||
10528 | eq->queue_id); | ||
10529 | } else { | ||
10530 | bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, | ||
10531 | eq->queue_id); | ||
10532 | } | ||
10421 | switch (cq->entry_count) { | 10533 | switch (cq->entry_count) { |
10422 | default: | 10534 | default: |
10423 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 10535 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
@@ -10449,7 +10561,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
10449 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | 10561 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
10450 | 10562 | ||
10451 | /* The IOCTL status is embedded in the mailbox subheader. */ | 10563 | /* The IOCTL status is embedded in the mailbox subheader. */ |
10452 | shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; | ||
10453 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | 10564 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
10454 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | 10565 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
10455 | if (shdr_status || shdr_add_status || rc) { | 10566 | if (shdr_status || shdr_add_status || rc) { |
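The queue-create hunks from here on share one pattern: take the cfg_shdr pointer up front, stamp the request header with the queue-create version the firmware reported in pc_sli4_params (cqv, mqv, wqv, rqv), then fill either the version-specific or the legacy context fields. Hoisting the shdr assignment is what allows the version write before lpfc_sli_issue_mbox(); the later read of shdr->response is untouched. The bf_set()/bf_get() accessors used throughout are token-pasting shift/mask macros, roughly as below (paraphrased from lpfc_hw4.h; treat the exact spelling as an assumption):

	#define bf_set(name, ptr, value) \
		((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
		 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
	#define bf_get(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)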
@@ -10515,20 +10626,20 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10515 | bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); | 10626 | bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); |
10516 | switch (mq->entry_count) { | 10627 | switch (mq->entry_count) { |
10517 | case 16: | 10628 | case 16: |
10518 | bf_set(lpfc_mq_context_count, &mq_create->u.request.context, | 10629 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
10519 | LPFC_MQ_CNT_16); | 10630 | LPFC_MQ_RING_SIZE_16); |
10520 | break; | 10631 | break; |
10521 | case 32: | 10632 | case 32: |
10522 | bf_set(lpfc_mq_context_count, &mq_create->u.request.context, | 10633 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
10523 | LPFC_MQ_CNT_32); | 10634 | LPFC_MQ_RING_SIZE_32); |
10524 | break; | 10635 | break; |
10525 | case 64: | 10636 | case 64: |
10526 | bf_set(lpfc_mq_context_count, &mq_create->u.request.context, | 10637 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
10527 | LPFC_MQ_CNT_64); | 10638 | LPFC_MQ_RING_SIZE_64); |
10528 | break; | 10639 | break; |
10529 | case 128: | 10640 | case 128: |
10530 | bf_set(lpfc_mq_context_count, &mq_create->u.request.context, | 10641 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
10531 | LPFC_MQ_CNT_128); | 10642 | LPFC_MQ_RING_SIZE_128); |
10532 | break; | 10643 | break; |
10533 | } | 10644 | } |
10534 | list_for_each_entry(dmabuf, &mq->page_list, list) { | 10645 | list_for_each_entry(dmabuf, &mq->page_list, list) { |
@@ -10586,6 +10697,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10586 | length, LPFC_SLI4_MBX_EMBED); | 10697 | length, LPFC_SLI4_MBX_EMBED); |
10587 | 10698 | ||
10588 | mq_create_ext = &mbox->u.mqe.un.mq_create_ext; | 10699 | mq_create_ext = &mbox->u.mqe.un.mq_create_ext; |
10700 | shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; | ||
10589 | bf_set(lpfc_mbx_mq_create_ext_num_pages, | 10701 | bf_set(lpfc_mbx_mq_create_ext_num_pages, |
10590 | &mq_create_ext->u.request, mq->page_count); | 10702 | &mq_create_ext->u.request, mq->page_count); |
10591 | bf_set(lpfc_mbx_mq_create_ext_async_evt_link, | 10703 | bf_set(lpfc_mbx_mq_create_ext_async_evt_link, |
@@ -10598,9 +10710,15 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10598 | &mq_create_ext->u.request, 1); | 10710 | &mq_create_ext->u.request, 1); |
10599 | bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, | 10711 | bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, |
10600 | &mq_create_ext->u.request, 1); | 10712 | &mq_create_ext->u.request, 1); |
10601 | bf_set(lpfc_mq_context_cq_id, | ||
10602 | &mq_create_ext->u.request.context, cq->queue_id); | ||
10603 | bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); | 10713 | bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); |
10714 | bf_set(lpfc_mbox_hdr_version, &shdr->request, | ||
10715 | phba->sli4_hba.pc_sli4_params.mqv); | ||
10716 | if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) | ||
10717 | bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, | ||
10718 | cq->queue_id); | ||
10719 | else | ||
10720 | bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, | ||
10721 | cq->queue_id); | ||
10604 | switch (mq->entry_count) { | 10722 | switch (mq->entry_count) { |
10605 | default: | 10723 | default: |
10606 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 10724 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
@@ -10610,20 +10728,24 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10610 | return -EINVAL; | 10728 | return -EINVAL; |
10611 | /* otherwise default to smallest count (drop through) */ | 10729 | /* otherwise default to smallest count (drop through) */ |
10612 | case 16: | 10730 | case 16: |
10613 | bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, | 10731 | bf_set(lpfc_mq_context_ring_size, |
10614 | LPFC_MQ_CNT_16); | 10732 | &mq_create_ext->u.request.context, |
10733 | LPFC_MQ_RING_SIZE_16); | ||
10615 | break; | 10734 | break; |
10616 | case 32: | 10735 | case 32: |
10617 | bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, | 10736 | bf_set(lpfc_mq_context_ring_size, |
10618 | LPFC_MQ_CNT_32); | 10737 | &mq_create_ext->u.request.context, |
10738 | LPFC_MQ_RING_SIZE_32); | ||
10619 | break; | 10739 | break; |
10620 | case 64: | 10740 | case 64: |
10621 | bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, | 10741 | bf_set(lpfc_mq_context_ring_size, |
10622 | LPFC_MQ_CNT_64); | 10742 | &mq_create_ext->u.request.context, |
10743 | LPFC_MQ_RING_SIZE_64); | ||
10623 | break; | 10744 | break; |
10624 | case 128: | 10745 | case 128: |
10625 | bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, | 10746 | bf_set(lpfc_mq_context_ring_size, |
10626 | LPFC_MQ_CNT_128); | 10747 | &mq_create_ext->u.request.context, |
10748 | LPFC_MQ_RING_SIZE_128); | ||
10627 | break; | 10749 | break; |
10628 | } | 10750 | } |
10629 | list_for_each_entry(dmabuf, &mq->page_list, list) { | 10751 | list_for_each_entry(dmabuf, &mq->page_list, list) { |
@@ -10634,7 +10756,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10634 | putPaddrHigh(dmabuf->phys); | 10756 | putPaddrHigh(dmabuf->phys); |
10635 | } | 10757 | } |
10636 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | 10758 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
10637 | shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; | ||
10638 | mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, | 10759 | mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, |
10639 | &mq_create_ext->u.response); | 10760 | &mq_create_ext->u.response); |
10640 | if (rc != MBX_SUCCESS) { | 10761 | if (rc != MBX_SUCCESS) { |
@@ -10711,6 +10832,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, | |||
10711 | uint32_t shdr_status, shdr_add_status; | 10832 | uint32_t shdr_status, shdr_add_status; |
10712 | union lpfc_sli4_cfg_shdr *shdr; | 10833 | union lpfc_sli4_cfg_shdr *shdr; |
10713 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; | 10834 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; |
10835 | struct dma_address *page; | ||
10714 | 10836 | ||
10715 | if (!phba->sli4_hba.pc_sli4_params.supported) | 10837 | if (!phba->sli4_hba.pc_sli4_params.supported) |
10716 | hw_page_size = SLI4_PAGE_SIZE; | 10838 | hw_page_size = SLI4_PAGE_SIZE; |
@@ -10724,20 +10846,42 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, | |||
10724 | LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, | 10846 | LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, |
10725 | length, LPFC_SLI4_MBX_EMBED); | 10847 | length, LPFC_SLI4_MBX_EMBED); |
10726 | wq_create = &mbox->u.mqe.un.wq_create; | 10848 | wq_create = &mbox->u.mqe.un.wq_create; |
10849 | shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; | ||
10727 | bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, | 10850 | bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, |
10728 | wq->page_count); | 10851 | wq->page_count); |
10729 | bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, | 10852 | bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, |
10730 | cq->queue_id); | 10853 | cq->queue_id); |
10854 | bf_set(lpfc_mbox_hdr_version, &shdr->request, | ||
10855 | phba->sli4_hba.pc_sli4_params.wqv); | ||
10856 | if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { | ||
10857 | bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, | ||
10858 | wq->entry_count); | ||
10859 | switch (wq->entry_size) { | ||
10860 | default: | ||
10861 | case 64: | ||
10862 | bf_set(lpfc_mbx_wq_create_wqe_size, | ||
10863 | &wq_create->u.request_1, | ||
10864 | LPFC_WQ_WQE_SIZE_64); | ||
10865 | break; | ||
10866 | case 128: | ||
10867 | bf_set(lpfc_mbx_wq_create_wqe_size, | ||
10868 | &wq_create->u.request_1, | ||
10869 | LPFC_WQ_WQE_SIZE_128); | ||
10870 | break; | ||
10871 | } | ||
10872 | bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, | ||
10873 | (PAGE_SIZE/SLI4_PAGE_SIZE)); | ||
10874 | page = wq_create->u.request_1.page; | ||
10875 | } else { | ||
10876 | page = wq_create->u.request.page; | ||
10877 | } | ||
10731 | list_for_each_entry(dmabuf, &wq->page_list, list) { | 10878 | list_for_each_entry(dmabuf, &wq->page_list, list) { |
10732 | memset(dmabuf->virt, 0, hw_page_size); | 10879 | memset(dmabuf->virt, 0, hw_page_size); |
10733 | wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | 10880 | page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); |
10734 | putPaddrLow(dmabuf->phys); | 10881 | page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); |
10735 | wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = | ||
10736 | putPaddrHigh(dmabuf->phys); | ||
10737 | } | 10882 | } |
10738 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | 10883 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
10739 | /* The IOCTL status is embedded in the mailbox subheader. */ | 10884 | /* The IOCTL status is embedded in the mailbox subheader. */ |
10740 | shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; | ||
10741 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | 10885 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
10742 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | 10886 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
10743 | if (shdr_status || shdr_add_status || rc) { | 10887 | if (shdr_status || shdr_add_status || rc) { |
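The WQ rework above keeps a single page-fill loop for both mailbox layouts by aiming a struct dma_address pointer at whichever request variant applies. Reduced to its core (identifiers from the diff):

	struct dma_address *page;

	if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1)
		page = wq_create->u.request_1.page;	/* v1 layout */
	else
		page = wq_create->u.request.page;	/* legacy layout */

	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}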
@@ -10815,37 +10959,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
10815 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, | 10959 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, |
10816 | length, LPFC_SLI4_MBX_EMBED); | 10960 | length, LPFC_SLI4_MBX_EMBED); |
10817 | rq_create = &mbox->u.mqe.un.rq_create; | 10961 | rq_create = &mbox->u.mqe.un.rq_create; |
10818 | switch (hrq->entry_count) { | 10962 | shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; |
10819 | default: | 10963 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
10820 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 10964 | phba->sli4_hba.pc_sli4_params.rqv); |
10821 | "2535 Unsupported RQ count. (%d)\n", | 10965 | if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { |
10822 | hrq->entry_count); | 10966 | bf_set(lpfc_rq_context_rqe_count_1, |
10823 | if (hrq->entry_count < 512) | 10967 | &rq_create->u.request.context, |
10824 | return -EINVAL; | 10968 | hrq->entry_count); |
10825 | /* otherwise default to smallest count (drop through) */ | 10969 | rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; |
10826 | case 512: | 10970 | } else { |
10827 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 10971 | switch (hrq->entry_count) { |
10828 | LPFC_RQ_RING_SIZE_512); | 10972 | default: |
10829 | break; | 10973 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
10830 | case 1024: | 10974 | "2535 Unsupported RQ count. (%d)\n", |
10831 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 10975 | hrq->entry_count); |
10832 | LPFC_RQ_RING_SIZE_1024); | 10976 | if (hrq->entry_count < 512) |
10833 | break; | 10977 | return -EINVAL; |
10834 | case 2048: | 10978 | /* otherwise default to smallest count (drop through) */ |
10835 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 10979 | case 512: |
10836 | LPFC_RQ_RING_SIZE_2048); | 10980 | bf_set(lpfc_rq_context_rqe_count, |
10837 | break; | 10981 | &rq_create->u.request.context, |
10838 | case 4096: | 10982 | LPFC_RQ_RING_SIZE_512); |
10839 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 10983 | break; |
10840 | LPFC_RQ_RING_SIZE_4096); | 10984 | case 1024: |
10841 | break; | 10985 | bf_set(lpfc_rq_context_rqe_count, |
10986 | &rq_create->u.request.context, | ||
10987 | LPFC_RQ_RING_SIZE_1024); | ||
10988 | break; | ||
10989 | case 2048: | ||
10990 | bf_set(lpfc_rq_context_rqe_count, | ||
10991 | &rq_create->u.request.context, | ||
10992 | LPFC_RQ_RING_SIZE_2048); | ||
10993 | break; | ||
10994 | case 4096: | ||
10995 | bf_set(lpfc_rq_context_rqe_count, | ||
10996 | &rq_create->u.request.context, | ||
10997 | LPFC_RQ_RING_SIZE_4096); | ||
10998 | break; | ||
10999 | } | ||
11000 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
11001 | LPFC_HDR_BUF_SIZE); | ||
10842 | } | 11002 | } |
10843 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, | 11003 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, |
10844 | cq->queue_id); | 11004 | cq->queue_id); |
10845 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, | 11005 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, |
10846 | hrq->page_count); | 11006 | hrq->page_count); |
10847 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
10848 | LPFC_HDR_BUF_SIZE); | ||
10849 | list_for_each_entry(dmabuf, &hrq->page_list, list) { | 11007 | list_for_each_entry(dmabuf, &hrq->page_list, list) { |
10850 | memset(dmabuf->virt, 0, hw_page_size); | 11008 | memset(dmabuf->virt, 0, hw_page_size); |
10851 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | 11009 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = |
@@ -10855,7 +11013,6 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
10855 | } | 11013 | } |
10856 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | 11014 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
10857 | /* The IOCTL status is embedded in the mailbox subheader. */ | 11015 | /* The IOCTL status is embedded in the mailbox subheader. */ |
10858 | shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; | ||
10859 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | 11016 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
10860 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | 11017 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
10861 | if (shdr_status || shdr_add_status || rc) { | 11018 | if (shdr_status || shdr_add_status || rc) { |
@@ -10881,37 +11038,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
10881 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | 11038 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
10882 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, | 11039 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, |
10883 | length, LPFC_SLI4_MBX_EMBED); | 11040 | length, LPFC_SLI4_MBX_EMBED); |
10884 | switch (drq->entry_count) { | 11041 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
10885 | default: | 11042 | phba->sli4_hba.pc_sli4_params.rqv); |
10886 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 11043 | if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { |
10887 | "2536 Unsupported RQ count. (%d)\n", | 11044 | bf_set(lpfc_rq_context_rqe_count_1, |
10888 | drq->entry_count); | 11045 | &rq_create->u.request.context, |
10889 | if (drq->entry_count < 512) | 11046 | drq->entry_count); |
10890 | return -EINVAL; | 11047 | rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; |
10891 | /* otherwise default to smallest count (drop through) */ | 11048 | } else { |
10892 | case 512: | 11049 | switch (drq->entry_count) { |
10893 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 11050 | default: |
10894 | LPFC_RQ_RING_SIZE_512); | 11051 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
10895 | break; | 11052 | "2536 Unsupported RQ count. (%d)\n", |
10896 | case 1024: | 11053 | drq->entry_count); |
10897 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 11054 | if (drq->entry_count < 512) |
10898 | LPFC_RQ_RING_SIZE_1024); | 11055 | return -EINVAL; |
10899 | break; | 11056 | /* otherwise default to smallest count (drop through) */ |
10900 | case 2048: | 11057 | case 512: |
10901 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 11058 | bf_set(lpfc_rq_context_rqe_count, |
10902 | LPFC_RQ_RING_SIZE_2048); | 11059 | &rq_create->u.request.context, |
10903 | break; | 11060 | LPFC_RQ_RING_SIZE_512); |
10904 | case 4096: | 11061 | break; |
10905 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | 11062 | case 1024: |
10906 | LPFC_RQ_RING_SIZE_4096); | 11063 | bf_set(lpfc_rq_context_rqe_count, |
10907 | break; | 11064 | &rq_create->u.request.context, |
11065 | LPFC_RQ_RING_SIZE_1024); | ||
11066 | break; | ||
11067 | case 2048: | ||
11068 | bf_set(lpfc_rq_context_rqe_count, | ||
11069 | &rq_create->u.request.context, | ||
11070 | LPFC_RQ_RING_SIZE_2048); | ||
11071 | break; | ||
11072 | case 4096: | ||
11073 | bf_set(lpfc_rq_context_rqe_count, | ||
11074 | &rq_create->u.request.context, | ||
11075 | LPFC_RQ_RING_SIZE_4096); | ||
11076 | break; | ||
11077 | } | ||
11078 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
11079 | LPFC_DATA_BUF_SIZE); | ||
10908 | } | 11080 | } |
10909 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, | 11081 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, |
10910 | cq->queue_id); | 11082 | cq->queue_id); |
10911 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, | 11083 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, |
10912 | drq->page_count); | 11084 | drq->page_count); |
10913 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
10914 | LPFC_DATA_BUF_SIZE); | ||
10915 | list_for_each_entry(dmabuf, &drq->page_list, list) { | 11085 | list_for_each_entry(dmabuf, &drq->page_list, list) { |
10916 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | 11086 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = |
10917 | putPaddrLow(dmabuf->phys); | 11087 | putPaddrLow(dmabuf->phys); |
@@ -11580,6 +11750,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) | |||
11580 | static char *rctl_names[] = FC_RCTL_NAMES_INIT; | 11750 | static char *rctl_names[] = FC_RCTL_NAMES_INIT; |
11581 | char *type_names[] = FC_TYPE_NAMES_INIT; | 11751 | char *type_names[] = FC_TYPE_NAMES_INIT; |
11582 | struct fc_vft_header *fc_vft_hdr; | 11752 | struct fc_vft_header *fc_vft_hdr; |
11753 | uint32_t *header = (uint32_t *) fc_hdr; | ||
11583 | 11754 | ||
11584 | switch (fc_hdr->fh_r_ctl) { | 11755 | switch (fc_hdr->fh_r_ctl) { |
11585 | case FC_RCTL_DD_UNCAT: /* uncategorized information */ | 11756 | case FC_RCTL_DD_UNCAT: /* uncategorized information */ |
@@ -11628,10 +11799,15 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) | |||
11628 | default: | 11799 | default: |
11629 | goto drop; | 11800 | goto drop; |
11630 | } | 11801 | } |
11802 | |||
11631 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 11803 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
11632 | "2538 Received frame rctl:%s type:%s\n", | 11804 | "2538 Received frame rctl:%s type:%s " |
11805 | "Frame Data:%08x %08x %08x %08x %08x %08x\n", | ||
11633 | rctl_names[fc_hdr->fh_r_ctl], | 11806 | rctl_names[fc_hdr->fh_r_ctl], |
11634 | type_names[fc_hdr->fh_type]); | 11807 | type_names[fc_hdr->fh_type], |
11808 | be32_to_cpu(header[0]), be32_to_cpu(header[1]), | ||
11809 | be32_to_cpu(header[2]), be32_to_cpu(header[3]), | ||
11810 | be32_to_cpu(header[4]), be32_to_cpu(header[5])); | ||
11635 | return 0; | 11811 | return 0; |
11636 | drop: | 11812 | drop: |
11637 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, | 11813 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, |
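The widened log line dumps header[0] through header[5] because a Fibre Channel frame header is exactly 24 bytes, six 32-bit big-endian words; the be32_to_cpu() calls keep the dump readable on little-endian hosts. The wire layout, per include/scsi/fc/fc_fh.h:

	struct fc_frame_header {	/* 24 bytes on the wire */
		__u8   fh_r_ctl;	/* routing control */
		__u8   fh_d_id[3];	/* destination ID */
		__u8   fh_cs_ctl;	/* class/CS control */
		__u8   fh_s_id[3];	/* source ID */
		__u8   fh_type;		/* payload type */
		__u8   fh_f_ctl[3];	/* frame control */
		__u8   fh_seq_id;	/* sequence ID */
		__u8   fh_df_ctl;	/* data field control */
		__be16 fh_seq_cnt;	/* sequence count */
		__be16 fh_ox_id;	/* originator exchange ID */
		__be16 fh_rx_id;	/* responder exchange ID */
		__be32 fh_parm_offset;	/* parameter or relative offset */
	};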
@@ -11928,17 +12104,17 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, | |||
11928 | } | 12104 | } |
11929 | 12105 | ||
11930 | /** | 12106 | /** |
11931 | * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler | 12107 | * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler |
11932 | * @phba: Pointer to HBA context object. | 12108 | * @phba: Pointer to HBA context object. |
11933 | * @cmd_iocbq: pointer to the command iocbq structure. | 12109 | * @cmd_iocbq: pointer to the command iocbq structure. |
11934 | * @rsp_iocbq: pointer to the response iocbq structure. | 12110 | * @rsp_iocbq: pointer to the response iocbq structure. |
11935 | * | 12111 | * |
11936 | * This function handles the sequence abort accept iocb command complete | 12112 | * This function handles the sequence abort response iocb command complete |
11937 | * event. It properly releases the memory allocated to the sequence abort | 12113 | * event. It properly releases the memory allocated to the sequence abort |
11938 | * accept iocb. | 12114 | * response iocb. |
11939 | **/ | 12115 | **/ |
11940 | static void | 12116 | static void |
11941 | lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, | 12117 | lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, |
11942 | struct lpfc_iocbq *cmd_iocbq, | 12118 | struct lpfc_iocbq *cmd_iocbq, |
11943 | struct lpfc_iocbq *rsp_iocbq) | 12119 | struct lpfc_iocbq *rsp_iocbq) |
11944 | { | 12120 | { |
@@ -11947,15 +12123,15 @@ lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, | |||
11947 | } | 12123 | } |
11948 | 12124 | ||
11949 | /** | 12125 | /** |
11950 | * lpfc_sli4_seq_abort_acc - Accept sequence abort | 12126 | * lpfc_sli4_seq_abort_rsp - BLS response to sequence abort |
11951 | * @phba: Pointer to HBA context object. | 12127 | * @phba: Pointer to HBA context object. |
11952 | * @fc_hdr: pointer to a FC frame header. | 12128 | * @fc_hdr: pointer to a FC frame header. |
11953 | * | 12129 | * |
11954 | * This function sends a basic accept to a previous unsol sequence abort | 12130 | * This function sends a basic response to a previous unsol sequence abort |
11955 | * event after aborting the sequence handling. | 12131 | * event after aborting the sequence handling. |
11956 | **/ | 12132 | **/ |
11957 | static void | 12133 | static void |
11958 | lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | 12134 | lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, |
11959 | struct fc_frame_header *fc_hdr) | 12135 | struct fc_frame_header *fc_hdr) |
11960 | { | 12136 | { |
11961 | struct lpfc_iocbq *ctiocb = NULL; | 12137 | struct lpfc_iocbq *ctiocb = NULL; |
@@ -11963,6 +12139,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | |||
11963 | uint16_t oxid, rxid; | 12139 | uint16_t oxid, rxid; |
11964 | uint32_t sid, fctl; | 12140 | uint32_t sid, fctl; |
11965 | IOCB_t *icmd; | 12141 | IOCB_t *icmd; |
12142 | int rc; | ||
11966 | 12143 | ||
11967 | if (!lpfc_is_link_up(phba)) | 12144 | if (!lpfc_is_link_up(phba)) |
11968 | return; | 12145 | return; |
@@ -11983,7 +12160,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | |||
11983 | + phba->sli4_hba.max_cfg_param.xri_base)) | 12160 | + phba->sli4_hba.max_cfg_param.xri_base)) |
11984 | lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); | 12161 | lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); |
11985 | 12162 | ||
11986 | /* Allocate buffer for acc iocb */ | 12163 | /* Allocate buffer for rsp iocb */ |
11987 | ctiocb = lpfc_sli_get_iocbq(phba); | 12164 | ctiocb = lpfc_sli_get_iocbq(phba); |
11988 | if (!ctiocb) | 12165 | if (!ctiocb) |
11989 | return; | 12166 | return; |
@@ -12008,32 +12185,54 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | |||
12008 | 12185 | ||
12009 | ctiocb->iocb_cmpl = NULL; | 12186 | ctiocb->iocb_cmpl = NULL; |
12010 | ctiocb->vport = phba->pport; | 12187 | ctiocb->vport = phba->pport; |
12011 | ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; | 12188 | ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; |
12189 | ctiocb->sli4_xritag = NO_XRI; | ||
12190 | |||
12191 | /* If the oxid maps to the FCP XRI range or if it is out of range, | ||
12192 | * send a BLS_RJT. The driver no longer has that exchange. | ||
12193 | * Override the IOCB for a BA_RJT. | ||
12194 | */ | ||
12195 | if (oxid > (phba->sli4_hba.max_cfg_param.max_xri + | ||
12196 | phba->sli4_hba.max_cfg_param.xri_base) || | ||
12197 | oxid > (lpfc_sli4_get_els_iocb_cnt(phba) + | ||
12198 | phba->sli4_hba.max_cfg_param.xri_base)) { | ||
12199 | icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; | ||
12200 | bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); | ||
12201 | bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); | ||
12202 | bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); | ||
12203 | } | ||
12012 | 12204 | ||
12013 | if (fctl & FC_FC_EX_CTX) { | 12205 | if (fctl & FC_FC_EX_CTX) { |
12014 | /* ABTS sent by responder to CT exchange, construction | 12206 | /* ABTS sent by responder to CT exchange, construction |
12015 | * of BA_ACC will use OX_ID from ABTS for the XRI_TAG | 12207 | * of BA_ACC will use OX_ID from ABTS for the XRI_TAG |
12016 | * field and RX_ID from ABTS for RX_ID field. | 12208 | * field and RX_ID from ABTS for RX_ID field. |
12017 | */ | 12209 | */ |
12018 | bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP); | 12210 | bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); |
12019 | bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid); | 12211 | bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); |
12020 | ctiocb->sli4_xritag = oxid; | ||
12021 | } else { | 12212 | } else { |
12022 | /* ABTS sent by initiator to CT exchange, construction | 12213 | /* ABTS sent by initiator to CT exchange, construction |
12023 | * of BA_ACC will need to allocate a new XRI as for the | 12214 | * of BA_ACC will need to allocate a new XRI as for the |
12024 | * XRI_TAG and RX_ID fields. | 12215 | * XRI_TAG and RX_ID fields. |
12025 | */ | 12216 | */ |
12026 | bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT); | 12217 | bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); |
12027 | bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI); | 12218 | bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI); |
12028 | ctiocb->sli4_xritag = NO_XRI; | ||
12029 | } | 12219 | } |
12030 | bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid); | 12220 | bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); |
12031 | 12221 | ||
12032 | /* Xmit CT abts accept on exchange <xid> */ | 12222 | /* Xmit CT abts response on exchange <xid> */ |
12033 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | 12223 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
12034 | "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", | 12224 | "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", |
12035 | CMD_XMIT_BLS_RSP64_CX, phba->link_state); | 12225 | icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); |
12036 | lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); | 12226 | |
12227 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); | ||
12228 | if (rc == IOCB_ERROR) { | ||
12229 | lpfc_printf_log(phba, KERN_ERR, LOG_ELS, | ||
12230 | "2925 Failed to issue CT ABTS RSP x%x on " | ||
12231 | "xri x%x, Data x%x\n", | ||
12232 | icmd->un.xseq64.w5.hcsw.Rctl, oxid, | ||
12233 | phba->link_state); | ||
12234 | lpfc_sli_release_iocbq(phba, ctiocb); | ||
12235 | } | ||
12037 | } | 12236 | } |
12038 | 12237 | ||
12039 | /** | 12238 | /** |
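The behavioral change in the renamed routine: the reply to an unsolicited ABTS is no longer always a BA_ACC. When the OX_ID lies in the FCP XRI range or beyond the XRIs the driver owns, the exchange is not ours to accept, so the IOCB is overridden into a BA_RJT, and the send is now checked so a failed issue releases the iocbq instead of leaking it. The decision in miniature (the range arithmetic is paraphrased into one flag; FC_BA_RJT_UNABLE and FC_BA_RJT_INV_XID are the standard BLS reject reason and explanation codes):

	if (oxid_outside_driver_els_xris) {	/* paraphrased range test */
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}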
@@ -12081,7 +12280,7 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, | |||
12081 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | 12280 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
12082 | } | 12281 | } |
12083 | /* Send basic accept (BA_ACC) to the abort requester */ | 12282 | /* Send BA_ACC or BA_RJT to the abort requester */ |
12084 | lpfc_sli4_seq_abort_acc(phba, &fc_hdr); | 12283 | lpfc_sli4_seq_abort_rsp(phba, &fc_hdr); |
12085 | } | 12284 | } |
12086 | 12285 | ||
12087 | /** | 12286 | /** |
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 595056b8960..1a3cbf88f2c 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2009-2011 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 0a4d376dbca..2404d1d6556 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.3.21" | 21 | #define LPFC_DRIVER_VERSION "8.3.22" |
22 | #define LPFC_DRIVER_NAME "lpfc" | 22 | #define LPFC_DRIVER_NAME "lpfc" |
23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | 23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" |
24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" | 24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index e8a6f1cf1e4..5e001ffd4c1 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -1748,6 +1748,54 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc) | |||
1748 | } | 1748 | } |
1749 | 1749 | ||
1750 | /** | 1750 | /** |
1751 | * _base_display_hp_branding - Display branding string | ||
1752 | * @ioc: per adapter object | ||
1753 | * | ||
1754 | * Return nothing. | ||
1755 | */ | ||
1756 | static void | ||
1757 | _base_display_hp_branding(struct MPT2SAS_ADAPTER *ioc) | ||
1758 | { | ||
1759 | if (ioc->pdev->subsystem_vendor != MPT2SAS_HP_3PAR_SSVID) | ||
1760 | return; | ||
1761 | |||
1762 | switch (ioc->pdev->device) { | ||
1763 | case MPI2_MFGPAGE_DEVID_SAS2004: | ||
1764 | switch (ioc->pdev->subsystem_device) { | ||
1765 | case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID: | ||
1766 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1767 | MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING); | ||
1768 | break; | ||
1769 | default: | ||
1770 | break; | ||
1771 | } | ||
1772 | case MPI2_MFGPAGE_DEVID_SAS2308_2: | ||
1773 | switch (ioc->pdev->subsystem_device) { | ||
1774 | case MPT2SAS_HP_2_4_INTERNAL_SSDID: | ||
1775 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1776 | MPT2SAS_HP_2_4_INTERNAL_BRANDING); | ||
1777 | break; | ||
1778 | case MPT2SAS_HP_2_4_EXTERNAL_SSDID: | ||
1779 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1780 | MPT2SAS_HP_2_4_EXTERNAL_BRANDING); | ||
1781 | break; | ||
1782 | case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID: | ||
1783 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1784 | MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING); | ||
1785 | break; | ||
1786 | case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID: | ||
1787 | printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | ||
1788 | MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING); | ||
1789 | break; | ||
1790 | default: | ||
1791 | break; | ||
1792 | } | ||
1793 | default: | ||
1794 | break; | ||
1795 | } | ||
1796 | } | ||
1797 | |||
1798 | /** | ||
1751 | * _base_display_ioc_capabilities - Display IOC's capabilities. | 1799 | * _base_display_ioc_capabilities - Display IOC's capabilities. |
1752 | * @ioc: per adapter object | 1800 | * @ioc: per adapter object |
1753 | * | 1801 | * |
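One caution in _base_display_hp_branding() as added: the MPI2_MFGPAGE_DEVID_SAS2004 case closes its inner switch and then runs straight into the MPI2_MFGPAGE_DEVID_SAS2308_2 case, which in turn falls into the outer default. If the fall-through is unintended, which is how it reads, the fix is an explicit break after each inner switch; a sketch of the corrected SAS2004 arm:

	case MPI2_MFGPAGE_DEVID_SAS2004:
		switch (ioc->pdev->subsystem_device) {
		case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
			printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
			    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
			break;
		default:
			break;
		}
		break;	/* without this, control falls into SAS2308_2 */

Today the fall-through is harmless only because the H210i SSDID (0x0046) matches none of the SAS2308_2 cases; an explicit break removes the dependence on that coincidence.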
@@ -1778,6 +1826,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) | |||
1778 | 1826 | ||
1779 | _base_display_dell_branding(ioc); | 1827 | _base_display_dell_branding(ioc); |
1780 | _base_display_intel_branding(ioc); | 1828 | _base_display_intel_branding(ioc); |
1829 | _base_display_hp_branding(ioc); | ||
1781 | 1830 | ||
1782 | printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); | 1831 | printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); |
1783 | 1832 | ||
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index a3f8aa9baea..500328245f6 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
@@ -168,6 +168,26 @@ | |||
168 | #define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E | 168 | #define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E |
169 | #define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F | 169 | #define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F |
170 | 170 | ||
171 | |||
172 | /* | ||
173 | * HP HBA branding | ||
174 | */ | ||
175 | #define MPT2SAS_HP_3PAR_SSVID 0x1590 | ||
176 | #define MPT2SAS_HP_2_4_INTERNAL_BRANDING "HP H220 Host Bus Adapter" | ||
177 | #define MPT2SAS_HP_2_4_EXTERNAL_BRANDING "HP H221 Host Bus Adapter" | ||
178 | #define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING "HP H222 Host Bus Adapter" | ||
179 | #define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING "HP H220i Host Bus Adapter" | ||
180 | #define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING "HP H210i Host Bus Adapter" | ||
181 | |||
182 | /* | ||
183 | * HP HBA SSDIDs | ||
184 | */ | ||
185 | #define MPT2SAS_HP_2_4_INTERNAL_SSDID 0x0041 | ||
186 | #define MPT2SAS_HP_2_4_EXTERNAL_SSDID 0x0042 | ||
187 | #define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID 0x0043 | ||
188 | #define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044 | ||
189 | #define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046 | ||
190 | |||
171 | /* | 191 | /* |
172 | * per target private data | 192 | * per target private data |
173 | */ | 193 | */ |
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 19ad34f381a..938d045e418 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c | |||
@@ -663,6 +663,13 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = { | |||
663 | { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, | 663 | { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, |
664 | { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, | 664 | { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, |
665 | { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, | 665 | { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, |
666 | { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, | ||
667 | { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, | ||
668 | { PCI_VDEVICE(TTI, 0x2721), chip_9480 }, | ||
669 | { PCI_VDEVICE(TTI, 0x2722), chip_9480 }, | ||
670 | { PCI_VDEVICE(TTI, 0x2740), chip_9480 }, | ||
671 | { PCI_VDEVICE(TTI, 0x2744), chip_9480 }, | ||
672 | { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, | ||
666 | 673 | ||
667 | { } /* terminate list */ | 674 | { } /* terminate list */ |
668 | }; | 675 | }; |
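The new TTI entries match on vendor and device ID alone; PCI_VDEVICE() wildcards the rest of the pci_device_id. From include/linux/pci.h:

	#define PCI_VDEVICE(vend, dev) \
		.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0

So { PCI_VDEVICE(TTI, 0x2710), chip_9480 } claims every HighPoint 0x2710 function regardless of subsystem IDs and binds it to the chip_9480 personality.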
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 2fc0045b1a5..c1f8d1b150f 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h | |||
@@ -53,6 +53,9 @@ | |||
53 | #define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022 | 53 | #define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022 |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #define ISP4XXX_PCI_FN_1 0x1 | ||
57 | #define ISP4XXX_PCI_FN_2 0x3 | ||
58 | |||
56 | #define QLA_SUCCESS 0 | 59 | #define QLA_SUCCESS 0 |
57 | #define QLA_ERROR 1 | 60 | #define QLA_ERROR 1 |
58 | 61 | ||
@@ -233,9 +236,6 @@ struct ddb_entry { | |||
233 | 236 | ||
234 | unsigned long flags; /* DDB Flags */ | 237 | unsigned long flags; /* DDB Flags */ |
235 | 238 | ||
236 | unsigned long dev_scan_wait_to_start_relogin; | ||
237 | unsigned long dev_scan_wait_to_complete_relogin; | ||
238 | |||
239 | uint16_t fw_ddb_index; /* DDB firmware index */ | 239 | uint16_t fw_ddb_index; /* DDB firmware index */ |
240 | uint16_t options; | 240 | uint16_t options; |
241 | uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ | 241 | uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ |
@@ -289,8 +289,6 @@ struct ddb_entry { | |||
289 | * DDB flags. | 289 | * DDB flags. |
290 | */ | 290 | */ |
291 | #define DF_RELOGIN 0 /* Relogin to device */ | 291 | #define DF_RELOGIN 0 /* Relogin to device */ |
292 | #define DF_NO_RELOGIN 1 /* Do not relogin if IOCTL | ||
293 | * logged it out */ | ||
294 | #define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ | 292 | #define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ |
295 | #define DF_FO_MASKED 3 | 293 | #define DF_FO_MASKED 3 |
296 | 294 | ||
@@ -376,7 +374,7 @@ struct scsi_qla_host { | |||
376 | #define AF_LINK_UP 8 /* 0x00000100 */ | 374 | #define AF_LINK_UP 8 /* 0x00000100 */ |
377 | #define AF_IRQ_ATTACHED 10 /* 0x00000400 */ | 375 | #define AF_IRQ_ATTACHED 10 /* 0x00000400 */ |
378 | #define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ | 376 | #define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */ |
379 | #define AF_HBA_GOING_AWAY 12 /* 0x00001000 */ | 377 | #define AF_HA_REMOVAL 12 /* 0x00001000 */ |
380 | #define AF_INTx_ENABLED 15 /* 0x00008000 */ | 378 | #define AF_INTx_ENABLED 15 /* 0x00008000 */ |
381 | #define AF_MSI_ENABLED 16 /* 0x00010000 */ | 379 | #define AF_MSI_ENABLED 16 /* 0x00010000 */ |
382 | #define AF_MSIX_ENABLED 17 /* 0x00020000 */ | 380 | #define AF_MSIX_ENABLED 17 /* 0x00020000 */ |
@@ -479,7 +477,6 @@ struct scsi_qla_host { | |||
479 | uint32_t timer_active; | 477 | uint32_t timer_active; |
480 | 478 | ||
481 | /* Recovery Timers */ | 479 | /* Recovery Timers */ |
482 | uint32_t discovery_wait; | ||
483 | atomic_t check_relogin_timeouts; | 480 | atomic_t check_relogin_timeouts; |
484 | uint32_t retry_reset_ha_cnt; | 481 | uint32_t retry_reset_ha_cnt; |
485 | uint32_t isp_reset_timer; /* reset test timer */ | 482 | uint32_t isp_reset_timer; /* reset test timer */ |
@@ -765,6 +762,5 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a) | |||
765 | /* Defines for process_aen() */ | 762 | /* Defines for process_aen() */ |
766 | #define PROCESS_ALL_AENS 0 | 763 | #define PROCESS_ALL_AENS 0 |
767 | #define FLUSH_DDB_CHANGED_AENS 1 | 764 | #define FLUSH_DDB_CHANGED_AENS 1 |
768 | #define RELOGIN_DDB_CHANGED_AENS 2 | ||
769 | 765 | ||
770 | #endif /*_QLA4XXX_H */ | 766 | #endif /*_QLA4XXX_H */ |
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index c1985792f03..31e2bf97198 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h | |||
@@ -455,6 +455,7 @@ struct addr_ctrl_blk { | |||
455 | uint8_t res0; /* 07 */ | 455 | uint8_t res0; /* 07 */ |
456 | uint16_t eth_mtu_size; /* 08-09 */ | 456 | uint16_t eth_mtu_size; /* 08-09 */ |
457 | uint16_t add_fw_options; /* 0A-0B */ | 457 | uint16_t add_fw_options; /* 0A-0B */ |
458 | #define SERIALIZE_TASK_MGMT 0x0400 | ||
458 | 459 | ||
459 | uint8_t hb_interval; /* 0C */ | 460 | uint8_t hb_interval; /* 0C */ |
460 | uint8_t inst_num; /* 0D */ | 461 | uint8_t inst_num; /* 0D */ |
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index 8fad99b7eef..cc53e3fbd78 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h | |||
@@ -136,7 +136,6 @@ void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha); | |||
136 | void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); | 136 | void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); |
137 | 137 | ||
138 | extern int ql4xextended_error_logging; | 138 | extern int ql4xextended_error_logging; |
139 | extern int ql4xdiscoverywait; | ||
140 | extern int ql4xdontresethba; | 139 | extern int ql4xdontresethba; |
141 | extern int ql4xenablemsix; | 140 | extern int ql4xenablemsix; |
142 | 141 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 1629c48c35e..bbb2e903d38 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c | |||
@@ -723,13 +723,38 @@ int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err) | |||
723 | return relogin; | 723 | return relogin; |
724 | } | 724 | } |
725 | 725 | ||
726 | static void qla4xxx_flush_AENS(struct scsi_qla_host *ha) | ||
727 | { | ||
728 | unsigned long wtime; | ||
729 | |||
730 | /* Flush the 0x8014 AEN from the firmware as a result of | ||
731 | * Auto connect. We are basically doing get_firmware_ddb() | ||
732 | * to determine whether we need to log back in or not. | ||
733 | * Trying to do a set ddb before we have processed 0x8014 | ||
734 | * will result in another set_ddb() for the same ddb. In other | ||
735 | * words there will be stale entries in the aen_q. | ||
736 | */ | ||
737 | wtime = jiffies + (2 * HZ); | ||
738 | do { | ||
739 | if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) | ||
740 | if (ha->firmware_state & (BIT_2 | BIT_0)) | ||
741 | return; | ||
742 | |||
743 | if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) | ||
744 | qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); | ||
745 | |||
746 | msleep(1000); | ||
747 | } while (!time_after_eq(jiffies, wtime)); | ||
748 | } | ||
749 | |||
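The relocated qla4xxx_flush_AENS(), now called from qla4xxx_build_ddb_list() below, uses the standard jiffies deadline idiom: compute the budget once, poll with a sleep, stop when time_after_eq() fires. Generic shape, with condition_met() as a placeholder for the firmware-state test:

	unsigned long deadline = jiffies + 2 * HZ;	/* two-second budget */

	do {
		if (condition_met())			/* placeholder test */
			return;
		msleep(1000);				/* sleep, don't spin */
	} while (!time_after_eq(jiffies, deadline));

time_after_eq() compares via signed subtraction, so the loop stays correct across a jiffies wraparound.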
726 | /** | 750 | /** |
727 | * qla4xxx_configure_ddbs - builds driver ddb list | 751 | * qla4xxx_build_ddb_list - builds driver ddb list |
728 | * @ha: Pointer to host adapter structure. | 752 | * @ha: Pointer to host adapter structure. |
729 | * | 753 | * |
730 | * This routine searches for all valid firmware ddb entries and builds | 754 | * This routine searches for all valid firmware ddb entries and builds |
731 | * an internal ddb list. Ddbs that are considered valid are those with | 755 | * an internal ddb list. Ddbs that are considered valid are those with |
732 | * a device state of SESSION_ACTIVE. | 756 | * a device state of SESSION_ACTIVE. |
757 | * A relogin (set_ddb) is issued for DDBs that are not online. | ||
733 | **/ | 758 | **/ |
734 | static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) | 759 | static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) |
735 | { | 760 | { |
@@ -744,6 +769,8 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) | |||
744 | uint32_t ipv6_device; | 769 | uint32_t ipv6_device; |
745 | uint32_t new_tgt; | 770 | uint32_t new_tgt; |
746 | 771 | ||
772 | qla4xxx_flush_AENS(ha); | ||
773 | |||
747 | fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), | 774 | fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), |
748 | &fw_ddb_entry_dma, GFP_KERNEL); | 775 | &fw_ddb_entry_dma, GFP_KERNEL); |
749 | if (fw_ddb_entry == NULL) { | 776 | if (fw_ddb_entry == NULL) { |
@@ -847,144 +874,6 @@ exit_build_ddb_list_no_free: | |||
847 | return status; | 874 | return status; |
848 | } | 875 | } |
849 | 876 | ||
850 | struct qla4_relog_scan { | ||
851 | int halt_wait; | ||
852 | uint32_t conn_err; | ||
853 | uint32_t fw_ddb_index; | ||
854 | uint32_t next_fw_ddb_index; | ||
855 | uint32_t fw_ddb_device_state; | ||
856 | }; | ||
857 | |||
858 | static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs) | ||
859 | { | ||
860 | struct ddb_entry *ddb_entry; | ||
861 | |||
862 | if (qla4_is_relogin_allowed(ha, rs->conn_err)) { | ||
863 | /* We either have a device that is in | ||
864 | * the process of relogging in or a | ||
865 | * device that is waiting to be | ||
866 | * relogged in */ | ||
867 | rs->halt_wait = 0; | ||
868 | |||
869 | ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, | ||
870 | rs->fw_ddb_index); | ||
871 | if (ddb_entry == NULL) | ||
872 | return QLA_ERROR; | ||
873 | |||
874 | if (ddb_entry->dev_scan_wait_to_start_relogin != 0 | ||
875 | && time_after_eq(jiffies, | ||
876 | ddb_entry-> | ||
877 | dev_scan_wait_to_start_relogin)) | ||
878 | { | ||
879 | ddb_entry->dev_scan_wait_to_start_relogin = 0; | ||
880 | qla4xxx_set_ddb_entry(ha, rs->fw_ddb_index, 0); | ||
881 | } | ||
882 | } | ||
883 | return QLA_SUCCESS; | ||
884 | } | ||
885 | |||
886 | static int qla4_scan_for_relogin(struct scsi_qla_host *ha, | ||
887 | struct qla4_relog_scan *rs) | ||
888 | { | ||
889 | int error; | ||
890 | |||
891 | /* scan for relogins | ||
892 | * ----------------- */ | ||
893 | for (rs->fw_ddb_index = 0; rs->fw_ddb_index < MAX_DDB_ENTRIES; | ||
894 | rs->fw_ddb_index = rs->next_fw_ddb_index) { | ||
895 | if (qla4xxx_get_fwddb_entry(ha, rs->fw_ddb_index, NULL, 0, | ||
896 | NULL, &rs->next_fw_ddb_index, | ||
897 | &rs->fw_ddb_device_state, | ||
898 | &rs->conn_err, NULL, NULL) | ||
899 | == QLA_ERROR) | ||
900 | return QLA_ERROR; | ||
901 | |||
902 | if (rs->fw_ddb_device_state == DDB_DS_LOGIN_IN_PROCESS) | ||
903 | rs->halt_wait = 0; | ||
904 | |||
905 | if (rs->fw_ddb_device_state == DDB_DS_SESSION_FAILED || | ||
906 | rs->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) { | ||
907 | error = qla4_test_rdy(ha, rs); | ||
908 | if (error) | ||
909 | return error; | ||
910 | } | ||
911 | |||
912 | /* We know we've reached the last device when | ||
913 | * next_fw_ddb_index is 0 */ | ||
914 | if (rs->next_fw_ddb_index == 0) | ||
915 | break; | ||
916 | } | ||
917 | return QLA_SUCCESS; | ||
918 | } | ||
919 | |||
920 | /** | ||
921 | * qla4xxx_devices_ready - wait for target devices to be logged in | ||
922 | * @ha: pointer to adapter structure | ||
923 | * | ||
924 | * This routine waits up to ql4xdiscoverywait seconds for targets in the | ||
925 | * F/W database to log in during driver load time. | ||
926 | **/ | ||
927 | static int qla4xxx_devices_ready(struct scsi_qla_host *ha) | ||
928 | { | ||
929 | int error; | ||
930 | unsigned long discovery_wtime; | ||
931 | struct qla4_relog_scan rs; | ||
932 | |||
933 | discovery_wtime = jiffies + (ql4xdiscoverywait * HZ); | ||
934 | |||
935 | DEBUG(printk("Waiting (%d) for devices ...\n", ql4xdiscoverywait)); | ||
936 | do { | ||
937 | /* poll for AEN. */ | ||
938 | qla4xxx_get_firmware_state(ha); | ||
939 | if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) { | ||
940 | /* Set time-between-relogin timer */ | ||
941 | qla4xxx_process_aen(ha, RELOGIN_DDB_CHANGED_AENS); | ||
942 | } | ||
943 | |||
944 | /* if no relogins active or needed, halt discovery wait */ | ||
945 | rs.halt_wait = 1; | ||
946 | |||
947 | error = qla4_scan_for_relogin(ha, &rs); | ||
948 | |||
949 | if (rs.halt_wait) { | ||
950 | DEBUG2(printk("scsi%ld: %s: Delay halted. Devices " | ||
951 | "Ready.\n", ha->host_no, __func__)); | ||
952 | return QLA_SUCCESS; | ||
953 | } | ||
954 | |||
955 | msleep(2000); | ||
956 | } while (!time_after_eq(jiffies, discovery_wtime)); | ||
957 | |||
958 | DEBUG3(qla4xxx_get_conn_event_log(ha)); | ||
959 | |||
960 | return QLA_SUCCESS; | ||
961 | } | ||
962 | |||
963 | static void qla4xxx_flush_AENS(struct scsi_qla_host *ha) | ||
964 | { | ||
965 | unsigned long wtime; | ||
966 | |||
967 | /* Flush the 0x8014 AEN from the firmware as a result of | ||
968 | * Auto connect. We are basically doing get_firmware_ddb() | ||
969 | * to determine whether we need to log back in or not. | ||
970 | * Trying to do a set ddb before we have processed 0x8014 | ||
971 | * will result in another set_ddb() for the same ddb. In other | ||
972 | * words, there will be stale entries in the aen_q. | ||
973 | */ | ||
974 | wtime = jiffies + (2 * HZ); | ||
975 | do { | ||
976 | if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) | ||
977 | if (ha->firmware_state & (BIT_2 | BIT_0)) | ||
978 | return; | ||
979 | |||
980 | if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) | ||
981 | qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); | ||
982 | |||
983 | msleep(1000); | ||
984 | } while (!time_after_eq(jiffies, wtime)); | ||
985 | |||
986 | } | ||
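The loop above is the driver's standard jiffies-based timed poll. As a minimal standalone sketch of the same idiom (my_condition_met() is a hypothetical stand-in for the firmware-state check, not part of this patch):

	/* Hedged sketch of the timed-poll idiom used in qla4xxx_flush_AENS(). */
	unsigned long wtime = jiffies + (2 * HZ);	/* 2-second budget */

	do {
		if (my_condition_met())			/* hypothetical check */
			break;				/* done early */
		msleep(1000);				/* sleep between polls */
	} while (!time_after_eq(jiffies, wtime));	/* bounded by wall clock */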
987 | |||
988 | static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) | 877 | static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) |
989 | { | 878 | { |
990 | uint16_t fw_ddb_index; | 879 | uint16_t fw_ddb_index; |
@@ -996,29 +885,12 @@ static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) | |||
996 | 885 | ||
997 | for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++) | 886 | for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++) |
998 | ha->fw_ddb_index_map[fw_ddb_index] = | 887 | ha->fw_ddb_index_map[fw_ddb_index] = |
999 | (struct ddb_entry *)INVALID_ENTRY; | 888 | (struct ddb_entry *)INVALID_ENTRY; |
1000 | 889 | ||
1001 | ha->tot_ddbs = 0; | 890 | ha->tot_ddbs = 0; |
1002 | 891 | ||
1003 | qla4xxx_flush_AENS(ha); | 892 | /* Perform device discovery and build ddb list. */ |
1004 | 893 | status = qla4xxx_build_ddb_list(ha); | |
1005 | /* Wait for an AEN */ | ||
1006 | qla4xxx_devices_ready(ha); | ||
1007 | |||
1008 | /* | ||
1009 | * First perform device discovery for active | ||
1010 | * fw ddb indexes and build | ||
1011 | * ddb list. | ||
1012 | */ | ||
1013 | if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR) | ||
1014 | return status; | ||
1015 | |||
1016 | /* | ||
1017 | * Targets can come online after the initial discovery, so processing | ||
1018 | * the AENs here will catch them. | ||
1019 | */ | ||
1020 | if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) | ||
1021 | qla4xxx_process_aen(ha, PROCESS_ALL_AENS); | ||
1022 | 894 | ||
1023 | return status; | 895 | return status; |
1024 | } | 896 | } |
@@ -1537,7 +1409,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
1537 | uint32_t state, uint32_t conn_err) | 1409 | uint32_t state, uint32_t conn_err) |
1538 | { | 1410 | { |
1539 | struct ddb_entry * ddb_entry; | 1411 | struct ddb_entry * ddb_entry; |
1540 | uint32_t old_fw_ddb_device_state; | ||
1541 | 1412 | ||
1542 | /* check for out of range index */ | 1413 | /* check for out of range index */ |
1543 | if (fw_ddb_index >= MAX_DDB_ENTRIES) | 1414 | if (fw_ddb_index >= MAX_DDB_ENTRIES) |
@@ -1553,27 +1424,18 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
1553 | } | 1424 | } |
1554 | 1425 | ||
1555 | /* Device already exists in our database. */ | 1426 | /* Device already exists in our database. */ |
1556 | old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; | ||
1557 | DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for " | 1427 | DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for " |
1558 | "index [%d]\n", ha->host_no, __func__, | 1428 | "index [%d]\n", ha->host_no, __func__, |
1559 | ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); | 1429 | ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); |
1560 | if (old_fw_ddb_device_state == state && | ||
1561 | state == DDB_DS_SESSION_ACTIVE) { | ||
1562 | if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { | ||
1563 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | ||
1564 | iscsi_unblock_session(ddb_entry->sess); | ||
1565 | } | ||
1566 | return QLA_SUCCESS; | ||
1567 | } | ||
1568 | 1430 | ||
1569 | ddb_entry->fw_ddb_device_state = state; | 1431 | ddb_entry->fw_ddb_device_state = state; |
1570 | /* Device is back online. */ | 1432 | /* Device is back online. */ |
1571 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { | 1433 | if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) && |
1434 | (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) { | ||
1572 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | 1435 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); |
1573 | atomic_set(&ddb_entry->relogin_retry_count, 0); | 1436 | atomic_set(&ddb_entry->relogin_retry_count, 0); |
1574 | atomic_set(&ddb_entry->relogin_timer, 0); | 1437 | atomic_set(&ddb_entry->relogin_timer, 0); |
1575 | clear_bit(DF_RELOGIN, &ddb_entry->flags); | 1438 | clear_bit(DF_RELOGIN, &ddb_entry->flags); |
1576 | clear_bit(DF_NO_RELOGIN, &ddb_entry->flags); | ||
1577 | iscsi_unblock_session(ddb_entry->sess); | 1439 | iscsi_unblock_session(ddb_entry->sess); |
1578 | iscsi_session_event(ddb_entry->sess, | 1440 | iscsi_session_event(ddb_entry->sess, |
1579 | ISCSI_KEVENT_CREATE_SESSION); | 1441 | ISCSI_KEVENT_CREATE_SESSION); |
@@ -1581,7 +1443,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
1581 | * Change the lun state to READY in case the lun TIMEOUT before | 1443 | * Change the lun state to READY in case the lun TIMEOUT before |
1582 | * the device came back. | 1444 | * the device came back. |
1583 | */ | 1445 | */ |
1584 | } else { | 1446 | } else if (ddb_entry->fw_ddb_device_state != DDB_DS_SESSION_ACTIVE) { |
1585 | /* Device went away, mark device missing */ | 1447 | /* Device went away, mark device missing */ |
1586 | if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) { | 1448 | if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) { |
1587 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing " | 1449 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing " |
@@ -1598,7 +1460,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
1598 | */ | 1460 | */ |
1599 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED && | 1461 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED && |
1600 | !test_bit(DF_RELOGIN, &ddb_entry->flags) && | 1462 | !test_bit(DF_RELOGIN, &ddb_entry->flags) && |
1601 | !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) && | ||
1602 | qla4_is_relogin_allowed(ha, conn_err)) { | 1463 | qla4_is_relogin_allowed(ha, conn_err)) { |
1603 | /* | 1464 | /* |
1604 | * This triggers a relogin. After the relogin_timer | 1465 | * This triggers a relogin. After the relogin_timer |
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 03e028e6e80..2f40ac761cd 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c | |||
@@ -801,7 +801,7 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id) | |||
801 | &ha->reg->ctrl_status); | 801 | &ha->reg->ctrl_status); |
802 | readl(&ha->reg->ctrl_status); | 802 | readl(&ha->reg->ctrl_status); |
803 | 803 | ||
804 | if (!test_bit(AF_HBA_GOING_AWAY, &ha->flags)) | 804 | if (!test_bit(AF_HA_REMOVAL, &ha->flags)) |
805 | set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); | 805 | set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); |
806 | 806 | ||
807 | break; | 807 | break; |
@@ -1008,34 +1008,9 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen) | |||
1008 | mbox_sts[0], mbox_sts[2], | 1008 | mbox_sts[0], mbox_sts[2], |
1009 | mbox_sts[3])); | 1009 | mbox_sts[3])); |
1010 | break; | 1010 | break; |
1011 | } else if (process_aen == RELOGIN_DDB_CHANGED_AENS) { | ||
1012 | /* for use during init time, we only want to | ||
1013 | * relogin non-active ddbs */ | ||
1014 | struct ddb_entry *ddb_entry; | ||
1015 | |||
1016 | ddb_entry = | ||
1017 | /* FIXME: name length? */ | ||
1018 | qla4xxx_lookup_ddb_by_fw_index(ha, | ||
1019 | mbox_sts[2]); | ||
1020 | if (!ddb_entry) | ||
1021 | break; | ||
1022 | |||
1023 | ddb_entry->dev_scan_wait_to_complete_relogin = | ||
1024 | 0; | ||
1025 | ddb_entry->dev_scan_wait_to_start_relogin = | ||
1026 | jiffies + | ||
1027 | ((ddb_entry->default_time2wait + | ||
1028 | 4) * HZ); | ||
1029 | |||
1030 | DEBUG2(printk("scsi%ld: ddb [%d] initiate" | ||
1031 | " RELOGIN after %d seconds\n", | ||
1032 | ha->host_no, | ||
1033 | ddb_entry->fw_ddb_index, | ||
1034 | ddb_entry->default_time2wait + | ||
1035 | 4)); | ||
1036 | break; | ||
1037 | } | 1011 | } |
1038 | 1012 | case PROCESS_ALL_AENS: | |
1013 | default: | ||
1039 | if (mbox_sts[1] == 0) { /* Global DB change. */ | 1014 | if (mbox_sts[1] == 0) { /* Global DB change. */ |
1040 | qla4xxx_reinitialize_ddb_list(ha); | 1015 | qla4xxx_reinitialize_ddb_list(ha); |
1041 | } else if (mbox_sts[1] == 1) { /* Specific device. */ | 1016 | } else if (mbox_sts[1] == 1) { /* Specific device. */ |
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index f65626aec7c..f9d81c8372c 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
@@ -32,6 +32,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, | |||
32 | u_long wait_count; | 32 | u_long wait_count; |
33 | uint32_t intr_status; | 33 | uint32_t intr_status; |
34 | unsigned long flags = 0; | 34 | unsigned long flags = 0; |
35 | uint32_t dev_state; | ||
35 | 36 | ||
36 | /* Make sure that pointers are valid */ | 37 | /* Make sure that pointers are valid */ |
37 | if (!mbx_cmd || !mbx_sts) { | 38 | if (!mbx_cmd || !mbx_sts) { |
@@ -40,12 +41,23 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, | |||
40 | return status; | 41 | return status; |
41 | } | 42 | } |
42 | 43 | ||
43 | if (is_qla8022(ha) && | 44 | if (is_qla8022(ha)) { |
44 | test_bit(AF_FW_RECOVERY, &ha->flags)) { | 45 | if (test_bit(AF_FW_RECOVERY, &ha->flags)) { |
45 | DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: prematurely " | 46 | DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: " |
46 | "completing mbx cmd as firmware recovery detected\n", | 47 | "prematurely completing mbx cmd as firmware " |
47 | ha->host_no, __func__)); | 48 | "recovery detected\n", ha->host_no, __func__)); |
48 | return status; | 49 | return status; |
50 | } | ||
51 | /* Do not send any mbx cmd if h/w is in failed state */ | ||
52 | qla4_8xxx_idc_lock(ha); | ||
53 | dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | ||
54 | qla4_8xxx_idc_unlock(ha); | ||
55 | if (dev_state == QLA82XX_DEV_FAILED) { | ||
56 | ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in " | ||
57 | "failed state, do not send any mailbox commands\n", | ||
58 | ha->host_no, __func__); | ||
59 | return status; | ||
60 | } | ||
49 | } | 61 | } |
50 | 62 | ||
51 | if ((is_aer_supported(ha)) && | 63 | if ((is_aer_supported(ha)) && |
@@ -139,7 +151,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, | |||
139 | if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && | 151 | if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && |
140 | test_bit(AF_INTERRUPTS_ON, &ha->flags) && | 152 | test_bit(AF_INTERRUPTS_ON, &ha->flags) && |
141 | test_bit(AF_ONLINE, &ha->flags) && | 153 | test_bit(AF_ONLINE, &ha->flags) && |
142 | !test_bit(AF_HBA_GOING_AWAY, &ha->flags)) { | 154 | !test_bit(AF_HA_REMOVAL, &ha->flags)) { |
143 | /* Do not poll for completion. Use completion queue */ | 155 | /* Do not poll for completion. Use completion queue */ |
144 | set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags); | 156 | set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags); |
145 | wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ); | 157 | wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ); |
@@ -395,9 +407,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, | |||
395 | /*memcpy(ha->alias, init_fw_cb->Alias, | 407 | /*memcpy(ha->alias, init_fw_cb->Alias, |
396 | min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ | 408 | min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ |
397 | 409 | ||
398 | /* Save Command Line Parameter info */ | ||
399 | ha->discovery_wait = ql4xdiscoverywait; | ||
400 | |||
401 | if (ha->acb_version == ACB_SUPPORTED) { | 410 | if (ha->acb_version == ACB_SUPPORTED) { |
402 | ha->ipv6_options = init_fw_cb->ipv6_opts; | 411 | ha->ipv6_options = init_fw_cb->ipv6_opts; |
403 | ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts; | 412 | ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts; |
@@ -467,6 +476,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) | |||
467 | 476 | ||
468 | init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); | 477 | init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); |
469 | 478 | ||
479 | /* Set bit for "serialize task mgmt"; all other bits need to be zero */ | ||
480 | init_fw_cb->add_fw_options = 0; | ||
481 | init_fw_cb->add_fw_options |= | ||
482 | __constant_cpu_to_le16(SERIALIZE_TASK_MGMT); | ||
483 | |||
470 | if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) | 484 | if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) |
471 | != QLA_SUCCESS) { | 485 | != QLA_SUCCESS) { |
472 | DEBUG2(printk(KERN_WARNING | 486 | DEBUG2(printk(KERN_WARNING |
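The guarded read of QLA82XX_CRB_DEV_STATE added above also appears in the probe retry loop later in this patch. As a sketch only, the repeated pattern could be factored into a helper (hypothetical name, not part of the patch; qla4_8xxx_idc_lock/unlock and qla4_8xxx_rd_32 are the driver's own primitives, as used in the hunks):

	/* Hypothetical helper illustrating the IDC-lock-guarded register read
	 * used twice in this patch. */
	static uint32_t qla4_8xxx_read_dev_state(struct scsi_qla_host *ha)
	{
		uint32_t dev_state;

		qla4_8xxx_idc_lock(ha);
		dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
		qla4_8xxx_idc_unlock(ha);

		return dev_state;
	}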
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 3d5ef2df413..35381cb0936 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c | |||
@@ -2304,14 +2304,13 @@ qla4_8xxx_enable_intrs(struct scsi_qla_host *ha) | |||
2304 | void | 2304 | void |
2305 | qla4_8xxx_disable_intrs(struct scsi_qla_host *ha) | 2305 | qla4_8xxx_disable_intrs(struct scsi_qla_host *ha) |
2306 | { | 2306 | { |
2307 | if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) | 2307 | if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags)) |
2308 | qla4_8xxx_mbx_intr_disable(ha); | 2308 | qla4_8xxx_mbx_intr_disable(ha); |
2309 | 2309 | ||
2310 | spin_lock_irq(&ha->hardware_lock); | 2310 | spin_lock_irq(&ha->hardware_lock); |
2311 | /* BIT 10 - set */ | 2311 | /* BIT 10 - set */ |
2312 | qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); | 2312 | qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); |
2313 | spin_unlock_irq(&ha->hardware_lock); | 2313 | spin_unlock_irq(&ha->hardware_lock); |
2314 | clear_bit(AF_INTERRUPTS_ON, &ha->flags); | ||
2315 | } | 2314 | } |
2316 | 2315 | ||
2317 | struct ql4_init_msix_entry { | 2316 | struct ql4_init_msix_entry { |
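The qla4_8xxx_disable_intrs() change above is worth a second look: a separate test_bit()/clear_bit() pair leaves a window in which another CPU can still observe AF_INTERRUPTS_ON as set, while test_and_clear_bit() reads and clears the flag in one atomic step. Side by side (illustrative only):

	/* Before: racy -- the flag stays set between the test and the clear. */
	if (test_bit(AF_INTERRUPTS_ON, &ha->flags))
		qla4_8xxx_mbx_intr_disable(ha);
	/* ... */
	clear_bit(AF_INTERRUPTS_ON, &ha->flags);

	/* After: the test and the clear are one atomic operation, and the
	 * disable runs only in the thread that actually cleared the bit. */
	if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
		qla4_8xxx_mbx_intr_disable(ha);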
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 967836ef5ab..a4acb0dd7be 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -29,10 +29,6 @@ static struct kmem_cache *srb_cachep; | |||
29 | /* | 29 | /* |
30 | * Module parameter information and variables | 30 | * Module parameter information and variables |
31 | */ | 31 | */ |
32 | int ql4xdiscoverywait = 60; | ||
33 | module_param(ql4xdiscoverywait, int, S_IRUGO | S_IWUSR); | ||
34 | MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time"); | ||
35 | |||
36 | int ql4xdontresethba = 0; | 32 | int ql4xdontresethba = 0; |
37 | module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); | 33 | module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); |
38 | MODULE_PARM_DESC(ql4xdontresethba, | 34 | MODULE_PARM_DESC(ql4xdontresethba, |
@@ -55,6 +51,17 @@ MODULE_PARM_DESC(ql4xenablemsix, | |||
55 | " 2 = enable MSI interrupt mechanism."); | 51 | " 2 = enable MSI interrupt mechanism."); |
56 | 52 | ||
57 | #define QL4_DEF_QDEPTH 32 | 53 | #define QL4_DEF_QDEPTH 32 |
54 | static int ql4xmaxqdepth = QL4_DEF_QDEPTH; | ||
55 | module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); | ||
56 | MODULE_PARM_DESC(ql4xmaxqdepth, | ||
57 | "Maximum queue depth to report for target devices.\n" | ||
58 | " Default: 32."); | ||
59 | |||
60 | static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; | ||
61 | module_param(ql4xsess_recovery_tmo, int, S_IRUGO); | ||
62 | MODULE_PARM_DESC(ql4xsess_recovery_tmo, | ||
63 | "Target Session Recovery Timeout.\n" | ||
64 | " Default: 30 sec."); | ||
58 | 65 | ||
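As a usage note for the two parameters added above (values illustrative, not from the patch): both can be set at load time, e.g. modprobe qla4xxx ql4xmaxqdepth=64 ql4xsess_recovery_tmo=60. Because ql4xmaxqdepth is registered with S_IRUGO | S_IWUSR, it is also owner-writable at runtime through /sys/module/qla4xxx/parameters/ql4xmaxqdepth, whereas ql4xsess_recovery_tmo (S_IRUGO only) is read-only after load.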
59 | /* | 66 | /* |
60 | * SCSI host template entry points | 67 | * SCSI host template entry points |
@@ -165,7 +172,7 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) | |||
165 | DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout " | 172 | DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout " |
166 | "of (%d) secs exhausted, marking device DEAD.\n", | 173 | "of (%d) secs exhausted, marking device DEAD.\n", |
167 | ha->host_no, __func__, ddb_entry->fw_ddb_index, | 174 | ha->host_no, __func__, ddb_entry->fw_ddb_index, |
168 | QL4_SESS_RECOVERY_TMO)); | 175 | ddb_entry->sess->recovery_tmo)); |
169 | } | 176 | } |
170 | } | 177 | } |
171 | 178 | ||
@@ -295,7 +302,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry) | |||
295 | { | 302 | { |
296 | int err; | 303 | int err; |
297 | 304 | ||
298 | ddb_entry->sess->recovery_tmo = QL4_SESS_RECOVERY_TMO; | 305 | ddb_entry->sess->recovery_tmo = ql4xsess_recovery_tmo; |
299 | 306 | ||
300 | err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index); | 307 | err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index); |
301 | if (err) { | 308 | if (err) { |
@@ -753,12 +760,6 @@ static void qla4xxx_timer(struct scsi_qla_host *ha) | |||
753 | if (!pci_channel_offline(ha->pdev)) | 760 | if (!pci_channel_offline(ha->pdev)) |
754 | pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); | 761 | pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); |
755 | 762 | ||
756 | if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) { | ||
757 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s exited. HBA GOING AWAY\n", | ||
758 | __func__)); | ||
759 | return; | ||
760 | } | ||
761 | |||
762 | if (is_qla8022(ha)) { | 763 | if (is_qla8022(ha)) { |
763 | qla4_8xxx_watchdog(ha); | 764 | qla4_8xxx_watchdog(ha); |
764 | } | 765 | } |
@@ -1067,7 +1068,6 @@ void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) | |||
1067 | 1068 | ||
1068 | /* Disable the board */ | 1069 | /* Disable the board */ |
1069 | ql4_printk(KERN_INFO, ha, "Disabling the board\n"); | 1070 | ql4_printk(KERN_INFO, ha, "Disabling the board\n"); |
1070 | set_bit(AF_HBA_GOING_AWAY, &ha->flags); | ||
1071 | 1071 | ||
1072 | qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); | 1072 | qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); |
1073 | qla4xxx_mark_all_devices_missing(ha); | 1073 | qla4xxx_mark_all_devices_missing(ha); |
@@ -1218,6 +1218,27 @@ recover_ha_init_adapter: | |||
1218 | return status; | 1218 | return status; |
1219 | } | 1219 | } |
1220 | 1220 | ||
1221 | static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) | ||
1222 | { | ||
1223 | struct ddb_entry *ddb_entry, *dtemp; | ||
1224 | |||
1225 | list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) { | ||
1226 | if ((atomic_read(&ddb_entry->state) == DDB_STATE_MISSING) || | ||
1227 | (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD)) { | ||
1228 | if (ddb_entry->fw_ddb_device_state == | ||
1229 | DDB_DS_SESSION_ACTIVE) { | ||
1230 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | ||
1231 | ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" | ||
1232 | " marked ONLINE\n", ha->host_no, __func__, | ||
1233 | ddb_entry->fw_ddb_index); | ||
1234 | |||
1235 | iscsi_unblock_session(ddb_entry->sess); | ||
1236 | } else | ||
1237 | qla4xxx_relogin_device(ha, ddb_entry); | ||
1238 | } | ||
1239 | } | ||
1240 | } | ||
1241 | |||
1221 | void qla4xxx_wake_dpc(struct scsi_qla_host *ha) | 1242 | void qla4xxx_wake_dpc(struct scsi_qla_host *ha) |
1222 | { | 1243 | { |
1223 | if (ha->dpc_thread && | 1244 | if (ha->dpc_thread && |
@@ -1259,11 +1280,6 @@ static void qla4xxx_do_dpc(struct work_struct *work) | |||
1259 | goto do_dpc_exit; | 1280 | goto do_dpc_exit; |
1260 | } | 1281 | } |
1261 | 1282 | ||
1262 | /* HBA is in the process of being permanently disabled. | ||
1263 | * Don't process anything */ | ||
1264 | if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) | ||
1265 | return; | ||
1266 | |||
1267 | if (is_qla8022(ha)) { | 1283 | if (is_qla8022(ha)) { |
1268 | if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { | 1284 | if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { |
1269 | qla4_8xxx_idc_lock(ha); | 1285 | qla4_8xxx_idc_lock(ha); |
@@ -1331,13 +1347,7 @@ dpc_post_reset_ha: | |||
1331 | if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { | 1347 | if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { |
1332 | if (!test_bit(AF_LINK_UP, &ha->flags)) { | 1348 | if (!test_bit(AF_LINK_UP, &ha->flags)) { |
1333 | /* ---- link down? --- */ | 1349 | /* ---- link down? --- */ |
1334 | list_for_each_entry_safe(ddb_entry, dtemp, | 1350 | qla4xxx_mark_all_devices_missing(ha); |
1335 | &ha->ddb_list, list) { | ||
1336 | if (atomic_read(&ddb_entry->state) == | ||
1337 | DDB_STATE_ONLINE) | ||
1338 | qla4xxx_mark_device_missing(ha, | ||
1339 | ddb_entry); | ||
1340 | } | ||
1341 | } else { | 1351 | } else { |
1342 | /* ---- link up? --- * | 1352 | /* ---- link up? --- * |
1343 | * F/W will auto login to all devices ONLY ONCE after | 1353 | * F/W will auto login to all devices ONLY ONCE after |
@@ -1346,30 +1356,7 @@ dpc_post_reset_ha: | |||
1346 | * manually relogin to devices when recovering from | 1356 | * manually relogin to devices when recovering from |
1347 | * connection failures, logouts, expired KATO, etc. */ | 1357 | * connection failures, logouts, expired KATO, etc. */ |
1348 | 1358 | ||
1349 | list_for_each_entry_safe(ddb_entry, dtemp, | 1359 | qla4xxx_relogin_all_devices(ha); |
1350 | &ha->ddb_list, list) { | ||
1351 | if ((atomic_read(&ddb_entry->state) == | ||
1352 | DDB_STATE_MISSING) || | ||
1353 | (atomic_read(&ddb_entry->state) == | ||
1354 | DDB_STATE_DEAD)) { | ||
1355 | if (ddb_entry->fw_ddb_device_state == | ||
1356 | DDB_DS_SESSION_ACTIVE) { | ||
1357 | atomic_set(&ddb_entry->state, | ||
1358 | DDB_STATE_ONLINE); | ||
1359 | ql4_printk(KERN_INFO, ha, | ||
1360 | "scsi%ld: %s: ddb[%d]" | ||
1361 | " marked ONLINE\n", | ||
1362 | ha->host_no, __func__, | ||
1363 | ddb_entry->fw_ddb_index); | ||
1364 | |||
1365 | iscsi_unblock_session( | ||
1366 | ddb_entry->sess); | ||
1367 | } else | ||
1368 | qla4xxx_relogin_device( | ||
1369 | ha, ddb_entry); | ||
1370 | } | ||
1371 | |||
1372 | } | ||
1373 | } | 1360 | } |
1374 | } | 1361 | } |
1375 | 1362 | ||
@@ -1630,6 +1617,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1630 | uint8_t init_retry_count = 0; | 1617 | uint8_t init_retry_count = 0; |
1631 | char buf[34]; | 1618 | char buf[34]; |
1632 | struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; | 1619 | struct qla4_8xxx_legacy_intr_set *nx_legacy_intr; |
1620 | uint32_t dev_state; | ||
1633 | 1621 | ||
1634 | if (pci_enable_device(pdev)) | 1622 | if (pci_enable_device(pdev)) |
1635 | return -1; | 1623 | return -1; |
@@ -1713,6 +1701,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1713 | status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); | 1701 | status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); |
1714 | while ((!test_bit(AF_ONLINE, &ha->flags)) && | 1702 | while ((!test_bit(AF_ONLINE, &ha->flags)) && |
1715 | init_retry_count++ < MAX_INIT_RETRIES) { | 1703 | init_retry_count++ < MAX_INIT_RETRIES) { |
1704 | |||
1705 | if (is_qla8022(ha)) { | ||
1706 | qla4_8xxx_idc_lock(ha); | ||
1707 | dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | ||
1708 | qla4_8xxx_idc_unlock(ha); | ||
1709 | if (dev_state == QLA82XX_DEV_FAILED) { | ||
1710 | ql4_printk(KERN_WARNING, ha, "%s: don't retry " | ||
1711 | "initialize adapter. H/W is in failed state\n", | ||
1712 | __func__); | ||
1713 | break; | ||
1714 | } | ||
1715 | } | ||
1716 | DEBUG2(printk("scsi: %s: retrying adapter initialization " | 1716 | DEBUG2(printk("scsi: %s: retrying adapter initialization " |
1717 | "(%d)\n", __func__, init_retry_count)); | 1717 | "(%d)\n", __func__, init_retry_count)); |
1718 | 1718 | ||
@@ -1815,6 +1815,44 @@ probe_disable_device: | |||
1815 | } | 1815 | } |
1816 | 1816 | ||
1817 | /** | 1817 | /** |
1818 | * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize | ||
1819 | * @ha: pointer to adapter structure | ||
1820 | * | ||
1821 | * Mark the other ISP-4xxx port to indicate that the driver is being removed, | ||
1822 | * so that the other port will not re-initialize while in the process of | ||
1823 | * removing the ha due to driver unload or hba hotplug. | ||
1824 | **/ | ||
1825 | static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) | ||
1826 | { | ||
1827 | struct scsi_qla_host *other_ha = NULL; | ||
1828 | struct pci_dev *other_pdev = NULL; | ||
1829 | int fn = ISP4XXX_PCI_FN_2; | ||
1830 | |||
1831 | /* iSCSI function numbers for ISP4xxx are 1 and 3 */ | ||
1832 | if (PCI_FUNC(ha->pdev->devfn) & BIT_1) | ||
1833 | fn = ISP4XXX_PCI_FN_1; | ||
1834 | |||
1835 | other_pdev = | ||
1836 | pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), | ||
1837 | ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), | ||
1838 | fn)); | ||
1839 | |||
1840 | /* Get other_ha if other_pdev is valid and its state is enabled */ | ||
1841 | if (other_pdev) { | ||
1842 | if (atomic_read(&other_pdev->enable_cnt)) { | ||
1843 | other_ha = pci_get_drvdata(other_pdev); | ||
1844 | if (other_ha) { | ||
1845 | set_bit(AF_HA_REMOVAL, &other_ha->flags); | ||
1846 | DEBUG2(ql4_printk(KERN_INFO, ha, "%s: " | ||
1847 | "Prevent %s reinit\n", __func__, | ||
1848 | dev_name(&other_ha->pdev->dev))); | ||
1849 | } | ||
1850 | } | ||
1851 | pci_dev_put(other_pdev); | ||
1852 | } | ||
1853 | } | ||
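A worked example of the sibling-function selection above, assuming ISP4XXX_PCI_FN_1 == 1 and ISP4XXX_PCI_FN_2 == 3 (consistent with the comment that the iSCSI functions are 1 and 3; the concrete values are an assumption for illustration):

	/* Illustrative only:
	 *   PCI_FUNC(devfn) == 1 (0b001) -> BIT_1 clear -> fn = 3 (the sibling)
	 *   PCI_FUNC(devfn) == 3 (0b011) -> BIT_1 set   -> fn = 1 (the sibling)
	 * pci_get_domain_bus_and_slot() then looks up the same slot with the
	 * sibling function number built via PCI_DEVFN(slot, fn). */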
1854 | |||
1855 | /** | ||
1818 | * qla4xxx_remove_adapter - callback function to remove adapter. | 1856 | * qla4xxx_remove_adapter - callback function to remove adapter. |
1819 | * @pci_dev: PCI device pointer | 1857 | * @pci_dev: PCI device pointer |
1820 | **/ | 1858 | **/ |
@@ -1824,7 +1862,8 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev) | |||
1824 | 1862 | ||
1825 | ha = pci_get_drvdata(pdev); | 1863 | ha = pci_get_drvdata(pdev); |
1826 | 1864 | ||
1827 | set_bit(AF_HBA_GOING_AWAY, &ha->flags); | 1865 | if (!is_qla8022(ha)) |
1866 | qla4xxx_prevent_other_port_reinit(ha); | ||
1828 | 1867 | ||
1829 | /* remove devs from iscsi_sessions to scsi_devices */ | 1868 | /* remove devs from iscsi_sessions to scsi_devices */ |
1830 | qla4xxx_free_ddb_list(ha); | 1869 | qla4xxx_free_ddb_list(ha); |
@@ -1868,10 +1907,15 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev) | |||
1868 | { | 1907 | { |
1869 | struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target); | 1908 | struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target); |
1870 | struct ddb_entry *ddb = sess->dd_data; | 1909 | struct ddb_entry *ddb = sess->dd_data; |
1910 | int queue_depth = QL4_DEF_QDEPTH; | ||
1871 | 1911 | ||
1872 | sdev->hostdata = ddb; | 1912 | sdev->hostdata = ddb; |
1873 | sdev->tagged_supported = 1; | 1913 | sdev->tagged_supported = 1; |
1874 | scsi_activate_tcq(sdev, QL4_DEF_QDEPTH); | 1914 | |
1915 | if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) | ||
1916 | queue_depth = ql4xmaxqdepth; | ||
1917 | |||
1918 | scsi_activate_tcq(sdev, queue_depth); | ||
1875 | return 0; | 1919 | return 0; |
1876 | } | 1920 | } |
1877 | 1921 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index 8475b308e01..60315576940 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h | |||
@@ -5,4 +5,4 @@ | |||
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define QLA4XXX_DRIVER_VERSION "5.02.00-k5" | 8 | #define QLA4XXX_DRIVER_VERSION "5.02.00-k6" |
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index b4218390941..3fd16d7212d 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -1917,7 +1917,7 @@ store_priv_session_##field(struct device *dev, \ | |||
1917 | #define iscsi_priv_session_rw_attr(field, format) \ | 1917 | #define iscsi_priv_session_rw_attr(field, format) \ |
1918 | iscsi_priv_session_attr_show(field, format) \ | 1918 | iscsi_priv_session_attr_show(field, format) \ |
1919 | iscsi_priv_session_attr_store(field) \ | 1919 | iscsi_priv_session_attr_store(field) \ |
1920 | static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUGO, \ | 1920 | static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ |
1921 | show_priv_session_##field, \ | 1921 | show_priv_session_##field, \ |
1922 | store_priv_session_##field) | 1922 | store_priv_session_##field) |
1923 | iscsi_priv_session_rw_attr(recovery_tmo, "%d"); | 1923 | iscsi_priv_session_rw_attr(recovery_tmo, "%d"); |
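The one-character permission change above narrows the writable scope of the recovery_tmo sysfs attribute from world-writable to owner-writable. In standard mode bits:

	/* Standard values from <linux/stat.h>, for reference:
	 *   S_IWUSR = 0200                       write for the owner only
	 *   S_IWUGO = S_IWUSR|S_IWGRP|S_IWOTH    = 0222, world-writable
	 * so S_IRUGO | S_IWUGO == 0666 while S_IRUGO | S_IWUSR == 0644. */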
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 7ff61d76b4c..b61ebec6bca 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -2027,14 +2027,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2027 | int old_rcd = sdkp->RCD; | 2027 | int old_rcd = sdkp->RCD; |
2028 | int old_dpofua = sdkp->DPOFUA; | 2028 | int old_dpofua = sdkp->DPOFUA; |
2029 | 2029 | ||
2030 | if (sdp->skip_ms_page_8) { | 2030 | if (sdp->skip_ms_page_8) |
2031 | if (sdp->type == TYPE_RBC) | 2031 | goto defaults; |
2032 | goto defaults; | 2032 | |
2033 | else { | 2033 | if (sdp->type == TYPE_RBC) { |
2034 | modepage = 0x3F; | ||
2035 | dbd = 0; | ||
2036 | } | ||
2037 | } else if (sdp->type == TYPE_RBC) { | ||
2038 | modepage = 6; | 2034 | modepage = 6; |
2039 | dbd = 8; | 2035 | dbd = 8; |
2040 | } else { | 2036 | } else { |
@@ -2062,11 +2058,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2062 | */ | 2058 | */ |
2063 | if (len < 3) | 2059 | if (len < 3) |
2064 | goto bad_sense; | 2060 | goto bad_sense; |
2065 | else if (len > SD_BUF_SIZE) { | 2061 | if (len > 20) |
2066 | sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " | 2062 | len = 20; |
2067 | "data from %d to %d bytes\n", len, SD_BUF_SIZE); | 2063 | |
2068 | len = SD_BUF_SIZE; | 2064 | /* Take headers and block descriptors into account */ |
2069 | } | 2065 | len += data.header_length + data.block_descriptor_length; |
2066 | if (len > SD_BUF_SIZE) | ||
2067 | goto bad_sense; | ||
2070 | 2068 | ||
2071 | /* Get the data */ | 2069 | /* Get the data */ |
2072 | res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); | 2070 | res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); |
@@ -2074,45 +2072,16 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2074 | if (scsi_status_is_good(res)) { | 2072 | if (scsi_status_is_good(res)) { |
2075 | int offset = data.header_length + data.block_descriptor_length; | 2073 | int offset = data.header_length + data.block_descriptor_length; |
2076 | 2074 | ||
2077 | while (offset < len) { | 2075 | if (offset >= SD_BUF_SIZE - 2) { |
2078 | u8 page_code = buffer[offset] & 0x3F; | 2076 | sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n"); |
2079 | u8 spf = buffer[offset] & 0x40; | 2077 | goto defaults; |
2080 | |||
2081 | if (page_code == 8 || page_code == 6) { | ||
2082 | /* We're interested only in the first 3 bytes. | ||
2083 | */ | ||
2084 | if (len - offset <= 2) { | ||
2085 | sd_printk(KERN_ERR, sdkp, "Incomplete " | ||
2086 | "mode parameter data\n"); | ||
2087 | goto defaults; | ||
2088 | } else { | ||
2089 | modepage = page_code; | ||
2090 | goto Page_found; | ||
2091 | } | ||
2092 | } else { | ||
2093 | /* Go to the next page */ | ||
2094 | if (spf && len - offset > 3) | ||
2095 | offset += 4 + (buffer[offset+2] << 8) + | ||
2096 | buffer[offset+3]; | ||
2097 | else if (!spf && len - offset > 1) | ||
2098 | offset += 2 + buffer[offset+1]; | ||
2099 | else { | ||
2100 | sd_printk(KERN_ERR, sdkp, "Incomplete " | ||
2101 | "mode parameter data\n"); | ||
2102 | goto defaults; | ||
2103 | } | ||
2104 | } | ||
2105 | } | 2078 | } |
2106 | 2079 | ||
2107 | if (modepage == 0x3F) { | 2080 | if ((buffer[offset] & 0x3f) != modepage) { |
2108 | sd_printk(KERN_ERR, sdkp, "No Caching mode page " | ||
2109 | "present\n"); | ||
2110 | goto defaults; | ||
2111 | } else if ((buffer[offset] & 0x3f) != modepage) { | ||
2112 | sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); | 2081 | sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); |
2113 | goto defaults; | 2082 | goto defaults; |
2114 | } | 2083 | } |
2115 | Page_found: | 2084 | |
2116 | if (modepage == 8) { | 2085 | if (modepage == 8) { |
2117 | sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); | 2086 | sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); |
2118 | sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); | 2087 | sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); |
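To make the new length arithmetic in the sd.c hunk above concrete (numbers illustrative): the caching mode page itself is capped at 20 bytes, but the MODE SENSE transfer must also cover the mode parameter header and any block descriptors reported by the first probe, so:

	/* Illustrative numbers: a 4-byte mode parameter header plus one 8-byte
	 * block descriptor ahead of a 20-byte caching mode page. */
	len = 20;				/* capped page length */
	len += data.header_length		/* e.g. 4 */
	     + data.block_descriptor_length;	/* e.g. 8 -> len == 32 */
	if (len > SD_BUF_SIZE)			/* still bounds-checked */
		goto bad_sense;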
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 7f5a6a86f82..eb7a3e85304 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
@@ -35,9 +35,11 @@ | |||
35 | 35 | ||
36 | struct ses_device { | 36 | struct ses_device { |
37 | unsigned char *page1; | 37 | unsigned char *page1; |
38 | unsigned char *page1_types; | ||
38 | unsigned char *page2; | 39 | unsigned char *page2; |
39 | unsigned char *page10; | 40 | unsigned char *page10; |
40 | short page1_len; | 41 | short page1_len; |
42 | short page1_num_types; | ||
41 | short page2_len; | 43 | short page2_len; |
42 | short page10_len; | 44 | short page10_len; |
43 | }; | 45 | }; |
@@ -110,12 +112,12 @@ static int ses_set_page2_descriptor(struct enclosure_device *edev, | |||
110 | int i, j, count = 0, descriptor = ecomp->number; | 112 | int i, j, count = 0, descriptor = ecomp->number; |
111 | struct scsi_device *sdev = to_scsi_device(edev->edev.parent); | 113 | struct scsi_device *sdev = to_scsi_device(edev->edev.parent); |
112 | struct ses_device *ses_dev = edev->scratch; | 114 | struct ses_device *ses_dev = edev->scratch; |
113 | unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 115 | unsigned char *type_ptr = ses_dev->page1_types; |
114 | unsigned char *desc_ptr = ses_dev->page2 + 8; | 116 | unsigned char *desc_ptr = ses_dev->page2 + 8; |
115 | 117 | ||
116 | /* Clear everything */ | 118 | /* Clear everything */ |
117 | memset(desc_ptr, 0, ses_dev->page2_len - 8); | 119 | memset(desc_ptr, 0, ses_dev->page2_len - 8); |
118 | for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) { | 120 | for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { |
119 | for (j = 0; j < type_ptr[1]; j++) { | 121 | for (j = 0; j < type_ptr[1]; j++) { |
120 | desc_ptr += 4; | 122 | desc_ptr += 4; |
121 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && | 123 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && |
@@ -140,12 +142,12 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev, | |||
140 | int i, j, count = 0, descriptor = ecomp->number; | 142 | int i, j, count = 0, descriptor = ecomp->number; |
141 | struct scsi_device *sdev = to_scsi_device(edev->edev.parent); | 143 | struct scsi_device *sdev = to_scsi_device(edev->edev.parent); |
142 | struct ses_device *ses_dev = edev->scratch; | 144 | struct ses_device *ses_dev = edev->scratch; |
143 | unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 145 | unsigned char *type_ptr = ses_dev->page1_types; |
144 | unsigned char *desc_ptr = ses_dev->page2 + 8; | 146 | unsigned char *desc_ptr = ses_dev->page2 + 8; |
145 | 147 | ||
146 | ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); | 148 | ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); |
147 | 149 | ||
148 | for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) { | 150 | for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) { |
149 | for (j = 0; j < type_ptr[1]; j++) { | 151 | for (j = 0; j < type_ptr[1]; j++) { |
150 | desc_ptr += 4; | 152 | desc_ptr += 4; |
151 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && | 153 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && |
@@ -358,7 +360,7 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, | |||
358 | unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL; | 360 | unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL; |
359 | int i, j, page7_len, len, components; | 361 | int i, j, page7_len, len, components; |
360 | struct ses_device *ses_dev = edev->scratch; | 362 | struct ses_device *ses_dev = edev->scratch; |
361 | int types = ses_dev->page1[10]; | 363 | int types = ses_dev->page1_num_types; |
362 | unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); | 364 | unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); |
363 | 365 | ||
364 | if (!hdr_buf) | 366 | if (!hdr_buf) |
@@ -390,10 +392,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, | |||
390 | len = (desc_ptr[2] << 8) + desc_ptr[3]; | 392 | len = (desc_ptr[2] << 8) + desc_ptr[3]; |
391 | /* skip past overall descriptor */ | 393 | /* skip past overall descriptor */ |
392 | desc_ptr += len + 4; | 394 | desc_ptr += len + 4; |
393 | if (ses_dev->page10) | ||
394 | addl_desc_ptr = ses_dev->page10 + 8; | ||
395 | } | 395 | } |
396 | type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 396 | if (ses_dev->page10) |
397 | addl_desc_ptr = ses_dev->page10 + 8; | ||
398 | type_ptr = ses_dev->page1_types; | ||
397 | components = 0; | 399 | components = 0; |
398 | for (i = 0; i < types; i++, type_ptr += 4) { | 400 | for (i = 0; i < types; i++, type_ptr += 4) { |
399 | for (j = 0; j < type_ptr[1]; j++) { | 401 | for (j = 0; j < type_ptr[1]; j++) { |
@@ -503,6 +505,7 @@ static int ses_intf_add(struct device *cdev, | |||
503 | u32 result; | 505 | u32 result; |
504 | int i, types, len, components = 0; | 506 | int i, types, len, components = 0; |
505 | int err = -ENOMEM; | 507 | int err = -ENOMEM; |
508 | int num_enclosures; | ||
506 | struct enclosure_device *edev; | 509 | struct enclosure_device *edev; |
507 | struct ses_component *scomp = NULL; | 510 | struct ses_component *scomp = NULL; |
508 | 511 | ||
@@ -530,16 +533,6 @@ static int ses_intf_add(struct device *cdev, | |||
530 | if (result) | 533 | if (result) |
531 | goto recv_failed; | 534 | goto recv_failed; |
532 | 535 | ||
533 | if (hdr_buf[1] != 0) { | ||
534 | /* FIXME: need subenclosure support; I've just never | ||
535 | * seen a device with subenclosures and it makes the | ||
536 | * traversal routines more complex */ | ||
537 | sdev_printk(KERN_ERR, sdev, | ||
538 | "FIXME driver has no support for subenclosures (%d)\n", | ||
539 | hdr_buf[1]); | ||
540 | goto err_free; | ||
541 | } | ||
542 | |||
543 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; | 536 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; |
544 | buf = kzalloc(len, GFP_KERNEL); | 537 | buf = kzalloc(len, GFP_KERNEL); |
545 | if (!buf) | 538 | if (!buf) |
@@ -549,11 +542,24 @@ static int ses_intf_add(struct device *cdev, | |||
549 | if (result) | 542 | if (result) |
550 | goto recv_failed; | 543 | goto recv_failed; |
551 | 544 | ||
552 | types = buf[10]; | 545 | types = 0; |
553 | 546 | ||
554 | type_ptr = buf + 12 + buf[11]; | 547 | /* we always have one main enclosure and the rest are referred |
548 | * to as secondary subenclosures */ | ||
549 | num_enclosures = buf[1] + 1; | ||
555 | 550 | ||
556 | for (i = 0; i < types; i++, type_ptr += 4) { | 551 | /* begin at the enclosure descriptor */ |
552 | type_ptr = buf + 8; | ||
553 | /* skip all the enclosure descriptors */ | ||
554 | for (i = 0; i < num_enclosures && type_ptr < buf + len; i++) { | ||
555 | types += type_ptr[2]; | ||
556 | type_ptr += type_ptr[3] + 4; | ||
557 | } | ||
558 | |||
559 | ses_dev->page1_types = type_ptr; | ||
560 | ses_dev->page1_num_types = types; | ||
561 | |||
562 | for (i = 0; i < types && type_ptr < buf + len; i++, type_ptr += 4) { | ||
557 | if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || | 563 | if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || |
558 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) | 564 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) |
559 | components += type_ptr[1]; | 565 | components += type_ptr[1]; |
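The new traversal above leans on the layout of the SES Configuration diagnostic page: byte 1 of the page counts secondary subenclosures (so there are buf[1] + 1 enclosure descriptors in total), and each enclosure descriptor carries its count of type descriptor headers in byte 2 and its own length in byte 3. A minimal sketch of that walk, assuming a well-formed page:

	/* Sketch of the SES page-1 walk used above. */
	unsigned char *p = buf + 8;	/* first enclosure descriptor */
	int i, types = 0;

	for (i = 0; i < buf[1] + 1 && p < buf + len; i++) {
		types += p[2];		/* type descriptor headers announced */
		p += p[3] + 4;		/* descriptor length + 4-byte header */
	}
	/* p now points at the first type descriptor header, and 'types'
	 * counts how many 4-byte headers follow. */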
diff --git a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c index 842cd9214a5..289729daba8 100644 --- a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c +++ b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c | |||
@@ -1191,7 +1191,7 @@ static int cyasblkdev_add_disks(int bus_num, | |||
1191 | bd->user_disk_1->first_minor = (devidx + 1) << CYASBLKDEV_SHIFT; | 1191 | bd->user_disk_1->first_minor = (devidx + 1) << CYASBLKDEV_SHIFT; |
1192 | bd->user_disk_1->minors = 8; | 1192 | bd->user_disk_1->minors = 8; |
1193 | bd->user_disk_1->fops = &cyasblkdev_bdops; | 1193 | bd->user_disk_1->fops = &cyasblkdev_bdops; |
1194 | bd->user_disk_0->events = DISK_EVENT_MEDIA_CHANGE; | 1194 | bd->user_disk_1->events = DISK_EVENT_MEDIA_CHANGE; |
1195 | bd->user_disk_1->private_data = bd; | 1195 | bd->user_disk_1->private_data = bd; |
1196 | bd->user_disk_1->queue = bd->queue.queue; | 1196 | bd->user_disk_1->queue = bd->queue.queue; |
1197 | bd->dbgprn_flags = DBGPRN_RD_RQ; | 1197 | bd->dbgprn_flags = DBGPRN_RD_RQ; |
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig index 2fac3be209a..9ef2dbbfa62 100644 --- a/drivers/target/Kconfig +++ b/drivers/target/Kconfig | |||
@@ -29,4 +29,6 @@ config TCM_PSCSI | |||
29 | Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered | 29 | Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered |
30 | passthrough access to Linux/SCSI device | 30 | passthrough access to Linux/SCSI device |
31 | 31 | ||
32 | source "drivers/target/loopback/Kconfig" | ||
33 | |||
32 | endif | 34 | endif |
diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 973bb190ef5..1178bbfc68f 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile | |||
@@ -1,4 +1,3 @@ | |||
1 | EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/ | ||
2 | 1 | ||
3 | target_core_mod-y := target_core_configfs.o \ | 2 | target_core_mod-y := target_core_configfs.o \ |
4 | target_core_device.o \ | 3 | target_core_device.o \ |
@@ -13,7 +12,8 @@ target_core_mod-y := target_core_configfs.o \ | |||
13 | target_core_transport.o \ | 12 | target_core_transport.o \ |
14 | target_core_cdb.o \ | 13 | target_core_cdb.o \ |
15 | target_core_ua.o \ | 14 | target_core_ua.o \ |
16 | target_core_rd.o | 15 | target_core_rd.o \ |
16 | target_core_stat.o | ||
17 | 17 | ||
18 | obj-$(CONFIG_TARGET_CORE) += target_core_mod.o | 18 | obj-$(CONFIG_TARGET_CORE) += target_core_mod.o |
19 | 19 | ||
@@ -21,3 +21,6 @@ obj-$(CONFIG_TARGET_CORE) += target_core_mod.o | |||
21 | obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o | 21 | obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o |
22 | obj-$(CONFIG_TCM_FILEIO) += target_core_file.o | 22 | obj-$(CONFIG_TCM_FILEIO) += target_core_file.o |
23 | obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o | 23 | obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o |
24 | |||
25 | # Fabric modules | ||
26 | obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ | ||
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig new file mode 100644 index 00000000000..57dcbc2d711 --- /dev/null +++ b/drivers/target/loopback/Kconfig | |||
@@ -0,0 +1,11 @@ | |||
1 | config LOOPBACK_TARGET | ||
2 | tristate "TCM Virtual SAS target and Linux/SCSI LLD fabric loopback module" | ||
3 | help | ||
4 | Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD | ||
5 | fabric loopback module. | ||
6 | |||
7 | config LOOPBACK_TARGET_CDB_DEBUG | ||
8 | bool "TCM loopback fabric module CDB debug code" | ||
9 | depends on LOOPBACK_TARGET | ||
10 | help | ||
11 | Say Y here to enable the TCM loopback fabric module CDB debug code | ||
diff --git a/drivers/target/loopback/Makefile b/drivers/target/loopback/Makefile new file mode 100644 index 00000000000..6abebdf9565 --- /dev/null +++ b/drivers/target/loopback/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_LOOPBACK_TARGET) += tcm_loop.o | |||
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c new file mode 100644 index 00000000000..aed4e464d31 --- /dev/null +++ b/drivers/target/loopback/tcm_loop.c | |||
@@ -0,0 +1,1579 @@ | |||
1 | /******************************************************************************* | ||
2 | * | ||
3 | * This file contains the Linux/SCSI LLD virtual SCSI initiator driver | ||
4 | * for emulated SAS initiator ports | ||
5 | * | ||
6 | * © Copyright 2011 RisingTide Systems LLC. | ||
7 | * | ||
8 | * Licensed to the Linux Foundation under the General Public License (GPL) version 2. | ||
9 | * | ||
10 | * Author: Nicholas A. Bellinger <nab@risingtidesystems.com> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | ****************************************************************************/ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/moduleparam.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/configfs.h> | ||
29 | #include <scsi/scsi.h> | ||
30 | #include <scsi/scsi_tcq.h> | ||
31 | #include <scsi/scsi_host.h> | ||
32 | #include <scsi/scsi_device.h> | ||
33 | #include <scsi/scsi_cmnd.h> | ||
34 | #include <scsi/libsas.h> /* For TASK_ATTR_* */ | ||
35 | |||
36 | #include <target/target_core_base.h> | ||
37 | #include <target/target_core_transport.h> | ||
38 | #include <target/target_core_fabric_ops.h> | ||
39 | #include <target/target_core_fabric_configfs.h> | ||
40 | #include <target/target_core_fabric_lib.h> | ||
41 | #include <target/target_core_configfs.h> | ||
42 | #include <target/target_core_device.h> | ||
43 | #include <target/target_core_tpg.h> | ||
44 | #include <target/target_core_tmr.h> | ||
45 | |||
46 | #include "tcm_loop.h" | ||
47 | |||
48 | #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) | ||
49 | |||
50 | /* Local pointer to allocated TCM configfs fabric module */ | ||
51 | static struct target_fabric_configfs *tcm_loop_fabric_configfs; | ||
52 | |||
53 | static struct kmem_cache *tcm_loop_cmd_cache; | ||
54 | |||
55 | static int tcm_loop_hba_no_cnt; | ||
56 | |||
57 | /* | ||
58 | * Allocate a tcm_loop cmd descriptor from target_core_mod code | ||
59 | * | ||
60 | * Can be called from interrupt context in tcm_loop_queuecommand() below | ||
61 | */ | ||
62 | static struct se_cmd *tcm_loop_allocate_core_cmd( | ||
63 | struct tcm_loop_hba *tl_hba, | ||
64 | struct se_portal_group *se_tpg, | ||
65 | struct scsi_cmnd *sc) | ||
66 | { | ||
67 | struct se_cmd *se_cmd; | ||
68 | struct se_session *se_sess; | ||
69 | struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus; | ||
70 | struct tcm_loop_cmd *tl_cmd; | ||
71 | int sam_task_attr; | ||
72 | |||
73 | if (!tl_nexus) { | ||
74 | scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" | ||
75 | " does not exist\n"); | ||
76 | set_host_byte(sc, DID_ERROR); | ||
77 | return NULL; | ||
78 | } | ||
79 | se_sess = tl_nexus->se_sess; | ||
80 | |||
81 | tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC); | ||
82 | if (!tl_cmd) { | ||
83 | printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n"); | ||
84 | set_host_byte(sc, DID_ERROR); | ||
85 | return NULL; | ||
86 | } | ||
87 | se_cmd = &tl_cmd->tl_se_cmd; | ||
88 | /* | ||
89 | * Save the pointer to struct scsi_cmnd *sc | ||
90 | */ | ||
91 | tl_cmd->sc = sc; | ||
92 | /* | ||
93 | * Locate the SAM Task Attr from struct scsi_cmnd * | ||
94 | */ | ||
95 | if (sc->device->tagged_supported) { | ||
96 | switch (sc->tag) { | ||
97 | case HEAD_OF_QUEUE_TAG: | ||
98 | sam_task_attr = TASK_ATTR_HOQ; | ||
99 | break; | ||
100 | case ORDERED_QUEUE_TAG: | ||
101 | sam_task_attr = TASK_ATTR_ORDERED; | ||
102 | break; | ||
103 | default: | ||
104 | sam_task_attr = TASK_ATTR_SIMPLE; | ||
105 | break; | ||
106 | } | ||
107 | } else | ||
108 | sam_task_attr = TASK_ATTR_SIMPLE; | ||
109 | |||
110 | /* | ||
111 | * Initialize struct se_cmd descriptor from target_core_mod infrastructure | ||
112 | */ | ||
113 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, | ||
114 | scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr, | ||
115 | &tl_cmd->tl_sense_buf[0]); | ||
116 | |||
117 | /* | ||
118 | * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi | ||
119 | */ | ||
120 | if (scsi_bidi_cmnd(sc)) | ||
121 | T_TASK(se_cmd)->t_tasks_bidi = 1; | ||
122 | /* | ||
123 | * Locate the struct se_lun pointer and attach it to struct se_cmd | ||
124 | */ | ||
125 | if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) { | ||
126 | kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); | ||
127 | set_host_byte(sc, DID_NO_CONNECT); | ||
128 | return NULL; | ||
129 | } | ||
130 | |||
131 | transport_device_setup_cmd(se_cmd); | ||
132 | return se_cmd; | ||
133 | } | ||
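A note on the allocation flag in the function above: since this path can run in interrupt context (per the function comment), the command descriptor is taken from the cache with GFP_ATOMIC, which never sleeps; the device-reset path further down runs in process context and therefore uses GFP_KERNEL.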
134 | |||
135 | /* | ||
136 | * Called by struct target_core_fabric_ops->new_cmd_map() | ||
137 | * | ||
138 | * Always called in process context. A non-zero return value | ||
139 | * here signals that an exception should be handled based on the return code. | ||
140 | */ | ||
141 | static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) | ||
142 | { | ||
143 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, | ||
144 | struct tcm_loop_cmd, tl_se_cmd); | ||
145 | struct scsi_cmnd *sc = tl_cmd->sc; | ||
146 | void *mem_ptr, *mem_bidi_ptr = NULL; | ||
147 | u32 sg_no_bidi = 0; | ||
148 | int ret; | ||
149 | /* | ||
150 | * Allocate the necessary tasks to complete the received CDB+data | ||
151 | */ | ||
152 | ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd); | ||
153 | if (ret == -1) { | ||
154 | /* Out of Resources */ | ||
155 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
156 | } else if (ret == -2) { | ||
157 | /* | ||
158 | * Handle case for SAM_STAT_RESERVATION_CONFLICT | ||
159 | */ | ||
160 | if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) | ||
161 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
162 | /* | ||
163 | * Otherwise, return SAM_STAT_CHECK_CONDITION and return | ||
164 | * sense data. | ||
165 | */ | ||
166 | return PYX_TRANSPORT_USE_SENSE_REASON; | ||
167 | } | ||
168 | /* | ||
169 | * Setup the struct scatterlist memory from the received | ||
170 | * struct scsi_cmnd. | ||
171 | */ | ||
172 | if (scsi_sg_count(sc)) { | ||
173 | se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM; | ||
174 | mem_ptr = (void *)scsi_sglist(sc); | ||
175 | /* | ||
176 | * For BIDI commands, pass in the extra READ buffer | ||
177 | * to transport_generic_map_mem_to_cmd() below. | ||
178 | */ | ||
179 | if (T_TASK(se_cmd)->t_tasks_bidi) { | ||
180 | struct scsi_data_buffer *sdb = scsi_in(sc); | ||
181 | |||
182 | mem_bidi_ptr = (void *)sdb->table.sgl; | ||
183 | sg_no_bidi = sdb->table.nents; | ||
184 | } | ||
185 | } else { | ||
186 | /* | ||
187 | * Used for DMA_NONE | ||
188 | */ | ||
189 | mem_ptr = NULL; | ||
190 | } | ||
191 | /* | ||
192 | * Map the SG memory into struct se_mem->page linked list using the same | ||
193 | * physical memory at sg->page_link. | ||
194 | */ | ||
195 | ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr, | ||
196 | scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi); | ||
197 | if (ret < 0) | ||
198 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
199 | |||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | /* | ||
204 | * Called from struct target_core_fabric_ops->check_stop_free() | ||
205 | */ | ||
206 | static void tcm_loop_check_stop_free(struct se_cmd *se_cmd) | ||
207 | { | ||
208 | /* | ||
209 | * Do not release struct se_cmd's containing a valid TMR | ||
210 | * pointer. These will be released directly in tcm_loop_device_reset() | ||
211 | * with transport_generic_free_cmd(). | ||
212 | */ | ||
213 | if (se_cmd->se_tmr_req) | ||
214 | return; | ||
215 | /* | ||
216 | * Release the struct se_cmd, which will make a callback to release | ||
217 | * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd() | ||
218 | */ | ||
219 | transport_generic_free_cmd(se_cmd, 0, 1, 0); | ||
220 | } | ||
221 | |||
222 | /* | ||
223 | * Called from struct target_core_fabric_ops->release_cmd_to_pool() | ||
224 | */ | ||
225 | static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd) | ||
226 | { | ||
227 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, | ||
228 | struct tcm_loop_cmd, tl_se_cmd); | ||
229 | |||
230 | kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); | ||
231 | } | ||
232 | |||
233 | static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer, | ||
234 | char **start, off_t offset, | ||
235 | int length, int inout) | ||
236 | { | ||
237 | return sprintf(buffer, "tcm_loop_proc_info()\n"); | ||
238 | } | ||
239 | |||
240 | static int tcm_loop_driver_probe(struct device *); | ||
241 | static int tcm_loop_driver_remove(struct device *); | ||
242 | |||
243 | static int pseudo_lld_bus_match(struct device *dev, | ||
244 | struct device_driver *dev_driver) | ||
245 | { | ||
246 | return 1; | ||
247 | } | ||
248 | |||
249 | static struct bus_type tcm_loop_lld_bus = { | ||
250 | .name = "tcm_loop_bus", | ||
251 | .match = pseudo_lld_bus_match, | ||
252 | .probe = tcm_loop_driver_probe, | ||
253 | .remove = tcm_loop_driver_remove, | ||
254 | }; | ||
255 | |||
256 | static struct device_driver tcm_loop_driverfs = { | ||
257 | .name = "tcm_loop", | ||
258 | .bus = &tcm_loop_lld_bus, | ||
259 | }; | ||
260 | /* | ||
261 | * Used with root_device_register() in tcm_loop_alloc_core_bus() below | ||
262 | */ | ||
263 | struct device *tcm_loop_primary; | ||
264 | |||
265 | /* | ||
266 | * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and | ||
267 | * drivers/scsi/libiscsi.c:iscsi_change_queue_depth() | ||
268 | */ | ||
269 | static int tcm_loop_change_queue_depth( | ||
270 | struct scsi_device *sdev, | ||
271 | int depth, | ||
272 | int reason) | ||
273 | { | ||
274 | switch (reason) { | ||
275 | case SCSI_QDEPTH_DEFAULT: | ||
276 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); | ||
277 | break; | ||
278 | case SCSI_QDEPTH_QFULL: | ||
279 | scsi_track_queue_full(sdev, depth); | ||
280 | break; | ||
281 | case SCSI_QDEPTH_RAMP_UP: | ||
282 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); | ||
283 | break; | ||
284 | default: | ||
285 | return -EOPNOTSUPP; | ||
286 | } | ||
287 | return sdev->queue_depth; | ||
288 | } | ||
289 | |||
290 | /* | ||
291 | * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data | ||
292 | * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs) | ||
293 | */ | ||
294 | static int tcm_loop_queuecommand( | ||
295 | struct Scsi_Host *sh, | ||
296 | struct scsi_cmnd *sc) | ||
297 | { | ||
298 | struct se_cmd *se_cmd; | ||
299 | struct se_portal_group *se_tpg; | ||
300 | struct tcm_loop_hba *tl_hba; | ||
301 | struct tcm_loop_tpg *tl_tpg; | ||
302 | |||
303 | TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x" | ||
304 | " scsi_buf_len: %u\n", sc->device->host->host_no, | ||
305 | sc->device->id, sc->device->channel, sc->device->lun, | ||
306 | sc->cmnd[0], scsi_bufflen(sc)); | ||
307 | /* | ||
308 | * Locate the tcm_loop_hba_t pointer | ||
309 | */ | ||
310 | tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); | ||
311 | tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; | ||
312 | se_tpg = &tl_tpg->tl_se_tpg; | ||
313 | /* | ||
314 | * Determine the SAM Task Attribute and allocate tl_cmd and | ||
315 | * tl_cmd->tl_se_cmd from TCM infrastructure | ||
316 | */ | ||
317 | se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc); | ||
318 | if (!se_cmd) { | ||
319 | sc->scsi_done(sc); | ||
320 | return 0; | ||
321 | } | ||
322 | /* | ||
323 | * Queue up the newly allocated command to be processed in TCM thread context. | ||
324 | */ | ||
325 | transport_generic_handle_cdb_map(se_cmd); | ||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | /* | ||
330 | * Called from SCSI EH process context to issue a LUN_RESET TMR | ||
331 | * to struct scsi_device | ||
332 | */ | ||
333 | static int tcm_loop_device_reset(struct scsi_cmnd *sc) | ||
334 | { | ||
335 | struct se_cmd *se_cmd = NULL; | ||
336 | struct se_portal_group *se_tpg; | ||
337 | struct se_session *se_sess; | ||
338 | struct tcm_loop_cmd *tl_cmd = NULL; | ||
339 | struct tcm_loop_hba *tl_hba; | ||
340 | struct tcm_loop_nexus *tl_nexus; | ||
341 | struct tcm_loop_tmr *tl_tmr = NULL; | ||
342 | struct tcm_loop_tpg *tl_tpg; | ||
343 | int ret = FAILED; | ||
344 | /* | ||
345 | 	 * Locate the struct tcm_loop_hba pointer | ||
346 | */ | ||
347 | tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); | ||
348 | /* | ||
349 | * Locate the tl_nexus and se_sess pointers | ||
350 | */ | ||
351 | tl_nexus = tl_hba->tl_nexus; | ||
352 | if (!tl_nexus) { | ||
353 | printk(KERN_ERR "Unable to perform device reset without" | ||
354 | 				" an active I_T Nexus\n"); | ||
355 | return FAILED; | ||
356 | } | ||
357 | se_sess = tl_nexus->se_sess; | ||
358 | /* | ||
359 | * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id | ||
360 | */ | ||
361 | tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; | ||
362 | se_tpg = &tl_tpg->tl_se_tpg; | ||
363 | |||
364 | tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); | ||
365 | if (!tl_cmd) { | ||
366 | printk(KERN_ERR "Unable to allocate memory for tl_cmd\n"); | ||
367 | return FAILED; | ||
368 | } | ||
369 | |||
370 | tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); | ||
371 | if (!tl_tmr) { | ||
372 | printk(KERN_ERR "Unable to allocate memory for tl_tmr\n"); | ||
373 | goto release; | ||
374 | } | ||
375 | init_waitqueue_head(&tl_tmr->tl_tmr_wait); | ||
376 | |||
377 | se_cmd = &tl_cmd->tl_se_cmd; | ||
378 | /* | ||
379 | * Initialize struct se_cmd descriptor from target_core_mod infrastructure | ||
380 | */ | ||
381 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, | ||
382 | DMA_NONE, TASK_ATTR_SIMPLE, | ||
383 | &tl_cmd->tl_sense_buf[0]); | ||
384 | /* | ||
385 | * Allocate the LUN_RESET TMR | ||
386 | */ | ||
387 | se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr, | ||
388 | TMR_LUN_RESET); | ||
389 | if (!se_cmd->se_tmr_req) | ||
390 | goto release; | ||
391 | /* | ||
392 | * Locate the underlying TCM struct se_lun from sc->device->lun | ||
393 | */ | ||
394 | if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0) | ||
395 | goto release; | ||
396 | /* | ||
397 | * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp() | ||
398 | * to wake us up. | ||
399 | */ | ||
400 | transport_generic_handle_tmr(se_cmd); | ||
401 | wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); | ||
402 | /* | ||
403 | * The TMR LUN_RESET has completed, check the response status and | ||
404 | * then release allocations. | ||
405 | */ | ||
406 | ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? | ||
407 | SUCCESS : FAILED; | ||
408 | release: | ||
409 | if (se_cmd) | ||
410 | transport_generic_free_cmd(se_cmd, 1, 1, 0); | ||
411 | else | ||
412 | kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); | ||
413 | kfree(tl_tmr); | ||
414 | return ret; | ||
415 | } | ||
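
The reset path above pairs with tcm_loop_queue_tm_rsp() further down through a simple flag-plus-waitqueue handshake; in outline (a sketch of the two halves already present in this file):

    /* SCSI EH thread, tcm_loop_device_reset(): */
    init_waitqueue_head(&tl_tmr->tl_tmr_wait);  /* tmr_complete is 0 from kzalloc() */
    transport_generic_handle_tmr(se_cmd);
    wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));

    /* TCM response context, tcm_loop_queue_tm_rsp(): */
    atomic_set(&tl_tmr->tmr_complete, 1);
    wake_up(&tl_tmr->tl_tmr_wait);
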
416 | |||
417 | static int tcm_loop_slave_alloc(struct scsi_device *sd) | ||
418 | { | ||
419 | set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); | ||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | static int tcm_loop_slave_configure(struct scsi_device *sd) | ||
424 | { | ||
425 | return 0; | ||
426 | } | ||
427 | |||
428 | static struct scsi_host_template tcm_loop_driver_template = { | ||
429 | .proc_info = tcm_loop_proc_info, | ||
430 | .proc_name = "tcm_loopback", | ||
431 | .name = "TCM_Loopback", | ||
432 | .queuecommand = tcm_loop_queuecommand, | ||
433 | .change_queue_depth = tcm_loop_change_queue_depth, | ||
434 | .eh_device_reset_handler = tcm_loop_device_reset, | ||
435 | .can_queue = TL_SCSI_CAN_QUEUE, | ||
436 | .this_id = -1, | ||
437 | .sg_tablesize = TL_SCSI_SG_TABLESIZE, | ||
438 | .cmd_per_lun = TL_SCSI_CMD_PER_LUN, | ||
439 | .max_sectors = TL_SCSI_MAX_SECTORS, | ||
440 | .use_clustering = DISABLE_CLUSTERING, | ||
441 | .slave_alloc = tcm_loop_slave_alloc, | ||
442 | .slave_configure = tcm_loop_slave_configure, | ||
443 | .module = THIS_MODULE, | ||
444 | }; | ||
445 | |||
446 | static int tcm_loop_driver_probe(struct device *dev) | ||
447 | { | ||
448 | struct tcm_loop_hba *tl_hba; | ||
449 | struct Scsi_Host *sh; | ||
450 | int error; | ||
451 | |||
452 | tl_hba = to_tcm_loop_hba(dev); | ||
453 | |||
454 | sh = scsi_host_alloc(&tcm_loop_driver_template, | ||
455 | sizeof(struct tcm_loop_hba)); | ||
456 | if (!sh) { | ||
457 | printk(KERN_ERR "Unable to allocate struct scsi_host\n"); | ||
458 | return -ENODEV; | ||
459 | } | ||
460 | tl_hba->sh = sh; | ||
461 | |||
462 | /* | ||
463 | * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata | ||
464 | */ | ||
465 | *((struct tcm_loop_hba **)sh->hostdata) = tl_hba; | ||
466 | /* | ||
467 | 	 * Set up a single ID, Channel and LUN for now. | ||
468 | */ | ||
469 | sh->max_id = 2; | ||
470 | sh->max_lun = 0; | ||
471 | sh->max_channel = 0; | ||
472 | sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; | ||
473 | |||
474 | error = scsi_add_host(sh, &tl_hba->dev); | ||
475 | if (error) { | ||
476 | printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); | ||
477 | scsi_host_put(sh); | ||
478 | return -ENODEV; | ||
479 | } | ||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | static int tcm_loop_driver_remove(struct device *dev) | ||
484 | { | ||
485 | struct tcm_loop_hba *tl_hba; | ||
486 | struct Scsi_Host *sh; | ||
487 | |||
488 | tl_hba = to_tcm_loop_hba(dev); | ||
489 | sh = tl_hba->sh; | ||
490 | |||
491 | scsi_remove_host(sh); | ||
492 | scsi_host_put(sh); | ||
493 | return 0; | ||
494 | } | ||
495 | |||
496 | static void tcm_loop_release_adapter(struct device *dev) | ||
497 | { | ||
498 | struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev); | ||
499 | |||
500 | kfree(tl_hba); | ||
501 | } | ||
502 | |||
503 | /* | ||
504 |  * Called from tcm_loop_make_scsi_hba() below | ||
505 | */ | ||
506 | static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id) | ||
507 | { | ||
508 | int ret; | ||
509 | |||
510 | tl_hba->dev.bus = &tcm_loop_lld_bus; | ||
511 | tl_hba->dev.parent = tcm_loop_primary; | ||
512 | tl_hba->dev.release = &tcm_loop_release_adapter; | ||
513 | dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id); | ||
514 | |||
515 | ret = device_register(&tl_hba->dev); | ||
516 | if (ret) { | ||
517 | printk(KERN_ERR "device_register() failed for" | ||
518 | " tl_hba->dev: %d\n", ret); | ||
519 | return -ENODEV; | ||
520 | } | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | /* | ||
526 |  * Called from tcm_loop_fabric_init() in tcm_loop.c to load the emulated | ||
527 | * tcm_loop SCSI bus. | ||
528 | */ | ||
529 | static int tcm_loop_alloc_core_bus(void) | ||
530 | { | ||
531 | int ret; | ||
532 | |||
533 | tcm_loop_primary = root_device_register("tcm_loop_0"); | ||
534 | if (IS_ERR(tcm_loop_primary)) { | ||
535 | printk(KERN_ERR "Unable to allocate tcm_loop_primary\n"); | ||
536 | return PTR_ERR(tcm_loop_primary); | ||
537 | } | ||
538 | |||
539 | ret = bus_register(&tcm_loop_lld_bus); | ||
540 | if (ret) { | ||
541 | printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n"); | ||
542 | goto dev_unreg; | ||
543 | } | ||
544 | |||
545 | ret = driver_register(&tcm_loop_driverfs); | ||
546 | if (ret) { | ||
547 | printk(KERN_ERR "driver_register() failed for" | ||
548 | 				" tcm_loop_driverfs\n"); | ||
549 | goto bus_unreg; | ||
550 | } | ||
551 | |||
552 | printk(KERN_INFO "Initialized TCM Loop Core Bus\n"); | ||
553 | return ret; | ||
554 | |||
555 | bus_unreg: | ||
556 | bus_unregister(&tcm_loop_lld_bus); | ||
557 | dev_unreg: | ||
558 | root_device_unregister(tcm_loop_primary); | ||
559 | return ret; | ||
560 | } | ||
561 | |||
562 | static void tcm_loop_release_core_bus(void) | ||
563 | { | ||
564 | driver_unregister(&tcm_loop_driverfs); | ||
565 | bus_unregister(&tcm_loop_lld_bus); | ||
566 | root_device_unregister(tcm_loop_primary); | ||
567 | |||
568 | printk(KERN_INFO "Releasing TCM Loop Core BUS\n"); | ||
569 | } | ||
570 | |||
571 | static char *tcm_loop_get_fabric_name(void) | ||
572 | { | ||
573 | return "loopback"; | ||
574 | } | ||
575 | |||
576 | static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
577 | { | ||
578 | struct tcm_loop_tpg *tl_tpg = | ||
579 | (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; | ||
580 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
581 | /* | ||
582 | 	 * tl_proto_id is set at tcm_loop.c:tcm_loop_make_scsi_hba() | ||
583 | 	 * time based on the protocol-dependent prefix of the passed configfs group. | ||
584 | * | ||
585 | * Based upon tl_proto_id, TCM_Loop emulates the requested fabric | ||
586 | * ProtocolID using target_core_fabric_lib.c symbols. | ||
587 | */ | ||
588 | switch (tl_hba->tl_proto_id) { | ||
589 | case SCSI_PROTOCOL_SAS: | ||
590 | return sas_get_fabric_proto_ident(se_tpg); | ||
591 | case SCSI_PROTOCOL_FCP: | ||
592 | return fc_get_fabric_proto_ident(se_tpg); | ||
593 | case SCSI_PROTOCOL_ISCSI: | ||
594 | return iscsi_get_fabric_proto_ident(se_tpg); | ||
595 | default: | ||
596 | printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" | ||
597 | " SAS emulation\n", tl_hba->tl_proto_id); | ||
598 | break; | ||
599 | } | ||
600 | |||
601 | return sas_get_fabric_proto_ident(se_tpg); | ||
602 | } | ||
603 | |||
604 | static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) | ||
605 | { | ||
606 | struct tcm_loop_tpg *tl_tpg = | ||
607 | (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; | ||
608 | /* | ||
609 | * Return the passed NAA identifier for the SAS Target Port | ||
610 | */ | ||
611 | return &tl_tpg->tl_hba->tl_wwn_address[0]; | ||
612 | } | ||
613 | |||
614 | static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg) | ||
615 | { | ||
616 | struct tcm_loop_tpg *tl_tpg = | ||
617 | (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; | ||
618 | /* | ||
619 | 	 * This tag is used when forming the SCSI Name identifier in the | ||
620 | 	 * INQUIRY EVPD=1 VPD page 0x83 to represent the SCSI Target Port. | ||
621 | */ | ||
622 | return tl_tpg->tl_tpgt; | ||
623 | } | ||
624 | |||
625 | static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg) | ||
626 | { | ||
627 | return 1; | ||
628 | } | ||
629 | |||
630 | static u32 tcm_loop_get_pr_transport_id( | ||
631 | struct se_portal_group *se_tpg, | ||
632 | struct se_node_acl *se_nacl, | ||
633 | struct t10_pr_registration *pr_reg, | ||
634 | int *format_code, | ||
635 | unsigned char *buf) | ||
636 | { | ||
637 | struct tcm_loop_tpg *tl_tpg = | ||
638 | (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; | ||
639 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
640 | |||
641 | switch (tl_hba->tl_proto_id) { | ||
642 | case SCSI_PROTOCOL_SAS: | ||
643 | return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
644 | format_code, buf); | ||
645 | case SCSI_PROTOCOL_FCP: | ||
646 | return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
647 | format_code, buf); | ||
648 | case SCSI_PROTOCOL_ISCSI: | ||
649 | return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
650 | format_code, buf); | ||
651 | default: | ||
652 | printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" | ||
653 | " SAS emulation\n", tl_hba->tl_proto_id); | ||
654 | break; | ||
655 | } | ||
656 | |||
657 | return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, | ||
658 | format_code, buf); | ||
659 | } | ||
660 | |||
661 | static u32 tcm_loop_get_pr_transport_id_len( | ||
662 | struct se_portal_group *se_tpg, | ||
663 | struct se_node_acl *se_nacl, | ||
664 | struct t10_pr_registration *pr_reg, | ||
665 | int *format_code) | ||
666 | { | ||
667 | struct tcm_loop_tpg *tl_tpg = | ||
668 | (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; | ||
669 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
670 | |||
671 | switch (tl_hba->tl_proto_id) { | ||
672 | case SCSI_PROTOCOL_SAS: | ||
673 | return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
674 | format_code); | ||
675 | case SCSI_PROTOCOL_FCP: | ||
676 | return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
677 | format_code); | ||
678 | case SCSI_PROTOCOL_ISCSI: | ||
679 | return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
680 | format_code); | ||
681 | default: | ||
682 | printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" | ||
683 | " SAS emulation\n", tl_hba->tl_proto_id); | ||
684 | break; | ||
685 | } | ||
686 | |||
687 | return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, | ||
688 | format_code); | ||
689 | } | ||
690 | |||
691 | /* | ||
692 | * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above | ||
693 | * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. | ||
694 | */ | ||
695 | static char *tcm_loop_parse_pr_out_transport_id( | ||
696 | struct se_portal_group *se_tpg, | ||
697 | const char *buf, | ||
698 | u32 *out_tid_len, | ||
699 | char **port_nexus_ptr) | ||
700 | { | ||
701 | struct tcm_loop_tpg *tl_tpg = | ||
702 | (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; | ||
703 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
704 | |||
705 | switch (tl_hba->tl_proto_id) { | ||
706 | case SCSI_PROTOCOL_SAS: | ||
707 | return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
708 | port_nexus_ptr); | ||
709 | case SCSI_PROTOCOL_FCP: | ||
710 | return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
711 | port_nexus_ptr); | ||
712 | case SCSI_PROTOCOL_ISCSI: | ||
713 | return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
714 | port_nexus_ptr); | ||
715 | default: | ||
716 | printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" | ||
717 | " SAS emulation\n", tl_hba->tl_proto_id); | ||
718 | break; | ||
719 | } | ||
720 | |||
721 | return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, | ||
722 | port_nexus_ptr); | ||
723 | } | ||
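
All three TransportID hooks above share the same shape: switch on tl_hba->tl_proto_id, warn on an unknown value, and fall back to SAS emulation. A sketch of how that fallback could be centralized (hypothetical helper, not in this driver):

    /* Hypothetical: normalize an unknown protocol ID to the SAS default. */
    static u8 tl_fabric_proto(struct tcm_loop_hba *tl_hba)
    {
            switch (tl_hba->tl_proto_id) {
            case SCSI_PROTOCOL_SAS:
            case SCSI_PROTOCOL_FCP:
            case SCSI_PROTOCOL_ISCSI:
                    return tl_hba->tl_proto_id;
            default:
                    printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
                            " SAS emulation\n", tl_hba->tl_proto_id);
                    return SCSI_PROTOCOL_SAS;
            }
    }
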
724 | |||
725 | /* | ||
726 | * Returning (1) here allows for target_core_mod struct se_node_acl to be generated | ||
727 | * based upon the incoming fabric dependent SCSI Initiator Port | ||
728 | */ | ||
729 | static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg) | ||
730 | { | ||
731 | return 1; | ||
732 | } | ||
733 | |||
734 | static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg) | ||
735 | { | ||
736 | return 0; | ||
737 | } | ||
738 | |||
739 | /* | ||
740 |  * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for | ||
741 | * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest | ||
742 | */ | ||
743 | static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg) | ||
744 | { | ||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | /* | ||
749 |  * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will | ||
750 | * never be called for TCM_Loop by target_core_fabric_configfs.c code. | ||
751 | * It has been added here as a nop for target_fabric_tf_ops_check() | ||
752 | */ | ||
753 | static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg) | ||
754 | { | ||
755 | return 0; | ||
756 | } | ||
757 | |||
758 | static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl( | ||
759 | struct se_portal_group *se_tpg) | ||
760 | { | ||
761 | struct tcm_loop_nacl *tl_nacl; | ||
762 | |||
763 | tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL); | ||
764 | if (!tl_nacl) { | ||
765 | printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n"); | ||
766 | return NULL; | ||
767 | } | ||
768 | |||
769 | return &tl_nacl->se_node_acl; | ||
770 | } | ||
771 | |||
772 | static void tcm_loop_tpg_release_fabric_acl( | ||
773 | struct se_portal_group *se_tpg, | ||
774 | struct se_node_acl *se_nacl) | ||
775 | { | ||
776 | struct tcm_loop_nacl *tl_nacl = container_of(se_nacl, | ||
777 | struct tcm_loop_nacl, se_node_acl); | ||
778 | |||
779 | kfree(tl_nacl); | ||
780 | } | ||
781 | |||
782 | static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg) | ||
783 | { | ||
784 | return 1; | ||
785 | } | ||
786 | |||
787 | static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd) | ||
788 | { | ||
789 | /* | ||
790 | * Since TCM_loop is already passing struct scatterlist data from | ||
791 | * struct scsi_cmnd, no more Linux/SCSI failure dependent state need | ||
792 | * to be handled here. | ||
793 | */ | ||
794 | return; | ||
795 | } | ||
796 | |||
797 | static int tcm_loop_is_state_remove(struct se_cmd *se_cmd) | ||
798 | { | ||
799 | /* | ||
800 | * Assume struct scsi_cmnd is not in remove state.. | ||
801 | */ | ||
802 | return 0; | ||
803 | } | ||
804 | |||
805 | static int tcm_loop_sess_logged_in(struct se_session *se_sess) | ||
806 | { | ||
807 | /* | ||
808 | * Assume that TL Nexus is always active | ||
809 | */ | ||
810 | return 1; | ||
811 | } | ||
812 | |||
813 | static u32 tcm_loop_sess_get_index(struct se_session *se_sess) | ||
814 | { | ||
815 | return 1; | ||
816 | } | ||
817 | |||
818 | static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl) | ||
819 | { | ||
820 | return; | ||
821 | } | ||
822 | |||
823 | static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd) | ||
824 | { | ||
825 | return 1; | ||
826 | } | ||
827 | |||
828 | static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) | ||
829 | { | ||
830 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, | ||
831 | struct tcm_loop_cmd, tl_se_cmd); | ||
832 | |||
833 | return tl_cmd->sc_cmd_state; | ||
834 | } | ||
835 | |||
836 | static int tcm_loop_shutdown_session(struct se_session *se_sess) | ||
837 | { | ||
838 | return 0; | ||
839 | } | ||
840 | |||
841 | static void tcm_loop_close_session(struct se_session *se_sess) | ||
842 | { | ||
843 | return; | ||
844 | } | ||
845 | |||
846 | static void tcm_loop_stop_session( | ||
847 | struct se_session *se_sess, | ||
848 | int sess_sleep, | ||
849 | int conn_sleep) | ||
850 | { | ||
851 | return; | ||
852 | } | ||
853 | |||
854 | static void tcm_loop_fall_back_to_erl0(struct se_session *se_sess) | ||
855 | { | ||
856 | return; | ||
857 | } | ||
858 | |||
859 | static int tcm_loop_write_pending(struct se_cmd *se_cmd) | ||
860 | { | ||
861 | /* | ||
862 | * Since Linux/SCSI has already sent down a struct scsi_cmnd | ||
863 | * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array | ||
864 | * memory, and memory has already been mapped to struct se_cmd->t_mem_list | ||
865 | * format with transport_generic_map_mem_to_cmd(). | ||
866 | * | ||
867 | * We now tell TCM to add this WRITE CDB directly into the TCM storage | ||
868 | * object execution queue. | ||
869 | */ | ||
870 | transport_generic_process_write(se_cmd); | ||
871 | return 0; | ||
872 | } | ||
873 | |||
874 | static int tcm_loop_write_pending_status(struct se_cmd *se_cmd) | ||
875 | { | ||
876 | return 0; | ||
877 | } | ||
878 | |||
879 | static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) | ||
880 | { | ||
881 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, | ||
882 | struct tcm_loop_cmd, tl_se_cmd); | ||
883 | struct scsi_cmnd *sc = tl_cmd->sc; | ||
884 | |||
885 | TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p" | ||
886 | " cdb: 0x%02x\n", sc, sc->cmnd[0]); | ||
887 | |||
888 | sc->result = SAM_STAT_GOOD; | ||
889 | set_host_byte(sc, DID_OK); | ||
890 | sc->scsi_done(sc); | ||
891 | return 0; | ||
892 | } | ||
893 | |||
894 | static int tcm_loop_queue_status(struct se_cmd *se_cmd) | ||
895 | { | ||
896 | struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, | ||
897 | struct tcm_loop_cmd, tl_se_cmd); | ||
898 | struct scsi_cmnd *sc = tl_cmd->sc; | ||
899 | |||
900 | TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p" | ||
901 | " cdb: 0x%02x\n", sc, sc->cmnd[0]); | ||
902 | |||
903 | if (se_cmd->sense_buffer && | ||
904 | ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || | ||
905 | (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { | ||
906 | |||
907 | memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer, | ||
908 | SCSI_SENSE_BUFFERSIZE); | ||
909 | sc->result = SAM_STAT_CHECK_CONDITION; | ||
910 | set_driver_byte(sc, DRIVER_SENSE); | ||
911 | } else | ||
912 | sc->result = se_cmd->scsi_status; | ||
913 | |||
914 | set_host_byte(sc, DID_OK); | ||
915 | sc->scsi_done(sc); | ||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) | ||
920 | { | ||
921 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; | ||
922 | struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr; | ||
923 | /* | ||
924 | * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead | ||
925 | * and wake up the wait_queue_head_t in tcm_loop_device_reset() | ||
926 | */ | ||
927 | atomic_set(&tl_tmr->tmr_complete, 1); | ||
928 | wake_up(&tl_tmr->tl_tmr_wait); | ||
929 | return 0; | ||
930 | } | ||
931 | |||
932 | static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length) | ||
933 | { | ||
934 | return 0; | ||
935 | } | ||
936 | |||
937 | static u16 tcm_loop_get_fabric_sense_len(void) | ||
938 | { | ||
939 | return 0; | ||
940 | } | ||
941 | |||
942 | static u64 tcm_loop_pack_lun(unsigned int lun) | ||
943 | { | ||
944 | u64 result; | ||
945 | |||
946 | /* LSB of lun into byte 1 big-endian */ | ||
947 | result = ((lun & 0xff) << 8); | ||
948 | /* use flat space addressing method */ | ||
949 | result |= 0x40 | ((lun >> 8) & 0x3f); | ||
950 | |||
951 | return cpu_to_le64(result); | ||
952 | } | ||
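
As a worked example of the flat space method above, a LUN of 0x123 packs as follows (values derived from the code, shown here as a sketch):

    /*
     * lun = 0x123:
     *   (lun & 0xff) << 8          = 0x2300   (LSB into byte 1)
     *   0x40 | ((lun >> 8) & 0x3f) = 0x0041   (flat space selector + MSBs)
     *   result                     = 0x2341, returned as cpu_to_le64(0x2341)
     */
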
953 | |||
954 | static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) | ||
955 | { | ||
956 | switch (tl_hba->tl_proto_id) { | ||
957 | case SCSI_PROTOCOL_SAS: | ||
958 | return "SAS"; | ||
959 | case SCSI_PROTOCOL_FCP: | ||
960 | return "FCP"; | ||
961 | case SCSI_PROTOCOL_ISCSI: | ||
962 | return "iSCSI"; | ||
963 | default: | ||
964 | break; | ||
965 | } | ||
966 | |||
967 | return "Unknown"; | ||
968 | } | ||
969 | |||
970 | /* Start items for tcm_loop_port_cit */ | ||
971 | |||
972 | static int tcm_loop_port_link( | ||
973 | struct se_portal_group *se_tpg, | ||
974 | struct se_lun *lun) | ||
975 | { | ||
976 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, | ||
977 | struct tcm_loop_tpg, tl_se_tpg); | ||
978 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
979 | |||
980 | atomic_inc(&tl_tpg->tl_tpg_port_count); | ||
981 | smp_mb__after_atomic_inc(); | ||
982 | /* | ||
983 | * Add Linux/SCSI struct scsi_device by HCTL | ||
984 | */ | ||
985 | scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun); | ||
986 | |||
987 | printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n"); | ||
988 | return 0; | ||
989 | } | ||
990 | |||
991 | static void tcm_loop_port_unlink( | ||
992 | struct se_portal_group *se_tpg, | ||
993 | struct se_lun *se_lun) | ||
994 | { | ||
995 | struct scsi_device *sd; | ||
996 | struct tcm_loop_hba *tl_hba; | ||
997 | struct tcm_loop_tpg *tl_tpg; | ||
998 | |||
999 | tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); | ||
1000 | tl_hba = tl_tpg->tl_hba; | ||
1001 | |||
1002 | sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, | ||
1003 | se_lun->unpacked_lun); | ||
1004 | if (!sd) { | ||
1005 | printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:" | ||
1006 | "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); | ||
1007 | return; | ||
1008 | } | ||
1009 | /* | ||
1010 | * Remove Linux/SCSI struct scsi_device by HCTL | ||
1011 | */ | ||
1012 | scsi_remove_device(sd); | ||
1013 | scsi_device_put(sd); | ||
1014 | |||
1015 | atomic_dec(&tl_tpg->tl_tpg_port_count); | ||
1016 | smp_mb__after_atomic_dec(); | ||
1017 | |||
1018 | printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n"); | ||
1019 | } | ||
1020 | |||
1021 | /* End items for tcm_loop_port_cit */ | ||
1022 | |||
1023 | /* Start items for tcm_loop_nexus_cit */ | ||
1024 | |||
1025 | static int tcm_loop_make_nexus( | ||
1026 | struct tcm_loop_tpg *tl_tpg, | ||
1027 | const char *name) | ||
1028 | { | ||
1029 | struct se_portal_group *se_tpg; | ||
1030 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
1031 | struct tcm_loop_nexus *tl_nexus; | ||
1032 | |||
1033 | if (tl_tpg->tl_hba->tl_nexus) { | ||
1034 | printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n"); | ||
1035 | return -EEXIST; | ||
1036 | } | ||
1037 | se_tpg = &tl_tpg->tl_se_tpg; | ||
1038 | |||
1039 | tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL); | ||
1040 | if (!tl_nexus) { | ||
1041 | printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n"); | ||
1042 | return -ENOMEM; | ||
1043 | } | ||
1044 | /* | ||
1045 | * Initialize the struct se_session pointer | ||
1046 | */ | ||
1047 | tl_nexus->se_sess = transport_init_session(); | ||
1048 | if (!tl_nexus->se_sess) | ||
1049 | goto out; | ||
1050 | /* | ||
1051 | 	 * Since we are running in 'demo mode' this call will generate a | ||
1052 | * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI | ||
1053 | * Initiator port name of the passed configfs group 'name'. | ||
1054 | */ | ||
1055 | tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl( | ||
1056 | se_tpg, (unsigned char *)name); | ||
1057 | if (!tl_nexus->se_sess->se_node_acl) { | ||
1058 | transport_free_session(tl_nexus->se_sess); | ||
1059 | goto out; | ||
1060 | } | ||
1061 | /* | ||
1062 | 	 * Now register the I_T Nexus as active with the call to | ||
1063 | 	 * __transport_register_session() | ||
1064 | */ | ||
1065 | __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, | ||
1066 | tl_nexus->se_sess, (void *)tl_nexus); | ||
1067 | tl_tpg->tl_hba->tl_nexus = tl_nexus; | ||
1068 | printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated" | ||
1069 | " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), | ||
1070 | name); | ||
1071 | return 0; | ||
1072 | |||
1073 | out: | ||
1074 | kfree(tl_nexus); | ||
1075 | return -ENOMEM; | ||
1076 | } | ||
1077 | |||
1078 | static int tcm_loop_drop_nexus( | ||
1079 | struct tcm_loop_tpg *tpg) | ||
1080 | { | ||
1081 | struct se_session *se_sess; | ||
1082 | struct tcm_loop_nexus *tl_nexus; | ||
1083 | struct tcm_loop_hba *tl_hba = tpg->tl_hba; | ||
1084 | |||
1085 | tl_nexus = tpg->tl_hba->tl_nexus; | ||
1086 | if (!tl_nexus) | ||
1087 | return -ENODEV; | ||
1088 | |||
1089 | se_sess = tl_nexus->se_sess; | ||
1090 | if (!se_sess) | ||
1091 | return -ENODEV; | ||
1092 | |||
1093 | if (atomic_read(&tpg->tl_tpg_port_count)) { | ||
1094 | printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with" | ||
1095 | " active TPG port count: %d\n", | ||
1096 | atomic_read(&tpg->tl_tpg_port_count)); | ||
1097 | return -EPERM; | ||
1098 | } | ||
1099 | |||
1100 | printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" | ||
1101 | " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), | ||
1102 | tl_nexus->se_sess->se_node_acl->initiatorname); | ||
1103 | /* | ||
1104 | * Release the SCSI I_T Nexus to the emulated SAS Target Port | ||
1105 | */ | ||
1106 | transport_deregister_session(tl_nexus->se_sess); | ||
1107 | tpg->tl_hba->tl_nexus = NULL; | ||
1108 | kfree(tl_nexus); | ||
1109 | return 0; | ||
1110 | } | ||
1111 | |||
1112 | /* End items for tcm_loop_nexus_cit */ | ||
1113 | |||
1114 | static ssize_t tcm_loop_tpg_show_nexus( | ||
1115 | struct se_portal_group *se_tpg, | ||
1116 | char *page) | ||
1117 | { | ||
1118 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, | ||
1119 | struct tcm_loop_tpg, tl_se_tpg); | ||
1120 | struct tcm_loop_nexus *tl_nexus; | ||
1121 | ssize_t ret; | ||
1122 | |||
1123 | tl_nexus = tl_tpg->tl_hba->tl_nexus; | ||
1124 | if (!tl_nexus) | ||
1125 | return -ENODEV; | ||
1126 | |||
1127 | ret = snprintf(page, PAGE_SIZE, "%s\n", | ||
1128 | tl_nexus->se_sess->se_node_acl->initiatorname); | ||
1129 | |||
1130 | return ret; | ||
1131 | } | ||
1132 | |||
1133 | static ssize_t tcm_loop_tpg_store_nexus( | ||
1134 | struct se_portal_group *se_tpg, | ||
1135 | const char *page, | ||
1136 | size_t count) | ||
1137 | { | ||
1138 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, | ||
1139 | struct tcm_loop_tpg, tl_se_tpg); | ||
1140 | struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; | ||
1141 | unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr; | ||
1142 | int ret; | ||
1143 | /* | ||
1144 | 	 * Shut down the active I_T Nexus if 'NULL' is passed. | ||
1145 | */ | ||
1146 | if (!strncmp(page, "NULL", 4)) { | ||
1147 | ret = tcm_loop_drop_nexus(tl_tpg); | ||
1148 | return (!ret) ? count : ret; | ||
1149 | } | ||
1150 | /* | ||
1151 | * Otherwise make sure the passed virtual Initiator port WWN matches | ||
1152 | * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call | ||
1153 | * tcm_loop_make_nexus() | ||
1154 | */ | ||
1155 | if (strlen(page) > TL_WWN_ADDR_LEN) { | ||
1156 | 		printk(KERN_ERR "Emulated NAA SAS Address: %s, exceeds" | ||
1157 | " max: %d\n", page, TL_WWN_ADDR_LEN); | ||
1158 | return -EINVAL; | ||
1159 | } | ||
1160 | snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page); | ||
1161 | |||
1162 | ptr = strstr(i_port, "naa."); | ||
1163 | if (ptr) { | ||
1164 | if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) { | ||
1165 | printk(KERN_ERR "Passed SAS Initiator Port %s does not" | ||
1166 | " match target port protoid: %s\n", i_port, | ||
1167 | tcm_loop_dump_proto_id(tl_hba)); | ||
1168 | return -EINVAL; | ||
1169 | } | ||
1170 | port_ptr = &i_port[0]; | ||
1171 | goto check_newline; | ||
1172 | } | ||
1173 | ptr = strstr(i_port, "fc."); | ||
1174 | if (ptr) { | ||
1175 | if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) { | ||
1176 | printk(KERN_ERR "Passed FCP Initiator Port %s does not" | ||
1177 | " match target port protoid: %s\n", i_port, | ||
1178 | tcm_loop_dump_proto_id(tl_hba)); | ||
1179 | return -EINVAL; | ||
1180 | } | ||
1181 | port_ptr = &i_port[3]; /* Skip over "fc." */ | ||
1182 | goto check_newline; | ||
1183 | } | ||
1184 | ptr = strstr(i_port, "iqn."); | ||
1185 | if (ptr) { | ||
1186 | if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) { | ||
1187 | printk(KERN_ERR "Passed iSCSI Initiator Port %s does not" | ||
1188 | " match target port protoid: %s\n", i_port, | ||
1189 | tcm_loop_dump_proto_id(tl_hba)); | ||
1190 | return -EINVAL; | ||
1191 | } | ||
1192 | port_ptr = &i_port[0]; | ||
1193 | goto check_newline; | ||
1194 | } | ||
1195 | printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:" | ||
1196 | " %s\n", i_port); | ||
1197 | return -EINVAL; | ||
1198 | /* | ||
1199 | * Clear any trailing newline for the NAA WWN | ||
1200 | */ | ||
1201 | check_newline: | ||
1202 | if (i_port[strlen(i_port)-1] == '\n') | ||
1203 | i_port[strlen(i_port)-1] = '\0'; | ||
1204 | |||
1205 | ret = tcm_loop_make_nexus(tl_tpg, port_ptr); | ||
1206 | if (ret < 0) | ||
1207 | return ret; | ||
1208 | |||
1209 | return count; | ||
1210 | } | ||
1211 | |||
1212 | TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR); | ||
1213 | |||
1214 | static struct configfs_attribute *tcm_loop_tpg_attrs[] = { | ||
1215 | &tcm_loop_tpg_nexus.attr, | ||
1216 | NULL, | ||
1217 | }; | ||
1218 | |||
1219 | /* Start items for tcm_loop_naa_cit */ | ||
1220 | |||
1221 | struct se_portal_group *tcm_loop_make_naa_tpg( | ||
1222 | struct se_wwn *wwn, | ||
1223 | struct config_group *group, | ||
1224 | const char *name) | ||
1225 | { | ||
1226 | struct tcm_loop_hba *tl_hba = container_of(wwn, | ||
1227 | struct tcm_loop_hba, tl_hba_wwn); | ||
1228 | struct tcm_loop_tpg *tl_tpg; | ||
1229 | char *tpgt_str, *end_ptr; | ||
1230 | int ret; | ||
1231 | unsigned short int tpgt; | ||
1232 | |||
1233 | tpgt_str = strstr(name, "tpgt_"); | ||
1234 | if (!tpgt_str) { | ||
1235 | printk(KERN_ERR "Unable to locate \"tpgt_#\" directory" | ||
1236 | " group\n"); | ||
1237 | return ERR_PTR(-EINVAL); | ||
1238 | } | ||
1239 | tpgt_str += 5; /* Skip ahead of "tpgt_" */ | ||
1240 | tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); | ||
1241 | |||
1242 | 	if (tpgt >= TL_TPGS_PER_HBA) { | ||
1243 | printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" | ||
1244 | " %u\n", tpgt, TL_TPGS_PER_HBA); | ||
1245 | return ERR_PTR(-EINVAL); | ||
1246 | } | ||
1247 | tl_tpg = &tl_hba->tl_hba_tpgs[tpgt]; | ||
1248 | tl_tpg->tl_hba = tl_hba; | ||
1249 | tl_tpg->tl_tpgt = tpgt; | ||
1250 | /* | ||
1251 | 	 * Register the tl_tpg as an emulated SAS TCM Target Endpoint | ||
1252 | */ | ||
1253 | ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops, | ||
1254 | wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg, | ||
1255 | TRANSPORT_TPG_TYPE_NORMAL); | ||
1256 | if (ret < 0) | ||
1257 | return ERR_PTR(-ENOMEM); | ||
1258 | |||
1259 | printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s" | ||
1260 | " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), | ||
1261 | config_item_name(&wwn->wwn_group.cg_item), tpgt); | ||
1262 | |||
1263 | return &tl_tpg->tl_se_tpg; | ||
1264 | } | ||
1265 | |||
1266 | void tcm_loop_drop_naa_tpg( | ||
1267 | struct se_portal_group *se_tpg) | ||
1268 | { | ||
1269 | struct se_wwn *wwn = se_tpg->se_tpg_wwn; | ||
1270 | struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, | ||
1271 | struct tcm_loop_tpg, tl_se_tpg); | ||
1272 | struct tcm_loop_hba *tl_hba; | ||
1273 | unsigned short tpgt; | ||
1274 | |||
1275 | tl_hba = tl_tpg->tl_hba; | ||
1276 | tpgt = tl_tpg->tl_tpgt; | ||
1277 | /* | ||
1278 | * Release the I_T Nexus for the Virtual SAS link if present | ||
1279 | */ | ||
1280 | tcm_loop_drop_nexus(tl_tpg); | ||
1281 | /* | ||
1282 | 	 * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint | ||
1283 | */ | ||
1284 | core_tpg_deregister(se_tpg); | ||
1285 | |||
1286 | printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s" | ||
1287 | " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), | ||
1288 | config_item_name(&wwn->wwn_group.cg_item), tpgt); | ||
1289 | } | ||
1290 | |||
1291 | /* End items for tcm_loop_naa_cit */ | ||
1292 | |||
1293 | /* Start items for tcm_loop_cit */ | ||
1294 | |||
1295 | struct se_wwn *tcm_loop_make_scsi_hba( | ||
1296 | struct target_fabric_configfs *tf, | ||
1297 | struct config_group *group, | ||
1298 | const char *name) | ||
1299 | { | ||
1300 | struct tcm_loop_hba *tl_hba; | ||
1301 | struct Scsi_Host *sh; | ||
1302 | char *ptr; | ||
1303 | int ret, off = 0; | ||
1304 | |||
1305 | tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL); | ||
1306 | if (!tl_hba) { | ||
1307 | printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n"); | ||
1308 | return ERR_PTR(-ENOMEM); | ||
1309 | } | ||
1310 | /* | ||
1311 | * Determine the emulated Protocol Identifier and Target Port Name | ||
1312 | * based on the incoming configfs directory name. | ||
1313 | */ | ||
1314 | ptr = strstr(name, "naa."); | ||
1315 | if (ptr) { | ||
1316 | tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS; | ||
1317 | goto check_len; | ||
1318 | } | ||
1319 | ptr = strstr(name, "fc."); | ||
1320 | if (ptr) { | ||
1321 | tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP; | ||
1322 | off = 3; /* Skip over "fc." */ | ||
1323 | goto check_len; | ||
1324 | } | ||
1325 | ptr = strstr(name, "iqn."); | ||
1326 | if (ptr) { | ||
1327 | tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI; | ||
1328 | goto check_len; | ||
1329 | } | ||
1330 | |||
1331 | printk(KERN_ERR "Unable to locate prefix for emulated Target Port:" | ||
1332 | " %s\n", name); | ||
1333 | return ERR_PTR(-EINVAL); | ||
1334 | |||
1335 | check_len: | ||
1336 | if (strlen(name) > TL_WWN_ADDR_LEN) { | ||
1337 | printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds" | ||
1338 | 			" max: %d\n", tcm_loop_dump_proto_id(tl_hba), name, | ||
1339 | TL_WWN_ADDR_LEN); | ||
1340 | kfree(tl_hba); | ||
1341 | return ERR_PTR(-EINVAL); | ||
1342 | } | ||
1343 | snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]); | ||
1344 | |||
1345 | /* | ||
1346 | 	 * Call tcm_loop_setup_hba_bus() to register the emulated device; | ||
1347 | 	 * the bus probe callback tcm_loop_driver_probe() then allocates | ||
1348 | 	 * the Linux/SCSI LLD struct Scsi_Host at tl_hba->sh. | ||
1349 | */ | ||
1350 | ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt); | ||
1351 | if (ret) | ||
1352 | goto out; | ||
1353 | |||
1354 | sh = tl_hba->sh; | ||
1355 | tcm_loop_hba_no_cnt++; | ||
1356 | printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target" | ||
1357 | " %s Address: %s at Linux/SCSI Host ID: %d\n", | ||
1358 | tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); | ||
1359 | |||
1360 | return &tl_hba->tl_hba_wwn; | ||
1361 | out: | ||
1362 | kfree(tl_hba); | ||
1363 | return ERR_PTR(ret); | ||
1364 | } | ||
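
The prefix probing above maps the configfs directory name to the emulated protocol, e.g. a mkdir of naa.<wwn>, fc.<wwn> or iqn.<name> under the fabric's configfs group. A table-driven equivalent of the same dispatch (a sketch, not part of this patch):

    /* Sketch: the three strstr() branches expressed as one lookup table. */
    static const struct {
            const char *prefix;
            u8 proto_id;
            int off;        /* bytes to skip when copying the WWN */
    } tl_proto_tbl[] = {
            { "naa.", SCSI_PROTOCOL_SAS,   0 },
            { "fc.",  SCSI_PROTOCOL_FCP,   3 },
            { "iqn.", SCSI_PROTOCOL_ISCSI, 0 },
    };

Note that only the "fc." prefix is stripped before the WWN copy; the "naa." and "iqn." prefixes are kept as part of the stored address, matching the off values in the code above.
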
1365 | |||
1366 | void tcm_loop_drop_scsi_hba( | ||
1367 | struct se_wwn *wwn) | ||
1368 | { | ||
1369 | struct tcm_loop_hba *tl_hba = container_of(wwn, | ||
1370 | struct tcm_loop_hba, tl_hba_wwn); | ||
1371 | int host_no = tl_hba->sh->host_no; | ||
1372 | /* | ||
1373 | * Call device_unregister() on the original tl_hba->dev. | ||
1374 | 	 * tcm_loop.c:tcm_loop_release_adapter() will | ||
1375 | 	 * release *tl_hba. | ||
1376 | */ | ||
1377 | device_unregister(&tl_hba->dev); | ||
1378 | |||
1379 | printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target" | ||
1380 | " SAS Address: %s at Linux/SCSI Host ID: %d\n", | ||
1381 | config_item_name(&wwn->wwn_group.cg_item), host_no); | ||
1382 | } | ||
1383 | |||
1384 | /* Start items for tcm_loop_cit */ | ||
1385 | static ssize_t tcm_loop_wwn_show_attr_version( | ||
1386 | struct target_fabric_configfs *tf, | ||
1387 | char *page) | ||
1388 | { | ||
1389 | return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION); | ||
1390 | } | ||
1391 | |||
1392 | TF_WWN_ATTR_RO(tcm_loop, version); | ||
1393 | |||
1394 | static struct configfs_attribute *tcm_loop_wwn_attrs[] = { | ||
1395 | &tcm_loop_wwn_version.attr, | ||
1396 | NULL, | ||
1397 | }; | ||
1398 | |||
1399 | /* End items for tcm_loop_cit */ | ||
1400 | |||
1401 | static int tcm_loop_register_configfs(void) | ||
1402 | { | ||
1403 | struct target_fabric_configfs *fabric; | ||
1404 | struct config_group *tf_cg; | ||
1405 | int ret; | ||
1406 | /* | ||
1407 | * Set the TCM Loop HBA counter to zero | ||
1408 | */ | ||
1409 | tcm_loop_hba_no_cnt = 0; | ||
1410 | /* | ||
1411 | * Register the top level struct config_item_type with TCM core | ||
1412 | */ | ||
1413 | fabric = target_fabric_configfs_init(THIS_MODULE, "loopback"); | ||
1414 | if (!fabric) { | ||
1415 | printk(KERN_ERR "tcm_loop_register_configfs() failed!\n"); | ||
1416 | return -1; | ||
1417 | } | ||
1418 | /* | ||
1419 | * Setup the fabric API of function pointers used by target_core_mod | ||
1420 | */ | ||
1421 | fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name; | ||
1422 | fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident; | ||
1423 | fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn; | ||
1424 | fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag; | ||
1425 | fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth; | ||
1426 | fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id; | ||
1427 | fabric->tf_ops.tpg_get_pr_transport_id_len = | ||
1428 | &tcm_loop_get_pr_transport_id_len; | ||
1429 | fabric->tf_ops.tpg_parse_pr_out_transport_id = | ||
1430 | &tcm_loop_parse_pr_out_transport_id; | ||
1431 | fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode; | ||
1432 | fabric->tf_ops.tpg_check_demo_mode_cache = | ||
1433 | &tcm_loop_check_demo_mode_cache; | ||
1434 | fabric->tf_ops.tpg_check_demo_mode_write_protect = | ||
1435 | &tcm_loop_check_demo_mode_write_protect; | ||
1436 | fabric->tf_ops.tpg_check_prod_mode_write_protect = | ||
1437 | &tcm_loop_check_prod_mode_write_protect; | ||
1438 | /* | ||
1439 | * The TCM loopback fabric module runs in demo-mode to a local | ||
1440 | 	 * virtual SCSI device, so fabric-dependent initiator ACLs are | ||
1441 | * not required. | ||
1442 | */ | ||
1443 | fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl; | ||
1444 | fabric->tf_ops.tpg_release_fabric_acl = | ||
1445 | &tcm_loop_tpg_release_fabric_acl; | ||
1446 | fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index; | ||
1447 | /* | ||
1448 | * Since tcm_loop is mapping physical memory from Linux/SCSI | ||
1449 | * struct scatterlist arrays for each struct scsi_cmnd I/O, | ||
1450 | 	 * we do not need TCM to allocate an iovec array for | ||
1451 | * virtual memory address mappings | ||
1452 | */ | ||
1453 | fabric->tf_ops.alloc_cmd_iovecs = NULL; | ||
1454 | /* | ||
1455 | * Used for setting up remaining TCM resources in process context | ||
1456 | */ | ||
1457 | fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map; | ||
1458 | fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free; | ||
1459 | fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd; | ||
1460 | fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd; | ||
1461 | fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session; | ||
1462 | fabric->tf_ops.close_session = &tcm_loop_close_session; | ||
1463 | fabric->tf_ops.stop_session = &tcm_loop_stop_session; | ||
1464 | fabric->tf_ops.fall_back_to_erl0 = &tcm_loop_fall_back_to_erl0; | ||
1465 | fabric->tf_ops.sess_logged_in = &tcm_loop_sess_logged_in; | ||
1466 | fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index; | ||
1467 | fabric->tf_ops.sess_get_initiator_sid = NULL; | ||
1468 | fabric->tf_ops.write_pending = &tcm_loop_write_pending; | ||
1469 | fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status; | ||
1470 | /* | ||
1471 | * Not used for TCM loopback | ||
1472 | */ | ||
1473 | fabric->tf_ops.set_default_node_attributes = | ||
1474 | &tcm_loop_set_default_node_attributes; | ||
1475 | fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag; | ||
1476 | fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state; | ||
1477 | fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure; | ||
1478 | fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; | ||
1479 | fabric->tf_ops.queue_status = &tcm_loop_queue_status; | ||
1480 | fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; | ||
1481 | fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len; | ||
1482 | fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len; | ||
1483 | fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove; | ||
1484 | fabric->tf_ops.pack_lun = &tcm_loop_pack_lun; | ||
1485 | |||
1486 | tf_cg = &fabric->tf_group; | ||
1487 | /* | ||
1488 | * Setup function pointers for generic logic in target_core_fabric_configfs.c | ||
1489 | */ | ||
1490 | fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba; | ||
1491 | fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba; | ||
1492 | fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg; | ||
1493 | fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg; | ||
1494 | /* | ||
1495 | * fabric_post_link() and fabric_pre_unlink() are used for | ||
1496 | * registration and release of TCM Loop Virtual SCSI LUNs. | ||
1497 | */ | ||
1498 | fabric->tf_ops.fabric_post_link = &tcm_loop_port_link; | ||
1499 | fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink; | ||
1500 | fabric->tf_ops.fabric_make_np = NULL; | ||
1501 | fabric->tf_ops.fabric_drop_np = NULL; | ||
1502 | /* | ||
1503 | * Setup default attribute lists for various fabric->tf_cit_tmpl | ||
1504 | */ | ||
1505 | TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; | ||
1506 | TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; | ||
1507 | TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; | ||
1508 | TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; | ||
1509 | TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; | ||
1510 | /* | ||
1511 | * Once fabric->tf_ops has been setup, now register the fabric for | ||
1512 | * use within TCM | ||
1513 | */ | ||
1514 | ret = target_fabric_configfs_register(fabric); | ||
1515 | if (ret < 0) { | ||
1516 | printk(KERN_ERR "target_fabric_configfs_register() for" | ||
1517 | " TCM_Loop failed!\n"); | ||
1518 | target_fabric_configfs_free(fabric); | ||
1519 | return -1; | ||
1520 | } | ||
1521 | /* | ||
1522 | * Setup our local pointer to *fabric. | ||
1523 | */ | ||
1524 | tcm_loop_fabric_configfs = fabric; | ||
1525 | printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->" | ||
1526 | " tcm_loop_fabric_configfs\n"); | ||
1527 | return 0; | ||
1528 | } | ||
1529 | |||
1530 | static void tcm_loop_deregister_configfs(void) | ||
1531 | { | ||
1532 | if (!tcm_loop_fabric_configfs) | ||
1533 | return; | ||
1534 | |||
1535 | target_fabric_configfs_deregister(tcm_loop_fabric_configfs); | ||
1536 | tcm_loop_fabric_configfs = NULL; | ||
1537 | printk(KERN_INFO "TCM_LOOP[0] - Cleared" | ||
1538 | " tcm_loop_fabric_configfs\n"); | ||
1539 | } | ||
1540 | |||
1541 | static int __init tcm_loop_fabric_init(void) | ||
1542 | { | ||
1543 | int ret; | ||
1544 | |||
1545 | tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache", | ||
1546 | sizeof(struct tcm_loop_cmd), | ||
1547 | __alignof__(struct tcm_loop_cmd), | ||
1548 | 0, NULL); | ||
1549 | if (!tcm_loop_cmd_cache) { | ||
1550 | printk(KERN_ERR "kmem_cache_create() for" | ||
1551 | " tcm_loop_cmd_cache failed\n"); | ||
1552 | return -ENOMEM; | ||
1553 | } | ||
1554 | |||
1555 | ret = tcm_loop_alloc_core_bus(); | ||
1556 | if (ret) | ||
1557 | return ret; | ||
1558 | |||
1559 | ret = tcm_loop_register_configfs(); | ||
1560 | if (ret) { | ||
1561 | tcm_loop_release_core_bus(); | ||
1562 | return ret; | ||
1563 | } | ||
1564 | |||
1565 | return 0; | ||
1566 | } | ||
1567 | |||
1568 | static void __exit tcm_loop_fabric_exit(void) | ||
1569 | { | ||
1570 | tcm_loop_deregister_configfs(); | ||
1571 | tcm_loop_release_core_bus(); | ||
1572 | kmem_cache_destroy(tcm_loop_cmd_cache); | ||
1573 | } | ||
1574 | |||
1575 | MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module"); | ||
1576 | MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>"); | ||
1577 | MODULE_LICENSE("GPL"); | ||
1578 | module_init(tcm_loop_fabric_init); | ||
1579 | module_exit(tcm_loop_fabric_exit); | ||
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h new file mode 100644 index 00000000000..7e9f7ab4554 --- /dev/null +++ b/drivers/target/loopback/tcm_loop.h | |||
@@ -0,0 +1,77 @@ | |||
1 | #define TCM_LOOP_VERSION "v2.1-rc1" | ||
2 | #define TL_WWN_ADDR_LEN 256 | ||
3 | #define TL_TPGS_PER_HBA 32 | ||
4 | /* | ||
5 | * Defaults for struct scsi_host_template tcm_loop_driver_template | ||
6 | * | ||
7 | * We use large can_queue and cmd_per_lun here and let TCM enforce | ||
8 |  * the underlying struct se_device->queue_depth. | ||
9 | */ | ||
10 | #define TL_SCSI_CAN_QUEUE 1024 | ||
11 | #define TL_SCSI_CMD_PER_LUN 1024 | ||
12 | #define TL_SCSI_MAX_SECTORS 1024 | ||
13 | #define TL_SCSI_SG_TABLESIZE 256 | ||
14 | /* | ||
15 | * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len | ||
16 | */ | ||
17 | #define TL_SCSI_MAX_CMD_LEN 32 | ||
18 | |||
19 | #ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG | ||
20 | # define TL_CDB_DEBUG(x...) printk(KERN_INFO x) | ||
21 | #else | ||
22 | # define TL_CDB_DEBUG(x...) | ||
23 | #endif | ||
24 | |||
25 | struct tcm_loop_cmd { | ||
26 | /* State of Linux/SCSI CDB+Data descriptor */ | ||
27 | u32 sc_cmd_state; | ||
28 | /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */ | ||
29 | struct scsi_cmnd *sc; | ||
30 | struct list_head *tl_cmd_list; | ||
31 | /* The TCM I/O descriptor that is accessed via container_of() */ | ||
32 | struct se_cmd tl_se_cmd; | ||
33 | /* Sense buffer that will be mapped into outgoing status */ | ||
34 | unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER]; | ||
35 | }; | ||
36 | |||
37 | struct tcm_loop_tmr { | ||
38 | atomic_t tmr_complete; | ||
39 | wait_queue_head_t tl_tmr_wait; | ||
40 | }; | ||
41 | |||
42 | struct tcm_loop_nexus { | ||
43 | int it_nexus_active; | ||
44 | /* | ||
45 | 	 * Pointer to Linux/SCSI HBA from include/scsi/scsi_host.h | ||
46 | */ | ||
47 | 	struct Scsi_Host *sh; | ||
48 | /* | ||
49 | * Pointer to TCM session for I_T Nexus | ||
50 | */ | ||
51 | struct se_session *se_sess; | ||
52 | }; | ||
53 | |||
54 | struct tcm_loop_nacl { | ||
55 | struct se_node_acl se_node_acl; | ||
56 | }; | ||
57 | |||
58 | struct tcm_loop_tpg { | ||
59 | unsigned short tl_tpgt; | ||
60 | atomic_t tl_tpg_port_count; | ||
61 | struct se_portal_group tl_se_tpg; | ||
62 | struct tcm_loop_hba *tl_hba; | ||
63 | }; | ||
64 | |||
65 | struct tcm_loop_hba { | ||
66 | u8 tl_proto_id; | ||
67 | unsigned char tl_wwn_address[TL_WWN_ADDR_LEN]; | ||
68 | struct se_hba_s *se_hba; | ||
69 | struct se_lun *tl_hba_lun; | ||
70 | struct se_port *tl_hba_lun_sep; | ||
71 | struct se_device_s *se_dev_hba_ptr; | ||
72 | struct tcm_loop_nexus *tl_nexus; | ||
73 | struct device dev; | ||
74 | struct Scsi_Host *sh; | ||
75 | struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA]; | ||
76 | struct se_wwn tl_hba_wwn; | ||
77 | }; | ||
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index caf8dc18ee0..a5f44a6e6e1 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -3,8 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This file contains ConfigFS logic for the Generic Target Engine project. | 4 | * This file contains ConfigFS logic for the Generic Target Engine project. |
5 | * | 5 | * |
6 | * Copyright (c) 2008-2010 Rising Tide Systems | 6 | * Copyright (c) 2008-2011 Rising Tide Systems |
7 | * Copyright (c) 2008-2010 Linux-iSCSI.org | 7 | * Copyright (c) 2008-2011 Linux-iSCSI.org |
8 | * | 8 | * |
9 | * Nicholas A. Bellinger <nab@kernel.org> | 9 | * Nicholas A. Bellinger <nab@kernel.org> |
10 | * | 10 | * |
@@ -50,6 +50,7 @@ | |||
50 | #include "target_core_hba.h" | 50 | #include "target_core_hba.h" |
51 | #include "target_core_pr.h" | 51 | #include "target_core_pr.h" |
52 | #include "target_core_rd.h" | 52 | #include "target_core_rd.h" |
53 | #include "target_core_stat.h" | ||
53 | 54 | ||
54 | static struct list_head g_tf_list; | 55 | static struct list_head g_tf_list; |
55 | static struct mutex g_tf_lock; | 56 | static struct mutex g_tf_lock; |
@@ -1451,8 +1452,8 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1451 | size_t count) | 1452 | size_t count) |
1452 | { | 1453 | { |
1453 | struct se_device *dev; | 1454 | struct se_device *dev; |
1454 | unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL; | 1455 | unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; |
1455 | unsigned char *isid = NULL; | 1456 | unsigned char *t_fabric = NULL, *t_port = NULL; |
1456 | char *orig, *ptr, *arg_p, *opts; | 1457 | char *orig, *ptr, *arg_p, *opts; |
1457 | substring_t args[MAX_OPT_ARGS]; | 1458 | substring_t args[MAX_OPT_ARGS]; |
1458 | unsigned long long tmp_ll; | 1459 | unsigned long long tmp_ll; |
@@ -1488,9 +1489,17 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1488 | switch (token) { | 1489 | switch (token) { |
1489 | case Opt_initiator_fabric: | 1490 | case Opt_initiator_fabric: |
1490 | i_fabric = match_strdup(&args[0]); | 1491 | i_fabric = match_strdup(&args[0]); |
1492 | if (!i_fabric) { | ||
1493 | ret = -ENOMEM; | ||
1494 | goto out; | ||
1495 | } | ||
1491 | break; | 1496 | break; |
1492 | case Opt_initiator_node: | 1497 | case Opt_initiator_node: |
1493 | i_port = match_strdup(&args[0]); | 1498 | i_port = match_strdup(&args[0]); |
1499 | if (!i_port) { | ||
1500 | ret = -ENOMEM; | ||
1501 | goto out; | ||
1502 | } | ||
1494 | if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) { | 1503 | if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) { |
1495 | printk(KERN_ERR "APTPL metadata initiator_node=" | 1504 | printk(KERN_ERR "APTPL metadata initiator_node=" |
1496 | " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", | 1505 | " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", |
@@ -1501,6 +1510,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1501 | break; | 1510 | break; |
1502 | case Opt_initiator_sid: | 1511 | case Opt_initiator_sid: |
1503 | isid = match_strdup(&args[0]); | 1512 | isid = match_strdup(&args[0]); |
1513 | if (!isid) { | ||
1514 | ret = -ENOMEM; | ||
1515 | goto out; | ||
1516 | } | ||
1504 | if (strlen(isid) > PR_REG_ISID_LEN) { | 1517 | if (strlen(isid) > PR_REG_ISID_LEN) { |
1505 | printk(KERN_ERR "APTPL metadata initiator_isid" | 1518 | printk(KERN_ERR "APTPL metadata initiator_isid" |
1506 | "= exceeds PR_REG_ISID_LEN: %d\n", | 1519 | "= exceeds PR_REG_ISID_LEN: %d\n", |
@@ -1511,6 +1524,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1511 | break; | 1524 | break; |
1512 | case Opt_sa_res_key: | 1525 | case Opt_sa_res_key: |
1513 | arg_p = match_strdup(&args[0]); | 1526 | arg_p = match_strdup(&args[0]); |
1527 | if (!arg_p) { | ||
1528 | ret = -ENOMEM; | ||
1529 | goto out; | ||
1530 | } | ||
1514 | ret = strict_strtoull(arg_p, 0, &tmp_ll); | 1531 | ret = strict_strtoull(arg_p, 0, &tmp_ll); |
1515 | if (ret < 0) { | 1532 | if (ret < 0) { |
1516 | printk(KERN_ERR "strict_strtoull() failed for" | 1533 | printk(KERN_ERR "strict_strtoull() failed for" |
@@ -1547,9 +1564,17 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1547 | */ | 1564 | */ |
1548 | case Opt_target_fabric: | 1565 | case Opt_target_fabric: |
1549 | t_fabric = match_strdup(&args[0]); | 1566 | t_fabric = match_strdup(&args[0]); |
1567 | if (!t_fabric) { | ||
1568 | ret = -ENOMEM; | ||
1569 | goto out; | ||
1570 | } | ||
1550 | break; | 1571 | break; |
1551 | case Opt_target_node: | 1572 | case Opt_target_node: |
1552 | t_port = match_strdup(&args[0]); | 1573 | t_port = match_strdup(&args[0]); |
1574 | if (!t_port) { | ||
1575 | ret = -ENOMEM; | ||
1576 | goto out; | ||
1577 | } | ||
1553 | if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) { | 1578 | if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) { |
1554 | printk(KERN_ERR "APTPL metadata target_node=" | 1579 | printk(KERN_ERR "APTPL metadata target_node=" |
1555 | " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", | 1580 | " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", |
@@ -1592,6 +1617,11 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | |||
1592 | i_port, isid, mapped_lun, t_port, tpgt, target_lun, | 1617 | i_port, isid, mapped_lun, t_port, tpgt, target_lun, |
1593 | res_holder, all_tg_pt, type); | 1618 | res_holder, all_tg_pt, type); |
1594 | out: | 1619 | out: |
1620 | kfree(i_fabric); | ||
1621 | kfree(i_port); | ||
1622 | kfree(isid); | ||
1623 | kfree(t_fabric); | ||
1624 | kfree(t_port); | ||
1595 | kfree(orig); | 1625 | kfree(orig); |
1596 | return (ret == 0) ? count : ret; | 1626 | return (ret == 0) ? count : ret; |
1597 | } | 1627 | } |
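
The hunk above applies a standard kernel cleanup idiom: initialize every strdup'd pointer to NULL, fail each allocation individually, and free everything unconditionally at the single exit, since kfree(NULL) is a no-op. A self-contained sketch of the pattern (hypothetical function, using kstrdup() in place of match_strdup()):

    #include <linux/slab.h>
    #include <linux/string.h>

    static int parse_pair(const char *s1, const char *s2)
    {
            char *a = NULL, *b = NULL;
            int ret = 0;

            a = kstrdup(s1, GFP_KERNEL);
            if (!a) {
                    ret = -ENOMEM;
                    goto out;
            }
            b = kstrdup(s2, GFP_KERNEL);
            if (!b) {
                    ret = -ENOMEM;
                    goto out;
            }
            /* ... consume a and b ... */
    out:
            kfree(a);       /* kfree(NULL) is a no-op */
            kfree(b);
            return ret;
    }
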
@@ -1798,7 +1828,9 @@ static ssize_t target_core_store_dev_enable( | |||
1798 | return -EINVAL; | 1828 | return -EINVAL; |
1799 | 1829 | ||
1800 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); | 1830 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); |
1801 | if (!(dev) || IS_ERR(dev)) | 1831 | if (IS_ERR(dev)) |
1832 | return PTR_ERR(dev); | ||
1833 | else if (!dev) | ||
1802 | return -EINVAL; | 1834 | return -EINVAL; |
1803 | 1835 | ||
1804 | se_dev->se_dev_ptr = dev; | 1836 | se_dev->se_dev_ptr = dev; |
@@ -2678,6 +2710,34 @@ static struct config_item_type target_core_alua_cit = { | |||
2678 | 2710 | ||
2679 | /* End functions for struct config_item_type target_core_alua_cit */ | 2711 | /* End functions for struct config_item_type target_core_alua_cit */ |
2680 | 2712 | ||
2713 | /* Start functions for struct config_item_type target_core_stat_cit */ | ||
2714 | |||
2715 | static struct config_group *target_core_stat_mkdir( | ||
2716 | struct config_group *group, | ||
2717 | const char *name) | ||
2718 | { | ||
2719 | return ERR_PTR(-ENOSYS); | ||
2720 | } | ||
2721 | |||
2722 | static void target_core_stat_rmdir( | ||
2723 | struct config_group *group, | ||
2724 | struct config_item *item) | ||
2725 | { | ||
2726 | return; | ||
2727 | } | ||
2728 | |||
2729 | static struct configfs_group_operations target_core_stat_group_ops = { | ||
2730 | .make_group = &target_core_stat_mkdir, | ||
2731 | .drop_item = &target_core_stat_rmdir, | ||
2732 | }; | ||
2733 | |||
2734 | static struct config_item_type target_core_stat_cit = { | ||
2735 | .ct_group_ops = &target_core_stat_group_ops, | ||
2736 | .ct_owner = THIS_MODULE, | ||
2737 | }; | ||
2738 | |||
2739 | /* End functions for struct config_item_type target_core_stat_cit */ | ||
2740 | |||
2681 | /* Start functions for struct config_item_type target_core_hba_cit */ | 2741 | /* Start functions for struct config_item_type target_core_hba_cit */ |
2682 | 2742 | ||
2683 | static struct config_group *target_core_make_subdev( | 2743 | static struct config_group *target_core_make_subdev( |
@@ -2690,10 +2750,12 @@ static struct config_group *target_core_make_subdev( | |||
2690 | struct config_item *hba_ci = &group->cg_item; | 2750 | struct config_item *hba_ci = &group->cg_item; |
2691 | struct se_hba *hba = item_to_hba(hba_ci); | 2751 | struct se_hba *hba = item_to_hba(hba_ci); |
2692 | struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; | 2752 | struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; |
2753 | struct config_group *dev_stat_grp = NULL; | ||
2754 | int errno = -ENOMEM, ret; | ||
2693 | 2755 | ||
2694 | if (mutex_lock_interruptible(&hba->hba_access_mutex)) | 2756 | ret = mutex_lock_interruptible(&hba->hba_access_mutex); |
2695 | return NULL; | 2757 | if (ret) |
2696 | 2758 | return ERR_PTR(ret); | |
2697 | /* | 2759 | /* |
2698 | * Locate the struct se_subsystem_api from parent's struct se_hba. | 2760 | * Locate the struct se_subsystem_api from parent's struct se_hba. |
2699 | */ | 2761 | */ |
@@ -2723,7 +2785,7 @@ static struct config_group *target_core_make_subdev( | |||
2723 | se_dev->se_dev_hba = hba; | 2785 | se_dev->se_dev_hba = hba; |
2724 | dev_cg = &se_dev->se_dev_group; | 2786 | dev_cg = &se_dev->se_dev_group; |
2725 | 2787 | ||
2726 | dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 6, | 2788 | dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7, |
2727 | GFP_KERNEL); | 2789 | GFP_KERNEL); |
2728 | if (!(dev_cg->default_groups)) | 2790 | if (!(dev_cg->default_groups)) |
2729 | goto out; | 2791 | goto out; |
@@ -2755,13 +2817,17 @@ static struct config_group *target_core_make_subdev( | |||
2755 | &target_core_dev_wwn_cit); | 2817 | &target_core_dev_wwn_cit); |
2756 | config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group, | 2818 | config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group, |
2757 | "alua", &target_core_alua_tg_pt_gps_cit); | 2819 | "alua", &target_core_alua_tg_pt_gps_cit); |
2820 | config_group_init_type_name(&se_dev->dev_stat_grps.stat_group, | ||
2821 | "statistics", &target_core_stat_cit); | ||
2822 | |||
2758 | dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group; | 2823 | dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group; |
2759 | dev_cg->default_groups[1] = &se_dev->se_dev_pr_group; | 2824 | dev_cg->default_groups[1] = &se_dev->se_dev_pr_group; |
2760 | dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group; | 2825 | dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group; |
2761 | dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group; | 2826 | dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group; |
2762 | dev_cg->default_groups[4] = NULL; | 2827 | dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group; |
2828 | dev_cg->default_groups[5] = NULL; | ||
2763 | /* | 2829 | /* |
2764 | * Add core/$HBA/$DEV/alua/tg_pt_gps/default_tg_pt_gp | 2830 | * Add core/$HBA/$DEV/alua/default_tg_pt_gp |
2765 | */ | 2831 | */ |
2766 | tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); | 2832 | tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); |
2767 | if (!(tg_pt_gp)) | 2833 | if (!(tg_pt_gp)) |
@@ -2781,6 +2847,17 @@ static struct config_group *target_core_make_subdev( | |||
2781 | tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; | 2847 | tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; |
2782 | tg_pt_gp_cg->default_groups[1] = NULL; | 2848 | tg_pt_gp_cg->default_groups[1] = NULL; |
2783 | T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp; | 2849 | T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp; |
2850 | /* | ||
2851 | * Add core/$HBA/$DEV/statistics/ default groups | ||
2852 | */ | ||
2853 | dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; | ||
2854 | dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, | ||
2855 | GFP_KERNEL); | ||
2856 | if (!dev_stat_grp->default_groups) { | ||
2857 | printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n"); | ||
2858 | goto out; | ||
2859 | } | ||
2860 | target_stat_setup_dev_default_groups(se_dev); | ||
2784 | 2861 | ||
2785 | printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" | 2862 | printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" |
2786 | " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); | 2863 | " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); |
@@ -2792,6 +2869,8 @@ out: | |||
2792 | core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); | 2869 | core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); |
2793 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; | 2870 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; |
2794 | } | 2871 | } |
2872 | if (dev_stat_grp) | ||
2873 | kfree(dev_stat_grp->default_groups); | ||
2795 | if (tg_pt_gp_cg) | 2874 | if (tg_pt_gp_cg) |
2796 | kfree(tg_pt_gp_cg->default_groups); | 2875 | kfree(tg_pt_gp_cg->default_groups); |
2797 | if (dev_cg) | 2876 | if (dev_cg) |
@@ -2801,7 +2880,7 @@ out: | |||
2801 | kfree(se_dev); | 2880 | kfree(se_dev); |
2802 | unlock: | 2881 | unlock: |
2803 | mutex_unlock(&hba->hba_access_mutex); | 2882 | mutex_unlock(&hba->hba_access_mutex); |
2804 | return NULL; | 2883 | return ERR_PTR(errno); |
2805 | } | 2884 | } |
2806 | 2885 | ||
2807 | static void target_core_drop_subdev( | 2886 | static void target_core_drop_subdev( |
@@ -2813,7 +2892,7 @@ static void target_core_drop_subdev( | |||
2813 | struct se_hba *hba; | 2892 | struct se_hba *hba; |
2814 | struct se_subsystem_api *t; | 2893 | struct se_subsystem_api *t; |
2815 | struct config_item *df_item; | 2894 | struct config_item *df_item; |
2816 | struct config_group *dev_cg, *tg_pt_gp_cg; | 2895 | struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp; |
2817 | int i; | 2896 | int i; |
2818 | 2897 | ||
2819 | hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); | 2898 | hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); |
@@ -2825,6 +2904,14 @@ static void target_core_drop_subdev( | |||
2825 | list_del(&se_dev->g_se_dev_list); | 2904 | list_del(&se_dev->g_se_dev_list); |
2826 | spin_unlock(&se_global->g_device_lock); | 2905 | spin_unlock(&se_global->g_device_lock); |
2827 | 2906 | ||
2907 | dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; | ||
2908 | for (i = 0; dev_stat_grp->default_groups[i]; i++) { | ||
2909 | df_item = &dev_stat_grp->default_groups[i]->cg_item; | ||
2910 | dev_stat_grp->default_groups[i] = NULL; | ||
2911 | config_item_put(df_item); | ||
2912 | } | ||
2913 | kfree(dev_stat_grp->default_groups); | ||
2914 | |||
2828 | tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; | 2915 | tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; |
2829 | for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { | 2916 | for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { |
2830 | df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; | 2917 | df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; |
@@ -3044,7 +3131,7 @@ static struct config_item_type target_core_cit = { | |||
3044 | 3131 | ||
3045 | /* Stop functions for struct config_item_type target_core_hba_cit */ | 3132 | /* Stop functions for struct config_item_type target_core_hba_cit */ |
3046 | 3133 | ||
3047 | static int target_core_init_configfs(void) | 3134 | static int __init target_core_init_configfs(void) |
3048 | { | 3135 | { |
3049 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; | 3136 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; |
3050 | struct config_group *lu_gp_cg = NULL; | 3137 | struct config_group *lu_gp_cg = NULL; |
@@ -3176,7 +3263,7 @@ out_global: | |||
3176 | return -1; | 3263 | return -1; |
3177 | } | 3264 | } |
3178 | 3265 | ||
3179 | static void target_core_exit_configfs(void) | 3266 | static void __exit target_core_exit_configfs(void) |
3180 | { | 3267 | { |
3181 | struct configfs_subsystem *subsys; | 3268 | struct configfs_subsystem *subsys; |
3182 | struct config_group *hba_cg, *alua_cg, *lu_gp_cg; | 3269 | struct config_group *hba_cg, *alua_cg, *lu_gp_cg; |
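The last two hunks in this file tag the module entry points __init and __exit so the init text can be discarded once loading completes, and the exit text dropped entirely when the code is built in. A self-contained sketch of the pairing, with illustrative names:

#include <linux/module.h>
#include <linux/init.h>

static int __init my_mod_init(void)
{
	return 0;	/* this text is freed after the module finishes loading */
}

static void __exit my_mod_exit(void)
{
	/* omitted entirely from built-in (non-modular) kernels */
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");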
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 350ed401544..3fb8e32506e 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -589,6 +589,7 @@ static void core_export_port( | |||
589 | * Called with struct se_device->se_port_lock spinlock held. | 589 | * Called with struct se_device->se_port_lock spinlock held. |
590 | */ | 590 | */ |
591 | static void core_release_port(struct se_device *dev, struct se_port *port) | 591 | static void core_release_port(struct se_device *dev, struct se_port *port) |
592 | __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock) | ||
592 | { | 593 | { |
593 | /* | 594 | /* |
594 | * Wait for any port reference for PR ALL_TG_PT=1 operation | 595 | * Wait for any port reference for PR ALL_TG_PT=1 operation |
@@ -779,49 +780,14 @@ void se_release_vpd_for_dev(struct se_device *dev) | |||
779 | return; | 780 | return; |
780 | } | 781 | } |
781 | 782 | ||
782 | /* | ||
783 | * Called with struct se_hba->device_lock held. | ||
784 | */ | ||
785 | void se_clear_dev_ports(struct se_device *dev) | ||
786 | { | ||
787 | struct se_hba *hba = dev->se_hba; | ||
788 | struct se_lun *lun; | ||
789 | struct se_portal_group *tpg; | ||
790 | struct se_port *sep, *sep_tmp; | ||
791 | |||
792 | spin_lock(&dev->se_port_lock); | ||
793 | list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { | ||
794 | spin_unlock(&dev->se_port_lock); | ||
795 | spin_unlock(&hba->device_lock); | ||
796 | |||
797 | lun = sep->sep_lun; | ||
798 | tpg = sep->sep_tpg; | ||
799 | spin_lock(&lun->lun_sep_lock); | ||
800 | if (lun->lun_se_dev == NULL) { | ||
801 | spin_unlock(&lun->lun_sep_lock); | ||
802 | continue; | ||
803 | } | ||
804 | spin_unlock(&lun->lun_sep_lock); | ||
805 | |||
806 | core_dev_del_lun(tpg, lun->unpacked_lun); | ||
807 | |||
808 | spin_lock(&hba->device_lock); | ||
809 | spin_lock(&dev->se_port_lock); | ||
810 | } | ||
811 | spin_unlock(&dev->se_port_lock); | ||
812 | |||
813 | return; | ||
814 | } | ||
815 | |||
816 | /* se_free_virtual_device(): | 783 | /* se_free_virtual_device(): |
817 | * | 784 | * |
818 | * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. | 785 | * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. |
819 | */ | 786 | */ |
820 | int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) | 787 | int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) |
821 | { | 788 | { |
822 | spin_lock(&hba->device_lock); | 789 | if (!list_empty(&dev->dev_sep_list)) |
823 | se_clear_dev_ports(dev); | 790 | dump_stack(); |
824 | spin_unlock(&hba->device_lock); | ||
825 | 791 | ||
826 | core_alua_free_lu_gp_mem(dev); | 792 | core_alua_free_lu_gp_mem(dev); |
827 | se_release_device_for_hba(dev); | 793 | se_release_device_for_hba(dev); |
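The __releases()/__acquires() annotation added to core_release_port() documents that the function drops and re-takes a spinlock its caller holds, so sparse ("make C=1") can verify lock balance at call sites. A hedged sketch of the same shape, with illustrative names (demo_requeue, lock):

#include <linux/spinlock.h>

static void demo_requeue(spinlock_t *lock)
	__releases(lock) __acquires(lock)
{
	spin_unlock(lock);
	/* ... work that must run without the lock held ... */
	spin_lock(lock);
}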
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index b65d1c8e774..07ab5a3bb8e 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -4,10 +4,10 @@ | |||
4 | * This file contains generic fabric module configfs infrastructure for | 4 | * This file contains generic fabric module configfs infrastructure for |
5 | * TCM v4.x code | 5 | * TCM v4.x code |
6 | * | 6 | * |
7 | * Copyright (c) 2010 Rising Tide Systems | 7 | * Copyright (c) 2010,2011 Rising Tide Systems |
8 | * Copyright (c) 2010 Linux-iSCSI.org | 8 | * Copyright (c) 2010,2011 Linux-iSCSI.org |
9 | * | 9 | * |
10 | * Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org> | 10 | * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org> |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License as published by | 13 | * it under the terms of the GNU General Public License as published by |
@@ -48,6 +48,7 @@ | |||
48 | #include "target_core_alua.h" | 48 | #include "target_core_alua.h" |
49 | #include "target_core_hba.h" | 49 | #include "target_core_hba.h" |
50 | #include "target_core_pr.h" | 50 | #include "target_core_pr.h" |
51 | #include "target_core_stat.h" | ||
51 | 52 | ||
52 | #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ | 53 | #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ |
53 | static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ | 54 | static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ |
@@ -241,6 +242,32 @@ TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL, | |||
241 | 242 | ||
242 | /* End of tfc_tpg_mappedlun_cit */ | 243 | /* End of tfc_tpg_mappedlun_cit */ |
243 | 244 | ||
245 | /* Start of tfc_tpg_mappedlun_stat_cit */ | ||
246 | |||
247 | static struct config_group *target_core_mappedlun_stat_mkdir( | ||
248 | struct config_group *group, | ||
249 | const char *name) | ||
250 | { | ||
251 | return ERR_PTR(-ENOSYS); | ||
252 | } | ||
253 | |||
254 | static void target_core_mappedlun_stat_rmdir( | ||
255 | struct config_group *group, | ||
256 | struct config_item *item) | ||
257 | { | ||
258 | return; | ||
259 | } | ||
260 | |||
261 | static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = { | ||
262 | .make_group = target_core_mappedlun_stat_mkdir, | ||
263 | .drop_item = target_core_mappedlun_stat_rmdir, | ||
264 | }; | ||
265 | |||
266 | TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops, | ||
267 | NULL); | ||
268 | |||
269 | /* End of tfc_tpg_mappedlun_stat_cit */ | ||
270 | |||
244 | /* Start of tfc_tpg_nacl_attrib_cit */ | 271 | /* Start of tfc_tpg_nacl_attrib_cit */ |
245 | 272 | ||
246 | CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group); | 273 | CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group); |
@@ -294,6 +321,7 @@ static struct config_group *target_fabric_make_mappedlun( | |||
294 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | 321 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; |
295 | struct se_lun_acl *lacl; | 322 | struct se_lun_acl *lacl; |
296 | struct config_item *acl_ci; | 323 | struct config_item *acl_ci; |
324 | struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; | ||
297 | char *buf; | 325 | char *buf; |
298 | unsigned long mapped_lun; | 326 | unsigned long mapped_lun; |
299 | int ret = 0; | 327 | int ret = 0; |
@@ -330,15 +358,42 @@ static struct config_group *target_fabric_make_mappedlun( | |||
330 | 358 | ||
331 | lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, | 359 | lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, |
332 | config_item_name(acl_ci), &ret); | 360 | config_item_name(acl_ci), &ret); |
333 | if (!(lacl)) | 361 | if (!(lacl)) { |
362 | ret = -EINVAL; | ||
334 | goto out; | 363 | goto out; |
364 | } | ||
365 | |||
366 | lacl_cg = &lacl->se_lun_group; | ||
367 | lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | ||
368 | GFP_KERNEL); | ||
369 | if (!lacl_cg->default_groups) { | ||
370 | printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n"); | ||
371 | ret = -ENOMEM; | ||
372 | goto out; | ||
373 | } | ||
335 | 374 | ||
336 | config_group_init_type_name(&lacl->se_lun_group, name, | 375 | config_group_init_type_name(&lacl->se_lun_group, name, |
337 | &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); | 376 | &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); |
377 | config_group_init_type_name(&lacl->ml_stat_grps.stat_group, | ||
378 | "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit); | ||
379 | lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; | ||
380 | lacl_cg->default_groups[1] = NULL; | ||
381 | |||
382 | ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; | ||
383 | ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, | ||
384 | GFP_KERNEL); | ||
385 | if (!ml_stat_grp->default_groups) { | ||
386 | printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n"); | ||
387 | ret = -ENOMEM; | ||
388 | goto out; | ||
389 | } | ||
390 | target_stat_setup_mappedlun_default_groups(lacl); | ||
338 | 391 | ||
339 | kfree(buf); | 392 | kfree(buf); |
340 | return &lacl->se_lun_group; | 393 | return &lacl->se_lun_group; |
341 | out: | 394 | out: |
395 | if (lacl_cg) | ||
396 | kfree(lacl_cg->default_groups); | ||
342 | kfree(buf); | 397 | kfree(buf); |
343 | return ERR_PTR(ret); | 398 | return ERR_PTR(ret); |
344 | } | 399 | } |
@@ -347,6 +402,28 @@ static void target_fabric_drop_mappedlun( | |||
347 | struct config_group *group, | 402 | struct config_group *group, |
348 | struct config_item *item) | 403 | struct config_item *item) |
349 | { | 404 | { |
405 | struct se_lun_acl *lacl = container_of(to_config_group(item), | ||
406 | struct se_lun_acl, se_lun_group); | ||
407 | struct config_item *df_item; | ||
408 | struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; | ||
409 | int i; | ||
410 | |||
411 | ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; | ||
412 | for (i = 0; ml_stat_grp->default_groups[i]; i++) { | ||
413 | df_item = &ml_stat_grp->default_groups[i]->cg_item; | ||
414 | ml_stat_grp->default_groups[i] = NULL; | ||
415 | config_item_put(df_item); | ||
416 | } | ||
417 | kfree(ml_stat_grp->default_groups); | ||
418 | |||
419 | lacl_cg = &lacl->se_lun_group; | ||
420 | for (i = 0; lacl_cg->default_groups[i]; i++) { | ||
421 | df_item = &lacl_cg->default_groups[i]->cg_item; | ||
422 | lacl_cg->default_groups[i] = NULL; | ||
423 | config_item_put(df_item); | ||
424 | } | ||
425 | kfree(lacl_cg->default_groups); | ||
426 | |||
350 | config_item_put(item); | 427 | config_item_put(item); |
351 | } | 428 | } |
352 | 429 | ||
@@ -376,6 +453,15 @@ TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops, | |||
376 | 453 | ||
377 | /* End of tfc_tpg_nacl_base_cit */ | 454 | /* End of tfc_tpg_nacl_base_cit */ |
378 | 455 | ||
456 | /* Start of tfc_node_fabric_stats_cit */ | ||
457 | /* | ||
458 | * This is used as a placeholder for struct se_node_acl->acl_fabric_stat_group | ||
459 | * to allow fabrics access to ->acl_fabric_stat_group->default_groups[] | ||
460 | */ | ||
461 | TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL); | ||
462 | |||
463 | /* End of tfc_node_fabric_stats_cit */ | ||
464 | |||
379 | /* Start of tfc_tpg_nacl_cit */ | 465 | /* Start of tfc_tpg_nacl_cit */ |
380 | 466 | ||
381 | static struct config_group *target_fabric_make_nodeacl( | 467 | static struct config_group *target_fabric_make_nodeacl( |
@@ -402,7 +488,8 @@ static struct config_group *target_fabric_make_nodeacl( | |||
402 | nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group; | 488 | nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group; |
403 | nacl_cg->default_groups[1] = &se_nacl->acl_auth_group; | 489 | nacl_cg->default_groups[1] = &se_nacl->acl_auth_group; |
404 | nacl_cg->default_groups[2] = &se_nacl->acl_param_group; | 490 | nacl_cg->default_groups[2] = &se_nacl->acl_param_group; |
405 | nacl_cg->default_groups[3] = NULL; | 491 | nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group; |
492 | nacl_cg->default_groups[4] = NULL; | ||
406 | 493 | ||
407 | config_group_init_type_name(&se_nacl->acl_group, name, | 494 | config_group_init_type_name(&se_nacl->acl_group, name, |
408 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); | 495 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); |
@@ -412,6 +499,9 @@ static struct config_group *target_fabric_make_nodeacl( | |||
412 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); | 499 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); |
413 | config_group_init_type_name(&se_nacl->acl_param_group, "param", | 500 | config_group_init_type_name(&se_nacl->acl_param_group, "param", |
414 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); | 501 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); |
502 | config_group_init_type_name(&se_nacl->acl_fabric_stat_group, | ||
503 | "fabric_statistics", | ||
504 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit); | ||
415 | 505 | ||
416 | return &se_nacl->acl_group; | 506 | return &se_nacl->acl_group; |
417 | } | 507 | } |
@@ -758,6 +848,31 @@ TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_at | |||
758 | 848 | ||
759 | /* End of tfc_tpg_port_cit */ | 849 | /* End of tfc_tpg_port_cit */ |
760 | 850 | ||
851 | /* Start of tfc_tpg_port_stat_cit */ | ||
852 | |||
853 | static struct config_group *target_core_port_stat_mkdir( | ||
854 | struct config_group *group, | ||
855 | const char *name) | ||
856 | { | ||
857 | return ERR_PTR(-ENOSYS); | ||
858 | } | ||
859 | |||
860 | static void target_core_port_stat_rmdir( | ||
861 | struct config_group *group, | ||
862 | struct config_item *item) | ||
863 | { | ||
864 | return; | ||
865 | } | ||
866 | |||
867 | static struct configfs_group_operations target_fabric_port_stat_group_ops = { | ||
868 | .make_group = target_core_port_stat_mkdir, | ||
869 | .drop_item = target_core_port_stat_rmdir, | ||
870 | }; | ||
871 | |||
872 | TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL); | ||
873 | |||
874 | /* End of tfc_tpg_port_stat_cit */ | ||
875 | |||
761 | /* Start of tfc_tpg_lun_cit */ | 876 | /* Start of tfc_tpg_lun_cit */ |
762 | 877 | ||
763 | static struct config_group *target_fabric_make_lun( | 878 | static struct config_group *target_fabric_make_lun( |
@@ -768,7 +883,9 @@ static struct config_group *target_fabric_make_lun( | |||
768 | struct se_portal_group *se_tpg = container_of(group, | 883 | struct se_portal_group *se_tpg = container_of(group, |
769 | struct se_portal_group, tpg_lun_group); | 884 | struct se_portal_group, tpg_lun_group); |
770 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | 885 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; |
886 | struct config_group *lun_cg = NULL, *port_stat_grp = NULL; | ||
771 | unsigned long unpacked_lun; | 887 | unsigned long unpacked_lun; |
888 | int errno; | ||
772 | 889 | ||
773 | if (strstr(name, "lun_") != name) { | 890 | if (strstr(name, "lun_") != name) { |
774 | printk(KERN_ERR "Unable to locate \'_\" in" | 891 | printk(KERN_ERR "Unable to locate \'_\" in" |
@@ -782,16 +899,64 @@ static struct config_group *target_fabric_make_lun( | |||
782 | if (!(lun)) | 899 | if (!(lun)) |
783 | return ERR_PTR(-EINVAL); | 900 | return ERR_PTR(-EINVAL); |
784 | 901 | ||
902 | lun_cg = &lun->lun_group; | ||
903 | lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, | ||
904 | GFP_KERNEL); | ||
905 | if (!lun_cg->default_groups) { | ||
906 | printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n"); | ||
907 | return ERR_PTR(-ENOMEM); | ||
908 | } | ||
909 | |||
785 | config_group_init_type_name(&lun->lun_group, name, | 910 | config_group_init_type_name(&lun->lun_group, name, |
786 | &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); | 911 | &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); |
912 | config_group_init_type_name(&lun->port_stat_grps.stat_group, | ||
913 | "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit); | ||
914 | lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; | ||
915 | lun_cg->default_groups[1] = NULL; | ||
916 | |||
917 | port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; | ||
918 | port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, | ||
919 | GFP_KERNEL); | ||
920 | if (!port_stat_grp->default_groups) { | ||
921 | printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n"); | ||
922 | errno = -ENOMEM; | ||
923 | goto out; | ||
924 | } | ||
925 | target_stat_setup_port_default_groups(lun); | ||
787 | 926 | ||
788 | return &lun->lun_group; | 927 | return &lun->lun_group; |
928 | out: | ||
929 | if (lun_cg) | ||
930 | kfree(lun_cg->default_groups); | ||
931 | return ERR_PTR(errno); | ||
789 | } | 932 | } |
790 | 933 | ||
791 | static void target_fabric_drop_lun( | 934 | static void target_fabric_drop_lun( |
792 | struct config_group *group, | 935 | struct config_group *group, |
793 | struct config_item *item) | 936 | struct config_item *item) |
794 | { | 937 | { |
938 | struct se_lun *lun = container_of(to_config_group(item), | ||
939 | struct se_lun, lun_group); | ||
940 | struct config_item *df_item; | ||
941 | struct config_group *lun_cg, *port_stat_grp; | ||
942 | int i; | ||
943 | |||
944 | port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; | ||
945 | for (i = 0; port_stat_grp->default_groups[i]; i++) { | ||
946 | df_item = &port_stat_grp->default_groups[i]->cg_item; | ||
947 | port_stat_grp->default_groups[i] = NULL; | ||
948 | config_item_put(df_item); | ||
949 | } | ||
950 | kfree(port_stat_grp->default_groups); | ||
951 | |||
952 | lun_cg = &lun->lun_group; | ||
953 | for (i = 0; lun_cg->default_groups[i]; i++) { | ||
954 | df_item = &lun_cg->default_groups[i]->cg_item; | ||
955 | lun_cg->default_groups[i] = NULL; | ||
956 | config_item_put(df_item); | ||
957 | } | ||
958 | kfree(lun_cg->default_groups); | ||
959 | |||
795 | config_item_put(item); | 960 | config_item_put(item); |
796 | } | 961 | } |
797 | 962 | ||
@@ -946,6 +1111,15 @@ TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops, | |||
946 | 1111 | ||
947 | /* End of tfc_tpg_cit */ | 1112 | /* End of tfc_tpg_cit */ |
948 | 1113 | ||
1114 | /* Start of tfc_wwn_fabric_stats_cit */ | ||
1115 | /* | ||
1116 | * This is used as a placeholder for struct se_wwn->fabric_stat_group | ||
1117 | * to allow fabrics access to ->fabric_stat_group->default_groups[] | ||
1118 | */ | ||
1119 | TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL); | ||
1120 | |||
1121 | /* End of tfc_wwn_fabric_stats_cit */ | ||
1122 | |||
949 | /* Start of tfc_wwn_cit */ | 1123 | /* Start of tfc_wwn_cit */ |
950 | 1124 | ||
951 | static struct config_group *target_fabric_make_wwn( | 1125 | static struct config_group *target_fabric_make_wwn( |
@@ -966,8 +1140,17 @@ static struct config_group *target_fabric_make_wwn( | |||
966 | return ERR_PTR(-EINVAL); | 1140 | return ERR_PTR(-EINVAL); |
967 | 1141 | ||
968 | wwn->wwn_tf = tf; | 1142 | wwn->wwn_tf = tf; |
1143 | /* | ||
1144 | * Setup default groups from pre-allocated wwn->wwn_default_groups | ||
1145 | */ | ||
1146 | wwn->wwn_group.default_groups = wwn->wwn_default_groups; | ||
1147 | wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group; | ||
1148 | wwn->wwn_group.default_groups[1] = NULL; | ||
1149 | |||
969 | config_group_init_type_name(&wwn->wwn_group, name, | 1150 | config_group_init_type_name(&wwn->wwn_group, name, |
970 | &TF_CIT_TMPL(tf)->tfc_tpg_cit); | 1151 | &TF_CIT_TMPL(tf)->tfc_tpg_cit); |
1152 | config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", | ||
1153 | &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit); | ||
971 | 1154 | ||
972 | return &wwn->wwn_group; | 1155 | return &wwn->wwn_group; |
973 | } | 1156 | } |
@@ -976,6 +1159,18 @@ static void target_fabric_drop_wwn( | |||
976 | struct config_group *group, | 1159 | struct config_group *group, |
977 | struct config_item *item) | 1160 | struct config_item *item) |
978 | { | 1161 | { |
1162 | struct se_wwn *wwn = container_of(to_config_group(item), | ||
1163 | struct se_wwn, wwn_group); | ||
1164 | struct config_item *df_item; | ||
1165 | struct config_group *cg = &wwn->wwn_group; | ||
1166 | int i; | ||
1167 | |||
1168 | for (i = 0; cg->default_groups[i]; i++) { | ||
1169 | df_item = &cg->default_groups[i]->cg_item; | ||
1170 | cg->default_groups[i] = NULL; | ||
1171 | config_item_put(df_item); | ||
1172 | } | ||
1173 | |||
979 | config_item_put(item); | 1174 | config_item_put(item); |
980 | } | 1175 | } |
981 | 1176 | ||
@@ -1015,9 +1210,11 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf) | |||
1015 | { | 1210 | { |
1016 | target_fabric_setup_discovery_cit(tf); | 1211 | target_fabric_setup_discovery_cit(tf); |
1017 | target_fabric_setup_wwn_cit(tf); | 1212 | target_fabric_setup_wwn_cit(tf); |
1213 | target_fabric_setup_wwn_fabric_stats_cit(tf); | ||
1018 | target_fabric_setup_tpg_cit(tf); | 1214 | target_fabric_setup_tpg_cit(tf); |
1019 | target_fabric_setup_tpg_base_cit(tf); | 1215 | target_fabric_setup_tpg_base_cit(tf); |
1020 | target_fabric_setup_tpg_port_cit(tf); | 1216 | target_fabric_setup_tpg_port_cit(tf); |
1217 | target_fabric_setup_tpg_port_stat_cit(tf); | ||
1021 | target_fabric_setup_tpg_lun_cit(tf); | 1218 | target_fabric_setup_tpg_lun_cit(tf); |
1022 | target_fabric_setup_tpg_np_cit(tf); | 1219 | target_fabric_setup_tpg_np_cit(tf); |
1023 | target_fabric_setup_tpg_np_base_cit(tf); | 1220 | target_fabric_setup_tpg_np_base_cit(tf); |
@@ -1028,7 +1225,9 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf) | |||
1028 | target_fabric_setup_tpg_nacl_attrib_cit(tf); | 1225 | target_fabric_setup_tpg_nacl_attrib_cit(tf); |
1029 | target_fabric_setup_tpg_nacl_auth_cit(tf); | 1226 | target_fabric_setup_tpg_nacl_auth_cit(tf); |
1030 | target_fabric_setup_tpg_nacl_param_cit(tf); | 1227 | target_fabric_setup_tpg_nacl_param_cit(tf); |
1228 | target_fabric_setup_tpg_nacl_stat_cit(tf); | ||
1031 | target_fabric_setup_tpg_mappedlun_cit(tf); | 1229 | target_fabric_setup_tpg_mappedlun_cit(tf); |
1230 | target_fabric_setup_tpg_mappedlun_stat_cit(tf); | ||
1032 | 1231 | ||
1033 | return 0; | 1232 | return 0; |
1034 | } | 1233 | } |
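This file repeats one lifecycle for luns, mapped luns, and wwns: allocate a NULL-terminated default_groups array, populate it alongside config_group_init_type_name(), and on drop walk the array, clearing each slot before config_item_put(), then free the array. A condensed sketch (setup/teardown and child are illustrative names; note the patch sizes each slot as sizeof(struct config_group), which works but over-allocates, since pointer-sized slots as sketched here would suffice):

#include <linux/configfs.h>
#include <linux/slab.h>

static int setup(struct config_group *cg, struct config_group *child)
{
	/* one child plus the NULL terminator */
	cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
				GFP_KERNEL);
	if (!cg->default_groups)
		return -ENOMEM;
	cg->default_groups[0] = child;
	cg->default_groups[1] = NULL;
	return 0;
}

static void teardown(struct config_group *cg)
{
	struct config_item *df_item;
	int i;

	for (i = 0; cg->default_groups[i]; i++) {
		df_item = &cg->default_groups[i]->cg_item;
		cg->default_groups[i] = NULL;	/* unlink before dropping the ref */
		config_item_put(df_item);
	}
	kfree(cg->default_groups);
}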
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index a3c695adabe..d57ad672677 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <target/target_core_base.h> | 34 | #include <target/target_core_base.h> |
35 | #include <target/target_core_device.h> | 35 | #include <target/target_core_device.h> |
36 | #include <target/target_core_transport.h> | 36 | #include <target/target_core_transport.h> |
37 | #include <target/target_core_fabric_lib.h> | ||
37 | #include <target/target_core_fabric_ops.h> | 38 | #include <target/target_core_fabric_ops.h> |
38 | #include <target/target_core_configfs.h> | 39 | #include <target/target_core_configfs.h> |
39 | 40 | ||
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 190ca8ac249..02f553aef43 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -134,7 +134,7 @@ static struct se_device *fd_create_virtdevice( | |||
134 | mm_segment_t old_fs; | 134 | mm_segment_t old_fs; |
135 | struct file *file; | 135 | struct file *file; |
136 | struct inode *inode = NULL; | 136 | struct inode *inode = NULL; |
137 | int dev_flags = 0, flags; | 137 | int dev_flags = 0, flags, ret = -EINVAL; |
138 | 138 | ||
139 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | 139 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); |
140 | 140 | ||
@@ -146,6 +146,7 @@ static struct se_device *fd_create_virtdevice( | |||
146 | if (IS_ERR(dev_p)) { | 146 | if (IS_ERR(dev_p)) { |
147 | printk(KERN_ERR "getname(%s) failed: %lu\n", | 147 | printk(KERN_ERR "getname(%s) failed: %lu\n", |
148 | fd_dev->fd_dev_name, IS_ERR(dev_p)); | 148 | fd_dev->fd_dev_name, IS_ERR(dev_p)); |
149 | ret = PTR_ERR(dev_p); | ||
149 | goto fail; | 150 | goto fail; |
150 | } | 151 | } |
151 | #if 0 | 152 | #if 0 |
@@ -165,8 +166,12 @@ static struct se_device *fd_create_virtdevice( | |||
165 | flags |= O_SYNC; | 166 | flags |= O_SYNC; |
166 | 167 | ||
167 | file = filp_open(dev_p, flags, 0600); | 168 | file = filp_open(dev_p, flags, 0600); |
168 | 169 | if (IS_ERR(file)) { | |
169 | if (IS_ERR(file) || !file || !file->f_dentry) { | 170 | printk(KERN_ERR "filp_open(%s) failed\n", dev_p); |
171 | ret = PTR_ERR(file); | ||
172 | goto fail; | ||
173 | } | ||
174 | if (!file || !file->f_dentry) { | ||
170 | printk(KERN_ERR "filp_open(%s) failed\n", dev_p); | 175 | printk(KERN_ERR "filp_open(%s) failed\n", dev_p); |
171 | goto fail; | 176 | goto fail; |
172 | } | 177 | } |
@@ -241,7 +246,7 @@ fail: | |||
241 | fd_dev->fd_file = NULL; | 246 | fd_dev->fd_file = NULL; |
242 | } | 247 | } |
243 | putname(dev_p); | 248 | putname(dev_p); |
244 | return NULL; | 249 | return ERR_PTR(ret); |
245 | } | 250 | } |
246 | 251 | ||
247 | /* fd_free_device(): (Part of se_subsystem_api_t template) | 252 | /* fd_free_device(): (Part of se_subsystem_api_t template) |
@@ -509,7 +514,7 @@ enum { | |||
509 | static match_table_t tokens = { | 514 | static match_table_t tokens = { |
510 | {Opt_fd_dev_name, "fd_dev_name=%s"}, | 515 | {Opt_fd_dev_name, "fd_dev_name=%s"}, |
511 | {Opt_fd_dev_size, "fd_dev_size=%s"}, | 516 | {Opt_fd_dev_size, "fd_dev_size=%s"}, |
512 | {Opt_fd_buffered_io, "fd_buffered_id=%d"}, | 517 | {Opt_fd_buffered_io, "fd_buffered_io=%d"}, |
513 | {Opt_err, NULL} | 518 | {Opt_err, NULL} |
514 | }; | 519 | }; |
515 | 520 | ||
@@ -536,15 +541,26 @@ static ssize_t fd_set_configfs_dev_params( | |||
536 | token = match_token(ptr, tokens, args); | 541 | token = match_token(ptr, tokens, args); |
537 | switch (token) { | 542 | switch (token) { |
538 | case Opt_fd_dev_name: | 543 | case Opt_fd_dev_name: |
544 | arg_p = match_strdup(&args[0]); | ||
545 | if (!arg_p) { | ||
546 | ret = -ENOMEM; | ||
547 | break; | ||
548 | } | ||
539 | snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, | 549 | snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, |
540 | "%s", match_strdup(&args[0])); | 550 | "%s", arg_p); |
551 | kfree(arg_p); | ||
541 | printk(KERN_INFO "FILEIO: Referencing Path: %s\n", | 552 | printk(KERN_INFO "FILEIO: Referencing Path: %s\n", |
542 | fd_dev->fd_dev_name); | 553 | fd_dev->fd_dev_name); |
543 | fd_dev->fbd_flags |= FBDF_HAS_PATH; | 554 | fd_dev->fbd_flags |= FBDF_HAS_PATH; |
544 | break; | 555 | break; |
545 | case Opt_fd_dev_size: | 556 | case Opt_fd_dev_size: |
546 | arg_p = match_strdup(&args[0]); | 557 | arg_p = match_strdup(&args[0]); |
558 | if (!arg_p) { | ||
559 | ret = -ENOMEM; | ||
560 | break; | ||
561 | } | ||
547 | ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); | 562 | ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); |
563 | kfree(arg_p); | ||
548 | if (ret < 0) { | 564 | if (ret < 0) { |
549 | printk(KERN_ERR "strict_strtoull() failed for" | 565 | printk(KERN_ERR "strict_strtoull() failed for" |
550 | " fd_dev_size=\n"); | 566 | " fd_dev_size=\n"); |
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index 6ec51cbc018..0b8f8da8901 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c | |||
@@ -151,19 +151,8 @@ out_free_hba: | |||
151 | int | 151 | int |
152 | core_delete_hba(struct se_hba *hba) | 152 | core_delete_hba(struct se_hba *hba) |
153 | { | 153 | { |
154 | struct se_device *dev, *dev_tmp; | 154 | if (!list_empty(&hba->hba_dev_list)) |
155 | 155 | dump_stack(); | |
156 | spin_lock(&hba->device_lock); | ||
157 | list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) { | ||
158 | |||
159 | se_clear_dev_ports(dev); | ||
160 | spin_unlock(&hba->device_lock); | ||
161 | |||
162 | se_release_device_for_hba(dev); | ||
163 | |||
164 | spin_lock(&hba->device_lock); | ||
165 | } | ||
166 | spin_unlock(&hba->device_lock); | ||
167 | 156 | ||
168 | hba->transport->detach_hba(hba); | 157 | hba->transport->detach_hba(hba); |
169 | 158 | ||
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index eb0afec046e..86639004af9 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -129,10 +129,11 @@ static struct se_device *iblock_create_virtdevice( | |||
129 | struct request_queue *q; | 129 | struct request_queue *q; |
130 | struct queue_limits *limits; | 130 | struct queue_limits *limits; |
131 | u32 dev_flags = 0; | 131 | u32 dev_flags = 0; |
132 | int ret = -EINVAL; | ||
132 | 133 | ||
133 | if (!(ib_dev)) { | 134 | if (!(ib_dev)) { |
134 | printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); | 135 | printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); |
135 | return 0; | 136 | return ERR_PTR(ret); |
136 | } | 137 | } |
137 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | 138 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); |
138 | /* | 139 | /* |
@@ -141,7 +142,7 @@ static struct se_device *iblock_create_virtdevice( | |||
141 | ib_dev->ibd_bio_set = bioset_create(32, 64); | 142 | ib_dev->ibd_bio_set = bioset_create(32, 64); |
142 | if (!(ib_dev->ibd_bio_set)) { | 143 | if (!(ib_dev->ibd_bio_set)) { |
143 | printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); | 144 | printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); |
144 | return 0; | 145 | return ERR_PTR(-ENOMEM); |
145 | } | 146 | } |
146 | printk(KERN_INFO "IBLOCK: Created bio_set()\n"); | 147 | printk(KERN_INFO "IBLOCK: Created bio_set()\n"); |
147 | /* | 148 | /* |
@@ -153,8 +154,10 @@ static struct se_device *iblock_create_virtdevice( | |||
153 | 154 | ||
154 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, | 155 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, |
155 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); | 156 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); |
156 | if (IS_ERR(bd)) | 157 | if (IS_ERR(bd)) { |
158 | ret = PTR_ERR(bd); | ||
157 | goto failed; | 159 | goto failed; |
160 | } | ||
158 | /* | 161 | /* |
159 | * Setup the local scope queue_limits from struct request_queue->limits | 162 | * Setup the local scope queue_limits from struct request_queue->limits |
160 | * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. | 163 | * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. |
@@ -184,9 +187,7 @@ static struct se_device *iblock_create_virtdevice( | |||
184 | * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM | 187 | * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM |
185 | * in ATA and we need to set TPE=1 | 188 | * in ATA and we need to set TPE=1 |
186 | */ | 189 | */ |
187 | if (blk_queue_discard(bdev_get_queue(bd))) { | 190 | if (blk_queue_discard(q)) { |
188 | struct request_queue *q = bdev_get_queue(bd); | ||
189 | |||
190 | DEV_ATTRIB(dev)->max_unmap_lba_count = | 191 | DEV_ATTRIB(dev)->max_unmap_lba_count = |
191 | q->limits.max_discard_sectors; | 192 | q->limits.max_discard_sectors; |
192 | /* | 193 | /* |
@@ -212,7 +213,7 @@ failed: | |||
212 | ib_dev->ibd_bd = NULL; | 213 | ib_dev->ibd_bd = NULL; |
213 | ib_dev->ibd_major = 0; | 214 | ib_dev->ibd_major = 0; |
214 | ib_dev->ibd_minor = 0; | 215 | ib_dev->ibd_minor = 0; |
215 | return NULL; | 216 | return ERR_PTR(ret); |
216 | } | 217 | } |
217 | 218 | ||
218 | static void iblock_free_device(void *p) | 219 | static void iblock_free_device(void *p) |
@@ -467,7 +468,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, | |||
467 | const char *page, ssize_t count) | 468 | const char *page, ssize_t count) |
468 | { | 469 | { |
469 | struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; | 470 | struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; |
470 | char *orig, *ptr, *opts; | 471 | char *orig, *ptr, *arg_p, *opts; |
471 | substring_t args[MAX_OPT_ARGS]; | 472 | substring_t args[MAX_OPT_ARGS]; |
472 | int ret = 0, arg, token; | 473 | int ret = 0, arg, token; |
473 | 474 | ||
@@ -490,9 +491,14 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, | |||
490 | ret = -EEXIST; | 491 | ret = -EEXIST; |
491 | goto out; | 492 | goto out; |
492 | } | 493 | } |
493 | 494 | arg_p = match_strdup(&args[0]); | |
494 | ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, | 495 | if (!arg_p) { |
495 | "%s", match_strdup(&args[0])); | 496 | ret = -ENOMEM; |
497 | break; | ||
498 | } | ||
499 | snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, | ||
500 | "%s", arg_p); | ||
501 | kfree(arg_p); | ||
496 | printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", | 502 | printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", |
497 | ib_dev->ibd_udev_path); | 503 | ib_dev->ibd_udev_path); |
498 | ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; | 504 | ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; |
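iblock_create_virtdevice() now threads a real errno to its failure exit instead of returning bare NULL: -ENOMEM when bioset_create() fails, and PTR_ERR() from blkdev_get_by_path(). A reduced sketch of the pattern, with illustrative names (struct demo_dev, demo_open):

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/err.h>

struct demo_dev {
	struct block_device *bd;
};

static struct demo_dev *demo_open(const char *path, void *holder)
{
	struct demo_dev *d;
	struct block_device *bd;
	int ret;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return ERR_PTR(-ENOMEM);

	bd = blkdev_get_by_path(path, FMODE_WRITE|FMODE_READ|FMODE_EXCL, d);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);	/* e.g. -ENOENT or -EBUSY, not a bare NULL */
		goto failed;
	}
	d->bd = bd;
	return d;
failed:
	kfree(d);
	return ERR_PTR(ret);
}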
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 5a9d2ba4b60..7ff6a35f26a 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -441,6 +441,7 @@ static struct se_device *pscsi_create_type_disk( | |||
441 | struct pscsi_dev_virt *pdv, | 441 | struct pscsi_dev_virt *pdv, |
442 | struct se_subsystem_dev *se_dev, | 442 | struct se_subsystem_dev *se_dev, |
443 | struct se_hba *hba) | 443 | struct se_hba *hba) |
444 | __releases(sh->host_lock) | ||
444 | { | 445 | { |
445 | struct se_device *dev; | 446 | struct se_device *dev; |
446 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; | 447 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; |
@@ -488,6 +489,7 @@ static struct se_device *pscsi_create_type_rom( | |||
488 | struct pscsi_dev_virt *pdv, | 489 | struct pscsi_dev_virt *pdv, |
489 | struct se_subsystem_dev *se_dev, | 490 | struct se_subsystem_dev *se_dev, |
490 | struct se_hba *hba) | 491 | struct se_hba *hba) |
492 | __releases(sh->host_lock) | ||
491 | { | 493 | { |
492 | struct se_device *dev; | 494 | struct se_device *dev; |
493 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; | 495 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; |
@@ -522,6 +524,7 @@ static struct se_device *pscsi_create_type_other( | |||
522 | struct pscsi_dev_virt *pdv, | 524 | struct pscsi_dev_virt *pdv, |
523 | struct se_subsystem_dev *se_dev, | 525 | struct se_subsystem_dev *se_dev, |
524 | struct se_hba *hba) | 526 | struct se_hba *hba) |
527 | __releases(sh->host_lock) | ||
525 | { | 528 | { |
526 | struct se_device *dev; | 529 | struct se_device *dev; |
527 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; | 530 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; |
@@ -555,7 +558,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
555 | if (!(pdv)) { | 558 | if (!(pdv)) { |
556 | printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" | 559 | printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" |
557 | " parameter\n"); | 560 | " parameter\n"); |
558 | return NULL; | 561 | return ERR_PTR(-EINVAL); |
559 | } | 562 | } |
560 | /* | 563 | /* |
561 | * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the | 564 | * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the |
@@ -565,7 +568,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
565 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { | 568 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { |
566 | printk(KERN_ERR "pSCSI: Unable to locate struct" | 569 | printk(KERN_ERR "pSCSI: Unable to locate struct" |
567 | " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); | 570 | " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); |
568 | return NULL; | 571 | return ERR_PTR(-ENODEV); |
569 | } | 572 | } |
570 | /* | 573 | /* |
571 | * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device | 574 | * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device |
@@ -574,7 +577,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
574 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { | 577 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { |
575 | printk(KERN_ERR "pSCSI: udev_path attribute has not" | 578 | printk(KERN_ERR "pSCSI: udev_path attribute has not" |
576 | " been set before ENABLE=1\n"); | 579 | " been set before ENABLE=1\n"); |
577 | return NULL; | 580 | return ERR_PTR(-EINVAL); |
578 | } | 581 | } |
579 | /* | 582 | /* |
580 | * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID, | 583 | * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID, |
@@ -587,12 +590,12 @@ static struct se_device *pscsi_create_virtdevice( | |||
587 | printk(KERN_ERR "pSCSI: Unable to set hba_mode" | 590 | printk(KERN_ERR "pSCSI: Unable to set hba_mode" |
588 | " with active devices\n"); | 591 | " with active devices\n"); |
589 | spin_unlock(&hba->device_lock); | 592 | spin_unlock(&hba->device_lock); |
590 | return NULL; | 593 | return ERR_PTR(-EEXIST); |
591 | } | 594 | } |
592 | spin_unlock(&hba->device_lock); | 595 | spin_unlock(&hba->device_lock); |
593 | 596 | ||
594 | if (pscsi_pmode_enable_hba(hba, 1) != 1) | 597 | if (pscsi_pmode_enable_hba(hba, 1) != 1) |
595 | return NULL; | 598 | return ERR_PTR(-ENODEV); |
596 | 599 | ||
597 | legacy_mode_enable = 1; | 600 | legacy_mode_enable = 1; |
598 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; | 601 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; |
@@ -602,14 +605,14 @@ static struct se_device *pscsi_create_virtdevice( | |||
602 | if (!(sh)) { | 605 | if (!(sh)) { |
603 | printk(KERN_ERR "pSCSI: Unable to locate" | 606 | printk(KERN_ERR "pSCSI: Unable to locate" |
604 | " pdv_host_id: %d\n", pdv->pdv_host_id); | 607 | " pdv_host_id: %d\n", pdv->pdv_host_id); |
605 | return NULL; | 608 | return ERR_PTR(-ENODEV); |
606 | } | 609 | } |
607 | } | 610 | } |
608 | } else { | 611 | } else { |
609 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { | 612 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { |
610 | printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" | 613 | printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" |
611 | " struct Scsi_Host exists\n"); | 614 | " struct Scsi_Host exists\n"); |
612 | return NULL; | 615 | return ERR_PTR(-EEXIST); |
613 | } | 616 | } |
614 | } | 617 | } |
615 | 618 | ||
@@ -644,7 +647,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
644 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; | 647 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; |
645 | } | 648 | } |
646 | pdv->pdv_sd = NULL; | 649 | pdv->pdv_sd = NULL; |
647 | return NULL; | 650 | return ERR_PTR(-ENODEV); |
648 | } | 651 | } |
649 | return dev; | 652 | return dev; |
650 | } | 653 | } |
@@ -660,7 +663,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
660 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; | 663 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; |
661 | } | 664 | } |
662 | 665 | ||
663 | return NULL; | 666 | return ERR_PTR(-ENODEV); |
664 | } | 667 | } |
665 | 668 | ||
666 | /* pscsi_free_device(): (Part of se_subsystem_api_t template) | 669 | /* pscsi_free_device(): (Part of se_subsystem_api_t template) |
@@ -816,6 +819,7 @@ pscsi_alloc_task(struct se_cmd *cmd) | |||
816 | if (!(pt->pscsi_cdb)) { | 819 | if (!(pt->pscsi_cdb)) { |
817 | printk(KERN_ERR "pSCSI: Unable to allocate extended" | 820 | printk(KERN_ERR "pSCSI: Unable to allocate extended" |
818 | " pt->pscsi_cdb\n"); | 821 | " pt->pscsi_cdb\n"); |
822 | kfree(pt); | ||
819 | return NULL; | 823 | return NULL; |
820 | } | 824 | } |
821 | } else | 825 | } else |
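The one-line pscsi_alloc_task() hunk fixes a partial-allocation leak: once the extended CDB buffer fails, the already-allocated task must be freed before returning NULL. The general shape, sketched with illustrative names:

#include <linux/slab.h>

struct demo_task {
	unsigned char *cdb;
};

static struct demo_task *demo_alloc_task(size_t cdb_len)
{
	struct demo_task *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;
	t->cdb = kzalloc(cdb_len, GFP_KERNEL);
	if (!t->cdb) {
		kfree(t);	/* don't leak the container on partial failure */
		return NULL;
	}
	return t;
}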
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 8dc6d74c1d4..7837dd365a9 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -150,7 +150,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
150 | if (rd_dev->rd_page_count <= 0) { | 150 | if (rd_dev->rd_page_count <= 0) { |
151 | printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n", | 151 | printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n", |
152 | rd_dev->rd_page_count); | 152 | rd_dev->rd_page_count); |
153 | return -1; | 153 | return -EINVAL; |
154 | } | 154 | } |
155 | total_sg_needed = rd_dev->rd_page_count; | 155 | total_sg_needed = rd_dev->rd_page_count; |
156 | 156 | ||
@@ -160,7 +160,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
160 | if (!(sg_table)) { | 160 | if (!(sg_table)) { |
161 | printk(KERN_ERR "Unable to allocate memory for Ramdisk" | 161 | printk(KERN_ERR "Unable to allocate memory for Ramdisk" |
162 | " scatterlist tables\n"); | 162 | " scatterlist tables\n"); |
163 | return -1; | 163 | return -ENOMEM; |
164 | } | 164 | } |
165 | 165 | ||
166 | rd_dev->sg_table_array = sg_table; | 166 | rd_dev->sg_table_array = sg_table; |
@@ -175,7 +175,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
175 | if (!(sg)) { | 175 | if (!(sg)) { |
176 | printk(KERN_ERR "Unable to allocate scatterlist array" | 176 | printk(KERN_ERR "Unable to allocate scatterlist array" |
177 | " for struct rd_dev\n"); | 177 | " for struct rd_dev\n"); |
178 | return -1; | 178 | return -ENOMEM; |
179 | } | 179 | } |
180 | 180 | ||
181 | sg_init_table((struct scatterlist *)&sg[0], sg_per_table); | 181 | sg_init_table((struct scatterlist *)&sg[0], sg_per_table); |
@@ -191,7 +191,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) | |||
191 | if (!(pg)) { | 191 | if (!(pg)) { |
192 | printk(KERN_ERR "Unable to allocate scatterlist" | 192 | printk(KERN_ERR "Unable to allocate scatterlist" |
193 | " pages for struct rd_dev_sg_table\n"); | 193 | " pages for struct rd_dev_sg_table\n"); |
194 | return -1; | 194 | return -ENOMEM; |
195 | } | 195 | } |
196 | sg_assign_page(&sg[j], pg); | 196 | sg_assign_page(&sg[j], pg); |
197 | sg[j].length = PAGE_SIZE; | 197 | sg[j].length = PAGE_SIZE; |
@@ -253,12 +253,13 @@ static struct se_device *rd_create_virtdevice( | |||
253 | struct se_dev_limits dev_limits; | 253 | struct se_dev_limits dev_limits; |
254 | struct rd_dev *rd_dev = p; | 254 | struct rd_dev *rd_dev = p; |
255 | struct rd_host *rd_host = hba->hba_ptr; | 255 | struct rd_host *rd_host = hba->hba_ptr; |
256 | int dev_flags = 0; | 256 | int dev_flags = 0, ret; |
257 | char prod[16], rev[4]; | 257 | char prod[16], rev[4]; |
258 | 258 | ||
259 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | 259 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); |
260 | 260 | ||
261 | if (rd_build_device_space(rd_dev) < 0) | 261 | ret = rd_build_device_space(rd_dev); |
262 | if (ret < 0) | ||
262 | goto fail; | 263 | goto fail; |
263 | 264 | ||
264 | snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP"); | 265 | snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP"); |
@@ -292,7 +293,7 @@ static struct se_device *rd_create_virtdevice( | |||
292 | 293 | ||
293 | fail: | 294 | fail: |
294 | rd_release_device_space(rd_dev); | 295 | rd_release_device_space(rd_dev); |
295 | return NULL; | 296 | return ERR_PTR(ret); |
296 | } | 297 | } |
297 | 298 | ||
298 | static struct se_device *rd_DIRECT_create_virtdevice( | 299 | static struct se_device *rd_DIRECT_create_virtdevice( |
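The rd hunks replace bare -1 returns with -EINVAL/-ENOMEM for a concrete reason: once the caller forwards the code through ERR_PTR(), a -1 decodes as -EPERM, and userspace would see "Operation not permitted" instead of the real cause. A sketch with illustrative names (demo_build_space, demo_create):

#include <linux/slab.h>
#include <linux/err.h>

struct demo_dev {
	unsigned int page_count;
};

static int demo_build_space(struct demo_dev *d)
{
	if (!d->page_count)
		return -EINVAL;	/* bad configuration, not a permission problem */
	/* ... page allocations would return -ENOMEM on failure ... */
	return 0;
}

static struct demo_dev *demo_create(unsigned int page_count)
{
	struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);
	int ret;

	if (!d)
		return ERR_PTR(-ENOMEM);
	d->page_count = page_count;
	ret = demo_build_space(d);
	if (ret < 0) {
		kfree(d);
		return ERR_PTR(ret);	/* preserves -EINVAL vs -ENOMEM for the caller */
	}
	return d;
}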
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 13badfbaf9c..3ea19e29d8e 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h | |||
@@ -14,8 +14,6 @@ | |||
14 | #define RD_BLOCKSIZE 512 | 14 | #define RD_BLOCKSIZE 512 |
15 | #define RD_MAX_SECTORS 1024 | 15 | #define RD_MAX_SECTORS 1024 |
16 | 16 | ||
17 | extern struct kmem_cache *se_mem_cache; | ||
18 | |||
19 | /* Used in target_core_init_configfs() for virtual LUN 0 access */ | 17 | /* Used in target_core_init_configfs() for virtual LUN 0 access */ |
20 | int __init rd_module_init(void); | 18 | int __init rd_module_init(void); |
21 | void rd_module_exit(void); | 19 | void rd_module_exit(void); |
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c new file mode 100644 index 00000000000..5e3a067a747 --- /dev/null +++ b/drivers/target/target_core_stat.c | |||
@@ -0,0 +1,1810 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_stat.c | ||
3 | * | ||
4 | * Copyright (c) 2011 Rising Tide Systems | ||
5 | * Copyright (c) 2011 Linux-iSCSI.org | ||
6 | * | ||
7 | * Modern ConfigFS group context specific statistics based on original | ||
8 | * target_core_mib.c code | ||
9 | * | ||
10 | * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved. | ||
11 | * | ||
12 | * Nicholas A. Bellinger <nab@linux-iscsi.org> | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, write to the Free Software | ||
26 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
27 | * | ||
28 | ******************************************************************************/ | ||
29 | |||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/timer.h> | ||
34 | #include <linux/string.h> | ||
35 | #include <linux/version.h> | ||
36 | #include <generated/utsrelease.h> | ||
37 | #include <linux/utsname.h> | ||
38 | #include <linux/proc_fs.h> | ||
39 | #include <linux/seq_file.h> | ||
40 | #include <linux/blkdev.h> | ||
41 | #include <linux/configfs.h> | ||
42 | #include <scsi/scsi.h> | ||
43 | #include <scsi/scsi_device.h> | ||
44 | #include <scsi/scsi_host.h> | ||
45 | |||
46 | #include <target/target_core_base.h> | ||
47 | #include <target/target_core_transport.h> | ||
48 | #include <target/target_core_fabric_ops.h> | ||
49 | #include <target/target_core_configfs.h> | ||
50 | #include <target/configfs_macros.h> | ||
51 | |||
52 | #include "target_core_hba.h" | ||
53 | |||
54 | #ifndef INITIAL_JIFFIES | ||
55 | #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) | ||
56 | #endif | ||
57 | |||
58 | #define NONE "None" | ||
59 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) | ||
60 | |||
61 | #define SCSI_LU_INDEX 1 | ||
62 | #define LU_COUNT 1 | ||
63 | |||
64 | /* | ||
65 | * SCSI Device Table | ||
66 | */ | ||
67 | |||
68 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_dev, se_dev_stat_grps); | ||
69 | #define DEV_STAT_SCSI_DEV_ATTR(_name, _mode) \ | ||
70 | static struct target_stat_scsi_dev_attribute \ | ||
71 | target_stat_scsi_dev_##_name = \ | ||
72 | __CONFIGFS_EATTR(_name, _mode, \ | ||
73 | target_stat_scsi_dev_show_attr_##_name, \ | ||
74 | target_stat_scsi_dev_store_attr_##_name); | ||
75 | |||
76 | #define DEV_STAT_SCSI_DEV_ATTR_RO(_name) \ | ||
77 | static struct target_stat_scsi_dev_attribute \ | ||
78 | target_stat_scsi_dev_##_name = \ | ||
79 | __CONFIGFS_EATTR_RO(_name, \ | ||
80 | target_stat_scsi_dev_show_attr_##_name); | ||
81 | |||
82 | static ssize_t target_stat_scsi_dev_show_attr_inst( | ||
83 | struct se_dev_stat_grps *sgrps, char *page) | ||
84 | { | ||
85 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
86 | struct se_subsystem_dev, dev_stat_grps); | ||
87 | struct se_hba *hba = se_subdev->se_dev_hba; | ||
88 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
89 | |||
90 | if (!dev) | ||
91 | return -ENODEV; | ||
92 | |||
93 | return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
94 | } | ||
95 | DEV_STAT_SCSI_DEV_ATTR_RO(inst); | ||
96 | |||
97 | static ssize_t target_stat_scsi_dev_show_attr_indx( | ||
98 | struct se_dev_stat_grps *sgrps, char *page) | ||
99 | { | ||
100 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
101 | struct se_subsystem_dev, dev_stat_grps); | ||
102 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
103 | |||
104 | if (!dev) | ||
105 | return -ENODEV; | ||
106 | |||
107 | return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); | ||
108 | } | ||
109 | DEV_STAT_SCSI_DEV_ATTR_RO(indx); | ||
110 | |||
111 | static ssize_t target_stat_scsi_dev_show_attr_role( | ||
112 | struct se_dev_stat_grps *sgrps, char *page) | ||
113 | { | ||
114 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
115 | struct se_subsystem_dev, dev_stat_grps); | ||
116 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
117 | |||
118 | if (!dev) | ||
119 | return -ENODEV; | ||
120 | |||
121 | return snprintf(page, PAGE_SIZE, "Target\n"); | ||
122 | } | ||
123 | DEV_STAT_SCSI_DEV_ATTR_RO(role); | ||
124 | |||
125 | static ssize_t target_stat_scsi_dev_show_attr_ports( | ||
126 | struct se_dev_stat_grps *sgrps, char *page) | ||
127 | { | ||
128 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
129 | struct se_subsystem_dev, dev_stat_grps); | ||
130 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
131 | |||
132 | if (!dev) | ||
133 | return -ENODEV; | ||
134 | |||
135 | return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count); | ||
136 | } | ||
137 | DEV_STAT_SCSI_DEV_ATTR_RO(ports); | ||
138 | |||
139 | CONFIGFS_EATTR_OPS(target_stat_scsi_dev, se_dev_stat_grps, scsi_dev_group); | ||
140 | |||
141 | static struct configfs_attribute *target_stat_scsi_dev_attrs[] = { | ||
142 | &target_stat_scsi_dev_inst.attr, | ||
143 | &target_stat_scsi_dev_indx.attr, | ||
144 | &target_stat_scsi_dev_role.attr, | ||
145 | &target_stat_scsi_dev_ports.attr, | ||
146 | NULL, | ||
147 | }; | ||
148 | |||
149 | static struct configfs_item_operations target_stat_scsi_dev_attrib_ops = { | ||
150 | .show_attribute = target_stat_scsi_dev_attr_show, | ||
151 | .store_attribute = target_stat_scsi_dev_attr_store, | ||
152 | }; | ||
153 | |||
154 | static struct config_item_type target_stat_scsi_dev_cit = { | ||
155 | .ct_item_ops = &target_stat_scsi_dev_attrib_ops, | ||
156 | .ct_attrs = target_stat_scsi_dev_attrs, | ||
157 | .ct_owner = THIS_MODULE, | ||
158 | }; | ||
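CONFIGFS_EATTR_OPS() above generates the target_stat_scsi_dev_attr_show/_store dispatchers that the item_operations table points at. A hedged sketch of the show side, assuming the standard extended-attribute pattern of recovering the container from the config_item and forwarding to the per-attribute hook (approximate expansion, not verbatim):

static ssize_t target_stat_scsi_dev_attr_show(struct config_item *item,
		struct configfs_attribute *attr, char *page)
{
	/* scsi_dev_group is the member named in CONFIGFS_EATTR_OPS() */
	struct se_dev_stat_grps *sgrps = container_of(to_config_group(item),
			struct se_dev_stat_grps, scsi_dev_group);
	struct target_stat_scsi_dev_attribute *a = container_of(attr,
			struct target_stat_scsi_dev_attribute, attr);

	return a->show ? a->show(sgrps, page) : 0;
}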
159 | |||
160 | /* | ||
161 | * SCSI Target Device Table | ||
162 | */ | ||
163 | |||
164 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_dev, se_dev_stat_grps); | ||
165 | #define DEV_STAT_SCSI_TGT_DEV_ATTR(_name, _mode) \ | ||
166 | static struct target_stat_scsi_tgt_dev_attribute \ | ||
167 | target_stat_scsi_tgt_dev_##_name = \ | ||
168 | __CONFIGFS_EATTR(_name, _mode, \ | ||
169 | target_stat_scsi_tgt_dev_show_attr_##_name, \ | ||
170 | target_stat_scsi_tgt_dev_store_attr_##_name); | ||
171 | |||
172 | #define DEV_STAT_SCSI_TGT_DEV_ATTR_RO(_name) \ | ||
173 | static struct target_stat_scsi_tgt_dev_attribute \ | ||
174 | target_stat_scsi_tgt_dev_##_name = \ | ||
175 | __CONFIGFS_EATTR_RO(_name, \ | ||
176 | target_stat_scsi_tgt_dev_show_attr_##_name); | ||
177 | |||
178 | static ssize_t target_stat_scsi_tgt_dev_show_attr_inst( | ||
179 | struct se_dev_stat_grps *sgrps, char *page) | ||
180 | { | ||
181 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
182 | struct se_subsystem_dev, dev_stat_grps); | ||
183 | struct se_hba *hba = se_subdev->se_dev_hba; | ||
184 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
185 | |||
186 | if (!dev) | ||
187 | return -ENODEV; | ||
188 | |||
189 | return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
190 | } | ||
191 | DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst); | ||
192 | |||
193 | static ssize_t target_stat_scsi_tgt_dev_show_attr_indx( | ||
194 | struct se_dev_stat_grps *sgrps, char *page) | ||
195 | { | ||
196 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
197 | struct se_subsystem_dev, dev_stat_grps); | ||
198 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
199 | |||
200 | if (!dev) | ||
201 | return -ENODEV; | ||
202 | |||
203 | return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); | ||
204 | } | ||
205 | DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx); | ||
206 | |||
207 | static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus( | ||
208 | struct se_dev_stat_grps *sgrps, char *page) | ||
209 | { | ||
210 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
211 | struct se_subsystem_dev, dev_stat_grps); | ||
212 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
213 | |||
214 | if (!dev) | ||
215 | return -ENODEV; | ||
216 | |||
217 | return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT); | ||
218 | } | ||
219 | DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus); | ||
220 | |||
221 | static ssize_t target_stat_scsi_tgt_dev_show_attr_status( | ||
222 | struct se_dev_stat_grps *sgrps, char *page) | ||
223 | { | ||
224 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
225 | struct se_subsystem_dev, dev_stat_grps); | ||
226 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
227 | char status[16]; | ||
228 | |||
229 | if (!dev) | ||
230 | return -ENODEV; | ||
231 | |||
232 | switch (dev->dev_status) { | ||
233 | case TRANSPORT_DEVICE_ACTIVATED: | ||
234 | strcpy(status, "activated"); | ||
235 | break; | ||
236 | case TRANSPORT_DEVICE_DEACTIVATED: | ||
237 | strcpy(status, "deactivated"); | ||
238 | break; | ||
239 | case TRANSPORT_DEVICE_SHUTDOWN: | ||
240 | strcpy(status, "shutdown"); | ||
241 | break; | ||
242 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | ||
243 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | ||
244 | strcpy(status, "offline"); | ||
245 | break; | ||
246 | default: | ||
247 | snprintf(status, sizeof(status), "unknown(%d)", dev->dev_status); | ||
248 | break; | ||
249 | } | ||
250 | |||
251 | return snprintf(page, PAGE_SIZE, "%s\n", status); | ||
252 | } | ||
253 | DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status); | ||
254 | |||
255 | static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus( | ||
256 | struct se_dev_stat_grps *sgrps, char *page) | ||
257 | { | ||
258 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
259 | struct se_subsystem_dev, dev_stat_grps); | ||
260 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
261 | int non_accessible_lus; | ||
262 | |||
263 | if (!dev) | ||
264 | return -ENODEV; | ||
265 | |||
266 | switch (dev->dev_status) { | ||
267 | case TRANSPORT_DEVICE_ACTIVATED: | ||
268 | non_accessible_lus = 0; | ||
269 | break; | ||
270 | case TRANSPORT_DEVICE_DEACTIVATED: | ||
271 | case TRANSPORT_DEVICE_SHUTDOWN: | ||
272 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | ||
273 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | ||
274 | default: | ||
275 | non_accessible_lus = 1; | ||
276 | break; | ||
277 | } | ||
278 | |||
279 | return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus); | ||
280 | } | ||
281 | DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus); | ||
282 | |||
283 | static ssize_t target_stat_scsi_tgt_dev_show_attr_resets( | ||
284 | struct se_dev_stat_grps *sgrps, char *page) | ||
285 | { | ||
286 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
287 | struct se_subsystem_dev, dev_stat_grps); | ||
288 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
289 | |||
290 | if (!dev) | ||
291 | return -ENODEV; | ||
292 | |||
293 | return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); | ||
294 | } | ||
295 | DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets); | ||
296 | |||
297 | |||
298 | CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_dev, se_dev_stat_grps, scsi_tgt_dev_group); | ||
299 | |||
300 | static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = { | ||
301 | &target_stat_scsi_tgt_dev_inst.attr, | ||
302 | &target_stat_scsi_tgt_dev_indx.attr, | ||
303 | &target_stat_scsi_tgt_dev_num_lus.attr, | ||
304 | &target_stat_scsi_tgt_dev_status.attr, | ||
305 | &target_stat_scsi_tgt_dev_non_access_lus.attr, | ||
306 | &target_stat_scsi_tgt_dev_resets.attr, | ||
307 | NULL, | ||
308 | }; | ||
309 | |||
310 | static struct configfs_item_operations target_stat_scsi_tgt_dev_attrib_ops = { | ||
311 | .show_attribute = target_stat_scsi_tgt_dev_attr_show, | ||
312 | .store_attribute = target_stat_scsi_tgt_dev_attr_store, | ||
313 | }; | ||
314 | |||
315 | static struct config_item_type target_stat_scsi_tgt_dev_cit = { | ||
316 | .ct_item_ops = &target_stat_scsi_tgt_dev_attrib_ops, | ||
317 | .ct_attrs = target_stat_scsi_tgt_dev_attrs, | ||
318 | .ct_owner = THIS_MODULE, | ||
319 | }; | ||
320 | |||
321 | /* | ||
322 | * SCSI Logical Unit Table | ||
323 | */ | ||
324 | |||
325 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_lu, se_dev_stat_grps); | ||
326 | #define DEV_STAT_SCSI_LU_ATTR(_name, _mode) \ | ||
327 | static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \ | ||
328 | __CONFIGFS_EATTR(_name, _mode, \ | ||
329 | target_stat_scsi_lu_show_attr_##_name, \ | ||
330 | target_stat_scsi_lu_store_attr_##_name); | ||
331 | |||
332 | #define DEV_STAT_SCSI_LU_ATTR_RO(_name) \ | ||
333 | static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \ | ||
334 | __CONFIGFS_EATTR_RO(_name, \ | ||
335 | target_stat_scsi_lu_show_attr_##_name); | ||
336 | |||
337 | static ssize_t target_stat_scsi_lu_show_attr_inst( | ||
338 | struct se_dev_stat_grps *sgrps, char *page) | ||
339 | { | ||
340 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
341 | struct se_subsystem_dev, dev_stat_grps); | ||
342 | struct se_hba *hba = se_subdev->se_dev_hba; | ||
343 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
344 | |||
345 | if (!dev) | ||
346 | return -ENODEV; | ||
347 | |||
348 | return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
349 | } | ||
350 | DEV_STAT_SCSI_LU_ATTR_RO(inst); | ||
351 | |||
352 | static ssize_t target_stat_scsi_lu_show_attr_dev( | ||
353 | struct se_dev_stat_grps *sgrps, char *page) | ||
354 | { | ||
355 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
356 | struct se_subsystem_dev, dev_stat_grps); | ||
357 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
358 | |||
359 | if (!dev) | ||
360 | return -ENODEV; | ||
361 | |||
362 | return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); | ||
363 | } | ||
364 | DEV_STAT_SCSI_LU_ATTR_RO(dev); | ||
365 | |||
366 | static ssize_t target_stat_scsi_lu_show_attr_indx( | ||
367 | struct se_dev_stat_grps *sgrps, char *page) | ||
368 | { | ||
369 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
370 | struct se_subsystem_dev, dev_stat_grps); | ||
371 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
372 | |||
373 | if (!dev) | ||
374 | return -ENODEV; | ||
375 | |||
376 | return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX); | ||
377 | } | ||
378 | DEV_STAT_SCSI_LU_ATTR_RO(indx); | ||
379 | |||
380 | static ssize_t target_stat_scsi_lu_show_attr_lun( | ||
381 | struct se_dev_stat_grps *sgrps, char *page) | ||
382 | { | ||
383 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
384 | struct se_subsystem_dev, dev_stat_grps); | ||
385 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
386 | |||
387 | if (!dev) | ||
388 | return -ENODEV; | ||
389 | /* FIXME: scsiLuDefaultLun */ | ||
390 | return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0); | ||
391 | } | ||
392 | DEV_STAT_SCSI_LU_ATTR_RO(lun); | ||
393 | |||
394 | static ssize_t target_stat_scsi_lu_show_attr_lu_name( | ||
395 | struct se_dev_stat_grps *sgrps, char *page) | ||
396 | { | ||
397 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
398 | struct se_subsystem_dev, dev_stat_grps); | ||
399 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
400 | |||
401 | if (!dev) | ||
402 | return -ENODEV; | ||
403 | /* scsiLuWwnName */ | ||
404 | return snprintf(page, PAGE_SIZE, "%s\n", | ||
405 | (strlen(DEV_T10_WWN(dev)->unit_serial)) ? | ||
406 | (char *)&DEV_T10_WWN(dev)->unit_serial[0] : NONE); | ||
407 | } | ||
408 | DEV_STAT_SCSI_LU_ATTR_RO(lu_name); | ||
409 | |||
410 | static ssize_t target_stat_scsi_lu_show_attr_vend( | ||
411 | struct se_dev_stat_grps *sgrps, char *page) | ||
412 | { | ||
413 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
414 | struct se_subsystem_dev, dev_stat_grps); | ||
415 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
416 | int j; | ||
417 | char str[28]; | ||
418 | |||
419 | if (!dev) | ||
420 | return -ENODEV; | ||
421 | /* scsiLuVendorId */ | ||
422 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
423 | for (j = 0; j < 8; j++) | ||
424 | str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? | ||
425 | DEV_T10_WWN(dev)->vendor[j] : 0x20; | ||
426 | str[8] = 0; | ||
427 | return snprintf(page, PAGE_SIZE, "%s\n", str); | ||
428 | } | ||
429 | DEV_STAT_SCSI_LU_ATTR_RO(vend); | ||
430 | |||
431 | static ssize_t target_stat_scsi_lu_show_attr_prod( | ||
432 | struct se_dev_stat_grps *sgrps, char *page) | ||
433 | { | ||
434 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
435 | struct se_subsystem_dev, dev_stat_grps); | ||
436 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
437 | int j; | ||
438 | char str[28]; | ||
439 | |||
440 | if (!dev) | ||
441 | return -ENODEV; | ||
442 | |||
443 | /* scsiLuProductId */ | ||
444 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
445 | for (j = 0; j < 16; j++) | ||
446 | str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? | ||
447 | DEV_T10_WWN(dev)->model[j] : 0x20; | ||
448 | str[16] = 0; | ||
449 | return snprintf(page, PAGE_SIZE, "%s\n", str); | ||
450 | } | ||
451 | DEV_STAT_SCSI_LU_ATTR_RO(prod); | ||
452 | |||
453 | static ssize_t target_stat_scsi_lu_show_attr_rev( | ||
454 | struct se_dev_stat_grps *sgrps, char *page) | ||
455 | { | ||
456 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
457 | struct se_subsystem_dev, dev_stat_grps); | ||
458 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
459 | int j; | ||
460 | char str[28]; | ||
461 | |||
462 | if (!dev) | ||
463 | return -ENODEV; | ||
464 | |||
465 | /* scsiLuRevisionId */ | ||
466 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
467 | for (j = 0; j < 4; j++) | ||
468 | str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? | ||
469 | DEV_T10_WWN(dev)->revision[j] : 0x20; | ||
470 | str[4] = 0; | ||
471 | return snprintf(page, PAGE_SIZE, "%s\n", str); | ||
472 | } | ||
473 | DEV_STAT_SCSI_LU_ATTR_RO(rev); | ||
474 | |||
475 | static ssize_t target_stat_scsi_lu_show_attr_dev_type( | ||
476 | struct se_dev_stat_grps *sgrps, char *page) | ||
477 | { | ||
478 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
479 | struct se_subsystem_dev, dev_stat_grps); | ||
480 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
481 | |||
482 | if (!dev) | ||
483 | return -ENODEV; | ||
484 | |||
485 | /* scsiLuPeripheralType */ | ||
486 | return snprintf(page, PAGE_SIZE, "%u\n", | ||
487 | TRANSPORT(dev)->get_device_type(dev)); | ||
488 | } | ||
489 | DEV_STAT_SCSI_LU_ATTR_RO(dev_type); | ||
490 | |||
491 | static ssize_t target_stat_scsi_lu_show_attr_status( | ||
492 | struct se_dev_stat_grps *sgrps, char *page) | ||
493 | { | ||
494 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
495 | struct se_subsystem_dev, dev_stat_grps); | ||
496 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
497 | |||
498 | if (!dev) | ||
499 | return -ENODEV; | ||
500 | |||
501 | /* scsiLuStatus */ | ||
502 | return snprintf(page, PAGE_SIZE, "%s\n", | ||
503 | (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? | ||
504 | "available" : "notavailable"); | ||
505 | } | ||
506 | DEV_STAT_SCSI_LU_ATTR_RO(status); | ||
507 | |||
508 | static ssize_t target_stat_scsi_lu_show_attr_state_bit( | ||
509 | struct se_dev_stat_grps *sgrps, char *page) | ||
510 | { | ||
511 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
512 | struct se_subsystem_dev, dev_stat_grps); | ||
513 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
514 | |||
515 | if (!dev) | ||
516 | return -ENODEV; | ||
517 | |||
518 | /* scsiLuState */ | ||
519 | return snprintf(page, PAGE_SIZE, "exposed\n"); | ||
520 | } | ||
521 | DEV_STAT_SCSI_LU_ATTR_RO(state_bit); | ||
522 | |||
523 | static ssize_t target_stat_scsi_lu_show_attr_num_cmds( | ||
524 | struct se_dev_stat_grps *sgrps, char *page) | ||
525 | { | ||
526 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
527 | struct se_subsystem_dev, dev_stat_grps); | ||
528 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
529 | |||
530 | if (!dev) | ||
531 | return -ENODEV; | ||
532 | |||
533 | /* scsiLuNumCommands */ | ||
534 | return snprintf(page, PAGE_SIZE, "%llu\n", | ||
535 | (unsigned long long)dev->num_cmds); | ||
536 | } | ||
537 | DEV_STAT_SCSI_LU_ATTR_RO(num_cmds); | ||
538 | |||
539 | static ssize_t target_stat_scsi_lu_show_attr_read_mbytes( | ||
540 | struct se_dev_stat_grps *sgrps, char *page) | ||
541 | { | ||
542 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
543 | struct se_subsystem_dev, dev_stat_grps); | ||
544 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
545 | |||
546 | if (!dev) | ||
547 | return -ENODEV; | ||
548 | |||
549 | /* scsiLuReadMegaBytes */ | ||
550 | return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); | ||
551 | } | ||
552 | DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes); | ||
553 | |||
554 | static ssize_t target_stat_scsi_lu_show_attr_write_mbytes( | ||
555 | struct se_dev_stat_grps *sgrps, char *page) | ||
556 | { | ||
557 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
558 | struct se_subsystem_dev, dev_stat_grps); | ||
559 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
560 | |||
561 | if (!dev) | ||
562 | return -ENODEV; | ||
563 | |||
564 | /* scsiLuWrittenMegaBytes */ | ||
565 | return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); | ||
566 | } | ||
567 | DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes); | ||
568 | |||
569 | static ssize_t target_stat_scsi_lu_show_attr_resets( | ||
570 | struct se_dev_stat_grps *sgrps, char *page) | ||
571 | { | ||
572 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
573 | struct se_subsystem_dev, dev_stat_grps); | ||
574 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
575 | |||
576 | if (!dev) | ||
577 | return -ENODEV; | ||
578 | |||
579 | /* scsiLuInResets */ | ||
580 | return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); | ||
581 | } | ||
582 | DEV_STAT_SCSI_LU_ATTR_RO(resets); | ||
583 | |||
584 | static ssize_t target_stat_scsi_lu_show_attr_full_stat( | ||
585 | struct se_dev_stat_grps *sgrps, char *page) | ||
586 | { | ||
587 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
588 | struct se_subsystem_dev, dev_stat_grps); | ||
589 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
590 | |||
591 | if (!dev) | ||
592 | return -ENODEV; | ||
593 | |||
594 | /* FIXME: scsiLuOutTaskSetFullStatus */ | ||
595 | return snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
596 | } | ||
597 | DEV_STAT_SCSI_LU_ATTR_RO(full_stat); | ||
598 | |||
599 | static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds( | ||
600 | struct se_dev_stat_grps *sgrps, char *page) | ||
601 | { | ||
602 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
603 | struct se_subsystem_dev, dev_stat_grps); | ||
604 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
605 | |||
606 | if (!dev) | ||
607 | return -ENODEV; | ||
608 | |||
609 | /* FIXME: scsiLuHSInCommands */ | ||
610 | return snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
611 | } | ||
612 | DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds); | ||
613 | |||
614 | static ssize_t target_stat_scsi_lu_show_attr_creation_time( | ||
615 | struct se_dev_stat_grps *sgrps, char *page) | ||
616 | { | ||
617 | struct se_subsystem_dev *se_subdev = container_of(sgrps, | ||
618 | struct se_subsystem_dev, dev_stat_grps); | ||
619 | struct se_device *dev = se_subdev->se_dev_ptr; | ||
620 | |||
621 | if (!dev) | ||
622 | return -ENODEV; | ||
623 | |||
624 | /* scsiLuCreationTime */ | ||
625 | return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time - | ||
626 | INITIAL_JIFFIES) * 100 / HZ)); | ||
627 | } | ||
628 | DEV_STAT_SCSI_LU_ATTR_RO(creation_time); | ||
629 | |||
630 | CONFIGFS_EATTR_OPS(target_stat_scsi_lu, se_dev_stat_grps, scsi_lu_group); | ||
631 | |||
632 | static struct configfs_attribute *target_stat_scsi_lu_attrs[] = { | ||
633 | &target_stat_scsi_lu_inst.attr, | ||
634 | &target_stat_scsi_lu_dev.attr, | ||
635 | &target_stat_scsi_lu_indx.attr, | ||
636 | &target_stat_scsi_lu_lun.attr, | ||
637 | &target_stat_scsi_lu_lu_name.attr, | ||
638 | &target_stat_scsi_lu_vend.attr, | ||
639 | &target_stat_scsi_lu_prod.attr, | ||
640 | &target_stat_scsi_lu_rev.attr, | ||
641 | &target_stat_scsi_lu_dev_type.attr, | ||
642 | &target_stat_scsi_lu_status.attr, | ||
643 | &target_stat_scsi_lu_state_bit.attr, | ||
644 | &target_stat_scsi_lu_num_cmds.attr, | ||
645 | &target_stat_scsi_lu_read_mbytes.attr, | ||
646 | &target_stat_scsi_lu_write_mbytes.attr, | ||
647 | &target_stat_scsi_lu_resets.attr, | ||
648 | &target_stat_scsi_lu_full_stat.attr, | ||
649 | &target_stat_scsi_lu_hs_num_cmds.attr, | ||
650 | &target_stat_scsi_lu_creation_time.attr, | ||
651 | NULL, | ||
652 | }; | ||
653 | |||
654 | static struct configfs_item_operations target_stat_scsi_lu_attrib_ops = { | ||
655 | .show_attribute = target_stat_scsi_lu_attr_show, | ||
656 | .store_attribute = target_stat_scsi_lu_attr_store, | ||
657 | }; | ||
658 | |||
659 | static struct config_item_type target_stat_scsi_lu_cit = { | ||
660 | .ct_item_ops = &target_stat_scsi_lu_attrib_ops, | ||
661 | .ct_attrs = target_stat_scsi_lu_attrs, | ||
662 | .ct_owner = THIS_MODULE, | ||
663 | }; | ||
664 | |||
665 | /* | ||
666 | * Called from target_core_configfs.c:target_core_make_subdev() to setup | ||
667 | * the target statistics groups + configfs CITs located in target_core_stat.c | ||
668 | */ | ||
669 | void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev) | ||
670 | { | ||
671 | struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group; | ||
672 | |||
673 | config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group, | ||
674 | "scsi_dev", &target_stat_scsi_dev_cit); | ||
675 | config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group, | ||
676 | "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); | ||
677 | config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group, | ||
678 | "scsi_lu", &target_stat_scsi_lu_cit); | ||
679 | |||
680 | dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group; | ||
681 | dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group; | ||
682 | dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group; | ||
683 | dev_stat_grp->default_groups[3] = NULL; | ||
684 | } | ||
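As the comment above says, the core calls this from target_core_make_subdev(). A hedged caller-side sketch, assuming the core must first allocate the default_groups array with room for the three children plus the NULL terminator (illustrative fragment, not the literal target_core_configfs.c code):

	/* caller side, before registering the statistics group */
	dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4,
			GFP_KERNEL);
	if (!dev_stat_grp->default_groups)
		return -ENOMEM;
	target_stat_setup_dev_default_groups(se_subdev);

After registration the scsi_dev/, scsi_tgt_dev/ and scsi_lu/ directories appear under the device's statistics group, each exposing the read-only files defined earlier.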
685 | |||
686 | /* | ||
687 | * SCSI Port Table | ||
688 | */ | ||
689 | |||
690 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_port, se_port_stat_grps); | ||
691 | #define DEV_STAT_SCSI_PORT_ATTR(_name, _mode) \ | ||
692 | static struct target_stat_scsi_port_attribute \ | ||
693 | target_stat_scsi_port_##_name = \ | ||
694 | __CONFIGFS_EATTR(_name, _mode, \ | ||
695 | target_stat_scsi_port_show_attr_##_name, \ | ||
696 | target_stat_scsi_port_store_attr_##_name); | ||
697 | |||
698 | #define DEV_STAT_SCSI_PORT_ATTR_RO(_name) \ | ||
699 | static struct target_stat_scsi_port_attribute \ | ||
700 | target_stat_scsi_port_##_name = \ | ||
701 | __CONFIGFS_EATTR_RO(_name, \ | ||
702 | target_stat_scsi_port_show_attr_##_name); | ||
703 | |||
704 | static ssize_t target_stat_scsi_port_show_attr_inst( | ||
705 | struct se_port_stat_grps *pgrps, char *page) | ||
706 | { | ||
707 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
708 | struct se_port *sep; | ||
709 | struct se_device *dev = lun->lun_se_dev; | ||
710 | struct se_hba *hba; | ||
711 | ssize_t ret; | ||
712 | |||
713 | spin_lock(&lun->lun_sep_lock); | ||
714 | sep = lun->lun_sep; | ||
715 | if (!sep) { | ||
716 | spin_unlock(&lun->lun_sep_lock); | ||
717 | return -ENODEV; | ||
718 | } | ||
719 | hba = dev->se_hba; | ||
720 | ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
721 | spin_unlock(&lun->lun_sep_lock); | ||
722 | return ret; | ||
723 | } | ||
724 | DEV_STAT_SCSI_PORT_ATTR_RO(inst); | ||
725 | |||
726 | static ssize_t target_stat_scsi_port_show_attr_dev( | ||
727 | struct se_port_stat_grps *pgrps, char *page) | ||
728 | { | ||
729 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
730 | struct se_port *sep; | ||
731 | struct se_device *dev = lun->lun_se_dev; | ||
732 | ssize_t ret; | ||
733 | |||
734 | spin_lock(&lun->lun_sep_lock); | ||
735 | sep = lun->lun_sep; | ||
736 | if (!sep) { | ||
737 | spin_unlock(&lun->lun_sep_lock); | ||
738 | return -ENODEV; | ||
739 | } | ||
740 | ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); | ||
741 | spin_unlock(&lun->lun_sep_lock); | ||
742 | return ret; | ||
743 | } | ||
744 | DEV_STAT_SCSI_PORT_ATTR_RO(dev); | ||
745 | |||
746 | static ssize_t target_stat_scsi_port_show_attr_indx( | ||
747 | struct se_port_stat_grps *pgrps, char *page) | ||
748 | { | ||
749 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
750 | struct se_port *sep; | ||
751 | ssize_t ret; | ||
752 | |||
753 | spin_lock(&lun->lun_sep_lock); | ||
754 | sep = lun->lun_sep; | ||
755 | if (!sep) { | ||
756 | spin_unlock(&lun->lun_sep_lock); | ||
757 | return -ENODEV; | ||
758 | } | ||
759 | ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); | ||
760 | spin_unlock(&lun->lun_sep_lock); | ||
761 | return ret; | ||
762 | } | ||
763 | DEV_STAT_SCSI_PORT_ATTR_RO(indx); | ||
764 | |||
765 | static ssize_t target_stat_scsi_port_show_attr_role( | ||
766 | struct se_port_stat_grps *pgrps, char *page) | ||
767 | { | ||
768 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
769 | struct se_device *dev = lun->lun_se_dev; | ||
770 | struct se_port *sep; | ||
771 | ssize_t ret; | ||
772 | |||
773 | if (!dev) | ||
774 | return -ENODEV; | ||
775 | |||
776 | spin_lock(&lun->lun_sep_lock); | ||
777 | sep = lun->lun_sep; | ||
778 | if (!sep) { | ||
779 | spin_unlock(&lun->lun_sep_lock); | ||
780 | return -ENODEV; | ||
781 | } | ||
782 | ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index); | ||
783 | spin_unlock(&lun->lun_sep_lock); | ||
784 | return ret; | ||
785 | } | ||
786 | DEV_STAT_SCSI_PORT_ATTR_RO(role); | ||
787 | |||
788 | static ssize_t target_stat_scsi_port_show_attr_busy_count( | ||
789 | struct se_port_stat_grps *pgrps, char *page) | ||
790 | { | ||
791 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
792 | struct se_port *sep; | ||
793 | ssize_t ret; | ||
794 | |||
795 | spin_lock(&lun->lun_sep_lock); | ||
796 | sep = lun->lun_sep; | ||
797 | if (!sep) { | ||
798 | spin_unlock(&lun->lun_sep_lock); | ||
799 | return -ENODEV; | ||
800 | } | ||
801 | /* FIXME: scsiPortBusyStatuses */ | ||
802 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
803 | spin_unlock(&lun->lun_sep_lock); | ||
804 | return ret; | ||
805 | } | ||
806 | DEV_STAT_SCSI_PORT_ATTR_RO(busy_count); | ||
807 | |||
808 | CONFIGFS_EATTR_OPS(target_stat_scsi_port, se_port_stat_grps, scsi_port_group); | ||
809 | |||
810 | static struct configfs_attribute *target_stat_scsi_port_attrs[] = { | ||
811 | &target_stat_scsi_port_inst.attr, | ||
812 | &target_stat_scsi_port_dev.attr, | ||
813 | &target_stat_scsi_port_indx.attr, | ||
814 | &target_stat_scsi_port_role.attr, | ||
815 | &target_stat_scsi_port_busy_count.attr, | ||
816 | NULL, | ||
817 | }; | ||
818 | |||
819 | static struct configfs_item_operations target_stat_scsi_port_attrib_ops = { | ||
820 | .show_attribute = target_stat_scsi_port_attr_show, | ||
821 | .store_attribute = target_stat_scsi_port_attr_store, | ||
822 | }; | ||
823 | |||
824 | static struct config_item_type target_stat_scsi_port_cit = { | ||
825 | .ct_item_ops = &target_stat_scsi_port_attrib_ops, | ||
826 | .ct_attrs = target_stat_scsi_port_attrs, | ||
827 | .ct_owner = THIS_MODULE, | ||
828 | }; | ||
829 | |||
830 | /* | ||
831 | * SCSI Target Port Table | ||
832 | */ | ||
833 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_port, se_port_stat_grps); | ||
834 | #define DEV_STAT_SCSI_TGT_PORT_ATTR(_name, _mode) \ | ||
835 | static struct target_stat_scsi_tgt_port_attribute \ | ||
836 | target_stat_scsi_tgt_port_##_name = \ | ||
837 | __CONFIGFS_EATTR(_name, _mode, \ | ||
838 | target_stat_scsi_tgt_port_show_attr_##_name, \ | ||
839 | target_stat_scsi_tgt_port_store_attr_##_name); | ||
840 | |||
841 | #define DEV_STAT_SCSI_TGT_PORT_ATTR_RO(_name) \ | ||
842 | static struct target_stat_scsi_tgt_port_attribute \ | ||
843 | target_stat_scsi_tgt_port_##_name = \ | ||
844 | __CONFIGFS_EATTR_RO(_name, \ | ||
845 | target_stat_scsi_tgt_port_show_attr_##_name); | ||
846 | |||
847 | static ssize_t target_stat_scsi_tgt_port_show_attr_inst( | ||
848 | struct se_port_stat_grps *pgrps, char *page) | ||
849 | { | ||
850 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
851 | struct se_device *dev = lun->lun_se_dev; | ||
852 | struct se_port *sep; | ||
853 | struct se_hba *hba; | ||
854 | ssize_t ret; | ||
855 | |||
856 | spin_lock(&lun->lun_sep_lock); | ||
857 | sep = lun->lun_sep; | ||
858 | if (!sep) { | ||
859 | spin_unlock(&lun->lun_sep_lock); | ||
860 | return -ENODEV; | ||
861 | } | ||
862 | hba = dev->se_hba; | ||
863 | ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
864 | spin_unlock(&lun->lun_sep_lock); | ||
865 | return ret; | ||
866 | } | ||
867 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst); | ||
868 | |||
869 | static ssize_t target_stat_scsi_tgt_port_show_attr_dev( | ||
870 | struct se_port_stat_grps *pgrps, char *page) | ||
871 | { | ||
872 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
873 | struct se_device *dev = lun->lun_se_dev; | ||
874 | struct se_port *sep; | ||
875 | ssize_t ret; | ||
876 | |||
877 | spin_lock(&lun->lun_sep_lock); | ||
878 | sep = lun->lun_sep; | ||
879 | if (!sep) { | ||
880 | spin_unlock(&lun->lun_sep_lock); | ||
881 | return -ENODEV; | ||
882 | } | ||
883 | ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); | ||
884 | spin_unlock(&lun->lun_sep_lock); | ||
885 | return ret; | ||
886 | } | ||
887 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev); | ||
888 | |||
889 | static ssize_t target_stat_scsi_tgt_port_show_attr_indx( | ||
890 | struct se_port_stat_grps *pgrps, char *page) | ||
891 | { | ||
892 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
893 | struct se_port *sep; | ||
894 | ssize_t ret; | ||
895 | |||
896 | spin_lock(&lun->lun_sep_lock); | ||
897 | sep = lun->lun_sep; | ||
898 | if (!sep) { | ||
899 | spin_unlock(&lun->lun_sep_lock); | ||
900 | return -ENODEV; | ||
901 | } | ||
902 | ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); | ||
903 | spin_unlock(&lun->lun_sep_lock); | ||
904 | return ret; | ||
905 | } | ||
906 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx); | ||
907 | |||
908 | static ssize_t target_stat_scsi_tgt_port_show_attr_name( | ||
909 | struct se_port_stat_grps *pgrps, char *page) | ||
910 | { | ||
911 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
912 | struct se_port *sep; | ||
913 | struct se_portal_group *tpg; | ||
914 | ssize_t ret; | ||
915 | |||
916 | spin_lock(&lun->lun_sep_lock); | ||
917 | sep = lun->lun_sep; | ||
918 | if (!sep) { | ||
919 | spin_unlock(&lun->lun_sep_lock); | ||
920 | return -ENODEV; | ||
921 | } | ||
922 | tpg = sep->sep_tpg; | ||
923 | |||
924 | ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", | ||
925 | TPG_TFO(tpg)->get_fabric_name(), sep->sep_index); | ||
926 | spin_unlock(&lun->lun_sep_lock); | ||
927 | return ret; | ||
928 | } | ||
929 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name); | ||
930 | |||
931 | static ssize_t target_stat_scsi_tgt_port_show_attr_port_index( | ||
932 | struct se_port_stat_grps *pgrps, char *page) | ||
933 | { | ||
934 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
935 | struct se_port *sep; | ||
936 | struct se_portal_group *tpg; | ||
937 | ssize_t ret; | ||
938 | |||
939 | spin_lock(&lun->lun_sep_lock); | ||
940 | sep = lun->lun_sep; | ||
941 | if (!sep) { | ||
942 | spin_unlock(&lun->lun_sep_lock); | ||
943 | return -ENODEV; | ||
944 | } | ||
945 | tpg = sep->sep_tpg; | ||
946 | |||
947 | ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", | ||
948 | TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", | ||
949 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
950 | spin_unlock(&lun->lun_sep_lock); | ||
951 | return ret; | ||
952 | } | ||
953 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index); | ||
954 | |||
955 | static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds( | ||
956 | struct se_port_stat_grps *pgrps, char *page) | ||
957 | { | ||
958 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
959 | struct se_port *sep; | ||
960 | struct se_portal_group *tpg; | ||
961 | ssize_t ret; | ||
962 | |||
963 | spin_lock(&lun->lun_sep_lock); | ||
964 | sep = lun->lun_sep; | ||
965 | if (!sep) { | ||
966 | spin_unlock(&lun->lun_sep_lock); | ||
967 | return -ENODEV; | ||
968 | } | ||
969 | tpg = sep->sep_tpg; | ||
970 | |||
971 | ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus); | ||
972 | spin_unlock(&lun->lun_sep_lock); | ||
973 | return ret; | ||
974 | } | ||
975 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds); | ||
976 | |||
977 | static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes( | ||
978 | struct se_port_stat_grps *pgrps, char *page) | ||
979 | { | ||
980 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
981 | struct se_port *sep; | ||
982 | struct se_portal_group *tpg; | ||
983 | ssize_t ret; | ||
984 | |||
985 | spin_lock(&lun->lun_sep_lock); | ||
986 | sep = lun->lun_sep; | ||
987 | if (!sep) { | ||
988 | spin_unlock(&lun->lun_sep_lock); | ||
989 | return -ENODEV; | ||
990 | } | ||
991 | tpg = sep->sep_tpg; | ||
992 | |||
993 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
994 | (u32)(sep->sep_stats.rx_data_octets >> 20)); | ||
995 | spin_unlock(&lun->lun_sep_lock); | ||
996 | return ret; | ||
997 | } | ||
998 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes); | ||
999 | |||
1000 | static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes( | ||
1001 | struct se_port_stat_grps *pgrps, char *page) | ||
1002 | { | ||
1003 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
1004 | struct se_port *sep; | ||
1005 | struct se_portal_group *tpg; | ||
1006 | ssize_t ret; | ||
1007 | |||
1008 | spin_lock(&lun->lun_sep_lock); | ||
1009 | sep = lun->lun_sep; | ||
1010 | if (!sep) { | ||
1011 | spin_unlock(&lun->lun_sep_lock); | ||
1012 | return -ENODEV; | ||
1013 | } | ||
1014 | tpg = sep->sep_tpg; | ||
1015 | |||
1016 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
1017 | (u32)(sep->sep_stats.tx_data_octets >> 20)); | ||
1018 | spin_unlock(&lun->lun_sep_lock); | ||
1019 | return ret; | ||
1020 | } | ||
1021 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes); | ||
1022 | |||
1023 | static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds( | ||
1024 | struct se_port_stat_grps *pgrps, char *page) | ||
1025 | { | ||
1026 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
1027 | struct se_port *sep; | ||
1028 | struct se_portal_group *tpg; | ||
1029 | ssize_t ret; | ||
1030 | |||
1031 | spin_lock(&lun->lun_sep_lock); | ||
1032 | sep = lun->lun_sep; | ||
1033 | if (!sep) { | ||
1034 | spin_unlock(&lun->lun_sep_lock); | ||
1035 | return -ENODEV; | ||
1036 | } | ||
1037 | tpg = sep->sep_tpg; | ||
1038 | |||
1039 | /* FIXME: scsiTgtPortHsInCommands */ | ||
1040 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
1041 | spin_unlock(&lun->lun_sep_lock); | ||
1042 | return ret; | ||
1043 | } | ||
1044 | DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds); | ||
1045 | |||
1046 | CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_port, se_port_stat_grps, | ||
1047 | scsi_tgt_port_group); | ||
1048 | |||
1049 | static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = { | ||
1050 | &target_stat_scsi_tgt_port_inst.attr, | ||
1051 | &target_stat_scsi_tgt_port_dev.attr, | ||
1052 | &target_stat_scsi_tgt_port_indx.attr, | ||
1053 | &target_stat_scsi_tgt_port_name.attr, | ||
1054 | &target_stat_scsi_tgt_port_port_index.attr, | ||
1055 | &target_stat_scsi_tgt_port_in_cmds.attr, | ||
1056 | &target_stat_scsi_tgt_port_write_mbytes.attr, | ||
1057 | &target_stat_scsi_tgt_port_read_mbytes.attr, | ||
1058 | &target_stat_scsi_tgt_port_hs_in_cmds.attr, | ||
1059 | NULL, | ||
1060 | }; | ||
1061 | |||
1062 | static struct configfs_item_operations target_stat_scsi_tgt_port_attrib_ops = { | ||
1063 | .show_attribute = target_stat_scsi_tgt_port_attr_show, | ||
1064 | .store_attribute = target_stat_scsi_tgt_port_attr_store, | ||
1065 | }; | ||
1066 | |||
1067 | static struct config_item_type target_stat_scsi_tgt_port_cit = { | ||
1068 | .ct_item_ops = &target_stat_scsi_tgt_port_attrib_ops, | ||
1069 | .ct_attrs = target_stat_scsi_tgt_port_attrs, | ||
1070 | .ct_owner = THIS_MODULE, | ||
1071 | }; | ||
1072 | |||
1073 | /* | ||
1074 | * SCSI Transport Table | ||
1075 | */ | ||
1076 | |||
1077 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_transport, se_port_stat_grps); | ||
1078 | #define DEV_STAT_SCSI_TRANSPORT_ATTR(_name, _mode) \ | ||
1079 | static struct target_stat_scsi_transport_attribute \ | ||
1080 | target_stat_scsi_transport_##_name = \ | ||
1081 | __CONFIGFS_EATTR(_name, _mode, \ | ||
1082 | target_stat_scsi_transport_show_attr_##_name, \ | ||
1083 | target_stat_scsi_transport_store_attr_##_name); | ||
1084 | |||
1085 | #define DEV_STAT_SCSI_TRANSPORT_ATTR_RO(_name) \ | ||
1086 | static struct target_stat_scsi_transport_attribute \ | ||
1087 | target_stat_scsi_transport_##_name = \ | ||
1088 | __CONFIGFS_EATTR_RO(_name, \ | ||
1089 | target_stat_scsi_transport_show_attr_##_name); | ||
1090 | |||
1091 | static ssize_t target_stat_scsi_transport_show_attr_inst( | ||
1092 | struct se_port_stat_grps *pgrps, char *page) | ||
1093 | { | ||
1094 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
1095 | struct se_device *dev = lun->lun_se_dev; | ||
1096 | struct se_port *sep; | ||
1097 | struct se_hba *hba; | ||
1098 | ssize_t ret; | ||
1099 | |||
1100 | spin_lock(&lun->lun_sep_lock); | ||
1101 | sep = lun->lun_sep; | ||
1102 | if (!sep) { | ||
1103 | spin_unlock(&lun->lun_sep_lock); | ||
1104 | return -ENODEV; | ||
1105 | } | ||
1106 | |||
1107 | hba = dev->se_hba; | ||
1108 | ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); | ||
1109 | spin_unlock(&lun->lun_sep_lock); | ||
1110 | return ret; | ||
1111 | } | ||
1112 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst); | ||
1113 | |||
1114 | static ssize_t target_stat_scsi_transport_show_attr_device( | ||
1115 | struct se_port_stat_grps *pgrps, char *page) | ||
1116 | { | ||
1117 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
1118 | struct se_port *sep; | ||
1119 | struct se_portal_group *tpg; | ||
1120 | ssize_t ret; | ||
1121 | |||
1122 | spin_lock(&lun->lun_sep_lock); | ||
1123 | sep = lun->lun_sep; | ||
1124 | if (!sep) { | ||
1125 | spin_unlock(&lun->lun_sep_lock); | ||
1126 | return -ENODEV; | ||
1127 | } | ||
1128 | tpg = sep->sep_tpg; | ||
1129 | /* scsiTransportType */ | ||
1130 | ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", | ||
1131 | TPG_TFO(tpg)->get_fabric_name()); | ||
1132 | spin_unlock(&lun->lun_sep_lock); | ||
1133 | return ret; | ||
1134 | } | ||
1135 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device); | ||
1136 | |||
1137 | static ssize_t target_stat_scsi_transport_show_attr_indx( | ||
1138 | struct se_port_stat_grps *pgrps, char *page) | ||
1139 | { | ||
1140 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
1141 | struct se_port *sep; | ||
1142 | struct se_portal_group *tpg; | ||
1143 | ssize_t ret; | ||
1144 | |||
1145 | spin_lock(&lun->lun_sep_lock); | ||
1146 | sep = lun->lun_sep; | ||
1147 | if (!sep) { | ||
1148 | spin_unlock(&lun->lun_sep_lock); | ||
1149 | return -ENODEV; | ||
1150 | } | ||
1151 | tpg = sep->sep_tpg; | ||
1152 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
1153 | TPG_TFO(tpg)->tpg_get_inst_index(tpg)); | ||
1154 | spin_unlock(&lun->lun_sep_lock); | ||
1155 | return ret; | ||
1156 | } | ||
1157 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx); | ||
1158 | |||
1159 | static ssize_t target_stat_scsi_transport_show_attr_dev_name( | ||
1160 | struct se_port_stat_grps *pgrps, char *page) | ||
1161 | { | ||
1162 | struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); | ||
1163 | struct se_device *dev = lun->lun_se_dev; | ||
1164 | struct se_port *sep; | ||
1165 | struct se_portal_group *tpg; | ||
1166 | struct t10_wwn *wwn; | ||
1167 | ssize_t ret; | ||
1168 | |||
1169 | spin_lock(&lun->lun_sep_lock); | ||
1170 | sep = lun->lun_sep; | ||
1171 | if (!sep) { | ||
1172 | spin_unlock(&lun->lun_sep_lock); | ||
1173 | return -ENODEV; | ||
1174 | } | ||
1175 | tpg = sep->sep_tpg; | ||
1176 | wwn = DEV_T10_WWN(dev); | ||
1177 | /* scsiTransportDevName */ | ||
1178 | ret = snprintf(page, PAGE_SIZE, "%s+%s\n", | ||
1179 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | ||
1180 | (strlen(wwn->unit_serial)) ? wwn->unit_serial : | ||
1181 | wwn->vendor); | ||
1182 | spin_unlock(&lun->lun_sep_lock); | ||
1183 | return ret; | ||
1184 | } | ||
1185 | DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name); | ||
1186 | |||
1187 | CONFIGFS_EATTR_OPS(target_stat_scsi_transport, se_port_stat_grps, | ||
1188 | scsi_transport_group); | ||
1189 | |||
1190 | static struct configfs_attribute *target_stat_scsi_transport_attrs[] = { | ||
1191 | &target_stat_scsi_transport_inst.attr, | ||
1192 | &target_stat_scsi_transport_device.attr, | ||
1193 | &target_stat_scsi_transport_indx.attr, | ||
1194 | &target_stat_scsi_transport_dev_name.attr, | ||
1195 | NULL, | ||
1196 | }; | ||
1197 | |||
1198 | static struct configfs_item_operations target_stat_scsi_transport_attrib_ops = { | ||
1199 | .show_attribute = target_stat_scsi_transport_attr_show, | ||
1200 | .store_attribute = target_stat_scsi_transport_attr_store, | ||
1201 | }; | ||
1202 | |||
1203 | static struct config_item_type target_stat_scsi_transport_cit = { | ||
1204 | .ct_item_ops = &target_stat_scsi_transport_attrib_ops, | ||
1205 | .ct_attrs = target_stat_scsi_transport_attrs, | ||
1206 | .ct_owner = THIS_MODULE, | ||
1207 | }; | ||
1208 | |||
1209 | /* | ||
1210 | * Called from target_core_fabric_configfs.c:target_fabric_make_lun() to setup | ||
1211 | * the target port statistics groups + configfs CITs located in target_core_stat.c | ||
1212 | */ | ||
1213 | void target_stat_setup_port_default_groups(struct se_lun *lun) | ||
1214 | { | ||
1215 | struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; | ||
1216 | |||
1217 | config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group, | ||
1218 | "scsi_port", &target_stat_scsi_port_cit); | ||
1219 | config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group, | ||
1220 | "scsi_tgt_port", &target_stat_scsi_tgt_port_cit); | ||
1221 | config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group, | ||
1222 | "scsi_transport", &target_stat_scsi_transport_cit); | ||
1223 | |||
1224 | port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group; | ||
1225 | port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group; | ||
1226 | port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group; | ||
1227 | port_stat_grp->default_groups[3] = NULL; | ||
1228 | } | ||
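This mirrors the device-side helper: target_fabric_make_lun() is expected to size the LUN statistics group's default_groups array the same way before calling in, after which every exported LUN carries scsi_port/, scsi_tgt_port/ and scsi_transport/ directories. A hedged userspace sketch of consuming one counter, assuming a configfs mount at /sys/kernel/config and purely illustrative fabric/TPG/LUN names:

#include <stdio.h>

int main(void)
{
	char buf[64];
	/* example path; the iqn/tpgt/lun components are illustrative */
	FILE *f = fopen("/sys/kernel/config/target/iscsi/"
			"iqn.2003-01.org.example:t1/tpgt_1/lun/lun_0/"
			"statistics/scsi_tgt_port/in_cmds", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("in_cmds: %s", buf);	/* one value per show() call */
	fclose(f);
	return 0;
}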
1229 | |||
1230 | /* | ||
1231 | * SCSI Authorized Initiator Table | ||
1232 | */ | ||
1233 | |||
1234 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_auth_intr, se_ml_stat_grps); | ||
1235 | #define DEV_STAT_SCSI_AUTH_INTR_ATTR(_name, _mode) \ | ||
1236 | static struct target_stat_scsi_auth_intr_attribute \ | ||
1237 | target_stat_scsi_auth_intr_##_name = \ | ||
1238 | __CONFIGFS_EATTR(_name, _mode, \ | ||
1239 | target_stat_scsi_auth_intr_show_attr_##_name, \ | ||
1240 | target_stat_scsi_auth_intr_store_attr_##_name); | ||
1241 | |||
1242 | #define DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(_name) \ | ||
1243 | static struct target_stat_scsi_auth_intr_attribute \ | ||
1244 | target_stat_scsi_auth_intr_##_name = \ | ||
1245 | __CONFIGFS_EATTR_RO(_name, \ | ||
1246 | target_stat_scsi_auth_intr_show_attr_##_name); | ||
1247 | |||
1248 | static ssize_t target_stat_scsi_auth_intr_show_attr_inst( | ||
1249 | struct se_ml_stat_grps *lgrps, char *page) | ||
1250 | { | ||
1251 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1252 | struct se_lun_acl, ml_stat_grps); | ||
1253 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1254 | struct se_dev_entry *deve; | ||
1255 | struct se_portal_group *tpg; | ||
1256 | ssize_t ret; | ||
1257 | |||
1258 | spin_lock_irq(&nacl->device_list_lock); | ||
1259 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1260 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1261 | spin_unlock_irq(&nacl->device_list_lock); | ||
1262 | return -ENODEV; | ||
1263 | } | ||
1264 | tpg = nacl->se_tpg; | ||
1265 | /* scsiInstIndex */ | ||
1266 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
1267 | TPG_TFO(tpg)->tpg_get_inst_index(tpg)); | ||
1268 | spin_unlock_irq(&nacl->device_list_lock); | ||
1269 | return ret; | ||
1270 | } | ||
1271 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst); | ||
1272 | |||
1273 | static ssize_t target_stat_scsi_auth_intr_show_attr_dev( | ||
1274 | struct se_ml_stat_grps *lgrps, char *page) | ||
1275 | { | ||
1276 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1277 | struct se_lun_acl, ml_stat_grps); | ||
1278 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1279 | struct se_dev_entry *deve; | ||
1280 | struct se_lun *lun; | ||
1281 | struct se_portal_group *tpg; | ||
1282 | ssize_t ret; | ||
1283 | |||
1284 | spin_lock_irq(&nacl->device_list_lock); | ||
1285 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1286 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1287 | spin_unlock_irq(&nacl->device_list_lock); | ||
1288 | return -ENODEV; | ||
1289 | } | ||
1290 | tpg = nacl->se_tpg; | ||
1291 | lun = deve->se_lun; | ||
1292 | /* scsiDeviceIndex */ | ||
1293 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); | ||
1294 | spin_unlock_irq(&nacl->device_list_lock); | ||
1295 | return ret; | ||
1296 | } | ||
1297 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev); | ||
1298 | |||
1299 | static ssize_t target_stat_scsi_auth_intr_show_attr_port( | ||
1300 | struct se_ml_stat_grps *lgrps, char *page) | ||
1301 | { | ||
1302 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1303 | struct se_lun_acl, ml_stat_grps); | ||
1304 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1305 | struct se_dev_entry *deve; | ||
1306 | struct se_portal_group *tpg; | ||
1307 | ssize_t ret; | ||
1308 | |||
1309 | spin_lock_irq(&nacl->device_list_lock); | ||
1310 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1311 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1312 | spin_unlock_irq(&nacl->device_list_lock); | ||
1313 | return -ENODEV; | ||
1314 | } | ||
1315 | tpg = nacl->se_tpg; | ||
1316 | /* scsiAuthIntrTgtPortIndex */ | ||
1317 | ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
1318 | spin_unlock_irq(&nacl->device_list_lock); | ||
1319 | return ret; | ||
1320 | } | ||
1321 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port); | ||
1322 | |||
1323 | static ssize_t target_stat_scsi_auth_intr_show_attr_indx( | ||
1324 | struct se_ml_stat_grps *lgrps, char *page) | ||
1325 | { | ||
1326 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1327 | struct se_lun_acl, ml_stat_grps); | ||
1328 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1329 | struct se_dev_entry *deve; | ||
1330 | ssize_t ret; | ||
1331 | |||
1332 | spin_lock_irq(&nacl->device_list_lock); | ||
1333 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1334 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1335 | spin_unlock_irq(&nacl->device_list_lock); | ||
1336 | return -ENODEV; | ||
1337 | } | ||
1338 | /* scsiAuthIntrIndex */ | ||
1339 | ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); | ||
1340 | spin_unlock_irq(&nacl->device_list_lock); | ||
1341 | return ret; | ||
1342 | } | ||
1343 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx); | ||
1344 | |||
1345 | static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port( | ||
1346 | struct se_ml_stat_grps *lgrps, char *page) | ||
1347 | { | ||
1348 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1349 | struct se_lun_acl, ml_stat_grps); | ||
1350 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1351 | struct se_dev_entry *deve; | ||
1352 | ssize_t ret; | ||
1353 | |||
1354 | spin_lock_irq(&nacl->device_list_lock); | ||
1355 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1356 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1357 | spin_unlock_irq(&nacl->device_list_lock); | ||
1358 | return -ENODEV; | ||
1359 | } | ||
1360 | /* scsiAuthIntrDevOrPort */ | ||
1361 | ret = snprintf(page, PAGE_SIZE, "%u\n", 1); | ||
1362 | spin_unlock_irq(&nacl->device_list_lock); | ||
1363 | return ret; | ||
1364 | } | ||
1365 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port); | ||
1366 | |||
1367 | static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name( | ||
1368 | struct se_ml_stat_grps *lgrps, char *page) | ||
1369 | { | ||
1370 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1371 | struct se_lun_acl, ml_stat_grps); | ||
1372 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1373 | struct se_dev_entry *deve; | ||
1374 | ssize_t ret; | ||
1375 | |||
1376 | spin_lock_irq(&nacl->device_list_lock); | ||
1377 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1378 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1379 | spin_unlock_irq(&nacl->device_list_lock); | ||
1380 | return -ENODEV; | ||
1381 | } | ||
1382 | /* scsiAuthIntrName */ | ||
1383 | ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname); | ||
1384 | spin_unlock_irq(&nacl->device_list_lock); | ||
1385 | return ret; | ||
1386 | } | ||
1387 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name); | ||
1388 | |||
1389 | static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx( | ||
1390 | struct se_ml_stat_grps *lgrps, char *page) | ||
1391 | { | ||
1392 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1393 | struct se_lun_acl, ml_stat_grps); | ||
1394 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1395 | struct se_dev_entry *deve; | ||
1396 | ssize_t ret; | ||
1397 | |||
1398 | spin_lock_irq(&nacl->device_list_lock); | ||
1399 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1400 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1401 | spin_unlock_irq(&nacl->device_list_lock); | ||
1402 | return -ENODEV; | ||
1403 | } | ||
1404 | /* FIXME: scsiAuthIntrLunMapIndex */ | ||
1405 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
1406 | spin_unlock_irq(&nacl->device_list_lock); | ||
1407 | return ret; | ||
1408 | } | ||
1409 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx); | ||
1410 | |||
1411 | static ssize_t target_stat_scsi_auth_intr_show_attr_att_count( | ||
1412 | struct se_ml_stat_grps *lgrps, char *page) | ||
1413 | { | ||
1414 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1415 | struct se_lun_acl, ml_stat_grps); | ||
1416 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1417 | struct se_dev_entry *deve; | ||
1418 | ssize_t ret; | ||
1419 | |||
1420 | spin_lock_irq(&nacl->device_list_lock); | ||
1421 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1422 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1423 | spin_unlock_irq(&nacl->device_list_lock); | ||
1424 | return -ENODEV; | ||
1425 | } | ||
1426 | /* scsiAuthIntrAttachedTimes */ | ||
1427 | ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count); | ||
1428 | spin_unlock_irq(&nacl->device_list_lock); | ||
1429 | return ret; | ||
1430 | } | ||
1431 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count); | ||
1432 | |||
1433 | static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds( | ||
1434 | struct se_ml_stat_grps *lgrps, char *page) | ||
1435 | { | ||
1436 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1437 | struct se_lun_acl, ml_stat_grps); | ||
1438 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1439 | struct se_dev_entry *deve; | ||
1440 | ssize_t ret; | ||
1441 | |||
1442 | spin_lock_irq(&nacl->device_list_lock); | ||
1443 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1444 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1445 | spin_unlock_irq(&nacl->device_list_lock); | ||
1446 | return -ENODEV; | ||
1447 | } | ||
1448 | /* scsiAuthIntrOutCommands */ | ||
1449 | ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds); | ||
1450 | spin_unlock_irq(&nacl->device_list_lock); | ||
1451 | return ret; | ||
1452 | } | ||
1453 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds); | ||
1454 | |||
1455 | static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes( | ||
1456 | struct se_ml_stat_grps *lgrps, char *page) | ||
1457 | { | ||
1458 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1459 | struct se_lun_acl, ml_stat_grps); | ||
1460 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1461 | struct se_dev_entry *deve; | ||
1462 | ssize_t ret; | ||
1463 | |||
1464 | spin_lock_irq(&nacl->device_list_lock); | ||
1465 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1466 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1467 | spin_unlock_irq(&nacl->device_list_lock); | ||
1468 | return -ENODEV; | ||
1469 | } | ||
1470 | /* scsiAuthIntrReadMegaBytes */ | ||
1471 | ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20)); | ||
1472 | spin_unlock_irq(&nacl->device_list_lock); | ||
1473 | return ret; | ||
1474 | } | ||
1475 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes); | ||
1476 | |||
1477 | static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes( | ||
1478 | struct se_ml_stat_grps *lgrps, char *page) | ||
1479 | { | ||
1480 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1481 | struct se_lun_acl, ml_stat_grps); | ||
1482 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1483 | struct se_dev_entry *deve; | ||
1484 | ssize_t ret; | ||
1485 | |||
1486 | spin_lock_irq(&nacl->device_list_lock); | ||
1487 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1488 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1489 | spin_unlock_irq(&nacl->device_list_lock); | ||
1490 | return -ENODEV; | ||
1491 | } | ||
1492 | /* scsiAuthIntrWrittenMegaBytes */ | ||
1493 | ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20)); | ||
1494 | spin_unlock_irq(&nacl->device_list_lock); | ||
1495 | return ret; | ||
1496 | } | ||
1497 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes); | ||
1498 | |||
1499 | static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds( | ||
1500 | struct se_ml_stat_grps *lgrps, char *page) | ||
1501 | { | ||
1502 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1503 | struct se_lun_acl, ml_stat_grps); | ||
1504 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1505 | struct se_dev_entry *deve; | ||
1506 | ssize_t ret; | ||
1507 | |||
1508 | spin_lock_irq(&nacl->device_list_lock); | ||
1509 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1510 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1511 | spin_unlock_irq(&nacl->device_list_lock); | ||
1512 | return -ENODEV; | ||
1513 | } | ||
1514 | /* FIXME: scsiAuthIntrHSOutCommands */ | ||
1515 | ret = snprintf(page, PAGE_SIZE, "%u\n", 0); | ||
1516 | spin_unlock_irq(&nacl->device_list_lock); | ||
1517 | return ret; | ||
1518 | } | ||
1519 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds); | ||
1520 | |||
1521 | static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time( | ||
1522 | struct se_ml_stat_grps *lgrps, char *page) | ||
1523 | { | ||
1524 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1525 | struct se_lun_acl, ml_stat_grps); | ||
1526 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1527 | struct se_dev_entry *deve; | ||
1528 | ssize_t ret; | ||
1529 | |||
1530 | spin_lock_irq(&nacl->device_list_lock); | ||
1531 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1532 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1533 | spin_unlock_irq(&nacl->device_list_lock); | ||
1534 | return -ENODEV; | ||
1535 | } | ||
1536 | /* scsiAuthIntrLastCreation */ | ||
1537 | ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time - | ||
1538 | INITIAL_JIFFIES) * 100 / HZ)); | ||
1539 | spin_unlock_irq(&nacl->device_list_lock); | ||
1540 | return ret; | ||
1541 | } | ||
1542 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time); | ||
1543 | |||
1544 | static ssize_t target_stat_scsi_auth_intr_show_attr_row_status( | ||
1545 | struct se_ml_stat_grps *lgrps, char *page) | ||
1546 | { | ||
1547 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1548 | struct se_lun_acl, ml_stat_grps); | ||
1549 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1550 | struct se_dev_entry *deve; | ||
1551 | ssize_t ret; | ||
1552 | |||
1553 | spin_lock_irq(&nacl->device_list_lock); | ||
1554 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1555 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1556 | spin_unlock_irq(&nacl->device_list_lock); | ||
1557 | return -ENODEV; | ||
1558 | } | ||
1559 | /* FIXME: scsiAuthIntrRowStatus */ | ||
1560 | ret = snprintf(page, PAGE_SIZE, "Ready\n"); | ||
1561 | spin_unlock_irq(&nacl->device_list_lock); | ||
1562 | return ret; | ||
1563 | } | ||
1564 | DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status); | ||
1565 | |||
1566 | CONFIGFS_EATTR_OPS(target_stat_scsi_auth_intr, se_ml_stat_grps, | ||
1567 | scsi_auth_intr_group); | ||
1568 | |||
1569 | static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = { | ||
1570 | &target_stat_scsi_auth_intr_inst.attr, | ||
1571 | &target_stat_scsi_auth_intr_dev.attr, | ||
1572 | &target_stat_scsi_auth_intr_port.attr, | ||
1573 | &target_stat_scsi_auth_intr_indx.attr, | ||
1574 | &target_stat_scsi_auth_intr_dev_or_port.attr, | ||
1575 | &target_stat_scsi_auth_intr_intr_name.attr, | ||
1576 | &target_stat_scsi_auth_intr_map_indx.attr, | ||
1577 | &target_stat_scsi_auth_intr_att_count.attr, | ||
1578 | &target_stat_scsi_auth_intr_num_cmds.attr, | ||
1579 | &target_stat_scsi_auth_intr_read_mbytes.attr, | ||
1580 | &target_stat_scsi_auth_intr_write_mbytes.attr, | ||
1581 | &target_stat_scsi_auth_intr_hs_num_cmds.attr, | ||
1582 | &target_stat_scsi_auth_intr_creation_time.attr, | ||
1583 | &target_stat_scsi_auth_intr_row_status.attr, | ||
1584 | NULL, | ||
1585 | }; | ||
1586 | |||
1587 | static struct configfs_item_operations target_stat_scsi_auth_intr_attrib_ops = { | ||
1588 | .show_attribute = target_stat_scsi_auth_intr_attr_show, | ||
1589 | .store_attribute = target_stat_scsi_auth_intr_attr_store, | ||
1590 | }; | ||
1591 | |||
1592 | static struct config_item_type target_stat_scsi_auth_intr_cit = { | ||
1593 | .ct_item_ops = &target_stat_scsi_auth_intr_attrib_ops, | ||
1594 | .ct_attrs = target_stat_scsi_auth_intr_attrs, | ||
1595 | .ct_owner = THIS_MODULE, | ||
1596 | }; | ||
1597 | |||
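The target_stat_scsi_auth_intr_attr_show()/_store() callbacks wired into the item operations above are generated by the CONFIGFS_EATTR_OPS() invocation. Assuming the helpers in include/target/configfs_macros.h follow the usual configfs attribute shape, the generated show dispatcher looks roughly like:

	/* Approximate expansion -- the real one lives in configfs_macros.h. */
	static ssize_t target_stat_scsi_auth_intr_attr_show(
		struct config_item *item, struct configfs_attribute *attr,
		char *page)
	{
		struct se_ml_stat_grps *lgrps = container_of(
			to_config_group(item), struct se_ml_stat_grps,
			scsi_auth_intr_group);
		struct target_stat_scsi_auth_intr_attribute *a = container_of(
			attr, struct target_stat_scsi_auth_intr_attribute, attr);

		return a->show ? a->show(lgrps, page) : -EINVAL;
	}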
1598 | /* | ||
1599 | * SCSI Attached Initiator Port Table | ||
1600 | */ | ||
1601 | |||
1602 | CONFIGFS_EATTR_STRUCT(target_stat_scsi_att_intr_port, se_ml_stat_grps); | ||
1603 | #define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR(_name, _mode) \ | ||
1604 | static struct target_stat_scsi_att_intr_port_attribute \ | ||
1605 | target_stat_scsi_att_intr_port_##_name = \ | ||
1606 | __CONFIGFS_EATTR(_name, _mode, \ | ||
1607 | target_stat_scsi_att_intr_port_show_attr_##_name, \ | ||
1608 | target_stat_scsi_att_intr_port_store_attr_##_name); | ||
1609 | |||
1610 | #define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(_name) \ | ||
1611 | static struct target_stat_scsi_att_intr_port_attribute \ | ||
1612 | target_stat_scsi_att_intr_port_##_name = \ | ||
1613 | __CONFIGFS_EATTR_RO(_name, \ | ||
1614 | target_stat_scsi_att_intr_port_show_attr_##_name); | ||
1615 | |||
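For reference, a rough sketch of what the read-only variant expands to, assuming __CONFIGFS_EATTR_RO() builds a standard configfs_attribute with an S_IRUGO mode and no store hook:

	/* Approximate expansion of DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst): */
	static struct target_stat_scsi_att_intr_port_attribute
			target_stat_scsi_att_intr_port_inst = {
		.attr = {
			.ca_name	= "inst",
			.ca_mode	= S_IRUGO,
			.ca_owner	= THIS_MODULE,
		},
		.show	= target_stat_scsi_att_intr_port_show_attr_inst,
	};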
1616 | static ssize_t target_stat_scsi_att_intr_port_show_attr_inst( | ||
1617 | struct se_ml_stat_grps *lgrps, char *page) | ||
1618 | { | ||
1619 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1620 | struct se_lun_acl, ml_stat_grps); | ||
1621 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1622 | struct se_dev_entry *deve; | ||
1623 | struct se_portal_group *tpg; | ||
1624 | ssize_t ret; | ||
1625 | |||
1626 | spin_lock_irq(&nacl->device_list_lock); | ||
1627 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1628 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1629 | spin_unlock_irq(&nacl->device_list_lock); | ||
1630 | return -ENODEV; | ||
1631 | } | ||
1632 | tpg = nacl->se_tpg; | ||
1633 | /* scsiInstIndex */ | ||
1634 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
1635 | TPG_TFO(tpg)->tpg_get_inst_index(tpg)); | ||
1636 | spin_unlock_irq(&nacl->device_list_lock); | ||
1637 | return ret; | ||
1638 | } | ||
1639 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst); | ||
1640 | |||
1641 | static ssize_t target_stat_scsi_att_intr_port_show_attr_dev( | ||
1642 | struct se_ml_stat_grps *lgrps, char *page) | ||
1643 | { | ||
1644 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1645 | struct se_lun_acl, ml_stat_grps); | ||
1646 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1647 | struct se_dev_entry *deve; | ||
1648 | struct se_lun *lun; | ||
1649 | struct se_portal_group *tpg; | ||
1650 | ssize_t ret; | ||
1651 | |||
1652 | spin_lock_irq(&nacl->device_list_lock); | ||
1653 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1654 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1655 | spin_unlock_irq(&nacl->device_list_lock); | ||
1656 | return -ENODEV; | ||
1657 | } | ||
1658 | tpg = nacl->se_tpg; | ||
1659 | lun = deve->se_lun; | ||
1660 | /* scsiDeviceIndex */ | ||
1661 | ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); | ||
1662 | spin_unlock_irq(&nacl->device_list_lock); | ||
1663 | return ret; | ||
1664 | } | ||
1665 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev); | ||
1666 | |||
1667 | static ssize_t target_stat_scsi_att_intr_port_show_attr_port( | ||
1668 | struct se_ml_stat_grps *lgrps, char *page) | ||
1669 | { | ||
1670 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1671 | struct se_lun_acl, ml_stat_grps); | ||
1672 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1673 | struct se_dev_entry *deve; | ||
1674 | struct se_portal_group *tpg; | ||
1675 | ssize_t ret; | ||
1676 | |||
1677 | spin_lock_irq(&nacl->device_list_lock); | ||
1678 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1679 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1680 | spin_unlock_irq(&nacl->device_list_lock); | ||
1681 | return -ENODEV; | ||
1682 | } | ||
1683 | tpg = nacl->se_tpg; | ||
1684 | /* scsiPortIndex */ | ||
1685 | ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
1686 | spin_unlock_irq(&nacl->device_list_lock); | ||
1687 | return ret; | ||
1688 | } | ||
1689 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port); | ||
1690 | |||
1691 | static ssize_t target_stat_scsi_att_intr_port_show_attr_indx( | ||
1692 | struct se_ml_stat_grps *lgrps, char *page) | ||
1693 | { | ||
1694 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1695 | struct se_lun_acl, ml_stat_grps); | ||
1696 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1697 | struct se_session *se_sess; | ||
1698 | struct se_portal_group *tpg; | ||
1699 | ssize_t ret; | ||
1700 | |||
1701 | spin_lock_irq(&nacl->nacl_sess_lock); | ||
1702 | se_sess = nacl->nacl_sess; | ||
1703 | if (!se_sess) { | ||
1704 | spin_unlock_irq(&nacl->nacl_sess_lock); | ||
1705 | return -ENODEV; | ||
1706 | } | ||
1707 | |||
1708 | tpg = nacl->se_tpg; | ||
1709 | /* scsiAttIntrPortIndex */ | ||
1710 | ret = snprintf(page, PAGE_SIZE, "%u\n", | ||
1711 | TPG_TFO(tpg)->sess_get_index(se_sess)); | ||
1712 | spin_unlock_irq(&nacl->nacl_sess_lock); | ||
1713 | return ret; | ||
1714 | } | ||
1715 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(indx); | ||
1716 | |||
1717 | static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx( | ||
1718 | struct se_ml_stat_grps *lgrps, char *page) | ||
1719 | { | ||
1720 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1721 | struct se_lun_acl, ml_stat_grps); | ||
1722 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1723 | struct se_dev_entry *deve; | ||
1724 | ssize_t ret; | ||
1725 | |||
1726 | spin_lock_irq(&nacl->device_list_lock); | ||
1727 | deve = &nacl->device_list[lacl->mapped_lun]; | ||
1728 | if (!deve->se_lun || !deve->se_lun_acl) { | ||
1729 | spin_unlock_irq(&nacl->device_list_lock); | ||
1730 | return -ENODEV; | ||
1731 | } | ||
1732 | /* scsiAttIntrPortAuthIntrIdx */ | ||
1733 | ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); | ||
1734 | spin_unlock_irq(&nacl->device_list_lock); | ||
1735 | return ret; | ||
1736 | } | ||
1737 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx); | ||
1738 | |||
1739 | static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident( | ||
1740 | struct se_ml_stat_grps *lgrps, char *page) | ||
1741 | { | ||
1742 | struct se_lun_acl *lacl = container_of(lgrps, | ||
1743 | struct se_lun_acl, ml_stat_grps); | ||
1744 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
1745 | struct se_session *se_sess; | ||
1746 | struct se_portal_group *tpg; | ||
1747 | ssize_t ret; | ||
1748 | unsigned char buf[64]; | ||
1749 | |||
1750 | spin_lock_irq(&nacl->nacl_sess_lock); | ||
1751 | se_sess = nacl->nacl_sess; | ||
1752 | if (!se_sess) { | ||
1753 | spin_unlock_irq(&nacl->nacl_sess_lock); | ||
1754 | return -ENODEV; | ||
1755 | } | ||
1756 | |||
1757 | tpg = nacl->se_tpg; | ||
1758 | /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ | ||
1759 | memset(buf, 0, 64); | ||
1760 | if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) | ||
1761 | TPG_TFO(tpg)->sess_get_initiator_sid(se_sess, | ||
1762 | (unsigned char *)&buf[0], 64); | ||
1763 | |||
1764 | ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); | ||
1765 | spin_unlock_irq(&nacl->nacl_sess_lock); | ||
1766 | return ret; | ||
1767 | } | ||
1768 | DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_ident); | ||
1769 | |||
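The "%s+i+%s" format joins the ACL's initiator name to whatever session identifier the fabric supplies. Purely illustrative (iSCSI-style naming assumed, not taken from this diff):

	/*
	 * A read of port_ident might return something like
	 *
	 *	iqn.1994-05.com.example:client+i+0x00023d000001
	 *
	 * Fabrics without a sess_get_initiator_sid() callback leave buf
	 * zeroed, so the page degrades to "<initiatorname>+i+".
	 */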
1770 | CONFIGFS_EATTR_OPS(target_stat_scsi_att_intr_port, se_ml_stat_grps, | ||
1771 | scsi_att_intr_port_group); | ||
1772 | |||
1773 | static struct configfs_attribute *target_stat_scsi_ath_intr_port_attrs[] = { | ||
1774 | &target_stat_scsi_att_intr_port_inst.attr, | ||
1775 | &target_stat_scsi_att_intr_port_dev.attr, | ||
1776 | &target_stat_scsi_att_intr_port_port.attr, | ||
1777 | &target_stat_scsi_att_intr_port_indx.attr, | ||
1778 | &target_stat_scsi_att_intr_port_port_auth_indx.attr, | ||
1779 | &target_stat_scsi_att_intr_port_port_ident.attr, | ||
1780 | NULL, | ||
1781 | }; | ||
1782 | |||
1783 | static struct configfs_item_operations target_stat_scsi_att_intr_port_attrib_ops = { | ||
1784 | .show_attribute = target_stat_scsi_att_intr_port_attr_show, | ||
1785 | .store_attribute = target_stat_scsi_att_intr_port_attr_store, | ||
1786 | }; | ||
1787 | |||
1788 | static struct config_item_type target_stat_scsi_att_intr_port_cit = { | ||
1789 | .ct_item_ops = &target_stat_scsi_att_intr_port_attrib_ops, | ||
1790 | .ct_attrs = target_stat_scsi_ath_intr_port_attrs, | ||
1791 | .ct_owner = THIS_MODULE, | ||
1792 | }; | ||
1793 | |||
1794 | /* | ||
1795 | * Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to set up | ||
1796 | * the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c | ||
1797 | */ | ||
1798 | void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl) | ||
1799 | { | ||
1800 | struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; | ||
1801 | |||
1802 | config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group, | ||
1803 | "scsi_auth_intr", &target_stat_scsi_auth_intr_cit); | ||
1804 | config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group, | ||
1805 | "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit); | ||
1806 | |||
1807 | ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group; | ||
1808 | ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group; | ||
1809 | ml_stat_grp->default_groups[2] = NULL; | ||
1810 | } | ||
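With the two default groups registered, every mapped LUN exports these MIB-style tables through configfs. The layout below is illustrative only; the fabric, WWN, and ACL path components come from the fabric module:

	/*
	 * /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/acls/$INITIATOR/lun_0/
	 *	statistics/
	 *		scsi_auth_intr/		inst, dev, port, indx, dev_or_port,
	 *					intr_name, map_indx, att_count,
	 *					num_cmds, read_mbytes, write_mbytes,
	 *					hs_num_cmds, creation_time, row_status
	 *		scsi_att_intr_port/	inst, dev, port, indx,
	 *					port_auth_indx, port_ident
	 */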
diff --git a/drivers/target/target_core_stat.h b/drivers/target/target_core_stat.h new file mode 100644 index 00000000000..86c252f9ea4 --- /dev/null +++ b/drivers/target/target_core_stat.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef TARGET_CORE_STAT_H | ||
2 | #define TARGET_CORE_STAT_H | ||
3 | |||
4 | extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *); | ||
5 | extern void target_stat_setup_port_default_groups(struct se_lun *); | ||
6 | extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); | ||
7 | |||
8 | #endif /*** TARGET_CORE_STAT_H ***/ | ||
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ff9ace01e27..bf6aa8a9f1d 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -227,8 +227,6 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | |||
227 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | 227 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); |
228 | static void transport_stop_all_task_timers(struct se_cmd *cmd); | 228 | static void transport_stop_all_task_timers(struct se_cmd *cmd); |
229 | 229 | ||
230 | int transport_emulate_control_cdb(struct se_task *task); | ||
231 | |||
232 | int init_se_global(void) | 230 | int init_se_global(void) |
233 | { | 231 | { |
234 | struct se_global *global; | 232 | struct se_global *global; |
@@ -1622,7 +1620,7 @@ struct se_device *transport_add_device_to_core_hba( | |||
1622 | const char *inquiry_prod, | 1620 | const char *inquiry_prod, |
1623 | const char *inquiry_rev) | 1621 | const char *inquiry_rev) |
1624 | { | 1622 | { |
1625 | int ret = 0, force_pt; | 1623 | int force_pt; |
1626 | struct se_device *dev; | 1624 | struct se_device *dev; |
1627 | 1625 | ||
1628 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); | 1626 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); |
@@ -1739,9 +1737,8 @@ struct se_device *transport_add_device_to_core_hba( | |||
1739 | } | 1737 | } |
1740 | scsi_dump_inquiry(dev); | 1738 | scsi_dump_inquiry(dev); |
1741 | 1739 | ||
1740 | return dev; | ||
1742 | out: | 1741 | out: |
1743 | if (!ret) | ||
1744 | return dev; | ||
1745 | kthread_stop(dev->process_thread); | 1742 | kthread_stop(dev->process_thread); |
1746 | 1743 | ||
1747 | spin_lock(&hba->device_lock); | 1744 | spin_lock(&hba->device_lock); |
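The hunk above drops the always-zero `ret` flag and returns on the success path before the `out:` label, leaving the label purely for error unwinding. A minimal sketch of the idiom (all widget_* names are made up):

	struct widget { void *buf; };

	static int widget_init(struct widget *w)
	{
		w->buf = kmalloc(64, GFP_KERNEL);
		return w->buf ? 0 : -ENOMEM;
	}

	static struct widget *widget_create(void)
	{
		struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

		if (!w)
			return NULL;
		if (widget_init(w) < 0)
			goto out;
		return w;	/* success returns before the label ... */
	out:
		kfree(w);	/* ... so 'out:' is unambiguously the error path */
		return NULL;
	}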
@@ -4359,11 +4356,9 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | |||
4359 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | 4356 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); |
4360 | goto out; | 4357 | goto out; |
4361 | } | 4358 | } |
4362 | INIT_LIST_HEAD(&se_mem->se_list); | ||
4363 | se_mem->se_len = (length > dma_size) ? dma_size : length; | ||
4364 | 4359 | ||
4365 | /* #warning FIXME Allocate contiguous pages for struct se_mem elements */ | 4360 | /* #warning FIXME Allocate contiguous pages for struct se_mem elements */ |
4366 | se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0); | 4361 | se_mem->se_page = alloc_pages(GFP_KERNEL, 0); |
4367 | if (!(se_mem->se_page)) { | 4362 | if (!(se_mem->se_page)) { |
4368 | printk(KERN_ERR "alloc_pages() failed\n"); | 4363 | printk(KERN_ERR "alloc_pages() failed\n"); |
4369 | goto out; | 4364 | goto out; |
@@ -4374,6 +4369,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | |||
4374 | printk(KERN_ERR "kmap_atomic() failed\n"); | 4369 | printk(KERN_ERR "kmap_atomic() failed\n"); |
4375 | goto out; | 4370 | goto out; |
4376 | } | 4371 | } |
4372 | INIT_LIST_HEAD(&se_mem->se_list); | ||
4373 | se_mem->se_len = (length > dma_size) ? dma_size : length; | ||
4377 | memset(buf, 0, se_mem->se_len); | 4374 | memset(buf, 0, se_mem->se_len); |
4378 | kunmap_atomic(buf, KM_IRQ0); | 4375 | kunmap_atomic(buf, KM_IRQ0); |
4379 | 4376 | ||
@@ -4392,10 +4389,13 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | |||
4392 | 4389 | ||
4393 | return 0; | 4390 | return 0; |
4394 | out: | 4391 | out: |
4392 | if (se_mem) | ||
4393 | __free_pages(se_mem->se_page, 0); | ||
4394 | kmem_cache_free(se_mem_cache, se_mem); | ||
4395 | return -1; | 4395 | return -1; |
4396 | } | 4396 | } |
4397 | 4397 | ||
4398 | extern u32 transport_calc_sg_num( | 4398 | u32 transport_calc_sg_num( |
4399 | struct se_task *task, | 4399 | struct se_task *task, |
4400 | struct se_mem *in_se_mem, | 4400 | struct se_mem *in_se_mem, |
4401 | u32 task_offset) | 4401 | u32 task_offset) |
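Two fixes land in the hunks above: the se_list/se_len initialization moves below the kmap_atomic() check so a mapping failure bails out before the element looks initialized, and the error path now releases both the page and the se_mem cache object. The zero-fill pattern itself, using the same two-argument kmap_atomic() API as the surrounding code:

	/* Sketch of the zero-fill pattern used above. */
	static struct page *alloc_zeroed_page_atomic(void)
	{
		struct page *pg = alloc_pages(GFP_KERNEL, 0);
		void *buf;

		if (!pg)
			return NULL;
		buf = kmap_atomic(pg, KM_IRQ0);	/* short-lived atomic mapping */
		memset(buf, 0, PAGE_SIZE);	/* clear the fresh page */
		kunmap_atomic(buf, KM_IRQ0);
		return pg;
	}

Passing GFP_KERNEL | __GFP_ZERO to alloc_pages() would achieve the same result without the temporary mapping.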
@@ -5834,31 +5834,26 @@ int transport_generic_do_tmr(struct se_cmd *cmd) | |||
5834 | int ret; | 5834 | int ret; |
5835 | 5835 | ||
5836 | switch (tmr->function) { | 5836 | switch (tmr->function) { |
5837 | case ABORT_TASK: | 5837 | case TMR_ABORT_TASK: |
5838 | ref_cmd = tmr->ref_cmd; | 5838 | ref_cmd = tmr->ref_cmd; |
5839 | tmr->response = TMR_FUNCTION_REJECTED; | 5839 | tmr->response = TMR_FUNCTION_REJECTED; |
5840 | break; | 5840 | break; |
5841 | case ABORT_TASK_SET: | 5841 | case TMR_ABORT_TASK_SET: |
5842 | case CLEAR_ACA: | 5842 | case TMR_CLEAR_ACA: |
5843 | case CLEAR_TASK_SET: | 5843 | case TMR_CLEAR_TASK_SET: |
5844 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; | 5844 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; |
5845 | break; | 5845 | break; |
5846 | case LUN_RESET: | 5846 | case TMR_LUN_RESET: |
5847 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); | 5847 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); |
5848 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : | 5848 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : |
5849 | TMR_FUNCTION_REJECTED; | 5849 | TMR_FUNCTION_REJECTED; |
5850 | break; | 5850 | break; |
5851 | #if 0 | 5851 | case TMR_TARGET_WARM_RESET: |
5852 | case TARGET_WARM_RESET: | ||
5853 | transport_generic_host_reset(dev->se_hba); | ||
5854 | tmr->response = TMR_FUNCTION_REJECTED; | 5852 | tmr->response = TMR_FUNCTION_REJECTED; |
5855 | break; | 5853 | break; |
5856 | case TARGET_COLD_RESET: | 5854 | case TMR_TARGET_COLD_RESET: |
5857 | transport_generic_host_reset(dev->se_hba); | ||
5858 | transport_generic_cold_reset(dev->se_hba); | ||
5859 | tmr->response = TMR_FUNCTION_REJECTED; | 5855 | tmr->response = TMR_FUNCTION_REJECTED; |
5860 | break; | 5856 | break; |
5861 | #endif | ||
5862 | default: | 5857 | default: |
5863 | printk(KERN_ERR "Uknown TMR function: 0x%02x.\n", | 5858 | printk(KERN_ERR "Uknown TMR function: 0x%02x.\n", |
5864 | tmr->function); | 5859 | tmr->function); |
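The switch now matches the TMR_* request enum instead of the raw SAM task-management opcodes, which also lets the dead #if 0 host-reset cases become real (if still rejected) cases. The enum presumably lives in the target core headers along these lines; treat the exact values as an assumption:

	enum tcm_tmreq_table {
		TMR_ABORT_TASK		= 1,
		TMR_ABORT_TASK_SET	= 2,
		TMR_CLEAR_ACA		= 3,
		TMR_CLEAR_TASK_SET	= 4,
		TMR_LUN_RESET		= 5,
		TMR_TARGET_WARM_RESET	= 6,
		TMR_TARGET_COLD_RESET	= 7,
	};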
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index 25a8bc565f4..87e7e6c876d 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c | |||
@@ -131,7 +131,7 @@ static void kgdboc_unregister_kbd(void) | |||
131 | 131 | ||
132 | static int kgdboc_option_setup(char *opt) | 132 | static int kgdboc_option_setup(char *opt) |
133 | { | 133 | { |
134 | if (strlen(opt) > MAX_CONFIG_LEN) { | 134 | if (strlen(opt) >= MAX_CONFIG_LEN) { |
135 | printk(KERN_ERR "kgdboc: config string too long\n"); | 135 | printk(KERN_ERR "kgdboc: config string too long\n"); |
136 | return -ENOSPC; | 136 | return -ENOSPC; |
137 | } | 137 | } |
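The comparison becomes `>=` because the option string is later copied into a fixed-size buffer: a string of exactly MAX_CONFIG_LEN characters would leave no room for the terminating NUL. A sketch of why (the buffer and the value 40 are assumptions about kgdboc.c, not shown in this hunk):

	#define MAX_CONFIG_LEN	40	/* assumed to match kgdboc.c */
	static char config[MAX_CONFIG_LEN];

	static int option_setup(char *opt)
	{
		/* strlen(opt) == MAX_CONFIG_LEN already overflows: the copy
		 * needs strlen(opt) + 1 bytes including the trailing NUL. */
		if (strlen(opt) >= MAX_CONFIG_LEN)
			return -ENOSPC;
		strcpy(config, opt);
		return 0;
	}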
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 81f13958e75..43db715f150 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c | |||
@@ -306,7 +306,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = { | |||
306 | 306 | ||
307 | static void sysrq_handle_showmem(int key) | 307 | static void sysrq_handle_showmem(int key) |
308 | { | 308 | { |
309 | show_mem(); | 309 | show_mem(0); |
310 | } | 310 | } |
311 | static struct sysrq_key_op sysrq_showmem_op = { | 311 | static struct sysrq_key_op sysrq_showmem_op = { |
312 | .handler = sysrq_handle_showmem, | 312 | .handler = sysrq_handle_showmem, |
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 6dd3c68c13a..d6b342b5b42 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c | |||
@@ -600,7 +600,7 @@ static void fn_scroll_back(struct vc_data *vc) | |||
600 | 600 | ||
601 | static void fn_show_mem(struct vc_data *vc) | 601 | static void fn_show_mem(struct vc_data *vc) |
602 | { | 602 | { |
603 | show_mem(); | 603 | show_mem(0); |
604 | } | 604 | } |
605 | 605 | ||
606 | static void fn_show_state(struct vc_data *vc) | 606 | static void fn_show_state(struct vc_data *vc) |
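Both this call site and the sysrq one above follow a show_mem() signature change that added a filter-flags argument; passing 0 keeps the old unfiltered dump. The declaration presumably reads (flag name and value are my recollection, not part of this diff):

	extern void show_mem(unsigned int flags);
	#define SHOW_MEM_FILTER_NODES	(0x0001u)	/* skip nodes the task can't use */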