diff options
Diffstat (limited to 'drivers')
214 files changed, 13567 insertions, 5339 deletions
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index a18e497f1c3c..31e9e10f657e 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -824,11 +824,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
824 | device->backlight->props.brightness = | 824 | device->backlight->props.brightness = |
825 | acpi_video_get_brightness(device->backlight); | 825 | acpi_video_get_brightness(device->backlight); |
826 | 826 | ||
827 | result = sysfs_create_link(&device->backlight->dev.kobj, | ||
828 | &device->dev->dev.kobj, "device"); | ||
829 | if (result) | ||
830 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | ||
831 | |||
832 | device->cooling_dev = thermal_cooling_device_register("LCD", | 827 | device->cooling_dev = thermal_cooling_device_register("LCD", |
833 | device->dev, &video_cooling_ops); | 828 | device->dev, &video_cooling_ops); |
834 | if (IS_ERR(device->cooling_dev)) { | 829 | if (IS_ERR(device->cooling_dev)) { |
@@ -1381,7 +1376,6 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device) | |||
1381 | "Cant remove video notify handler\n"); | 1376 | "Cant remove video notify handler\n"); |
1382 | } | 1377 | } |
1383 | if (device->backlight) { | 1378 | if (device->backlight) { |
1384 | sysfs_remove_link(&device->backlight->dev.kobj, "device"); | ||
1385 | backlight_device_unregister(device->backlight); | 1379 | backlight_device_unregister(device->backlight); |
1386 | device->backlight = NULL; | 1380 | device->backlight = NULL; |
1387 | } | 1381 | } |
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c index 5253b271b3fe..f6b3f995f58a 100644 --- a/drivers/ata/pata_ixp4xx_cf.c +++ b/drivers/ata/pata_ixp4xx_cf.c | |||
@@ -167,7 +167,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev) | |||
167 | 167 | ||
168 | irq = platform_get_irq(pdev, 0); | 168 | irq = platform_get_irq(pdev, 0); |
169 | if (irq) | 169 | if (irq) |
170 | set_irq_type(irq, IRQ_TYPE_EDGE_RISING); | 170 | irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); |
171 | 171 | ||
172 | /* Setup expansion bus chip selects */ | 172 | /* Setup expansion bus chip selects */ |
173 | *data->cs0_cfg = data->cs0_bits; | 173 | *data->cs0_cfg = data->cs0_bits; |
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c index baeaf938d55b..1b9d10d9c5d9 100644 --- a/drivers/ata/pata_rb532_cf.c +++ b/drivers/ata/pata_rb532_cf.c | |||
@@ -60,10 +60,10 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance) | |||
60 | struct rb532_cf_info *info = ah->private_data; | 60 | struct rb532_cf_info *info = ah->private_data; |
61 | 61 | ||
62 | if (gpio_get_value(info->gpio_line)) { | 62 | if (gpio_get_value(info->gpio_line)) { |
63 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); | 63 | irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); |
64 | ata_sff_interrupt(info->irq, dev_instance); | 64 | ata_sff_interrupt(info->irq, dev_instance); |
65 | } else { | 65 | } else { |
66 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); | 66 | irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); |
67 | } | 67 | } |
68 | 68 | ||
69 | return IRQ_HANDLED; | 69 | return IRQ_HANDLED; |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 35658f445fca..9bf13988f1a2 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -193,7 +193,7 @@ static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev, | |||
193 | u64 *cfg_offset); | 193 | u64 *cfg_offset); |
194 | static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, | 194 | static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, |
195 | unsigned long *memory_bar); | 195 | unsigned long *memory_bar); |
196 | 196 | static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag); | |
197 | 197 | ||
198 | /* performant mode helper functions */ | 198 | /* performant mode helper functions */ |
199 | static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, | 199 | static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, |
@@ -231,7 +231,7 @@ static const struct block_device_operations cciss_fops = { | |||
231 | */ | 231 | */ |
232 | static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c) | 232 | static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c) |
233 | { | 233 | { |
234 | if (likely(h->transMethod == CFGTBL_Trans_Performant)) | 234 | if (likely(h->transMethod & CFGTBL_Trans_Performant)) |
235 | c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); | 235 | c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); |
236 | } | 236 | } |
237 | 237 | ||
@@ -556,6 +556,44 @@ static void __devinit cciss_procinit(ctlr_info_t *h) | |||
556 | #define to_hba(n) container_of(n, struct ctlr_info, dev) | 556 | #define to_hba(n) container_of(n, struct ctlr_info, dev) |
557 | #define to_drv(n) container_of(n, drive_info_struct, dev) | 557 | #define to_drv(n) container_of(n, drive_info_struct, dev) |
558 | 558 | ||
559 | /* List of controllers which cannot be reset on kexec with reset_devices */ | ||
560 | static u32 unresettable_controller[] = { | ||
561 | 0x324a103C, /* Smart Array P712m */ | ||
562 | 0x324b103C, /* SmartArray P711m */ | ||
563 | 0x3223103C, /* Smart Array P800 */ | ||
564 | 0x3234103C, /* Smart Array P400 */ | ||
565 | 0x3235103C, /* Smart Array P400i */ | ||
566 | 0x3211103C, /* Smart Array E200i */ | ||
567 | 0x3212103C, /* Smart Array E200 */ | ||
568 | 0x3213103C, /* Smart Array E200i */ | ||
569 | 0x3214103C, /* Smart Array E200i */ | ||
570 | 0x3215103C, /* Smart Array E200i */ | ||
571 | 0x3237103C, /* Smart Array E500 */ | ||
572 | 0x323D103C, /* Smart Array P700m */ | ||
573 | 0x409C0E11, /* Smart Array 6400 */ | ||
574 | 0x409D0E11, /* Smart Array 6400 EM */ | ||
575 | }; | ||
576 | |||
577 | static int ctlr_is_resettable(struct ctlr_info *h) | ||
578 | { | ||
579 | int i; | ||
580 | |||
581 | for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) | ||
582 | if (unresettable_controller[i] == h->board_id) | ||
583 | return 0; | ||
584 | return 1; | ||
585 | } | ||
586 | |||
587 | static ssize_t host_show_resettable(struct device *dev, | ||
588 | struct device_attribute *attr, | ||
589 | char *buf) | ||
590 | { | ||
591 | struct ctlr_info *h = to_hba(dev); | ||
592 | |||
593 | return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h)); | ||
594 | } | ||
595 | static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); | ||
596 | |||
559 | static ssize_t host_store_rescan(struct device *dev, | 597 | static ssize_t host_store_rescan(struct device *dev, |
560 | struct device_attribute *attr, | 598 | struct device_attribute *attr, |
561 | const char *buf, size_t count) | 599 | const char *buf, size_t count) |
@@ -741,6 +779,7 @@ static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); | |||
741 | 779 | ||
742 | static struct attribute *cciss_host_attrs[] = { | 780 | static struct attribute *cciss_host_attrs[] = { |
743 | &dev_attr_rescan.attr, | 781 | &dev_attr_rescan.attr, |
782 | &dev_attr_resettable.attr, | ||
744 | NULL | 783 | NULL |
745 | }; | 784 | }; |
746 | 785 | ||
@@ -973,8 +1012,8 @@ static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c) | |||
973 | temp64.val32.upper = c->ErrDesc.Addr.upper; | 1012 | temp64.val32.upper = c->ErrDesc.Addr.upper; |
974 | pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), | 1013 | pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), |
975 | c->err_info, (dma_addr_t) temp64.val); | 1014 | c->err_info, (dma_addr_t) temp64.val); |
976 | pci_free_consistent(h->pdev, sizeof(CommandList_struct), | 1015 | pci_free_consistent(h->pdev, sizeof(CommandList_struct), c, |
977 | c, (dma_addr_t) c->busaddr); | 1016 | (dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr)); |
978 | } | 1017 | } |
979 | 1018 | ||
980 | static inline ctlr_info_t *get_host(struct gendisk *disk) | 1019 | static inline ctlr_info_t *get_host(struct gendisk *disk) |
@@ -1490,8 +1529,7 @@ static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp) | |||
1490 | return -EINVAL; | 1529 | return -EINVAL; |
1491 | if (!capable(CAP_SYS_RAWIO)) | 1530 | if (!capable(CAP_SYS_RAWIO)) |
1492 | return -EPERM; | 1531 | return -EPERM; |
1493 | ioc = (BIG_IOCTL_Command_struct *) | 1532 | ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); |
1494 | kmalloc(sizeof(*ioc), GFP_KERNEL); | ||
1495 | if (!ioc) { | 1533 | if (!ioc) { |
1496 | status = -ENOMEM; | 1534 | status = -ENOMEM; |
1497 | goto cleanup1; | 1535 | goto cleanup1; |
@@ -2653,6 +2691,10 @@ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c) | |||
2653 | c->Request.CDB[0]); | 2691 | c->Request.CDB[0]); |
2654 | return_status = IO_NEEDS_RETRY; | 2692 | return_status = IO_NEEDS_RETRY; |
2655 | break; | 2693 | break; |
2694 | case CMD_UNABORTABLE: | ||
2695 | dev_warn(&h->pdev->dev, "cmd unabortable\n"); | ||
2696 | return_status = IO_ERROR; | ||
2697 | break; | ||
2656 | default: | 2698 | default: |
2657 | dev_warn(&h->pdev->dev, "cmd 0x%02x returned " | 2699 | dev_warn(&h->pdev->dev, "cmd 0x%02x returned " |
2658 | "unknown status %x\n", c->Request.CDB[0], | 2700 | "unknown status %x\n", c->Request.CDB[0], |
@@ -3103,6 +3145,13 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, | |||
3103 | (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? | 3145 | (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
3104 | DID_PASSTHROUGH : DID_ERROR); | 3146 | DID_PASSTHROUGH : DID_ERROR); |
3105 | break; | 3147 | break; |
3148 | case CMD_UNABORTABLE: | ||
3149 | dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd); | ||
3150 | rq->errors = make_status_bytes(SAM_STAT_GOOD, | ||
3151 | cmd->err_info->CommandStatus, DRIVER_OK, | ||
3152 | cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ? | ||
3153 | DID_PASSTHROUGH : DID_ERROR); | ||
3154 | break; | ||
3106 | default: | 3155 | default: |
3107 | dev_warn(&h->pdev->dev, "cmd %p returned " | 3156 | dev_warn(&h->pdev->dev, "cmd %p returned " |
3108 | "unknown status %x\n", cmd, | 3157 | "unknown status %x\n", cmd, |
@@ -3136,10 +3185,13 @@ static inline u32 cciss_tag_to_index(u32 tag) | |||
3136 | return tag >> DIRECT_LOOKUP_SHIFT; | 3185 | return tag >> DIRECT_LOOKUP_SHIFT; |
3137 | } | 3186 | } |
3138 | 3187 | ||
3139 | static inline u32 cciss_tag_discard_error_bits(u32 tag) | 3188 | static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag) |
3140 | { | 3189 | { |
3141 | #define CCISS_ERROR_BITS 0x03 | 3190 | #define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) |
3142 | return tag & ~CCISS_ERROR_BITS; | 3191 | #define CCISS_SIMPLE_ERROR_BITS 0x03 |
3192 | if (likely(h->transMethod & CFGTBL_Trans_Performant)) | ||
3193 | return tag & ~CCISS_PERF_ERROR_BITS; | ||
3194 | return tag & ~CCISS_SIMPLE_ERROR_BITS; | ||
3143 | } | 3195 | } |
3144 | 3196 | ||
3145 | static inline void cciss_mark_tag_indexed(u32 *tag) | 3197 | static inline void cciss_mark_tag_indexed(u32 *tag) |
@@ -3359,7 +3411,7 @@ static inline u32 next_command(ctlr_info_t *h) | |||
3359 | { | 3411 | { |
3360 | u32 a; | 3412 | u32 a; |
3361 | 3413 | ||
3362 | if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) | 3414 | if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) |
3363 | return h->access.command_completed(h); | 3415 | return h->access.command_completed(h); |
3364 | 3416 | ||
3365 | if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { | 3417 | if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { |
@@ -3394,14 +3446,12 @@ static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag) | |||
3394 | /* process completion of a non-indexed command */ | 3446 | /* process completion of a non-indexed command */ |
3395 | static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) | 3447 | static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) |
3396 | { | 3448 | { |
3397 | u32 tag; | ||
3398 | CommandList_struct *c = NULL; | 3449 | CommandList_struct *c = NULL; |
3399 | __u32 busaddr_masked, tag_masked; | 3450 | __u32 busaddr_masked, tag_masked; |
3400 | 3451 | ||
3401 | tag = cciss_tag_discard_error_bits(raw_tag); | 3452 | tag_masked = cciss_tag_discard_error_bits(h, raw_tag); |
3402 | list_for_each_entry(c, &h->cmpQ, list) { | 3453 | list_for_each_entry(c, &h->cmpQ, list) { |
3403 | busaddr_masked = cciss_tag_discard_error_bits(c->busaddr); | 3454 | busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr); |
3404 | tag_masked = cciss_tag_discard_error_bits(tag); | ||
3405 | if (busaddr_masked == tag_masked) { | 3455 | if (busaddr_masked == tag_masked) { |
3406 | finish_cmd(h, c, raw_tag); | 3456 | finish_cmd(h, c, raw_tag); |
3407 | return next_command(h); | 3457 | return next_command(h); |
@@ -3753,7 +3803,8 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h) | |||
3753 | } | 3803 | } |
3754 | } | 3804 | } |
3755 | 3805 | ||
3756 | static __devinit void cciss_enter_performant_mode(ctlr_info_t *h) | 3806 | static __devinit void cciss_enter_performant_mode(ctlr_info_t *h, |
3807 | u32 use_short_tags) | ||
3757 | { | 3808 | { |
3758 | /* This is a bit complicated. There are 8 registers on | 3809 | /* This is a bit complicated. There are 8 registers on |
3759 | * the controller which we write to to tell it 8 different | 3810 | * the controller which we write to to tell it 8 different |
@@ -3808,7 +3859,7 @@ static __devinit void cciss_enter_performant_mode(ctlr_info_t *h) | |||
3808 | writel(0, &h->transtable->RepQCtrAddrHigh32); | 3859 | writel(0, &h->transtable->RepQCtrAddrHigh32); |
3809 | writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); | 3860 | writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); |
3810 | writel(0, &h->transtable->RepQAddr0High32); | 3861 | writel(0, &h->transtable->RepQAddr0High32); |
3811 | writel(CFGTBL_Trans_Performant, | 3862 | writel(CFGTBL_Trans_Performant | use_short_tags, |
3812 | &(h->cfgtable->HostWrite.TransportRequest)); | 3863 | &(h->cfgtable->HostWrite.TransportRequest)); |
3813 | 3864 | ||
3814 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | 3865 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
@@ -3855,7 +3906,8 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h) | |||
3855 | if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL)) | 3906 | if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL)) |
3856 | goto clean_up; | 3907 | goto clean_up; |
3857 | 3908 | ||
3858 | cciss_enter_performant_mode(h); | 3909 | cciss_enter_performant_mode(h, |
3910 | trans_support & CFGTBL_Trans_use_short_tags); | ||
3859 | 3911 | ||
3860 | /* Change the access methods to the performant access methods */ | 3912 | /* Change the access methods to the performant access methods */ |
3861 | h->access = SA5_performant_access; | 3913 | h->access = SA5_performant_access; |
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 579f74918493..554bbd907d14 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h | |||
@@ -222,6 +222,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) | |||
222 | h->ctlr, c->busaddr); | 222 | h->ctlr, c->busaddr); |
223 | #endif /* CCISS_DEBUG */ | 223 | #endif /* CCISS_DEBUG */ |
224 | writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); | 224 | writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); |
225 | readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); | ||
225 | h->commands_outstanding++; | 226 | h->commands_outstanding++; |
226 | if ( h->commands_outstanding > h->max_outstanding) | 227 | if ( h->commands_outstanding > h->max_outstanding) |
227 | h->max_outstanding = h->commands_outstanding; | 228 | h->max_outstanding = h->commands_outstanding; |
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h index 35463d2f0ee7..cd441bef031f 100644 --- a/drivers/block/cciss_cmd.h +++ b/drivers/block/cciss_cmd.h | |||
@@ -56,6 +56,7 @@ | |||
56 | 56 | ||
57 | #define CFGTBL_Trans_Simple 0x00000002l | 57 | #define CFGTBL_Trans_Simple 0x00000002l |
58 | #define CFGTBL_Trans_Performant 0x00000004l | 58 | #define CFGTBL_Trans_Performant 0x00000004l |
59 | #define CFGTBL_Trans_use_short_tags 0x20000000l | ||
59 | 60 | ||
60 | #define CFGTBL_BusType_Ultra2 0x00000001l | 61 | #define CFGTBL_BusType_Ultra2 0x00000001l |
61 | #define CFGTBL_BusType_Ultra3 0x00000002l | 62 | #define CFGTBL_BusType_Ultra3 0x00000002l |
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 727d0225b7d0..df793803f5ae 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c | |||
@@ -824,13 +824,18 @@ static void complete_scsi_command(CommandList_struct *c, int timeout, | |||
824 | break; | 824 | break; |
825 | case CMD_UNSOLICITED_ABORT: | 825 | case CMD_UNSOLICITED_ABORT: |
826 | cmd->result = DID_ABORT << 16; | 826 | cmd->result = DID_ABORT << 16; |
827 | dev_warn(&h->pdev->dev, "%p aborted do to an " | 827 | dev_warn(&h->pdev->dev, "%p aborted due to an " |
828 | "unsolicited abort\n", c); | 828 | "unsolicited abort\n", c); |
829 | break; | 829 | break; |
830 | case CMD_TIMEOUT: | 830 | case CMD_TIMEOUT: |
831 | cmd->result = DID_TIME_OUT << 16; | 831 | cmd->result = DID_TIME_OUT << 16; |
832 | dev_warn(&h->pdev->dev, "%p timedout\n", c); | 832 | dev_warn(&h->pdev->dev, "%p timedout\n", c); |
833 | break; | 833 | break; |
834 | case CMD_UNABORTABLE: | ||
835 | cmd->result = DID_ERROR << 16; | ||
836 | dev_warn(&h->pdev->dev, "c %p command " | ||
837 | "unabortable\n", c); | ||
838 | break; | ||
834 | default: | 839 | default: |
835 | cmd->result = DID_ERROR << 16; | 840 | cmd->result = DID_ERROR << 16; |
836 | dev_warn(&h->pdev->dev, | 841 | dev_warn(&h->pdev->dev, |
@@ -1007,11 +1012,15 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c) | |||
1007 | break; | 1012 | break; |
1008 | case CMD_UNSOLICITED_ABORT: | 1013 | case CMD_UNSOLICITED_ABORT: |
1009 | dev_warn(&h->pdev->dev, | 1014 | dev_warn(&h->pdev->dev, |
1010 | "%p aborted do to an unsolicited abort\n", c); | 1015 | "%p aborted due to an unsolicited abort\n", c); |
1011 | break; | 1016 | break; |
1012 | case CMD_TIMEOUT: | 1017 | case CMD_TIMEOUT: |
1013 | dev_warn(&h->pdev->dev, "%p timedout\n", c); | 1018 | dev_warn(&h->pdev->dev, "%p timedout\n", c); |
1014 | break; | 1019 | break; |
1020 | case CMD_UNABORTABLE: | ||
1021 | dev_warn(&h->pdev->dev, | ||
1022 | "%p unabortable\n", c); | ||
1023 | break; | ||
1015 | default: | 1024 | default: |
1016 | dev_warn(&h->pdev->dev, | 1025 | dev_warn(&h->pdev->dev, |
1017 | "%p returned unknown status %x\n", | 1026 | "%p returned unknown status %x\n", |
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index aca302492ff2..2a1642bc451d 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c | |||
@@ -92,7 +92,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, | |||
92 | bio->bi_end_io = drbd_md_io_complete; | 92 | bio->bi_end_io = drbd_md_io_complete; |
93 | bio->bi_rw = rw; | 93 | bio->bi_rw = rw; |
94 | 94 | ||
95 | if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) | 95 | if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) |
96 | bio_endio(bio, -EIO); | 96 | bio_endio(bio, -EIO); |
97 | else | 97 | else |
98 | submit_bio(rw, bio); | 98 | submit_bio(rw, bio); |
@@ -176,13 +176,17 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr) | |||
176 | struct lc_element *al_ext; | 176 | struct lc_element *al_ext; |
177 | struct lc_element *tmp; | 177 | struct lc_element *tmp; |
178 | unsigned long al_flags = 0; | 178 | unsigned long al_flags = 0; |
179 | int wake; | ||
179 | 180 | ||
180 | spin_lock_irq(&mdev->al_lock); | 181 | spin_lock_irq(&mdev->al_lock); |
181 | tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT); | 182 | tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT); |
182 | if (unlikely(tmp != NULL)) { | 183 | if (unlikely(tmp != NULL)) { |
183 | struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); | 184 | struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); |
184 | if (test_bit(BME_NO_WRITES, &bm_ext->flags)) { | 185 | if (test_bit(BME_NO_WRITES, &bm_ext->flags)) { |
186 | wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags); | ||
185 | spin_unlock_irq(&mdev->al_lock); | 187 | spin_unlock_irq(&mdev->al_lock); |
188 | if (wake) | ||
189 | wake_up(&mdev->al_wait); | ||
186 | return NULL; | 190 | return NULL; |
187 | } | 191 | } |
188 | } | 192 | } |
@@ -258,6 +262,33 @@ void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector) | |||
258 | spin_unlock_irqrestore(&mdev->al_lock, flags); | 262 | spin_unlock_irqrestore(&mdev->al_lock, flags); |
259 | } | 263 | } |
260 | 264 | ||
265 | #if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT) | ||
266 | /* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT | ||
267 | * are still coupled, or assume too much about their relation. | ||
268 | * Code below will not work if this is violated. | ||
269 | * Will be cleaned up with some followup patch. | ||
270 | */ | ||
271 | # error FIXME | ||
272 | #endif | ||
273 | |||
274 | static unsigned int al_extent_to_bm_page(unsigned int al_enr) | ||
275 | { | ||
276 | return al_enr >> | ||
277 | /* bit to page */ | ||
278 | ((PAGE_SHIFT + 3) - | ||
279 | /* al extent number to bit */ | ||
280 | (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)); | ||
281 | } | ||
282 | |||
283 | static unsigned int rs_extent_to_bm_page(unsigned int rs_enr) | ||
284 | { | ||
285 | return rs_enr >> | ||
286 | /* bit to page */ | ||
287 | ((PAGE_SHIFT + 3) - | ||
288 | /* al extent number to bit */ | ||
289 | (BM_EXT_SHIFT - BM_BLOCK_SHIFT)); | ||
290 | } | ||
291 | |||
261 | int | 292 | int |
262 | w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) | 293 | w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) |
263 | { | 294 | { |
@@ -285,7 +316,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) | |||
285 | * For now, we must not write the transaction, | 316 | * For now, we must not write the transaction, |
286 | * if we cannot write out the bitmap of the evicted extent. */ | 317 | * if we cannot write out the bitmap of the evicted extent. */ |
287 | if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE) | 318 | if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE) |
288 | drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT); | 319 | drbd_bm_write_page(mdev, al_extent_to_bm_page(evicted)); |
289 | 320 | ||
290 | /* The bitmap write may have failed, causing a state change. */ | 321 | /* The bitmap write may have failed, causing a state change. */ |
291 | if (mdev->state.disk < D_INCONSISTENT) { | 322 | if (mdev->state.disk < D_INCONSISTENT) { |
@@ -334,7 +365,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) | |||
334 | + mdev->ldev->md.al_offset + mdev->al_tr_pos; | 365 | + mdev->ldev->md.al_offset + mdev->al_tr_pos; |
335 | 366 | ||
336 | if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) | 367 | if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) |
337 | drbd_chk_io_error(mdev, 1, TRUE); | 368 | drbd_chk_io_error(mdev, 1, true); |
338 | 369 | ||
339 | if (++mdev->al_tr_pos > | 370 | if (++mdev->al_tr_pos > |
340 | div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT)) | 371 | div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT)) |
@@ -511,225 +542,6 @@ cancel: | |||
511 | return 1; | 542 | return 1; |
512 | } | 543 | } |
513 | 544 | ||
514 | static void atodb_endio(struct bio *bio, int error) | ||
515 | { | ||
516 | struct drbd_atodb_wait *wc = bio->bi_private; | ||
517 | struct drbd_conf *mdev = wc->mdev; | ||
518 | struct page *page; | ||
519 | int uptodate = bio_flagged(bio, BIO_UPTODATE); | ||
520 | |||
521 | /* strange behavior of some lower level drivers... | ||
522 | * fail the request by clearing the uptodate flag, | ||
523 | * but do not return any error?! */ | ||
524 | if (!error && !uptodate) | ||
525 | error = -EIO; | ||
526 | |||
527 | drbd_chk_io_error(mdev, error, TRUE); | ||
528 | if (error && wc->error == 0) | ||
529 | wc->error = error; | ||
530 | |||
531 | if (atomic_dec_and_test(&wc->count)) | ||
532 | complete(&wc->io_done); | ||
533 | |||
534 | page = bio->bi_io_vec[0].bv_page; | ||
535 | put_page(page); | ||
536 | bio_put(bio); | ||
537 | mdev->bm_writ_cnt++; | ||
538 | put_ldev(mdev); | ||
539 | } | ||
540 | |||
541 | /* sector to word */ | ||
542 | #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) | ||
543 | |||
544 | /* activity log to on disk bitmap -- prepare bio unless that sector | ||
545 | * is already covered by previously prepared bios */ | ||
546 | static int atodb_prepare_unless_covered(struct drbd_conf *mdev, | ||
547 | struct bio **bios, | ||
548 | unsigned int enr, | ||
549 | struct drbd_atodb_wait *wc) __must_hold(local) | ||
550 | { | ||
551 | struct bio *bio; | ||
552 | struct page *page; | ||
553 | sector_t on_disk_sector; | ||
554 | unsigned int page_offset = PAGE_SIZE; | ||
555 | int offset; | ||
556 | int i = 0; | ||
557 | int err = -ENOMEM; | ||
558 | |||
559 | /* We always write aligned, full 4k blocks, | ||
560 | * so we can ignore the logical_block_size (for now) */ | ||
561 | enr &= ~7U; | ||
562 | on_disk_sector = enr + mdev->ldev->md.md_offset | ||
563 | + mdev->ldev->md.bm_offset; | ||
564 | |||
565 | D_ASSERT(!(on_disk_sector & 7U)); | ||
566 | |||
567 | /* Check if that enr is already covered by an already created bio. | ||
568 | * Caution, bios[] is not NULL terminated, | ||
569 | * but only initialized to all NULL. | ||
570 | * For completely scattered activity log, | ||
571 | * the last invocation iterates over all bios, | ||
572 | * and finds the last NULL entry. | ||
573 | */ | ||
574 | while ((bio = bios[i])) { | ||
575 | if (bio->bi_sector == on_disk_sector) | ||
576 | return 0; | ||
577 | i++; | ||
578 | } | ||
579 | /* bios[i] == NULL, the next not yet used slot */ | ||
580 | |||
581 | /* GFP_KERNEL, we are not in the write-out path */ | ||
582 | bio = bio_alloc(GFP_KERNEL, 1); | ||
583 | if (bio == NULL) | ||
584 | return -ENOMEM; | ||
585 | |||
586 | if (i > 0) { | ||
587 | const struct bio_vec *prev_bv = bios[i-1]->bi_io_vec; | ||
588 | page_offset = prev_bv->bv_offset + prev_bv->bv_len; | ||
589 | page = prev_bv->bv_page; | ||
590 | } | ||
591 | if (page_offset == PAGE_SIZE) { | ||
592 | page = alloc_page(__GFP_HIGHMEM); | ||
593 | if (page == NULL) | ||
594 | goto out_bio_put; | ||
595 | page_offset = 0; | ||
596 | } else { | ||
597 | get_page(page); | ||
598 | } | ||
599 | |||
600 | offset = S2W(enr); | ||
601 | drbd_bm_get_lel(mdev, offset, | ||
602 | min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset), | ||
603 | kmap(page) + page_offset); | ||
604 | kunmap(page); | ||
605 | |||
606 | bio->bi_private = wc; | ||
607 | bio->bi_end_io = atodb_endio; | ||
608 | bio->bi_bdev = mdev->ldev->md_bdev; | ||
609 | bio->bi_sector = on_disk_sector; | ||
610 | |||
611 | if (bio_add_page(bio, page, 4096, page_offset) != 4096) | ||
612 | goto out_put_page; | ||
613 | |||
614 | atomic_inc(&wc->count); | ||
615 | /* we already know that we may do this... | ||
616 | * get_ldev_if_state(mdev,D_ATTACHING); | ||
617 | * just get the extra reference, so that the local_cnt reflects | ||
618 | * the number of pending IO requests DRBD at its backing device. | ||
619 | */ | ||
620 | atomic_inc(&mdev->local_cnt); | ||
621 | |||
622 | bios[i] = bio; | ||
623 | |||
624 | return 0; | ||
625 | |||
626 | out_put_page: | ||
627 | err = -EINVAL; | ||
628 | put_page(page); | ||
629 | out_bio_put: | ||
630 | bio_put(bio); | ||
631 | return err; | ||
632 | } | ||
633 | |||
634 | /** | ||
635 | * drbd_al_to_on_disk_bm() - * Writes bitmap parts covered by active AL extents | ||
636 | * @mdev: DRBD device. | ||
637 | * | ||
638 | * Called when we detach (unconfigure) local storage, | ||
639 | * or when we go from R_PRIMARY to R_SECONDARY role. | ||
640 | */ | ||
641 | void drbd_al_to_on_disk_bm(struct drbd_conf *mdev) | ||
642 | { | ||
643 | int i, nr_elements; | ||
644 | unsigned int enr; | ||
645 | struct bio **bios; | ||
646 | struct drbd_atodb_wait wc; | ||
647 | |||
648 | ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING)) | ||
649 | return; /* sorry, I don't have any act_log etc... */ | ||
650 | |||
651 | wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); | ||
652 | |||
653 | nr_elements = mdev->act_log->nr_elements; | ||
654 | |||
655 | /* GFP_KERNEL, we are not in anyone's write-out path */ | ||
656 | bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL); | ||
657 | if (!bios) | ||
658 | goto submit_one_by_one; | ||
659 | |||
660 | atomic_set(&wc.count, 0); | ||
661 | init_completion(&wc.io_done); | ||
662 | wc.mdev = mdev; | ||
663 | wc.error = 0; | ||
664 | |||
665 | for (i = 0; i < nr_elements; i++) { | ||
666 | enr = lc_element_by_index(mdev->act_log, i)->lc_number; | ||
667 | if (enr == LC_FREE) | ||
668 | continue; | ||
669 | /* next statement also does atomic_inc wc.count and local_cnt */ | ||
670 | if (atodb_prepare_unless_covered(mdev, bios, | ||
671 | enr/AL_EXT_PER_BM_SECT, | ||
672 | &wc)) | ||
673 | goto free_bios_submit_one_by_one; | ||
674 | } | ||
675 | |||
676 | /* unnecessary optimization? */ | ||
677 | lc_unlock(mdev->act_log); | ||
678 | wake_up(&mdev->al_wait); | ||
679 | |||
680 | /* all prepared, submit them */ | ||
681 | for (i = 0; i < nr_elements; i++) { | ||
682 | if (bios[i] == NULL) | ||
683 | break; | ||
684 | if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) { | ||
685 | bios[i]->bi_rw = WRITE; | ||
686 | bio_endio(bios[i], -EIO); | ||
687 | } else { | ||
688 | submit_bio(WRITE, bios[i]); | ||
689 | } | ||
690 | } | ||
691 | |||
692 | /* always (try to) flush bitmap to stable storage */ | ||
693 | drbd_md_flush(mdev); | ||
694 | |||
695 | /* In case we did not submit a single IO do not wait for | ||
696 | * them to complete. ( Because we would wait forever here. ) | ||
697 | * | ||
698 | * In case we had IOs and they are already complete, there | ||
699 | * is not point in waiting anyways. | ||
700 | * Therefore this if () ... */ | ||
701 | if (atomic_read(&wc.count)) | ||
702 | wait_for_completion(&wc.io_done); | ||
703 | |||
704 | put_ldev(mdev); | ||
705 | |||
706 | kfree(bios); | ||
707 | return; | ||
708 | |||
709 | free_bios_submit_one_by_one: | ||
710 | /* free everything by calling the endio callback directly. */ | ||
711 | for (i = 0; i < nr_elements && bios[i]; i++) | ||
712 | bio_endio(bios[i], 0); | ||
713 | |||
714 | kfree(bios); | ||
715 | |||
716 | submit_one_by_one: | ||
717 | dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n"); | ||
718 | |||
719 | for (i = 0; i < mdev->act_log->nr_elements; i++) { | ||
720 | enr = lc_element_by_index(mdev->act_log, i)->lc_number; | ||
721 | if (enr == LC_FREE) | ||
722 | continue; | ||
723 | /* Really slow: if we have al-extents 16..19 active, | ||
724 | * sector 4 will be written four times! Synchronous! */ | ||
725 | drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT); | ||
726 | } | ||
727 | |||
728 | lc_unlock(mdev->act_log); | ||
729 | wake_up(&mdev->al_wait); | ||
730 | put_ldev(mdev); | ||
731 | } | ||
732 | |||
733 | /** | 545 | /** |
734 | * drbd_al_apply_to_bm() - Sets the bitmap to diry(1) where covered ba active AL extents | 546 | * drbd_al_apply_to_bm() - Sets the bitmap to diry(1) where covered ba active AL extents |
735 | * @mdev: DRBD device. | 547 | * @mdev: DRBD device. |
@@ -809,7 +621,7 @@ static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused | |||
809 | return 1; | 621 | return 1; |
810 | } | 622 | } |
811 | 623 | ||
812 | drbd_bm_write_sect(mdev, udw->enr); | 624 | drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr)); |
813 | put_ldev(mdev); | 625 | put_ldev(mdev); |
814 | 626 | ||
815 | kfree(udw); | 627 | kfree(udw); |
@@ -889,7 +701,6 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector, | |||
889 | dev_warn(DEV, "Kicking resync_lru element enr=%u " | 701 | dev_warn(DEV, "Kicking resync_lru element enr=%u " |
890 | "out with rs_failed=%d\n", | 702 | "out with rs_failed=%d\n", |
891 | ext->lce.lc_number, ext->rs_failed); | 703 | ext->lce.lc_number, ext->rs_failed); |
892 | set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags); | ||
893 | } | 704 | } |
894 | ext->rs_left = rs_left; | 705 | ext->rs_left = rs_left; |
895 | ext->rs_failed = success ? 0 : count; | 706 | ext->rs_failed = success ? 0 : count; |
@@ -908,7 +719,6 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector, | |||
908 | drbd_queue_work_front(&mdev->data.work, &udw->w); | 719 | drbd_queue_work_front(&mdev->data.work, &udw->w); |
909 | } else { | 720 | } else { |
910 | dev_warn(DEV, "Could not kmalloc an udw\n"); | 721 | dev_warn(DEV, "Could not kmalloc an udw\n"); |
911 | set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags); | ||
912 | } | 722 | } |
913 | } | 723 | } |
914 | } else { | 724 | } else { |
@@ -919,6 +729,22 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector, | |||
919 | } | 729 | } |
920 | } | 730 | } |
921 | 731 | ||
732 | void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go) | ||
733 | { | ||
734 | unsigned long now = jiffies; | ||
735 | unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark]; | ||
736 | int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS; | ||
737 | if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) { | ||
738 | if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go && | ||
739 | mdev->state.conn != C_PAUSED_SYNC_T && | ||
740 | mdev->state.conn != C_PAUSED_SYNC_S) { | ||
741 | mdev->rs_mark_time[next] = now; | ||
742 | mdev->rs_mark_left[next] = still_to_go; | ||
743 | mdev->rs_last_mark = next; | ||
744 | } | ||
745 | } | ||
746 | } | ||
747 | |||
922 | /* clear the bit corresponding to the piece of storage in question: | 748 | /* clear the bit corresponding to the piece of storage in question: |
923 | * size byte of data starting from sector. Only clear a bits of the affected | 749 | * size byte of data starting from sector. Only clear a bits of the affected |
924 | * one ore more _aligned_ BM_BLOCK_SIZE blocks. | 750 | * one ore more _aligned_ BM_BLOCK_SIZE blocks. |
@@ -936,7 +762,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, | |||
936 | int wake_up = 0; | 762 | int wake_up = 0; |
937 | unsigned long flags; | 763 | unsigned long flags; |
938 | 764 | ||
939 | if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { | 765 | if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { |
940 | dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n", | 766 | dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n", |
941 | (unsigned long long)sector, size); | 767 | (unsigned long long)sector, size); |
942 | return; | 768 | return; |
@@ -969,21 +795,9 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, | |||
969 | */ | 795 | */ |
970 | count = drbd_bm_clear_bits(mdev, sbnr, ebnr); | 796 | count = drbd_bm_clear_bits(mdev, sbnr, ebnr); |
971 | if (count && get_ldev(mdev)) { | 797 | if (count && get_ldev(mdev)) { |
972 | unsigned long now = jiffies; | 798 | drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev)); |
973 | unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark]; | ||
974 | int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS; | ||
975 | if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) { | ||
976 | unsigned long tw = drbd_bm_total_weight(mdev); | ||
977 | if (mdev->rs_mark_left[mdev->rs_last_mark] != tw && | ||
978 | mdev->state.conn != C_PAUSED_SYNC_T && | ||
979 | mdev->state.conn != C_PAUSED_SYNC_S) { | ||
980 | mdev->rs_mark_time[next] = now; | ||
981 | mdev->rs_mark_left[next] = tw; | ||
982 | mdev->rs_last_mark = next; | ||
983 | } | ||
984 | } | ||
985 | spin_lock_irqsave(&mdev->al_lock, flags); | 799 | spin_lock_irqsave(&mdev->al_lock, flags); |
986 | drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE); | 800 | drbd_try_clear_on_disk_bm(mdev, sector, count, true); |
987 | spin_unlock_irqrestore(&mdev->al_lock, flags); | 801 | spin_unlock_irqrestore(&mdev->al_lock, flags); |
988 | 802 | ||
989 | /* just wake_up unconditional now, various lc_chaged(), | 803 | /* just wake_up unconditional now, various lc_chaged(), |
@@ -998,27 +812,27 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, | |||
998 | /* | 812 | /* |
999 | * this is intended to set one request worth of data out of sync. | 813 | * this is intended to set one request worth of data out of sync. |
1000 | * affects at least 1 bit, | 814 | * affects at least 1 bit, |
1001 | * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits. | 815 | * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits. |
1002 | * | 816 | * |
1003 | * called by tl_clear and drbd_send_dblock (==drbd_make_request). | 817 | * called by tl_clear and drbd_send_dblock (==drbd_make_request). |
1004 | * so this can be _any_ process. | 818 | * so this can be _any_ process. |
1005 | */ | 819 | */ |
1006 | void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, | 820 | int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, |
1007 | const char *file, const unsigned int line) | 821 | const char *file, const unsigned int line) |
1008 | { | 822 | { |
1009 | unsigned long sbnr, ebnr, lbnr, flags; | 823 | unsigned long sbnr, ebnr, lbnr, flags; |
1010 | sector_t esector, nr_sectors; | 824 | sector_t esector, nr_sectors; |
1011 | unsigned int enr, count; | 825 | unsigned int enr, count = 0; |
1012 | struct lc_element *e; | 826 | struct lc_element *e; |
1013 | 827 | ||
1014 | if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { | 828 | if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { |
1015 | dev_err(DEV, "sector: %llus, size: %d\n", | 829 | dev_err(DEV, "sector: %llus, size: %d\n", |
1016 | (unsigned long long)sector, size); | 830 | (unsigned long long)sector, size); |
1017 | return; | 831 | return 0; |
1018 | } | 832 | } |
1019 | 833 | ||
1020 | if (!get_ldev(mdev)) | 834 | if (!get_ldev(mdev)) |
1021 | return; /* no disk, no metadata, no bitmap to set bits in */ | 835 | return 0; /* no disk, no metadata, no bitmap to set bits in */ |
1022 | 836 | ||
1023 | nr_sectors = drbd_get_capacity(mdev->this_bdev); | 837 | nr_sectors = drbd_get_capacity(mdev->this_bdev); |
1024 | esector = sector + (size >> 9) - 1; | 838 | esector = sector + (size >> 9) - 1; |
@@ -1048,6 +862,8 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, | |||
1048 | 862 | ||
1049 | out: | 863 | out: |
1050 | put_ldev(mdev); | 864 | put_ldev(mdev); |
865 | |||
866 | return count; | ||
1051 | } | 867 | } |
1052 | 868 | ||
1053 | static | 869 | static |
@@ -1128,7 +944,10 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector) | |||
1128 | unsigned int enr = BM_SECT_TO_EXT(sector); | 944 | unsigned int enr = BM_SECT_TO_EXT(sector); |
1129 | struct bm_extent *bm_ext; | 945 | struct bm_extent *bm_ext; |
1130 | int i, sig; | 946 | int i, sig; |
947 | int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait. | ||
948 | 200 times -> 20 seconds. */ | ||
1131 | 949 | ||
950 | retry: | ||
1132 | sig = wait_event_interruptible(mdev->al_wait, | 951 | sig = wait_event_interruptible(mdev->al_wait, |
1133 | (bm_ext = _bme_get(mdev, enr))); | 952 | (bm_ext = _bme_get(mdev, enr))); |
1134 | if (sig) | 953 | if (sig) |
@@ -1139,16 +958,25 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector) | |||
1139 | 958 | ||
1140 | for (i = 0; i < AL_EXT_PER_BM_SECT; i++) { | 959 | for (i = 0; i < AL_EXT_PER_BM_SECT; i++) { |
1141 | sig = wait_event_interruptible(mdev->al_wait, | 960 | sig = wait_event_interruptible(mdev->al_wait, |
1142 | !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i)); | 961 | !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) || |
1143 | if (sig) { | 962 | test_bit(BME_PRIORITY, &bm_ext->flags)); |
963 | |||
964 | if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) { | ||
1144 | spin_lock_irq(&mdev->al_lock); | 965 | spin_lock_irq(&mdev->al_lock); |
1145 | if (lc_put(mdev->resync, &bm_ext->lce) == 0) { | 966 | if (lc_put(mdev->resync, &bm_ext->lce) == 0) { |
1146 | clear_bit(BME_NO_WRITES, &bm_ext->flags); | 967 | bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */ |
1147 | mdev->resync_locked--; | 968 | mdev->resync_locked--; |
1148 | wake_up(&mdev->al_wait); | 969 | wake_up(&mdev->al_wait); |
1149 | } | 970 | } |
1150 | spin_unlock_irq(&mdev->al_lock); | 971 | spin_unlock_irq(&mdev->al_lock); |
1151 | return -EINTR; | 972 | if (sig) |
973 | return -EINTR; | ||
974 | if (schedule_timeout_interruptible(HZ/10)) | ||
975 | return -EINTR; | ||
976 | if (sa && --sa == 0) | ||
977 | dev_warn(DEV,"drbd_rs_begin_io() stepped aside for 20sec." | ||
978 | "Resync stalled?\n"); | ||
979 | goto retry; | ||
1152 | } | 980 | } |
1153 | } | 981 | } |
1154 | set_bit(BME_LOCKED, &bm_ext->flags); | 982 | set_bit(BME_LOCKED, &bm_ext->flags); |
@@ -1291,8 +1119,7 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector) | |||
1291 | } | 1119 | } |
1292 | 1120 | ||
1293 | if (lc_put(mdev->resync, &bm_ext->lce) == 0) { | 1121 | if (lc_put(mdev->resync, &bm_ext->lce) == 0) { |
1294 | clear_bit(BME_LOCKED, &bm_ext->flags); | 1122 | bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */ |
1295 | clear_bit(BME_NO_WRITES, &bm_ext->flags); | ||
1296 | mdev->resync_locked--; | 1123 | mdev->resync_locked--; |
1297 | wake_up(&mdev->al_wait); | 1124 | wake_up(&mdev->al_wait); |
1298 | } | 1125 | } |
@@ -1383,7 +1210,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size) | |||
1383 | sector_t esector, nr_sectors; | 1210 | sector_t esector, nr_sectors; |
1384 | int wake_up = 0; | 1211 | int wake_up = 0; |
1385 | 1212 | ||
1386 | if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { | 1213 | if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { |
1387 | dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n", | 1214 | dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n", |
1388 | (unsigned long long)sector, size); | 1215 | (unsigned long long)sector, size); |
1389 | return; | 1216 | return; |
@@ -1420,7 +1247,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size) | |||
1420 | mdev->rs_failed += count; | 1247 | mdev->rs_failed += count; |
1421 | 1248 | ||
1422 | if (get_ldev(mdev)) { | 1249 | if (get_ldev(mdev)) { |
1423 | drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE); | 1250 | drbd_try_clear_on_disk_bm(mdev, sector, count, false); |
1424 | put_ldev(mdev); | 1251 | put_ldev(mdev); |
1425 | } | 1252 | } |
1426 | 1253 | ||
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 0645ca829a94..76210ba401ac 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c | |||
@@ -28,18 +28,56 @@ | |||
28 | #include <linux/drbd.h> | 28 | #include <linux/drbd.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <asm/kmap_types.h> | 30 | #include <asm/kmap_types.h> |
31 | |||
31 | #include "drbd_int.h" | 32 | #include "drbd_int.h" |
32 | 33 | ||
34 | |||
33 | /* OPAQUE outside this file! | 35 | /* OPAQUE outside this file! |
34 | * interface defined in drbd_int.h | 36 | * interface defined in drbd_int.h |
35 | 37 | ||
36 | * convention: | 38 | * convention: |
37 | * function name drbd_bm_... => used elsewhere, "public". | 39 | * function name drbd_bm_... => used elsewhere, "public". |
38 | * function name bm_... => internal to implementation, "private". | 40 | * function name bm_... => internal to implementation, "private". |
41 | */ | ||
39 | 42 | ||
40 | * Note that since find_first_bit returns int, at the current granularity of | 43 | |
41 | * the bitmap (4KB per byte), this implementation "only" supports up to | 44 | /* |
42 | * 1<<(32+12) == 16 TB... | 45 | * LIMITATIONS: |
46 | * We want to support >= peta byte of backend storage, while for now still using | ||
47 | * a granularity of one bit per 4KiB of storage. | ||
48 | * 1 << 50 bytes backend storage (1 PiB) | ||
49 | * 1 << (50 - 12) bits needed | ||
50 | * 38 --> we need u64 to index and count bits | ||
51 | * 1 << (38 - 3) bitmap bytes needed | ||
52 | * 35 --> we still need u64 to index and count bytes | ||
53 | * (that's 32 GiB of bitmap for 1 PiB storage) | ||
54 | * 1 << (35 - 2) 32bit longs needed | ||
55 | * 33 --> we'd even need u64 to index and count 32bit long words. | ||
56 | * 1 << (35 - 3) 64bit longs needed | ||
57 | * 32 --> we could get away with a 32bit unsigned int to index and count | ||
58 | * 64bit long words, but I rather stay with unsigned long for now. | ||
59 | * We probably should neither count nor point to bytes or long words | ||
60 | * directly, but either by bitnumber, or by page index and offset. | ||
61 | * 1 << (35 - 12) | ||
62 | * 22 --> we need that much 4KiB pages of bitmap. | ||
63 | * 1 << (22 + 3) --> on a 64bit arch, | ||
64 | * we need 32 MiB to store the array of page pointers. | ||
65 | * | ||
66 | * Because I'm lazy, and because the resulting patch was too large, too ugly | ||
67 | * and still incomplete, on 32bit we still "only" support 16 TiB (minus some), | ||
68 | * (1 << 32) bits * 4k storage. | ||
69 | * | ||
70 | |||
71 | * bitmap storage and IO: | ||
72 | * Bitmap is stored little endian on disk, and is kept little endian in | ||
73 | * core memory. Currently we still hold the full bitmap in core as long | ||
74 | * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage | ||
75 | * seems excessive. | ||
76 | * | ||
77 | * We plan to reduce the amount of in-core bitmap pages by pageing them in | ||
78 | * and out against their on-disk location as necessary, but need to make | ||
79 | * sure we don't cause too much meta data IO, and must not deadlock in | ||
80 | * tight memory situations. This needs some more work. | ||
43 | */ | 81 | */ |
44 | 82 | ||
45 | /* | 83 | /* |
@@ -55,13 +93,9 @@ | |||
55 | struct drbd_bitmap { | 93 | struct drbd_bitmap { |
56 | struct page **bm_pages; | 94 | struct page **bm_pages; |
57 | spinlock_t bm_lock; | 95 | spinlock_t bm_lock; |
58 | /* WARNING unsigned long bm_*: | 96 | |
59 | * 32bit number of bit offset is just enough for 512 MB bitmap. | 97 | /* see LIMITATIONS: above */ |
60 | * it will blow up if we make the bitmap bigger... | 98 | |
61 | * not that it makes much sense to have a bitmap that large, | ||
62 | * rather change the granularity to 16k or 64k or something. | ||
63 | * (that implies other problems, however...) | ||
64 | */ | ||
65 | unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */ | 99 | unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */ |
66 | unsigned long bm_bits; | 100 | unsigned long bm_bits; |
67 | size_t bm_words; | 101 | size_t bm_words; |
@@ -69,29 +103,18 @@ struct drbd_bitmap { | |||
69 | sector_t bm_dev_capacity; | 103 | sector_t bm_dev_capacity; |
70 | struct mutex bm_change; /* serializes resize operations */ | 104 | struct mutex bm_change; /* serializes resize operations */ |
71 | 105 | ||
72 | atomic_t bm_async_io; | 106 | wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */ |
73 | wait_queue_head_t bm_io_wait; | ||
74 | 107 | ||
75 | unsigned long bm_flags; | 108 | enum bm_flag bm_flags; |
76 | 109 | ||
77 | /* debugging aid, in case we are still racy somewhere */ | 110 | /* debugging aid, in case we are still racy somewhere */ |
78 | char *bm_why; | 111 | char *bm_why; |
79 | struct task_struct *bm_task; | 112 | struct task_struct *bm_task; |
80 | }; | 113 | }; |
81 | 114 | ||
82 | /* definition of bits in bm_flags */ | ||
83 | #define BM_LOCKED 0 | ||
84 | #define BM_MD_IO_ERROR 1 | ||
85 | #define BM_P_VMALLOCED 2 | ||
86 | |||
87 | static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | 115 | static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, |
88 | unsigned long e, int val, const enum km_type km); | 116 | unsigned long e, int val, const enum km_type km); |
89 | 117 | ||
90 | static int bm_is_locked(struct drbd_bitmap *b) | ||
91 | { | ||
92 | return test_bit(BM_LOCKED, &b->bm_flags); | ||
93 | } | ||
94 | |||
95 | #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) | 118 | #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) |
96 | static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) | 119 | static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) |
97 | { | 120 | { |
@@ -108,7 +131,7 @@ static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) | |||
108 | b->bm_task == mdev->worker.task ? "worker" : "?"); | 131 | b->bm_task == mdev->worker.task ? "worker" : "?"); |
109 | } | 132 | } |
110 | 133 | ||
111 | void drbd_bm_lock(struct drbd_conf *mdev, char *why) | 134 | void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags) |
112 | { | 135 | { |
113 | struct drbd_bitmap *b = mdev->bitmap; | 136 | struct drbd_bitmap *b = mdev->bitmap; |
114 | int trylock_failed; | 137 | int trylock_failed; |
@@ -131,8 +154,9 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why) | |||
131 | b->bm_task == mdev->worker.task ? "worker" : "?"); | 154 | b->bm_task == mdev->worker.task ? "worker" : "?"); |
132 | mutex_lock(&b->bm_change); | 155 | mutex_lock(&b->bm_change); |
133 | } | 156 | } |
134 | if (__test_and_set_bit(BM_LOCKED, &b->bm_flags)) | 157 | if (BM_LOCKED_MASK & b->bm_flags) |
135 | dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); | 158 | dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); |
159 | b->bm_flags |= flags & BM_LOCKED_MASK; | ||
136 | 160 | ||
137 | b->bm_why = why; | 161 | b->bm_why = why; |
138 | b->bm_task = current; | 162 | b->bm_task = current; |
@@ -146,31 +170,137 @@ void drbd_bm_unlock(struct drbd_conf *mdev) | |||
146 | return; | 170 | return; |
147 | } | 171 | } |
148 | 172 | ||
149 | if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags)) | 173 | if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags)) |
150 | dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n"); | 174 | dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n"); |
151 | 175 | ||
176 | b->bm_flags &= ~BM_LOCKED_MASK; | ||
152 | b->bm_why = NULL; | 177 | b->bm_why = NULL; |
153 | b->bm_task = NULL; | 178 | b->bm_task = NULL; |
154 | mutex_unlock(&b->bm_change); | 179 | mutex_unlock(&b->bm_change); |
155 | } | 180 | } |
156 | 181 | ||
157 | /* word offset to long pointer */ | 182 | /* we store some "meta" info about our pages in page->private */ |
158 | static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset, const enum km_type km) | 183 | /* at a granularity of 4k storage per bitmap bit: |
184 | * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks | ||
185 | * 1<<38 bits, | ||
186 | * 1<<23 4k bitmap pages. | ||
187 | * Use 24 bits as page index, covers 2 peta byte storage | ||
188 | * at a granularity of 4k per bit. | ||
189 | * Used to report the failed page idx on io error from the endio handlers. | ||
190 | */ | ||
191 | #define BM_PAGE_IDX_MASK ((1UL<<24)-1) | ||
192 | /* this page is currently read in, or written back */ | ||
193 | #define BM_PAGE_IO_LOCK 31 | ||
194 | /* if there has been an IO error for this page */ | ||
195 | #define BM_PAGE_IO_ERROR 30 | ||
196 | /* this is to be able to intelligently skip disk IO, | ||
197 | * set if bits have been set since last IO. */ | ||
198 | #define BM_PAGE_NEED_WRITEOUT 29 | ||
199 | /* to mark for lazy writeout once syncer cleared all clearable bits, | ||
200 | * we if bits have been cleared since last IO. */ | ||
201 | #define BM_PAGE_LAZY_WRITEOUT 28 | ||
202 | |||
203 | /* store_page_idx uses non-atomic assingment. It is only used directly after | ||
204 | * allocating the page. All other bm_set_page_* and bm_clear_page_* need to | ||
205 | * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap | ||
206 | * changes) may happen from various contexts, and wait_on_bit/wake_up_bit | ||
207 | * requires it all to be atomic as well. */ | ||
208 | static void bm_store_page_idx(struct page *page, unsigned long idx) | ||
209 | { | ||
210 | BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK)); | ||
211 | page_private(page) |= idx; | ||
212 | } | ||
213 | |||
214 | static unsigned long bm_page_to_idx(struct page *page) | ||
159 | { | 215 | { |
160 | struct page *page; | 216 | return page_private(page) & BM_PAGE_IDX_MASK; |
161 | unsigned long page_nr; | 217 | } |
218 | |||
219 | /* As is very unlikely that the same page is under IO from more than one | ||
220 | * context, we can get away with a bit per page and one wait queue per bitmap. | ||
221 | */ | ||
222 | static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr) | ||
223 | { | ||
224 | struct drbd_bitmap *b = mdev->bitmap; | ||
225 | void *addr = &page_private(b->bm_pages[page_nr]); | ||
226 | wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); | ||
227 | } | ||
228 | |||
229 | static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr) | ||
230 | { | ||
231 | struct drbd_bitmap *b = mdev->bitmap; | ||
232 | void *addr = &page_private(b->bm_pages[page_nr]); | ||
233 | clear_bit(BM_PAGE_IO_LOCK, addr); | ||
234 | smp_mb__after_clear_bit(); | ||
235 | wake_up(&mdev->bitmap->bm_io_wait); | ||
236 | } | ||
237 | |||
238 | /* set _before_ submit_io, so it may be reset due to being changed | ||
239 | * while this page is in flight... will get submitted later again */ | ||
240 | static void bm_set_page_unchanged(struct page *page) | ||
241 | { | ||
242 | /* use cmpxchg? */ | ||
243 | clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); | ||
244 | clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); | ||
245 | } | ||
162 | 246 | ||
247 | static void bm_set_page_need_writeout(struct page *page) | ||
248 | { | ||
249 | set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); | ||
250 | } | ||
251 | |||
252 | static int bm_test_page_unchanged(struct page *page) | ||
253 | { | ||
254 | volatile const unsigned long *addr = &page_private(page); | ||
255 | return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0; | ||
256 | } | ||
257 | |||
258 | static void bm_set_page_io_err(struct page *page) | ||
259 | { | ||
260 | set_bit(BM_PAGE_IO_ERROR, &page_private(page)); | ||
261 | } | ||
262 | |||
263 | static void bm_clear_page_io_err(struct page *page) | ||
264 | { | ||
265 | clear_bit(BM_PAGE_IO_ERROR, &page_private(page)); | ||
266 | } | ||
267 | |||
268 | static void bm_set_page_lazy_writeout(struct page *page) | ||
269 | { | ||
270 | set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); | ||
271 | } | ||
272 | |||
273 | static int bm_test_page_lazy_writeout(struct page *page) | ||
274 | { | ||
275 | return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); | ||
276 | } | ||
277 | |||
278 | /* on a 32bit box, this would allow for exactly (2<<38) bits. */ | ||
279 | static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr) | ||
280 | { | ||
163 | /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */ | 281 | /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */ |
164 | page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3); | 282 | unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3); |
165 | BUG_ON(page_nr >= b->bm_number_of_pages); | 283 | BUG_ON(page_nr >= b->bm_number_of_pages); |
166 | page = b->bm_pages[page_nr]; | 284 | return page_nr; |
285 | } | ||
167 | 286 | ||
287 | static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) | ||
288 | { | ||
289 | /* page_nr = (bitnr/8) >> PAGE_SHIFT; */ | ||
290 | unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3); | ||
291 | BUG_ON(page_nr >= b->bm_number_of_pages); | ||
292 | return page_nr; | ||
293 | } | ||
294 | |||
295 | static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km) | ||
296 | { | ||
297 | struct page *page = b->bm_pages[idx]; | ||
168 | return (unsigned long *) kmap_atomic(page, km); | 298 | return (unsigned long *) kmap_atomic(page, km); |
169 | } | 299 | } |
170 | 300 | ||
171 | static unsigned long * bm_map_paddr(struct drbd_bitmap *b, unsigned long offset) | 301 | static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) |
172 | { | 302 | { |
173 | return __bm_map_paddr(b, offset, KM_IRQ1); | 303 | return __bm_map_pidx(b, idx, KM_IRQ1); |
174 | } | 304 | } |
175 | 305 | ||
176 | static void __bm_unmap(unsigned long *p_addr, const enum km_type km) | 306 | static void __bm_unmap(unsigned long *p_addr, const enum km_type km) |
@@ -202,6 +332,7 @@ static void bm_unmap(unsigned long *p_addr) | |||
202 | * to be able to report device specific. | 332 | * to be able to report device specific. |
203 | */ | 333 | */ |
204 | 334 | ||
335 | |||
205 | static void bm_free_pages(struct page **pages, unsigned long number) | 336 | static void bm_free_pages(struct page **pages, unsigned long number) |
206 | { | 337 | { |
207 | unsigned long i; | 338 | unsigned long i; |
@@ -269,6 +400,9 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) | |||
269 | bm_vk_free(new_pages, vmalloced); | 400 | bm_vk_free(new_pages, vmalloced); |
270 | return NULL; | 401 | return NULL; |
271 | } | 402 | } |
403 | /* we want to know which page it is | ||
404 | * from the endio handlers */ | ||
405 | bm_store_page_idx(page, i); | ||
272 | new_pages[i] = page; | 406 | new_pages[i] = page; |
273 | } | 407 | } |
274 | } else { | 408 | } else { |
@@ -280,9 +414,9 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) | |||
280 | } | 414 | } |
281 | 415 | ||
282 | if (vmalloced) | 416 | if (vmalloced) |
283 | set_bit(BM_P_VMALLOCED, &b->bm_flags); | 417 | b->bm_flags |= BM_P_VMALLOCED; |
284 | else | 418 | else |
285 | clear_bit(BM_P_VMALLOCED, &b->bm_flags); | 419 | b->bm_flags &= ~BM_P_VMALLOCED; |
286 | 420 | ||
287 | return new_pages; | 421 | return new_pages; |
288 | } | 422 | } |
@@ -319,7 +453,7 @@ void drbd_bm_cleanup(struct drbd_conf *mdev) | |||
319 | { | 453 | { |
320 | ERR_IF (!mdev->bitmap) return; | 454 | ERR_IF (!mdev->bitmap) return; |
321 | bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages); | 455 | bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages); |
322 | bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags)); | 456 | bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags)); |
323 | kfree(mdev->bitmap); | 457 | kfree(mdev->bitmap); |
324 | mdev->bitmap = NULL; | 458 | mdev->bitmap = NULL; |
325 | } | 459 | } |
@@ -329,22 +463,39 @@ void drbd_bm_cleanup(struct drbd_conf *mdev) | |||
329 | * this masks out the remaining bits. | 463 | * this masks out the remaining bits. |
330 | * Returns the number of bits cleared. | 464 | * Returns the number of bits cleared. |
331 | */ | 465 | */ |
466 | #define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3)) | ||
467 | #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1) | ||
468 | #define BITS_PER_LONG_MASK (BITS_PER_LONG - 1) | ||
332 | static int bm_clear_surplus(struct drbd_bitmap *b) | 469 | static int bm_clear_surplus(struct drbd_bitmap *b) |
333 | { | 470 | { |
334 | const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1; | 471 | unsigned long mask; |
335 | size_t w = b->bm_bits >> LN2_BPL; | ||
336 | int cleared = 0; | ||
337 | unsigned long *p_addr, *bm; | 472 | unsigned long *p_addr, *bm; |
473 | int tmp; | ||
474 | int cleared = 0; | ||
338 | 475 | ||
339 | p_addr = bm_map_paddr(b, w); | 476 | /* number of bits modulo bits per page */ |
340 | bm = p_addr + MLPP(w); | 477 | tmp = (b->bm_bits & BITS_PER_PAGE_MASK); |
341 | if (w < b->bm_words) { | 478 | /* mask the used bits of the word containing the last bit */ |
479 | mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; | ||
480 | /* bitmap is always stored little endian, | ||
481 | * on disk and in core memory alike */ | ||
482 | mask = cpu_to_lel(mask); | ||
483 | |||
484 | p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); | ||
485 | bm = p_addr + (tmp/BITS_PER_LONG); | ||
486 | if (mask) { | ||
487 | /* If mask != 0, we are not exactly aligned, so bm now points | ||
488 | * to the long containing the last bit. | ||
489 | * If mask == 0, bm already points to the word immediately | ||
490 | * after the last (long word aligned) bit. */ | ||
342 | cleared = hweight_long(*bm & ~mask); | 491 | cleared = hweight_long(*bm & ~mask); |
343 | *bm &= mask; | 492 | *bm &= mask; |
344 | w++; bm++; | 493 | bm++; |
345 | } | 494 | } |
346 | 495 | ||
347 | if (w < b->bm_words) { | 496 | if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { |
497 | /* on a 32bit arch, we may need to zero out | ||
498 | * a padding long to align with a 64bit remote */ | ||
348 | cleared += hweight_long(*bm); | 499 | cleared += hweight_long(*bm); |
349 | *bm = 0; | 500 | *bm = 0; |
350 | } | 501 | } |
@@ -354,66 +505,75 @@ static int bm_clear_surplus(struct drbd_bitmap *b) | |||
354 | 505 | ||
355 | static void bm_set_surplus(struct drbd_bitmap *b) | 506 | static void bm_set_surplus(struct drbd_bitmap *b) |
356 | { | 507 | { |
357 | const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1; | 508 | unsigned long mask; |
358 | size_t w = b->bm_bits >> LN2_BPL; | ||
359 | unsigned long *p_addr, *bm; | 509 | unsigned long *p_addr, *bm; |
360 | 510 | int tmp; | |
361 | p_addr = bm_map_paddr(b, w); | 511 | |
362 | bm = p_addr + MLPP(w); | 512 | /* number of bits modulo bits per page */ |
363 | if (w < b->bm_words) { | 513 | tmp = (b->bm_bits & BITS_PER_PAGE_MASK); |
514 | /* mask the used bits of the word containing the last bit */ | ||
515 | mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; | ||
516 | /* bitmap is always stored little endian, | ||
517 | * on disk and in core memory alike */ | ||
518 | mask = cpu_to_lel(mask); | ||
519 | |||
520 | p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); | ||
521 | bm = p_addr + (tmp/BITS_PER_LONG); | ||
522 | if (mask) { | ||
523 | /* If mask != 0, we are not exactly aligned, so bm now points | ||
524 | * to the long containing the last bit. | ||
525 | * If mask == 0, bm already points to the word immediately | ||
526 | * after the last (long word aligned) bit. */ | ||
364 | *bm |= ~mask; | 527 | *bm |= ~mask; |
365 | bm++; w++; | 528 | bm++; |
366 | } | 529 | } |
367 | 530 | ||
368 | if (w < b->bm_words) { | 531 | if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { |
369 | *bm = ~(0UL); | 532 | /* on a 32bit arch, we may need to zero out |
533 | * a padding long to align with a 64bit remote */ | ||
534 | *bm = ~0UL; | ||
370 | } | 535 | } |
371 | bm_unmap(p_addr); | 536 | bm_unmap(p_addr); |
372 | } | 537 | } |
373 | 538 | ||
374 | static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endian) | 539 | /* you better not modify the bitmap while this is running, |
540 | * or its results will be stale */ | ||
541 | static unsigned long bm_count_bits(struct drbd_bitmap *b) | ||
375 | { | 542 | { |
376 | unsigned long *p_addr, *bm, offset = 0; | 543 | unsigned long *p_addr; |
377 | unsigned long bits = 0; | 544 | unsigned long bits = 0; |
378 | unsigned long i, do_now; | 545 | unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1; |
379 | 546 | int idx, i, last_word; | |
380 | while (offset < b->bm_words) { | 547 | |
381 | i = do_now = min_t(size_t, b->bm_words-offset, LWPP); | 548 | /* all but last page */ |
382 | p_addr = __bm_map_paddr(b, offset, KM_USER0); | 549 | for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) { |
383 | bm = p_addr + MLPP(offset); | 550 | p_addr = __bm_map_pidx(b, idx, KM_USER0); |
384 | while (i--) { | 551 | for (i = 0; i < LWPP; i++) |
385 | #ifndef __LITTLE_ENDIAN | 552 | bits += hweight_long(p_addr[i]); |
386 | if (swap_endian) | ||
387 | *bm = lel_to_cpu(*bm); | ||
388 | #endif | ||
389 | bits += hweight_long(*bm++); | ||
390 | } | ||
391 | __bm_unmap(p_addr, KM_USER0); | 553 | __bm_unmap(p_addr, KM_USER0); |
392 | offset += do_now; | ||
393 | cond_resched(); | 554 | cond_resched(); |
394 | } | 555 | } |
395 | 556 | /* last (or only) page */ | |
557 | last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL; | ||
558 | p_addr = __bm_map_pidx(b, idx, KM_USER0); | ||
559 | for (i = 0; i < last_word; i++) | ||
560 | bits += hweight_long(p_addr[i]); | ||
561 | p_addr[last_word] &= cpu_to_lel(mask); | ||
562 | bits += hweight_long(p_addr[last_word]); | ||
563 | /* 32bit arch, may have an unused padding long */ | ||
564 | if (BITS_PER_LONG == 32 && (last_word & 1) == 0) | ||
565 | p_addr[last_word+1] = 0; | ||
566 | __bm_unmap(p_addr, KM_USER0); | ||
396 | return bits; | 567 | return bits; |
397 | } | 568 | } |
398 | 569 | ||
399 | static unsigned long bm_count_bits(struct drbd_bitmap *b) | ||
400 | { | ||
401 | return __bm_count_bits(b, 0); | ||
402 | } | ||
403 | |||
404 | static unsigned long bm_count_bits_swap_endian(struct drbd_bitmap *b) | ||
405 | { | ||
406 | return __bm_count_bits(b, 1); | ||
407 | } | ||
408 | |||
409 | /* offset and len in long words.*/ | 570 | /* offset and len in long words.*/ |
410 | static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) | 571 | static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) |
411 | { | 572 | { |
412 | unsigned long *p_addr, *bm; | 573 | unsigned long *p_addr, *bm; |
574 | unsigned int idx; | ||
413 | size_t do_now, end; | 575 | size_t do_now, end; |
414 | 576 | ||
415 | #define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512) | ||
416 | |||
417 | end = offset + len; | 577 | end = offset + len; |
418 | 578 | ||
419 | if (end > b->bm_words) { | 579 | if (end > b->bm_words) { |
@@ -423,15 +583,16 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) | |||
423 | 583 | ||
424 | while (offset < end) { | 584 | while (offset < end) { |
425 | do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset; | 585 | do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset; |
426 | p_addr = bm_map_paddr(b, offset); | 586 | idx = bm_word_to_page_idx(b, offset); |
587 | p_addr = bm_map_pidx(b, idx); | ||
427 | bm = p_addr + MLPP(offset); | 588 | bm = p_addr + MLPP(offset); |
428 | if (bm+do_now > p_addr + LWPP) { | 589 | if (bm+do_now > p_addr + LWPP) { |
429 | printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n", | 590 | printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n", |
430 | p_addr, bm, (int)do_now); | 591 | p_addr, bm, (int)do_now); |
431 | break; /* breaks to after catch_oob_access_end() only! */ | 592 | } else |
432 | } | 593 | memset(bm, c, do_now * sizeof(long)); |
433 | memset(bm, c, do_now * sizeof(long)); | ||
434 | bm_unmap(p_addr); | 594 | bm_unmap(p_addr); |
595 | bm_set_page_need_writeout(b->bm_pages[idx]); | ||
435 | offset += do_now; | 596 | offset += do_now; |
436 | } | 597 | } |
437 | } | 598 | } |
@@ -447,7 +608,7 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) | |||
447 | int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) | 608 | int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) |
448 | { | 609 | { |
449 | struct drbd_bitmap *b = mdev->bitmap; | 610 | struct drbd_bitmap *b = mdev->bitmap; |
450 | unsigned long bits, words, owords, obits, *p_addr, *bm; | 611 | unsigned long bits, words, owords, obits; |
451 | unsigned long want, have, onpages; /* number of pages */ | 612 | unsigned long want, have, onpages; /* number of pages */ |
452 | struct page **npages, **opages = NULL; | 613 | struct page **npages, **opages = NULL; |
453 | int err = 0, growing; | 614 | int err = 0, growing; |
@@ -455,7 +616,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) | |||
455 | 616 | ||
456 | ERR_IF(!b) return -ENOMEM; | 617 | ERR_IF(!b) return -ENOMEM; |
457 | 618 | ||
458 | drbd_bm_lock(mdev, "resize"); | 619 | drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK); |
459 | 620 | ||
460 | dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n", | 621 | dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n", |
461 | (unsigned long long)capacity); | 622 | (unsigned long long)capacity); |
@@ -463,7 +624,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) | |||
463 | if (capacity == b->bm_dev_capacity) | 624 | if (capacity == b->bm_dev_capacity) |
464 | goto out; | 625 | goto out; |
465 | 626 | ||
466 | opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags); | 627 | opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags); |
467 | 628 | ||
468 | if (capacity == 0) { | 629 | if (capacity == 0) { |
469 | spin_lock_irq(&b->bm_lock); | 630 | spin_lock_irq(&b->bm_lock); |
@@ -491,18 +652,23 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) | |||
491 | words = ALIGN(bits, 64) >> LN2_BPL; | 652 | words = ALIGN(bits, 64) >> LN2_BPL; |
492 | 653 | ||
493 | if (get_ldev(mdev)) { | 654 | if (get_ldev(mdev)) { |
494 | D_ASSERT((u64)bits <= (((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12)); | 655 | u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12; |
495 | put_ldev(mdev); | 656 | put_ldev(mdev); |
657 | if (bits > bits_on_disk) { | ||
658 | dev_info(DEV, "bits = %lu\n", bits); | ||
659 | dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk); | ||
660 | err = -ENOSPC; | ||
661 | goto out; | ||
662 | } | ||
496 | } | 663 | } |
497 | 664 | ||
498 | /* one extra long to catch off by one errors */ | 665 | want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT; |
499 | want = ALIGN((words+1)*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT; | ||
500 | have = b->bm_number_of_pages; | 666 | have = b->bm_number_of_pages; |
501 | if (want == have) { | 667 | if (want == have) { |
502 | D_ASSERT(b->bm_pages != NULL); | 668 | D_ASSERT(b->bm_pages != NULL); |
503 | npages = b->bm_pages; | 669 | npages = b->bm_pages; |
504 | } else { | 670 | } else { |
505 | if (FAULT_ACTIVE(mdev, DRBD_FAULT_BM_ALLOC)) | 671 | if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC)) |
506 | npages = NULL; | 672 | npages = NULL; |
507 | else | 673 | else |
508 | npages = bm_realloc_pages(b, want); | 674 | npages = bm_realloc_pages(b, want); |
@@ -542,11 +708,6 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) | |||
542 | bm_free_pages(opages + want, have - want); | 708 | bm_free_pages(opages + want, have - want); |
543 | } | 709 | } |
544 | 710 | ||
545 | p_addr = bm_map_paddr(b, words); | ||
546 | bm = p_addr + MLPP(words); | ||
547 | *bm = DRBD_MAGIC; | ||
548 | bm_unmap(p_addr); | ||
549 | |||
550 | (void)bm_clear_surplus(b); | 711 | (void)bm_clear_surplus(b); |
551 | 712 | ||
552 | spin_unlock_irq(&b->bm_lock); | 713 | spin_unlock_irq(&b->bm_lock); |
@@ -554,7 +715,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) | |||
554 | bm_vk_free(opages, opages_vmalloced); | 715 | bm_vk_free(opages, opages_vmalloced); |
555 | if (!growing) | 716 | if (!growing) |
556 | b->bm_set = bm_count_bits(b); | 717 | b->bm_set = bm_count_bits(b); |
557 | dev_info(DEV, "resync bitmap: bits=%lu words=%lu\n", bits, words); | 718 | dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want); |
558 | 719 | ||
559 | out: | 720 | out: |
560 | drbd_bm_unlock(mdev); | 721 | drbd_bm_unlock(mdev); |
@@ -624,6 +785,7 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, | |||
624 | struct drbd_bitmap *b = mdev->bitmap; | 785 | struct drbd_bitmap *b = mdev->bitmap; |
625 | unsigned long *p_addr, *bm; | 786 | unsigned long *p_addr, *bm; |
626 | unsigned long word, bits; | 787 | unsigned long word, bits; |
788 | unsigned int idx; | ||
627 | size_t end, do_now; | 789 | size_t end, do_now; |
628 | 790 | ||
629 | end = offset + number; | 791 | end = offset + number; |
@@ -638,16 +800,18 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, | |||
638 | spin_lock_irq(&b->bm_lock); | 800 | spin_lock_irq(&b->bm_lock); |
639 | while (offset < end) { | 801 | while (offset < end) { |
640 | do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; | 802 | do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; |
641 | p_addr = bm_map_paddr(b, offset); | 803 | idx = bm_word_to_page_idx(b, offset); |
804 | p_addr = bm_map_pidx(b, idx); | ||
642 | bm = p_addr + MLPP(offset); | 805 | bm = p_addr + MLPP(offset); |
643 | offset += do_now; | 806 | offset += do_now; |
644 | while (do_now--) { | 807 | while (do_now--) { |
645 | bits = hweight_long(*bm); | 808 | bits = hweight_long(*bm); |
646 | word = *bm | lel_to_cpu(*buffer++); | 809 | word = *bm | *buffer++; |
647 | *bm++ = word; | 810 | *bm++ = word; |
648 | b->bm_set += hweight_long(word) - bits; | 811 | b->bm_set += hweight_long(word) - bits; |
649 | } | 812 | } |
650 | bm_unmap(p_addr); | 813 | bm_unmap(p_addr); |
814 | bm_set_page_need_writeout(b->bm_pages[idx]); | ||
651 | } | 815 | } |
652 | /* with 32bit <-> 64bit cross-platform connect | 816 | /* with 32bit <-> 64bit cross-platform connect |
653 | * this is only correct for current usage, | 817 | * this is only correct for current usage, |
@@ -656,7 +820,6 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, | |||
656 | */ | 820 | */ |
657 | if (end == b->bm_words) | 821 | if (end == b->bm_words) |
658 | b->bm_set -= bm_clear_surplus(b); | 822 | b->bm_set -= bm_clear_surplus(b); |
659 | |||
660 | spin_unlock_irq(&b->bm_lock); | 823 | spin_unlock_irq(&b->bm_lock); |
661 | } | 824 | } |
662 | 825 | ||
@@ -686,11 +849,11 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number, | |||
686 | else { | 849 | else { |
687 | while (offset < end) { | 850 | while (offset < end) { |
688 | do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; | 851 | do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; |
689 | p_addr = bm_map_paddr(b, offset); | 852 | p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset)); |
690 | bm = p_addr + MLPP(offset); | 853 | bm = p_addr + MLPP(offset); |
691 | offset += do_now; | 854 | offset += do_now; |
692 | while (do_now--) | 855 | while (do_now--) |
693 | *buffer++ = cpu_to_lel(*bm++); | 856 | *buffer++ = *bm++; |
694 | bm_unmap(p_addr); | 857 | bm_unmap(p_addr); |
695 | } | 858 | } |
696 | } | 859 | } |
@@ -724,9 +887,22 @@ void drbd_bm_clear_all(struct drbd_conf *mdev) | |||
724 | spin_unlock_irq(&b->bm_lock); | 887 | spin_unlock_irq(&b->bm_lock); |
725 | } | 888 | } |
726 | 889 | ||
890 | struct bm_aio_ctx { | ||
891 | struct drbd_conf *mdev; | ||
892 | atomic_t in_flight; | ||
893 | struct completion done; | ||
894 | unsigned flags; | ||
895 | #define BM_AIO_COPY_PAGES 1 | ||
896 | int error; | ||
897 | }; | ||
898 | |||
899 | /* bv_page may be a copy, or may be the original */ | ||
727 | static void bm_async_io_complete(struct bio *bio, int error) | 900 | static void bm_async_io_complete(struct bio *bio, int error) |
728 | { | 901 | { |
729 | struct drbd_bitmap *b = bio->bi_private; | 902 | struct bm_aio_ctx *ctx = bio->bi_private; |
903 | struct drbd_conf *mdev = ctx->mdev; | ||
904 | struct drbd_bitmap *b = mdev->bitmap; | ||
905 | unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); | ||
730 | int uptodate = bio_flagged(bio, BIO_UPTODATE); | 906 | int uptodate = bio_flagged(bio, BIO_UPTODATE); |
731 | 907 | ||
732 | 908 | ||
@@ -737,38 +913,83 @@ static void bm_async_io_complete(struct bio *bio, int error) | |||
737 | if (!error && !uptodate) | 913 | if (!error && !uptodate) |
738 | error = -EIO; | 914 | error = -EIO; |
739 | 915 | ||
916 | if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 && | ||
917 | !bm_test_page_unchanged(b->bm_pages[idx])) | ||
918 | dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx); | ||
919 | |||
740 | if (error) { | 920 | if (error) { |
741 | /* doh. what now? | 921 | /* ctx error will hold the completed-last non-zero error code, |
742 | * for now, set all bits, and flag MD_IO_ERROR */ | 922 | * in case error codes differ. */ |
743 | __set_bit(BM_MD_IO_ERROR, &b->bm_flags); | 923 | ctx->error = error; |
924 | bm_set_page_io_err(b->bm_pages[idx]); | ||
925 | /* Not identical to on disk version of it. | ||
926 | * Is BM_PAGE_IO_ERROR enough? */ | ||
927 | if (__ratelimit(&drbd_ratelimit_state)) | ||
928 | dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n", | ||
929 | error, idx); | ||
930 | } else { | ||
931 | bm_clear_page_io_err(b->bm_pages[idx]); | ||
932 | dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx); | ||
744 | } | 933 | } |
745 | if (atomic_dec_and_test(&b->bm_async_io)) | 934 | |
746 | wake_up(&b->bm_io_wait); | 935 | bm_page_unlock_io(mdev, idx); |
936 | |||
937 | /* FIXME give back to page pool */ | ||
938 | if (ctx->flags & BM_AIO_COPY_PAGES) | ||
939 | put_page(bio->bi_io_vec[0].bv_page); | ||
747 | 940 | ||
748 | bio_put(bio); | 941 | bio_put(bio); |
942 | |||
943 | if (atomic_dec_and_test(&ctx->in_flight)) | ||
944 | complete(&ctx->done); | ||
749 | } | 945 | } |
750 | 946 | ||
751 | static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int page_nr, int rw) __must_hold(local) | 947 | static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local) |
752 | { | 948 | { |
753 | /* we are process context. we always get a bio */ | 949 | /* we are process context. we always get a bio */ |
754 | struct bio *bio = bio_alloc(GFP_KERNEL, 1); | 950 | struct bio *bio = bio_alloc(GFP_KERNEL, 1); |
951 | struct drbd_conf *mdev = ctx->mdev; | ||
952 | struct drbd_bitmap *b = mdev->bitmap; | ||
953 | struct page *page; | ||
755 | unsigned int len; | 954 | unsigned int len; |
955 | |||
756 | sector_t on_disk_sector = | 956 | sector_t on_disk_sector = |
757 | mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset; | 957 | mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset; |
758 | on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9); | 958 | on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9); |
759 | 959 | ||
760 | /* this might happen with very small | 960 | /* this might happen with very small |
761 | * flexible external meta data device */ | 961 | * flexible external meta data device, |
962 | * or with PAGE_SIZE > 4k */ | ||
762 | len = min_t(unsigned int, PAGE_SIZE, | 963 | len = min_t(unsigned int, PAGE_SIZE, |
763 | (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9); | 964 | (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9); |
764 | 965 | ||
966 | /* serialize IO on this page */ | ||
967 | bm_page_lock_io(mdev, page_nr); | ||
968 | /* before memcpy and submit, | ||
969 | * so it can be redirtied any time */ | ||
970 | bm_set_page_unchanged(b->bm_pages[page_nr]); | ||
971 | |||
972 | if (ctx->flags & BM_AIO_COPY_PAGES) { | ||
973 | /* FIXME alloc_page is good enough for now, but actually needs | ||
974 | * to use pre-allocated page pool */ | ||
975 | void *src, *dest; | ||
976 | page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT); | ||
977 | dest = kmap_atomic(page, KM_USER0); | ||
978 | src = kmap_atomic(b->bm_pages[page_nr], KM_USER1); | ||
979 | memcpy(dest, src, PAGE_SIZE); | ||
980 | kunmap_atomic(src, KM_USER1); | ||
981 | kunmap_atomic(dest, KM_USER0); | ||
982 | bm_store_page_idx(page, page_nr); | ||
983 | } else | ||
984 | page = b->bm_pages[page_nr]; | ||
985 | |||
765 | bio->bi_bdev = mdev->ldev->md_bdev; | 986 | bio->bi_bdev = mdev->ldev->md_bdev; |
766 | bio->bi_sector = on_disk_sector; | 987 | bio->bi_sector = on_disk_sector; |
767 | bio_add_page(bio, b->bm_pages[page_nr], len, 0); | 988 | bio_add_page(bio, page, len, 0); |
768 | bio->bi_private = b; | 989 | bio->bi_private = ctx; |
769 | bio->bi_end_io = bm_async_io_complete; | 990 | bio->bi_end_io = bm_async_io_complete; |
770 | 991 | ||
771 | if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { | 992 | if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { |
772 | bio->bi_rw |= rw; | 993 | bio->bi_rw |= rw; |
773 | bio_endio(bio, -EIO); | 994 | bio_endio(bio, -EIO); |
774 | } else { | 995 | } else { |
@@ -776,87 +997,84 @@ static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int | |||
776 | } | 997 | } |
777 | } | 998 | } |
778 | 999 | ||
779 | # if defined(__LITTLE_ENDIAN) | ||
780 | /* nothing to do, on disk == in memory */ | ||
781 | # define bm_cpu_to_lel(x) ((void)0) | ||
782 | # else | ||
783 | static void bm_cpu_to_lel(struct drbd_bitmap *b) | ||
784 | { | ||
785 | /* need to cpu_to_lel all the pages ... | ||
786 | * this may be optimized by using | ||
787 | * cpu_to_lel(-1) == -1 and cpu_to_lel(0) == 0; | ||
788 | * the following is still not optimal, but better than nothing */ | ||
789 | unsigned int i; | ||
790 | unsigned long *p_addr, *bm; | ||
791 | if (b->bm_set == 0) { | ||
792 | /* no page at all; avoid swap if all is 0 */ | ||
793 | i = b->bm_number_of_pages; | ||
794 | } else if (b->bm_set == b->bm_bits) { | ||
795 | /* only the last page */ | ||
796 | i = b->bm_number_of_pages - 1; | ||
797 | } else { | ||
798 | /* all pages */ | ||
799 | i = 0; | ||
800 | } | ||
801 | for (; i < b->bm_number_of_pages; i++) { | ||
802 | p_addr = kmap_atomic(b->bm_pages[i], KM_USER0); | ||
803 | for (bm = p_addr; bm < p_addr + PAGE_SIZE/sizeof(long); bm++) | ||
804 | *bm = cpu_to_lel(*bm); | ||
805 | kunmap_atomic(p_addr, KM_USER0); | ||
806 | } | ||
807 | } | ||
808 | # endif | ||
809 | /* lel_to_cpu == cpu_to_lel */ | ||
810 | # define bm_lel_to_cpu(x) bm_cpu_to_lel(x) | ||
811 | |||
812 | /* | 1000 | /* |
813 | * bm_rw: read/write the whole bitmap from/to its on disk location. | 1001 | * bm_rw: read/write the whole bitmap from/to its on disk location. |
814 | */ | 1002 | */ |
815 | static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local) | 1003 | static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local) |
816 | { | 1004 | { |
1005 | struct bm_aio_ctx ctx = { | ||
1006 | .mdev = mdev, | ||
1007 | .in_flight = ATOMIC_INIT(1), | ||
1008 | .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done), | ||
1009 | .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0, | ||
1010 | }; | ||
817 | struct drbd_bitmap *b = mdev->bitmap; | 1011 | struct drbd_bitmap *b = mdev->bitmap; |
818 | /* sector_t sector; */ | 1012 | int num_pages, i, count = 0; |
819 | int bm_words, num_pages, i; | ||
820 | unsigned long now; | 1013 | unsigned long now; |
821 | char ppb[10]; | 1014 | char ppb[10]; |
822 | int err = 0; | 1015 | int err = 0; |
823 | 1016 | ||
824 | WARN_ON(!bm_is_locked(b)); | 1017 | /* |
825 | 1018 | * We are protected against bitmap disappearing/resizing by holding an | |
826 | /* no spinlock here, the drbd_bm_lock should be enough! */ | 1019 | * ldev reference (caller must have called get_ldev()). |
827 | 1020 | * For read/write, we are protected against changes to the bitmap by | |
828 | bm_words = drbd_bm_words(mdev); | 1021 | * the bitmap lock (see drbd_bitmap_io). |
829 | num_pages = (bm_words*sizeof(long) + PAGE_SIZE-1) >> PAGE_SHIFT; | 1022 | * For lazy writeout, we don't care for ongoing changes to the bitmap, |
1023 | * as we submit copies of pages anyways. | ||
1024 | */ | ||
1025 | if (!ctx.flags) | ||
1026 | WARN_ON(!(BM_LOCKED_MASK & b->bm_flags)); | ||
830 | 1027 | ||
831 | /* on disk bitmap is little endian */ | 1028 | num_pages = b->bm_number_of_pages; |
832 | if (rw == WRITE) | ||
833 | bm_cpu_to_lel(b); | ||
834 | 1029 | ||
835 | now = jiffies; | 1030 | now = jiffies; |
836 | atomic_set(&b->bm_async_io, num_pages); | ||
837 | __clear_bit(BM_MD_IO_ERROR, &b->bm_flags); | ||
838 | 1031 | ||
839 | /* let the layers below us try to merge these bios... */ | 1032 | /* let the layers below us try to merge these bios... */ |
840 | for (i = 0; i < num_pages; i++) | 1033 | for (i = 0; i < num_pages; i++) { |
841 | bm_page_io_async(mdev, b, i, rw); | 1034 | /* ignore completely unchanged pages */ |
1035 | if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) | ||
1036 | break; | ||
1037 | if (rw & WRITE) { | ||
1038 | if (bm_test_page_unchanged(b->bm_pages[i])) { | ||
1039 | dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i); | ||
1040 | continue; | ||
1041 | } | ||
1042 | /* during lazy writeout, | ||
1043 | * ignore those pages not marked for lazy writeout. */ | ||
1044 | if (lazy_writeout_upper_idx && | ||
1045 | !bm_test_page_lazy_writeout(b->bm_pages[i])) { | ||
1046 | dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i); | ||
1047 | continue; | ||
1048 | } | ||
1049 | } | ||
1050 | atomic_inc(&ctx.in_flight); | ||
1051 | bm_page_io_async(&ctx, i, rw); | ||
1052 | ++count; | ||
1053 | cond_resched(); | ||
1054 | } | ||
842 | 1055 | ||
843 | wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0); | 1056 | /* |
1057 | * We initialize ctx.in_flight to one to make sure bm_async_io_complete | ||
1058 | * will not complete() early, and decrement / test it here. If there | ||
1059 | * are still some bios in flight, we need to wait for them here. | ||
1060 | */ | ||
1061 | if (!atomic_dec_and_test(&ctx.in_flight)) | ||
1062 | wait_for_completion(&ctx.done); | ||
1063 | dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n", | ||
1064 | rw == WRITE ? "WRITE" : "READ", | ||
1065 | count, jiffies - now); | ||
844 | 1066 | ||
845 | if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) { | 1067 | if (ctx.error) { |
846 | dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); | 1068 | dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); |
847 | drbd_chk_io_error(mdev, 1, TRUE); | 1069 | drbd_chk_io_error(mdev, 1, true); |
848 | err = -EIO; | 1070 | err = -EIO; /* ctx.error ? */ |
849 | } | 1071 | } |
850 | 1072 | ||
851 | now = jiffies; | 1073 | now = jiffies; |
852 | if (rw == WRITE) { | 1074 | if (rw == WRITE) { |
853 | /* swap back endianness */ | ||
854 | bm_lel_to_cpu(b); | ||
855 | /* flush bitmap to stable storage */ | ||
856 | drbd_md_flush(mdev); | 1075 | drbd_md_flush(mdev); |
857 | } else /* rw == READ */ { | 1076 | } else /* rw == READ */ { |
858 | /* just read, if necessary adjust endianness */ | 1077 | b->bm_set = bm_count_bits(b); |
859 | b->bm_set = bm_count_bits_swap_endian(b); | ||
860 | dev_info(DEV, "recounting of set bits took additional %lu jiffies\n", | 1078 | dev_info(DEV, "recounting of set bits took additional %lu jiffies\n", |
861 | jiffies - now); | 1079 | jiffies - now); |
862 | } | 1080 | } |
@@ -874,112 +1092,128 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local) | |||
874 | */ | 1092 | */ |
875 | int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local) | 1093 | int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local) |
876 | { | 1094 | { |
877 | return bm_rw(mdev, READ); | 1095 | return bm_rw(mdev, READ, 0); |
878 | } | 1096 | } |
879 | 1097 | ||
880 | /** | 1098 | /** |
881 | * drbd_bm_write() - Write the whole bitmap to its on disk location. | 1099 | * drbd_bm_write() - Write the whole bitmap to its on disk location. |
882 | * @mdev: DRBD device. | 1100 | * @mdev: DRBD device. |
1101 | * | ||
1102 | * Will only write pages that have changed since last IO. | ||
883 | */ | 1103 | */ |
884 | int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local) | 1104 | int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local) |
885 | { | 1105 | { |
886 | return bm_rw(mdev, WRITE); | 1106 | return bm_rw(mdev, WRITE, 0); |
887 | } | 1107 | } |
888 | 1108 | ||
889 | /** | 1109 | /** |
890 | * drbd_bm_write_sect: Writes a 512 (MD_SECTOR_SIZE) byte piece of the bitmap | 1110 | * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed. |
891 | * @mdev: DRBD device. | 1111 | * @mdev: DRBD device. |
892 | * @enr: Extent number in the resync lru (happens to be sector offset) | 1112 | * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages |
893 | * | ||
894 | * The BM_EXT_SIZE is on purpose exactly the amount of the bitmap covered | ||
895 | * by a single sector write. Therefore enr == sector offset from the | ||
896 | * start of the bitmap. | ||
897 | */ | 1113 | */ |
898 | int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local) | 1114 | int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local) |
899 | { | 1115 | { |
900 | sector_t on_disk_sector = enr + mdev->ldev->md.md_offset | 1116 | return bm_rw(mdev, WRITE, upper_idx); |
901 | + mdev->ldev->md.bm_offset; | 1117 | } |
902 | int bm_words, num_words, offset; | 1118 | |
903 | int err = 0; | ||
904 | 1119 | ||
905 | mutex_lock(&mdev->md_io_mutex); | 1120 | /** |
906 | bm_words = drbd_bm_words(mdev); | 1121 | * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap |
907 | offset = S2W(enr); /* word offset into bitmap */ | 1122 | * @mdev: DRBD device. |
908 | num_words = min(S2W(1), bm_words - offset); | 1123 | * @idx: bitmap page index |
909 | if (num_words < S2W(1)) | 1124 | * |
910 | memset(page_address(mdev->md_io_page), 0, MD_SECTOR_SIZE); | 1125 | * We don't want to special case on logical_block_size of the backend device, |
911 | drbd_bm_get_lel(mdev, offset, num_words, | 1126 | * so we submit PAGE_SIZE aligned pieces. |
912 | page_address(mdev->md_io_page)); | 1127 | * Note that on "most" systems, PAGE_SIZE is 4k. |
913 | if (!drbd_md_sync_page_io(mdev, mdev->ldev, on_disk_sector, WRITE)) { | 1128 | * |
914 | int i; | 1129 | * In case this becomes an issue on systems with larger PAGE_SIZE, |
915 | err = -EIO; | 1130 | * we may want to change this again to write 4k aligned 4k pieces. |
916 | dev_err(DEV, "IO ERROR writing bitmap sector %lu " | 1131 | */ |
917 | "(meta-disk sector %llus)\n", | 1132 | int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local) |
918 | enr, (unsigned long long)on_disk_sector); | 1133 | { |
919 | drbd_chk_io_error(mdev, 1, TRUE); | 1134 | struct bm_aio_ctx ctx = { |
920 | for (i = 0; i < AL_EXT_PER_BM_SECT; i++) | 1135 | .mdev = mdev, |
921 | drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i); | 1136 | .in_flight = ATOMIC_INIT(1), |
1137 | .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done), | ||
1138 | .flags = BM_AIO_COPY_PAGES, | ||
1139 | }; | ||
1140 | |||
1141 | if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) { | ||
1142 | dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx); | ||
1143 | return 0; | ||
922 | } | 1144 | } |
1145 | |||
1146 | bm_page_io_async(&ctx, idx, WRITE_SYNC); | ||
1147 | wait_for_completion(&ctx.done); | ||
1148 | |||
1149 | if (ctx.error) | ||
1150 | drbd_chk_io_error(mdev, 1, true); | ||
1151 | /* that should force detach, so the in memory bitmap will be | ||
1152 | * gone in a moment as well. */ | ||
1153 | |||
923 | mdev->bm_writ_cnt++; | 1154 | mdev->bm_writ_cnt++; |
924 | mutex_unlock(&mdev->md_io_mutex); | 1155 | return ctx.error; |
925 | return err; | ||
926 | } | 1156 | } |
927 | 1157 | ||
928 | /* NOTE | 1158 | /* NOTE |
929 | * find_first_bit returns int, we return unsigned long. | 1159 | * find_first_bit returns int, we return unsigned long. |
930 | * should not make much difference anyways, but ... | 1160 | * For this to work on 32bit arch with bitnumbers > (1<<32), |
1161 | * we'd need to return u64, and get a whole lot of other places | ||
1162 | * fixed where we still use unsigned long. | ||
931 | * | 1163 | * |
932 | * this returns a bit number, NOT a sector! | 1164 | * this returns a bit number, NOT a sector! |
933 | */ | 1165 | */ |
934 | #define BPP_MASK ((1UL << (PAGE_SHIFT+3)) - 1) | ||
935 | static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, | 1166 | static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, |
936 | const int find_zero_bit, const enum km_type km) | 1167 | const int find_zero_bit, const enum km_type km) |
937 | { | 1168 | { |
938 | struct drbd_bitmap *b = mdev->bitmap; | 1169 | struct drbd_bitmap *b = mdev->bitmap; |
939 | unsigned long i = -1UL; | ||
940 | unsigned long *p_addr; | 1170 | unsigned long *p_addr; |
941 | unsigned long bit_offset; /* bit offset of the mapped page. */ | 1171 | unsigned long bit_offset; |
1172 | unsigned i; | ||
1173 | |||
942 | 1174 | ||
943 | if (bm_fo > b->bm_bits) { | 1175 | if (bm_fo > b->bm_bits) { |
944 | dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); | 1176 | dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); |
1177 | bm_fo = DRBD_END_OF_BITMAP; | ||
945 | } else { | 1178 | } else { |
946 | while (bm_fo < b->bm_bits) { | 1179 | while (bm_fo < b->bm_bits) { |
947 | unsigned long offset; | 1180 | /* bit offset of the first bit in the page */ |
948 | bit_offset = bm_fo & ~BPP_MASK; /* bit offset of the page */ | 1181 | bit_offset = bm_fo & ~BITS_PER_PAGE_MASK; |
949 | offset = bit_offset >> LN2_BPL; /* word offset of the page */ | 1182 | p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km); |
950 | p_addr = __bm_map_paddr(b, offset, km); | ||
951 | 1183 | ||
952 | if (find_zero_bit) | 1184 | if (find_zero_bit) |
953 | i = find_next_zero_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); | 1185 | i = find_next_zero_bit_le(p_addr, |
1186 | PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); | ||
954 | else | 1187 | else |
955 | i = find_next_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); | 1188 | i = find_next_bit_le(p_addr, |
1189 | PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); | ||
956 | 1190 | ||
957 | __bm_unmap(p_addr, km); | 1191 | __bm_unmap(p_addr, km); |
958 | if (i < PAGE_SIZE*8) { | 1192 | if (i < PAGE_SIZE*8) { |
959 | i = bit_offset + i; | 1193 | bm_fo = bit_offset + i; |
960 | if (i >= b->bm_bits) | 1194 | if (bm_fo >= b->bm_bits) |
961 | break; | 1195 | break; |
962 | goto found; | 1196 | goto found; |
963 | } | 1197 | } |
964 | bm_fo = bit_offset + PAGE_SIZE*8; | 1198 | bm_fo = bit_offset + PAGE_SIZE*8; |
965 | } | 1199 | } |
966 | i = -1UL; | 1200 | bm_fo = DRBD_END_OF_BITMAP; |
967 | } | 1201 | } |
968 | found: | 1202 | found: |
969 | return i; | 1203 | return bm_fo; |
970 | } | 1204 | } |
971 | 1205 | ||
972 | static unsigned long bm_find_next(struct drbd_conf *mdev, | 1206 | static unsigned long bm_find_next(struct drbd_conf *mdev, |
973 | unsigned long bm_fo, const int find_zero_bit) | 1207 | unsigned long bm_fo, const int find_zero_bit) |
974 | { | 1208 | { |
975 | struct drbd_bitmap *b = mdev->bitmap; | 1209 | struct drbd_bitmap *b = mdev->bitmap; |
976 | unsigned long i = -1UL; | 1210 | unsigned long i = DRBD_END_OF_BITMAP; |
977 | 1211 | ||
978 | ERR_IF(!b) return i; | 1212 | ERR_IF(!b) return i; |
979 | ERR_IF(!b->bm_pages) return i; | 1213 | ERR_IF(!b->bm_pages) return i; |
980 | 1214 | ||
981 | spin_lock_irq(&b->bm_lock); | 1215 | spin_lock_irq(&b->bm_lock); |
982 | if (bm_is_locked(b)) | 1216 | if (BM_DONT_TEST & b->bm_flags) |
983 | bm_print_lock_info(mdev); | 1217 | bm_print_lock_info(mdev); |
984 | 1218 | ||
985 | i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1); | 1219 | i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1); |
@@ -1005,13 +1239,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo | |||
1005 | * you must take drbd_bm_lock() first */ | 1239 | * you must take drbd_bm_lock() first */ |
1006 | unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo) | 1240 | unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo) |
1007 | { | 1241 | { |
1008 | /* WARN_ON(!bm_is_locked(mdev)); */ | 1242 | /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ |
1009 | return __bm_find_next(mdev, bm_fo, 0, KM_USER1); | 1243 | return __bm_find_next(mdev, bm_fo, 0, KM_USER1); |
1010 | } | 1244 | } |
1011 | 1245 | ||
1012 | unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo) | 1246 | unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo) |
1013 | { | 1247 | { |
1014 | /* WARN_ON(!bm_is_locked(mdev)); */ | 1248 | /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ |
1015 | return __bm_find_next(mdev, bm_fo, 1, KM_USER1); | 1249 | return __bm_find_next(mdev, bm_fo, 1, KM_USER1); |
1016 | } | 1250 | } |
1017 | 1251 | ||
@@ -1027,8 +1261,9 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | |||
1027 | struct drbd_bitmap *b = mdev->bitmap; | 1261 | struct drbd_bitmap *b = mdev->bitmap; |
1028 | unsigned long *p_addr = NULL; | 1262 | unsigned long *p_addr = NULL; |
1029 | unsigned long bitnr; | 1263 | unsigned long bitnr; |
1030 | unsigned long last_page_nr = -1UL; | 1264 | unsigned int last_page_nr = -1U; |
1031 | int c = 0; | 1265 | int c = 0; |
1266 | int changed_total = 0; | ||
1032 | 1267 | ||
1033 | if (e >= b->bm_bits) { | 1268 | if (e >= b->bm_bits) { |
1034 | dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n", | 1269 | dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n", |
@@ -1036,23 +1271,33 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | |||
1036 | e = b->bm_bits ? b->bm_bits -1 : 0; | 1271 | e = b->bm_bits ? b->bm_bits -1 : 0; |
1037 | } | 1272 | } |
1038 | for (bitnr = s; bitnr <= e; bitnr++) { | 1273 | for (bitnr = s; bitnr <= e; bitnr++) { |
1039 | unsigned long offset = bitnr>>LN2_BPL; | 1274 | unsigned int page_nr = bm_bit_to_page_idx(b, bitnr); |
1040 | unsigned long page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3); | ||
1041 | if (page_nr != last_page_nr) { | 1275 | if (page_nr != last_page_nr) { |
1042 | if (p_addr) | 1276 | if (p_addr) |
1043 | __bm_unmap(p_addr, km); | 1277 | __bm_unmap(p_addr, km); |
1044 | p_addr = __bm_map_paddr(b, offset, km); | 1278 | if (c < 0) |
1279 | bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); | ||
1280 | else if (c > 0) | ||
1281 | bm_set_page_need_writeout(b->bm_pages[last_page_nr]); | ||
1282 | changed_total += c; | ||
1283 | c = 0; | ||
1284 | p_addr = __bm_map_pidx(b, page_nr, km); | ||
1045 | last_page_nr = page_nr; | 1285 | last_page_nr = page_nr; |
1046 | } | 1286 | } |
1047 | if (val) | 1287 | if (val) |
1048 | c += (0 == __test_and_set_bit(bitnr & BPP_MASK, p_addr)); | 1288 | c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr)); |
1049 | else | 1289 | else |
1050 | c -= (0 != __test_and_clear_bit(bitnr & BPP_MASK, p_addr)); | 1290 | c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr)); |
1051 | } | 1291 | } |
1052 | if (p_addr) | 1292 | if (p_addr) |
1053 | __bm_unmap(p_addr, km); | 1293 | __bm_unmap(p_addr, km); |
1054 | b->bm_set += c; | 1294 | if (c < 0) |
1055 | return c; | 1295 | bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); |
1296 | else if (c > 0) | ||
1297 | bm_set_page_need_writeout(b->bm_pages[last_page_nr]); | ||
1298 | changed_total += c; | ||
1299 | b->bm_set += changed_total; | ||
1300 | return changed_total; | ||
1056 | } | 1301 | } |
1057 | 1302 | ||
1058 | /* returns number of bits actually changed. | 1303 | /* returns number of bits actually changed. |
@@ -1070,7 +1315,7 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | |||
1070 | ERR_IF(!b->bm_pages) return 0; | 1315 | ERR_IF(!b->bm_pages) return 0; |
1071 | 1316 | ||
1072 | spin_lock_irqsave(&b->bm_lock, flags); | 1317 | spin_lock_irqsave(&b->bm_lock, flags); |
1073 | if (bm_is_locked(b)) | 1318 | if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags) |
1074 | bm_print_lock_info(mdev); | 1319 | bm_print_lock_info(mdev); |
1075 | 1320 | ||
1076 | c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1); | 1321 | c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1); |
@@ -1187,12 +1432,11 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr) | |||
1187 | ERR_IF(!b->bm_pages) return 0; | 1432 | ERR_IF(!b->bm_pages) return 0; |
1188 | 1433 | ||
1189 | spin_lock_irqsave(&b->bm_lock, flags); | 1434 | spin_lock_irqsave(&b->bm_lock, flags); |
1190 | if (bm_is_locked(b)) | 1435 | if (BM_DONT_TEST & b->bm_flags) |
1191 | bm_print_lock_info(mdev); | 1436 | bm_print_lock_info(mdev); |
1192 | if (bitnr < b->bm_bits) { | 1437 | if (bitnr < b->bm_bits) { |
1193 | unsigned long offset = bitnr>>LN2_BPL; | 1438 | p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr)); |
1194 | p_addr = bm_map_paddr(b, offset); | 1439 | i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0; |
1195 | i = test_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0; | ||
1196 | bm_unmap(p_addr); | 1440 | bm_unmap(p_addr); |
1197 | } else if (bitnr == b->bm_bits) { | 1441 | } else if (bitnr == b->bm_bits) { |
1198 | i = -1; | 1442 | i = -1; |
@@ -1210,10 +1454,10 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi | |||
1210 | { | 1454 | { |
1211 | unsigned long flags; | 1455 | unsigned long flags; |
1212 | struct drbd_bitmap *b = mdev->bitmap; | 1456 | struct drbd_bitmap *b = mdev->bitmap; |
1213 | unsigned long *p_addr = NULL, page_nr = -1; | 1457 | unsigned long *p_addr = NULL; |
1214 | unsigned long bitnr; | 1458 | unsigned long bitnr; |
1459 | unsigned int page_nr = -1U; | ||
1215 | int c = 0; | 1460 | int c = 0; |
1216 | size_t w; | ||
1217 | 1461 | ||
1218 | /* If this is called without a bitmap, that is a bug. But just to be | 1462 | /* If this is called without a bitmap, that is a bug. But just to be |
1219 | * robust in case we screwed up elsewhere, in that case pretend there | 1463 | * robust in case we screwed up elsewhere, in that case pretend there |
@@ -1223,20 +1467,20 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi | |||
1223 | ERR_IF(!b->bm_pages) return 1; | 1467 | ERR_IF(!b->bm_pages) return 1; |
1224 | 1468 | ||
1225 | spin_lock_irqsave(&b->bm_lock, flags); | 1469 | spin_lock_irqsave(&b->bm_lock, flags); |
1226 | if (bm_is_locked(b)) | 1470 | if (BM_DONT_TEST & b->bm_flags) |
1227 | bm_print_lock_info(mdev); | 1471 | bm_print_lock_info(mdev); |
1228 | for (bitnr = s; bitnr <= e; bitnr++) { | 1472 | for (bitnr = s; bitnr <= e; bitnr++) { |
1229 | w = bitnr >> LN2_BPL; | 1473 | unsigned int idx = bm_bit_to_page_idx(b, bitnr); |
1230 | if (page_nr != w >> (PAGE_SHIFT - LN2_BPL + 3)) { | 1474 | if (page_nr != idx) { |
1231 | page_nr = w >> (PAGE_SHIFT - LN2_BPL + 3); | 1475 | page_nr = idx; |
1232 | if (p_addr) | 1476 | if (p_addr) |
1233 | bm_unmap(p_addr); | 1477 | bm_unmap(p_addr); |
1234 | p_addr = bm_map_paddr(b, w); | 1478 | p_addr = bm_map_pidx(b, idx); |
1235 | } | 1479 | } |
1236 | ERR_IF (bitnr >= b->bm_bits) { | 1480 | ERR_IF (bitnr >= b->bm_bits) { |
1237 | dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); | 1481 | dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); |
1238 | } else { | 1482 | } else { |
1239 | c += (0 != test_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); | 1483 | c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); |
1240 | } | 1484 | } |
1241 | } | 1485 | } |
1242 | if (p_addr) | 1486 | if (p_addr) |
@@ -1271,7 +1515,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr) | |||
1271 | ERR_IF(!b->bm_pages) return 0; | 1515 | ERR_IF(!b->bm_pages) return 0; |
1272 | 1516 | ||
1273 | spin_lock_irqsave(&b->bm_lock, flags); | 1517 | spin_lock_irqsave(&b->bm_lock, flags); |
1274 | if (bm_is_locked(b)) | 1518 | if (BM_DONT_TEST & b->bm_flags) |
1275 | bm_print_lock_info(mdev); | 1519 | bm_print_lock_info(mdev); |
1276 | 1520 | ||
1277 | s = S2W(enr); | 1521 | s = S2W(enr); |
@@ -1279,7 +1523,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr) | |||
1279 | count = 0; | 1523 | count = 0; |
1280 | if (s < b->bm_words) { | 1524 | if (s < b->bm_words) { |
1281 | int n = e-s; | 1525 | int n = e-s; |
1282 | p_addr = bm_map_paddr(b, s); | 1526 | p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s)); |
1283 | bm = p_addr + MLPP(s); | 1527 | bm = p_addr + MLPP(s); |
1284 | while (n--) | 1528 | while (n--) |
1285 | count += hweight_long(*bm++); | 1529 | count += hweight_long(*bm++); |
@@ -1291,18 +1535,20 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr) | |||
1291 | return count; | 1535 | return count; |
1292 | } | 1536 | } |
1293 | 1537 | ||
1294 | /* set all bits covered by the AL-extent al_enr */ | 1538 | /* Set all bits covered by the AL-extent al_enr. |
1539 | * Returns number of bits changed. */ | ||
1295 | unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) | 1540 | unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) |
1296 | { | 1541 | { |
1297 | struct drbd_bitmap *b = mdev->bitmap; | 1542 | struct drbd_bitmap *b = mdev->bitmap; |
1298 | unsigned long *p_addr, *bm; | 1543 | unsigned long *p_addr, *bm; |
1299 | unsigned long weight; | 1544 | unsigned long weight; |
1300 | int count, s, e, i, do_now; | 1545 | unsigned long s, e; |
1546 | int count, i, do_now; | ||
1301 | ERR_IF(!b) return 0; | 1547 | ERR_IF(!b) return 0; |
1302 | ERR_IF(!b->bm_pages) return 0; | 1548 | ERR_IF(!b->bm_pages) return 0; |
1303 | 1549 | ||
1304 | spin_lock_irq(&b->bm_lock); | 1550 | spin_lock_irq(&b->bm_lock); |
1305 | if (bm_is_locked(b)) | 1551 | if (BM_DONT_SET & b->bm_flags) |
1306 | bm_print_lock_info(mdev); | 1552 | bm_print_lock_info(mdev); |
1307 | weight = b->bm_set; | 1553 | weight = b->bm_set; |
1308 | 1554 | ||
@@ -1314,7 +1560,7 @@ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) | |||
1314 | count = 0; | 1560 | count = 0; |
1315 | if (s < b->bm_words) { | 1561 | if (s < b->bm_words) { |
1316 | i = do_now = e-s; | 1562 | i = do_now = e-s; |
1317 | p_addr = bm_map_paddr(b, s); | 1563 | p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s)); |
1318 | bm = p_addr + MLPP(s); | 1564 | bm = p_addr + MLPP(s); |
1319 | while (i--) { | 1565 | while (i--) { |
1320 | count += hweight_long(*bm); | 1566 | count += hweight_long(*bm); |
@@ -1326,7 +1572,7 @@ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) | |||
1326 | if (e == b->bm_words) | 1572 | if (e == b->bm_words) |
1327 | b->bm_set -= bm_clear_surplus(b); | 1573 | b->bm_set -= bm_clear_surplus(b); |
1328 | } else { | 1574 | } else { |
1329 | dev_err(DEV, "start offset (%d) too large in drbd_bm_ALe_set_all\n", s); | 1575 | dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s); |
1330 | } | 1576 | } |
1331 | weight = b->bm_set - weight; | 1577 | weight = b->bm_set - weight; |
1332 | spin_unlock_irq(&b->bm_lock); | 1578 | spin_unlock_irq(&b->bm_lock); |
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index b0bd27dfc1e8..81030d8d654b 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
@@ -72,13 +72,6 @@ extern int fault_devs; | |||
72 | extern char usermode_helper[]; | 72 | extern char usermode_helper[]; |
73 | 73 | ||
74 | 74 | ||
75 | #ifndef TRUE | ||
76 | #define TRUE 1 | ||
77 | #endif | ||
78 | #ifndef FALSE | ||
79 | #define FALSE 0 | ||
80 | #endif | ||
81 | |||
82 | /* I don't remember why XCPU ... | 75 | /* I don't remember why XCPU ... |
83 | * This is used to wake the asender, | 76 | * This is used to wake the asender, |
84 | * and to interrupt sending the sending task | 77 | * and to interrupt sending the sending task |
@@ -104,6 +97,7 @@ extern char usermode_helper[]; | |||
104 | #define ID_SYNCER (-1ULL) | 97 | #define ID_SYNCER (-1ULL) |
105 | #define ID_VACANT 0 | 98 | #define ID_VACANT 0 |
106 | #define is_syncer_block_id(id) ((id) == ID_SYNCER) | 99 | #define is_syncer_block_id(id) ((id) == ID_SYNCER) |
100 | #define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL) | ||
107 | 101 | ||
108 | struct drbd_conf; | 102 | struct drbd_conf; |
109 | 103 | ||
@@ -137,20 +131,19 @@ enum { | |||
137 | DRBD_FAULT_MAX, | 131 | DRBD_FAULT_MAX, |
138 | }; | 132 | }; |
139 | 133 | ||
140 | #ifdef CONFIG_DRBD_FAULT_INJECTION | ||
141 | extern unsigned int | 134 | extern unsigned int |
142 | _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type); | 135 | _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type); |
136 | |||
143 | static inline int | 137 | static inline int |
144 | drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) { | 138 | drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) { |
139 | #ifdef CONFIG_DRBD_FAULT_INJECTION | ||
145 | return fault_rate && | 140 | return fault_rate && |
146 | (enable_faults & (1<<type)) && | 141 | (enable_faults & (1<<type)) && |
147 | _drbd_insert_fault(mdev, type); | 142 | _drbd_insert_fault(mdev, type); |
148 | } | ||
149 | #define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m), (_t))) | ||
150 | |||
151 | #else | 143 | #else |
152 | #define FAULT_ACTIVE(_m, _t) (0) | 144 | return 0; |
153 | #endif | 145 | #endif |
146 | } | ||
154 | 147 | ||
155 | /* integer division, round _UP_ to the next integer */ | 148 | /* integer division, round _UP_ to the next integer */ |
156 | #define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0)) | 149 | #define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0)) |
@@ -212,8 +205,10 @@ enum drbd_packets { | |||
212 | /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */ | 205 | /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */ |
213 | /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */ | 206 | /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */ |
214 | P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */ | 207 | P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */ |
208 | P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */ | ||
209 | P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */ | ||
215 | 210 | ||
216 | P_MAX_CMD = 0x28, | 211 | P_MAX_CMD = 0x2A, |
217 | P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */ | 212 | P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */ |
218 | P_MAX_OPT_CMD = 0x101, | 213 | P_MAX_OPT_CMD = 0x101, |
219 | 214 | ||
@@ -269,6 +264,7 @@ static inline const char *cmdname(enum drbd_packets cmd) | |||
269 | [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", | 264 | [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", |
270 | [P_COMPRESSED_BITMAP] = "CBitmap", | 265 | [P_COMPRESSED_BITMAP] = "CBitmap", |
271 | [P_DELAY_PROBE] = "DelayProbe", | 266 | [P_DELAY_PROBE] = "DelayProbe", |
267 | [P_OUT_OF_SYNC] = "OutOfSync", | ||
272 | [P_MAX_CMD] = NULL, | 268 | [P_MAX_CMD] = NULL, |
273 | }; | 269 | }; |
274 | 270 | ||
@@ -512,7 +508,7 @@ struct p_sizes { | |||
512 | u64 d_size; /* size of disk */ | 508 | u64 d_size; /* size of disk */ |
513 | u64 u_size; /* user requested size */ | 509 | u64 u_size; /* user requested size */ |
514 | u64 c_size; /* current exported size */ | 510 | u64 c_size; /* current exported size */ |
515 | u32 max_segment_size; /* Maximal size of a BIO */ | 511 | u32 max_bio_size; /* Maximal size of a BIO */ |
516 | u16 queue_order_type; /* not yet implemented in DRBD*/ | 512 | u16 queue_order_type; /* not yet implemented in DRBD*/ |
517 | u16 dds_flags; /* use enum dds_flags here. */ | 513 | u16 dds_flags; /* use enum dds_flags here. */ |
518 | } __packed; | 514 | } __packed; |
@@ -550,6 +546,13 @@ struct p_discard { | |||
550 | u32 pad; | 546 | u32 pad; |
551 | } __packed; | 547 | } __packed; |
552 | 548 | ||
549 | struct p_block_desc { | ||
550 | struct p_header80 head; | ||
551 | u64 sector; | ||
552 | u32 blksize; | ||
553 | u32 pad; /* to multiple of 8 Byte */ | ||
554 | } __packed; | ||
555 | |||
553 | /* Valid values for the encoding field. | 556 | /* Valid values for the encoding field. |
554 | * Bump proto version when changing this. */ | 557 | * Bump proto version when changing this. */ |
555 | enum drbd_bitmap_code { | 558 | enum drbd_bitmap_code { |
@@ -647,6 +650,7 @@ union p_polymorph { | |||
647 | struct p_block_req block_req; | 650 | struct p_block_req block_req; |
648 | struct p_delay_probe93 delay_probe93; | 651 | struct p_delay_probe93 delay_probe93; |
649 | struct p_rs_uuid rs_uuid; | 652 | struct p_rs_uuid rs_uuid; |
653 | struct p_block_desc block_desc; | ||
650 | } __packed; | 654 | } __packed; |
651 | 655 | ||
652 | /**********************************************************************/ | 656 | /**********************************************************************/ |
@@ -677,13 +681,6 @@ static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi) | |||
677 | return thi->t_state; | 681 | return thi->t_state; |
678 | } | 682 | } |
679 | 683 | ||
680 | |||
681 | /* | ||
682 | * Having this as the first member of a struct provides sort of "inheritance". | ||
683 | * "derived" structs can be "drbd_queue_work()"ed. | ||
684 | * The callback should know and cast back to the descendant struct. | ||
685 | * drbd_request and drbd_epoch_entry are descendants of drbd_work. | ||
686 | */ | ||
687 | struct drbd_work; | 684 | struct drbd_work; |
688 | typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel); | 685 | typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel); |
689 | struct drbd_work { | 686 | struct drbd_work { |
@@ -712,9 +709,6 @@ struct drbd_request { | |||
712 | * starting a new epoch... | 709 | * starting a new epoch... |
713 | */ | 710 | */ |
714 | 711 | ||
715 | /* up to here, the struct layout is identical to drbd_epoch_entry; | ||
716 | * we might be able to use that to our advantage... */ | ||
717 | |||
718 | struct list_head tl_requests; /* ring list in the transfer log */ | 712 | struct list_head tl_requests; /* ring list in the transfer log */ |
719 | struct bio *master_bio; /* master bio pointer */ | 713 | struct bio *master_bio; /* master bio pointer */ |
720 | unsigned long rq_state; /* see comments above _req_mod() */ | 714 | unsigned long rq_state; /* see comments above _req_mod() */ |
@@ -831,7 +825,7 @@ enum { | |||
831 | CRASHED_PRIMARY, /* This node was a crashed primary. | 825 | CRASHED_PRIMARY, /* This node was a crashed primary. |
832 | * Gets cleared when the state.conn | 826 | * Gets cleared when the state.conn |
833 | * goes into C_CONNECTED state. */ | 827 | * goes into C_CONNECTED state. */ |
834 | WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */ | 828 | NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */ |
835 | CONSIDER_RESYNC, | 829 | CONSIDER_RESYNC, |
836 | 830 | ||
837 | MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */ | 831 | MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */ |
@@ -856,10 +850,37 @@ enum { | |||
856 | GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */ | 850 | GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */ |
857 | NEW_CUR_UUID, /* Create new current UUID when thawing IO */ | 851 | NEW_CUR_UUID, /* Create new current UUID when thawing IO */ |
858 | AL_SUSPENDED, /* Activity logging is currently suspended. */ | 852 | AL_SUSPENDED, /* Activity logging is currently suspended. */ |
853 | AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */ | ||
859 | }; | 854 | }; |
860 | 855 | ||
861 | struct drbd_bitmap; /* opaque for drbd_conf */ | 856 | struct drbd_bitmap; /* opaque for drbd_conf */ |
862 | 857 | ||
858 | /* definition of bits in bm_flags to be used in drbd_bm_lock | ||
859 | * and drbd_bitmap_io and friends. */ | ||
860 | enum bm_flag { | ||
861 | /* do we need to kfree, or vfree bm_pages? */ | ||
862 | BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */ | ||
863 | |||
864 | /* currently locked for bulk operation */ | ||
865 | BM_LOCKED_MASK = 0x7, | ||
866 | |||
867 | /* in detail, that is: */ | ||
868 | BM_DONT_CLEAR = 0x1, | ||
869 | BM_DONT_SET = 0x2, | ||
870 | BM_DONT_TEST = 0x4, | ||
871 | |||
872 | /* (test bit, count bit) allowed (common case) */ | ||
873 | BM_LOCKED_TEST_ALLOWED = 0x3, | ||
874 | |||
875 | /* testing bits, as well as setting new bits allowed, but clearing bits | ||
876 | * would be unexpected. Used during bitmap receive. Setting new bits | ||
877 | * requires sending of "out-of-sync" information, though. */ | ||
878 | BM_LOCKED_SET_ALLOWED = 0x1, | ||
879 | |||
880 | /* clear is not expected while bitmap is locked for bulk operation */ | ||
881 | }; | ||
882 | |||
883 | |||
863 | /* TODO sort members for performance | 884 | /* TODO sort members for performance |
864 | * MAYBE group them further */ | 885 | * MAYBE group them further */ |
865 | 886 | ||
@@ -925,6 +946,7 @@ struct drbd_md_io { | |||
925 | struct bm_io_work { | 946 | struct bm_io_work { |
926 | struct drbd_work w; | 947 | struct drbd_work w; |
927 | char *why; | 948 | char *why; |
949 | enum bm_flag flags; | ||
928 | int (*io_fn)(struct drbd_conf *mdev); | 950 | int (*io_fn)(struct drbd_conf *mdev); |
929 | void (*done)(struct drbd_conf *mdev, int rv); | 951 | void (*done)(struct drbd_conf *mdev, int rv); |
930 | }; | 952 | }; |
@@ -963,9 +985,12 @@ struct drbd_conf { | |||
963 | struct drbd_work resync_work, | 985 | struct drbd_work resync_work, |
964 | unplug_work, | 986 | unplug_work, |
965 | go_diskless, | 987 | go_diskless, |
966 | md_sync_work; | 988 | md_sync_work, |
989 | start_resync_work; | ||
967 | struct timer_list resync_timer; | 990 | struct timer_list resync_timer; |
968 | struct timer_list md_sync_timer; | 991 | struct timer_list md_sync_timer; |
992 | struct timer_list start_resync_timer; | ||
993 | struct timer_list request_timer; | ||
969 | #ifdef DRBD_DEBUG_MD_SYNC | 994 | #ifdef DRBD_DEBUG_MD_SYNC |
970 | struct { | 995 | struct { |
971 | unsigned int line; | 996 | unsigned int line; |
@@ -1000,9 +1025,9 @@ struct drbd_conf { | |||
1000 | struct hlist_head *tl_hash; | 1025 | struct hlist_head *tl_hash; |
1001 | unsigned int tl_hash_s; | 1026 | unsigned int tl_hash_s; |
1002 | 1027 | ||
1003 | /* blocks to sync in this run [unit BM_BLOCK_SIZE] */ | 1028 | /* blocks to resync in this run [unit BM_BLOCK_SIZE] */ |
1004 | unsigned long rs_total; | 1029 | unsigned long rs_total; |
1005 | /* number of sync IOs that failed in this run */ | 1030 | /* number of resync blocks that failed in this run */ |
1006 | unsigned long rs_failed; | 1031 | unsigned long rs_failed; |
1007 | /* Syncer's start time [unit jiffies] */ | 1032 | /* Syncer's start time [unit jiffies] */ |
1008 | unsigned long rs_start; | 1033 | unsigned long rs_start; |
@@ -1102,6 +1127,7 @@ struct drbd_conf { | |||
1102 | struct fifo_buffer rs_plan_s; /* correction values of resync planer */ | 1127 | struct fifo_buffer rs_plan_s; /* correction values of resync planer */ |
1103 | int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ | 1128 | int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ |
1104 | int rs_planed; /* resync sectors already planed */ | 1129 | int rs_planed; /* resync sectors already planed */ |
1130 | atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ | ||
1105 | }; | 1131 | }; |
1106 | 1132 | ||
1107 | static inline struct drbd_conf *minor_to_mdev(unsigned int minor) | 1133 | static inline struct drbd_conf *minor_to_mdev(unsigned int minor) |
@@ -1163,14 +1189,19 @@ enum dds_flags { | |||
1163 | }; | 1189 | }; |
1164 | 1190 | ||
1165 | extern void drbd_init_set_defaults(struct drbd_conf *mdev); | 1191 | extern void drbd_init_set_defaults(struct drbd_conf *mdev); |
1166 | extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, | 1192 | extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev, |
1167 | union drbd_state mask, union drbd_state val); | 1193 | enum chg_state_flags f, |
1194 | union drbd_state mask, | ||
1195 | union drbd_state val); | ||
1168 | extern void drbd_force_state(struct drbd_conf *, union drbd_state, | 1196 | extern void drbd_force_state(struct drbd_conf *, union drbd_state, |
1169 | union drbd_state); | 1197 | union drbd_state); |
1170 | extern int _drbd_request_state(struct drbd_conf *, union drbd_state, | 1198 | extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *, |
1171 | union drbd_state, enum chg_state_flags); | 1199 | union drbd_state, |
1172 | extern int __drbd_set_state(struct drbd_conf *, union drbd_state, | 1200 | union drbd_state, |
1173 | enum chg_state_flags, struct completion *done); | 1201 | enum chg_state_flags); |
1202 | extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state, | ||
1203 | enum chg_state_flags, | ||
1204 | struct completion *done); | ||
1174 | extern void print_st_err(struct drbd_conf *, union drbd_state, | 1205 | extern void print_st_err(struct drbd_conf *, union drbd_state, |
1175 | union drbd_state, int); | 1206 | union drbd_state, int); |
1176 | extern int drbd_thread_start(struct drbd_thread *thi); | 1207 | extern int drbd_thread_start(struct drbd_thread *thi); |
@@ -1195,7 +1226,7 @@ extern int drbd_send(struct drbd_conf *mdev, struct socket *sock, | |||
1195 | extern int drbd_send_protocol(struct drbd_conf *mdev); | 1226 | extern int drbd_send_protocol(struct drbd_conf *mdev); |
1196 | extern int drbd_send_uuids(struct drbd_conf *mdev); | 1227 | extern int drbd_send_uuids(struct drbd_conf *mdev); |
1197 | extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev); | 1228 | extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev); |
1198 | extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val); | 1229 | extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev); |
1199 | extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags); | 1230 | extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags); |
1200 | extern int _drbd_send_state(struct drbd_conf *mdev); | 1231 | extern int _drbd_send_state(struct drbd_conf *mdev); |
1201 | extern int drbd_send_state(struct drbd_conf *mdev); | 1232 | extern int drbd_send_state(struct drbd_conf *mdev); |
@@ -1220,11 +1251,10 @@ extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd, | |||
1220 | struct p_data *dp, int data_size); | 1251 | struct p_data *dp, int data_size); |
1221 | extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd, | 1252 | extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd, |
1222 | sector_t sector, int blksize, u64 block_id); | 1253 | sector_t sector, int blksize, u64 block_id); |
1254 | extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req); | ||
1223 | extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, | 1255 | extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, |
1224 | struct drbd_epoch_entry *e); | 1256 | struct drbd_epoch_entry *e); |
1225 | extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req); | 1257 | extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req); |
1226 | extern int _drbd_send_barrier(struct drbd_conf *mdev, | ||
1227 | struct drbd_tl_epoch *barrier); | ||
1228 | extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd, | 1258 | extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd, |
1229 | sector_t sector, int size, u64 block_id); | 1259 | sector_t sector, int size, u64 block_id); |
1230 | extern int drbd_send_drequest_csum(struct drbd_conf *mdev, | 1260 | extern int drbd_send_drequest_csum(struct drbd_conf *mdev, |
@@ -1235,14 +1265,13 @@ extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size) | |||
1235 | 1265 | ||
1236 | extern int drbd_send_bitmap(struct drbd_conf *mdev); | 1266 | extern int drbd_send_bitmap(struct drbd_conf *mdev); |
1237 | extern int _drbd_send_bitmap(struct drbd_conf *mdev); | 1267 | extern int _drbd_send_bitmap(struct drbd_conf *mdev); |
1238 | extern int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode); | 1268 | extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode); |
1239 | extern void drbd_free_bc(struct drbd_backing_dev *ldev); | 1269 | extern void drbd_free_bc(struct drbd_backing_dev *ldev); |
1240 | extern void drbd_mdev_cleanup(struct drbd_conf *mdev); | 1270 | extern void drbd_mdev_cleanup(struct drbd_conf *mdev); |
1271 | void drbd_print_uuids(struct drbd_conf *mdev, const char *text); | ||
1241 | 1272 | ||
1242 | /* drbd_meta-data.c (still in drbd_main.c) */ | ||
1243 | extern void drbd_md_sync(struct drbd_conf *mdev); | 1273 | extern void drbd_md_sync(struct drbd_conf *mdev); |
1244 | extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev); | 1274 | extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev); |
1245 | /* maybe define them below as inline? */ | ||
1246 | extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); | 1275 | extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); |
1247 | extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); | 1276 | extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); |
1248 | extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local); | 1277 | extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local); |
@@ -1261,10 +1290,12 @@ extern void drbd_md_mark_dirty_(struct drbd_conf *mdev, | |||
1261 | extern void drbd_queue_bitmap_io(struct drbd_conf *mdev, | 1290 | extern void drbd_queue_bitmap_io(struct drbd_conf *mdev, |
1262 | int (*io_fn)(struct drbd_conf *), | 1291 | int (*io_fn)(struct drbd_conf *), |
1263 | void (*done)(struct drbd_conf *, int), | 1292 | void (*done)(struct drbd_conf *, int), |
1264 | char *why); | 1293 | char *why, enum bm_flag flags); |
1294 | extern int drbd_bitmap_io(struct drbd_conf *mdev, | ||
1295 | int (*io_fn)(struct drbd_conf *), | ||
1296 | char *why, enum bm_flag flags); | ||
1265 | extern int drbd_bmio_set_n_write(struct drbd_conf *mdev); | 1297 | extern int drbd_bmio_set_n_write(struct drbd_conf *mdev); |
1266 | extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); | 1298 | extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); |
1267 | extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why); | ||
1268 | extern void drbd_go_diskless(struct drbd_conf *mdev); | 1299 | extern void drbd_go_diskless(struct drbd_conf *mdev); |
1269 | extern void drbd_ldev_destroy(struct drbd_conf *mdev); | 1300 | extern void drbd_ldev_destroy(struct drbd_conf *mdev); |
1270 | 1301 | ||
@@ -1313,6 +1344,7 @@ struct bm_extent { | |||
1313 | 1344 | ||
1314 | #define BME_NO_WRITES 0 /* bm_extent.flags: no more requests on this one! */ | 1345 | #define BME_NO_WRITES 0 /* bm_extent.flags: no more requests on this one! */ |
1315 | #define BME_LOCKED 1 /* bm_extent.flags: syncer active on this one. */ | 1346 | #define BME_LOCKED 1 /* bm_extent.flags: syncer active on this one. */ |
1347 | #define BME_PRIORITY 2 /* finish resync IO on this extent ASAP! App IO waiting! */ | ||
1316 | 1348 | ||
1317 | /* drbd_bitmap.c */ | 1349 | /* drbd_bitmap.c */ |
1318 | /* | 1350 | /* |
@@ -1390,7 +1422,9 @@ struct bm_extent { | |||
1390 | * you should use 64bit OS for that much storage, anyways. */ | 1422 | * you should use 64bit OS for that much storage, anyways. */ |
1391 | #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff) | 1423 | #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff) |
1392 | #else | 1424 | #else |
1393 | #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0x1LU << 32) | 1425 | /* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */ |
1426 | #define DRBD_MAX_SECTORS_FLEX (1UL << 51) | ||
1427 | /* corresponds to (1UL << 38) bits right now. */ | ||
1394 | #endif | 1428 | #endif |
1395 | #endif | 1429 | #endif |
1396 | 1430 | ||
@@ -1398,7 +1432,7 @@ struct bm_extent { | |||
1398 | * With a value of 8 all IO in one 128K block make it to the same slot of the | 1432 | * With a value of 8 all IO in one 128K block make it to the same slot of the |
1399 | * hash table. */ | 1433 | * hash table. */ |
1400 | #define HT_SHIFT 8 | 1434 | #define HT_SHIFT 8 |
1401 | #define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT)) | 1435 | #define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT)) |
1402 | 1436 | ||
1403 | #define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ | 1437 | #define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ |
1404 | 1438 | ||
@@ -1410,16 +1444,20 @@ extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new | |||
1410 | extern void drbd_bm_cleanup(struct drbd_conf *mdev); | 1444 | extern void drbd_bm_cleanup(struct drbd_conf *mdev); |
1411 | extern void drbd_bm_set_all(struct drbd_conf *mdev); | 1445 | extern void drbd_bm_set_all(struct drbd_conf *mdev); |
1412 | extern void drbd_bm_clear_all(struct drbd_conf *mdev); | 1446 | extern void drbd_bm_clear_all(struct drbd_conf *mdev); |
1447 | /* set/clear/test only a few bits at a time */ | ||
1413 | extern int drbd_bm_set_bits( | 1448 | extern int drbd_bm_set_bits( |
1414 | struct drbd_conf *mdev, unsigned long s, unsigned long e); | 1449 | struct drbd_conf *mdev, unsigned long s, unsigned long e); |
1415 | extern int drbd_bm_clear_bits( | 1450 | extern int drbd_bm_clear_bits( |
1416 | struct drbd_conf *mdev, unsigned long s, unsigned long e); | 1451 | struct drbd_conf *mdev, unsigned long s, unsigned long e); |
1417 | /* bm_set_bits variant for use while holding drbd_bm_lock */ | 1452 | extern int drbd_bm_count_bits( |
1453 | struct drbd_conf *mdev, const unsigned long s, const unsigned long e); | ||
1454 | /* bm_set_bits variant for use while holding drbd_bm_lock, | ||
1455 | * may process the whole bitmap in one go */ | ||
1418 | extern void _drbd_bm_set_bits(struct drbd_conf *mdev, | 1456 | extern void _drbd_bm_set_bits(struct drbd_conf *mdev, |
1419 | const unsigned long s, const unsigned long e); | 1457 | const unsigned long s, const unsigned long e); |
1420 | extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr); | 1458 | extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr); |
1421 | extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr); | 1459 | extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr); |
1422 | extern int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local); | 1460 | extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local); |
1423 | extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); | 1461 | extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); |
1424 | extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); | 1462 | extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); |
1425 | extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, | 1463 | extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, |
@@ -1427,6 +1465,8 @@ extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, | |||
1427 | extern size_t drbd_bm_words(struct drbd_conf *mdev); | 1465 | extern size_t drbd_bm_words(struct drbd_conf *mdev); |
1428 | extern unsigned long drbd_bm_bits(struct drbd_conf *mdev); | 1466 | extern unsigned long drbd_bm_bits(struct drbd_conf *mdev); |
1429 | extern sector_t drbd_bm_capacity(struct drbd_conf *mdev); | 1467 | extern sector_t drbd_bm_capacity(struct drbd_conf *mdev); |
1468 | |||
1469 | #define DRBD_END_OF_BITMAP (~(unsigned long)0) | ||
1430 | extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); | 1470 | extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); |
1431 | /* bm_find_next variants for use while you hold drbd_bm_lock() */ | 1471 | /* bm_find_next variants for use while you hold drbd_bm_lock() */ |
1432 | extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); | 1472 | extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); |
@@ -1437,14 +1477,12 @@ extern int drbd_bm_rs_done(struct drbd_conf *mdev); | |||
1437 | /* for receive_bitmap */ | 1477 | /* for receive_bitmap */ |
1438 | extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, | 1478 | extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, |
1439 | size_t number, unsigned long *buffer); | 1479 | size_t number, unsigned long *buffer); |
1440 | /* for _drbd_send_bitmap and drbd_bm_write_sect */ | 1480 | /* for _drbd_send_bitmap */ |
1441 | extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, | 1481 | extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, |
1442 | size_t number, unsigned long *buffer); | 1482 | size_t number, unsigned long *buffer); |
1443 | 1483 | ||
1444 | extern void drbd_bm_lock(struct drbd_conf *mdev, char *why); | 1484 | extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags); |
1445 | extern void drbd_bm_unlock(struct drbd_conf *mdev); | 1485 | extern void drbd_bm_unlock(struct drbd_conf *mdev); |
1446 | |||
1447 | extern int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e); | ||
1448 | /* drbd_main.c */ | 1486 | /* drbd_main.c */ |
1449 | 1487 | ||
1450 | extern struct kmem_cache *drbd_request_cache; | 1488 | extern struct kmem_cache *drbd_request_cache; |
@@ -1467,7 +1505,7 @@ extern void drbd_free_mdev(struct drbd_conf *mdev); | |||
1467 | extern int proc_details; | 1505 | extern int proc_details; |
1468 | 1506 | ||
1469 | /* drbd_req */ | 1507 | /* drbd_req */ |
1470 | extern int drbd_make_request_26(struct request_queue *q, struct bio *bio); | 1508 | extern int drbd_make_request(struct request_queue *q, struct bio *bio); |
1471 | extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req); | 1509 | extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req); |
1472 | extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); | 1510 | extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); |
1473 | extern int is_valid_ar_handle(struct drbd_request *, sector_t); | 1511 | extern int is_valid_ar_handle(struct drbd_request *, sector_t); |
@@ -1482,8 +1520,9 @@ enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = | |||
1482 | extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); | 1520 | extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); |
1483 | extern void resync_after_online_grow(struct drbd_conf *); | 1521 | extern void resync_after_online_grow(struct drbd_conf *); |
1484 | extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); | 1522 | extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); |
1485 | extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, | 1523 | extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev, |
1486 | int force); | 1524 | enum drbd_role new_role, |
1525 | int force); | ||
1487 | extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev); | 1526 | extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev); |
1488 | extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev); | 1527 | extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev); |
1489 | extern int drbd_khelper(struct drbd_conf *mdev, char *cmd); | 1528 | extern int drbd_khelper(struct drbd_conf *mdev, char *cmd); |
@@ -1499,6 +1538,7 @@ extern int drbd_resync_finished(struct drbd_conf *mdev); | |||
1499 | extern int drbd_md_sync_page_io(struct drbd_conf *mdev, | 1538 | extern int drbd_md_sync_page_io(struct drbd_conf *mdev, |
1500 | struct drbd_backing_dev *bdev, sector_t sector, int rw); | 1539 | struct drbd_backing_dev *bdev, sector_t sector, int rw); |
1501 | extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int); | 1540 | extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int); |
1541 | extern void drbd_rs_controller_reset(struct drbd_conf *mdev); | ||
1502 | 1542 | ||
1503 | static inline void ov_oos_print(struct drbd_conf *mdev) | 1543 | static inline void ov_oos_print(struct drbd_conf *mdev) |
1504 | { | 1544 | { |
@@ -1522,21 +1562,23 @@ extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int); | |||
1522 | extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int); | 1562 | extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int); |
1523 | extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int); | 1563 | extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int); |
1524 | extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int); | 1564 | extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int); |
1525 | extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int); | 1565 | extern int w_resync_timer(struct drbd_conf *, struct drbd_work *, int); |
1526 | extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int); | 1566 | extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int); |
1527 | extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int); | 1567 | extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int); |
1528 | extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int); | ||
1529 | extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int); | 1568 | extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int); |
1530 | extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int); | 1569 | extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int); |
1531 | extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int); | 1570 | extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int); |
1532 | extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int); | 1571 | extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int); |
1533 | extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int); | 1572 | extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int); |
1534 | extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int); | 1573 | extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int); |
1574 | extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int); | ||
1575 | extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int); | ||
1535 | 1576 | ||
1536 | extern void resync_timer_fn(unsigned long data); | 1577 | extern void resync_timer_fn(unsigned long data); |
1578 | extern void start_resync_timer_fn(unsigned long data); | ||
1537 | 1579 | ||
1538 | /* drbd_receiver.c */ | 1580 | /* drbd_receiver.c */ |
1539 | extern int drbd_rs_should_slow_down(struct drbd_conf *mdev); | 1581 | extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector); |
1540 | extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, | 1582 | extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, |
1541 | const unsigned rw, const int fault_type); | 1583 | const unsigned rw, const int fault_type); |
1542 | extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list); | 1584 | extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list); |
@@ -1619,16 +1661,16 @@ extern int drbd_rs_del_all(struct drbd_conf *mdev); | |||
1619 | extern void drbd_rs_failed_io(struct drbd_conf *mdev, | 1661 | extern void drbd_rs_failed_io(struct drbd_conf *mdev, |
1620 | sector_t sector, int size); | 1662 | sector_t sector, int size); |
1621 | extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *); | 1663 | extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *); |
1664 | extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go); | ||
1622 | extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, | 1665 | extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, |
1623 | int size, const char *file, const unsigned int line); | 1666 | int size, const char *file, const unsigned int line); |
1624 | #define drbd_set_in_sync(mdev, sector, size) \ | 1667 | #define drbd_set_in_sync(mdev, sector, size) \ |
1625 | __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__) | 1668 | __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__) |
1626 | extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, | 1669 | extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, |
1627 | int size, const char *file, const unsigned int line); | 1670 | int size, const char *file, const unsigned int line); |
1628 | #define drbd_set_out_of_sync(mdev, sector, size) \ | 1671 | #define drbd_set_out_of_sync(mdev, sector, size) \ |
1629 | __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) | 1672 | __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) |
1630 | extern void drbd_al_apply_to_bm(struct drbd_conf *mdev); | 1673 | extern void drbd_al_apply_to_bm(struct drbd_conf *mdev); |
1631 | extern void drbd_al_to_on_disk_bm(struct drbd_conf *mdev); | ||
1632 | extern void drbd_al_shrink(struct drbd_conf *mdev); | 1674 | extern void drbd_al_shrink(struct drbd_conf *mdev); |
1633 | 1675 | ||
1634 | 1676 | ||
@@ -1747,11 +1789,11 @@ static inline void drbd_state_unlock(struct drbd_conf *mdev) | |||
1747 | wake_up(&mdev->misc_wait); | 1789 | wake_up(&mdev->misc_wait); |
1748 | } | 1790 | } |
1749 | 1791 | ||
1750 | static inline int _drbd_set_state(struct drbd_conf *mdev, | 1792 | static inline enum drbd_state_rv |
1751 | union drbd_state ns, enum chg_state_flags flags, | 1793 | _drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, |
1752 | struct completion *done) | 1794 | enum chg_state_flags flags, struct completion *done) |
1753 | { | 1795 | { |
1754 | int rv; | 1796 | enum drbd_state_rv rv; |
1755 | 1797 | ||
1756 | read_lock(&global_state_lock); | 1798 | read_lock(&global_state_lock); |
1757 | rv = __drbd_set_state(mdev, ns, flags, done); | 1799 | rv = __drbd_set_state(mdev, ns, flags, done); |
@@ -1982,17 +2024,17 @@ static inline int drbd_send_ping_ack(struct drbd_conf *mdev) | |||
1982 | 2024 | ||
1983 | static inline void drbd_thread_stop(struct drbd_thread *thi) | 2025 | static inline void drbd_thread_stop(struct drbd_thread *thi) |
1984 | { | 2026 | { |
1985 | _drbd_thread_stop(thi, FALSE, TRUE); | 2027 | _drbd_thread_stop(thi, false, true); |
1986 | } | 2028 | } |
1987 | 2029 | ||
1988 | static inline void drbd_thread_stop_nowait(struct drbd_thread *thi) | 2030 | static inline void drbd_thread_stop_nowait(struct drbd_thread *thi) |
1989 | { | 2031 | { |
1990 | _drbd_thread_stop(thi, FALSE, FALSE); | 2032 | _drbd_thread_stop(thi, false, false); |
1991 | } | 2033 | } |
1992 | 2034 | ||
1993 | static inline void drbd_thread_restart_nowait(struct drbd_thread *thi) | 2035 | static inline void drbd_thread_restart_nowait(struct drbd_thread *thi) |
1994 | { | 2036 | { |
1995 | _drbd_thread_stop(thi, TRUE, FALSE); | 2037 | _drbd_thread_stop(thi, true, false); |
1996 | } | 2038 | } |
1997 | 2039 | ||
1998 | /* counts how many answer packets packets we expect from our peer, | 2040 | /* counts how many answer packets packets we expect from our peer, |
@@ -2146,17 +2188,18 @@ extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins) | |||
2146 | static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, | 2188 | static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, |
2147 | unsigned long *bits_left, unsigned int *per_mil_done) | 2189 | unsigned long *bits_left, unsigned int *per_mil_done) |
2148 | { | 2190 | { |
2149 | /* | 2191 | /* this is to break it at compile time when we change that, in case we |
2150 | * this is to break it at compile time when we change that | 2192 | * want to support more than (1<<32) bits on a 32bit arch. */ |
2151 | * (we may feel 4TB maximum storage per drbd is not enough) | ||
2152 | */ | ||
2153 | typecheck(unsigned long, mdev->rs_total); | 2193 | typecheck(unsigned long, mdev->rs_total); |
2154 | 2194 | ||
2155 | /* note: both rs_total and rs_left are in bits, i.e. in | 2195 | /* note: both rs_total and rs_left are in bits, i.e. in |
2156 | * units of BM_BLOCK_SIZE. | 2196 | * units of BM_BLOCK_SIZE. |
2157 | * for the percentage, we don't care. */ | 2197 | * for the percentage, we don't care. */ |
2158 | 2198 | ||
2159 | *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; | 2199 | if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) |
2200 | *bits_left = mdev->ov_left; | ||
2201 | else | ||
2202 | *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; | ||
2160 | /* >> 10 to prevent overflow, | 2203 | /* >> 10 to prevent overflow, |
2161 | * +1 to prevent division by zero */ | 2204 | * +1 to prevent division by zero */ |
2162 | if (*bits_left > mdev->rs_total) { | 2205 | if (*bits_left > mdev->rs_total) { |
@@ -2171,10 +2214,19 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, | |||
2171 | *bits_left, mdev->rs_total, mdev->rs_failed); | 2214 | *bits_left, mdev->rs_total, mdev->rs_failed); |
2172 | *per_mil_done = 0; | 2215 | *per_mil_done = 0; |
2173 | } else { | 2216 | } else { |
2174 | /* make sure the calculation happens in long context */ | 2217 | /* Make sure the division happens in long context. |
2175 | unsigned long tmp = 1000UL - | 2218 | * We allow up to one petabyte storage right now, |
2176 | (*bits_left >> 10)*1000UL | 2219 | * at a granularity of 4k per bit that is 2**38 bits. |
2177 | / ((mdev->rs_total >> 10) + 1UL); | 2220 | * After shift right and multiplication by 1000, |
2221 | * this should still fit easily into a 32bit long, | ||
2222 | * so we don't need a 64bit division on 32bit arch. | ||
2223 | * Note: currently we don't support such large bitmaps on 32bit | ||
2224 | * arch anyways, but no harm done to be prepared for it here. | ||
2225 | */ | ||
2226 | unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10; | ||
2227 | unsigned long left = *bits_left >> shift; | ||
2228 | unsigned long total = 1UL + (mdev->rs_total >> shift); | ||
2229 | unsigned long tmp = 1000UL - left * 1000UL/total; | ||
2178 | *per_mil_done = tmp; | 2230 | *per_mil_done = tmp; |
2179 | } | 2231 | } |
2180 | } | 2232 | } |
@@ -2193,8 +2245,9 @@ static inline int drbd_get_max_buffers(struct drbd_conf *mdev) | |||
2193 | return mxb; | 2245 | return mxb; |
2194 | } | 2246 | } |
2195 | 2247 | ||
2196 | static inline int drbd_state_is_stable(union drbd_state s) | 2248 | static inline int drbd_state_is_stable(struct drbd_conf *mdev) |
2197 | { | 2249 | { |
2250 | union drbd_state s = mdev->state; | ||
2198 | 2251 | ||
2199 | /* DO NOT add a default clause, we want the compiler to warn us | 2252 | /* DO NOT add a default clause, we want the compiler to warn us |
2200 | * for any newly introduced state we may have forgotten to add here */ | 2253 | * for any newly introduced state we may have forgotten to add here */ |
@@ -2211,11 +2264,9 @@ static inline int drbd_state_is_stable(union drbd_state s) | |||
2211 | case C_VERIFY_T: | 2264 | case C_VERIFY_T: |
2212 | case C_PAUSED_SYNC_S: | 2265 | case C_PAUSED_SYNC_S: |
2213 | case C_PAUSED_SYNC_T: | 2266 | case C_PAUSED_SYNC_T: |
2214 | /* maybe stable, look at the disk state */ | 2267 | case C_AHEAD: |
2215 | break; | 2268 | case C_BEHIND: |
2216 | 2269 | /* transitional states, IO allowed */ | |
2217 | /* no new io accepted during tansitional states | ||
2218 | * like handshake or teardown */ | ||
2219 | case C_DISCONNECTING: | 2270 | case C_DISCONNECTING: |
2220 | case C_UNCONNECTED: | 2271 | case C_UNCONNECTED: |
2221 | case C_TIMEOUT: | 2272 | case C_TIMEOUT: |
@@ -2226,7 +2277,15 @@ static inline int drbd_state_is_stable(union drbd_state s) | |||
2226 | case C_WF_REPORT_PARAMS: | 2277 | case C_WF_REPORT_PARAMS: |
2227 | case C_STARTING_SYNC_S: | 2278 | case C_STARTING_SYNC_S: |
2228 | case C_STARTING_SYNC_T: | 2279 | case C_STARTING_SYNC_T: |
2280 | break; | ||
2281 | |||
2282 | /* Allow IO in BM exchange states with new protocols */ | ||
2229 | case C_WF_BITMAP_S: | 2283 | case C_WF_BITMAP_S: |
2284 | if (mdev->agreed_pro_version < 96) | ||
2285 | return 0; | ||
2286 | break; | ||
2287 | |||
2288 | /* no new io accepted in these states */ | ||
2230 | case C_WF_BITMAP_T: | 2289 | case C_WF_BITMAP_T: |
2231 | case C_WF_SYNC_UUID: | 2290 | case C_WF_SYNC_UUID: |
2232 | case C_MASK: | 2291 | case C_MASK: |
@@ -2261,41 +2320,47 @@ static inline int is_susp(union drbd_state s) | |||
2261 | return s.susp || s.susp_nod || s.susp_fen; | 2320 | return s.susp || s.susp_nod || s.susp_fen; |
2262 | } | 2321 | } |
2263 | 2322 | ||
2264 | static inline int __inc_ap_bio_cond(struct drbd_conf *mdev) | 2323 | static inline bool may_inc_ap_bio(struct drbd_conf *mdev) |
2265 | { | 2324 | { |
2266 | int mxb = drbd_get_max_buffers(mdev); | 2325 | int mxb = drbd_get_max_buffers(mdev); |
2267 | 2326 | ||
2268 | if (is_susp(mdev->state)) | 2327 | if (is_susp(mdev->state)) |
2269 | return 0; | 2328 | return false; |
2270 | if (test_bit(SUSPEND_IO, &mdev->flags)) | 2329 | if (test_bit(SUSPEND_IO, &mdev->flags)) |
2271 | return 0; | 2330 | return false; |
2272 | 2331 | ||
2273 | /* to avoid potential deadlock or bitmap corruption, | 2332 | /* to avoid potential deadlock or bitmap corruption, |
2274 | * in various places, we only allow new application io | 2333 | * in various places, we only allow new application io |
2275 | * to start during "stable" states. */ | 2334 | * to start during "stable" states. */ |
2276 | 2335 | ||
2277 | /* no new io accepted when attaching or detaching the disk */ | 2336 | /* no new io accepted when attaching or detaching the disk */ |
2278 | if (!drbd_state_is_stable(mdev->state)) | 2337 | if (!drbd_state_is_stable(mdev)) |
2279 | return 0; | 2338 | return false; |
2280 | 2339 | ||
2281 | /* since some older kernels don't have atomic_add_unless, | 2340 | /* since some older kernels don't have atomic_add_unless, |
2282 | * and we are within the spinlock anyways, we have this workaround. */ | 2341 | * and we are within the spinlock anyways, we have this workaround. */ |
2283 | if (atomic_read(&mdev->ap_bio_cnt) > mxb) | 2342 | if (atomic_read(&mdev->ap_bio_cnt) > mxb) |
2284 | return 0; | 2343 | return false; |
2285 | if (test_bit(BITMAP_IO, &mdev->flags)) | 2344 | if (test_bit(BITMAP_IO, &mdev->flags)) |
2286 | return 0; | 2345 | return false; |
2287 | return 1; | 2346 | return true; |
2288 | } | 2347 | } |
2289 | 2348 | ||
2290 | /* I'd like to use wait_event_lock_irq, | 2349 | static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count) |
2291 | * but I'm not sure when it got introduced, | ||
2292 | * and not sure when it has 3 or 4 arguments */ | ||
2293 | static inline void inc_ap_bio(struct drbd_conf *mdev, int count) | ||
2294 | { | 2350 | { |
2295 | /* compare with after_state_ch, | 2351 | bool rv = false; |
2296 | * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */ | 2352 | |
2297 | DEFINE_WAIT(wait); | 2353 | spin_lock_irq(&mdev->req_lock); |
2354 | rv = may_inc_ap_bio(mdev); | ||
2355 | if (rv) | ||
2356 | atomic_add(count, &mdev->ap_bio_cnt); | ||
2357 | spin_unlock_irq(&mdev->req_lock); | ||
2358 | |||
2359 | return rv; | ||
2360 | } | ||
2298 | 2361 | ||
2362 | static inline void inc_ap_bio(struct drbd_conf *mdev, int count) | ||
2363 | { | ||
2299 | /* we wait here | 2364 | /* we wait here |
2300 | * as long as the device is suspended | 2365 | * as long as the device is suspended |
2301 | * until the bitmap is no longer on the fly during connection | 2366 | * until the bitmap is no longer on the fly during connection |
@@ -2304,16 +2369,7 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int count) | |||
2304 | * to avoid races with the reconnect code, | 2369 | * to avoid races with the reconnect code, |
2305 | * we need to atomic_inc within the spinlock. */ | 2370 | * we need to atomic_inc within the spinlock. */ |
2306 | 2371 | ||
2307 | spin_lock_irq(&mdev->req_lock); | 2372 | wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev, count)); |
2308 | while (!__inc_ap_bio_cond(mdev)) { | ||
2309 | prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE); | ||
2310 | spin_unlock_irq(&mdev->req_lock); | ||
2311 | schedule(); | ||
2312 | finish_wait(&mdev->misc_wait, &wait); | ||
2313 | spin_lock_irq(&mdev->req_lock); | ||
2314 | } | ||
2315 | atomic_add(count, &mdev->ap_bio_cnt); | ||
2316 | spin_unlock_irq(&mdev->req_lock); | ||
2317 | } | 2373 | } |
2318 | 2374 | ||
2319 | static inline void dec_ap_bio(struct drbd_conf *mdev) | 2375 | static inline void dec_ap_bio(struct drbd_conf *mdev) |
@@ -2333,9 +2389,11 @@ static inline void dec_ap_bio(struct drbd_conf *mdev) | |||
2333 | } | 2389 | } |
2334 | } | 2390 | } |
2335 | 2391 | ||
2336 | static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) | 2392 | static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) |
2337 | { | 2393 | { |
2394 | int changed = mdev->ed_uuid != val; | ||
2338 | mdev->ed_uuid = val; | 2395 | mdev->ed_uuid = val; |
2396 | return changed; | ||
2339 | } | 2397 | } |
2340 | 2398 | ||
2341 | static inline int seq_cmp(u32 a, u32 b) | 2399 | static inline int seq_cmp(u32 a, u32 b) |
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 8a43ce0edeed..dfc85f32d317 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c | |||
@@ -85,7 +85,8 @@ MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " | |||
85 | MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION); | 85 | MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION); |
86 | MODULE_VERSION(REL_VERSION); | 86 | MODULE_VERSION(REL_VERSION); |
87 | MODULE_LICENSE("GPL"); | 87 | MODULE_LICENSE("GPL"); |
88 | MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)"); | 88 | MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (" |
89 | __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")"); | ||
89 | MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR); | 90 | MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR); |
90 | 91 | ||
91 | #include <linux/moduleparam.h> | 92 | #include <linux/moduleparam.h> |
@@ -115,7 +116,7 @@ module_param(fault_devs, int, 0644); | |||
115 | #endif | 116 | #endif |
116 | 117 | ||
117 | /* module parameter, defined */ | 118 | /* module parameter, defined */ |
118 | unsigned int minor_count = 32; | 119 | unsigned int minor_count = DRBD_MINOR_COUNT_DEF; |
119 | int disable_sendpage; | 120 | int disable_sendpage; |
120 | int allow_oos; | 121 | int allow_oos; |
121 | unsigned int cn_idx = CN_IDX_DRBD; | 122 | unsigned int cn_idx = CN_IDX_DRBD; |
@@ -335,6 +336,7 @@ bail: | |||
335 | drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); | 336 | drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); |
336 | } | 337 | } |
337 | 338 | ||
339 | |||
338 | /** | 340 | /** |
339 | * _tl_restart() - Walks the transfer log, and applies an action to all requests | 341 | * _tl_restart() - Walks the transfer log, and applies an action to all requests |
340 | * @mdev: DRBD device. | 342 | * @mdev: DRBD device. |
@@ -456,7 +458,7 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) | |||
456 | } | 458 | } |
457 | 459 | ||
458 | /** | 460 | /** |
459 | * cl_wide_st_chg() - TRUE if the state change is a cluster wide one | 461 | * cl_wide_st_chg() - true if the state change is a cluster wide one |
460 | * @mdev: DRBD device. | 462 | * @mdev: DRBD device. |
461 | * @os: old (current) state. | 463 | * @os: old (current) state. |
462 | * @ns: new (wanted) state. | 464 | * @ns: new (wanted) state. |
@@ -473,12 +475,13 @@ static int cl_wide_st_chg(struct drbd_conf *mdev, | |||
473 | (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S); | 475 | (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S); |
474 | } | 476 | } |
475 | 477 | ||
476 | int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, | 478 | enum drbd_state_rv |
477 | union drbd_state mask, union drbd_state val) | 479 | drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, |
480 | union drbd_state mask, union drbd_state val) | ||
478 | { | 481 | { |
479 | unsigned long flags; | 482 | unsigned long flags; |
480 | union drbd_state os, ns; | 483 | union drbd_state os, ns; |
481 | int rv; | 484 | enum drbd_state_rv rv; |
482 | 485 | ||
483 | spin_lock_irqsave(&mdev->req_lock, flags); | 486 | spin_lock_irqsave(&mdev->req_lock, flags); |
484 | os = mdev->state; | 487 | os = mdev->state; |
@@ -502,20 +505,22 @@ void drbd_force_state(struct drbd_conf *mdev, | |||
502 | drbd_change_state(mdev, CS_HARD, mask, val); | 505 | drbd_change_state(mdev, CS_HARD, mask, val); |
503 | } | 506 | } |
504 | 507 | ||
505 | static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns); | 508 | static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state); |
506 | static int is_valid_state_transition(struct drbd_conf *, | 509 | static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *, |
507 | union drbd_state, union drbd_state); | 510 | union drbd_state, |
511 | union drbd_state); | ||
508 | static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os, | 512 | static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os, |
509 | union drbd_state ns, const char **warn_sync_abort); | 513 | union drbd_state ns, const char **warn_sync_abort); |
510 | int drbd_send_state_req(struct drbd_conf *, | 514 | int drbd_send_state_req(struct drbd_conf *, |
511 | union drbd_state, union drbd_state); | 515 | union drbd_state, union drbd_state); |
512 | 516 | ||
513 | static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev, | 517 | static enum drbd_state_rv |
514 | union drbd_state mask, union drbd_state val) | 518 | _req_st_cond(struct drbd_conf *mdev, union drbd_state mask, |
519 | union drbd_state val) | ||
515 | { | 520 | { |
516 | union drbd_state os, ns; | 521 | union drbd_state os, ns; |
517 | unsigned long flags; | 522 | unsigned long flags; |
518 | int rv; | 523 | enum drbd_state_rv rv; |
519 | 524 | ||
520 | if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags)) | 525 | if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags)) |
521 | return SS_CW_SUCCESS; | 526 | return SS_CW_SUCCESS; |
@@ -536,7 +541,7 @@ static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev, | |||
536 | if (rv == SS_SUCCESS) { | 541 | if (rv == SS_SUCCESS) { |
537 | rv = is_valid_state_transition(mdev, ns, os); | 542 | rv = is_valid_state_transition(mdev, ns, os); |
538 | if (rv == SS_SUCCESS) | 543 | if (rv == SS_SUCCESS) |
539 | rv = 0; /* cont waiting, otherwise fail. */ | 544 | rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */ |
540 | } | 545 | } |
541 | } | 546 | } |
542 | spin_unlock_irqrestore(&mdev->req_lock, flags); | 547 | spin_unlock_irqrestore(&mdev->req_lock, flags); |
@@ -554,14 +559,14 @@ static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev, | |||
554 | * Should not be called directly, use drbd_request_state() or | 559 | * Should not be called directly, use drbd_request_state() or |
555 | * _drbd_request_state(). | 560 | * _drbd_request_state(). |
556 | */ | 561 | */ |
557 | static int drbd_req_state(struct drbd_conf *mdev, | 562 | static enum drbd_state_rv |
558 | union drbd_state mask, union drbd_state val, | 563 | drbd_req_state(struct drbd_conf *mdev, union drbd_state mask, |
559 | enum chg_state_flags f) | 564 | union drbd_state val, enum chg_state_flags f) |
560 | { | 565 | { |
561 | struct completion done; | 566 | struct completion done; |
562 | unsigned long flags; | 567 | unsigned long flags; |
563 | union drbd_state os, ns; | 568 | union drbd_state os, ns; |
564 | int rv; | 569 | enum drbd_state_rv rv; |
565 | 570 | ||
566 | init_completion(&done); | 571 | init_completion(&done); |
567 | 572 | ||
@@ -636,10 +641,11 @@ abort: | |||
636 | * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE | 641 | * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE |
637 | * flag, or when logging of failed state change requests is not desired. | 642 | * flag, or when logging of failed state change requests is not desired. |
638 | */ | 643 | */ |
639 | int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask, | 644 | enum drbd_state_rv |
640 | union drbd_state val, enum chg_state_flags f) | 645 | _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask, |
646 | union drbd_state val, enum chg_state_flags f) | ||
641 | { | 647 | { |
642 | int rv; | 648 | enum drbd_state_rv rv; |
643 | 649 | ||
644 | wait_event(mdev->state_wait, | 650 | wait_event(mdev->state_wait, |
645 | (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE); | 651 | (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE); |
@@ -663,8 +669,8 @@ static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns) | |||
663 | ); | 669 | ); |
664 | } | 670 | } |
665 | 671 | ||
666 | void print_st_err(struct drbd_conf *mdev, | 672 | void print_st_err(struct drbd_conf *mdev, union drbd_state os, |
667 | union drbd_state os, union drbd_state ns, int err) | 673 | union drbd_state ns, enum drbd_state_rv err) |
668 | { | 674 | { |
669 | if (err == SS_IN_TRANSIENT_STATE) | 675 | if (err == SS_IN_TRANSIENT_STATE) |
670 | return; | 676 | return; |
@@ -674,32 +680,18 @@ void print_st_err(struct drbd_conf *mdev, | |||
674 | } | 680 | } |
675 | 681 | ||
676 | 682 | ||
677 | #define drbd_peer_str drbd_role_str | ||
678 | #define drbd_pdsk_str drbd_disk_str | ||
679 | |||
680 | #define drbd_susp_str(A) ((A) ? "1" : "0") | ||
681 | #define drbd_aftr_isp_str(A) ((A) ? "1" : "0") | ||
682 | #define drbd_peer_isp_str(A) ((A) ? "1" : "0") | ||
683 | #define drbd_user_isp_str(A) ((A) ? "1" : "0") | ||
684 | |||
685 | #define PSC(A) \ | ||
686 | ({ if (ns.A != os.A) { \ | ||
687 | pbp += sprintf(pbp, #A "( %s -> %s ) ", \ | ||
688 | drbd_##A##_str(os.A), \ | ||
689 | drbd_##A##_str(ns.A)); \ | ||
690 | } }) | ||
691 | |||
692 | /** | 683 | /** |
693 | * is_valid_state() - Returns an SS_ error code if ns is not valid | 684 | * is_valid_state() - Returns an SS_ error code if ns is not valid |
694 | * @mdev: DRBD device. | 685 | * @mdev: DRBD device. |
695 | * @ns: State to consider. | 686 | * @ns: State to consider. |
696 | */ | 687 | */ |
697 | static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns) | 688 | static enum drbd_state_rv |
689 | is_valid_state(struct drbd_conf *mdev, union drbd_state ns) | ||
698 | { | 690 | { |
699 | /* See drbd_state_sw_errors in drbd_strings.c */ | 691 | /* See drbd_state_sw_errors in drbd_strings.c */ |
700 | 692 | ||
701 | enum drbd_fencing_p fp; | 693 | enum drbd_fencing_p fp; |
702 | int rv = SS_SUCCESS; | 694 | enum drbd_state_rv rv = SS_SUCCESS; |
703 | 695 | ||
704 | fp = FP_DONT_CARE; | 696 | fp = FP_DONT_CARE; |
705 | if (get_ldev(mdev)) { | 697 | if (get_ldev(mdev)) { |
@@ -762,10 +754,11 @@ static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns) | |||
762 | * @ns: new state. | 754 | * @ns: new state. |
763 | * @os: old state. | 755 | * @os: old state. |
764 | */ | 756 | */ |
765 | static int is_valid_state_transition(struct drbd_conf *mdev, | 757 | static enum drbd_state_rv |
766 | union drbd_state ns, union drbd_state os) | 758 | is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns, |
759 | union drbd_state os) | ||
767 | { | 760 | { |
768 | int rv = SS_SUCCESS; | 761 | enum drbd_state_rv rv = SS_SUCCESS; |
769 | 762 | ||
770 | if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) && | 763 | if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) && |
771 | os.conn > C_CONNECTED) | 764 | os.conn > C_CONNECTED) |
@@ -800,6 +793,10 @@ static int is_valid_state_transition(struct drbd_conf *mdev, | |||
800 | os.conn < C_CONNECTED) | 793 | os.conn < C_CONNECTED) |
801 | rv = SS_NEED_CONNECTION; | 794 | rv = SS_NEED_CONNECTION; |
802 | 795 | ||
796 | if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE) | ||
797 | && os.conn < C_WF_REPORT_PARAMS) | ||
798 | rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */ | ||
799 | |||
803 | return rv; | 800 | return rv; |
804 | } | 801 | } |
805 | 802 | ||
@@ -817,6 +814,7 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state | |||
817 | union drbd_state ns, const char **warn_sync_abort) | 814 | union drbd_state ns, const char **warn_sync_abort) |
818 | { | 815 | { |
819 | enum drbd_fencing_p fp; | 816 | enum drbd_fencing_p fp; |
817 | enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max; | ||
820 | 818 | ||
821 | fp = FP_DONT_CARE; | 819 | fp = FP_DONT_CARE; |
822 | if (get_ldev(mdev)) { | 820 | if (get_ldev(mdev)) { |
@@ -869,56 +867,6 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state | |||
869 | ns.conn = C_CONNECTED; | 867 | ns.conn = C_CONNECTED; |
870 | } | 868 | } |
871 | 869 | ||
872 | if (ns.conn >= C_CONNECTED && | ||
873 | ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) || | ||
874 | (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) { | ||
875 | switch (ns.conn) { | ||
876 | case C_WF_BITMAP_T: | ||
877 | case C_PAUSED_SYNC_T: | ||
878 | ns.disk = D_OUTDATED; | ||
879 | break; | ||
880 | case C_CONNECTED: | ||
881 | case C_WF_BITMAP_S: | ||
882 | case C_SYNC_SOURCE: | ||
883 | case C_PAUSED_SYNC_S: | ||
884 | ns.disk = D_UP_TO_DATE; | ||
885 | break; | ||
886 | case C_SYNC_TARGET: | ||
887 | ns.disk = D_INCONSISTENT; | ||
888 | dev_warn(DEV, "Implicitly set disk state Inconsistent!\n"); | ||
889 | break; | ||
890 | } | ||
891 | if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE) | ||
892 | dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n"); | ||
893 | } | ||
894 | |||
895 | if (ns.conn >= C_CONNECTED && | ||
896 | (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) { | ||
897 | switch (ns.conn) { | ||
898 | case C_CONNECTED: | ||
899 | case C_WF_BITMAP_T: | ||
900 | case C_PAUSED_SYNC_T: | ||
901 | case C_SYNC_TARGET: | ||
902 | ns.pdsk = D_UP_TO_DATE; | ||
903 | break; | ||
904 | case C_WF_BITMAP_S: | ||
905 | case C_PAUSED_SYNC_S: | ||
906 | /* remap any consistent state to D_OUTDATED, | ||
907 | * but disallow "upgrade" of not even consistent states. | ||
908 | */ | ||
909 | ns.pdsk = | ||
910 | (D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED) | ||
911 | ? os.pdsk : D_OUTDATED; | ||
912 | break; | ||
913 | case C_SYNC_SOURCE: | ||
914 | ns.pdsk = D_INCONSISTENT; | ||
915 | dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n"); | ||
916 | break; | ||
917 | } | ||
918 | if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE) | ||
919 | dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n"); | ||
920 | } | ||
921 | |||
922 | /* Connection breaks down before we finished "Negotiating" */ | 870 | /* Connection breaks down before we finished "Negotiating" */ |
923 | if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING && | 871 | if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING && |
924 | get_ldev_if_state(mdev, D_NEGOTIATING)) { | 872 | get_ldev_if_state(mdev, D_NEGOTIATING)) { |
@@ -933,6 +881,94 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state | |||
933 | put_ldev(mdev); | 881 | put_ldev(mdev); |
934 | } | 882 | } |
935 | 883 | ||
884 | /* D_CONSISTENT and D_OUTDATED vanish when we get connected */ | ||
885 | if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) { | ||
886 | if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) | ||
887 | ns.disk = D_UP_TO_DATE; | ||
888 | if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED) | ||
889 | ns.pdsk = D_UP_TO_DATE; | ||
890 | } | ||
891 | |||
892 | /* Implications of the connection stat on the disk states */ | ||
893 | disk_min = D_DISKLESS; | ||
894 | disk_max = D_UP_TO_DATE; | ||
895 | pdsk_min = D_INCONSISTENT; | ||
896 | pdsk_max = D_UNKNOWN; | ||
897 | switch ((enum drbd_conns)ns.conn) { | ||
898 | case C_WF_BITMAP_T: | ||
899 | case C_PAUSED_SYNC_T: | ||
900 | case C_STARTING_SYNC_T: | ||
901 | case C_WF_SYNC_UUID: | ||
902 | case C_BEHIND: | ||
903 | disk_min = D_INCONSISTENT; | ||
904 | disk_max = D_OUTDATED; | ||
905 | pdsk_min = D_UP_TO_DATE; | ||
906 | pdsk_max = D_UP_TO_DATE; | ||
907 | break; | ||
908 | case C_VERIFY_S: | ||
909 | case C_VERIFY_T: | ||
910 | disk_min = D_UP_TO_DATE; | ||
911 | disk_max = D_UP_TO_DATE; | ||
912 | pdsk_min = D_UP_TO_DATE; | ||
913 | pdsk_max = D_UP_TO_DATE; | ||
914 | break; | ||
915 | case C_CONNECTED: | ||
916 | disk_min = D_DISKLESS; | ||
917 | disk_max = D_UP_TO_DATE; | ||
918 | pdsk_min = D_DISKLESS; | ||
919 | pdsk_max = D_UP_TO_DATE; | ||
920 | break; | ||
921 | case C_WF_BITMAP_S: | ||
922 | case C_PAUSED_SYNC_S: | ||
923 | case C_STARTING_SYNC_S: | ||
924 | case C_AHEAD: | ||
925 | disk_min = D_UP_TO_DATE; | ||
926 | disk_max = D_UP_TO_DATE; | ||
927 | pdsk_min = D_INCONSISTENT; | ||
928 | pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/ | ||
929 | break; | ||
930 | case C_SYNC_TARGET: | ||
931 | disk_min = D_INCONSISTENT; | ||
932 | disk_max = D_INCONSISTENT; | ||
933 | pdsk_min = D_UP_TO_DATE; | ||
934 | pdsk_max = D_UP_TO_DATE; | ||
935 | break; | ||
936 | case C_SYNC_SOURCE: | ||
937 | disk_min = D_UP_TO_DATE; | ||
938 | disk_max = D_UP_TO_DATE; | ||
939 | pdsk_min = D_INCONSISTENT; | ||
940 | pdsk_max = D_INCONSISTENT; | ||
941 | break; | ||
942 | case C_STANDALONE: | ||
943 | case C_DISCONNECTING: | ||
944 | case C_UNCONNECTED: | ||
945 | case C_TIMEOUT: | ||
946 | case C_BROKEN_PIPE: | ||
947 | case C_NETWORK_FAILURE: | ||
948 | case C_PROTOCOL_ERROR: | ||
949 | case C_TEAR_DOWN: | ||
950 | case C_WF_CONNECTION: | ||
951 | case C_WF_REPORT_PARAMS: | ||
952 | case C_MASK: | ||
953 | break; | ||
954 | } | ||
955 | if (ns.disk > disk_max) | ||
956 | ns.disk = disk_max; | ||
957 | |||
958 | if (ns.disk < disk_min) { | ||
959 | dev_warn(DEV, "Implicitly set disk from %s to %s\n", | ||
960 | drbd_disk_str(ns.disk), drbd_disk_str(disk_min)); | ||
961 | ns.disk = disk_min; | ||
962 | } | ||
963 | if (ns.pdsk > pdsk_max) | ||
964 | ns.pdsk = pdsk_max; | ||
965 | |||
966 | if (ns.pdsk < pdsk_min) { | ||
967 | dev_warn(DEV, "Implicitly set pdsk from %s to %s\n", | ||
968 | drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min)); | ||
969 | ns.pdsk = pdsk_min; | ||
970 | } | ||
971 | |||
936 | if (fp == FP_STONITH && | 972 | if (fp == FP_STONITH && |
937 | (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) && | 973 | (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) && |
938 | !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)) | 974 | !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)) |
@@ -961,6 +997,10 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state | |||
961 | /* helper for __drbd_set_state */ | 997 | /* helper for __drbd_set_state */ |
962 | static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs) | 998 | static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs) |
963 | { | 999 | { |
1000 | if (mdev->agreed_pro_version < 90) | ||
1001 | mdev->ov_start_sector = 0; | ||
1002 | mdev->rs_total = drbd_bm_bits(mdev); | ||
1003 | mdev->ov_position = 0; | ||
964 | if (cs == C_VERIFY_T) { | 1004 | if (cs == C_VERIFY_T) { |
965 | /* starting online verify from an arbitrary position | 1005 | /* starting online verify from an arbitrary position |
966 | * does not fit well into the existing protocol. | 1006 | * does not fit well into the existing protocol. |
@@ -970,11 +1010,15 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs) | |||
970 | mdev->ov_start_sector = ~(sector_t)0; | 1010 | mdev->ov_start_sector = ~(sector_t)0; |
971 | } else { | 1011 | } else { |
972 | unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector); | 1012 | unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector); |
973 | if (bit >= mdev->rs_total) | 1013 | if (bit >= mdev->rs_total) { |
974 | mdev->ov_start_sector = | 1014 | mdev->ov_start_sector = |
975 | BM_BIT_TO_SECT(mdev->rs_total - 1); | 1015 | BM_BIT_TO_SECT(mdev->rs_total - 1); |
1016 | mdev->rs_total = 1; | ||
1017 | } else | ||
1018 | mdev->rs_total -= bit; | ||
976 | mdev->ov_position = mdev->ov_start_sector; | 1019 | mdev->ov_position = mdev->ov_start_sector; |
977 | } | 1020 | } |
1021 | mdev->ov_left = mdev->rs_total; | ||
978 | } | 1022 | } |
979 | 1023 | ||
980 | static void drbd_resume_al(struct drbd_conf *mdev) | 1024 | static void drbd_resume_al(struct drbd_conf *mdev) |
@@ -992,12 +1036,12 @@ static void drbd_resume_al(struct drbd_conf *mdev) | |||
992 | * | 1036 | * |
993 | * Caller needs to hold req_lock, and global_state_lock. Do not call directly. | 1037 | * Caller needs to hold req_lock, and global_state_lock. Do not call directly. |
994 | */ | 1038 | */ |
995 | int __drbd_set_state(struct drbd_conf *mdev, | 1039 | enum drbd_state_rv |
996 | union drbd_state ns, enum chg_state_flags flags, | 1040 | __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, |
997 | struct completion *done) | 1041 | enum chg_state_flags flags, struct completion *done) |
998 | { | 1042 | { |
999 | union drbd_state os; | 1043 | union drbd_state os; |
1000 | int rv = SS_SUCCESS; | 1044 | enum drbd_state_rv rv = SS_SUCCESS; |
1001 | const char *warn_sync_abort = NULL; | 1045 | const char *warn_sync_abort = NULL; |
1002 | struct after_state_chg_work *ascw; | 1046 | struct after_state_chg_work *ascw; |
1003 | 1047 | ||
@@ -1033,22 +1077,46 @@ int __drbd_set_state(struct drbd_conf *mdev, | |||
1033 | dev_warn(DEV, "%s aborted.\n", warn_sync_abort); | 1077 | dev_warn(DEV, "%s aborted.\n", warn_sync_abort); |
1034 | 1078 | ||
1035 | { | 1079 | { |
1036 | char *pbp, pb[300]; | 1080 | char *pbp, pb[300]; |
1037 | pbp = pb; | 1081 | pbp = pb; |
1038 | *pbp = 0; | 1082 | *pbp = 0; |
1039 | PSC(role); | 1083 | if (ns.role != os.role) |
1040 | PSC(peer); | 1084 | pbp += sprintf(pbp, "role( %s -> %s ) ", |
1041 | PSC(conn); | 1085 | drbd_role_str(os.role), |
1042 | PSC(disk); | 1086 | drbd_role_str(ns.role)); |
1043 | PSC(pdsk); | 1087 | if (ns.peer != os.peer) |
1044 | if (is_susp(ns) != is_susp(os)) | 1088 | pbp += sprintf(pbp, "peer( %s -> %s ) ", |
1045 | pbp += sprintf(pbp, "susp( %s -> %s ) ", | 1089 | drbd_role_str(os.peer), |
1046 | drbd_susp_str(is_susp(os)), | 1090 | drbd_role_str(ns.peer)); |
1047 | drbd_susp_str(is_susp(ns))); | 1091 | if (ns.conn != os.conn) |
1048 | PSC(aftr_isp); | 1092 | pbp += sprintf(pbp, "conn( %s -> %s ) ", |
1049 | PSC(peer_isp); | 1093 | drbd_conn_str(os.conn), |
1050 | PSC(user_isp); | 1094 | drbd_conn_str(ns.conn)); |
1051 | dev_info(DEV, "%s\n", pb); | 1095 | if (ns.disk != os.disk) |
1096 | pbp += sprintf(pbp, "disk( %s -> %s ) ", | ||
1097 | drbd_disk_str(os.disk), | ||
1098 | drbd_disk_str(ns.disk)); | ||
1099 | if (ns.pdsk != os.pdsk) | ||
1100 | pbp += sprintf(pbp, "pdsk( %s -> %s ) ", | ||
1101 | drbd_disk_str(os.pdsk), | ||
1102 | drbd_disk_str(ns.pdsk)); | ||
1103 | if (is_susp(ns) != is_susp(os)) | ||
1104 | pbp += sprintf(pbp, "susp( %d -> %d ) ", | ||
1105 | is_susp(os), | ||
1106 | is_susp(ns)); | ||
1107 | if (ns.aftr_isp != os.aftr_isp) | ||
1108 | pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ", | ||
1109 | os.aftr_isp, | ||
1110 | ns.aftr_isp); | ||
1111 | if (ns.peer_isp != os.peer_isp) | ||
1112 | pbp += sprintf(pbp, "peer_isp( %d -> %d ) ", | ||
1113 | os.peer_isp, | ||
1114 | ns.peer_isp); | ||
1115 | if (ns.user_isp != os.user_isp) | ||
1116 | pbp += sprintf(pbp, "user_isp( %d -> %d ) ", | ||
1117 | os.user_isp, | ||
1118 | ns.user_isp); | ||
1119 | dev_info(DEV, "%s\n", pb); | ||
1052 | } | 1120 | } |
1053 | 1121 | ||
1054 | /* solve the race between becoming unconfigured, | 1122 | /* solve the race between becoming unconfigured, |
@@ -1074,6 +1142,10 @@ int __drbd_set_state(struct drbd_conf *mdev, | |||
1074 | atomic_inc(&mdev->local_cnt); | 1142 | atomic_inc(&mdev->local_cnt); |
1075 | 1143 | ||
1076 | mdev->state = ns; | 1144 | mdev->state = ns; |
1145 | |||
1146 | if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) | ||
1147 | drbd_print_uuids(mdev, "attached to UUIDs"); | ||
1148 | |||
1077 | wake_up(&mdev->misc_wait); | 1149 | wake_up(&mdev->misc_wait); |
1078 | wake_up(&mdev->state_wait); | 1150 | wake_up(&mdev->state_wait); |
1079 | 1151 | ||
@@ -1081,7 +1153,7 @@ int __drbd_set_state(struct drbd_conf *mdev, | |||
1081 | if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) && | 1153 | if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) && |
1082 | ns.conn < C_CONNECTED) { | 1154 | ns.conn < C_CONNECTED) { |
1083 | mdev->ov_start_sector = | 1155 | mdev->ov_start_sector = |
1084 | BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left); | 1156 | BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left); |
1085 | dev_info(DEV, "Online Verify reached sector %llu\n", | 1157 | dev_info(DEV, "Online Verify reached sector %llu\n", |
1086 | (unsigned long long)mdev->ov_start_sector); | 1158 | (unsigned long long)mdev->ov_start_sector); |
1087 | } | 1159 | } |
@@ -1106,14 +1178,7 @@ int __drbd_set_state(struct drbd_conf *mdev, | |||
1106 | unsigned long now = jiffies; | 1178 | unsigned long now = jiffies; |
1107 | int i; | 1179 | int i; |
1108 | 1180 | ||
1109 | mdev->ov_position = 0; | 1181 | set_ov_position(mdev, ns.conn); |
1110 | mdev->rs_total = drbd_bm_bits(mdev); | ||
1111 | if (mdev->agreed_pro_version >= 90) | ||
1112 | set_ov_position(mdev, ns.conn); | ||
1113 | else | ||
1114 | mdev->ov_start_sector = 0; | ||
1115 | mdev->ov_left = mdev->rs_total | ||
1116 | - BM_SECT_TO_BIT(mdev->ov_position); | ||
1117 | mdev->rs_start = now; | 1182 | mdev->rs_start = now; |
1118 | mdev->rs_last_events = 0; | 1183 | mdev->rs_last_events = 0; |
1119 | mdev->rs_last_sect_ev = 0; | 1184 | mdev->rs_last_sect_ev = 0; |
@@ -1121,10 +1186,12 @@ int __drbd_set_state(struct drbd_conf *mdev, | |||
1121 | mdev->ov_last_oos_start = 0; | 1186 | mdev->ov_last_oos_start = 0; |
1122 | 1187 | ||
1123 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { | 1188 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { |
1124 | mdev->rs_mark_left[i] = mdev->rs_total; | 1189 | mdev->rs_mark_left[i] = mdev->ov_left; |
1125 | mdev->rs_mark_time[i] = now; | 1190 | mdev->rs_mark_time[i] = now; |
1126 | } | 1191 | } |
1127 | 1192 | ||
1193 | drbd_rs_controller_reset(mdev); | ||
1194 | |||
1128 | if (ns.conn == C_VERIFY_S) { | 1195 | if (ns.conn == C_VERIFY_S) { |
1129 | dev_info(DEV, "Starting Online Verify from sector %llu\n", | 1196 | dev_info(DEV, "Starting Online Verify from sector %llu\n", |
1130 | (unsigned long long)mdev->ov_position); | 1197 | (unsigned long long)mdev->ov_position); |
@@ -1228,6 +1295,26 @@ static void abw_start_sync(struct drbd_conf *mdev, int rv) | |||
1228 | } | 1295 | } |
1229 | } | 1296 | } |
1230 | 1297 | ||
1298 | int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, | ||
1299 | int (*io_fn)(struct drbd_conf *), | ||
1300 | char *why, enum bm_flag flags) | ||
1301 | { | ||
1302 | int rv; | ||
1303 | |||
1304 | D_ASSERT(current == mdev->worker.task); | ||
1305 | |||
1306 | /* open coded non-blocking drbd_suspend_io(mdev); */ | ||
1307 | set_bit(SUSPEND_IO, &mdev->flags); | ||
1308 | |||
1309 | drbd_bm_lock(mdev, why, flags); | ||
1310 | rv = io_fn(mdev); | ||
1311 | drbd_bm_unlock(mdev); | ||
1312 | |||
1313 | drbd_resume_io(mdev); | ||
1314 | |||
1315 | return rv; | ||
1316 | } | ||
1317 | |||
1231 | /** | 1318 | /** |
1232 | * after_state_ch() - Perform after state change actions that may sleep | 1319 | * after_state_ch() - Perform after state change actions that may sleep |
1233 | * @mdev: DRBD device. | 1320 | * @mdev: DRBD device. |
@@ -1266,16 +1353,14 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1266 | 1353 | ||
1267 | nsm.i = -1; | 1354 | nsm.i = -1; |
1268 | if (ns.susp_nod) { | 1355 | if (ns.susp_nod) { |
1269 | if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { | 1356 | if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) |
1270 | if (ns.conn == C_CONNECTED) | 1357 | what = resend; |
1271 | what = resend, nsm.susp_nod = 0; | ||
1272 | else /* ns.conn > C_CONNECTED */ | ||
1273 | dev_err(DEV, "Unexpected Resynd going on!\n"); | ||
1274 | } | ||
1275 | 1358 | ||
1276 | if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING) | 1359 | if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING) |
1277 | what = restart_frozen_disk_io, nsm.susp_nod = 0; | 1360 | what = restart_frozen_disk_io; |
1278 | 1361 | ||
1362 | if (what != nothing) | ||
1363 | nsm.susp_nod = 0; | ||
1279 | } | 1364 | } |
1280 | 1365 | ||
1281 | if (ns.susp_fen) { | 1366 | if (ns.susp_fen) { |
@@ -1306,13 +1391,30 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1306 | spin_unlock_irq(&mdev->req_lock); | 1391 | spin_unlock_irq(&mdev->req_lock); |
1307 | } | 1392 | } |
1308 | 1393 | ||
1394 | /* Became sync source. With protocol >= 96, we still need to send out | ||
1395 | * the sync uuid now. Need to do that before any drbd_send_state, or | ||
1396 | * the other side may go "paused sync" before receiving the sync uuids, | ||
1397 | * which is unexpected. */ | ||
1398 | if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) && | ||
1399 | (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) && | ||
1400 | mdev->agreed_pro_version >= 96 && get_ldev(mdev)) { | ||
1401 | drbd_gen_and_send_sync_uuid(mdev); | ||
1402 | put_ldev(mdev); | ||
1403 | } | ||
1404 | |||
1309 | /* Do not change the order of the if above and the two below... */ | 1405 | /* Do not change the order of the if above and the two below... */ |
1310 | if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ | 1406 | if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ |
1311 | drbd_send_uuids(mdev); | 1407 | drbd_send_uuids(mdev); |
1312 | drbd_send_state(mdev); | 1408 | drbd_send_state(mdev); |
1313 | } | 1409 | } |
1314 | if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S) | 1410 | /* No point in queuing send_bitmap if we don't have a connection |
1315 | drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)"); | 1411 | * anymore, so check also the _current_ state, not only the new state |
1412 | * at the time this work was queued. */ | ||
1413 | if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S && | ||
1414 | mdev->state.conn == C_WF_BITMAP_S) | ||
1415 | drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, | ||
1416 | "send_bitmap (WFBitMapS)", | ||
1417 | BM_LOCKED_TEST_ALLOWED); | ||
1316 | 1418 | ||
1317 | /* Lost contact to peer's copy of the data */ | 1419 | /* Lost contact to peer's copy of the data */ |
1318 | if ((os.pdsk >= D_INCONSISTENT && | 1420 | if ((os.pdsk >= D_INCONSISTENT && |
@@ -1343,7 +1445,23 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1343 | 1445 | ||
1344 | /* D_DISKLESS Peer becomes secondary */ | 1446 | /* D_DISKLESS Peer becomes secondary */ |
1345 | if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) | 1447 | if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) |
1346 | drbd_al_to_on_disk_bm(mdev); | 1448 | /* We may still be Primary ourselves. |
1449 | * No harm done if the bitmap still changes, | ||
1450 | * redirtied pages will follow later. */ | ||
1451 | drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, | ||
1452 | "demote diskless peer", BM_LOCKED_SET_ALLOWED); | ||
1453 | put_ldev(mdev); | ||
1454 | } | ||
1455 | |||
1456 | /* Write out all changed bits on demote. | ||
1457 | * Though, no need to da that just yet | ||
1458 | * if there is a resync going on still */ | ||
1459 | if (os.role == R_PRIMARY && ns.role == R_SECONDARY && | ||
1460 | mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) { | ||
1461 | /* No changes to the bitmap expected this time, so assert that, | ||
1462 | * even though no harm was done if it did change. */ | ||
1463 | drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, | ||
1464 | "demote", BM_LOCKED_TEST_ALLOWED); | ||
1347 | put_ldev(mdev); | 1465 | put_ldev(mdev); |
1348 | } | 1466 | } |
1349 | 1467 | ||
@@ -1371,15 +1489,23 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1371 | if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED) | 1489 | if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED) |
1372 | drbd_send_state(mdev); | 1490 | drbd_send_state(mdev); |
1373 | 1491 | ||
1492 | if (os.conn != C_AHEAD && ns.conn == C_AHEAD) | ||
1493 | drbd_send_state(mdev); | ||
1494 | |||
1374 | /* We are in the progress to start a full sync... */ | 1495 | /* We are in the progress to start a full sync... */ |
1375 | if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || | 1496 | if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || |
1376 | (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S)) | 1497 | (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S)) |
1377 | drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync"); | 1498 | /* no other bitmap changes expected during this phase */ |
1499 | drbd_queue_bitmap_io(mdev, | ||
1500 | &drbd_bmio_set_n_write, &abw_start_sync, | ||
1501 | "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED); | ||
1378 | 1502 | ||
1379 | /* We are invalidating our self... */ | 1503 | /* We are invalidating our self... */ |
1380 | if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED && | 1504 | if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED && |
1381 | os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) | 1505 | os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) |
1382 | drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate"); | 1506 | /* other bitmap operation expected during this phase */ |
1507 | drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, | ||
1508 | "set_n_write from invalidate", BM_LOCKED_MASK); | ||
1383 | 1509 | ||
1384 | /* first half of local IO error, failure to attach, | 1510 | /* first half of local IO error, failure to attach, |
1385 | * or administrative detach */ | 1511 | * or administrative detach */ |
@@ -1434,8 +1560,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1434 | 1560 | ||
1435 | if (drbd_send_state(mdev)) | 1561 | if (drbd_send_state(mdev)) |
1436 | dev_warn(DEV, "Notified peer that I'm now diskless.\n"); | 1562 | dev_warn(DEV, "Notified peer that I'm now diskless.\n"); |
1437 | else | ||
1438 | dev_err(DEV, "Sending state for being diskless failed\n"); | ||
1439 | /* corresponding get_ldev in __drbd_set_state | 1563 | /* corresponding get_ldev in __drbd_set_state |
1440 | * this may finaly trigger drbd_ldev_destroy. */ | 1564 | * this may finaly trigger drbd_ldev_destroy. */ |
1441 | put_ldev(mdev); | 1565 | put_ldev(mdev); |
@@ -1459,6 +1583,19 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1459 | if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED) | 1583 | if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED) |
1460 | drbd_send_state(mdev); | 1584 | drbd_send_state(mdev); |
1461 | 1585 | ||
1586 | /* This triggers bitmap writeout of potentially still unwritten pages | ||
1587 | * if the resync finished cleanly, or aborted because of peer disk | ||
1588 | * failure, or because of connection loss. | ||
1589 | * For resync aborted because of local disk failure, we cannot do | ||
1590 | * any bitmap writeout anymore. | ||
1591 | * No harm done if some bits change during this phase. | ||
1592 | */ | ||
1593 | if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) { | ||
1594 | drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, | ||
1595 | "write from resync_finished", BM_LOCKED_SET_ALLOWED); | ||
1596 | put_ldev(mdev); | ||
1597 | } | ||
1598 | |||
1462 | /* free tl_hash if we Got thawed and are C_STANDALONE */ | 1599 | /* free tl_hash if we Got thawed and are C_STANDALONE */ |
1463 | if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash) | 1600 | if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash) |
1464 | drbd_free_tl_hash(mdev); | 1601 | drbd_free_tl_hash(mdev); |
@@ -1559,7 +1696,7 @@ int drbd_thread_start(struct drbd_thread *thi) | |||
1559 | if (!try_module_get(THIS_MODULE)) { | 1696 | if (!try_module_get(THIS_MODULE)) { |
1560 | dev_err(DEV, "Failed to get module reference in drbd_thread_start\n"); | 1697 | dev_err(DEV, "Failed to get module reference in drbd_thread_start\n"); |
1561 | spin_unlock_irqrestore(&thi->t_lock, flags); | 1698 | spin_unlock_irqrestore(&thi->t_lock, flags); |
1562 | return FALSE; | 1699 | return false; |
1563 | } | 1700 | } |
1564 | 1701 | ||
1565 | init_completion(&thi->stop); | 1702 | init_completion(&thi->stop); |
@@ -1576,7 +1713,7 @@ int drbd_thread_start(struct drbd_thread *thi) | |||
1576 | dev_err(DEV, "Couldn't start thread\n"); | 1713 | dev_err(DEV, "Couldn't start thread\n"); |
1577 | 1714 | ||
1578 | module_put(THIS_MODULE); | 1715 | module_put(THIS_MODULE); |
1579 | return FALSE; | 1716 | return false; |
1580 | } | 1717 | } |
1581 | spin_lock_irqsave(&thi->t_lock, flags); | 1718 | spin_lock_irqsave(&thi->t_lock, flags); |
1582 | thi->task = nt; | 1719 | thi->task = nt; |
@@ -1596,7 +1733,7 @@ int drbd_thread_start(struct drbd_thread *thi) | |||
1596 | break; | 1733 | break; |
1597 | } | 1734 | } |
1598 | 1735 | ||
1599 | return TRUE; | 1736 | return true; |
1600 | } | 1737 | } |
1601 | 1738 | ||
1602 | 1739 | ||
@@ -1694,8 +1831,8 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, | |||
1694 | { | 1831 | { |
1695 | int sent, ok; | 1832 | int sent, ok; |
1696 | 1833 | ||
1697 | ERR_IF(!h) return FALSE; | 1834 | ERR_IF(!h) return false; |
1698 | ERR_IF(!size) return FALSE; | 1835 | ERR_IF(!size) return false; |
1699 | 1836 | ||
1700 | h->magic = BE_DRBD_MAGIC; | 1837 | h->magic = BE_DRBD_MAGIC; |
1701 | h->command = cpu_to_be16(cmd); | 1838 | h->command = cpu_to_be16(cmd); |
@@ -1704,8 +1841,8 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, | |||
1704 | sent = drbd_send(mdev, sock, h, size, msg_flags); | 1841 | sent = drbd_send(mdev, sock, h, size, msg_flags); |
1705 | 1842 | ||
1706 | ok = (sent == size); | 1843 | ok = (sent == size); |
1707 | if (!ok) | 1844 | if (!ok && !signal_pending(current)) |
1708 | dev_err(DEV, "short sent %s size=%d sent=%d\n", | 1845 | dev_warn(DEV, "short sent %s size=%d sent=%d\n", |
1709 | cmdname(cmd), (int)size, sent); | 1846 | cmdname(cmd), (int)size, sent); |
1710 | return ok; | 1847 | return ok; |
1711 | } | 1848 | } |
@@ -1840,7 +1977,7 @@ int drbd_send_protocol(struct drbd_conf *mdev) | |||
1840 | else { | 1977 | else { |
1841 | dev_err(DEV, "--dry-run is not supported by peer"); | 1978 | dev_err(DEV, "--dry-run is not supported by peer"); |
1842 | kfree(p); | 1979 | kfree(p); |
1843 | return 0; | 1980 | return -1; |
1844 | } | 1981 | } |
1845 | } | 1982 | } |
1846 | p->conn_flags = cpu_to_be32(cf); | 1983 | p->conn_flags = cpu_to_be32(cf); |
@@ -1888,12 +2025,36 @@ int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev) | |||
1888 | return _drbd_send_uuids(mdev, 8); | 2025 | return _drbd_send_uuids(mdev, 8); |
1889 | } | 2026 | } |
1890 | 2027 | ||
2028 | void drbd_print_uuids(struct drbd_conf *mdev, const char *text) | ||
2029 | { | ||
2030 | if (get_ldev_if_state(mdev, D_NEGOTIATING)) { | ||
2031 | u64 *uuid = mdev->ldev->md.uuid; | ||
2032 | dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n", | ||
2033 | text, | ||
2034 | (unsigned long long)uuid[UI_CURRENT], | ||
2035 | (unsigned long long)uuid[UI_BITMAP], | ||
2036 | (unsigned long long)uuid[UI_HISTORY_START], | ||
2037 | (unsigned long long)uuid[UI_HISTORY_END]); | ||
2038 | put_ldev(mdev); | ||
2039 | } else { | ||
2040 | dev_info(DEV, "%s effective data uuid: %016llX\n", | ||
2041 | text, | ||
2042 | (unsigned long long)mdev->ed_uuid); | ||
2043 | } | ||
2044 | } | ||
1891 | 2045 | ||
1892 | int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val) | 2046 | int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev) |
1893 | { | 2047 | { |
1894 | struct p_rs_uuid p; | 2048 | struct p_rs_uuid p; |
2049 | u64 uuid; | ||
1895 | 2050 | ||
1896 | p.uuid = cpu_to_be64(val); | 2051 | D_ASSERT(mdev->state.disk == D_UP_TO_DATE); |
2052 | |||
2053 | uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET; | ||
2054 | drbd_uuid_set(mdev, UI_BITMAP, uuid); | ||
2055 | drbd_print_uuids(mdev, "updated sync UUID"); | ||
2056 | drbd_md_sync(mdev); | ||
2057 | p.uuid = cpu_to_be64(uuid); | ||
1897 | 2058 | ||
1898 | return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, | 2059 | return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, |
1899 | (struct p_header80 *)&p, sizeof(p)); | 2060 | (struct p_header80 *)&p, sizeof(p)); |
@@ -1921,7 +2082,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl | |||
1921 | p.d_size = cpu_to_be64(d_size); | 2082 | p.d_size = cpu_to_be64(d_size); |
1922 | p.u_size = cpu_to_be64(u_size); | 2083 | p.u_size = cpu_to_be64(u_size); |
1923 | p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev)); | 2084 | p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev)); |
1924 | p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue)); | 2085 | p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9); |
1925 | p.queue_order_type = cpu_to_be16(q_order_type); | 2086 | p.queue_order_type = cpu_to_be16(q_order_type); |
1926 | p.dds_flags = cpu_to_be16(flags); | 2087 | p.dds_flags = cpu_to_be16(flags); |
1927 | 2088 | ||
@@ -1972,7 +2133,7 @@ int drbd_send_state_req(struct drbd_conf *mdev, | |||
1972 | (struct p_header80 *)&p, sizeof(p)); | 2133 | (struct p_header80 *)&p, sizeof(p)); |
1973 | } | 2134 | } |
1974 | 2135 | ||
1975 | int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode) | 2136 | int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode) |
1976 | { | 2137 | { |
1977 | struct p_req_state_reply p; | 2138 | struct p_req_state_reply p; |
1978 | 2139 | ||
@@ -2076,9 +2237,15 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev, | |||
2076 | return len; | 2237 | return len; |
2077 | } | 2238 | } |
2078 | 2239 | ||
2079 | enum { OK, FAILED, DONE } | 2240 | /** |
2241 | * send_bitmap_rle_or_plain | ||
2242 | * | ||
2243 | * Return 0 when done, 1 when another iteration is needed, and a negative error | ||
2244 | * code upon failure. | ||
2245 | */ | ||
2246 | static int | ||
2080 | send_bitmap_rle_or_plain(struct drbd_conf *mdev, | 2247 | send_bitmap_rle_or_plain(struct drbd_conf *mdev, |
2081 | struct p_header80 *h, struct bm_xfer_ctx *c) | 2248 | struct p_header80 *h, struct bm_xfer_ctx *c) |
2082 | { | 2249 | { |
2083 | struct p_compressed_bm *p = (void*)h; | 2250 | struct p_compressed_bm *p = (void*)h; |
2084 | unsigned long num_words; | 2251 | unsigned long num_words; |
@@ -2088,7 +2255,7 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, | |||
2088 | len = fill_bitmap_rle_bits(mdev, p, c); | 2255 | len = fill_bitmap_rle_bits(mdev, p, c); |
2089 | 2256 | ||
2090 | if (len < 0) | 2257 | if (len < 0) |
2091 | return FAILED; | 2258 | return -EIO; |
2092 | 2259 | ||
2093 | if (len) { | 2260 | if (len) { |
2094 | DCBP_set_code(p, RLE_VLI_Bits); | 2261 | DCBP_set_code(p, RLE_VLI_Bits); |
@@ -2118,11 +2285,14 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, | |||
2118 | if (c->bit_offset > c->bm_bits) | 2285 | if (c->bit_offset > c->bm_bits) |
2119 | c->bit_offset = c->bm_bits; | 2286 | c->bit_offset = c->bm_bits; |
2120 | } | 2287 | } |
2121 | ok = ok ? ((len == 0) ? DONE : OK) : FAILED; | 2288 | if (ok) { |
2122 | 2289 | if (len == 0) { | |
2123 | if (ok == DONE) | 2290 | INFO_bm_xfer_stats(mdev, "send", c); |
2124 | INFO_bm_xfer_stats(mdev, "send", c); | 2291 | return 0; |
2125 | return ok; | 2292 | } else |
2293 | return 1; | ||
2294 | } | ||
2295 | return -EIO; | ||
2126 | } | 2296 | } |
2127 | 2297 | ||
2128 | /* See the comment at receive_bitmap() */ | 2298 | /* See the comment at receive_bitmap() */ |
@@ -2130,16 +2300,16 @@ int _drbd_send_bitmap(struct drbd_conf *mdev) | |||
2130 | { | 2300 | { |
2131 | struct bm_xfer_ctx c; | 2301 | struct bm_xfer_ctx c; |
2132 | struct p_header80 *p; | 2302 | struct p_header80 *p; |
2133 | int ret; | 2303 | int err; |
2134 | 2304 | ||
2135 | ERR_IF(!mdev->bitmap) return FALSE; | 2305 | ERR_IF(!mdev->bitmap) return false; |
2136 | 2306 | ||
2137 | /* maybe we should use some per thread scratch page, | 2307 | /* maybe we should use some per thread scratch page, |
2138 | * and allocate that during initial device creation? */ | 2308 | * and allocate that during initial device creation? */ |
2139 | p = (struct p_header80 *) __get_free_page(GFP_NOIO); | 2309 | p = (struct p_header80 *) __get_free_page(GFP_NOIO); |
2140 | if (!p) { | 2310 | if (!p) { |
2141 | dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); | 2311 | dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); |
2142 | return FALSE; | 2312 | return false; |
2143 | } | 2313 | } |
2144 | 2314 | ||
2145 | if (get_ldev(mdev)) { | 2315 | if (get_ldev(mdev)) { |
@@ -2165,11 +2335,11 @@ int _drbd_send_bitmap(struct drbd_conf *mdev) | |||
2165 | }; | 2335 | }; |
2166 | 2336 | ||
2167 | do { | 2337 | do { |
2168 | ret = send_bitmap_rle_or_plain(mdev, p, &c); | 2338 | err = send_bitmap_rle_or_plain(mdev, p, &c); |
2169 | } while (ret == OK); | 2339 | } while (err > 0); |
2170 | 2340 | ||
2171 | free_page((unsigned long) p); | 2341 | free_page((unsigned long) p); |
2172 | return (ret == DONE); | 2342 | return err == 0; |
2173 | } | 2343 | } |
2174 | 2344 | ||
2175 | int drbd_send_bitmap(struct drbd_conf *mdev) | 2345 | int drbd_send_bitmap(struct drbd_conf *mdev) |
@@ -2192,7 +2362,7 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size) | |||
2192 | p.set_size = cpu_to_be32(set_size); | 2362 | p.set_size = cpu_to_be32(set_size); |
2193 | 2363 | ||
2194 | if (mdev->state.conn < C_CONNECTED) | 2364 | if (mdev->state.conn < C_CONNECTED) |
2195 | return FALSE; | 2365 | return false; |
2196 | ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, | 2366 | ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, |
2197 | (struct p_header80 *)&p, sizeof(p)); | 2367 | (struct p_header80 *)&p, sizeof(p)); |
2198 | return ok; | 2368 | return ok; |
@@ -2220,7 +2390,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd, | |||
2220 | p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq)); | 2390 | p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq)); |
2221 | 2391 | ||
2222 | if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED) | 2392 | if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED) |
2223 | return FALSE; | 2393 | return false; |
2224 | ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, | 2394 | ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, |
2225 | (struct p_header80 *)&p, sizeof(p)); | 2395 | (struct p_header80 *)&p, sizeof(p)); |
2226 | return ok; | 2396 | return ok; |
@@ -2326,8 +2496,8 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size) | |||
2326 | } | 2496 | } |
2327 | 2497 | ||
2328 | /* called on sndtimeo | 2498 | /* called on sndtimeo |
2329 | * returns FALSE if we should retry, | 2499 | * returns false if we should retry, |
2330 | * TRUE if we think connection is dead | 2500 | * true if we think connection is dead |
2331 | */ | 2501 | */ |
2332 | static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock) | 2502 | static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock) |
2333 | { | 2503 | { |
@@ -2340,7 +2510,7 @@ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket * | |||
2340 | || mdev->state.conn < C_CONNECTED; | 2510 | || mdev->state.conn < C_CONNECTED; |
2341 | 2511 | ||
2342 | if (drop_it) | 2512 | if (drop_it) |
2343 | return TRUE; | 2513 | return true; |
2344 | 2514 | ||
2345 | drop_it = !--mdev->ko_count; | 2515 | drop_it = !--mdev->ko_count; |
2346 | if (!drop_it) { | 2516 | if (!drop_it) { |
@@ -2531,13 +2701,39 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) | |||
2531 | if (ok && dgs) { | 2701 | if (ok && dgs) { |
2532 | dgb = mdev->int_dig_out; | 2702 | dgb = mdev->int_dig_out; |
2533 | drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb); | 2703 | drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb); |
2534 | ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); | 2704 | ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); |
2535 | } | 2705 | } |
2536 | if (ok) { | 2706 | if (ok) { |
2537 | if (mdev->net_conf->wire_protocol == DRBD_PROT_A) | 2707 | /* For protocol A, we have to memcpy the payload into |
2708 | * socket buffers, as we may complete right away | ||
2709 | * as soon as we handed it over to tcp, at which point the data | ||
2710 | * pages may become invalid. | ||
2711 | * | ||
2712 | * For data-integrity enabled, we copy it as well, so we can be | ||
2713 | * sure that even if the bio pages may still be modified, it | ||
2714 | * won't change the data on the wire, thus if the digest checks | ||
2715 | * out ok after sending on this side, but does not fit on the | ||
2716 | * receiving side, we sure have detected corruption elsewhere. | ||
2717 | */ | ||
2718 | if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs) | ||
2538 | ok = _drbd_send_bio(mdev, req->master_bio); | 2719 | ok = _drbd_send_bio(mdev, req->master_bio); |
2539 | else | 2720 | else |
2540 | ok = _drbd_send_zc_bio(mdev, req->master_bio); | 2721 | ok = _drbd_send_zc_bio(mdev, req->master_bio); |
2722 | |||
2723 | /* double check digest, sometimes buffers have been modified in flight. */ | ||
2724 | if (dgs > 0 && dgs <= 64) { | ||
2725 | /* 64 byte, 512 bit, is the larges digest size | ||
2726 | * currently supported in kernel crypto. */ | ||
2727 | unsigned char digest[64]; | ||
2728 | drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest); | ||
2729 | if (memcmp(mdev->int_dig_out, digest, dgs)) { | ||
2730 | dev_warn(DEV, | ||
2731 | "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n", | ||
2732 | (unsigned long long)req->sector, req->size); | ||
2733 | } | ||
2734 | } /* else if (dgs > 64) { | ||
2735 | ... Be noisy about digest too large ... | ||
2736 | } */ | ||
2541 | } | 2737 | } |
2542 | 2738 | ||
2543 | drbd_put_data_sock(mdev); | 2739 | drbd_put_data_sock(mdev); |
@@ -2587,7 +2783,7 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, | |||
2587 | if (ok && dgs) { | 2783 | if (ok && dgs) { |
2588 | dgb = mdev->int_dig_out; | 2784 | dgb = mdev->int_dig_out; |
2589 | drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb); | 2785 | drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb); |
2590 | ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); | 2786 | ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); |
2591 | } | 2787 | } |
2592 | if (ok) | 2788 | if (ok) |
2593 | ok = _drbd_send_zc_ee(mdev, e); | 2789 | ok = _drbd_send_zc_ee(mdev, e); |
@@ -2597,6 +2793,16 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, | |||
2597 | return ok; | 2793 | return ok; |
2598 | } | 2794 | } |
2599 | 2795 | ||
2796 | int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req) | ||
2797 | { | ||
2798 | struct p_block_desc p; | ||
2799 | |||
2800 | p.sector = cpu_to_be64(req->sector); | ||
2801 | p.blksize = cpu_to_be32(req->size); | ||
2802 | |||
2803 | return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p)); | ||
2804 | } | ||
2805 | |||
2600 | /* | 2806 | /* |
2601 | drbd_send distinguishes two cases: | 2807 | drbd_send distinguishes two cases: |
2602 | 2808 | ||
@@ -2770,6 +2976,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) | |||
2770 | atomic_set(&mdev->pp_in_use_by_net, 0); | 2976 | atomic_set(&mdev->pp_in_use_by_net, 0); |
2771 | atomic_set(&mdev->rs_sect_in, 0); | 2977 | atomic_set(&mdev->rs_sect_in, 0); |
2772 | atomic_set(&mdev->rs_sect_ev, 0); | 2978 | atomic_set(&mdev->rs_sect_ev, 0); |
2979 | atomic_set(&mdev->ap_in_flight, 0); | ||
2773 | 2980 | ||
2774 | mutex_init(&mdev->md_io_mutex); | 2981 | mutex_init(&mdev->md_io_mutex); |
2775 | mutex_init(&mdev->data.mutex); | 2982 | mutex_init(&mdev->data.mutex); |
@@ -2798,19 +3005,27 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) | |||
2798 | INIT_LIST_HEAD(&mdev->unplug_work.list); | 3005 | INIT_LIST_HEAD(&mdev->unplug_work.list); |
2799 | INIT_LIST_HEAD(&mdev->go_diskless.list); | 3006 | INIT_LIST_HEAD(&mdev->go_diskless.list); |
2800 | INIT_LIST_HEAD(&mdev->md_sync_work.list); | 3007 | INIT_LIST_HEAD(&mdev->md_sync_work.list); |
3008 | INIT_LIST_HEAD(&mdev->start_resync_work.list); | ||
2801 | INIT_LIST_HEAD(&mdev->bm_io_work.w.list); | 3009 | INIT_LIST_HEAD(&mdev->bm_io_work.w.list); |
2802 | 3010 | ||
2803 | mdev->resync_work.cb = w_resync_inactive; | 3011 | mdev->resync_work.cb = w_resync_timer; |
2804 | mdev->unplug_work.cb = w_send_write_hint; | 3012 | mdev->unplug_work.cb = w_send_write_hint; |
2805 | mdev->go_diskless.cb = w_go_diskless; | 3013 | mdev->go_diskless.cb = w_go_diskless; |
2806 | mdev->md_sync_work.cb = w_md_sync; | 3014 | mdev->md_sync_work.cb = w_md_sync; |
2807 | mdev->bm_io_work.w.cb = w_bitmap_io; | 3015 | mdev->bm_io_work.w.cb = w_bitmap_io; |
3016 | mdev->start_resync_work.cb = w_start_resync; | ||
2808 | init_timer(&mdev->resync_timer); | 3017 | init_timer(&mdev->resync_timer); |
2809 | init_timer(&mdev->md_sync_timer); | 3018 | init_timer(&mdev->md_sync_timer); |
3019 | init_timer(&mdev->start_resync_timer); | ||
3020 | init_timer(&mdev->request_timer); | ||
2810 | mdev->resync_timer.function = resync_timer_fn; | 3021 | mdev->resync_timer.function = resync_timer_fn; |
2811 | mdev->resync_timer.data = (unsigned long) mdev; | 3022 | mdev->resync_timer.data = (unsigned long) mdev; |
2812 | mdev->md_sync_timer.function = md_sync_timer_fn; | 3023 | mdev->md_sync_timer.function = md_sync_timer_fn; |
2813 | mdev->md_sync_timer.data = (unsigned long) mdev; | 3024 | mdev->md_sync_timer.data = (unsigned long) mdev; |
3025 | mdev->start_resync_timer.function = start_resync_timer_fn; | ||
3026 | mdev->start_resync_timer.data = (unsigned long) mdev; | ||
3027 | mdev->request_timer.function = request_timer_fn; | ||
3028 | mdev->request_timer.data = (unsigned long) mdev; | ||
2814 | 3029 | ||
2815 | init_waitqueue_head(&mdev->misc_wait); | 3030 | init_waitqueue_head(&mdev->misc_wait); |
2816 | init_waitqueue_head(&mdev->state_wait); | 3031 | init_waitqueue_head(&mdev->state_wait); |
@@ -2881,6 +3096,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev) | |||
2881 | D_ASSERT(list_empty(&mdev->resync_work.list)); | 3096 | D_ASSERT(list_empty(&mdev->resync_work.list)); |
2882 | D_ASSERT(list_empty(&mdev->unplug_work.list)); | 3097 | D_ASSERT(list_empty(&mdev->unplug_work.list)); |
2883 | D_ASSERT(list_empty(&mdev->go_diskless.list)); | 3098 | D_ASSERT(list_empty(&mdev->go_diskless.list)); |
3099 | |||
3100 | drbd_set_defaults(mdev); | ||
2884 | } | 3101 | } |
2885 | 3102 | ||
2886 | 3103 | ||
@@ -2923,7 +3140,7 @@ static void drbd_destroy_mempools(void) | |||
2923 | static int drbd_create_mempools(void) | 3140 | static int drbd_create_mempools(void) |
2924 | { | 3141 | { |
2925 | struct page *page; | 3142 | struct page *page; |
2926 | const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count; | 3143 | const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count; |
2927 | int i; | 3144 | int i; |
2928 | 3145 | ||
2929 | /* prepare our caches and mempools */ | 3146 | /* prepare our caches and mempools */ |
@@ -3087,11 +3304,20 @@ static void drbd_cleanup(void) | |||
3087 | 3304 | ||
3088 | unregister_reboot_notifier(&drbd_notifier); | 3305 | unregister_reboot_notifier(&drbd_notifier); |
3089 | 3306 | ||
3307 | /* first remove proc, | ||
3308 | * drbdsetup uses it's presence to detect | ||
3309 | * whether DRBD is loaded. | ||
3310 | * If we would get stuck in proc removal, | ||
3311 | * but have netlink already deregistered, | ||
3312 | * some drbdsetup commands may wait forever | ||
3313 | * for an answer. | ||
3314 | */ | ||
3315 | if (drbd_proc) | ||
3316 | remove_proc_entry("drbd", NULL); | ||
3317 | |||
3090 | drbd_nl_cleanup(); | 3318 | drbd_nl_cleanup(); |
3091 | 3319 | ||
3092 | if (minor_table) { | 3320 | if (minor_table) { |
3093 | if (drbd_proc) | ||
3094 | remove_proc_entry("drbd", NULL); | ||
3095 | i = minor_count; | 3321 | i = minor_count; |
3096 | while (i--) | 3322 | while (i--) |
3097 | drbd_delete_device(i); | 3323 | drbd_delete_device(i); |
@@ -3119,7 +3345,7 @@ static int drbd_congested(void *congested_data, int bdi_bits) | |||
3119 | char reason = '-'; | 3345 | char reason = '-'; |
3120 | int r = 0; | 3346 | int r = 0; |
3121 | 3347 | ||
3122 | if (!__inc_ap_bio_cond(mdev)) { | 3348 | if (!may_inc_ap_bio(mdev)) { |
3123 | /* DRBD has frozen IO */ | 3349 | /* DRBD has frozen IO */ |
3124 | r = bdi_bits; | 3350 | r = bdi_bits; |
3125 | reason = 'd'; | 3351 | reason = 'd'; |
@@ -3172,7 +3398,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor) | |||
3172 | goto out_no_disk; | 3398 | goto out_no_disk; |
3173 | mdev->vdisk = disk; | 3399 | mdev->vdisk = disk; |
3174 | 3400 | ||
3175 | set_disk_ro(disk, TRUE); | 3401 | set_disk_ro(disk, true); |
3176 | 3402 | ||
3177 | disk->queue = q; | 3403 | disk->queue = q; |
3178 | disk->major = DRBD_MAJOR; | 3404 | disk->major = DRBD_MAJOR; |
@@ -3188,8 +3414,8 @@ struct drbd_conf *drbd_new_device(unsigned int minor) | |||
3188 | q->backing_dev_info.congested_fn = drbd_congested; | 3414 | q->backing_dev_info.congested_fn = drbd_congested; |
3189 | q->backing_dev_info.congested_data = mdev; | 3415 | q->backing_dev_info.congested_data = mdev; |
3190 | 3416 | ||
3191 | blk_queue_make_request(q, drbd_make_request_26); | 3417 | blk_queue_make_request(q, drbd_make_request); |
3192 | blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE); | 3418 | blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9); |
3193 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); | 3419 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); |
3194 | blk_queue_merge_bvec(q, drbd_merge_bvec); | 3420 | blk_queue_merge_bvec(q, drbd_merge_bvec); |
3195 | q->queue_lock = &mdev->req_lock; | 3421 | q->queue_lock = &mdev->req_lock; |
@@ -3251,6 +3477,7 @@ void drbd_free_mdev(struct drbd_conf *mdev) | |||
3251 | put_disk(mdev->vdisk); | 3477 | put_disk(mdev->vdisk); |
3252 | blk_cleanup_queue(mdev->rq_queue); | 3478 | blk_cleanup_queue(mdev->rq_queue); |
3253 | free_cpumask_var(mdev->cpu_mask); | 3479 | free_cpumask_var(mdev->cpu_mask); |
3480 | drbd_free_tl_hash(mdev); | ||
3254 | kfree(mdev); | 3481 | kfree(mdev); |
3255 | } | 3482 | } |
3256 | 3483 | ||
@@ -3266,7 +3493,7 @@ int __init drbd_init(void) | |||
3266 | return -EINVAL; | 3493 | return -EINVAL; |
3267 | } | 3494 | } |
3268 | 3495 | ||
3269 | if (1 > minor_count || minor_count > 255) { | 3496 | if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) { |
3270 | printk(KERN_ERR | 3497 | printk(KERN_ERR |
3271 | "drbd: invalid minor_count (%d)\n", minor_count); | 3498 | "drbd: invalid minor_count (%d)\n", minor_count); |
3272 | #ifdef MODULE | 3499 | #ifdef MODULE |
@@ -3448,7 +3675,7 @@ void drbd_md_sync(struct drbd_conf *mdev) | |||
3448 | if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { | 3675 | if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { |
3449 | /* this was a try anyways ... */ | 3676 | /* this was a try anyways ... */ |
3450 | dev_err(DEV, "meta data update failed!\n"); | 3677 | dev_err(DEV, "meta data update failed!\n"); |
3451 | drbd_chk_io_error(mdev, 1, TRUE); | 3678 | drbd_chk_io_error(mdev, 1, true); |
3452 | } | 3679 | } |
3453 | 3680 | ||
3454 | /* Update mdev->ldev->md.la_size_sect, | 3681 | /* Update mdev->ldev->md.la_size_sect, |
@@ -3464,7 +3691,7 @@ void drbd_md_sync(struct drbd_conf *mdev) | |||
3464 | * @mdev: DRBD device. | 3691 | * @mdev: DRBD device. |
3465 | * @bdev: Device from which the meta data should be read in. | 3692 | * @bdev: Device from which the meta data should be read in. |
3466 | * | 3693 | * |
3467 | * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case | 3694 | * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case |
3468 | * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID. | 3695 | * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID. |
3469 | */ | 3696 | */ |
3470 | int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) | 3697 | int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) |
@@ -3534,28 +3761,6 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) | |||
3534 | return rv; | 3761 | return rv; |
3535 | } | 3762 | } |
3536 | 3763 | ||
3537 | static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index) | ||
3538 | { | ||
3539 | static char *uuid_str[UI_EXTENDED_SIZE] = { | ||
3540 | [UI_CURRENT] = "CURRENT", | ||
3541 | [UI_BITMAP] = "BITMAP", | ||
3542 | [UI_HISTORY_START] = "HISTORY_START", | ||
3543 | [UI_HISTORY_END] = "HISTORY_END", | ||
3544 | [UI_SIZE] = "SIZE", | ||
3545 | [UI_FLAGS] = "FLAGS", | ||
3546 | }; | ||
3547 | |||
3548 | if (index >= UI_EXTENDED_SIZE) { | ||
3549 | dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n"); | ||
3550 | return; | ||
3551 | } | ||
3552 | |||
3553 | dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n", | ||
3554 | uuid_str[index], | ||
3555 | (unsigned long long)mdev->ldev->md.uuid[index]); | ||
3556 | } | ||
3557 | |||
3558 | |||
3559 | /** | 3764 | /** |
3560 | * drbd_md_mark_dirty() - Mark meta data super block as dirty | 3765 | * drbd_md_mark_dirty() - Mark meta data super block as dirty |
3561 | * @mdev: DRBD device. | 3766 | * @mdev: DRBD device. |
@@ -3585,10 +3790,8 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local) | |||
3585 | { | 3790 | { |
3586 | int i; | 3791 | int i; |
3587 | 3792 | ||
3588 | for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) { | 3793 | for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) |
3589 | mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; | 3794 | mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; |
3590 | debug_drbd_uuid(mdev, i+1); | ||
3591 | } | ||
3592 | } | 3795 | } |
3593 | 3796 | ||
3594 | void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | 3797 | void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) |
@@ -3603,7 +3806,6 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | |||
3603 | } | 3806 | } |
3604 | 3807 | ||
3605 | mdev->ldev->md.uuid[idx] = val; | 3808 | mdev->ldev->md.uuid[idx] = val; |
3606 | debug_drbd_uuid(mdev, idx); | ||
3607 | drbd_md_mark_dirty(mdev); | 3809 | drbd_md_mark_dirty(mdev); |
3608 | } | 3810 | } |
3609 | 3811 | ||
@@ -3613,7 +3815,6 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | |||
3613 | if (mdev->ldev->md.uuid[idx]) { | 3815 | if (mdev->ldev->md.uuid[idx]) { |
3614 | drbd_uuid_move_history(mdev); | 3816 | drbd_uuid_move_history(mdev); |
3615 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; | 3817 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; |
3616 | debug_drbd_uuid(mdev, UI_HISTORY_START); | ||
3617 | } | 3818 | } |
3618 | _drbd_uuid_set(mdev, idx, val); | 3819 | _drbd_uuid_set(mdev, idx, val); |
3619 | } | 3820 | } |
@@ -3628,14 +3829,16 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | |||
3628 | void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) | 3829 | void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) |
3629 | { | 3830 | { |
3630 | u64 val; | 3831 | u64 val; |
3832 | unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; | ||
3833 | |||
3834 | if (bm_uuid) | ||
3835 | dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); | ||
3631 | 3836 | ||
3632 | dev_info(DEV, "Creating new current UUID\n"); | ||
3633 | D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0); | ||
3634 | mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; | 3837 | mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; |
3635 | debug_drbd_uuid(mdev, UI_BITMAP); | ||
3636 | 3838 | ||
3637 | get_random_bytes(&val, sizeof(u64)); | 3839 | get_random_bytes(&val, sizeof(u64)); |
3638 | _drbd_uuid_set(mdev, UI_CURRENT, val); | 3840 | _drbd_uuid_set(mdev, UI_CURRENT, val); |
3841 | drbd_print_uuids(mdev, "new current UUID"); | ||
3639 | /* get it to stable storage _now_ */ | 3842 | /* get it to stable storage _now_ */ |
3640 | drbd_md_sync(mdev); | 3843 | drbd_md_sync(mdev); |
3641 | } | 3844 | } |
@@ -3649,16 +3852,12 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) | |||
3649 | drbd_uuid_move_history(mdev); | 3852 | drbd_uuid_move_history(mdev); |
3650 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; | 3853 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; |
3651 | mdev->ldev->md.uuid[UI_BITMAP] = 0; | 3854 | mdev->ldev->md.uuid[UI_BITMAP] = 0; |
3652 | debug_drbd_uuid(mdev, UI_HISTORY_START); | ||
3653 | debug_drbd_uuid(mdev, UI_BITMAP); | ||
3654 | } else { | 3855 | } else { |
3655 | if (mdev->ldev->md.uuid[UI_BITMAP]) | 3856 | unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; |
3656 | dev_warn(DEV, "bm UUID already set"); | 3857 | if (bm_uuid) |
3657 | 3858 | dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); | |
3658 | mdev->ldev->md.uuid[UI_BITMAP] = val; | ||
3659 | mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1); | ||
3660 | 3859 | ||
3661 | debug_drbd_uuid(mdev, UI_BITMAP); | 3860 | mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); |
3662 | } | 3861 | } |
3663 | drbd_md_mark_dirty(mdev); | 3862 | drbd_md_mark_dirty(mdev); |
3664 | } | 3863 | } |
@@ -3714,15 +3913,19 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev) | |||
3714 | static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) | 3913 | static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) |
3715 | { | 3914 | { |
3716 | struct bm_io_work *work = container_of(w, struct bm_io_work, w); | 3915 | struct bm_io_work *work = container_of(w, struct bm_io_work, w); |
3717 | int rv; | 3916 | int rv = -EIO; |
3718 | 3917 | ||
3719 | D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); | 3918 | D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); |
3720 | 3919 | ||
3721 | drbd_bm_lock(mdev, work->why); | 3920 | if (get_ldev(mdev)) { |
3722 | rv = work->io_fn(mdev); | 3921 | drbd_bm_lock(mdev, work->why, work->flags); |
3723 | drbd_bm_unlock(mdev); | 3922 | rv = work->io_fn(mdev); |
3923 | drbd_bm_unlock(mdev); | ||
3924 | put_ldev(mdev); | ||
3925 | } | ||
3724 | 3926 | ||
3725 | clear_bit(BITMAP_IO, &mdev->flags); | 3927 | clear_bit(BITMAP_IO, &mdev->flags); |
3928 | smp_mb__after_clear_bit(); | ||
3726 | wake_up(&mdev->misc_wait); | 3929 | wake_up(&mdev->misc_wait); |
3727 | 3930 | ||
3728 | if (work->done) | 3931 | if (work->done) |
@@ -3730,6 +3933,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) | |||
3730 | 3933 | ||
3731 | clear_bit(BITMAP_IO_QUEUED, &mdev->flags); | 3934 | clear_bit(BITMAP_IO_QUEUED, &mdev->flags); |
3732 | work->why = NULL; | 3935 | work->why = NULL; |
3936 | work->flags = 0; | ||
3733 | 3937 | ||
3734 | return 1; | 3938 | return 1; |
3735 | } | 3939 | } |
@@ -3784,7 +3988,7 @@ void drbd_go_diskless(struct drbd_conf *mdev) | |||
3784 | void drbd_queue_bitmap_io(struct drbd_conf *mdev, | 3988 | void drbd_queue_bitmap_io(struct drbd_conf *mdev, |
3785 | int (*io_fn)(struct drbd_conf *), | 3989 | int (*io_fn)(struct drbd_conf *), |
3786 | void (*done)(struct drbd_conf *, int), | 3990 | void (*done)(struct drbd_conf *, int), |
3787 | char *why) | 3991 | char *why, enum bm_flag flags) |
3788 | { | 3992 | { |
3789 | D_ASSERT(current == mdev->worker.task); | 3993 | D_ASSERT(current == mdev->worker.task); |
3790 | 3994 | ||
@@ -3798,15 +4002,15 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev, | |||
3798 | mdev->bm_io_work.io_fn = io_fn; | 4002 | mdev->bm_io_work.io_fn = io_fn; |
3799 | mdev->bm_io_work.done = done; | 4003 | mdev->bm_io_work.done = done; |
3800 | mdev->bm_io_work.why = why; | 4004 | mdev->bm_io_work.why = why; |
4005 | mdev->bm_io_work.flags = flags; | ||
3801 | 4006 | ||
4007 | spin_lock_irq(&mdev->req_lock); | ||
3802 | set_bit(BITMAP_IO, &mdev->flags); | 4008 | set_bit(BITMAP_IO, &mdev->flags); |
3803 | if (atomic_read(&mdev->ap_bio_cnt) == 0) { | 4009 | if (atomic_read(&mdev->ap_bio_cnt) == 0) { |
3804 | if (list_empty(&mdev->bm_io_work.w.list)) { | 4010 | if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) |
3805 | set_bit(BITMAP_IO_QUEUED, &mdev->flags); | ||
3806 | drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); | 4011 | drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); |
3807 | } else | ||
3808 | dev_err(DEV, "FIXME avoided double queuing bm_io_work\n"); | ||
3809 | } | 4012 | } |
4013 | spin_unlock_irq(&mdev->req_lock); | ||
3810 | } | 4014 | } |
3811 | 4015 | ||
3812 | /** | 4016 | /** |
@@ -3818,19 +4022,22 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev, | |||
3818 | * freezes application IO while that the actual IO operations runs. This | 4022 | * freezes application IO while that the actual IO operations runs. This |
3819 | * functions MAY NOT be called from worker context. | 4023 | * functions MAY NOT be called from worker context. |
3820 | */ | 4024 | */ |
3821 | int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why) | 4025 | int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), |
4026 | char *why, enum bm_flag flags) | ||
3822 | { | 4027 | { |
3823 | int rv; | 4028 | int rv; |
3824 | 4029 | ||
3825 | D_ASSERT(current != mdev->worker.task); | 4030 | D_ASSERT(current != mdev->worker.task); |
3826 | 4031 | ||
3827 | drbd_suspend_io(mdev); | 4032 | if ((flags & BM_LOCKED_SET_ALLOWED) == 0) |
4033 | drbd_suspend_io(mdev); | ||
3828 | 4034 | ||
3829 | drbd_bm_lock(mdev, why); | 4035 | drbd_bm_lock(mdev, why, flags); |
3830 | rv = io_fn(mdev); | 4036 | rv = io_fn(mdev); |
3831 | drbd_bm_unlock(mdev); | 4037 | drbd_bm_unlock(mdev); |
3832 | 4038 | ||
3833 | drbd_resume_io(mdev); | 4039 | if ((flags & BM_LOCKED_SET_ALLOWED) == 0) |
4040 | drbd_resume_io(mdev); | ||
3834 | 4041 | ||
3835 | return rv; | 4042 | return rv; |
3836 | } | 4043 | } |
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index fe81c851ca88..03b29f78a37d 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c | |||
@@ -288,10 +288,11 @@ void drbd_try_outdate_peer_async(struct drbd_conf *mdev) | |||
288 | dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n"); | 288 | dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n"); |
289 | } | 289 | } |
290 | 290 | ||
291 | int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | 291 | enum drbd_state_rv |
292 | drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | ||
292 | { | 293 | { |
293 | const int max_tries = 4; | 294 | const int max_tries = 4; |
294 | int r = 0; | 295 | enum drbd_state_rv rv = SS_UNKNOWN_ERROR; |
295 | int try = 0; | 296 | int try = 0; |
296 | int forced = 0; | 297 | int forced = 0; |
297 | union drbd_state mask, val; | 298 | union drbd_state mask, val; |
@@ -306,17 +307,17 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
306 | val.i = 0; val.role = new_role; | 307 | val.i = 0; val.role = new_role; |
307 | 308 | ||
308 | while (try++ < max_tries) { | 309 | while (try++ < max_tries) { |
309 | r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); | 310 | rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); |
310 | 311 | ||
311 | /* in case we first succeeded to outdate, | 312 | /* in case we first succeeded to outdate, |
312 | * but now suddenly could establish a connection */ | 313 | * but now suddenly could establish a connection */ |
313 | if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { | 314 | if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { |
314 | val.pdsk = 0; | 315 | val.pdsk = 0; |
315 | mask.pdsk = 0; | 316 | mask.pdsk = 0; |
316 | continue; | 317 | continue; |
317 | } | 318 | } |
318 | 319 | ||
319 | if (r == SS_NO_UP_TO_DATE_DISK && force && | 320 | if (rv == SS_NO_UP_TO_DATE_DISK && force && |
320 | (mdev->state.disk < D_UP_TO_DATE && | 321 | (mdev->state.disk < D_UP_TO_DATE && |
321 | mdev->state.disk >= D_INCONSISTENT)) { | 322 | mdev->state.disk >= D_INCONSISTENT)) { |
322 | mask.disk = D_MASK; | 323 | mask.disk = D_MASK; |
@@ -325,7 +326,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
325 | continue; | 326 | continue; |
326 | } | 327 | } |
327 | 328 | ||
328 | if (r == SS_NO_UP_TO_DATE_DISK && | 329 | if (rv == SS_NO_UP_TO_DATE_DISK && |
329 | mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { | 330 | mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { |
330 | D_ASSERT(mdev->state.pdsk == D_UNKNOWN); | 331 | D_ASSERT(mdev->state.pdsk == D_UNKNOWN); |
331 | nps = drbd_try_outdate_peer(mdev); | 332 | nps = drbd_try_outdate_peer(mdev); |
@@ -341,9 +342,9 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
341 | continue; | 342 | continue; |
342 | } | 343 | } |
343 | 344 | ||
344 | if (r == SS_NOTHING_TO_DO) | 345 | if (rv == SS_NOTHING_TO_DO) |
345 | goto fail; | 346 | goto fail; |
346 | if (r == SS_PRIMARY_NOP && mask.pdsk == 0) { | 347 | if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) { |
347 | nps = drbd_try_outdate_peer(mdev); | 348 | nps = drbd_try_outdate_peer(mdev); |
348 | 349 | ||
349 | if (force && nps > D_OUTDATED) { | 350 | if (force && nps > D_OUTDATED) { |
@@ -356,25 +357,24 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
356 | 357 | ||
357 | continue; | 358 | continue; |
358 | } | 359 | } |
359 | if (r == SS_TWO_PRIMARIES) { | 360 | if (rv == SS_TWO_PRIMARIES) { |
360 | /* Maybe the peer is detected as dead very soon... | 361 | /* Maybe the peer is detected as dead very soon... |
361 | retry at most once more in this case. */ | 362 | retry at most once more in this case. */ |
362 | __set_current_state(TASK_INTERRUPTIBLE); | 363 | schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10); |
363 | schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10); | ||
364 | if (try < max_tries) | 364 | if (try < max_tries) |
365 | try = max_tries - 1; | 365 | try = max_tries - 1; |
366 | continue; | 366 | continue; |
367 | } | 367 | } |
368 | if (r < SS_SUCCESS) { | 368 | if (rv < SS_SUCCESS) { |
369 | r = _drbd_request_state(mdev, mask, val, | 369 | rv = _drbd_request_state(mdev, mask, val, |
370 | CS_VERBOSE + CS_WAIT_COMPLETE); | 370 | CS_VERBOSE + CS_WAIT_COMPLETE); |
371 | if (r < SS_SUCCESS) | 371 | if (rv < SS_SUCCESS) |
372 | goto fail; | 372 | goto fail; |
373 | } | 373 | } |
374 | break; | 374 | break; |
375 | } | 375 | } |
376 | 376 | ||
377 | if (r < SS_SUCCESS) | 377 | if (rv < SS_SUCCESS) |
378 | goto fail; | 378 | goto fail; |
379 | 379 | ||
380 | if (forced) | 380 | if (forced) |
@@ -384,7 +384,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
384 | wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0); | 384 | wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0); |
385 | 385 | ||
386 | if (new_role == R_SECONDARY) { | 386 | if (new_role == R_SECONDARY) { |
387 | set_disk_ro(mdev->vdisk, TRUE); | 387 | set_disk_ro(mdev->vdisk, true); |
388 | if (get_ldev(mdev)) { | 388 | if (get_ldev(mdev)) { |
389 | mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; | 389 | mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; |
390 | put_ldev(mdev); | 390 | put_ldev(mdev); |
@@ -394,7 +394,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
394 | mdev->net_conf->want_lose = 0; | 394 | mdev->net_conf->want_lose = 0; |
395 | put_net_conf(mdev); | 395 | put_net_conf(mdev); |
396 | } | 396 | } |
397 | set_disk_ro(mdev->vdisk, FALSE); | 397 | set_disk_ro(mdev->vdisk, false); |
398 | if (get_ldev(mdev)) { | 398 | if (get_ldev(mdev)) { |
399 | if (((mdev->state.conn < C_CONNECTED || | 399 | if (((mdev->state.conn < C_CONNECTED || |
400 | mdev->state.pdsk <= D_FAILED) | 400 | mdev->state.pdsk <= D_FAILED) |
@@ -406,10 +406,8 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
406 | } | 406 | } |
407 | } | 407 | } |
408 | 408 | ||
409 | if ((new_role == R_SECONDARY) && get_ldev(mdev)) { | 409 | /* writeout of activity log covered areas of the bitmap |
410 | drbd_al_to_on_disk_bm(mdev); | 410 | * to stable storage done in after state change already */ |
411 | put_ldev(mdev); | ||
412 | } | ||
413 | 411 | ||
414 | if (mdev->state.conn >= C_WF_REPORT_PARAMS) { | 412 | if (mdev->state.conn >= C_WF_REPORT_PARAMS) { |
415 | /* if this was forced, we should consider sync */ | 413 | /* if this was forced, we should consider sync */ |
@@ -423,7 +421,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | |||
423 | kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); | 421 | kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); |
424 | fail: | 422 | fail: |
425 | mutex_unlock(&mdev->state_mutex); | 423 | mutex_unlock(&mdev->state_mutex); |
426 | return r; | 424 | return rv; |
427 | } | 425 | } |
428 | 426 | ||
429 | static struct drbd_conf *ensure_mdev(int minor, int create) | 427 | static struct drbd_conf *ensure_mdev(int minor, int create) |
@@ -528,17 +526,19 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev, | |||
528 | } | 526 | } |
529 | } | 527 | } |
530 | 528 | ||
529 | /* input size is expected to be in KB */ | ||
531 | char *ppsize(char *buf, unsigned long long size) | 530 | char *ppsize(char *buf, unsigned long long size) |
532 | { | 531 | { |
533 | /* Needs 9 bytes at max. */ | 532 | /* Needs 9 bytes at max including trailing NUL: |
533 | * -1ULL ==> "16384 EB" */ | ||
534 | static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' }; | 534 | static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' }; |
535 | int base = 0; | 535 | int base = 0; |
536 | while (size >= 10000) { | 536 | while (size >= 10000 && base < sizeof(units)-1) { |
537 | /* shift + round */ | 537 | /* shift + round */ |
538 | size = (size >> 10) + !!(size & (1<<9)); | 538 | size = (size >> 10) + !!(size & (1<<9)); |
539 | base++; | 539 | base++; |
540 | } | 540 | } |
541 | sprintf(buf, "%lu %cB", (long)size, units[base]); | 541 | sprintf(buf, "%u %cB", (unsigned)size, units[base]); |
542 | 542 | ||
543 | return buf; | 543 | return buf; |
544 | } | 544 | } |
@@ -642,11 +642,19 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_ | |||
642 | || prev_size != mdev->ldev->md.md_size_sect; | 642 | || prev_size != mdev->ldev->md.md_size_sect; |
643 | 643 | ||
644 | if (la_size_changed || md_moved) { | 644 | if (la_size_changed || md_moved) { |
645 | int err; | ||
646 | |||
645 | drbd_al_shrink(mdev); /* All extents inactive. */ | 647 | drbd_al_shrink(mdev); /* All extents inactive. */ |
646 | dev_info(DEV, "Writing the whole bitmap, %s\n", | 648 | dev_info(DEV, "Writing the whole bitmap, %s\n", |
647 | la_size_changed && md_moved ? "size changed and md moved" : | 649 | la_size_changed && md_moved ? "size changed and md moved" : |
648 | la_size_changed ? "size changed" : "md moved"); | 650 | la_size_changed ? "size changed" : "md moved"); |
649 | rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */ | 651 | /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ |
652 | err = drbd_bitmap_io(mdev, &drbd_bm_write, | ||
653 | "size changed", BM_LOCKED_MASK); | ||
654 | if (err) { | ||
655 | rv = dev_size_error; | ||
656 | goto out; | ||
657 | } | ||
650 | drbd_md_mark_dirty(mdev); | 658 | drbd_md_mark_dirty(mdev); |
651 | } | 659 | } |
652 | 660 | ||
@@ -765,22 +773,21 @@ static int drbd_check_al_size(struct drbd_conf *mdev) | |||
765 | return 0; | 773 | return 0; |
766 | } | 774 | } |
767 | 775 | ||
768 | void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local) | 776 | void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local) |
769 | { | 777 | { |
770 | struct request_queue * const q = mdev->rq_queue; | 778 | struct request_queue * const q = mdev->rq_queue; |
771 | struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; | 779 | struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; |
772 | int max_segments = mdev->ldev->dc.max_bio_bvecs; | 780 | int max_segments = mdev->ldev->dc.max_bio_bvecs; |
781 | int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); | ||
773 | 782 | ||
774 | max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); | ||
775 | |||
776 | blk_queue_max_hw_sectors(q, max_seg_s >> 9); | ||
777 | blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); | ||
778 | blk_queue_max_segment_size(q, max_seg_s); | ||
779 | blk_queue_logical_block_size(q, 512); | 783 | blk_queue_logical_block_size(q, 512); |
780 | blk_queue_segment_boundary(q, PAGE_SIZE-1); | 784 | blk_queue_max_hw_sectors(q, max_hw_sectors); |
781 | blk_stack_limits(&q->limits, &b->limits, 0); | 785 | /* This is the workaround for "bio would need to, but cannot, be split" */ |
786 | blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); | ||
787 | blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); | ||
788 | blk_queue_stack_limits(q, b); | ||
782 | 789 | ||
783 | dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q)); | 790 | dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9); |
784 | 791 | ||
785 | if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { | 792 | if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { |
786 | dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", | 793 | dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", |
@@ -850,7 +857,7 @@ static void drbd_suspend_al(struct drbd_conf *mdev) | |||
850 | static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 857 | static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, |
851 | struct drbd_nl_cfg_reply *reply) | 858 | struct drbd_nl_cfg_reply *reply) |
852 | { | 859 | { |
853 | enum drbd_ret_codes retcode; | 860 | enum drbd_ret_code retcode; |
854 | enum determine_dev_size dd; | 861 | enum determine_dev_size dd; |
855 | sector_t max_possible_sectors; | 862 | sector_t max_possible_sectors; |
856 | sector_t min_md_device_sectors; | 863 | sector_t min_md_device_sectors; |
@@ -858,8 +865,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
858 | struct block_device *bdev; | 865 | struct block_device *bdev; |
859 | struct lru_cache *resync_lru = NULL; | 866 | struct lru_cache *resync_lru = NULL; |
860 | union drbd_state ns, os; | 867 | union drbd_state ns, os; |
861 | unsigned int max_seg_s; | 868 | unsigned int max_bio_size; |
862 | int rv; | 869 | enum drbd_state_rv rv; |
863 | int cp_discovered = 0; | 870 | int cp_discovered = 0; |
864 | int logical_block_size; | 871 | int logical_block_size; |
865 | 872 | ||
@@ -1005,9 +1012,10 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1005 | /* and for any other previously queued work */ | 1012 | /* and for any other previously queued work */ |
1006 | drbd_flush_workqueue(mdev); | 1013 | drbd_flush_workqueue(mdev); |
1007 | 1014 | ||
1008 | retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); | 1015 | rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); |
1016 | retcode = rv; /* FIXME: Type mismatch. */ | ||
1009 | drbd_resume_io(mdev); | 1017 | drbd_resume_io(mdev); |
1010 | if (retcode < SS_SUCCESS) | 1018 | if (rv < SS_SUCCESS) |
1011 | goto fail; | 1019 | goto fail; |
1012 | 1020 | ||
1013 | if (!get_ldev_if_state(mdev, D_ATTACHING)) | 1021 | if (!get_ldev_if_state(mdev, D_ATTACHING)) |
@@ -1109,20 +1117,20 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1109 | mdev->read_cnt = 0; | 1117 | mdev->read_cnt = 0; |
1110 | mdev->writ_cnt = 0; | 1118 | mdev->writ_cnt = 0; |
1111 | 1119 | ||
1112 | max_seg_s = DRBD_MAX_SEGMENT_SIZE; | 1120 | max_bio_size = DRBD_MAX_BIO_SIZE; |
1113 | if (mdev->state.conn == C_CONNECTED) { | 1121 | if (mdev->state.conn == C_CONNECTED) { |
1114 | /* We are Primary, Connected, and now attach a new local | 1122 | /* We are Primary, Connected, and now attach a new local |
1115 | * backing store. We must not increase the user visible maximum | 1123 | * backing store. We must not increase the user visible maximum |
1116 | * bio size on this device to something the peer may not be | 1124 | * bio size on this device to something the peer may not be |
1117 | * able to handle. */ | 1125 | * able to handle. */ |
1118 | if (mdev->agreed_pro_version < 94) | 1126 | if (mdev->agreed_pro_version < 94) |
1119 | max_seg_s = queue_max_segment_size(mdev->rq_queue); | 1127 | max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9; |
1120 | else if (mdev->agreed_pro_version == 94) | 1128 | else if (mdev->agreed_pro_version == 94) |
1121 | max_seg_s = DRBD_MAX_SIZE_H80_PACKET; | 1129 | max_bio_size = DRBD_MAX_SIZE_H80_PACKET; |
1122 | /* else: drbd 8.3.9 and later, stay with default */ | 1130 | /* else: drbd 8.3.9 and later, stay with default */ |
1123 | } | 1131 | } |
1124 | 1132 | ||
1125 | drbd_setup_queue_param(mdev, max_seg_s); | 1133 | drbd_setup_queue_param(mdev, max_bio_size); |
1126 | 1134 | ||
1127 | /* If I am currently not R_PRIMARY, | 1135 | /* If I am currently not R_PRIMARY, |
1128 | * but meta data primary indicator is set, | 1136 | * but meta data primary indicator is set, |
@@ -1154,12 +1162,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1154 | if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { | 1162 | if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { |
1155 | dev_info(DEV, "Assuming that all blocks are out of sync " | 1163 | dev_info(DEV, "Assuming that all blocks are out of sync " |
1156 | "(aka FullSync)\n"); | 1164 | "(aka FullSync)\n"); |
1157 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) { | 1165 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, |
1166 | "set_n_write from attaching", BM_LOCKED_MASK)) { | ||
1158 | retcode = ERR_IO_MD_DISK; | 1167 | retcode = ERR_IO_MD_DISK; |
1159 | goto force_diskless_dec; | 1168 | goto force_diskless_dec; |
1160 | } | 1169 | } |
1161 | } else { | 1170 | } else { |
1162 | if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) { | 1171 | if (drbd_bitmap_io(mdev, &drbd_bm_read, |
1172 | "read from attaching", BM_LOCKED_MASK) < 0) { | ||
1163 | retcode = ERR_IO_MD_DISK; | 1173 | retcode = ERR_IO_MD_DISK; |
1164 | goto force_diskless_dec; | 1174 | goto force_diskless_dec; |
1165 | } | 1175 | } |
@@ -1167,7 +1177,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1167 | 1177 | ||
1168 | if (cp_discovered) { | 1178 | if (cp_discovered) { |
1169 | drbd_al_apply_to_bm(mdev); | 1179 | drbd_al_apply_to_bm(mdev); |
1170 | drbd_al_to_on_disk_bm(mdev); | 1180 | if (drbd_bitmap_io(mdev, &drbd_bm_write, |
1181 | "crashed primary apply AL", BM_LOCKED_MASK)) { | ||
1182 | retcode = ERR_IO_MD_DISK; | ||
1183 | goto force_diskless_dec; | ||
1184 | } | ||
1171 | } | 1185 | } |
1172 | 1186 | ||
1173 | if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev)) | 1187 | if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev)) |
@@ -1279,7 +1293,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
1279 | struct drbd_nl_cfg_reply *reply) | 1293 | struct drbd_nl_cfg_reply *reply) |
1280 | { | 1294 | { |
1281 | int i, ns; | 1295 | int i, ns; |
1282 | enum drbd_ret_codes retcode; | 1296 | enum drbd_ret_code retcode; |
1283 | struct net_conf *new_conf = NULL; | 1297 | struct net_conf *new_conf = NULL; |
1284 | struct crypto_hash *tfm = NULL; | 1298 | struct crypto_hash *tfm = NULL; |
1285 | struct crypto_hash *integrity_w_tfm = NULL; | 1299 | struct crypto_hash *integrity_w_tfm = NULL; |
@@ -1324,6 +1338,8 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
1324 | new_conf->wire_protocol = DRBD_PROT_C; | 1338 | new_conf->wire_protocol = DRBD_PROT_C; |
1325 | new_conf->ping_timeo = DRBD_PING_TIMEO_DEF; | 1339 | new_conf->ping_timeo = DRBD_PING_TIMEO_DEF; |
1326 | new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF; | 1340 | new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF; |
1341 | new_conf->on_congestion = DRBD_ON_CONGESTION_DEF; | ||
1342 | new_conf->cong_extents = DRBD_CONG_EXTENTS_DEF; | ||
1327 | 1343 | ||
1328 | if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) { | 1344 | if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) { |
1329 | retcode = ERR_MANDATORY_TAG; | 1345 | retcode = ERR_MANDATORY_TAG; |
@@ -1345,6 +1361,11 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
1345 | } | 1361 | } |
1346 | } | 1362 | } |
1347 | 1363 | ||
1364 | if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) { | ||
1365 | retcode = ERR_CONG_NOT_PROTO_A; | ||
1366 | goto fail; | ||
1367 | } | ||
1368 | |||
1348 | if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { | 1369 | if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { |
1349 | retcode = ERR_DISCARD; | 1370 | retcode = ERR_DISCARD; |
1350 | goto fail; | 1371 | goto fail; |
@@ -1525,6 +1546,21 @@ static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl | |||
1525 | struct drbd_nl_cfg_reply *reply) | 1546 | struct drbd_nl_cfg_reply *reply) |
1526 | { | 1547 | { |
1527 | int retcode; | 1548 | int retcode; |
1549 | struct disconnect dc; | ||
1550 | |||
1551 | memset(&dc, 0, sizeof(struct disconnect)); | ||
1552 | if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) { | ||
1553 | retcode = ERR_MANDATORY_TAG; | ||
1554 | goto fail; | ||
1555 | } | ||
1556 | |||
1557 | if (dc.force) { | ||
1558 | spin_lock_irq(&mdev->req_lock); | ||
1559 | if (mdev->state.conn >= C_WF_CONNECTION) | ||
1560 | _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL); | ||
1561 | spin_unlock_irq(&mdev->req_lock); | ||
1562 | goto done; | ||
1563 | } | ||
1528 | 1564 | ||
1529 | retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED); | 1565 | retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED); |
1530 | 1566 | ||
@@ -1842,6 +1878,10 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl | |||
1842 | { | 1878 | { |
1843 | int retcode; | 1879 | int retcode; |
1844 | 1880 | ||
1881 | /* If there is still bitmap IO pending, probably because of a previous | ||
1882 | * resync just being finished, wait for it before requesting a new resync. */ | ||
1883 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); | ||
1884 | |||
1845 | retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); | 1885 | retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); |
1846 | 1886 | ||
1847 | if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION) | 1887 | if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION) |
@@ -1877,6 +1917,10 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re | |||
1877 | { | 1917 | { |
1878 | int retcode; | 1918 | int retcode; |
1879 | 1919 | ||
1920 | /* If there is still bitmap IO pending, probably because of a previous | ||
1921 | * resync just being finished, wait for it before requesting a new resync. */ | ||
1922 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); | ||
1923 | |||
1880 | retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); | 1924 | retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); |
1881 | 1925 | ||
1882 | if (retcode < SS_SUCCESS) { | 1926 | if (retcode < SS_SUCCESS) { |
@@ -1885,9 +1929,9 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re | |||
1885 | into a full resync. */ | 1929 | into a full resync. */ |
1886 | retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT)); | 1930 | retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT)); |
1887 | if (retcode >= SS_SUCCESS) { | 1931 | if (retcode >= SS_SUCCESS) { |
1888 | /* open coded drbd_bitmap_io() */ | ||
1889 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al, | 1932 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al, |
1890 | "set_n_write from invalidate_peer")) | 1933 | "set_n_write from invalidate_peer", |
1934 | BM_LOCKED_SET_ALLOWED)) | ||
1891 | retcode = ERR_IO_MD_DISK; | 1935 | retcode = ERR_IO_MD_DISK; |
1892 | } | 1936 | } |
1893 | } else | 1937 | } else |
@@ -1914,9 +1958,17 @@ static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n | |||
1914 | struct drbd_nl_cfg_reply *reply) | 1958 | struct drbd_nl_cfg_reply *reply) |
1915 | { | 1959 | { |
1916 | int retcode = NO_ERROR; | 1960 | int retcode = NO_ERROR; |
1961 | union drbd_state s; | ||
1917 | 1962 | ||
1918 | if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) | 1963 | if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { |
1919 | retcode = ERR_PAUSE_IS_CLEAR; | 1964 | s = mdev->state; |
1965 | if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) { | ||
1966 | retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP : | ||
1967 | s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR; | ||
1968 | } else { | ||
1969 | retcode = ERR_PAUSE_IS_CLEAR; | ||
1970 | } | ||
1971 | } | ||
1920 | 1972 | ||
1921 | reply->ret_code = retcode; | 1973 | reply->ret_code = retcode; |
1922 | return 0; | 1974 | return 0; |
@@ -2054,6 +2106,11 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | |||
2054 | reply->ret_code = ERR_MANDATORY_TAG; | 2106 | reply->ret_code = ERR_MANDATORY_TAG; |
2055 | return 0; | 2107 | return 0; |
2056 | } | 2108 | } |
2109 | |||
2110 | /* If there is still bitmap IO pending, e.g. previous resync or verify | ||
2111 | * just being finished, wait for it before requesting a new resync. */ | ||
2112 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); | ||
2113 | |||
2057 | /* w_make_ov_request expects position to be aligned */ | 2114 | /* w_make_ov_request expects position to be aligned */ |
2058 | mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT; | 2115 | mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT; |
2059 | reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); | 2116 | reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); |
@@ -2097,7 +2154,8 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl | |||
2097 | drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */ | 2154 | drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */ |
2098 | 2155 | ||
2099 | if (args.clear_bm) { | 2156 | if (args.clear_bm) { |
2100 | err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid"); | 2157 | err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, |
2158 | "clear_n_write from new_c_uuid", BM_LOCKED_MASK); | ||
2101 | if (err) { | 2159 | if (err) { |
2102 | dev_err(DEV, "Writing bitmap failed with %d\n",err); | 2160 | dev_err(DEV, "Writing bitmap failed with %d\n",err); |
2103 | retcode = ERR_IO_MD_DISK; | 2161 | retcode = ERR_IO_MD_DISK; |
@@ -2105,6 +2163,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl | |||
2105 | if (skip_initial_sync) { | 2163 | if (skip_initial_sync) { |
2106 | drbd_send_uuids_skip_initial_sync(mdev); | 2164 | drbd_send_uuids_skip_initial_sync(mdev); |
2107 | _drbd_uuid_set(mdev, UI_BITMAP, 0); | 2165 | _drbd_uuid_set(mdev, UI_BITMAP, 0); |
2166 | drbd_print_uuids(mdev, "cleared bitmap UUID"); | ||
2108 | spin_lock_irq(&mdev->req_lock); | 2167 | spin_lock_irq(&mdev->req_lock); |
2109 | _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), | 2168 | _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), |
2110 | CS_VERBOSE, NULL); | 2169 | CS_VERBOSE, NULL); |
@@ -2189,7 +2248,8 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms | |||
2189 | goto fail; | 2248 | goto fail; |
2190 | } | 2249 | } |
2191 | 2250 | ||
2192 | if (nlp->packet_type >= P_nl_after_last_packet) { | 2251 | if (nlp->packet_type >= P_nl_after_last_packet || |
2252 | nlp->packet_type == P_return_code_only) { | ||
2193 | retcode = ERR_PACKET_NR; | 2253 | retcode = ERR_PACKET_NR; |
2194 | goto fail; | 2254 | goto fail; |
2195 | } | 2255 | } |
@@ -2205,7 +2265,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms | |||
2205 | reply_size += cm->reply_body_size; | 2265 | reply_size += cm->reply_body_size; |
2206 | 2266 | ||
2207 | /* allocation not in the IO path, cqueue thread context */ | 2267 | /* allocation not in the IO path, cqueue thread context */ |
2208 | cn_reply = kmalloc(reply_size, GFP_KERNEL); | 2268 | cn_reply = kzalloc(reply_size, GFP_KERNEL); |
2209 | if (!cn_reply) { | 2269 | if (!cn_reply) { |
2210 | retcode = ERR_NOMEM; | 2270 | retcode = ERR_NOMEM; |
2211 | goto fail; | 2271 | goto fail; |
@@ -2213,7 +2273,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms | |||
2213 | reply = (struct drbd_nl_cfg_reply *) cn_reply->data; | 2273 | reply = (struct drbd_nl_cfg_reply *) cn_reply->data; |
2214 | 2274 | ||
2215 | reply->packet_type = | 2275 | reply->packet_type = |
2216 | cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet; | 2276 | cm->reply_body_size ? nlp->packet_type : P_return_code_only; |
2217 | reply->minor = nlp->drbd_minor; | 2277 | reply->minor = nlp->drbd_minor; |
2218 | reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */ | 2278 | reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */ |
2219 | /* reply->tag_list; might be modified by cm->function. */ | 2279 | /* reply->tag_list; might be modified by cm->function. */ |
@@ -2376,7 +2436,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev, | |||
2376 | /* receiver thread context, which is not in the writeout path (of this node), | 2436 | /* receiver thread context, which is not in the writeout path (of this node), |
2377 | * but may be in the writeout path of the _other_ node. | 2437 | * but may be in the writeout path of the _other_ node. |
2378 | * GFP_NOIO to avoid potential "distributed deadlock". */ | 2438 | * GFP_NOIO to avoid potential "distributed deadlock". */ |
2379 | cn_reply = kmalloc( | 2439 | cn_reply = kzalloc( |
2380 | sizeof(struct cn_msg)+ | 2440 | sizeof(struct cn_msg)+ |
2381 | sizeof(struct drbd_nl_cfg_reply)+ | 2441 | sizeof(struct drbd_nl_cfg_reply)+ |
2382 | sizeof(struct dump_ee_tag_len_struct)+ | 2442 | sizeof(struct dump_ee_tag_len_struct)+ |
@@ -2398,10 +2458,11 @@ void drbd_bcast_ee(struct drbd_conf *mdev, | |||
2398 | tl = tl_add_int(tl, T_ee_sector, &e->sector); | 2458 | tl = tl_add_int(tl, T_ee_sector, &e->sector); |
2399 | tl = tl_add_int(tl, T_ee_block_id, &e->block_id); | 2459 | tl = tl_add_int(tl, T_ee_block_id, &e->block_id); |
2400 | 2460 | ||
2461 | /* dump the first 32k */ | ||
2462 | len = min_t(unsigned, e->size, 32 << 10); | ||
2401 | put_unaligned(T_ee_data, tl++); | 2463 | put_unaligned(T_ee_data, tl++); |
2402 | put_unaligned(e->size, tl++); | 2464 | put_unaligned(len, tl++); |
2403 | 2465 | ||
2404 | len = e->size; | ||
2405 | page = e->pages; | 2466 | page = e->pages; |
2406 | page_chain_for_each(page) { | 2467 | page_chain_for_each(page) { |
2407 | void *d = kmap_atomic(page, KM_USER0); | 2468 | void *d = kmap_atomic(page, KM_USER0); |
@@ -2410,6 +2471,8 @@ void drbd_bcast_ee(struct drbd_conf *mdev, | |||
2410 | kunmap_atomic(d, KM_USER0); | 2471 | kunmap_atomic(d, KM_USER0); |
2411 | tl = (unsigned short*)((char*)tl + l); | 2472 | tl = (unsigned short*)((char*)tl + l); |
2412 | len -= l; | 2473 | len -= l; |
2474 | if (len == 0) | ||
2475 | break; | ||
2413 | } | 2476 | } |
2414 | put_unaligned(TT_END, tl++); /* Close the tag list */ | 2477 | put_unaligned(TT_END, tl++); /* Close the tag list */ |
2415 | 2478 | ||
@@ -2508,6 +2571,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code) | |||
2508 | (struct drbd_nl_cfg_reply *)cn_reply->data; | 2571 | (struct drbd_nl_cfg_reply *)cn_reply->data; |
2509 | int rr; | 2572 | int rr; |
2510 | 2573 | ||
2574 | memset(buffer, 0, sizeof(buffer)); | ||
2511 | cn_reply->id = req->id; | 2575 | cn_reply->id = req->id; |
2512 | 2576 | ||
2513 | cn_reply->seq = req->seq; | 2577 | cn_reply->seq = req->seq; |
@@ -2515,6 +2579,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code) | |||
2515 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply); | 2579 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply); |
2516 | cn_reply->flags = 0; | 2580 | cn_reply->flags = 0; |
2517 | 2581 | ||
2582 | reply->packet_type = P_return_code_only; | ||
2518 | reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; | 2583 | reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; |
2519 | reply->ret_code = ret_code; | 2584 | reply->ret_code = ret_code; |
2520 | 2585 | ||
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 7e6ac307e2de..2959cdfb77f5 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include "drbd_int.h" | 34 | #include "drbd_int.h" |
35 | 35 | ||
36 | static int drbd_proc_open(struct inode *inode, struct file *file); | 36 | static int drbd_proc_open(struct inode *inode, struct file *file); |
37 | static int drbd_proc_release(struct inode *inode, struct file *file); | ||
37 | 38 | ||
38 | 39 | ||
39 | struct proc_dir_entry *drbd_proc; | 40 | struct proc_dir_entry *drbd_proc; |
@@ -42,9 +43,22 @@ const struct file_operations drbd_proc_fops = { | |||
42 | .open = drbd_proc_open, | 43 | .open = drbd_proc_open, |
43 | .read = seq_read, | 44 | .read = seq_read, |
44 | .llseek = seq_lseek, | 45 | .llseek = seq_lseek, |
45 | .release = single_release, | 46 | .release = drbd_proc_release, |
46 | }; | 47 | }; |
47 | 48 | ||
49 | void seq_printf_with_thousands_grouping(struct seq_file *seq, long v) | ||
50 | { | ||
51 | /* v is in kB/sec. We don't expect TiByte/sec yet. */ | ||
52 | if (unlikely(v >= 1000000)) { | ||
53 | /* cool: > GiByte/s */ | ||
54 | seq_printf(seq, "%ld,", v / 1000000); | ||
55 | v /= 1000000; | ||
56 | seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000); | ||
57 | } else if (likely(v >= 1000)) | ||
58 | seq_printf(seq, "%ld,%03ld", v/1000, v % 1000); | ||
59 | else | ||
60 | seq_printf(seq, "%ld", v); | ||
61 | } | ||
48 | 62 | ||
49 | /*lge | 63 | /*lge |
50 | * progress bars shamelessly adapted from driver/md/md.c | 64 | * progress bars shamelessly adapted from driver/md/md.c |
@@ -71,10 +85,15 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) | |||
71 | seq_printf(seq, "."); | 85 | seq_printf(seq, "."); |
72 | seq_printf(seq, "] "); | 86 | seq_printf(seq, "] "); |
73 | 87 | ||
74 | seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10); | 88 | if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) |
75 | /* if more than 1 GB display in MB */ | 89 | seq_printf(seq, "verified:"); |
76 | if (mdev->rs_total > 0x100000L) | 90 | else |
77 | seq_printf(seq, "(%lu/%lu)M\n\t", | 91 | seq_printf(seq, "sync'ed:"); |
92 | seq_printf(seq, "%3u.%u%% ", res / 10, res % 10); | ||
93 | |||
94 | /* if more than a few GB, display in MB */ | ||
95 | if (mdev->rs_total > (4UL << (30 - BM_BLOCK_SHIFT))) | ||
96 | seq_printf(seq, "(%lu/%lu)M", | ||
78 | (unsigned long) Bit2KB(rs_left >> 10), | 97 | (unsigned long) Bit2KB(rs_left >> 10), |
79 | (unsigned long) Bit2KB(mdev->rs_total >> 10)); | 98 | (unsigned long) Bit2KB(mdev->rs_total >> 10)); |
80 | else | 99 | else |
@@ -94,6 +113,7 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) | |||
94 | /* Rolling marks. last_mark+1 may just now be modified. last_mark+2 is | 113 | /* Rolling marks. last_mark+1 may just now be modified. last_mark+2 is |
95 | * at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at | 114 | * at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at |
96 | * least DRBD_SYNC_MARK_STEP time before it will be modified. */ | 115 | * least DRBD_SYNC_MARK_STEP time before it will be modified. */ |
116 | /* ------------------------ ~18s average ------------------------ */ | ||
97 | i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS; | 117 | i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS; |
98 | dt = (jiffies - mdev->rs_mark_time[i]) / HZ; | 118 | dt = (jiffies - mdev->rs_mark_time[i]) / HZ; |
99 | if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS)) | 119 | if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS)) |
@@ -107,14 +127,24 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) | |||
107 | seq_printf(seq, "finish: %lu:%02lu:%02lu", | 127 | seq_printf(seq, "finish: %lu:%02lu:%02lu", |
108 | rt / 3600, (rt % 3600) / 60, rt % 60); | 128 | rt / 3600, (rt % 3600) / 60, rt % 60); |
109 | 129 | ||
110 | /* current speed average over (SYNC_MARKS * SYNC_MARK_STEP) jiffies */ | ||
111 | dbdt = Bit2KB(db/dt); | 130 | dbdt = Bit2KB(db/dt); |
112 | if (dbdt > 1000) | 131 | seq_printf(seq, " speed: "); |
113 | seq_printf(seq, " speed: %ld,%03ld", | 132 | seq_printf_with_thousands_grouping(seq, dbdt); |
114 | dbdt/1000, dbdt % 1000); | 133 | seq_printf(seq, " ("); |
115 | else | 134 | /* ------------------------- ~3s average ------------------------ */ |
116 | seq_printf(seq, " speed: %ld", dbdt); | 135 | if (proc_details >= 1) { |
136 | /* this is what drbd_rs_should_slow_down() uses */ | ||
137 | i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; | ||
138 | dt = (jiffies - mdev->rs_mark_time[i]) / HZ; | ||
139 | if (!dt) | ||
140 | dt++; | ||
141 | db = mdev->rs_mark_left[i] - rs_left; | ||
142 | dbdt = Bit2KB(db/dt); | ||
143 | seq_printf_with_thousands_grouping(seq, dbdt); | ||
144 | seq_printf(seq, " -- "); | ||
145 | } | ||
117 | 146 | ||
147 | /* --------------------- long term average ---------------------- */ | ||
118 | /* mean speed since syncer started | 148 | /* mean speed since syncer started |
119 | * we do account for PausedSync periods */ | 149 | * we do account for PausedSync periods */ |
120 | dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ; | 150 | dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ; |
@@ -122,20 +152,34 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) | |||
122 | dt = 1; | 152 | dt = 1; |
123 | db = mdev->rs_total - rs_left; | 153 | db = mdev->rs_total - rs_left; |
124 | dbdt = Bit2KB(db/dt); | 154 | dbdt = Bit2KB(db/dt); |
125 | if (dbdt > 1000) | 155 | seq_printf_with_thousands_grouping(seq, dbdt); |
126 | seq_printf(seq, " (%ld,%03ld)", | 156 | seq_printf(seq, ")"); |
127 | dbdt/1000, dbdt % 1000); | ||
128 | else | ||
129 | seq_printf(seq, " (%ld)", dbdt); | ||
130 | 157 | ||
131 | if (mdev->state.conn == C_SYNC_TARGET) { | 158 | if (mdev->state.conn == C_SYNC_TARGET || |
132 | if (mdev->c_sync_rate > 1000) | 159 | mdev->state.conn == C_VERIFY_S) { |
133 | seq_printf(seq, " want: %d,%03d", | 160 | seq_printf(seq, " want: "); |
134 | mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000); | 161 | seq_printf_with_thousands_grouping(seq, mdev->c_sync_rate); |
135 | else | ||
136 | seq_printf(seq, " want: %d", mdev->c_sync_rate); | ||
137 | } | 162 | } |
138 | seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : ""); | 163 | seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : ""); |
164 | |||
165 | if (proc_details >= 1) { | ||
166 | /* 64 bit: | ||
167 | * we convert to sectors in the display below. */ | ||
168 | unsigned long bm_bits = drbd_bm_bits(mdev); | ||
169 | unsigned long bit_pos; | ||
170 | if (mdev->state.conn == C_VERIFY_S || | ||
171 | mdev->state.conn == C_VERIFY_T) | ||
172 | bit_pos = bm_bits - mdev->ov_left; | ||
173 | else | ||
174 | bit_pos = mdev->bm_resync_fo; | ||
175 | /* Total sectors may be slightly off for oddly | ||
176 | * sized devices. So what. */ | ||
177 | seq_printf(seq, | ||
178 | "\t%3d%% sector pos: %llu/%llu\n", | ||
179 | (int)(bit_pos / (bm_bits/100+1)), | ||
180 | (unsigned long long)bit_pos * BM_SECT_PER_BIT, | ||
181 | (unsigned long long)bm_bits * BM_SECT_PER_BIT); | ||
182 | } | ||
139 | } | 183 | } |
140 | 184 | ||
141 | static void resync_dump_detail(struct seq_file *seq, struct lc_element *e) | 185 | static void resync_dump_detail(struct seq_file *seq, struct lc_element *e) |
@@ -232,20 +276,16 @@ static int drbd_seq_show(struct seq_file *seq, void *v) | |||
232 | mdev->epochs, | 276 | mdev->epochs, |
233 | write_ordering_chars[mdev->write_ordering] | 277 | write_ordering_chars[mdev->write_ordering] |
234 | ); | 278 | ); |
235 | seq_printf(seq, " oos:%lu\n", | 279 | seq_printf(seq, " oos:%llu\n", |
236 | Bit2KB(drbd_bm_total_weight(mdev))); | 280 | Bit2KB((unsigned long long) |
281 | drbd_bm_total_weight(mdev))); | ||
237 | } | 282 | } |
238 | if (mdev->state.conn == C_SYNC_SOURCE || | 283 | if (mdev->state.conn == C_SYNC_SOURCE || |
239 | mdev->state.conn == C_SYNC_TARGET) | 284 | mdev->state.conn == C_SYNC_TARGET || |
285 | mdev->state.conn == C_VERIFY_S || | ||
286 | mdev->state.conn == C_VERIFY_T) | ||
240 | drbd_syncer_progress(mdev, seq); | 287 | drbd_syncer_progress(mdev, seq); |
241 | 288 | ||
242 | if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) | ||
243 | seq_printf(seq, "\t%3d%% %lu/%lu\n", | ||
244 | (int)((mdev->rs_total-mdev->ov_left) / | ||
245 | (mdev->rs_total/100+1)), | ||
246 | mdev->rs_total - mdev->ov_left, | ||
247 | mdev->rs_total); | ||
248 | |||
249 | if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) { | 289 | if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) { |
250 | lc_seq_printf_stats(seq, mdev->resync); | 290 | lc_seq_printf_stats(seq, mdev->resync); |
251 | lc_seq_printf_stats(seq, mdev->act_log); | 291 | lc_seq_printf_stats(seq, mdev->act_log); |
@@ -265,7 +305,15 @@ static int drbd_seq_show(struct seq_file *seq, void *v) | |||
265 | 305 | ||
266 | static int drbd_proc_open(struct inode *inode, struct file *file) | 306 | static int drbd_proc_open(struct inode *inode, struct file *file) |
267 | { | 307 | { |
268 | return single_open(file, drbd_seq_show, PDE(inode)->data); | 308 | if (try_module_get(THIS_MODULE)) |
309 | return single_open(file, drbd_seq_show, PDE(inode)->data); | ||
310 | return -ENODEV; | ||
311 | } | ||
312 | |||
313 | static int drbd_proc_release(struct inode *inode, struct file *file) | ||
314 | { | ||
315 | module_put(THIS_MODULE); | ||
316 | return single_release(inode, file); | ||
269 | } | 317 | } |
270 | 318 | ||
271 | /* PROC FS stuff end */ | 319 | /* PROC FS stuff end */ |
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 8e68be939deb..fe1564c7d8b6 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -277,7 +277,7 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net) | |||
277 | atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; | 277 | atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; |
278 | int i; | 278 | int i; |
279 | 279 | ||
280 | if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) | 280 | if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count) |
281 | i = page_chain_free(page); | 281 | i = page_chain_free(page); |
282 | else { | 282 | else { |
283 | struct page *tmp; | 283 | struct page *tmp; |
@@ -319,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, | |||
319 | struct page *page; | 319 | struct page *page; |
320 | unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; | 320 | unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; |
321 | 321 | ||
322 | if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE)) | 322 | if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE)) |
323 | return NULL; | 323 | return NULL; |
324 | 324 | ||
325 | e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); | 325 | e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); |
@@ -725,16 +725,16 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock) | |||
725 | char tb[4]; | 725 | char tb[4]; |
726 | 726 | ||
727 | if (!*sock) | 727 | if (!*sock) |
728 | return FALSE; | 728 | return false; |
729 | 729 | ||
730 | rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK); | 730 | rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK); |
731 | 731 | ||
732 | if (rr > 0 || rr == -EAGAIN) { | 732 | if (rr > 0 || rr == -EAGAIN) { |
733 | return TRUE; | 733 | return true; |
734 | } else { | 734 | } else { |
735 | sock_release(*sock); | 735 | sock_release(*sock); |
736 | *sock = NULL; | 736 | *sock = NULL; |
737 | return FALSE; | 737 | return false; |
738 | } | 738 | } |
739 | } | 739 | } |
740 | 740 | ||
@@ -768,8 +768,7 @@ static int drbd_connect(struct drbd_conf *mdev) | |||
768 | if (s || ++try >= 3) | 768 | if (s || ++try >= 3) |
769 | break; | 769 | break; |
770 | /* give the other side time to call bind() & listen() */ | 770 | /* give the other side time to call bind() & listen() */ |
771 | __set_current_state(TASK_INTERRUPTIBLE); | 771 | schedule_timeout_interruptible(HZ / 10); |
772 | schedule_timeout(HZ / 10); | ||
773 | } | 772 | } |
774 | 773 | ||
775 | if (s) { | 774 | if (s) { |
@@ -788,8 +787,7 @@ static int drbd_connect(struct drbd_conf *mdev) | |||
788 | } | 787 | } |
789 | 788 | ||
790 | if (sock && msock) { | 789 | if (sock && msock) { |
791 | __set_current_state(TASK_INTERRUPTIBLE); | 790 | schedule_timeout_interruptible(HZ / 10); |
792 | schedule_timeout(HZ / 10); | ||
793 | ok = drbd_socket_okay(mdev, &sock); | 791 | ok = drbd_socket_okay(mdev, &sock); |
794 | ok = drbd_socket_okay(mdev, &msock) && ok; | 792 | ok = drbd_socket_okay(mdev, &msock) && ok; |
795 | if (ok) | 793 | if (ok) |
@@ -906,7 +904,7 @@ retry: | |||
906 | put_ldev(mdev); | 904 | put_ldev(mdev); |
907 | } | 905 | } |
908 | 906 | ||
909 | if (!drbd_send_protocol(mdev)) | 907 | if (drbd_send_protocol(mdev) == -1) |
910 | return -1; | 908 | return -1; |
911 | drbd_send_sync_param(mdev, &mdev->sync_conf); | 909 | drbd_send_sync_param(mdev, &mdev->sync_conf); |
912 | drbd_send_sizes(mdev, 0, 0); | 910 | drbd_send_sizes(mdev, 0, 0); |
@@ -914,6 +912,7 @@ retry: | |||
914 | drbd_send_state(mdev); | 912 | drbd_send_state(mdev); |
915 | clear_bit(USE_DEGR_WFC_T, &mdev->flags); | 913 | clear_bit(USE_DEGR_WFC_T, &mdev->flags); |
916 | clear_bit(RESIZE_PENDING, &mdev->flags); | 914 | clear_bit(RESIZE_PENDING, &mdev->flags); |
915 | mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */ | ||
917 | 916 | ||
918 | return 1; | 917 | return 1; |
919 | 918 | ||
@@ -932,8 +931,9 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi | |||
932 | 931 | ||
933 | r = drbd_recv(mdev, h, sizeof(*h)); | 932 | r = drbd_recv(mdev, h, sizeof(*h)); |
934 | if (unlikely(r != sizeof(*h))) { | 933 | if (unlikely(r != sizeof(*h))) { |
935 | dev_err(DEV, "short read expecting header on sock: r=%d\n", r); | 934 | if (!signal_pending(current)) |
936 | return FALSE; | 935 | dev_warn(DEV, "short read expecting header on sock: r=%d\n", r); |
936 | return false; | ||
937 | } | 937 | } |
938 | 938 | ||
939 | if (likely(h->h80.magic == BE_DRBD_MAGIC)) { | 939 | if (likely(h->h80.magic == BE_DRBD_MAGIC)) { |
@@ -947,11 +947,11 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi | |||
947 | be32_to_cpu(h->h80.magic), | 947 | be32_to_cpu(h->h80.magic), |
948 | be16_to_cpu(h->h80.command), | 948 | be16_to_cpu(h->h80.command), |
949 | be16_to_cpu(h->h80.length)); | 949 | be16_to_cpu(h->h80.length)); |
950 | return FALSE; | 950 | return false; |
951 | } | 951 | } |
952 | mdev->last_received = jiffies; | 952 | mdev->last_received = jiffies; |
953 | 953 | ||
954 | return TRUE; | 954 | return true; |
955 | } | 955 | } |
956 | 956 | ||
957 | static void drbd_flush(struct drbd_conf *mdev) | 957 | static void drbd_flush(struct drbd_conf *mdev) |
@@ -1074,6 +1074,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) | |||
1074 | * @mdev: DRBD device. | 1074 | * @mdev: DRBD device. |
1075 | * @e: epoch entry | 1075 | * @e: epoch entry |
1076 | * @rw: flag field, see bio->bi_rw | 1076 | * @rw: flag field, see bio->bi_rw |
1077 | * | ||
1078 | * May spread the pages to multiple bios, | ||
1079 | * depending on bio_add_page restrictions. | ||
1080 | * | ||
1081 | * Returns 0 if all bios have been submitted, | ||
1082 | * -ENOMEM if we could not allocate enough bios, | ||
1083 | * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a | ||
1084 | * single page to an empty bio (which should never happen and likely indicates | ||
1085 | * that the lower level IO stack is in some way broken). This has been observed | ||
1086 | * on certain Xen deployments. | ||
1077 | */ | 1087 | */ |
1078 | /* TODO allocate from our own bio_set. */ | 1088 | /* TODO allocate from our own bio_set. */ |
1079 | int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, | 1089 | int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, |
@@ -1086,6 +1096,7 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, | |||
1086 | unsigned ds = e->size; | 1096 | unsigned ds = e->size; |
1087 | unsigned n_bios = 0; | 1097 | unsigned n_bios = 0; |
1088 | unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT; | 1098 | unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT; |
1099 | int err = -ENOMEM; | ||
1089 | 1100 | ||
1090 | /* In most cases, we will only need one bio. But in case the lower | 1101 | /* In most cases, we will only need one bio. But in case the lower |
1091 | * level restrictions happen to be different at this offset on this | 1102 | * level restrictions happen to be different at this offset on this |
@@ -1111,8 +1122,17 @@ next_bio: | |||
1111 | page_chain_for_each(page) { | 1122 | page_chain_for_each(page) { |
1112 | unsigned len = min_t(unsigned, ds, PAGE_SIZE); | 1123 | unsigned len = min_t(unsigned, ds, PAGE_SIZE); |
1113 | if (!bio_add_page(bio, page, len, 0)) { | 1124 | if (!bio_add_page(bio, page, len, 0)) { |
1114 | /* a single page must always be possible! */ | 1125 | /* A single page must always be possible! |
1115 | BUG_ON(bio->bi_vcnt == 0); | 1126 | * But in case it fails anyways, |
1127 | * we deal with it, and complain (below). */ | ||
1128 | if (bio->bi_vcnt == 0) { | ||
1129 | dev_err(DEV, | ||
1130 | "bio_add_page failed for len=%u, " | ||
1131 | "bi_vcnt=0 (bi_sector=%llu)\n", | ||
1132 | len, (unsigned long long)bio->bi_sector); | ||
1133 | err = -ENOSPC; | ||
1134 | goto fail; | ||
1135 | } | ||
1116 | goto next_bio; | 1136 | goto next_bio; |
1117 | } | 1137 | } |
1118 | ds -= len; | 1138 | ds -= len; |
@@ -1138,7 +1158,7 @@ fail: | |||
1138 | bios = bios->bi_next; | 1158 | bios = bios->bi_next; |
1139 | bio_put(bio); | 1159 | bio_put(bio); |
1140 | } | 1160 | } |
1141 | return -ENOMEM; | 1161 | return err; |
1142 | } | 1162 | } |
1143 | 1163 | ||
1144 | static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 1164 | static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
@@ -1160,7 +1180,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign | |||
1160 | switch (mdev->write_ordering) { | 1180 | switch (mdev->write_ordering) { |
1161 | case WO_none: | 1181 | case WO_none: |
1162 | if (rv == FE_RECYCLED) | 1182 | if (rv == FE_RECYCLED) |
1163 | return TRUE; | 1183 | return true; |
1164 | 1184 | ||
1165 | /* receiver context, in the writeout path of the other node. | 1185 | /* receiver context, in the writeout path of the other node. |
1166 | * avoid potential distributed deadlock */ | 1186 | * avoid potential distributed deadlock */ |
@@ -1188,10 +1208,10 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign | |||
1188 | D_ASSERT(atomic_read(&epoch->active) == 0); | 1208 | D_ASSERT(atomic_read(&epoch->active) == 0); |
1189 | D_ASSERT(epoch->flags == 0); | 1209 | D_ASSERT(epoch->flags == 0); |
1190 | 1210 | ||
1191 | return TRUE; | 1211 | return true; |
1192 | default: | 1212 | default: |
1193 | dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering); | 1213 | dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering); |
1194 | return FALSE; | 1214 | return false; |
1195 | } | 1215 | } |
1196 | 1216 | ||
1197 | epoch->flags = 0; | 1217 | epoch->flags = 0; |
@@ -1209,7 +1229,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign | |||
1209 | } | 1229 | } |
1210 | spin_unlock(&mdev->epoch_lock); | 1230 | spin_unlock(&mdev->epoch_lock); |
1211 | 1231 | ||
1212 | return TRUE; | 1232 | return true; |
1213 | } | 1233 | } |
1214 | 1234 | ||
1215 | /* used from receive_RSDataReply (recv_resync_read) | 1235 | /* used from receive_RSDataReply (recv_resync_read) |
@@ -1231,21 +1251,25 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ | |||
1231 | if (dgs) { | 1251 | if (dgs) { |
1232 | rr = drbd_recv(mdev, dig_in, dgs); | 1252 | rr = drbd_recv(mdev, dig_in, dgs); |
1233 | if (rr != dgs) { | 1253 | if (rr != dgs) { |
1234 | dev_warn(DEV, "short read receiving data digest: read %d expected %d\n", | 1254 | if (!signal_pending(current)) |
1235 | rr, dgs); | 1255 | dev_warn(DEV, |
1256 | "short read receiving data digest: read %d expected %d\n", | ||
1257 | rr, dgs); | ||
1236 | return NULL; | 1258 | return NULL; |
1237 | } | 1259 | } |
1238 | } | 1260 | } |
1239 | 1261 | ||
1240 | data_size -= dgs; | 1262 | data_size -= dgs; |
1241 | 1263 | ||
1264 | ERR_IF(data_size == 0) return NULL; | ||
1242 | ERR_IF(data_size & 0x1ff) return NULL; | 1265 | ERR_IF(data_size & 0x1ff) return NULL; |
1243 | ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL; | 1266 | ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL; |
1244 | 1267 | ||
1245 | /* even though we trust out peer, | 1268 | /* even though we trust out peer, |
1246 | * we sometimes have to double check. */ | 1269 | * we sometimes have to double check. */ |
1247 | if (sector + (data_size>>9) > capacity) { | 1270 | if (sector + (data_size>>9) > capacity) { |
1248 | dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n", | 1271 | dev_err(DEV, "request from peer beyond end of local disk: " |
1272 | "capacity: %llus < sector: %llus + size: %u\n", | ||
1249 | (unsigned long long)capacity, | 1273 | (unsigned long long)capacity, |
1250 | (unsigned long long)sector, data_size); | 1274 | (unsigned long long)sector, data_size); |
1251 | return NULL; | 1275 | return NULL; |
@@ -1264,15 +1288,16 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ | |||
1264 | unsigned len = min_t(int, ds, PAGE_SIZE); | 1288 | unsigned len = min_t(int, ds, PAGE_SIZE); |
1265 | data = kmap(page); | 1289 | data = kmap(page); |
1266 | rr = drbd_recv(mdev, data, len); | 1290 | rr = drbd_recv(mdev, data, len); |
1267 | if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) { | 1291 | if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) { |
1268 | dev_err(DEV, "Fault injection: Corrupting data on receive\n"); | 1292 | dev_err(DEV, "Fault injection: Corrupting data on receive\n"); |
1269 | data[0] = data[0] ^ (unsigned long)-1; | 1293 | data[0] = data[0] ^ (unsigned long)-1; |
1270 | } | 1294 | } |
1271 | kunmap(page); | 1295 | kunmap(page); |
1272 | if (rr != len) { | 1296 | if (rr != len) { |
1273 | drbd_free_ee(mdev, e); | 1297 | drbd_free_ee(mdev, e); |
1274 | dev_warn(DEV, "short read receiving data: read %d expected %d\n", | 1298 | if (!signal_pending(current)) |
1275 | rr, len); | 1299 | dev_warn(DEV, "short read receiving data: read %d expected %d\n", |
1300 | rr, len); | ||
1276 | return NULL; | 1301 | return NULL; |
1277 | } | 1302 | } |
1278 | ds -= rr; | 1303 | ds -= rr; |
@@ -1281,7 +1306,8 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ | |||
1281 | if (dgs) { | 1306 | if (dgs) { |
1282 | drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv); | 1307 | drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv); |
1283 | if (memcmp(dig_in, dig_vv, dgs)) { | 1308 | if (memcmp(dig_in, dig_vv, dgs)) { |
1284 | dev_err(DEV, "Digest integrity check FAILED.\n"); | 1309 | dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n", |
1310 | (unsigned long long)sector, data_size); | ||
1285 | drbd_bcast_ee(mdev, "digest failed", | 1311 | drbd_bcast_ee(mdev, "digest failed", |
1286 | dgs, dig_in, dig_vv, e); | 1312 | dgs, dig_in, dig_vv, e); |
1287 | drbd_free_ee(mdev, e); | 1313 | drbd_free_ee(mdev, e); |
@@ -1302,7 +1328,7 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size) | |||
1302 | void *data; | 1328 | void *data; |
1303 | 1329 | ||
1304 | if (!data_size) | 1330 | if (!data_size) |
1305 | return TRUE; | 1331 | return true; |
1306 | 1332 | ||
1307 | page = drbd_pp_alloc(mdev, 1, 1); | 1333 | page = drbd_pp_alloc(mdev, 1, 1); |
1308 | 1334 | ||
@@ -1311,8 +1337,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size) | |||
1311 | rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE)); | 1337 | rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE)); |
1312 | if (rr != min_t(int, data_size, PAGE_SIZE)) { | 1338 | if (rr != min_t(int, data_size, PAGE_SIZE)) { |
1313 | rv = 0; | 1339 | rv = 0; |
1314 | dev_warn(DEV, "short read receiving data: read %d expected %d\n", | 1340 | if (!signal_pending(current)) |
1315 | rr, min_t(int, data_size, PAGE_SIZE)); | 1341 | dev_warn(DEV, |
1342 | "short read receiving data: read %d expected %d\n", | ||
1343 | rr, min_t(int, data_size, PAGE_SIZE)); | ||
1316 | break; | 1344 | break; |
1317 | } | 1345 | } |
1318 | data_size -= rr; | 1346 | data_size -= rr; |
@@ -1337,8 +1365,10 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, | |||
1337 | if (dgs) { | 1365 | if (dgs) { |
1338 | rr = drbd_recv(mdev, dig_in, dgs); | 1366 | rr = drbd_recv(mdev, dig_in, dgs); |
1339 | if (rr != dgs) { | 1367 | if (rr != dgs) { |
1340 | dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n", | 1368 | if (!signal_pending(current)) |
1341 | rr, dgs); | 1369 | dev_warn(DEV, |
1370 | "short read receiving data reply digest: read %d expected %d\n", | ||
1371 | rr, dgs); | ||
1342 | return 0; | 1372 | return 0; |
1343 | } | 1373 | } |
1344 | } | 1374 | } |
@@ -1359,9 +1389,10 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, | |||
1359 | expect); | 1389 | expect); |
1360 | kunmap(bvec->bv_page); | 1390 | kunmap(bvec->bv_page); |
1361 | if (rr != expect) { | 1391 | if (rr != expect) { |
1362 | dev_warn(DEV, "short read receiving data reply: " | 1392 | if (!signal_pending(current)) |
1363 | "read %d expected %d\n", | 1393 | dev_warn(DEV, "short read receiving data reply: " |
1364 | rr, expect); | 1394 | "read %d expected %d\n", |
1395 | rr, expect); | ||
1365 | return 0; | 1396 | return 0; |
1366 | } | 1397 | } |
1367 | data_size -= rr; | 1398 | data_size -= rr; |
@@ -1425,11 +1456,10 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si | |||
1425 | 1456 | ||
1426 | atomic_add(data_size >> 9, &mdev->rs_sect_ev); | 1457 | atomic_add(data_size >> 9, &mdev->rs_sect_ev); |
1427 | if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) | 1458 | if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) |
1428 | return TRUE; | 1459 | return true; |
1429 | 1460 | ||
1430 | /* drbd_submit_ee currently fails for one reason only: | 1461 | /* don't care for the reason here */ |
1431 | * not being able to allocate enough bios. | 1462 | dev_err(DEV, "submit failed, triggering re-connect\n"); |
1432 | * Is dropping the connection going to help? */ | ||
1433 | spin_lock_irq(&mdev->req_lock); | 1463 | spin_lock_irq(&mdev->req_lock); |
1434 | list_del(&e->w.list); | 1464 | list_del(&e->w.list); |
1435 | spin_unlock_irq(&mdev->req_lock); | 1465 | spin_unlock_irq(&mdev->req_lock); |
@@ -1437,7 +1467,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si | |||
1437 | drbd_free_ee(mdev, e); | 1467 | drbd_free_ee(mdev, e); |
1438 | fail: | 1468 | fail: |
1439 | put_ldev(mdev); | 1469 | put_ldev(mdev); |
1440 | return FALSE; | 1470 | return false; |
1441 | } | 1471 | } |
1442 | 1472 | ||
1443 | static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 1473 | static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
@@ -1454,7 +1484,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
1454 | spin_unlock_irq(&mdev->req_lock); | 1484 | spin_unlock_irq(&mdev->req_lock); |
1455 | if (unlikely(!req)) { | 1485 | if (unlikely(!req)) { |
1456 | dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n"); | 1486 | dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n"); |
1457 | return FALSE; | 1487 | return false; |
1458 | } | 1488 | } |
1459 | 1489 | ||
1460 | /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid | 1490 | /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid |
@@ -1611,15 +1641,15 @@ static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq) | |||
1611 | return ret; | 1641 | return ret; |
1612 | } | 1642 | } |
1613 | 1643 | ||
1614 | static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf) | 1644 | /* see also bio_flags_to_wire() |
1645 | * DRBD_REQ_*, because we need to semantically map the flags to data packet | ||
1646 | * flags and back. We may replicate to other kernel versions. */ | ||
1647 | static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf) | ||
1615 | { | 1648 | { |
1616 | if (mdev->agreed_pro_version >= 95) | 1649 | return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | |
1617 | return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | | 1650 | (dpf & DP_FUA ? REQ_FUA : 0) | |
1618 | (dpf & DP_FUA ? REQ_FUA : 0) | | 1651 | (dpf & DP_FLUSH ? REQ_FLUSH : 0) | |
1619 | (dpf & DP_FLUSH ? REQ_FUA : 0) | | 1652 | (dpf & DP_DISCARD ? REQ_DISCARD : 0); |
1620 | (dpf & DP_DISCARD ? REQ_DISCARD : 0); | ||
1621 | else | ||
1622 | return dpf & DP_RW_SYNC ? REQ_SYNC : 0; | ||
1623 | } | 1653 | } |
1624 | 1654 | ||
1625 | /* mirrored write */ | 1655 | /* mirrored write */ |
@@ -1632,9 +1662,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
1632 | u32 dp_flags; | 1662 | u32 dp_flags; |
1633 | 1663 | ||
1634 | if (!get_ldev(mdev)) { | 1664 | if (!get_ldev(mdev)) { |
1635 | if (__ratelimit(&drbd_ratelimit_state)) | ||
1636 | dev_err(DEV, "Can not write mirrored data block " | ||
1637 | "to local disk.\n"); | ||
1638 | spin_lock(&mdev->peer_seq_lock); | 1665 | spin_lock(&mdev->peer_seq_lock); |
1639 | if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num)) | 1666 | if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num)) |
1640 | mdev->peer_seq++; | 1667 | mdev->peer_seq++; |
@@ -1654,23 +1681,23 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
1654 | e = read_in_block(mdev, p->block_id, sector, data_size); | 1681 | e = read_in_block(mdev, p->block_id, sector, data_size); |
1655 | if (!e) { | 1682 | if (!e) { |
1656 | put_ldev(mdev); | 1683 | put_ldev(mdev); |
1657 | return FALSE; | 1684 | return false; |
1658 | } | 1685 | } |
1659 | 1686 | ||
1660 | e->w.cb = e_end_block; | 1687 | e->w.cb = e_end_block; |
1661 | 1688 | ||
1689 | dp_flags = be32_to_cpu(p->dp_flags); | ||
1690 | rw |= wire_flags_to_bio(mdev, dp_flags); | ||
1691 | |||
1692 | if (dp_flags & DP_MAY_SET_IN_SYNC) | ||
1693 | e->flags |= EE_MAY_SET_IN_SYNC; | ||
1694 | |||
1662 | spin_lock(&mdev->epoch_lock); | 1695 | spin_lock(&mdev->epoch_lock); |
1663 | e->epoch = mdev->current_epoch; | 1696 | e->epoch = mdev->current_epoch; |
1664 | atomic_inc(&e->epoch->epoch_size); | 1697 | atomic_inc(&e->epoch->epoch_size); |
1665 | atomic_inc(&e->epoch->active); | 1698 | atomic_inc(&e->epoch->active); |
1666 | spin_unlock(&mdev->epoch_lock); | 1699 | spin_unlock(&mdev->epoch_lock); |
1667 | 1700 | ||
1668 | dp_flags = be32_to_cpu(p->dp_flags); | ||
1669 | rw |= write_flags_to_bio(mdev, dp_flags); | ||
1670 | |||
1671 | if (dp_flags & DP_MAY_SET_IN_SYNC) | ||
1672 | e->flags |= EE_MAY_SET_IN_SYNC; | ||
1673 | |||
1674 | /* I'm the receiver, I do hold a net_cnt reference. */ | 1701 | /* I'm the receiver, I do hold a net_cnt reference. */ |
1675 | if (!mdev->net_conf->two_primaries) { | 1702 | if (!mdev->net_conf->two_primaries) { |
1676 | spin_lock_irq(&mdev->req_lock); | 1703 | spin_lock_irq(&mdev->req_lock); |
@@ -1773,7 +1800,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
1773 | put_ldev(mdev); | 1800 | put_ldev(mdev); |
1774 | wake_asender(mdev); | 1801 | wake_asender(mdev); |
1775 | finish_wait(&mdev->misc_wait, &wait); | 1802 | finish_wait(&mdev->misc_wait, &wait); |
1776 | return TRUE; | 1803 | return true; |
1777 | } | 1804 | } |
1778 | 1805 | ||
1779 | if (signal_pending(current)) { | 1806 | if (signal_pending(current)) { |
@@ -1829,11 +1856,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
1829 | } | 1856 | } |
1830 | 1857 | ||
1831 | if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) | 1858 | if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) |
1832 | return TRUE; | 1859 | return true; |
1833 | 1860 | ||
1834 | /* drbd_submit_ee currently fails for one reason only: | 1861 | /* don't care for the reason here */ |
1835 | * not being able to allocate enough bios. | 1862 | dev_err(DEV, "submit failed, triggering re-connect\n"); |
1836 | * Is dropping the connection going to help? */ | ||
1837 | spin_lock_irq(&mdev->req_lock); | 1863 | spin_lock_irq(&mdev->req_lock); |
1838 | list_del(&e->w.list); | 1864 | list_del(&e->w.list); |
1839 | hlist_del_init(&e->colision); | 1865 | hlist_del_init(&e->colision); |
@@ -1842,12 +1868,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
1842 | drbd_al_complete_io(mdev, e->sector); | 1868 | drbd_al_complete_io(mdev, e->sector); |
1843 | 1869 | ||
1844 | out_interrupted: | 1870 | out_interrupted: |
1845 | /* yes, the epoch_size now is imbalanced. | 1871 | drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP); |
1846 | * but we drop the connection anyways, so we don't have a chance to | ||
1847 | * receive a barrier... atomic_inc(&mdev->epoch_size); */ | ||
1848 | put_ldev(mdev); | 1872 | put_ldev(mdev); |
1849 | drbd_free_ee(mdev, e); | 1873 | drbd_free_ee(mdev, e); |
1850 | return FALSE; | 1874 | return false; |
1851 | } | 1875 | } |
1852 | 1876 | ||
1853 | /* We may throttle resync, if the lower device seems to be busy, | 1877 | /* We may throttle resync, if the lower device seems to be busy, |
@@ -1861,10 +1885,11 @@ out_interrupted: | |||
1861 | * The current sync rate used here uses only the most recent two step marks, | 1885 | * The current sync rate used here uses only the most recent two step marks, |
1862 | * to have a short time average so we can react faster. | 1886 | * to have a short time average so we can react faster. |
1863 | */ | 1887 | */ |
1864 | int drbd_rs_should_slow_down(struct drbd_conf *mdev) | 1888 | int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector) |
1865 | { | 1889 | { |
1866 | struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk; | 1890 | struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk; |
1867 | unsigned long db, dt, dbdt; | 1891 | unsigned long db, dt, dbdt; |
1892 | struct lc_element *tmp; | ||
1868 | int curr_events; | 1893 | int curr_events; |
1869 | int throttle = 0; | 1894 | int throttle = 0; |
1870 | 1895 | ||
@@ -1872,9 +1897,22 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev) | |||
1872 | if (mdev->sync_conf.c_min_rate == 0) | 1897 | if (mdev->sync_conf.c_min_rate == 0) |
1873 | return 0; | 1898 | return 0; |
1874 | 1899 | ||
1900 | spin_lock_irq(&mdev->al_lock); | ||
1901 | tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector)); | ||
1902 | if (tmp) { | ||
1903 | struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); | ||
1904 | if (test_bit(BME_PRIORITY, &bm_ext->flags)) { | ||
1905 | spin_unlock_irq(&mdev->al_lock); | ||
1906 | return 0; | ||
1907 | } | ||
1908 | /* Do not slow down if app IO is already waiting for this extent */ | ||
1909 | } | ||
1910 | spin_unlock_irq(&mdev->al_lock); | ||
1911 | |||
1875 | curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + | 1912 | curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + |
1876 | (int)part_stat_read(&disk->part0, sectors[1]) - | 1913 | (int)part_stat_read(&disk->part0, sectors[1]) - |
1877 | atomic_read(&mdev->rs_sect_ev); | 1914 | atomic_read(&mdev->rs_sect_ev); |
1915 | |||
1878 | if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) { | 1916 | if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) { |
1879 | unsigned long rs_left; | 1917 | unsigned long rs_left; |
1880 | int i; | 1918 | int i; |
@@ -1883,8 +1921,12 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev) | |||
1883 | 1921 | ||
1884 | /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP, | 1922 | /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP, |
1885 | * approx. */ | 1923 | * approx. */ |
1886 | i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS; | 1924 | i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; |
1887 | rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; | 1925 | |
1926 | if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) | ||
1927 | rs_left = mdev->ov_left; | ||
1928 | else | ||
1929 | rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; | ||
1888 | 1930 | ||
1889 | dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; | 1931 | dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; |
1890 | if (!dt) | 1932 | if (!dt) |
@@ -1912,15 +1954,15 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un | |||
1912 | sector = be64_to_cpu(p->sector); | 1954 | sector = be64_to_cpu(p->sector); |
1913 | size = be32_to_cpu(p->blksize); | 1955 | size = be32_to_cpu(p->blksize); |
1914 | 1956 | ||
1915 | if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { | 1957 | if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { |
1916 | dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, | 1958 | dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, |
1917 | (unsigned long long)sector, size); | 1959 | (unsigned long long)sector, size); |
1918 | return FALSE; | 1960 | return false; |
1919 | } | 1961 | } |
1920 | if (sector + (size>>9) > capacity) { | 1962 | if (sector + (size>>9) > capacity) { |
1921 | dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, | 1963 | dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, |
1922 | (unsigned long long)sector, size); | 1964 | (unsigned long long)sector, size); |
1923 | return FALSE; | 1965 | return false; |
1924 | } | 1966 | } |
1925 | 1967 | ||
1926 | if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { | 1968 | if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { |
@@ -1957,7 +1999,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un | |||
1957 | e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO); | 1999 | e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO); |
1958 | if (!e) { | 2000 | if (!e) { |
1959 | put_ldev(mdev); | 2001 | put_ldev(mdev); |
1960 | return FALSE; | 2002 | return false; |
1961 | } | 2003 | } |
1962 | 2004 | ||
1963 | switch (cmd) { | 2005 | switch (cmd) { |
@@ -1970,6 +2012,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un | |||
1970 | case P_RS_DATA_REQUEST: | 2012 | case P_RS_DATA_REQUEST: |
1971 | e->w.cb = w_e_end_rsdata_req; | 2013 | e->w.cb = w_e_end_rsdata_req; |
1972 | fault_type = DRBD_FAULT_RS_RD; | 2014 | fault_type = DRBD_FAULT_RS_RD; |
2015 | /* used in the sector offset progress display */ | ||
2016 | mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); | ||
1973 | break; | 2017 | break; |
1974 | 2018 | ||
1975 | case P_OV_REPLY: | 2019 | case P_OV_REPLY: |
@@ -1991,7 +2035,11 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un | |||
1991 | if (cmd == P_CSUM_RS_REQUEST) { | 2035 | if (cmd == P_CSUM_RS_REQUEST) { |
1992 | D_ASSERT(mdev->agreed_pro_version >= 89); | 2036 | D_ASSERT(mdev->agreed_pro_version >= 89); |
1993 | e->w.cb = w_e_end_csum_rs_req; | 2037 | e->w.cb = w_e_end_csum_rs_req; |
2038 | /* used in the sector offset progress display */ | ||
2039 | mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); | ||
1994 | } else if (cmd == P_OV_REPLY) { | 2040 | } else if (cmd == P_OV_REPLY) { |
2041 | /* track progress, we may need to throttle */ | ||
2042 | atomic_add(size >> 9, &mdev->rs_sect_in); | ||
1995 | e->w.cb = w_e_end_ov_reply; | 2043 | e->w.cb = w_e_end_ov_reply; |
1996 | dec_rs_pending(mdev); | 2044 | dec_rs_pending(mdev); |
1997 | /* drbd_rs_begin_io done when we sent this request, | 2045 | /* drbd_rs_begin_io done when we sent this request, |
@@ -2003,9 +2051,16 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un | |||
2003 | case P_OV_REQUEST: | 2051 | case P_OV_REQUEST: |
2004 | if (mdev->ov_start_sector == ~(sector_t)0 && | 2052 | if (mdev->ov_start_sector == ~(sector_t)0 && |
2005 | mdev->agreed_pro_version >= 90) { | 2053 | mdev->agreed_pro_version >= 90) { |
2054 | unsigned long now = jiffies; | ||
2055 | int i; | ||
2006 | mdev->ov_start_sector = sector; | 2056 | mdev->ov_start_sector = sector; |
2007 | mdev->ov_position = sector; | 2057 | mdev->ov_position = sector; |
2008 | mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector); | 2058 | mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector); |
2059 | mdev->rs_total = mdev->ov_left; | ||
2060 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { | ||
2061 | mdev->rs_mark_left[i] = mdev->ov_left; | ||
2062 | mdev->rs_mark_time[i] = now; | ||
2063 | } | ||
2009 | dev_info(DEV, "Online Verify start sector: %llu\n", | 2064 | dev_info(DEV, "Online Verify start sector: %llu\n", |
2010 | (unsigned long long)sector); | 2065 | (unsigned long long)sector); |
2011 | } | 2066 | } |
@@ -2042,9 +2097,9 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un | |||
2042 | * we would also throttle its application reads. | 2097 | * we would also throttle its application reads. |
2043 | * In that case, throttling is done on the SyncTarget only. | 2098 | * In that case, throttling is done on the SyncTarget only. |
2044 | */ | 2099 | */ |
2045 | if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev)) | 2100 | if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector)) |
2046 | msleep(100); | 2101 | schedule_timeout_uninterruptible(HZ/10); |
2047 | if (drbd_rs_begin_io(mdev, e->sector)) | 2102 | if (drbd_rs_begin_io(mdev, sector)) |
2048 | goto out_free_e; | 2103 | goto out_free_e; |
2049 | 2104 | ||
2050 | submit_for_resync: | 2105 | submit_for_resync: |
@@ -2057,11 +2112,10 @@ submit: | |||
2057 | spin_unlock_irq(&mdev->req_lock); | 2112 | spin_unlock_irq(&mdev->req_lock); |
2058 | 2113 | ||
2059 | if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) | 2114 | if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) |
2060 | return TRUE; | 2115 | return true; |
2061 | 2116 | ||
2062 | /* drbd_submit_ee currently fails for one reason only: | 2117 | /* don't care for the reason here */ |
2063 | * not being able to allocate enough bios. | 2118 | dev_err(DEV, "submit failed, triggering re-connect\n"); |
2064 | * Is dropping the connection going to help? */ | ||
2065 | spin_lock_irq(&mdev->req_lock); | 2119 | spin_lock_irq(&mdev->req_lock); |
2066 | list_del(&e->w.list); | 2120 | list_del(&e->w.list); |
2067 | spin_unlock_irq(&mdev->req_lock); | 2121 | spin_unlock_irq(&mdev->req_lock); |
@@ -2070,7 +2124,7 @@ submit: | |||
2070 | out_free_e: | 2124 | out_free_e: |
2071 | put_ldev(mdev); | 2125 | put_ldev(mdev); |
2072 | drbd_free_ee(mdev, e); | 2126 | drbd_free_ee(mdev, e); |
2073 | return FALSE; | 2127 | return false; |
2074 | } | 2128 | } |
2075 | 2129 | ||
2076 | static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) | 2130 | static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) |
@@ -2147,10 +2201,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) | |||
2147 | 2201 | ||
2148 | static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) | 2202 | static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) |
2149 | { | 2203 | { |
2150 | int self, peer, hg, rv = -100; | 2204 | int hg, rv = -100; |
2151 | |||
2152 | self = mdev->ldev->md.uuid[UI_BITMAP] & 1; | ||
2153 | peer = mdev->p_uuid[UI_BITMAP] & 1; | ||
2154 | 2205 | ||
2155 | switch (mdev->net_conf->after_sb_1p) { | 2206 | switch (mdev->net_conf->after_sb_1p) { |
2156 | case ASB_DISCARD_YOUNGER_PRI: | 2207 | case ASB_DISCARD_YOUNGER_PRI: |
@@ -2177,12 +2228,14 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) | |||
2177 | case ASB_CALL_HELPER: | 2228 | case ASB_CALL_HELPER: |
2178 | hg = drbd_asb_recover_0p(mdev); | 2229 | hg = drbd_asb_recover_0p(mdev); |
2179 | if (hg == -1 && mdev->state.role == R_PRIMARY) { | 2230 | if (hg == -1 && mdev->state.role == R_PRIMARY) { |
2180 | self = drbd_set_role(mdev, R_SECONDARY, 0); | 2231 | enum drbd_state_rv rv2; |
2232 | |||
2233 | drbd_set_role(mdev, R_SECONDARY, 0); | ||
2181 | /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, | 2234 | /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, |
2182 | * we might be here in C_WF_REPORT_PARAMS which is transient. | 2235 | * we might be here in C_WF_REPORT_PARAMS which is transient. |
2183 | * we do not need to wait for the after state change work either. */ | 2236 | * we do not need to wait for the after state change work either. */ |
2184 | self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); | 2237 | rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); |
2185 | if (self != SS_SUCCESS) { | 2238 | if (rv2 != SS_SUCCESS) { |
2186 | drbd_khelper(mdev, "pri-lost-after-sb"); | 2239 | drbd_khelper(mdev, "pri-lost-after-sb"); |
2187 | } else { | 2240 | } else { |
2188 | dev_warn(DEV, "Successfully gave up primary role.\n"); | 2241 | dev_warn(DEV, "Successfully gave up primary role.\n"); |
@@ -2197,10 +2250,7 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) | |||
2197 | 2250 | ||
2198 | static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) | 2251 | static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) |
2199 | { | 2252 | { |
2200 | int self, peer, hg, rv = -100; | 2253 | int hg, rv = -100; |
2201 | |||
2202 | self = mdev->ldev->md.uuid[UI_BITMAP] & 1; | ||
2203 | peer = mdev->p_uuid[UI_BITMAP] & 1; | ||
2204 | 2254 | ||
2205 | switch (mdev->net_conf->after_sb_2p) { | 2255 | switch (mdev->net_conf->after_sb_2p) { |
2206 | case ASB_DISCARD_YOUNGER_PRI: | 2256 | case ASB_DISCARD_YOUNGER_PRI: |
@@ -2220,11 +2270,13 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) | |||
2220 | case ASB_CALL_HELPER: | 2270 | case ASB_CALL_HELPER: |
2221 | hg = drbd_asb_recover_0p(mdev); | 2271 | hg = drbd_asb_recover_0p(mdev); |
2222 | if (hg == -1) { | 2272 | if (hg == -1) { |
2273 | enum drbd_state_rv rv2; | ||
2274 | |||
2223 | /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, | 2275 | /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, |
2224 | * we might be here in C_WF_REPORT_PARAMS which is transient. | 2276 | * we might be here in C_WF_REPORT_PARAMS which is transient. |
2225 | * we do not need to wait for the after state change work either. */ | 2277 | * we do not need to wait for the after state change work either. */ |
2226 | self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); | 2278 | rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); |
2227 | if (self != SS_SUCCESS) { | 2279 | if (rv2 != SS_SUCCESS) { |
2228 | drbd_khelper(mdev, "pri-lost-after-sb"); | 2280 | drbd_khelper(mdev, "pri-lost-after-sb"); |
2229 | } else { | 2281 | } else { |
2230 | dev_warn(DEV, "Successfully gave up primary role.\n"); | 2282 | dev_warn(DEV, "Successfully gave up primary role.\n"); |
@@ -2263,6 +2315,8 @@ static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid, | |||
2263 | -2 C_SYNC_TARGET set BitMap | 2315 | -2 C_SYNC_TARGET set BitMap |
2264 | -100 after split brain, disconnect | 2316 | -100 after split brain, disconnect |
2265 | -1000 unrelated data | 2317 | -1000 unrelated data |
2318 | -1091 requires proto 91 | ||
2319 | -1096 requires proto 96 | ||
2266 | */ | 2320 | */ |
2267 | static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) | 2321 | static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) |
2268 | { | 2322 | { |
@@ -2292,7 +2346,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l | |||
2292 | if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { | 2346 | if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { |
2293 | 2347 | ||
2294 | if (mdev->agreed_pro_version < 91) | 2348 | if (mdev->agreed_pro_version < 91) |
2295 | return -1001; | 2349 | return -1091; |
2296 | 2350 | ||
2297 | if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && | 2351 | if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && |
2298 | (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { | 2352 | (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { |
@@ -2313,7 +2367,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l | |||
2313 | if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { | 2367 | if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { |
2314 | 2368 | ||
2315 | if (mdev->agreed_pro_version < 91) | 2369 | if (mdev->agreed_pro_version < 91) |
2316 | return -1001; | 2370 | return -1091; |
2317 | 2371 | ||
2318 | if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && | 2372 | if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && |
2319 | (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) { | 2373 | (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) { |
@@ -2358,17 +2412,22 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l | |||
2358 | *rule_nr = 51; | 2412 | *rule_nr = 51; |
2359 | peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); | 2413 | peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); |
2360 | if (self == peer) { | 2414 | if (self == peer) { |
2361 | self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); | 2415 | if (mdev->agreed_pro_version < 96 ? |
2362 | peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1); | 2416 | (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == |
2363 | if (self == peer) { | 2417 | (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : |
2418 | peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) { | ||
2364 | /* The last P_SYNC_UUID did not get though. Undo the last start of | 2419 | /* The last P_SYNC_UUID did not get though. Undo the last start of |
2365 | resync as sync source modifications of the peer's UUIDs. */ | 2420 | resync as sync source modifications of the peer's UUIDs. */ |
2366 | 2421 | ||
2367 | if (mdev->agreed_pro_version < 91) | 2422 | if (mdev->agreed_pro_version < 91) |
2368 | return -1001; | 2423 | return -1091; |
2369 | 2424 | ||
2370 | mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START]; | 2425 | mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START]; |
2371 | mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1]; | 2426 | mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1]; |
2427 | |||
2428 | dev_info(DEV, "Did not got last syncUUID packet, corrected:\n"); | ||
2429 | drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); | ||
2430 | |||
2372 | return -1; | 2431 | return -1; |
2373 | } | 2432 | } |
2374 | } | 2433 | } |
@@ -2390,20 +2449,20 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l | |||
2390 | *rule_nr = 71; | 2449 | *rule_nr = 71; |
2391 | self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); | 2450 | self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); |
2392 | if (self == peer) { | 2451 | if (self == peer) { |
2393 | self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1); | 2452 | if (mdev->agreed_pro_version < 96 ? |
2394 | peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); | 2453 | (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == |
2395 | if (self == peer) { | 2454 | (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) : |
2455 | self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { | ||
2396 | /* The last P_SYNC_UUID did not get though. Undo the last start of | 2456 | /* The last P_SYNC_UUID did not get though. Undo the last start of |
2397 | resync as sync source modifications of our UUIDs. */ | 2457 | resync as sync source modifications of our UUIDs. */ |
2398 | 2458 | ||
2399 | if (mdev->agreed_pro_version < 91) | 2459 | if (mdev->agreed_pro_version < 91) |
2400 | return -1001; | 2460 | return -1091; |
2401 | 2461 | ||
2402 | _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]); | 2462 | _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]); |
2403 | _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]); | 2463 | _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]); |
2404 | 2464 | ||
2405 | dev_info(DEV, "Undid last start of resync:\n"); | 2465 | dev_info(DEV, "Last syncUUID did not get through, corrected:\n"); |
2406 | |||
2407 | drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, | 2466 | drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, |
2408 | mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); | 2467 | mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); |
2409 | 2468 | ||
@@ -2466,8 +2525,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol | |||
2466 | dev_alert(DEV, "Unrelated data, aborting!\n"); | 2525 | dev_alert(DEV, "Unrelated data, aborting!\n"); |
2467 | return C_MASK; | 2526 | return C_MASK; |
2468 | } | 2527 | } |
2469 | if (hg == -1001) { | 2528 | if (hg < -1000) { |
2470 | dev_alert(DEV, "To resolve this both sides have to support at least protocol\n"); | 2529 | dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); |
2471 | return C_MASK; | 2530 | return C_MASK; |
2472 | } | 2531 | } |
2473 | 2532 | ||
@@ -2566,7 +2625,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol | |||
2566 | 2625 | ||
2567 | if (abs(hg) >= 2) { | 2626 | if (abs(hg) >= 2) { |
2568 | dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); | 2627 | dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); |
2569 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake")) | 2628 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake", |
2629 | BM_LOCKED_SET_ALLOWED)) | ||
2570 | return C_MASK; | 2630 | return C_MASK; |
2571 | } | 2631 | } |
2572 | 2632 | ||
@@ -2660,7 +2720,7 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig | |||
2660 | unsigned char *my_alg = mdev->net_conf->integrity_alg; | 2720 | unsigned char *my_alg = mdev->net_conf->integrity_alg; |
2661 | 2721 | ||
2662 | if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size) | 2722 | if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size) |
2663 | return FALSE; | 2723 | return false; |
2664 | 2724 | ||
2665 | p_integrity_alg[SHARED_SECRET_MAX-1] = 0; | 2725 | p_integrity_alg[SHARED_SECRET_MAX-1] = 0; |
2666 | if (strcmp(p_integrity_alg, my_alg)) { | 2726 | if (strcmp(p_integrity_alg, my_alg)) { |
@@ -2671,11 +2731,11 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig | |||
2671 | my_alg[0] ? my_alg : (unsigned char *)"<not-used>"); | 2731 | my_alg[0] ? my_alg : (unsigned char *)"<not-used>"); |
2672 | } | 2732 | } |
2673 | 2733 | ||
2674 | return TRUE; | 2734 | return true; |
2675 | 2735 | ||
2676 | disconnect: | 2736 | disconnect: |
2677 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 2737 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
2678 | return FALSE; | 2738 | return false; |
2679 | } | 2739 | } |
2680 | 2740 | ||
2681 | /* helper function | 2741 | /* helper function |
@@ -2707,7 +2767,7 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev, | |||
2707 | 2767 | ||
2708 | static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size) | 2768 | static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size) |
2709 | { | 2769 | { |
2710 | int ok = TRUE; | 2770 | int ok = true; |
2711 | struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95; | 2771 | struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95; |
2712 | unsigned int header_size, data_size, exp_max_sz; | 2772 | unsigned int header_size, data_size, exp_max_sz; |
2713 | struct crypto_hash *verify_tfm = NULL; | 2773 | struct crypto_hash *verify_tfm = NULL; |
@@ -2725,7 +2785,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
2725 | if (packet_size > exp_max_sz) { | 2785 | if (packet_size > exp_max_sz) { |
2726 | dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", | 2786 | dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", |
2727 | packet_size, exp_max_sz); | 2787 | packet_size, exp_max_sz); |
2728 | return FALSE; | 2788 | return false; |
2729 | } | 2789 | } |
2730 | 2790 | ||
2731 | if (apv <= 88) { | 2791 | if (apv <= 88) { |
@@ -2745,7 +2805,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
2745 | memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); | 2805 | memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); |
2746 | 2806 | ||
2747 | if (drbd_recv(mdev, &p->head.payload, header_size) != header_size) | 2807 | if (drbd_recv(mdev, &p->head.payload, header_size) != header_size) |
2748 | return FALSE; | 2808 | return false; |
2749 | 2809 | ||
2750 | mdev->sync_conf.rate = be32_to_cpu(p->rate); | 2810 | mdev->sync_conf.rate = be32_to_cpu(p->rate); |
2751 | 2811 | ||
@@ -2755,11 +2815,11 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
2755 | dev_err(DEV, "verify-alg too long, " | 2815 | dev_err(DEV, "verify-alg too long, " |
2756 | "peer wants %u, accepting only %u byte\n", | 2816 | "peer wants %u, accepting only %u byte\n", |
2757 | data_size, SHARED_SECRET_MAX); | 2817 | data_size, SHARED_SECRET_MAX); |
2758 | return FALSE; | 2818 | return false; |
2759 | } | 2819 | } |
2760 | 2820 | ||
2761 | if (drbd_recv(mdev, p->verify_alg, data_size) != data_size) | 2821 | if (drbd_recv(mdev, p->verify_alg, data_size) != data_size) |
2762 | return FALSE; | 2822 | return false; |
2763 | 2823 | ||
2764 | /* we expect NUL terminated string */ | 2824 | /* we expect NUL terminated string */ |
2765 | /* but just in case someone tries to be evil */ | 2825 | /* but just in case someone tries to be evil */ |
@@ -2853,7 +2913,7 @@ disconnect: | |||
2853 | /* but free the verify_tfm again, if csums_tfm did not work out */ | 2913 | /* but free the verify_tfm again, if csums_tfm did not work out */ |
2854 | crypto_free_hash(verify_tfm); | 2914 | crypto_free_hash(verify_tfm); |
2855 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 2915 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
2856 | return FALSE; | 2916 | return false; |
2857 | } | 2917 | } |
2858 | 2918 | ||
2859 | static void drbd_setup_order_type(struct drbd_conf *mdev, int peer) | 2919 | static void drbd_setup_order_type(struct drbd_conf *mdev, int peer) |
@@ -2879,7 +2939,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
2879 | { | 2939 | { |
2880 | struct p_sizes *p = &mdev->data.rbuf.sizes; | 2940 | struct p_sizes *p = &mdev->data.rbuf.sizes; |
2881 | enum determine_dev_size dd = unchanged; | 2941 | enum determine_dev_size dd = unchanged; |
2882 | unsigned int max_seg_s; | 2942 | unsigned int max_bio_size; |
2883 | sector_t p_size, p_usize, my_usize; | 2943 | sector_t p_size, p_usize, my_usize; |
2884 | int ldsc = 0; /* local disk size changed */ | 2944 | int ldsc = 0; /* local disk size changed */ |
2885 | enum dds_flags ddsf; | 2945 | enum dds_flags ddsf; |
@@ -2890,7 +2950,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
2890 | if (p_size == 0 && mdev->state.disk == D_DISKLESS) { | 2950 | if (p_size == 0 && mdev->state.disk == D_DISKLESS) { |
2891 | dev_err(DEV, "some backing storage is needed\n"); | 2951 | dev_err(DEV, "some backing storage is needed\n"); |
2892 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 2952 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
2893 | return FALSE; | 2953 | return false; |
2894 | } | 2954 | } |
2895 | 2955 | ||
2896 | /* just store the peer's disk size for now. | 2956 | /* just store the peer's disk size for now. |
@@ -2927,18 +2987,17 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
2927 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 2987 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
2928 | mdev->ldev->dc.disk_size = my_usize; | 2988 | mdev->ldev->dc.disk_size = my_usize; |
2929 | put_ldev(mdev); | 2989 | put_ldev(mdev); |
2930 | return FALSE; | 2990 | return false; |
2931 | } | 2991 | } |
2932 | put_ldev(mdev); | 2992 | put_ldev(mdev); |
2933 | } | 2993 | } |
2934 | #undef min_not_zero | ||
2935 | 2994 | ||
2936 | ddsf = be16_to_cpu(p->dds_flags); | 2995 | ddsf = be16_to_cpu(p->dds_flags); |
2937 | if (get_ldev(mdev)) { | 2996 | if (get_ldev(mdev)) { |
2938 | dd = drbd_determin_dev_size(mdev, ddsf); | 2997 | dd = drbd_determin_dev_size(mdev, ddsf); |
2939 | put_ldev(mdev); | 2998 | put_ldev(mdev); |
2940 | if (dd == dev_size_error) | 2999 | if (dd == dev_size_error) |
2941 | return FALSE; | 3000 | return false; |
2942 | drbd_md_sync(mdev); | 3001 | drbd_md_sync(mdev); |
2943 | } else { | 3002 | } else { |
2944 | /* I am diskless, need to accept the peer's size. */ | 3003 | /* I am diskless, need to accept the peer's size. */ |
@@ -2952,14 +3011,14 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
2952 | } | 3011 | } |
2953 | 3012 | ||
2954 | if (mdev->agreed_pro_version < 94) | 3013 | if (mdev->agreed_pro_version < 94) |
2955 | max_seg_s = be32_to_cpu(p->max_segment_size); | 3014 | max_bio_size = be32_to_cpu(p->max_bio_size); |
2956 | else if (mdev->agreed_pro_version == 94) | 3015 | else if (mdev->agreed_pro_version == 94) |
2957 | max_seg_s = DRBD_MAX_SIZE_H80_PACKET; | 3016 | max_bio_size = DRBD_MAX_SIZE_H80_PACKET; |
2958 | else /* drbd 8.3.8 onwards */ | 3017 | else /* drbd 8.3.8 onwards */ |
2959 | max_seg_s = DRBD_MAX_SEGMENT_SIZE; | 3018 | max_bio_size = DRBD_MAX_BIO_SIZE; |
2960 | 3019 | ||
2961 | if (max_seg_s != queue_max_segment_size(mdev->rq_queue)) | 3020 | if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9) |
2962 | drbd_setup_queue_param(mdev, max_seg_s); | 3021 | drbd_setup_queue_param(mdev, max_bio_size); |
2963 | 3022 | ||
2964 | drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type)); | 3023 | drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type)); |
2965 | put_ldev(mdev); | 3024 | put_ldev(mdev); |
@@ -2985,14 +3044,14 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
2985 | } | 3044 | } |
2986 | } | 3045 | } |
2987 | 3046 | ||
2988 | return TRUE; | 3047 | return true; |
2989 | } | 3048 | } |
2990 | 3049 | ||
2991 | static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 3050 | static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
2992 | { | 3051 | { |
2993 | struct p_uuids *p = &mdev->data.rbuf.uuids; | 3052 | struct p_uuids *p = &mdev->data.rbuf.uuids; |
2994 | u64 *p_uuid; | 3053 | u64 *p_uuid; |
2995 | int i; | 3054 | int i, updated_uuids = 0; |
2996 | 3055 | ||
2997 | p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); | 3056 | p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); |
2998 | 3057 | ||
@@ -3009,7 +3068,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3009 | dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", | 3068 | dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", |
3010 | (unsigned long long)mdev->ed_uuid); | 3069 | (unsigned long long)mdev->ed_uuid); |
3011 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 3070 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
3012 | return FALSE; | 3071 | return false; |
3013 | } | 3072 | } |
3014 | 3073 | ||
3015 | if (get_ldev(mdev)) { | 3074 | if (get_ldev(mdev)) { |
@@ -3021,19 +3080,21 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3021 | if (skip_initial_sync) { | 3080 | if (skip_initial_sync) { |
3022 | dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); | 3081 | dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); |
3023 | drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, | 3082 | drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, |
3024 | "clear_n_write from receive_uuids"); | 3083 | "clear_n_write from receive_uuids", |
3084 | BM_LOCKED_TEST_ALLOWED); | ||
3025 | _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); | 3085 | _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); |
3026 | _drbd_uuid_set(mdev, UI_BITMAP, 0); | 3086 | _drbd_uuid_set(mdev, UI_BITMAP, 0); |
3027 | _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), | 3087 | _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), |
3028 | CS_VERBOSE, NULL); | 3088 | CS_VERBOSE, NULL); |
3029 | drbd_md_sync(mdev); | 3089 | drbd_md_sync(mdev); |
3090 | updated_uuids = 1; | ||
3030 | } | 3091 | } |
3031 | put_ldev(mdev); | 3092 | put_ldev(mdev); |
3032 | } else if (mdev->state.disk < D_INCONSISTENT && | 3093 | } else if (mdev->state.disk < D_INCONSISTENT && |
3033 | mdev->state.role == R_PRIMARY) { | 3094 | mdev->state.role == R_PRIMARY) { |
3034 | /* I am a diskless primary, the peer just created a new current UUID | 3095 | /* I am a diskless primary, the peer just created a new current UUID |
3035 | for me. */ | 3096 | for me. */ |
3036 | drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); | 3097 | updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); |
3037 | } | 3098 | } |
3038 | 3099 | ||
3039 | /* Before we test for the disk state, we should wait until an eventually | 3100 | /* Before we test for the disk state, we should wait until an eventually |
@@ -3042,9 +3103,12 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3042 | new disk state... */ | 3103 | new disk state... */ |
3043 | wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); | 3104 | wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); |
3044 | if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) | 3105 | if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) |
3045 | drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); | 3106 | updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); |
3046 | 3107 | ||
3047 | return TRUE; | 3108 | if (updated_uuids) |
3109 | drbd_print_uuids(mdev, "receiver updated UUIDs to"); | ||
3110 | |||
3111 | return true; | ||
3048 | } | 3112 | } |
3049 | 3113 | ||
3050 | /** | 3114 | /** |
@@ -3081,7 +3145,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3081 | { | 3145 | { |
3082 | struct p_req_state *p = &mdev->data.rbuf.req_state; | 3146 | struct p_req_state *p = &mdev->data.rbuf.req_state; |
3083 | union drbd_state mask, val; | 3147 | union drbd_state mask, val; |
3084 | int rv; | 3148 | enum drbd_state_rv rv; |
3085 | 3149 | ||
3086 | mask.i = be32_to_cpu(p->mask); | 3150 | mask.i = be32_to_cpu(p->mask); |
3087 | val.i = be32_to_cpu(p->val); | 3151 | val.i = be32_to_cpu(p->val); |
@@ -3089,7 +3153,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3089 | if (test_bit(DISCARD_CONCURRENT, &mdev->flags) && | 3153 | if (test_bit(DISCARD_CONCURRENT, &mdev->flags) && |
3090 | test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { | 3154 | test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { |
3091 | drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); | 3155 | drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); |
3092 | return TRUE; | 3156 | return true; |
3093 | } | 3157 | } |
3094 | 3158 | ||
3095 | mask = convert_state(mask); | 3159 | mask = convert_state(mask); |
@@ -3100,7 +3164,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3100 | drbd_send_sr_reply(mdev, rv); | 3164 | drbd_send_sr_reply(mdev, rv); |
3101 | drbd_md_sync(mdev); | 3165 | drbd_md_sync(mdev); |
3102 | 3166 | ||
3103 | return TRUE; | 3167 | return true; |
3104 | } | 3168 | } |
3105 | 3169 | ||
3106 | static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 3170 | static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
@@ -3145,7 +3209,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3145 | peer_state.conn == C_CONNECTED) { | 3209 | peer_state.conn == C_CONNECTED) { |
3146 | if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) | 3210 | if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) |
3147 | drbd_resync_finished(mdev); | 3211 | drbd_resync_finished(mdev); |
3148 | return TRUE; | 3212 | return true; |
3149 | } | 3213 | } |
3150 | } | 3214 | } |
3151 | 3215 | ||
@@ -3161,6 +3225,9 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3161 | if (ns.conn == C_WF_REPORT_PARAMS) | 3225 | if (ns.conn == C_WF_REPORT_PARAMS) |
3162 | ns.conn = C_CONNECTED; | 3226 | ns.conn = C_CONNECTED; |
3163 | 3227 | ||
3228 | if (peer_state.conn == C_AHEAD) | ||
3229 | ns.conn = C_BEHIND; | ||
3230 | |||
3164 | if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && | 3231 | if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && |
3165 | get_ldev_if_state(mdev, D_NEGOTIATING)) { | 3232 | get_ldev_if_state(mdev, D_NEGOTIATING)) { |
3166 | int cr; /* consider resync */ | 3233 | int cr; /* consider resync */ |
@@ -3195,10 +3262,10 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3195 | real_peer_disk = D_DISKLESS; | 3262 | real_peer_disk = D_DISKLESS; |
3196 | } else { | 3263 | } else { |
3197 | if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) | 3264 | if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) |
3198 | return FALSE; | 3265 | return false; |
3199 | D_ASSERT(os.conn == C_WF_REPORT_PARAMS); | 3266 | D_ASSERT(os.conn == C_WF_REPORT_PARAMS); |
3200 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 3267 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
3201 | return FALSE; | 3268 | return false; |
3202 | } | 3269 | } |
3203 | } | 3270 | } |
3204 | } | 3271 | } |
@@ -3223,7 +3290,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3223 | drbd_uuid_new_current(mdev); | 3290 | drbd_uuid_new_current(mdev); |
3224 | clear_bit(NEW_CUR_UUID, &mdev->flags); | 3291 | clear_bit(NEW_CUR_UUID, &mdev->flags); |
3225 | drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0)); | 3292 | drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0)); |
3226 | return FALSE; | 3293 | return false; |
3227 | } | 3294 | } |
3228 | rv = _drbd_set_state(mdev, ns, cs_flags, NULL); | 3295 | rv = _drbd_set_state(mdev, ns, cs_flags, NULL); |
3229 | ns = mdev->state; | 3296 | ns = mdev->state; |
@@ -3231,7 +3298,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3231 | 3298 | ||
3232 | if (rv < SS_SUCCESS) { | 3299 | if (rv < SS_SUCCESS) { |
3233 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 3300 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
3234 | return FALSE; | 3301 | return false; |
3235 | } | 3302 | } |
3236 | 3303 | ||
3237 | if (os.conn > C_WF_REPORT_PARAMS) { | 3304 | if (os.conn > C_WF_REPORT_PARAMS) { |
@@ -3249,7 +3316,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3249 | 3316 | ||
3250 | drbd_md_sync(mdev); /* update connected indicator, la_size, ... */ | 3317 | drbd_md_sync(mdev); /* update connected indicator, la_size, ... */ |
3251 | 3318 | ||
3252 | return TRUE; | 3319 | return true; |
3253 | } | 3320 | } |
3254 | 3321 | ||
3255 | static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 3322 | static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
@@ -3258,6 +3325,7 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3258 | 3325 | ||
3259 | wait_event(mdev->misc_wait, | 3326 | wait_event(mdev->misc_wait, |
3260 | mdev->state.conn == C_WF_SYNC_UUID || | 3327 | mdev->state.conn == C_WF_SYNC_UUID || |
3328 | mdev->state.conn == C_BEHIND || | ||
3261 | mdev->state.conn < C_CONNECTED || | 3329 | mdev->state.conn < C_CONNECTED || |
3262 | mdev->state.disk < D_NEGOTIATING); | 3330 | mdev->state.disk < D_NEGOTIATING); |
3263 | 3331 | ||
@@ -3269,32 +3337,42 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3269 | _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); | 3337 | _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); |
3270 | _drbd_uuid_set(mdev, UI_BITMAP, 0UL); | 3338 | _drbd_uuid_set(mdev, UI_BITMAP, 0UL); |
3271 | 3339 | ||
3340 | drbd_print_uuids(mdev, "updated sync uuid"); | ||
3272 | drbd_start_resync(mdev, C_SYNC_TARGET); | 3341 | drbd_start_resync(mdev, C_SYNC_TARGET); |
3273 | 3342 | ||
3274 | put_ldev(mdev); | 3343 | put_ldev(mdev); |
3275 | } else | 3344 | } else |
3276 | dev_err(DEV, "Ignoring SyncUUID packet!\n"); | 3345 | dev_err(DEV, "Ignoring SyncUUID packet!\n"); |
3277 | 3346 | ||
3278 | return TRUE; | 3347 | return true; |
3279 | } | 3348 | } |
3280 | 3349 | ||
3281 | enum receive_bitmap_ret { OK, DONE, FAILED }; | 3350 | /** |
3282 | 3351 | * receive_bitmap_plain | |
3283 | static enum receive_bitmap_ret | 3352 | * |
3353 | * Return 0 when done, 1 when another iteration is needed, and a negative error | ||
3354 | * code upon failure. | ||
3355 | */ | ||
3356 | static int | ||
3284 | receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, | 3357 | receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, |
3285 | unsigned long *buffer, struct bm_xfer_ctx *c) | 3358 | unsigned long *buffer, struct bm_xfer_ctx *c) |
3286 | { | 3359 | { |
3287 | unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); | 3360 | unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); |
3288 | unsigned want = num_words * sizeof(long); | 3361 | unsigned want = num_words * sizeof(long); |
3362 | int err; | ||
3289 | 3363 | ||
3290 | if (want != data_size) { | 3364 | if (want != data_size) { |
3291 | dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); | 3365 | dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); |
3292 | return FAILED; | 3366 | return -EIO; |
3293 | } | 3367 | } |
3294 | if (want == 0) | 3368 | if (want == 0) |
3295 | return DONE; | 3369 | return 0; |
3296 | if (drbd_recv(mdev, buffer, want) != want) | 3370 | err = drbd_recv(mdev, buffer, want); |
3297 | return FAILED; | 3371 | if (err != want) { |
3372 | if (err >= 0) | ||
3373 | err = -EIO; | ||
3374 | return err; | ||
3375 | } | ||
3298 | 3376 | ||
3299 | drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); | 3377 | drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); |
3300 | 3378 | ||
@@ -3303,10 +3381,16 @@ receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, | |||
3303 | if (c->bit_offset > c->bm_bits) | 3381 | if (c->bit_offset > c->bm_bits) |
3304 | c->bit_offset = c->bm_bits; | 3382 | c->bit_offset = c->bm_bits; |
3305 | 3383 | ||
3306 | return OK; | 3384 | return 1; |
3307 | } | 3385 | } |
3308 | 3386 | ||
3309 | static enum receive_bitmap_ret | 3387 | /** |
3388 | * recv_bm_rle_bits | ||
3389 | * | ||
3390 | * Return 0 when done, 1 when another iteration is needed, and a negative error | ||
3391 | * code upon failure. | ||
3392 | */ | ||
3393 | static int | ||
3310 | recv_bm_rle_bits(struct drbd_conf *mdev, | 3394 | recv_bm_rle_bits(struct drbd_conf *mdev, |
3311 | struct p_compressed_bm *p, | 3395 | struct p_compressed_bm *p, |
3312 | struct bm_xfer_ctx *c) | 3396 | struct bm_xfer_ctx *c) |
@@ -3326,18 +3410,18 @@ recv_bm_rle_bits(struct drbd_conf *mdev, | |||
3326 | 3410 | ||
3327 | bits = bitstream_get_bits(&bs, &look_ahead, 64); | 3411 | bits = bitstream_get_bits(&bs, &look_ahead, 64); |
3328 | if (bits < 0) | 3412 | if (bits < 0) |
3329 | return FAILED; | 3413 | return -EIO; |
3330 | 3414 | ||
3331 | for (have = bits; have > 0; s += rl, toggle = !toggle) { | 3415 | for (have = bits; have > 0; s += rl, toggle = !toggle) { |
3332 | bits = vli_decode_bits(&rl, look_ahead); | 3416 | bits = vli_decode_bits(&rl, look_ahead); |
3333 | if (bits <= 0) | 3417 | if (bits <= 0) |
3334 | return FAILED; | 3418 | return -EIO; |
3335 | 3419 | ||
3336 | if (toggle) { | 3420 | if (toggle) { |
3337 | e = s + rl -1; | 3421 | e = s + rl -1; |
3338 | if (e >= c->bm_bits) { | 3422 | if (e >= c->bm_bits) { |
3339 | dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); | 3423 | dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); |
3340 | return FAILED; | 3424 | return -EIO; |
3341 | } | 3425 | } |
3342 | _drbd_bm_set_bits(mdev, s, e); | 3426 | _drbd_bm_set_bits(mdev, s, e); |
3343 | } | 3427 | } |
@@ -3347,14 +3431,14 @@ recv_bm_rle_bits(struct drbd_conf *mdev, | |||
3347 | have, bits, look_ahead, | 3431 | have, bits, look_ahead, |
3348 | (unsigned int)(bs.cur.b - p->code), | 3432 | (unsigned int)(bs.cur.b - p->code), |
3349 | (unsigned int)bs.buf_len); | 3433 | (unsigned int)bs.buf_len); |
3350 | return FAILED; | 3434 | return -EIO; |
3351 | } | 3435 | } |
3352 | look_ahead >>= bits; | 3436 | look_ahead >>= bits; |
3353 | have -= bits; | 3437 | have -= bits; |
3354 | 3438 | ||
3355 | bits = bitstream_get_bits(&bs, &tmp, 64 - have); | 3439 | bits = bitstream_get_bits(&bs, &tmp, 64 - have); |
3356 | if (bits < 0) | 3440 | if (bits < 0) |
3357 | return FAILED; | 3441 | return -EIO; |
3358 | look_ahead |= tmp << have; | 3442 | look_ahead |= tmp << have; |
3359 | have += bits; | 3443 | have += bits; |
3360 | } | 3444 | } |
@@ -3362,10 +3446,16 @@ recv_bm_rle_bits(struct drbd_conf *mdev, | |||
3362 | c->bit_offset = s; | 3446 | c->bit_offset = s; |
3363 | bm_xfer_ctx_bit_to_word_offset(c); | 3447 | bm_xfer_ctx_bit_to_word_offset(c); |
3364 | 3448 | ||
3365 | return (s == c->bm_bits) ? DONE : OK; | 3449 | return (s != c->bm_bits); |
3366 | } | 3450 | } |
3367 | 3451 | ||
3368 | static enum receive_bitmap_ret | 3452 | /** |
3453 | * decode_bitmap_c | ||
3454 | * | ||
3455 | * Return 0 when done, 1 when another iteration is needed, and a negative error | ||
3456 | * code upon failure. | ||
3457 | */ | ||
3458 | static int | ||
3369 | decode_bitmap_c(struct drbd_conf *mdev, | 3459 | decode_bitmap_c(struct drbd_conf *mdev, |
3370 | struct p_compressed_bm *p, | 3460 | struct p_compressed_bm *p, |
3371 | struct bm_xfer_ctx *c) | 3461 | struct bm_xfer_ctx *c) |
@@ -3379,7 +3469,7 @@ decode_bitmap_c(struct drbd_conf *mdev, | |||
3379 | 3469 | ||
3380 | dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); | 3470 | dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); |
3381 | drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); | 3471 | drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); |
3382 | return FAILED; | 3472 | return -EIO; |
3383 | } | 3473 | } |
3384 | 3474 | ||
3385 | void INFO_bm_xfer_stats(struct drbd_conf *mdev, | 3475 | void INFO_bm_xfer_stats(struct drbd_conf *mdev, |
@@ -3428,13 +3518,13 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3428 | { | 3518 | { |
3429 | struct bm_xfer_ctx c; | 3519 | struct bm_xfer_ctx c; |
3430 | void *buffer; | 3520 | void *buffer; |
3431 | enum receive_bitmap_ret ret; | 3521 | int err; |
3432 | int ok = FALSE; | 3522 | int ok = false; |
3433 | struct p_header80 *h = &mdev->data.rbuf.header.h80; | 3523 | struct p_header80 *h = &mdev->data.rbuf.header.h80; |
3434 | 3524 | ||
3435 | wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); | 3525 | drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED); |
3436 | 3526 | /* you are supposed to send additional out-of-sync information | |
3437 | drbd_bm_lock(mdev, "receive bitmap"); | 3527 | * if you actually set bits during this phase */ |
3438 | 3528 | ||
3439 | /* maybe we should use some per thread scratch page, | 3529 | /* maybe we should use some per thread scratch page, |
3440 | * and allocate that during initial device creation? */ | 3530 | * and allocate that during initial device creation? */ |
@@ -3449,9 +3539,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3449 | .bm_words = drbd_bm_words(mdev), | 3539 | .bm_words = drbd_bm_words(mdev), |
3450 | }; | 3540 | }; |
3451 | 3541 | ||
3452 | do { | 3542 | for(;;) { |
3453 | if (cmd == P_BITMAP) { | 3543 | if (cmd == P_BITMAP) { |
3454 | ret = receive_bitmap_plain(mdev, data_size, buffer, &c); | 3544 | err = receive_bitmap_plain(mdev, data_size, buffer, &c); |
3455 | } else if (cmd == P_COMPRESSED_BITMAP) { | 3545 | } else if (cmd == P_COMPRESSED_BITMAP) { |
3456 | /* MAYBE: sanity check that we speak proto >= 90, | 3546 | /* MAYBE: sanity check that we speak proto >= 90, |
3457 | * and the feature is enabled! */ | 3547 | * and the feature is enabled! */ |
@@ -3468,9 +3558,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3468 | goto out; | 3558 | goto out; |
3469 | if (data_size <= (sizeof(*p) - sizeof(p->head))) { | 3559 | if (data_size <= (sizeof(*p) - sizeof(p->head))) { |
3470 | dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); | 3560 | dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); |
3471 | return FAILED; | 3561 | goto out; |
3472 | } | 3562 | } |
3473 | ret = decode_bitmap_c(mdev, p, &c); | 3563 | err = decode_bitmap_c(mdev, p, &c); |
3474 | } else { | 3564 | } else { |
3475 | dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd); | 3565 | dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd); |
3476 | goto out; | 3566 | goto out; |
@@ -3479,24 +3569,26 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3479 | c.packets[cmd == P_BITMAP]++; | 3569 | c.packets[cmd == P_BITMAP]++; |
3480 | c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size; | 3570 | c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size; |
3481 | 3571 | ||
3482 | if (ret != OK) | 3572 | if (err <= 0) { |
3573 | if (err < 0) | ||
3574 | goto out; | ||
3483 | break; | 3575 | break; |
3484 | 3576 | } | |
3485 | if (!drbd_recv_header(mdev, &cmd, &data_size)) | 3577 | if (!drbd_recv_header(mdev, &cmd, &data_size)) |
3486 | goto out; | 3578 | goto out; |
3487 | } while (ret == OK); | 3579 | } |
3488 | if (ret == FAILED) | ||
3489 | goto out; | ||
3490 | 3580 | ||
3491 | INFO_bm_xfer_stats(mdev, "receive", &c); | 3581 | INFO_bm_xfer_stats(mdev, "receive", &c); |
3492 | 3582 | ||
3493 | if (mdev->state.conn == C_WF_BITMAP_T) { | 3583 | if (mdev->state.conn == C_WF_BITMAP_T) { |
3584 | enum drbd_state_rv rv; | ||
3585 | |||
3494 | ok = !drbd_send_bitmap(mdev); | 3586 | ok = !drbd_send_bitmap(mdev); |
3495 | if (!ok) | 3587 | if (!ok) |
3496 | goto out; | 3588 | goto out; |
3497 | /* Omit CS_ORDERED with this state transition to avoid deadlocks. */ | 3589 | /* Omit CS_ORDERED with this state transition to avoid deadlocks. */ |
3498 | ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); | 3590 | rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); |
3499 | D_ASSERT(ok == SS_SUCCESS); | 3591 | D_ASSERT(rv == SS_SUCCESS); |
3500 | } else if (mdev->state.conn != C_WF_BITMAP_S) { | 3592 | } else if (mdev->state.conn != C_WF_BITMAP_S) { |
3501 | /* admin may have requested C_DISCONNECTING, | 3593 | /* admin may have requested C_DISCONNECTING, |
3502 | * other threads may have noticed network errors */ | 3594 | * other threads may have noticed network errors */ |
@@ -3504,7 +3596,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3504 | drbd_conn_str(mdev->state.conn)); | 3596 | drbd_conn_str(mdev->state.conn)); |
3505 | } | 3597 | } |
3506 | 3598 | ||
3507 | ok = TRUE; | 3599 | ok = true; |
3508 | out: | 3600 | out: |
3509 | drbd_bm_unlock(mdev); | 3601 | drbd_bm_unlock(mdev); |
3510 | if (ok && mdev->state.conn == C_WF_BITMAP_S) | 3602 | if (ok && mdev->state.conn == C_WF_BITMAP_S) |
@@ -3538,7 +3630,26 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, u | |||
3538 | * with the data requests being unplugged */ | 3630 | * with the data requests being unplugged */ |
3539 | drbd_tcp_quickack(mdev->data.socket); | 3631 | drbd_tcp_quickack(mdev->data.socket); |
3540 | 3632 | ||
3541 | return TRUE; | 3633 | return true; |
3634 | } | ||
3635 | |||
3636 | static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | ||
3637 | { | ||
3638 | struct p_block_desc *p = &mdev->data.rbuf.block_desc; | ||
3639 | |||
3640 | switch (mdev->state.conn) { | ||
3641 | case C_WF_SYNC_UUID: | ||
3642 | case C_WF_BITMAP_T: | ||
3643 | case C_BEHIND: | ||
3644 | break; | ||
3645 | default: | ||
3646 | dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", | ||
3647 | drbd_conn_str(mdev->state.conn)); | ||
3648 | } | ||
3649 | |||
3650 | drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); | ||
3651 | |||
3652 | return true; | ||
3542 | } | 3653 | } |
3543 | 3654 | ||
3544 | typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive); | 3655 | typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive); |
@@ -3571,6 +3682,7 @@ static struct data_cmd drbd_cmd_handler[] = { | |||
3571 | [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, | 3682 | [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, |
3572 | [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, | 3683 | [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, |
3573 | [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, | 3684 | [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, |
3685 | [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, | ||
3574 | /* anything missing from this table is in | 3686 | /* anything missing from this table is in |
3575 | * the asender_tbl, see get_asender_cmd */ | 3687 | * the asender_tbl, see get_asender_cmd */ |
3576 | [P_MAX_CMD] = { 0, 0, NULL }, | 3688 | [P_MAX_CMD] = { 0, 0, NULL }, |
@@ -3610,7 +3722,8 @@ static void drbdd(struct drbd_conf *mdev) | |||
3610 | if (shs) { | 3722 | if (shs) { |
3611 | rv = drbd_recv(mdev, &header->h80.payload, shs); | 3723 | rv = drbd_recv(mdev, &header->h80.payload, shs); |
3612 | if (unlikely(rv != shs)) { | 3724 | if (unlikely(rv != shs)) { |
3613 | dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); | 3725 | if (!signal_pending(current)) |
3726 | dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv); | ||
3614 | goto err_out; | 3727 | goto err_out; |
3615 | } | 3728 | } |
3616 | } | 3729 | } |
@@ -3682,9 +3795,6 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3682 | 3795 | ||
3683 | if (mdev->state.conn == C_STANDALONE) | 3796 | if (mdev->state.conn == C_STANDALONE) |
3684 | return; | 3797 | return; |
3685 | if (mdev->state.conn >= C_WF_CONNECTION) | ||
3686 | dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n", | ||
3687 | drbd_conn_str(mdev->state.conn)); | ||
3688 | 3798 | ||
3689 | /* asender does not clean up anything. it must not interfere, either */ | 3799 | /* asender does not clean up anything. it must not interfere, either */ |
3690 | drbd_thread_stop(&mdev->asender); | 3800 | drbd_thread_stop(&mdev->asender); |
@@ -3713,6 +3823,8 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3713 | atomic_set(&mdev->rs_pending_cnt, 0); | 3823 | atomic_set(&mdev->rs_pending_cnt, 0); |
3714 | wake_up(&mdev->misc_wait); | 3824 | wake_up(&mdev->misc_wait); |
3715 | 3825 | ||
3826 | del_timer(&mdev->request_timer); | ||
3827 | |||
3716 | /* make sure syncer is stopped and w_resume_next_sg queued */ | 3828 | /* make sure syncer is stopped and w_resume_next_sg queued */ |
3717 | del_timer_sync(&mdev->resync_timer); | 3829 | del_timer_sync(&mdev->resync_timer); |
3718 | resync_timer_fn((unsigned long)mdev); | 3830 | resync_timer_fn((unsigned long)mdev); |
@@ -3758,13 +3870,6 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3758 | if (os.conn == C_DISCONNECTING) { | 3870 | if (os.conn == C_DISCONNECTING) { |
3759 | wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0); | 3871 | wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0); |
3760 | 3872 | ||
3761 | if (!is_susp(mdev->state)) { | ||
3762 | /* we must not free the tl_hash | ||
3763 | * while application io is still on the fly */ | ||
3764 | wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); | ||
3765 | drbd_free_tl_hash(mdev); | ||
3766 | } | ||
3767 | |||
3768 | crypto_free_hash(mdev->cram_hmac_tfm); | 3873 | crypto_free_hash(mdev->cram_hmac_tfm); |
3769 | mdev->cram_hmac_tfm = NULL; | 3874 | mdev->cram_hmac_tfm = NULL; |
3770 | 3875 | ||
@@ -3773,6 +3878,10 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3773 | drbd_request_state(mdev, NS(conn, C_STANDALONE)); | 3878 | drbd_request_state(mdev, NS(conn, C_STANDALONE)); |
3774 | } | 3879 | } |
3775 | 3880 | ||
3881 | /* serialize with bitmap writeout triggered by the state change, | ||
3882 | * if any. */ | ||
3883 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); | ||
3884 | |||
3776 | /* tcp_close and release of sendpage pages can be deferred. I don't | 3885 | /* tcp_close and release of sendpage pages can be deferred. I don't |
3777 | * want to use SO_LINGER, because apparently it can be deferred for | 3886 | * want to use SO_LINGER, because apparently it can be deferred for |
3778 | * more than 20 seconds (longest time I checked). | 3887 | * more than 20 seconds (longest time I checked). |
@@ -3873,7 +3982,8 @@ static int drbd_do_handshake(struct drbd_conf *mdev) | |||
3873 | rv = drbd_recv(mdev, &p->head.payload, expect); | 3982 | rv = drbd_recv(mdev, &p->head.payload, expect); |
3874 | 3983 | ||
3875 | if (rv != expect) { | 3984 | if (rv != expect) { |
3876 | dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv); | 3985 | if (!signal_pending(current)) |
3986 | dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv); | ||
3877 | return 0; | 3987 | return 0; |
3878 | } | 3988 | } |
3879 | 3989 | ||
@@ -3975,7 +4085,8 @@ static int drbd_do_auth(struct drbd_conf *mdev) | |||
3975 | rv = drbd_recv(mdev, peers_ch, length); | 4085 | rv = drbd_recv(mdev, peers_ch, length); |
3976 | 4086 | ||
3977 | if (rv != length) { | 4087 | if (rv != length) { |
3978 | dev_err(DEV, "short read AuthChallenge: l=%u\n", rv); | 4088 | if (!signal_pending(current)) |
4089 | dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv); | ||
3979 | rv = 0; | 4090 | rv = 0; |
3980 | goto fail; | 4091 | goto fail; |
3981 | } | 4092 | } |
@@ -4022,7 +4133,8 @@ static int drbd_do_auth(struct drbd_conf *mdev) | |||
4022 | rv = drbd_recv(mdev, response , resp_size); | 4133 | rv = drbd_recv(mdev, response , resp_size); |
4023 | 4134 | ||
4024 | if (rv != resp_size) { | 4135 | if (rv != resp_size) { |
4025 | dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv); | 4136 | if (!signal_pending(current)) |
4137 | dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv); | ||
4026 | rv = 0; | 4138 | rv = 0; |
4027 | goto fail; | 4139 | goto fail; |
4028 | } | 4140 | } |
@@ -4074,8 +4186,7 @@ int drbdd_init(struct drbd_thread *thi) | |||
4074 | h = drbd_connect(mdev); | 4186 | h = drbd_connect(mdev); |
4075 | if (h == 0) { | 4187 | if (h == 0) { |
4076 | drbd_disconnect(mdev); | 4188 | drbd_disconnect(mdev); |
4077 | __set_current_state(TASK_INTERRUPTIBLE); | 4189 | schedule_timeout_interruptible(HZ); |
4078 | schedule_timeout(HZ); | ||
4079 | } | 4190 | } |
4080 | if (h == -1) { | 4191 | if (h == -1) { |
4081 | dev_warn(DEV, "Discarding network configuration.\n"); | 4192 | dev_warn(DEV, "Discarding network configuration.\n"); |
@@ -4113,7 +4224,7 @@ static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h) | |||
4113 | } | 4224 | } |
4114 | wake_up(&mdev->state_wait); | 4225 | wake_up(&mdev->state_wait); |
4115 | 4226 | ||
4116 | return TRUE; | 4227 | return true; |
4117 | } | 4228 | } |
4118 | 4229 | ||
4119 | static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h) | 4230 | static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4129,7 +4240,7 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4129 | if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) | 4240 | if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) |
4130 | wake_up(&mdev->misc_wait); | 4241 | wake_up(&mdev->misc_wait); |
4131 | 4242 | ||
4132 | return TRUE; | 4243 | return true; |
4133 | } | 4244 | } |
4134 | 4245 | ||
4135 | static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) | 4246 | static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4152,7 +4263,7 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) | |||
4152 | dec_rs_pending(mdev); | 4263 | dec_rs_pending(mdev); |
4153 | atomic_add(blksize >> 9, &mdev->rs_sect_in); | 4264 | atomic_add(blksize >> 9, &mdev->rs_sect_in); |
4154 | 4265 | ||
4155 | return TRUE; | 4266 | return true; |
4156 | } | 4267 | } |
4157 | 4268 | ||
4158 | /* when we receive the ACK for a write request, | 4269 | /* when we receive the ACK for a write request, |
@@ -4176,8 +4287,6 @@ static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev, | |||
4176 | return req; | 4287 | return req; |
4177 | } | 4288 | } |
4178 | } | 4289 | } |
4179 | dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n", | ||
4180 | (void *)(unsigned long)id, (unsigned long long)sector); | ||
4181 | return NULL; | 4290 | return NULL; |
4182 | } | 4291 | } |
4183 | 4292 | ||
@@ -4195,15 +4304,17 @@ static int validate_req_change_req_state(struct drbd_conf *mdev, | |||
4195 | req = validator(mdev, id, sector); | 4304 | req = validator(mdev, id, sector); |
4196 | if (unlikely(!req)) { | 4305 | if (unlikely(!req)) { |
4197 | spin_unlock_irq(&mdev->req_lock); | 4306 | spin_unlock_irq(&mdev->req_lock); |
4198 | dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func); | 4307 | |
4199 | return FALSE; | 4308 | dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func, |
4309 | (void *)(unsigned long)id, (unsigned long long)sector); | ||
4310 | return false; | ||
4200 | } | 4311 | } |
4201 | __req_mod(req, what, &m); | 4312 | __req_mod(req, what, &m); |
4202 | spin_unlock_irq(&mdev->req_lock); | 4313 | spin_unlock_irq(&mdev->req_lock); |
4203 | 4314 | ||
4204 | if (m.bio) | 4315 | if (m.bio) |
4205 | complete_master_bio(mdev, &m); | 4316 | complete_master_bio(mdev, &m); |
4206 | return TRUE; | 4317 | return true; |
4207 | } | 4318 | } |
4208 | 4319 | ||
4209 | static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) | 4320 | static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4218,7 +4329,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4218 | if (is_syncer_block_id(p->block_id)) { | 4329 | if (is_syncer_block_id(p->block_id)) { |
4219 | drbd_set_in_sync(mdev, sector, blksize); | 4330 | drbd_set_in_sync(mdev, sector, blksize); |
4220 | dec_rs_pending(mdev); | 4331 | dec_rs_pending(mdev); |
4221 | return TRUE; | 4332 | return true; |
4222 | } | 4333 | } |
4223 | switch (be16_to_cpu(h->command)) { | 4334 | switch (be16_to_cpu(h->command)) { |
4224 | case P_RS_WRITE_ACK: | 4335 | case P_RS_WRITE_ACK: |
@@ -4239,7 +4350,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4239 | break; | 4350 | break; |
4240 | default: | 4351 | default: |
4241 | D_ASSERT(0); | 4352 | D_ASSERT(0); |
4242 | return FALSE; | 4353 | return false; |
4243 | } | 4354 | } |
4244 | 4355 | ||
4245 | return validate_req_change_req_state(mdev, p->block_id, sector, | 4356 | return validate_req_change_req_state(mdev, p->block_id, sector, |
@@ -4250,20 +4361,44 @@ static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4250 | { | 4361 | { |
4251 | struct p_block_ack *p = (struct p_block_ack *)h; | 4362 | struct p_block_ack *p = (struct p_block_ack *)h; |
4252 | sector_t sector = be64_to_cpu(p->sector); | 4363 | sector_t sector = be64_to_cpu(p->sector); |
4253 | 4364 | int size = be32_to_cpu(p->blksize); | |
4254 | if (__ratelimit(&drbd_ratelimit_state)) | 4365 | struct drbd_request *req; |
4255 | dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n"); | 4366 | struct bio_and_error m; |
4256 | 4367 | ||
4257 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); | 4368 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); |
4258 | 4369 | ||
4259 | if (is_syncer_block_id(p->block_id)) { | 4370 | if (is_syncer_block_id(p->block_id)) { |
4260 | int size = be32_to_cpu(p->blksize); | ||
4261 | dec_rs_pending(mdev); | 4371 | dec_rs_pending(mdev); |
4262 | drbd_rs_failed_io(mdev, sector, size); | 4372 | drbd_rs_failed_io(mdev, sector, size); |
4263 | return TRUE; | 4373 | return true; |
4264 | } | 4374 | } |
4265 | return validate_req_change_req_state(mdev, p->block_id, sector, | 4375 | |
4266 | _ack_id_to_req, __func__ , neg_acked); | 4376 | spin_lock_irq(&mdev->req_lock); |
4377 | req = _ack_id_to_req(mdev, p->block_id, sector); | ||
4378 | if (!req) { | ||
4379 | spin_unlock_irq(&mdev->req_lock); | ||
4380 | if (mdev->net_conf->wire_protocol == DRBD_PROT_A || | ||
4381 | mdev->net_conf->wire_protocol == DRBD_PROT_B) { | ||
4382 | /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs. | ||
4383 | The master bio might already be completed, therefore the | ||
4384 | request is no longer in the collision hash. | ||
4385 | => Do not try to validate block_id as request. */ | ||
4386 | /* In Protocol B we might already have got a P_RECV_ACK | ||
4387 | but then get a P_NEG_ACK after wards. */ | ||
4388 | drbd_set_out_of_sync(mdev, sector, size); | ||
4389 | return true; | ||
4390 | } else { | ||
4391 | dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__, | ||
4392 | (void *)(unsigned long)p->block_id, (unsigned long long)sector); | ||
4393 | return false; | ||
4394 | } | ||
4395 | } | ||
4396 | __req_mod(req, neg_acked, &m); | ||
4397 | spin_unlock_irq(&mdev->req_lock); | ||
4398 | |||
4399 | if (m.bio) | ||
4400 | complete_master_bio(mdev, &m); | ||
4401 | return true; | ||
4267 | } | 4402 | } |
4268 | 4403 | ||
4269 | static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h) | 4404 | static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4294,11 +4429,20 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h) | |||
4294 | 4429 | ||
4295 | if (get_ldev_if_state(mdev, D_FAILED)) { | 4430 | if (get_ldev_if_state(mdev, D_FAILED)) { |
4296 | drbd_rs_complete_io(mdev, sector); | 4431 | drbd_rs_complete_io(mdev, sector); |
4297 | drbd_rs_failed_io(mdev, sector, size); | 4432 | switch (be16_to_cpu(h->command)) { |
4433 | case P_NEG_RS_DREPLY: | ||
4434 | drbd_rs_failed_io(mdev, sector, size); | ||
4435 | case P_RS_CANCEL: | ||
4436 | break; | ||
4437 | default: | ||
4438 | D_ASSERT(0); | ||
4439 | put_ldev(mdev); | ||
4440 | return false; | ||
4441 | } | ||
4298 | put_ldev(mdev); | 4442 | put_ldev(mdev); |
4299 | } | 4443 | } |
4300 | 4444 | ||
4301 | return TRUE; | 4445 | return true; |
4302 | } | 4446 | } |
4303 | 4447 | ||
4304 | static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) | 4448 | static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4307,7 +4451,14 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4307 | 4451 | ||
4308 | tl_release(mdev, p->barrier, be32_to_cpu(p->set_size)); | 4452 | tl_release(mdev, p->barrier, be32_to_cpu(p->set_size)); |
4309 | 4453 | ||
4310 | return TRUE; | 4454 | if (mdev->state.conn == C_AHEAD && |
4455 | atomic_read(&mdev->ap_in_flight) == 0 && | ||
4456 | !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) { | ||
4457 | mdev->start_resync_timer.expires = jiffies + HZ; | ||
4458 | add_timer(&mdev->start_resync_timer); | ||
4459 | } | ||
4460 | |||
4461 | return true; | ||
4311 | } | 4462 | } |
4312 | 4463 | ||
4313 | static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) | 4464 | static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4328,12 +4479,18 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) | |||
4328 | ov_oos_print(mdev); | 4479 | ov_oos_print(mdev); |
4329 | 4480 | ||
4330 | if (!get_ldev(mdev)) | 4481 | if (!get_ldev(mdev)) |
4331 | return TRUE; | 4482 | return true; |
4332 | 4483 | ||
4333 | drbd_rs_complete_io(mdev, sector); | 4484 | drbd_rs_complete_io(mdev, sector); |
4334 | dec_rs_pending(mdev); | 4485 | dec_rs_pending(mdev); |
4335 | 4486 | ||
4336 | if (--mdev->ov_left == 0) { | 4487 | --mdev->ov_left; |
4488 | |||
4489 | /* let's advance progress step marks only for every other megabyte */ | ||
4490 | if ((mdev->ov_left & 0x200) == 0x200) | ||
4491 | drbd_advance_rs_marks(mdev, mdev->ov_left); | ||
4492 | |||
4493 | if (mdev->ov_left == 0) { | ||
4337 | w = kmalloc(sizeof(*w), GFP_NOIO); | 4494 | w = kmalloc(sizeof(*w), GFP_NOIO); |
4338 | if (w) { | 4495 | if (w) { |
4339 | w->cb = w_ov_finished; | 4496 | w->cb = w_ov_finished; |
@@ -4345,12 +4502,12 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) | |||
4345 | } | 4502 | } |
4346 | } | 4503 | } |
4347 | put_ldev(mdev); | 4504 | put_ldev(mdev); |
4348 | return TRUE; | 4505 | return true; |
4349 | } | 4506 | } |
4350 | 4507 | ||
4351 | static int got_skip(struct drbd_conf *mdev, struct p_header80 *h) | 4508 | static int got_skip(struct drbd_conf *mdev, struct p_header80 *h) |
4352 | { | 4509 | { |
4353 | return TRUE; | 4510 | return true; |
4354 | } | 4511 | } |
4355 | 4512 | ||
4356 | struct asender_cmd { | 4513 | struct asender_cmd { |
@@ -4378,6 +4535,7 @@ static struct asender_cmd *get_asender_cmd(int cmd) | |||
4378 | [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, | 4535 | [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, |
4379 | [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, | 4536 | [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, |
4380 | [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, | 4537 | [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, |
4538 | [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply}, | ||
4381 | [P_MAX_CMD] = { 0, NULL }, | 4539 | [P_MAX_CMD] = { 0, NULL }, |
4382 | }; | 4540 | }; |
4383 | if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) | 4541 | if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) |
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index ad3fc6228f27..5c0c8be1bb0a 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
@@ -140,9 +140,14 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev, | |||
140 | struct hlist_node *n; | 140 | struct hlist_node *n; |
141 | struct hlist_head *slot; | 141 | struct hlist_head *slot; |
142 | 142 | ||
143 | /* before we can signal completion to the upper layers, | 143 | /* Before we can signal completion to the upper layers, |
144 | * we may need to close the current epoch */ | 144 | * we may need to close the current epoch. |
145 | * We can skip this, if this request has not even been sent, because we | ||
146 | * did not have a fully established connection yet/anymore, during | ||
147 | * bitmap exchange, or while we are C_AHEAD due to congestion policy. | ||
148 | */ | ||
145 | if (mdev->state.conn >= C_CONNECTED && | 149 | if (mdev->state.conn >= C_CONNECTED && |
150 | (s & RQ_NET_SENT) != 0 && | ||
146 | req->epoch == mdev->newest_tle->br_number) | 151 | req->epoch == mdev->newest_tle->br_number) |
147 | queue_barrier(mdev); | 152 | queue_barrier(mdev); |
148 | 153 | ||
@@ -440,7 +445,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
440 | req->rq_state |= RQ_LOCAL_COMPLETED; | 445 | req->rq_state |= RQ_LOCAL_COMPLETED; |
441 | req->rq_state &= ~RQ_LOCAL_PENDING; | 446 | req->rq_state &= ~RQ_LOCAL_PENDING; |
442 | 447 | ||
443 | __drbd_chk_io_error(mdev, FALSE); | 448 | __drbd_chk_io_error(mdev, false); |
444 | _req_may_be_done_not_susp(req, m); | 449 | _req_may_be_done_not_susp(req, m); |
445 | put_ldev(mdev); | 450 | put_ldev(mdev); |
446 | break; | 451 | break; |
@@ -461,7 +466,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
461 | 466 | ||
462 | D_ASSERT(!(req->rq_state & RQ_NET_MASK)); | 467 | D_ASSERT(!(req->rq_state & RQ_NET_MASK)); |
463 | 468 | ||
464 | __drbd_chk_io_error(mdev, FALSE); | 469 | __drbd_chk_io_error(mdev, false); |
465 | put_ldev(mdev); | 470 | put_ldev(mdev); |
466 | 471 | ||
467 | /* no point in retrying if there is no good remote data, | 472 | /* no point in retrying if there is no good remote data, |
@@ -545,6 +550,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
545 | 550 | ||
546 | break; | 551 | break; |
547 | 552 | ||
553 | case queue_for_send_oos: | ||
554 | req->rq_state |= RQ_NET_QUEUED; | ||
555 | req->w.cb = w_send_oos; | ||
556 | drbd_queue_work(&mdev->data.work, &req->w); | ||
557 | break; | ||
558 | |||
559 | case oos_handed_to_network: | ||
560 | /* actually the same */ | ||
548 | case send_canceled: | 561 | case send_canceled: |
549 | /* treat it the same */ | 562 | /* treat it the same */ |
550 | case send_failed: | 563 | case send_failed: |
@@ -558,6 +571,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
558 | 571 | ||
559 | case handed_over_to_network: | 572 | case handed_over_to_network: |
560 | /* assert something? */ | 573 | /* assert something? */ |
574 | if (bio_data_dir(req->master_bio) == WRITE) | ||
575 | atomic_add(req->size>>9, &mdev->ap_in_flight); | ||
576 | |||
561 | if (bio_data_dir(req->master_bio) == WRITE && | 577 | if (bio_data_dir(req->master_bio) == WRITE && |
562 | mdev->net_conf->wire_protocol == DRBD_PROT_A) { | 578 | mdev->net_conf->wire_protocol == DRBD_PROT_A) { |
563 | /* this is what is dangerous about protocol A: | 579 | /* this is what is dangerous about protocol A: |
@@ -591,6 +607,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
591 | dec_ap_pending(mdev); | 607 | dec_ap_pending(mdev); |
592 | req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); | 608 | req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); |
593 | req->rq_state |= RQ_NET_DONE; | 609 | req->rq_state |= RQ_NET_DONE; |
610 | if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE) | ||
611 | atomic_sub(req->size>>9, &mdev->ap_in_flight); | ||
612 | |||
594 | /* if it is still queued, we may not complete it here. | 613 | /* if it is still queued, we may not complete it here. |
595 | * it will be canceled soon. */ | 614 | * it will be canceled soon. */ |
596 | if (!(req->rq_state & RQ_NET_QUEUED)) | 615 | if (!(req->rq_state & RQ_NET_QUEUED)) |
@@ -628,14 +647,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
628 | req->rq_state |= RQ_NET_OK; | 647 | req->rq_state |= RQ_NET_OK; |
629 | D_ASSERT(req->rq_state & RQ_NET_PENDING); | 648 | D_ASSERT(req->rq_state & RQ_NET_PENDING); |
630 | dec_ap_pending(mdev); | 649 | dec_ap_pending(mdev); |
650 | atomic_sub(req->size>>9, &mdev->ap_in_flight); | ||
631 | req->rq_state &= ~RQ_NET_PENDING; | 651 | req->rq_state &= ~RQ_NET_PENDING; |
632 | _req_may_be_done_not_susp(req, m); | 652 | _req_may_be_done_not_susp(req, m); |
633 | break; | 653 | break; |
634 | 654 | ||
635 | case neg_acked: | 655 | case neg_acked: |
636 | /* assert something? */ | 656 | /* assert something? */ |
637 | if (req->rq_state & RQ_NET_PENDING) | 657 | if (req->rq_state & RQ_NET_PENDING) { |
638 | dec_ap_pending(mdev); | 658 | dec_ap_pending(mdev); |
659 | atomic_sub(req->size>>9, &mdev->ap_in_flight); | ||
660 | } | ||
639 | req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); | 661 | req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); |
640 | 662 | ||
641 | req->rq_state |= RQ_NET_DONE; | 663 | req->rq_state |= RQ_NET_DONE; |
@@ -690,8 +712,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
690 | dev_err(DEV, "FIXME (barrier_acked but pending)\n"); | 712 | dev_err(DEV, "FIXME (barrier_acked but pending)\n"); |
691 | list_move(&req->tl_requests, &mdev->out_of_sequence_requests); | 713 | list_move(&req->tl_requests, &mdev->out_of_sequence_requests); |
692 | } | 714 | } |
693 | D_ASSERT(req->rq_state & RQ_NET_SENT); | 715 | if ((req->rq_state & RQ_NET_MASK) != 0) { |
694 | req->rq_state |= RQ_NET_DONE; | 716 | req->rq_state |= RQ_NET_DONE; |
717 | if (mdev->net_conf->wire_protocol == DRBD_PROT_A) | ||
718 | atomic_sub(req->size>>9, &mdev->ap_in_flight); | ||
719 | } | ||
695 | _req_may_be_done(req, m); /* Allowed while state.susp */ | 720 | _req_may_be_done(req, m); /* Allowed while state.susp */ |
696 | break; | 721 | break; |
697 | 722 | ||
@@ -738,14 +763,14 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s | |||
738 | return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr); | 763 | return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr); |
739 | } | 764 | } |
740 | 765 | ||
741 | static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) | 766 | static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) |
742 | { | 767 | { |
743 | const int rw = bio_rw(bio); | 768 | const int rw = bio_rw(bio); |
744 | const int size = bio->bi_size; | 769 | const int size = bio->bi_size; |
745 | const sector_t sector = bio->bi_sector; | 770 | const sector_t sector = bio->bi_sector; |
746 | struct drbd_tl_epoch *b = NULL; | 771 | struct drbd_tl_epoch *b = NULL; |
747 | struct drbd_request *req; | 772 | struct drbd_request *req; |
748 | int local, remote; | 773 | int local, remote, send_oos = 0; |
749 | int err = -EIO; | 774 | int err = -EIO; |
750 | int ret = 0; | 775 | int ret = 0; |
751 | 776 | ||
@@ -759,6 +784,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) | |||
759 | bio_endio(bio, -ENOMEM); | 784 | bio_endio(bio, -ENOMEM); |
760 | return 0; | 785 | return 0; |
761 | } | 786 | } |
787 | req->start_time = start_time; | ||
762 | 788 | ||
763 | local = get_ldev(mdev); | 789 | local = get_ldev(mdev); |
764 | if (!local) { | 790 | if (!local) { |
@@ -808,9 +834,9 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) | |||
808 | drbd_al_begin_io(mdev, sector); | 834 | drbd_al_begin_io(mdev, sector); |
809 | } | 835 | } |
810 | 836 | ||
811 | remote = remote && (mdev->state.pdsk == D_UP_TO_DATE || | 837 | remote = remote && drbd_should_do_remote(mdev->state); |
812 | (mdev->state.pdsk == D_INCONSISTENT && | 838 | send_oos = rw == WRITE && drbd_should_send_oos(mdev->state); |
813 | mdev->state.conn >= C_CONNECTED)); | 839 | D_ASSERT(!(remote && send_oos)); |
814 | 840 | ||
815 | if (!(local || remote) && !is_susp(mdev->state)) { | 841 | if (!(local || remote) && !is_susp(mdev->state)) { |
816 | if (__ratelimit(&drbd_ratelimit_state)) | 842 | if (__ratelimit(&drbd_ratelimit_state)) |
@@ -824,7 +850,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) | |||
824 | * but there is a race between testing the bit and pointer outside the | 850 | * but there is a race between testing the bit and pointer outside the |
825 | * spinlock, and grabbing the spinlock. | 851 | * spinlock, and grabbing the spinlock. |
826 | * if we lost that race, we retry. */ | 852 | * if we lost that race, we retry. */ |
827 | if (rw == WRITE && remote && | 853 | if (rw == WRITE && (remote || send_oos) && |
828 | mdev->unused_spare_tle == NULL && | 854 | mdev->unused_spare_tle == NULL && |
829 | test_bit(CREATE_BARRIER, &mdev->flags)) { | 855 | test_bit(CREATE_BARRIER, &mdev->flags)) { |
830 | allocate_barrier: | 856 | allocate_barrier: |
@@ -842,18 +868,19 @@ allocate_barrier: | |||
842 | if (is_susp(mdev->state)) { | 868 | if (is_susp(mdev->state)) { |
843 | /* If we got suspended, use the retry mechanism of | 869 | /* If we got suspended, use the retry mechanism of |
844 | generic_make_request() to restart processing of this | 870 | generic_make_request() to restart processing of this |
845 | bio. In the next call to drbd_make_request_26 | 871 | bio. In the next call to drbd_make_request |
846 | we sleep in inc_ap_bio() */ | 872 | we sleep in inc_ap_bio() */ |
847 | ret = 1; | 873 | ret = 1; |
848 | spin_unlock_irq(&mdev->req_lock); | 874 | spin_unlock_irq(&mdev->req_lock); |
849 | goto fail_free_complete; | 875 | goto fail_free_complete; |
850 | } | 876 | } |
851 | 877 | ||
852 | if (remote) { | 878 | if (remote || send_oos) { |
853 | remote = (mdev->state.pdsk == D_UP_TO_DATE || | 879 | remote = drbd_should_do_remote(mdev->state); |
854 | (mdev->state.pdsk == D_INCONSISTENT && | 880 | send_oos = rw == WRITE && drbd_should_send_oos(mdev->state); |
855 | mdev->state.conn >= C_CONNECTED)); | 881 | D_ASSERT(!(remote && send_oos)); |
856 | if (!remote) | 882 | |
883 | if (!(remote || send_oos)) | ||
857 | dev_warn(DEV, "lost connection while grabbing the req_lock!\n"); | 884 | dev_warn(DEV, "lost connection while grabbing the req_lock!\n"); |
858 | if (!(local || remote)) { | 885 | if (!(local || remote)) { |
859 | dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); | 886 | dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); |
@@ -866,7 +893,7 @@ allocate_barrier: | |||
866 | mdev->unused_spare_tle = b; | 893 | mdev->unused_spare_tle = b; |
867 | b = NULL; | 894 | b = NULL; |
868 | } | 895 | } |
869 | if (rw == WRITE && remote && | 896 | if (rw == WRITE && (remote || send_oos) && |
870 | mdev->unused_spare_tle == NULL && | 897 | mdev->unused_spare_tle == NULL && |
871 | test_bit(CREATE_BARRIER, &mdev->flags)) { | 898 | test_bit(CREATE_BARRIER, &mdev->flags)) { |
872 | /* someone closed the current epoch | 899 | /* someone closed the current epoch |
@@ -889,7 +916,7 @@ allocate_barrier: | |||
889 | * barrier packet. To get the write ordering right, we only have to | 916 | * barrier packet. To get the write ordering right, we only have to |
890 | * make sure that, if this is a write request and it triggered a | 917 | * make sure that, if this is a write request and it triggered a |
891 | * barrier packet, this request is queued within the same spinlock. */ | 918 | * barrier packet, this request is queued within the same spinlock. */ |
892 | if (remote && mdev->unused_spare_tle && | 919 | if ((remote || send_oos) && mdev->unused_spare_tle && |
893 | test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) { | 920 | test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) { |
894 | _tl_add_barrier(mdev, mdev->unused_spare_tle); | 921 | _tl_add_barrier(mdev, mdev->unused_spare_tle); |
895 | mdev->unused_spare_tle = NULL; | 922 | mdev->unused_spare_tle = NULL; |
@@ -937,6 +964,34 @@ allocate_barrier: | |||
937 | ? queue_for_net_write | 964 | ? queue_for_net_write |
938 | : queue_for_net_read); | 965 | : queue_for_net_read); |
939 | } | 966 | } |
967 | if (send_oos && drbd_set_out_of_sync(mdev, sector, size)) | ||
968 | _req_mod(req, queue_for_send_oos); | ||
969 | |||
970 | if (remote && | ||
971 | mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) { | ||
972 | int congested = 0; | ||
973 | |||
974 | if (mdev->net_conf->cong_fill && | ||
975 | atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) { | ||
976 | dev_info(DEV, "Congestion-fill threshold reached\n"); | ||
977 | congested = 1; | ||
978 | } | ||
979 | |||
980 | if (mdev->act_log->used >= mdev->net_conf->cong_extents) { | ||
981 | dev_info(DEV, "Congestion-extents threshold reached\n"); | ||
982 | congested = 1; | ||
983 | } | ||
984 | |||
985 | if (congested) { | ||
986 | queue_barrier(mdev); /* last barrier, after mirrored writes */ | ||
987 | |||
988 | if (mdev->net_conf->on_congestion == OC_PULL_AHEAD) | ||
989 | _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL); | ||
990 | else /*mdev->net_conf->on_congestion == OC_DISCONNECT */ | ||
991 | _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL); | ||
992 | } | ||
993 | } | ||
994 | |||
940 | spin_unlock_irq(&mdev->req_lock); | 995 | spin_unlock_irq(&mdev->req_lock); |
941 | kfree(b); /* if someone else has beaten us to it... */ | 996 | kfree(b); /* if someone else has beaten us to it... */ |
942 | 997 | ||
@@ -949,9 +1004,9 @@ allocate_barrier: | |||
949 | * stable storage, and this is a WRITE, we may not even submit | 1004 | * stable storage, and this is a WRITE, we may not even submit |
950 | * this bio. */ | 1005 | * this bio. */ |
951 | if (get_ldev(mdev)) { | 1006 | if (get_ldev(mdev)) { |
952 | if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR | 1007 | if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR |
953 | : rw == READ ? DRBD_FAULT_DT_RD | 1008 | : rw == READ ? DRBD_FAULT_DT_RD |
954 | : DRBD_FAULT_DT_RA)) | 1009 | : DRBD_FAULT_DT_RA)) |
955 | bio_endio(req->private_bio, -EIO); | 1010 | bio_endio(req->private_bio, -EIO); |
956 | else | 1011 | else |
957 | generic_make_request(req->private_bio); | 1012 | generic_make_request(req->private_bio); |
@@ -1018,16 +1073,19 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write) | |||
1018 | return 0; | 1073 | return 0; |
1019 | } | 1074 | } |
1020 | 1075 | ||
1021 | int drbd_make_request_26(struct request_queue *q, struct bio *bio) | 1076 | int drbd_make_request(struct request_queue *q, struct bio *bio) |
1022 | { | 1077 | { |
1023 | unsigned int s_enr, e_enr; | 1078 | unsigned int s_enr, e_enr; |
1024 | struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; | 1079 | struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; |
1080 | unsigned long start_time; | ||
1025 | 1081 | ||
1026 | if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) { | 1082 | if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) { |
1027 | bio_endio(bio, -EPERM); | 1083 | bio_endio(bio, -EPERM); |
1028 | return 0; | 1084 | return 0; |
1029 | } | 1085 | } |
1030 | 1086 | ||
1087 | start_time = jiffies; | ||
1088 | |||
1031 | /* | 1089 | /* |
1032 | * what we "blindly" assume: | 1090 | * what we "blindly" assume: |
1033 | */ | 1091 | */ |
@@ -1042,12 +1100,12 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1042 | 1100 | ||
1043 | if (likely(s_enr == e_enr)) { | 1101 | if (likely(s_enr == e_enr)) { |
1044 | inc_ap_bio(mdev, 1); | 1102 | inc_ap_bio(mdev, 1); |
1045 | return drbd_make_request_common(mdev, bio); | 1103 | return drbd_make_request_common(mdev, bio, start_time); |
1046 | } | 1104 | } |
1047 | 1105 | ||
1048 | /* can this bio be split generically? | 1106 | /* can this bio be split generically? |
1049 | * Maybe add our own split-arbitrary-bios function. */ | 1107 | * Maybe add our own split-arbitrary-bios function. */ |
1050 | if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_SEGMENT_SIZE) { | 1108 | if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) { |
1051 | /* rather error out here than BUG in bio_split */ | 1109 | /* rather error out here than BUG in bio_split */ |
1052 | dev_err(DEV, "bio would need to, but cannot, be split: " | 1110 | dev_err(DEV, "bio would need to, but cannot, be split: " |
1053 | "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n", | 1111 | "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n", |
@@ -1069,11 +1127,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1069 | const int sps = 1 << HT_SHIFT; /* sectors per slot */ | 1127 | const int sps = 1 << HT_SHIFT; /* sectors per slot */ |
1070 | const int mask = sps - 1; | 1128 | const int mask = sps - 1; |
1071 | const sector_t first_sectors = sps - (sect & mask); | 1129 | const sector_t first_sectors = sps - (sect & mask); |
1072 | bp = bio_split(bio, | 1130 | bp = bio_split(bio, first_sectors); |
1073 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) | ||
1074 | bio_split_pool, | ||
1075 | #endif | ||
1076 | first_sectors); | ||
1077 | 1131 | ||
1078 | /* we need to get a "reference count" (ap_bio_cnt) | 1132 | /* we need to get a "reference count" (ap_bio_cnt) |
1079 | * to avoid races with the disconnect/reconnect/suspend code. | 1133 | * to avoid races with the disconnect/reconnect/suspend code. |
@@ -1084,10 +1138,10 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1084 | 1138 | ||
1085 | D_ASSERT(e_enr == s_enr + 1); | 1139 | D_ASSERT(e_enr == s_enr + 1); |
1086 | 1140 | ||
1087 | while (drbd_make_request_common(mdev, &bp->bio1)) | 1141 | while (drbd_make_request_common(mdev, &bp->bio1, start_time)) |
1088 | inc_ap_bio(mdev, 1); | 1142 | inc_ap_bio(mdev, 1); |
1089 | 1143 | ||
1090 | while (drbd_make_request_common(mdev, &bp->bio2)) | 1144 | while (drbd_make_request_common(mdev, &bp->bio2, start_time)) |
1091 | inc_ap_bio(mdev, 1); | 1145 | inc_ap_bio(mdev, 1); |
1092 | 1146 | ||
1093 | dec_ap_bio(mdev); | 1147 | dec_ap_bio(mdev); |
@@ -1098,7 +1152,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1098 | } | 1152 | } |
1099 | 1153 | ||
1100 | /* This is called by bio_add_page(). With this function we reduce | 1154 | /* This is called by bio_add_page(). With this function we reduce |
1101 | * the number of BIOs that span over multiple DRBD_MAX_SEGMENT_SIZEs | 1155 | * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs |
1102 | * units (was AL_EXTENTs). | 1156 | * units (was AL_EXTENTs). |
1103 | * | 1157 | * |
1104 | * we do the calculation within the lower 32bit of the byte offsets, | 1158 | * we do the calculation within the lower 32bit of the byte offsets, |
@@ -1108,7 +1162,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) | |||
1108 | * As long as the BIO is empty we have to allow at least one bvec, | 1162 | * As long as the BIO is empty we have to allow at least one bvec, |
1109 | * regardless of size and offset. so the resulting bio may still | 1163 | * regardless of size and offset. so the resulting bio may still |
1110 | * cross extent boundaries. those are dealt with (bio_split) in | 1164 | * cross extent boundaries. those are dealt with (bio_split) in |
1111 | * drbd_make_request_26. | 1165 | * drbd_make_request. |
1112 | */ | 1166 | */ |
1113 | int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec) | 1167 | int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec) |
1114 | { | 1168 | { |
@@ -1118,8 +1172,8 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct | |||
1118 | unsigned int bio_size = bvm->bi_size; | 1172 | unsigned int bio_size = bvm->bi_size; |
1119 | int limit, backing_limit; | 1173 | int limit, backing_limit; |
1120 | 1174 | ||
1121 | limit = DRBD_MAX_SEGMENT_SIZE | 1175 | limit = DRBD_MAX_BIO_SIZE |
1122 | - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size); | 1176 | - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size); |
1123 | if (limit < 0) | 1177 | if (limit < 0) |
1124 | limit = 0; | 1178 | limit = 0; |
1125 | if (bio_size == 0) { | 1179 | if (bio_size == 0) { |
@@ -1136,3 +1190,42 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct | |||
1136 | } | 1190 | } |
1137 | return limit; | 1191 | return limit; |
1138 | } | 1192 | } |
1193 | |||
1194 | void request_timer_fn(unsigned long data) | ||
1195 | { | ||
1196 | struct drbd_conf *mdev = (struct drbd_conf *) data; | ||
1197 | struct drbd_request *req; /* oldest request */ | ||
1198 | struct list_head *le; | ||
1199 | unsigned long et = 0; /* effective timeout = ko_count * timeout */ | ||
1200 | |||
1201 | if (get_net_conf(mdev)) { | ||
1202 | et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count; | ||
1203 | put_net_conf(mdev); | ||
1204 | } | ||
1205 | if (!et || mdev->state.conn < C_WF_REPORT_PARAMS) | ||
1206 | return; /* Recurring timer stopped */ | ||
1207 | |||
1208 | spin_lock_irq(&mdev->req_lock); | ||
1209 | le = &mdev->oldest_tle->requests; | ||
1210 | if (list_empty(le)) { | ||
1211 | spin_unlock_irq(&mdev->req_lock); | ||
1212 | mod_timer(&mdev->request_timer, jiffies + et); | ||
1213 | return; | ||
1214 | } | ||
1215 | |||
1216 | le = le->prev; | ||
1217 | req = list_entry(le, struct drbd_request, tl_requests); | ||
1218 | if (time_is_before_eq_jiffies(req->start_time + et)) { | ||
1219 | if (req->rq_state & RQ_NET_PENDING) { | ||
1220 | dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n"); | ||
1221 | _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL); | ||
1222 | } else { | ||
1223 | dev_warn(DEV, "Local backing block device frozen?\n"); | ||
1224 | mod_timer(&mdev->request_timer, jiffies + et); | ||
1225 | } | ||
1226 | } else { | ||
1227 | mod_timer(&mdev->request_timer, req->start_time + et); | ||
1228 | } | ||
1229 | |||
1230 | spin_unlock_irq(&mdev->req_lock); | ||
1231 | } | ||
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index ab2bd09d54b4..32e2c3e6a813 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h | |||
@@ -82,14 +82,16 @@ enum drbd_req_event { | |||
82 | to_be_submitted, | 82 | to_be_submitted, |
83 | 83 | ||
84 | /* XXX yes, now I am inconsistent... | 84 | /* XXX yes, now I am inconsistent... |
85 | * these two are not "events" but "actions" | 85 | * these are not "events" but "actions" |
86 | * oh, well... */ | 86 | * oh, well... */ |
87 | queue_for_net_write, | 87 | queue_for_net_write, |
88 | queue_for_net_read, | 88 | queue_for_net_read, |
89 | queue_for_send_oos, | ||
89 | 90 | ||
90 | send_canceled, | 91 | send_canceled, |
91 | send_failed, | 92 | send_failed, |
92 | handed_over_to_network, | 93 | handed_over_to_network, |
94 | oos_handed_to_network, | ||
93 | connection_lost_while_pending, | 95 | connection_lost_while_pending, |
94 | read_retry_remote_canceled, | 96 | read_retry_remote_canceled, |
95 | recv_acked_by_peer, | 97 | recv_acked_by_peer, |
@@ -289,7 +291,6 @@ static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev, | |||
289 | req->epoch = 0; | 291 | req->epoch = 0; |
290 | req->sector = bio_src->bi_sector; | 292 | req->sector = bio_src->bi_sector; |
291 | req->size = bio_src->bi_size; | 293 | req->size = bio_src->bi_size; |
292 | req->start_time = jiffies; | ||
293 | INIT_HLIST_NODE(&req->colision); | 294 | INIT_HLIST_NODE(&req->colision); |
294 | INIT_LIST_HEAD(&req->tl_requests); | 295 | INIT_LIST_HEAD(&req->tl_requests); |
295 | INIT_LIST_HEAD(&req->w.list); | 296 | INIT_LIST_HEAD(&req->w.list); |
@@ -321,6 +322,7 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
321 | struct bio_and_error *m); | 322 | struct bio_and_error *m); |
322 | extern void complete_master_bio(struct drbd_conf *mdev, | 323 | extern void complete_master_bio(struct drbd_conf *mdev, |
323 | struct bio_and_error *m); | 324 | struct bio_and_error *m); |
325 | extern void request_timer_fn(unsigned long data); | ||
324 | 326 | ||
325 | /* use this if you don't want to deal with calling complete_master_bio() | 327 | /* use this if you don't want to deal with calling complete_master_bio() |
326 | * outside the spinlock, e.g. when walking some list on cleanup. */ | 328 | * outside the spinlock, e.g. when walking some list on cleanup. */ |
@@ -338,23 +340,43 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what) | |||
338 | return rv; | 340 | return rv; |
339 | } | 341 | } |
340 | 342 | ||
341 | /* completion of master bio is outside of spinlock. | 343 | /* completion of master bio is outside of our spinlock. |
342 | * If you need it irqsave, do it your self! | 344 | * We still may or may not be inside some irqs disabled section |
343 | * Which means: don't use from bio endio callback. */ | 345 | * of the lower level driver completion callback, so we need to |
346 | * spin_lock_irqsave here. */ | ||
344 | static inline int req_mod(struct drbd_request *req, | 347 | static inline int req_mod(struct drbd_request *req, |
345 | enum drbd_req_event what) | 348 | enum drbd_req_event what) |
346 | { | 349 | { |
350 | unsigned long flags; | ||
347 | struct drbd_conf *mdev = req->mdev; | 351 | struct drbd_conf *mdev = req->mdev; |
348 | struct bio_and_error m; | 352 | struct bio_and_error m; |
349 | int rv; | 353 | int rv; |
350 | 354 | ||
351 | spin_lock_irq(&mdev->req_lock); | 355 | spin_lock_irqsave(&mdev->req_lock, flags); |
352 | rv = __req_mod(req, what, &m); | 356 | rv = __req_mod(req, what, &m); |
353 | spin_unlock_irq(&mdev->req_lock); | 357 | spin_unlock_irqrestore(&mdev->req_lock, flags); |
354 | 358 | ||
355 | if (m.bio) | 359 | if (m.bio) |
356 | complete_master_bio(mdev, &m); | 360 | complete_master_bio(mdev, &m); |
357 | 361 | ||
358 | return rv; | 362 | return rv; |
359 | } | 363 | } |
364 | |||
365 | static inline bool drbd_should_do_remote(union drbd_state s) | ||
366 | { | ||
367 | return s.pdsk == D_UP_TO_DATE || | ||
368 | (s.pdsk >= D_INCONSISTENT && | ||
369 | s.conn >= C_WF_BITMAP_T && | ||
370 | s.conn < C_AHEAD); | ||
371 | /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T. | ||
372 | That is equivalent since before 96 IO was frozen in the C_WF_BITMAP* | ||
373 | states. */ | ||
374 | } | ||
375 | static inline bool drbd_should_send_oos(union drbd_state s) | ||
376 | { | ||
377 | return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S; | ||
378 | /* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary | ||
379 | since we enter state C_AHEAD only if proto >= 96 */ | ||
380 | } | ||
381 | |||
360 | #endif | 382 | #endif |
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c index 85179e1fb50a..c44a2a602772 100644 --- a/drivers/block/drbd/drbd_strings.c +++ b/drivers/block/drbd/drbd_strings.c | |||
@@ -48,6 +48,8 @@ static const char *drbd_conn_s_names[] = { | |||
48 | [C_PAUSED_SYNC_T] = "PausedSyncT", | 48 | [C_PAUSED_SYNC_T] = "PausedSyncT", |
49 | [C_VERIFY_S] = "VerifyS", | 49 | [C_VERIFY_S] = "VerifyS", |
50 | [C_VERIFY_T] = "VerifyT", | 50 | [C_VERIFY_T] = "VerifyT", |
51 | [C_AHEAD] = "Ahead", | ||
52 | [C_BEHIND] = "Behind", | ||
51 | }; | 53 | }; |
52 | 54 | ||
53 | static const char *drbd_role_s_names[] = { | 55 | static const char *drbd_role_s_names[] = { |
@@ -92,7 +94,7 @@ static const char *drbd_state_sw_errors[] = { | |||
92 | const char *drbd_conn_str(enum drbd_conns s) | 94 | const char *drbd_conn_str(enum drbd_conns s) |
93 | { | 95 | { |
94 | /* enums are unsigned... */ | 96 | /* enums are unsigned... */ |
95 | return s > C_PAUSED_SYNC_T ? "TOO_LARGE" : drbd_conn_s_names[s]; | 97 | return s > C_BEHIND ? "TOO_LARGE" : drbd_conn_s_names[s]; |
96 | } | 98 | } |
97 | 99 | ||
98 | const char *drbd_role_str(enum drbd_role s) | 100 | const char *drbd_role_str(enum drbd_role s) |
@@ -105,7 +107,7 @@ const char *drbd_disk_str(enum drbd_disk_state s) | |||
105 | return s > D_UP_TO_DATE ? "TOO_LARGE" : drbd_disk_s_names[s]; | 107 | return s > D_UP_TO_DATE ? "TOO_LARGE" : drbd_disk_s_names[s]; |
106 | } | 108 | } |
107 | 109 | ||
108 | const char *drbd_set_st_err_str(enum drbd_state_ret_codes err) | 110 | const char *drbd_set_st_err_str(enum drbd_state_rv err) |
109 | { | 111 | { |
110 | return err <= SS_AFTER_LAST_ERROR ? "TOO_SMALL" : | 112 | return err <= SS_AFTER_LAST_ERROR ? "TOO_SMALL" : |
111 | err > SS_TWO_PRIMARIES ? "TOO_LARGE" | 113 | err > SS_TWO_PRIMARIES ? "TOO_LARGE" |
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index e027446590d3..f7e6c92f8d03 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c | |||
@@ -39,18 +39,17 @@ | |||
39 | #include "drbd_req.h" | 39 | #include "drbd_req.h" |
40 | 40 | ||
41 | static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel); | 41 | static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel); |
42 | static int w_make_resync_request(struct drbd_conf *mdev, | ||
43 | struct drbd_work *w, int cancel); | ||
42 | 44 | ||
43 | 45 | ||
44 | 46 | ||
45 | /* defined here: | 47 | /* endio handlers: |
46 | drbd_md_io_complete | 48 | * drbd_md_io_complete (defined here) |
47 | drbd_endio_sec | 49 | * drbd_endio_pri (defined here) |
48 | drbd_endio_pri | 50 | * drbd_endio_sec (defined here) |
49 | 51 | * bm_async_io_complete (defined in drbd_bitmap.c) | |
50 | * more endio handlers: | 52 | * |
51 | atodb_endio in drbd_actlog.c | ||
52 | drbd_bm_async_io_complete in drbd_bitmap.c | ||
53 | |||
54 | * For all these callbacks, note the following: | 53 | * For all these callbacks, note the following: |
55 | * The callbacks will be called in irq context by the IDE drivers, | 54 | * The callbacks will be called in irq context by the IDE drivers, |
56 | * and in Softirqs/Tasklets/BH context by the SCSI drivers. | 55 | * and in Softirqs/Tasklets/BH context by the SCSI drivers. |
@@ -94,7 +93,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local) | |||
94 | if (list_empty(&mdev->read_ee)) | 93 | if (list_empty(&mdev->read_ee)) |
95 | wake_up(&mdev->ee_wait); | 94 | wake_up(&mdev->ee_wait); |
96 | if (test_bit(__EE_WAS_ERROR, &e->flags)) | 95 | if (test_bit(__EE_WAS_ERROR, &e->flags)) |
97 | __drbd_chk_io_error(mdev, FALSE); | 96 | __drbd_chk_io_error(mdev, false); |
98 | spin_unlock_irqrestore(&mdev->req_lock, flags); | 97 | spin_unlock_irqrestore(&mdev->req_lock, flags); |
99 | 98 | ||
100 | drbd_queue_work(&mdev->data.work, &e->w); | 99 | drbd_queue_work(&mdev->data.work, &e->w); |
@@ -137,7 +136,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo | |||
137 | : list_empty(&mdev->active_ee); | 136 | : list_empty(&mdev->active_ee); |
138 | 137 | ||
139 | if (test_bit(__EE_WAS_ERROR, &e->flags)) | 138 | if (test_bit(__EE_WAS_ERROR, &e->flags)) |
140 | __drbd_chk_io_error(mdev, FALSE); | 139 | __drbd_chk_io_error(mdev, false); |
141 | spin_unlock_irqrestore(&mdev->req_lock, flags); | 140 | spin_unlock_irqrestore(&mdev->req_lock, flags); |
142 | 141 | ||
143 | if (is_syncer_req) | 142 | if (is_syncer_req) |
@@ -163,14 +162,15 @@ void drbd_endio_sec(struct bio *bio, int error) | |||
163 | int uptodate = bio_flagged(bio, BIO_UPTODATE); | 162 | int uptodate = bio_flagged(bio, BIO_UPTODATE); |
164 | int is_write = bio_data_dir(bio) == WRITE; | 163 | int is_write = bio_data_dir(bio) == WRITE; |
165 | 164 | ||
166 | if (error) | 165 | if (error && __ratelimit(&drbd_ratelimit_state)) |
167 | dev_warn(DEV, "%s: error=%d s=%llus\n", | 166 | dev_warn(DEV, "%s: error=%d s=%llus\n", |
168 | is_write ? "write" : "read", error, | 167 | is_write ? "write" : "read", error, |
169 | (unsigned long long)e->sector); | 168 | (unsigned long long)e->sector); |
170 | if (!error && !uptodate) { | 169 | if (!error && !uptodate) { |
171 | dev_warn(DEV, "%s: setting error to -EIO s=%llus\n", | 170 | if (__ratelimit(&drbd_ratelimit_state)) |
172 | is_write ? "write" : "read", | 171 | dev_warn(DEV, "%s: setting error to -EIO s=%llus\n", |
173 | (unsigned long long)e->sector); | 172 | is_write ? "write" : "read", |
173 | (unsigned long long)e->sector); | ||
174 | /* strange behavior of some lower level drivers... | 174 | /* strange behavior of some lower level drivers... |
175 | * fail the request by clearing the uptodate flag, | 175 | * fail the request by clearing the uptodate flag, |
176 | * but do not return any error?! */ | 176 | * but do not return any error?! */ |
@@ -250,13 +250,6 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
250 | return w_send_read_req(mdev, w, 0); | 250 | return w_send_read_req(mdev, w, 0); |
251 | } | 251 | } |
252 | 252 | ||
253 | int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | ||
254 | { | ||
255 | ERR_IF(cancel) return 1; | ||
256 | dev_err(DEV, "resync inactive, but callback triggered??\n"); | ||
257 | return 1; /* Simply ignore this! */ | ||
258 | } | ||
259 | |||
260 | void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest) | 253 | void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest) |
261 | { | 254 | { |
262 | struct hash_desc desc; | 255 | struct hash_desc desc; |
@@ -355,7 +348,7 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size) | |||
355 | if (!get_ldev(mdev)) | 348 | if (!get_ldev(mdev)) |
356 | return -EIO; | 349 | return -EIO; |
357 | 350 | ||
358 | if (drbd_rs_should_slow_down(mdev)) | 351 | if (drbd_rs_should_slow_down(mdev, sector)) |
359 | goto defer; | 352 | goto defer; |
360 | 353 | ||
361 | /* GFP_TRY, because if there is no memory available right now, this may | 354 | /* GFP_TRY, because if there is no memory available right now, this may |
@@ -373,9 +366,10 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size) | |||
373 | if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0) | 366 | if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0) |
374 | return 0; | 367 | return 0; |
375 | 368 | ||
376 | /* drbd_submit_ee currently fails for one reason only: | 369 | /* If it failed because of ENOMEM, retry should help. If it failed |
377 | * not being able to allocate enough bios. | 370 | * because bio_add_page failed (probably broken lower level driver), |
378 | * Is dropping the connection going to help? */ | 371 | * retry may or may not help. |
372 | * If it does not, you may need to force disconnect. */ | ||
379 | spin_lock_irq(&mdev->req_lock); | 373 | spin_lock_irq(&mdev->req_lock); |
380 | list_del(&e->w.list); | 374 | list_del(&e->w.list); |
381 | spin_unlock_irq(&mdev->req_lock); | 375 | spin_unlock_irq(&mdev->req_lock); |
@@ -386,26 +380,25 @@ defer: | |||
386 | return -EAGAIN; | 380 | return -EAGAIN; |
387 | } | 381 | } |
388 | 382 | ||
389 | void resync_timer_fn(unsigned long data) | 383 | int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel) |
390 | { | 384 | { |
391 | struct drbd_conf *mdev = (struct drbd_conf *) data; | ||
392 | int queue; | ||
393 | |||
394 | queue = 1; | ||
395 | switch (mdev->state.conn) { | 385 | switch (mdev->state.conn) { |
396 | case C_VERIFY_S: | 386 | case C_VERIFY_S: |
397 | mdev->resync_work.cb = w_make_ov_request; | 387 | w_make_ov_request(mdev, w, cancel); |
398 | break; | 388 | break; |
399 | case C_SYNC_TARGET: | 389 | case C_SYNC_TARGET: |
400 | mdev->resync_work.cb = w_make_resync_request; | 390 | w_make_resync_request(mdev, w, cancel); |
401 | break; | 391 | break; |
402 | default: | ||
403 | queue = 0; | ||
404 | mdev->resync_work.cb = w_resync_inactive; | ||
405 | } | 392 | } |
406 | 393 | ||
407 | /* harmless race: list_empty outside data.work.q_lock */ | 394 | return 1; |
408 | if (list_empty(&mdev->resync_work.list) && queue) | 395 | } |
396 | |||
397 | void resync_timer_fn(unsigned long data) | ||
398 | { | ||
399 | struct drbd_conf *mdev = (struct drbd_conf *) data; | ||
400 | |||
401 | if (list_empty(&mdev->resync_work.list)) | ||
409 | drbd_queue_work(&mdev->data.work, &mdev->resync_work); | 402 | drbd_queue_work(&mdev->data.work, &mdev->resync_work); |
410 | } | 403 | } |
411 | 404 | ||
@@ -438,7 +431,7 @@ static void fifo_add_val(struct fifo_buffer *fb, int value) | |||
438 | fb->values[i] += value; | 431 | fb->values[i] += value; |
439 | } | 432 | } |
440 | 433 | ||
441 | int drbd_rs_controller(struct drbd_conf *mdev) | 434 | static int drbd_rs_controller(struct drbd_conf *mdev) |
442 | { | 435 | { |
443 | unsigned int sect_in; /* Number of sectors that came in since the last turn */ | 436 | unsigned int sect_in; /* Number of sectors that came in since the last turn */ |
444 | unsigned int want; /* The number of sectors we want in the proxy */ | 437 | unsigned int want; /* The number of sectors we want in the proxy */ |
@@ -492,29 +485,36 @@ int drbd_rs_controller(struct drbd_conf *mdev) | |||
492 | return req_sect; | 485 | return req_sect; |
493 | } | 486 | } |
494 | 487 | ||
495 | int w_make_resync_request(struct drbd_conf *mdev, | 488 | static int drbd_rs_number_requests(struct drbd_conf *mdev) |
496 | struct drbd_work *w, int cancel) | 489 | { |
490 | int number; | ||
491 | if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */ | ||
492 | number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9); | ||
493 | mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; | ||
494 | } else { | ||
495 | mdev->c_sync_rate = mdev->sync_conf.rate; | ||
496 | number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); | ||
497 | } | ||
498 | |||
499 | /* ignore the amount of pending requests, the resync controller should | ||
500 | * throttle down to incoming reply rate soon enough anyways. */ | ||
501 | return number; | ||
502 | } | ||
503 | |||
504 | static int w_make_resync_request(struct drbd_conf *mdev, | ||
505 | struct drbd_work *w, int cancel) | ||
497 | { | 506 | { |
498 | unsigned long bit; | 507 | unsigned long bit; |
499 | sector_t sector; | 508 | sector_t sector; |
500 | const sector_t capacity = drbd_get_capacity(mdev->this_bdev); | 509 | const sector_t capacity = drbd_get_capacity(mdev->this_bdev); |
501 | int max_segment_size; | 510 | int max_bio_size; |
502 | int number, rollback_i, size, pe, mx; | 511 | int number, rollback_i, size; |
503 | int align, queued, sndbuf; | 512 | int align, queued, sndbuf; |
504 | int i = 0; | 513 | int i = 0; |
505 | 514 | ||
506 | if (unlikely(cancel)) | 515 | if (unlikely(cancel)) |
507 | return 1; | 516 | return 1; |
508 | 517 | ||
509 | if (unlikely(mdev->state.conn < C_CONNECTED)) { | ||
510 | dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected"); | ||
511 | return 0; | ||
512 | } | ||
513 | |||
514 | if (mdev->state.conn != C_SYNC_TARGET) | ||
515 | dev_err(DEV, "%s in w_make_resync_request\n", | ||
516 | drbd_conn_str(mdev->state.conn)); | ||
517 | |||
518 | if (mdev->rs_total == 0) { | 518 | if (mdev->rs_total == 0) { |
519 | /* empty resync? */ | 519 | /* empty resync? */ |
520 | drbd_resync_finished(mdev); | 520 | drbd_resync_finished(mdev); |
@@ -527,49 +527,19 @@ int w_make_resync_request(struct drbd_conf *mdev, | |||
527 | to continue resync with a broken disk makes no sense at | 527 | to continue resync with a broken disk makes no sense at |
528 | all */ | 528 | all */ |
529 | dev_err(DEV, "Disk broke down during resync!\n"); | 529 | dev_err(DEV, "Disk broke down during resync!\n"); |
530 | mdev->resync_work.cb = w_resync_inactive; | ||
531 | return 1; | 530 | return 1; |
532 | } | 531 | } |
533 | 532 | ||
534 | /* starting with drbd 8.3.8, we can handle multi-bio EEs, | 533 | /* starting with drbd 8.3.8, we can handle multi-bio EEs, |
535 | * if it should be necessary */ | 534 | * if it should be necessary */ |
536 | max_segment_size = | 535 | max_bio_size = |
537 | mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) : | 536 | mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 : |
538 | mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE; | 537 | mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE; |
539 | 538 | ||
540 | if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */ | 539 | number = drbd_rs_number_requests(mdev); |
541 | number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9); | 540 | if (number == 0) |
542 | mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; | ||
543 | } else { | ||
544 | mdev->c_sync_rate = mdev->sync_conf.rate; | ||
545 | number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); | ||
546 | } | ||
547 | |||
548 | /* Throttle resync on lower level disk activity, which may also be | ||
549 | * caused by application IO on Primary/SyncTarget. | ||
550 | * Keep this after the call to drbd_rs_controller, as that assumes | ||
551 | * to be called as precisely as possible every SLEEP_TIME, | ||
552 | * and would be confused otherwise. */ | ||
553 | if (drbd_rs_should_slow_down(mdev)) | ||
554 | goto requeue; | 541 | goto requeue; |
555 | 542 | ||
556 | mutex_lock(&mdev->data.mutex); | ||
557 | if (mdev->data.socket) | ||
558 | mx = mdev->data.socket->sk->sk_rcvbuf / sizeof(struct p_block_req); | ||
559 | else | ||
560 | mx = 1; | ||
561 | mutex_unlock(&mdev->data.mutex); | ||
562 | |||
563 | /* For resync rates >160MB/sec, allow more pending RS requests */ | ||
564 | if (number > mx) | ||
565 | mx = number; | ||
566 | |||
567 | /* Limit the number of pending RS requests to no more than the peer's receive buffer */ | ||
568 | pe = atomic_read(&mdev->rs_pending_cnt); | ||
569 | if ((pe + number) > mx) { | ||
570 | number = mx - pe; | ||
571 | } | ||
572 | |||
573 | for (i = 0; i < number; i++) { | 543 | for (i = 0; i < number; i++) { |
574 | /* Stop generating RS requests, when half of the send buffer is filled */ | 544 | /* Stop generating RS requests, when half of the send buffer is filled */ |
575 | mutex_lock(&mdev->data.mutex); | 545 | mutex_lock(&mdev->data.mutex); |
@@ -588,16 +558,16 @@ next_sector: | |||
588 | size = BM_BLOCK_SIZE; | 558 | size = BM_BLOCK_SIZE; |
589 | bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo); | 559 | bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo); |
590 | 560 | ||
591 | if (bit == -1UL) { | 561 | if (bit == DRBD_END_OF_BITMAP) { |
592 | mdev->bm_resync_fo = drbd_bm_bits(mdev); | 562 | mdev->bm_resync_fo = drbd_bm_bits(mdev); |
593 | mdev->resync_work.cb = w_resync_inactive; | ||
594 | put_ldev(mdev); | 563 | put_ldev(mdev); |
595 | return 1; | 564 | return 1; |
596 | } | 565 | } |
597 | 566 | ||
598 | sector = BM_BIT_TO_SECT(bit); | 567 | sector = BM_BIT_TO_SECT(bit); |
599 | 568 | ||
600 | if (drbd_try_rs_begin_io(mdev, sector)) { | 569 | if (drbd_rs_should_slow_down(mdev, sector) || |
570 | drbd_try_rs_begin_io(mdev, sector)) { | ||
601 | mdev->bm_resync_fo = bit; | 571 | mdev->bm_resync_fo = bit; |
602 | goto requeue; | 572 | goto requeue; |
603 | } | 573 | } |
@@ -608,7 +578,7 @@ next_sector: | |||
608 | goto next_sector; | 578 | goto next_sector; |
609 | } | 579 | } |
610 | 580 | ||
611 | #if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE | 581 | #if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE |
612 | /* try to find some adjacent bits. | 582 | /* try to find some adjacent bits. |
613 | * we stop if we have already the maximum req size. | 583 | * we stop if we have already the maximum req size. |
614 | * | 584 | * |
@@ -618,7 +588,7 @@ next_sector: | |||
618 | align = 1; | 588 | align = 1; |
619 | rollback_i = i; | 589 | rollback_i = i; |
620 | for (;;) { | 590 | for (;;) { |
621 | if (size + BM_BLOCK_SIZE > max_segment_size) | 591 | if (size + BM_BLOCK_SIZE > max_bio_size) |
622 | break; | 592 | break; |
623 | 593 | ||
624 | /* Be always aligned */ | 594 | /* Be always aligned */ |
@@ -685,7 +655,6 @@ next_sector: | |||
685 | * resync data block, and the last bit is cleared. | 655 | * resync data block, and the last bit is cleared. |
686 | * until then resync "work" is "inactive" ... | 656 | * until then resync "work" is "inactive" ... |
687 | */ | 657 | */ |
688 | mdev->resync_work.cb = w_resync_inactive; | ||
689 | put_ldev(mdev); | 658 | put_ldev(mdev); |
690 | return 1; | 659 | return 1; |
691 | } | 660 | } |
@@ -706,27 +675,18 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca | |||
706 | if (unlikely(cancel)) | 675 | if (unlikely(cancel)) |
707 | return 1; | 676 | return 1; |
708 | 677 | ||
709 | if (unlikely(mdev->state.conn < C_CONNECTED)) { | 678 | number = drbd_rs_number_requests(mdev); |
710 | dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected"); | ||
711 | return 0; | ||
712 | } | ||
713 | |||
714 | number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ); | ||
715 | if (atomic_read(&mdev->rs_pending_cnt) > number) | ||
716 | goto requeue; | ||
717 | |||
718 | number -= atomic_read(&mdev->rs_pending_cnt); | ||
719 | 679 | ||
720 | sector = mdev->ov_position; | 680 | sector = mdev->ov_position; |
721 | for (i = 0; i < number; i++) { | 681 | for (i = 0; i < number; i++) { |
722 | if (sector >= capacity) { | 682 | if (sector >= capacity) { |
723 | mdev->resync_work.cb = w_resync_inactive; | ||
724 | return 1; | 683 | return 1; |
725 | } | 684 | } |
726 | 685 | ||
727 | size = BM_BLOCK_SIZE; | 686 | size = BM_BLOCK_SIZE; |
728 | 687 | ||
729 | if (drbd_try_rs_begin_io(mdev, sector)) { | 688 | if (drbd_rs_should_slow_down(mdev, sector) || |
689 | drbd_try_rs_begin_io(mdev, sector)) { | ||
730 | mdev->ov_position = sector; | 690 | mdev->ov_position = sector; |
731 | goto requeue; | 691 | goto requeue; |
732 | } | 692 | } |
@@ -744,11 +704,33 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca | |||
744 | mdev->ov_position = sector; | 704 | mdev->ov_position = sector; |
745 | 705 | ||
746 | requeue: | 706 | requeue: |
707 | mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9)); | ||
747 | mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME); | 708 | mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME); |
748 | return 1; | 709 | return 1; |
749 | } | 710 | } |
750 | 711 | ||
751 | 712 | ||
713 | void start_resync_timer_fn(unsigned long data) | ||
714 | { | ||
715 | struct drbd_conf *mdev = (struct drbd_conf *) data; | ||
716 | |||
717 | drbd_queue_work(&mdev->data.work, &mdev->start_resync_work); | ||
718 | } | ||
719 | |||
720 | int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | ||
721 | { | ||
722 | if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) { | ||
723 | dev_warn(DEV, "w_start_resync later...\n"); | ||
724 | mdev->start_resync_timer.expires = jiffies + HZ/10; | ||
725 | add_timer(&mdev->start_resync_timer); | ||
726 | return 1; | ||
727 | } | ||
728 | |||
729 | drbd_start_resync(mdev, C_SYNC_SOURCE); | ||
730 | clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags); | ||
731 | return 1; | ||
732 | } | ||
733 | |||
752 | int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | 734 | int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel) |
753 | { | 735 | { |
754 | kfree(w); | 736 | kfree(w); |
@@ -782,6 +764,7 @@ int drbd_resync_finished(struct drbd_conf *mdev) | |||
782 | union drbd_state os, ns; | 764 | union drbd_state os, ns; |
783 | struct drbd_work *w; | 765 | struct drbd_work *w; |
784 | char *khelper_cmd = NULL; | 766 | char *khelper_cmd = NULL; |
767 | int verify_done = 0; | ||
785 | 768 | ||
786 | /* Remove all elements from the resync LRU. Since future actions | 769 | /* Remove all elements from the resync LRU. Since future actions |
787 | * might set bits in the (main) bitmap, then the entries in the | 770 | * might set bits in the (main) bitmap, then the entries in the |
@@ -792,8 +775,7 @@ int drbd_resync_finished(struct drbd_conf *mdev) | |||
792 | * queue (or even the read operations for those packets | 775 | * queue (or even the read operations for those packets |
793 | * is not finished by now). Retry in 100ms. */ | 776 | * is not finished by now). Retry in 100ms. */ |
794 | 777 | ||
795 | __set_current_state(TASK_INTERRUPTIBLE); | 778 | schedule_timeout_interruptible(HZ / 10); |
796 | schedule_timeout(HZ / 10); | ||
797 | w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); | 779 | w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); |
798 | if (w) { | 780 | if (w) { |
799 | w->cb = w_resync_finished; | 781 | w->cb = w_resync_finished; |
@@ -818,6 +800,8 @@ int drbd_resync_finished(struct drbd_conf *mdev) | |||
818 | spin_lock_irq(&mdev->req_lock); | 800 | spin_lock_irq(&mdev->req_lock); |
819 | os = mdev->state; | 801 | os = mdev->state; |
820 | 802 | ||
803 | verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T); | ||
804 | |||
821 | /* This protects us against multiple calls (that can happen in the presence | 805 | /* This protects us against multiple calls (that can happen in the presence |
822 | of application IO), and against connectivity loss just before we arrive here. */ | 806 | of application IO), and against connectivity loss just before we arrive here. */ |
823 | if (os.conn <= C_CONNECTED) | 807 | if (os.conn <= C_CONNECTED) |
@@ -827,8 +811,7 @@ int drbd_resync_finished(struct drbd_conf *mdev) | |||
827 | ns.conn = C_CONNECTED; | 811 | ns.conn = C_CONNECTED; |
828 | 812 | ||
829 | dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n", | 813 | dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n", |
830 | (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) ? | 814 | verify_done ? "Online verify " : "Resync", |
831 | "Online verify " : "Resync", | ||
832 | dt + mdev->rs_paused, mdev->rs_paused, dbdt); | 815 | dt + mdev->rs_paused, mdev->rs_paused, dbdt); |
833 | 816 | ||
834 | n_oos = drbd_bm_total_weight(mdev); | 817 | n_oos = drbd_bm_total_weight(mdev); |
@@ -886,14 +869,18 @@ int drbd_resync_finished(struct drbd_conf *mdev) | |||
886 | } | 869 | } |
887 | } | 870 | } |
888 | 871 | ||
889 | drbd_uuid_set_bm(mdev, 0UL); | 872 | if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) { |
890 | 873 | /* for verify runs, we don't update uuids here, | |
891 | if (mdev->p_uuid) { | 874 | * so there would be nothing to report. */ |
892 | /* Now the two UUID sets are equal, update what we | 875 | drbd_uuid_set_bm(mdev, 0UL); |
893 | * know of the peer. */ | 876 | drbd_print_uuids(mdev, "updated UUIDs"); |
894 | int i; | 877 | if (mdev->p_uuid) { |
895 | for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++) | 878 | /* Now the two UUID sets are equal, update what we |
896 | mdev->p_uuid[i] = mdev->ldev->md.uuid[i]; | 879 | * know of the peer. */ |
880 | int i; | ||
881 | for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++) | ||
882 | mdev->p_uuid[i] = mdev->ldev->md.uuid[i]; | ||
883 | } | ||
897 | } | 884 | } |
898 | } | 885 | } |
899 | 886 | ||
@@ -905,15 +892,11 @@ out: | |||
905 | mdev->rs_total = 0; | 892 | mdev->rs_total = 0; |
906 | mdev->rs_failed = 0; | 893 | mdev->rs_failed = 0; |
907 | mdev->rs_paused = 0; | 894 | mdev->rs_paused = 0; |
908 | mdev->ov_start_sector = 0; | 895 | if (verify_done) |
896 | mdev->ov_start_sector = 0; | ||
909 | 897 | ||
910 | drbd_md_sync(mdev); | 898 | drbd_md_sync(mdev); |
911 | 899 | ||
912 | if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) { | ||
913 | dev_info(DEV, "Writing the whole bitmap\n"); | ||
914 | drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); | ||
915 | } | ||
916 | |||
917 | if (khelper_cmd) | 900 | if (khelper_cmd) |
918 | drbd_khelper(mdev, khelper_cmd); | 901 | drbd_khelper(mdev, khelper_cmd); |
919 | 902 | ||
@@ -994,7 +977,9 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
994 | put_ldev(mdev); | 977 | put_ldev(mdev); |
995 | } | 978 | } |
996 | 979 | ||
997 | if (likely((e->flags & EE_WAS_ERROR) == 0)) { | 980 | if (mdev->state.conn == C_AHEAD) { |
981 | ok = drbd_send_ack(mdev, P_RS_CANCEL, e); | ||
982 | } else if (likely((e->flags & EE_WAS_ERROR) == 0)) { | ||
998 | if (likely(mdev->state.pdsk >= D_INCONSISTENT)) { | 983 | if (likely(mdev->state.pdsk >= D_INCONSISTENT)) { |
999 | inc_rs_pending(mdev); | 984 | inc_rs_pending(mdev); |
1000 | ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e); | 985 | ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e); |
@@ -1096,25 +1081,27 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1096 | if (unlikely(cancel)) | 1081 | if (unlikely(cancel)) |
1097 | goto out; | 1082 | goto out; |
1098 | 1083 | ||
1099 | if (unlikely((e->flags & EE_WAS_ERROR) != 0)) | ||
1100 | goto out; | ||
1101 | |||
1102 | digest_size = crypto_hash_digestsize(mdev->verify_tfm); | 1084 | digest_size = crypto_hash_digestsize(mdev->verify_tfm); |
1103 | /* FIXME if this allocation fails, online verify will not terminate! */ | ||
1104 | digest = kmalloc(digest_size, GFP_NOIO); | 1085 | digest = kmalloc(digest_size, GFP_NOIO); |
1105 | if (digest) { | 1086 | if (!digest) { |
1106 | drbd_csum_ee(mdev, mdev->verify_tfm, e, digest); | 1087 | ok = 0; /* terminate the connection in case the allocation failed */ |
1107 | inc_rs_pending(mdev); | 1088 | goto out; |
1108 | ok = drbd_send_drequest_csum(mdev, e->sector, e->size, | ||
1109 | digest, digest_size, P_OV_REPLY); | ||
1110 | if (!ok) | ||
1111 | dec_rs_pending(mdev); | ||
1112 | kfree(digest); | ||
1113 | } | 1089 | } |
1114 | 1090 | ||
1091 | if (likely(!(e->flags & EE_WAS_ERROR))) | ||
1092 | drbd_csum_ee(mdev, mdev->verify_tfm, e, digest); | ||
1093 | else | ||
1094 | memset(digest, 0, digest_size); | ||
1095 | |||
1096 | inc_rs_pending(mdev); | ||
1097 | ok = drbd_send_drequest_csum(mdev, e->sector, e->size, | ||
1098 | digest, digest_size, P_OV_REPLY); | ||
1099 | if (!ok) | ||
1100 | dec_rs_pending(mdev); | ||
1101 | kfree(digest); | ||
1102 | |||
1115 | out: | 1103 | out: |
1116 | drbd_free_ee(mdev, e); | 1104 | drbd_free_ee(mdev, e); |
1117 | |||
1118 | dec_unacked(mdev); | 1105 | dec_unacked(mdev); |
1119 | 1106 | ||
1120 | return ok; | 1107 | return ok; |
@@ -1129,7 +1116,6 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size) | |||
1129 | mdev->ov_last_oos_size = size>>9; | 1116 | mdev->ov_last_oos_size = size>>9; |
1130 | } | 1117 | } |
1131 | drbd_set_out_of_sync(mdev, sector, size); | 1118 | drbd_set_out_of_sync(mdev, sector, size); |
1132 | set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags); | ||
1133 | } | 1119 | } |
1134 | 1120 | ||
1135 | int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | 1121 | int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) |
@@ -1165,10 +1151,6 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1165 | eq = !memcmp(digest, di->digest, digest_size); | 1151 | eq = !memcmp(digest, di->digest, digest_size); |
1166 | kfree(digest); | 1152 | kfree(digest); |
1167 | } | 1153 | } |
1168 | } else { | ||
1169 | ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e); | ||
1170 | if (__ratelimit(&drbd_ratelimit_state)) | ||
1171 | dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n"); | ||
1172 | } | 1154 | } |
1173 | 1155 | ||
1174 | dec_unacked(mdev); | 1156 | dec_unacked(mdev); |
@@ -1182,7 +1164,13 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1182 | 1164 | ||
1183 | drbd_free_ee(mdev, e); | 1165 | drbd_free_ee(mdev, e); |
1184 | 1166 | ||
1185 | if (--mdev->ov_left == 0) { | 1167 | --mdev->ov_left; |
1168 | |||
1169 | /* let's advance progress step marks only for every other megabyte */ | ||
1170 | if ((mdev->ov_left & 0x200) == 0x200) | ||
1171 | drbd_advance_rs_marks(mdev, mdev->ov_left); | ||
1172 | |||
1173 | if (mdev->ov_left == 0) { | ||
1186 | ov_oos_print(mdev); | 1174 | ov_oos_print(mdev); |
1187 | drbd_resync_finished(mdev); | 1175 | drbd_resync_finished(mdev); |
1188 | } | 1176 | } |
@@ -1235,6 +1223,22 @@ int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | |||
1235 | return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE); | 1223 | return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE); |
1236 | } | 1224 | } |
1237 | 1225 | ||
1226 | int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel) | ||
1227 | { | ||
1228 | struct drbd_request *req = container_of(w, struct drbd_request, w); | ||
1229 | int ok; | ||
1230 | |||
1231 | if (unlikely(cancel)) { | ||
1232 | req_mod(req, send_canceled); | ||
1233 | return 1; | ||
1234 | } | ||
1235 | |||
1236 | ok = drbd_send_oos(mdev, req); | ||
1237 | req_mod(req, oos_handed_to_network); | ||
1238 | |||
1239 | return ok; | ||
1240 | } | ||
1241 | |||
1238 | /** | 1242 | /** |
1239 | * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request | 1243 | * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request |
1240 | * @mdev: DRBD device. | 1244 | * @mdev: DRBD device. |
@@ -1430,6 +1434,17 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na) | |||
1430 | return retcode; | 1434 | return retcode; |
1431 | } | 1435 | } |
1432 | 1436 | ||
1437 | void drbd_rs_controller_reset(struct drbd_conf *mdev) | ||
1438 | { | ||
1439 | atomic_set(&mdev->rs_sect_in, 0); | ||
1440 | atomic_set(&mdev->rs_sect_ev, 0); | ||
1441 | mdev->rs_in_flight = 0; | ||
1442 | mdev->rs_planed = 0; | ||
1443 | spin_lock(&mdev->peer_seq_lock); | ||
1444 | fifo_set(&mdev->rs_plan_s, 0); | ||
1445 | spin_unlock(&mdev->peer_seq_lock); | ||
1446 | } | ||
1447 | |||
1433 | /** | 1448 | /** |
1434 | * drbd_start_resync() - Start the resync process | 1449 | * drbd_start_resync() - Start the resync process |
1435 | * @mdev: DRBD device. | 1450 | * @mdev: DRBD device. |
@@ -1443,13 +1458,18 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
1443 | union drbd_state ns; | 1458 | union drbd_state ns; |
1444 | int r; | 1459 | int r; |
1445 | 1460 | ||
1446 | if (mdev->state.conn >= C_SYNC_SOURCE) { | 1461 | if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) { |
1447 | dev_err(DEV, "Resync already running!\n"); | 1462 | dev_err(DEV, "Resync already running!\n"); |
1448 | return; | 1463 | return; |
1449 | } | 1464 | } |
1450 | 1465 | ||
1451 | /* In case a previous resync run was aborted by an IO error/detach on the peer. */ | 1466 | if (mdev->state.conn < C_AHEAD) { |
1452 | drbd_rs_cancel_all(mdev); | 1467 | /* In case a previous resync run was aborted by an IO error/detach on the peer. */ |
1468 | drbd_rs_cancel_all(mdev); | ||
1469 | /* This should be done when we abort the resync. We definitely do not | ||
1470 | want to have this for connections going back and forth between | ||
1471 | Ahead/Behind and SyncSource/SyncTarget */ | ||
1472 | } | ||
1453 | 1473 | ||
1454 | if (side == C_SYNC_TARGET) { | 1474 | if (side == C_SYNC_TARGET) { |
1455 | /* Since application IO was locked out during C_WF_BITMAP_T and | 1475 | /* Since application IO was locked out during C_WF_BITMAP_T and |
@@ -1463,6 +1483,20 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
1463 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 1483 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
1464 | return; | 1484 | return; |
1465 | } | 1485 | } |
1486 | } else /* C_SYNC_SOURCE */ { | ||
1487 | r = drbd_khelper(mdev, "before-resync-source"); | ||
1488 | r = (r >> 8) & 0xff; | ||
1489 | if (r > 0) { | ||
1490 | if (r == 3) { | ||
1491 | dev_info(DEV, "before-resync-source handler returned %d, " | ||
1492 | "ignoring. Old userland tools?", r); | ||
1493 | } else { | ||
1494 | dev_info(DEV, "before-resync-source handler returned %d, " | ||
1495 | "dropping connection.\n", r); | ||
1496 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | ||
1497 | return; | ||
1498 | } | ||
1499 | } | ||
1466 | } | 1500 | } |
1467 | 1501 | ||
1468 | drbd_state_lock(mdev); | 1502 | drbd_state_lock(mdev); |
@@ -1472,18 +1506,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
1472 | return; | 1506 | return; |
1473 | } | 1507 | } |
1474 | 1508 | ||
1475 | if (side == C_SYNC_TARGET) { | ||
1476 | mdev->bm_resync_fo = 0; | ||
1477 | } else /* side == C_SYNC_SOURCE */ { | ||
1478 | u64 uuid; | ||
1479 | |||
1480 | get_random_bytes(&uuid, sizeof(u64)); | ||
1481 | drbd_uuid_set(mdev, UI_BITMAP, uuid); | ||
1482 | drbd_send_sync_uuid(mdev, uuid); | ||
1483 | |||
1484 | D_ASSERT(mdev->state.disk == D_UP_TO_DATE); | ||
1485 | } | ||
1486 | |||
1487 | write_lock_irq(&global_state_lock); | 1509 | write_lock_irq(&global_state_lock); |
1488 | ns = mdev->state; | 1510 | ns = mdev->state; |
1489 | 1511 | ||
@@ -1521,13 +1543,24 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
1521 | _drbd_pause_after(mdev); | 1543 | _drbd_pause_after(mdev); |
1522 | } | 1544 | } |
1523 | write_unlock_irq(&global_state_lock); | 1545 | write_unlock_irq(&global_state_lock); |
1524 | put_ldev(mdev); | ||
1525 | 1546 | ||
1526 | if (r == SS_SUCCESS) { | 1547 | if (r == SS_SUCCESS) { |
1527 | dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", | 1548 | dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", |
1528 | drbd_conn_str(ns.conn), | 1549 | drbd_conn_str(ns.conn), |
1529 | (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10), | 1550 | (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10), |
1530 | (unsigned long) mdev->rs_total); | 1551 | (unsigned long) mdev->rs_total); |
1552 | if (side == C_SYNC_TARGET) | ||
1553 | mdev->bm_resync_fo = 0; | ||
1554 | |||
1555 | /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid | ||
1556 | * with w_send_oos, or the sync target will get confused as to | ||
1557 | * how much bits to resync. We cannot do that always, because for an | ||
1558 | * empty resync and protocol < 95, we need to do it here, as we call | ||
1559 | * drbd_resync_finished from here in that case. | ||
1560 | * We drbd_gen_and_send_sync_uuid here for protocol < 96, | ||
1561 | * and from after_state_ch otherwise. */ | ||
1562 | if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96) | ||
1563 | drbd_gen_and_send_sync_uuid(mdev); | ||
1531 | 1564 | ||
1532 | if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) { | 1565 | if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) { |
1533 | /* This still has a race (about when exactly the peers | 1566 | /* This still has a race (about when exactly the peers |
@@ -1547,13 +1580,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
1547 | drbd_resync_finished(mdev); | 1580 | drbd_resync_finished(mdev); |
1548 | } | 1581 | } |
1549 | 1582 | ||
1550 | atomic_set(&mdev->rs_sect_in, 0); | 1583 | drbd_rs_controller_reset(mdev); |
1551 | atomic_set(&mdev->rs_sect_ev, 0); | ||
1552 | mdev->rs_in_flight = 0; | ||
1553 | mdev->rs_planed = 0; | ||
1554 | spin_lock(&mdev->peer_seq_lock); | ||
1555 | fifo_set(&mdev->rs_plan_s, 0); | ||
1556 | spin_unlock(&mdev->peer_seq_lock); | ||
1557 | /* ns.conn may already be != mdev->state.conn, | 1584 | /* ns.conn may already be != mdev->state.conn, |
1558 | * we may have been paused in between, or become paused until | 1585 | * we may have been paused in between, or become paused until |
1559 | * the timer triggers. | 1586 | * the timer triggers. |
@@ -1563,6 +1590,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |||
1563 | 1590 | ||
1564 | drbd_md_sync(mdev); | 1591 | drbd_md_sync(mdev); |
1565 | } | 1592 | } |
1593 | put_ldev(mdev); | ||
1566 | drbd_state_unlock(mdev); | 1594 | drbd_state_unlock(mdev); |
1567 | } | 1595 | } |
1568 | 1596 | ||
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h index 53586fa5ae1b..151f1a37478f 100644 --- a/drivers/block/drbd/drbd_wrappers.h +++ b/drivers/block/drbd/drbd_wrappers.h | |||
@@ -39,7 +39,7 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev, | |||
39 | return; | 39 | return; |
40 | } | 40 | } |
41 | 41 | ||
42 | if (FAULT_ACTIVE(mdev, fault_type)) | 42 | if (drbd_insert_fault(mdev, fault_type)) |
43 | bio_endio(bio, -EIO); | 43 | bio_endio(bio, -EIO); |
44 | else | 44 | else |
45 | generic_make_request(bio); | 45 | generic_make_request(bio); |
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 1f46f1cd9225..7beb0e25f1e1 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -980,7 +980,7 @@ int tpm_open(struct inode *inode, struct file *file) | |||
980 | return -EBUSY; | 980 | return -EBUSY; |
981 | } | 981 | } |
982 | 982 | ||
983 | chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); | 983 | chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL); |
984 | if (chip->data_buffer == NULL) { | 984 | if (chip->data_buffer == NULL) { |
985 | clear_bit(0, &chip->is_open); | 985 | clear_bit(0, &chip->is_open); |
986 | put_device(chip->dev); | 986 | put_device(chip->dev); |
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index dd8ebc75b667..ab8a4eff072a 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c | |||
@@ -94,9 +94,9 @@ static struct ipu_irq_map *src2map(unsigned int src) | |||
94 | return NULL; | 94 | return NULL; |
95 | } | 95 | } |
96 | 96 | ||
97 | static void ipu_irq_unmask(unsigned int irq) | 97 | static void ipu_irq_unmask(struct irq_data *d) |
98 | { | 98 | { |
99 | struct ipu_irq_map *map = get_irq_chip_data(irq); | 99 | struct ipu_irq_map *map = irq_data_get_irq_chip_data(d); |
100 | struct ipu_irq_bank *bank; | 100 | struct ipu_irq_bank *bank; |
101 | uint32_t reg; | 101 | uint32_t reg; |
102 | unsigned long lock_flags; | 102 | unsigned long lock_flags; |
@@ -106,7 +106,7 @@ static void ipu_irq_unmask(unsigned int irq) | |||
106 | bank = map->bank; | 106 | bank = map->bank; |
107 | if (!bank) { | 107 | if (!bank) { |
108 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 108 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
109 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq); | 109 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
110 | return; | 110 | return; |
111 | } | 111 | } |
112 | 112 | ||
@@ -117,9 +117,9 @@ static void ipu_irq_unmask(unsigned int irq) | |||
117 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 117 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
118 | } | 118 | } |
119 | 119 | ||
120 | static void ipu_irq_mask(unsigned int irq) | 120 | static void ipu_irq_mask(struct irq_data *d) |
121 | { | 121 | { |
122 | struct ipu_irq_map *map = get_irq_chip_data(irq); | 122 | struct ipu_irq_map *map = irq_data_get_irq_chip_data(d); |
123 | struct ipu_irq_bank *bank; | 123 | struct ipu_irq_bank *bank; |
124 | uint32_t reg; | 124 | uint32_t reg; |
125 | unsigned long lock_flags; | 125 | unsigned long lock_flags; |
@@ -129,7 +129,7 @@ static void ipu_irq_mask(unsigned int irq) | |||
129 | bank = map->bank; | 129 | bank = map->bank; |
130 | if (!bank) { | 130 | if (!bank) { |
131 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 131 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
132 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq); | 132 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
133 | return; | 133 | return; |
134 | } | 134 | } |
135 | 135 | ||
@@ -140,9 +140,9 @@ static void ipu_irq_mask(unsigned int irq) | |||
140 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 140 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
141 | } | 141 | } |
142 | 142 | ||
143 | static void ipu_irq_ack(unsigned int irq) | 143 | static void ipu_irq_ack(struct irq_data *d) |
144 | { | 144 | { |
145 | struct ipu_irq_map *map = get_irq_chip_data(irq); | 145 | struct ipu_irq_map *map = irq_data_get_irq_chip_data(d); |
146 | struct ipu_irq_bank *bank; | 146 | struct ipu_irq_bank *bank; |
147 | unsigned long lock_flags; | 147 | unsigned long lock_flags; |
148 | 148 | ||
@@ -151,7 +151,7 @@ static void ipu_irq_ack(unsigned int irq) | |||
151 | bank = map->bank; | 151 | bank = map->bank; |
152 | if (!bank) { | 152 | if (!bank) { |
153 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 153 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
154 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq); | 154 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
155 | return; | 155 | return; |
156 | } | 156 | } |
157 | 157 | ||
@@ -167,7 +167,7 @@ static void ipu_irq_ack(unsigned int irq) | |||
167 | */ | 167 | */ |
168 | bool ipu_irq_status(unsigned int irq) | 168 | bool ipu_irq_status(unsigned int irq) |
169 | { | 169 | { |
170 | struct ipu_irq_map *map = get_irq_chip_data(irq); | 170 | struct ipu_irq_map *map = irq_get_chip_data(irq); |
171 | struct ipu_irq_bank *bank; | 171 | struct ipu_irq_bank *bank; |
172 | unsigned long lock_flags; | 172 | unsigned long lock_flags; |
173 | bool ret; | 173 | bool ret; |
@@ -269,7 +269,7 @@ int ipu_irq_unmap(unsigned int source) | |||
269 | /* Chained IRQ handler for IPU error interrupt */ | 269 | /* Chained IRQ handler for IPU error interrupt */ |
270 | static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) | 270 | static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) |
271 | { | 271 | { |
272 | struct ipu *ipu = get_irq_data(irq); | 272 | struct ipu *ipu = irq_get_handler_data(irq); |
273 | u32 status; | 273 | u32 status; |
274 | int i, line; | 274 | int i, line; |
275 | 275 | ||
@@ -310,7 +310,7 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) | |||
310 | /* Chained IRQ handler for IPU function interrupt */ | 310 | /* Chained IRQ handler for IPU function interrupt */ |
311 | static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) | 311 | static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) |
312 | { | 312 | { |
313 | struct ipu *ipu = get_irq_data(irq); | 313 | struct ipu *ipu = irq_desc_get_handler_data(desc); |
314 | u32 status; | 314 | u32 status; |
315 | int i, line; | 315 | int i, line; |
316 | 316 | ||
@@ -345,10 +345,10 @@ static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) | |||
345 | } | 345 | } |
346 | 346 | ||
347 | static struct irq_chip ipu_irq_chip = { | 347 | static struct irq_chip ipu_irq_chip = { |
348 | .name = "ipu_irq", | 348 | .name = "ipu_irq", |
349 | .ack = ipu_irq_ack, | 349 | .irq_ack = ipu_irq_ack, |
350 | .mask = ipu_irq_mask, | 350 | .irq_mask = ipu_irq_mask, |
351 | .unmask = ipu_irq_unmask, | 351 | .irq_unmask = ipu_irq_unmask, |
352 | }; | 352 | }; |
353 | 353 | ||
354 | /* Install the IRQ handler */ | 354 | /* Install the IRQ handler */ |
@@ -366,26 +366,26 @@ int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) | |||
366 | int ret; | 366 | int ret; |
367 | 367 | ||
368 | irq = irq_base + i; | 368 | irq = irq_base + i; |
369 | ret = set_irq_chip(irq, &ipu_irq_chip); | 369 | ret = irq_set_chip(irq, &ipu_irq_chip); |
370 | if (ret < 0) | 370 | if (ret < 0) |
371 | return ret; | 371 | return ret; |
372 | ret = set_irq_chip_data(irq, irq_map + i); | 372 | ret = irq_set_chip_data(irq, irq_map + i); |
373 | if (ret < 0) | 373 | if (ret < 0) |
374 | return ret; | 374 | return ret; |
375 | irq_map[i].ipu = ipu; | 375 | irq_map[i].ipu = ipu; |
376 | irq_map[i].irq = irq; | 376 | irq_map[i].irq = irq; |
377 | irq_map[i].source = -EINVAL; | 377 | irq_map[i].source = -EINVAL; |
378 | set_irq_handler(irq, handle_level_irq); | 378 | irq_set_handler(irq, handle_level_irq); |
379 | #ifdef CONFIG_ARM | 379 | #ifdef CONFIG_ARM |
380 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 380 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
381 | #endif | 381 | #endif |
382 | } | 382 | } |
383 | 383 | ||
384 | set_irq_data(ipu->irq_fn, ipu); | 384 | irq_set_handler_data(ipu->irq_fn, ipu); |
385 | set_irq_chained_handler(ipu->irq_fn, ipu_irq_fn); | 385 | irq_set_chained_handler(ipu->irq_fn, ipu_irq_fn); |
386 | 386 | ||
387 | set_irq_data(ipu->irq_err, ipu); | 387 | irq_set_handler_data(ipu->irq_err, ipu); |
388 | set_irq_chained_handler(ipu->irq_err, ipu_irq_err); | 388 | irq_set_chained_handler(ipu->irq_err, ipu_irq_err); |
389 | 389 | ||
390 | return 0; | 390 | return 0; |
391 | } | 391 | } |
@@ -397,17 +397,17 @@ void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev) | |||
397 | 397 | ||
398 | irq_base = pdata->irq_base; | 398 | irq_base = pdata->irq_base; |
399 | 399 | ||
400 | set_irq_chained_handler(ipu->irq_fn, NULL); | 400 | irq_set_chained_handler(ipu->irq_fn, NULL); |
401 | set_irq_data(ipu->irq_fn, NULL); | 401 | irq_set_handler_data(ipu->irq_fn, NULL); |
402 | 402 | ||
403 | set_irq_chained_handler(ipu->irq_err, NULL); | 403 | irq_set_chained_handler(ipu->irq_err, NULL); |
404 | set_irq_data(ipu->irq_err, NULL); | 404 | irq_set_handler_data(ipu->irq_err, NULL); |
405 | 405 | ||
406 | for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) { | 406 | for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) { |
407 | #ifdef CONFIG_ARM | 407 | #ifdef CONFIG_ARM |
408 | set_irq_flags(irq, 0); | 408 | set_irq_flags(irq, 0); |
409 | #endif | 409 | #endif |
410 | set_irq_chip(irq, NULL); | 410 | irq_set_chip(irq, NULL); |
411 | set_irq_chip_data(irq, NULL); | 411 | irq_set_chip_data(irq, NULL); |
412 | } | 412 | } |
413 | } | 413 | } |
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 0be30e978c85..31e71c4fc831 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -2679,7 +2679,7 @@ static int __init amd64_edac_init(void) | |||
2679 | mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); | 2679 | mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); |
2680 | ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); | 2680 | ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); |
2681 | if (!(mcis && ecc_stngs)) | 2681 | if (!(mcis && ecc_stngs)) |
2682 | goto err_ret; | 2682 | goto err_free; |
2683 | 2683 | ||
2684 | msrs = msrs_alloc(); | 2684 | msrs = msrs_alloc(); |
2685 | if (!msrs) | 2685 | if (!msrs) |
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index d8d0cda2641d..d3b295305542 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -414,4 +414,9 @@ config GPIO_JANZ_TTL | |||
414 | This driver provides support for driving the pins in output | 414 | This driver provides support for driving the pins in output |
415 | mode only. Input mode is not supported. | 415 | mode only. Input mode is not supported. |
416 | 416 | ||
417 | config AB8500_GPIO | ||
418 | bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions" | ||
419 | depends on AB8500_CORE && BROKEN | ||
420 | help | ||
421 | Select this to enable the AB8500 IC GPIO driver | ||
417 | endif | 422 | endif |
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 3351cf87b0ed..becef5954356 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile | |||
@@ -42,3 +42,4 @@ obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o | |||
42 | obj-$(CONFIG_GPIO_SX150X) += sx150x.o | 42 | obj-$(CONFIG_GPIO_SX150X) += sx150x.o |
43 | obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o | 43 | obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o |
44 | obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o | 44 | obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o |
45 | obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o | ||
diff --git a/drivers/gpio/ab8500-gpio.c b/drivers/gpio/ab8500-gpio.c new file mode 100644 index 000000000000..e7b834d054b7 --- /dev/null +++ b/drivers/gpio/ab8500-gpio.c | |||
@@ -0,0 +1,522 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson SA 2011 | ||
3 | * | ||
4 | * Author: BIBEK BASU <bibek.basu@stericsson.com> | ||
5 | * License terms: GNU General Public License (GPL) version 2 | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/gpio.h> | ||
20 | #include <linux/irq.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/mfd/ab8500.h> | ||
23 | #include <linux/mfd/abx500.h> | ||
24 | #include <linux/mfd/ab8500/gpio.h> | ||
25 | |||
26 | /* | ||
27 | * GPIO registers offset | ||
28 | * Bank: 0x10 | ||
29 | */ | ||
30 | #define AB8500_GPIO_SEL1_REG 0x00 | ||
31 | #define AB8500_GPIO_SEL2_REG 0x01 | ||
32 | #define AB8500_GPIO_SEL3_REG 0x02 | ||
33 | #define AB8500_GPIO_SEL4_REG 0x03 | ||
34 | #define AB8500_GPIO_SEL5_REG 0x04 | ||
35 | #define AB8500_GPIO_SEL6_REG 0x05 | ||
36 | |||
37 | #define AB8500_GPIO_DIR1_REG 0x10 | ||
38 | #define AB8500_GPIO_DIR2_REG 0x11 | ||
39 | #define AB8500_GPIO_DIR3_REG 0x12 | ||
40 | #define AB8500_GPIO_DIR4_REG 0x13 | ||
41 | #define AB8500_GPIO_DIR5_REG 0x14 | ||
42 | #define AB8500_GPIO_DIR6_REG 0x15 | ||
43 | |||
44 | #define AB8500_GPIO_OUT1_REG 0x20 | ||
45 | #define AB8500_GPIO_OUT2_REG 0x21 | ||
46 | #define AB8500_GPIO_OUT3_REG 0x22 | ||
47 | #define AB8500_GPIO_OUT4_REG 0x23 | ||
48 | #define AB8500_GPIO_OUT5_REG 0x24 | ||
49 | #define AB8500_GPIO_OUT6_REG 0x25 | ||
50 | |||
51 | #define AB8500_GPIO_PUD1_REG 0x30 | ||
52 | #define AB8500_GPIO_PUD2_REG 0x31 | ||
53 | #define AB8500_GPIO_PUD3_REG 0x32 | ||
54 | #define AB8500_GPIO_PUD4_REG 0x33 | ||
55 | #define AB8500_GPIO_PUD5_REG 0x34 | ||
56 | #define AB8500_GPIO_PUD6_REG 0x35 | ||
57 | |||
58 | #define AB8500_GPIO_IN1_REG 0x40 | ||
59 | #define AB8500_GPIO_IN2_REG 0x41 | ||
60 | #define AB8500_GPIO_IN3_REG 0x42 | ||
61 | #define AB8500_GPIO_IN4_REG 0x43 | ||
62 | #define AB8500_GPIO_IN5_REG 0x44 | ||
63 | #define AB8500_GPIO_IN6_REG 0x45 | ||
64 | #define AB8500_GPIO_ALTFUN_REG 0x45 | ||
65 | #define ALTFUN_REG_INDEX 6 | ||
66 | #define AB8500_NUM_GPIO 42 | ||
67 | #define AB8500_NUM_VIR_GPIO_IRQ 16 | ||
68 | |||
69 | enum ab8500_gpio_action { | ||
70 | NONE, | ||
71 | STARTUP, | ||
72 | SHUTDOWN, | ||
73 | MASK, | ||
74 | UNMASK | ||
75 | }; | ||
76 | |||
77 | struct ab8500_gpio { | ||
78 | struct gpio_chip chip; | ||
79 | struct ab8500 *parent; | ||
80 | struct device *dev; | ||
81 | struct mutex lock; | ||
82 | u32 irq_base; | ||
83 | enum ab8500_gpio_action irq_action; | ||
84 | u16 rising; | ||
85 | u16 falling; | ||
86 | }; | ||
87 | /** | ||
88 | * to_ab8500_gpio() - get the pointer to ab8500_gpio | ||
89 | * @chip: Member of the structure ab8500_gpio | ||
90 | */ | ||
91 | static inline struct ab8500_gpio *to_ab8500_gpio(struct gpio_chip *chip) | ||
92 | { | ||
93 | return container_of(chip, struct ab8500_gpio, chip); | ||
94 | } | ||
95 | |||
96 | static int ab8500_gpio_set_bits(struct gpio_chip *chip, u8 reg, | ||
97 | unsigned offset, int val) | ||
98 | { | ||
99 | struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip); | ||
100 | u8 pos = offset % 8; | ||
101 | int ret; | ||
102 | |||
103 | reg = reg + (offset / 8); | ||
104 | ret = abx500_mask_and_set_register_interruptible(ab8500_gpio->dev, | ||
105 | AB8500_MISC, reg, 1 << pos, val << pos); | ||
106 | if (ret < 0) | ||
107 | dev_err(ab8500_gpio->dev, "%s write failed\n", __func__); | ||
108 | return ret; | ||
109 | } | ||
110 | /** | ||
111 | * ab8500_gpio_get() - Get the particular GPIO value | ||
112 | * @chip: Gpio device | ||
113 | * @offset: GPIO number to read | ||
114 | */ | ||
115 | static int ab8500_gpio_get(struct gpio_chip *chip, unsigned offset) | ||
116 | { | ||
117 | struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip); | ||
118 | u8 mask = 1 << (offset % 8); | ||
119 | u8 reg = AB8500_GPIO_OUT1_REG + (offset / 8); | ||
120 | int ret; | ||
121 | u8 data; | ||
122 | ret = abx500_get_register_interruptible(ab8500_gpio->dev, AB8500_MISC, | ||
123 | reg, &data); | ||
124 | if (ret < 0) { | ||
125 | dev_err(ab8500_gpio->dev, "%s read failed\n", __func__); | ||
126 | return ret; | ||
127 | } | ||
128 | return (data & mask) >> (offset % 8); | ||
129 | } | ||
130 | |||
131 | static void ab8500_gpio_set(struct gpio_chip *chip, unsigned offset, int val) | ||
132 | { | ||
133 | struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip); | ||
134 | int ret; | ||
135 | /* Write the data */ | ||
136 | ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, 1); | ||
137 | if (ret < 0) | ||
138 | dev_err(ab8500_gpio->dev, "%s write failed\n", __func__); | ||
139 | } | ||
140 | |||
141 | static int ab8500_gpio_direction_output(struct gpio_chip *chip, unsigned offset, | ||
142 | int val) | ||
143 | { | ||
144 | int ret; | ||
145 | /* set direction as output */ | ||
146 | ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 1); | ||
147 | if (ret < 0) | ||
148 | return ret; | ||
149 | /* disable pull down */ | ||
150 | ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG, offset, 1); | ||
151 | if (ret < 0) | ||
152 | return ret; | ||
153 | /* set the output as 1 or 0 */ | ||
154 | return ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val); | ||
155 | |||
156 | } | ||
157 | |||
158 | static int ab8500_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | ||
159 | { | ||
160 | /* set the register as input */ | ||
161 | return ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 0); | ||
162 | } | ||
163 | |||
164 | static int ab8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset) | ||
165 | { | ||
166 | /* | ||
167 | * Only some GPIOs are interrupt capable, and they are | ||
168 | * organized in discontiguous clusters: | ||
169 | * | ||
170 | * GPIO6 to GPIO13 | ||
171 | * GPIO24 and GPIO25 | ||
172 | * GPIO36 to GPIO41 | ||
173 | */ | ||
174 | static struct ab8500_gpio_irq_cluster { | ||
175 | int start; | ||
176 | int end; | ||
177 | } clusters[] = { | ||
178 | {.start = 6, .end = 13}, | ||
179 | {.start = 24, .end = 25}, | ||
180 | {.start = 36, .end = 41}, | ||
181 | }; | ||
182 | struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip); | ||
183 | int base = ab8500_gpio->irq_base; | ||
184 | int i; | ||
185 | |||
186 | for (i = 0; i < ARRAY_SIZE(clusters); i++) { | ||
187 | struct ab8500_gpio_irq_cluster *cluster = &clusters[i]; | ||
188 | |||
189 | if (offset >= cluster->start && offset <= cluster->end) | ||
190 | return base + offset - cluster->start; | ||
191 | |||
192 | /* Advance by the number of gpios in this cluster */ | ||
193 | base += cluster->end - cluster->start + 1; | ||
194 | } | ||
195 | |||
196 | return -EINVAL; | ||
197 | } | ||
198 | |||
199 | static struct gpio_chip ab8500gpio_chip = { | ||
200 | .label = "ab8500_gpio", | ||
201 | .owner = THIS_MODULE, | ||
202 | .direction_input = ab8500_gpio_direction_input, | ||
203 | .get = ab8500_gpio_get, | ||
204 | .direction_output = ab8500_gpio_direction_output, | ||
205 | .set = ab8500_gpio_set, | ||
206 | .to_irq = ab8500_gpio_to_irq, | ||
207 | }; | ||
208 | |||
209 | static unsigned int irq_to_rising(unsigned int irq) | ||
210 | { | ||
211 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
212 | int offset = irq - ab8500_gpio->irq_base; | ||
213 | int new_irq = offset + AB8500_INT_GPIO6R | ||
214 | + ab8500_gpio->parent->irq_base; | ||
215 | return new_irq; | ||
216 | } | ||
217 | |||
218 | static unsigned int irq_to_falling(unsigned int irq) | ||
219 | { | ||
220 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
221 | int offset = irq - ab8500_gpio->irq_base; | ||
222 | int new_irq = offset + AB8500_INT_GPIO6F | ||
223 | + ab8500_gpio->parent->irq_base; | ||
224 | return new_irq; | ||
225 | |||
226 | } | ||
227 | |||
228 | static unsigned int rising_to_irq(unsigned int irq, void *dev) | ||
229 | { | ||
230 | struct ab8500_gpio *ab8500_gpio = dev; | ||
231 | int offset = irq - AB8500_INT_GPIO6R | ||
232 | - ab8500_gpio->parent->irq_base ; | ||
233 | int new_irq = offset + ab8500_gpio->irq_base; | ||
234 | return new_irq; | ||
235 | } | ||
236 | |||
237 | static unsigned int falling_to_irq(unsigned int irq, void *dev) | ||
238 | { | ||
239 | struct ab8500_gpio *ab8500_gpio = dev; | ||
240 | int offset = irq - AB8500_INT_GPIO6F | ||
241 | - ab8500_gpio->parent->irq_base ; | ||
242 | int new_irq = offset + ab8500_gpio->irq_base; | ||
243 | return new_irq; | ||
244 | |||
245 | } | ||
246 | |||
247 | /* | ||
248 | * IRQ handler | ||
249 | */ | ||
250 | |||
251 | static irqreturn_t handle_rising(int irq, void *dev) | ||
252 | { | ||
253 | |||
254 | handle_nested_irq(rising_to_irq(irq , dev)); | ||
255 | return IRQ_HANDLED; | ||
256 | } | ||
257 | |||
258 | static irqreturn_t handle_falling(int irq, void *dev) | ||
259 | { | ||
260 | |||
261 | handle_nested_irq(falling_to_irq(irq, dev)); | ||
262 | return IRQ_HANDLED; | ||
263 | } | ||
264 | |||
265 | static void ab8500_gpio_irq_lock(unsigned int irq) | ||
266 | { | ||
267 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
268 | mutex_lock(&ab8500_gpio->lock); | ||
269 | } | ||
270 | |||
271 | static void ab8500_gpio_irq_sync_unlock(unsigned int irq) | ||
272 | { | ||
273 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
274 | int offset = irq - ab8500_gpio->irq_base; | ||
275 | bool rising = ab8500_gpio->rising & BIT(offset); | ||
276 | bool falling = ab8500_gpio->falling & BIT(offset); | ||
277 | int ret; | ||
278 | |||
279 | switch (ab8500_gpio->irq_action) { | ||
280 | case STARTUP: | ||
281 | if (rising) | ||
282 | ret = request_threaded_irq(irq_to_rising(irq), | ||
283 | NULL, handle_rising, | ||
284 | IRQF_TRIGGER_RISING, | ||
285 | "ab8500-gpio-r", ab8500_gpio); | ||
286 | if (falling) | ||
287 | ret = request_threaded_irq(irq_to_falling(irq), | ||
288 | NULL, handle_falling, | ||
289 | IRQF_TRIGGER_FALLING, | ||
290 | "ab8500-gpio-f", ab8500_gpio); | ||
291 | break; | ||
292 | case SHUTDOWN: | ||
293 | if (rising) | ||
294 | free_irq(irq_to_rising(irq), ab8500_gpio); | ||
295 | if (falling) | ||
296 | free_irq(irq_to_falling(irq), ab8500_gpio); | ||
297 | break; | ||
298 | case MASK: | ||
299 | if (rising) | ||
300 | disable_irq(irq_to_rising(irq)); | ||
301 | if (falling) | ||
302 | disable_irq(irq_to_falling(irq)); | ||
303 | break; | ||
304 | case UNMASK: | ||
305 | if (rising) | ||
306 | enable_irq(irq_to_rising(irq)); | ||
307 | if (falling) | ||
308 | enable_irq(irq_to_falling(irq)); | ||
309 | break; | ||
310 | case NONE: | ||
311 | break; | ||
312 | } | ||
313 | ab8500_gpio->irq_action = NONE; | ||
314 | ab8500_gpio->rising &= ~(BIT(offset)); | ||
315 | ab8500_gpio->falling &= ~(BIT(offset)); | ||
316 | mutex_unlock(&ab8500_gpio->lock); | ||
317 | } | ||
318 | |||
319 | |||
320 | static void ab8500_gpio_irq_mask(unsigned int irq) | ||
321 | { | ||
322 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
323 | ab8500_gpio->irq_action = MASK; | ||
324 | } | ||
325 | |||
326 | static void ab8500_gpio_irq_unmask(unsigned int irq) | ||
327 | { | ||
328 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
329 | ab8500_gpio->irq_action = UNMASK; | ||
330 | } | ||
331 | |||
332 | static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type) | ||
333 | { | ||
334 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
335 | int offset = irq - ab8500_gpio->irq_base; | ||
336 | |||
337 | if (type == IRQ_TYPE_EDGE_BOTH) { | ||
338 | ab8500_gpio->rising = BIT(offset); | ||
339 | ab8500_gpio->falling = BIT(offset); | ||
340 | } else if (type == IRQ_TYPE_EDGE_RISING) { | ||
341 | ab8500_gpio->rising = BIT(offset); | ||
342 | } else { | ||
343 | ab8500_gpio->falling = BIT(offset); | ||
344 | } | ||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | unsigned int ab8500_gpio_irq_startup(unsigned int irq) | ||
349 | { | ||
350 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
351 | ab8500_gpio->irq_action = STARTUP; | ||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | void ab8500_gpio_irq_shutdown(unsigned int irq) | ||
356 | { | ||
357 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
358 | ab8500_gpio->irq_action = SHUTDOWN; | ||
359 | } | ||
360 | |||
/*
 * irq_chip for the AB8500 virtual GPIO interrupts.  All callbacks only
 * record the requested action under the bus lock; the slow (I2C-bound)
 * work — request/free/enable/disable of the real rising/falling edge
 * IRQs — is carried out in bus_sync_unlock.
 */
static struct irq_chip ab8500_gpio_irq_chip = {
	.name			= "ab8500-gpio",
	.startup		= ab8500_gpio_irq_startup,
	.shutdown		= ab8500_gpio_irq_shutdown,
	.bus_lock		= ab8500_gpio_irq_lock,
	.bus_sync_unlock	= ab8500_gpio_irq_sync_unlock,
	.mask			= ab8500_gpio_irq_mask,
	.unmask			= ab8500_gpio_irq_unmask,
	.set_type		= ab8500_gpio_irq_set_type,
};
371 | |||
372 | static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio) | ||
373 | { | ||
374 | u32 base = ab8500_gpio->irq_base; | ||
375 | int irq; | ||
376 | |||
377 | for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ ; irq++) { | ||
378 | set_irq_chip_data(irq, ab8500_gpio); | ||
379 | set_irq_chip_and_handler(irq, &ab8500_gpio_irq_chip, | ||
380 | handle_simple_irq); | ||
381 | set_irq_nested_thread(irq, 1); | ||
382 | #ifdef CONFIG_ARM | ||
383 | set_irq_flags(irq, IRQF_VALID); | ||
384 | #else | ||
385 | set_irq_noprobe(irq); | ||
386 | #endif | ||
387 | } | ||
388 | |||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio) | ||
393 | { | ||
394 | int base = ab8500_gpio->irq_base; | ||
395 | int irq; | ||
396 | |||
397 | for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ; irq++) { | ||
398 | #ifdef CONFIG_ARM | ||
399 | set_irq_flags(irq, 0); | ||
400 | #endif | ||
401 | set_irq_chip_and_handler(irq, NULL, NULL); | ||
402 | set_irq_chip_data(irq, NULL); | ||
403 | } | ||
404 | } | ||
405 | |||
406 | static int __devinit ab8500_gpio_probe(struct platform_device *pdev) | ||
407 | { | ||
408 | struct ab8500_platform_data *ab8500_pdata = | ||
409 | dev_get_platdata(pdev->dev.parent); | ||
410 | struct ab8500_gpio_platform_data *pdata; | ||
411 | struct ab8500_gpio *ab8500_gpio; | ||
412 | int ret; | ||
413 | int i; | ||
414 | |||
415 | pdata = ab8500_pdata->gpio; | ||
416 | if (!pdata) { | ||
417 | dev_err(&pdev->dev, "gpio platform data missing\n"); | ||
418 | return -ENODEV; | ||
419 | } | ||
420 | |||
421 | ab8500_gpio = kzalloc(sizeof(struct ab8500_gpio), GFP_KERNEL); | ||
422 | if (ab8500_gpio == NULL) { | ||
423 | dev_err(&pdev->dev, "failed to allocate memory\n"); | ||
424 | return -ENOMEM; | ||
425 | } | ||
426 | ab8500_gpio->dev = &pdev->dev; | ||
427 | ab8500_gpio->parent = dev_get_drvdata(pdev->dev.parent); | ||
428 | ab8500_gpio->chip = ab8500gpio_chip; | ||
429 | ab8500_gpio->chip.ngpio = AB8500_NUM_GPIO; | ||
430 | ab8500_gpio->chip.dev = &pdev->dev; | ||
431 | ab8500_gpio->chip.base = pdata->gpio_base; | ||
432 | ab8500_gpio->irq_base = pdata->irq_base; | ||
433 | /* initialize the lock */ | ||
434 | mutex_init(&ab8500_gpio->lock); | ||
435 | /* | ||
436 | * AB8500 core will handle and clear the IRQ | ||
437 | * configre GPIO based on config-reg value. | ||
438 | * These values are for selecting the PINs as | ||
439 | * GPIO or alternate function | ||
440 | */ | ||
441 | for (i = AB8500_GPIO_SEL1_REG; i <= AB8500_GPIO_SEL6_REG; i++) { | ||
442 | ret = abx500_set_register_interruptible(ab8500_gpio->dev, | ||
443 | AB8500_MISC, i, | ||
444 | pdata->config_reg[i]); | ||
445 | if (ret < 0) | ||
446 | goto out_free; | ||
447 | } | ||
448 | ret = abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC, | ||
449 | AB8500_GPIO_ALTFUN_REG, | ||
450 | pdata->config_reg[ALTFUN_REG_INDEX]); | ||
451 | if (ret < 0) | ||
452 | goto out_free; | ||
453 | |||
454 | ret = ab8500_gpio_irq_init(ab8500_gpio); | ||
455 | if (ret) | ||
456 | goto out_free; | ||
457 | ret = gpiochip_add(&ab8500_gpio->chip); | ||
458 | if (ret) { | ||
459 | dev_err(&pdev->dev, "unable to add gpiochip: %d\n", | ||
460 | ret); | ||
461 | goto out_rem_irq; | ||
462 | } | ||
463 | platform_set_drvdata(pdev, ab8500_gpio); | ||
464 | return 0; | ||
465 | |||
466 | out_rem_irq: | ||
467 | ab8500_gpio_irq_remove(ab8500_gpio); | ||
468 | out_free: | ||
469 | mutex_destroy(&ab8500_gpio->lock); | ||
470 | kfree(ab8500_gpio); | ||
471 | return ret; | ||
472 | } | ||
473 | |||
474 | /* | ||
475 | * ab8500_gpio_remove() - remove Ab8500-gpio driver | ||
476 | * @pdev : Platform device registered | ||
477 | */ | ||
478 | static int __devexit ab8500_gpio_remove(struct platform_device *pdev) | ||
479 | { | ||
480 | struct ab8500_gpio *ab8500_gpio = platform_get_drvdata(pdev); | ||
481 | int ret; | ||
482 | |||
483 | ret = gpiochip_remove(&ab8500_gpio->chip); | ||
484 | if (ret < 0) { | ||
485 | dev_err(ab8500_gpio->dev, "unable to remove gpiochip:\ | ||
486 | %d\n", ret); | ||
487 | return ret; | ||
488 | } | ||
489 | |||
490 | platform_set_drvdata(pdev, NULL); | ||
491 | mutex_destroy(&ab8500_gpio->lock); | ||
492 | kfree(ab8500_gpio); | ||
493 | |||
494 | return 0; | ||
495 | } | ||
496 | |||
/* Platform driver glue; matched against the "ab8500-gpio" platform
 * device created by the AB8500 MFD core. */
static struct platform_driver ab8500_gpio_driver = {
	.driver = {
		.name = "ab8500-gpio",
		.owner = THIS_MODULE,
	},
	.probe = ab8500_gpio_probe,
	.remove = __devexit_p(ab8500_gpio_remove),
};
505 | |||
/* Registered at arch_initcall time so the GPIOs are available early,
 * before ordinary device initcalls that may consume them. */
static int __init ab8500_gpio_init(void)
{
	return platform_driver_register(&ab8500_gpio_driver);
}
arch_initcall(ab8500_gpio_init);
511 | |||
/* Module exit: unregister the platform driver. */
static void __exit ab8500_gpio_exit(void)
{
	platform_driver_unregister(&ab8500_gpio_driver);
}
module_exit(ab8500_gpio_exit);
517 | |||
518 | MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>"); | ||
519 | MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins\ | ||
520 | to be used as GPIO"); | ||
521 | MODULE_ALIAS("AB8500 GPIO driver"); | ||
522 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 81131eda5544..060ef6327876 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -315,11 +315,22 @@ config SENSORS_F71805F | |||
315 | will be called f71805f. | 315 | will be called f71805f. |
316 | 316 | ||
317 | config SENSORS_F71882FG | 317 | config SENSORS_F71882FG |
318 | tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000" | 318 | tristate "Fintek F71882FG and compatibles" |
319 | help | 319 | help |
320 | If you say yes here you get support for hardware monitoring | 320 | If you say yes here you get support for hardware monitoring |
321 | features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG, | 321 | features of many Fintek Super-I/O (LPC) chips. The currently |
322 | F71889FG and F8000 Super-I/O chips. | 322 | supported chips are: |
323 | F71808E | ||
324 | F71858FG | ||
325 | F71862FG | ||
326 | F71863FG | ||
327 | F71869F/E | ||
328 | F71882FG | ||
329 | F71883FG | ||
330 | F71889FG/ED/A | ||
331 | F8000 | ||
332 | F81801U | ||
333 | F81865F | ||
323 | 334 | ||
324 | This driver can also be built as a module. If so, the module | 335 | This driver can also be built as a module. If so, the module |
325 | will be called f71882fg. | 336 | will be called f71882fg. |
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index a4d430ee7e20..ca07a32447c2 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c | |||
@@ -54,7 +54,9 @@ | |||
54 | #define SIO_F71882_ID 0x0541 /* Chipset ID */ | 54 | #define SIO_F71882_ID 0x0541 /* Chipset ID */ |
55 | #define SIO_F71889_ID 0x0723 /* Chipset ID */ | 55 | #define SIO_F71889_ID 0x0723 /* Chipset ID */ |
56 | #define SIO_F71889E_ID 0x0909 /* Chipset ID */ | 56 | #define SIO_F71889E_ID 0x0909 /* Chipset ID */ |
57 | #define SIO_F71889A_ID 0x1005 /* Chipset ID */ | ||
57 | #define SIO_F8000_ID 0x0581 /* Chipset ID */ | 58 | #define SIO_F8000_ID 0x0581 /* Chipset ID */ |
59 | #define SIO_F81865_ID 0x0704 /* Chipset ID */ | ||
58 | 60 | ||
59 | #define REGION_LENGTH 8 | 61 | #define REGION_LENGTH 8 |
60 | #define ADDR_REG_OFFSET 5 | 62 | #define ADDR_REG_OFFSET 5 |
@@ -106,7 +108,7 @@ module_param(force_id, ushort, 0); | |||
106 | MODULE_PARM_DESC(force_id, "Override the detected device ID"); | 108 | MODULE_PARM_DESC(force_id, "Override the detected device ID"); |
107 | 109 | ||
108 | enum chips { f71808e, f71858fg, f71862fg, f71869, f71882fg, f71889fg, | 110 | enum chips { f71808e, f71858fg, f71862fg, f71869, f71882fg, f71889fg, |
109 | f71889ed, f8000 }; | 111 | f71889ed, f71889a, f8000, f81865f }; |
110 | 112 | ||
111 | static const char *f71882fg_names[] = { | 113 | static const char *f71882fg_names[] = { |
112 | "f71808e", | 114 | "f71808e", |
@@ -114,42 +116,76 @@ static const char *f71882fg_names[] = { | |||
114 | "f71862fg", | 116 | "f71862fg", |
115 | "f71869", /* Both f71869f and f71869e, reg. compatible and same id */ | 117 | "f71869", /* Both f71869f and f71869e, reg. compatible and same id */ |
116 | "f71882fg", | 118 | "f71882fg", |
117 | "f71889fg", | 119 | "f71889fg", /* f81801u too, same id */ |
118 | "f71889ed", | 120 | "f71889ed", |
121 | "f71889a", | ||
119 | "f8000", | 122 | "f8000", |
123 | "f81865f", | ||
120 | }; | 124 | }; |
121 | 125 | ||
122 | static const char f71882fg_has_in[8][F71882FG_MAX_INS] = { | 126 | static const char f71882fg_has_in[][F71882FG_MAX_INS] = { |
123 | { 1, 1, 1, 1, 1, 1, 0, 1, 1 }, /* f71808e */ | 127 | [f71808e] = { 1, 1, 1, 1, 1, 1, 0, 1, 1 }, |
124 | { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f71858fg */ | 128 | [f71858fg] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, |
125 | { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71862fg */ | 129 | [f71862fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, |
126 | { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71869 */ | 130 | [f71869] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, |
127 | { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71882fg */ | 131 | [f71882fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, |
128 | { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889fg */ | 132 | [f71889fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, |
129 | { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889ed */ | 133 | [f71889ed] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, |
130 | { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f8000 */ | 134 | [f71889a] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, |
135 | [f8000] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, | ||
136 | [f81865f] = { 1, 1, 1, 1, 1, 1, 1, 0, 0 }, | ||
131 | }; | 137 | }; |
132 | 138 | ||
133 | static const char f71882fg_has_in1_alarm[8] = { | 139 | static const char f71882fg_has_in1_alarm[] = { |
134 | 0, /* f71808e */ | 140 | [f71808e] = 0, |
135 | 0, /* f71858fg */ | 141 | [f71858fg] = 0, |
136 | 0, /* f71862fg */ | 142 | [f71862fg] = 0, |
137 | 0, /* f71869 */ | 143 | [f71869] = 0, |
138 | 1, /* f71882fg */ | 144 | [f71882fg] = 1, |
139 | 1, /* f71889fg */ | 145 | [f71889fg] = 1, |
140 | 1, /* f71889ed */ | 146 | [f71889ed] = 1, |
141 | 0, /* f8000 */ | 147 | [f71889a] = 1, |
148 | [f8000] = 0, | ||
149 | [f81865f] = 1, | ||
142 | }; | 150 | }; |
143 | 151 | ||
144 | static const char f71882fg_has_beep[8] = { | 152 | static const char f71882fg_has_beep[] = { |
145 | 0, /* f71808e */ | 153 | [f71808e] = 0, |
146 | 0, /* f71858fg */ | 154 | [f71858fg] = 0, |
147 | 1, /* f71862fg */ | 155 | [f71862fg] = 1, |
148 | 1, /* f71869 */ | 156 | [f71869] = 1, |
149 | 1, /* f71882fg */ | 157 | [f71882fg] = 1, |
150 | 1, /* f71889fg */ | 158 | [f71889fg] = 1, |
151 | 1, /* f71889ed */ | 159 | [f71889ed] = 1, |
152 | 0, /* f8000 */ | 160 | [f71889a] = 1, |
161 | [f8000] = 0, | ||
162 | [f81865f] = 1, | ||
163 | }; | ||
164 | |||
165 | static const char f71882fg_nr_fans[] = { | ||
166 | [f71808e] = 3, | ||
167 | [f71858fg] = 3, | ||
168 | [f71862fg] = 3, | ||
169 | [f71869] = 3, | ||
170 | [f71882fg] = 4, | ||
171 | [f71889fg] = 3, | ||
172 | [f71889ed] = 3, | ||
173 | [f71889a] = 3, | ||
174 | [f8000] = 3, | ||
175 | [f81865f] = 2, | ||
176 | }; | ||
177 | |||
178 | static const char f71882fg_nr_temps[] = { | ||
179 | [f71808e] = 2, | ||
180 | [f71858fg] = 3, | ||
181 | [f71862fg] = 3, | ||
182 | [f71869] = 3, | ||
183 | [f71882fg] = 3, | ||
184 | [f71889fg] = 3, | ||
185 | [f71889ed] = 3, | ||
186 | [f71889a] = 3, | ||
187 | [f8000] = 3, | ||
188 | [f81865f] = 2, | ||
153 | }; | 189 | }; |
154 | 190 | ||
155 | static struct platform_device *f71882fg_pdev; | 191 | static struct platform_device *f71882fg_pdev; |
@@ -1071,9 +1107,9 @@ static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr) | |||
1071 | static struct f71882fg_data *f71882fg_update_device(struct device *dev) | 1107 | static struct f71882fg_data *f71882fg_update_device(struct device *dev) |
1072 | { | 1108 | { |
1073 | struct f71882fg_data *data = dev_get_drvdata(dev); | 1109 | struct f71882fg_data *data = dev_get_drvdata(dev); |
1110 | int nr_fans = f71882fg_nr_fans[data->type]; | ||
1111 | int nr_temps = f71882fg_nr_temps[data->type]; | ||
1074 | int nr, reg, point; | 1112 | int nr, reg, point; |
1075 | int nr_fans = (data->type == f71882fg) ? 4 : 3; | ||
1076 | int nr_temps = (data->type == f71808e) ? 2 : 3; | ||
1077 | 1113 | ||
1078 | mutex_lock(&data->update_lock); | 1114 | mutex_lock(&data->update_lock); |
1079 | 1115 | ||
@@ -2042,8 +2078,9 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) | |||
2042 | { | 2078 | { |
2043 | struct f71882fg_data *data; | 2079 | struct f71882fg_data *data; |
2044 | struct f71882fg_sio_data *sio_data = pdev->dev.platform_data; | 2080 | struct f71882fg_sio_data *sio_data = pdev->dev.platform_data; |
2045 | int err, i, nr_fans = (sio_data->type == f71882fg) ? 4 : 3; | 2081 | int nr_fans = f71882fg_nr_fans[sio_data->type]; |
2046 | int nr_temps = (sio_data->type == f71808e) ? 2 : 3; | 2082 | int nr_temps = f71882fg_nr_temps[sio_data->type]; |
2083 | int err, i; | ||
2047 | u8 start_reg, reg; | 2084 | u8 start_reg, reg; |
2048 | 2085 | ||
2049 | data = kzalloc(sizeof(struct f71882fg_data), GFP_KERNEL); | 2086 | data = kzalloc(sizeof(struct f71882fg_data), GFP_KERNEL); |
@@ -2138,6 +2175,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) | |||
2138 | /* Fall through to select correct fan/pwm reg bank! */ | 2175 | /* Fall through to select correct fan/pwm reg bank! */ |
2139 | case f71889fg: | 2176 | case f71889fg: |
2140 | case f71889ed: | 2177 | case f71889ed: |
2178 | case f71889a: | ||
2141 | reg = f71882fg_read8(data, F71882FG_REG_FAN_FAULT_T); | 2179 | reg = f71882fg_read8(data, F71882FG_REG_FAN_FAULT_T); |
2142 | if (reg & F71882FG_FAN_NEG_TEMP_EN) | 2180 | if (reg & F71882FG_FAN_NEG_TEMP_EN) |
2143 | data->auto_point_temp_signed = 1; | 2181 | data->auto_point_temp_signed = 1; |
@@ -2163,16 +2201,12 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) | |||
2163 | case f71862fg: | 2201 | case f71862fg: |
2164 | err = (data->pwm_enable & 0x15) != 0x15; | 2202 | err = (data->pwm_enable & 0x15) != 0x15; |
2165 | break; | 2203 | break; |
2166 | case f71808e: | ||
2167 | case f71869: | ||
2168 | case f71882fg: | ||
2169 | case f71889fg: | ||
2170 | case f71889ed: | ||
2171 | err = 0; | ||
2172 | break; | ||
2173 | case f8000: | 2204 | case f8000: |
2174 | err = data->pwm_enable & 0x20; | 2205 | err = data->pwm_enable & 0x20; |
2175 | break; | 2206 | break; |
2207 | default: | ||
2208 | err = 0; | ||
2209 | break; | ||
2176 | } | 2210 | } |
2177 | if (err) { | 2211 | if (err) { |
2178 | dev_err(&pdev->dev, | 2212 | dev_err(&pdev->dev, |
@@ -2199,6 +2233,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) | |||
2199 | case f71869: | 2233 | case f71869: |
2200 | case f71889fg: | 2234 | case f71889fg: |
2201 | case f71889ed: | 2235 | case f71889ed: |
2236 | case f71889a: | ||
2202 | for (i = 0; i < nr_fans; i++) { | 2237 | for (i = 0; i < nr_fans; i++) { |
2203 | data->pwm_auto_point_mapping[i] = | 2238 | data->pwm_auto_point_mapping[i] = |
2204 | f71882fg_read8(data, | 2239 | f71882fg_read8(data, |
@@ -2276,8 +2311,9 @@ exit_free: | |||
2276 | static int f71882fg_remove(struct platform_device *pdev) | 2311 | static int f71882fg_remove(struct platform_device *pdev) |
2277 | { | 2312 | { |
2278 | struct f71882fg_data *data = platform_get_drvdata(pdev); | 2313 | struct f71882fg_data *data = platform_get_drvdata(pdev); |
2279 | int i, nr_fans = (data->type == f71882fg) ? 4 : 3; | 2314 | int nr_fans = f71882fg_nr_fans[data->type]; |
2280 | int nr_temps = (data->type == f71808e) ? 2 : 3; | 2315 | int nr_temps = f71882fg_nr_temps[data->type]; |
2316 | int i; | ||
2281 | u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); | 2317 | u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); |
2282 | 2318 | ||
2283 | if (data->hwmon_dev) | 2319 | if (data->hwmon_dev) |
@@ -2406,9 +2442,15 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, | |||
2406 | case SIO_F71889E_ID: | 2442 | case SIO_F71889E_ID: |
2407 | sio_data->type = f71889ed; | 2443 | sio_data->type = f71889ed; |
2408 | break; | 2444 | break; |
2445 | case SIO_F71889A_ID: | ||
2446 | sio_data->type = f71889a; | ||
2447 | break; | ||
2409 | case SIO_F8000_ID: | 2448 | case SIO_F8000_ID: |
2410 | sio_data->type = f8000; | 2449 | sio_data->type = f8000; |
2411 | break; | 2450 | break; |
2451 | case SIO_F81865_ID: | ||
2452 | sio_data->type = f81865f; | ||
2453 | break; | ||
2412 | default: | 2454 | default: |
2413 | pr_info("Unsupported Fintek device: %04x\n", | 2455 | pr_info("Unsupported Fintek device: %04x\n", |
2414 | (unsigned int)devid); | 2456 | (unsigned int)devid); |
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index f141a1de519c..89aa9fb743af 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c | |||
@@ -116,7 +116,7 @@ static int fan_alarm_init(struct gpio_fan_data *fan_data, | |||
116 | return 0; | 116 | return 0; |
117 | 117 | ||
118 | INIT_WORK(&fan_data->alarm_work, fan_alarm_notify); | 118 | INIT_WORK(&fan_data->alarm_work, fan_alarm_notify); |
119 | set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH); | 119 | irq_set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH); |
120 | err = request_irq(alarm_irq, fan_alarm_irq_handler, IRQF_SHARED, | 120 | err = request_irq(alarm_irq, fan_alarm_irq_handler, IRQF_SHARED, |
121 | "GPIO fan alarm", fan_data); | 121 | "GPIO fan alarm", fan_data); |
122 | if (err) | 122 | if (err) |
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c index 6474512f49b0..edfb92e41735 100644 --- a/drivers/hwmon/pmbus_core.c +++ b/drivers/hwmon/pmbus_core.c | |||
@@ -752,7 +752,7 @@ static void pmbus_add_boolean_cmp(struct pmbus_data *data, | |||
752 | static void pmbus_add_sensor(struct pmbus_data *data, | 752 | static void pmbus_add_sensor(struct pmbus_data *data, |
753 | const char *name, const char *type, int seq, | 753 | const char *name, const char *type, int seq, |
754 | int page, int reg, enum pmbus_sensor_classes class, | 754 | int page, int reg, enum pmbus_sensor_classes class, |
755 | bool update) | 755 | bool update, bool readonly) |
756 | { | 756 | { |
757 | struct pmbus_sensor *sensor; | 757 | struct pmbus_sensor *sensor; |
758 | 758 | ||
@@ -765,7 +765,7 @@ static void pmbus_add_sensor(struct pmbus_data *data, | |||
765 | sensor->reg = reg; | 765 | sensor->reg = reg; |
766 | sensor->class = class; | 766 | sensor->class = class; |
767 | sensor->update = update; | 767 | sensor->update = update; |
768 | if (update) | 768 | if (readonly) |
769 | PMBUS_ADD_GET_ATTR(data, sensor->name, sensor, | 769 | PMBUS_ADD_GET_ATTR(data, sensor->name, sensor, |
770 | data->num_sensors); | 770 | data->num_sensors); |
771 | else | 771 | else |
@@ -916,14 +916,14 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
916 | 916 | ||
917 | i0 = data->num_sensors; | 917 | i0 = data->num_sensors; |
918 | pmbus_add_label(data, "in", in_index, "vin", 0); | 918 | pmbus_add_label(data, "in", in_index, "vin", 0); |
919 | pmbus_add_sensor(data, "in", "input", in_index, | 919 | pmbus_add_sensor(data, "in", "input", in_index, 0, |
920 | 0, PMBUS_READ_VIN, PSC_VOLTAGE_IN, true); | 920 | PMBUS_READ_VIN, PSC_VOLTAGE_IN, true, true); |
921 | if (pmbus_check_word_register(client, 0, | 921 | if (pmbus_check_word_register(client, 0, |
922 | PMBUS_VIN_UV_WARN_LIMIT)) { | 922 | PMBUS_VIN_UV_WARN_LIMIT)) { |
923 | i1 = data->num_sensors; | 923 | i1 = data->num_sensors; |
924 | pmbus_add_sensor(data, "in", "min", in_index, | 924 | pmbus_add_sensor(data, "in", "min", in_index, |
925 | 0, PMBUS_VIN_UV_WARN_LIMIT, | 925 | 0, PMBUS_VIN_UV_WARN_LIMIT, |
926 | PSC_VOLTAGE_IN, false); | 926 | PSC_VOLTAGE_IN, false, false); |
927 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { | 927 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { |
928 | pmbus_add_boolean_reg(data, "in", "min_alarm", | 928 | pmbus_add_boolean_reg(data, "in", "min_alarm", |
929 | in_index, | 929 | in_index, |
@@ -937,7 +937,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
937 | i1 = data->num_sensors; | 937 | i1 = data->num_sensors; |
938 | pmbus_add_sensor(data, "in", "lcrit", in_index, | 938 | pmbus_add_sensor(data, "in", "lcrit", in_index, |
939 | 0, PMBUS_VIN_UV_FAULT_LIMIT, | 939 | 0, PMBUS_VIN_UV_FAULT_LIMIT, |
940 | PSC_VOLTAGE_IN, false); | 940 | PSC_VOLTAGE_IN, false, false); |
941 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { | 941 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { |
942 | pmbus_add_boolean_reg(data, "in", "lcrit_alarm", | 942 | pmbus_add_boolean_reg(data, "in", "lcrit_alarm", |
943 | in_index, | 943 | in_index, |
@@ -951,7 +951,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
951 | i1 = data->num_sensors; | 951 | i1 = data->num_sensors; |
952 | pmbus_add_sensor(data, "in", "max", in_index, | 952 | pmbus_add_sensor(data, "in", "max", in_index, |
953 | 0, PMBUS_VIN_OV_WARN_LIMIT, | 953 | 0, PMBUS_VIN_OV_WARN_LIMIT, |
954 | PSC_VOLTAGE_IN, false); | 954 | PSC_VOLTAGE_IN, false, false); |
955 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { | 955 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { |
956 | pmbus_add_boolean_reg(data, "in", "max_alarm", | 956 | pmbus_add_boolean_reg(data, "in", "max_alarm", |
957 | in_index, | 957 | in_index, |
@@ -965,7 +965,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
965 | i1 = data->num_sensors; | 965 | i1 = data->num_sensors; |
966 | pmbus_add_sensor(data, "in", "crit", in_index, | 966 | pmbus_add_sensor(data, "in", "crit", in_index, |
967 | 0, PMBUS_VIN_OV_FAULT_LIMIT, | 967 | 0, PMBUS_VIN_OV_FAULT_LIMIT, |
968 | PSC_VOLTAGE_IN, false); | 968 | PSC_VOLTAGE_IN, false, false); |
969 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { | 969 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { |
970 | pmbus_add_boolean_reg(data, "in", "crit_alarm", | 970 | pmbus_add_boolean_reg(data, "in", "crit_alarm", |
971 | in_index, | 971 | in_index, |
@@ -988,7 +988,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
988 | if (info->func[0] & PMBUS_HAVE_VCAP) { | 988 | if (info->func[0] & PMBUS_HAVE_VCAP) { |
989 | pmbus_add_label(data, "in", in_index, "vcap", 0); | 989 | pmbus_add_label(data, "in", in_index, "vcap", 0); |
990 | pmbus_add_sensor(data, "in", "input", in_index, 0, | 990 | pmbus_add_sensor(data, "in", "input", in_index, 0, |
991 | PMBUS_READ_VCAP, PSC_VOLTAGE_IN, true); | 991 | PMBUS_READ_VCAP, PSC_VOLTAGE_IN, true, true); |
992 | in_index++; | 992 | in_index++; |
993 | } | 993 | } |
994 | 994 | ||
@@ -1004,13 +1004,13 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1004 | i0 = data->num_sensors; | 1004 | i0 = data->num_sensors; |
1005 | pmbus_add_label(data, "in", in_index, "vout", page + 1); | 1005 | pmbus_add_label(data, "in", in_index, "vout", page + 1); |
1006 | pmbus_add_sensor(data, "in", "input", in_index, page, | 1006 | pmbus_add_sensor(data, "in", "input", in_index, page, |
1007 | PMBUS_READ_VOUT, PSC_VOLTAGE_OUT, true); | 1007 | PMBUS_READ_VOUT, PSC_VOLTAGE_OUT, true, true); |
1008 | if (pmbus_check_word_register(client, page, | 1008 | if (pmbus_check_word_register(client, page, |
1009 | PMBUS_VOUT_UV_WARN_LIMIT)) { | 1009 | PMBUS_VOUT_UV_WARN_LIMIT)) { |
1010 | i1 = data->num_sensors; | 1010 | i1 = data->num_sensors; |
1011 | pmbus_add_sensor(data, "in", "min", in_index, page, | 1011 | pmbus_add_sensor(data, "in", "min", in_index, page, |
1012 | PMBUS_VOUT_UV_WARN_LIMIT, | 1012 | PMBUS_VOUT_UV_WARN_LIMIT, |
1013 | PSC_VOLTAGE_OUT, false); | 1013 | PSC_VOLTAGE_OUT, false, false); |
1014 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { | 1014 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { |
1015 | pmbus_add_boolean_reg(data, "in", "min_alarm", | 1015 | pmbus_add_boolean_reg(data, "in", "min_alarm", |
1016 | in_index, | 1016 | in_index, |
@@ -1025,7 +1025,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1025 | i1 = data->num_sensors; | 1025 | i1 = data->num_sensors; |
1026 | pmbus_add_sensor(data, "in", "lcrit", in_index, page, | 1026 | pmbus_add_sensor(data, "in", "lcrit", in_index, page, |
1027 | PMBUS_VOUT_UV_FAULT_LIMIT, | 1027 | PMBUS_VOUT_UV_FAULT_LIMIT, |
1028 | PSC_VOLTAGE_OUT, false); | 1028 | PSC_VOLTAGE_OUT, false, false); |
1029 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { | 1029 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { |
1030 | pmbus_add_boolean_reg(data, "in", "lcrit_alarm", | 1030 | pmbus_add_boolean_reg(data, "in", "lcrit_alarm", |
1031 | in_index, | 1031 | in_index, |
@@ -1040,7 +1040,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1040 | i1 = data->num_sensors; | 1040 | i1 = data->num_sensors; |
1041 | pmbus_add_sensor(data, "in", "max", in_index, page, | 1041 | pmbus_add_sensor(data, "in", "max", in_index, page, |
1042 | PMBUS_VOUT_OV_WARN_LIMIT, | 1042 | PMBUS_VOUT_OV_WARN_LIMIT, |
1043 | PSC_VOLTAGE_OUT, false); | 1043 | PSC_VOLTAGE_OUT, false, false); |
1044 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { | 1044 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { |
1045 | pmbus_add_boolean_reg(data, "in", "max_alarm", | 1045 | pmbus_add_boolean_reg(data, "in", "max_alarm", |
1046 | in_index, | 1046 | in_index, |
@@ -1055,7 +1055,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1055 | i1 = data->num_sensors; | 1055 | i1 = data->num_sensors; |
1056 | pmbus_add_sensor(data, "in", "crit", in_index, page, | 1056 | pmbus_add_sensor(data, "in", "crit", in_index, page, |
1057 | PMBUS_VOUT_OV_FAULT_LIMIT, | 1057 | PMBUS_VOUT_OV_FAULT_LIMIT, |
1058 | PSC_VOLTAGE_OUT, false); | 1058 | PSC_VOLTAGE_OUT, false, false); |
1059 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { | 1059 | if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { |
1060 | pmbus_add_boolean_reg(data, "in", "crit_alarm", | 1060 | pmbus_add_boolean_reg(data, "in", "crit_alarm", |
1061 | in_index, | 1061 | in_index, |
@@ -1088,14 +1088,14 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1088 | if (info->func[0] & PMBUS_HAVE_IIN) { | 1088 | if (info->func[0] & PMBUS_HAVE_IIN) { |
1089 | i0 = data->num_sensors; | 1089 | i0 = data->num_sensors; |
1090 | pmbus_add_label(data, "curr", in_index, "iin", 0); | 1090 | pmbus_add_label(data, "curr", in_index, "iin", 0); |
1091 | pmbus_add_sensor(data, "curr", "input", in_index, | 1091 | pmbus_add_sensor(data, "curr", "input", in_index, 0, |
1092 | 0, PMBUS_READ_IIN, PSC_CURRENT_IN, true); | 1092 | PMBUS_READ_IIN, PSC_CURRENT_IN, true, true); |
1093 | if (pmbus_check_word_register(client, 0, | 1093 | if (pmbus_check_word_register(client, 0, |
1094 | PMBUS_IIN_OC_WARN_LIMIT)) { | 1094 | PMBUS_IIN_OC_WARN_LIMIT)) { |
1095 | i1 = data->num_sensors; | 1095 | i1 = data->num_sensors; |
1096 | pmbus_add_sensor(data, "curr", "max", in_index, | 1096 | pmbus_add_sensor(data, "curr", "max", in_index, |
1097 | 0, PMBUS_IIN_OC_WARN_LIMIT, | 1097 | 0, PMBUS_IIN_OC_WARN_LIMIT, |
1098 | PSC_CURRENT_IN, false); | 1098 | PSC_CURRENT_IN, false, false); |
1099 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { | 1099 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { |
1100 | pmbus_add_boolean_reg(data, "curr", "max_alarm", | 1100 | pmbus_add_boolean_reg(data, "curr", "max_alarm", |
1101 | in_index, | 1101 | in_index, |
@@ -1108,7 +1108,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1108 | i1 = data->num_sensors; | 1108 | i1 = data->num_sensors; |
1109 | pmbus_add_sensor(data, "curr", "crit", in_index, | 1109 | pmbus_add_sensor(data, "curr", "crit", in_index, |
1110 | 0, PMBUS_IIN_OC_FAULT_LIMIT, | 1110 | 0, PMBUS_IIN_OC_FAULT_LIMIT, |
1111 | PSC_CURRENT_IN, false); | 1111 | PSC_CURRENT_IN, false, false); |
1112 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) | 1112 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) |
1113 | pmbus_add_boolean_reg(data, "curr", | 1113 | pmbus_add_boolean_reg(data, "curr", |
1114 | "crit_alarm", | 1114 | "crit_alarm", |
@@ -1131,13 +1131,13 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1131 | i0 = data->num_sensors; | 1131 | i0 = data->num_sensors; |
1132 | pmbus_add_label(data, "curr", in_index, "iout", page + 1); | 1132 | pmbus_add_label(data, "curr", in_index, "iout", page + 1); |
1133 | pmbus_add_sensor(data, "curr", "input", in_index, page, | 1133 | pmbus_add_sensor(data, "curr", "input", in_index, page, |
1134 | PMBUS_READ_IOUT, PSC_CURRENT_OUT, true); | 1134 | PMBUS_READ_IOUT, PSC_CURRENT_OUT, true, true); |
1135 | if (pmbus_check_word_register(client, page, | 1135 | if (pmbus_check_word_register(client, page, |
1136 | PMBUS_IOUT_OC_WARN_LIMIT)) { | 1136 | PMBUS_IOUT_OC_WARN_LIMIT)) { |
1137 | i1 = data->num_sensors; | 1137 | i1 = data->num_sensors; |
1138 | pmbus_add_sensor(data, "curr", "max", in_index, page, | 1138 | pmbus_add_sensor(data, "curr", "max", in_index, page, |
1139 | PMBUS_IOUT_OC_WARN_LIMIT, | 1139 | PMBUS_IOUT_OC_WARN_LIMIT, |
1140 | PSC_CURRENT_OUT, false); | 1140 | PSC_CURRENT_OUT, false, false); |
1141 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { | 1141 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { |
1142 | pmbus_add_boolean_reg(data, "curr", "max_alarm", | 1142 | pmbus_add_boolean_reg(data, "curr", "max_alarm", |
1143 | in_index, | 1143 | in_index, |
@@ -1151,7 +1151,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1151 | i1 = data->num_sensors; | 1151 | i1 = data->num_sensors; |
1152 | pmbus_add_sensor(data, "curr", "lcrit", in_index, page, | 1152 | pmbus_add_sensor(data, "curr", "lcrit", in_index, page, |
1153 | PMBUS_IOUT_UC_FAULT_LIMIT, | 1153 | PMBUS_IOUT_UC_FAULT_LIMIT, |
1154 | PSC_CURRENT_OUT, false); | 1154 | PSC_CURRENT_OUT, false, false); |
1155 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { | 1155 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { |
1156 | pmbus_add_boolean_reg(data, "curr", | 1156 | pmbus_add_boolean_reg(data, "curr", |
1157 | "lcrit_alarm", | 1157 | "lcrit_alarm", |
@@ -1166,7 +1166,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1166 | i1 = data->num_sensors; | 1166 | i1 = data->num_sensors; |
1167 | pmbus_add_sensor(data, "curr", "crit", in_index, page, | 1167 | pmbus_add_sensor(data, "curr", "crit", in_index, page, |
1168 | PMBUS_IOUT_OC_FAULT_LIMIT, | 1168 | PMBUS_IOUT_OC_FAULT_LIMIT, |
1169 | PSC_CURRENT_OUT, false); | 1169 | PSC_CURRENT_OUT, false, false); |
1170 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { | 1170 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { |
1171 | pmbus_add_boolean_reg(data, "curr", | 1171 | pmbus_add_boolean_reg(data, "curr", |
1172 | "crit_alarm", | 1172 | "crit_alarm", |
@@ -1199,13 +1199,13 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1199 | i0 = data->num_sensors; | 1199 | i0 = data->num_sensors; |
1200 | pmbus_add_label(data, "power", in_index, "pin", 0); | 1200 | pmbus_add_label(data, "power", in_index, "pin", 0); |
1201 | pmbus_add_sensor(data, "power", "input", in_index, | 1201 | pmbus_add_sensor(data, "power", "input", in_index, |
1202 | 0, PMBUS_READ_PIN, PSC_POWER, true); | 1202 | 0, PMBUS_READ_PIN, PSC_POWER, true, true); |
1203 | if (pmbus_check_word_register(client, 0, | 1203 | if (pmbus_check_word_register(client, 0, |
1204 | PMBUS_PIN_OP_WARN_LIMIT)) { | 1204 | PMBUS_PIN_OP_WARN_LIMIT)) { |
1205 | i1 = data->num_sensors; | 1205 | i1 = data->num_sensors; |
1206 | pmbus_add_sensor(data, "power", "max", in_index, | 1206 | pmbus_add_sensor(data, "power", "max", in_index, |
1207 | 0, PMBUS_PIN_OP_WARN_LIMIT, PSC_POWER, | 1207 | 0, PMBUS_PIN_OP_WARN_LIMIT, PSC_POWER, |
1208 | false); | 1208 | false, false); |
1209 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) | 1209 | if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) |
1210 | pmbus_add_boolean_reg(data, "power", | 1210 | pmbus_add_boolean_reg(data, "power", |
1211 | "alarm", | 1211 | "alarm", |
@@ -1228,7 +1228,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1228 | i0 = data->num_sensors; | 1228 | i0 = data->num_sensors; |
1229 | pmbus_add_label(data, "power", in_index, "pout", page + 1); | 1229 | pmbus_add_label(data, "power", in_index, "pout", page + 1); |
1230 | pmbus_add_sensor(data, "power", "input", in_index, page, | 1230 | pmbus_add_sensor(data, "power", "input", in_index, page, |
1231 | PMBUS_READ_POUT, PSC_POWER, true); | 1231 | PMBUS_READ_POUT, PSC_POWER, true, true); |
1232 | /* | 1232 | /* |
1233 | * Per hwmon sysfs API, power_cap is to be used to limit output | 1233 | * Per hwmon sysfs API, power_cap is to be used to limit output |
1234 | * power. | 1234 | * power. |
@@ -1241,7 +1241,8 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1241 | if (pmbus_check_word_register(client, page, PMBUS_POUT_MAX)) { | 1241 | if (pmbus_check_word_register(client, page, PMBUS_POUT_MAX)) { |
1242 | i1 = data->num_sensors; | 1242 | i1 = data->num_sensors; |
1243 | pmbus_add_sensor(data, "power", "cap", in_index, page, | 1243 | pmbus_add_sensor(data, "power", "cap", in_index, page, |
1244 | PMBUS_POUT_MAX, PSC_POWER, false); | 1244 | PMBUS_POUT_MAX, PSC_POWER, |
1245 | false, false); | ||
1245 | need_alarm = true; | 1246 | need_alarm = true; |
1246 | } | 1247 | } |
1247 | if (pmbus_check_word_register(client, page, | 1248 | if (pmbus_check_word_register(client, page, |
@@ -1249,7 +1250,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1249 | i1 = data->num_sensors; | 1250 | i1 = data->num_sensors; |
1250 | pmbus_add_sensor(data, "power", "max", in_index, page, | 1251 | pmbus_add_sensor(data, "power", "max", in_index, page, |
1251 | PMBUS_POUT_OP_WARN_LIMIT, PSC_POWER, | 1252 | PMBUS_POUT_OP_WARN_LIMIT, PSC_POWER, |
1252 | false); | 1253 | false, false); |
1253 | need_alarm = true; | 1254 | need_alarm = true; |
1254 | } | 1255 | } |
1255 | if (need_alarm && (info->func[page] & PMBUS_HAVE_STATUS_IOUT)) | 1256 | if (need_alarm && (info->func[page] & PMBUS_HAVE_STATUS_IOUT)) |
@@ -1264,7 +1265,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1264 | i1 = data->num_sensors; | 1265 | i1 = data->num_sensors; |
1265 | pmbus_add_sensor(data, "power", "crit", in_index, page, | 1266 | pmbus_add_sensor(data, "power", "crit", in_index, page, |
1266 | PMBUS_POUT_OP_FAULT_LIMIT, PSC_POWER, | 1267 | PMBUS_POUT_OP_FAULT_LIMIT, PSC_POWER, |
1267 | false); | 1268 | false, false); |
1268 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) | 1269 | if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) |
1269 | pmbus_add_boolean_reg(data, "power", | 1270 | pmbus_add_boolean_reg(data, "power", |
1270 | "crit_alarm", | 1271 | "crit_alarm", |
@@ -1302,7 +1303,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1302 | i0 = data->num_sensors; | 1303 | i0 = data->num_sensors; |
1303 | pmbus_add_sensor(data, "temp", "input", in_index, page, | 1304 | pmbus_add_sensor(data, "temp", "input", in_index, page, |
1304 | pmbus_temp_registers[t], | 1305 | pmbus_temp_registers[t], |
1305 | PSC_TEMPERATURE, true); | 1306 | PSC_TEMPERATURE, true, true); |
1306 | 1307 | ||
1307 | /* | 1308 | /* |
1308 | * PMBus provides only one status register for TEMP1-3. | 1309 | * PMBus provides only one status register for TEMP1-3. |
@@ -1323,7 +1324,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1323 | i1 = data->num_sensors; | 1324 | i1 = data->num_sensors; |
1324 | pmbus_add_sensor(data, "temp", "min", in_index, | 1325 | pmbus_add_sensor(data, "temp", "min", in_index, |
1325 | page, PMBUS_UT_WARN_LIMIT, | 1326 | page, PMBUS_UT_WARN_LIMIT, |
1326 | PSC_TEMPERATURE, true); | 1327 | PSC_TEMPERATURE, true, false); |
1327 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { | 1328 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { |
1328 | pmbus_add_boolean_cmp(data, "temp", | 1329 | pmbus_add_boolean_cmp(data, "temp", |
1329 | "min_alarm", in_index, i1, i0, | 1330 | "min_alarm", in_index, i1, i0, |
@@ -1338,7 +1339,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1338 | pmbus_add_sensor(data, "temp", "lcrit", | 1339 | pmbus_add_sensor(data, "temp", "lcrit", |
1339 | in_index, page, | 1340 | in_index, page, |
1340 | PMBUS_UT_FAULT_LIMIT, | 1341 | PMBUS_UT_FAULT_LIMIT, |
1341 | PSC_TEMPERATURE, true); | 1342 | PSC_TEMPERATURE, true, false); |
1342 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { | 1343 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { |
1343 | pmbus_add_boolean_cmp(data, "temp", | 1344 | pmbus_add_boolean_cmp(data, "temp", |
1344 | "lcrit_alarm", in_index, i1, i0, | 1345 | "lcrit_alarm", in_index, i1, i0, |
@@ -1352,7 +1353,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1352 | i1 = data->num_sensors; | 1353 | i1 = data->num_sensors; |
1353 | pmbus_add_sensor(data, "temp", "max", in_index, | 1354 | pmbus_add_sensor(data, "temp", "max", in_index, |
1354 | page, PMBUS_OT_WARN_LIMIT, | 1355 | page, PMBUS_OT_WARN_LIMIT, |
1355 | PSC_TEMPERATURE, true); | 1356 | PSC_TEMPERATURE, true, false); |
1356 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { | 1357 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { |
1357 | pmbus_add_boolean_cmp(data, "temp", | 1358 | pmbus_add_boolean_cmp(data, "temp", |
1358 | "max_alarm", in_index, i0, i1, | 1359 | "max_alarm", in_index, i0, i1, |
@@ -1366,7 +1367,7 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1366 | i1 = data->num_sensors; | 1367 | i1 = data->num_sensors; |
1367 | pmbus_add_sensor(data, "temp", "crit", in_index, | 1368 | pmbus_add_sensor(data, "temp", "crit", in_index, |
1368 | page, PMBUS_OT_FAULT_LIMIT, | 1369 | page, PMBUS_OT_FAULT_LIMIT, |
1369 | PSC_TEMPERATURE, true); | 1370 | PSC_TEMPERATURE, true, false); |
1370 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { | 1371 | if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { |
1371 | pmbus_add_boolean_cmp(data, "temp", | 1372 | pmbus_add_boolean_cmp(data, "temp", |
1372 | "crit_alarm", in_index, i0, i1, | 1373 | "crit_alarm", in_index, i0, i1, |
@@ -1421,7 +1422,8 @@ static void pmbus_find_attributes(struct i2c_client *client, | |||
1421 | 1422 | ||
1422 | i0 = data->num_sensors; | 1423 | i0 = data->num_sensors; |
1423 | pmbus_add_sensor(data, "fan", "input", in_index, page, | 1424 | pmbus_add_sensor(data, "fan", "input", in_index, page, |
1424 | pmbus_fan_registers[f], PSC_FAN, true); | 1425 | pmbus_fan_registers[f], PSC_FAN, true, |
1426 | true); | ||
1425 | 1427 | ||
1426 | /* | 1428 | /* |
1427 | * Each fan status register covers multiple fans, | 1429 | * Each fan status register covers multiple fans, |
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig index eb4af28f8567..1f29bab6b3e5 100644 --- a/drivers/hwspinlock/Kconfig +++ b/drivers/hwspinlock/Kconfig | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | config HWSPINLOCK | 5 | config HWSPINLOCK |
6 | tristate "Generic Hardware Spinlock framework" | 6 | tristate "Generic Hardware Spinlock framework" |
7 | depends on ARCH_OMAP4 | ||
7 | help | 8 | help |
8 | Say y here to support the generic hardware spinlock framework. | 9 | Say y here to support the generic hardware spinlock framework. |
9 | You only need to enable this if you have hardware spinlock module | 10 | You only need to enable this if you have hardware spinlock module |
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index f4077840d3ab..0e406d73b2c8 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
@@ -440,6 +440,7 @@ void do_ide_request(struct request_queue *q) | |||
440 | struct ide_host *host = hwif->host; | 440 | struct ide_host *host = hwif->host; |
441 | struct request *rq = NULL; | 441 | struct request *rq = NULL; |
442 | ide_startstop_t startstop; | 442 | ide_startstop_t startstop; |
443 | unsigned long queue_run_ms = 3; /* old plug delay */ | ||
443 | 444 | ||
444 | spin_unlock_irq(q->queue_lock); | 445 | spin_unlock_irq(q->queue_lock); |
445 | 446 | ||
@@ -459,6 +460,9 @@ repeat: | |||
459 | prev_port = hwif->host->cur_port; | 460 | prev_port = hwif->host->cur_port; |
460 | if (drive->dev_flags & IDE_DFLAG_SLEEPING && | 461 | if (drive->dev_flags & IDE_DFLAG_SLEEPING && |
461 | time_after(drive->sleep, jiffies)) { | 462 | time_after(drive->sleep, jiffies)) { |
463 | unsigned long left = jiffies - drive->sleep; | ||
464 | |||
465 | queue_run_ms = jiffies_to_msecs(left + 1); | ||
462 | ide_unlock_port(hwif); | 466 | ide_unlock_port(hwif); |
463 | goto plug_device; | 467 | goto plug_device; |
464 | } | 468 | } |
@@ -547,8 +551,10 @@ plug_device: | |||
547 | plug_device_2: | 551 | plug_device_2: |
548 | spin_lock_irq(q->queue_lock); | 552 | spin_lock_irq(q->queue_lock); |
549 | 553 | ||
550 | if (rq) | 554 | if (rq) { |
551 | blk_requeue_request(q, rq); | 555 | blk_requeue_request(q, rq); |
556 | blk_delay_queue(q, queue_run_ms); | ||
557 | } | ||
552 | } | 558 | } |
553 | 559 | ||
554 | void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) | 560 | void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) |
@@ -562,6 +568,10 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) | |||
562 | blk_requeue_request(q, rq); | 568 | blk_requeue_request(q, rq); |
563 | 569 | ||
564 | spin_unlock_irqrestore(q->queue_lock, flags); | 570 | spin_unlock_irqrestore(q->queue_lock, flags); |
571 | |||
572 | /* Use 3ms as that was the old plug delay */ | ||
573 | if (rq) | ||
574 | blk_delay_queue(q, 3); | ||
565 | } | 575 | } |
566 | 576 | ||
567 | static int drive_is_ready(ide_drive_t *drive) | 577 | static int drive_is_ready(ide_drive_t *drive) |
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c index b732870ecc89..71f744a8e686 100644 --- a/drivers/input/keyboard/lm8323.c +++ b/drivers/input/keyboard/lm8323.c | |||
@@ -809,7 +809,7 @@ static int lm8323_suspend(struct device *dev) | |||
809 | struct lm8323_chip *lm = i2c_get_clientdata(client); | 809 | struct lm8323_chip *lm = i2c_get_clientdata(client); |
810 | int i; | 810 | int i; |
811 | 811 | ||
812 | set_irq_wake(client->irq, 0); | 812 | irq_set_irq_wake(client->irq, 0); |
813 | disable_irq(client->irq); | 813 | disable_irq(client->irq); |
814 | 814 | ||
815 | mutex_lock(&lm->lock); | 815 | mutex_lock(&lm->lock); |
@@ -838,7 +838,7 @@ static int lm8323_resume(struct device *dev) | |||
838 | led_classdev_resume(&lm->pwm[i].cdev); | 838 | led_classdev_resume(&lm->pwm[i].cdev); |
839 | 839 | ||
840 | enable_irq(client->irq); | 840 | enable_irq(client->irq); |
841 | set_irq_wake(client->irq, 1); | 841 | irq_set_irq_wake(client->irq, 1); |
842 | 842 | ||
843 | return 0; | 843 | return 0; |
844 | } | 844 | } |
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c index ebe955325677..4b2a42f9f0bb 100644 --- a/drivers/input/serio/ams_delta_serio.c +++ b/drivers/input/serio/ams_delta_serio.c | |||
@@ -149,7 +149,7 @@ static int __init ams_delta_serio_init(void) | |||
149 | * at FIQ level, switch back from edge to simple interrupt handler | 149 | * at FIQ level, switch back from edge to simple interrupt handler |
150 | * to avoid bad interaction. | 150 | * to avoid bad interaction. |
151 | */ | 151 | */ |
152 | set_irq_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), | 152 | irq_set_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), |
153 | handle_simple_irq); | 153 | handle_simple_irq); |
154 | 154 | ||
155 | serio_register_port(ams_delta_serio); | 155 | serio_register_port(ams_delta_serio); |
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c index b6b8b1c7ecea..3242e7076258 100644 --- a/drivers/input/touchscreen/mainstone-wm97xx.c +++ b/drivers/input/touchscreen/mainstone-wm97xx.c | |||
@@ -219,7 +219,7 @@ static int wm97xx_acc_startup(struct wm97xx *wm) | |||
219 | } | 219 | } |
220 | 220 | ||
221 | wm->pen_irq = gpio_to_irq(irq); | 221 | wm->pen_irq = gpio_to_irq(irq); |
222 | set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH); | 222 | irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH); |
223 | } else /* pen irq not supported */ | 223 | } else /* pen irq not supported */ |
224 | pen_int = 0; | 224 | pen_int = 0; |
225 | 225 | ||
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c index 048849867643..5b0f15ec874a 100644 --- a/drivers/input/touchscreen/zylonite-wm97xx.c +++ b/drivers/input/touchscreen/zylonite-wm97xx.c | |||
@@ -193,7 +193,7 @@ static int zylonite_wm97xx_probe(struct platform_device *pdev) | |||
193 | gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26); | 193 | gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26); |
194 | 194 | ||
195 | wm->pen_irq = IRQ_GPIO(gpio_touch_irq); | 195 | wm->pen_irq = IRQ_GPIO(gpio_touch_irq); |
196 | set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH); | 196 | irq_set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH); |
197 | 197 | ||
198 | wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN, | 198 | wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN, |
199 | WM97XX_GPIO_POL_HIGH, | 199 | WM97XX_GPIO_POL_HIGH, |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 06ecea751a39..8b66e04c2ea6 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -1777,12 +1777,6 @@ int md_integrity_register(mddev_t *mddev) | |||
1777 | continue; | 1777 | continue; |
1778 | if (rdev->raid_disk < 0) | 1778 | if (rdev->raid_disk < 0) |
1779 | continue; | 1779 | continue; |
1780 | /* | ||
1781 | * If at least one rdev is not integrity capable, we can not | ||
1782 | * enable data integrity for the md device. | ||
1783 | */ | ||
1784 | if (!bdev_get_integrity(rdev->bdev)) | ||
1785 | return -EINVAL; | ||
1786 | if (!reference) { | 1780 | if (!reference) { |
1787 | /* Use the first rdev as the reference */ | 1781 | /* Use the first rdev as the reference */ |
1788 | reference = rdev; | 1782 | reference = rdev; |
@@ -1793,6 +1787,8 @@ int md_integrity_register(mddev_t *mddev) | |||
1793 | rdev->bdev->bd_disk) < 0) | 1787 | rdev->bdev->bd_disk) < 0) |
1794 | return -EINVAL; | 1788 | return -EINVAL; |
1795 | } | 1789 | } |
1790 | if (!reference || !bdev_get_integrity(reference->bdev)) | ||
1791 | return 0; | ||
1796 | /* | 1792 | /* |
1797 | * All component devices are integrity capable and have matching | 1793 | * All component devices are integrity capable and have matching |
1798 | * profiles, register the common profile for the md device. | 1794 | * profiles, register the common profile for the md device. |
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c index 767406c95291..700d420a59ac 100644 --- a/drivers/memstick/host/r592.c +++ b/drivers/memstick/host/r592.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <linux/swab.h> | 23 | #include <linux/swab.h> |
24 | #include "r592.h" | 24 | #include "r592.h" |
25 | 25 | ||
26 | static int enable_dma = 1; | 26 | static int r592_enable_dma = 1; |
27 | static int debug; | 27 | static int debug; |
28 | 28 | ||
29 | static const char *tpc_names[] = { | 29 | static const char *tpc_names[] = { |
@@ -267,7 +267,7 @@ static void r592_stop_dma(struct r592_device *dev, int error) | |||
267 | /* Test if hardware supports DMA */ | 267 | /* Test if hardware supports DMA */ |
268 | static void r592_check_dma(struct r592_device *dev) | 268 | static void r592_check_dma(struct r592_device *dev) |
269 | { | 269 | { |
270 | dev->dma_capable = enable_dma && | 270 | dev->dma_capable = r592_enable_dma && |
271 | (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) & | 271 | (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) & |
272 | R592_FIFO_DMA_SETTINGS_CAP); | 272 | R592_FIFO_DMA_SETTINGS_CAP); |
273 | } | 273 | } |
@@ -898,7 +898,7 @@ static void __exit r592_module_exit(void) | |||
898 | module_init(r592_module_init); | 898 | module_init(r592_module_init); |
899 | module_exit(r592_module_exit); | 899 | module_exit(r592_module_exit); |
900 | 900 | ||
901 | module_param(enable_dma, bool, S_IRUGO); | 901 | module_param_named(enable_dma, r592_enable_dma, bool, S_IRUGO); |
902 | MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)"); | 902 | MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)"); |
903 | module_param(debug, int, S_IRUGO | S_IWUSR); | 903 | module_param(debug, int, S_IRUGO | S_IWUSR); |
904 | MODULE_PARM_DESC(debug, "Debug level (0-3)"); | 904 | MODULE_PARM_DESC(debug, "Debug level (0-3)"); |
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c index 9c511c1604a5..011cb6ce861b 100644 --- a/drivers/mfd/88pm860x-core.c +++ b/drivers/mfd/88pm860x-core.c | |||
@@ -416,7 +416,6 @@ static int __devinit device_irq_init(struct pm860x_chip *chip, | |||
416 | : chip->companion; | 416 | : chip->companion; |
417 | unsigned char status_buf[INT_STATUS_NUM]; | 417 | unsigned char status_buf[INT_STATUS_NUM]; |
418 | unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; | 418 | unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; |
419 | struct irq_desc *desc; | ||
420 | int i, data, mask, ret = -EINVAL; | 419 | int i, data, mask, ret = -EINVAL; |
421 | int __irq; | 420 | int __irq; |
422 | 421 | ||
@@ -468,19 +467,17 @@ static int __devinit device_irq_init(struct pm860x_chip *chip, | |||
468 | if (!chip->core_irq) | 467 | if (!chip->core_irq) |
469 | goto out; | 468 | goto out; |
470 | 469 | ||
471 | desc = irq_to_desc(chip->core_irq); | ||
472 | |||
473 | /* register IRQ by genirq */ | 470 | /* register IRQ by genirq */ |
474 | for (i = 0; i < ARRAY_SIZE(pm860x_irqs); i++) { | 471 | for (i = 0; i < ARRAY_SIZE(pm860x_irqs); i++) { |
475 | __irq = i + chip->irq_base; | 472 | __irq = i + chip->irq_base; |
476 | set_irq_chip_data(__irq, chip); | 473 | irq_set_chip_data(__irq, chip); |
477 | set_irq_chip_and_handler(__irq, &pm860x_irq_chip, | 474 | irq_set_chip_and_handler(__irq, &pm860x_irq_chip, |
478 | handle_edge_irq); | 475 | handle_edge_irq); |
479 | set_irq_nested_thread(__irq, 1); | 476 | irq_set_nested_thread(__irq, 1); |
480 | #ifdef CONFIG_ARM | 477 | #ifdef CONFIG_ARM |
481 | set_irq_flags(__irq, IRQF_VALID); | 478 | set_irq_flags(__irq, IRQF_VALID); |
482 | #else | 479 | #else |
483 | set_irq_noprobe(__irq); | 480 | irq_set_noprobe(__irq); |
484 | #endif | 481 | #endif |
485 | } | 482 | } |
486 | 483 | ||
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index a9a1af49281e..e2fea580585a 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
@@ -60,15 +60,6 @@ config MFD_ASIC3 | |||
60 | This driver supports the ASIC3 multifunction chip found on many | 60 | This driver supports the ASIC3 multifunction chip found on many |
61 | PDAs (mainly iPAQ and HTC based ones) | 61 | PDAs (mainly iPAQ and HTC based ones) |
62 | 62 | ||
63 | config MFD_SH_MOBILE_SDHI | ||
64 | bool "Support for SuperH Mobile SDHI" | ||
65 | depends on SUPERH || ARCH_SHMOBILE | ||
66 | select MFD_CORE | ||
67 | select TMIO_MMC_DMA | ||
68 | ---help--- | ||
69 | This driver supports the SDHI hardware block found in many | ||
70 | SuperH Mobile SoCs. | ||
71 | |||
72 | config MFD_DAVINCI_VOICECODEC | 63 | config MFD_DAVINCI_VOICECODEC |
73 | tristate | 64 | tristate |
74 | select MFD_CORE | 65 | select MFD_CORE |
@@ -133,6 +124,7 @@ config TPS6105X | |||
133 | tristate "TPS61050/61052 Boost Converters" | 124 | tristate "TPS61050/61052 Boost Converters" |
134 | depends on I2C | 125 | depends on I2C |
135 | select REGULATOR | 126 | select REGULATOR |
127 | select MFD_CORE | ||
136 | select REGULATOR_FIXED_VOLTAGE | 128 | select REGULATOR_FIXED_VOLTAGE |
137 | help | 129 | help |
138 | This option enables a driver for the TP61050/TPS61052 | 130 | This option enables a driver for the TP61050/TPS61052 |
@@ -265,11 +257,6 @@ config MFD_TMIO | |||
265 | bool | 257 | bool |
266 | default n | 258 | default n |
267 | 259 | ||
268 | config TMIO_MMC_DMA | ||
269 | bool | ||
270 | select DMA_ENGINE | ||
271 | select DMADEVICES | ||
272 | |||
273 | config MFD_T7L66XB | 260 | config MFD_T7L66XB |
274 | bool "Support Toshiba T7L66XB" | 261 | bool "Support Toshiba T7L66XB" |
275 | depends on ARM && HAVE_CLK | 262 | depends on ARM && HAVE_CLK |
@@ -591,7 +578,7 @@ config AB3550_CORE | |||
591 | config MFD_CS5535 | 578 | config MFD_CS5535 |
592 | tristate "Support for CS5535 and CS5536 southbridge core functions" | 579 | tristate "Support for CS5535 and CS5536 southbridge core functions" |
593 | select MFD_CORE | 580 | select MFD_CORE |
594 | depends on PCI | 581 | depends on PCI && X86 |
595 | ---help--- | 582 | ---help--- |
596 | This is the core driver for CS5535/CS5536 MFD functions. This is | 583 | This is the core driver for CS5535/CS5536 MFD functions. This is |
597 | necessary for using the board's GPIO and MFGPT functionality. | 584 | necessary for using the board's GPIO and MFGPT functionality. |
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 47f5709f3828..419caa9d7dcf 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile | |||
@@ -6,7 +6,6 @@ | |||
6 | obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o | 6 | obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o |
7 | obj-$(CONFIG_MFD_SM501) += sm501.o | 7 | obj-$(CONFIG_MFD_SM501) += sm501.o |
8 | obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o | 8 | obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o |
9 | obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o | ||
10 | 9 | ||
11 | obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o | 10 | obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o |
12 | obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o | 11 | obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o |
@@ -63,7 +62,7 @@ obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o | |||
63 | obj-$(CONFIG_PMIC_DA903X) += da903x.o | 62 | obj-$(CONFIG_PMIC_DA903X) += da903x.o |
64 | max8925-objs := max8925-core.o max8925-i2c.o | 63 | max8925-objs := max8925-core.o max8925-i2c.o |
65 | obj-$(CONFIG_MFD_MAX8925) += max8925.o | 64 | obj-$(CONFIG_MFD_MAX8925) += max8925.o |
66 | obj-$(CONFIG_MFD_MAX8997) += max8997.o | 65 | obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o |
67 | obj-$(CONFIG_MFD_MAX8998) += max8998.o max8998-irq.o | 66 | obj-$(CONFIG_MFD_MAX8998) += max8998.o max8998-irq.o |
68 | 67 | ||
69 | pcf50633-objs := pcf50633-core.o pcf50633-irq.o | 68 | pcf50633-objs := pcf50633-core.o pcf50633-irq.o |
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c index c12d04285226..ff86acf3e6bd 100644 --- a/drivers/mfd/ab3550-core.c +++ b/drivers/mfd/ab3550-core.c | |||
@@ -668,7 +668,7 @@ static int ab3550_startup_irq_enabled(struct device *dev, unsigned int irq) | |||
668 | struct ab3550_platform_data *plf_data; | 668 | struct ab3550_platform_data *plf_data; |
669 | bool val; | 669 | bool val; |
670 | 670 | ||
671 | ab = get_irq_chip_data(irq); | 671 | ab = irq_get_chip_data(irq); |
672 | plf_data = ab->i2c_client[0]->dev.platform_data; | 672 | plf_data = ab->i2c_client[0]->dev.platform_data; |
673 | irq -= plf_data->irq.base; | 673 | irq -= plf_data->irq.base; |
674 | val = ((ab->startup_events[irq / 8] & BIT(irq % 8)) != 0); | 674 | val = ((ab->startup_events[irq / 8] & BIT(irq % 8)) != 0); |
@@ -1296,14 +1296,14 @@ static int __init ab3550_probe(struct i2c_client *client, | |||
1296 | unsigned int irq; | 1296 | unsigned int irq; |
1297 | 1297 | ||
1298 | irq = ab3550_plf_data->irq.base + i; | 1298 | irq = ab3550_plf_data->irq.base + i; |
1299 | set_irq_chip_data(irq, ab); | 1299 | irq_set_chip_data(irq, ab); |
1300 | set_irq_chip_and_handler(irq, &ab3550_irq_chip, | 1300 | irq_set_chip_and_handler(irq, &ab3550_irq_chip, |
1301 | handle_simple_irq); | 1301 | handle_simple_irq); |
1302 | set_irq_nested_thread(irq, 1); | 1302 | irq_set_nested_thread(irq, 1); |
1303 | #ifdef CONFIG_ARM | 1303 | #ifdef CONFIG_ARM |
1304 | set_irq_flags(irq, IRQF_VALID); | 1304 | set_irq_flags(irq, IRQF_VALID); |
1305 | #else | 1305 | #else |
1306 | set_irq_noprobe(irq); | 1306 | irq_set_noprobe(irq); |
1307 | #endif | 1307 | #endif |
1308 | } | 1308 | } |
1309 | 1309 | ||
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index 6e185b272d00..67d01c938284 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c | |||
@@ -334,14 +334,14 @@ static int ab8500_irq_init(struct ab8500 *ab8500) | |||
334 | int irq; | 334 | int irq; |
335 | 335 | ||
336 | for (irq = base; irq < base + AB8500_NR_IRQS; irq++) { | 336 | for (irq = base; irq < base + AB8500_NR_IRQS; irq++) { |
337 | set_irq_chip_data(irq, ab8500); | 337 | irq_set_chip_data(irq, ab8500); |
338 | set_irq_chip_and_handler(irq, &ab8500_irq_chip, | 338 | irq_set_chip_and_handler(irq, &ab8500_irq_chip, |
339 | handle_simple_irq); | 339 | handle_simple_irq); |
340 | set_irq_nested_thread(irq, 1); | 340 | irq_set_nested_thread(irq, 1); |
341 | #ifdef CONFIG_ARM | 341 | #ifdef CONFIG_ARM |
342 | set_irq_flags(irq, IRQF_VALID); | 342 | set_irq_flags(irq, IRQF_VALID); |
343 | #else | 343 | #else |
344 | set_irq_noprobe(irq); | 344 | irq_set_noprobe(irq); |
345 | #endif | 345 | #endif |
346 | } | 346 | } |
347 | 347 | ||
@@ -357,11 +357,20 @@ static void ab8500_irq_remove(struct ab8500 *ab8500) | |||
357 | #ifdef CONFIG_ARM | 357 | #ifdef CONFIG_ARM |
358 | set_irq_flags(irq, 0); | 358 | set_irq_flags(irq, 0); |
359 | #endif | 359 | #endif |
360 | set_irq_chip_and_handler(irq, NULL, NULL); | 360 | irq_set_chip_and_handler(irq, NULL, NULL); |
361 | set_irq_chip_data(irq, NULL); | 361 | irq_set_chip_data(irq, NULL); |
362 | } | 362 | } |
363 | } | 363 | } |
364 | 364 | ||
365 | static struct resource ab8500_gpio_resources[] = { | ||
366 | { | ||
367 | .name = "GPIO_INT6", | ||
368 | .start = AB8500_INT_GPIO6R, | ||
369 | .end = AB8500_INT_GPIO41F, | ||
370 | .flags = IORESOURCE_IRQ, | ||
371 | } | ||
372 | }; | ||
373 | |||
365 | static struct resource ab8500_gpadc_resources[] = { | 374 | static struct resource ab8500_gpadc_resources[] = { |
366 | { | 375 | { |
367 | .name = "HW_CONV_END", | 376 | .name = "HW_CONV_END", |
@@ -596,6 +605,11 @@ static struct mfd_cell ab8500_devs[] = { | |||
596 | .name = "ab8500-regulator", | 605 | .name = "ab8500-regulator", |
597 | }, | 606 | }, |
598 | { | 607 | { |
608 | .name = "ab8500-gpio", | ||
609 | .num_resources = ARRAY_SIZE(ab8500_gpio_resources), | ||
610 | .resources = ab8500_gpio_resources, | ||
611 | }, | ||
612 | { | ||
599 | .name = "ab8500-gpadc", | 613 | .name = "ab8500-gpadc", |
600 | .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), | 614 | .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), |
601 | .resources = ab8500_gpadc_resources, | 615 | .resources = ab8500_gpadc_resources, |
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c index 6820327adf4a..821e6b86afd2 100644 --- a/drivers/mfd/ab8500-i2c.c +++ b/drivers/mfd/ab8500-i2c.c | |||
@@ -97,7 +97,7 @@ static void __exit ab8500_i2c_exit(void) | |||
97 | { | 97 | { |
98 | platform_driver_unregister(&ab8500_i2c_driver); | 98 | platform_driver_unregister(&ab8500_i2c_driver); |
99 | } | 99 | } |
100 | subsys_initcall(ab8500_i2c_init); | 100 | arch_initcall(ab8500_i2c_init); |
101 | module_exit(ab8500_i2c_exit); | 101 | module_exit(ab8500_i2c_exit); |
102 | 102 | ||
103 | MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com"); | 103 | MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com"); |
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 0241f08fc00d..d4a851c6b5bf 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c | |||
@@ -139,13 +139,12 @@ static void asic3_irq_flip_edge(struct asic3 *asic, | |||
139 | 139 | ||
140 | static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) | 140 | static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) |
141 | { | 141 | { |
142 | struct asic3 *asic = irq_desc_get_handler_data(desc); | ||
143 | struct irq_data *data = irq_desc_get_irq_data(desc); | ||
142 | int iter, i; | 144 | int iter, i; |
143 | unsigned long flags; | 145 | unsigned long flags; |
144 | struct asic3 *asic; | ||
145 | |||
146 | desc->irq_data.chip->irq_ack(&desc->irq_data); | ||
147 | 146 | ||
148 | asic = get_irq_data(irq); | 147 | data->chip->irq_ack(irq_data); |
149 | 148 | ||
150 | for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { | 149 | for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { |
151 | u32 status; | 150 | u32 status; |
@@ -188,8 +187,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
188 | irqnr = asic->irq_base + | 187 | irqnr = asic->irq_base + |
189 | (ASIC3_GPIOS_PER_BANK * bank) | 188 | (ASIC3_GPIOS_PER_BANK * bank) |
190 | + i; | 189 | + i; |
191 | desc = irq_to_desc(irqnr); | 190 | generic_handle_irq(irqnr); |
192 | desc->handle_irq(irqnr, desc); | ||
193 | if (asic->irq_bothedge[bank] & bit) | 191 | if (asic->irq_bothedge[bank] & bit) |
194 | asic3_irq_flip_edge(asic, base, | 192 | asic3_irq_flip_edge(asic, base, |
195 | bit); | 193 | bit); |
@@ -200,11 +198,8 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
200 | /* Handle remaining IRQs in the status register */ | 198 | /* Handle remaining IRQs in the status register */ |
201 | for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) { | 199 | for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) { |
202 | /* They start at bit 4 and go up */ | 200 | /* They start at bit 4 and go up */ |
203 | if (status & (1 << (i - ASIC3_NUM_GPIOS + 4))) { | 201 | if (status & (1 << (i - ASIC3_NUM_GPIOS + 4))) |
204 | desc = irq_to_desc(asic->irq_base + i); | 202 | generic_handle_irq(asic->irq_base + i); |
205 | desc->handle_irq(asic->irq_base + i, | ||
206 | desc); | ||
207 | } | ||
208 | } | 203 | } |
209 | } | 204 | } |
210 | 205 | ||
@@ -393,21 +388,21 @@ static int __init asic3_irq_probe(struct platform_device *pdev) | |||
393 | 388 | ||
394 | for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) { | 389 | for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) { |
395 | if (irq < asic->irq_base + ASIC3_NUM_GPIOS) | 390 | if (irq < asic->irq_base + ASIC3_NUM_GPIOS) |
396 | set_irq_chip(irq, &asic3_gpio_irq_chip); | 391 | irq_set_chip(irq, &asic3_gpio_irq_chip); |
397 | else | 392 | else |
398 | set_irq_chip(irq, &asic3_irq_chip); | 393 | irq_set_chip(irq, &asic3_irq_chip); |
399 | 394 | ||
400 | set_irq_chip_data(irq, asic); | 395 | irq_set_chip_data(irq, asic); |
401 | set_irq_handler(irq, handle_level_irq); | 396 | irq_set_handler(irq, handle_level_irq); |
402 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 397 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
403 | } | 398 | } |
404 | 399 | ||
405 | asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK), | 400 | asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK), |
406 | ASIC3_INTMASK_GINTMASK); | 401 | ASIC3_INTMASK_GINTMASK); |
407 | 402 | ||
408 | set_irq_chained_handler(asic->irq_nr, asic3_irq_demux); | 403 | irq_set_chained_handler(asic->irq_nr, asic3_irq_demux); |
409 | set_irq_type(asic->irq_nr, IRQ_TYPE_EDGE_RISING); | 404 | irq_set_irq_type(asic->irq_nr, IRQ_TYPE_EDGE_RISING); |
410 | set_irq_data(asic->irq_nr, asic); | 405 | irq_set_handler_data(asic->irq_nr, asic); |
411 | 406 | ||
412 | return 0; | 407 | return 0; |
413 | } | 408 | } |
@@ -421,11 +416,10 @@ static void asic3_irq_remove(struct platform_device *pdev) | |||
421 | 416 | ||
422 | for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) { | 417 | for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) { |
423 | set_irq_flags(irq, 0); | 418 | set_irq_flags(irq, 0); |
424 | set_irq_handler(irq, NULL); | 419 | irq_set_chip_and_handler(irq, NULL, NULL); |
425 | set_irq_chip(irq, NULL); | 420 | irq_set_chip_data(irq, NULL); |
426 | set_irq_chip_data(irq, NULL); | ||
427 | } | 421 | } |
428 | set_irq_chained_handler(asic->irq_nr, NULL); | 422 | irq_set_chained_handler(asic->irq_nr, NULL); |
429 | } | 423 | } |
430 | 424 | ||
431 | /* GPIOs */ | 425 | /* GPIOs */ |
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c index 886a06871065..155fa0407882 100644 --- a/drivers/mfd/cs5535-mfd.c +++ b/drivers/mfd/cs5535-mfd.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/mfd/core.h> | 27 | #include <linux/mfd/core.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
30 | #include <asm/olpc.h> | ||
30 | 31 | ||
31 | #define DRV_NAME "cs5535-mfd" | 32 | #define DRV_NAME "cs5535-mfd" |
32 | 33 | ||
@@ -111,6 +112,20 @@ static __devinitdata struct mfd_cell cs5535_mfd_cells[] = { | |||
111 | }, | 112 | }, |
112 | }; | 113 | }; |
113 | 114 | ||
115 | #ifdef CONFIG_OLPC | ||
116 | static void __devinit cs5535_clone_olpc_cells(void) | ||
117 | { | ||
118 | const char *acpi_clones[] = { "olpc-xo1-pm-acpi", "olpc-xo1-sci-acpi" }; | ||
119 | |||
120 | if (!machine_is_olpc()) | ||
121 | return; | ||
122 | |||
123 | mfd_clone_cell("cs5535-acpi", acpi_clones, ARRAY_SIZE(acpi_clones)); | ||
124 | } | ||
125 | #else | ||
126 | static void cs5535_clone_olpc_cells(void) { } | ||
127 | #endif | ||
128 | |||
114 | static int __devinit cs5535_mfd_probe(struct pci_dev *pdev, | 129 | static int __devinit cs5535_mfd_probe(struct pci_dev *pdev, |
115 | const struct pci_device_id *id) | 130 | const struct pci_device_id *id) |
116 | { | 131 | { |
@@ -139,6 +154,7 @@ static int __devinit cs5535_mfd_probe(struct pci_dev *pdev, | |||
139 | dev_err(&pdev->dev, "MFD add devices failed: %d\n", err); | 154 | dev_err(&pdev->dev, "MFD add devices failed: %d\n", err); |
140 | goto err_disable; | 155 | goto err_disable; |
141 | } | 156 | } |
157 | cs5535_clone_olpc_cells(); | ||
142 | 158 | ||
143 | dev_info(&pdev->dev, "%zu devices registered.\n", | 159 | dev_info(&pdev->dev, "%zu devices registered.\n", |
144 | ARRAY_SIZE(cs5535_mfd_cells)); | 160 | ARRAY_SIZE(cs5535_mfd_cells)); |
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 9e2d8dd5f9e5..f2f4029e21a0 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c | |||
@@ -162,6 +162,7 @@ static void pcap_unmask_irq(struct irq_data *d) | |||
162 | 162 | ||
163 | static struct irq_chip pcap_irq_chip = { | 163 | static struct irq_chip pcap_irq_chip = { |
164 | .name = "pcap", | 164 | .name = "pcap", |
165 | .irq_disable = pcap_mask_irq, | ||
165 | .irq_mask = pcap_mask_irq, | 166 | .irq_mask = pcap_mask_irq, |
166 | .irq_unmask = pcap_unmask_irq, | 167 | .irq_unmask = pcap_unmask_irq, |
167 | }; | 168 | }; |
@@ -196,17 +197,8 @@ static void pcap_isr_work(struct work_struct *work) | |||
196 | local_irq_disable(); | 197 | local_irq_disable(); |
197 | service = isr & ~msr; | 198 | service = isr & ~msr; |
198 | for (irq = pcap->irq_base; service; service >>= 1, irq++) { | 199 | for (irq = pcap->irq_base; service; service >>= 1, irq++) { |
199 | if (service & 1) { | 200 | if (service & 1) |
200 | struct irq_desc *desc = irq_to_desc(irq); | 201 | generic_handle_irq(irq); |
201 | |||
202 | if (WARN(!desc, "Invalid PCAP IRQ %d\n", irq)) | ||
203 | break; | ||
204 | |||
205 | if (desc->status & IRQ_DISABLED) | ||
206 | note_interrupt(irq, desc, IRQ_NONE); | ||
207 | else | ||
208 | desc->handle_irq(irq, desc); | ||
209 | } | ||
210 | } | 202 | } |
211 | local_irq_enable(); | 203 | local_irq_enable(); |
212 | ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr); | 204 | ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr); |
@@ -215,7 +207,7 @@ static void pcap_isr_work(struct work_struct *work) | |||
215 | 207 | ||
216 | static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) | 208 | static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) |
217 | { | 209 | { |
218 | struct pcap_chip *pcap = get_irq_data(irq); | 210 | struct pcap_chip *pcap = irq_get_handler_data(irq); |
219 | 211 | ||
220 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 212 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
221 | queue_work(pcap->workqueue, &pcap->isr_work); | 213 | queue_work(pcap->workqueue, &pcap->isr_work); |
@@ -419,7 +411,7 @@ static int __devexit ezx_pcap_remove(struct spi_device *spi) | |||
419 | 411 | ||
420 | /* cleanup irqchip */ | 412 | /* cleanup irqchip */ |
421 | for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) | 413 | for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) |
422 | set_irq_chip_and_handler(i, NULL, NULL); | 414 | irq_set_chip_and_handler(i, NULL, NULL); |
423 | 415 | ||
424 | destroy_workqueue(pcap->workqueue); | 416 | destroy_workqueue(pcap->workqueue); |
425 | 417 | ||
@@ -476,12 +468,12 @@ static int __devinit ezx_pcap_probe(struct spi_device *spi) | |||
476 | 468 | ||
477 | /* setup irq chip */ | 469 | /* setup irq chip */ |
478 | for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) { | 470 | for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) { |
479 | set_irq_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq); | 471 | irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq); |
480 | set_irq_chip_data(i, pcap); | 472 | irq_set_chip_data(i, pcap); |
481 | #ifdef CONFIG_ARM | 473 | #ifdef CONFIG_ARM |
482 | set_irq_flags(i, IRQF_VALID); | 474 | set_irq_flags(i, IRQF_VALID); |
483 | #else | 475 | #else |
484 | set_irq_noprobe(i); | 476 | irq_set_noprobe(i); |
485 | #endif | 477 | #endif |
486 | } | 478 | } |
487 | 479 | ||
@@ -490,10 +482,10 @@ static int __devinit ezx_pcap_probe(struct spi_device *spi) | |||
490 | ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER); | 482 | ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER); |
491 | pcap->msr = PCAP_MASK_ALL_INTERRUPT; | 483 | pcap->msr = PCAP_MASK_ALL_INTERRUPT; |
492 | 484 | ||
493 | set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING); | 485 | irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING); |
494 | set_irq_data(spi->irq, pcap); | 486 | irq_set_handler_data(spi->irq, pcap); |
495 | set_irq_chained_handler(spi->irq, pcap_irq_handler); | 487 | irq_set_chained_handler(spi->irq, pcap_irq_handler); |
496 | set_irq_wake(spi->irq, 1); | 488 | irq_set_irq_wake(spi->irq, 1); |
497 | 489 | ||
498 | /* ADC */ | 490 | /* ADC */ |
499 | adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ? | 491 | adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ? |
@@ -522,7 +514,7 @@ remove_subdevs: | |||
522 | free_irq(adc_irq, pcap); | 514 | free_irq(adc_irq, pcap); |
523 | free_irqchip: | 515 | free_irqchip: |
524 | for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) | 516 | for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) |
525 | set_irq_chip_and_handler(i, NULL, NULL); | 517 | irq_set_chip_and_handler(i, NULL, NULL); |
526 | /* destroy_workqueue: */ | 518 | /* destroy_workqueue: */ |
527 | destroy_workqueue(pcap->workqueue); | 519 | destroy_workqueue(pcap->workqueue); |
528 | free_pcap: | 520 | free_pcap: |
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c index d00b6d1a69e5..bbaec0ccba8f 100644 --- a/drivers/mfd/htc-egpio.c +++ b/drivers/mfd/htc-egpio.c | |||
@@ -100,7 +100,7 @@ static struct irq_chip egpio_muxed_chip = { | |||
100 | 100 | ||
101 | static void egpio_handler(unsigned int irq, struct irq_desc *desc) | 101 | static void egpio_handler(unsigned int irq, struct irq_desc *desc) |
102 | { | 102 | { |
103 | struct egpio_info *ei = get_irq_data(irq); | 103 | struct egpio_info *ei = irq_desc_get_handler_data(desc); |
104 | int irqpin; | 104 | int irqpin; |
105 | 105 | ||
106 | /* Read current pins. */ | 106 | /* Read current pins. */ |
@@ -113,9 +113,7 @@ static void egpio_handler(unsigned int irq, struct irq_desc *desc) | |||
113 | for_each_set_bit(irqpin, &readval, ei->nirqs) { | 113 | for_each_set_bit(irqpin, &readval, ei->nirqs) { |
114 | /* Run irq handler */ | 114 | /* Run irq handler */ |
115 | pr_debug("got IRQ %d\n", irqpin); | 115 | pr_debug("got IRQ %d\n", irqpin); |
116 | irq = ei->irq_start + irqpin; | 116 | generic_handle_irq(ei->irq_start + irqpin); |
117 | desc = irq_to_desc(irq); | ||
118 | desc->handle_irq(irq, desc); | ||
119 | } | 117 | } |
120 | } | 118 | } |
121 | 119 | ||
@@ -346,14 +344,14 @@ static int __init egpio_probe(struct platform_device *pdev) | |||
346 | ei->ack_write = 0; | 344 | ei->ack_write = 0; |
347 | irq_end = ei->irq_start + ei->nirqs; | 345 | irq_end = ei->irq_start + ei->nirqs; |
348 | for (irq = ei->irq_start; irq < irq_end; irq++) { | 346 | for (irq = ei->irq_start; irq < irq_end; irq++) { |
349 | set_irq_chip(irq, &egpio_muxed_chip); | 347 | irq_set_chip_and_handler(irq, &egpio_muxed_chip, |
350 | set_irq_chip_data(irq, ei); | 348 | handle_simple_irq); |
351 | set_irq_handler(irq, handle_simple_irq); | 349 | irq_set_chip_data(irq, ei); |
352 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 350 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
353 | } | 351 | } |
354 | set_irq_type(ei->chained_irq, IRQ_TYPE_EDGE_RISING); | 352 | irq_set_irq_type(ei->chained_irq, IRQ_TYPE_EDGE_RISING); |
355 | set_irq_data(ei->chained_irq, ei); | 353 | irq_set_handler_data(ei->chained_irq, ei); |
356 | set_irq_chained_handler(ei->chained_irq, egpio_handler); | 354 | irq_set_chained_handler(ei->chained_irq, egpio_handler); |
357 | ack_irqs(ei); | 355 | ack_irqs(ei); |
358 | 356 | ||
359 | device_init_wakeup(&pdev->dev, 1); | 357 | device_init_wakeup(&pdev->dev, 1); |
@@ -375,11 +373,10 @@ static int __exit egpio_remove(struct platform_device *pdev) | |||
375 | if (ei->chained_irq) { | 373 | if (ei->chained_irq) { |
376 | irq_end = ei->irq_start + ei->nirqs; | 374 | irq_end = ei->irq_start + ei->nirqs; |
377 | for (irq = ei->irq_start; irq < irq_end; irq++) { | 375 | for (irq = ei->irq_start; irq < irq_end; irq++) { |
378 | set_irq_chip(irq, NULL); | 376 | irq_set_chip_and_handler(irq, NULL, NULL); |
379 | set_irq_handler(irq, NULL); | ||
380 | set_irq_flags(irq, 0); | 377 | set_irq_flags(irq, 0); |
381 | } | 378 | } |
382 | set_irq_chained_handler(ei->chained_irq, NULL); | 379 | irq_set_chained_handler(ei->chained_irq, NULL); |
383 | device_init_wakeup(&pdev->dev, 0); | 380 | device_init_wakeup(&pdev->dev, 0); |
384 | } | 381 | } |
385 | iounmap(ei->base_addr); | 382 | iounmap(ei->base_addr); |
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c index 296ad1562f69..d55065cc324c 100644 --- a/drivers/mfd/htc-i2cpld.c +++ b/drivers/mfd/htc-i2cpld.c | |||
@@ -58,6 +58,7 @@ struct htcpld_chip { | |||
58 | uint irq_start; | 58 | uint irq_start; |
59 | int nirqs; | 59 | int nirqs; |
60 | 60 | ||
61 | unsigned int flow_type; | ||
61 | /* | 62 | /* |
62 | * Work structure to allow for setting values outside of any | 63 | * Work structure to allow for setting values outside of any |
63 | * possible interrupt context | 64 | * possible interrupt context |
@@ -97,12 +98,7 @@ static void htcpld_unmask(struct irq_data *data) | |||
97 | 98 | ||
98 | static int htcpld_set_type(struct irq_data *data, unsigned int flags) | 99 | static int htcpld_set_type(struct irq_data *data, unsigned int flags) |
99 | { | 100 | { |
100 | struct irq_desc *d = irq_to_desc(data->irq); | 101 | struct htcpld_chip *chip = irq_data_get_irq_chip_data(data); |
101 | |||
102 | if (!d) { | ||
103 | pr_err("HTCPLD invalid IRQ: %d\n", data->irq); | ||
104 | return -EINVAL; | ||
105 | } | ||
106 | 102 | ||
107 | if (flags & ~IRQ_TYPE_SENSE_MASK) | 103 | if (flags & ~IRQ_TYPE_SENSE_MASK) |
108 | return -EINVAL; | 104 | return -EINVAL; |
@@ -111,9 +107,7 @@ static int htcpld_set_type(struct irq_data *data, unsigned int flags) | |||
111 | if (flags & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)) | 107 | if (flags & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)) |
112 | return -EINVAL; | 108 | return -EINVAL; |
113 | 109 | ||
114 | d->status &= ~IRQ_TYPE_SENSE_MASK; | 110 | chip->flow_type = flags; |
115 | d->status |= flags; | ||
116 | |||
117 | return 0; | 111 | return 0; |
118 | } | 112 | } |
119 | 113 | ||
@@ -135,7 +129,6 @@ static irqreturn_t htcpld_handler(int irq, void *dev) | |||
135 | unsigned int i; | 129 | unsigned int i; |
136 | unsigned long flags; | 130 | unsigned long flags; |
137 | int irqpin; | 131 | int irqpin; |
138 | struct irq_desc *desc; | ||
139 | 132 | ||
140 | if (!htcpld) { | 133 | if (!htcpld) { |
141 | pr_debug("htcpld is null in ISR\n"); | 134 | pr_debug("htcpld is null in ISR\n"); |
@@ -195,23 +188,19 @@ static irqreturn_t htcpld_handler(int irq, void *dev) | |||
195 | * associated interrupts. | 188 | * associated interrupts. |
196 | */ | 189 | */ |
197 | for (irqpin = 0; irqpin < chip->nirqs; irqpin++) { | 190 | for (irqpin = 0; irqpin < chip->nirqs; irqpin++) { |
198 | unsigned oldb, newb; | 191 | unsigned oldb, newb, type = chip->flow_type; |
199 | int flags; | ||
200 | 192 | ||
201 | irq = chip->irq_start + irqpin; | 193 | irq = chip->irq_start + irqpin; |
202 | desc = irq_to_desc(irq); | ||
203 | flags = desc->status; | ||
204 | 194 | ||
205 | /* Run the IRQ handler, but only if the bit value | 195 | /* Run the IRQ handler, but only if the bit value |
206 | * changed, and the proper flags are set */ | 196 | * changed, and the proper flags are set */ |
207 | oldb = (old_val >> irqpin) & 1; | 197 | oldb = (old_val >> irqpin) & 1; |
208 | newb = (uval >> irqpin) & 1; | 198 | newb = (uval >> irqpin) & 1; |
209 | 199 | ||
210 | if ((!oldb && newb && (flags & IRQ_TYPE_EDGE_RISING)) || | 200 | if ((!oldb && newb && (type & IRQ_TYPE_EDGE_RISING)) || |
211 | (oldb && !newb && | 201 | (oldb && !newb && (type & IRQ_TYPE_EDGE_FALLING))) { |
212 | (flags & IRQ_TYPE_EDGE_FALLING))) { | ||
213 | pr_debug("fire IRQ %d\n", irqpin); | 202 | pr_debug("fire IRQ %d\n", irqpin); |
214 | desc->handle_irq(irq, desc); | 203 | generic_handle_irq(irq); |
215 | } | 204 | } |
216 | } | 205 | } |
217 | } | 206 | } |
@@ -359,13 +348,13 @@ static int __devinit htcpld_setup_chip_irq( | |||
359 | /* Setup irq handlers */ | 348 | /* Setup irq handlers */ |
360 | irq_end = chip->irq_start + chip->nirqs; | 349 | irq_end = chip->irq_start + chip->nirqs; |
361 | for (irq = chip->irq_start; irq < irq_end; irq++) { | 350 | for (irq = chip->irq_start; irq < irq_end; irq++) { |
362 | set_irq_chip(irq, &htcpld_muxed_chip); | 351 | irq_set_chip_and_handler(irq, &htcpld_muxed_chip, |
363 | set_irq_chip_data(irq, chip); | 352 | handle_simple_irq); |
364 | set_irq_handler(irq, handle_simple_irq); | 353 | irq_set_chip_data(irq, chip); |
365 | #ifdef CONFIG_ARM | 354 | #ifdef CONFIG_ARM |
366 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 355 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
367 | #else | 356 | #else |
368 | set_irq_probe(irq); | 357 | irq_set_probe(irq); |
369 | #endif | 358 | #endif |
370 | } | 359 | } |
371 | 360 | ||
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index aa518b9beaf5..a0bd0cf05af3 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c | |||
@@ -112,7 +112,7 @@ static struct irq_chip jz4740_adc_irq_chip = { | |||
112 | 112 | ||
113 | static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc) | 113 | static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc) |
114 | { | 114 | { |
115 | struct jz4740_adc *adc = get_irq_desc_data(desc); | 115 | struct jz4740_adc *adc = irq_desc_get_handler_data(desc); |
116 | uint8_t status; | 116 | uint8_t status; |
117 | unsigned int i; | 117 | unsigned int i; |
118 | 118 | ||
@@ -310,13 +310,13 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev) | |||
310 | platform_set_drvdata(pdev, adc); | 310 | platform_set_drvdata(pdev, adc); |
311 | 311 | ||
312 | for (irq = adc->irq_base; irq < adc->irq_base + 5; ++irq) { | 312 | for (irq = adc->irq_base; irq < adc->irq_base + 5; ++irq) { |
313 | set_irq_chip_data(irq, adc); | 313 | irq_set_chip_data(irq, adc); |
314 | set_irq_chip_and_handler(irq, &jz4740_adc_irq_chip, | 314 | irq_set_chip_and_handler(irq, &jz4740_adc_irq_chip, |
315 | handle_level_irq); | 315 | handle_level_irq); |
316 | } | 316 | } |
317 | 317 | ||
318 | set_irq_data(adc->irq, adc); | 318 | irq_set_handler_data(adc->irq, adc); |
319 | set_irq_chained_handler(adc->irq, jz4740_adc_irq_demux); | 319 | irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux); |
320 | 320 | ||
321 | writeb(0x00, adc->base + JZ_REG_ADC_ENABLE); | 321 | writeb(0x00, adc->base + JZ_REG_ADC_ENABLE); |
322 | writeb(0xff, adc->base + JZ_REG_ADC_CTRL); | 322 | writeb(0xff, adc->base + JZ_REG_ADC_CTRL); |
@@ -347,8 +347,8 @@ static int __devexit jz4740_adc_remove(struct platform_device *pdev) | |||
347 | 347 | ||
348 | mfd_remove_devices(&pdev->dev); | 348 | mfd_remove_devices(&pdev->dev); |
349 | 349 | ||
350 | set_irq_data(adc->irq, NULL); | 350 | irq_set_handler_data(adc->irq, NULL); |
351 | set_irq_chained_handler(adc->irq, NULL); | 351 | irq_set_chained_handler(adc->irq, NULL); |
352 | 352 | ||
353 | iounmap(adc->base); | 353 | iounmap(adc->base); |
354 | release_mem_region(adc->mem->start, resource_size(adc->mem)); | 354 | release_mem_region(adc->mem->start, resource_size(adc->mem)); |
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index 0e998dc4e7d8..58cc5fdde016 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c | |||
@@ -517,7 +517,6 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq, | |||
517 | struct max8925_platform_data *pdata) | 517 | struct max8925_platform_data *pdata) |
518 | { | 518 | { |
519 | unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; | 519 | unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; |
520 | struct irq_desc *desc; | ||
521 | int i, ret; | 520 | int i, ret; |
522 | int __irq; | 521 | int __irq; |
523 | 522 | ||
@@ -544,19 +543,18 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq, | |||
544 | mutex_init(&chip->irq_lock); | 543 | mutex_init(&chip->irq_lock); |
545 | chip->core_irq = irq; | 544 | chip->core_irq = irq; |
546 | chip->irq_base = pdata->irq_base; | 545 | chip->irq_base = pdata->irq_base; |
547 | desc = irq_to_desc(chip->core_irq); | ||
548 | 546 | ||
549 | /* register with genirq */ | 547 | /* register with genirq */ |
550 | for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { | 548 | for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { |
551 | __irq = i + chip->irq_base; | 549 | __irq = i + chip->irq_base; |
552 | set_irq_chip_data(__irq, chip); | 550 | irq_set_chip_data(__irq, chip); |
553 | set_irq_chip_and_handler(__irq, &max8925_irq_chip, | 551 | irq_set_chip_and_handler(__irq, &max8925_irq_chip, |
554 | handle_edge_irq); | 552 | handle_edge_irq); |
555 | set_irq_nested_thread(__irq, 1); | 553 | irq_set_nested_thread(__irq, 1); |
556 | #ifdef CONFIG_ARM | 554 | #ifdef CONFIG_ARM |
557 | set_irq_flags(__irq, IRQF_VALID); | 555 | set_irq_flags(__irq, IRQF_VALID); |
558 | #else | 556 | #else |
559 | set_irq_noprobe(__irq); | 557 | irq_set_noprobe(__irq); |
560 | #endif | 558 | #endif |
561 | } | 559 | } |
562 | if (!irq) { | 560 | if (!irq) { |
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c new file mode 100644 index 000000000000..638bf7e4d3b3 --- /dev/null +++ b/drivers/mfd/max8997-irq.c | |||
@@ -0,0 +1,377 @@ | |||
1 | /* | ||
2 | * max8997-irq.c - Interrupt controller support for MAX8997 | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics Co.Ltd | ||
5 | * MyungJoo Ham <myungjoo.ham@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | * This driver is based on max8998-irq.c | ||
22 | */ | ||
23 | |||
24 | #include <linux/err.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/mfd/max8997.h> | ||
28 | #include <linux/mfd/max8997-private.h> | ||
29 | |||
30 | static const u8 max8997_mask_reg[] = { | ||
31 | [PMIC_INT1] = MAX8997_REG_INT1MSK, | ||
32 | [PMIC_INT2] = MAX8997_REG_INT2MSK, | ||
33 | [PMIC_INT3] = MAX8997_REG_INT3MSK, | ||
34 | [PMIC_INT4] = MAX8997_REG_INT4MSK, | ||
35 | [FUEL_GAUGE] = MAX8997_REG_INVALID, | ||
36 | [MUIC_INT1] = MAX8997_MUIC_REG_INTMASK1, | ||
37 | [MUIC_INT2] = MAX8997_MUIC_REG_INTMASK2, | ||
38 | [MUIC_INT3] = MAX8997_MUIC_REG_INTMASK3, | ||
39 | [GPIO_LOW] = MAX8997_REG_INVALID, | ||
40 | [GPIO_HI] = MAX8997_REG_INVALID, | ||
41 | [FLASH_STATUS] = MAX8997_REG_INVALID, | ||
42 | }; | ||
43 | |||
44 | static struct i2c_client *get_i2c(struct max8997_dev *max8997, | ||
45 | enum max8997_irq_source src) | ||
46 | { | ||
47 | switch (src) { | ||
48 | case PMIC_INT1 ... PMIC_INT4: | ||
49 | return max8997->i2c; | ||
50 | case FUEL_GAUGE: | ||
51 | return NULL; | ||
52 | case MUIC_INT1 ... MUIC_INT3: | ||
53 | return max8997->muic; | ||
54 | case GPIO_LOW ... GPIO_HI: | ||
55 | return max8997->i2c; | ||
56 | case FLASH_STATUS: | ||
57 | return max8997->i2c; | ||
58 | default: | ||
59 | return ERR_PTR(-EINVAL); | ||
60 | } | ||
61 | |||
62 | return ERR_PTR(-EINVAL); | ||
63 | } | ||
64 | |||
65 | struct max8997_irq_data { | ||
66 | int mask; | ||
67 | enum max8997_irq_source group; | ||
68 | }; | ||
69 | |||
70 | #define DECLARE_IRQ(idx, _group, _mask) \ | ||
71 | [(idx)] = { .group = (_group), .mask = (_mask) } | ||
72 | static const struct max8997_irq_data max8997_irqs[] = { | ||
73 | DECLARE_IRQ(MAX8997_PMICIRQ_PWRONR, PMIC_INT1, 1 << 0), | ||
74 | DECLARE_IRQ(MAX8997_PMICIRQ_PWRONF, PMIC_INT1, 1 << 1), | ||
75 | DECLARE_IRQ(MAX8997_PMICIRQ_PWRON1SEC, PMIC_INT1, 1 << 3), | ||
76 | DECLARE_IRQ(MAX8997_PMICIRQ_JIGONR, PMIC_INT1, 1 << 4), | ||
77 | DECLARE_IRQ(MAX8997_PMICIRQ_JIGONF, PMIC_INT1, 1 << 5), | ||
78 | DECLARE_IRQ(MAX8997_PMICIRQ_LOWBAT2, PMIC_INT1, 1 << 6), | ||
79 | DECLARE_IRQ(MAX8997_PMICIRQ_LOWBAT1, PMIC_INT1, 1 << 7), | ||
80 | |||
81 | DECLARE_IRQ(MAX8997_PMICIRQ_JIGR, PMIC_INT2, 1 << 0), | ||
82 | DECLARE_IRQ(MAX8997_PMICIRQ_JIGF, PMIC_INT2, 1 << 1), | ||
83 | DECLARE_IRQ(MAX8997_PMICIRQ_MR, PMIC_INT2, 1 << 2), | ||
84 | DECLARE_IRQ(MAX8997_PMICIRQ_DVS1OK, PMIC_INT2, 1 << 3), | ||
85 | DECLARE_IRQ(MAX8997_PMICIRQ_DVS2OK, PMIC_INT2, 1 << 4), | ||
86 | DECLARE_IRQ(MAX8997_PMICIRQ_DVS3OK, PMIC_INT2, 1 << 5), | ||
87 | DECLARE_IRQ(MAX8997_PMICIRQ_DVS4OK, PMIC_INT2, 1 << 6), | ||
88 | |||
89 | DECLARE_IRQ(MAX8997_PMICIRQ_CHGINS, PMIC_INT3, 1 << 0), | ||
90 | DECLARE_IRQ(MAX8997_PMICIRQ_CHGRM, PMIC_INT3, 1 << 1), | ||
91 | DECLARE_IRQ(MAX8997_PMICIRQ_DCINOVP, PMIC_INT3, 1 << 2), | ||
92 | DECLARE_IRQ(MAX8997_PMICIRQ_TOPOFFR, PMIC_INT3, 1 << 3), | ||
93 | DECLARE_IRQ(MAX8997_PMICIRQ_CHGRSTF, PMIC_INT3, 1 << 5), | ||
94 | DECLARE_IRQ(MAX8997_PMICIRQ_MBCHGTMEXPD, PMIC_INT3, 1 << 7), | ||
95 | |||
96 | DECLARE_IRQ(MAX8997_PMICIRQ_RTC60S, PMIC_INT4, 1 << 0), | ||
97 | DECLARE_IRQ(MAX8997_PMICIRQ_RTCA1, PMIC_INT4, 1 << 1), | ||
98 | DECLARE_IRQ(MAX8997_PMICIRQ_RTCA2, PMIC_INT4, 1 << 2), | ||
99 | DECLARE_IRQ(MAX8997_PMICIRQ_SMPL_INT, PMIC_INT4, 1 << 3), | ||
100 | DECLARE_IRQ(MAX8997_PMICIRQ_RTC1S, PMIC_INT4, 1 << 4), | ||
101 | DECLARE_IRQ(MAX8997_PMICIRQ_WTSR, PMIC_INT4, 1 << 5), | ||
102 | |||
103 | DECLARE_IRQ(MAX8997_MUICIRQ_ADCError, MUIC_INT1, 1 << 2), | ||
104 | DECLARE_IRQ(MAX8997_MUICIRQ_ADCLow, MUIC_INT1, 1 << 1), | ||
105 | DECLARE_IRQ(MAX8997_MUICIRQ_ADC, MUIC_INT1, 1 << 0), | ||
106 | |||
107 | DECLARE_IRQ(MAX8997_MUICIRQ_VBVolt, MUIC_INT2, 1 << 4), | ||
108 | DECLARE_IRQ(MAX8997_MUICIRQ_DBChg, MUIC_INT2, 1 << 3), | ||
109 | DECLARE_IRQ(MAX8997_MUICIRQ_DCDTmr, MUIC_INT2, 1 << 2), | ||
110 | DECLARE_IRQ(MAX8997_MUICIRQ_ChgDetRun, MUIC_INT2, 1 << 1), | ||
111 | DECLARE_IRQ(MAX8997_MUICIRQ_ChgTyp, MUIC_INT2, 1 << 0), | ||
112 | |||
113 | DECLARE_IRQ(MAX8997_MUICIRQ_OVP, MUIC_INT3, 1 << 2), | ||
114 | }; | ||
115 | |||
116 | static void max8997_irq_lock(struct irq_data *data) | ||
117 | { | ||
118 | struct max8997_dev *max8997 = irq_get_chip_data(data->irq); | ||
119 | |||
120 | mutex_lock(&max8997->irqlock); | ||
121 | } | ||
122 | |||
123 | static void max8997_irq_sync_unlock(struct irq_data *data) | ||
124 | { | ||
125 | struct max8997_dev *max8997 = irq_get_chip_data(data->irq); | ||
126 | int i; | ||
127 | |||
128 | for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) { | ||
129 | u8 mask_reg = max8997_mask_reg[i]; | ||
130 | struct i2c_client *i2c = get_i2c(max8997, i); | ||
131 | |||
132 | if (mask_reg == MAX8997_REG_INVALID || | ||
133 | IS_ERR_OR_NULL(i2c)) | ||
134 | continue; | ||
135 | max8997->irq_masks_cache[i] = max8997->irq_masks_cur[i]; | ||
136 | |||
137 | max8997_write_reg(i2c, max8997_mask_reg[i], | ||
138 | max8997->irq_masks_cur[i]); | ||
139 | } | ||
140 | |||
141 | mutex_unlock(&max8997->irqlock); | ||
142 | } | ||
143 | |||
144 | static const inline struct max8997_irq_data * | ||
145 | irq_to_max8997_irq(struct max8997_dev *max8997, int irq) | ||
146 | { | ||
147 | return &max8997_irqs[irq - max8997->irq_base]; | ||
148 | } | ||
149 | |||
150 | static void max8997_irq_mask(struct irq_data *data) | ||
151 | { | ||
152 | struct max8997_dev *max8997 = irq_get_chip_data(data->irq); | ||
153 | const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997, | ||
154 | data->irq); | ||
155 | |||
156 | max8997->irq_masks_cur[irq_data->group] |= irq_data->mask; | ||
157 | } | ||
158 | |||
159 | static void max8997_irq_unmask(struct irq_data *data) | ||
160 | { | ||
161 | struct max8997_dev *max8997 = irq_get_chip_data(data->irq); | ||
162 | const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997, | ||
163 | data->irq); | ||
164 | |||
165 | max8997->irq_masks_cur[irq_data->group] &= ~irq_data->mask; | ||
166 | } | ||
167 | |||
168 | static struct irq_chip max8997_irq_chip = { | ||
169 | .name = "max8997", | ||
170 | .irq_bus_lock = max8997_irq_lock, | ||
171 | .irq_bus_sync_unlock = max8997_irq_sync_unlock, | ||
172 | .irq_mask = max8997_irq_mask, | ||
173 | .irq_unmask = max8997_irq_unmask, | ||
174 | }; | ||
175 | |||
176 | #define MAX8997_IRQSRC_PMIC (1 << 1) | ||
177 | #define MAX8997_IRQSRC_FUELGAUGE (1 << 2) | ||
178 | #define MAX8997_IRQSRC_MUIC (1 << 3) | ||
179 | #define MAX8997_IRQSRC_GPIO (1 << 4) | ||
180 | #define MAX8997_IRQSRC_FLASH (1 << 5) | ||
181 | static irqreturn_t max8997_irq_thread(int irq, void *data) | ||
182 | { | ||
183 | struct max8997_dev *max8997 = data; | ||
184 | u8 irq_reg[MAX8997_IRQ_GROUP_NR] = {}; | ||
185 | u8 irq_src; | ||
186 | int ret; | ||
187 | int i; | ||
188 | |||
189 | ret = max8997_read_reg(max8997->i2c, MAX8997_REG_INTSRC, &irq_src); | ||
190 | if (ret < 0) { | ||
191 | dev_err(max8997->dev, "Failed to read interrupt source: %d\n", | ||
192 | ret); | ||
193 | return IRQ_NONE; | ||
194 | } | ||
195 | |||
196 | if (irq_src & MAX8997_IRQSRC_PMIC) { | ||
197 | /* PMIC INT1 ~ INT4 */ | ||
198 | max8997_bulk_read(max8997->i2c, MAX8997_REG_INT1, 4, | ||
199 | &irq_reg[PMIC_INT1]); | ||
200 | } | ||
201 | if (irq_src & MAX8997_IRQSRC_FUELGAUGE) { | ||
202 | /* | ||
203 | * TODO: FUEL GAUGE | ||
204 | * | ||
205 | * This is to be supported by Max17042 driver. When | ||
206 | * an interrupt incurs here, it should be relayed to a | ||
207 | * Max17042 device that is connected (probably by | ||
208 | * platform-data). However, we do not have interrupt | ||
209 | * handling in Max17042 driver currently. The Max17042 IRQ | ||
210 | * driver should be ready to be used as a stand-alone device and | ||
211 | * a Max8997-dependent device. Because it is not ready in | ||
212 | * Max17042-side and it is not too critical in operating | ||
213 | * Max8997, we do not implement this in initial releases. | ||
214 | */ | ||
215 | irq_reg[FUEL_GAUGE] = 0; | ||
216 | } | ||
217 | if (irq_src & MAX8997_IRQSRC_MUIC) { | ||
218 | /* MUIC INT1 ~ INT3 */ | ||
219 | max8997_bulk_read(max8997->muic, MAX8997_MUIC_REG_INT1, 3, | ||
220 | &irq_reg[MUIC_INT1]); | ||
221 | } | ||
222 | if (irq_src & MAX8997_IRQSRC_GPIO) { | ||
223 | /* GPIO Interrupt */ | ||
224 | u8 gpio_info[MAX8997_NUM_GPIO]; | ||
225 | |||
226 | irq_reg[GPIO_LOW] = 0; | ||
227 | irq_reg[GPIO_HI] = 0; | ||
228 | |||
229 | max8997_bulk_read(max8997->i2c, MAX8997_REG_GPIOCNTL1, | ||
230 | MAX8997_NUM_GPIO, gpio_info); | ||
231 | for (i = 0; i < MAX8997_NUM_GPIO; i++) { | ||
232 | bool interrupt = false; | ||
233 | |||
234 | switch (gpio_info[i] & MAX8997_GPIO_INT_MASK) { | ||
235 | case MAX8997_GPIO_INT_BOTH: | ||
236 | if (max8997->gpio_status[i] != gpio_info[i]) | ||
237 | interrupt = true; | ||
238 | break; | ||
239 | case MAX8997_GPIO_INT_RISE: | ||
240 | if ((max8997->gpio_status[i] != gpio_info[i]) && | ||
241 | (gpio_info[i] & MAX8997_GPIO_DATA_MASK)) | ||
242 | interrupt = true; | ||
243 | break; | ||
244 | case MAX8997_GPIO_INT_FALL: | ||
245 | if ((max8997->gpio_status[i] != gpio_info[i]) && | ||
246 | !(gpio_info[i] & MAX8997_GPIO_DATA_MASK)) | ||
247 | interrupt = true; | ||
248 | break; | ||
249 | default: | ||
250 | break; | ||
251 | } | ||
252 | |||
253 | if (interrupt) { | ||
254 | if (i < 8) | ||
255 | irq_reg[GPIO_LOW] |= (1 << i); | ||
256 | else | ||
257 | irq_reg[GPIO_HI] |= (1 << (i - 8)); | ||
258 | } | ||
259 | |||
260 | } | ||
261 | } | ||
262 | if (irq_src & MAX8997_IRQSRC_FLASH) { | ||
263 | /* Flash Status Interrupt */ | ||
264 | ret = max8997_read_reg(max8997->i2c, MAX8997_REG_FLASHSTATUS, | ||
265 | &irq_reg[FLASH_STATUS]); | ||
266 | } | ||
267 | |||
268 | /* Apply masking */ | ||
269 | for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) | ||
270 | irq_reg[i] &= ~max8997->irq_masks_cur[i]; | ||
271 | |||
272 | /* Report */ | ||
273 | for (i = 0; i < MAX8997_IRQ_NR; i++) { | ||
274 | if (irq_reg[max8997_irqs[i].group] & max8997_irqs[i].mask) | ||
275 | handle_nested_irq(max8997->irq_base + i); | ||
276 | } | ||
277 | |||
278 | return IRQ_HANDLED; | ||
279 | } | ||
280 | |||
281 | int max8997_irq_resume(struct max8997_dev *max8997) | ||
282 | { | ||
283 | if (max8997->irq && max8997->irq_base) | ||
284 | max8997_irq_thread(max8997->irq_base, max8997); | ||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | int max8997_irq_init(struct max8997_dev *max8997) | ||
289 | { | ||
290 | int i; | ||
291 | int cur_irq; | ||
292 | int ret; | ||
293 | u8 val; | ||
294 | |||
295 | if (!max8997->irq) { | ||
296 | dev_warn(max8997->dev, "No interrupt specified.\n"); | ||
297 | max8997->irq_base = 0; | ||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | if (!max8997->irq_base) { | ||
302 | dev_err(max8997->dev, "No interrupt base specified.\n"); | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | mutex_init(&max8997->irqlock); | ||
307 | |||
308 | /* Mask individual interrupt sources */ | ||
309 | for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) { | ||
310 | struct i2c_client *i2c; | ||
311 | |||
312 | max8997->irq_masks_cur[i] = 0xff; | ||
313 | max8997->irq_masks_cache[i] = 0xff; | ||
314 | i2c = get_i2c(max8997, i); | ||
315 | |||
316 | if (IS_ERR_OR_NULL(i2c)) | ||
317 | continue; | ||
318 | if (max8997_mask_reg[i] == MAX8997_REG_INVALID) | ||
319 | continue; | ||
320 | |||
321 | max8997_write_reg(i2c, max8997_mask_reg[i], 0xff); | ||
322 | } | ||
323 | |||
324 | for (i = 0; i < MAX8997_NUM_GPIO; i++) { | ||
325 | max8997->gpio_status[i] = (max8997_read_reg(max8997->i2c, | ||
326 | MAX8997_REG_GPIOCNTL1 + i, | ||
327 | &val) | ||
328 | & MAX8997_GPIO_DATA_MASK) ? | ||
329 | true : false; | ||
330 | } | ||
331 | |||
332 | /* Register with genirq */ | ||
333 | for (i = 0; i < MAX8997_IRQ_NR; i++) { | ||
334 | cur_irq = i + max8997->irq_base; | ||
335 | irq_set_chip_data(cur_irq, max8997); | ||
336 | irq_set_chip_and_handler(cur_irq, &max8997_irq_chip, | ||
337 | handle_edge_irq); | ||
338 | irq_set_nested_thread(cur_irq, 1); | ||
339 | #ifdef CONFIG_ARM | ||
340 | set_irq_flags(cur_irq, IRQF_VALID); | ||
341 | #else | ||
342 | irq_set_noprobe(cur_irq); | ||
343 | #endif | ||
344 | } | ||
345 | |||
346 | ret = request_threaded_irq(max8997->irq, NULL, max8997_irq_thread, | ||
347 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | ||
348 | "max8997-irq", max8997); | ||
349 | |||
350 | if (ret) { | ||
351 | dev_err(max8997->dev, "Failed to request IRQ %d: %d\n", | ||
352 | max8997->irq, ret); | ||
353 | return ret; | ||
354 | } | ||
355 | |||
356 | if (!max8997->ono) | ||
357 | return 0; | ||
358 | |||
359 | ret = request_threaded_irq(max8997->ono, NULL, max8997_irq_thread, | ||
360 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | | ||
361 | IRQF_ONESHOT, "max8997-ono", max8997); | ||
362 | |||
363 | if (ret) | ||
364 | dev_err(max8997->dev, "Failed to request ono-IRQ %d: %d\n", | ||
365 | max8997->ono, ret); | ||
366 | |||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | void max8997_irq_exit(struct max8997_dev *max8997) | ||
371 | { | ||
372 | if (max8997->ono) | ||
373 | free_irq(max8997->ono, max8997); | ||
374 | |||
375 | if (max8997->irq) | ||
376 | free_irq(max8997->irq, max8997); | ||
377 | } | ||
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c index 3903e1fbb334..5919710dc9ed 100644 --- a/drivers/mfd/max8998-irq.c +++ b/drivers/mfd/max8998-irq.c | |||
@@ -224,14 +224,14 @@ int max8998_irq_init(struct max8998_dev *max8998) | |||
224 | /* register with genirq */ | 224 | /* register with genirq */ |
225 | for (i = 0; i < MAX8998_IRQ_NR; i++) { | 225 | for (i = 0; i < MAX8998_IRQ_NR; i++) { |
226 | cur_irq = i + max8998->irq_base; | 226 | cur_irq = i + max8998->irq_base; |
227 | set_irq_chip_data(cur_irq, max8998); | 227 | irq_set_chip_data(cur_irq, max8998); |
228 | set_irq_chip_and_handler(cur_irq, &max8998_irq_chip, | 228 | irq_set_chip_and_handler(cur_irq, &max8998_irq_chip, |
229 | handle_edge_irq); | 229 | handle_edge_irq); |
230 | set_irq_nested_thread(cur_irq, 1); | 230 | irq_set_nested_thread(cur_irq, 1); |
231 | #ifdef CONFIG_ARM | 231 | #ifdef CONFIG_ARM |
232 | set_irq_flags(cur_irq, IRQF_VALID); | 232 | set_irq_flags(cur_irq, IRQF_VALID); |
233 | #else | 233 | #else |
234 | set_irq_noprobe(cur_irq); | 234 | irq_set_noprobe(cur_irq); |
235 | #endif | 235 | #endif |
236 | } | 236 | } |
237 | 237 | ||
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c index c00214257da2..9ec7570f5b81 100644 --- a/drivers/mfd/max8998.c +++ b/drivers/mfd/max8998.c | |||
@@ -209,7 +209,7 @@ static int max8998_suspend(struct device *dev) | |||
209 | struct max8998_dev *max8998 = i2c_get_clientdata(i2c); | 209 | struct max8998_dev *max8998 = i2c_get_clientdata(i2c); |
210 | 210 | ||
211 | if (max8998->wakeup) | 211 | if (max8998->wakeup) |
212 | set_irq_wake(max8998->irq, 1); | 212 | irq_set_irq_wake(max8998->irq, 1); |
213 | return 0; | 213 | return 0; |
214 | } | 214 | } |
215 | 215 | ||
@@ -219,7 +219,7 @@ static int max8998_resume(struct device *dev) | |||
219 | struct max8998_dev *max8998 = i2c_get_clientdata(i2c); | 219 | struct max8998_dev *max8998 = i2c_get_clientdata(i2c); |
220 | 220 | ||
221 | if (max8998->wakeup) | 221 | if (max8998->wakeup) |
222 | set_irq_wake(max8998->irq, 0); | 222 | irq_set_irq_wake(max8998->irq, 0); |
223 | /* | 223 | /* |
224 | * In LP3974, if IRQ registers are not "read & clear" | 224 | * In LP3974, if IRQ registers are not "read & clear" |
225 | * when it's set during sleep, the interrupt becomes | 225 | * when it's set during sleep, the interrupt becomes |
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 79eda0264fb2..d01574d98870 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c | |||
@@ -184,16 +184,12 @@ void mfd_remove_devices(struct device *parent) | |||
184 | } | 184 | } |
185 | EXPORT_SYMBOL(mfd_remove_devices); | 185 | EXPORT_SYMBOL(mfd_remove_devices); |
186 | 186 | ||
187 | static int add_shared_platform_device(const char *cell, const char *name) | 187 | int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones) |
188 | { | 188 | { |
189 | struct mfd_cell cell_entry; | 189 | struct mfd_cell cell_entry; |
190 | struct device *dev; | 190 | struct device *dev; |
191 | struct platform_device *pdev; | 191 | struct platform_device *pdev; |
192 | int err; | 192 | int i; |
193 | |||
194 | /* check if we've already registered a device (don't fail if we have) */ | ||
195 | if (bus_find_device_by_name(&platform_bus_type, NULL, name)) | ||
196 | return 0; | ||
197 | 193 | ||
198 | /* fetch the parent cell's device (should already be registered!) */ | 194 | /* fetch the parent cell's device (should already be registered!) */ |
199 | dev = bus_find_device_by_name(&platform_bus_type, NULL, cell); | 195 | dev = bus_find_device_by_name(&platform_bus_type, NULL, cell); |
@@ -206,44 +202,17 @@ static int add_shared_platform_device(const char *cell, const char *name) | |||
206 | 202 | ||
207 | WARN_ON(!cell_entry.enable); | 203 | WARN_ON(!cell_entry.enable); |
208 | 204 | ||
209 | cell_entry.name = name; | 205 | for (i = 0; i < n_clones; i++) { |
210 | err = mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0); | 206 | cell_entry.name = clones[i]; |
211 | if (err) | 207 | /* don't give up if a single call fails; just report error */ |
212 | dev_err(dev, "MFD add devices failed: %d\n", err); | 208 | if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0)) |
213 | return err; | 209 | dev_err(dev, "failed to create platform device '%s'\n", |
214 | } | 210 | clones[i]); |
215 | 211 | } | |
216 | int mfd_shared_platform_driver_register(struct platform_driver *drv, | ||
217 | const char *cellname) | ||
218 | { | ||
219 | int err; | ||
220 | |||
221 | err = add_shared_platform_device(cellname, drv->driver.name); | ||
222 | if (err) | ||
223 | printk(KERN_ERR "failed to add platform device %s\n", | ||
224 | drv->driver.name); | ||
225 | |||
226 | err = platform_driver_register(drv); | ||
227 | if (err) | ||
228 | printk(KERN_ERR "failed to add platform driver %s\n", | ||
229 | drv->driver.name); | ||
230 | |||
231 | return err; | ||
232 | } | ||
233 | EXPORT_SYMBOL(mfd_shared_platform_driver_register); | ||
234 | |||
235 | void mfd_shared_platform_driver_unregister(struct platform_driver *drv) | ||
236 | { | ||
237 | struct device *dev; | ||
238 | |||
239 | dev = bus_find_device_by_name(&platform_bus_type, NULL, | ||
240 | drv->driver.name); | ||
241 | if (dev) | ||
242 | platform_device_unregister(to_platform_device(dev)); | ||
243 | 212 | ||
244 | platform_driver_unregister(drv); | 213 | return 0; |
245 | } | 214 | } |
246 | EXPORT_SYMBOL(mfd_shared_platform_driver_unregister); | 215 | EXPORT_SYMBOL(mfd_clone_cell); |
247 | 216 | ||
248 | MODULE_LICENSE("GPL"); | 217 | MODULE_LICENSE("GPL"); |
249 | MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov"); | 218 | MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov"); |
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index c1306ed43e3c..c7687f6a78a0 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c | |||
@@ -356,7 +356,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client) | |||
356 | return 0; | 356 | return 0; |
357 | } | 357 | } |
358 | 358 | ||
359 | static struct i2c_device_id pcf50633_id_table[] = { | 359 | static const struct i2c_device_id pcf50633_id_table[] = { |
360 | {"pcf50633", 0x73}, | 360 | {"pcf50633", 0x73}, |
361 | {/* end of list */} | 361 | {/* end of list */} |
362 | }; | 362 | }; |
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c index 193c940225b5..10dbe6374a89 100644 --- a/drivers/mfd/rdc321x-southbridge.c +++ b/drivers/mfd/rdc321x-southbridge.c | |||
@@ -97,6 +97,7 @@ static DEFINE_PCI_DEVICE_TABLE(rdc321x_sb_table) = { | |||
97 | { PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) }, | 97 | { PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) }, |
98 | {} | 98 | {} |
99 | }; | 99 | }; |
100 | MODULE_DEVICE_TABLE(pci, rdc321x_sb_table); | ||
100 | 101 | ||
101 | static struct pci_driver rdc321x_sb_driver = { | 102 | static struct pci_driver rdc321x_sb_driver = { |
102 | .name = "RDC321x Southbridge", | 103 | .name = "RDC321x Southbridge", |
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index 3e5732b58c49..7ab7746631d4 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c | |||
@@ -762,14 +762,14 @@ static int __devinit stmpe_irq_init(struct stmpe *stmpe) | |||
762 | int irq; | 762 | int irq; |
763 | 763 | ||
764 | for (irq = base; irq < base + num_irqs; irq++) { | 764 | for (irq = base; irq < base + num_irqs; irq++) { |
765 | set_irq_chip_data(irq, stmpe); | 765 | irq_set_chip_data(irq, stmpe); |
766 | set_irq_chip_and_handler(irq, &stmpe_irq_chip, | 766 | irq_set_chip_and_handler(irq, &stmpe_irq_chip, |
767 | handle_edge_irq); | 767 | handle_edge_irq); |
768 | set_irq_nested_thread(irq, 1); | 768 | irq_set_nested_thread(irq, 1); |
769 | #ifdef CONFIG_ARM | 769 | #ifdef CONFIG_ARM |
770 | set_irq_flags(irq, IRQF_VALID); | 770 | set_irq_flags(irq, IRQF_VALID); |
771 | #else | 771 | #else |
772 | set_irq_noprobe(irq); | 772 | irq_set_noprobe(irq); |
773 | #endif | 773 | #endif |
774 | } | 774 | } |
775 | 775 | ||
@@ -786,8 +786,8 @@ static void stmpe_irq_remove(struct stmpe *stmpe) | |||
786 | #ifdef CONFIG_ARM | 786 | #ifdef CONFIG_ARM |
787 | set_irq_flags(irq, 0); | 787 | set_irq_flags(irq, 0); |
788 | #endif | 788 | #endif |
789 | set_irq_chip_and_handler(irq, NULL, NULL); | 789 | irq_set_chip_and_handler(irq, NULL, NULL); |
790 | set_irq_chip_data(irq, NULL); | 790 | irq_set_chip_data(irq, NULL); |
791 | } | 791 | } |
792 | } | 792 | } |
793 | 793 | ||
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c index af57fc706a4c..42830e692964 100644 --- a/drivers/mfd/t7l66xb.c +++ b/drivers/mfd/t7l66xb.c | |||
@@ -186,7 +186,7 @@ static struct mfd_cell t7l66xb_cells[] = { | |||
186 | /* Handle the T7L66XB interrupt mux */ | 186 | /* Handle the T7L66XB interrupt mux */ |
187 | static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc) | 187 | static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc) |
188 | { | 188 | { |
189 | struct t7l66xb *t7l66xb = get_irq_data(irq); | 189 | struct t7l66xb *t7l66xb = irq_get_handler_data(irq); |
190 | unsigned int isr; | 190 | unsigned int isr; |
191 | unsigned int i, irq_base; | 191 | unsigned int i, irq_base; |
192 | 192 | ||
@@ -243,17 +243,16 @@ static void t7l66xb_attach_irq(struct platform_device *dev) | |||
243 | irq_base = t7l66xb->irq_base; | 243 | irq_base = t7l66xb->irq_base; |
244 | 244 | ||
245 | for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { | 245 | for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { |
246 | set_irq_chip(irq, &t7l66xb_chip); | 246 | irq_set_chip_and_handler(irq, &t7l66xb_chip, handle_level_irq); |
247 | set_irq_chip_data(irq, t7l66xb); | 247 | irq_set_chip_data(irq, t7l66xb); |
248 | set_irq_handler(irq, handle_level_irq); | ||
249 | #ifdef CONFIG_ARM | 248 | #ifdef CONFIG_ARM |
250 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 249 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
251 | #endif | 250 | #endif |
252 | } | 251 | } |
253 | 252 | ||
254 | set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING); | 253 | irq_set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING); |
255 | set_irq_data(t7l66xb->irq, t7l66xb); | 254 | irq_set_handler_data(t7l66xb->irq, t7l66xb); |
256 | set_irq_chained_handler(t7l66xb->irq, t7l66xb_irq); | 255 | irq_set_chained_handler(t7l66xb->irq, t7l66xb_irq); |
257 | } | 256 | } |
258 | 257 | ||
259 | static void t7l66xb_detach_irq(struct platform_device *dev) | 258 | static void t7l66xb_detach_irq(struct platform_device *dev) |
@@ -263,15 +262,15 @@ static void t7l66xb_detach_irq(struct platform_device *dev) | |||
263 | 262 | ||
264 | irq_base = t7l66xb->irq_base; | 263 | irq_base = t7l66xb->irq_base; |
265 | 264 | ||
266 | set_irq_chained_handler(t7l66xb->irq, NULL); | 265 | irq_set_chained_handler(t7l66xb->irq, NULL); |
267 | set_irq_data(t7l66xb->irq, NULL); | 266 | irq_set_handler_data(t7l66xb->irq, NULL); |
268 | 267 | ||
269 | for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { | 268 | for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { |
270 | #ifdef CONFIG_ARM | 269 | #ifdef CONFIG_ARM |
271 | set_irq_flags(irq, 0); | 270 | set_irq_flags(irq, 0); |
272 | #endif | 271 | #endif |
273 | set_irq_chip(irq, NULL); | 272 | irq_set_chip(irq, NULL); |
274 | set_irq_chip_data(irq, NULL); | 273 | irq_set_chip_data(irq, NULL); |
275 | } | 274 | } |
276 | } | 275 | } |
277 | 276 | ||
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c index 729dbeed2ce0..c27e515b0722 100644 --- a/drivers/mfd/tc3589x.c +++ b/drivers/mfd/tc3589x.c | |||
@@ -192,14 +192,14 @@ static int tc3589x_irq_init(struct tc3589x *tc3589x) | |||
192 | int irq; | 192 | int irq; |
193 | 193 | ||
194 | for (irq = base; irq < base + TC3589x_NR_INTERNAL_IRQS; irq++) { | 194 | for (irq = base; irq < base + TC3589x_NR_INTERNAL_IRQS; irq++) { |
195 | set_irq_chip_data(irq, tc3589x); | 195 | irq_set_chip_data(irq, tc3589x); |
196 | set_irq_chip_and_handler(irq, &dummy_irq_chip, | 196 | irq_set_chip_and_handler(irq, &dummy_irq_chip, |
197 | handle_edge_irq); | 197 | handle_edge_irq); |
198 | set_irq_nested_thread(irq, 1); | 198 | irq_set_nested_thread(irq, 1); |
199 | #ifdef CONFIG_ARM | 199 | #ifdef CONFIG_ARM |
200 | set_irq_flags(irq, IRQF_VALID); | 200 | set_irq_flags(irq, IRQF_VALID); |
201 | #else | 201 | #else |
202 | set_irq_noprobe(irq); | 202 | irq_set_noprobe(irq); |
203 | #endif | 203 | #endif |
204 | } | 204 | } |
205 | 205 | ||
@@ -215,8 +215,8 @@ static void tc3589x_irq_remove(struct tc3589x *tc3589x) | |||
215 | #ifdef CONFIG_ARM | 215 | #ifdef CONFIG_ARM |
216 | set_irq_flags(irq, 0); | 216 | set_irq_flags(irq, 0); |
217 | #endif | 217 | #endif |
218 | set_irq_chip_and_handler(irq, NULL, NULL); | 218 | irq_set_chip_and_handler(irq, NULL, NULL); |
219 | set_irq_chip_data(irq, NULL); | 219 | irq_set_chip_data(irq, NULL); |
220 | } | 220 | } |
221 | } | 221 | } |
222 | 222 | ||
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c index 3d62ded86a8f..fc53ce287601 100644 --- a/drivers/mfd/tc6393xb.c +++ b/drivers/mfd/tc6393xb.c | |||
@@ -513,7 +513,7 @@ static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base) | |||
513 | static void | 513 | static void |
514 | tc6393xb_irq(unsigned int irq, struct irq_desc *desc) | 514 | tc6393xb_irq(unsigned int irq, struct irq_desc *desc) |
515 | { | 515 | { |
516 | struct tc6393xb *tc6393xb = get_irq_data(irq); | 516 | struct tc6393xb *tc6393xb = irq_get_handler_data(irq); |
517 | unsigned int isr; | 517 | unsigned int isr; |
518 | unsigned int i, irq_base; | 518 | unsigned int i, irq_base; |
519 | 519 | ||
@@ -572,15 +572,14 @@ static void tc6393xb_attach_irq(struct platform_device *dev) | |||
572 | irq_base = tc6393xb->irq_base; | 572 | irq_base = tc6393xb->irq_base; |
573 | 573 | ||
574 | for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { | 574 | for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { |
575 | set_irq_chip(irq, &tc6393xb_chip); | 575 | irq_set_chip_and_handler(irq, &tc6393xb_chip, handle_edge_irq); |
576 | set_irq_chip_data(irq, tc6393xb); | 576 | irq_set_chip_data(irq, tc6393xb); |
577 | set_irq_handler(irq, handle_edge_irq); | ||
578 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 577 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
579 | } | 578 | } |
580 | 579 | ||
581 | set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING); | 580 | irq_set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING); |
582 | set_irq_data(tc6393xb->irq, tc6393xb); | 581 | irq_set_handler_data(tc6393xb->irq, tc6393xb); |
583 | set_irq_chained_handler(tc6393xb->irq, tc6393xb_irq); | 582 | irq_set_chained_handler(tc6393xb->irq, tc6393xb_irq); |
584 | } | 583 | } |
585 | 584 | ||
586 | static void tc6393xb_detach_irq(struct platform_device *dev) | 585 | static void tc6393xb_detach_irq(struct platform_device *dev) |
@@ -588,15 +587,15 @@ static void tc6393xb_detach_irq(struct platform_device *dev) | |||
588 | struct tc6393xb *tc6393xb = platform_get_drvdata(dev); | 587 | struct tc6393xb *tc6393xb = platform_get_drvdata(dev); |
589 | unsigned int irq, irq_base; | 588 | unsigned int irq, irq_base; |
590 | 589 | ||
591 | set_irq_chained_handler(tc6393xb->irq, NULL); | 590 | irq_set_chained_handler(tc6393xb->irq, NULL); |
592 | set_irq_data(tc6393xb->irq, NULL); | 591 | irq_set_handler_data(tc6393xb->irq, NULL); |
593 | 592 | ||
594 | irq_base = tc6393xb->irq_base; | 593 | irq_base = tc6393xb->irq_base; |
595 | 594 | ||
596 | for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { | 595 | for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { |
597 | set_irq_flags(irq, 0); | 596 | set_irq_flags(irq, 0); |
598 | set_irq_chip(irq, NULL); | 597 | irq_set_chip(irq, NULL); |
599 | set_irq_chip_data(irq, NULL); | 598 | irq_set_chip_data(irq, NULL); |
600 | } | 599 | } |
601 | } | 600 | } |
602 | 601 | ||
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index 0aa9186aec19..b600808690c1 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c | |||
@@ -422,10 +422,10 @@ static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq, | |||
422 | 422 | ||
423 | for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) { | 423 | for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) { |
424 | int __irq = i + tps6586x->irq_base; | 424 | int __irq = i + tps6586x->irq_base; |
425 | set_irq_chip_data(__irq, tps6586x); | 425 | irq_set_chip_data(__irq, tps6586x); |
426 | set_irq_chip_and_handler(__irq, &tps6586x->irq_chip, | 426 | irq_set_chip_and_handler(__irq, &tps6586x->irq_chip, |
427 | handle_simple_irq); | 427 | handle_simple_irq); |
428 | set_irq_nested_thread(__irq, 1); | 428 | irq_set_nested_thread(__irq, 1); |
429 | #ifdef CONFIG_ARM | 429 | #ifdef CONFIG_ARM |
430 | set_irq_flags(__irq, IRQF_VALID); | 430 | set_irq_flags(__irq, IRQF_VALID); |
431 | #endif | 431 | #endif |
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 63a30e88908f..8a7ee3139b86 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c | |||
@@ -320,24 +320,8 @@ static int twl4030_irq_thread(void *data) | |||
320 | for (module_irq = twl4030_irq_base; | 320 | for (module_irq = twl4030_irq_base; |
321 | pih_isr; | 321 | pih_isr; |
322 | pih_isr >>= 1, module_irq++) { | 322 | pih_isr >>= 1, module_irq++) { |
323 | if (pih_isr & 0x1) { | 323 | if (pih_isr & 0x1) |
324 | struct irq_desc *d = irq_to_desc(module_irq); | 324 | generic_handle_irq(module_irq); |
325 | |||
326 | if (!d) { | ||
327 | pr_err("twl4030: Invalid SIH IRQ: %d\n", | ||
328 | module_irq); | ||
329 | return -EINVAL; | ||
330 | } | ||
331 | |||
332 | /* These can't be masked ... always warn | ||
333 | * if we get any surprises. | ||
334 | */ | ||
335 | if (d->status & IRQ_DISABLED) | ||
336 | note_interrupt(module_irq, d, | ||
337 | IRQ_NONE); | ||
338 | else | ||
339 | d->handle_irq(module_irq, d); | ||
340 | } | ||
341 | } | 325 | } |
342 | local_irq_enable(); | 326 | local_irq_enable(); |
343 | 327 | ||
@@ -470,7 +454,7 @@ static inline void activate_irq(int irq) | |||
470 | set_irq_flags(irq, IRQF_VALID); | 454 | set_irq_flags(irq, IRQF_VALID); |
471 | #else | 455 | #else |
472 | /* same effect on other architectures */ | 456 | /* same effect on other architectures */ |
473 | set_irq_noprobe(irq); | 457 | irq_set_noprobe(irq); |
474 | #endif | 458 | #endif |
475 | } | 459 | } |
476 | 460 | ||
@@ -560,24 +544,18 @@ static void twl4030_sih_do_edge(struct work_struct *work) | |||
560 | /* Modify only the bits we know must change */ | 544 | /* Modify only the bits we know must change */ |
561 | while (edge_change) { | 545 | while (edge_change) { |
562 | int i = fls(edge_change) - 1; | 546 | int i = fls(edge_change) - 1; |
563 | struct irq_desc *d = irq_to_desc(i + agent->irq_base); | 547 | struct irq_data *idata = irq_get_irq_data(i + agent->irq_base); |
564 | int byte = 1 + (i >> 2); | 548 | int byte = 1 + (i >> 2); |
565 | int off = (i & 0x3) * 2; | 549 | int off = (i & 0x3) * 2; |
566 | 550 | unsigned int type; | |
567 | if (!d) { | ||
568 | pr_err("twl4030: Invalid IRQ: %d\n", | ||
569 | i + agent->irq_base); | ||
570 | return; | ||
571 | } | ||
572 | 551 | ||
573 | bytes[byte] &= ~(0x03 << off); | 552 | bytes[byte] &= ~(0x03 << off); |
574 | 553 | ||
575 | raw_spin_lock_irq(&d->lock); | 554 | type = irqd_get_trigger_type(idata); |
576 | if (d->status & IRQ_TYPE_EDGE_RISING) | 555 | if (type & IRQ_TYPE_EDGE_RISING) |
577 | bytes[byte] |= BIT(off + 1); | 556 | bytes[byte] |= BIT(off + 1); |
578 | if (d->status & IRQ_TYPE_EDGE_FALLING) | 557 | if (type & IRQ_TYPE_EDGE_FALLING) |
579 | bytes[byte] |= BIT(off + 0); | 558 | bytes[byte] |= BIT(off + 0); |
580 | raw_spin_unlock_irq(&d->lock); | ||
581 | 559 | ||
582 | edge_change &= ~BIT(i); | 560 | edge_change &= ~BIT(i); |
583 | } | 561 | } |
@@ -626,21 +604,13 @@ static void twl4030_sih_unmask(struct irq_data *data) | |||
626 | static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger) | 604 | static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger) |
627 | { | 605 | { |
628 | struct sih_agent *sih = irq_data_get_irq_chip_data(data); | 606 | struct sih_agent *sih = irq_data_get_irq_chip_data(data); |
629 | struct irq_desc *desc = irq_to_desc(data->irq); | ||
630 | unsigned long flags; | 607 | unsigned long flags; |
631 | 608 | ||
632 | if (!desc) { | ||
633 | pr_err("twl4030: Invalid IRQ: %d\n", data->irq); | ||
634 | return -EINVAL; | ||
635 | } | ||
636 | |||
637 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) | 609 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) |
638 | return -EINVAL; | 610 | return -EINVAL; |
639 | 611 | ||
640 | spin_lock_irqsave(&sih_agent_lock, flags); | 612 | spin_lock_irqsave(&sih_agent_lock, flags); |
641 | if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) { | 613 | if (irqd_get_trigger_type(data) != trigger) { |
642 | desc->status &= ~IRQ_TYPE_SENSE_MASK; | ||
643 | desc->status |= trigger; | ||
644 | sih->edge_change |= BIT(data->irq - sih->irq_base); | 614 | sih->edge_change |= BIT(data->irq - sih->irq_base); |
645 | queue_work(wq, &sih->edge_work); | 615 | queue_work(wq, &sih->edge_work); |
646 | } | 616 | } |
@@ -680,7 +650,7 @@ static inline int sih_read_isr(const struct sih *sih) | |||
680 | */ | 650 | */ |
681 | static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc) | 651 | static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc) |
682 | { | 652 | { |
683 | struct sih_agent *agent = get_irq_data(irq); | 653 | struct sih_agent *agent = irq_get_handler_data(irq); |
684 | const struct sih *sih = agent->sih; | 654 | const struct sih *sih = agent->sih; |
685 | int isr; | 655 | int isr; |
686 | 656 | ||
@@ -754,9 +724,9 @@ int twl4030_sih_setup(int module) | |||
754 | for (i = 0; i < sih->bits; i++) { | 724 | for (i = 0; i < sih->bits; i++) { |
755 | irq = irq_base + i; | 725 | irq = irq_base + i; |
756 | 726 | ||
757 | set_irq_chip_and_handler(irq, &twl4030_sih_irq_chip, | 727 | irq_set_chip_and_handler(irq, &twl4030_sih_irq_chip, |
758 | handle_edge_irq); | 728 | handle_edge_irq); |
759 | set_irq_chip_data(irq, agent); | 729 | irq_set_chip_data(irq, agent); |
760 | activate_irq(irq); | 730 | activate_irq(irq); |
761 | } | 731 | } |
762 | 732 | ||
@@ -765,8 +735,8 @@ int twl4030_sih_setup(int module) | |||
765 | 735 | ||
766 | /* replace generic PIH handler (handle_simple_irq) */ | 736 | /* replace generic PIH handler (handle_simple_irq) */ |
767 | irq = sih_mod + twl4030_irq_base; | 737 | irq = sih_mod + twl4030_irq_base; |
768 | set_irq_data(irq, agent); | 738 | irq_set_handler_data(irq, agent); |
769 | set_irq_chained_handler(irq, handle_twl4030_sih); | 739 | irq_set_chained_handler(irq, handle_twl4030_sih); |
770 | 740 | ||
771 | pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name, | 741 | pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name, |
772 | irq, irq_base, twl4030_irq_next - 1); | 742 | irq, irq_base, twl4030_irq_next - 1); |
@@ -815,8 +785,8 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) | |||
815 | twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; | 785 | twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; |
816 | 786 | ||
817 | for (i = irq_base; i < irq_end; i++) { | 787 | for (i = irq_base; i < irq_end; i++) { |
818 | set_irq_chip_and_handler(i, &twl4030_irq_chip, | 788 | irq_set_chip_and_handler(i, &twl4030_irq_chip, |
819 | handle_simple_irq); | 789 | handle_simple_irq); |
820 | activate_irq(i); | 790 | activate_irq(i); |
821 | } | 791 | } |
822 | twl4030_irq_next = i; | 792 | twl4030_irq_next = i; |
@@ -856,7 +826,7 @@ fail_rqirq: | |||
856 | /* clean up twl4030_sih_setup */ | 826 | /* clean up twl4030_sih_setup */ |
857 | fail: | 827 | fail: |
858 | for (i = irq_base; i < irq_end; i++) | 828 | for (i = irq_base; i < irq_end; i++) |
859 | set_irq_chip_and_handler(i, NULL, NULL); | 829 | irq_set_chip_and_handler(i, NULL, NULL); |
860 | destroy_workqueue(wq); | 830 | destroy_workqueue(wq); |
861 | wq = NULL; | 831 | wq = NULL; |
862 | return status; | 832 | return status; |
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c index 4082ed73613f..fa937052fbab 100644 --- a/drivers/mfd/twl6030-irq.c +++ b/drivers/mfd/twl6030-irq.c | |||
@@ -140,22 +140,7 @@ static int twl6030_irq_thread(void *data) | |||
140 | if (sts.int_sts & 0x1) { | 140 | if (sts.int_sts & 0x1) { |
141 | int module_irq = twl6030_irq_base + | 141 | int module_irq = twl6030_irq_base + |
142 | twl6030_interrupt_mapping[i]; | 142 | twl6030_interrupt_mapping[i]; |
143 | struct irq_desc *d = irq_to_desc(module_irq); | 143 | generic_handle_irq(module_irq); |
144 | |||
145 | if (!d) { | ||
146 | pr_err("twl6030: Invalid SIH IRQ: %d\n", | ||
147 | module_irq); | ||
148 | return -EINVAL; | ||
149 | } | ||
150 | |||
151 | /* These can't be masked ... always warn | ||
152 | * if we get any surprises. | ||
153 | */ | ||
154 | if (d->status & IRQ_DISABLED) | ||
155 | note_interrupt(module_irq, d, | ||
156 | IRQ_NONE); | ||
157 | else | ||
158 | d->handle_irq(module_irq, d); | ||
159 | 144 | ||
160 | } | 145 | } |
161 | local_irq_enable(); | 146 | local_irq_enable(); |
@@ -198,7 +183,7 @@ static inline void activate_irq(int irq) | |||
198 | set_irq_flags(irq, IRQF_VALID); | 183 | set_irq_flags(irq, IRQF_VALID); |
199 | #else | 184 | #else |
200 | /* same effect on other architectures */ | 185 | /* same effect on other architectures */ |
201 | set_irq_noprobe(irq); | 186 | irq_set_noprobe(irq); |
202 | #endif | 187 | #endif |
203 | } | 188 | } |
204 | 189 | ||
@@ -335,8 +320,8 @@ int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) | |||
335 | twl6030_irq_chip.irq_set_type = NULL; | 320 | twl6030_irq_chip.irq_set_type = NULL; |
336 | 321 | ||
337 | for (i = irq_base; i < irq_end; i++) { | 322 | for (i = irq_base; i < irq_end; i++) { |
338 | set_irq_chip_and_handler(i, &twl6030_irq_chip, | 323 | irq_set_chip_and_handler(i, &twl6030_irq_chip, |
339 | handle_simple_irq); | 324 | handle_simple_irq); |
340 | activate_irq(i); | 325 | activate_irq(i); |
341 | } | 326 | } |
342 | 327 | ||
@@ -365,7 +350,7 @@ fail_irq: | |||
365 | 350 | ||
366 | fail_kthread: | 351 | fail_kthread: |
367 | for (i = irq_base; i < irq_end; i++) | 352 | for (i = irq_base; i < irq_end; i++) |
368 | set_irq_chip_and_handler(i, NULL, NULL); | 353 | irq_set_chip_and_handler(i, NULL, NULL); |
369 | return status; | 354 | return status; |
370 | } | 355 | } |
371 | 356 | ||
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c index f76f6c798046..04914f2836c0 100644 --- a/drivers/mfd/wl1273-core.c +++ b/drivers/mfd/wl1273-core.c | |||
@@ -25,7 +25,7 @@ | |||
25 | 25 | ||
26 | #define DRIVER_DESC "WL1273 FM Radio Core" | 26 | #define DRIVER_DESC "WL1273 FM Radio Core" |
27 | 27 | ||
28 | static struct i2c_device_id wl1273_driver_id_table[] = { | 28 | static const struct i2c_device_id wl1273_driver_id_table[] = { |
29 | { WL1273_FM_DRIVER_NAME, 0 }, | 29 | { WL1273_FM_DRIVER_NAME, 0 }, |
30 | { } | 30 | { } |
31 | }; | 31 | }; |
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index a5cd17e18d09..23e66af89dea 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c | |||
@@ -553,17 +553,17 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq) | |||
553 | for (cur_irq = wm831x->irq_base; | 553 | for (cur_irq = wm831x->irq_base; |
554 | cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base; | 554 | cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base; |
555 | cur_irq++) { | 555 | cur_irq++) { |
556 | set_irq_chip_data(cur_irq, wm831x); | 556 | irq_set_chip_data(cur_irq, wm831x); |
557 | set_irq_chip_and_handler(cur_irq, &wm831x_irq_chip, | 557 | irq_set_chip_and_handler(cur_irq, &wm831x_irq_chip, |
558 | handle_edge_irq); | 558 | handle_edge_irq); |
559 | set_irq_nested_thread(cur_irq, 1); | 559 | irq_set_nested_thread(cur_irq, 1); |
560 | 560 | ||
561 | /* ARM needs us to explicitly flag the IRQ as valid | 561 | /* ARM needs us to explicitly flag the IRQ as valid |
562 | * and will set them noprobe when we do so. */ | 562 | * and will set them noprobe when we do so. */ |
563 | #ifdef CONFIG_ARM | 563 | #ifdef CONFIG_ARM |
564 | set_irq_flags(cur_irq, IRQF_VALID); | 564 | set_irq_flags(cur_irq, IRQF_VALID); |
565 | #else | 565 | #else |
566 | set_irq_noprobe(cur_irq); | 566 | irq_set_noprobe(cur_irq); |
567 | #endif | 567 | #endif |
568 | } | 568 | } |
569 | 569 | ||
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c index 5839966ebd85..ed4b22a167b3 100644 --- a/drivers/mfd/wm8350-irq.c +++ b/drivers/mfd/wm8350-irq.c | |||
@@ -518,17 +518,17 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq, | |||
518 | for (cur_irq = wm8350->irq_base; | 518 | for (cur_irq = wm8350->irq_base; |
519 | cur_irq < ARRAY_SIZE(wm8350_irqs) + wm8350->irq_base; | 519 | cur_irq < ARRAY_SIZE(wm8350_irqs) + wm8350->irq_base; |
520 | cur_irq++) { | 520 | cur_irq++) { |
521 | set_irq_chip_data(cur_irq, wm8350); | 521 | irq_set_chip_data(cur_irq, wm8350); |
522 | set_irq_chip_and_handler(cur_irq, &wm8350_irq_chip, | 522 | irq_set_chip_and_handler(cur_irq, &wm8350_irq_chip, |
523 | handle_edge_irq); | 523 | handle_edge_irq); |
524 | set_irq_nested_thread(cur_irq, 1); | 524 | irq_set_nested_thread(cur_irq, 1); |
525 | 525 | ||
526 | /* ARM needs us to explicitly flag the IRQ as valid | 526 | /* ARM needs us to explicitly flag the IRQ as valid |
527 | * and will set them noprobe when we do so. */ | 527 | * and will set them noprobe when we do so. */ |
528 | #ifdef CONFIG_ARM | 528 | #ifdef CONFIG_ARM |
529 | set_irq_flags(cur_irq, IRQF_VALID); | 529 | set_irq_flags(cur_irq, IRQF_VALID); |
530 | #else | 530 | #else |
531 | set_irq_noprobe(cur_irq); | 531 | irq_set_noprobe(cur_irq); |
532 | #endif | 532 | #endif |
533 | } | 533 | } |
534 | 534 | ||
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c index 1e3bf4a2ff8e..71c6e8f9aedb 100644 --- a/drivers/mfd/wm8994-irq.c +++ b/drivers/mfd/wm8994-irq.c | |||
@@ -278,17 +278,17 @@ int wm8994_irq_init(struct wm8994 *wm8994) | |||
278 | for (cur_irq = wm8994->irq_base; | 278 | for (cur_irq = wm8994->irq_base; |
279 | cur_irq < ARRAY_SIZE(wm8994_irqs) + wm8994->irq_base; | 279 | cur_irq < ARRAY_SIZE(wm8994_irqs) + wm8994->irq_base; |
280 | cur_irq++) { | 280 | cur_irq++) { |
281 | set_irq_chip_data(cur_irq, wm8994); | 281 | irq_set_chip_data(cur_irq, wm8994); |
282 | set_irq_chip_and_handler(cur_irq, &wm8994_irq_chip, | 282 | irq_set_chip_and_handler(cur_irq, &wm8994_irq_chip, |
283 | handle_edge_irq); | 283 | handle_edge_irq); |
284 | set_irq_nested_thread(cur_irq, 1); | 284 | irq_set_nested_thread(cur_irq, 1); |
285 | 285 | ||
286 | /* ARM needs us to explicitly flag the IRQ as valid | 286 | /* ARM needs us to explicitly flag the IRQ as valid |
287 | * and will set them noprobe when we do so. */ | 287 | * and will set them noprobe when we do so. */ |
288 | #ifdef CONFIG_ARM | 288 | #ifdef CONFIG_ARM |
289 | set_irq_flags(cur_irq, IRQF_VALID); | 289 | set_irq_flags(cur_irq, IRQF_VALID); |
290 | #else | 290 | #else |
291 | set_irq_noprobe(cur_irq); | 291 | irq_set_noprobe(cur_irq); |
292 | #endif | 292 | #endif |
293 | } | 293 | } |
294 | 294 | ||
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index 28852dfa310d..20e4e9395b61 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c | |||
@@ -373,7 +373,7 @@ static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name, | |||
373 | 373 | ||
374 | if (gru_irq_count[chiplet] == 0) { | 374 | if (gru_irq_count[chiplet] == 0) { |
375 | gru_chip[chiplet].name = irq_name; | 375 | gru_chip[chiplet].name = irq_name; |
376 | ret = set_irq_chip(irq, &gru_chip[chiplet]); | 376 | ret = irq_set_chip(irq, &gru_chip[chiplet]); |
377 | if (ret) { | 377 | if (ret) { |
378 | printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n", | 378 | printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n", |
379 | GRU_DRIVER_ID_STR, -ret); | 379 | GRU_DRIVER_ID_STR, -ret); |
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 5ec8eddfcf6e..f5cedeccad42 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c | |||
@@ -1875,7 +1875,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write, | |||
1875 | unsigned int tot_sz, int max_scatter) | 1875 | unsigned int tot_sz, int max_scatter) |
1876 | { | 1876 | { |
1877 | unsigned int dev_addr, i, cnt, sz, ssz; | 1877 | unsigned int dev_addr, i, cnt, sz, ssz; |
1878 | struct timespec ts1, ts2, ts; | 1878 | struct timespec ts1, ts2; |
1879 | int ret; | 1879 | int ret; |
1880 | 1880 | ||
1881 | sz = test->area.max_tfr; | 1881 | sz = test->area.max_tfr; |
@@ -1912,7 +1912,6 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write, | |||
1912 | } | 1912 | } |
1913 | getnstimeofday(&ts2); | 1913 | getnstimeofday(&ts2); |
1914 | 1914 | ||
1915 | ts = timespec_sub(ts2, ts1); | ||
1916 | mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); | 1915 | mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); |
1917 | 1916 | ||
1918 | return 0; | 1917 | return 0; |
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 797cdb5887fd..76af349c14b4 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * your option) any later version. | 9 | * your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/slab.h> | ||
12 | #include <linux/types.h> | 13 | #include <linux/types.h> |
13 | #include <linux/scatterlist.h> | 14 | #include <linux/scatterlist.h> |
14 | 15 | ||
@@ -252,6 +253,7 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) | |||
252 | struct mmc_command cmd; | 253 | struct mmc_command cmd; |
253 | struct mmc_data data; | 254 | struct mmc_data data; |
254 | struct scatterlist sg; | 255 | struct scatterlist sg; |
256 | void *data_buf; | ||
255 | 257 | ||
256 | BUG_ON(!card); | 258 | BUG_ON(!card); |
257 | BUG_ON(!card->host); | 259 | BUG_ON(!card->host); |
@@ -263,6 +265,13 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) | |||
263 | if (err) | 265 | if (err) |
264 | return err; | 266 | return err; |
265 | 267 | ||
268 | /* dma onto stack is unsafe/nonportable, but callers to this | ||
269 | * routine normally provide temporary on-stack buffers ... | ||
270 | */ | ||
271 | data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL); | ||
272 | if (data_buf == NULL) | ||
273 | return -ENOMEM; | ||
274 | |||
266 | memset(&mrq, 0, sizeof(struct mmc_request)); | 275 | memset(&mrq, 0, sizeof(struct mmc_request)); |
267 | memset(&cmd, 0, sizeof(struct mmc_command)); | 276 | memset(&cmd, 0, sizeof(struct mmc_command)); |
268 | memset(&data, 0, sizeof(struct mmc_data)); | 277 | memset(&data, 0, sizeof(struct mmc_data)); |
@@ -280,12 +289,15 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) | |||
280 | data.sg = &sg; | 289 | data.sg = &sg; |
281 | data.sg_len = 1; | 290 | data.sg_len = 1; |
282 | 291 | ||
283 | sg_init_one(&sg, scr, 8); | 292 | sg_init_one(&sg, data_buf, 8); |
284 | 293 | ||
285 | mmc_set_data_timeout(&data, card); | 294 | mmc_set_data_timeout(&data, card); |
286 | 295 | ||
287 | mmc_wait_for_req(card->host, &mrq); | 296 | mmc_wait_for_req(card->host, &mrq); |
288 | 297 | ||
298 | memcpy(scr, data_buf, sizeof(card->raw_scr)); | ||
299 | kfree(data_buf); | ||
300 | |||
289 | if (cmd.error) | 301 | if (cmd.error) |
290 | return cmd.error; | 302 | return cmd.error; |
291 | if (data.error) | 303 | if (data.error) |
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 1a21c6427a19..94df40531c38 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -439,13 +439,25 @@ config MMC_SDRICOH_CS | |||
439 | To compile this driver as a module, choose M here: the | 439 | To compile this driver as a module, choose M here: the |
440 | module will be called sdricoh_cs. | 440 | module will be called sdricoh_cs. |
441 | 441 | ||
442 | config MMC_TMIO_CORE | ||
443 | tristate | ||
444 | |||
442 | config MMC_TMIO | 445 | config MMC_TMIO |
443 | tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" | 446 | tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" |
444 | depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI | 447 | depends on MFD_TMIO || MFD_ASIC3 |
448 | select MMC_TMIO_CORE | ||
445 | help | 449 | help |
446 | This provides support for the SD/MMC cell found in TC6393XB, | 450 | This provides support for the SD/MMC cell found in TC6393XB, |
447 | T7L66XB and also HTC ASIC3 | 451 | T7L66XB and also HTC ASIC3 |
448 | 452 | ||
453 | config MMC_SDHI | ||
454 | tristate "SH-Mobile SDHI SD/SDIO controller support" | ||
455 | depends on SUPERH || ARCH_SHMOBILE | ||
456 | select MMC_TMIO_CORE | ||
457 | help | ||
458 | This provides support for the SDHI SD/SDIO controller found in | ||
459 | SuperH and ARM SH-Mobile SoCs | ||
460 | |||
449 | config MMC_CB710 | 461 | config MMC_CB710 |
450 | tristate "ENE CB710 MMC/SD Interface support" | 462 | tristate "ENE CB710 MMC/SD Interface support" |
451 | depends on PCI | 463 | depends on PCI |
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 30aa6867745f..4f1df0aae574 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile | |||
@@ -29,7 +29,13 @@ endif | |||
29 | obj-$(CONFIG_MMC_S3C) += s3cmci.o | 29 | obj-$(CONFIG_MMC_S3C) += s3cmci.o |
30 | obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o | 30 | obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o |
31 | obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o | 31 | obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o |
32 | obj-$(CONFIG_MMC_CB710) += cb710-mmc.o | 32 | obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o |
33 | tmio_mmc_core-y := tmio_mmc_pio.o | ||
34 | ifneq ($(CONFIG_MMC_SDHI),n) | ||
35 | tmio_mmc_core-y += tmio_mmc_dma.o | ||
36 | endif | ||
37 | obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o | ||
38 | obj-$(CONFIG_MMC_CB710) += cb710-mmc.o | ||
33 | obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o | 39 | obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o |
34 | obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o | 40 | obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o |
35 | obj-$(CONFIG_MMC_DW) += dw_mmc.o | 41 | obj-$(CONFIG_MMC_DW) += dw_mmc.o |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 5a614069cb00..87e1f57ec9ba 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -316,7 +316,7 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host) | |||
316 | 316 | ||
317 | /* Stop the IDMAC running */ | 317 | /* Stop the IDMAC running */ |
318 | temp = mci_readl(host, BMOD); | 318 | temp = mci_readl(host, BMOD); |
319 | temp &= ~SDMMC_IDMAC_ENABLE; | 319 | temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); |
320 | mci_writel(host, BMOD, temp); | 320 | mci_writel(host, BMOD, temp); |
321 | } | 321 | } |
322 | 322 | ||
@@ -385,7 +385,7 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) | |||
385 | 385 | ||
386 | /* Enable the IDMAC */ | 386 | /* Enable the IDMAC */ |
387 | temp = mci_readl(host, BMOD); | 387 | temp = mci_readl(host, BMOD); |
388 | temp |= SDMMC_IDMAC_ENABLE; | 388 | temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; |
389 | mci_writel(host, BMOD, temp); | 389 | mci_writel(host, BMOD, temp); |
390 | 390 | ||
391 | /* Start it running */ | 391 | /* Start it running */ |
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 5bbb87d10251..b4a7e4fba90f 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
@@ -68,6 +68,12 @@ static struct variant_data variant_arm = { | |||
68 | .datalength_bits = 16, | 68 | .datalength_bits = 16, |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static struct variant_data variant_arm_extended_fifo = { | ||
72 | .fifosize = 128 * 4, | ||
73 | .fifohalfsize = 64 * 4, | ||
74 | .datalength_bits = 16, | ||
75 | }; | ||
76 | |||
71 | static struct variant_data variant_u300 = { | 77 | static struct variant_data variant_u300 = { |
72 | .fifosize = 16 * 4, | 78 | .fifosize = 16 * 4, |
73 | .fifohalfsize = 8 * 4, | 79 | .fifohalfsize = 8 * 4, |
@@ -1277,10 +1283,15 @@ static int mmci_resume(struct amba_device *dev) | |||
1277 | static struct amba_id mmci_ids[] = { | 1283 | static struct amba_id mmci_ids[] = { |
1278 | { | 1284 | { |
1279 | .id = 0x00041180, | 1285 | .id = 0x00041180, |
1280 | .mask = 0x000fffff, | 1286 | .mask = 0xff0fffff, |
1281 | .data = &variant_arm, | 1287 | .data = &variant_arm, |
1282 | }, | 1288 | }, |
1283 | { | 1289 | { |
1290 | .id = 0x01041180, | ||
1291 | .mask = 0xff0fffff, | ||
1292 | .data = &variant_arm_extended_fifo, | ||
1293 | }, | ||
1294 | { | ||
1284 | .id = 0x00041181, | 1295 | .id = 0x00041181, |
1285 | .mask = 0x000fffff, | 1296 | .mask = 0x000fffff, |
1286 | .data = &variant_arm, | 1297 | .data = &variant_arm, |
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 5530def54e5b..e2aecb7f1d5c 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c | |||
@@ -15,9 +15,11 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/irq.h> | ||
18 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
19 | #include <linux/of.h> | 20 | #include <linux/of.h> |
20 | #include <linux/of_gpio.h> | 21 | #include <linux/of_gpio.h> |
22 | #include <linux/of_irq.h> | ||
21 | #include <linux/spi/spi.h> | 23 | #include <linux/spi/spi.h> |
22 | #include <linux/spi/mmc_spi.h> | 24 | #include <linux/spi/mmc_spi.h> |
23 | #include <linux/mmc/core.h> | 25 | #include <linux/mmc/core.h> |
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 3b5248567973..a19967d0bfc4 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
@@ -16,14 +16,40 @@ | |||
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
19 | #include <linux/slab.h> | ||
19 | #include <linux/mmc/host.h> | 20 | #include <linux/mmc/host.h> |
20 | #include <linux/mmc/sdhci-pltfm.h> | 21 | #include <linux/mmc/sdhci-pltfm.h> |
22 | #include <linux/mmc/mmc.h> | ||
23 | #include <linux/mmc/sdio.h> | ||
21 | #include <mach/hardware.h> | 24 | #include <mach/hardware.h> |
22 | #include <mach/esdhc.h> | 25 | #include <mach/esdhc.h> |
23 | #include "sdhci.h" | 26 | #include "sdhci.h" |
24 | #include "sdhci-pltfm.h" | 27 | #include "sdhci-pltfm.h" |
25 | #include "sdhci-esdhc.h" | 28 | #include "sdhci-esdhc.h" |
26 | 29 | ||
30 | /* VENDOR SPEC register */ | ||
31 | #define SDHCI_VENDOR_SPEC 0xC0 | ||
32 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 | ||
33 | |||
34 | #define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0) | ||
35 | /* | ||
36 | * The CMDTYPE of the CMD register (offset 0xE) should be set to | ||
37 | * "11" when the STOP CMD12 is issued on imx53 to abort one | ||
38 | * open ended multi-blk IO. Otherwise the TC INT wouldn't | ||
39 | * be generated. | ||
40 | * In exact block transfer, the controller doesn't complete the | ||
41 | * operations automatically as required at the end of the | ||
42 | * transfer and remains on hold if the abort command is not sent. | ||
43 | * As a result, the TC flag is not asserted and SW received timeout | ||
44 | * exeception. Bit1 of Vendor Spec registor is used to fix it. | ||
45 | */ | ||
46 | #define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) | ||
47 | |||
48 | struct pltfm_imx_data { | ||
49 | int flags; | ||
50 | u32 scratchpad; | ||
51 | }; | ||
52 | |||
27 | static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) | 53 | static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) |
28 | { | 54 | { |
29 | void __iomem *base = host->ioaddr + (reg & ~0x3); | 55 | void __iomem *base = host->ioaddr + (reg & ~0x3); |
@@ -34,10 +60,14 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i | |||
34 | 60 | ||
35 | static u32 esdhc_readl_le(struct sdhci_host *host, int reg) | 61 | static u32 esdhc_readl_le(struct sdhci_host *host, int reg) |
36 | { | 62 | { |
63 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
64 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
65 | |||
37 | /* fake CARD_PRESENT flag on mx25/35 */ | 66 | /* fake CARD_PRESENT flag on mx25/35 */ |
38 | u32 val = readl(host->ioaddr + reg); | 67 | u32 val = readl(host->ioaddr + reg); |
39 | 68 | ||
40 | if (unlikely(reg == SDHCI_PRESENT_STATE)) { | 69 | if (unlikely((reg == SDHCI_PRESENT_STATE) |
70 | && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) { | ||
41 | struct esdhc_platform_data *boarddata = | 71 | struct esdhc_platform_data *boarddata = |
42 | host->mmc->parent->platform_data; | 72 | host->mmc->parent->platform_data; |
43 | 73 | ||
@@ -55,13 +85,26 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) | |||
55 | 85 | ||
56 | static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) | 86 | static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) |
57 | { | 87 | { |
58 | if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) | 88 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
89 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
90 | |||
91 | if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) | ||
92 | && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) | ||
59 | /* | 93 | /* |
60 | * these interrupts won't work with a custom card_detect gpio | 94 | * these interrupts won't work with a custom card_detect gpio |
61 | * (only applied to mx25/35) | 95 | * (only applied to mx25/35) |
62 | */ | 96 | */ |
63 | val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); | 97 | val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); |
64 | 98 | ||
99 | if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) | ||
100 | && (reg == SDHCI_INT_STATUS) | ||
101 | && (val & SDHCI_INT_DATA_END))) { | ||
102 | u32 v; | ||
103 | v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); | ||
104 | v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK; | ||
105 | writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); | ||
106 | } | ||
107 | |||
65 | writel(val, host->ioaddr + reg); | 108 | writel(val, host->ioaddr + reg); |
66 | } | 109 | } |
67 | 110 | ||
@@ -76,6 +119,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg) | |||
76 | static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) | 119 | static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) |
77 | { | 120 | { |
78 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 121 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
122 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
79 | 123 | ||
80 | switch (reg) { | 124 | switch (reg) { |
81 | case SDHCI_TRANSFER_MODE: | 125 | case SDHCI_TRANSFER_MODE: |
@@ -83,10 +127,22 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) | |||
83 | * Postpone this write, we must do it together with a | 127 | * Postpone this write, we must do it together with a |
84 | * command write that is down below. | 128 | * command write that is down below. |
85 | */ | 129 | */ |
86 | pltfm_host->scratchpad = val; | 130 | if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) |
131 | && (host->cmd->opcode == SD_IO_RW_EXTENDED) | ||
132 | && (host->cmd->data->blocks > 1) | ||
133 | && (host->cmd->data->flags & MMC_DATA_READ)) { | ||
134 | u32 v; | ||
135 | v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); | ||
136 | v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK; | ||
137 | writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); | ||
138 | } | ||
139 | imx_data->scratchpad = val; | ||
87 | return; | 140 | return; |
88 | case SDHCI_COMMAND: | 141 | case SDHCI_COMMAND: |
89 | writel(val << 16 | pltfm_host->scratchpad, | 142 | if ((host->cmd->opcode == MMC_STOP_TRANSMISSION) |
143 | && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) | ||
144 | val |= SDHCI_CMD_ABORTCMD; | ||
145 | writel(val << 16 | imx_data->scratchpad, | ||
90 | host->ioaddr + SDHCI_TRANSFER_MODE); | 146 | host->ioaddr + SDHCI_TRANSFER_MODE); |
91 | return; | 147 | return; |
92 | case SDHCI_BLOCK_SIZE: | 148 | case SDHCI_BLOCK_SIZE: |
@@ -146,7 +202,9 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) | |||
146 | } | 202 | } |
147 | 203 | ||
148 | static struct sdhci_ops sdhci_esdhc_ops = { | 204 | static struct sdhci_ops sdhci_esdhc_ops = { |
205 | .read_l = esdhc_readl_le, | ||
149 | .read_w = esdhc_readw_le, | 206 | .read_w = esdhc_readw_le, |
207 | .write_l = esdhc_writel_le, | ||
150 | .write_w = esdhc_writew_le, | 208 | .write_w = esdhc_writew_le, |
151 | .write_b = esdhc_writeb_le, | 209 | .write_b = esdhc_writeb_le, |
152 | .set_clock = esdhc_set_clock, | 210 | .set_clock = esdhc_set_clock, |
@@ -168,6 +226,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
168 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; | 226 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; |
169 | struct clk *clk; | 227 | struct clk *clk; |
170 | int err; | 228 | int err; |
229 | struct pltfm_imx_data *imx_data; | ||
171 | 230 | ||
172 | clk = clk_get(mmc_dev(host->mmc), NULL); | 231 | clk = clk_get(mmc_dev(host->mmc), NULL); |
173 | if (IS_ERR(clk)) { | 232 | if (IS_ERR(clk)) { |
@@ -177,7 +236,15 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
177 | clk_enable(clk); | 236 | clk_enable(clk); |
178 | pltfm_host->clk = clk; | 237 | pltfm_host->clk = clk; |
179 | 238 | ||
180 | if (cpu_is_mx35() || cpu_is_mx51()) | 239 | imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); |
240 | if (!imx_data) { | ||
241 | clk_disable(pltfm_host->clk); | ||
242 | clk_put(pltfm_host->clk); | ||
243 | return -ENOMEM; | ||
244 | } | ||
245 | pltfm_host->priv = imx_data; | ||
246 | |||
247 | if (!cpu_is_mx25()) | ||
181 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; | 248 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; |
182 | 249 | ||
183 | if (cpu_is_mx25() || cpu_is_mx35()) { | 250 | if (cpu_is_mx25() || cpu_is_mx35()) { |
@@ -187,6 +254,9 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
187 | sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; | 254 | sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; |
188 | } | 255 | } |
189 | 256 | ||
257 | if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) | ||
258 | imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; | ||
259 | |||
190 | if (boarddata) { | 260 | if (boarddata) { |
191 | err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); | 261 | err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); |
192 | if (err) { | 262 | if (err) { |
@@ -214,8 +284,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
214 | goto no_card_detect_irq; | 284 | goto no_card_detect_irq; |
215 | } | 285 | } |
216 | 286 | ||
217 | sdhci_esdhc_ops.write_l = esdhc_writel_le; | 287 | imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP; |
218 | sdhci_esdhc_ops.read_l = esdhc_readl_le; | ||
219 | /* Now we have a working card_detect again */ | 288 | /* Now we have a working card_detect again */ |
220 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 289 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
221 | } | 290 | } |
@@ -227,6 +296,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
227 | no_card_detect_pin: | 296 | no_card_detect_pin: |
228 | boarddata->cd_gpio = err; | 297 | boarddata->cd_gpio = err; |
229 | not_supported: | 298 | not_supported: |
299 | kfree(imx_data); | ||
230 | return 0; | 300 | return 0; |
231 | } | 301 | } |
232 | 302 | ||
@@ -234,6 +304,7 @@ static void esdhc_pltfm_exit(struct sdhci_host *host) | |||
234 | { | 304 | { |
235 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 305 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
236 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; | 306 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; |
307 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
237 | 308 | ||
238 | if (boarddata && gpio_is_valid(boarddata->wp_gpio)) | 309 | if (boarddata && gpio_is_valid(boarddata->wp_gpio)) |
239 | gpio_free(boarddata->wp_gpio); | 310 | gpio_free(boarddata->wp_gpio); |
@@ -247,6 +318,7 @@ static void esdhc_pltfm_exit(struct sdhci_host *host) | |||
247 | 318 | ||
248 | clk_disable(pltfm_host->clk); | 319 | clk_disable(pltfm_host->clk); |
249 | clk_put(pltfm_host->clk); | 320 | clk_put(pltfm_host->clk); |
321 | kfree(imx_data); | ||
250 | } | 322 | } |
251 | 323 | ||
252 | struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { | 324 | struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { |
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index c55aae828aac..c3b08f111942 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h | |||
@@ -23,8 +23,7 @@ | |||
23 | SDHCI_QUIRK_NONSTANDARD_CLOCK | \ | 23 | SDHCI_QUIRK_NONSTANDARD_CLOCK | \ |
24 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ | 24 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ |
25 | SDHCI_QUIRK_PIO_NEEDS_DELAY | \ | 25 | SDHCI_QUIRK_PIO_NEEDS_DELAY | \ |
26 | SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \ | 26 | SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) |
27 | SDHCI_QUIRK_NO_CARD_NO_RESET) | ||
28 | 27 | ||
29 | #define ESDHC_SYSTEM_CONTROL 0x2c | 28 | #define ESDHC_SYSTEM_CONTROL 0x2c |
30 | #define ESDHC_CLOCK_MASK 0x0000fff0 | 29 | #define ESDHC_CLOCK_MASK 0x0000fff0 |
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 08161f690ae8..ba40d6d035c7 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c | |||
@@ -74,7 +74,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) | |||
74 | 74 | ||
75 | struct sdhci_of_data sdhci_esdhc = { | 75 | struct sdhci_of_data sdhci_esdhc = { |
76 | /* card detection could be handled via GPIO */ | 76 | /* card detection could be handled via GPIO */ |
77 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION, | 77 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION |
78 | | SDHCI_QUIRK_NO_CARD_NO_RESET, | ||
78 | .ops = { | 79 | .ops = { |
79 | .read_l = sdhci_be32bs_readl, | 80 | .read_l = sdhci_be32bs_readl, |
80 | .read_w = esdhc_readw, | 81 | .read_w = esdhc_readw, |
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 2f8d46854acd..a136be706347 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c | |||
@@ -1016,16 +1016,14 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev, | |||
1016 | struct sdhci_pci_chip *chip; | 1016 | struct sdhci_pci_chip *chip; |
1017 | struct sdhci_pci_slot *slot; | 1017 | struct sdhci_pci_slot *slot; |
1018 | 1018 | ||
1019 | u8 slots, rev, first_bar; | 1019 | u8 slots, first_bar; |
1020 | int ret, i; | 1020 | int ret, i; |
1021 | 1021 | ||
1022 | BUG_ON(pdev == NULL); | 1022 | BUG_ON(pdev == NULL); |
1023 | BUG_ON(ent == NULL); | 1023 | BUG_ON(ent == NULL); |
1024 | 1024 | ||
1025 | pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); | ||
1026 | |||
1027 | dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", | 1025 | dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", |
1028 | (int)pdev->vendor, (int)pdev->device, (int)rev); | 1026 | (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); |
1029 | 1027 | ||
1030 | ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); | 1028 | ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); |
1031 | if (ret) | 1029 | if (ret) |
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index ea2e44d9be5e..2b37016ad0ac 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h | |||
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | struct sdhci_pltfm_host { | 18 | struct sdhci_pltfm_host { |
19 | struct clk *clk; | 19 | struct clk *clk; |
20 | u32 scratchpad; /* to handle quirks across io-accessor calls */ | 20 | void *priv; /* to handle quirks across io-accessor calls */ |
21 | }; | 21 | }; |
22 | 22 | ||
23 | extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; | 23 | extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; |
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index d70c54c7b70a..60a4c97d3d18 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c | |||
@@ -50,7 +50,7 @@ static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id) | |||
50 | /* val == 1 -> card removed, val == 0 -> card inserted */ | 50 | /* val == 1 -> card removed, val == 0 -> card inserted */ |
51 | /* if card removed - set irq for low level, else vice versa */ | 51 | /* if card removed - set irq for low level, else vice versa */ |
52 | gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH; | 52 | gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH; |
53 | set_irq_type(irq, gpio_irq_type); | 53 | irq_set_irq_type(irq, gpio_irq_type); |
54 | 54 | ||
55 | if (sdhci->data->card_power_gpio >= 0) { | 55 | if (sdhci->data->card_power_gpio >= 0) { |
56 | if (!sdhci->data->power_always_enb) { | 56 | if (!sdhci->data->power_always_enb) { |
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 6e0969e40650..25e8bde600d1 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
@@ -45,6 +45,7 @@ | |||
45 | #define SDHCI_CMD_CRC 0x08 | 45 | #define SDHCI_CMD_CRC 0x08 |
46 | #define SDHCI_CMD_INDEX 0x10 | 46 | #define SDHCI_CMD_INDEX 0x10 |
47 | #define SDHCI_CMD_DATA 0x20 | 47 | #define SDHCI_CMD_DATA 0x20 |
48 | #define SDHCI_CMD_ABORTCMD 0xC0 | ||
48 | 49 | ||
49 | #define SDHCI_CMD_RESP_NONE 0x00 | 50 | #define SDHCI_CMD_RESP_NONE 0x00 |
50 | #define SDHCI_CMD_RESP_LONG 0x01 | 51 | #define SDHCI_CMD_RESP_LONG 0x01 |
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index 53a63024bf11..cc701236d16f 100644 --- a/drivers/mfd/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c | |||
@@ -23,51 +23,30 @@ | |||
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/mmc/host.h> | 25 | #include <linux/mmc/host.h> |
26 | #include <linux/mfd/core.h> | 26 | #include <linux/mmc/sh_mobile_sdhi.h> |
27 | #include <linux/mfd/tmio.h> | 27 | #include <linux/mfd/tmio.h> |
28 | #include <linux/mfd/sh_mobile_sdhi.h> | ||
29 | #include <linux/sh_dma.h> | 28 | #include <linux/sh_dma.h> |
30 | 29 | ||
30 | #include "tmio_mmc.h" | ||
31 | |||
31 | struct sh_mobile_sdhi { | 32 | struct sh_mobile_sdhi { |
32 | struct clk *clk; | 33 | struct clk *clk; |
33 | struct tmio_mmc_data mmc_data; | 34 | struct tmio_mmc_data mmc_data; |
34 | struct mfd_cell cell_mmc; | ||
35 | struct sh_dmae_slave param_tx; | 35 | struct sh_dmae_slave param_tx; |
36 | struct sh_dmae_slave param_rx; | 36 | struct sh_dmae_slave param_rx; |
37 | struct tmio_mmc_dma dma_priv; | 37 | struct tmio_mmc_dma dma_priv; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | static struct resource sh_mobile_sdhi_resources[] = { | 40 | static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state) |
41 | { | ||
42 | .start = 0x000, | ||
43 | .end = 0x1ff, | ||
44 | .flags = IORESOURCE_MEM, | ||
45 | }, | ||
46 | { | ||
47 | .start = 0, | ||
48 | .end = 0, | ||
49 | .flags = IORESOURCE_IRQ, | ||
50 | }, | ||
51 | }; | ||
52 | |||
53 | static struct mfd_cell sh_mobile_sdhi_cell = { | ||
54 | .name = "tmio-mmc", | ||
55 | .num_resources = ARRAY_SIZE(sh_mobile_sdhi_resources), | ||
56 | .resources = sh_mobile_sdhi_resources, | ||
57 | }; | ||
58 | |||
59 | static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state) | ||
60 | { | 41 | { |
61 | struct platform_device *pdev = to_platform_device(tmio->dev.parent); | ||
62 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 42 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; |
63 | 43 | ||
64 | if (p && p->set_pwr) | 44 | if (p && p->set_pwr) |
65 | p->set_pwr(pdev, state); | 45 | p->set_pwr(pdev, state); |
66 | } | 46 | } |
67 | 47 | ||
68 | static int sh_mobile_sdhi_get_cd(struct platform_device *tmio) | 48 | static int sh_mobile_sdhi_get_cd(struct platform_device *pdev) |
69 | { | 49 | { |
70 | struct platform_device *pdev = to_platform_device(tmio->dev.parent); | ||
71 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 50 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; |
72 | 51 | ||
73 | if (p && p->get_cd) | 52 | if (p && p->get_cd) |
@@ -81,20 +60,9 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
81 | struct sh_mobile_sdhi *priv; | 60 | struct sh_mobile_sdhi *priv; |
82 | struct tmio_mmc_data *mmc_data; | 61 | struct tmio_mmc_data *mmc_data; |
83 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 62 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; |
84 | struct resource *mem; | 63 | struct tmio_mmc_host *host; |
85 | char clk_name[8]; | 64 | char clk_name[8]; |
86 | int ret, irq; | 65 | int ret; |
87 | |||
88 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
89 | if (!mem) | ||
90 | dev_err(&pdev->dev, "missing MEM resource\n"); | ||
91 | |||
92 | irq = platform_get_irq(pdev, 0); | ||
93 | if (irq < 0) | ||
94 | dev_err(&pdev->dev, "missing IRQ resource\n"); | ||
95 | |||
96 | if (!mem || (irq < 0)) | ||
97 | return -EINVAL; | ||
98 | 66 | ||
99 | priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); | 67 | priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); |
100 | if (priv == NULL) { | 68 | if (priv == NULL) { |
@@ -109,8 +77,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
109 | if (IS_ERR(priv->clk)) { | 77 | if (IS_ERR(priv->clk)) { |
110 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); | 78 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); |
111 | ret = PTR_ERR(priv->clk); | 79 | ret = PTR_ERR(priv->clk); |
112 | kfree(priv); | 80 | goto eclkget; |
113 | return ret; | ||
114 | } | 81 | } |
115 | 82 | ||
116 | clk_enable(priv->clk); | 83 | clk_enable(priv->clk); |
@@ -123,6 +90,15 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
123 | mmc_data->flags = p->tmio_flags; | 90 | mmc_data->flags = p->tmio_flags; |
124 | mmc_data->ocr_mask = p->tmio_ocr_mask; | 91 | mmc_data->ocr_mask = p->tmio_ocr_mask; |
125 | mmc_data->capabilities |= p->tmio_caps; | 92 | mmc_data->capabilities |= p->tmio_caps; |
93 | |||
94 | if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { | ||
95 | priv->param_tx.slave_id = p->dma_slave_tx; | ||
96 | priv->param_rx.slave_id = p->dma_slave_rx; | ||
97 | priv->dma_priv.chan_priv_tx = &priv->param_tx; | ||
98 | priv->dma_priv.chan_priv_rx = &priv->param_rx; | ||
99 | priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ | ||
100 | mmc_data->dma = &priv->dma_priv; | ||
101 | } | ||
126 | } | 102 | } |
127 | 103 | ||
128 | /* | 104 | /* |
@@ -136,36 +112,30 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
136 | */ | 112 | */ |
137 | mmc_data->flags |= TMIO_MMC_SDIO_IRQ; | 113 | mmc_data->flags |= TMIO_MMC_SDIO_IRQ; |
138 | 114 | ||
139 | if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { | 115 | ret = tmio_mmc_host_probe(&host, pdev, mmc_data); |
140 | priv->param_tx.slave_id = p->dma_slave_tx; | 116 | if (ret < 0) |
141 | priv->param_rx.slave_id = p->dma_slave_rx; | 117 | goto eprobe; |
142 | priv->dma_priv.chan_priv_tx = &priv->param_tx; | ||
143 | priv->dma_priv.chan_priv_rx = &priv->param_rx; | ||
144 | priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ | ||
145 | mmc_data->dma = &priv->dma_priv; | ||
146 | } | ||
147 | 118 | ||
148 | memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc)); | 119 | pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), |
149 | priv->cell_mmc.mfd_data = mmc_data; | 120 | (unsigned long)host->ctl, host->irq); |
150 | 121 | ||
151 | platform_set_drvdata(pdev, priv); | 122 | return ret; |
152 | |||
153 | ret = mfd_add_devices(&pdev->dev, pdev->id, | ||
154 | &priv->cell_mmc, 1, mem, irq); | ||
155 | if (ret) { | ||
156 | clk_disable(priv->clk); | ||
157 | clk_put(priv->clk); | ||
158 | kfree(priv); | ||
159 | } | ||
160 | 123 | ||
124 | eprobe: | ||
125 | clk_disable(priv->clk); | ||
126 | clk_put(priv->clk); | ||
127 | eclkget: | ||
128 | kfree(priv); | ||
161 | return ret; | 129 | return ret; |
162 | } | 130 | } |
163 | 131 | ||
164 | static int sh_mobile_sdhi_remove(struct platform_device *pdev) | 132 | static int sh_mobile_sdhi_remove(struct platform_device *pdev) |
165 | { | 133 | { |
166 | struct sh_mobile_sdhi *priv = platform_get_drvdata(pdev); | 134 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
135 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
136 | struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); | ||
167 | 137 | ||
168 | mfd_remove_devices(&pdev->dev); | 138 | tmio_mmc_host_remove(host); |
169 | clk_disable(priv->clk); | 139 | clk_disable(priv->clk); |
170 | clk_put(priv->clk); | 140 | clk_put(priv->clk); |
171 | kfree(priv); | 141 | kfree(priv); |
@@ -198,3 +168,4 @@ module_exit(sh_mobile_sdhi_exit); | |||
198 | MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); | 168 | MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); |
199 | MODULE_AUTHOR("Magnus Damm"); | 169 | MODULE_AUTHOR("Magnus Damm"); |
200 | MODULE_LICENSE("GPL v2"); | 170 | MODULE_LICENSE("GPL v2"); |
171 | MODULE_ALIAS("platform:sh_mobile_sdhi"); | ||
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index ab1adeabdd22..79c568461d59 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/mmc/tmio_mmc.c | 2 | * linux/drivers/mmc/host/tmio_mmc.c |
3 | * | 3 | * |
4 | * Copyright (C) 2004 Ian Molton | 4 | * Copyright (C) 2007 Ian Molton |
5 | * Copyright (C) 2007 Ian Molton | 5 | * Copyright (C) 2004 Ian Molton |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
@@ -11,1182 +11,17 @@ | |||
11 | * Driver for the MMC / SD / SDIO cell found in: | 11 | * Driver for the MMC / SD / SDIO cell found in: |
12 | * | 12 | * |
13 | * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 | 13 | * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 |
14 | * | ||
15 | * This driver draws mainly on scattered spec sheets, Reverse engineering | ||
16 | * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit | ||
17 | * support). (Further 4 bit support from a later datasheet). | ||
18 | * | ||
19 | * TODO: | ||
20 | * Investigate using a workqueue for PIO transfers | ||
21 | * Eliminate FIXMEs | ||
22 | * SDIO support | ||
23 | * Better Power management | ||
24 | * Handle MMC errors better | ||
25 | * double buffer support | ||
26 | * | ||
27 | */ | 14 | */ |
28 | 15 | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/device.h> | 16 | #include <linux/device.h> |
31 | #include <linux/dmaengine.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/irq.h> | ||
36 | #include <linux/mfd/core.h> | 17 | #include <linux/mfd/core.h> |
37 | #include <linux/mfd/tmio.h> | 18 | #include <linux/mfd/tmio.h> |
38 | #include <linux/mmc/host.h> | 19 | #include <linux/mmc/host.h> |
39 | #include <linux/module.h> | 20 | #include <linux/module.h> |
40 | #include <linux/pagemap.h> | 21 | #include <linux/pagemap.h> |
41 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
42 | #include <linux/workqueue.h> | ||
43 | #include <linux/spinlock.h> | ||
44 | |||
45 | #define CTL_SD_CMD 0x00 | ||
46 | #define CTL_ARG_REG 0x04 | ||
47 | #define CTL_STOP_INTERNAL_ACTION 0x08 | ||
48 | #define CTL_XFER_BLK_COUNT 0xa | ||
49 | #define CTL_RESPONSE 0x0c | ||
50 | #define CTL_STATUS 0x1c | ||
51 | #define CTL_IRQ_MASK 0x20 | ||
52 | #define CTL_SD_CARD_CLK_CTL 0x24 | ||
53 | #define CTL_SD_XFER_LEN 0x26 | ||
54 | #define CTL_SD_MEM_CARD_OPT 0x28 | ||
55 | #define CTL_SD_ERROR_DETAIL_STATUS 0x2c | ||
56 | #define CTL_SD_DATA_PORT 0x30 | ||
57 | #define CTL_TRANSACTION_CTL 0x34 | ||
58 | #define CTL_SDIO_STATUS 0x36 | ||
59 | #define CTL_SDIO_IRQ_MASK 0x38 | ||
60 | #define CTL_RESET_SD 0xe0 | ||
61 | #define CTL_SDIO_REGS 0x100 | ||
62 | #define CTL_CLK_AND_WAIT_CTL 0x138 | ||
63 | #define CTL_RESET_SDIO 0x1e0 | ||
64 | |||
65 | /* Definitions for values the CTRL_STATUS register can take. */ | ||
66 | #define TMIO_STAT_CMDRESPEND 0x00000001 | ||
67 | #define TMIO_STAT_DATAEND 0x00000004 | ||
68 | #define TMIO_STAT_CARD_REMOVE 0x00000008 | ||
69 | #define TMIO_STAT_CARD_INSERT 0x00000010 | ||
70 | #define TMIO_STAT_SIGSTATE 0x00000020 | ||
71 | #define TMIO_STAT_WRPROTECT 0x00000080 | ||
72 | #define TMIO_STAT_CARD_REMOVE_A 0x00000100 | ||
73 | #define TMIO_STAT_CARD_INSERT_A 0x00000200 | ||
74 | #define TMIO_STAT_SIGSTATE_A 0x00000400 | ||
75 | #define TMIO_STAT_CMD_IDX_ERR 0x00010000 | ||
76 | #define TMIO_STAT_CRCFAIL 0x00020000 | ||
77 | #define TMIO_STAT_STOPBIT_ERR 0x00040000 | ||
78 | #define TMIO_STAT_DATATIMEOUT 0x00080000 | ||
79 | #define TMIO_STAT_RXOVERFLOW 0x00100000 | ||
80 | #define TMIO_STAT_TXUNDERRUN 0x00200000 | ||
81 | #define TMIO_STAT_CMDTIMEOUT 0x00400000 | ||
82 | #define TMIO_STAT_RXRDY 0x01000000 | ||
83 | #define TMIO_STAT_TXRQ 0x02000000 | ||
84 | #define TMIO_STAT_ILL_FUNC 0x20000000 | ||
85 | #define TMIO_STAT_CMD_BUSY 0x40000000 | ||
86 | #define TMIO_STAT_ILL_ACCESS 0x80000000 | ||
87 | |||
88 | /* Definitions for values the CTRL_SDIO_STATUS register can take. */ | ||
89 | #define TMIO_SDIO_STAT_IOIRQ 0x0001 | ||
90 | #define TMIO_SDIO_STAT_EXPUB52 0x4000 | ||
91 | #define TMIO_SDIO_STAT_EXWT 0x8000 | ||
92 | #define TMIO_SDIO_MASK_ALL 0xc007 | ||
93 | |||
94 | /* Define some IRQ masks */ | ||
95 | /* This is the mask used at reset by the chip */ | ||
96 | #define TMIO_MASK_ALL 0x837f031d | ||
97 | #define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) | ||
98 | #define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) | ||
99 | #define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ | ||
100 | TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) | ||
101 | #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) | ||
102 | |||
103 | #define enable_mmc_irqs(host, i) \ | ||
104 | do { \ | ||
105 | u32 mask;\ | ||
106 | mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ | ||
107 | mask &= ~((i) & TMIO_MASK_IRQ); \ | ||
108 | sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ | ||
109 | } while (0) | ||
110 | |||
111 | #define disable_mmc_irqs(host, i) \ | ||
112 | do { \ | ||
113 | u32 mask;\ | ||
114 | mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ | ||
115 | mask |= ((i) & TMIO_MASK_IRQ); \ | ||
116 | sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ | ||
117 | } while (0) | ||
118 | |||
119 | #define ack_mmc_irqs(host, i) \ | ||
120 | do { \ | ||
121 | sd_ctrl_write32((host), CTL_STATUS, ~(i)); \ | ||
122 | } while (0) | ||
123 | |||
124 | /* This is arbitrary, just noone needed any higher alignment yet */ | ||
125 | #define MAX_ALIGN 4 | ||
126 | |||
127 | struct tmio_mmc_host { | ||
128 | void __iomem *ctl; | ||
129 | unsigned long bus_shift; | ||
130 | struct mmc_command *cmd; | ||
131 | struct mmc_request *mrq; | ||
132 | struct mmc_data *data; | ||
133 | struct mmc_host *mmc; | ||
134 | int irq; | ||
135 | unsigned int sdio_irq_enabled; | ||
136 | |||
137 | /* Callbacks for clock / power control */ | ||
138 | void (*set_pwr)(struct platform_device *host, int state); | ||
139 | void (*set_clk_div)(struct platform_device *host, int state); | ||
140 | |||
141 | /* pio related stuff */ | ||
142 | struct scatterlist *sg_ptr; | ||
143 | struct scatterlist *sg_orig; | ||
144 | unsigned int sg_len; | ||
145 | unsigned int sg_off; | ||
146 | |||
147 | struct platform_device *pdev; | ||
148 | |||
149 | /* DMA support */ | ||
150 | struct dma_chan *chan_rx; | ||
151 | struct dma_chan *chan_tx; | ||
152 | struct tasklet_struct dma_complete; | ||
153 | struct tasklet_struct dma_issue; | ||
154 | #ifdef CONFIG_TMIO_MMC_DMA | ||
155 | u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN))); | ||
156 | struct scatterlist bounce_sg; | ||
157 | #endif | ||
158 | |||
159 | /* Track lost interrupts */ | ||
160 | struct delayed_work delayed_reset_work; | ||
161 | spinlock_t lock; | ||
162 | unsigned long last_req_ts; | ||
163 | }; | ||
164 | |||
165 | static void tmio_check_bounce_buffer(struct tmio_mmc_host *host); | ||
166 | |||
167 | static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) | ||
168 | { | ||
169 | return readw(host->ctl + (addr << host->bus_shift)); | ||
170 | } | ||
171 | |||
172 | static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, | ||
173 | u16 *buf, int count) | ||
174 | { | ||
175 | readsw(host->ctl + (addr << host->bus_shift), buf, count); | ||
176 | } | ||
177 | |||
178 | static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) | ||
179 | { | ||
180 | return readw(host->ctl + (addr << host->bus_shift)) | | ||
181 | readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; | ||
182 | } | ||
183 | |||
184 | static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) | ||
185 | { | ||
186 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
187 | } | ||
188 | |||
189 | static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, | ||
190 | u16 *buf, int count) | ||
191 | { | ||
192 | writesw(host->ctl + (addr << host->bus_shift), buf, count); | ||
193 | } | ||
194 | |||
195 | static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) | ||
196 | { | ||
197 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
198 | writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); | ||
199 | } | ||
200 | |||
201 | static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) | ||
202 | { | ||
203 | host->sg_len = data->sg_len; | ||
204 | host->sg_ptr = data->sg; | ||
205 | host->sg_orig = data->sg; | ||
206 | host->sg_off = 0; | ||
207 | } | ||
208 | |||
209 | static int tmio_mmc_next_sg(struct tmio_mmc_host *host) | ||
210 | { | ||
211 | host->sg_ptr = sg_next(host->sg_ptr); | ||
212 | host->sg_off = 0; | ||
213 | return --host->sg_len; | ||
214 | } | ||
215 | |||
216 | static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags) | ||
217 | { | ||
218 | local_irq_save(*flags); | ||
219 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; | ||
220 | } | ||
221 | |||
222 | static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt) | ||
223 | { | ||
224 | kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); | ||
225 | local_irq_restore(*flags); | ||
226 | } | ||
227 | |||
228 | #ifdef CONFIG_MMC_DEBUG | ||
229 | |||
230 | #define STATUS_TO_TEXT(a, status, i) \ | ||
231 | do { \ | ||
232 | if (status & TMIO_STAT_##a) { \ | ||
233 | if (i++) \ | ||
234 | printk(" | "); \ | ||
235 | printk(#a); \ | ||
236 | } \ | ||
237 | } while (0) | ||
238 | |||
239 | void pr_debug_status(u32 status) | ||
240 | { | ||
241 | int i = 0; | ||
242 | printk(KERN_DEBUG "status: %08x = ", status); | ||
243 | STATUS_TO_TEXT(CARD_REMOVE, status, i); | ||
244 | STATUS_TO_TEXT(CARD_INSERT, status, i); | ||
245 | STATUS_TO_TEXT(SIGSTATE, status, i); | ||
246 | STATUS_TO_TEXT(WRPROTECT, status, i); | ||
247 | STATUS_TO_TEXT(CARD_REMOVE_A, status, i); | ||
248 | STATUS_TO_TEXT(CARD_INSERT_A, status, i); | ||
249 | STATUS_TO_TEXT(SIGSTATE_A, status, i); | ||
250 | STATUS_TO_TEXT(CMD_IDX_ERR, status, i); | ||
251 | STATUS_TO_TEXT(STOPBIT_ERR, status, i); | ||
252 | STATUS_TO_TEXT(ILL_FUNC, status, i); | ||
253 | STATUS_TO_TEXT(CMD_BUSY, status, i); | ||
254 | STATUS_TO_TEXT(CMDRESPEND, status, i); | ||
255 | STATUS_TO_TEXT(DATAEND, status, i); | ||
256 | STATUS_TO_TEXT(CRCFAIL, status, i); | ||
257 | STATUS_TO_TEXT(DATATIMEOUT, status, i); | ||
258 | STATUS_TO_TEXT(CMDTIMEOUT, status, i); | ||
259 | STATUS_TO_TEXT(RXOVERFLOW, status, i); | ||
260 | STATUS_TO_TEXT(TXUNDERRUN, status, i); | ||
261 | STATUS_TO_TEXT(RXRDY, status, i); | ||
262 | STATUS_TO_TEXT(TXRQ, status, i); | ||
263 | STATUS_TO_TEXT(ILL_ACCESS, status, i); | ||
264 | printk("\n"); | ||
265 | } | ||
266 | |||
267 | #else | ||
268 | #define pr_debug_status(s) do { } while (0) | ||
269 | #endif | ||
270 | |||
271 | static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | ||
272 | { | ||
273 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
274 | |||
275 | if (enable) { | ||
276 | host->sdio_irq_enabled = 1; | ||
277 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); | ||
278 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, | ||
279 | (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); | ||
280 | } else { | ||
281 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); | ||
282 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); | ||
283 | host->sdio_irq_enabled = 0; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) | ||
288 | { | ||
289 | u32 clk = 0, clock; | ||
290 | |||
291 | if (new_clock) { | ||
292 | for (clock = host->mmc->f_min, clk = 0x80000080; | ||
293 | new_clock >= (clock<<1); clk >>= 1) | ||
294 | clock <<= 1; | ||
295 | clk |= 0x100; | ||
296 | } | ||
297 | |||
298 | if (host->set_clk_div) | ||
299 | host->set_clk_div(host->pdev, (clk>>22) & 1); | ||
300 | |||
301 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); | ||
302 | } | ||
303 | |||
304 | static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) | ||
305 | { | ||
306 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
307 | |||
308 | /* | ||
309 | * Testing on sh-mobile showed that SDIO IRQs are unmasked when | ||
310 | * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the | ||
311 | * device IRQ here and restore the SDIO IRQ mask before | ||
312 | * re-enabling the device IRQ. | ||
313 | */ | ||
314 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
315 | disable_irq(host->irq); | ||
316 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); | ||
317 | msleep(10); | ||
318 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
319 | tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); | ||
320 | enable_irq(host->irq); | ||
321 | } | ||
322 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & | ||
323 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
324 | msleep(10); | ||
325 | } | ||
326 | |||
327 | static void tmio_mmc_clk_start(struct tmio_mmc_host *host) | ||
328 | { | ||
329 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
330 | |||
331 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | | ||
332 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
333 | msleep(10); | ||
334 | /* see comment in tmio_mmc_clk_stop above */ | ||
335 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
336 | disable_irq(host->irq); | ||
337 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); | ||
338 | msleep(10); | ||
339 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
340 | tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); | ||
341 | enable_irq(host->irq); | ||
342 | } | ||
343 | } | ||
344 | |||
345 | static void reset(struct tmio_mmc_host *host) | ||
346 | { | ||
347 | /* FIXME - should we set stop clock reg here */ | ||
348 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); | ||
349 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); | ||
350 | msleep(10); | ||
351 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); | ||
352 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); | ||
353 | msleep(10); | ||
354 | } | ||
355 | |||
356 | static void tmio_mmc_reset_work(struct work_struct *work) | ||
357 | { | ||
358 | struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, | ||
359 | delayed_reset_work.work); | ||
360 | struct mmc_request *mrq; | ||
361 | unsigned long flags; | ||
362 | |||
363 | spin_lock_irqsave(&host->lock, flags); | ||
364 | mrq = host->mrq; | ||
365 | |||
366 | /* request already finished */ | ||
367 | if (!mrq | ||
368 | || time_is_after_jiffies(host->last_req_ts + | ||
369 | msecs_to_jiffies(2000))) { | ||
370 | spin_unlock_irqrestore(&host->lock, flags); | ||
371 | return; | ||
372 | } | ||
373 | |||
374 | dev_warn(&host->pdev->dev, | ||
375 | "timeout waiting for hardware interrupt (CMD%u)\n", | ||
376 | mrq->cmd->opcode); | ||
377 | |||
378 | if (host->data) | ||
379 | host->data->error = -ETIMEDOUT; | ||
380 | else if (host->cmd) | ||
381 | host->cmd->error = -ETIMEDOUT; | ||
382 | else | ||
383 | mrq->cmd->error = -ETIMEDOUT; | ||
384 | |||
385 | host->cmd = NULL; | ||
386 | host->data = NULL; | ||
387 | host->mrq = NULL; | ||
388 | |||
389 | spin_unlock_irqrestore(&host->lock, flags); | ||
390 | |||
391 | reset(host); | ||
392 | |||
393 | mmc_request_done(host->mmc, mrq); | ||
394 | } | ||
395 | |||
396 | static void | ||
397 | tmio_mmc_finish_request(struct tmio_mmc_host *host) | ||
398 | { | ||
399 | struct mmc_request *mrq = host->mrq; | ||
400 | |||
401 | if (!mrq) | ||
402 | return; | ||
403 | |||
404 | host->mrq = NULL; | ||
405 | host->cmd = NULL; | ||
406 | host->data = NULL; | ||
407 | |||
408 | cancel_delayed_work(&host->delayed_reset_work); | ||
409 | |||
410 | mmc_request_done(host->mmc, mrq); | ||
411 | } | ||
412 | |||
413 | /* These are the bitmasks the tmio chip requires to implement the MMC response | ||
414 | * types. Note that R1 and R6 are the same in this scheme. */ | ||
415 | #define APP_CMD 0x0040 | ||
416 | #define RESP_NONE 0x0300 | ||
417 | #define RESP_R1 0x0400 | ||
418 | #define RESP_R1B 0x0500 | ||
419 | #define RESP_R2 0x0600 | ||
420 | #define RESP_R3 0x0700 | ||
421 | #define DATA_PRESENT 0x0800 | ||
422 | #define TRANSFER_READ 0x1000 | ||
423 | #define TRANSFER_MULTI 0x2000 | ||
424 | #define SECURITY_CMD 0x4000 | ||
425 | |||
426 | static int | ||
427 | tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) | ||
428 | { | ||
429 | struct mmc_data *data = host->data; | ||
430 | int c = cmd->opcode; | ||
431 | |||
432 | /* Command 12 is handled by hardware */ | ||
433 | if (cmd->opcode == 12 && !cmd->arg) { | ||
434 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | switch (mmc_resp_type(cmd)) { | ||
439 | case MMC_RSP_NONE: c |= RESP_NONE; break; | ||
440 | case MMC_RSP_R1: c |= RESP_R1; break; | ||
441 | case MMC_RSP_R1B: c |= RESP_R1B; break; | ||
442 | case MMC_RSP_R2: c |= RESP_R2; break; | ||
443 | case MMC_RSP_R3: c |= RESP_R3; break; | ||
444 | default: | ||
445 | pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); | ||
446 | return -EINVAL; | ||
447 | } | ||
448 | |||
449 | host->cmd = cmd; | ||
450 | |||
451 | /* FIXME - this seems to be ok commented out but the spec suggest this bit | ||
452 | * should be set when issuing app commands. | ||
453 | * if(cmd->flags & MMC_FLAG_ACMD) | ||
454 | * c |= APP_CMD; | ||
455 | */ | ||
456 | if (data) { | ||
457 | c |= DATA_PRESENT; | ||
458 | if (data->blocks > 1) { | ||
459 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); | ||
460 | c |= TRANSFER_MULTI; | ||
461 | } | ||
462 | if (data->flags & MMC_DATA_READ) | ||
463 | c |= TRANSFER_READ; | ||
464 | } | ||
465 | |||
466 | enable_mmc_irqs(host, TMIO_MASK_CMD); | ||
467 | |||
468 | /* Fire off the command */ | ||
469 | sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); | ||
470 | sd_ctrl_write16(host, CTL_SD_CMD, c); | ||
471 | |||
472 | return 0; | ||
473 | } | ||
474 | |||
475 | /* | ||
476 | * This chip always returns (at least?) as much data as you ask for. | ||
477 | * I'm unsure what happens if you ask for less than a block. This should be | ||
478 | * looked into to ensure that a funny length read doesnt hose the controller. | ||
479 | */ | ||
480 | static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | ||
481 | { | ||
482 | struct mmc_data *data = host->data; | ||
483 | void *sg_virt; | ||
484 | unsigned short *buf; | ||
485 | unsigned int count; | ||
486 | unsigned long flags; | ||
487 | |||
488 | if (!data) { | ||
489 | pr_debug("Spurious PIO IRQ\n"); | ||
490 | return; | ||
491 | } | ||
492 | |||
493 | sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); | ||
494 | buf = (unsigned short *)(sg_virt + host->sg_off); | ||
495 | |||
496 | count = host->sg_ptr->length - host->sg_off; | ||
497 | if (count > data->blksz) | ||
498 | count = data->blksz; | ||
499 | |||
500 | pr_debug("count: %08x offset: %08x flags %08x\n", | ||
501 | count, host->sg_off, data->flags); | ||
502 | |||
503 | /* Transfer the data */ | ||
504 | if (data->flags & MMC_DATA_READ) | ||
505 | sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
506 | else | ||
507 | sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
508 | |||
509 | host->sg_off += count; | ||
510 | |||
511 | tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); | ||
512 | |||
513 | if (host->sg_off == host->sg_ptr->length) | ||
514 | tmio_mmc_next_sg(host); | ||
515 | |||
516 | return; | ||
517 | } | ||
518 | |||
519 | /* needs to be called with host->lock held */ | ||
520 | static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) | ||
521 | { | ||
522 | struct mmc_data *data = host->data; | ||
523 | struct mmc_command *stop; | ||
524 | |||
525 | host->data = NULL; | ||
526 | |||
527 | if (!data) { | ||
528 | dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); | ||
529 | return; | ||
530 | } | ||
531 | stop = data->stop; | ||
532 | |||
533 | /* FIXME - return correct transfer count on errors */ | ||
534 | if (!data->error) | ||
535 | data->bytes_xfered = data->blocks * data->blksz; | ||
536 | else | ||
537 | data->bytes_xfered = 0; | ||
538 | |||
539 | pr_debug("Completed data request\n"); | ||
540 | |||
541 | /* | ||
542 | * FIXME: other drivers allow an optional stop command of any given type | ||
543 | * which we dont do, as the chip can auto generate them. | ||
544 | * Perhaps we can be smarter about when to use auto CMD12 and | ||
545 | * only issue the auto request when we know this is the desired | ||
546 | * stop command, allowing fallback to the stop command the | ||
547 | * upper layers expect. For now, we do what works. | ||
548 | */ | ||
549 | |||
550 | if (data->flags & MMC_DATA_READ) { | ||
551 | if (!host->chan_rx) | ||
552 | disable_mmc_irqs(host, TMIO_MASK_READOP); | ||
553 | else | ||
554 | tmio_check_bounce_buffer(host); | ||
555 | dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", | ||
556 | host->mrq); | ||
557 | } else { | ||
558 | if (!host->chan_tx) | ||
559 | disable_mmc_irqs(host, TMIO_MASK_WRITEOP); | ||
560 | dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", | ||
561 | host->mrq); | ||
562 | } | ||
563 | |||
564 | if (stop) { | ||
565 | if (stop->opcode == 12 && !stop->arg) | ||
566 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); | ||
567 | else | ||
568 | BUG(); | ||
569 | } | ||
570 | |||
571 | tmio_mmc_finish_request(host); | ||
572 | } | ||
573 | |||
574 | static void tmio_mmc_data_irq(struct tmio_mmc_host *host) | ||
575 | { | ||
576 | struct mmc_data *data; | ||
577 | spin_lock(&host->lock); | ||
578 | data = host->data; | ||
579 | |||
580 | if (!data) | ||
581 | goto out; | ||
582 | |||
583 | if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) { | ||
584 | /* | ||
585 | * Has all data been written out yet? Testing on SuperH showed, | ||
586 | * that in most cases the first interrupt comes already with the | ||
587 | * BUSY status bit clear, but on some operations, like mount or | ||
588 | * in the beginning of a write / sync / umount, there is one | ||
589 | * DATAEND interrupt with the BUSY bit set, in this cases | ||
590 | * waiting for one more interrupt fixes the problem. | ||
591 | */ | ||
592 | if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { | ||
593 | disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
594 | tasklet_schedule(&host->dma_complete); | ||
595 | } | ||
596 | } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) { | ||
597 | disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
598 | tasklet_schedule(&host->dma_complete); | ||
599 | } else { | ||
600 | tmio_mmc_do_data_irq(host); | ||
601 | } | ||
602 | out: | ||
603 | spin_unlock(&host->lock); | ||
604 | } | ||
605 | |||
606 | static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, | ||
607 | unsigned int stat) | ||
608 | { | ||
609 | struct mmc_command *cmd = host->cmd; | ||
610 | int i, addr; | ||
611 | |||
612 | spin_lock(&host->lock); | ||
613 | |||
614 | if (!host->cmd) { | ||
615 | pr_debug("Spurious CMD irq\n"); | ||
616 | goto out; | ||
617 | } | ||
618 | |||
619 | host->cmd = NULL; | ||
620 | |||
621 | /* This controller is sicker than the PXA one. Not only do we need to | ||
622 | * drop the top 8 bits of the first response word, we also need to | ||
623 | * modify the order of the response for short response command types. | ||
624 | */ | ||
625 | |||
626 | for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) | ||
627 | cmd->resp[i] = sd_ctrl_read32(host, addr); | ||
628 | |||
629 | if (cmd->flags & MMC_RSP_136) { | ||
630 | cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); | ||
631 | cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); | ||
632 | cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); | ||
633 | cmd->resp[3] <<= 8; | ||
634 | } else if (cmd->flags & MMC_RSP_R3) { | ||
635 | cmd->resp[0] = cmd->resp[3]; | ||
636 | } | ||
637 | |||
638 | if (stat & TMIO_STAT_CMDTIMEOUT) | ||
639 | cmd->error = -ETIMEDOUT; | ||
640 | else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) | ||
641 | cmd->error = -EILSEQ; | ||
642 | |||
643 | /* If there is data to handle we enable data IRQs here, and | ||
644 | * we will ultimatley finish the request in the data_end handler. | ||
645 | * If theres no data or we encountered an error, finish now. | ||
646 | */ | ||
647 | if (host->data && !cmd->error) { | ||
648 | if (host->data->flags & MMC_DATA_READ) { | ||
649 | if (!host->chan_rx) | ||
650 | enable_mmc_irqs(host, TMIO_MASK_READOP); | ||
651 | } else { | ||
652 | if (!host->chan_tx) | ||
653 | enable_mmc_irqs(host, TMIO_MASK_WRITEOP); | ||
654 | else | ||
655 | tasklet_schedule(&host->dma_issue); | ||
656 | } | ||
657 | } else { | ||
658 | tmio_mmc_finish_request(host); | ||
659 | } | ||
660 | |||
661 | out: | ||
662 | spin_unlock(&host->lock); | ||
663 | |||
664 | return; | ||
665 | } | ||
666 | |||
667 | static irqreturn_t tmio_mmc_irq(int irq, void *devid) | ||
668 | { | ||
669 | struct tmio_mmc_host *host = devid; | ||
670 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
671 | unsigned int ireg, irq_mask, status; | ||
672 | unsigned int sdio_ireg, sdio_irq_mask, sdio_status; | ||
673 | |||
674 | pr_debug("MMC IRQ begin\n"); | ||
675 | |||
676 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
677 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
678 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
679 | |||
680 | sdio_ireg = 0; | ||
681 | if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
682 | sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); | ||
683 | sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); | ||
684 | sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; | ||
685 | |||
686 | sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); | ||
687 | |||
688 | if (sdio_ireg && !host->sdio_irq_enabled) { | ||
689 | pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", | ||
690 | sdio_status, sdio_irq_mask, sdio_ireg); | ||
691 | tmio_mmc_enable_sdio_irq(host->mmc, 0); | ||
692 | goto out; | ||
693 | } | ||
694 | 23 | ||
695 | if (host->mmc->caps & MMC_CAP_SDIO_IRQ && | 24 | #include "tmio_mmc.h" |
696 | sdio_ireg & TMIO_SDIO_STAT_IOIRQ) | ||
697 | mmc_signal_sdio_irq(host->mmc); | ||
698 | |||
699 | if (sdio_ireg) | ||
700 | goto out; | ||
701 | } | ||
702 | |||
703 | pr_debug_status(status); | ||
704 | pr_debug_status(ireg); | ||
705 | |||
706 | if (!ireg) { | ||
707 | disable_mmc_irqs(host, status & ~irq_mask); | ||
708 | |||
709 | pr_warning("tmio_mmc: Spurious irq, disabling! " | ||
710 | "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); | ||
711 | pr_debug_status(status); | ||
712 | |||
713 | goto out; | ||
714 | } | ||
715 | |||
716 | while (ireg) { | ||
717 | /* Card insert / remove attempts */ | ||
718 | if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { | ||
719 | ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | | ||
720 | TMIO_STAT_CARD_REMOVE); | ||
721 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
722 | } | ||
723 | |||
724 | /* CRC and other errors */ | ||
725 | /* if (ireg & TMIO_STAT_ERR_IRQ) | ||
726 | * handled |= tmio_error_irq(host, irq, stat); | ||
727 | */ | ||
728 | |||
729 | /* Command completion */ | ||
730 | if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { | ||
731 | ack_mmc_irqs(host, | ||
732 | TMIO_STAT_CMDRESPEND | | ||
733 | TMIO_STAT_CMDTIMEOUT); | ||
734 | tmio_mmc_cmd_irq(host, status); | ||
735 | } | ||
736 | |||
737 | /* Data transfer */ | ||
738 | if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { | ||
739 | ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); | ||
740 | tmio_mmc_pio_irq(host); | ||
741 | } | ||
742 | |||
743 | /* Data transfer completion */ | ||
744 | if (ireg & TMIO_STAT_DATAEND) { | ||
745 | ack_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
746 | tmio_mmc_data_irq(host); | ||
747 | } | ||
748 | |||
749 | /* Check status - keep going until we've handled it all */ | ||
750 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
751 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
752 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
753 | |||
754 | pr_debug("Status at end of loop: %08x\n", status); | ||
755 | pr_debug_status(status); | ||
756 | } | ||
757 | pr_debug("MMC IRQ end\n"); | ||
758 | |||
759 | out: | ||
760 | return IRQ_HANDLED; | ||
761 | } | ||
762 | |||
763 | #ifdef CONFIG_TMIO_MMC_DMA | ||
764 | static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) | ||
765 | { | ||
766 | if (host->sg_ptr == &host->bounce_sg) { | ||
767 | unsigned long flags; | ||
768 | void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); | ||
769 | memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); | ||
770 | tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); | ||
771 | } | ||
772 | } | ||
773 | |||
774 | static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) | ||
775 | { | ||
776 | #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) | ||
777 | /* Switch DMA mode on or off - SuperH specific? */ | ||
778 | sd_ctrl_write16(host, 0xd8, enable ? 2 : 0); | ||
779 | #endif | ||
780 | } | ||
781 | |||
782 | static void tmio_dma_complete(void *arg) | ||
783 | { | ||
784 | struct tmio_mmc_host *host = arg; | ||
785 | |||
786 | dev_dbg(&host->pdev->dev, "Command completed\n"); | ||
787 | |||
788 | if (!host->data) | ||
789 | dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n"); | ||
790 | else | ||
791 | enable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
792 | } | ||
793 | |||
794 | static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | ||
795 | { | ||
796 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
797 | struct dma_async_tx_descriptor *desc = NULL; | ||
798 | struct dma_chan *chan = host->chan_rx; | ||
799 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
800 | dma_cookie_t cookie; | ||
801 | int ret, i; | ||
802 | bool aligned = true, multiple = true; | ||
803 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
804 | |||
805 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
806 | if (sg_tmp->offset & align) | ||
807 | aligned = false; | ||
808 | if (sg_tmp->length & align) { | ||
809 | multiple = false; | ||
810 | break; | ||
811 | } | ||
812 | } | ||
813 | |||
814 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
815 | align >= MAX_ALIGN)) || !multiple) { | ||
816 | ret = -EINVAL; | ||
817 | goto pio; | ||
818 | } | ||
819 | |||
820 | /* The only sg element can be unaligned, use our bounce buffer then */ | ||
821 | if (!aligned) { | ||
822 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
823 | host->sg_ptr = &host->bounce_sg; | ||
824 | sg = host->sg_ptr; | ||
825 | } | ||
826 | |||
827 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); | ||
828 | if (ret > 0) | ||
829 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
830 | DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
831 | |||
832 | if (desc) { | ||
833 | desc->callback = tmio_dma_complete; | ||
834 | desc->callback_param = host; | ||
835 | cookie = dmaengine_submit(desc); | ||
836 | dma_async_issue_pending(chan); | ||
837 | } | ||
838 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
839 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
840 | |||
841 | pio: | ||
842 | if (!desc) { | ||
843 | /* DMA failed, fall back to PIO */ | ||
844 | if (ret >= 0) | ||
845 | ret = -EIO; | ||
846 | host->chan_rx = NULL; | ||
847 | dma_release_channel(chan); | ||
848 | /* Free the Tx channel too */ | ||
849 | chan = host->chan_tx; | ||
850 | if (chan) { | ||
851 | host->chan_tx = NULL; | ||
852 | dma_release_channel(chan); | ||
853 | } | ||
854 | dev_warn(&host->pdev->dev, | ||
855 | "DMA failed: %d, falling back to PIO\n", ret); | ||
856 | tmio_mmc_enable_dma(host, false); | ||
857 | } | ||
858 | |||
859 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, | ||
860 | desc, cookie, host->sg_len); | ||
861 | } | ||
862 | |||
863 | static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) | ||
864 | { | ||
865 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
866 | struct dma_async_tx_descriptor *desc = NULL; | ||
867 | struct dma_chan *chan = host->chan_tx; | ||
868 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
869 | dma_cookie_t cookie; | ||
870 | int ret, i; | ||
871 | bool aligned = true, multiple = true; | ||
872 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
873 | |||
874 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
875 | if (sg_tmp->offset & align) | ||
876 | aligned = false; | ||
877 | if (sg_tmp->length & align) { | ||
878 | multiple = false; | ||
879 | break; | ||
880 | } | ||
881 | } | ||
882 | |||
883 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
884 | align >= MAX_ALIGN)) || !multiple) { | ||
885 | ret = -EINVAL; | ||
886 | goto pio; | ||
887 | } | ||
888 | |||
889 | /* The only sg element can be unaligned, use our bounce buffer then */ | ||
890 | if (!aligned) { | ||
891 | unsigned long flags; | ||
892 | void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); | ||
893 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
894 | memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); | ||
895 | tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); | ||
896 | host->sg_ptr = &host->bounce_sg; | ||
897 | sg = host->sg_ptr; | ||
898 | } | ||
899 | |||
900 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); | ||
901 | if (ret > 0) | ||
902 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
903 | DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
904 | |||
905 | if (desc) { | ||
906 | desc->callback = tmio_dma_complete; | ||
907 | desc->callback_param = host; | ||
908 | cookie = dmaengine_submit(desc); | ||
909 | } | ||
910 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
911 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
912 | |||
913 | pio: | ||
914 | if (!desc) { | ||
915 | /* DMA failed, fall back to PIO */ | ||
916 | if (ret >= 0) | ||
917 | ret = -EIO; | ||
918 | host->chan_tx = NULL; | ||
919 | dma_release_channel(chan); | ||
920 | /* Free the Rx channel too */ | ||
921 | chan = host->chan_rx; | ||
922 | if (chan) { | ||
923 | host->chan_rx = NULL; | ||
924 | dma_release_channel(chan); | ||
925 | } | ||
926 | dev_warn(&host->pdev->dev, | ||
927 | "DMA failed: %d, falling back to PIO\n", ret); | ||
928 | tmio_mmc_enable_dma(host, false); | ||
929 | } | ||
930 | |||
931 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, | ||
932 | desc, cookie); | ||
933 | } | ||
934 | |||
935 | static void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
936 | struct mmc_data *data) | ||
937 | { | ||
938 | if (data->flags & MMC_DATA_READ) { | ||
939 | if (host->chan_rx) | ||
940 | tmio_mmc_start_dma_rx(host); | ||
941 | } else { | ||
942 | if (host->chan_tx) | ||
943 | tmio_mmc_start_dma_tx(host); | ||
944 | } | ||
945 | } | ||
946 | |||
947 | static void tmio_issue_tasklet_fn(unsigned long priv) | ||
948 | { | ||
949 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; | ||
950 | struct dma_chan *chan = host->chan_tx; | ||
951 | |||
952 | dma_async_issue_pending(chan); | ||
953 | } | ||
954 | |||
955 | static void tmio_tasklet_fn(unsigned long arg) | ||
956 | { | ||
957 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; | ||
958 | unsigned long flags; | ||
959 | |||
960 | spin_lock_irqsave(&host->lock, flags); | ||
961 | |||
962 | if (!host->data) | ||
963 | goto out; | ||
964 | |||
965 | if (host->data->flags & MMC_DATA_READ) | ||
966 | dma_unmap_sg(host->chan_rx->device->dev, | ||
967 | host->sg_ptr, host->sg_len, | ||
968 | DMA_FROM_DEVICE); | ||
969 | else | ||
970 | dma_unmap_sg(host->chan_tx->device->dev, | ||
971 | host->sg_ptr, host->sg_len, | ||
972 | DMA_TO_DEVICE); | ||
973 | |||
974 | tmio_mmc_do_data_irq(host); | ||
975 | out: | ||
976 | spin_unlock_irqrestore(&host->lock, flags); | ||
977 | } | ||
978 | |||
979 | /* It might be necessary to make filter MFD specific */ | ||
980 | static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) | ||
981 | { | ||
982 | dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); | ||
983 | chan->private = arg; | ||
984 | return true; | ||
985 | } | ||
986 | |||
987 | static void tmio_mmc_request_dma(struct tmio_mmc_host *host, | ||
988 | struct tmio_mmc_data *pdata) | ||
989 | { | ||
990 | /* We can only either use DMA for both Tx and Rx or not use it at all */ | ||
991 | if (pdata->dma) { | ||
992 | dma_cap_mask_t mask; | ||
993 | |||
994 | dma_cap_zero(mask); | ||
995 | dma_cap_set(DMA_SLAVE, mask); | ||
996 | |||
997 | host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, | ||
998 | pdata->dma->chan_priv_tx); | ||
999 | dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, | ||
1000 | host->chan_tx); | ||
1001 | |||
1002 | if (!host->chan_tx) | ||
1003 | return; | ||
1004 | |||
1005 | host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, | ||
1006 | pdata->dma->chan_priv_rx); | ||
1007 | dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, | ||
1008 | host->chan_rx); | ||
1009 | |||
1010 | if (!host->chan_rx) { | ||
1011 | dma_release_channel(host->chan_tx); | ||
1012 | host->chan_tx = NULL; | ||
1013 | return; | ||
1014 | } | ||
1015 | |||
1016 | tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host); | ||
1017 | tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host); | ||
1018 | |||
1019 | tmio_mmc_enable_dma(host, true); | ||
1020 | } | ||
1021 | } | ||
1022 | |||
1023 | static void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
1024 | { | ||
1025 | if (host->chan_tx) { | ||
1026 | struct dma_chan *chan = host->chan_tx; | ||
1027 | host->chan_tx = NULL; | ||
1028 | dma_release_channel(chan); | ||
1029 | } | ||
1030 | if (host->chan_rx) { | ||
1031 | struct dma_chan *chan = host->chan_rx; | ||
1032 | host->chan_rx = NULL; | ||
1033 | dma_release_channel(chan); | ||
1034 | } | ||
1035 | } | ||
1036 | #else | ||
1037 | static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) | ||
1038 | { | ||
1039 | } | ||
1040 | |||
1041 | static void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
1042 | struct mmc_data *data) | ||
1043 | { | ||
1044 | } | ||
1045 | |||
1046 | static void tmio_mmc_request_dma(struct tmio_mmc_host *host, | ||
1047 | struct tmio_mmc_data *pdata) | ||
1048 | { | ||
1049 | host->chan_tx = NULL; | ||
1050 | host->chan_rx = NULL; | ||
1051 | } | ||
1052 | |||
1053 | static void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
1054 | { | ||
1055 | } | ||
1056 | #endif | ||
1057 | |||
1058 | static int tmio_mmc_start_data(struct tmio_mmc_host *host, | ||
1059 | struct mmc_data *data) | ||
1060 | { | ||
1061 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
1062 | |||
1063 | pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", | ||
1064 | data->blksz, data->blocks); | ||
1065 | |||
1066 | /* Some hardware cannot perform 2 byte requests in 4 bit mode */ | ||
1067 | if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { | ||
1068 | int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; | ||
1069 | |||
1070 | if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { | ||
1071 | pr_err("%s: %d byte block unsupported in 4 bit mode\n", | ||
1072 | mmc_hostname(host->mmc), data->blksz); | ||
1073 | return -EINVAL; | ||
1074 | } | ||
1075 | } | ||
1076 | |||
1077 | tmio_mmc_init_sg(host, data); | ||
1078 | host->data = data; | ||
1079 | |||
1080 | /* Set transfer length / blocksize */ | ||
1081 | sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); | ||
1082 | sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); | ||
1083 | |||
1084 | tmio_mmc_start_dma(host, data); | ||
1085 | |||
1086 | return 0; | ||
1087 | } | ||
1088 | |||
1089 | /* Process requests from the MMC layer */ | ||
1090 | static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) | ||
1091 | { | ||
1092 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1093 | int ret; | ||
1094 | |||
1095 | if (host->mrq) | ||
1096 | pr_debug("request not null\n"); | ||
1097 | |||
1098 | host->last_req_ts = jiffies; | ||
1099 | wmb(); | ||
1100 | host->mrq = mrq; | ||
1101 | |||
1102 | if (mrq->data) { | ||
1103 | ret = tmio_mmc_start_data(host, mrq->data); | ||
1104 | if (ret) | ||
1105 | goto fail; | ||
1106 | } | ||
1107 | |||
1108 | ret = tmio_mmc_start_command(host, mrq->cmd); | ||
1109 | if (!ret) { | ||
1110 | schedule_delayed_work(&host->delayed_reset_work, | ||
1111 | msecs_to_jiffies(2000)); | ||
1112 | return; | ||
1113 | } | ||
1114 | |||
1115 | fail: | ||
1116 | host->mrq = NULL; | ||
1117 | mrq->cmd->error = ret; | ||
1118 | mmc_request_done(mmc, mrq); | ||
1119 | } | ||
1120 | |||
1121 | /* Set MMC clock / power. | ||
1122 | * Note: This controller uses a simple divider scheme therefore it cannot | ||
1123 | * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as | ||
1124 | * MMC wont run that fast, it has to be clocked at 12MHz which is the next | ||
1125 | * slowest setting. | ||
1126 | */ | ||
1127 | static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
1128 | { | ||
1129 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1130 | |||
1131 | if (ios->clock) | ||
1132 | tmio_mmc_set_clock(host, ios->clock); | ||
1133 | |||
1134 | /* Power sequence - OFF -> ON -> UP */ | ||
1135 | switch (ios->power_mode) { | ||
1136 | case MMC_POWER_OFF: /* power down SD bus */ | ||
1137 | if (host->set_pwr) | ||
1138 | host->set_pwr(host->pdev, 0); | ||
1139 | tmio_mmc_clk_stop(host); | ||
1140 | break; | ||
1141 | case MMC_POWER_ON: /* power up SD bus */ | ||
1142 | if (host->set_pwr) | ||
1143 | host->set_pwr(host->pdev, 1); | ||
1144 | break; | ||
1145 | case MMC_POWER_UP: /* start bus clock */ | ||
1146 | tmio_mmc_clk_start(host); | ||
1147 | break; | ||
1148 | } | ||
1149 | |||
1150 | switch (ios->bus_width) { | ||
1151 | case MMC_BUS_WIDTH_1: | ||
1152 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); | ||
1153 | break; | ||
1154 | case MMC_BUS_WIDTH_4: | ||
1155 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); | ||
1156 | break; | ||
1157 | } | ||
1158 | |||
1159 | /* Let things settle. delay taken from winCE driver */ | ||
1160 | udelay(140); | ||
1161 | } | ||
1162 | |||
1163 | static int tmio_mmc_get_ro(struct mmc_host *mmc) | ||
1164 | { | ||
1165 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1166 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
1167 | |||
1168 | return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || | ||
1169 | (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1; | ||
1170 | } | ||
1171 | |||
1172 | static int tmio_mmc_get_cd(struct mmc_host *mmc) | ||
1173 | { | ||
1174 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1175 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
1176 | |||
1177 | if (!pdata->get_cd) | ||
1178 | return -ENOSYS; | ||
1179 | else | ||
1180 | return pdata->get_cd(host->pdev); | ||
1181 | } | ||
1182 | |||
1183 | static const struct mmc_host_ops tmio_mmc_ops = { | ||
1184 | .request = tmio_mmc_request, | ||
1185 | .set_ios = tmio_mmc_set_ios, | ||
1186 | .get_ro = tmio_mmc_get_ro, | ||
1187 | .get_cd = tmio_mmc_get_cd, | ||
1188 | .enable_sdio_irq = tmio_mmc_enable_sdio_irq, | ||
1189 | }; | ||
1190 | 25 | ||
1191 | #ifdef CONFIG_PM | 26 | #ifdef CONFIG_PM |
1192 | static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) | 27 | static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) |
@@ -1227,138 +62,54 @@ out: | |||
1227 | #define tmio_mmc_resume NULL | 62 | #define tmio_mmc_resume NULL |
1228 | #endif | 63 | #endif |
1229 | 64 | ||
1230 | static int __devinit tmio_mmc_probe(struct platform_device *dev) | 65 | static int __devinit tmio_mmc_probe(struct platform_device *pdev) |
1231 | { | 66 | { |
1232 | const struct mfd_cell *cell = mfd_get_cell(dev); | 67 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
1233 | struct tmio_mmc_data *pdata; | 68 | struct tmio_mmc_data *pdata; |
1234 | struct resource *res_ctl; | ||
1235 | struct tmio_mmc_host *host; | 69 | struct tmio_mmc_host *host; |
1236 | struct mmc_host *mmc; | ||
1237 | int ret = -EINVAL; | 70 | int ret = -EINVAL; |
1238 | u32 irq_mask = TMIO_MASK_CMD; | ||
1239 | 71 | ||
1240 | if (dev->num_resources != 2) | 72 | if (pdev->num_resources != 2) |
1241 | goto out; | 73 | goto out; |
1242 | 74 | ||
1243 | res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); | 75 | pdata = mfd_get_data(pdev); |
1244 | if (!res_ctl) | ||
1245 | goto out; | ||
1246 | |||
1247 | pdata = mfd_get_data(dev); | ||
1248 | if (!pdata || !pdata->hclk) | 76 | if (!pdata || !pdata->hclk) |
1249 | goto out; | 77 | goto out; |
1250 | 78 | ||
1251 | ret = -ENOMEM; | ||
1252 | |||
1253 | mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev); | ||
1254 | if (!mmc) | ||
1255 | goto out; | ||
1256 | |||
1257 | host = mmc_priv(mmc); | ||
1258 | host->mmc = mmc; | ||
1259 | host->pdev = dev; | ||
1260 | platform_set_drvdata(dev, mmc); | ||
1261 | |||
1262 | host->set_pwr = pdata->set_pwr; | ||
1263 | host->set_clk_div = pdata->set_clk_div; | ||
1264 | |||
1265 | /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ | ||
1266 | host->bus_shift = resource_size(res_ctl) >> 10; | ||
1267 | |||
1268 | host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); | ||
1269 | if (!host->ctl) | ||
1270 | goto host_free; | ||
1271 | |||
1272 | mmc->ops = &tmio_mmc_ops; | ||
1273 | mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; | ||
1274 | mmc->f_max = pdata->hclk; | ||
1275 | mmc->f_min = mmc->f_max / 512; | ||
1276 | mmc->max_segs = 32; | ||
1277 | mmc->max_blk_size = 512; | ||
1278 | mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * | ||
1279 | mmc->max_segs; | ||
1280 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | ||
1281 | mmc->max_seg_size = mmc->max_req_size; | ||
1282 | if (pdata->ocr_mask) | ||
1283 | mmc->ocr_avail = pdata->ocr_mask; | ||
1284 | else | ||
1285 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | ||
1286 | |||
1287 | /* Tell the MFD core we are ready to be enabled */ | 79 | /* Tell the MFD core we are ready to be enabled */ |
1288 | if (cell->enable) { | 80 | if (cell->enable) { |
1289 | ret = cell->enable(dev); | 81 | ret = cell->enable(pdev); |
1290 | if (ret) | 82 | if (ret) |
1291 | goto unmap_ctl; | 83 | goto out; |
1292 | } | 84 | } |
1293 | 85 | ||
1294 | tmio_mmc_clk_stop(host); | 86 | ret = tmio_mmc_host_probe(&host, pdev, pdata); |
1295 | reset(host); | ||
1296 | |||
1297 | ret = platform_get_irq(dev, 0); | ||
1298 | if (ret >= 0) | ||
1299 | host->irq = ret; | ||
1300 | else | ||
1301 | goto cell_disable; | ||
1302 | |||
1303 | disable_mmc_irqs(host, TMIO_MASK_ALL); | ||
1304 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
1305 | tmio_mmc_enable_sdio_irq(mmc, 0); | ||
1306 | |||
1307 | ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | | ||
1308 | IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); | ||
1309 | if (ret) | 87 | if (ret) |
1310 | goto cell_disable; | 88 | goto cell_disable; |
1311 | 89 | ||
1312 | spin_lock_init(&host->lock); | ||
1313 | |||
1314 | /* Init delayed work for request timeouts */ | ||
1315 | INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work); | ||
1316 | |||
1317 | /* See if we also get DMA */ | ||
1318 | tmio_mmc_request_dma(host, pdata); | ||
1319 | |||
1320 | mmc_add_host(mmc); | ||
1321 | |||
1322 | pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), | 90 | pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), |
1323 | (unsigned long)host->ctl, host->irq); | 91 | (unsigned long)host->ctl, host->irq); |
1324 | 92 | ||
1325 | /* Unmask the IRQs we want to know about */ | ||
1326 | if (!host->chan_rx) | ||
1327 | irq_mask |= TMIO_MASK_READOP; | ||
1328 | if (!host->chan_tx) | ||
1329 | irq_mask |= TMIO_MASK_WRITEOP; | ||
1330 | enable_mmc_irqs(host, irq_mask); | ||
1331 | |||
1332 | return 0; | 93 | return 0; |
1333 | 94 | ||
1334 | cell_disable: | 95 | cell_disable: |
1335 | if (cell->disable) | 96 | if (cell->disable) |
1336 | cell->disable(dev); | 97 | cell->disable(pdev); |
1337 | unmap_ctl: | ||
1338 | iounmap(host->ctl); | ||
1339 | host_free: | ||
1340 | mmc_free_host(mmc); | ||
1341 | out: | 98 | out: |
1342 | return ret; | 99 | return ret; |
1343 | } | 100 | } |
1344 | 101 | ||
1345 | static int __devexit tmio_mmc_remove(struct platform_device *dev) | 102 | static int __devexit tmio_mmc_remove(struct platform_device *pdev) |
1346 | { | 103 | { |
1347 | const struct mfd_cell *cell = mfd_get_cell(dev); | 104 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
1348 | struct mmc_host *mmc = platform_get_drvdata(dev); | 105 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
1349 | 106 | ||
1350 | platform_set_drvdata(dev, NULL); | 107 | platform_set_drvdata(pdev, NULL); |
1351 | 108 | ||
1352 | if (mmc) { | 109 | if (mmc) { |
1353 | struct tmio_mmc_host *host = mmc_priv(mmc); | 110 | tmio_mmc_host_remove(mmc_priv(mmc)); |
1354 | mmc_remove_host(mmc); | ||
1355 | cancel_delayed_work_sync(&host->delayed_reset_work); | ||
1356 | tmio_mmc_release_dma(host); | ||
1357 | free_irq(host->irq, host); | ||
1358 | if (cell->disable) | 111 | if (cell->disable) |
1359 | cell->disable(dev); | 112 | cell->disable(pdev); |
1360 | iounmap(host->ctl); | ||
1361 | mmc_free_host(mmc); | ||
1362 | } | 113 | } |
1363 | 114 | ||
1364 | return 0; | 115 | return 0; |
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h new file mode 100644 index 000000000000..099ed49a259b --- /dev/null +++ b/drivers/mmc/host/tmio_mmc.h | |||
@@ -0,0 +1,123 @@ | |||
1 | /* | ||
2 | * linux/drivers/mmc/host/tmio_mmc.h | ||
3 | * | ||
4 | * Copyright (C) 2007 Ian Molton | ||
5 | * Copyright (C) 2004 Ian Molton | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * Driver for the MMC / SD / SDIO cell found in: | ||
12 | * | ||
13 | * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 | ||
14 | */ | ||
15 | |||
16 | #ifndef TMIO_MMC_H | ||
17 | #define TMIO_MMC_H | ||
18 | |||
19 | #include <linux/highmem.h> | ||
20 | #include <linux/mmc/tmio.h> | ||
21 | #include <linux/pagemap.h> | ||
22 | |||
23 | /* Definitions for values the CTRL_SDIO_STATUS register can take. */ | ||
24 | #define TMIO_SDIO_STAT_IOIRQ 0x0001 | ||
25 | #define TMIO_SDIO_STAT_EXPUB52 0x4000 | ||
26 | #define TMIO_SDIO_STAT_EXWT 0x8000 | ||
27 | #define TMIO_SDIO_MASK_ALL 0xc007 | ||
28 | |||
29 | /* Define some IRQ masks */ | ||
30 | /* This is the mask used at reset by the chip */ | ||
31 | #define TMIO_MASK_ALL 0x837f031d | ||
32 | #define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) | ||
33 | #define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) | ||
34 | #define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ | ||
35 | TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) | ||
36 | #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) | ||
37 | |||
38 | struct tmio_mmc_data; | ||
39 | |||
40 | struct tmio_mmc_host { | ||
41 | void __iomem *ctl; | ||
42 | unsigned long bus_shift; | ||
43 | struct mmc_command *cmd; | ||
44 | struct mmc_request *mrq; | ||
45 | struct mmc_data *data; | ||
46 | struct mmc_host *mmc; | ||
47 | int irq; | ||
48 | unsigned int sdio_irq_enabled; | ||
49 | |||
50 | /* Callbacks for clock / power control */ | ||
51 | void (*set_pwr)(struct platform_device *host, int state); | ||
52 | void (*set_clk_div)(struct platform_device *host, int state); | ||
53 | |||
54 | /* pio related stuff */ | ||
55 | struct scatterlist *sg_ptr; | ||
56 | struct scatterlist *sg_orig; | ||
57 | unsigned int sg_len; | ||
58 | unsigned int sg_off; | ||
59 | |||
60 | struct platform_device *pdev; | ||
61 | struct tmio_mmc_data *pdata; | ||
62 | |||
63 | /* DMA support */ | ||
64 | bool force_pio; | ||
65 | struct dma_chan *chan_rx; | ||
66 | struct dma_chan *chan_tx; | ||
67 | struct tasklet_struct dma_complete; | ||
68 | struct tasklet_struct dma_issue; | ||
69 | struct scatterlist bounce_sg; | ||
70 | u8 *bounce_buf; | ||
71 | |||
72 | /* Track lost interrupts */ | ||
73 | struct delayed_work delayed_reset_work; | ||
74 | spinlock_t lock; | ||
75 | unsigned long last_req_ts; | ||
76 | }; | ||
77 | |||
78 | int tmio_mmc_host_probe(struct tmio_mmc_host **host, | ||
79 | struct platform_device *pdev, | ||
80 | struct tmio_mmc_data *pdata); | ||
81 | void tmio_mmc_host_remove(struct tmio_mmc_host *host); | ||
82 | void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); | ||
83 | |||
84 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); | ||
85 | void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); | ||
86 | |||
87 | static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, | ||
88 | unsigned long *flags) | ||
89 | { | ||
90 | local_irq_save(*flags); | ||
91 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; | ||
92 | } | ||
93 | |||
94 | static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, | ||
95 | unsigned long *flags, void *virt) | ||
96 | { | ||
97 | kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); | ||
98 | local_irq_restore(*flags); | ||
99 | } | ||
100 | |||
101 | #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) | ||
102 | void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); | ||
103 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); | ||
104 | void tmio_mmc_release_dma(struct tmio_mmc_host *host); | ||
105 | #else | ||
106 | static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
107 | struct mmc_data *data) | ||
108 | { | ||
109 | } | ||
110 | |||
111 | static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, | ||
112 | struct tmio_mmc_data *pdata) | ||
113 | { | ||
114 | host->chan_tx = NULL; | ||
115 | host->chan_rx = NULL; | ||
116 | } | ||
117 | |||
118 | static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
119 | { | ||
120 | } | ||
121 | #endif | ||
122 | |||
123 | #endif | ||
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c new file mode 100644 index 000000000000..d3de74ab633e --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_dma.c | |||
@@ -0,0 +1,317 @@ | |||
1 | /* | ||
2 | * linux/drivers/mmc/tmio_mmc_dma.c | ||
3 | * | ||
4 | * Copyright (C) 2010-2011 Guennadi Liakhovetski | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * DMA function for TMIO MMC implementations | ||
11 | */ | ||
12 | |||
13 | #include <linux/device.h> | ||
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/mfd/tmio.h> | ||
16 | #include <linux/mmc/host.h> | ||
17 | #include <linux/mmc/tmio.h> | ||
18 | #include <linux/pagemap.h> | ||
19 | #include <linux/scatterlist.h> | ||
20 | |||
21 | #include "tmio_mmc.h" | ||
22 | |||
23 | #define TMIO_MMC_MIN_DMA_LEN 8 | ||
24 | |||
25 | static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) | ||
26 | { | ||
27 | #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) | ||
28 | /* Switch DMA mode on or off - SuperH specific? */ | ||
29 | writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift)); | ||
30 | #endif | ||
31 | } | ||
32 | |||
33 | static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | ||
34 | { | ||
35 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
36 | struct dma_async_tx_descriptor *desc = NULL; | ||
37 | struct dma_chan *chan = host->chan_rx; | ||
38 | struct tmio_mmc_data *pdata = host->pdata; | ||
39 | dma_cookie_t cookie; | ||
40 | int ret, i; | ||
41 | bool aligned = true, multiple = true; | ||
42 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
43 | |||
44 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
45 | if (sg_tmp->offset & align) | ||
46 | aligned = false; | ||
47 | if (sg_tmp->length & align) { | ||
48 | multiple = false; | ||
49 | break; | ||
50 | } | ||
51 | } | ||
52 | |||
53 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
54 | (align & PAGE_MASK))) || !multiple) { | ||
55 | ret = -EINVAL; | ||
56 | goto pio; | ||
57 | } | ||
58 | |||
59 | if (sg->length < TMIO_MMC_MIN_DMA_LEN) { | ||
60 | host->force_pio = true; | ||
61 | return; | ||
62 | } | ||
63 | |||
64 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY); | ||
65 | |||
66 | /* The only sg element can be unaligned, use our bounce buffer then */ | ||
67 | if (!aligned) { | ||
68 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
69 | host->sg_ptr = &host->bounce_sg; | ||
70 | sg = host->sg_ptr; | ||
71 | } | ||
72 | |||
73 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); | ||
74 | if (ret > 0) | ||
75 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
76 | DMA_FROM_DEVICE, DMA_CTRL_ACK); | ||
77 | |||
78 | if (desc) { | ||
79 | cookie = dmaengine_submit(desc); | ||
80 | if (cookie < 0) { | ||
81 | desc = NULL; | ||
82 | ret = cookie; | ||
83 | } | ||
84 | } | ||
85 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
86 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
87 | |||
88 | pio: | ||
89 | if (!desc) { | ||
90 | /* DMA failed, fall back to PIO */ | ||
91 | if (ret >= 0) | ||
92 | ret = -EIO; | ||
93 | host->chan_rx = NULL; | ||
94 | dma_release_channel(chan); | ||
95 | /* Free the Tx channel too */ | ||
96 | chan = host->chan_tx; | ||
97 | if (chan) { | ||
98 | host->chan_tx = NULL; | ||
99 | dma_release_channel(chan); | ||
100 | } | ||
101 | dev_warn(&host->pdev->dev, | ||
102 | "DMA failed: %d, falling back to PIO\n", ret); | ||
103 | tmio_mmc_enable_dma(host, false); | ||
104 | } | ||
105 | |||
106 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, | ||
107 | desc, cookie, host->sg_len); | ||
108 | } | ||
109 | |||
110 | static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) | ||
111 | { | ||
112 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
113 | struct dma_async_tx_descriptor *desc = NULL; | ||
114 | struct dma_chan *chan = host->chan_tx; | ||
115 | struct tmio_mmc_data *pdata = host->pdata; | ||
116 | dma_cookie_t cookie; | ||
117 | int ret, i; | ||
118 | bool aligned = true, multiple = true; | ||
119 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
120 | |||
121 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
122 | if (sg_tmp->offset & align) | ||
123 | aligned = false; | ||
124 | if (sg_tmp->length & align) { | ||
125 | multiple = false; | ||
126 | break; | ||
127 | } | ||
128 | } | ||
129 | |||
130 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
131 | (align & PAGE_MASK))) || !multiple) { | ||
132 | ret = -EINVAL; | ||
133 | goto pio; | ||
134 | } | ||
135 | |||
136 | if (sg->length < TMIO_MMC_MIN_DMA_LEN) { | ||
137 | host->force_pio = true; | ||
138 | return; | ||
139 | } | ||
140 | |||
141 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ); | ||
142 | |||
143 | /* The only sg element can be unaligned, use our bounce buffer then */ | ||
144 | if (!aligned) { | ||
145 | unsigned long flags; | ||
146 | void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); | ||
147 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
148 | memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); | ||
149 | tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); | ||
150 | host->sg_ptr = &host->bounce_sg; | ||
151 | sg = host->sg_ptr; | ||
152 | } | ||
153 | |||
154 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); | ||
155 | if (ret > 0) | ||
156 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
157 | DMA_TO_DEVICE, DMA_CTRL_ACK); | ||
158 | |||
159 | if (desc) { | ||
160 | cookie = dmaengine_submit(desc); | ||
161 | if (cookie < 0) { | ||
162 | desc = NULL; | ||
163 | ret = cookie; | ||
164 | } | ||
165 | } | ||
166 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
167 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
168 | |||
169 | pio: | ||
170 | if (!desc) { | ||
171 | /* DMA failed, fall back to PIO */ | ||
172 | if (ret >= 0) | ||
173 | ret = -EIO; | ||
174 | host->chan_tx = NULL; | ||
175 | dma_release_channel(chan); | ||
176 | /* Free the Rx channel too */ | ||
177 | chan = host->chan_rx; | ||
178 | if (chan) { | ||
179 | host->chan_rx = NULL; | ||
180 | dma_release_channel(chan); | ||
181 | } | ||
182 | dev_warn(&host->pdev->dev, | ||
183 | "DMA failed: %d, falling back to PIO\n", ret); | ||
184 | tmio_mmc_enable_dma(host, false); | ||
185 | } | ||
186 | |||
187 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, | ||
188 | desc, cookie); | ||
189 | } | ||
190 | |||
191 | void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
192 | struct mmc_data *data) | ||
193 | { | ||
194 | if (data->flags & MMC_DATA_READ) { | ||
195 | if (host->chan_rx) | ||
196 | tmio_mmc_start_dma_rx(host); | ||
197 | } else { | ||
198 | if (host->chan_tx) | ||
199 | tmio_mmc_start_dma_tx(host); | ||
200 | } | ||
201 | } | ||
202 | |||
203 | static void tmio_mmc_issue_tasklet_fn(unsigned long priv) | ||
204 | { | ||
205 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; | ||
206 | struct dma_chan *chan = NULL; | ||
207 | |||
208 | spin_lock_irq(&host->lock); | ||
209 | |||
210 | if (host && host->data) { | ||
211 | if (host->data->flags & MMC_DATA_READ) | ||
212 | chan = host->chan_rx; | ||
213 | else | ||
214 | chan = host->chan_tx; | ||
215 | } | ||
216 | |||
217 | spin_unlock_irq(&host->lock); | ||
218 | |||
219 | tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
220 | |||
221 | if (chan) | ||
222 | dma_async_issue_pending(chan); | ||
223 | } | ||
224 | |||
225 | static void tmio_mmc_tasklet_fn(unsigned long arg) | ||
226 | { | ||
227 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; | ||
228 | |||
229 | spin_lock_irq(&host->lock); | ||
230 | |||
231 | if (!host->data) | ||
232 | goto out; | ||
233 | |||
234 | if (host->data->flags & MMC_DATA_READ) | ||
235 | dma_unmap_sg(host->chan_rx->device->dev, | ||
236 | host->sg_ptr, host->sg_len, | ||
237 | DMA_FROM_DEVICE); | ||
238 | else | ||
239 | dma_unmap_sg(host->chan_tx->device->dev, | ||
240 | host->sg_ptr, host->sg_len, | ||
241 | DMA_TO_DEVICE); | ||
242 | |||
243 | tmio_mmc_do_data_irq(host); | ||
244 | out: | ||
245 | spin_unlock_irq(&host->lock); | ||
246 | } | ||
247 | |||
/* It might be necessary to make filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	/* Accept any offered channel; stash the platform slave config on it. */
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}
255 | |||
256 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) | ||
257 | { | ||
258 | /* We can only either use DMA for both Tx and Rx or not use it at all */ | ||
259 | if (pdata->dma) { | ||
260 | dma_cap_mask_t mask; | ||
261 | |||
262 | dma_cap_zero(mask); | ||
263 | dma_cap_set(DMA_SLAVE, mask); | ||
264 | |||
265 | host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, | ||
266 | pdata->dma->chan_priv_tx); | ||
267 | dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, | ||
268 | host->chan_tx); | ||
269 | |||
270 | if (!host->chan_tx) | ||
271 | return; | ||
272 | |||
273 | host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, | ||
274 | pdata->dma->chan_priv_rx); | ||
275 | dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, | ||
276 | host->chan_rx); | ||
277 | |||
278 | if (!host->chan_rx) | ||
279 | goto ereqrx; | ||
280 | |||
281 | host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); | ||
282 | if (!host->bounce_buf) | ||
283 | goto ebouncebuf; | ||
284 | |||
285 | tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); | ||
286 | tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); | ||
287 | |||
288 | tmio_mmc_enable_dma(host, true); | ||
289 | |||
290 | return; | ||
291 | ebouncebuf: | ||
292 | dma_release_channel(host->chan_rx); | ||
293 | host->chan_rx = NULL; | ||
294 | ereqrx: | ||
295 | dma_release_channel(host->chan_tx); | ||
296 | host->chan_tx = NULL; | ||
297 | return; | ||
298 | } | ||
299 | } | ||
300 | |||
301 | void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
302 | { | ||
303 | if (host->chan_tx) { | ||
304 | struct dma_chan *chan = host->chan_tx; | ||
305 | host->chan_tx = NULL; | ||
306 | dma_release_channel(chan); | ||
307 | } | ||
308 | if (host->chan_rx) { | ||
309 | struct dma_chan *chan = host->chan_rx; | ||
310 | host->chan_rx = NULL; | ||
311 | dma_release_channel(chan); | ||
312 | } | ||
313 | if (host->bounce_buf) { | ||
314 | free_pages((unsigned long)host->bounce_buf, 0); | ||
315 | host->bounce_buf = NULL; | ||
316 | } | ||
317 | } | ||
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c new file mode 100644 index 000000000000..6ae8d2f00ec7 --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_pio.c | |||
@@ -0,0 +1,897 @@ | |||
1 | /* | ||
2 | * linux/drivers/mmc/host/tmio_mmc_pio.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Guennadi Liakhovetski | ||
5 | * Copyright (C) 2007 Ian Molton | ||
6 | * Copyright (C) 2004 Ian Molton | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * Driver for the MMC / SD / SDIO IP found in: | ||
13 | * | ||
14 | * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs | ||
15 | * | ||
16 | * This driver draws mainly on scattered spec sheets, Reverse engineering | ||
17 | * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit | ||
18 | * support). (Further 4 bit support from a later datasheet). | ||
19 | * | ||
20 | * TODO: | ||
21 | * Investigate using a workqueue for PIO transfers | ||
22 | * Eliminate FIXMEs | ||
23 | * SDIO support | ||
24 | * Better Power management | ||
25 | * Handle MMC errors better | ||
26 | * double buffer support | ||
27 | * | ||
28 | */ | ||
29 | |||
30 | #include <linux/delay.h> | ||
31 | #include <linux/device.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/irq.h> | ||
36 | #include <linux/mfd/tmio.h> | ||
37 | #include <linux/mmc/host.h> | ||
38 | #include <linux/mmc/tmio.h> | ||
39 | #include <linux/module.h> | ||
40 | #include <linux/pagemap.h> | ||
41 | #include <linux/platform_device.h> | ||
42 | #include <linux/scatterlist.h> | ||
43 | #include <linux/workqueue.h> | ||
44 | #include <linux/spinlock.h> | ||
45 | |||
46 | #include "tmio_mmc.h" | ||
47 | |||
/* Read a 16-bit SD control register; bus_shift scales the register offset. */
static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift));
}
52 | |||
/* Burst-read "count" 16-bit words from a single SD control register into buf. */
static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	readsw(host->ctl + (addr << host->bus_shift), buf, count);
}
58 | |||
/* Read a 32-bit SD control value as two 16-bit reads, low word first. */
static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift)) |
		readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
64 | |||
/* Write a 16-bit SD control register; bus_shift scales the register offset. */
static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
}
69 | |||
/* Burst-write "count" 16-bit words from buf to a single SD control register. */
static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	writesw(host->ctl + (addr << host->bus_shift), buf, count);
}
75 | |||
/* Write a 32-bit SD control value as two 16-bit writes, low word first. */
static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
81 | |||
/* Unmask the interrupts in "i": clearing a bit in CTL_IRQ_MASK enables it. */
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}
87 | |||
/* Mask the interrupts in "i": setting a bit in CTL_IRQ_MASK disables it. */
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}
93 | |||
/* Acknowledge the status bits in "i" by writing them back as zero. */
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}
98 | |||
99 | static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) | ||
100 | { | ||
101 | host->sg_len = data->sg_len; | ||
102 | host->sg_ptr = data->sg; | ||
103 | host->sg_orig = data->sg; | ||
104 | host->sg_off = 0; | ||
105 | } | ||
106 | |||
107 | static int tmio_mmc_next_sg(struct tmio_mmc_host *host) | ||
108 | { | ||
109 | host->sg_ptr = sg_next(host->sg_ptr); | ||
110 | host->sg_off = 0; | ||
111 | return --host->sg_len; | ||
112 | } | ||
113 | |||
#ifdef CONFIG_MMC_DEBUG

/* Print the name of status bit "a" if set, "|"-separated after the first. */
#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

/* Decode a CTL_STATUS word into a human-readable list of set bits. */
static void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
156 | |||
/*
 * Enable/disable the card's SDIO interrupt. On enable the transaction
 * control bit is set before unmasking IOIRQ; on disable the order is
 * reversed and the flag is cleared last, so sdio_irq_enabled never
 * claims more than the hardware currently allows.
 */
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		/* mask everything except the IOIRQ bit */
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
	} else {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
		host->sdio_irq_enabled = 0;
	}
}
172 | |||
/*
 * Program the SD card clock divider for the requested frequency.
 * The loop walks the divider pattern right from 0x80000080 while
 * doubling "clock" from f_min, stopping before the result would
 * exceed new_clock. A zero new_clock writes a cleared divider.
 */
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
			new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
		/* NOTE(review): 0x100 matches the clock-enable bit used in
		 * tmio_mmc_clk_start()/_stop() below */
		clk |= 0x100;
	}

	/* bit 22 selects an external divider where the platform provides one */
	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
189 | |||
/*
 * Gate the SD bus clock. Variants with a register window larger than
 * 0x100 also have a CLK_AND_WAIT control that must be cleared first.
 */
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	/* clear the clock-enable bit (0x100), keeping the divider bits */
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}
204 | |||
/*
 * Ungate the SD bus clock: set the clock-enable bit (0x100), then on
 * variants with the larger register window also set CLK_AND_WAIT.
 */
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}
219 | |||
/*
 * Soft-reset the SD core (and the SDIO core, where the larger register
 * window indicates it exists) by pulsing the reset registers 0 -> 1.
 */
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
235 | |||
/*
 * Delayed-work watchdog: if a request is still in flight roughly two
 * seconds after it was started (see tmio_mmc_request()), fail whatever
 * stage was active with -ETIMEDOUT, reset the controller and complete
 * the request.
 */
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/* request already finished, or was (re)started recently enough */
	if (!mrq
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	/* report the timeout against the deepest stage that was reached */
	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->mrq = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	/* reset outside the lock - tmio_mmc_reset() sleeps */
	tmio_mmc_reset(host);

	mmc_request_done(host->mmc, mrq);
}
276 | |||
/*
 * Complete the current request, cancel the timeout watchdog and notify
 * the MMC core. NOTE(review): runs without host->lock; the clearing of
 * mrq/cmd/data is observed under the lock by tmio_mmc_reset_work() -
 * confirm the callers hold the lock or serialize otherwise.
 */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	if (!mrq)
		return;

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	mmc_request_done(host->mmc, mrq);
}
293 | |||
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
/* These bits are OR-ed into the opcode written to CTL_SD_CMD. */
#define APP_CMD          0x0040
#define RESP_NONE        0x0300
#define RESP_R1          0x0400
#define RESP_R1B         0x0500
#define RESP_R2          0x0600
#define RESP_R3          0x0700
#define DATA_PRESENT     0x0800
#define TRANSFER_READ    0x1000
#define TRANSFER_MULTI   0x2000
#define SECURITY_CMD     0x4000
306 | |||
/*
 * Encode an mmc_command into the controller's command-register format
 * and fire it. Returns 0 on success, -EINVAL for an unsupported
 * response type.
 */
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggest this bit
 *  should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			/* enable auto-CMD12 for multi-block transfers */
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command: argument first, then the command word */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
354 | |||
355 | /* | ||
356 | * This chip always returns (at least?) as much data as you ask for. | ||
357 | * I'm unsure what happens if you ask for less than a block. This should be | ||
358 | * looked into to ensure that a funny length read doesnt hose the controller. | ||
359 | */ | ||
360 | static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | ||
361 | { | ||
362 | struct mmc_data *data = host->data; | ||
363 | void *sg_virt; | ||
364 | unsigned short *buf; | ||
365 | unsigned int count; | ||
366 | unsigned long flags; | ||
367 | |||
368 | if ((host->chan_tx || host->chan_rx) && !host->force_pio) { | ||
369 | pr_err("PIO IRQ in DMA mode!\n"); | ||
370 | return; | ||
371 | } else if (!data) { | ||
372 | pr_debug("Spurious PIO IRQ\n"); | ||
373 | return; | ||
374 | } | ||
375 | |||
376 | sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); | ||
377 | buf = (unsigned short *)(sg_virt + host->sg_off); | ||
378 | |||
379 | count = host->sg_ptr->length - host->sg_off; | ||
380 | if (count > data->blksz) | ||
381 | count = data->blksz; | ||
382 | |||
383 | pr_debug("count: %08x offset: %08x flags %08x\n", | ||
384 | count, host->sg_off, data->flags); | ||
385 | |||
386 | /* Transfer the data */ | ||
387 | if (data->flags & MMC_DATA_READ) | ||
388 | sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
389 | else | ||
390 | sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
391 | |||
392 | host->sg_off += count; | ||
393 | |||
394 | tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); | ||
395 | |||
396 | if (host->sg_off == host->sg_ptr->length) | ||
397 | tmio_mmc_next_sg(host); | ||
398 | |||
399 | return; | ||
400 | } | ||
401 | |||
402 | static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host) | ||
403 | { | ||
404 | if (host->sg_ptr == &host->bounce_sg) { | ||
405 | unsigned long flags; | ||
406 | void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); | ||
407 | memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); | ||
408 | tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); | ||
409 | } | ||
410 | } | ||
411 | |||
/* Complete the data phase of the current request and finish the request.
 * needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		/* DMA reads may have gone through the bounce buffer */
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	/* only an auto-CMD12 stop (opcode 12, zero arg) is supported */
	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}
462 | |||
/*
 * DATAEND interrupt handler. In DMA mode, hand completion over to the
 * dma_complete tasklet (after the unmap); in PIO mode finish inline.
 */
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed,
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set, in this cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		/* PIO path: complete immediately and mask the data IRQs */
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
495 | |||
/*
 * Command-response interrupt handler: collect the response words,
 * record command errors and start the data phase (PIO IRQs or the DMA
 * issue tasklet) if one is pending.
 */
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags &  MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		tmio_mmc_finish_request(host);
	}

out:
	spin_unlock(&host->lock);
}
556 | |||
/*
 * Top-level interrupt handler. SDIO card interrupts are handled first
 * (when no MMC IRQ is pending and the platform enables them); MMC IRQs
 * are then serviced in a loop until the status register shows no more
 * unmasked work. Each condition is acknowledged before being handled.
 */
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, irq_mask, status;
	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	sdio_ireg = 0;
	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

		/* ack all SDIO status bits */
		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

		if (sdio_ireg && !host->sdio_irq_enabled) {
			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
				   sdio_status, sdio_irq_mask, sdio_ireg);
			tmio_mmc_enable_sdio_irq(host->mmc, 0);
			goto out;
		}

		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
			mmc_signal_sdio_irq(host->mmc);

		if (sdio_ireg)
			goto out;
	}

	pr_debug_status(status);
	pr_debug_status(ireg);

	/* nothing we expect is pending - mask whatever fired and bail */
	if (!ireg) {
		tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);

		pr_warning("tmio_mmc: Spurious irq, disabling! "
			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		}

		/* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

		/* Command completion */
		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
			tmio_mmc_ack_mmc_irqs(host,
				     TMIO_STAT_CMDRESPEND |
				     TMIO_STAT_CMDTIMEOUT);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}
652 | |||
/*
 * Prepare the controller for the data phase of a request: validate the
 * block size, set up sg walking, program length/count registers and
 * optionally hand the transfer to DMA. Returns 0 or -EINVAL.
 */
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
		struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x  nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
683 | |||
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret;

	if (host->mrq)
		pr_debug("request not null\n");

	host->last_req_ts = jiffies;
	/* make last_req_ts visible before mrq - presumably paired with the
	 * locked read in tmio_mmc_reset_work(); NOTE(review): confirm */
	wmb();
	host->mrq = mrq;

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		/* arm the 2-second hardware-interrupt watchdog */
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->mrq = NULL;
	host->force_pio = false;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}
716 | |||
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> UP -> ON */
	if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* power down SD bus */
		if (ios->power_mode == MMC_POWER_OFF && host->set_pwr)
			host->set_pwr(host->pdev, 0);
		tmio_mmc_clk_stop(host);
	} else if (ios->power_mode == MMC_POWER_UP) {
		/* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
	} else {
		/* start bus clock */
		tmio_mmc_clk_start(host);
	}

	/* NOTE(review): 0x80e0/0x00e0 presumably differ only in the 1-bit vs
	 * 4-bit width flag (bit 15) - confirm against the datasheet */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
	break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
	break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
}
757 | |||
758 | static int tmio_mmc_get_ro(struct mmc_host *mmc) | ||
759 | { | ||
760 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
761 | struct tmio_mmc_data *pdata = host->pdata; | ||
762 | |||
763 | return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || | ||
764 | !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); | ||
765 | } | ||
766 | |||
767 | static int tmio_mmc_get_cd(struct mmc_host *mmc) | ||
768 | { | ||
769 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
770 | struct tmio_mmc_data *pdata = host->pdata; | ||
771 | |||
772 | if (!pdata->get_cd) | ||
773 | return -ENOSYS; | ||
774 | else | ||
775 | return pdata->get_cd(host->pdev); | ||
776 | } | ||
777 | |||
778 | static const struct mmc_host_ops tmio_mmc_ops = { | ||
779 | .request = tmio_mmc_request, | ||
780 | .set_ios = tmio_mmc_set_ios, | ||
781 | .get_ro = tmio_mmc_get_ro, | ||
782 | .get_cd = tmio_mmc_get_cd, | ||
783 | .enable_sdio_irq = tmio_mmc_enable_sdio_irq, | ||
784 | }; | ||
785 | |||
786 | int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, | ||
787 | struct platform_device *pdev, | ||
788 | struct tmio_mmc_data *pdata) | ||
789 | { | ||
790 | struct tmio_mmc_host *_host; | ||
791 | struct mmc_host *mmc; | ||
792 | struct resource *res_ctl; | ||
793 | int ret; | ||
794 | u32 irq_mask = TMIO_MASK_CMD; | ||
795 | |||
796 | res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
797 | if (!res_ctl) | ||
798 | return -EINVAL; | ||
799 | |||
800 | mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev); | ||
801 | if (!mmc) | ||
802 | return -ENOMEM; | ||
803 | |||
804 | _host = mmc_priv(mmc); | ||
805 | _host->pdata = pdata; | ||
806 | _host->mmc = mmc; | ||
807 | _host->pdev = pdev; | ||
808 | platform_set_drvdata(pdev, mmc); | ||
809 | |||
810 | _host->set_pwr = pdata->set_pwr; | ||
811 | _host->set_clk_div = pdata->set_clk_div; | ||
812 | |||
813 | /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ | ||
814 | _host->bus_shift = resource_size(res_ctl) >> 10; | ||
815 | |||
816 | _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); | ||
817 | if (!_host->ctl) { | ||
818 | ret = -ENOMEM; | ||
819 | goto host_free; | ||
820 | } | ||
821 | |||
822 | mmc->ops = &tmio_mmc_ops; | ||
823 | mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; | ||
824 | mmc->f_max = pdata->hclk; | ||
825 | mmc->f_min = mmc->f_max / 512; | ||
826 | mmc->max_segs = 32; | ||
827 | mmc->max_blk_size = 512; | ||
828 | mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * | ||
829 | mmc->max_segs; | ||
830 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | ||
831 | mmc->max_seg_size = mmc->max_req_size; | ||
832 | if (pdata->ocr_mask) | ||
833 | mmc->ocr_avail = pdata->ocr_mask; | ||
834 | else | ||
835 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | ||
836 | |||
837 | tmio_mmc_clk_stop(_host); | ||
838 | tmio_mmc_reset(_host); | ||
839 | |||
840 | ret = platform_get_irq(pdev, 0); | ||
841 | if (ret < 0) | ||
842 | goto unmap_ctl; | ||
843 | |||
844 | _host->irq = ret; | ||
845 | |||
846 | tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); | ||
847 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
848 | tmio_mmc_enable_sdio_irq(mmc, 0); | ||
849 | |||
850 | ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED | | ||
851 | IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host); | ||
852 | if (ret) | ||
853 | goto unmap_ctl; | ||
854 | |||
855 | spin_lock_init(&_host->lock); | ||
856 | |||
857 | /* Init delayed work for request timeouts */ | ||
858 | INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); | ||
859 | |||
860 | /* See if we also get DMA */ | ||
861 | tmio_mmc_request_dma(_host, pdata); | ||
862 | |||
863 | mmc_add_host(mmc); | ||
864 | |||
865 | /* Unmask the IRQs we want to know about */ | ||
866 | if (!_host->chan_rx) | ||
867 | irq_mask |= TMIO_MASK_READOP; | ||
868 | if (!_host->chan_tx) | ||
869 | irq_mask |= TMIO_MASK_WRITEOP; | ||
870 | |||
871 | tmio_mmc_enable_mmc_irqs(_host, irq_mask); | ||
872 | |||
873 | *host = _host; | ||
874 | |||
875 | return 0; | ||
876 | |||
877 | unmap_ctl: | ||
878 | iounmap(_host->ctl); | ||
879 | host_free: | ||
880 | mmc_free_host(mmc); | ||
881 | |||
882 | return ret; | ||
883 | } | ||
884 | EXPORT_SYMBOL(tmio_mmc_host_probe); | ||
885 | |||
886 | void tmio_mmc_host_remove(struct tmio_mmc_host *host) | ||
887 | { | ||
888 | mmc_remove_host(host->mmc); | ||
889 | cancel_delayed_work_sync(&host->delayed_reset_work); | ||
890 | tmio_mmc_release_dma(host); | ||
891 | free_irq(host->irq, host); | ||
892 | iounmap(host->ctl); | ||
893 | mmc_free_host(host->mmc); | ||
894 | } | ||
895 | EXPORT_SYMBOL(tmio_mmc_host_remove); | ||
896 | |||
897 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 8c5b4881ccd6..4dfe2c02ea91 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c | |||
@@ -1087,14 +1087,13 @@ static int __devinit via_sd_probe(struct pci_dev *pcidev, | |||
1087 | struct mmc_host *mmc; | 1087 | struct mmc_host *mmc; |
1088 | struct via_crdr_mmc_host *sdhost; | 1088 | struct via_crdr_mmc_host *sdhost; |
1089 | u32 base, len; | 1089 | u32 base, len; |
1090 | u8 rev, gatt; | 1090 | u8 gatt; |
1091 | int ret; | 1091 | int ret; |
1092 | 1092 | ||
1093 | pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev); | ||
1094 | pr_info(DRV_NAME | 1093 | pr_info(DRV_NAME |
1095 | ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", | 1094 | ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", |
1096 | pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, | 1095 | pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, |
1097 | (int)rev); | 1096 | (int)pcidev->revision); |
1098 | 1097 | ||
1099 | ret = pci_enable_device(pcidev); | 1098 | ret = pci_enable_device(pcidev); |
1100 | if (ret) | 1099 | if (ret) |
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 77414702cb00..b4567c35a322 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig | |||
@@ -33,14 +33,6 @@ config MTD_TESTS | |||
33 | should normally be compiled as kernel modules. The modules perform | 33 | should normally be compiled as kernel modules. The modules perform |
34 | various checks and verifications when loaded. | 34 | various checks and verifications when loaded. |
35 | 35 | ||
36 | config MTD_CONCAT | ||
37 | tristate "MTD concatenating support" | ||
38 | help | ||
39 | Support for concatenating several MTD devices into a single | ||
40 | (virtual) one. This allows you to have -for example- a JFFS(2) | ||
41 | file system spanning multiple physical flash chips. If unsure, | ||
42 | say 'Y'. | ||
43 | |||
44 | config MTD_PARTITIONS | 36 | config MTD_PARTITIONS |
45 | bool "MTD partitioning support" | 37 | bool "MTD partitioning support" |
46 | help | 38 | help |
@@ -333,6 +325,16 @@ config MTD_OOPS | |||
333 | To use, add console=ttyMTDx to the kernel command line, | 325 | To use, add console=ttyMTDx to the kernel command line, |
334 | where x is the MTD device number to use. | 326 | where x is the MTD device number to use. |
335 | 327 | ||
328 | config MTD_SWAP | ||
329 | tristate "Swap on MTD device support" | ||
330 | depends on MTD && SWAP | ||
331 | select MTD_BLKDEVS | ||
332 | help | ||
333 | Provides volatile block device driver on top of mtd partition | ||
334 | suitable for swapping. The mapping of written blocks is not saved. | ||
335 | The driver provides wear leveling by storing erase counter into the | ||
336 | OOB. | ||
337 | |||
336 | source "drivers/mtd/chips/Kconfig" | 338 | source "drivers/mtd/chips/Kconfig" |
337 | 339 | ||
338 | source "drivers/mtd/maps/Kconfig" | 340 | source "drivers/mtd/maps/Kconfig" |
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index d4e7f25b1ebb..d578095fb255 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile | |||
@@ -4,11 +4,10 @@ | |||
4 | 4 | ||
5 | # Core functionality. | 5 | # Core functionality. |
6 | obj-$(CONFIG_MTD) += mtd.o | 6 | obj-$(CONFIG_MTD) += mtd.o |
7 | mtd-y := mtdcore.o mtdsuper.o | 7 | mtd-y := mtdcore.o mtdsuper.o mtdconcat.o |
8 | mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o | 8 | mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o |
9 | mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o | 9 | mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o |
10 | 10 | ||
11 | obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o | ||
12 | obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o | 11 | obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o |
13 | obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o | 12 | obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o |
14 | obj-$(CONFIG_MTD_AFS_PARTS) += afs.o | 13 | obj-$(CONFIG_MTD_AFS_PARTS) += afs.o |
@@ -26,6 +25,7 @@ obj-$(CONFIG_RFD_FTL) += rfd_ftl.o | |||
26 | obj-$(CONFIG_SSFDC) += ssfdc.o | 25 | obj-$(CONFIG_SSFDC) += ssfdc.o |
27 | obj-$(CONFIG_SM_FTL) += sm_ftl.o | 26 | obj-$(CONFIG_SM_FTL) += sm_ftl.o |
28 | obj-$(CONFIG_MTD_OOPS) += mtdoops.o | 27 | obj-$(CONFIG_MTD_OOPS) += mtdoops.o |
28 | obj-$(CONFIG_MTD_SWAP) += mtdswap.o | ||
29 | 29 | ||
30 | nftl-objs := nftlcore.o nftlmount.o | 30 | nftl-objs := nftlcore.o nftlmount.o |
31 | inftl-objs := inftlcore.o inftlmount.o | 31 | inftl-objs := inftlcore.o inftlmount.o |
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 4aaa88f8ab5f..092aef11120c 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c | |||
@@ -455,7 +455,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) | |||
455 | mtd->flags = MTD_CAP_NORFLASH; | 455 | mtd->flags = MTD_CAP_NORFLASH; |
456 | mtd->name = map->name; | 456 | mtd->name = map->name; |
457 | mtd->writesize = 1; | 457 | mtd->writesize = 1; |
458 | mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize; | 458 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; |
459 | 459 | ||
460 | mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; | 460 | mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; |
461 | 461 | ||
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index f072fcfde04e..f9a5331e9445 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c | |||
@@ -349,6 +349,7 @@ static struct cfi_fixup cfi_fixup_table[] = { | |||
349 | { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, | 349 | { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, |
350 | #ifdef AMD_BOOTLOC_BUG | 350 | #ifdef AMD_BOOTLOC_BUG |
351 | { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, | 351 | { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, |
352 | { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock }, | ||
352 | { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, | 353 | { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, |
353 | #endif | 354 | #endif |
354 | { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, | 355 | { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, |
@@ -440,7 +441,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | |||
440 | mtd->flags = MTD_CAP_NORFLASH; | 441 | mtd->flags = MTD_CAP_NORFLASH; |
441 | mtd->name = map->name; | 442 | mtd->name = map->name; |
442 | mtd->writesize = 1; | 443 | mtd->writesize = 1; |
443 | mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize; | 444 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; |
444 | 445 | ||
445 | DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n", | 446 | DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n", |
446 | __func__, mtd->writebufsize); | 447 | __func__, mtd->writebufsize); |
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index c04b7658abe9..ed56ad3884fb 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c | |||
@@ -238,7 +238,7 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map) | |||
238 | mtd->resume = cfi_staa_resume; | 238 | mtd->resume = cfi_staa_resume; |
239 | mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; | 239 | mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; |
240 | mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ | 240 | mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ |
241 | mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize; | 241 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; |
242 | map->fldrv = &cfi_staa_chipdrv; | 242 | map->fldrv = &cfi_staa_chipdrv; |
243 | __module_get(THIS_MODULE); | 243 | __module_get(THIS_MODULE); |
244 | mtd->name = map->name; | 244 | mtd->name = map->name; |
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index e4eba6cc1b2e..3fb981d4bb51 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
@@ -655,7 +655,8 @@ static const struct spi_device_id m25p_ids[] = { | |||
655 | { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, | 655 | { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, |
656 | { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, | 656 | { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, |
657 | 657 | ||
658 | /* EON -- en25pxx */ | 658 | /* EON -- en25xxx */ |
659 | { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, | ||
659 | { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, | 660 | { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, |
660 | { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, | 661 | { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, |
661 | 662 | ||
@@ -728,6 +729,8 @@ static const struct spi_device_id m25p_ids[] = { | |||
728 | { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, | 729 | { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, |
729 | { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, | 730 | { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, |
730 | 731 | ||
732 | { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) }, | ||
733 | |||
731 | /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ | 734 | /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ |
732 | { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, | 735 | { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, |
733 | { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, | 736 | { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, |
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c index 26a6e809013d..1483e18971ce 100644 --- a/drivers/mtd/devices/mtdram.c +++ b/drivers/mtd/devices/mtdram.c | |||
@@ -121,6 +121,7 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, | |||
121 | mtd->flags = MTD_CAP_RAM; | 121 | mtd->flags = MTD_CAP_RAM; |
122 | mtd->size = size; | 122 | mtd->size = size; |
123 | mtd->writesize = 1; | 123 | mtd->writesize = 1; |
124 | mtd->writebufsize = 64; /* Mimic CFI NOR flashes */ | ||
124 | mtd->erasesize = MTDRAM_ERASE_SIZE; | 125 | mtd->erasesize = MTDRAM_ERASE_SIZE; |
125 | mtd->priv = mapped_address; | 126 | mtd->priv = mapped_address; |
126 | 127 | ||
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 52393282eaf1..8d28fa02a5a2 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c | |||
@@ -117,6 +117,7 @@ static void unregister_devices(void) | |||
117 | list_for_each_entry_safe(this, safe, &phram_list, list) { | 117 | list_for_each_entry_safe(this, safe, &phram_list, list) { |
118 | del_mtd_device(&this->mtd); | 118 | del_mtd_device(&this->mtd); |
119 | iounmap(this->mtd.priv); | 119 | iounmap(this->mtd.priv); |
120 | kfree(this->mtd.name); | ||
120 | kfree(this); | 121 | kfree(this); |
121 | } | 122 | } |
122 | } | 123 | } |
@@ -275,6 +276,8 @@ static int phram_setup(const char *val, struct kernel_param *kp) | |||
275 | ret = register_device(name, start, len); | 276 | ret = register_device(name, start, len); |
276 | if (!ret) | 277 | if (!ret) |
277 | pr_info("%s device: %#x at %#x\n", name, len, start); | 278 | pr_info("%s device: %#x at %#x\n", name, len, start); |
279 | else | ||
280 | kfree(name); | ||
278 | 281 | ||
279 | return ret; | 282 | return ret; |
280 | } | 283 | } |
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 5d37d315fa98..44b1f46458ca 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
@@ -114,7 +114,7 @@ config MTD_SUN_UFLASH | |||
114 | 114 | ||
115 | config MTD_SC520CDP | 115 | config MTD_SC520CDP |
116 | tristate "CFI Flash device mapped on AMD SC520 CDP" | 116 | tristate "CFI Flash device mapped on AMD SC520 CDP" |
117 | depends on X86 && MTD_CFI && MTD_CONCAT | 117 | depends on X86 && MTD_CFI |
118 | help | 118 | help |
119 | The SC520 CDP board has two banks of CFI-compliant chips and one | 119 | The SC520 CDP board has two banks of CFI-compliant chips and one |
120 | Dual-in-line JEDEC chip. This 'mapping' driver supports that | 120 | Dual-in-line JEDEC chip. This 'mapping' driver supports that |
@@ -262,7 +262,7 @@ config MTD_BCM963XX | |||
262 | 262 | ||
263 | config MTD_DILNETPC | 263 | config MTD_DILNETPC |
264 | tristate "CFI Flash device mapped on DIL/Net PC" | 264 | tristate "CFI Flash device mapped on DIL/Net PC" |
265 | depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN | 265 | depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN |
266 | help | 266 | help |
267 | MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP". | 267 | MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP". |
268 | For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm> | 268 | For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm> |
@@ -552,4 +552,13 @@ config MTD_PISMO | |||
552 | 552 | ||
553 | When built as a module, it will be called pismo.ko | 553 | When built as a module, it will be called pismo.ko |
554 | 554 | ||
555 | config MTD_LATCH_ADDR | ||
556 | tristate "Latch-assisted Flash Chip Support" | ||
557 | depends on MTD_COMPLEX_MAPPINGS | ||
558 | help | ||
559 | Map driver which allows flashes to be partially physically addressed | ||
560 | and have the upper address lines set by a board specific code. | ||
561 | |||
562 | If compiled as a module, it will be called latch-addr-flash. | ||
563 | |||
555 | endmenu | 564 | endmenu |
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index c7869c7a6b18..08533bd5cba7 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile | |||
@@ -59,3 +59,4 @@ obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o | |||
59 | obj-$(CONFIG_MTD_VMU) += vmu-flash.o | 59 | obj-$(CONFIG_MTD_VMU) += vmu-flash.o |
60 | obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o | 60 | obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o |
61 | obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o | 61 | obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o |
62 | obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o | ||
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c index c09f4f57093e..e5f645b775ad 100644 --- a/drivers/mtd/maps/ceiva.c +++ b/drivers/mtd/maps/ceiva.c | |||
@@ -194,16 +194,10 @@ static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info | |||
194 | * We detected multiple devices. Concatenate | 194 | * We detected multiple devices. Concatenate |
195 | * them together. | 195 | * them together. |
196 | */ | 196 | */ |
197 | #ifdef CONFIG_MTD_CONCAT | ||
198 | *rmtd = mtd_concat_create(subdev, found, | 197 | *rmtd = mtd_concat_create(subdev, found, |
199 | "clps flash"); | 198 | "clps flash"); |
200 | if (*rmtd == NULL) | 199 | if (*rmtd == NULL) |
201 | ret = -ENXIO; | 200 | ret = -ENXIO; |
202 | #else | ||
203 | printk(KERN_ERR "clps flash: multiple devices " | ||
204 | "found but MTD concat support disabled.\n"); | ||
205 | ret = -ENXIO; | ||
206 | #endif | ||
207 | } | 201 | } |
208 | } | 202 | } |
209 | 203 | ||
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c index 2aac41bde8b3..e22ff5adbbf4 100644 --- a/drivers/mtd/maps/integrator-flash.c +++ b/drivers/mtd/maps/integrator-flash.c | |||
@@ -202,7 +202,6 @@ static int armflash_probe(struct platform_device *dev) | |||
202 | if (info->nr_subdev == 1) | 202 | if (info->nr_subdev == 1) |
203 | info->mtd = info->subdev[0].mtd; | 203 | info->mtd = info->subdev[0].mtd; |
204 | else if (info->nr_subdev > 1) { | 204 | else if (info->nr_subdev > 1) { |
205 | #ifdef CONFIG_MTD_CONCAT | ||
206 | struct mtd_info *cdev[info->nr_subdev]; | 205 | struct mtd_info *cdev[info->nr_subdev]; |
207 | 206 | ||
208 | /* | 207 | /* |
@@ -215,11 +214,6 @@ static int armflash_probe(struct platform_device *dev) | |||
215 | dev_name(&dev->dev)); | 214 | dev_name(&dev->dev)); |
216 | if (info->mtd == NULL) | 215 | if (info->mtd == NULL) |
217 | err = -ENXIO; | 216 | err = -ENXIO; |
218 | #else | ||
219 | printk(KERN_ERR "armflash: multiple devices found but " | ||
220 | "MTD concat support disabled.\n"); | ||
221 | err = -ENXIO; | ||
222 | #endif | ||
223 | } | 217 | } |
224 | 218 | ||
225 | if (err < 0) | 219 | if (err < 0) |
@@ -244,10 +238,8 @@ static int armflash_probe(struct platform_device *dev) | |||
244 | cleanup: | 238 | cleanup: |
245 | if (info->mtd) { | 239 | if (info->mtd) { |
246 | del_mtd_partitions(info->mtd); | 240 | del_mtd_partitions(info->mtd); |
247 | #ifdef CONFIG_MTD_CONCAT | ||
248 | if (info->mtd != info->subdev[0].mtd) | 241 | if (info->mtd != info->subdev[0].mtd) |
249 | mtd_concat_destroy(info->mtd); | 242 | mtd_concat_destroy(info->mtd); |
250 | #endif | ||
251 | } | 243 | } |
252 | kfree(info->parts); | 244 | kfree(info->parts); |
253 | subdev_err: | 245 | subdev_err: |
@@ -272,10 +264,8 @@ static int armflash_remove(struct platform_device *dev) | |||
272 | if (info) { | 264 | if (info) { |
273 | if (info->mtd) { | 265 | if (info->mtd) { |
274 | del_mtd_partitions(info->mtd); | 266 | del_mtd_partitions(info->mtd); |
275 | #ifdef CONFIG_MTD_CONCAT | ||
276 | if (info->mtd != info->subdev[0].mtd) | 267 | if (info->mtd != info->subdev[0].mtd) |
277 | mtd_concat_destroy(info->mtd); | 268 | mtd_concat_destroy(info->mtd); |
278 | #endif | ||
279 | } | 269 | } |
280 | kfree(info->parts); | 270 | kfree(info->parts); |
281 | 271 | ||
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c new file mode 100644 index 000000000000..ee2548085334 --- /dev/null +++ b/drivers/mtd/maps/latch-addr-flash.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * Interface for NOR flash driver whose high address lines are latched | ||
3 | * | ||
4 | * Copyright © 2000 Nicolas Pitre <nico@cam.org> | ||
5 | * Copyright © 2005-2008 Analog Devices Inc. | ||
6 | * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com> | ||
7 | * | ||
8 | * This file is licensed under the terms of the GNU General Public License | ||
9 | * version 2. This program is licensed "as is" without any warranty of any | ||
10 | * kind, whether express or implied. | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/mtd/mtd.h> | ||
17 | #include <linux/mtd/map.h> | ||
18 | #include <linux/mtd/partitions.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/mtd/latch-addr-flash.h> | ||
21 | #include <linux/slab.h> | ||
22 | |||
23 | #define DRIVER_NAME "latch-addr-flash" | ||
24 | |||
25 | struct latch_addr_flash_info { | ||
26 | struct mtd_info *mtd; | ||
27 | struct map_info map; | ||
28 | struct resource *res; | ||
29 | |||
30 | void (*set_window)(unsigned long offset, void *data); | ||
31 | void *data; | ||
32 | |||
33 | /* cache; could be found out of res */ | ||
34 | unsigned long win_mask; | ||
35 | |||
36 | int nr_parts; | ||
37 | struct mtd_partition *parts; | ||
38 | |||
39 | spinlock_t lock; | ||
40 | }; | ||
41 | |||
42 | static map_word lf_read(struct map_info *map, unsigned long ofs) | ||
43 | { | ||
44 | struct latch_addr_flash_info *info; | ||
45 | map_word datum; | ||
46 | |||
47 | info = (struct latch_addr_flash_info *)map->map_priv_1; | ||
48 | |||
49 | spin_lock(&info->lock); | ||
50 | |||
51 | info->set_window(ofs, info->data); | ||
52 | datum = inline_map_read(map, info->win_mask & ofs); | ||
53 | |||
54 | spin_unlock(&info->lock); | ||
55 | |||
56 | return datum; | ||
57 | } | ||
58 | |||
59 | static void lf_write(struct map_info *map, map_word datum, unsigned long ofs) | ||
60 | { | ||
61 | struct latch_addr_flash_info *info; | ||
62 | |||
63 | info = (struct latch_addr_flash_info *)map->map_priv_1; | ||
64 | |||
65 | spin_lock(&info->lock); | ||
66 | |||
67 | info->set_window(ofs, info->data); | ||
68 | inline_map_write(map, datum, info->win_mask & ofs); | ||
69 | |||
70 | spin_unlock(&info->lock); | ||
71 | } | ||
72 | |||
73 | static void lf_copy_from(struct map_info *map, void *to, | ||
74 | unsigned long from, ssize_t len) | ||
75 | { | ||
76 | struct latch_addr_flash_info *info = | ||
77 | (struct latch_addr_flash_info *) map->map_priv_1; | ||
78 | unsigned n; | ||
79 | |||
80 | while (len > 0) { | ||
81 | n = info->win_mask + 1 - (from & info->win_mask); | ||
82 | if (n > len) | ||
83 | n = len; | ||
84 | |||
85 | spin_lock(&info->lock); | ||
86 | |||
87 | info->set_window(from, info->data); | ||
88 | memcpy_fromio(to, map->virt + (from & info->win_mask), n); | ||
89 | |||
90 | spin_unlock(&info->lock); | ||
91 | |||
92 | to += n; | ||
93 | from += n; | ||
94 | len -= n; | ||
95 | } | ||
96 | } | ||
97 | |||
98 | static char *rom_probe_types[] = { "cfi_probe", NULL }; | ||
99 | |||
100 | static char *part_probe_types[] = { "cmdlinepart", NULL }; | ||
101 | |||
102 | static int latch_addr_flash_remove(struct platform_device *dev) | ||
103 | { | ||
104 | struct latch_addr_flash_info *info; | ||
105 | struct latch_addr_flash_data *latch_addr_data; | ||
106 | |||
107 | info = platform_get_drvdata(dev); | ||
108 | if (info == NULL) | ||
109 | return 0; | ||
110 | platform_set_drvdata(dev, NULL); | ||
111 | |||
112 | latch_addr_data = dev->dev.platform_data; | ||
113 | |||
114 | if (info->mtd != NULL) { | ||
115 | if (mtd_has_partitions()) { | ||
116 | if (info->nr_parts) { | ||
117 | del_mtd_partitions(info->mtd); | ||
118 | kfree(info->parts); | ||
119 | } else if (latch_addr_data->nr_parts) { | ||
120 | del_mtd_partitions(info->mtd); | ||
121 | } else { | ||
122 | del_mtd_device(info->mtd); | ||
123 | } | ||
124 | } else { | ||
125 | del_mtd_device(info->mtd); | ||
126 | } | ||
127 | map_destroy(info->mtd); | ||
128 | } | ||
129 | |||
130 | if (info->map.virt != NULL) | ||
131 | iounmap(info->map.virt); | ||
132 | |||
133 | if (info->res != NULL) | ||
134 | release_mem_region(info->res->start, resource_size(info->res)); | ||
135 | |||
136 | kfree(info); | ||
137 | |||
138 | if (latch_addr_data->done) | ||
139 | latch_addr_data->done(latch_addr_data->data); | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static int __devinit latch_addr_flash_probe(struct platform_device *dev) | ||
145 | { | ||
146 | struct latch_addr_flash_data *latch_addr_data; | ||
147 | struct latch_addr_flash_info *info; | ||
148 | resource_size_t win_base = dev->resource->start; | ||
149 | resource_size_t win_size = resource_size(dev->resource); | ||
150 | char **probe_type; | ||
151 | int chipsel; | ||
152 | int err; | ||
153 | |||
154 | latch_addr_data = dev->dev.platform_data; | ||
155 | if (latch_addr_data == NULL) | ||
156 | return -ENODEV; | ||
157 | |||
158 | pr_notice("latch-addr platform flash device: %#llx byte " | ||
159 | "window at %#.8llx\n", | ||
160 | (unsigned long long)win_size, (unsigned long long)win_base); | ||
161 | |||
162 | chipsel = dev->id; | ||
163 | |||
164 | if (latch_addr_data->init) { | ||
165 | err = latch_addr_data->init(latch_addr_data->data, chipsel); | ||
166 | if (err != 0) | ||
167 | return err; | ||
168 | } | ||
169 | |||
170 | info = kzalloc(sizeof(struct latch_addr_flash_info), GFP_KERNEL); | ||
171 | if (info == NULL) { | ||
172 | err = -ENOMEM; | ||
173 | goto done; | ||
174 | } | ||
175 | |||
176 | platform_set_drvdata(dev, info); | ||
177 | |||
178 | info->res = request_mem_region(win_base, win_size, DRIVER_NAME); | ||
179 | if (info->res == NULL) { | ||
180 | dev_err(&dev->dev, "Could not reserve memory region\n"); | ||
181 | err = -EBUSY; | ||
182 | goto free_info; | ||
183 | } | ||
184 | |||
185 | info->map.name = DRIVER_NAME; | ||
186 | info->map.size = latch_addr_data->size; | ||
187 | info->map.bankwidth = latch_addr_data->width; | ||
188 | |||
189 | info->map.phys = NO_XIP; | ||
190 | info->map.virt = ioremap(win_base, win_size); | ||
191 | if (!info->map.virt) { | ||
192 | err = -ENOMEM; | ||
193 | goto free_res; | ||
194 | } | ||
195 | |||
196 | info->map.map_priv_1 = (unsigned long)info; | ||
197 | |||
198 | info->map.read = lf_read; | ||
199 | info->map.copy_from = lf_copy_from; | ||
200 | info->map.write = lf_write; | ||
201 | info->set_window = latch_addr_data->set_window; | ||
202 | info->data = latch_addr_data->data; | ||
203 | info->win_mask = win_size - 1; | ||
204 | |||
205 | spin_lock_init(&info->lock); | ||
206 | |||
207 | for (probe_type = rom_probe_types; !info->mtd && *probe_type; | ||
208 | probe_type++) | ||
209 | info->mtd = do_map_probe(*probe_type, &info->map); | ||
210 | |||
211 | if (info->mtd == NULL) { | ||
212 | dev_err(&dev->dev, "map_probe failed\n"); | ||
213 | err = -ENODEV; | ||
214 | goto iounmap; | ||
215 | } | ||
216 | info->mtd->owner = THIS_MODULE; | ||
217 | |||
218 | if (mtd_has_partitions()) { | ||
219 | |||
220 | err = parse_mtd_partitions(info->mtd, | ||
221 | (const char **)part_probe_types, | ||
222 | &info->parts, 0); | ||
223 | if (err > 0) { | ||
224 | add_mtd_partitions(info->mtd, info->parts, err); | ||
225 | return 0; | ||
226 | } | ||
227 | if (latch_addr_data->nr_parts) { | ||
228 | pr_notice("Using latch-addr-flash partition information\n"); | ||
229 | add_mtd_partitions(info->mtd, latch_addr_data->parts, | ||
230 | latch_addr_data->nr_parts); | ||
231 | return 0; | ||
232 | } | ||
233 | } | ||
234 | add_mtd_device(info->mtd); | ||
235 | return 0; | ||
236 | |||
237 | iounmap: | ||
238 | iounmap(info->map.virt); | ||
239 | free_res: | ||
240 | release_mem_region(info->res->start, resource_size(info->res)); | ||
241 | free_info: | ||
242 | kfree(info); | ||
243 | done: | ||
244 | if (latch_addr_data->done) | ||
245 | latch_addr_data->done(latch_addr_data->data); | ||
246 | return err; | ||
247 | } | ||
248 | |||
249 | static struct platform_driver latch_addr_flash_driver = { | ||
250 | .probe = latch_addr_flash_probe, | ||
251 | .remove = __devexit_p(latch_addr_flash_remove), | ||
252 | .driver = { | ||
253 | .name = DRIVER_NAME, | ||
254 | }, | ||
255 | }; | ||
256 | |||
257 | static int __init latch_addr_flash_init(void) | ||
258 | { | ||
259 | return platform_driver_register(&latch_addr_flash_driver); | ||
260 | } | ||
261 | module_init(latch_addr_flash_init); | ||
262 | |||
263 | static void __exit latch_addr_flash_exit(void) | ||
264 | { | ||
265 | platform_driver_unregister(&latch_addr_flash_driver); | ||
266 | } | ||
267 | module_exit(latch_addr_flash_exit); | ||
268 | |||
269 | MODULE_AUTHOR("David Griego <dgriego@mvista.com>"); | ||
270 | MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper " | ||
271 | "address lines being set board specifically"); | ||
272 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index 4c18b98a3110..7522df4f71f1 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c | |||
@@ -59,10 +59,8 @@ static int physmap_flash_remove(struct platform_device *dev) | |||
59 | #else | 59 | #else |
60 | del_mtd_device(info->cmtd); | 60 | del_mtd_device(info->cmtd); |
61 | #endif | 61 | #endif |
62 | #ifdef CONFIG_MTD_CONCAT | ||
63 | if (info->cmtd != info->mtd[0]) | 62 | if (info->cmtd != info->mtd[0]) |
64 | mtd_concat_destroy(info->cmtd); | 63 | mtd_concat_destroy(info->cmtd); |
65 | #endif | ||
66 | } | 64 | } |
67 | 65 | ||
68 | for (i = 0; i < MAX_RESOURCES; i++) { | 66 | for (i = 0; i < MAX_RESOURCES; i++) { |
@@ -159,15 +157,9 @@ static int physmap_flash_probe(struct platform_device *dev) | |||
159 | /* | 157 | /* |
160 | * We detected multiple devices. Concatenate them together. | 158 | * We detected multiple devices. Concatenate them together. |
161 | */ | 159 | */ |
162 | #ifdef CONFIG_MTD_CONCAT | ||
163 | info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev)); | 160 | info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev)); |
164 | if (info->cmtd == NULL) | 161 | if (info->cmtd == NULL) |
165 | err = -ENXIO; | 162 | err = -ENXIO; |
166 | #else | ||
167 | printk(KERN_ERR "physmap-flash: multiple devices " | ||
168 | "found but MTD concat support disabled.\n"); | ||
169 | err = -ENXIO; | ||
170 | #endif | ||
171 | } | 163 | } |
172 | if (err) | 164 | if (err) |
173 | goto err_out; | 165 | goto err_out; |
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 3db0cb083d31..bd483f0c57e1 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c | |||
@@ -104,12 +104,10 @@ static int of_flash_remove(struct platform_device *dev) | |||
104 | return 0; | 104 | return 0; |
105 | dev_set_drvdata(&dev->dev, NULL); | 105 | dev_set_drvdata(&dev->dev, NULL); |
106 | 106 | ||
107 | #ifdef CONFIG_MTD_CONCAT | ||
108 | if (info->cmtd != info->list[0].mtd) { | 107 | if (info->cmtd != info->list[0].mtd) { |
109 | del_mtd_device(info->cmtd); | 108 | del_mtd_device(info->cmtd); |
110 | mtd_concat_destroy(info->cmtd); | 109 | mtd_concat_destroy(info->cmtd); |
111 | } | 110 | } |
112 | #endif | ||
113 | 111 | ||
114 | if (info->cmtd) { | 112 | if (info->cmtd) { |
115 | if (OF_FLASH_PARTS(info)) { | 113 | if (OF_FLASH_PARTS(info)) { |
@@ -337,16 +335,10 @@ static int __devinit of_flash_probe(struct platform_device *dev) | |||
337 | /* | 335 | /* |
338 | * We detected multiple devices. Concatenate them together. | 336 | * We detected multiple devices. Concatenate them together. |
339 | */ | 337 | */ |
340 | #ifdef CONFIG_MTD_CONCAT | ||
341 | info->cmtd = mtd_concat_create(mtd_list, info->list_size, | 338 | info->cmtd = mtd_concat_create(mtd_list, info->list_size, |
342 | dev_name(&dev->dev)); | 339 | dev_name(&dev->dev)); |
343 | if (info->cmtd == NULL) | 340 | if (info->cmtd == NULL) |
344 | err = -ENXIO; | 341 | err = -ENXIO; |
345 | #else | ||
346 | printk(KERN_ERR "physmap_of: multiple devices " | ||
347 | "found but MTD concat support disabled.\n"); | ||
348 | err = -ENXIO; | ||
349 | #endif | ||
350 | } | 342 | } |
351 | if (err) | 343 | if (err) |
352 | goto err_out; | 344 | goto err_out; |
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index f3af87e08ecd..da875908ea8e 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c | |||
@@ -232,10 +232,8 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla | |||
232 | else | 232 | else |
233 | del_mtd_partitions(info->mtd); | 233 | del_mtd_partitions(info->mtd); |
234 | #endif | 234 | #endif |
235 | #ifdef CONFIG_MTD_CONCAT | ||
236 | if (info->mtd != info->subdev[0].mtd) | 235 | if (info->mtd != info->subdev[0].mtd) |
237 | mtd_concat_destroy(info->mtd); | 236 | mtd_concat_destroy(info->mtd); |
238 | #endif | ||
239 | } | 237 | } |
240 | 238 | ||
241 | kfree(info->parts); | 239 | kfree(info->parts); |
@@ -321,7 +319,6 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat) | |||
321 | info->mtd = info->subdev[0].mtd; | 319 | info->mtd = info->subdev[0].mtd; |
322 | ret = 0; | 320 | ret = 0; |
323 | } else if (info->num_subdev > 1) { | 321 | } else if (info->num_subdev > 1) { |
324 | #ifdef CONFIG_MTD_CONCAT | ||
325 | struct mtd_info *cdev[nr]; | 322 | struct mtd_info *cdev[nr]; |
326 | /* | 323 | /* |
327 | * We detected multiple devices. Concatenate them together. | 324 | * We detected multiple devices. Concatenate them together. |
@@ -333,11 +330,6 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat) | |||
333 | plat->name); | 330 | plat->name); |
334 | if (info->mtd == NULL) | 331 | if (info->mtd == NULL) |
335 | ret = -ENXIO; | 332 | ret = -ENXIO; |
336 | #else | ||
337 | printk(KERN_ERR "SA1100 flash: multiple devices " | ||
338 | "found but MTD concat support disabled.\n"); | ||
339 | ret = -ENXIO; | ||
340 | #endif | ||
341 | } | 333 | } |
342 | 334 | ||
343 | if (ret == 0) | 335 | if (ret == 0) |
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c index e2147bf11c88..e02dfa9d4ddd 100644 --- a/drivers/mtd/maps/ts5500_flash.c +++ b/drivers/mtd/maps/ts5500_flash.c | |||
@@ -94,7 +94,6 @@ static int __init init_ts5500_map(void) | |||
94 | return 0; | 94 | return 0; |
95 | 95 | ||
96 | err1: | 96 | err1: |
97 | map_destroy(mymtd); | ||
98 | iounmap(ts5500_map.virt); | 97 | iounmap(ts5500_map.virt); |
99 | err2: | 98 | err2: |
100 | return rc; | 99 | return rc; |
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index e0a2373bf0e2..a534e1f0c348 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -40,7 +40,7 @@ | |||
40 | static LIST_HEAD(blktrans_majors); | 40 | static LIST_HEAD(blktrans_majors); |
41 | static DEFINE_MUTEX(blktrans_ref_mutex); | 41 | static DEFINE_MUTEX(blktrans_ref_mutex); |
42 | 42 | ||
43 | void blktrans_dev_release(struct kref *kref) | 43 | static void blktrans_dev_release(struct kref *kref) |
44 | { | 44 | { |
45 | struct mtd_blktrans_dev *dev = | 45 | struct mtd_blktrans_dev *dev = |
46 | container_of(kref, struct mtd_blktrans_dev, ref); | 46 | container_of(kref, struct mtd_blktrans_dev, ref); |
@@ -67,7 +67,7 @@ unlock: | |||
67 | return dev; | 67 | return dev; |
68 | } | 68 | } |
69 | 69 | ||
70 | void blktrans_dev_put(struct mtd_blktrans_dev *dev) | 70 | static void blktrans_dev_put(struct mtd_blktrans_dev *dev) |
71 | { | 71 | { |
72 | mutex_lock(&blktrans_ref_mutex); | 72 | mutex_lock(&blktrans_ref_mutex); |
73 | kref_put(&dev->ref, blktrans_dev_release); | 73 | kref_put(&dev->ref, blktrans_dev_release); |
@@ -119,18 +119,43 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
119 | } | 119 | } |
120 | } | 120 | } |
121 | 121 | ||
122 | int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev) | ||
123 | { | ||
124 | if (kthread_should_stop()) | ||
125 | return 1; | ||
126 | |||
127 | return dev->bg_stop; | ||
128 | } | ||
129 | EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background); | ||
130 | |||
122 | static int mtd_blktrans_thread(void *arg) | 131 | static int mtd_blktrans_thread(void *arg) |
123 | { | 132 | { |
124 | struct mtd_blktrans_dev *dev = arg; | 133 | struct mtd_blktrans_dev *dev = arg; |
134 | struct mtd_blktrans_ops *tr = dev->tr; | ||
125 | struct request_queue *rq = dev->rq; | 135 | struct request_queue *rq = dev->rq; |
126 | struct request *req = NULL; | 136 | struct request *req = NULL; |
137 | int background_done = 0; | ||
127 | 138 | ||
128 | spin_lock_irq(rq->queue_lock); | 139 | spin_lock_irq(rq->queue_lock); |
129 | 140 | ||
130 | while (!kthread_should_stop()) { | 141 | while (!kthread_should_stop()) { |
131 | int res; | 142 | int res; |
132 | 143 | ||
144 | dev->bg_stop = false; | ||
133 | if (!req && !(req = blk_fetch_request(rq))) { | 145 | if (!req && !(req = blk_fetch_request(rq))) { |
146 | if (tr->background && !background_done) { | ||
147 | spin_unlock_irq(rq->queue_lock); | ||
148 | mutex_lock(&dev->lock); | ||
149 | tr->background(dev); | ||
150 | mutex_unlock(&dev->lock); | ||
151 | spin_lock_irq(rq->queue_lock); | ||
152 | /* | ||
153 | * Do background processing just once per idle | ||
154 | * period. | ||
155 | */ | ||
156 | background_done = !dev->bg_stop; | ||
157 | continue; | ||
158 | } | ||
134 | set_current_state(TASK_INTERRUPTIBLE); | 159 | set_current_state(TASK_INTERRUPTIBLE); |
135 | 160 | ||
136 | if (kthread_should_stop()) | 161 | if (kthread_should_stop()) |
@@ -152,6 +177,8 @@ static int mtd_blktrans_thread(void *arg) | |||
152 | 177 | ||
153 | if (!__blk_end_request_cur(req, res)) | 178 | if (!__blk_end_request_cur(req, res)) |
154 | req = NULL; | 179 | req = NULL; |
180 | |||
181 | background_done = 0; | ||
155 | } | 182 | } |
156 | 183 | ||
157 | if (req) | 184 | if (req) |
@@ -172,8 +199,10 @@ static void mtd_blktrans_request(struct request_queue *rq) | |||
172 | if (!dev) | 199 | if (!dev) |
173 | while ((req = blk_fetch_request(rq)) != NULL) | 200 | while ((req = blk_fetch_request(rq)) != NULL) |
174 | __blk_end_request_all(req, -ENODEV); | 201 | __blk_end_request_all(req, -ENODEV); |
175 | else | 202 | else { |
203 | dev->bg_stop = true; | ||
176 | wake_up_process(dev->thread); | 204 | wake_up_process(dev->thread); |
205 | } | ||
177 | } | 206 | } |
178 | 207 | ||
179 | static int blktrans_open(struct block_device *bdev, fmode_t mode) | 208 | static int blktrans_open(struct block_device *bdev, fmode_t mode) |
@@ -379,9 +408,10 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) | |||
379 | new->rq->queuedata = new; | 408 | new->rq->queuedata = new; |
380 | blk_queue_logical_block_size(new->rq, tr->blksize); | 409 | blk_queue_logical_block_size(new->rq, tr->blksize); |
381 | 410 | ||
382 | if (tr->discard) | 411 | if (tr->discard) { |
383 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, | 412 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq); |
384 | new->rq); | 413 | new->rq->limits.max_discard_sectors = UINT_MAX; |
414 | } | ||
385 | 415 | ||
386 | gd->queue = new->rq; | 416 | gd->queue = new->rq; |
387 | 417 | ||
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index 5f5777bd3f75..5060e608ea5d 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c | |||
@@ -750,6 +750,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
750 | struct mtd_concat *concat; | 750 | struct mtd_concat *concat; |
751 | uint32_t max_erasesize, curr_erasesize; | 751 | uint32_t max_erasesize, curr_erasesize; |
752 | int num_erase_region; | 752 | int num_erase_region; |
753 | int max_writebufsize = 0; | ||
753 | 754 | ||
754 | printk(KERN_NOTICE "Concatenating MTD devices:\n"); | 755 | printk(KERN_NOTICE "Concatenating MTD devices:\n"); |
755 | for (i = 0; i < num_devs; i++) | 756 | for (i = 0; i < num_devs; i++) |
@@ -776,7 +777,12 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
776 | concat->mtd.size = subdev[0]->size; | 777 | concat->mtd.size = subdev[0]->size; |
777 | concat->mtd.erasesize = subdev[0]->erasesize; | 778 | concat->mtd.erasesize = subdev[0]->erasesize; |
778 | concat->mtd.writesize = subdev[0]->writesize; | 779 | concat->mtd.writesize = subdev[0]->writesize; |
779 | concat->mtd.writebufsize = subdev[0]->writebufsize; | 780 | |
781 | for (i = 0; i < num_devs; i++) | ||
782 | if (max_writebufsize < subdev[i]->writebufsize) | ||
783 | max_writebufsize = subdev[i]->writebufsize; | ||
784 | concat->mtd.writebufsize = max_writebufsize; | ||
785 | |||
780 | concat->mtd.subpage_sft = subdev[0]->subpage_sft; | 786 | concat->mtd.subpage_sft = subdev[0]->subpage_sft; |
781 | concat->mtd.oobsize = subdev[0]->oobsize; | 787 | concat->mtd.oobsize = subdev[0]->oobsize; |
782 | concat->mtd.oobavail = subdev[0]->oobavail; | 788 | concat->mtd.oobavail = subdev[0]->oobavail; |
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 527cebf58da4..da69bc8a5a7d 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -43,7 +43,7 @@ | |||
43 | * backing device capabilities for non-mappable devices (such as NAND flash) | 43 | * backing device capabilities for non-mappable devices (such as NAND flash) |
44 | * - permits private mappings, copies are taken of the data | 44 | * - permits private mappings, copies are taken of the data |
45 | */ | 45 | */ |
46 | struct backing_dev_info mtd_bdi_unmappable = { | 46 | static struct backing_dev_info mtd_bdi_unmappable = { |
47 | .capabilities = BDI_CAP_MAP_COPY, | 47 | .capabilities = BDI_CAP_MAP_COPY, |
48 | }; | 48 | }; |
49 | 49 | ||
@@ -52,7 +52,7 @@ struct backing_dev_info mtd_bdi_unmappable = { | |||
52 | * - permits private mappings, copies are taken of the data | 52 | * - permits private mappings, copies are taken of the data |
53 | * - permits non-writable shared mappings | 53 | * - permits non-writable shared mappings |
54 | */ | 54 | */ |
55 | struct backing_dev_info mtd_bdi_ro_mappable = { | 55 | static struct backing_dev_info mtd_bdi_ro_mappable = { |
56 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | 56 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | |
57 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), | 57 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), |
58 | }; | 58 | }; |
@@ -62,7 +62,7 @@ struct backing_dev_info mtd_bdi_ro_mappable = { | |||
62 | * - permits private mappings, copies are taken of the data | 62 | * - permits private mappings, copies are taken of the data |
63 | * - permits non-writable shared mappings | 63 | * - permits non-writable shared mappings |
64 | */ | 64 | */ |
65 | struct backing_dev_info mtd_bdi_rw_mappable = { | 65 | static struct backing_dev_info mtd_bdi_rw_mappable = { |
66 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | 66 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | |
67 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | | 67 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | |
68 | BDI_CAP_WRITE_MAP), | 68 | BDI_CAP_WRITE_MAP), |
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c new file mode 100644 index 000000000000..237913c5c92c --- /dev/null +++ b/drivers/mtd/mtdswap.c | |||
@@ -0,0 +1,1587 @@ | |||
1 | /* | ||
2 | * Swap block device support for MTDs | ||
3 | * Turns an MTD device into a swap device with block wear leveling | ||
4 | * | ||
5 | * Copyright © 2007,2011 Nokia Corporation. All rights reserved. | ||
6 | * | ||
7 | * Authors: Jarkko Lavinen <jarkko.lavinen@nokia.com> | ||
8 | * | ||
9 | * Based on Richard Purdie's earlier implementation in 2007. Background | ||
10 | * support and lock-less operation written by Adrian Hunter. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * version 2 as published by the Free Software Foundation. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, but | ||
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
19 | * General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
24 | * 02110-1301 USA | ||
25 | */ | ||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/mtd/mtd.h> | ||
30 | #include <linux/mtd/blktrans.h> | ||
31 | #include <linux/rbtree.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/vmalloc.h> | ||
35 | #include <linux/genhd.h> | ||
36 | #include <linux/swap.h> | ||
37 | #include <linux/debugfs.h> | ||
38 | #include <linux/seq_file.h> | ||
39 | #include <linux/device.h> | ||
40 | #include <linux/math64.h> | ||
41 | |||
42 | #define MTDSWAP_PREFIX "mtdswap" | ||
43 | |||
44 | /* | ||
45 | * The number of free eraseblocks when GC should stop | ||
46 | */ | ||
47 | #define CLEAN_BLOCK_THRESHOLD 20 | ||
48 | |||
49 | /* | ||
50 | * Number of free eraseblocks below which GC can also collect low frag | ||
51 | * blocks. | ||
52 | */ | ||
53 | #define LOW_FRAG_GC_TRESHOLD 5 | ||
54 | |||
55 | /* | ||
56 | * Wear level cost amortization. We want to do wear leveling on the background | ||
57 | * without disturbing gc too much. This is made by defining max GC frequency. | ||
58 | * Frequency value 6 means 1/6 of the GC passes will pick an erase block based | ||
59 | * on the biggest wear difference rather than the biggest dirtiness. | ||
60 | * | ||
61 | * The lower freq2 should be chosen so that it makes sure the maximum erase | ||
62 | * difference will decrease even if a malicious application is deliberately | ||
63 | * trying to make erase differences large. | ||
64 | */ | ||
65 | #define MAX_ERASE_DIFF 4000 | ||
66 | #define COLLECT_NONDIRTY_BASE MAX_ERASE_DIFF | ||
67 | #define COLLECT_NONDIRTY_FREQ1 6 | ||
68 | #define COLLECT_NONDIRTY_FREQ2 4 | ||
69 | |||
70 | #define PAGE_UNDEF UINT_MAX | ||
71 | #define BLOCK_UNDEF UINT_MAX | ||
72 | #define BLOCK_ERROR (UINT_MAX - 1) | ||
73 | #define BLOCK_MAX (UINT_MAX - 2) | ||
74 | |||
75 | #define EBLOCK_BAD (1 << 0) | ||
76 | #define EBLOCK_NOMAGIC (1 << 1) | ||
77 | #define EBLOCK_BITFLIP (1 << 2) | ||
78 | #define EBLOCK_FAILED (1 << 3) | ||
79 | #define EBLOCK_READERR (1 << 4) | ||
80 | #define EBLOCK_IDX_SHIFT 5 | ||
81 | |||
/*
 * Per-eraseblock state.  Each block is linked on at most one of the
 * state rb-trees in mtdswap_dev.trees[]; @root points at the tree
 * currently holding it (NULL when detached, e.g. for bad blocks).
 */
struct swap_eb {
	struct rb_node rb;	/* node reused across trees */
	struct rb_root *root;	/* tree this block sits on, or NULL */

	unsigned int flags;		/* EBLOCK_* bits; tree index above EBLOCK_IDX_SHIFT */
	unsigned int active_count;	/* block "weight" compared against pages_per_eblk */
	unsigned int erase_count;	/* erases seen, sort key for wear leveling */
	unsigned int pad;		/* speeds up pointer decrement */
};
91 | |||
92 | #define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \ | ||
93 | rb)->erase_count) | ||
94 | #define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \ | ||
95 | rb)->erase_count) | ||
96 | |||
/* An rb-tree of erase blocks plus a running count of its entries. */
struct mtdswap_tree {
	struct rb_root root;
	unsigned int count;	/* kept in sync by mtdswap_rb_add()/mtdswap_eb_detach() */
};
101 | |||
/*
 * Erase-block states; each value indexes one rb-tree in
 * mtdswap_dev.trees[].  Blocks move between trees as their contents
 * and health change (see mtdswap_store_eb() and mtdswap_scan_eblks()).
 */
enum {
	MTDSWAP_CLEAN,		/* erased, carries the clean marker */
	MTDSWAP_USED,		/* weight == pages_per_eblk: fully populated */
	MTDSWAP_LOWFRAG,	/* more than half of the pages active */
	MTDSWAP_HIFRAG,		/* half or fewer pages active */
	MTDSWAP_DIRTY,		/* weight == 0: no active pages left */
	MTDSWAP_BITFLIP,	/* a read needed ECC correction (EBLOCK_BITFLIP) */
	MTDSWAP_FAILING,	/* read/write errors seen on this block */
	MTDSWAP_TREE_CNT,	/* number of trees, not a real state */
};
112 | |||
/* Per-device state for one mtdswap block-translation instance. */
struct mtdswap_dev {
	struct mtd_blktrans_dev *mbd_dev;	/* blktrans device we back */
	struct mtd_info *mtd;			/* underlying MTD */
	struct device *dev;			/* for dev_warn()/dev_err() */

	/* Page mapping tables; presumably logical->physical and the
	 * reverse — NOTE(review): confirm against the sector I/O paths,
	 * which are outside this view. */
	unsigned int *page_data;
	unsigned int *revmap;

	unsigned int eblks;		/* number of erase blocks managed */
	unsigned int spare_eblks;	/* decremented when a block goes bad */
	unsigned int pages_per_eblk;	/* max weight used by mtdswap_store_eb() */
	unsigned int max_erase_count;	/* highest erase count seen so far */
	struct swap_eb *eb_data;	/* one swap_eb per erase block */

	struct mtdswap_tree trees[MTDSWAP_TREE_CNT];	/* state rb-trees */

	/* statistics counters (exposed via debugfs, presumably) */
	unsigned long long sect_read_count;
	unsigned long long sect_write_count;
	unsigned long long mtd_write_count;
	unsigned long long mtd_read_count;
	unsigned long long discard_count;
	unsigned long long discard_page_count;

	unsigned int curr_write_pos;	/* write position within curr_write */
	struct swap_eb *curr_write;	/* block being filled; NULL after a write error */

	char *page_buf;
	char *oob_buf;		/* holds two OOB areas, see mtdswap_read_markers() */

	struct dentry *debugfs_root;	/* debugfs directory handle */
};
144 | |||
/*
 * On-flash OOB marker layout (little-endian, packed).  The clean
 * marker stores both fields; the dirty marker writes only @magic
 * (see mtdswap_write_marker()).
 */
struct mtdswap_oobdata {
	__le16 magic;	/* MTDSWAP_MAGIC_CLEAN or MTDSWAP_MAGIC_DIRTY */
	__le32 count;	/* erase count, valid with the clean magic only */
} __attribute__((packed));
149 | |||
150 | #define MTDSWAP_MAGIC_CLEAN 0x2095 | ||
151 | #define MTDSWAP_MAGIC_DIRTY (MTDSWAP_MAGIC_CLEAN + 1) | ||
152 | #define MTDSWAP_TYPE_CLEAN 0 | ||
153 | #define MTDSWAP_TYPE_DIRTY 1 | ||
154 | #define MTDSWAP_OOBSIZE sizeof(struct mtdswap_oobdata) | ||
155 | |||
156 | #define MTDSWAP_ERASE_RETRIES 3 /* Before marking erase block bad */ | ||
157 | #define MTDSWAP_IO_RETRIES 3 | ||
158 | |||
159 | enum { | ||
160 | MTDSWAP_SCANNED_CLEAN, | ||
161 | MTDSWAP_SCANNED_DIRTY, | ||
162 | MTDSWAP_SCANNED_BITFLIP, | ||
163 | MTDSWAP_SCANNED_BAD, | ||
164 | }; | ||
165 | |||
166 | /* | ||
167 | * In the worst case mtdswap_writesect() has allocated the last clean | ||
168 | * page from the current block and is then pre-empted by the GC | ||
169 | * thread. The thread can consume a full erase block when moving a | ||
170 | * block. | ||
171 | */ | ||
172 | #define MIN_SPARE_EBLOCKS 2 | ||
173 | #define MIN_ERASE_BLOCKS (MIN_SPARE_EBLOCKS + 1) | ||
174 | |||
175 | #define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root) | ||
176 | #define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL) | ||
177 | #define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name)) | ||
178 | #define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count) | ||
179 | |||
180 | #define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv) | ||
181 | |||
182 | static char partitions[128] = ""; | ||
183 | module_param_string(partitions, partitions, sizeof(partitions), 0444); | ||
184 | MODULE_PARM_DESC(partitions, "MTD partition numbers to use as swap " | ||
185 | "partitions=\"1,3,5\""); | ||
186 | |||
187 | static unsigned int spare_eblocks = 10; | ||
188 | module_param(spare_eblocks, uint, 0444); | ||
189 | MODULE_PARM_DESC(spare_eblocks, "Percentage of spare erase blocks for " | ||
190 | "garbage collection (default 10%)"); | ||
191 | |||
192 | static bool header; /* false */ | ||
193 | module_param(header, bool, 0444); | ||
194 | MODULE_PARM_DESC(header, | ||
195 | "Include builtin swap header (default 0, without header)"); | ||
196 | |||
197 | static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background); | ||
198 | |||
199 | static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb) | ||
200 | { | ||
201 | return (loff_t)(eb - d->eb_data) * d->mtd->erasesize; | ||
202 | } | ||
203 | |||
204 | static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb) | ||
205 | { | ||
206 | unsigned int oldidx; | ||
207 | struct mtdswap_tree *tp; | ||
208 | |||
209 | if (eb->root) { | ||
210 | tp = container_of(eb->root, struct mtdswap_tree, root); | ||
211 | oldidx = tp - &d->trees[0]; | ||
212 | |||
213 | d->trees[oldidx].count--; | ||
214 | rb_erase(&eb->rb, eb->root); | ||
215 | } | ||
216 | } | ||
217 | |||
218 | static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb) | ||
219 | { | ||
220 | struct rb_node **p, *parent = NULL; | ||
221 | struct swap_eb *cur; | ||
222 | |||
223 | p = &root->rb_node; | ||
224 | while (*p) { | ||
225 | parent = *p; | ||
226 | cur = rb_entry(parent, struct swap_eb, rb); | ||
227 | if (eb->erase_count > cur->erase_count) | ||
228 | p = &(*p)->rb_right; | ||
229 | else | ||
230 | p = &(*p)->rb_left; | ||
231 | } | ||
232 | |||
233 | rb_link_node(&eb->rb, parent, p); | ||
234 | rb_insert_color(&eb->rb, root); | ||
235 | } | ||
236 | |||
237 | static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx) | ||
238 | { | ||
239 | struct rb_root *root; | ||
240 | |||
241 | if (eb->root == &d->trees[idx].root) | ||
242 | return; | ||
243 | |||
244 | mtdswap_eb_detach(d, eb); | ||
245 | root = &d->trees[idx].root; | ||
246 | __mtdswap_rb_add(root, eb); | ||
247 | eb->root = root; | ||
248 | d->trees[idx].count++; | ||
249 | } | ||
250 | |||
/* Return the @idx-th node of @root in sorted order, or NULL if the
 * tree has fewer than idx + 1 entries.  O(idx) walk from rb_first(). */
static struct rb_node *mtdswap_rb_index(struct rb_root *root, unsigned int idx)
{
	struct rb_node *p = rb_first(root);
	unsigned int i;

	for (i = 0; i < idx && p; i++)
		p = rb_next(p);

	return p;
}
265 | |||
266 | static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb) | ||
267 | { | ||
268 | int ret; | ||
269 | loff_t offset; | ||
270 | |||
271 | d->spare_eblks--; | ||
272 | eb->flags |= EBLOCK_BAD; | ||
273 | mtdswap_eb_detach(d, eb); | ||
274 | eb->root = NULL; | ||
275 | |||
276 | /* badblocks not supported */ | ||
277 | if (!d->mtd->block_markbad) | ||
278 | return 1; | ||
279 | |||
280 | offset = mtdswap_eb_offset(d, eb); | ||
281 | dev_warn(d->dev, "Marking bad block at %08llx\n", offset); | ||
282 | ret = d->mtd->block_markbad(d->mtd, offset); | ||
283 | |||
284 | if (ret) { | ||
285 | dev_warn(d->dev, "Mark block bad failed for block at %08llx " | ||
286 | "error %d\n", offset, ret); | ||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | return 1; | ||
291 | |||
292 | } | ||
293 | |||
294 | static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb) | ||
295 | { | ||
296 | unsigned int marked = eb->flags & EBLOCK_FAILED; | ||
297 | struct swap_eb *curr_write = d->curr_write; | ||
298 | |||
299 | eb->flags |= EBLOCK_FAILED; | ||
300 | if (curr_write == eb) { | ||
301 | d->curr_write = NULL; | ||
302 | |||
303 | if (!marked && d->curr_write_pos != 0) { | ||
304 | mtdswap_rb_add(d, eb, MTDSWAP_FAILING); | ||
305 | return 0; | ||
306 | } | ||
307 | } | ||
308 | |||
309 | return mtdswap_handle_badblock(d, eb); | ||
310 | } | ||
311 | |||
312 | static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from, | ||
313 | struct mtd_oob_ops *ops) | ||
314 | { | ||
315 | int ret = d->mtd->read_oob(d->mtd, from, ops); | ||
316 | |||
317 | if (ret == -EUCLEAN) | ||
318 | return ret; | ||
319 | |||
320 | if (ret) { | ||
321 | dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n", | ||
322 | ret, from); | ||
323 | return ret; | ||
324 | } | ||
325 | |||
326 | if (ops->oobretlen < ops->ooblen) { | ||
327 | dev_warn(d->dev, "Read OOB return short read (%zd bytes not " | ||
328 | "%zd) for block at %08llx\n", | ||
329 | ops->oobretlen, ops->ooblen, from); | ||
330 | return -EIO; | ||
331 | } | ||
332 | |||
333 | return 0; | ||
334 | } | ||
335 | |||
/*
 * Read and decode the state markers of @eb.  One OOB read fetches the
 * auto-placed OOB of the first two pages into d->oob_buf: the first
 * holds the clean magic plus erase count, the second may hold the
 * dirty magic.  Returns a MTDSWAP_SCANNED_* state, or a negative
 * error from the OOB read.
 */
static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
{
	struct mtdswap_oobdata *data, *data2;
	int ret;
	loff_t offset;
	struct mtd_oob_ops ops;

	offset = mtdswap_eb_offset(d, eb);

	/* Check first if the block is bad. */
	if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset))
		return MTDSWAP_SCANNED_BAD;

	/* Two pages' worth of auto-placed OOB in one read. */
	ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
	ops.oobbuf = d->oob_buf;
	ops.ooboffs = 0;
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_AUTO;

	ret = mtdswap_read_oob(d, offset, &ops);

	/* -EUCLEAN means the data was corrected and is usable. */
	if (ret && ret != -EUCLEAN)
		return ret;

	data = (struct mtdswap_oobdata *)d->oob_buf;
	data2 = (struct mtdswap_oobdata *)
		(d->oob_buf + d->mtd->ecclayout->oobavail);

	if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
		eb->erase_count = le32_to_cpu(data->count);
		if (ret == -EUCLEAN)
			ret = MTDSWAP_SCANNED_BITFLIP;
		else {
			/* Second-page magic decides clean vs dirty. */
			if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
				ret = MTDSWAP_SCANNED_DIRTY;
			else
				ret = MTDSWAP_SCANNED_CLEAN;
		}
	} else {
		/* No magic at all: erase count unknown, treat as dirty. */
		eb->flags |= EBLOCK_NOMAGIC;
		ret = MTDSWAP_SCANNED_DIRTY;
	}

	return ret;
}
381 | |||
382 | static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb, | ||
383 | u16 marker) | ||
384 | { | ||
385 | struct mtdswap_oobdata n; | ||
386 | int ret; | ||
387 | loff_t offset; | ||
388 | struct mtd_oob_ops ops; | ||
389 | |||
390 | ops.ooboffs = 0; | ||
391 | ops.oobbuf = (uint8_t *)&n; | ||
392 | ops.mode = MTD_OOB_AUTO; | ||
393 | ops.datbuf = NULL; | ||
394 | |||
395 | if (marker == MTDSWAP_TYPE_CLEAN) { | ||
396 | n.magic = cpu_to_le16(MTDSWAP_MAGIC_CLEAN); | ||
397 | n.count = cpu_to_le32(eb->erase_count); | ||
398 | ops.ooblen = MTDSWAP_OOBSIZE; | ||
399 | offset = mtdswap_eb_offset(d, eb); | ||
400 | } else { | ||
401 | n.magic = cpu_to_le16(MTDSWAP_MAGIC_DIRTY); | ||
402 | ops.ooblen = sizeof(n.magic); | ||
403 | offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize; | ||
404 | } | ||
405 | |||
406 | ret = d->mtd->write_oob(d->mtd, offset , &ops); | ||
407 | |||
408 | if (ret) { | ||
409 | dev_warn(d->dev, "Write OOB failed for block at %08llx " | ||
410 | "error %d\n", offset, ret); | ||
411 | if (ret == -EIO || ret == -EBADMSG) | ||
412 | mtdswap_handle_write_error(d, eb); | ||
413 | return ret; | ||
414 | } | ||
415 | |||
416 | if (ops.oobretlen != ops.ooblen) { | ||
417 | dev_warn(d->dev, "Short OOB write for block at %08llx: " | ||
418 | "%zd not %zd\n", | ||
419 | offset, ops.oobretlen, ops.ooblen); | ||
420 | return ret; | ||
421 | } | ||
422 | |||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | /* | ||
427 | * Are there any erase blocks without MAGIC_CLEAN header, presumably | ||
428 | * because power was cut off after erase but before header write? We | ||
429 | * need to guestimate the erase count. | ||
430 | */ | ||
static void mtdswap_check_counts(struct mtdswap_dev *d)
{
	struct rb_root hist_root = RB_ROOT;
	struct rb_node *medrb;
	struct swap_eb *eb;
	unsigned int i, cnt, median;

	/* Build a temporary histogram tree, sorted by erase count, of
	 * every block with a trustworthy counter. */
	cnt = 0;
	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
			continue;

		__mtdswap_rb_add(&hist_root, eb);
		cnt++;
	}

	if (cnt == 0)
		return;

	/* Use the median erase count as the estimate for blocks whose
	 * counter could not be read. */
	medrb = mtdswap_rb_index(&hist_root, cnt / 2);
	median = rb_entry(medrb, struct swap_eb, rb)->erase_count;

	d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root);

	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR))
			eb->erase_count = median;

		/* Unlink only blocks that were linked above; the rb
		 * node is reused afterwards for the real state trees. */
		if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
			continue;

		rb_erase(&eb->rb, &hist_root);
	}
}
469 | |||
470 | static void mtdswap_scan_eblks(struct mtdswap_dev *d) | ||
471 | { | ||
472 | int status; | ||
473 | unsigned int i, idx; | ||
474 | struct swap_eb *eb; | ||
475 | |||
476 | for (i = 0; i < d->eblks; i++) { | ||
477 | eb = d->eb_data + i; | ||
478 | |||
479 | status = mtdswap_read_markers(d, eb); | ||
480 | if (status < 0) | ||
481 | eb->flags |= EBLOCK_READERR; | ||
482 | else if (status == MTDSWAP_SCANNED_BAD) { | ||
483 | eb->flags |= EBLOCK_BAD; | ||
484 | continue; | ||
485 | } | ||
486 | |||
487 | switch (status) { | ||
488 | case MTDSWAP_SCANNED_CLEAN: | ||
489 | idx = MTDSWAP_CLEAN; | ||
490 | break; | ||
491 | case MTDSWAP_SCANNED_DIRTY: | ||
492 | case MTDSWAP_SCANNED_BITFLIP: | ||
493 | idx = MTDSWAP_DIRTY; | ||
494 | break; | ||
495 | default: | ||
496 | idx = MTDSWAP_FAILING; | ||
497 | } | ||
498 | |||
499 | eb->flags |= (idx << EBLOCK_IDX_SHIFT); | ||
500 | } | ||
501 | |||
502 | mtdswap_check_counts(d); | ||
503 | |||
504 | for (i = 0; i < d->eblks; i++) { | ||
505 | eb = d->eb_data + i; | ||
506 | |||
507 | if (eb->flags & EBLOCK_BAD) | ||
508 | continue; | ||
509 | |||
510 | idx = eb->flags >> EBLOCK_IDX_SHIFT; | ||
511 | mtdswap_rb_add(d, eb, idx); | ||
512 | } | ||
513 | } | ||
514 | |||
515 | /* | ||
516 | * Place eblk into a tree corresponding to its number of active blocks | ||
517 | * it contains. | ||
518 | */ | ||
519 | static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb) | ||
520 | { | ||
521 | unsigned int weight = eb->active_count; | ||
522 | unsigned int maxweight = d->pages_per_eblk; | ||
523 | |||
524 | if (eb == d->curr_write) | ||
525 | return; | ||
526 | |||
527 | if (eb->flags & EBLOCK_BITFLIP) | ||
528 | mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); | ||
529 | else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED)) | ||
530 | mtdswap_rb_add(d, eb, MTDSWAP_FAILING); | ||
531 | if (weight == maxweight) | ||
532 | mtdswap_rb_add(d, eb, MTDSWAP_USED); | ||
533 | else if (weight == 0) | ||
534 | mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); | ||
535 | else if (weight > (maxweight/2)) | ||
536 | mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG); | ||
537 | else | ||
538 | mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG); | ||
539 | } | ||
540 | |||
541 | |||
542 | static void mtdswap_erase_callback(struct erase_info *done) | ||
543 | { | ||
544 | wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; | ||
545 | wake_up(wait_q); | ||
546 | } | ||
547 | |||
548 | static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb) | ||
549 | { | ||
550 | struct mtd_info *mtd = d->mtd; | ||
551 | struct erase_info erase; | ||
552 | wait_queue_head_t wq; | ||
553 | unsigned int retries = 0; | ||
554 | int ret; | ||
555 | |||
556 | eb->erase_count++; | ||
557 | if (eb->erase_count > d->max_erase_count) | ||
558 | d->max_erase_count = eb->erase_count; | ||
559 | |||
560 | retry: | ||
561 | init_waitqueue_head(&wq); | ||
562 | memset(&erase, 0, sizeof(struct erase_info)); | ||
563 | |||
564 | erase.mtd = mtd; | ||
565 | erase.callback = mtdswap_erase_callback; | ||
566 | erase.addr = mtdswap_eb_offset(d, eb); | ||
567 | erase.len = mtd->erasesize; | ||
568 | erase.priv = (u_long)&wq; | ||
569 | |||
570 | ret = mtd->erase(mtd, &erase); | ||
571 | if (ret) { | ||
572 | if (retries++ < MTDSWAP_ERASE_RETRIES) { | ||
573 | dev_warn(d->dev, | ||
574 | "erase of erase block %#llx on %s failed", | ||
575 | erase.addr, mtd->name); | ||
576 | yield(); | ||
577 | goto retry; | ||
578 | } | ||
579 | |||
580 | dev_err(d->dev, "Cannot erase erase block %#llx on %s\n", | ||
581 | erase.addr, mtd->name); | ||
582 | |||
583 | mtdswap_handle_badblock(d, eb); | ||
584 | return -EIO; | ||
585 | } | ||
586 | |||
587 | ret = wait_event_interruptible(wq, erase.state == MTD_ERASE_DONE || | ||
588 | erase.state == MTD_ERASE_FAILED); | ||
589 | if (ret) { | ||
590 | dev_err(d->dev, "Interrupted erase block %#llx erassure on %s", | ||
591 | erase.addr, mtd->name); | ||
592 | return -EINTR; | ||
593 | } | ||
594 | |||
595 | if (erase.state == MTD_ERASE_FAILED) { | ||
596 | if (retries++ < MTDSWAP_ERASE_RETRIES) { | ||
597 | dev_warn(d->dev, | ||
598 | "erase of erase block %#llx on %s failed", | ||
599 | erase.addr, mtd->name); | ||
600 | yield(); | ||
601 | goto retry; | ||
602 | } | ||
603 | |||
604 | mtdswap_handle_badblock(d, eb); | ||
605 | return -EIO; | ||
606 | } | ||
607 | |||
608 | return 0; | ||
609 | } | ||
610 | |||
/*
 * Map logical swap page 'page' to a fresh flash page, returned in
 * *block.  When the current write block is absent or full, a new erase
 * block is taken from the CLEAN tree and marked DIRTY on flash before
 * use; clean blocks whose marker write fails with -EIO/-EBADMSG are
 * consumed until one succeeds.
 *
 * Returns 0 on success, -ENOSPC when no clean blocks remain, or the
 * marker-write error code.
 */
static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
				unsigned int *block)
{
	int ret;
	struct swap_eb *old_eb = d->curr_write;
	struct rb_root *clean_root;
	struct swap_eb *eb;

	if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) {
		do {
			if (TREE_EMPTY(d, CLEAN))
				return -ENOSPC;

			/* Detach the first block of the CLEAN tree. */
			clean_root = TREE_ROOT(d, CLEAN);
			eb = rb_entry(rb_first(clean_root), struct swap_eb, rb);
			rb_erase(&eb->rb, clean_root);
			eb->root = NULL;
			TREE_COUNT(d, CLEAN)--;

			ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
		} while (ret == -EIO || ret == -EBADMSG);

		if (ret)
			return ret;

		d->curr_write_pos = 0;
		d->curr_write = eb;
		/* Re-file the exhausted previous write block. */
		if (old_eb)
			mtdswap_store_eb(d, old_eb);
	}

	*block = (d->curr_write - d->eb_data) * d->pages_per_eblk +
		d->curr_write_pos;

	d->curr_write->active_count++;
	d->revmap[*block] = page;
	d->curr_write_pos++;

	return 0;
}
651 | |||
652 | static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d) | ||
653 | { | ||
654 | return TREE_COUNT(d, CLEAN) * d->pages_per_eblk + | ||
655 | d->pages_per_eblk - d->curr_write_pos; | ||
656 | } | ||
657 | |||
658 | static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d) | ||
659 | { | ||
660 | return mtdswap_free_page_cnt(d) > d->pages_per_eblk; | ||
661 | } | ||
662 | |||
/*
 * Write one PAGE_SIZE buffer for logical page 'page' to flash, mapping
 * it via mtdswap_map_free_block() and returning the flash page in *bp.
 * Outside of GC (gc_context == 0) garbage collection is run first until
 * at least one spare erase block worth of pages is free.  -EIO/-EBADMSG
 * during mapping or writing cause the failing block to be handled and
 * the whole operation retried on a fresh block.
 */
static int mtdswap_write_block(struct mtdswap_dev *d, char *buf,
		unsigned int page, unsigned int *bp, int gc_context)
{
	struct mtd_info *mtd = d->mtd;
	struct swap_eb *eb;
	size_t retlen;
	loff_t writepos;
	int ret;

retry:
	if (!gc_context)
		while (!mtdswap_enough_free_pages(d))
			if (mtdswap_gc(d, 0) > 0)
				return -ENOSPC;

	/*
	 * NOTE(review): eb is derived from *bp before ret is checked; if
	 * mtdswap_map_free_block() fails without storing to *bp this uses
	 * an indeterminate value, though eb is only dereferenced on the
	 * -EIO/-EBADMSG path — confirm *bp is defined on that path.
	 */
	ret = mtdswap_map_free_block(d, page, bp);
	eb = d->eb_data + (*bp / d->pages_per_eblk);

	if (ret == -EIO || ret == -EBADMSG) {
		d->curr_write = NULL;
		eb->active_count--;
		d->revmap[*bp] = PAGE_UNDEF;
		goto retry;
	}

	if (ret < 0)
		return ret;

	writepos = (loff_t)*bp << PAGE_SHIFT;
	ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
	if (ret == -EIO || ret == -EBADMSG) {
		/* Undo the mapping and retry on another block. */
		d->curr_write_pos--;
		eb->active_count--;
		d->revmap[*bp] = PAGE_UNDEF;
		mtdswap_handle_write_error(d, eb);
		goto retry;
	}

	if (ret < 0) {
		dev_err(d->dev, "Write to MTD device failed: %d (%zd written)",
			ret, retlen);
		goto err;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short write to MTD device: %zd written",
			retlen);
		ret = -EIO;
		goto err;
	}

	return ret;

err:
	/* Roll back the page mapping on unrecoverable write errors. */
	d->curr_write_pos--;
	eb->active_count--;
	d->revmap[*bp] = PAGE_UNDEF;

	return ret;
}
723 | |||
724 | static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock, | ||
725 | unsigned int *newblock) | ||
726 | { | ||
727 | struct mtd_info *mtd = d->mtd; | ||
728 | struct swap_eb *eb, *oldeb; | ||
729 | int ret; | ||
730 | size_t retlen; | ||
731 | unsigned int page, retries; | ||
732 | loff_t readpos; | ||
733 | |||
734 | page = d->revmap[oldblock]; | ||
735 | readpos = (loff_t) oldblock << PAGE_SHIFT; | ||
736 | retries = 0; | ||
737 | |||
738 | retry: | ||
739 | ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf); | ||
740 | |||
741 | if (ret < 0 && ret != -EUCLEAN) { | ||
742 | oldeb = d->eb_data + oldblock / d->pages_per_eblk; | ||
743 | oldeb->flags |= EBLOCK_READERR; | ||
744 | |||
745 | dev_err(d->dev, "Read Error: %d (block %u)\n", ret, | ||
746 | oldblock); | ||
747 | retries++; | ||
748 | if (retries < MTDSWAP_IO_RETRIES) | ||
749 | goto retry; | ||
750 | |||
751 | goto read_error; | ||
752 | } | ||
753 | |||
754 | if (retlen != PAGE_SIZE) { | ||
755 | dev_err(d->dev, "Short read: %zd (block %u)\n", retlen, | ||
756 | oldblock); | ||
757 | ret = -EIO; | ||
758 | goto read_error; | ||
759 | } | ||
760 | |||
761 | ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1); | ||
762 | if (ret < 0) { | ||
763 | d->page_data[page] = BLOCK_ERROR; | ||
764 | dev_err(d->dev, "Write error: %d\n", ret); | ||
765 | return ret; | ||
766 | } | ||
767 | |||
768 | eb = d->eb_data + *newblock / d->pages_per_eblk; | ||
769 | d->page_data[page] = *newblock; | ||
770 | d->revmap[oldblock] = PAGE_UNDEF; | ||
771 | eb = d->eb_data + oldblock / d->pages_per_eblk; | ||
772 | eb->active_count--; | ||
773 | |||
774 | return 0; | ||
775 | |||
776 | read_error: | ||
777 | d->page_data[page] = BLOCK_ERROR; | ||
778 | d->revmap[oldblock] = PAGE_UNDEF; | ||
779 | return ret; | ||
780 | } | ||
781 | |||
782 | static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb) | ||
783 | { | ||
784 | unsigned int i, block, eblk_base, newblock; | ||
785 | int ret, errcode; | ||
786 | |||
787 | errcode = 0; | ||
788 | eblk_base = (eb - d->eb_data) * d->pages_per_eblk; | ||
789 | |||
790 | for (i = 0; i < d->pages_per_eblk; i++) { | ||
791 | if (d->spare_eblks < MIN_SPARE_EBLOCKS) | ||
792 | return -ENOSPC; | ||
793 | |||
794 | block = eblk_base + i; | ||
795 | if (d->revmap[block] == PAGE_UNDEF) | ||
796 | continue; | ||
797 | |||
798 | ret = mtdswap_move_block(d, block, &newblock); | ||
799 | if (ret < 0 && !errcode) | ||
800 | errcode = ret; | ||
801 | } | ||
802 | |||
803 | return errcode; | ||
804 | } | ||
805 | |||
806 | static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d) | ||
807 | { | ||
808 | int idx, stopat; | ||
809 | |||
810 | if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD) | ||
811 | stopat = MTDSWAP_LOWFRAG; | ||
812 | else | ||
813 | stopat = MTDSWAP_HIFRAG; | ||
814 | |||
815 | for (idx = MTDSWAP_BITFLIP; idx >= stopat; idx--) | ||
816 | if (d->trees[idx].root.rb_node != NULL) | ||
817 | return idx; | ||
818 | |||
819 | return -1; | ||
820 | } | ||
821 | |||
822 | static int mtdswap_wlfreq(unsigned int maxdiff) | ||
823 | { | ||
824 | unsigned int h, x, y, dist, base; | ||
825 | |||
826 | /* | ||
827 | * Calculate linear ramp down from f1 to f2 when maxdiff goes from | ||
828 | * MAX_ERASE_DIFF to MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE. Similar | ||
829 | * to triangle with height f1 - f1 and width COLLECT_NONDIRTY_BASE. | ||
830 | */ | ||
831 | |||
832 | dist = maxdiff - MAX_ERASE_DIFF; | ||
833 | if (dist > COLLECT_NONDIRTY_BASE) | ||
834 | dist = COLLECT_NONDIRTY_BASE; | ||
835 | |||
836 | /* | ||
837 | * Modelling the slop as right angular triangle with base | ||
838 | * COLLECT_NONDIRTY_BASE and height freq1 - freq2. The ratio y/x is | ||
839 | * equal to the ratio h/base. | ||
840 | */ | ||
841 | h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2; | ||
842 | base = COLLECT_NONDIRTY_BASE; | ||
843 | |||
844 | x = dist - base; | ||
845 | y = (x * h + base / 2) / base; | ||
846 | |||
847 | return COLLECT_NONDIRTY_FREQ2 + y; | ||
848 | } | ||
849 | |||
/*
 * Wear levelling: roughly every mtdswap_wlfreq() GC picks, choose the
 * tree holding the block whose minimum erase count lags the device
 * maximum the most, provided the lag exceeds MAX_ERASE_DIFF.  Returns
 * the tree index to collect from, or -1 for "not this time".
 */
static int mtdswap_choose_wl_tree(struct mtdswap_dev *d)
{
	/* GC picks since wear levelling last triggered (module-lifetime). */
	static unsigned int pick_cnt;
	/* idx stays (unsigned)-1 and converts back to -1 if never set. */
	unsigned int i, idx = -1, wear, max;
	struct rb_root *root;

	/* Find the non-empty tree with the largest erase-count deficit. */
	max = 0;
	for (i = 0; i <= MTDSWAP_DIRTY; i++) {
		root = &d->trees[i].root;
		if (root->rb_node == NULL)
			continue;

		wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root);
		if (wear > max) {
			max = wear;
			idx = i;
		}
	}

	if (max > MAX_ERASE_DIFF && pick_cnt >= mtdswap_wlfreq(max) - 1) {
		pick_cnt = 0;
		return idx;
	}

	pick_cnt++;
	return -1;
}
877 | |||
878 | static int mtdswap_choose_gc_tree(struct mtdswap_dev *d, | ||
879 | unsigned int background) | ||
880 | { | ||
881 | int idx; | ||
882 | |||
883 | if (TREE_NONEMPTY(d, FAILING) && | ||
884 | (background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY)))) | ||
885 | return MTDSWAP_FAILING; | ||
886 | |||
887 | idx = mtdswap_choose_wl_tree(d); | ||
888 | if (idx >= MTDSWAP_CLEAN) | ||
889 | return idx; | ||
890 | |||
891 | return __mtdswap_choose_gc_tree(d); | ||
892 | } | ||
893 | |||
894 | static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d, | ||
895 | unsigned int background) | ||
896 | { | ||
897 | struct rb_root *rp = NULL; | ||
898 | struct swap_eb *eb = NULL; | ||
899 | int idx; | ||
900 | |||
901 | if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD && | ||
902 | TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING)) | ||
903 | return NULL; | ||
904 | |||
905 | idx = mtdswap_choose_gc_tree(d, background); | ||
906 | if (idx < 0) | ||
907 | return NULL; | ||
908 | |||
909 | rp = &d->trees[idx].root; | ||
910 | eb = rb_entry(rb_first(rp), struct swap_eb, rb); | ||
911 | |||
912 | rb_erase(&eb->rb, rp); | ||
913 | eb->root = NULL; | ||
914 | d->trees[idx].count--; | ||
915 | return eb; | ||
916 | } | ||
917 | |||
/*
 * Alternating bit patterns for the erase-block stress test: odd passes
 * get 0x55555555, even passes 0xAAAAAAAA (each the other's complement).
 */
static unsigned int mtdswap_test_patt(unsigned int i)
{
	if (i & 1)
		return 0x55555555;
	return 0xAAAAAAAA;
}
922 | |||
/*
 * Stress test a suspect erase block: write alternating bit patterns to
 * every MTD page (data plus free OOB bytes), read them back and verify,
 * erasing between the two passes.  Returns 1 and clears EBLOCK_READERR
 * when the block passes; otherwise retires it via
 * mtdswap_handle_badblock() and returns 0.
 */
static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
					struct swap_eb *eb)
{
	struct mtd_info *mtd = d->mtd;
	unsigned int test, i, j, patt, mtd_pages;
	loff_t base, pos;
	unsigned int *p1 = (unsigned int *)d->page_buf;
	unsigned char *p2 = (unsigned char *)d->oob_buf;
	struct mtd_oob_ops ops;
	int ret;

	ops.mode = MTD_OOB_AUTO;
	ops.len = mtd->writesize;
	ops.ooblen = mtd->ecclayout->oobavail;
	ops.ooboffs = 0;
	ops.datbuf = d->page_buf;
	ops.oobbuf = d->oob_buf;
	base = mtdswap_eb_offset(d, eb);
	/* Number of MTD write pages per erase block. */
	mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;

	for (test = 0; test < 2; test++) {
		pos = base;
		/* Fill the whole block with per-page test patterns. */
		for (i = 0; i < mtd_pages; i++) {
			patt = mtdswap_test_patt(test + i);
			memset(d->page_buf, patt, mtd->writesize);
			memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
			ret = mtd->write_oob(mtd, pos, &ops);
			if (ret)
				goto error;

			pos += mtd->writesize;
		}

		pos = base;
		/* Read everything back, verifying data and OOB bytes. */
		for (i = 0; i < mtd_pages; i++) {
			ret = mtd->read_oob(mtd, pos, &ops);
			if (ret)
				goto error;

			patt = mtdswap_test_patt(test + i);
			for (j = 0; j < mtd->writesize/sizeof(int); j++)
				if (p1[j] != patt)
					goto error;

			for (j = 0; j < mtd->ecclayout->oobavail; j++)
				if (p2[j] != (unsigned char)patt)
					goto error;

			pos += mtd->writesize;
		}

		ret = mtdswap_erase_block(d, eb);
		if (ret)
			goto error;
	}

	eb->flags &= ~EBLOCK_READERR;
	return 1;

error:
	mtdswap_handle_badblock(d, eb);
	return 0;
}
986 | |||
/*
 * Perform one round of garbage collection: pick a victim block, move
 * its live pages elsewhere, then erase it and re-file it.  Blocks with
 * earlier read errors must additionally pass the stress test before
 * being reused.  Returns non-zero when there is nothing (more) to do,
 * 0 when another round may be worthwhile.
 */
static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
{
	struct swap_eb *eb;
	int ret;

	if (d->spare_eblks < MIN_SPARE_EBLOCKS)
		return 1;

	eb = mtdswap_pick_gc_eblk(d, background);
	if (!eb)
		return 1;

	ret = mtdswap_gc_eblock(d, eb);
	if (ret == -ENOSPC)
		return 1;

	if (eb->flags & EBLOCK_FAILED) {
		mtdswap_handle_badblock(d, eb);
		return 0;
	}

	/* Erasing clears bitflips; read-error blocks must pass the test. */
	eb->flags &= ~EBLOCK_BITFLIP;
	ret = mtdswap_erase_block(d, eb);
	if ((eb->flags & EBLOCK_READERR) &&
		(ret || !mtdswap_eblk_passes(d, eb)))
		return 0;

	if (ret == 0)
		ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN);

	/* Marker write failures with -EIO/-EBADMSG drop the block. */
	if (ret == 0)
		mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
	else if (ret != -EIO && ret != -EBADMSG)
		mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);

	return 0;
}
1024 | |||
/*
 * Background garbage collection, run from the mtd_blktrans helper.
 * Keeps collecting until GC reports nothing left to do (non-zero) or
 * the block translation layer asks the background work to stop.
 */
static void mtdswap_background(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

	for (;;) {
		if (mtdswap_gc(d, 1))
			return;
		if (mtd_blktrans_cease_background(dev))
			return;
	}
}
1036 | |||
1037 | static void mtdswap_cleanup(struct mtdswap_dev *d) | ||
1038 | { | ||
1039 | vfree(d->eb_data); | ||
1040 | vfree(d->revmap); | ||
1041 | vfree(d->page_data); | ||
1042 | kfree(d->oob_buf); | ||
1043 | kfree(d->page_buf); | ||
1044 | } | ||
1045 | |||
1046 | static int mtdswap_flush(struct mtd_blktrans_dev *dev) | ||
1047 | { | ||
1048 | struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); | ||
1049 | |||
1050 | if (d->mtd->sync) | ||
1051 | d->mtd->sync(d->mtd); | ||
1052 | return 0; | ||
1053 | } | ||
1054 | |||
1055 | static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size) | ||
1056 | { | ||
1057 | loff_t offset; | ||
1058 | unsigned int badcnt; | ||
1059 | |||
1060 | badcnt = 0; | ||
1061 | |||
1062 | if (mtd->block_isbad) | ||
1063 | for (offset = 0; offset < size; offset += mtd->erasesize) | ||
1064 | if (mtd->block_isbad(mtd, offset)) | ||
1065 | badcnt++; | ||
1066 | |||
1067 | return badcnt; | ||
1068 | } | ||
1069 | |||
1070 | static int mtdswap_writesect(struct mtd_blktrans_dev *dev, | ||
1071 | unsigned long page, char *buf) | ||
1072 | { | ||
1073 | struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); | ||
1074 | unsigned int newblock, mapped; | ||
1075 | struct swap_eb *eb; | ||
1076 | int ret; | ||
1077 | |||
1078 | d->sect_write_count++; | ||
1079 | |||
1080 | if (d->spare_eblks < MIN_SPARE_EBLOCKS) | ||
1081 | return -ENOSPC; | ||
1082 | |||
1083 | if (header) { | ||
1084 | /* Ignore writes to the header page */ | ||
1085 | if (unlikely(page == 0)) | ||
1086 | return 0; | ||
1087 | |||
1088 | page--; | ||
1089 | } | ||
1090 | |||
1091 | mapped = d->page_data[page]; | ||
1092 | if (mapped <= BLOCK_MAX) { | ||
1093 | eb = d->eb_data + (mapped / d->pages_per_eblk); | ||
1094 | eb->active_count--; | ||
1095 | mtdswap_store_eb(d, eb); | ||
1096 | d->page_data[page] = BLOCK_UNDEF; | ||
1097 | d->revmap[mapped] = PAGE_UNDEF; | ||
1098 | } | ||
1099 | |||
1100 | ret = mtdswap_write_block(d, buf, page, &newblock, 0); | ||
1101 | d->mtd_write_count++; | ||
1102 | |||
1103 | if (ret < 0) | ||
1104 | return ret; | ||
1105 | |||
1106 | eb = d->eb_data + (newblock / d->pages_per_eblk); | ||
1107 | d->page_data[page] = newblock; | ||
1108 | |||
1109 | return 0; | ||
1110 | } | ||
1111 | |||
1112 | /* Provide a dummy swap header for the kernel */ | ||
1113 | static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf) | ||
1114 | { | ||
1115 | union swap_header *hd = (union swap_header *)(buf); | ||
1116 | |||
1117 | memset(buf, 0, PAGE_SIZE - 10); | ||
1118 | |||
1119 | hd->info.version = 1; | ||
1120 | hd->info.last_page = d->mbd_dev->size - 1; | ||
1121 | hd->info.nr_badpages = 0; | ||
1122 | |||
1123 | memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10); | ||
1124 | |||
1125 | return 0; | ||
1126 | } | ||
1127 | |||
/*
 * Block-layer read hook: fetch the flash copy of logical page 'page'
 * into buf.  Page 0 is synthesized when the dummy header is enabled.
 * Unmapped pages read back as zeroes; pages previously lost to write
 * errors (BLOCK_ERROR) return -EIO.  Correctable bitflips (-EUCLEAN)
 * succeed but queue the block for scrubbing via the BITFLIP tree.
 */
static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
			unsigned long page, char *buf)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	struct mtd_info *mtd = d->mtd;
	unsigned int realblock, retries;
	loff_t readpos;
	struct swap_eb *eb;
	size_t retlen;
	int ret;

	d->sect_read_count++;

	if (header) {
		/* Page 0 never hits flash: serve the generated header. */
		if (unlikely(page == 0))
			return mtdswap_auto_header(d, buf);

		page--;
	}

	realblock = d->page_data[page];
	if (realblock > BLOCK_MAX) {
		memset(buf, 0x0, PAGE_SIZE);
		if (realblock == BLOCK_UNDEF)
			return 0;
		else
			return -EIO;
	}

	eb = d->eb_data + (realblock / d->pages_per_eblk);
	BUG_ON(d->revmap[realblock] == PAGE_UNDEF);

	readpos = (loff_t)realblock << PAGE_SHIFT;
	retries = 0;

retry:
	ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);

	d->mtd_read_count++;
	if (ret == -EUCLEAN) {
		/* Data was corrected: usable now, but scrub the block. */
		eb->flags |= EBLOCK_BITFLIP;
		mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
		ret = 0;
	}

	if (ret < 0) {
		dev_err(d->dev, "Read error %d\n", ret);
		eb->flags |= EBLOCK_READERR;
		mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
		retries++;
		if (retries < MTDSWAP_IO_RETRIES)
			goto retry;

		return ret;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short read %zd\n", retlen);
		return -EIO;
	}

	return 0;
}
1191 | |||
1192 | static int mtdswap_discard(struct mtd_blktrans_dev *dev, unsigned long first, | ||
1193 | unsigned nr_pages) | ||
1194 | { | ||
1195 | struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); | ||
1196 | unsigned long page; | ||
1197 | struct swap_eb *eb; | ||
1198 | unsigned int mapped; | ||
1199 | |||
1200 | d->discard_count++; | ||
1201 | |||
1202 | for (page = first; page < first + nr_pages; page++) { | ||
1203 | mapped = d->page_data[page]; | ||
1204 | if (mapped <= BLOCK_MAX) { | ||
1205 | eb = d->eb_data + (mapped / d->pages_per_eblk); | ||
1206 | eb->active_count--; | ||
1207 | mtdswap_store_eb(d, eb); | ||
1208 | d->page_data[page] = BLOCK_UNDEF; | ||
1209 | d->revmap[mapped] = PAGE_UNDEF; | ||
1210 | d->discard_page_count++; | ||
1211 | } else if (mapped == BLOCK_ERROR) { | ||
1212 | d->page_data[page] = BLOCK_UNDEF; | ||
1213 | d->discard_page_count++; | ||
1214 | } | ||
1215 | } | ||
1216 | |||
1217 | return 0; | ||
1218 | } | ||
1219 | |||
/*
 * debugfs "stats" show routine: dump per-tree block counts and
 * erase-count ranges, bad block count, current write position,
 * cumulative erase/IO counters and page-mapping usage.
 */
static int mtdswap_show(struct seq_file *s, void *data)
{
	struct mtdswap_dev *d = (struct mtdswap_dev *) s->private;
	unsigned long sum;
	unsigned int count[MTDSWAP_TREE_CNT];
	unsigned int min[MTDSWAP_TREE_CNT];
	unsigned int max[MTDSWAP_TREE_CNT];
	unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
	uint64_t use_size;
	char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip",
			"failing"};

	/* Snapshot all state under the lock; format afterwards. */
	mutex_lock(&d->mbd_dev->lock);

	for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
		struct rb_root *root = &d->trees[i].root;

		/* min[]/max[] stay unset for empty trees (count == 0). */
		if (root->rb_node) {
			count[i] = d->trees[i].count;
			min[i] = rb_entry(rb_first(root), struct swap_eb,
					rb)->erase_count;
			max[i] = rb_entry(rb_last(root), struct swap_eb,
					rb)->erase_count;
		} else
			count[i] = 0;
	}

	if (d->curr_write) {
		cw = 1;
		cwp = d->curr_write_pos;
		cwecount = d->curr_write->erase_count;
	}

	sum = 0;
	for (i = 0; i < d->eblks; i++)
		sum += d->eb_data[i].erase_count;

	use_size = (uint64_t)d->eblks * d->mtd->erasesize;
	bb_cnt = mtdswap_badblocks(d->mtd, use_size);

	mapped = 0;
	pages = d->mbd_dev->size;
	for (i = 0; i < pages; i++)
		if (d->page_data[i] != BLOCK_UNDEF)
			mapped++;

	mutex_unlock(&d->mbd_dev->lock);

	for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
		if (!count[i])
			continue;

		if (min[i] != max[i])
			seq_printf(s, "%s:\t%5d erase blocks, erased min %d, "
				"max %d times\n",
				name[i], count[i], min[i], max[i]);
		else
			seq_printf(s, "%s:\t%5d erase blocks, all erased %d "
				"times\n", name[i], count[i], min[i]);
	}

	if (bb_cnt)
		seq_printf(s, "bad:\t%5u erase blocks\n", bb_cnt);

	if (cw)
		seq_printf(s, "current erase block: %u pages used, %u free, "
			"erased %u times\n",
			cwp, d->pages_per_eblk - cwp, cwecount);

	seq_printf(s, "total erasures: %lu\n", sum);

	seq_printf(s, "\n");

	seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count);
	seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count);
	seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count);
	seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count);
	seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count);
	seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count);

	seq_printf(s, "\n");
	seq_printf(s, "total pages: %u\n", pages);
	seq_printf(s, "pages mapped: %u\n", mapped);

	return 0;
}
1306 | |||
1307 | static int mtdswap_open(struct inode *inode, struct file *file) | ||
1308 | { | ||
1309 | return single_open(file, mtdswap_show, inode->i_private); | ||
1310 | } | ||
1311 | |||
/* debugfs "stats" file: read-only single-shot seq_file interface. */
static const struct file_operations mtdswap_fops = {
	.open		= mtdswap_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1318 | |||
/*
 * Create <debugfs>/<disk_name>/stats exposing device statistics.
 * Returns 0 on success, 0 when debugfs hands back an error pointer
 * (treated as "debugfs unavailable" -- presumably CONFIG_DEBUG_FS is
 * disabled; confirm), or -1 on a genuine creation failure.
 */
static int mtdswap_add_debugfs(struct mtdswap_dev *d)
{
	struct gendisk *gd = d->mbd_dev->disk;
	struct device *dev = disk_to_dev(gd);

	struct dentry *root;
	struct dentry *dent;

	root = debugfs_create_dir(gd->disk_name, NULL);
	if (IS_ERR(root))
		return 0;

	if (!root) {
		dev_err(dev, "failed to initialize debugfs\n");
		return -1;
	}

	d->debugfs_root = root;

	dent = debugfs_create_file("stats", S_IRUSR, root, d,
				&mtdswap_fops);
	if (!dent) {
		dev_err(d->dev, "debugfs_create_file failed\n");
		debugfs_remove_recursive(root);
		d->debugfs_root = NULL;
		return -1;
	}

	return 0;
}
1349 | |||
/*
 * Allocate and initialize the in-memory state for a device of
 * 'eblocks' erase blocks, 'spare_cnt' of which are reserved as spares,
 * then scan the existing on-flash state.  Returns 0 or -ENOMEM.
 */
static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
			unsigned int spare_cnt)
{
	struct mtd_info *mtd = d->mbd_dev->mtd;
	unsigned int i, eblk_bytes, pages, blocks;
	int ret = -ENOMEM;

	d->mtd = mtd;
	d->eblks = eblocks;
	d->spare_eblks = spare_cnt;
	d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT;

	pages = d->mbd_dev->size;
	blocks = eblocks * d->pages_per_eblk;

	for (i = 0; i < MTDSWAP_TREE_CNT; i++)
		d->trees[i].root = RB_ROOT;

	/* page_data: logical page -> flash page; revmap: the inverse. */
	d->page_data = vmalloc(sizeof(int)*pages);
	if (!d->page_data)
		goto page_data_fail;

	d->revmap = vmalloc(sizeof(int)*blocks);
	if (!d->revmap)
		goto revmap_fail;

	eblk_bytes = sizeof(struct swap_eb)*d->eblks;
	d->eb_data = vmalloc(eblk_bytes);
	if (!d->eb_data)
		goto eb_data_fail;

	memset(d->eb_data, 0, eblk_bytes);
	for (i = 0; i < pages; i++)
		d->page_data[i] = BLOCK_UNDEF;

	for (i = 0; i < blocks; i++)
		d->revmap[i] = PAGE_UNDEF;

	/* Bounce buffers: one data page, two OOB areas worth of bytes. */
	d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!d->page_buf)
		goto page_buf_fail;

	d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
	if (!d->oob_buf)
		goto oob_buf_fail;

	mtdswap_scan_eblks(d);

	return 0;

	/* Unwind in reverse allocation order. */
oob_buf_fail:
	kfree(d->page_buf);
page_buf_fail:
	vfree(d->eb_data);
eb_data_fail:
	vfree(d->revmap);
revmap_fail:
	vfree(d->page_data);
page_data_fail:
	printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret);
	return ret;
}
1412 | |||
/*
 * mtd_blktrans add hook: if this MTD's index is listed in the
 * "partitions" module parameter and the device geometry is usable
 * (erase size a PAGE_SIZE multiple, write size dividing PAGE_SIZE,
 * enough free OOB bytes and enough good erase blocks), register an
 * mtdswap block device on top of it.  Any failure simply skips the
 * device.
 */
static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtdswap_dev *d;
	struct mtd_blktrans_dev *mbd_dev;
	char *parts;
	char *this_opt;
	unsigned long part;
	unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
	uint64_t swap_size, use_size, size_limit;
	struct nand_ecclayout *oinfo;
	int ret;

	parts = &partitions[0];
	if (!*parts)
		return;

	/* Accept only MTD indices listed in the partitions parameter. */
	while ((this_opt = strsep(&parts, ",")) != NULL) {
		if (strict_strtoul(this_opt, 0, &part) < 0)
			return;

		if (mtd->index == part)
			break;
	}

	if (mtd->index != part)
		return;

	if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) {
		printk(KERN_ERR "%s: Erase size %u not multiple of PAGE_SIZE "
			"%lu\n", MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE);
		return;
	}

	if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) {
		printk(KERN_ERR "%s: PAGE_SIZE %lu not multiple of write size"
			" %u\n", MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize);
		return;
	}

	oinfo = mtd->ecclayout;
	if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) {
		printk(KERN_ERR "%s: Not enough free bytes in OOB, "
			"%d available, %lu needed.\n",
			MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
		return;
	}

	/* spare_eblocks is a percentage; clamp to 100. */
	if (spare_eblocks > 100)
		spare_eblocks = 100;

	use_size = mtd->size;
	size_limit = (uint64_t) BLOCK_MAX * PAGE_SIZE;

	if (mtd->size > size_limit) {
		printk(KERN_WARNING "%s: Device too large. Limiting size to "
			"%llu bytes\n", MTDSWAP_PREFIX, size_limit);
		use_size = size_limit;
	}

	eblocks = mtd_div_by_eb(use_size, mtd);
	use_size = eblocks * mtd->erasesize;
	bad_blocks = mtdswap_badblocks(mtd, use_size);
	eavailable = eblocks - bad_blocks;

	if (eavailable < MIN_ERASE_BLOCKS) {
		printk(KERN_ERR "%s: Not enough erase blocks. %u available, "
			"%d needed\n", MTDSWAP_PREFIX, eavailable,
			MIN_ERASE_BLOCKS);
		return;
	}

	spare_cnt = div_u64((uint64_t)eavailable * spare_eblocks, 100);

	if (spare_cnt < MIN_SPARE_EBLOCKS)
		spare_cnt = MIN_SPARE_EBLOCKS;

	if (spare_cnt > eavailable - 1)
		spare_cnt = eavailable - 1;

	/* Advertised size excludes spares; add one page for the header. */
	swap_size = (uint64_t)(eavailable - spare_cnt) * mtd->erasesize +
		(header ? PAGE_SIZE : 0);

	printk(KERN_INFO "%s: Enabling MTD swap on device %lu, size %llu KB, "
		"%u spare, %u bad blocks\n",
		MTDSWAP_PREFIX, part, swap_size / 1024, spare_cnt, bad_blocks);

	d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL);
	if (!d)
		return;

	mbd_dev = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
	if (!mbd_dev) {
		kfree(d);
		return;
	}

	d->mbd_dev = mbd_dev;
	mbd_dev->priv = d;

	mbd_dev->mtd = mtd;
	mbd_dev->devnum = mtd->index;
	mbd_dev->size = swap_size >> PAGE_SHIFT;
	mbd_dev->tr = tr;

	if (!(mtd->flags & MTD_WRITEABLE))
		mbd_dev->readonly = 1;

	if (mtdswap_init(d, eblocks, spare_cnt) < 0)
		goto init_failed;

	if (add_mtd_blktrans_dev(mbd_dev) < 0)
		goto cleanup;

	d->dev = disk_to_dev(mbd_dev->disk);

	ret = mtdswap_add_debugfs(d);
	if (ret < 0)
		goto debugfs_failed;

	return;

debugfs_failed:
	del_mtd_blktrans_dev(mbd_dev);

cleanup:
	mtdswap_cleanup(d);

init_failed:
	kfree(mbd_dev);
	kfree(d);
}
1544 | |||
/*
 * mtd_blktrans remove hook: tear down debugfs, unregister the block
 * device, then free all per-device state (order matters: 'd' is used
 * until the last step).
 */
static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

	debugfs_remove_recursive(d->debugfs_root);
	del_mtd_blktrans_dev(dev);
	mtdswap_cleanup(d);
	kfree(d);
}
1554 | |||
/*
 * mtd_blktrans registration: a PAGE_SIZE-sector block device per
 * configured MTD, with flush, discard and background-GC support.
 */
static struct mtd_blktrans_ops mtdswap_ops = {
	.name		= "mtdswap",
	.major		= 0,
	.part_bits	= 0,
	.blksize	= PAGE_SIZE,
	.flush		= mtdswap_flush,
	.readsect	= mtdswap_readsect,
	.writesect	= mtdswap_writesect,
	.discard	= mtdswap_discard,
	.background	= mtdswap_background,
	.add_mtd	= mtdswap_add_mtd,
	.remove_dev	= mtdswap_remove_dev,
	.owner		= THIS_MODULE,
};
1569 | |||
/* Register the mtdswap block translation layer on module load. */
static int __init mtdswap_modinit(void)
{
	return register_mtd_blktrans(&mtdswap_ops);
}
1574 | |||
/* Unregister the translation layer on module unload. */
static void __exit mtdswap_modexit(void)
{
	deregister_mtd_blktrans(&mtdswap_ops);
}
1579 | |||
/* Module entry points and metadata. */
module_init(mtdswap_modinit);
module_exit(mtdswap_modexit);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Block device access to an MTD suitable for using as "
		"swap space");
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 4f6c06f16328..a92054e945e1 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -31,6 +31,21 @@ config MTD_NAND_VERIFY_WRITE | |||
31 | device thinks the write was successful, a bit could have been | 31 | device thinks the write was successful, a bit could have been |
32 | flipped accidentally due to device wear or something else. | 32 | flipped accidentally due to device wear or something else. |
33 | 33 | ||
34 | config MTD_NAND_BCH | ||
35 | tristate | ||
36 | select BCH | ||
37 | depends on MTD_NAND_ECC_BCH | ||
38 | default MTD_NAND | ||
39 | |||
40 | config MTD_NAND_ECC_BCH | ||
41 | bool "Support software BCH ECC" | ||
42 | default n | ||
43 | help | ||
44 | This enables support for software BCH error correction. Binary BCH | ||
45 | codes are more powerful and cpu intensive than traditional Hamming | ||
46 | ECC codes. They are used with NAND devices requiring more than 1 bit | ||
47 | of error correction. | ||
48 | |||
34 | config MTD_SM_COMMON | 49 | config MTD_SM_COMMON |
35 | tristate | 50 | tristate |
36 | default n | 51 | default n |
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 8ad6faec72cb..5745d831168e 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_MTD_NAND) += nand.o | 5 | obj-$(CONFIG_MTD_NAND) += nand.o |
6 | obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o | 6 | obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o |
7 | obj-$(CONFIG_MTD_NAND_BCH) += nand_bch.o | ||
7 | obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o | 8 | obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o |
8 | obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o | 9 | obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o |
9 | 10 | ||
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index ccce0f03b5dc..6fae04b3fc6d 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c | |||
@@ -48,6 +48,9 @@ | |||
48 | #define no_ecc 0 | 48 | #define no_ecc 0 |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | static int use_dma = 1; | ||
52 | module_param(use_dma, int, 0); | ||
53 | |||
51 | static int on_flash_bbt = 0; | 54 | static int on_flash_bbt = 0; |
52 | module_param(on_flash_bbt, int, 0); | 55 | module_param(on_flash_bbt, int, 0); |
53 | 56 | ||
@@ -89,11 +92,20 @@ struct atmel_nand_host { | |||
89 | struct nand_chip nand_chip; | 92 | struct nand_chip nand_chip; |
90 | struct mtd_info mtd; | 93 | struct mtd_info mtd; |
91 | void __iomem *io_base; | 94 | void __iomem *io_base; |
95 | dma_addr_t io_phys; | ||
92 | struct atmel_nand_data *board; | 96 | struct atmel_nand_data *board; |
93 | struct device *dev; | 97 | struct device *dev; |
94 | void __iomem *ecc; | 98 | void __iomem *ecc; |
99 | |||
100 | struct completion comp; | ||
101 | struct dma_chan *dma_chan; | ||
95 | }; | 102 | }; |
96 | 103 | ||
104 | static int cpu_has_dma(void) | ||
105 | { | ||
106 | return cpu_is_at91sam9rl() || cpu_is_at91sam9g45(); | ||
107 | } | ||
108 | |||
97 | /* | 109 | /* |
98 | * Enable NAND. | 110 | * Enable NAND. |
99 | */ | 111 | */ |
@@ -150,7 +162,7 @@ static int atmel_nand_device_ready(struct mtd_info *mtd) | |||
150 | /* | 162 | /* |
151 | * Minimal-overhead PIO for data access. | 163 | * Minimal-overhead PIO for data access. |
152 | */ | 164 | */ |
153 | static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) | 165 | static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len) |
154 | { | 166 | { |
155 | struct nand_chip *nand_chip = mtd->priv; | 167 | struct nand_chip *nand_chip = mtd->priv; |
156 | 168 | ||
@@ -164,7 +176,7 @@ static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len) | |||
164 | __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2); | 176 | __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2); |
165 | } | 177 | } |
166 | 178 | ||
167 | static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) | 179 | static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len) |
168 | { | 180 | { |
169 | struct nand_chip *nand_chip = mtd->priv; | 181 | struct nand_chip *nand_chip = mtd->priv; |
170 | 182 | ||
@@ -178,6 +190,121 @@ static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len) | |||
178 | __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2); | 190 | __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2); |
179 | } | 191 | } |
180 | 192 | ||
193 | static void dma_complete_func(void *completion) | ||
194 | { | ||
195 | complete(completion); | ||
196 | } | ||
197 | |||
198 | static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len, | ||
199 | int is_read) | ||
200 | { | ||
201 | struct dma_device *dma_dev; | ||
202 | enum dma_ctrl_flags flags; | ||
203 | dma_addr_t dma_src_addr, dma_dst_addr, phys_addr; | ||
204 | struct dma_async_tx_descriptor *tx = NULL; | ||
205 | dma_cookie_t cookie; | ||
206 | struct nand_chip *chip = mtd->priv; | ||
207 | struct atmel_nand_host *host = chip->priv; | ||
208 | void *p = buf; | ||
209 | int err = -EIO; | ||
210 | enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | ||
211 | |||
212 | if (buf >= high_memory) { | ||
213 | struct page *pg; | ||
214 | |||
215 | if (((size_t)buf & PAGE_MASK) != | ||
216 | ((size_t)(buf + len - 1) & PAGE_MASK)) { | ||
217 | dev_warn(host->dev, "Buffer not fit in one page\n"); | ||
218 | goto err_buf; | ||
219 | } | ||
220 | |||
221 | pg = vmalloc_to_page(buf); | ||
222 | if (pg == 0) { | ||
223 | dev_err(host->dev, "Failed to vmalloc_to_page\n"); | ||
224 | goto err_buf; | ||
225 | } | ||
226 | p = page_address(pg) + ((size_t)buf & ~PAGE_MASK); | ||
227 | } | ||
228 | |||
229 | dma_dev = host->dma_chan->device; | ||
230 | |||
231 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | | ||
232 | DMA_COMPL_SKIP_DEST_UNMAP; | ||
233 | |||
234 | phys_addr = dma_map_single(dma_dev->dev, p, len, dir); | ||
235 | if (dma_mapping_error(dma_dev->dev, phys_addr)) { | ||
236 | dev_err(host->dev, "Failed to dma_map_single\n"); | ||
237 | goto err_buf; | ||
238 | } | ||
239 | |||
240 | if (is_read) { | ||
241 | dma_src_addr = host->io_phys; | ||
242 | dma_dst_addr = phys_addr; | ||
243 | } else { | ||
244 | dma_src_addr = phys_addr; | ||
245 | dma_dst_addr = host->io_phys; | ||
246 | } | ||
247 | |||
248 | tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr, | ||
249 | dma_src_addr, len, flags); | ||
250 | if (!tx) { | ||
251 | dev_err(host->dev, "Failed to prepare DMA memcpy\n"); | ||
252 | goto err_dma; | ||
253 | } | ||
254 | |||
255 | init_completion(&host->comp); | ||
256 | tx->callback = dma_complete_func; | ||
257 | tx->callback_param = &host->comp; | ||
258 | |||
259 | cookie = tx->tx_submit(tx); | ||
260 | if (dma_submit_error(cookie)) { | ||
261 | dev_err(host->dev, "Failed to do DMA tx_submit\n"); | ||
262 | goto err_dma; | ||
263 | } | ||
264 | |||
265 | dma_async_issue_pending(host->dma_chan); | ||
266 | wait_for_completion(&host->comp); | ||
267 | |||
268 | err = 0; | ||
269 | |||
270 | err_dma: | ||
271 | dma_unmap_single(dma_dev->dev, phys_addr, len, dir); | ||
272 | err_buf: | ||
273 | if (err != 0) | ||
274 | dev_warn(host->dev, "Fall back to CPU I/O\n"); | ||
275 | return err; | ||
276 | } | ||
277 | |||
278 | static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) | ||
279 | { | ||
280 | struct nand_chip *chip = mtd->priv; | ||
281 | struct atmel_nand_host *host = chip->priv; | ||
282 | |||
283 | if (use_dma && len >= mtd->oobsize) | ||
284 | if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) | ||
285 | return; | ||
286 | |||
287 | if (host->board->bus_width_16) | ||
288 | atmel_read_buf16(mtd, buf, len); | ||
289 | else | ||
290 | atmel_read_buf8(mtd, buf, len); | ||
291 | } | ||
292 | |||
293 | static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) | ||
294 | { | ||
295 | struct nand_chip *chip = mtd->priv; | ||
296 | struct atmel_nand_host *host = chip->priv; | ||
297 | |||
298 | if (use_dma && len >= mtd->oobsize) | ||
299 | if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) | ||
300 | return; | ||
301 | |||
302 | if (host->board->bus_width_16) | ||
303 | atmel_write_buf16(mtd, buf, len); | ||
304 | else | ||
305 | atmel_write_buf8(mtd, buf, len); | ||
306 | } | ||
307 | |||
181 | /* | 308 | /* |
182 | * Calculate HW ECC | 309 | * Calculate HW ECC |
183 | * | 310 | * |
@@ -398,6 +525,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
398 | return -ENOMEM; | 525 | return -ENOMEM; |
399 | } | 526 | } |
400 | 527 | ||
528 | host->io_phys = (dma_addr_t)mem->start; | ||
529 | |||
401 | host->io_base = ioremap(mem->start, mem->end - mem->start + 1); | 530 | host->io_base = ioremap(mem->start, mem->end - mem->start + 1); |
402 | if (host->io_base == NULL) { | 531 | if (host->io_base == NULL) { |
403 | printk(KERN_ERR "atmel_nand: ioremap failed\n"); | 532 | printk(KERN_ERR "atmel_nand: ioremap failed\n"); |
@@ -448,14 +577,11 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
448 | 577 | ||
449 | nand_chip->chip_delay = 20; /* 20us command delay time */ | 578 | nand_chip->chip_delay = 20; /* 20us command delay time */ |
450 | 579 | ||
451 | if (host->board->bus_width_16) { /* 16-bit bus width */ | 580 | if (host->board->bus_width_16) /* 16-bit bus width */ |
452 | nand_chip->options |= NAND_BUSWIDTH_16; | 581 | nand_chip->options |= NAND_BUSWIDTH_16; |
453 | nand_chip->read_buf = atmel_read_buf16; | 582 | |
454 | nand_chip->write_buf = atmel_write_buf16; | 583 | nand_chip->read_buf = atmel_read_buf; |
455 | } else { | 584 | nand_chip->write_buf = atmel_write_buf; |
456 | nand_chip->read_buf = atmel_read_buf; | ||
457 | nand_chip->write_buf = atmel_write_buf; | ||
458 | } | ||
459 | 585 | ||
460 | platform_set_drvdata(pdev, host); | 586 | platform_set_drvdata(pdev, host); |
461 | atmel_nand_enable(host); | 587 | atmel_nand_enable(host); |
@@ -473,6 +599,22 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
473 | nand_chip->options |= NAND_USE_FLASH_BBT; | 599 | nand_chip->options |= NAND_USE_FLASH_BBT; |
474 | } | 600 | } |
475 | 601 | ||
602 | if (cpu_has_dma() && use_dma) { | ||
603 | dma_cap_mask_t mask; | ||
604 | |||
605 | dma_cap_zero(mask); | ||
606 | dma_cap_set(DMA_MEMCPY, mask); | ||
607 | host->dma_chan = dma_request_channel(mask, 0, NULL); | ||
608 | if (!host->dma_chan) { | ||
609 | dev_err(host->dev, "Failed to request DMA channel\n"); | ||
610 | use_dma = 0; | ||
611 | } | ||
612 | } | ||
613 | if (use_dma) | ||
614 | dev_info(host->dev, "Using DMA for NAND access.\n"); | ||
615 | else | ||
616 | dev_info(host->dev, "No DMA support for NAND access.\n"); | ||
617 | |||
476 | /* first scan to find the device and get the page size */ | 618 | /* first scan to find the device and get the page size */ |
477 | if (nand_scan_ident(mtd, 1, NULL)) { | 619 | if (nand_scan_ident(mtd, 1, NULL)) { |
478 | res = -ENXIO; | 620 | res = -ENXIO; |
@@ -555,6 +697,8 @@ err_scan_ident: | |||
555 | err_no_card: | 697 | err_no_card: |
556 | atmel_nand_disable(host); | 698 | atmel_nand_disable(host); |
557 | platform_set_drvdata(pdev, NULL); | 699 | platform_set_drvdata(pdev, NULL); |
700 | if (host->dma_chan) | ||
701 | dma_release_channel(host->dma_chan); | ||
558 | if (host->ecc) | 702 | if (host->ecc) |
559 | iounmap(host->ecc); | 703 | iounmap(host->ecc); |
560 | err_ecc_ioremap: | 704 | err_ecc_ioremap: |
@@ -578,6 +722,10 @@ static int __exit atmel_nand_remove(struct platform_device *pdev) | |||
578 | 722 | ||
579 | if (host->ecc) | 723 | if (host->ecc) |
580 | iounmap(host->ecc); | 724 | iounmap(host->ecc); |
725 | |||
726 | if (host->dma_chan) | ||
727 | dma_release_channel(host->dma_chan); | ||
728 | |||
581 | iounmap(host->io_base); | 729 | iounmap(host->io_base); |
582 | kfree(host); | 730 | kfree(host); |
583 | 731 | ||
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index a90fde3ede28..aff3468867ac 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c | |||
@@ -37,9 +37,6 @@ | |||
37 | #include <mach/nand.h> | 37 | #include <mach/nand.h> |
38 | #include <mach/aemif.h> | 38 | #include <mach/aemif.h> |
39 | 39 | ||
40 | #include <asm/mach-types.h> | ||
41 | |||
42 | |||
43 | /* | 40 | /* |
44 | * This is a device driver for the NAND flash controller found on the | 41 | * This is a device driver for the NAND flash controller found on the |
45 | * various DaVinci family chips. It handles up to four SoC chipselects, | 42 | * various DaVinci family chips. It handles up to four SoC chipselects, |
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index c2f95437e5e9..0b81b5b499d1 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ b/drivers/mtd/nand/mpc5121_nfc.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/clk.h> | 29 | #include <linux/clk.h> |
30 | #include <linux/gfp.h> | 30 | #include <linux/gfp.h> |
31 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
32 | #include <linux/err.h> | ||
32 | #include <linux/init.h> | 33 | #include <linux/init.h> |
33 | #include <linux/interrupt.h> | 34 | #include <linux/interrupt.h> |
34 | #include <linux/io.h> | 35 | #include <linux/io.h> |
@@ -757,9 +758,9 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op) | |||
757 | 758 | ||
758 | /* Enable NFC clock */ | 759 | /* Enable NFC clock */ |
759 | prv->clk = clk_get(dev, "nfc_clk"); | 760 | prv->clk = clk_get(dev, "nfc_clk"); |
760 | if (!prv->clk) { | 761 | if (IS_ERR(prv->clk)) { |
761 | dev_err(dev, "Unable to acquire NFC clock!\n"); | 762 | dev_err(dev, "Unable to acquire NFC clock!\n"); |
762 | retval = -ENODEV; | 763 | retval = PTR_ERR(prv->clk); |
763 | goto error; | 764 | goto error; |
764 | } | 765 | } |
765 | 766 | ||
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 5ae1d9ee2cf1..42a95fb41504 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c | |||
@@ -211,6 +211,31 @@ static struct nand_ecclayout nandv2_hw_eccoob_largepage = { | |||
211 | } | 211 | } |
212 | }; | 212 | }; |
213 | 213 | ||
214 | /* OOB description for 4096 byte pages with 128 byte OOB */ | ||
215 | static struct nand_ecclayout nandv2_hw_eccoob_4k = { | ||
216 | .eccbytes = 8 * 9, | ||
217 | .eccpos = { | ||
218 | 7, 8, 9, 10, 11, 12, 13, 14, 15, | ||
219 | 23, 24, 25, 26, 27, 28, 29, 30, 31, | ||
220 | 39, 40, 41, 42, 43, 44, 45, 46, 47, | ||
221 | 55, 56, 57, 58, 59, 60, 61, 62, 63, | ||
222 | 71, 72, 73, 74, 75, 76, 77, 78, 79, | ||
223 | 87, 88, 89, 90, 91, 92, 93, 94, 95, | ||
224 | 103, 104, 105, 106, 107, 108, 109, 110, 111, | ||
225 | 119, 120, 121, 122, 123, 124, 125, 126, 127, | ||
226 | }, | ||
227 | .oobfree = { | ||
228 | {.offset = 2, .length = 4}, | ||
229 | {.offset = 16, .length = 7}, | ||
230 | {.offset = 32, .length = 7}, | ||
231 | {.offset = 48, .length = 7}, | ||
232 | {.offset = 64, .length = 7}, | ||
233 | {.offset = 80, .length = 7}, | ||
234 | {.offset = 96, .length = 7}, | ||
235 | {.offset = 112, .length = 7}, | ||
236 | } | ||
237 | }; | ||
238 | |||
214 | #ifdef CONFIG_MTD_PARTITIONS | 239 | #ifdef CONFIG_MTD_PARTITIONS |
215 | static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; | 240 | static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; |
216 | #endif | 241 | #endif |
@@ -641,9 +666,9 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) | |||
641 | 666 | ||
642 | n = min(n, len); | 667 | n = min(n, len); |
643 | 668 | ||
644 | memcpy(buf, host->data_buf + col, len); | 669 | memcpy(buf, host->data_buf + col, n); |
645 | 670 | ||
646 | host->buf_start += len; | 671 | host->buf_start += n; |
647 | } | 672 | } |
648 | 673 | ||
649 | /* Used by the upper layer to verify the data in NAND Flash | 674 | /* Used by the upper layer to verify the data in NAND Flash |
@@ -1185,6 +1210,8 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
1185 | 1210 | ||
1186 | if (mtd->writesize == 2048) | 1211 | if (mtd->writesize == 2048) |
1187 | this->ecc.layout = oob_largepage; | 1212 | this->ecc.layout = oob_largepage; |
1213 | if (nfc_is_v21() && mtd->writesize == 4096) | ||
1214 | this->ecc.layout = &nandv2_hw_eccoob_4k; | ||
1188 | 1215 | ||
1189 | /* second phase scan */ | 1216 | /* second phase scan */ |
1190 | if (nand_scan_tail(mtd)) { | 1217 | if (nand_scan_tail(mtd)) { |
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index a9c6ce745767..85cfc061d41c 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/mtd/mtd.h> | 42 | #include <linux/mtd/mtd.h> |
43 | #include <linux/mtd/nand.h> | 43 | #include <linux/mtd/nand.h> |
44 | #include <linux/mtd/nand_ecc.h> | 44 | #include <linux/mtd/nand_ecc.h> |
45 | #include <linux/mtd/nand_bch.h> | ||
45 | #include <linux/interrupt.h> | 46 | #include <linux/interrupt.h> |
46 | #include <linux/bitops.h> | 47 | #include <linux/bitops.h> |
47 | #include <linux/leds.h> | 48 | #include <linux/leds.h> |
@@ -2377,7 +2378,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, | |||
2377 | return -EINVAL; | 2378 | return -EINVAL; |
2378 | } | 2379 | } |
2379 | 2380 | ||
2380 | /* Do not allow reads past end of device */ | 2381 | /* Do not allow write past end of device */ |
2381 | if (unlikely(to >= mtd->size || | 2382 | if (unlikely(to >= mtd->size || |
2382 | ops->ooboffs + ops->ooblen > | 2383 | ops->ooboffs + ops->ooblen > |
2383 | ((mtd->size >> chip->page_shift) - | 2384 | ((mtd->size >> chip->page_shift) - |
@@ -3248,7 +3249,7 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3248 | /* | 3249 | /* |
3249 | * If no default placement scheme is given, select an appropriate one | 3250 | * If no default placement scheme is given, select an appropriate one |
3250 | */ | 3251 | */ |
3251 | if (!chip->ecc.layout) { | 3252 | if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) { |
3252 | switch (mtd->oobsize) { | 3253 | switch (mtd->oobsize) { |
3253 | case 8: | 3254 | case 8: |
3254 | chip->ecc.layout = &nand_oob_8; | 3255 | chip->ecc.layout = &nand_oob_8; |
@@ -3351,6 +3352,40 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3351 | chip->ecc.bytes = 3; | 3352 | chip->ecc.bytes = 3; |
3352 | break; | 3353 | break; |
3353 | 3354 | ||
3355 | case NAND_ECC_SOFT_BCH: | ||
3356 | if (!mtd_nand_has_bch()) { | ||
3357 | printk(KERN_WARNING "CONFIG_MTD_ECC_BCH not enabled\n"); | ||
3358 | BUG(); | ||
3359 | } | ||
3360 | chip->ecc.calculate = nand_bch_calculate_ecc; | ||
3361 | chip->ecc.correct = nand_bch_correct_data; | ||
3362 | chip->ecc.read_page = nand_read_page_swecc; | ||
3363 | chip->ecc.read_subpage = nand_read_subpage; | ||
3364 | chip->ecc.write_page = nand_write_page_swecc; | ||
3365 | chip->ecc.read_page_raw = nand_read_page_raw; | ||
3366 | chip->ecc.write_page_raw = nand_write_page_raw; | ||
3367 | chip->ecc.read_oob = nand_read_oob_std; | ||
3368 | chip->ecc.write_oob = nand_write_oob_std; | ||
3369 | /* | ||
3370 | * Board driver should supply ecc.size and ecc.bytes values to | ||
3371 | * select how many bits are correctable; see nand_bch_init() | ||
3372 | * for details. | ||
3373 | * Otherwise, default to 4 bits for large page devices | ||
3374 | */ | ||
3375 | if (!chip->ecc.size && (mtd->oobsize >= 64)) { | ||
3376 | chip->ecc.size = 512; | ||
3377 | chip->ecc.bytes = 7; | ||
3378 | } | ||
3379 | chip->ecc.priv = nand_bch_init(mtd, | ||
3380 | chip->ecc.size, | ||
3381 | chip->ecc.bytes, | ||
3382 | &chip->ecc.layout); | ||
3383 | if (!chip->ecc.priv) { | ||
3384 | printk(KERN_WARNING "BCH ECC initialization failed!\n"); | ||
3385 | BUG(); | ||
3386 | } | ||
3387 | break; | ||
3388 | |||
3354 | case NAND_ECC_NONE: | 3389 | case NAND_ECC_NONE: |
3355 | printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. " | 3390 | printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. " |
3356 | "This is not recommended !!\n"); | 3391 | "This is not recommended !!\n"); |
@@ -3501,6 +3536,9 @@ void nand_release(struct mtd_info *mtd) | |||
3501 | { | 3536 | { |
3502 | struct nand_chip *chip = mtd->priv; | 3537 | struct nand_chip *chip = mtd->priv; |
3503 | 3538 | ||
3539 | if (chip->ecc.mode == NAND_ECC_SOFT_BCH) | ||
3540 | nand_bch_free((struct nand_bch_control *)chip->ecc.priv); | ||
3541 | |||
3504 | #ifdef CONFIG_MTD_PARTITIONS | 3542 | #ifdef CONFIG_MTD_PARTITIONS |
3505 | /* Deregister partitions */ | 3543 | /* Deregister partitions */ |
3506 | del_mtd_partitions(mtd); | 3544 | del_mtd_partitions(mtd); |
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 6ebd869993aa..a1e8b30078d9 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c | |||
@@ -1101,12 +1101,16 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) | |||
1101 | static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd) | 1101 | static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd) |
1102 | { | 1102 | { |
1103 | struct nand_chip *this = mtd->priv; | 1103 | struct nand_chip *this = mtd->priv; |
1104 | u32 pattern_len = bd->len; | 1104 | u32 pattern_len; |
1105 | u32 bits = bd->options & NAND_BBT_NRBITS_MSK; | 1105 | u32 bits; |
1106 | u32 table_size; | 1106 | u32 table_size; |
1107 | 1107 | ||
1108 | if (!bd) | 1108 | if (!bd) |
1109 | return; | 1109 | return; |
1110 | |||
1111 | pattern_len = bd->len; | ||
1112 | bits = bd->options & NAND_BBT_NRBITS_MSK; | ||
1113 | |||
1110 | BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) && | 1114 | BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) && |
1111 | !(this->options & NAND_USE_FLASH_BBT)); | 1115 | !(this->options & NAND_USE_FLASH_BBT)); |
1112 | BUG_ON(!bits); | 1116 | BUG_ON(!bits); |
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c new file mode 100644 index 000000000000..0f931e757116 --- /dev/null +++ b/drivers/mtd/nand/nand_bch.c | |||
@@ -0,0 +1,243 @@ | |||
1 | /* | ||
2 | * This file provides ECC correction for more than 1 bit per block of data, | ||
3 | * using binary BCH codes. It relies on the generic BCH library lib/bch.c. | ||
4 | * | ||
5 | * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com> | ||
6 | * | ||
7 | * This file is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the | ||
9 | * Free Software Foundation; either version 2 or (at your option) any | ||
10 | * later version. | ||
11 | * | ||
12 | * This file is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
15 | * for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this file; if not, write to the Free Software Foundation, Inc., | ||
19 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/types.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/bitops.h> | ||
27 | #include <linux/mtd/mtd.h> | ||
28 | #include <linux/mtd/nand.h> | ||
29 | #include <linux/mtd/nand_bch.h> | ||
30 | #include <linux/bch.h> | ||
31 | |||
32 | /** | ||
33 | * struct nand_bch_control - private NAND BCH control structure | ||
34 | * @bch: BCH control structure | ||
35 | * @ecclayout: private ecc layout for this BCH configuration | ||
36 | * @errloc: error location array | ||
37 | * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid | ||
38 | */ | ||
39 | struct nand_bch_control { | ||
40 | struct bch_control *bch; | ||
41 | struct nand_ecclayout ecclayout; | ||
42 | unsigned int *errloc; | ||
43 | unsigned char *eccmask; | ||
44 | }; | ||
45 | |||
46 | /** | ||
47 | * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block | ||
48 | * @mtd: MTD block structure | ||
49 | * @buf: input buffer with raw data | ||
50 | * @code: output buffer with ECC | ||
51 | */ | ||
52 | int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, | ||
53 | unsigned char *code) | ||
54 | { | ||
55 | const struct nand_chip *chip = mtd->priv; | ||
56 | struct nand_bch_control *nbc = chip->ecc.priv; | ||
57 | unsigned int i; | ||
58 | |||
59 | memset(code, 0, chip->ecc.bytes); | ||
60 | encode_bch(nbc->bch, buf, chip->ecc.size, code); | ||
61 | |||
62 | /* apply mask so that an erased page is a valid codeword */ | ||
63 | for (i = 0; i < chip->ecc.bytes; i++) | ||
64 | code[i] ^= nbc->eccmask[i]; | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | EXPORT_SYMBOL(nand_bch_calculate_ecc); | ||
69 | |||
70 | /** | ||
71 | * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s) | ||
72 | * @mtd: MTD block structure | ||
73 | * @buf: raw data read from the chip | ||
74 | * @read_ecc: ECC from the chip | ||
75 | * @calc_ecc: the ECC calculated from raw data | ||
76 | * | ||
77 | * Detect and correct bit errors for a data byte block | ||
78 | */ | ||
79 | int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, | ||
80 | unsigned char *read_ecc, unsigned char *calc_ecc) | ||
81 | { | ||
82 | const struct nand_chip *chip = mtd->priv; | ||
83 | struct nand_bch_control *nbc = chip->ecc.priv; | ||
84 | unsigned int *errloc = nbc->errloc; | ||
85 | int i, count; | ||
86 | |||
87 | count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc, | ||
88 | NULL, errloc); | ||
89 | if (count > 0) { | ||
90 | for (i = 0; i < count; i++) { | ||
91 | if (errloc[i] < (chip->ecc.size*8)) | ||
92 | /* error is located in data, correct it */ | ||
93 | buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); | ||
94 | /* else error in ecc, no action needed */ | ||
95 | |||
96 | DEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n", | ||
97 | __func__, errloc[i]); | ||
98 | } | ||
99 | } else if (count < 0) { | ||
100 | printk(KERN_ERR "ecc unrecoverable error\n"); | ||
101 | count = -1; | ||
102 | } | ||
103 | return count; | ||
104 | } | ||
105 | EXPORT_SYMBOL(nand_bch_correct_data); | ||
106 | |||
107 | /** | ||
108 | * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction | ||
109 | * @mtd: MTD block structure | ||
110 | * @eccsize: ecc block size in bytes | ||
111 | * @eccbytes: ecc length in bytes | ||
112 | * @ecclayout: output default layout | ||
113 | * | ||
114 | * Returns: | ||
115 | * a pointer to a new NAND BCH control structure, or NULL upon failure | ||
116 | * | ||
117 | * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes | ||
118 | * are used to compute BCH parameters m (Galois field order) and t (error | ||
119 | * correction capability). @eccbytes should be equal to the number of bytes | ||
120 | * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8. | ||
121 | * | ||
122 | * Example: to configure 4 bit correction per 512 bytes, you should pass | ||
123 | * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8) | ||
124 | * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits) | ||
125 | */ | ||
126 | struct nand_bch_control * | ||
127 | nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes, | ||
128 | struct nand_ecclayout **ecclayout) | ||
129 | { | ||
130 | unsigned int m, t, eccsteps, i; | ||
131 | struct nand_ecclayout *layout; | ||
132 | struct nand_bch_control *nbc = NULL; | ||
133 | unsigned char *erased_page; | ||
134 | |||
135 | if (!eccsize || !eccbytes) { | ||
136 | printk(KERN_WARNING "ecc parameters not supplied\n"); | ||
137 | goto fail; | ||
138 | } | ||
139 | |||
140 | m = fls(1+8*eccsize); | ||
141 | t = (eccbytes*8)/m; | ||
142 | |||
143 | nbc = kzalloc(sizeof(*nbc), GFP_KERNEL); | ||
144 | if (!nbc) | ||
145 | goto fail; | ||
146 | |||
147 | nbc->bch = init_bch(m, t, 0); | ||
148 | if (!nbc->bch) | ||
149 | goto fail; | ||
150 | |||
151 | /* verify that eccbytes has the expected value */ | ||
152 | if (nbc->bch->ecc_bytes != eccbytes) { | ||
153 | printk(KERN_WARNING "invalid eccbytes %u, should be %u\n", | ||
154 | eccbytes, nbc->bch->ecc_bytes); | ||
155 | goto fail; | ||
156 | } | ||
157 | |||
158 | eccsteps = mtd->writesize/eccsize; | ||
159 | |||
160 | /* if no ecc placement scheme was provided, build one */ | ||
161 | if (!*ecclayout) { | ||
162 | |||
163 | /* handle large page devices only */ | ||
164 | if (mtd->oobsize < 64) { | ||
165 | printk(KERN_WARNING "must provide an oob scheme for " | ||
166 | "oobsize %d\n", mtd->oobsize); | ||
167 | goto fail; | ||
168 | } | ||
169 | |||
170 | layout = &nbc->ecclayout; | ||
171 | layout->eccbytes = eccsteps*eccbytes; | ||
172 | |||
173 | /* reserve 2 bytes for bad block marker */ | ||
174 | if (layout->eccbytes+2 > mtd->oobsize) { | ||
175 | printk(KERN_WARNING "no suitable oob scheme available " | ||
176 | "for oobsize %d eccbytes %u\n", mtd->oobsize, | ||
177 | eccbytes); | ||
178 | goto fail; | ||
179 | } | ||
180 | /* put ecc bytes at oob tail */ | ||
181 | for (i = 0; i < layout->eccbytes; i++) | ||
182 | layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i; | ||
183 | |||
184 | layout->oobfree[0].offset = 2; | ||
185 | layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes; | ||
186 | |||
187 | *ecclayout = layout; | ||
188 | } | ||
189 | |||
190 | /* sanity checks */ | ||
191 | if (8*(eccsize+eccbytes) >= (1 << m)) { | ||
192 | printk(KERN_WARNING "eccsize %u is too large\n", eccsize); | ||
193 | goto fail; | ||
194 | } | ||
195 | if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) { | ||
196 | printk(KERN_WARNING "invalid ecc layout\n"); | ||
197 | goto fail; | ||
198 | } | ||
199 | |||
200 | nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL); | ||
201 | nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL); | ||
202 | if (!nbc->eccmask || !nbc->errloc) | ||
203 | goto fail; | ||
204 | /* | ||
205 | * compute and store the inverted ecc of an erased ecc block | ||
206 | */ | ||
207 | erased_page = kmalloc(eccsize, GFP_KERNEL); | ||
208 | if (!erased_page) | ||
209 | goto fail; | ||
210 | |||
211 | memset(erased_page, 0xff, eccsize); | ||
212 | memset(nbc->eccmask, 0, eccbytes); | ||
213 | encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask); | ||
214 | kfree(erased_page); | ||
215 | |||
216 | for (i = 0; i < eccbytes; i++) | ||
217 | nbc->eccmask[i] ^= 0xff; | ||
218 | |||
219 | return nbc; | ||
220 | fail: | ||
221 | nand_bch_free(nbc); | ||
222 | return NULL; | ||
223 | } | ||
224 | EXPORT_SYMBOL(nand_bch_init); | ||
225 | |||
226 | /** | ||
227 | * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources | ||
228 | * @nbc: NAND BCH control structure | ||
229 | */ | ||
230 | void nand_bch_free(struct nand_bch_control *nbc) | ||
231 | { | ||
232 | if (nbc) { | ||
233 | free_bch(nbc->bch); | ||
234 | kfree(nbc->errloc); | ||
235 | kfree(nbc->eccmask); | ||
236 | kfree(nbc); | ||
237 | } | ||
238 | } | ||
239 | EXPORT_SYMBOL(nand_bch_free); | ||
240 | |||
241 | MODULE_LICENSE("GPL"); | ||
242 | MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>"); | ||
243 | MODULE_DESCRIPTION("NAND software BCH ECC support"); | ||
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index a5aa99f014ba..213181be0d9a 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/string.h> | 34 | #include <linux/string.h> |
35 | #include <linux/mtd/mtd.h> | 35 | #include <linux/mtd/mtd.h> |
36 | #include <linux/mtd/nand.h> | 36 | #include <linux/mtd/nand.h> |
37 | #include <linux/mtd/nand_bch.h> | ||
37 | #include <linux/mtd/partitions.h> | 38 | #include <linux/mtd/partitions.h> |
38 | #include <linux/delay.h> | 39 | #include <linux/delay.h> |
39 | #include <linux/list.h> | 40 | #include <linux/list.h> |
@@ -108,6 +109,7 @@ static unsigned int rptwear = 0; | |||
108 | static unsigned int overridesize = 0; | 109 | static unsigned int overridesize = 0; |
109 | static char *cache_file = NULL; | 110 | static char *cache_file = NULL; |
110 | static unsigned int bbt; | 111 | static unsigned int bbt; |
112 | static unsigned int bch; | ||
111 | 113 | ||
112 | module_param(first_id_byte, uint, 0400); | 114 | module_param(first_id_byte, uint, 0400); |
113 | module_param(second_id_byte, uint, 0400); | 115 | module_param(second_id_byte, uint, 0400); |
@@ -132,6 +134,7 @@ module_param(rptwear, uint, 0400); | |||
132 | module_param(overridesize, uint, 0400); | 134 | module_param(overridesize, uint, 0400); |
133 | module_param(cache_file, charp, 0400); | 135 | module_param(cache_file, charp, 0400); |
134 | module_param(bbt, uint, 0400); | 136 | module_param(bbt, uint, 0400); |
137 | module_param(bch, uint, 0400); | ||
135 | 138 | ||
136 | MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)"); | 139 | MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)"); |
137 | MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); | 140 | MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); |
@@ -165,6 +168,8 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I | |||
165 | " e.g. 5 means a size of 32 erase blocks"); | 168 | " e.g. 5 means a size of 32 erase blocks"); |
166 | MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory"); | 169 | MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory"); |
167 | MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area"); | 170 | MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area"); |
171 | MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should " | ||
172 | "be correctable in 512-byte blocks"); | ||
168 | 173 | ||
169 | /* The largest possible page size */ | 174 | /* The largest possible page size */ |
170 | #define NS_LARGEST_PAGE_SIZE 4096 | 175 | #define NS_LARGEST_PAGE_SIZE 4096 |
@@ -2309,7 +2314,43 @@ static int __init ns_init_module(void) | |||
2309 | if ((retval = parse_gravepages()) != 0) | 2314 | if ((retval = parse_gravepages()) != 0) |
2310 | goto error; | 2315 | goto error; |
2311 | 2316 | ||
2312 | if ((retval = nand_scan(nsmtd, 1)) != 0) { | 2317 | retval = nand_scan_ident(nsmtd, 1, NULL); |
2318 | if (retval) { | ||
2319 | NS_ERR("cannot scan NAND Simulator device\n"); | ||
2320 | if (retval > 0) | ||
2321 | retval = -ENXIO; | ||
2322 | goto error; | ||
2323 | } | ||
2324 | |||
2325 | if (bch) { | ||
2326 | unsigned int eccsteps, eccbytes; | ||
2327 | if (!mtd_nand_has_bch()) { | ||
2328 | NS_ERR("BCH ECC support is disabled\n"); | ||
2329 | retval = -EINVAL; | ||
2330 | goto error; | ||
2331 | } | ||
2332 | /* use 512-byte ecc blocks */ | ||
2333 | eccsteps = nsmtd->writesize/512; | ||
2334 | eccbytes = (bch*13+7)/8; | ||
2335 | /* do not bother supporting small page devices */ | ||
2336 | if ((nsmtd->oobsize < 64) || !eccsteps) { | ||
2337 | NS_ERR("bch not available on small page devices\n"); | ||
2338 | retval = -EINVAL; | ||
2339 | goto error; | ||
2340 | } | ||
2341 | if ((eccbytes*eccsteps+2) > nsmtd->oobsize) { | ||
2342 | NS_ERR("invalid bch value %u\n", bch); | ||
2343 | retval = -EINVAL; | ||
2344 | goto error; | ||
2345 | } | ||
2346 | chip->ecc.mode = NAND_ECC_SOFT_BCH; | ||
2347 | chip->ecc.size = 512; | ||
2348 | chip->ecc.bytes = eccbytes; | ||
2349 | NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size); | ||
2350 | } | ||
2351 | |||
2352 | retval = nand_scan_tail(nsmtd); | ||
2353 | if (retval) { | ||
2313 | NS_ERR("can't register NAND Simulator\n"); | 2354 | NS_ERR("can't register NAND Simulator\n"); |
2314 | if (retval > 0) | 2355 | if (retval > 0) |
2315 | retval = -ENXIO; | 2356 | retval = -ENXIO; |
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 7b8f1fffc528..da9a351c9d79 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -668,6 +668,8 @@ static void gen_true_ecc(u8 *ecc_buf) | |||
668 | * | 668 | * |
669 | * This function compares two ECC's and indicates if there is an error. | 669 | * This function compares two ECC's and indicates if there is an error. |
670 | * If the error can be corrected it will be corrected to the buffer. | 670 | * If the error can be corrected it will be corrected to the buffer. |
671 | * If there is no error, %0 is returned. If there is an error but it | ||
672 | * was corrected, %1 is returned. Otherwise, %-1 is returned. | ||
671 | */ | 673 | */ |
672 | static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ | 674 | static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ |
673 | u8 *ecc_data2, /* read from register */ | 675 | u8 *ecc_data2, /* read from register */ |
@@ -773,7 +775,7 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ | |||
773 | 775 | ||
774 | page_data[find_byte] ^= (1 << find_bit); | 776 | page_data[find_byte] ^= (1 << find_bit); |
775 | 777 | ||
776 | return 0; | 778 | return 1; |
777 | default: | 779 | default: |
778 | if (isEccFF) { | 780 | if (isEccFF) { |
779 | if (ecc_data2[0] == 0 && | 781 | if (ecc_data2[0] == 0 && |
@@ -794,8 +796,11 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ | |||
794 | * @calc_ecc: ecc read from HW ECC registers | 796 | * @calc_ecc: ecc read from HW ECC registers |
795 | * | 797 | * |
796 | * Compares the ecc read from nand spare area with ECC registers values | 798 | * Compares the ecc read from nand spare area with ECC registers values |
797 | * and if ECC's mismached, it will call 'omap_compare_ecc' for error detection | 799 | * and if ECC's mismatched, it will call 'omap_compare_ecc' for error |
798 | * and correction. | 800 | * detection and correction. If there are no errors, %0 is returned. If |
801 | * there were errors and all of the errors were corrected, the number of | ||
802 | * corrected errors is returned. If uncorrectable errors exist, %-1 is | ||
803 | * returned. | ||
799 | */ | 804 | */ |
800 | static int omap_correct_data(struct mtd_info *mtd, u_char *dat, | 805 | static int omap_correct_data(struct mtd_info *mtd, u_char *dat, |
801 | u_char *read_ecc, u_char *calc_ecc) | 806 | u_char *read_ecc, u_char *calc_ecc) |
@@ -803,6 +808,7 @@ static int omap_correct_data(struct mtd_info *mtd, u_char *dat, | |||
803 | struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, | 808 | struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, |
804 | mtd); | 809 | mtd); |
805 | int blockCnt = 0, i = 0, ret = 0; | 810 | int blockCnt = 0, i = 0, ret = 0; |
811 | int stat = 0; | ||
806 | 812 | ||
807 | /* Ex NAND_ECC_HW12_2048 */ | 813 | /* Ex NAND_ECC_HW12_2048 */ |
808 | if ((info->nand.ecc.mode == NAND_ECC_HW) && | 814 | if ((info->nand.ecc.mode == NAND_ECC_HW) && |
@@ -816,12 +822,14 @@ static int omap_correct_data(struct mtd_info *mtd, u_char *dat, | |||
816 | ret = omap_compare_ecc(read_ecc, calc_ecc, dat); | 822 | ret = omap_compare_ecc(read_ecc, calc_ecc, dat); |
817 | if (ret < 0) | 823 | if (ret < 0) |
818 | return ret; | 824 | return ret; |
825 | /* keep track of the number of corrected errors */ | ||
826 | stat += ret; | ||
819 | } | 827 | } |
820 | read_ecc += 3; | 828 | read_ecc += 3; |
821 | calc_ecc += 3; | 829 | calc_ecc += 3; |
822 | dat += 512; | 830 | dat += 512; |
823 | } | 831 | } |
824 | return 0; | 832 | return stat; |
825 | } | 833 | } |
826 | 834 | ||
827 | /** | 835 | /** |
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index ea2c288df3f6..ab7f4c33ced6 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #include <plat/pxa3xx_nand.h> | 27 | #include <plat/pxa3xx_nand.h> |
28 | 28 | ||
29 | #define CHIP_DELAY_TIMEOUT (2 * HZ/10) | 29 | #define CHIP_DELAY_TIMEOUT (2 * HZ/10) |
30 | #define NAND_STOP_DELAY (2 * HZ/50) | ||
31 | #define PAGE_CHUNK_SIZE (2048) | ||
30 | 32 | ||
31 | /* registers and bit definitions */ | 33 | /* registers and bit definitions */ |
32 | #define NDCR (0x00) /* Control register */ | 34 | #define NDCR (0x00) /* Control register */ |
@@ -52,16 +54,18 @@ | |||
52 | #define NDCR_ND_MODE (0x3 << 21) | 54 | #define NDCR_ND_MODE (0x3 << 21) |
53 | #define NDCR_NAND_MODE (0x0) | 55 | #define NDCR_NAND_MODE (0x0) |
54 | #define NDCR_CLR_PG_CNT (0x1 << 20) | 56 | #define NDCR_CLR_PG_CNT (0x1 << 20) |
55 | #define NDCR_CLR_ECC (0x1 << 19) | 57 | #define NDCR_STOP_ON_UNCOR (0x1 << 19) |
56 | #define NDCR_RD_ID_CNT_MASK (0x7 << 16) | 58 | #define NDCR_RD_ID_CNT_MASK (0x7 << 16) |
57 | #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK) | 59 | #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK) |
58 | 60 | ||
59 | #define NDCR_RA_START (0x1 << 15) | 61 | #define NDCR_RA_START (0x1 << 15) |
60 | #define NDCR_PG_PER_BLK (0x1 << 14) | 62 | #define NDCR_PG_PER_BLK (0x1 << 14) |
61 | #define NDCR_ND_ARB_EN (0x1 << 12) | 63 | #define NDCR_ND_ARB_EN (0x1 << 12) |
64 | #define NDCR_INT_MASK (0xFFF) | ||
62 | 65 | ||
63 | #define NDSR_MASK (0xfff) | 66 | #define NDSR_MASK (0xfff) |
64 | #define NDSR_RDY (0x1 << 11) | 67 | #define NDSR_RDY (0x1 << 12) |
68 | #define NDSR_FLASH_RDY (0x1 << 11) | ||
65 | #define NDSR_CS0_PAGED (0x1 << 10) | 69 | #define NDSR_CS0_PAGED (0x1 << 10) |
66 | #define NDSR_CS1_PAGED (0x1 << 9) | 70 | #define NDSR_CS1_PAGED (0x1 << 9) |
67 | #define NDSR_CS0_CMDD (0x1 << 8) | 71 | #define NDSR_CS0_CMDD (0x1 << 8) |
@@ -74,6 +78,7 @@ | |||
74 | #define NDSR_RDDREQ (0x1 << 1) | 78 | #define NDSR_RDDREQ (0x1 << 1) |
75 | #define NDSR_WRCMDREQ (0x1) | 79 | #define NDSR_WRCMDREQ (0x1) |
76 | 80 | ||
81 | #define NDCB0_ST_ROW_EN (0x1 << 26) | ||
77 | #define NDCB0_AUTO_RS (0x1 << 25) | 82 | #define NDCB0_AUTO_RS (0x1 << 25) |
78 | #define NDCB0_CSEL (0x1 << 24) | 83 | #define NDCB0_CSEL (0x1 << 24) |
79 | #define NDCB0_CMD_TYPE_MASK (0x7 << 21) | 84 | #define NDCB0_CMD_TYPE_MASK (0x7 << 21) |
@@ -104,18 +109,21 @@ enum { | |||
104 | }; | 109 | }; |
105 | 110 | ||
106 | enum { | 111 | enum { |
107 | STATE_READY = 0, | 112 | STATE_IDLE = 0, |
108 | STATE_CMD_HANDLE, | 113 | STATE_CMD_HANDLE, |
109 | STATE_DMA_READING, | 114 | STATE_DMA_READING, |
110 | STATE_DMA_WRITING, | 115 | STATE_DMA_WRITING, |
111 | STATE_DMA_DONE, | 116 | STATE_DMA_DONE, |
112 | STATE_PIO_READING, | 117 | STATE_PIO_READING, |
113 | STATE_PIO_WRITING, | 118 | STATE_PIO_WRITING, |
119 | STATE_CMD_DONE, | ||
120 | STATE_READY, | ||
114 | }; | 121 | }; |
115 | 122 | ||
116 | struct pxa3xx_nand_info { | 123 | struct pxa3xx_nand_info { |
117 | struct nand_chip nand_chip; | 124 | struct nand_chip nand_chip; |
118 | 125 | ||
126 | struct nand_hw_control controller; | ||
119 | struct platform_device *pdev; | 127 | struct platform_device *pdev; |
120 | struct pxa3xx_nand_cmdset *cmdset; | 128 | struct pxa3xx_nand_cmdset *cmdset; |
121 | 129 | ||
@@ -126,6 +134,7 @@ struct pxa3xx_nand_info { | |||
126 | unsigned int buf_start; | 134 | unsigned int buf_start; |
127 | unsigned int buf_count; | 135 | unsigned int buf_count; |
128 | 136 | ||
137 | struct mtd_info *mtd; | ||
129 | /* DMA information */ | 138 | /* DMA information */ |
130 | int drcmr_dat; | 139 | int drcmr_dat; |
131 | int drcmr_cmd; | 140 | int drcmr_cmd; |
@@ -149,6 +158,7 @@ struct pxa3xx_nand_info { | |||
149 | 158 | ||
150 | int use_ecc; /* use HW ECC ? */ | 159 | int use_ecc; /* use HW ECC ? */ |
151 | int use_dma; /* use DMA ? */ | 160 | int use_dma; /* use DMA ? */ |
161 | int is_ready; | ||
152 | 162 | ||
153 | unsigned int page_size; /* page size of attached chip */ | 163 | unsigned int page_size; /* page size of attached chip */ |
154 | unsigned int data_size; /* data size in FIFO */ | 164 | unsigned int data_size; /* data size in FIFO */ |
@@ -201,20 +211,22 @@ static struct pxa3xx_nand_timing timing[] = { | |||
201 | }; | 211 | }; |
202 | 212 | ||
203 | static struct pxa3xx_nand_flash builtin_flash_types[] = { | 213 | static struct pxa3xx_nand_flash builtin_flash_types[] = { |
204 | { 0, 0, 2048, 8, 8, 0, &default_cmdset, &timing[0] }, | 214 | { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] }, |
205 | { 0x46ec, 32, 512, 16, 16, 4096, &default_cmdset, &timing[1] }, | 215 | { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] }, |
206 | { 0xdaec, 64, 2048, 8, 8, 2048, &default_cmdset, &timing[1] }, | 216 | { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] }, |
207 | { 0xd7ec, 128, 4096, 8, 8, 8192, &default_cmdset, &timing[1] }, | 217 | { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] }, |
208 | { 0xa12c, 64, 2048, 8, 8, 1024, &default_cmdset, &timing[2] }, | 218 | { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] }, |
209 | { 0xb12c, 64, 2048, 16, 16, 1024, &default_cmdset, &timing[2] }, | 219 | { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] }, |
210 | { 0xdc2c, 64, 2048, 8, 8, 4096, &default_cmdset, &timing[2] }, | 220 | { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] }, |
211 | { 0xcc2c, 64, 2048, 16, 16, 4096, &default_cmdset, &timing[2] }, | 221 | { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] }, |
212 | { 0xba20, 64, 2048, 16, 16, 2048, &default_cmdset, &timing[3] }, | 222 | { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] }, |
213 | }; | 223 | }; |
214 | 224 | ||
215 | /* Define a default flash type setting serve as flash detecting only */ | 225 | /* Define a default flash type setting serve as flash detecting only */ |
216 | #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) | 226 | #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) |
217 | 227 | ||
228 | const char *mtd_names[] = {"pxa3xx_nand-0", NULL}; | ||
229 | |||
218 | #define NDTR0_tCH(c) (min((c), 7) << 19) | 230 | #define NDTR0_tCH(c) (min((c), 7) << 19) |
219 | #define NDTR0_tCS(c) (min((c), 7) << 16) | 231 | #define NDTR0_tCS(c) (min((c), 7) << 16) |
220 | #define NDTR0_tWH(c) (min((c), 7) << 11) | 232 | #define NDTR0_tWH(c) (min((c), 7) << 11) |
@@ -252,25 +264,6 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, | |||
252 | nand_writel(info, NDTR1CS0, ndtr1); | 264 | nand_writel(info, NDTR1CS0, ndtr1); |
253 | } | 265 | } |
254 | 266 | ||
255 | #define WAIT_EVENT_TIMEOUT 10 | ||
256 | |||
257 | static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event) | ||
258 | { | ||
259 | int timeout = WAIT_EVENT_TIMEOUT; | ||
260 | uint32_t ndsr; | ||
261 | |||
262 | while (timeout--) { | ||
263 | ndsr = nand_readl(info, NDSR) & NDSR_MASK; | ||
264 | if (ndsr & event) { | ||
265 | nand_writel(info, NDSR, ndsr); | ||
266 | return 0; | ||
267 | } | ||
268 | udelay(10); | ||
269 | } | ||
270 | |||
271 | return -ETIMEDOUT; | ||
272 | } | ||
273 | |||
274 | static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) | 267 | static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) |
275 | { | 268 | { |
276 | int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; | 269 | int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; |
@@ -291,69 +284,45 @@ static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) | |||
291 | } | 284 | } |
292 | } | 285 | } |
293 | 286 | ||
294 | static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info, | 287 | /** |
295 | uint16_t cmd, int column, int page_addr) | 288 | * NOTE: it is a must to set ND_RUN firstly, then write |
289 | * command buffer, otherwise, it does not work. | ||
290 | * We enable all the interrupt at the same time, and | ||
291 | * let pxa3xx_nand_irq to handle all logic. | ||
292 | */ | ||
293 | static void pxa3xx_nand_start(struct pxa3xx_nand_info *info) | ||
296 | { | 294 | { |
297 | const struct pxa3xx_nand_cmdset *cmdset = info->cmdset; | 295 | uint32_t ndcr; |
298 | pxa3xx_set_datasize(info); | ||
299 | |||
300 | /* generate values for NDCBx registers */ | ||
301 | info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); | ||
302 | info->ndcb1 = 0; | ||
303 | info->ndcb2 = 0; | ||
304 | info->ndcb0 |= NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles); | ||
305 | |||
306 | if (info->col_addr_cycles == 2) { | ||
307 | /* large block, 2 cycles for column address | ||
308 | * row address starts from 3rd cycle | ||
309 | */ | ||
310 | info->ndcb1 |= page_addr << 16; | ||
311 | if (info->row_addr_cycles == 3) | ||
312 | info->ndcb2 = (page_addr >> 16) & 0xff; | ||
313 | } else | ||
314 | /* small block, 1 cycles for column address | ||
315 | * row address starts from 2nd cycle | ||
316 | */ | ||
317 | info->ndcb1 = page_addr << 8; | ||
318 | |||
319 | if (cmd == cmdset->program) | ||
320 | info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS; | ||
321 | 296 | ||
322 | return 0; | 297 | ndcr = info->reg_ndcr; |
323 | } | 298 | ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; |
299 | ndcr |= info->use_dma ? NDCR_DMA_EN : 0; | ||
300 | ndcr |= NDCR_ND_RUN; | ||
324 | 301 | ||
325 | static int prepare_erase_cmd(struct pxa3xx_nand_info *info, | 302 | /* clear status bits and run */ |
326 | uint16_t cmd, int page_addr) | 303 | nand_writel(info, NDCR, 0); |
327 | { | 304 | nand_writel(info, NDSR, NDSR_MASK); |
328 | info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); | 305 | nand_writel(info, NDCR, ndcr); |
329 | info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3); | ||
330 | info->ndcb1 = page_addr; | ||
331 | info->ndcb2 = 0; | ||
332 | return 0; | ||
333 | } | 306 | } |
334 | 307 | ||
335 | static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd) | 308 | static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info) |
336 | { | 309 | { |
337 | const struct pxa3xx_nand_cmdset *cmdset = info->cmdset; | 310 | uint32_t ndcr; |
338 | 311 | int timeout = NAND_STOP_DELAY; | |
339 | info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); | ||
340 | info->ndcb1 = 0; | ||
341 | info->ndcb2 = 0; | ||
342 | 312 | ||
343 | info->oob_size = 0; | 313 | /* wait RUN bit in NDCR become 0 */ |
344 | if (cmd == cmdset->read_id) { | 314 | ndcr = nand_readl(info, NDCR); |
345 | info->ndcb0 |= NDCB0_CMD_TYPE(3); | 315 | while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) { |
346 | info->data_size = 8; | 316 | ndcr = nand_readl(info, NDCR); |
347 | } else if (cmd == cmdset->read_status) { | 317 | udelay(1); |
348 | info->ndcb0 |= NDCB0_CMD_TYPE(4); | 318 | } |
349 | info->data_size = 8; | ||
350 | } else if (cmd == cmdset->reset || cmd == cmdset->lock || | ||
351 | cmd == cmdset->unlock) { | ||
352 | info->ndcb0 |= NDCB0_CMD_TYPE(5); | ||
353 | } else | ||
354 | return -EINVAL; | ||
355 | 319 | ||
356 | return 0; | 320 | if (timeout <= 0) { |
321 | ndcr &= ~NDCR_ND_RUN; | ||
322 | nand_writel(info, NDCR, ndcr); | ||
323 | } | ||
324 | /* clear status bits */ | ||
325 | nand_writel(info, NDSR, NDSR_MASK); | ||
357 | } | 326 | } |
358 | 327 | ||
359 | static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) | 328 | static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) |
@@ -372,39 +341,8 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) | |||
372 | nand_writel(info, NDCR, ndcr | int_mask); | 341 | nand_writel(info, NDCR, ndcr | int_mask); |
373 | } | 342 | } |
374 | 343 | ||
375 | /* NOTE: it is a must to set ND_RUN firstly, then write command buffer | 344 | static void handle_data_pio(struct pxa3xx_nand_info *info) |
376 | * otherwise, it does not work | ||
377 | */ | ||
378 | static int write_cmd(struct pxa3xx_nand_info *info) | ||
379 | { | 345 | { |
380 | uint32_t ndcr; | ||
381 | |||
382 | /* clear status bits and run */ | ||
383 | nand_writel(info, NDSR, NDSR_MASK); | ||
384 | |||
385 | ndcr = info->reg_ndcr; | ||
386 | |||
387 | ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; | ||
388 | ndcr |= info->use_dma ? NDCR_DMA_EN : 0; | ||
389 | ndcr |= NDCR_ND_RUN; | ||
390 | |||
391 | nand_writel(info, NDCR, ndcr); | ||
392 | |||
393 | if (wait_for_event(info, NDSR_WRCMDREQ)) { | ||
394 | printk(KERN_ERR "timed out writing command\n"); | ||
395 | return -ETIMEDOUT; | ||
396 | } | ||
397 | |||
398 | nand_writel(info, NDCB0, info->ndcb0); | ||
399 | nand_writel(info, NDCB0, info->ndcb1); | ||
400 | nand_writel(info, NDCB0, info->ndcb2); | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | static int handle_data_pio(struct pxa3xx_nand_info *info) | ||
405 | { | ||
406 | int ret, timeout = CHIP_DELAY_TIMEOUT; | ||
407 | |||
408 | switch (info->state) { | 346 | switch (info->state) { |
409 | case STATE_PIO_WRITING: | 347 | case STATE_PIO_WRITING: |
410 | __raw_writesl(info->mmio_base + NDDB, info->data_buff, | 348 | __raw_writesl(info->mmio_base + NDDB, info->data_buff, |
@@ -412,14 +350,6 @@ static int handle_data_pio(struct pxa3xx_nand_info *info) | |||
412 | if (info->oob_size > 0) | 350 | if (info->oob_size > 0) |
413 | __raw_writesl(info->mmio_base + NDDB, info->oob_buff, | 351 | __raw_writesl(info->mmio_base + NDDB, info->oob_buff, |
414 | DIV_ROUND_UP(info->oob_size, 4)); | 352 | DIV_ROUND_UP(info->oob_size, 4)); |
415 | |||
416 | enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); | ||
417 | |||
418 | ret = wait_for_completion_timeout(&info->cmd_complete, timeout); | ||
419 | if (!ret) { | ||
420 | printk(KERN_ERR "program command time out\n"); | ||
421 | return -1; | ||
422 | } | ||
423 | break; | 353 | break; |
424 | case STATE_PIO_READING: | 354 | case STATE_PIO_READING: |
425 | __raw_readsl(info->mmio_base + NDDB, info->data_buff, | 355 | __raw_readsl(info->mmio_base + NDDB, info->data_buff, |
@@ -431,14 +361,11 @@ static int handle_data_pio(struct pxa3xx_nand_info *info) | |||
431 | default: | 361 | default: |
432 | printk(KERN_ERR "%s: invalid state %d\n", __func__, | 362 | printk(KERN_ERR "%s: invalid state %d\n", __func__, |
433 | info->state); | 363 | info->state); |
434 | return -EINVAL; | 364 | BUG(); |
435 | } | 365 | } |
436 | |||
437 | info->state = STATE_READY; | ||
438 | return 0; | ||
439 | } | 366 | } |
440 | 367 | ||
441 | static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out) | 368 | static void start_data_dma(struct pxa3xx_nand_info *info) |
442 | { | 369 | { |
443 | struct pxa_dma_desc *desc = info->data_desc; | 370 | struct pxa_dma_desc *desc = info->data_desc; |
444 | int dma_len = ALIGN(info->data_size + info->oob_size, 32); | 371 | int dma_len = ALIGN(info->data_size + info->oob_size, 32); |
@@ -446,14 +373,21 @@ static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out) | |||
446 | desc->ddadr = DDADR_STOP; | 373 | desc->ddadr = DDADR_STOP; |
447 | desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len; | 374 | desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len; |
448 | 375 | ||
449 | if (dir_out) { | 376 | switch (info->state) { |
377 | case STATE_DMA_WRITING: | ||
450 | desc->dsadr = info->data_buff_phys; | 378 | desc->dsadr = info->data_buff_phys; |
451 | desc->dtadr = info->mmio_phys + NDDB; | 379 | desc->dtadr = info->mmio_phys + NDDB; |
452 | desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG; | 380 | desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG; |
453 | } else { | 381 | break; |
382 | case STATE_DMA_READING: | ||
454 | desc->dtadr = info->data_buff_phys; | 383 | desc->dtadr = info->data_buff_phys; |
455 | desc->dsadr = info->mmio_phys + NDDB; | 384 | desc->dsadr = info->mmio_phys + NDDB; |
456 | desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; | 385 | desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; |
386 | break; | ||
387 | default: | ||
388 | printk(KERN_ERR "%s: invalid state %d\n", __func__, | ||
389 | info->state); | ||
390 | BUG(); | ||
457 | } | 391 | } |
458 | 392 | ||
459 | DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch; | 393 | DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch; |
@@ -471,93 +405,62 @@ static void pxa3xx_nand_data_dma_irq(int channel, void *data) | |||
471 | 405 | ||
472 | if (dcsr & DCSR_BUSERR) { | 406 | if (dcsr & DCSR_BUSERR) { |
473 | info->retcode = ERR_DMABUSERR; | 407 | info->retcode = ERR_DMABUSERR; |
474 | complete(&info->cmd_complete); | ||
475 | } | 408 | } |
476 | 409 | ||
477 | if (info->state == STATE_DMA_WRITING) { | 410 | info->state = STATE_DMA_DONE; |
478 | info->state = STATE_DMA_DONE; | 411 | enable_int(info, NDCR_INT_MASK); |
479 | enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); | 412 | nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ); |
480 | } else { | ||
481 | info->state = STATE_READY; | ||
482 | complete(&info->cmd_complete); | ||
483 | } | ||
484 | } | 413 | } |
485 | 414 | ||
486 | static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) | 415 | static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) |
487 | { | 416 | { |
488 | struct pxa3xx_nand_info *info = devid; | 417 | struct pxa3xx_nand_info *info = devid; |
489 | unsigned int status; | 418 | unsigned int status, is_completed = 0; |
490 | 419 | ||
491 | status = nand_readl(info, NDSR); | 420 | status = nand_readl(info, NDSR); |
492 | 421 | ||
493 | if (status & (NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR)) { | 422 | if (status & NDSR_DBERR) |
494 | if (status & NDSR_DBERR) | 423 | info->retcode = ERR_DBERR; |
495 | info->retcode = ERR_DBERR; | 424 | if (status & NDSR_SBERR) |
496 | else if (status & NDSR_SBERR) | 425 | info->retcode = ERR_SBERR; |
497 | info->retcode = ERR_SBERR; | 426 | if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) { |
498 | 427 | /* whether use dma to transfer data */ | |
499 | disable_int(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR); | ||
500 | |||
501 | if (info->use_dma) { | ||
502 | info->state = STATE_DMA_READING; | ||
503 | start_data_dma(info, 0); | ||
504 | } else { | ||
505 | info->state = STATE_PIO_READING; | ||
506 | complete(&info->cmd_complete); | ||
507 | } | ||
508 | } else if (status & NDSR_WRDREQ) { | ||
509 | disable_int(info, NDSR_WRDREQ); | ||
510 | if (info->use_dma) { | 428 | if (info->use_dma) { |
511 | info->state = STATE_DMA_WRITING; | 429 | disable_int(info, NDCR_INT_MASK); |
512 | start_data_dma(info, 1); | 430 | info->state = (status & NDSR_RDDREQ) ? |
431 | STATE_DMA_READING : STATE_DMA_WRITING; | ||
432 | start_data_dma(info); | ||
433 | goto NORMAL_IRQ_EXIT; | ||
513 | } else { | 434 | } else { |
514 | info->state = STATE_PIO_WRITING; | 435 | info->state = (status & NDSR_RDDREQ) ? |
515 | complete(&info->cmd_complete); | 436 | STATE_PIO_READING : STATE_PIO_WRITING; |
437 | handle_data_pio(info); | ||
516 | } | 438 | } |
517 | } else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) { | ||
518 | if (status & NDSR_CS0_BBD) | ||
519 | info->retcode = ERR_BBERR; | ||
520 | |||
521 | disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); | ||
522 | info->state = STATE_READY; | ||
523 | complete(&info->cmd_complete); | ||
524 | } | 439 | } |
525 | nand_writel(info, NDSR, status); | 440 | if (status & NDSR_CS0_CMDD) { |
526 | return IRQ_HANDLED; | 441 | info->state = STATE_CMD_DONE; |
527 | } | 442 | is_completed = 1; |
528 | |||
529 | static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event) | ||
530 | { | ||
531 | uint32_t ndcr; | ||
532 | int ret, timeout = CHIP_DELAY_TIMEOUT; | ||
533 | |||
534 | if (write_cmd(info)) { | ||
535 | info->retcode = ERR_SENDCMD; | ||
536 | goto fail_stop; | ||
537 | } | 443 | } |
538 | 444 | if (status & NDSR_FLASH_RDY) { | |
539 | info->state = STATE_CMD_HANDLE; | 445 | info->is_ready = 1; |
540 | 446 | info->state = STATE_READY; | |
541 | enable_int(info, event); | ||
542 | |||
543 | ret = wait_for_completion_timeout(&info->cmd_complete, timeout); | ||
544 | if (!ret) { | ||
545 | printk(KERN_ERR "command execution timed out\n"); | ||
546 | info->retcode = ERR_SENDCMD; | ||
547 | goto fail_stop; | ||
548 | } | 447 | } |
549 | 448 | ||
550 | if (info->use_dma == 0 && info->data_size > 0) | 449 | if (status & NDSR_WRCMDREQ) { |
551 | if (handle_data_pio(info)) | 450 | nand_writel(info, NDSR, NDSR_WRCMDREQ); |
552 | goto fail_stop; | 451 | status &= ~NDSR_WRCMDREQ; |
553 | 452 | info->state = STATE_CMD_HANDLE; | |
554 | return 0; | 453 | nand_writel(info, NDCB0, info->ndcb0); |
454 | nand_writel(info, NDCB0, info->ndcb1); | ||
455 | nand_writel(info, NDCB0, info->ndcb2); | ||
456 | } | ||
555 | 457 | ||
556 | fail_stop: | 458 | /* clear NDSR to let the controller exit the IRQ */ |
557 | ndcr = nand_readl(info, NDCR); | 459 | nand_writel(info, NDSR, status); |
558 | nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN); | 460 | if (is_completed) |
559 | udelay(10); | 461 | complete(&info->cmd_complete); |
560 | return -ETIMEDOUT; | 462 | NORMAL_IRQ_EXIT: |
463 | return IRQ_HANDLED; | ||
561 | } | 464 | } |
562 | 465 | ||
563 | static int pxa3xx_nand_dev_ready(struct mtd_info *mtd) | 466 | static int pxa3xx_nand_dev_ready(struct mtd_info *mtd) |
@@ -574,125 +477,218 @@ static inline int is_buf_blank(uint8_t *buf, size_t len) | |||
574 | return 1; | 477 | return 1; |
575 | } | 478 | } |
576 | 479 | ||
577 | static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, | 480 | static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, |
578 | int column, int page_addr) | 481 | uint16_t column, int page_addr) |
579 | { | 482 | { |
580 | struct pxa3xx_nand_info *info = mtd->priv; | 483 | uint16_t cmd; |
581 | const struct pxa3xx_nand_cmdset *cmdset = info->cmdset; | 484 | int addr_cycle, exec_cmd, ndcb0; |
582 | int ret; | 485 | struct mtd_info *mtd = info->mtd; |
486 | |||
487 | ndcb0 = 0; | ||
488 | addr_cycle = 0; | ||
489 | exec_cmd = 1; | ||
490 | |||
491 | /* reset data and oob column point to handle data */ | ||
492 | info->buf_start = 0; | ||
493 | info->buf_count = 0; | ||
494 | info->oob_size = 0; | ||
495 | info->use_ecc = 0; | ||
496 | info->is_ready = 0; | ||
497 | info->retcode = ERR_NONE; | ||
583 | 498 | ||
584 | info->use_dma = (use_dma) ? 1 : 0; | 499 | switch (command) { |
585 | info->use_ecc = 0; | 500 | case NAND_CMD_READ0: |
586 | info->data_size = 0; | 501 | case NAND_CMD_PAGEPROG: |
587 | info->state = STATE_READY; | 502 | info->use_ecc = 1; |
503 | case NAND_CMD_READOOB: | ||
504 | pxa3xx_set_datasize(info); | ||
505 | break; | ||
506 | case NAND_CMD_SEQIN: | ||
507 | exec_cmd = 0; | ||
508 | break; | ||
509 | default: | ||
510 | info->ndcb1 = 0; | ||
511 | info->ndcb2 = 0; | ||
512 | break; | ||
513 | } | ||
588 | 514 | ||
589 | init_completion(&info->cmd_complete); | 515 | info->ndcb0 = ndcb0; |
516 | addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles | ||
517 | + info->col_addr_cycles); | ||
590 | 518 | ||
591 | switch (command) { | 519 | switch (command) { |
592 | case NAND_CMD_READOOB: | 520 | case NAND_CMD_READOOB: |
593 | /* disable HW ECC to get all the OOB data */ | 521 | case NAND_CMD_READ0: |
594 | info->buf_count = mtd->writesize + mtd->oobsize; | 522 | cmd = info->cmdset->read1; |
595 | info->buf_start = mtd->writesize + column; | 523 | if (command == NAND_CMD_READOOB) |
596 | memset(info->data_buff, 0xFF, info->buf_count); | 524 | info->buf_start = mtd->writesize + column; |
525 | else | ||
526 | info->buf_start = column; | ||
597 | 527 | ||
598 | if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr)) | 528 | if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) |
599 | break; | 529 | info->ndcb0 |= NDCB0_CMD_TYPE(0) |
530 | | addr_cycle | ||
531 | | (cmd & NDCB0_CMD1_MASK); | ||
532 | else | ||
533 | info->ndcb0 |= NDCB0_CMD_TYPE(0) | ||
534 | | NDCB0_DBC | ||
535 | | addr_cycle | ||
536 | | cmd; | ||
600 | 537 | ||
601 | pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR); | 538 | case NAND_CMD_SEQIN: |
539 | /* small page addr setting */ | ||
540 | if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) { | ||
541 | info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) | ||
542 | | (column & 0xFF); | ||
602 | 543 | ||
603 | /* We only are OOB, so if the data has error, does not matter */ | 544 | info->ndcb2 = 0; |
604 | if (info->retcode == ERR_DBERR) | 545 | } else { |
605 | info->retcode = ERR_NONE; | 546 | info->ndcb1 = ((page_addr & 0xFFFF) << 16) |
606 | break; | 547 | | (column & 0xFFFF); |
548 | |||
549 | if (page_addr & 0xFF0000) | ||
550 | info->ndcb2 = (page_addr & 0xFF0000) >> 16; | ||
551 | else | ||
552 | info->ndcb2 = 0; | ||
553 | } | ||
607 | 554 | ||
608 | case NAND_CMD_READ0: | ||
609 | info->use_ecc = 1; | ||
610 | info->retcode = ERR_NONE; | ||
611 | info->buf_start = column; | ||
612 | info->buf_count = mtd->writesize + mtd->oobsize; | 555 | info->buf_count = mtd->writesize + mtd->oobsize; |
613 | memset(info->data_buff, 0xFF, info->buf_count); | 556 | memset(info->data_buff, 0xFF, info->buf_count); |
614 | 557 | ||
615 | if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr)) | 558 | break; |
559 | |||
560 | case NAND_CMD_PAGEPROG: | ||
561 | if (is_buf_blank(info->data_buff, | ||
562 | (mtd->writesize + mtd->oobsize))) { | ||
563 | exec_cmd = 0; | ||
616 | break; | 564 | break; |
565 | } | ||
617 | 566 | ||
618 | pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR); | 567 | cmd = info->cmdset->program; |
568 | info->ndcb0 |= NDCB0_CMD_TYPE(0x1) | ||
569 | | NDCB0_AUTO_RS | ||
570 | | NDCB0_ST_ROW_EN | ||
571 | | NDCB0_DBC | ||
572 | | cmd | ||
573 | | addr_cycle; | ||
574 | break; | ||
619 | 575 | ||
620 | if (info->retcode == ERR_DBERR) { | 576 | case NAND_CMD_READID: |
621 | /* for blank page (all 0xff), HW will calculate its ECC as | 577 | cmd = info->cmdset->read_id; |
622 | * 0, which is different from the ECC information within | 578 | info->buf_count = info->read_id_bytes; |
623 | * OOB, ignore such double bit errors | 579 | info->ndcb0 |= NDCB0_CMD_TYPE(3) |
624 | */ | 580 | | NDCB0_ADDR_CYC(1) |
625 | if (is_buf_blank(info->data_buff, mtd->writesize)) | 581 | | cmd; |
626 | info->retcode = ERR_NONE; | 582 | |
627 | } | 583 | info->data_size = 8; |
628 | break; | 584 | break; |
629 | case NAND_CMD_SEQIN: | 585 | case NAND_CMD_STATUS: |
630 | info->buf_start = column; | 586 | cmd = info->cmdset->read_status; |
631 | info->buf_count = mtd->writesize + mtd->oobsize; | 587 | info->buf_count = 1; |
632 | memset(info->data_buff, 0xff, info->buf_count); | 588 | info->ndcb0 |= NDCB0_CMD_TYPE(4) |
589 | | NDCB0_ADDR_CYC(1) | ||
590 | | cmd; | ||
633 | 591 | ||
634 | /* save column/page_addr for next CMD_PAGEPROG */ | 592 | info->data_size = 8; |
635 | info->seqin_column = column; | ||
636 | info->seqin_page_addr = page_addr; | ||
637 | break; | 593 | break; |
638 | case NAND_CMD_PAGEPROG: | ||
639 | info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1; | ||
640 | 594 | ||
641 | if (prepare_read_prog_cmd(info, cmdset->program, | 595 | case NAND_CMD_ERASE1: |
642 | info->seqin_column, info->seqin_page_addr)) | 596 | cmd = info->cmdset->erase; |
643 | break; | 597 | info->ndcb0 |= NDCB0_CMD_TYPE(2) |
598 | | NDCB0_AUTO_RS | ||
599 | | NDCB0_ADDR_CYC(3) | ||
600 | | NDCB0_DBC | ||
601 | | cmd; | ||
602 | info->ndcb1 = page_addr; | ||
603 | info->ndcb2 = 0; | ||
644 | 604 | ||
645 | pxa3xx_nand_do_cmd(info, NDSR_WRDREQ); | ||
646 | break; | 605 | break; |
647 | case NAND_CMD_ERASE1: | 606 | case NAND_CMD_RESET: |
648 | if (prepare_erase_cmd(info, cmdset->erase, page_addr)) | 607 | cmd = info->cmdset->reset; |
649 | break; | 608 | info->ndcb0 |= NDCB0_CMD_TYPE(5) |
609 | | cmd; | ||
650 | 610 | ||
651 | pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); | ||
652 | break; | 611 | break; |
612 | |||
653 | case NAND_CMD_ERASE2: | 613 | case NAND_CMD_ERASE2: |
614 | exec_cmd = 0; | ||
654 | break; | 615 | break; |
655 | case NAND_CMD_READID: | ||
656 | case NAND_CMD_STATUS: | ||
657 | info->use_dma = 0; /* force PIO read */ | ||
658 | info->buf_start = 0; | ||
659 | info->buf_count = (command == NAND_CMD_READID) ? | ||
660 | info->read_id_bytes : 1; | ||
661 | |||
662 | if (prepare_other_cmd(info, (command == NAND_CMD_READID) ? | ||
663 | cmdset->read_id : cmdset->read_status)) | ||
664 | break; | ||
665 | 616 | ||
666 | pxa3xx_nand_do_cmd(info, NDSR_RDDREQ); | 617 | default: |
618 | exec_cmd = 0; | ||
619 | printk(KERN_ERR "pxa3xx-nand: non-supported" | ||
620 | " command %x\n", command); | ||
667 | break; | 621 | break; |
668 | case NAND_CMD_RESET: | 622 | } |
669 | if (prepare_other_cmd(info, cmdset->reset)) | ||
670 | break; | ||
671 | 623 | ||
672 | ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD); | 624 | return exec_cmd; |
673 | if (ret == 0) { | 625 | } |
674 | int timeout = 2; | ||
675 | uint32_t ndcr; | ||
676 | 626 | ||
677 | while (timeout--) { | 627 | static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, |
678 | if (nand_readl(info, NDSR) & NDSR_RDY) | 628 | int column, int page_addr) |
679 | break; | 629 | { |
680 | msleep(10); | 630 | struct pxa3xx_nand_info *info = mtd->priv; |
681 | } | 631 | int ret, exec_cmd; |
682 | 632 | ||
683 | ndcr = nand_readl(info, NDCR); | 633 | /* |
684 | nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN); | 634 | * if this is a x16 device ,then convert the input |
635 | * "byte" address into a "word" address appropriate | ||
636 | * for indexing a word-oriented device | ||
637 | */ | ||
638 | if (info->reg_ndcr & NDCR_DWIDTH_M) | ||
639 | column /= 2; | ||
640 | |||
641 | exec_cmd = prepare_command_pool(info, command, column, page_addr); | ||
642 | if (exec_cmd) { | ||
643 | init_completion(&info->cmd_complete); | ||
644 | pxa3xx_nand_start(info); | ||
645 | |||
646 | ret = wait_for_completion_timeout(&info->cmd_complete, | ||
647 | CHIP_DELAY_TIMEOUT); | ||
648 | if (!ret) { | ||
649 | printk(KERN_ERR "Wait time out!!!\n"); | ||
650 | /* Stop State Machine for next command cycle */ | ||
651 | pxa3xx_nand_stop(info); | ||
685 | } | 652 | } |
686 | break; | 653 | info->state = STATE_IDLE; |
687 | default: | ||
688 | printk(KERN_ERR "non-supported command.\n"); | ||
689 | break; | ||
690 | } | 654 | } |
655 | } | ||
656 | |||
657 | static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, | ||
658 | struct nand_chip *chip, const uint8_t *buf) | ||
659 | { | ||
660 | chip->write_buf(mtd, buf, mtd->writesize); | ||
661 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
662 | } | ||
691 | 663 | ||
692 | if (info->retcode == ERR_DBERR) { | 664 | static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, |
693 | printk(KERN_ERR "double bit error @ page %08x\n", page_addr); | 665 | struct nand_chip *chip, uint8_t *buf, int page) |
694 | info->retcode = ERR_NONE; | 666 | { |
667 | struct pxa3xx_nand_info *info = mtd->priv; | ||
668 | |||
669 | chip->read_buf(mtd, buf, mtd->writesize); | ||
670 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
671 | |||
672 | if (info->retcode == ERR_SBERR) { | ||
673 | switch (info->use_ecc) { | ||
674 | case 1: | ||
675 | mtd->ecc_stats.corrected++; | ||
676 | break; | ||
677 | case 0: | ||
678 | default: | ||
679 | break; | ||
680 | } | ||
681 | } else if (info->retcode == ERR_DBERR) { | ||
682 | /* | ||
683 | * for blank page (all 0xff), HW will calculate its ECC as | ||
684 | * 0, which is different from the ECC information within | ||
685 | * OOB, ignore such double bit errors | ||
686 | */ | ||
687 | if (is_buf_blank(buf, mtd->writesize)) | ||
688 | mtd->ecc_stats.failed++; | ||
695 | } | 689 | } |
690 | |||
691 | return 0; | ||
696 | } | 692 | } |
697 | 693 | ||
698 | static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) | 694 | static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) |
@@ -769,73 +765,12 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this) | |||
769 | return 0; | 765 | return 0; |
770 | } | 766 | } |
771 | 767 | ||
772 | static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode) | ||
773 | { | ||
774 | return; | ||
775 | } | ||
776 | |||
777 | static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd, | ||
778 | const uint8_t *dat, uint8_t *ecc_code) | ||
779 | { | ||
780 | return 0; | ||
781 | } | ||
782 | |||
783 | static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd, | ||
784 | uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc) | ||
785 | { | ||
786 | struct pxa3xx_nand_info *info = mtd->priv; | ||
787 | /* | ||
788 | * Any error include ERR_SEND_CMD, ERR_DBERR, ERR_BUSERR, we | ||
789 | * consider it as a ecc error which will tell the caller the | ||
790 | * read fail We have distinguish all the errors, but the | ||
791 | * nand_read_ecc only check this function return value | ||
792 | * | ||
793 | * Corrected (single-bit) errors must also be noted. | ||
794 | */ | ||
795 | if (info->retcode == ERR_SBERR) | ||
796 | return 1; | ||
797 | else if (info->retcode != ERR_NONE) | ||
798 | return -1; | ||
799 | |||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | static int __readid(struct pxa3xx_nand_info *info, uint32_t *id) | ||
804 | { | ||
805 | const struct pxa3xx_nand_cmdset *cmdset = info->cmdset; | ||
806 | uint32_t ndcr; | ||
807 | uint8_t id_buff[8]; | ||
808 | |||
809 | if (prepare_other_cmd(info, cmdset->read_id)) { | ||
810 | printk(KERN_ERR "failed to prepare command\n"); | ||
811 | return -EINVAL; | ||
812 | } | ||
813 | |||
814 | /* Send command */ | ||
815 | if (write_cmd(info)) | ||
816 | goto fail_timeout; | ||
817 | |||
818 | /* Wait for CMDDM(command done successfully) */ | ||
819 | if (wait_for_event(info, NDSR_RDDREQ)) | ||
820 | goto fail_timeout; | ||
821 | |||
822 | __raw_readsl(info->mmio_base + NDDB, id_buff, 2); | ||
823 | *id = id_buff[0] | (id_buff[1] << 8); | ||
824 | return 0; | ||
825 | |||
826 | fail_timeout: | ||
827 | ndcr = nand_readl(info, NDCR); | ||
828 | nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN); | ||
829 | udelay(10); | ||
830 | return -ETIMEDOUT; | ||
831 | } | ||
832 | |||
833 | static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, | 768 | static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, |
834 | const struct pxa3xx_nand_flash *f) | 769 | const struct pxa3xx_nand_flash *f) |
835 | { | 770 | { |
836 | struct platform_device *pdev = info->pdev; | 771 | struct platform_device *pdev = info->pdev; |
837 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; | 772 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; |
838 | uint32_t ndcr = 0x00000FFF; /* disable all interrupts */ | 773 | uint32_t ndcr = 0x0; /* enable all interrupts */ |
839 | 774 | ||
840 | if (f->page_size != 2048 && f->page_size != 512) | 775 | if (f->page_size != 2048 && f->page_size != 512) |
841 | return -EINVAL; | 776 | return -EINVAL; |
@@ -844,9 +779,8 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, | |||
844 | return -EINVAL; | 779 | return -EINVAL; |
845 | 780 | ||
846 | /* calculate flash information */ | 781 | /* calculate flash information */ |
847 | info->cmdset = f->cmdset; | 782 | info->cmdset = &default_cmdset; |
848 | info->page_size = f->page_size; | 783 | info->page_size = f->page_size; |
849 | info->oob_buff = info->data_buff + f->page_size; | ||
850 | info->read_id_bytes = (f->page_size == 2048) ? 4 : 2; | 784 | info->read_id_bytes = (f->page_size == 2048) ? 4 : 2; |
851 | 785 | ||
852 | /* calculate addressing information */ | 786 | /* calculate addressing information */ |
@@ -876,87 +810,18 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, | |||
876 | static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) | 810 | static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) |
877 | { | 811 | { |
878 | uint32_t ndcr = nand_readl(info, NDCR); | 812 | uint32_t ndcr = nand_readl(info, NDCR); |
879 | struct nand_flash_dev *type = NULL; | ||
880 | uint32_t id = -1, page_per_block, num_blocks; | ||
881 | int i; | ||
882 | |||
883 | page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32; | ||
884 | info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512; | 813 | info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512; |
885 | /* set info fields needed to __readid */ | 814 | /* set info fields needed to read id */ |
886 | info->read_id_bytes = (info->page_size == 2048) ? 4 : 2; | 815 | info->read_id_bytes = (info->page_size == 2048) ? 4 : 2; |
887 | info->reg_ndcr = ndcr; | 816 | info->reg_ndcr = ndcr; |
888 | info->cmdset = &default_cmdset; | 817 | info->cmdset = &default_cmdset; |
889 | 818 | ||
890 | if (__readid(info, &id)) | ||
891 | return -ENODEV; | ||
892 | |||
893 | /* Lookup the flash id */ | ||
894 | id = (id >> 8) & 0xff; /* device id is byte 2 */ | ||
895 | for (i = 0; nand_flash_ids[i].name != NULL; i++) { | ||
896 | if (id == nand_flash_ids[i].id) { | ||
897 | type = &nand_flash_ids[i]; | ||
898 | break; | ||
899 | } | ||
900 | } | ||
901 | |||
902 | if (!type) | ||
903 | return -ENODEV; | ||
904 | |||
905 | /* fill the missing flash information */ | ||
906 | i = __ffs(page_per_block * info->page_size); | ||
907 | num_blocks = type->chipsize << (20 - i); | ||
908 | |||
909 | /* calculate addressing information */ | ||
910 | info->col_addr_cycles = (info->page_size == 2048) ? 2 : 1; | ||
911 | |||
912 | if (num_blocks * page_per_block > 65536) | ||
913 | info->row_addr_cycles = 3; | ||
914 | else | ||
915 | info->row_addr_cycles = 2; | ||
916 | |||
917 | info->ndtr0cs0 = nand_readl(info, NDTR0CS0); | 819 | info->ndtr0cs0 = nand_readl(info, NDTR0CS0); |
918 | info->ndtr1cs0 = nand_readl(info, NDTR1CS0); | 820 | info->ndtr1cs0 = nand_readl(info, NDTR1CS0); |
919 | 821 | ||
920 | return 0; | 822 | return 0; |
921 | } | 823 | } |
922 | 824 | ||
923 | static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info, | ||
924 | const struct pxa3xx_nand_platform_data *pdata) | ||
925 | { | ||
926 | const struct pxa3xx_nand_flash *f; | ||
927 | uint32_t id = -1; | ||
928 | int i; | ||
929 | |||
930 | if (pdata->keep_config) | ||
931 | if (pxa3xx_nand_detect_config(info) == 0) | ||
932 | return 0; | ||
933 | |||
934 | /* we use default timing to detect id */ | ||
935 | f = DEFAULT_FLASH_TYPE; | ||
936 | pxa3xx_nand_config_flash(info, f); | ||
937 | if (__readid(info, &id)) | ||
938 | goto fail_detect; | ||
939 | |||
940 | for (i=0; i<ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; i++) { | ||
941 | /* we first choose the flash definition from platfrom */ | ||
942 | if (i < pdata->num_flash) | ||
943 | f = pdata->flash + i; | ||
944 | else | ||
945 | f = &builtin_flash_types[i - pdata->num_flash + 1]; | ||
946 | if (f->chip_id == id) { | ||
947 | dev_info(&info->pdev->dev, "detect chip id: 0x%x\n", id); | ||
948 | pxa3xx_nand_config_flash(info, f); | ||
949 | return 0; | ||
950 | } | ||
951 | } | ||
952 | |||
953 | dev_warn(&info->pdev->dev, | ||
954 | "failed to detect configured nand flash; found %04x instead of\n", | ||
955 | id); | ||
956 | fail_detect: | ||
957 | return -ENODEV; | ||
958 | } | ||
959 | |||
960 | /* the maximum possible buffer size for large page with OOB data | 825 | /* the maximum possible buffer size for large page with OOB data |
961 | * is: 2048 + 64 = 2112 bytes, allocate a page here for both the | 826 | * is: 2048 + 64 = 2112 bytes, allocate a page here for both the |
962 | * data buffer and the DMA descriptor | 827 | * data buffer and the DMA descriptor |
@@ -998,82 +863,144 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info) | |||
998 | return 0; | 863 | return 0; |
999 | } | 864 | } |
1000 | 865 | ||
1001 | static struct nand_ecclayout hw_smallpage_ecclayout = { | 866 | static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info) |
1002 | .eccbytes = 6, | 867 | { |
1003 | .eccpos = {8, 9, 10, 11, 12, 13 }, | 868 | struct mtd_info *mtd = info->mtd; |
1004 | .oobfree = { {2, 6} } | 869 | struct nand_chip *chip = mtd->priv; |
1005 | }; | ||
1006 | 870 | ||
1007 | static struct nand_ecclayout hw_largepage_ecclayout = { | 871 | /* use the common timing to make a try */ |
1008 | .eccbytes = 24, | 872 | pxa3xx_nand_config_flash(info, &builtin_flash_types[0]); |
1009 | .eccpos = { | 873 | chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0); |
1010 | 40, 41, 42, 43, 44, 45, 46, 47, | 874 | if (info->is_ready) |
1011 | 48, 49, 50, 51, 52, 53, 54, 55, | 875 | return 1; |
1012 | 56, 57, 58, 59, 60, 61, 62, 63}, | 876 | else |
1013 | .oobfree = { {2, 38} } | 877 | return 0; |
1014 | }; | 878 | } |
1015 | 879 | ||
1016 | static void pxa3xx_nand_init_mtd(struct mtd_info *mtd, | 880 | static int pxa3xx_nand_scan(struct mtd_info *mtd) |
1017 | struct pxa3xx_nand_info *info) | ||
1018 | { | 881 | { |
1019 | struct nand_chip *this = &info->nand_chip; | 882 | struct pxa3xx_nand_info *info = mtd->priv; |
1020 | 883 | struct platform_device *pdev = info->pdev; | |
1021 | this->options = (info->reg_ndcr & NDCR_DWIDTH_C) ? NAND_BUSWIDTH_16: 0; | 884 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; |
1022 | 885 | struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} }; | |
1023 | this->waitfunc = pxa3xx_nand_waitfunc; | 886 | const struct pxa3xx_nand_flash *f = NULL; |
1024 | this->select_chip = pxa3xx_nand_select_chip; | 887 | struct nand_chip *chip = mtd->priv; |
1025 | this->dev_ready = pxa3xx_nand_dev_ready; | 888 | uint32_t id = -1; |
1026 | this->cmdfunc = pxa3xx_nand_cmdfunc; | 889 | uint64_t chipsize; |
1027 | this->read_word = pxa3xx_nand_read_word; | 890 | int i, ret, num; |
1028 | this->read_byte = pxa3xx_nand_read_byte; | 891 | |
1029 | this->read_buf = pxa3xx_nand_read_buf; | 892 | if (pdata->keep_config && !pxa3xx_nand_detect_config(info)) |
1030 | this->write_buf = pxa3xx_nand_write_buf; | 893 | goto KEEP_CONFIG; |
1031 | this->verify_buf = pxa3xx_nand_verify_buf; | 894 | |
1032 | 895 | ret = pxa3xx_nand_sensing(info); | |
1033 | this->ecc.mode = NAND_ECC_HW; | 896 | if (!ret) { |
1034 | this->ecc.hwctl = pxa3xx_nand_ecc_hwctl; | 897 | kfree(mtd); |
1035 | this->ecc.calculate = pxa3xx_nand_ecc_calculate; | 898 | info->mtd = NULL; |
1036 | this->ecc.correct = pxa3xx_nand_ecc_correct; | 899 | printk(KERN_INFO "There is no nand chip on cs 0!\n"); |
1037 | this->ecc.size = info->page_size; | 900 | |
1038 | 901 | return -EINVAL; | |
1039 | if (info->page_size == 2048) | 902 | } |
1040 | this->ecc.layout = &hw_largepage_ecclayout; | 903 | |
904 | chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0); | ||
905 | id = *((uint16_t *)(info->data_buff)); | ||
906 | if (id != 0) | ||
907 | printk(KERN_INFO "Detect a flash id %x\n", id); | ||
908 | else { | ||
909 | kfree(mtd); | ||
910 | info->mtd = NULL; | ||
911 | printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n"); | ||
912 | |||
913 | return -EINVAL; | ||
914 | } | ||
915 | |||
916 | num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; | ||
917 | for (i = 0; i < num; i++) { | ||
918 | if (i < pdata->num_flash) | ||
919 | f = pdata->flash + i; | ||
920 | else | ||
921 | f = &builtin_flash_types[i - pdata->num_flash + 1]; | ||
922 | |||
923 | /* find the chip in default list */ | ||
924 | if (f->chip_id == id) | ||
925 | break; | ||
926 | } | ||
927 | |||
928 | if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) { | ||
929 | kfree(mtd); | ||
930 | info->mtd = NULL; | ||
931 | printk(KERN_ERR "ERROR!! flash not defined!!!\n"); | ||
932 | |||
933 | return -EINVAL; | ||
934 | } | ||
935 | |||
936 | pxa3xx_nand_config_flash(info, f); | ||
937 | pxa3xx_flash_ids[0].name = f->name; | ||
938 | pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff; | ||
939 | pxa3xx_flash_ids[0].pagesize = f->page_size; | ||
940 | chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size; | ||
941 | pxa3xx_flash_ids[0].chipsize = chipsize >> 20; | ||
942 | pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block; | ||
943 | if (f->flash_width == 16) | ||
944 | pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16; | ||
945 | KEEP_CONFIG: | ||
946 | if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids)) | ||
947 | return -ENODEV; | ||
948 | /* calculate addressing information */ | ||
949 | info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1; | ||
950 | info->oob_buff = info->data_buff + mtd->writesize; | ||
951 | if ((mtd->size >> chip->page_shift) > 65536) | ||
952 | info->row_addr_cycles = 3; | ||
1041 | else | 953 | else |
1042 | this->ecc.layout = &hw_smallpage_ecclayout; | 954 | info->row_addr_cycles = 2; |
955 | mtd->name = mtd_names[0]; | ||
956 | chip->ecc.mode = NAND_ECC_HW; | ||
957 | chip->ecc.size = f->page_size; | ||
958 | |||
959 | chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0; | ||
960 | chip->options |= NAND_NO_AUTOINCR; | ||
961 | chip->options |= NAND_NO_READRDY; | ||
1043 | 962 | ||
1044 | this->chip_delay = 25; | 963 | return nand_scan_tail(mtd); |
1045 | } | 964 | } |
1046 | 965 | ||
1047 | static int pxa3xx_nand_probe(struct platform_device *pdev) | 966 | static |
967 | struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev) | ||
1048 | { | 968 | { |
1049 | struct pxa3xx_nand_platform_data *pdata; | ||
1050 | struct pxa3xx_nand_info *info; | 969 | struct pxa3xx_nand_info *info; |
1051 | struct nand_chip *this; | 970 | struct nand_chip *chip; |
1052 | struct mtd_info *mtd; | 971 | struct mtd_info *mtd; |
1053 | struct resource *r; | 972 | struct resource *r; |
1054 | int ret = 0, irq; | 973 | int ret, irq; |
1055 | |||
1056 | pdata = pdev->dev.platform_data; | ||
1057 | |||
1058 | if (!pdata) { | ||
1059 | dev_err(&pdev->dev, "no platform data defined\n"); | ||
1060 | return -ENODEV; | ||
1061 | } | ||
1062 | 974 | ||
1063 | mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info), | 975 | mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info), |
1064 | GFP_KERNEL); | 976 | GFP_KERNEL); |
1065 | if (!mtd) { | 977 | if (!mtd) { |
1066 | dev_err(&pdev->dev, "failed to allocate memory\n"); | 978 | dev_err(&pdev->dev, "failed to allocate memory\n"); |
1067 | return -ENOMEM; | 979 | return NULL; |
1068 | } | 980 | } |
1069 | 981 | ||
1070 | info = (struct pxa3xx_nand_info *)(&mtd[1]); | 982 | info = (struct pxa3xx_nand_info *)(&mtd[1]); |
983 | chip = (struct nand_chip *)(&mtd[1]); | ||
1071 | info->pdev = pdev; | 984 | info->pdev = pdev; |
1072 | 985 | info->mtd = mtd; | |
1073 | this = &info->nand_chip; | ||
1074 | mtd->priv = info; | 986 | mtd->priv = info; |
1075 | mtd->owner = THIS_MODULE; | 987 | mtd->owner = THIS_MODULE; |
1076 | 988 | ||
989 | chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; | ||
990 | chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; | ||
991 | chip->controller = &info->controller; | ||
992 | chip->waitfunc = pxa3xx_nand_waitfunc; | ||
993 | chip->select_chip = pxa3xx_nand_select_chip; | ||
994 | chip->dev_ready = pxa3xx_nand_dev_ready; | ||
995 | chip->cmdfunc = pxa3xx_nand_cmdfunc; | ||
996 | chip->read_word = pxa3xx_nand_read_word; | ||
997 | chip->read_byte = pxa3xx_nand_read_byte; | ||
998 | chip->read_buf = pxa3xx_nand_read_buf; | ||
999 | chip->write_buf = pxa3xx_nand_write_buf; | ||
1000 | chip->verify_buf = pxa3xx_nand_verify_buf; | ||
1001 | |||
1002 | spin_lock_init(&chip->controller->lock); | ||
1003 | init_waitqueue_head(&chip->controller->wq); | ||
1077 | info->clk = clk_get(&pdev->dev, NULL); | 1004 | info->clk = clk_get(&pdev->dev, NULL); |
1078 | if (IS_ERR(info->clk)) { | 1005 | if (IS_ERR(info->clk)) { |
1079 | dev_err(&pdev->dev, "failed to get nand clock\n"); | 1006 | dev_err(&pdev->dev, "failed to get nand clock\n"); |
@@ -1141,43 +1068,12 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) | |||
1141 | goto fail_free_buf; | 1068 | goto fail_free_buf; |
1142 | } | 1069 | } |
1143 | 1070 | ||
1144 | ret = pxa3xx_nand_detect_flash(info, pdata); | 1071 | platform_set_drvdata(pdev, info); |
1145 | if (ret) { | ||
1146 | dev_err(&pdev->dev, "failed to detect flash\n"); | ||
1147 | ret = -ENODEV; | ||
1148 | goto fail_free_irq; | ||
1149 | } | ||
1150 | |||
1151 | pxa3xx_nand_init_mtd(mtd, info); | ||
1152 | |||
1153 | platform_set_drvdata(pdev, mtd); | ||
1154 | |||
1155 | if (nand_scan(mtd, 1)) { | ||
1156 | dev_err(&pdev->dev, "failed to scan nand\n"); | ||
1157 | ret = -ENXIO; | ||
1158 | goto fail_free_irq; | ||
1159 | } | ||
1160 | |||
1161 | #ifdef CONFIG_MTD_PARTITIONS | ||
1162 | if (mtd_has_cmdlinepart()) { | ||
1163 | static const char *probes[] = { "cmdlinepart", NULL }; | ||
1164 | struct mtd_partition *parts; | ||
1165 | int nr_parts; | ||
1166 | |||
1167 | nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0); | ||
1168 | |||
1169 | if (nr_parts) | ||
1170 | return add_mtd_partitions(mtd, parts, nr_parts); | ||
1171 | } | ||
1172 | 1072 | ||
1173 | return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); | 1073 | return info; |
1174 | #else | ||
1175 | return 0; | ||
1176 | #endif | ||
1177 | 1074 | ||
1178 | fail_free_irq: | ||
1179 | free_irq(irq, info); | ||
1180 | fail_free_buf: | 1075 | fail_free_buf: |
1076 | free_irq(irq, info); | ||
1181 | if (use_dma) { | 1077 | if (use_dma) { |
1182 | pxa_free_dma(info->data_dma_ch); | 1078 | pxa_free_dma(info->data_dma_ch); |
1183 | dma_free_coherent(&pdev->dev, info->data_buff_size, | 1079 | dma_free_coherent(&pdev->dev, info->data_buff_size, |
@@ -1193,22 +1089,18 @@ fail_put_clk: | |||
1193 | clk_put(info->clk); | 1089 | clk_put(info->clk); |
1194 | fail_free_mtd: | 1090 | fail_free_mtd: |
1195 | kfree(mtd); | 1091 | kfree(mtd); |
1196 | return ret; | 1092 | return NULL; |
1197 | } | 1093 | } |
1198 | 1094 | ||
1199 | static int pxa3xx_nand_remove(struct platform_device *pdev) | 1095 | static int pxa3xx_nand_remove(struct platform_device *pdev) |
1200 | { | 1096 | { |
1201 | struct mtd_info *mtd = platform_get_drvdata(pdev); | 1097 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); |
1202 | struct pxa3xx_nand_info *info = mtd->priv; | 1098 | struct mtd_info *mtd = info->mtd; |
1203 | struct resource *r; | 1099 | struct resource *r; |
1204 | int irq; | 1100 | int irq; |
1205 | 1101 | ||
1206 | platform_set_drvdata(pdev, NULL); | 1102 | platform_set_drvdata(pdev, NULL); |
1207 | 1103 | ||
1208 | del_mtd_device(mtd); | ||
1209 | #ifdef CONFIG_MTD_PARTITIONS | ||
1210 | del_mtd_partitions(mtd); | ||
1211 | #endif | ||
1212 | irq = platform_get_irq(pdev, 0); | 1104 | irq = platform_get_irq(pdev, 0); |
1213 | if (irq >= 0) | 1105 | if (irq >= 0) |
1214 | free_irq(irq, info); | 1106 | free_irq(irq, info); |
@@ -1226,17 +1118,62 @@ static int pxa3xx_nand_remove(struct platform_device *pdev) | |||
1226 | clk_disable(info->clk); | 1118 | clk_disable(info->clk); |
1227 | clk_put(info->clk); | 1119 | clk_put(info->clk); |
1228 | 1120 | ||
1229 | kfree(mtd); | 1121 | if (mtd) { |
1122 | del_mtd_device(mtd); | ||
1123 | #ifdef CONFIG_MTD_PARTITIONS | ||
1124 | del_mtd_partitions(mtd); | ||
1125 | #endif | ||
1126 | kfree(mtd); | ||
1127 | } | ||
1230 | return 0; | 1128 | return 0; |
1231 | } | 1129 | } |
1232 | 1130 | ||
1131 | static int pxa3xx_nand_probe(struct platform_device *pdev) | ||
1132 | { | ||
1133 | struct pxa3xx_nand_platform_data *pdata; | ||
1134 | struct pxa3xx_nand_info *info; | ||
1135 | |||
1136 | pdata = pdev->dev.platform_data; | ||
1137 | if (!pdata) { | ||
1138 | dev_err(&pdev->dev, "no platform data defined\n"); | ||
1139 | return -ENODEV; | ||
1140 | } | ||
1141 | |||
1142 | info = alloc_nand_resource(pdev); | ||
1143 | if (info == NULL) | ||
1144 | return -ENOMEM; | ||
1145 | |||
1146 | if (pxa3xx_nand_scan(info->mtd)) { | ||
1147 | dev_err(&pdev->dev, "failed to scan nand\n"); | ||
1148 | pxa3xx_nand_remove(pdev); | ||
1149 | return -ENODEV; | ||
1150 | } | ||
1151 | |||
1152 | #ifdef CONFIG_MTD_PARTITIONS | ||
1153 | if (mtd_has_cmdlinepart()) { | ||
1154 | const char *probes[] = { "cmdlinepart", NULL }; | ||
1155 | struct mtd_partition *parts; | ||
1156 | int nr_parts; | ||
1157 | |||
1158 | nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0); | ||
1159 | |||
1160 | if (nr_parts) | ||
1161 | return add_mtd_partitions(info->mtd, parts, nr_parts); | ||
1162 | } | ||
1163 | |||
1164 | return add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts); | ||
1165 | #else | ||
1166 | return 0; | ||
1167 | #endif | ||
1168 | } | ||
1169 | |||
1233 | #ifdef CONFIG_PM | 1170 | #ifdef CONFIG_PM |
1234 | static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) | 1171 | static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) |
1235 | { | 1172 | { |
1236 | struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev); | 1173 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); |
1237 | struct pxa3xx_nand_info *info = mtd->priv; | 1174 | struct mtd_info *mtd = info->mtd; |
1238 | 1175 | ||
1239 | if (info->state != STATE_READY) { | 1176 | if (info->state) { |
1240 | dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); | 1177 | dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); |
1241 | return -EAGAIN; | 1178 | return -EAGAIN; |
1242 | } | 1179 | } |
@@ -1246,8 +1183,8 @@ static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) | |||
1246 | 1183 | ||
1247 | static int pxa3xx_nand_resume(struct platform_device *pdev) | 1184 | static int pxa3xx_nand_resume(struct platform_device *pdev) |
1248 | { | 1185 | { |
1249 | struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev); | 1186 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); |
1250 | struct pxa3xx_nand_info *info = mtd->priv; | 1187 | struct mtd_info *mtd = info->mtd; |
1251 | 1188 | ||
1252 | nand_writel(info, NDTR0CS0, info->ndtr0cs0); | 1189 | nand_writel(info, NDTR0CS0, info->ndtr0cs0); |
1253 | nand_writel(info, NDTR1CS0, info->ndtr1cs0); | 1190 | nand_writel(info, NDTR1CS0, info->ndtr1cs0); |
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 14a49abe057e..f591f615d3f6 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c | |||
@@ -629,6 +629,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
629 | { | 629 | { |
630 | struct omap_onenand_platform_data *pdata; | 630 | struct omap_onenand_platform_data *pdata; |
631 | struct omap2_onenand *c; | 631 | struct omap2_onenand *c; |
632 | struct onenand_chip *this; | ||
632 | int r; | 633 | int r; |
633 | 634 | ||
634 | pdata = pdev->dev.platform_data; | 635 | pdata = pdev->dev.platform_data; |
@@ -726,9 +727,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
726 | 727 | ||
727 | c->mtd.dev.parent = &pdev->dev; | 728 | c->mtd.dev.parent = &pdev->dev; |
728 | 729 | ||
730 | this = &c->onenand; | ||
729 | if (c->dma_channel >= 0) { | 731 | if (c->dma_channel >= 0) { |
730 | struct onenand_chip *this = &c->onenand; | ||
731 | |||
732 | this->wait = omap2_onenand_wait; | 732 | this->wait = omap2_onenand_wait; |
733 | if (cpu_is_omap34xx()) { | 733 | if (cpu_is_omap34xx()) { |
734 | this->read_bufferram = omap3_onenand_read_bufferram; | 734 | this->read_bufferram = omap3_onenand_read_bufferram; |
@@ -749,6 +749,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
749 | c->onenand.disable = omap2_onenand_disable; | 749 | c->onenand.disable = omap2_onenand_disable; |
750 | } | 750 | } |
751 | 751 | ||
752 | if (pdata->skip_initial_unlocking) | ||
753 | this->options |= ONENAND_SKIP_INITIAL_UNLOCKING; | ||
754 | |||
752 | if ((r = onenand_scan(&c->mtd, 1)) < 0) | 755 | if ((r = onenand_scan(&c->mtd, 1)) < 0) |
753 | goto err_release_regulator; | 756 | goto err_release_regulator; |
754 | 757 | ||
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index bac41caa8df7..56a8b2005bda 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c | |||
@@ -1132,6 +1132,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from, | |||
1132 | onenand_update_bufferram(mtd, from, !ret); | 1132 | onenand_update_bufferram(mtd, from, !ret); |
1133 | if (ret == -EBADMSG) | 1133 | if (ret == -EBADMSG) |
1134 | ret = 0; | 1134 | ret = 0; |
1135 | if (ret) | ||
1136 | break; | ||
1135 | } | 1137 | } |
1136 | 1138 | ||
1137 | this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen); | 1139 | this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen); |
@@ -1646,11 +1648,10 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, | |||
1646 | int ret = 0; | 1648 | int ret = 0; |
1647 | int thislen, column; | 1649 | int thislen, column; |
1648 | 1650 | ||
1651 | column = addr & (this->writesize - 1); | ||
1652 | |||
1649 | while (len != 0) { | 1653 | while (len != 0) { |
1650 | thislen = min_t(int, this->writesize, len); | 1654 | thislen = min_t(int, this->writesize - column, len); |
1651 | column = addr & (this->writesize - 1); | ||
1652 | if (column + thislen > this->writesize) | ||
1653 | thislen = this->writesize - column; | ||
1654 | 1655 | ||
1655 | this->command(mtd, ONENAND_CMD_READ, addr, this->writesize); | 1656 | this->command(mtd, ONENAND_CMD_READ, addr, this->writesize); |
1656 | 1657 | ||
@@ -1664,12 +1665,13 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, | |||
1664 | 1665 | ||
1665 | this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize); | 1666 | this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize); |
1666 | 1667 | ||
1667 | if (memcmp(buf, this->verify_buf, thislen)) | 1668 | if (memcmp(buf, this->verify_buf + column, thislen)) |
1668 | return -EBADMSG; | 1669 | return -EBADMSG; |
1669 | 1670 | ||
1670 | len -= thislen; | 1671 | len -= thislen; |
1671 | buf += thislen; | 1672 | buf += thislen; |
1672 | addr += thislen; | 1673 | addr += thislen; |
1674 | column = 0; | ||
1673 | } | 1675 | } |
1674 | 1676 | ||
1675 | return 0; | 1677 | return 0; |
@@ -4083,7 +4085,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) | |||
4083 | mtd->writebufsize = mtd->writesize; | 4085 | mtd->writebufsize = mtd->writesize; |
4084 | 4086 | ||
4085 | /* Unlock whole block */ | 4087 | /* Unlock whole block */ |
4086 | this->unlock_all(mtd); | 4088 | if (!(this->options & ONENAND_SKIP_INITIAL_UNLOCKING)) |
4089 | this->unlock_all(mtd); | ||
4087 | 4090 | ||
4088 | ret = this->scan_bbt(mtd); | 4091 | ret = this->scan_bbt(mtd); |
4089 | if ((!FLEXONENAND(this)) || ret) | 4092 | if ((!FLEXONENAND(this)) || ret) |
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index ac0d6a8613b5..2b0daae4018d 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c | |||
@@ -64,12 +64,16 @@ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) | |||
64 | SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET); | 64 | SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET); |
65 | 65 | ||
66 | char *vendor = kmalloc(vendor_len, GFP_KERNEL); | 66 | char *vendor = kmalloc(vendor_len, GFP_KERNEL); |
67 | if (!vendor) | ||
68 | goto error1; | ||
67 | memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len); | 69 | memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len); |
68 | vendor[vendor_len] = 0; | 70 | vendor[vendor_len] = 0; |
69 | 71 | ||
70 | /* Initialize sysfs attributes */ | 72 | /* Initialize sysfs attributes */ |
71 | vendor_attribute = | 73 | vendor_attribute = |
72 | kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL); | 74 | kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL); |
75 | if (!vendor_attribute) | ||
76 | goto error2; | ||
73 | 77 | ||
74 | sysfs_attr_init(&vendor_attribute->dev_attr.attr); | 78 | sysfs_attr_init(&vendor_attribute->dev_attr.attr); |
75 | 79 | ||
@@ -83,12 +87,24 @@ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) | |||
83 | /* Create array of pointers to the attributes */ | 87 | /* Create array of pointers to the attributes */ |
84 | attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1), | 88 | attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1), |
85 | GFP_KERNEL); | 89 | GFP_KERNEL); |
90 | if (!attributes) | ||
91 | goto error3; | ||
86 | attributes[0] = &vendor_attribute->dev_attr.attr; | 92 | attributes[0] = &vendor_attribute->dev_attr.attr; |
87 | 93 | ||
88 | /* Finally create the attribute group */ | 94 | /* Finally create the attribute group */ |
89 | attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); | 95 | attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); |
96 | if (!attr_group) | ||
97 | goto error4; | ||
90 | attr_group->attrs = attributes; | 98 | attr_group->attrs = attributes; |
91 | return attr_group; | 99 | return attr_group; |
100 | error4: | ||
101 | kfree(attributes); | ||
102 | error3: | ||
103 | kfree(vendor_attribute); | ||
104 | error2: | ||
105 | kfree(vendor); | ||
106 | error1: | ||
107 | return NULL; | ||
92 | } | 108 | } |
93 | 109 | ||
94 | void sm_delete_sysfs_attributes(struct sm_ftl *ftl) | 110 | void sm_delete_sysfs_attributes(struct sm_ftl *ftl) |
@@ -1178,6 +1194,8 @@ static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
1178 | } | 1194 | } |
1179 | 1195 | ||
1180 | ftl->disk_attributes = sm_create_sysfs_attributes(ftl); | 1196 | ftl->disk_attributes = sm_create_sysfs_attributes(ftl); |
1197 | if (!ftl->disk_attributes) | ||
1198 | goto error6; | ||
1181 | trans->disk_attributes = ftl->disk_attributes; | 1199 | trans->disk_attributes = ftl->disk_attributes; |
1182 | 1200 | ||
1183 | sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d", | 1201 | sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d", |
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c index 161feeb7b8b9..627d4e2466a3 100644 --- a/drivers/mtd/tests/mtd_speedtest.c +++ b/drivers/mtd/tests/mtd_speedtest.c | |||
@@ -16,7 +16,7 @@ | |||
16 | * | 16 | * |
17 | * Test read and write speed of a MTD device. | 17 | * Test read and write speed of a MTD device. |
18 | * | 18 | * |
19 | * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> | 19 | * Author: Adrian Hunter <adrian.hunter@nokia.com> |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
@@ -33,6 +33,11 @@ static int dev; | |||
33 | module_param(dev, int, S_IRUGO); | 33 | module_param(dev, int, S_IRUGO); |
34 | MODULE_PARM_DESC(dev, "MTD device number to use"); | 34 | MODULE_PARM_DESC(dev, "MTD device number to use"); |
35 | 35 | ||
36 | static int count; | ||
37 | module_param(count, int, S_IRUGO); | ||
38 | MODULE_PARM_DESC(count, "Maximum number of eraseblocks to use " | ||
39 | "(0 means use all)"); | ||
40 | |||
36 | static struct mtd_info *mtd; | 41 | static struct mtd_info *mtd; |
37 | static unsigned char *iobuf; | 42 | static unsigned char *iobuf; |
38 | static unsigned char *bbt; | 43 | static unsigned char *bbt; |
@@ -89,6 +94,33 @@ static int erase_eraseblock(int ebnum) | |||
89 | return 0; | 94 | return 0; |
90 | } | 95 | } |
91 | 96 | ||
97 | static int multiblock_erase(int ebnum, int blocks) | ||
98 | { | ||
99 | int err; | ||
100 | struct erase_info ei; | ||
101 | loff_t addr = ebnum * mtd->erasesize; | ||
102 | |||
103 | memset(&ei, 0, sizeof(struct erase_info)); | ||
104 | ei.mtd = mtd; | ||
105 | ei.addr = addr; | ||
106 | ei.len = mtd->erasesize * blocks; | ||
107 | |||
108 | err = mtd->erase(mtd, &ei); | ||
109 | if (err) { | ||
110 | printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n", | ||
111 | err, ebnum, blocks); | ||
112 | return err; | ||
113 | } | ||
114 | |||
115 | if (ei.state == MTD_ERASE_FAILED) { | ||
116 | printk(PRINT_PREF "some erase error occurred at EB %d," | ||
117 | "blocks %d\n", ebnum, blocks); | ||
118 | return -EIO; | ||
119 | } | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | |||
92 | static int erase_whole_device(void) | 124 | static int erase_whole_device(void) |
93 | { | 125 | { |
94 | int err; | 126 | int err; |
@@ -282,13 +314,16 @@ static inline void stop_timing(void) | |||
282 | 314 | ||
283 | static long calc_speed(void) | 315 | static long calc_speed(void) |
284 | { | 316 | { |
285 | long ms, k, speed; | 317 | uint64_t k; |
318 | long ms; | ||
286 | 319 | ||
287 | ms = (finish.tv_sec - start.tv_sec) * 1000 + | 320 | ms = (finish.tv_sec - start.tv_sec) * 1000 + |
288 | (finish.tv_usec - start.tv_usec) / 1000; | 321 | (finish.tv_usec - start.tv_usec) / 1000; |
289 | k = goodebcnt * mtd->erasesize / 1024; | 322 | if (ms == 0) |
290 | speed = (k * 1000) / ms; | 323 | return 0; |
291 | return speed; | 324 | k = goodebcnt * (mtd->erasesize / 1024) * 1000; |
325 | do_div(k, ms); | ||
326 | return k; | ||
292 | } | 327 | } |
293 | 328 | ||
294 | static int scan_for_bad_eraseblocks(void) | 329 | static int scan_for_bad_eraseblocks(void) |
@@ -320,13 +355,16 @@ out: | |||
320 | 355 | ||
321 | static int __init mtd_speedtest_init(void) | 356 | static int __init mtd_speedtest_init(void) |
322 | { | 357 | { |
323 | int err, i; | 358 | int err, i, blocks, j, k; |
324 | long speed; | 359 | long speed; |
325 | uint64_t tmp; | 360 | uint64_t tmp; |
326 | 361 | ||
327 | printk(KERN_INFO "\n"); | 362 | printk(KERN_INFO "\n"); |
328 | printk(KERN_INFO "=================================================\n"); | 363 | printk(KERN_INFO "=================================================\n"); |
329 | printk(PRINT_PREF "MTD device: %d\n", dev); | 364 | if (count) |
365 | printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count); | ||
366 | else | ||
367 | printk(PRINT_PREF "MTD device: %d\n", dev); | ||
330 | 368 | ||
331 | mtd = get_mtd_device(NULL, dev); | 369 | mtd = get_mtd_device(NULL, dev); |
332 | if (IS_ERR(mtd)) { | 370 | if (IS_ERR(mtd)) { |
@@ -353,6 +391,9 @@ static int __init mtd_speedtest_init(void) | |||
353 | (unsigned long long)mtd->size, mtd->erasesize, | 391 | (unsigned long long)mtd->size, mtd->erasesize, |
354 | pgsize, ebcnt, pgcnt, mtd->oobsize); | 392 | pgsize, ebcnt, pgcnt, mtd->oobsize); |
355 | 393 | ||
394 | if (count > 0 && count < ebcnt) | ||
395 | ebcnt = count; | ||
396 | |||
356 | err = -ENOMEM; | 397 | err = -ENOMEM; |
357 | iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); | 398 | iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); |
358 | if (!iobuf) { | 399 | if (!iobuf) { |
@@ -484,6 +525,31 @@ static int __init mtd_speedtest_init(void) | |||
484 | speed = calc_speed(); | 525 | speed = calc_speed(); |
485 | printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed); | 526 | printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed); |
486 | 527 | ||
528 | /* Multi-block erase all eraseblocks */ | ||
529 | for (k = 1; k < 7; k++) { | ||
530 | blocks = 1 << k; | ||
531 | printk(PRINT_PREF "Testing %dx multi-block erase speed\n", | ||
532 | blocks); | ||
533 | start_timing(); | ||
534 | for (i = 0; i < ebcnt; ) { | ||
535 | for (j = 0; j < blocks && (i + j) < ebcnt; j++) | ||
536 | if (bbt[i + j]) | ||
537 | break; | ||
538 | if (j < 1) { | ||
539 | i++; | ||
540 | continue; | ||
541 | } | ||
542 | err = multiblock_erase(i, j); | ||
543 | if (err) | ||
544 | goto out; | ||
545 | cond_resched(); | ||
546 | i += j; | ||
547 | } | ||
548 | stop_timing(); | ||
549 | speed = calc_speed(); | ||
550 | printk(PRINT_PREF "%dx multi-block erase speed is %ld KiB/s\n", | ||
551 | blocks, speed); | ||
552 | } | ||
487 | printk(PRINT_PREF "finished\n"); | 553 | printk(PRINT_PREF "finished\n"); |
488 | out: | 554 | out: |
489 | kfree(iobuf); | 555 | kfree(iobuf); |
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c index 11204e8aab5f..334eae53a3db 100644 --- a/drivers/mtd/tests/mtd_subpagetest.c +++ b/drivers/mtd/tests/mtd_subpagetest.c | |||
@@ -394,6 +394,11 @@ static int __init mtd_subpagetest_init(void) | |||
394 | } | 394 | } |
395 | 395 | ||
396 | subpgsize = mtd->writesize >> mtd->subpage_sft; | 396 | subpgsize = mtd->writesize >> mtd->subpage_sft; |
397 | tmp = mtd->size; | ||
398 | do_div(tmp, mtd->erasesize); | ||
399 | ebcnt = tmp; | ||
400 | pgcnt = mtd->erasesize / mtd->writesize; | ||
401 | |||
397 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | 402 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " |
398 | "page size %u, subpage size %u, count of eraseblocks %u, " | 403 | "page size %u, subpage size %u, count of eraseblocks %u, " |
399 | "pages per eraseblock %u, OOB size %u\n", | 404 | "pages per eraseblock %u, OOB size %u\n", |
@@ -413,11 +418,6 @@ static int __init mtd_subpagetest_init(void) | |||
413 | goto out; | 418 | goto out; |
414 | } | 419 | } |
415 | 420 | ||
416 | tmp = mtd->size; | ||
417 | do_div(tmp, mtd->erasesize); | ||
418 | ebcnt = tmp; | ||
419 | pgcnt = mtd->erasesize / mtd->writesize; | ||
420 | |||
421 | err = scan_for_bad_eraseblocks(); | 421 | err = scan_for_bad_eraseblocks(); |
422 | if (err) | 422 | if (err) |
423 | goto out; | 423 | goto out; |
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 22abfb39d813..68d45ba2d9b9 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
@@ -1237,8 +1237,17 @@ static int bfin_mac_enable(struct phy_device *phydev) | |||
1237 | 1237 | ||
1238 | if (phydev->interface == PHY_INTERFACE_MODE_RMII) { | 1238 | if (phydev->interface == PHY_INTERFACE_MODE_RMII) { |
1239 | opmode |= RMII; /* For Now only 100MBit are supported */ | 1239 | opmode |= RMII; /* For Now only 100MBit are supported */ |
1240 | #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2 | 1240 | #if defined(CONFIG_BF537) || defined(CONFIG_BF536) |
1241 | opmode |= TE; | 1241 | if (__SILICON_REVISION__ < 3) { |
1242 | /* | ||
1243 | * This isn't publicly documented (fun times!), but in | ||
1244 | * silicon <=0.2, the RX and TX pins are clocked together. | ||
1245 | * So in order to recv, we must enable the transmit side | ||
1246 | * as well. This will cause a spurious TX interrupt too, | ||
1247 | * but we can easily consume that. | ||
1248 | */ | ||
1249 | opmode |= TE; | ||
1250 | } | ||
1242 | #endif | 1251 | #endif |
1243 | } | 1252 | } |
1244 | 1253 | ||
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index d1865cc97313..8e6d618b5305 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -8317,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = { | |||
8317 | #endif | 8317 | #endif |
8318 | }; | 8318 | }; |
8319 | 8319 | ||
8320 | static void inline vlan_features_add(struct net_device *dev, u32 flags) | 8320 | static inline void vlan_features_add(struct net_device *dev, u32 flags) |
8321 | { | 8321 | { |
8322 | dev->vlan_features |= flags; | 8322 | dev->vlan_features |= flags; |
8323 | } | 8323 | } |
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 110eda01843c..31552959aed7 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
@@ -588,14 +588,9 @@ static void c_can_chip_config(struct net_device *dev) | |||
588 | { | 588 | { |
589 | struct c_can_priv *priv = netdev_priv(dev); | 589 | struct c_can_priv *priv = netdev_priv(dev); |
590 | 590 | ||
591 | if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) | 591 | /* enable automatic retransmission */ |
592 | /* disable automatic retransmission */ | 592 | priv->write_reg(priv, &priv->regs->control, |
593 | priv->write_reg(priv, &priv->regs->control, | 593 | CONTROL_ENABLE_AR); |
594 | CONTROL_DISABLE_AR); | ||
595 | else | ||
596 | /* enable automatic retransmission */ | ||
597 | priv->write_reg(priv, &priv->regs->control, | ||
598 | CONTROL_ENABLE_AR); | ||
599 | 594 | ||
600 | if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY & | 595 | if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY & |
601 | CAN_CTRLMODE_LOOPBACK)) { | 596 | CAN_CTRLMODE_LOOPBACK)) { |
@@ -704,7 +699,6 @@ static void c_can_do_tx(struct net_device *dev) | |||
704 | 699 | ||
705 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { | 700 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { |
706 | msg_obj_no = get_tx_echo_msg_obj(priv); | 701 | msg_obj_no = get_tx_echo_msg_obj(priv); |
707 | c_can_inval_msg_object(dev, 0, msg_obj_no); | ||
708 | val = c_can_read_reg32(priv, &priv->regs->txrqst1); | 702 | val = c_can_read_reg32(priv, &priv->regs->txrqst1); |
709 | if (!(val & (1 << msg_obj_no))) { | 703 | if (!(val & (1 << msg_obj_no))) { |
710 | can_get_echo_skb(dev, | 704 | can_get_echo_skb(dev, |
@@ -713,6 +707,7 @@ static void c_can_do_tx(struct net_device *dev) | |||
713 | &priv->regs->ifregs[0].msg_cntrl) | 707 | &priv->regs->ifregs[0].msg_cntrl) |
714 | & IF_MCONT_DLC_MASK; | 708 | & IF_MCONT_DLC_MASK; |
715 | stats->tx_packets++; | 709 | stats->tx_packets++; |
710 | c_can_inval_msg_object(dev, 0, msg_obj_no); | ||
716 | } | 711 | } |
717 | } | 712 | } |
718 | 713 | ||
@@ -1112,8 +1107,7 @@ struct net_device *alloc_c_can_dev(void) | |||
1112 | priv->can.bittiming_const = &c_can_bittiming_const; | 1107 | priv->can.bittiming_const = &c_can_bittiming_const; |
1113 | priv->can.do_set_mode = c_can_set_mode; | 1108 | priv->can.do_set_mode = c_can_set_mode; |
1114 | priv->can.do_get_berr_counter = c_can_get_berr_counter; | 1109 | priv->can.do_get_berr_counter = c_can_get_berr_counter; |
1115 | priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT | | 1110 | priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | |
1116 | CAN_CTRLMODE_LOOPBACK | | ||
1117 | CAN_CTRLMODE_LISTENONLY | | 1111 | CAN_CTRLMODE_LISTENONLY | |
1118 | CAN_CTRLMODE_BERR_REPORTING; | 1112 | CAN_CTRLMODE_BERR_REPORTING; |
1119 | 1113 | ||
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index e629b961ae2d..cc90824f2c9c 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c | |||
@@ -73,7 +73,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) | |||
73 | void __iomem *addr; | 73 | void __iomem *addr; |
74 | struct net_device *dev; | 74 | struct net_device *dev; |
75 | struct c_can_priv *priv; | 75 | struct c_can_priv *priv; |
76 | struct resource *mem, *irq; | 76 | struct resource *mem; |
77 | int irq; | ||
77 | #ifdef CONFIG_HAVE_CLK | 78 | #ifdef CONFIG_HAVE_CLK |
78 | struct clk *clk; | 79 | struct clk *clk; |
79 | 80 | ||
@@ -88,8 +89,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) | |||
88 | 89 | ||
89 | /* get the platform data */ | 90 | /* get the platform data */ |
90 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 91 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
91 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 92 | irq = platform_get_irq(pdev, 0); |
92 | if (!mem || (irq <= 0)) { | 93 | if (!mem || irq <= 0) { |
93 | ret = -ENODEV; | 94 | ret = -ENODEV; |
94 | goto exit_free_clk; | 95 | goto exit_free_clk; |
95 | } | 96 | } |
@@ -117,7 +118,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) | |||
117 | 118 | ||
118 | priv = netdev_priv(dev); | 119 | priv = netdev_priv(dev); |
119 | 120 | ||
120 | dev->irq = irq->start; | 121 | dev->irq = irq; |
121 | priv->regs = addr; | 122 | priv->regs = addr; |
122 | #ifdef CONFIG_HAVE_CLK | 123 | #ifdef CONFIG_HAVE_CLK |
123 | priv->can.clock.freq = clk_get_rate(clk); | 124 | priv->can.clock.freq = clk_get_rate(clk); |
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 4d538a4e9d55..910893143295 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c | |||
@@ -1983,14 +1983,20 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | |||
1983 | { | 1983 | { |
1984 | struct port_info *pi = netdev_priv(dev); | 1984 | struct port_info *pi = netdev_priv(dev); |
1985 | struct adapter *adapter = pi->adapter; | 1985 | struct adapter *adapter = pi->adapter; |
1986 | struct qset_params *qsp = &adapter->params.sge.qset[0]; | 1986 | struct qset_params *qsp; |
1987 | struct sge_qset *qs = &adapter->sge.qs[0]; | 1987 | struct sge_qset *qs; |
1988 | int i; | ||
1988 | 1989 | ||
1989 | if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) | 1990 | if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) |
1990 | return -EINVAL; | 1991 | return -EINVAL; |
1991 | 1992 | ||
1992 | qsp->coalesce_usecs = c->rx_coalesce_usecs; | 1993 | for (i = 0; i < pi->nqsets; i++) { |
1993 | t3_update_qset_coalesce(qs, qsp); | 1994 | qsp = &adapter->params.sge.qset[i]; |
1995 | qs = &adapter->sge.qs[i]; | ||
1996 | qsp->coalesce_usecs = c->rx_coalesce_usecs; | ||
1997 | t3_update_qset_coalesce(qs, qsp); | ||
1998 | } | ||
1999 | |||
1994 | return 0; | 2000 | return 0; |
1995 | } | 2001 | } |
1996 | 2002 | ||
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 317708113601..b7af5bab9937 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -621,9 +621,9 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) | |||
621 | /* change in wol state, update IRQ state */ | 621 | /* change in wol state, update IRQ state */ |
622 | 622 | ||
623 | if (!dm->wake_state) | 623 | if (!dm->wake_state) |
624 | set_irq_wake(dm->irq_wake, 1); | 624 | irq_set_irq_wake(dm->irq_wake, 1); |
625 | else if (dm->wake_state & !opts) | 625 | else if (dm->wake_state & !opts) |
626 | set_irq_wake(dm->irq_wake, 0); | 626 | irq_set_irq_wake(dm->irq_wake, 0); |
627 | } | 627 | } |
628 | 628 | ||
629 | dm->wake_state = opts; | 629 | dm->wake_state = opts; |
@@ -1424,13 +1424,13 @@ dm9000_probe(struct platform_device *pdev) | |||
1424 | } else { | 1424 | } else { |
1425 | 1425 | ||
1426 | /* test to see if irq is really wakeup capable */ | 1426 | /* test to see if irq is really wakeup capable */ |
1427 | ret = set_irq_wake(db->irq_wake, 1); | 1427 | ret = irq_set_irq_wake(db->irq_wake, 1); |
1428 | if (ret) { | 1428 | if (ret) { |
1429 | dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", | 1429 | dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", |
1430 | db->irq_wake, ret); | 1430 | db->irq_wake, ret); |
1431 | ret = 0; | 1431 | ret = 0; |
1432 | } else { | 1432 | } else { |
1433 | set_irq_wake(db->irq_wake, 0); | 1433 | irq_set_irq_wake(db->irq_wake, 0); |
1434 | db->wake_supported = 1; | 1434 | db->wake_supported = 1; |
1435 | } | 1435 | } |
1436 | } | 1436 | } |
diff --git a/drivers/net/jme.c b/drivers/net/jme.c index f690474f4409..994c80939c7a 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c | |||
@@ -273,7 +273,7 @@ jme_clear_pm(struct jme_adapter *jme) | |||
273 | { | 273 | { |
274 | jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); | 274 | jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); |
275 | pci_set_power_state(jme->pdev, PCI_D0); | 275 | pci_set_power_state(jme->pdev, PCI_D0); |
276 | pci_enable_wake(jme->pdev, PCI_D0, false); | 276 | device_set_wakeup_enable(&jme->pdev->dev, false); |
277 | } | 277 | } |
278 | 278 | ||
279 | static int | 279 | static int |
@@ -2538,6 +2538,8 @@ jme_set_wol(struct net_device *netdev, | |||
2538 | 2538 | ||
2539 | jwrite32(jme, JME_PMCS, jme->reg_pmcs); | 2539 | jwrite32(jme, JME_PMCS, jme->reg_pmcs); |
2540 | 2540 | ||
2541 | device_set_wakeup_enable(&jme->pdev->dev, jme->reg_pmcs); | ||
2542 | |||
2541 | return 0; | 2543 | return 0; |
2542 | } | 2544 | } |
2543 | 2545 | ||
@@ -3172,9 +3174,9 @@ jme_shutdown(struct pci_dev *pdev) | |||
3172 | } | 3174 | } |
3173 | 3175 | ||
3174 | #ifdef CONFIG_PM | 3176 | #ifdef CONFIG_PM |
3175 | static int | 3177 | static int jme_suspend(struct device *dev) |
3176 | jme_suspend(struct pci_dev *pdev, pm_message_t state) | ||
3177 | { | 3178 | { |
3179 | struct pci_dev *pdev = to_pci_dev(dev); | ||
3178 | struct net_device *netdev = pci_get_drvdata(pdev); | 3180 | struct net_device *netdev = pci_get_drvdata(pdev); |
3179 | struct jme_adapter *jme = netdev_priv(netdev); | 3181 | struct jme_adapter *jme = netdev_priv(netdev); |
3180 | 3182 | ||
@@ -3206,22 +3208,18 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3206 | tasklet_hi_enable(&jme->rxclean_task); | 3208 | tasklet_hi_enable(&jme->rxclean_task); |
3207 | tasklet_hi_enable(&jme->rxempty_task); | 3209 | tasklet_hi_enable(&jme->rxempty_task); |
3208 | 3210 | ||
3209 | pci_save_state(pdev); | ||
3210 | jme_powersave_phy(jme); | 3211 | jme_powersave_phy(jme); |
3211 | pci_enable_wake(jme->pdev, PCI_D3hot, true); | ||
3212 | pci_set_power_state(pdev, PCI_D3hot); | ||
3213 | 3212 | ||
3214 | return 0; | 3213 | return 0; |
3215 | } | 3214 | } |
3216 | 3215 | ||
3217 | static int | 3216 | static int jme_resume(struct device *dev) |
3218 | jme_resume(struct pci_dev *pdev) | ||
3219 | { | 3217 | { |
3218 | struct pci_dev *pdev = to_pci_dev(dev); | ||
3220 | struct net_device *netdev = pci_get_drvdata(pdev); | 3219 | struct net_device *netdev = pci_get_drvdata(pdev); |
3221 | struct jme_adapter *jme = netdev_priv(netdev); | 3220 | struct jme_adapter *jme = netdev_priv(netdev); |
3222 | 3221 | ||
3223 | jme_clear_pm(jme); | 3222 | jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); |
3224 | pci_restore_state(pdev); | ||
3225 | 3223 | ||
3226 | jme_phy_on(jme); | 3224 | jme_phy_on(jme); |
3227 | if (test_bit(JME_FLAG_SSET, &jme->flags)) | 3225 | if (test_bit(JME_FLAG_SSET, &jme->flags)) |
@@ -3238,6 +3236,13 @@ jme_resume(struct pci_dev *pdev) | |||
3238 | 3236 | ||
3239 | return 0; | 3237 | return 0; |
3240 | } | 3238 | } |
3239 | |||
3240 | static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume); | ||
3241 | #define JME_PM_OPS (&jme_pm_ops) | ||
3242 | |||
3243 | #else | ||
3244 | |||
3245 | #define JME_PM_OPS NULL | ||
3241 | #endif | 3246 | #endif |
3242 | 3247 | ||
3243 | static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { | 3248 | static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { |
@@ -3251,11 +3256,8 @@ static struct pci_driver jme_driver = { | |||
3251 | .id_table = jme_pci_tbl, | 3256 | .id_table = jme_pci_tbl, |
3252 | .probe = jme_init_one, | 3257 | .probe = jme_init_one, |
3253 | .remove = __devexit_p(jme_remove_one), | 3258 | .remove = __devexit_p(jme_remove_one), |
3254 | #ifdef CONFIG_PM | ||
3255 | .suspend = jme_suspend, | ||
3256 | .resume = jme_resume, | ||
3257 | #endif /* CONFIG_PM */ | ||
3258 | .shutdown = jme_shutdown, | 3259 | .shutdown = jme_shutdown, |
3260 | .driver.pm = JME_PM_OPS, | ||
3259 | }; | 3261 | }; |
3260 | 3262 | ||
3261 | static int __init | 3263 | static int __init |
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c index 540a8dcbcc46..7f7d5708a658 100644 --- a/drivers/net/ksz884x.c +++ b/drivers/net/ksz884x.c | |||
@@ -4898,7 +4898,7 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev) | |||
4898 | goto unlock; | 4898 | goto unlock; |
4899 | } | 4899 | } |
4900 | skb_copy_and_csum_dev(org_skb, skb->data); | 4900 | skb_copy_and_csum_dev(org_skb, skb->data); |
4901 | org_skb->ip_summed = 0; | 4901 | org_skb->ip_summed = CHECKSUM_NONE; |
4902 | skb->len = org_skb->len; | 4902 | skb->len = org_skb->len; |
4903 | copy_old_skb(org_skb, skb); | 4903 | copy_old_skb(org_skb, skb); |
4904 | } | 4904 | } |
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 5762ebde4455..4f158baa0246 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -742,6 +742,9 @@ int mlx4_en_start_port(struct net_device *dev) | |||
742 | 0, MLX4_PROT_ETH)) | 742 | 0, MLX4_PROT_ETH)) |
743 | mlx4_warn(mdev, "Failed Attaching Broadcast\n"); | 743 | mlx4_warn(mdev, "Failed Attaching Broadcast\n"); |
744 | 744 | ||
745 | /* Must redo promiscuous mode setup. */ | ||
746 | priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); | ||
747 | |||
745 | /* Schedule multicast task to populate multicast list */ | 748 | /* Schedule multicast task to populate multicast list */ |
746 | queue_work(mdev->workqueue, &priv->mcast_task); | 749 | queue_work(mdev->workqueue, &priv->mcast_task); |
747 | 750 | ||
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 1f4e8680a96a..673dc600c891 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -1312,17 +1312,26 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev, | |||
1312 | * page into an skb */ | 1312 | * page into an skb */ |
1313 | 1313 | ||
1314 | static inline int | 1314 | static inline int |
1315 | myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, | 1315 | myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum, |
1316 | int bytes, int len, __wsum csum) | 1316 | int lro_enabled) |
1317 | { | 1317 | { |
1318 | struct myri10ge_priv *mgp = ss->mgp; | 1318 | struct myri10ge_priv *mgp = ss->mgp; |
1319 | struct sk_buff *skb; | 1319 | struct sk_buff *skb; |
1320 | struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; | 1320 | struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; |
1321 | int i, idx, hlen, remainder; | 1321 | struct myri10ge_rx_buf *rx; |
1322 | int i, idx, hlen, remainder, bytes; | ||
1322 | struct pci_dev *pdev = mgp->pdev; | 1323 | struct pci_dev *pdev = mgp->pdev; |
1323 | struct net_device *dev = mgp->dev; | 1324 | struct net_device *dev = mgp->dev; |
1324 | u8 *va; | 1325 | u8 *va; |
1325 | 1326 | ||
1327 | if (len <= mgp->small_bytes) { | ||
1328 | rx = &ss->rx_small; | ||
1329 | bytes = mgp->small_bytes; | ||
1330 | } else { | ||
1331 | rx = &ss->rx_big; | ||
1332 | bytes = mgp->big_bytes; | ||
1333 | } | ||
1334 | |||
1326 | len += MXGEFW_PAD; | 1335 | len += MXGEFW_PAD; |
1327 | idx = rx->cnt & rx->mask; | 1336 | idx = rx->cnt & rx->mask; |
1328 | va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; | 1337 | va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; |
@@ -1341,7 +1350,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, | |||
1341 | remainder -= MYRI10GE_ALLOC_SIZE; | 1350 | remainder -= MYRI10GE_ALLOC_SIZE; |
1342 | } | 1351 | } |
1343 | 1352 | ||
1344 | if (dev->features & NETIF_F_LRO) { | 1353 | if (lro_enabled) { |
1345 | rx_frags[0].page_offset += MXGEFW_PAD; | 1354 | rx_frags[0].page_offset += MXGEFW_PAD; |
1346 | rx_frags[0].size -= MXGEFW_PAD; | 1355 | rx_frags[0].size -= MXGEFW_PAD; |
1347 | len -= MXGEFW_PAD; | 1356 | len -= MXGEFW_PAD; |
@@ -1463,7 +1472,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) | |||
1463 | { | 1472 | { |
1464 | struct myri10ge_rx_done *rx_done = &ss->rx_done; | 1473 | struct myri10ge_rx_done *rx_done = &ss->rx_done; |
1465 | struct myri10ge_priv *mgp = ss->mgp; | 1474 | struct myri10ge_priv *mgp = ss->mgp; |
1466 | struct net_device *netdev = mgp->dev; | 1475 | |
1467 | unsigned long rx_bytes = 0; | 1476 | unsigned long rx_bytes = 0; |
1468 | unsigned long rx_packets = 0; | 1477 | unsigned long rx_packets = 0; |
1469 | unsigned long rx_ok; | 1478 | unsigned long rx_ok; |
@@ -1474,18 +1483,18 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) | |||
1474 | u16 length; | 1483 | u16 length; |
1475 | __wsum checksum; | 1484 | __wsum checksum; |
1476 | 1485 | ||
1486 | /* | ||
1487 | * Prevent compiler from generating more than one ->features memory | ||
1488 | * access to avoid theoretical race condition with functions that | ||
1489 | * change NETIF_F_LRO flag at runtime. | ||
1490 | */ | ||
1491 | bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO; | ||
1492 | |||
1477 | while (rx_done->entry[idx].length != 0 && work_done < budget) { | 1493 | while (rx_done->entry[idx].length != 0 && work_done < budget) { |
1478 | length = ntohs(rx_done->entry[idx].length); | 1494 | length = ntohs(rx_done->entry[idx].length); |
1479 | rx_done->entry[idx].length = 0; | 1495 | rx_done->entry[idx].length = 0; |
1480 | checksum = csum_unfold(rx_done->entry[idx].checksum); | 1496 | checksum = csum_unfold(rx_done->entry[idx].checksum); |
1481 | if (length <= mgp->small_bytes) | 1497 | rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled); |
1482 | rx_ok = myri10ge_rx_done(ss, &ss->rx_small, | ||
1483 | mgp->small_bytes, | ||
1484 | length, checksum); | ||
1485 | else | ||
1486 | rx_ok = myri10ge_rx_done(ss, &ss->rx_big, | ||
1487 | mgp->big_bytes, | ||
1488 | length, checksum); | ||
1489 | rx_packets += rx_ok; | 1498 | rx_packets += rx_ok; |
1490 | rx_bytes += rx_ok * (unsigned long)length; | 1499 | rx_bytes += rx_ok * (unsigned long)length; |
1491 | cnt++; | 1500 | cnt++; |
@@ -1497,7 +1506,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) | |||
1497 | ss->stats.rx_packets += rx_packets; | 1506 | ss->stats.rx_packets += rx_packets; |
1498 | ss->stats.rx_bytes += rx_bytes; | 1507 | ss->stats.rx_bytes += rx_bytes; |
1499 | 1508 | ||
1500 | if (netdev->features & NETIF_F_LRO) | 1509 | if (lro_enabled) |
1501 | lro_flush_all(&rx_done->lro_mgr); | 1510 | lro_flush_all(&rx_done->lro_mgr); |
1502 | 1511 | ||
1503 | /* restock receive rings if needed */ | 1512 | /* restock receive rings if needed */ |
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 653d308e0f5d..3bdcc803ec68 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c | |||
@@ -871,7 +871,7 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data) | |||
871 | struct netxen_adapter *adapter = netdev_priv(netdev); | 871 | struct netxen_adapter *adapter = netdev_priv(netdev); |
872 | int hw_lro; | 872 | int hw_lro; |
873 | 873 | ||
874 | if (data & ~ETH_FLAG_LRO) | 874 | if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) |
875 | return -EINVAL; | 875 | return -EINVAL; |
876 | 876 | ||
877 | if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) | 877 | if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) |
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c index 4c14510e2a87..45b2755d6cba 100644 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/qlcnic/qlcnic_ethtool.c | |||
@@ -1003,7 +1003,7 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data) | |||
1003 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 1003 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
1004 | int hw_lro; | 1004 | int hw_lro; |
1005 | 1005 | ||
1006 | if (data & ~ETH_FLAG_LRO) | 1006 | if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) |
1007 | return -EINVAL; | 1007 | return -EINVAL; |
1008 | 1008 | ||
1009 | if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) | 1009 | if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 2ad6364103ea..356e74d20b80 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -6726,7 +6726,7 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data) | |||
6726 | int rc = 0; | 6726 | int rc = 0; |
6727 | int changed = 0; | 6727 | int changed = 0; |
6728 | 6728 | ||
6729 | if (data & ~ETH_FLAG_LRO) | 6729 | if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO)) |
6730 | return -EINVAL; | 6730 | return -EINVAL; |
6731 | 6731 | ||
6732 | if (data & ETH_FLAG_LRO) { | 6732 | if (data & ETH_FLAG_LRO) { |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index ebec88882c3b..73c942d85f07 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -48,9 +48,9 @@ | |||
48 | #include <net/ip.h> | 48 | #include <net/ip.h> |
49 | 49 | ||
50 | #include <asm/system.h> | 50 | #include <asm/system.h> |
51 | #include <asm/io.h> | 51 | #include <linux/io.h> |
52 | #include <asm/byteorder.h> | 52 | #include <asm/byteorder.h> |
53 | #include <asm/uaccess.h> | 53 | #include <linux/uaccess.h> |
54 | 54 | ||
55 | #ifdef CONFIG_SPARC | 55 | #ifdef CONFIG_SPARC |
56 | #include <asm/idprom.h> | 56 | #include <asm/idprom.h> |
@@ -13118,7 +13118,7 @@ done: | |||
13118 | 13118 | ||
13119 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); | 13119 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); |
13120 | 13120 | ||
13121 | static void inline vlan_features_add(struct net_device *dev, unsigned long flags) | 13121 | static inline void vlan_features_add(struct net_device *dev, unsigned long flags) |
13122 | { | 13122 | { |
13123 | dev->vlan_features |= flags; | 13123 | dev->vlan_features |= flags; |
13124 | } | 13124 | } |
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 81254be85b92..51f2ef142a5b 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c | |||
@@ -304,8 +304,8 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) | |||
304 | u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; | 304 | u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; |
305 | unsigned long flags; | 305 | unsigned long flags; |
306 | 306 | ||
307 | if (data & ~ETH_FLAG_LRO) | 307 | if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) |
308 | return -EOPNOTSUPP; | 308 | return -EINVAL; |
309 | 309 | ||
310 | if (lro_requested ^ lro_present) { | 310 | if (lro_requested ^ lro_present) { |
311 | /* toggle the LRO feature*/ | 311 | /* toggle the LRO feature*/ |
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c index 1dd3a21b3a43..c5eb034107fd 100644 --- a/drivers/net/vxge/vxge-ethtool.c +++ b/drivers/net/vxge/vxge-ethtool.c | |||
@@ -1117,8 +1117,8 @@ static int vxge_set_flags(struct net_device *dev, u32 data) | |||
1117 | struct vxgedev *vdev = netdev_priv(dev); | 1117 | struct vxgedev *vdev = netdev_priv(dev); |
1118 | enum vxge_hw_status status; | 1118 | enum vxge_hw_status status; |
1119 | 1119 | ||
1120 | if (data & ~ETH_FLAG_RXHASH) | 1120 | if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH)) |
1121 | return -EOPNOTSUPP; | 1121 | return -EINVAL; |
1122 | 1122 | ||
1123 | if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) | 1123 | if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) |
1124 | return 0; | 1124 | return 0; |
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index 18d24b7b1e34..7ecc0bda57b3 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c | |||
@@ -649,8 +649,7 @@ static int __devinit p54spi_probe(struct spi_device *spi) | |||
649 | goto err_free_common; | 649 | goto err_free_common; |
650 | } | 650 | } |
651 | 651 | ||
652 | set_irq_type(gpio_to_irq(p54spi_gpio_irq), | 652 | irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING); |
653 | IRQ_TYPE_EDGE_RISING); | ||
654 | 653 | ||
655 | disable_irq(gpio_to_irq(p54spi_gpio_irq)); | 654 | disable_irq(gpio_to_irq(p54spi_gpio_irq)); |
656 | 655 | ||
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c index d550b5e68d3c..f51a0241a440 100644 --- a/drivers/net/wireless/wl1251/sdio.c +++ b/drivers/net/wireless/wl1251/sdio.c | |||
@@ -265,7 +265,7 @@ static int wl1251_sdio_probe(struct sdio_func *func, | |||
265 | goto disable; | 265 | goto disable; |
266 | } | 266 | } |
267 | 267 | ||
268 | set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); | 268 | irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); |
269 | disable_irq(wl->irq); | 269 | disable_irq(wl->irq); |
270 | 270 | ||
271 | wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; | 271 | wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; |
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c index ac872b38960f..af6448c4d3e2 100644 --- a/drivers/net/wireless/wl1251/spi.c +++ b/drivers/net/wireless/wl1251/spi.c | |||
@@ -286,7 +286,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi) | |||
286 | goto out_free; | 286 | goto out_free; |
287 | } | 287 | } |
288 | 288 | ||
289 | set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); | 289 | irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); |
290 | 290 | ||
291 | disable_irq(wl->irq); | 291 | disable_irq(wl->irq); |
292 | 292 | ||
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c index deeec32a5803..103095bbe8c0 100644 --- a/drivers/parisc/eisa.c +++ b/drivers/parisc/eisa.c | |||
@@ -340,7 +340,7 @@ static int __init eisa_probe(struct parisc_device *dev) | |||
340 | /* Reserve IRQ2 */ | 340 | /* Reserve IRQ2 */ |
341 | setup_irq(2, &irq2_action); | 341 | setup_irq(2, &irq2_action); |
342 | for (i = 0; i < 16; i++) { | 342 | for (i = 0; i < 16; i++) { |
343 | set_irq_chip_and_handler(i, &eisa_interrupt_type, | 343 | irq_set_chip_and_handler(i, &eisa_interrupt_type, |
344 | handle_simple_irq); | 344 | handle_simple_irq); |
345 | } | 345 | } |
346 | 346 | ||
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c index ef31080cf591..1bab5a2cd359 100644 --- a/drivers/parisc/gsc.c +++ b/drivers/parisc/gsc.c | |||
@@ -152,8 +152,8 @@ int gsc_assign_irq(struct irq_chip *type, void *data) | |||
152 | if (irq > GSC_IRQ_MAX) | 152 | if (irq > GSC_IRQ_MAX) |
153 | return NO_IRQ; | 153 | return NO_IRQ; |
154 | 154 | ||
155 | set_irq_chip_and_handler(irq, type, handle_simple_irq); | 155 | irq_set_chip_and_handler(irq, type, handle_simple_irq); |
156 | set_irq_chip_data(irq, data); | 156 | irq_set_chip_data(irq, data); |
157 | 157 | ||
158 | return irq++; | 158 | return irq++; |
159 | } | 159 | } |
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index a4d8ff66a639..e3b76d409dee 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c | |||
@@ -355,7 +355,8 @@ int superio_fixup_irq(struct pci_dev *pcidev) | |||
355 | #endif | 355 | #endif |
356 | 356 | ||
357 | for (i = 0; i < 16; i++) { | 357 | for (i = 0; i < 16; i++) { |
358 | set_irq_chip_and_handler(i, &superio_interrupt_type, handle_simple_irq); | 358 | irq_set_chip_and_handler(i, &superio_interrupt_type, |
359 | handle_simple_irq); | ||
359 | } | 360 | } |
360 | 361 | ||
361 | /* | 362 | /* |
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 09933eb9126b..12e02bf92c4a 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c | |||
@@ -1226,7 +1226,7 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) | |||
1226 | 1226 | ||
1227 | void dmar_msi_unmask(struct irq_data *data) | 1227 | void dmar_msi_unmask(struct irq_data *data) |
1228 | { | 1228 | { |
1229 | struct intel_iommu *iommu = irq_data_get_irq_data(data); | 1229 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); |
1230 | unsigned long flag; | 1230 | unsigned long flag; |
1231 | 1231 | ||
1232 | /* unmask it */ | 1232 | /* unmask it */ |
@@ -1240,7 +1240,7 @@ void dmar_msi_unmask(struct irq_data *data) | |||
1240 | void dmar_msi_mask(struct irq_data *data) | 1240 | void dmar_msi_mask(struct irq_data *data) |
1241 | { | 1241 | { |
1242 | unsigned long flag; | 1242 | unsigned long flag; |
1243 | struct intel_iommu *iommu = irq_data_get_irq_data(data); | 1243 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); |
1244 | 1244 | ||
1245 | /* mask it */ | 1245 | /* mask it */ |
1246 | spin_lock_irqsave(&iommu->register_lock, flag); | 1246 | spin_lock_irqsave(&iommu->register_lock, flag); |
@@ -1252,7 +1252,7 @@ void dmar_msi_mask(struct irq_data *data) | |||
1252 | 1252 | ||
1253 | void dmar_msi_write(int irq, struct msi_msg *msg) | 1253 | void dmar_msi_write(int irq, struct msi_msg *msg) |
1254 | { | 1254 | { |
1255 | struct intel_iommu *iommu = get_irq_data(irq); | 1255 | struct intel_iommu *iommu = irq_get_handler_data(irq); |
1256 | unsigned long flag; | 1256 | unsigned long flag; |
1257 | 1257 | ||
1258 | spin_lock_irqsave(&iommu->register_lock, flag); | 1258 | spin_lock_irqsave(&iommu->register_lock, flag); |
@@ -1264,7 +1264,7 @@ void dmar_msi_write(int irq, struct msi_msg *msg) | |||
1264 | 1264 | ||
1265 | void dmar_msi_read(int irq, struct msi_msg *msg) | 1265 | void dmar_msi_read(int irq, struct msi_msg *msg) |
1266 | { | 1266 | { |
1267 | struct intel_iommu *iommu = get_irq_data(irq); | 1267 | struct intel_iommu *iommu = irq_get_handler_data(irq); |
1268 | unsigned long flag; | 1268 | unsigned long flag; |
1269 | 1269 | ||
1270 | spin_lock_irqsave(&iommu->register_lock, flag); | 1270 | spin_lock_irqsave(&iommu->register_lock, flag); |
@@ -1382,12 +1382,12 @@ int dmar_set_interrupt(struct intel_iommu *iommu) | |||
1382 | return -EINVAL; | 1382 | return -EINVAL; |
1383 | } | 1383 | } |
1384 | 1384 | ||
1385 | set_irq_data(irq, iommu); | 1385 | irq_set_handler_data(irq, iommu); |
1386 | iommu->irq = irq; | 1386 | iommu->irq = irq; |
1387 | 1387 | ||
1388 | ret = arch_setup_dmar_msi(irq); | 1388 | ret = arch_setup_dmar_msi(irq); |
1389 | if (ret) { | 1389 | if (ret) { |
1390 | set_irq_data(irq, NULL); | 1390 | irq_set_handler_data(irq, NULL); |
1391 | iommu->irq = 0; | 1391 | iommu->irq = 0; |
1392 | destroy_irq(irq); | 1392 | destroy_irq(irq); |
1393 | return ret; | 1393 | return ret; |
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index 834842aa5bbf..db057b6fe0c8 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c | |||
@@ -34,7 +34,7 @@ struct ht_irq_cfg { | |||
34 | 34 | ||
35 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) | 35 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) |
36 | { | 36 | { |
37 | struct ht_irq_cfg *cfg = get_irq_data(irq); | 37 | struct ht_irq_cfg *cfg = irq_get_handler_data(irq); |
38 | unsigned long flags; | 38 | unsigned long flags; |
39 | spin_lock_irqsave(&ht_irq_lock, flags); | 39 | spin_lock_irqsave(&ht_irq_lock, flags); |
40 | if (cfg->msg.address_lo != msg->address_lo) { | 40 | if (cfg->msg.address_lo != msg->address_lo) { |
@@ -53,13 +53,13 @@ void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) | |||
53 | 53 | ||
54 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) | 54 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) |
55 | { | 55 | { |
56 | struct ht_irq_cfg *cfg = get_irq_data(irq); | 56 | struct ht_irq_cfg *cfg = irq_get_handler_data(irq); |
57 | *msg = cfg->msg; | 57 | *msg = cfg->msg; |
58 | } | 58 | } |
59 | 59 | ||
60 | void mask_ht_irq(struct irq_data *data) | 60 | void mask_ht_irq(struct irq_data *data) |
61 | { | 61 | { |
62 | struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); | 62 | struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data); |
63 | struct ht_irq_msg msg = cfg->msg; | 63 | struct ht_irq_msg msg = cfg->msg; |
64 | 64 | ||
65 | msg.address_lo |= 1; | 65 | msg.address_lo |= 1; |
@@ -68,7 +68,7 @@ void mask_ht_irq(struct irq_data *data) | |||
68 | 68 | ||
69 | void unmask_ht_irq(struct irq_data *data) | 69 | void unmask_ht_irq(struct irq_data *data) |
70 | { | 70 | { |
71 | struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); | 71 | struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data); |
72 | struct ht_irq_msg msg = cfg->msg; | 72 | struct ht_irq_msg msg = cfg->msg; |
73 | 73 | ||
74 | msg.address_lo &= ~1; | 74 | msg.address_lo &= ~1; |
@@ -126,7 +126,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) | |||
126 | kfree(cfg); | 126 | kfree(cfg); |
127 | return -EBUSY; | 127 | return -EBUSY; |
128 | } | 128 | } |
129 | set_irq_data(irq, cfg); | 129 | irq_set_handler_data(irq, cfg); |
130 | 130 | ||
131 | if (arch_setup_ht_irq(irq, dev) < 0) { | 131 | if (arch_setup_ht_irq(irq, dev) < 0) { |
132 | ht_destroy_irq(irq); | 132 | ht_destroy_irq(irq); |
@@ -162,9 +162,9 @@ void ht_destroy_irq(unsigned int irq) | |||
162 | { | 162 | { |
163 | struct ht_irq_cfg *cfg; | 163 | struct ht_irq_cfg *cfg; |
164 | 164 | ||
165 | cfg = get_irq_data(irq); | 165 | cfg = irq_get_handler_data(irq); |
166 | set_irq_chip(irq, NULL); | 166 | irq_set_chip(irq, NULL); |
167 | set_irq_data(irq, NULL); | 167 | irq_set_handler_data(irq, NULL); |
168 | destroy_irq(irq); | 168 | destroy_irq(irq); |
169 | 169 | ||
170 | kfree(cfg); | 170 | kfree(cfg); |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index a4115f1afe1f..7da3bef60d87 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -1206,7 +1206,7 @@ void free_dmar_iommu(struct intel_iommu *iommu) | |||
1206 | iommu_disable_translation(iommu); | 1206 | iommu_disable_translation(iommu); |
1207 | 1207 | ||
1208 | if (iommu->irq) { | 1208 | if (iommu->irq) { |
1209 | set_irq_data(iommu->irq, NULL); | 1209 | irq_set_handler_data(iommu->irq, NULL); |
1210 | /* This will mask the irq */ | 1210 | /* This will mask the irq */ |
1211 | free_irq(iommu->irq, iommu); | 1211 | free_irq(iommu->irq, iommu); |
1212 | destroy_irq(iommu->irq); | 1212 | destroy_irq(iommu->irq); |
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index ec87cd66f3eb..a22557b20283 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
@@ -50,7 +50,7 @@ static DEFINE_SPINLOCK(irq_2_ir_lock); | |||
50 | 50 | ||
51 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | 51 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) |
52 | { | 52 | { |
53 | struct irq_cfg *cfg = get_irq_chip_data(irq); | 53 | struct irq_cfg *cfg = irq_get_chip_data(irq); |
54 | return cfg ? &cfg->irq_2_iommu : NULL; | 54 | return cfg ? &cfg->irq_2_iommu : NULL; |
55 | } | 55 | } |
56 | 56 | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 44b0aeee83e5..2f10328bf661 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -236,7 +236,7 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | |||
236 | 236 | ||
237 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) | 237 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) |
238 | { | 238 | { |
239 | struct msi_desc *entry = get_irq_msi(irq); | 239 | struct msi_desc *entry = irq_get_msi_desc(irq); |
240 | 240 | ||
241 | __read_msi_msg(entry, msg); | 241 | __read_msi_msg(entry, msg); |
242 | } | 242 | } |
@@ -253,7 +253,7 @@ void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | |||
253 | 253 | ||
254 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) | 254 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) |
255 | { | 255 | { |
256 | struct msi_desc *entry = get_irq_msi(irq); | 256 | struct msi_desc *entry = irq_get_msi_desc(irq); |
257 | 257 | ||
258 | __get_cached_msi_msg(entry, msg); | 258 | __get_cached_msi_msg(entry, msg); |
259 | } | 259 | } |
@@ -297,7 +297,7 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | |||
297 | 297 | ||
298 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) | 298 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) |
299 | { | 299 | { |
300 | struct msi_desc *entry = get_irq_msi(irq); | 300 | struct msi_desc *entry = irq_get_msi_desc(irq); |
301 | 301 | ||
302 | __write_msi_msg(entry, msg); | 302 | __write_msi_msg(entry, msg); |
303 | } | 303 | } |
@@ -354,7 +354,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev) | |||
354 | if (!dev->msi_enabled) | 354 | if (!dev->msi_enabled) |
355 | return; | 355 | return; |
356 | 356 | ||
357 | entry = get_irq_msi(dev->irq); | 357 | entry = irq_get_msi_desc(dev->irq); |
358 | pos = entry->msi_attrib.pos; | 358 | pos = entry->msi_attrib.pos; |
359 | 359 | ||
360 | pci_intx_for_msi(dev, 0); | 360 | pci_intx_for_msi(dev, 0); |
@@ -519,7 +519,7 @@ static void msix_program_entries(struct pci_dev *dev, | |||
519 | PCI_MSIX_ENTRY_VECTOR_CTRL; | 519 | PCI_MSIX_ENTRY_VECTOR_CTRL; |
520 | 520 | ||
521 | entries[i].vector = entry->irq; | 521 | entries[i].vector = entry->irq; |
522 | set_irq_msi(entry->irq, entry); | 522 | irq_set_msi_desc(entry->irq, entry); |
523 | entry->masked = readl(entry->mask_base + offset); | 523 | entry->masked = readl(entry->mask_base + offset); |
524 | msix_mask_irq(entry, 1); | 524 | msix_mask_irq(entry, 1); |
525 | i++; | 525 | i++; |
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c index eae9cbe37a3e..49221395101e 100644 --- a/drivers/pcmcia/bfin_cf_pcmcia.c +++ b/drivers/pcmcia/bfin_cf_pcmcia.c | |||
@@ -235,7 +235,7 @@ static int __devinit bfin_cf_probe(struct platform_device *pdev) | |||
235 | cf->irq = irq; | 235 | cf->irq = irq; |
236 | cf->socket.pci_irq = irq; | 236 | cf->socket.pci_irq = irq; |
237 | 237 | ||
238 | set_irq_type(irq, IRQF_TRIGGER_LOW); | 238 | irq_set_irq_type(irq, IRQF_TRIGGER_LOW); |
239 | 239 | ||
240 | io_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 240 | io_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
241 | attr_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 241 | attr_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 27575e6378a1..01757f18a208 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c | |||
@@ -181,7 +181,7 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock) | |||
181 | /* all other (older) Db1x00 boards use a GPIO to show | 181 | /* all other (older) Db1x00 boards use a GPIO to show |
182 | * card detection status: use both-edge triggers. | 182 | * card detection status: use both-edge triggers. |
183 | */ | 183 | */ |
184 | set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH); | 184 | irq_set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH); |
185 | ret = request_irq(sock->insert_irq, db1000_pcmcia_cdirq, | 185 | ret = request_irq(sock->insert_irq, db1000_pcmcia_cdirq, |
186 | 0, "pcmcia_carddetect", sock); | 186 | 0, "pcmcia_carddetect", sock); |
187 | 187 | ||
diff --git a/drivers/pcmcia/sa1100_nanoengine.c b/drivers/pcmcia/sa1100_nanoengine.c index 3d2652e2f5ae..93b9c9ba57c3 100644 --- a/drivers/pcmcia/sa1100_nanoengine.c +++ b/drivers/pcmcia/sa1100_nanoengine.c | |||
@@ -86,7 +86,7 @@ static int nanoengine_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
86 | GPDR &= ~nano_skts[i].input_pins; | 86 | GPDR &= ~nano_skts[i].input_pins; |
87 | GPDR |= nano_skts[i].output_pins; | 87 | GPDR |= nano_skts[i].output_pins; |
88 | GPCR = nano_skts[i].clear_outputs; | 88 | GPCR = nano_skts[i].clear_outputs; |
89 | set_irq_type(nano_skts[i].transition_pins, IRQ_TYPE_EDGE_BOTH); | 89 | irq_set_irq_type(nano_skts[i].transition_pins, IRQ_TYPE_EDGE_BOTH); |
90 | skt->socket.pci_irq = nano_skts[i].pci_irq; | 90 | skt->socket.pci_irq = nano_skts[i].pci_irq; |
91 | 91 | ||
92 | return soc_pcmcia_request_irqs(skt, | 92 | return soc_pcmcia_request_irqs(skt, |
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 5a9a392eacdf..768f9572a8c8 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c | |||
@@ -155,11 +155,11 @@ static int soc_common_pcmcia_config_skt( | |||
155 | */ | 155 | */ |
156 | if (skt->irq_state != 1 && state->io_irq) { | 156 | if (skt->irq_state != 1 && state->io_irq) { |
157 | skt->irq_state = 1; | 157 | skt->irq_state = 1; |
158 | set_irq_type(skt->socket.pci_irq, | 158 | irq_set_irq_type(skt->socket.pci_irq, |
159 | IRQ_TYPE_EDGE_FALLING); | 159 | IRQ_TYPE_EDGE_FALLING); |
160 | } else if (skt->irq_state == 1 && state->io_irq == 0) { | 160 | } else if (skt->irq_state == 1 && state->io_irq == 0) { |
161 | skt->irq_state = 0; | 161 | skt->irq_state = 0; |
162 | set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE); | 162 | irq_set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE); |
163 | } | 163 | } |
164 | 164 | ||
165 | skt->cs_state = *state; | 165 | skt->cs_state = *state; |
@@ -537,7 +537,7 @@ int soc_pcmcia_request_irqs(struct soc_pcmcia_socket *skt, | |||
537 | IRQF_DISABLED, irqs[i].str, skt); | 537 | IRQF_DISABLED, irqs[i].str, skt); |
538 | if (res) | 538 | if (res) |
539 | break; | 539 | break; |
540 | set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); | 540 | irq_set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); |
541 | } | 541 | } |
542 | 542 | ||
543 | if (res) { | 543 | if (res) { |
@@ -570,7 +570,7 @@ void soc_pcmcia_disable_irqs(struct soc_pcmcia_socket *skt, | |||
570 | 570 | ||
571 | for (i = 0; i < nr; i++) | 571 | for (i = 0; i < nr; i++) |
572 | if (irqs[i].sock == skt->nr) | 572 | if (irqs[i].sock == skt->nr) |
573 | set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); | 573 | irq_set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); |
574 | } | 574 | } |
575 | EXPORT_SYMBOL(soc_pcmcia_disable_irqs); | 575 | EXPORT_SYMBOL(soc_pcmcia_disable_irqs); |
576 | 576 | ||
@@ -581,8 +581,8 @@ void soc_pcmcia_enable_irqs(struct soc_pcmcia_socket *skt, | |||
581 | 581 | ||
582 | for (i = 0; i < nr; i++) | 582 | for (i = 0; i < nr; i++) |
583 | if (irqs[i].sock == skt->nr) { | 583 | if (irqs[i].sock == skt->nr) { |
584 | set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_RISING); | 584 | irq_set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_RISING); |
585 | set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_BOTH); | 585 | irq_set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_BOTH); |
586 | } | 586 | } |
587 | } | 587 | } |
588 | EXPORT_SYMBOL(soc_pcmcia_enable_irqs); | 588 | EXPORT_SYMBOL(soc_pcmcia_enable_irqs); |
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c index 3b67a1b6a197..379f4218857d 100644 --- a/drivers/pcmcia/xxs1500_ss.c +++ b/drivers/pcmcia/xxs1500_ss.c | |||
@@ -274,7 +274,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev) | |||
274 | * edge detector. | 274 | * edge detector. |
275 | */ | 275 | */ |
276 | irq = gpio_to_irq(GPIO_CDA); | 276 | irq = gpio_to_irq(GPIO_CDA); |
277 | set_irq_type(irq, IRQ_TYPE_EDGE_BOTH); | 277 | irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH); |
278 | ret = request_irq(irq, cdirq, 0, "pcmcia_carddetect", sock); | 278 | ret = request_irq(irq, cdirq, 0, "pcmcia_carddetect", sock); |
279 | if (ret) { | 279 | if (ret) { |
280 | dev_err(&pdev->dev, "cannot setup cd irq\n"); | 280 | dev_err(&pdev->dev, "cannot setup cd irq\n"); |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 222dfb737b11..2ee442c2a5db 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -101,6 +101,19 @@ config DELL_WMI | |||
101 | To compile this driver as a module, choose M here: the module will | 101 | To compile this driver as a module, choose M here: the module will |
102 | be called dell-wmi. | 102 | be called dell-wmi. |
103 | 103 | ||
104 | config DELL_WMI_AIO | ||
105 | tristate "WMI Hotkeys for Dell All-In-One series" | ||
106 | depends on ACPI_WMI | ||
107 | depends on INPUT | ||
108 | select INPUT_SPARSEKMAP | ||
109 | ---help--- | ||
110 | Say Y here if you want to support WMI-based hotkeys on Dell | ||
111 | All-In-One machines. | ||
112 | |||
113 | To compile this driver as a module, choose M here: the module will | ||
114 | be called dell-wmi. | ||
115 | |||
116 | |||
104 | config FUJITSU_LAPTOP | 117 | config FUJITSU_LAPTOP |
105 | tristate "Fujitsu Laptop Extras" | 118 | tristate "Fujitsu Laptop Extras" |
106 | depends on ACPI | 119 | depends on ACPI |
@@ -438,23 +451,53 @@ config EEEPC_LAPTOP | |||
438 | Bluetooth, backlight and allows powering on/off some other | 451 | Bluetooth, backlight and allows powering on/off some other |
439 | devices. | 452 | devices. |
440 | 453 | ||
441 | If you have an Eee PC laptop, say Y or M here. | 454 | If you have an Eee PC laptop, say Y or M here. If this driver |
455 | doesn't work on your Eee PC, try eeepc-wmi instead. | ||
442 | 456 | ||
443 | config EEEPC_WMI | 457 | config ASUS_WMI |
444 | tristate "Eee PC WMI Hotkey Driver (EXPERIMENTAL)" | 458 | tristate "ASUS WMI Driver (EXPERIMENTAL)" |
445 | depends on ACPI_WMI | 459 | depends on ACPI_WMI |
446 | depends on INPUT | 460 | depends on INPUT |
461 | depends on HWMON | ||
447 | depends on EXPERIMENTAL | 462 | depends on EXPERIMENTAL |
448 | depends on BACKLIGHT_CLASS_DEVICE | 463 | depends on BACKLIGHT_CLASS_DEVICE |
449 | depends on RFKILL || RFKILL = n | 464 | depends on RFKILL || RFKILL = n |
465 | depends on HOTPLUG_PCI | ||
450 | select INPUT_SPARSEKMAP | 466 | select INPUT_SPARSEKMAP |
451 | select LEDS_CLASS | 467 | select LEDS_CLASS |
452 | select NEW_LEDS | 468 | select NEW_LEDS |
453 | ---help--- | 469 | ---help--- |
454 | Say Y here if you want to support WMI-based hotkeys on Eee PC laptops. | 470 | Say Y here if you have a WMI aware Asus laptop (like Eee PCs or new |
471 | Asus Notebooks). | ||
455 | 472 | ||
456 | To compile this driver as a module, choose M here: the module will | 473 | To compile this driver as a module, choose M here: the module will |
457 | be called eeepc-wmi. | 474 | be called asus-wmi. |
475 | |||
476 | config ASUS_NB_WMI | ||
477 | tristate "Asus Notebook WMI Driver (EXPERIMENTAL)" | ||
478 | depends on ASUS_WMI | ||
479 | ---help--- | ||
480 | This is a driver for newer Asus notebooks. It adds extra features | ||
481 | like wireless radio and bluetooth control, leds, hotkeys, backlight... | ||
482 | |||
483 | For more informations, see | ||
484 | <file:Documentation/ABI/testing/sysfs-platform-asus-wmi> | ||
485 | |||
486 | If you have an ACPI-WMI compatible Asus Notebook, say Y or M | ||
487 | here. | ||
488 | |||
489 | config EEEPC_WMI | ||
490 | tristate "Eee PC WMI Driver (EXPERIMENTAL)" | ||
491 | depends on ASUS_WMI | ||
492 | ---help--- | ||
493 | This is a driver for newer Eee PC laptops. It adds extra features | ||
494 | like wireless radio and bluetooth control, leds, hotkeys, backlight... | ||
495 | |||
496 | For more informations, see | ||
497 | <file:Documentation/ABI/testing/sysfs-platform-asus-wmi> | ||
498 | |||
499 | If you have an ACPI-WMI compatible Eee PC laptop (>= 1000), say Y or M | ||
500 | here. | ||
458 | 501 | ||
459 | config ACPI_WMI | 502 | config ACPI_WMI |
460 | tristate "WMI" | 503 | tristate "WMI" |
@@ -616,6 +659,21 @@ config GPIO_INTEL_PMIC | |||
616 | Say Y here to support GPIO via the SCU IPC interface | 659 | Say Y here to support GPIO via the SCU IPC interface |
617 | on Intel MID platforms. | 660 | on Intel MID platforms. |
618 | 661 | ||
662 | config INTEL_MID_POWER_BUTTON | ||
663 | tristate "power button driver for Intel MID platforms" | ||
664 | depends on INTEL_SCU_IPC && INPUT | ||
665 | help | ||
666 | This driver handles the power button on the Intel MID platforms. | ||
667 | |||
668 | If unsure, say N. | ||
669 | |||
670 | config INTEL_MFLD_THERMAL | ||
671 | tristate "Thermal driver for Intel Medfield platform" | ||
672 | depends on INTEL_SCU_IPC && THERMAL | ||
673 | help | ||
674 | Say Y here to enable thermal driver support for the Intel Medfield | ||
675 | platform. | ||
676 | |||
619 | config RAR_REGISTER | 677 | config RAR_REGISTER |
620 | bool "Restricted Access Region Register Driver" | 678 | bool "Restricted Access Region Register Driver" |
621 | depends on PCI && X86_MRST | 679 | depends on PCI && X86_MRST |
@@ -672,4 +730,26 @@ config XO1_RFKILL | |||
672 | Support for enabling/disabling the WLAN interface on the OLPC XO-1 | 730 | Support for enabling/disabling the WLAN interface on the OLPC XO-1 |
673 | laptop. | 731 | laptop. |
674 | 732 | ||
733 | config XO15_EBOOK | ||
734 | tristate "OLPC XO-1.5 ebook switch" | ||
735 | depends on ACPI && INPUT | ||
736 | ---help--- | ||
737 | Support for the ebook switch on the OLPC XO-1.5 laptop. | ||
738 | |||
739 | This switch is triggered as the screen is rotated and folded down to | ||
740 | convert the device into ebook form. | ||
741 | |||
742 | config SAMSUNG_LAPTOP | ||
743 | tristate "Samsung Laptop driver" | ||
744 | depends on RFKILL && BACKLIGHT_CLASS_DEVICE && X86 | ||
745 | ---help--- | ||
746 | This module implements a driver for a wide range of different | ||
747 | Samsung laptops. It offers control over the different | ||
748 | function keys, wireless LED, LCD backlight level, and | ||
749 | sometimes provides a "performance_control" sysfs file to allow | ||
750 | the performance level of the laptop to be changed. | ||
751 | |||
752 | To compile this driver as a module, choose M here: the module | ||
753 | will be called samsung-laptop. | ||
754 | |||
675 | endif # X86_PLATFORM_DEVICES | 755 | endif # X86_PLATFORM_DEVICES |
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 299aefb3e74c..029e8861d086 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile | |||
@@ -3,6 +3,8 @@ | |||
3 | # x86 Platform-Specific Drivers | 3 | # x86 Platform-Specific Drivers |
4 | # | 4 | # |
5 | obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o | 5 | obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o |
6 | obj-$(CONFIG_ASUS_WMI) += asus-wmi.o | ||
7 | obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o | ||
6 | obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o | 8 | obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o |
7 | obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o | 9 | obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o |
8 | obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o | 10 | obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o |
@@ -10,6 +12,7 @@ obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o | |||
10 | obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o | 12 | obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o |
11 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o | 13 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o |
12 | obj-$(CONFIG_DELL_WMI) += dell-wmi.o | 14 | obj-$(CONFIG_DELL_WMI) += dell-wmi.o |
15 | obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o | ||
13 | obj-$(CONFIG_ACER_WMI) += acer-wmi.o | 16 | obj-$(CONFIG_ACER_WMI) += acer-wmi.o |
14 | obj-$(CONFIG_ACERHDF) += acerhdf.o | 17 | obj-$(CONFIG_ACERHDF) += acerhdf.o |
15 | obj-$(CONFIG_HP_ACCEL) += hp_accel.o | 18 | obj-$(CONFIG_HP_ACCEL) += hp_accel.o |
@@ -29,9 +32,13 @@ obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o | |||
29 | obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o | 32 | obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o |
30 | obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o | 33 | obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o |
31 | obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o | 34 | obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o |
32 | obj-$(CONFIG_INTEL_SCU_IPC_UTIL)+= intel_scu_ipcutil.o | 35 | obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o |
36 | obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o | ||
33 | obj-$(CONFIG_RAR_REGISTER) += intel_rar_register.o | 37 | obj-$(CONFIG_RAR_REGISTER) += intel_rar_register.o |
34 | obj-$(CONFIG_INTEL_IPS) += intel_ips.o | 38 | obj-$(CONFIG_INTEL_IPS) += intel_ips.o |
35 | obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o | 39 | obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o |
36 | obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o | 40 | obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o |
41 | obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o | ||
37 | obj-$(CONFIG_IBM_RTL) += ibm_rtl.o | 42 | obj-$(CONFIG_IBM_RTL) += ibm_rtl.o |
43 | obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o | ||
44 | obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o | ||
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index c9784705f6ac..5ea6c3477d17 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c | |||
@@ -22,6 +22,8 @@ | |||
22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
26 | |||
25 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
26 | #include <linux/module.h> | 28 | #include <linux/module.h> |
27 | #include <linux/init.h> | 29 | #include <linux/init.h> |
@@ -46,12 +48,6 @@ MODULE_AUTHOR("Carlos Corbacho"); | |||
46 | MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver"); | 48 | MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver"); |
47 | MODULE_LICENSE("GPL"); | 49 | MODULE_LICENSE("GPL"); |
48 | 50 | ||
49 | #define ACER_LOGPREFIX "acer-wmi: " | ||
50 | #define ACER_ERR KERN_ERR ACER_LOGPREFIX | ||
51 | #define ACER_NOTICE KERN_NOTICE ACER_LOGPREFIX | ||
52 | #define ACER_INFO KERN_INFO ACER_LOGPREFIX | ||
53 | #define ACER_WARNING KERN_WARNING ACER_LOGPREFIX | ||
54 | |||
55 | /* | 51 | /* |
56 | * Magic Number | 52 | * Magic Number |
57 | * Meaning is unknown - this number is required for writing to ACPI for AMW0 | 53 | * Meaning is unknown - this number is required for writing to ACPI for AMW0 |
@@ -84,7 +80,7 @@ MODULE_LICENSE("GPL"); | |||
84 | #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" | 80 | #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" |
85 | #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" | 81 | #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" |
86 | #define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" | 82 | #define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" |
87 | #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" | 83 | #define WMID_GUID2 "95764E09-FB56-4E83-B31A-37761F60994A" |
88 | #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" | 84 | #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" |
89 | 85 | ||
90 | /* | 86 | /* |
@@ -93,7 +89,7 @@ MODULE_LICENSE("GPL"); | |||
93 | #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" | 89 | #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" |
94 | 90 | ||
95 | MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); | 91 | MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); |
96 | MODULE_ALIAS("wmi:6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"); | 92 | MODULE_ALIAS("wmi:6AF4F258-B401-42Fd-BE91-3D4AC2D7C0D3"); |
97 | MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); | 93 | MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); |
98 | 94 | ||
99 | enum acer_wmi_event_ids { | 95 | enum acer_wmi_event_ids { |
@@ -108,7 +104,7 @@ static const struct key_entry acer_wmi_keymap[] = { | |||
108 | {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ | 104 | {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ |
109 | {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ | 105 | {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ |
110 | {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ | 106 | {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ |
111 | {KE_KEY, 0x82, {KEY_F22} }, /* Touch Pad On/Off */ | 107 | {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */ |
112 | {KE_END, 0} | 108 | {KE_END, 0} |
113 | }; | 109 | }; |
114 | 110 | ||
@@ -221,6 +217,7 @@ struct acer_debug { | |||
221 | static struct rfkill *wireless_rfkill; | 217 | static struct rfkill *wireless_rfkill; |
222 | static struct rfkill *bluetooth_rfkill; | 218 | static struct rfkill *bluetooth_rfkill; |
223 | static struct rfkill *threeg_rfkill; | 219 | static struct rfkill *threeg_rfkill; |
220 | static bool rfkill_inited; | ||
224 | 221 | ||
225 | /* Each low-level interface must define at least some of the following */ | 222 | /* Each low-level interface must define at least some of the following */ |
226 | struct wmi_interface { | 223 | struct wmi_interface { |
@@ -845,7 +842,7 @@ static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy) | |||
845 | has_type_aa = true; | 842 | has_type_aa = true; |
846 | type_aa = (struct hotkey_function_type_aa *) header; | 843 | type_aa = (struct hotkey_function_type_aa *) header; |
847 | 844 | ||
848 | printk(ACER_INFO "Function bitmap for Communication Button: 0x%x\n", | 845 | pr_info("Function bitmap for Communication Button: 0x%x\n", |
849 | type_aa->commun_func_bitmap); | 846 | type_aa->commun_func_bitmap); |
850 | 847 | ||
851 | if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS) | 848 | if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS) |
@@ -991,6 +988,7 @@ static int __devinit acer_led_init(struct device *dev) | |||
991 | 988 | ||
992 | static void acer_led_exit(void) | 989 | static void acer_led_exit(void) |
993 | { | 990 | { |
991 | set_u32(LED_OFF, ACER_CAP_MAILLED); | ||
994 | led_classdev_unregister(&mail_led); | 992 | led_classdev_unregister(&mail_led); |
995 | } | 993 | } |
996 | 994 | ||
@@ -1036,7 +1034,7 @@ static int __devinit acer_backlight_init(struct device *dev) | |||
1036 | bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops, | 1034 | bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops, |
1037 | &props); | 1035 | &props); |
1038 | if (IS_ERR(bd)) { | 1036 | if (IS_ERR(bd)) { |
1039 | printk(ACER_ERR "Could not register Acer backlight device\n"); | 1037 | pr_err("Could not register Acer backlight device\n"); |
1040 | acer_backlight_device = NULL; | 1038 | acer_backlight_device = NULL; |
1041 | return PTR_ERR(bd); | 1039 | return PTR_ERR(bd); |
1042 | } | 1040 | } |
@@ -1083,8 +1081,7 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device) | |||
1083 | return AE_ERROR; | 1081 | return AE_ERROR; |
1084 | } | 1082 | } |
1085 | if (obj->buffer.length != 8) { | 1083 | if (obj->buffer.length != 8) { |
1086 | printk(ACER_WARNING "Unknown buffer length %d\n", | 1084 | pr_warning("Unknown buffer length %d\n", obj->buffer.length); |
1087 | obj->buffer.length); | ||
1088 | kfree(obj); | 1085 | kfree(obj); |
1089 | return AE_ERROR; | 1086 | return AE_ERROR; |
1090 | } | 1087 | } |
@@ -1093,7 +1090,7 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device) | |||
1093 | kfree(obj); | 1090 | kfree(obj); |
1094 | 1091 | ||
1095 | if (return_value.error_code || return_value.ec_return_value) | 1092 | if (return_value.error_code || return_value.ec_return_value) |
1096 | printk(ACER_WARNING "Get Device Status failed: " | 1093 | pr_warning("Get Device Status failed: " |
1097 | "0x%x - 0x%x\n", return_value.error_code, | 1094 | "0x%x - 0x%x\n", return_value.error_code, |
1098 | return_value.ec_return_value); | 1095 | return_value.ec_return_value); |
1099 | else | 1096 | else |
@@ -1161,9 +1158,13 @@ static int acer_rfkill_set(void *data, bool blocked) | |||
1161 | { | 1158 | { |
1162 | acpi_status status; | 1159 | acpi_status status; |
1163 | u32 cap = (unsigned long)data; | 1160 | u32 cap = (unsigned long)data; |
1164 | status = set_u32(!blocked, cap); | 1161 | |
1165 | if (ACPI_FAILURE(status)) | 1162 | if (rfkill_inited) { |
1166 | return -ENODEV; | 1163 | status = set_u32(!blocked, cap); |
1164 | if (ACPI_FAILURE(status)) | ||
1165 | return -ENODEV; | ||
1166 | } | ||
1167 | |||
1167 | return 0; | 1168 | return 0; |
1168 | } | 1169 | } |
1169 | 1170 | ||
@@ -1187,14 +1188,16 @@ static struct rfkill *acer_rfkill_register(struct device *dev, | |||
1187 | return ERR_PTR(-ENOMEM); | 1188 | return ERR_PTR(-ENOMEM); |
1188 | 1189 | ||
1189 | status = get_device_status(&state, cap); | 1190 | status = get_device_status(&state, cap); |
1190 | if (ACPI_SUCCESS(status)) | ||
1191 | rfkill_init_sw_state(rfkill_dev, !state); | ||
1192 | 1191 | ||
1193 | err = rfkill_register(rfkill_dev); | 1192 | err = rfkill_register(rfkill_dev); |
1194 | if (err) { | 1193 | if (err) { |
1195 | rfkill_destroy(rfkill_dev); | 1194 | rfkill_destroy(rfkill_dev); |
1196 | return ERR_PTR(err); | 1195 | return ERR_PTR(err); |
1197 | } | 1196 | } |
1197 | |||
1198 | if (ACPI_SUCCESS(status)) | ||
1199 | rfkill_set_sw_state(rfkill_dev, !state); | ||
1200 | |||
1198 | return rfkill_dev; | 1201 | return rfkill_dev; |
1199 | } | 1202 | } |
1200 | 1203 | ||
@@ -1229,14 +1232,19 @@ static int acer_rfkill_init(struct device *dev) | |||
1229 | } | 1232 | } |
1230 | } | 1233 | } |
1231 | 1234 | ||
1232 | schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); | 1235 | rfkill_inited = true; |
1236 | |||
1237 | if (ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) | ||
1238 | schedule_delayed_work(&acer_rfkill_work, | ||
1239 | round_jiffies_relative(HZ)); | ||
1233 | 1240 | ||
1234 | return 0; | 1241 | return 0; |
1235 | } | 1242 | } |
1236 | 1243 | ||
1237 | static void acer_rfkill_exit(void) | 1244 | static void acer_rfkill_exit(void) |
1238 | { | 1245 | { |
1239 | cancel_delayed_work_sync(&acer_rfkill_work); | 1246 | if (ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) |
1247 | cancel_delayed_work_sync(&acer_rfkill_work); | ||
1240 | 1248 | ||
1241 | rfkill_unregister(wireless_rfkill); | 1249 | rfkill_unregister(wireless_rfkill); |
1242 | rfkill_destroy(wireless_rfkill); | 1250 | rfkill_destroy(wireless_rfkill); |
@@ -1309,7 +1317,7 @@ static void acer_wmi_notify(u32 value, void *context) | |||
1309 | 1317 | ||
1310 | status = wmi_get_event_data(value, &response); | 1318 | status = wmi_get_event_data(value, &response); |
1311 | if (status != AE_OK) { | 1319 | if (status != AE_OK) { |
1312 | printk(ACER_WARNING "bad event status 0x%x\n", status); | 1320 | pr_warning("bad event status 0x%x\n", status); |
1313 | return; | 1321 | return; |
1314 | } | 1322 | } |
1315 | 1323 | ||
@@ -1318,14 +1326,12 @@ static void acer_wmi_notify(u32 value, void *context) | |||
1318 | if (!obj) | 1326 | if (!obj) |
1319 | return; | 1327 | return; |
1320 | if (obj->type != ACPI_TYPE_BUFFER) { | 1328 | if (obj->type != ACPI_TYPE_BUFFER) { |
1321 | printk(ACER_WARNING "Unknown response received %d\n", | 1329 | pr_warning("Unknown response received %d\n", obj->type); |
1322 | obj->type); | ||
1323 | kfree(obj); | 1330 | kfree(obj); |
1324 | return; | 1331 | return; |
1325 | } | 1332 | } |
1326 | if (obj->buffer.length != 8) { | 1333 | if (obj->buffer.length != 8) { |
1327 | printk(ACER_WARNING "Unknown buffer length %d\n", | 1334 | pr_warning("Unknown buffer length %d\n", obj->buffer.length); |
1328 | obj->buffer.length); | ||
1329 | kfree(obj); | 1335 | kfree(obj); |
1330 | return; | 1336 | return; |
1331 | } | 1337 | } |
@@ -1335,13 +1341,26 @@ static void acer_wmi_notify(u32 value, void *context) | |||
1335 | 1341 | ||
1336 | switch (return_value.function) { | 1342 | switch (return_value.function) { |
1337 | case WMID_HOTKEY_EVENT: | 1343 | case WMID_HOTKEY_EVENT: |
1344 | if (return_value.device_state) { | ||
1345 | u16 device_state = return_value.device_state; | ||
1346 | pr_debug("deivces states: 0x%x\n", device_state); | ||
1347 | if (has_cap(ACER_CAP_WIRELESS)) | ||
1348 | rfkill_set_sw_state(wireless_rfkill, | ||
1349 | !(device_state & ACER_WMID3_GDS_WIRELESS)); | ||
1350 | if (has_cap(ACER_CAP_BLUETOOTH)) | ||
1351 | rfkill_set_sw_state(bluetooth_rfkill, | ||
1352 | !(device_state & ACER_WMID3_GDS_BLUETOOTH)); | ||
1353 | if (has_cap(ACER_CAP_THREEG)) | ||
1354 | rfkill_set_sw_state(threeg_rfkill, | ||
1355 | !(device_state & ACER_WMID3_GDS_THREEG)); | ||
1356 | } | ||
1338 | if (!sparse_keymap_report_event(acer_wmi_input_dev, | 1357 | if (!sparse_keymap_report_event(acer_wmi_input_dev, |
1339 | return_value.key_num, 1, true)) | 1358 | return_value.key_num, 1, true)) |
1340 | printk(ACER_WARNING "Unknown key number - 0x%x\n", | 1359 | pr_warning("Unknown key number - 0x%x\n", |
1341 | return_value.key_num); | 1360 | return_value.key_num); |
1342 | break; | 1361 | break; |
1343 | default: | 1362 | default: |
1344 | printk(ACER_WARNING "Unknown function number - %d - %d\n", | 1363 | pr_warning("Unknown function number - %d - %d\n", |
1345 | return_value.function, return_value.key_num); | 1364 | return_value.function, return_value.key_num); |
1346 | break; | 1365 | break; |
1347 | } | 1366 | } |
@@ -1370,8 +1389,7 @@ wmid3_set_lm_mode(struct lm_input_params *params, | |||
1370 | return AE_ERROR; | 1389 | return AE_ERROR; |
1371 | } | 1390 | } |
1372 | if (obj->buffer.length != 4) { | 1391 | if (obj->buffer.length != 4) { |
1373 | printk(ACER_WARNING "Unknown buffer length %d\n", | 1392 | pr_warning("Unknown buffer length %d\n", obj->buffer.length); |
1374 | obj->buffer.length); | ||
1375 | kfree(obj); | 1393 | kfree(obj); |
1376 | return AE_ERROR; | 1394 | return AE_ERROR; |
1377 | } | 1395 | } |
@@ -1396,11 +1414,11 @@ static int acer_wmi_enable_ec_raw(void) | |||
1396 | status = wmid3_set_lm_mode(¶ms, &return_value); | 1414 | status = wmid3_set_lm_mode(¶ms, &return_value); |
1397 | 1415 | ||
1398 | if (return_value.error_code || return_value.ec_return_value) | 1416 | if (return_value.error_code || return_value.ec_return_value) |
1399 | printk(ACER_WARNING "Enabling EC raw mode failed: " | 1417 | pr_warning("Enabling EC raw mode failed: " |
1400 | "0x%x - 0x%x\n", return_value.error_code, | 1418 | "0x%x - 0x%x\n", return_value.error_code, |
1401 | return_value.ec_return_value); | 1419 | return_value.ec_return_value); |
1402 | else | 1420 | else |
1403 | printk(ACER_INFO "Enabled EC raw mode"); | 1421 | pr_info("Enabled EC raw mode"); |
1404 | 1422 | ||
1405 | return status; | 1423 | return status; |
1406 | } | 1424 | } |
@@ -1419,7 +1437,7 @@ static int acer_wmi_enable_lm(void) | |||
1419 | status = wmid3_set_lm_mode(¶ms, &return_value); | 1437 | status = wmid3_set_lm_mode(¶ms, &return_value); |
1420 | 1438 | ||
1421 | if (return_value.error_code || return_value.ec_return_value) | 1439 | if (return_value.error_code || return_value.ec_return_value) |
1422 | printk(ACER_WARNING "Enabling Launch Manager failed: " | 1440 | pr_warning("Enabling Launch Manager failed: " |
1423 | "0x%x - 0x%x\n", return_value.error_code, | 1441 | "0x%x - 0x%x\n", return_value.error_code, |
1424 | return_value.ec_return_value); | 1442 | return_value.ec_return_value); |
1425 | 1443 | ||
@@ -1553,6 +1571,7 @@ pm_message_t state) | |||
1553 | 1571 | ||
1554 | if (has_cap(ACER_CAP_MAILLED)) { | 1572 | if (has_cap(ACER_CAP_MAILLED)) { |
1555 | get_u32(&value, ACER_CAP_MAILLED); | 1573 | get_u32(&value, ACER_CAP_MAILLED); |
1574 | set_u32(LED_OFF, ACER_CAP_MAILLED); | ||
1556 | data->mailled = value; | 1575 | data->mailled = value; |
1557 | } | 1576 | } |
1558 | 1577 | ||
@@ -1580,6 +1599,17 @@ static int acer_platform_resume(struct platform_device *device) | |||
1580 | return 0; | 1599 | return 0; |
1581 | } | 1600 | } |
1582 | 1601 | ||
1602 | static void acer_platform_shutdown(struct platform_device *device) | ||
1603 | { | ||
1604 | struct acer_data *data = &interface->data; | ||
1605 | |||
1606 | if (!data) | ||
1607 | return; | ||
1608 | |||
1609 | if (has_cap(ACER_CAP_MAILLED)) | ||
1610 | set_u32(LED_OFF, ACER_CAP_MAILLED); | ||
1611 | } | ||
1612 | |||
1583 | static struct platform_driver acer_platform_driver = { | 1613 | static struct platform_driver acer_platform_driver = { |
1584 | .driver = { | 1614 | .driver = { |
1585 | .name = "acer-wmi", | 1615 | .name = "acer-wmi", |
@@ -1589,6 +1619,7 @@ static struct platform_driver acer_platform_driver = { | |||
1589 | .remove = acer_platform_remove, | 1619 | .remove = acer_platform_remove, |
1590 | .suspend = acer_platform_suspend, | 1620 | .suspend = acer_platform_suspend, |
1591 | .resume = acer_platform_resume, | 1621 | .resume = acer_platform_resume, |
1622 | .shutdown = acer_platform_shutdown, | ||
1592 | }; | 1623 | }; |
1593 | 1624 | ||
1594 | static struct platform_device *acer_platform_device; | 1625 | static struct platform_device *acer_platform_device; |
@@ -1636,7 +1667,7 @@ static int create_debugfs(void) | |||
1636 | { | 1667 | { |
1637 | interface->debug.root = debugfs_create_dir("acer-wmi", NULL); | 1668 | interface->debug.root = debugfs_create_dir("acer-wmi", NULL); |
1638 | if (!interface->debug.root) { | 1669 | if (!interface->debug.root) { |
1639 | printk(ACER_ERR "Failed to create debugfs directory"); | 1670 | pr_err("Failed to create debugfs directory"); |
1640 | return -ENOMEM; | 1671 | return -ENOMEM; |
1641 | } | 1672 | } |
1642 | 1673 | ||
@@ -1657,11 +1688,10 @@ static int __init acer_wmi_init(void) | |||
1657 | { | 1688 | { |
1658 | int err; | 1689 | int err; |
1659 | 1690 | ||
1660 | printk(ACER_INFO "Acer Laptop ACPI-WMI Extras\n"); | 1691 | pr_info("Acer Laptop ACPI-WMI Extras\n"); |
1661 | 1692 | ||
1662 | if (dmi_check_system(acer_blacklist)) { | 1693 | if (dmi_check_system(acer_blacklist)) { |
1663 | printk(ACER_INFO "Blacklisted hardware detected - " | 1694 | pr_info("Blacklisted hardware detected - not loading\n"); |
1664 | "not loading\n"); | ||
1665 | return -ENODEV; | 1695 | return -ENODEV; |
1666 | } | 1696 | } |
1667 | 1697 | ||
@@ -1678,12 +1708,11 @@ static int __init acer_wmi_init(void) | |||
1678 | 1708 | ||
1679 | if (wmi_has_guid(WMID_GUID2) && interface) { | 1709 | if (wmi_has_guid(WMID_GUID2) && interface) { |
1680 | if (ACPI_FAILURE(WMID_set_capabilities())) { | 1710 | if (ACPI_FAILURE(WMID_set_capabilities())) { |
1681 | printk(ACER_ERR "Unable to detect available WMID " | 1711 | pr_err("Unable to detect available WMID devices\n"); |
1682 | "devices\n"); | ||
1683 | return -ENODEV; | 1712 | return -ENODEV; |
1684 | } | 1713 | } |
1685 | } else if (!wmi_has_guid(WMID_GUID2) && interface) { | 1714 | } else if (!wmi_has_guid(WMID_GUID2) && interface) { |
1686 | printk(ACER_ERR "No WMID device detection method found\n"); | 1715 | pr_err("No WMID device detection method found\n"); |
1687 | return -ENODEV; | 1716 | return -ENODEV; |
1688 | } | 1717 | } |
1689 | 1718 | ||
@@ -1691,8 +1720,7 @@ static int __init acer_wmi_init(void) | |||
1691 | interface = &AMW0_interface; | 1720 | interface = &AMW0_interface; |
1692 | 1721 | ||
1693 | if (ACPI_FAILURE(AMW0_set_capabilities())) { | 1722 | if (ACPI_FAILURE(AMW0_set_capabilities())) { |
1694 | printk(ACER_ERR "Unable to detect available AMW0 " | 1723 | pr_err("Unable to detect available AMW0 devices\n"); |
1695 | "devices\n"); | ||
1696 | return -ENODEV; | 1724 | return -ENODEV; |
1697 | } | 1725 | } |
1698 | } | 1726 | } |
@@ -1701,8 +1729,7 @@ static int __init acer_wmi_init(void) | |||
1701 | AMW0_find_mailled(); | 1729 | AMW0_find_mailled(); |
1702 | 1730 | ||
1703 | if (!interface) { | 1731 | if (!interface) { |
1704 | printk(ACER_INFO "No or unsupported WMI interface, unable to " | 1732 | pr_err("No or unsupported WMI interface, unable to load\n"); |
1705 | "load\n"); | ||
1706 | return -ENODEV; | 1733 | return -ENODEV; |
1707 | } | 1734 | } |
1708 | 1735 | ||
@@ -1710,22 +1737,22 @@ static int __init acer_wmi_init(void) | |||
1710 | 1737 | ||
1711 | if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) { | 1738 | if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) { |
1712 | interface->capability &= ~ACER_CAP_BRIGHTNESS; | 1739 | interface->capability &= ~ACER_CAP_BRIGHTNESS; |
1713 | printk(ACER_INFO "Brightness must be controlled by " | 1740 | pr_info("Brightness must be controlled by " |
1714 | "generic video driver\n"); | 1741 | "generic video driver\n"); |
1715 | } | 1742 | } |
1716 | 1743 | ||
1717 | if (wmi_has_guid(WMID_GUID3)) { | 1744 | if (wmi_has_guid(WMID_GUID3)) { |
1718 | if (ec_raw_mode) { | 1745 | if (ec_raw_mode) { |
1719 | if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) { | 1746 | if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) { |
1720 | printk(ACER_ERR "Cannot enable EC raw mode\n"); | 1747 | pr_err("Cannot enable EC raw mode\n"); |
1721 | return -ENODEV; | 1748 | return -ENODEV; |
1722 | } | 1749 | } |
1723 | } else if (ACPI_FAILURE(acer_wmi_enable_lm())) { | 1750 | } else if (ACPI_FAILURE(acer_wmi_enable_lm())) { |
1724 | printk(ACER_ERR "Cannot enable Launch Manager mode\n"); | 1751 | pr_err("Cannot enable Launch Manager mode\n"); |
1725 | return -ENODEV; | 1752 | return -ENODEV; |
1726 | } | 1753 | } |
1727 | } else if (ec_raw_mode) { | 1754 | } else if (ec_raw_mode) { |
1728 | printk(ACER_INFO "No WMID EC raw mode enable method\n"); | 1755 | pr_info("No WMID EC raw mode enable method\n"); |
1729 | } | 1756 | } |
1730 | 1757 | ||
1731 | if (wmi_has_guid(ACERWMID_EVENT_GUID)) { | 1758 | if (wmi_has_guid(ACERWMID_EVENT_GUID)) { |
@@ -1736,7 +1763,7 @@ static int __init acer_wmi_init(void) | |||
1736 | 1763 | ||
1737 | err = platform_driver_register(&acer_platform_driver); | 1764 | err = platform_driver_register(&acer_platform_driver); |
1738 | if (err) { | 1765 | if (err) { |
1739 | printk(ACER_ERR "Unable to register platform driver.\n"); | 1766 | pr_err("Unable to register platform driver.\n"); |
1740 | goto error_platform_register; | 1767 | goto error_platform_register; |
1741 | } | 1768 | } |
1742 | 1769 | ||
@@ -1791,7 +1818,7 @@ static void __exit acer_wmi_exit(void) | |||
1791 | platform_device_unregister(acer_platform_device); | 1818 | platform_device_unregister(acer_platform_device); |
1792 | platform_driver_unregister(&acer_platform_driver); | 1819 | platform_driver_unregister(&acer_platform_driver); |
1793 | 1820 | ||
1794 | printk(ACER_INFO "Acer Laptop WMI Extras unloaded\n"); | 1821 | pr_info("Acer Laptop WMI Extras unloaded\n"); |
1795 | return; | 1822 | return; |
1796 | } | 1823 | } |
1797 | 1824 | ||
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 5a6f7d7575d6..c53b3ff7978a 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c | |||
@@ -29,7 +29,7 @@ | |||
29 | * John Belmonte - ACPI code for Toshiba laptop was a good starting point. | 29 | * John Belmonte - ACPI code for Toshiba laptop was a good starting point. |
30 | * Eric Burghard - LED display support for W1N | 30 | * Eric Burghard - LED display support for W1N |
31 | * Josh Green - Light Sens support | 31 | * Josh Green - Light Sens support |
32 | * Thomas Tuttle - His first patch for led support was very helpfull | 32 | * Thomas Tuttle - His first patch for led support was very helpful |
33 | * Sam Lin - GPS support | 33 | * Sam Lin - GPS support |
34 | */ | 34 | */ |
35 | 35 | ||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/input/sparse-keymap.h> | 50 | #include <linux/input/sparse-keymap.h> |
51 | #include <linux/rfkill.h> | 51 | #include <linux/rfkill.h> |
52 | #include <linux/slab.h> | 52 | #include <linux/slab.h> |
53 | #include <linux/dmi.h> | ||
53 | #include <acpi/acpi_drivers.h> | 54 | #include <acpi/acpi_drivers.h> |
54 | #include <acpi/acpi_bus.h> | 55 | #include <acpi/acpi_bus.h> |
55 | 56 | ||
@@ -157,46 +158,9 @@ MODULE_PARM_DESC(wwan_status, "Set the wireless status on boot " | |||
157 | #define METHOD_BRIGHTNESS_SET "SPLV" | 158 | #define METHOD_BRIGHTNESS_SET "SPLV" |
158 | #define METHOD_BRIGHTNESS_GET "GPLV" | 159 | #define METHOD_BRIGHTNESS_GET "GPLV" |
159 | 160 | ||
160 | /* Backlight */ | ||
161 | static acpi_handle lcd_switch_handle; | ||
162 | static char *lcd_switch_paths[] = { | ||
163 | "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */ | ||
164 | "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */ | ||
165 | "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */ | ||
166 | "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */ | ||
167 | "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */ | ||
168 | "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */ | ||
169 | "\\_SB.PCI0.PX40.Q10", /* S1x */ | ||
170 | "\\Q10"}; /* A2x, L2D, L3D, M2E */ | ||
171 | |||
172 | /* Display */ | 161 | /* Display */ |
173 | #define METHOD_SWITCH_DISPLAY "SDSP" | 162 | #define METHOD_SWITCH_DISPLAY "SDSP" |
174 | 163 | ||
175 | static acpi_handle display_get_handle; | ||
176 | static char *display_get_paths[] = { | ||
177 | /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */ | ||
178 | "\\_SB.PCI0.P0P1.VGA.GETD", | ||
179 | /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */ | ||
180 | "\\_SB.PCI0.P0P2.VGA.GETD", | ||
181 | /* A6V A6Q */ | ||
182 | "\\_SB.PCI0.P0P3.VGA.GETD", | ||
183 | /* A6T, A6M */ | ||
184 | "\\_SB.PCI0.P0PA.VGA.GETD", | ||
185 | /* L3C */ | ||
186 | "\\_SB.PCI0.PCI1.VGAC.NMAP", | ||
187 | /* Z96F */ | ||
188 | "\\_SB.PCI0.VGA.GETD", | ||
189 | /* A2D */ | ||
190 | "\\ACTD", | ||
191 | /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */ | ||
192 | "\\ADVG", | ||
193 | /* P30 */ | ||
194 | "\\DNXT", | ||
195 | /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */ | ||
196 | "\\INFB", | ||
197 | /* A3F A6F A3N A3L M6N W3N W6A */ | ||
198 | "\\SSTE"}; | ||
199 | |||
200 | #define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */ | 164 | #define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */ |
201 | #define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */ | 165 | #define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */ |
202 | 166 | ||
@@ -246,7 +210,6 @@ struct asus_laptop { | |||
246 | 210 | ||
247 | int wireless_status; | 211 | int wireless_status; |
248 | bool have_rsts; | 212 | bool have_rsts; |
249 | int lcd_state; | ||
250 | 213 | ||
251 | struct rfkill *gps_rfkill; | 214 | struct rfkill *gps_rfkill; |
252 | 215 | ||
@@ -559,48 +522,6 @@ error: | |||
559 | /* | 522 | /* |
560 | * Backlight device | 523 | * Backlight device |
561 | */ | 524 | */ |
562 | static int asus_lcd_status(struct asus_laptop *asus) | ||
563 | { | ||
564 | return asus->lcd_state; | ||
565 | } | ||
566 | |||
567 | static int asus_lcd_set(struct asus_laptop *asus, int value) | ||
568 | { | ||
569 | int lcd = 0; | ||
570 | acpi_status status = 0; | ||
571 | |||
572 | lcd = !!value; | ||
573 | |||
574 | if (lcd == asus_lcd_status(asus)) | ||
575 | return 0; | ||
576 | |||
577 | if (!lcd_switch_handle) | ||
578 | return -ENODEV; | ||
579 | |||
580 | status = acpi_evaluate_object(lcd_switch_handle, | ||
581 | NULL, NULL, NULL); | ||
582 | |||
583 | if (ACPI_FAILURE(status)) { | ||
584 | pr_warning("Error switching LCD\n"); | ||
585 | return -ENODEV; | ||
586 | } | ||
587 | |||
588 | asus->lcd_state = lcd; | ||
589 | return 0; | ||
590 | } | ||
591 | |||
592 | static void lcd_blank(struct asus_laptop *asus, int blank) | ||
593 | { | ||
594 | struct backlight_device *bd = asus->backlight_device; | ||
595 | |||
596 | asus->lcd_state = (blank == FB_BLANK_UNBLANK); | ||
597 | |||
598 | if (bd) { | ||
599 | bd->props.power = blank; | ||
600 | backlight_update_status(bd); | ||
601 | } | ||
602 | } | ||
603 | |||
604 | static int asus_read_brightness(struct backlight_device *bd) | 525 | static int asus_read_brightness(struct backlight_device *bd) |
605 | { | 526 | { |
606 | struct asus_laptop *asus = bl_get_data(bd); | 527 | struct asus_laptop *asus = bl_get_data(bd); |
@@ -628,16 +549,9 @@ static int asus_set_brightness(struct backlight_device *bd, int value) | |||
628 | 549 | ||
629 | static int update_bl_status(struct backlight_device *bd) | 550 | static int update_bl_status(struct backlight_device *bd) |
630 | { | 551 | { |
631 | struct asus_laptop *asus = bl_get_data(bd); | ||
632 | int rv; | ||
633 | int value = bd->props.brightness; | 552 | int value = bd->props.brightness; |
634 | 553 | ||
635 | rv = asus_set_brightness(bd, value); | 554 | return asus_set_brightness(bd, value); |
636 | if (rv) | ||
637 | return rv; | ||
638 | |||
639 | value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0; | ||
640 | return asus_lcd_set(asus, value); | ||
641 | } | 555 | } |
642 | 556 | ||
643 | static const struct backlight_ops asusbl_ops = { | 557 | static const struct backlight_ops asusbl_ops = { |
@@ -661,8 +575,7 @@ static int asus_backlight_init(struct asus_laptop *asus) | |||
661 | struct backlight_properties props; | 575 | struct backlight_properties props; |
662 | 576 | ||
663 | if (acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) || | 577 | if (acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) || |
664 | acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) || | 578 | acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL)) |
665 | !lcd_switch_handle) | ||
666 | return 0; | 579 | return 0; |
667 | 580 | ||
668 | memset(&props, 0, sizeof(struct backlight_properties)); | 581 | memset(&props, 0, sizeof(struct backlight_properties)); |
@@ -971,41 +884,6 @@ static void asus_set_display(struct asus_laptop *asus, int value) | |||
971 | return; | 884 | return; |
972 | } | 885 | } |
973 | 886 | ||
974 | static int read_display(struct asus_laptop *asus) | ||
975 | { | ||
976 | unsigned long long value = 0; | ||
977 | acpi_status rv = AE_OK; | ||
978 | |||
979 | /* | ||
980 | * In most of the case, we know how to set the display, but sometime | ||
981 | * we can't read it | ||
982 | */ | ||
983 | if (display_get_handle) { | ||
984 | rv = acpi_evaluate_integer(display_get_handle, NULL, | ||
985 | NULL, &value); | ||
986 | if (ACPI_FAILURE(rv)) | ||
987 | pr_warning("Error reading display status\n"); | ||
988 | } | ||
989 | |||
990 | value &= 0x0F; /* needed for some models, shouldn't hurt others */ | ||
991 | |||
992 | return value; | ||
993 | } | ||
994 | |||
995 | /* | ||
996 | * Now, *this* one could be more user-friendly, but so far, no-one has | ||
997 | * complained. The significance of bits is the same as in store_disp() | ||
998 | */ | ||
999 | static ssize_t show_disp(struct device *dev, | ||
1000 | struct device_attribute *attr, char *buf) | ||
1001 | { | ||
1002 | struct asus_laptop *asus = dev_get_drvdata(dev); | ||
1003 | |||
1004 | if (!display_get_handle) | ||
1005 | return -ENODEV; | ||
1006 | return sprintf(buf, "%d\n", read_display(asus)); | ||
1007 | } | ||
1008 | |||
1009 | /* | 887 | /* |
1010 | * Experimental support for display switching. As of now: 1 should activate | 888 | * Experimental support for display switching. As of now: 1 should activate |
1011 | * the LCD output, 2 should do for CRT, 4 for TV-Out and 8 for DVI. | 889 | * the LCD output, 2 should do for CRT, 4 for TV-Out and 8 for DVI. |
@@ -1247,15 +1125,6 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event) | |||
1247 | struct asus_laptop *asus = acpi_driver_data(device); | 1125 | struct asus_laptop *asus = acpi_driver_data(device); |
1248 | u16 count; | 1126 | u16 count; |
1249 | 1127 | ||
1250 | /* | ||
1251 | * We need to tell the backlight device when the backlight power is | ||
1252 | * switched | ||
1253 | */ | ||
1254 | if (event == ATKD_LCD_ON) | ||
1255 | lcd_blank(asus, FB_BLANK_UNBLANK); | ||
1256 | else if (event == ATKD_LCD_OFF) | ||
1257 | lcd_blank(asus, FB_BLANK_POWERDOWN); | ||
1258 | |||
1259 | /* TODO Find a better way to handle events count. */ | 1128 | /* TODO Find a better way to handle events count. */ |
1260 | count = asus->event_count[event % 128]++; | 1129 | count = asus->event_count[event % 128]++; |
1261 | acpi_bus_generate_proc_event(asus->device, event, count); | 1130 | acpi_bus_generate_proc_event(asus->device, event, count); |
@@ -1282,7 +1151,7 @@ static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, | |||
1282 | show_bluetooth, store_bluetooth); | 1151 | show_bluetooth, store_bluetooth); |
1283 | static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax); | 1152 | static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax); |
1284 | static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan); | 1153 | static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan); |
1285 | static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp); | 1154 | static DEVICE_ATTR(display, S_IWUSR, NULL, store_disp); |
1286 | static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd); | 1155 | static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd); |
1287 | static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl); | 1156 | static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl); |
1288 | static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw); | 1157 | static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw); |
@@ -1393,26 +1262,6 @@ static struct platform_driver platform_driver = { | |||
1393 | } | 1262 | } |
1394 | }; | 1263 | }; |
1395 | 1264 | ||
1396 | static int asus_handle_init(char *name, acpi_handle * handle, | ||
1397 | char **paths, int num_paths) | ||
1398 | { | ||
1399 | int i; | ||
1400 | acpi_status status; | ||
1401 | |||
1402 | for (i = 0; i < num_paths; i++) { | ||
1403 | status = acpi_get_handle(NULL, paths[i], handle); | ||
1404 | if (ACPI_SUCCESS(status)) | ||
1405 | return 0; | ||
1406 | } | ||
1407 | |||
1408 | *handle = NULL; | ||
1409 | return -ENODEV; | ||
1410 | } | ||
1411 | |||
1412 | #define ASUS_HANDLE_INIT(object) \ | ||
1413 | asus_handle_init(#object, &object##_handle, object##_paths, \ | ||
1414 | ARRAY_SIZE(object##_paths)) | ||
1415 | |||
1416 | /* | 1265 | /* |
1417 | * This function is used to initialize the context with right values. In this | 1266 | * This function is used to initialize the context with right values. In this |
1418 | * method, we can make all the detection we want, and modify the asus_laptop | 1267 | * method, we can make all the detection we want, and modify the asus_laptop |
@@ -1498,10 +1347,6 @@ static int asus_laptop_get_info(struct asus_laptop *asus) | |||
1498 | if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL)) | 1347 | if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL)) |
1499 | asus->have_rsts = true; | 1348 | asus->have_rsts = true; |
1500 | 1349 | ||
1501 | /* Scheduled for removal */ | ||
1502 | ASUS_HANDLE_INIT(lcd_switch); | ||
1503 | ASUS_HANDLE_INIT(display_get); | ||
1504 | |||
1505 | kfree(model); | 1350 | kfree(model); |
1506 | 1351 | ||
1507 | return AE_OK; | 1352 | return AE_OK; |
@@ -1553,10 +1398,23 @@ static int __devinit asus_acpi_init(struct asus_laptop *asus) | |||
1553 | asus_als_level(asus, asus->light_level); | 1398 | asus_als_level(asus, asus->light_level); |
1554 | } | 1399 | } |
1555 | 1400 | ||
1556 | asus->lcd_state = 1; /* LCD should be on when the module load */ | ||
1557 | return result; | 1401 | return result; |
1558 | } | 1402 | } |
1559 | 1403 | ||
1404 | static void __devinit asus_dmi_check(void) | ||
1405 | { | ||
1406 | const char *model; | ||
1407 | |||
1408 | model = dmi_get_system_info(DMI_PRODUCT_NAME); | ||
1409 | if (!model) | ||
1410 | return; | ||
1411 | |||
1412 | /* On L1400B WLED control the sound card, don't mess with it ... */ | ||
1413 | if (strncmp(model, "L1400B", 6) == 0) { | ||
1414 | wlan_status = -1; | ||
1415 | } | ||
1416 | } | ||
1417 | |||
1560 | static bool asus_device_present; | 1418 | static bool asus_device_present; |
1561 | 1419 | ||
1562 | static int __devinit asus_acpi_add(struct acpi_device *device) | 1420 | static int __devinit asus_acpi_add(struct acpi_device *device) |
@@ -1575,6 +1433,8 @@ static int __devinit asus_acpi_add(struct acpi_device *device) | |||
1575 | device->driver_data = asus; | 1433 | device->driver_data = asus; |
1576 | asus->device = device; | 1434 | asus->device = device; |
1577 | 1435 | ||
1436 | asus_dmi_check(); | ||
1437 | |||
1578 | result = asus_acpi_init(asus); | 1438 | result = asus_acpi_init(asus); |
1579 | if (result) | 1439 | if (result) |
1580 | goto fail_platform; | 1440 | goto fail_platform; |
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c new file mode 100644 index 000000000000..0580d99b0798 --- /dev/null +++ b/drivers/platform/x86/asus-nb-wmi.c | |||
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * Asus Notebooks WMI hotkey driver | ||
3 | * | ||
4 | * Copyright(C) 2010 Corentin Chary <corentin.chary@gmail.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/input.h> | ||
27 | #include <linux/input/sparse-keymap.h> | ||
28 | |||
29 | #include "asus-wmi.h" | ||
30 | |||
31 | #define ASUS_NB_WMI_FILE "asus-nb-wmi" | ||
32 | |||
33 | MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>"); | ||
34 | MODULE_DESCRIPTION("Asus Notebooks WMI Hotkey Driver"); | ||
35 | MODULE_LICENSE("GPL"); | ||
36 | |||
37 | #define ASUS_NB_WMI_EVENT_GUID "0B3CBB35-E3C2-45ED-91C2-4C5A6D195D1C" | ||
38 | |||
39 | MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID); | ||
40 | |||
41 | static const struct key_entry asus_nb_wmi_keymap[] = { | ||
42 | { KE_KEY, 0x30, { KEY_VOLUMEUP } }, | ||
43 | { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, | ||
44 | { KE_KEY, 0x32, { KEY_MUTE } }, | ||
45 | { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */ | ||
46 | { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */ | ||
47 | { KE_KEY, 0x40, { KEY_PREVIOUSSONG } }, | ||
48 | { KE_KEY, 0x41, { KEY_NEXTSONG } }, | ||
49 | { KE_KEY, 0x43, { KEY_STOPCD } }, | ||
50 | { KE_KEY, 0x45, { KEY_PLAYPAUSE } }, | ||
51 | { KE_KEY, 0x4c, { KEY_MEDIA } }, | ||
52 | { KE_KEY, 0x50, { KEY_EMAIL } }, | ||
53 | { KE_KEY, 0x51, { KEY_WWW } }, | ||
54 | { KE_KEY, 0x55, { KEY_CALC } }, | ||
55 | { KE_KEY, 0x5C, { KEY_F15 } }, /* Power Gear key */ | ||
56 | { KE_KEY, 0x5D, { KEY_WLAN } }, | ||
57 | { KE_KEY, 0x5E, { KEY_WLAN } }, | ||
58 | { KE_KEY, 0x5F, { KEY_WLAN } }, | ||
59 | { KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } }, | ||
60 | { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, | ||
61 | { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, | ||
62 | { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, | ||
63 | { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, | ||
64 | { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, | ||
65 | { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, | ||
66 | { KE_KEY, 0x82, { KEY_CAMERA } }, | ||
67 | { KE_KEY, 0x88, { KEY_RFKILL } }, | ||
68 | { KE_KEY, 0x8A, { KEY_PROG1 } }, | ||
69 | { KE_KEY, 0x95, { KEY_MEDIA } }, | ||
70 | { KE_KEY, 0x99, { KEY_PHONE } }, | ||
71 | { KE_KEY, 0xb5, { KEY_CALC } }, | ||
72 | { KE_KEY, 0xc4, { KEY_KBDILLUMUP } }, | ||
73 | { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } }, | ||
74 | { KE_END, 0}, | ||
75 | }; | ||
76 | |||
77 | static struct asus_wmi_driver asus_nb_wmi_driver = { | ||
78 | .name = ASUS_NB_WMI_FILE, | ||
79 | .owner = THIS_MODULE, | ||
80 | .event_guid = ASUS_NB_WMI_EVENT_GUID, | ||
81 | .keymap = asus_nb_wmi_keymap, | ||
82 | .input_name = "Asus WMI hotkeys", | ||
83 | .input_phys = ASUS_NB_WMI_FILE "/input0", | ||
84 | }; | ||
85 | |||
86 | |||
87 | static int __init asus_nb_wmi_init(void) | ||
88 | { | ||
89 | return asus_wmi_register_driver(&asus_nb_wmi_driver); | ||
90 | } | ||
91 | |||
92 | static void __exit asus_nb_wmi_exit(void) | ||
93 | { | ||
94 | asus_wmi_unregister_driver(&asus_nb_wmi_driver); | ||
95 | } | ||
96 | |||
97 | module_init(asus_nb_wmi_init); | ||
98 | module_exit(asus_nb_wmi_exit); | ||
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c new file mode 100644 index 000000000000..efc776cb0c66 --- /dev/null +++ b/drivers/platform/x86/asus-wmi.c | |||
@@ -0,0 +1,1656 @@ | |||
1 | /* | ||
2 | * Asus PC WMI hotkey driver | ||
3 | * | ||
4 | * Copyright(C) 2010 Intel Corporation. | ||
5 | * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com> | ||
6 | * | ||
7 | * Portions based on wistron_btns.c: | ||
8 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> | ||
9 | * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org> | ||
10 | * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
25 | */ | ||
26 | |||
27 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
28 | |||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/init.h> | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/input.h> | ||
35 | #include <linux/input/sparse-keymap.h> | ||
36 | #include <linux/fb.h> | ||
37 | #include <linux/backlight.h> | ||
38 | #include <linux/leds.h> | ||
39 | #include <linux/rfkill.h> | ||
40 | #include <linux/pci.h> | ||
41 | #include <linux/pci_hotplug.h> | ||
42 | #include <linux/hwmon.h> | ||
43 | #include <linux/hwmon-sysfs.h> | ||
44 | #include <linux/debugfs.h> | ||
45 | #include <linux/seq_file.h> | ||
46 | #include <linux/platform_device.h> | ||
47 | #include <acpi/acpi_bus.h> | ||
48 | #include <acpi/acpi_drivers.h> | ||
49 | |||
50 | #include "asus-wmi.h" | ||
51 | |||
52 | MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>, " | ||
53 | "Yong Wang <yong.y.wang@intel.com>"); | ||
54 | MODULE_DESCRIPTION("Asus Generic WMI Driver"); | ||
55 | MODULE_LICENSE("GPL"); | ||
56 | |||
57 | #define to_platform_driver(drv) \ | ||
58 | (container_of((drv), struct platform_driver, driver)) | ||
59 | |||
60 | #define to_asus_wmi_driver(pdrv) \ | ||
61 | (container_of((pdrv), struct asus_wmi_driver, platform_driver)) | ||
62 | |||
63 | #define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66" | ||
64 | |||
65 | #define NOTIFY_BRNUP_MIN 0x11 | ||
66 | #define NOTIFY_BRNUP_MAX 0x1f | ||
67 | #define NOTIFY_BRNDOWN_MIN 0x20 | ||
68 | #define NOTIFY_BRNDOWN_MAX 0x2e | ||
69 | |||
70 | /* WMI Methods */ | ||
71 | #define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */ | ||
72 | #define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */ | ||
73 | #define ASUS_WMI_METHODID_GLCD 0x44434C47 /* Get LCD status */ | ||
74 | #define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */ | ||
75 | #define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */ | ||
76 | #define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */ | ||
77 | #define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */ | ||
78 | #define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */ | ||
79 | #define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */ | ||
80 | #define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */ | ||
81 | #define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */ | ||
82 | #define ASUS_WMI_METHODID_DSTS 0x53544344 /* Device STatuS */ | ||
83 | #define ASUS_WMI_METHODID_DSTS2 0x53545344 /* Device STatuS #2*/ | ||
84 | #define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */ | ||
85 | #define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */ | ||
86 | #define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */ | ||
87 | #define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */ | ||
88 | #define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */ | ||
89 | #define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? */ | ||
90 | |||
91 | #define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE | ||
92 | |||
93 | /* Wireless */ | ||
94 | #define ASUS_WMI_DEVID_HW_SWITCH 0x00010001 | ||
95 | #define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 | ||
96 | #define ASUS_WMI_DEVID_WLAN 0x00010011 | ||
97 | #define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 | ||
98 | #define ASUS_WMI_DEVID_GPS 0x00010015 | ||
99 | #define ASUS_WMI_DEVID_WIMAX 0x00010017 | ||
100 | #define ASUS_WMI_DEVID_WWAN3G 0x00010019 | ||
101 | #define ASUS_WMI_DEVID_UWB 0x00010021 | ||
102 | |||
103 | /* Leds */ | ||
104 | /* 0x000200XX and 0x000400XX */ | ||
105 | |||
106 | /* Backlight and Brightness */ | ||
107 | #define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 | ||
108 | #define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012 | ||
109 | #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 | ||
110 | #define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */ | ||
111 | |||
112 | /* Misc */ | ||
113 | #define ASUS_WMI_DEVID_CAMERA 0x00060013 | ||
114 | |||
115 | /* Storage */ | ||
116 | #define ASUS_WMI_DEVID_CARDREADER 0x00080013 | ||
117 | |||
118 | /* Input */ | ||
119 | #define ASUS_WMI_DEVID_TOUCHPAD 0x00100011 | ||
120 | #define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012 | ||
121 | |||
122 | /* Fan, Thermal */ | ||
123 | #define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011 | ||
124 | #define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 | ||
125 | |||
126 | /* Power */ | ||
127 | #define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012 | ||
128 | |||
129 | /* DSTS masks */ | ||
130 | #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 | ||
131 | #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 | ||
132 | #define ASUS_WMI_DSTS_PRESENCE_BIT 0x00010000 | ||
133 | #define ASUS_WMI_DSTS_USER_BIT 0x00020000 | ||
134 | #define ASUS_WMI_DSTS_BIOS_BIT 0x00040000 | ||
135 | #define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF | ||
136 | #define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00 | ||
137 | |||
138 | struct bios_args { | ||
139 | u32 arg0; | ||
140 | u32 arg1; | ||
141 | } __packed; | ||
142 | |||
143 | /* | ||
144 | * <platform>/ - debugfs root directory | ||
145 | * dev_id - current dev_id | ||
146 | * ctrl_param - current ctrl_param | ||
147 | * method_id - current method_id | ||
148 | * devs - call DEVS(dev_id, ctrl_param) and print result | ||
149 | * dsts - call DSTS(dev_id) and print result | ||
150 | * call - call method_id(dev_id, ctrl_param) and print result | ||
151 | */ | ||
152 | struct asus_wmi_debug { | ||
153 | struct dentry *root; | ||
154 | u32 method_id; | ||
155 | u32 dev_id; | ||
156 | u32 ctrl_param; | ||
157 | }; | ||
158 | |||
159 | struct asus_rfkill { | ||
160 | struct asus_wmi *asus; | ||
161 | struct rfkill *rfkill; | ||
162 | u32 dev_id; | ||
163 | }; | ||
164 | |||
165 | struct asus_wmi { | ||
166 | int dsts_id; | ||
167 | int spec; | ||
168 | int sfun; | ||
169 | |||
170 | struct input_dev *inputdev; | ||
171 | struct backlight_device *backlight_device; | ||
172 | struct device *hwmon_device; | ||
173 | struct platform_device *platform_device; | ||
174 | |||
175 | struct led_classdev tpd_led; | ||
176 | int tpd_led_wk; | ||
177 | struct workqueue_struct *led_workqueue; | ||
178 | struct work_struct tpd_led_work; | ||
179 | |||
180 | struct asus_rfkill wlan; | ||
181 | struct asus_rfkill bluetooth; | ||
182 | struct asus_rfkill wimax; | ||
183 | struct asus_rfkill wwan3g; | ||
184 | |||
185 | struct hotplug_slot *hotplug_slot; | ||
186 | struct mutex hotplug_lock; | ||
187 | struct mutex wmi_lock; | ||
188 | struct workqueue_struct *hotplug_workqueue; | ||
189 | struct work_struct hotplug_work; | ||
190 | |||
191 | struct asus_wmi_debug debug; | ||
192 | |||
193 | struct asus_wmi_driver *driver; | ||
194 | }; | ||
195 | |||
196 | static int asus_wmi_input_init(struct asus_wmi *asus) | ||
197 | { | ||
198 | int err; | ||
199 | |||
200 | asus->inputdev = input_allocate_device(); | ||
201 | if (!asus->inputdev) | ||
202 | return -ENOMEM; | ||
203 | |||
204 | asus->inputdev->name = asus->driver->input_phys; | ||
205 | asus->inputdev->phys = asus->driver->input_name; | ||
206 | asus->inputdev->id.bustype = BUS_HOST; | ||
207 | asus->inputdev->dev.parent = &asus->platform_device->dev; | ||
208 | |||
209 | err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL); | ||
210 | if (err) | ||
211 | goto err_free_dev; | ||
212 | |||
213 | err = input_register_device(asus->inputdev); | ||
214 | if (err) | ||
215 | goto err_free_keymap; | ||
216 | |||
217 | return 0; | ||
218 | |||
219 | err_free_keymap: | ||
220 | sparse_keymap_free(asus->inputdev); | ||
221 | err_free_dev: | ||
222 | input_free_device(asus->inputdev); | ||
223 | return err; | ||
224 | } | ||
225 | |||
226 | static void asus_wmi_input_exit(struct asus_wmi *asus) | ||
227 | { | ||
228 | if (asus->inputdev) { | ||
229 | sparse_keymap_free(asus->inputdev); | ||
230 | input_unregister_device(asus->inputdev); | ||
231 | } | ||
232 | |||
233 | asus->inputdev = NULL; | ||
234 | } | ||
235 | |||
236 | static int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, | ||
237 | u32 *retval) | ||
238 | { | ||
239 | struct bios_args args = { | ||
240 | .arg0 = arg0, | ||
241 | .arg1 = arg1, | ||
242 | }; | ||
243 | struct acpi_buffer input = { (acpi_size) sizeof(args), &args }; | ||
244 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
245 | acpi_status status; | ||
246 | union acpi_object *obj; | ||
247 | u32 tmp; | ||
248 | |||
249 | status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 1, method_id, | ||
250 | &input, &output); | ||
251 | |||
252 | if (ACPI_FAILURE(status)) | ||
253 | goto exit; | ||
254 | |||
255 | obj = (union acpi_object *)output.pointer; | ||
256 | if (obj && obj->type == ACPI_TYPE_INTEGER) | ||
257 | tmp = (u32) obj->integer.value; | ||
258 | else | ||
259 | tmp = 0; | ||
260 | |||
261 | if (retval) | ||
262 | *retval = tmp; | ||
263 | |||
264 | kfree(obj); | ||
265 | |||
266 | exit: | ||
267 | if (ACPI_FAILURE(status)) | ||
268 | return -EIO; | ||
269 | |||
270 | if (tmp == ASUS_WMI_UNSUPPORTED_METHOD) | ||
271 | return -ENODEV; | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval) | ||
277 | { | ||
278 | return asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval); | ||
279 | } | ||
280 | |||
281 | static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, | ||
282 | u32 *retval) | ||
283 | { | ||
284 | return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id, | ||
285 | ctrl_param, retval); | ||
286 | } | ||
287 | |||
288 | /* Helper for special devices with magic return codes */ | ||
289 | static int asus_wmi_get_devstate_bits(struct asus_wmi *asus, | ||
290 | u32 dev_id, u32 mask) | ||
291 | { | ||
292 | u32 retval = 0; | ||
293 | int err; | ||
294 | |||
295 | err = asus_wmi_get_devstate(asus, dev_id, &retval); | ||
296 | |||
297 | if (err < 0) | ||
298 | return err; | ||
299 | |||
300 | if (!(retval & ASUS_WMI_DSTS_PRESENCE_BIT)) | ||
301 | return -ENODEV; | ||
302 | |||
303 | if (mask == ASUS_WMI_DSTS_STATUS_BIT) { | ||
304 | if (retval & ASUS_WMI_DSTS_UNKNOWN_BIT) | ||
305 | return -ENODEV; | ||
306 | } | ||
307 | |||
308 | return retval & mask; | ||
309 | } | ||
310 | |||
311 | static int asus_wmi_get_devstate_simple(struct asus_wmi *asus, u32 dev_id) | ||
312 | { | ||
313 | return asus_wmi_get_devstate_bits(asus, dev_id, | ||
314 | ASUS_WMI_DSTS_STATUS_BIT); | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * LEDs | ||
319 | */ | ||
320 | /* | ||
321 | * These functions actually update the LED's, and are called from a | ||
322 | * workqueue. By doing this as separate work rather than when the LED | ||
323 | * subsystem asks, we avoid messing with the Asus ACPI stuff during a | ||
324 | * potentially bad time, such as a timer interrupt. | ||
325 | */ | ||
326 | static void tpd_led_update(struct work_struct *work) | ||
327 | { | ||
328 | int ctrl_param; | ||
329 | struct asus_wmi *asus; | ||
330 | |||
331 | asus = container_of(work, struct asus_wmi, tpd_led_work); | ||
332 | |||
333 | ctrl_param = asus->tpd_led_wk; | ||
334 | asus_wmi_set_devstate(ASUS_WMI_DEVID_TOUCHPAD_LED, ctrl_param, NULL); | ||
335 | } | ||
336 | |||
337 | static void tpd_led_set(struct led_classdev *led_cdev, | ||
338 | enum led_brightness value) | ||
339 | { | ||
340 | struct asus_wmi *asus; | ||
341 | |||
342 | asus = container_of(led_cdev, struct asus_wmi, tpd_led); | ||
343 | |||
344 | asus->tpd_led_wk = !!value; | ||
345 | queue_work(asus->led_workqueue, &asus->tpd_led_work); | ||
346 | } | ||
347 | |||
348 | static int read_tpd_led_state(struct asus_wmi *asus) | ||
349 | { | ||
350 | return asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_TOUCHPAD_LED); | ||
351 | } | ||
352 | |||
353 | static enum led_brightness tpd_led_get(struct led_classdev *led_cdev) | ||
354 | { | ||
355 | struct asus_wmi *asus; | ||
356 | |||
357 | asus = container_of(led_cdev, struct asus_wmi, tpd_led); | ||
358 | |||
359 | return read_tpd_led_state(asus); | ||
360 | } | ||
361 | |||
362 | static int asus_wmi_led_init(struct asus_wmi *asus) | ||
363 | { | ||
364 | int rv; | ||
365 | |||
366 | if (read_tpd_led_state(asus) < 0) | ||
367 | return 0; | ||
368 | |||
369 | asus->led_workqueue = create_singlethread_workqueue("led_workqueue"); | ||
370 | if (!asus->led_workqueue) | ||
371 | return -ENOMEM; | ||
372 | INIT_WORK(&asus->tpd_led_work, tpd_led_update); | ||
373 | |||
374 | asus->tpd_led.name = "asus::touchpad"; | ||
375 | asus->tpd_led.brightness_set = tpd_led_set; | ||
376 | asus->tpd_led.brightness_get = tpd_led_get; | ||
377 | asus->tpd_led.max_brightness = 1; | ||
378 | |||
379 | rv = led_classdev_register(&asus->platform_device->dev, &asus->tpd_led); | ||
380 | if (rv) { | ||
381 | destroy_workqueue(asus->led_workqueue); | ||
382 | return rv; | ||
383 | } | ||
384 | |||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | static void asus_wmi_led_exit(struct asus_wmi *asus) | ||
389 | { | ||
390 | if (asus->tpd_led.dev) | ||
391 | led_classdev_unregister(&asus->tpd_led); | ||
392 | if (asus->led_workqueue) | ||
393 | destroy_workqueue(asus->led_workqueue); | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * PCI hotplug (for wlan rfkill) | ||
398 | */ | ||
399 | static bool asus_wlan_rfkill_blocked(struct asus_wmi *asus) | ||
400 | { | ||
401 | int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN); | ||
402 | |||
403 | if (result < 0) | ||
404 | return false; | ||
405 | return !result; | ||
406 | } | ||
407 | |||
408 | static void asus_rfkill_hotplug(struct asus_wmi *asus) | ||
409 | { | ||
410 | struct pci_dev *dev; | ||
411 | struct pci_bus *bus; | ||
412 | bool blocked; | ||
413 | bool absent; | ||
414 | u32 l; | ||
415 | |||
416 | mutex_lock(&asus->wmi_lock); | ||
417 | blocked = asus_wlan_rfkill_blocked(asus); | ||
418 | mutex_unlock(&asus->wmi_lock); | ||
419 | |||
420 | mutex_lock(&asus->hotplug_lock); | ||
421 | |||
422 | if (asus->wlan.rfkill) | ||
423 | rfkill_set_sw_state(asus->wlan.rfkill, blocked); | ||
424 | |||
425 | if (asus->hotplug_slot) { | ||
426 | bus = pci_find_bus(0, 1); | ||
427 | if (!bus) { | ||
428 | pr_warning("Unable to find PCI bus 1?\n"); | ||
429 | goto out_unlock; | ||
430 | } | ||
431 | |||
432 | if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) { | ||
433 | pr_err("Unable to read PCI config space?\n"); | ||
434 | goto out_unlock; | ||
435 | } | ||
436 | absent = (l == 0xffffffff); | ||
437 | |||
438 | if (blocked != absent) { | ||
439 | pr_warning("BIOS says wireless lan is %s, " | ||
440 | "but the pci device is %s\n", | ||
441 | blocked ? "blocked" : "unblocked", | ||
442 | absent ? "absent" : "present"); | ||
443 | pr_warning("skipped wireless hotplug as probably " | ||
444 | "inappropriate for this model\n"); | ||
445 | goto out_unlock; | ||
446 | } | ||
447 | |||
448 | if (!blocked) { | ||
449 | dev = pci_get_slot(bus, 0); | ||
450 | if (dev) { | ||
451 | /* Device already present */ | ||
452 | pci_dev_put(dev); | ||
453 | goto out_unlock; | ||
454 | } | ||
455 | dev = pci_scan_single_device(bus, 0); | ||
456 | if (dev) { | ||
457 | pci_bus_assign_resources(bus); | ||
458 | if (pci_bus_add_device(dev)) | ||
459 | pr_err("Unable to hotplug wifi\n"); | ||
460 | } | ||
461 | } else { | ||
462 | dev = pci_get_slot(bus, 0); | ||
463 | if (dev) { | ||
464 | pci_remove_bus_device(dev); | ||
465 | pci_dev_put(dev); | ||
466 | } | ||
467 | } | ||
468 | } | ||
469 | |||
470 | out_unlock: | ||
471 | mutex_unlock(&asus->hotplug_lock); | ||
472 | } | ||
473 | |||
474 | static void asus_rfkill_notify(acpi_handle handle, u32 event, void *data) | ||
475 | { | ||
476 | struct asus_wmi *asus = data; | ||
477 | |||
478 | if (event != ACPI_NOTIFY_BUS_CHECK) | ||
479 | return; | ||
480 | |||
481 | /* | ||
482 | * We can't call directly asus_rfkill_hotplug because most | ||
483 | * of the time WMBC is still being executed and not reetrant. | ||
484 | * There is currently no way to tell ACPICA that we want this | ||
485 | * method to be serialized, we schedule a asus_rfkill_hotplug | ||
486 | * call later, in a safer context. | ||
487 | */ | ||
488 | queue_work(asus->hotplug_workqueue, &asus->hotplug_work); | ||
489 | } | ||
490 | |||
491 | static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node) | ||
492 | { | ||
493 | acpi_status status; | ||
494 | acpi_handle handle; | ||
495 | |||
496 | status = acpi_get_handle(NULL, node, &handle); | ||
497 | |||
498 | if (ACPI_SUCCESS(status)) { | ||
499 | status = acpi_install_notify_handler(handle, | ||
500 | ACPI_SYSTEM_NOTIFY, | ||
501 | asus_rfkill_notify, asus); | ||
502 | if (ACPI_FAILURE(status)) | ||
503 | pr_warning("Failed to register notify on %s\n", node); | ||
504 | } else | ||
505 | return -ENODEV; | ||
506 | |||
507 | return 0; | ||
508 | } | ||
509 | |||
510 | static void asus_unregister_rfkill_notifier(struct asus_wmi *asus, char *node) | ||
511 | { | ||
512 | acpi_status status = AE_OK; | ||
513 | acpi_handle handle; | ||
514 | |||
515 | status = acpi_get_handle(NULL, node, &handle); | ||
516 | |||
517 | if (ACPI_SUCCESS(status)) { | ||
518 | status = acpi_remove_notify_handler(handle, | ||
519 | ACPI_SYSTEM_NOTIFY, | ||
520 | asus_rfkill_notify); | ||
521 | if (ACPI_FAILURE(status)) | ||
522 | pr_err("Error removing rfkill notify handler %s\n", | ||
523 | node); | ||
524 | } | ||
525 | } | ||
526 | |||
527 | static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot, | ||
528 | u8 *value) | ||
529 | { | ||
530 | struct asus_wmi *asus = hotplug_slot->private; | ||
531 | int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN); | ||
532 | |||
533 | if (result < 0) | ||
534 | return result; | ||
535 | |||
536 | *value = !!result; | ||
537 | return 0; | ||
538 | } | ||
539 | |||
540 | static void asus_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot) | ||
541 | { | ||
542 | kfree(hotplug_slot->info); | ||
543 | kfree(hotplug_slot); | ||
544 | } | ||
545 | |||
/*
 * Minimal hotplug-slot operations: the WLAN presence query doubles as the
 * power-status query, since a disabled card is also reported absent.
 */
static struct hotplug_slot_ops asus_hotplug_slot_ops = {
	.owner = THIS_MODULE,
	.get_adapter_status = asus_get_adapter_status,
	.get_power_status = asus_get_adapter_status,
};
551 | |||
552 | static void asus_hotplug_work(struct work_struct *work) | ||
553 | { | ||
554 | struct asus_wmi *asus; | ||
555 | |||
556 | asus = container_of(work, struct asus_wmi, hotplug_work); | ||
557 | asus_rfkill_hotplug(asus); | ||
558 | } | ||
559 | |||
/*
 * Register a fake PCI hotplug slot ("asus-wifi") on bus 0000:00:01 so
 * userspace can observe wifi-card insertion/removal driven by rfkill, and
 * create the single-threaded workqueue used to defer hotplug handling out
 * of WMI notification context.
 *
 * Returns 0 on success or a negative errno; every partially-acquired
 * resource is unwound through the error labels below (reverse order of
 * acquisition).
 */
static int asus_setup_pci_hotplug(struct asus_wmi *asus)
{
	int ret = -ENOMEM;
	struct pci_bus *bus = pci_find_bus(0, 1);

	if (!bus) {
		pr_err("Unable to find wifi PCI bus\n");
		return -ENODEV;
	}

	asus->hotplug_workqueue =
	    create_singlethread_workqueue("hotplug_workqueue");
	if (!asus->hotplug_workqueue)
		goto error_workqueue;

	INIT_WORK(&asus->hotplug_work, asus_hotplug_work);

	asus->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
	if (!asus->hotplug_slot)
		goto error_slot;

	asus->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
					   GFP_KERNEL);
	if (!asus->hotplug_slot->info)
		goto error_info;

	asus->hotplug_slot->private = asus;
	/* release callback frees slot + info when the slot is deregistered */
	asus->hotplug_slot->release = &asus_cleanup_pci_hotplug;
	asus->hotplug_slot->ops = &asus_hotplug_slot_ops;
	/* seed the slot with the current wlan presence state */
	asus_get_adapter_status(asus->hotplug_slot,
				&asus->hotplug_slot->info->adapter_status);

	ret = pci_hp_register(asus->hotplug_slot, bus, 0, "asus-wifi");
	if (ret) {
		pr_err("Unable to register hotplug slot - %d\n", ret);
		goto error_register;
	}

	return 0;

error_register:
	kfree(asus->hotplug_slot->info);
error_info:
	kfree(asus->hotplug_slot);
	asus->hotplug_slot = NULL;
error_slot:
	destroy_workqueue(asus->hotplug_workqueue);
error_workqueue:
	return ret;
}
610 | |||
611 | /* | ||
612 | * Rfkill devices | ||
613 | */ | ||
614 | static int asus_rfkill_set(void *data, bool blocked) | ||
615 | { | ||
616 | struct asus_rfkill *priv = data; | ||
617 | u32 ctrl_param = !blocked; | ||
618 | |||
619 | return asus_wmi_set_devstate(priv->dev_id, ctrl_param, NULL); | ||
620 | } | ||
621 | |||
622 | static void asus_rfkill_query(struct rfkill *rfkill, void *data) | ||
623 | { | ||
624 | struct asus_rfkill *priv = data; | ||
625 | int result; | ||
626 | |||
627 | result = asus_wmi_get_devstate_simple(priv->asus, priv->dev_id); | ||
628 | |||
629 | if (result < 0) | ||
630 | return; | ||
631 | |||
632 | rfkill_set_sw_state(priv->rfkill, !result); | ||
633 | } | ||
634 | |||
635 | static int asus_rfkill_wlan_set(void *data, bool blocked) | ||
636 | { | ||
637 | struct asus_rfkill *priv = data; | ||
638 | struct asus_wmi *asus = priv->asus; | ||
639 | int ret; | ||
640 | |||
641 | /* | ||
642 | * This handler is enabled only if hotplug is enabled. | ||
643 | * In this case, the asus_wmi_set_devstate() will | ||
644 | * trigger a wmi notification and we need to wait | ||
645 | * this call to finish before being able to call | ||
646 | * any wmi method | ||
647 | */ | ||
648 | mutex_lock(&asus->wmi_lock); | ||
649 | ret = asus_rfkill_set(data, blocked); | ||
650 | mutex_unlock(&asus->wmi_lock); | ||
651 | return ret; | ||
652 | } | ||
653 | |||
/* rfkill ops for WLAN when hotplug is active (serialized set_block). */
static const struct rfkill_ops asus_rfkill_wlan_ops = {
	.set_block = asus_rfkill_wlan_set,
	.query = asus_rfkill_query,
};
658 | |||
/* Default rfkill ops for all other radios (and WLAN without hotplug). */
static const struct rfkill_ops asus_rfkill_ops = {
	.set_block = asus_rfkill_set,
	.query = asus_rfkill_query,
};
663 | |||
664 | static int asus_new_rfkill(struct asus_wmi *asus, | ||
665 | struct asus_rfkill *arfkill, | ||
666 | const char *name, enum rfkill_type type, int dev_id) | ||
667 | { | ||
668 | int result = asus_wmi_get_devstate_simple(asus, dev_id); | ||
669 | struct rfkill **rfkill = &arfkill->rfkill; | ||
670 | |||
671 | if (result < 0) | ||
672 | return result; | ||
673 | |||
674 | arfkill->dev_id = dev_id; | ||
675 | arfkill->asus = asus; | ||
676 | |||
677 | if (dev_id == ASUS_WMI_DEVID_WLAN && asus->driver->hotplug_wireless) | ||
678 | *rfkill = rfkill_alloc(name, &asus->platform_device->dev, type, | ||
679 | &asus_rfkill_wlan_ops, arfkill); | ||
680 | else | ||
681 | *rfkill = rfkill_alloc(name, &asus->platform_device->dev, type, | ||
682 | &asus_rfkill_ops, arfkill); | ||
683 | |||
684 | if (!*rfkill) | ||
685 | return -EINVAL; | ||
686 | |||
687 | rfkill_init_sw_state(*rfkill, !result); | ||
688 | result = rfkill_register(*rfkill); | ||
689 | if (result) { | ||
690 | rfkill_destroy(*rfkill); | ||
691 | *rfkill = NULL; | ||
692 | return result; | ||
693 | } | ||
694 | return 0; | ||
695 | } | ||
696 | |||
/*
 * Unregister every rfkill switch and the wifi hotplug machinery.
 *
 * Teardown order matters: ACPI notifiers are removed first so no new
 * hotplug work can be queued, then the wlan rfkill goes away, and only
 * after a final state refresh are the hotplug slot and workqueue freed.
 */
static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
{
	asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
	asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
	asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
	if (asus->wlan.rfkill) {
		rfkill_unregister(asus->wlan.rfkill);
		rfkill_destroy(asus->wlan.rfkill);
		asus->wlan.rfkill = NULL;
	}
	/*
	 * Refresh pci hotplug in case the rfkill state was changed after
	 * asus_unregister_rfkill_notifier()
	 */
	asus_rfkill_hotplug(asus);
	if (asus->hotplug_slot)
		pci_hp_deregister(asus->hotplug_slot);
	if (asus->hotplug_workqueue)
		destroy_workqueue(asus->hotplug_workqueue);

	if (asus->bluetooth.rfkill) {
		rfkill_unregister(asus->bluetooth.rfkill);
		rfkill_destroy(asus->bluetooth.rfkill);
		asus->bluetooth.rfkill = NULL;
	}
	if (asus->wimax.rfkill) {
		rfkill_unregister(asus->wimax.rfkill);
		rfkill_destroy(asus->wimax.rfkill);
		asus->wimax.rfkill = NULL;
	}
	if (asus->wwan3g.rfkill) {
		rfkill_unregister(asus->wwan3g.rfkill);
		rfkill_destroy(asus->wwan3g.rfkill);
		asus->wwan3g.rfkill = NULL;
	}
}
733 | |||
/*
 * Create the wlan/bluetooth/wimax/wwan3g rfkill switches (an absent
 * device, -ENODEV, is not an error) and, when the driver requests it,
 * set up the wifi PCI hotplug emulation and its ACPI notifiers.
 *
 * Any real failure unwinds everything via asus_wmi_rfkill_exit().
 */
static int asus_wmi_rfkill_init(struct asus_wmi *asus)
{
	int result = 0;

	mutex_init(&asus->hotplug_lock);
	mutex_init(&asus->wmi_lock);

	result = asus_new_rfkill(asus, &asus->wlan, "asus-wlan",
				 RFKILL_TYPE_WLAN, ASUS_WMI_DEVID_WLAN);

	if (result && result != -ENODEV)
		goto exit;

	result = asus_new_rfkill(asus, &asus->bluetooth,
				 "asus-bluetooth", RFKILL_TYPE_BLUETOOTH,
				 ASUS_WMI_DEVID_BLUETOOTH);

	if (result && result != -ENODEV)
		goto exit;

	result = asus_new_rfkill(asus, &asus->wimax, "asus-wimax",
				 RFKILL_TYPE_WIMAX, ASUS_WMI_DEVID_WIMAX);

	if (result && result != -ENODEV)
		goto exit;

	result = asus_new_rfkill(asus, &asus->wwan3g, "asus-wwan3g",
				 RFKILL_TYPE_WWAN, ASUS_WMI_DEVID_WWAN3G);

	if (result && result != -ENODEV)
		goto exit;

	if (!asus->driver->hotplug_wireless)
		goto exit;

	result = asus_setup_pci_hotplug(asus);
	/*
	 * If we get -EBUSY then something else is handling the PCI hotplug -
	 * don't fail in this case
	 */
	if (result == -EBUSY)
		result = 0;

	asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
	asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
	asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
	/*
	 * Refresh pci hotplug in case the rfkill state was changed during
	 * setup.
	 */
	asus_rfkill_hotplug(asus);

exit:
	/* real failure: tear down whatever was set up above */
	if (result && result != -ENODEV)
		asus_wmi_rfkill_exit(asus);

	/* a completely absent device is not a probe failure */
	if (result == -ENODEV)
		result = 0;

	return result;
}
795 | |||
796 | /* | ||
797 | * Hwmon device | ||
798 | */ | ||
799 | static ssize_t asus_hwmon_pwm1(struct device *dev, | ||
800 | struct device_attribute *attr, | ||
801 | char *buf) | ||
802 | { | ||
803 | struct asus_wmi *asus = dev_get_drvdata(dev); | ||
804 | u32 value; | ||
805 | int err; | ||
806 | |||
807 | err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value); | ||
808 | |||
809 | if (err < 0) | ||
810 | return err; | ||
811 | |||
812 | value |= 0xFF; | ||
813 | |||
814 | if (value == 1) /* Low Speed */ | ||
815 | value = 85; | ||
816 | else if (value == 2) | ||
817 | value = 170; | ||
818 | else if (value == 3) | ||
819 | value = 255; | ||
820 | else if (value != 0) { | ||
821 | pr_err("Unknown fan speed %#x", value); | ||
822 | value = -1; | ||
823 | } | ||
824 | |||
825 | return sprintf(buf, "%d\n", value); | ||
826 | } | ||
827 | |||
/* pwm1: read-only fan speed approximation (see asus_hwmon_pwm1) */
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0);
829 | |||
830 | static ssize_t | ||
831 | show_name(struct device *dev, struct device_attribute *attr, char *buf) | ||
832 | { | ||
833 | return sprintf(buf, "asus\n"); | ||
834 | } | ||
835 | static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); | ||
836 | |||
/* Attributes exported through the hwmon class device (NULL-terminated). */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_name.dev_attr.attr,
	NULL
};
842 | |||
843 | static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, | ||
844 | struct attribute *attr, int idx) | ||
845 | { | ||
846 | struct device *dev = container_of(kobj, struct device, kobj); | ||
847 | struct platform_device *pdev = to_platform_device(dev->parent); | ||
848 | struct asus_wmi *asus = platform_get_drvdata(pdev); | ||
849 | bool ok = true; | ||
850 | int dev_id = -1; | ||
851 | u32 value = ASUS_WMI_UNSUPPORTED_METHOD; | ||
852 | |||
853 | if (attr == &sensor_dev_attr_pwm1.dev_attr.attr) | ||
854 | dev_id = ASUS_WMI_DEVID_FAN_CTRL; | ||
855 | |||
856 | if (dev_id != -1) { | ||
857 | int err = asus_wmi_get_devstate(asus, dev_id, &value); | ||
858 | |||
859 | if (err < 0) | ||
860 | return err; | ||
861 | } | ||
862 | |||
863 | if (dev_id == ASUS_WMI_DEVID_FAN_CTRL) { | ||
864 | /* | ||
865 | * We need to find a better way, probably using sfun, | ||
866 | * bits or spec ... | ||
867 | * Currently we disable it if: | ||
868 | * - ASUS_WMI_UNSUPPORTED_METHOD is returned | ||
869 | * - reverved bits are non-zero | ||
870 | * - sfun and presence bit are not set | ||
871 | */ | ||
872 | if (value != ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000 | ||
873 | || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT))) | ||
874 | ok = false; | ||
875 | } | ||
876 | |||
877 | return ok ? attr->mode : 0; | ||
878 | } | ||
879 | |||
/* hwmon attribute group; visibility is decided per-attribute at runtime. */
static struct attribute_group hwmon_attribute_group = {
	.is_visible = asus_hwmon_sysfs_is_visible,
	.attrs = hwmon_attributes
};
884 | |||
885 | static void asus_wmi_hwmon_exit(struct asus_wmi *asus) | ||
886 | { | ||
887 | struct device *hwmon; | ||
888 | |||
889 | hwmon = asus->hwmon_device; | ||
890 | if (!hwmon) | ||
891 | return; | ||
892 | sysfs_remove_group(&hwmon->kobj, &hwmon_attribute_group); | ||
893 | hwmon_device_unregister(hwmon); | ||
894 | asus->hwmon_device = NULL; | ||
895 | } | ||
896 | |||
897 | static int asus_wmi_hwmon_init(struct asus_wmi *asus) | ||
898 | { | ||
899 | struct device *hwmon; | ||
900 | int result; | ||
901 | |||
902 | hwmon = hwmon_device_register(&asus->platform_device->dev); | ||
903 | if (IS_ERR(hwmon)) { | ||
904 | pr_err("Could not register asus hwmon device\n"); | ||
905 | return PTR_ERR(hwmon); | ||
906 | } | ||
907 | asus->hwmon_device = hwmon; | ||
908 | result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group); | ||
909 | if (result) | ||
910 | asus_wmi_hwmon_exit(asus); | ||
911 | return result; | ||
912 | } | ||
913 | |||
914 | /* | ||
915 | * Backlight | ||
916 | */ | ||
917 | static int read_backlight_power(struct asus_wmi *asus) | ||
918 | { | ||
919 | int ret = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_BACKLIGHT); | ||
920 | |||
921 | if (ret < 0) | ||
922 | return ret; | ||
923 | |||
924 | return ret ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; | ||
925 | } | ||
926 | |||
927 | static int read_brightness_max(struct asus_wmi *asus) | ||
928 | { | ||
929 | u32 retval; | ||
930 | int err; | ||
931 | |||
932 | err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval); | ||
933 | |||
934 | if (err < 0) | ||
935 | return err; | ||
936 | |||
937 | retval = retval & ASUS_WMI_DSTS_MAX_BRIGTH_MASK; | ||
938 | retval >>= 8; | ||
939 | |||
940 | if (!retval) | ||
941 | return -ENODEV; | ||
942 | |||
943 | return retval; | ||
944 | } | ||
945 | |||
946 | static int read_brightness(struct backlight_device *bd) | ||
947 | { | ||
948 | struct asus_wmi *asus = bl_get_data(bd); | ||
949 | u32 retval; | ||
950 | int err; | ||
951 | |||
952 | err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval); | ||
953 | |||
954 | if (err < 0) | ||
955 | return err; | ||
956 | |||
957 | return retval & ASUS_WMI_DSTS_BRIGHTNESS_MASK; | ||
958 | } | ||
959 | |||
960 | static int update_bl_status(struct backlight_device *bd) | ||
961 | { | ||
962 | struct asus_wmi *asus = bl_get_data(bd); | ||
963 | u32 ctrl_param; | ||
964 | int power, err; | ||
965 | |||
966 | ctrl_param = bd->props.brightness; | ||
967 | |||
968 | err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BRIGHTNESS, | ||
969 | ctrl_param, NULL); | ||
970 | |||
971 | if (err < 0) | ||
972 | return err; | ||
973 | |||
974 | power = read_backlight_power(asus); | ||
975 | if (power != -ENODEV && bd->props.power != power) { | ||
976 | ctrl_param = !!(bd->props.power == FB_BLANK_UNBLANK); | ||
977 | err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, | ||
978 | ctrl_param, NULL); | ||
979 | } | ||
980 | return err; | ||
981 | } | ||
982 | |||
/* Backlight operations wired into the backlight class device. */
static const struct backlight_ops asus_wmi_bl_ops = {
	.get_brightness = read_brightness,
	.update_status = update_bl_status,
};
987 | |||
988 | static int asus_wmi_backlight_notify(struct asus_wmi *asus, int code) | ||
989 | { | ||
990 | struct backlight_device *bd = asus->backlight_device; | ||
991 | int old = bd->props.brightness; | ||
992 | int new = old; | ||
993 | |||
994 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | ||
995 | new = code - NOTIFY_BRNUP_MIN + 1; | ||
996 | else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) | ||
997 | new = code - NOTIFY_BRNDOWN_MIN; | ||
998 | |||
999 | bd->props.brightness = new; | ||
1000 | backlight_update_status(bd); | ||
1001 | backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY); | ||
1002 | |||
1003 | return old; | ||
1004 | } | ||
1005 | |||
1006 | static int asus_wmi_backlight_init(struct asus_wmi *asus) | ||
1007 | { | ||
1008 | struct backlight_device *bd; | ||
1009 | struct backlight_properties props; | ||
1010 | int max; | ||
1011 | int power; | ||
1012 | |||
1013 | max = read_brightness_max(asus); | ||
1014 | |||
1015 | if (max == -ENODEV) | ||
1016 | max = 0; | ||
1017 | else if (max < 0) | ||
1018 | return max; | ||
1019 | |||
1020 | power = read_backlight_power(asus); | ||
1021 | |||
1022 | if (power == -ENODEV) | ||
1023 | power = FB_BLANK_UNBLANK; | ||
1024 | else if (power < 0) | ||
1025 | return power; | ||
1026 | |||
1027 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
1028 | props.max_brightness = max; | ||
1029 | bd = backlight_device_register(asus->driver->name, | ||
1030 | &asus->platform_device->dev, asus, | ||
1031 | &asus_wmi_bl_ops, &props); | ||
1032 | if (IS_ERR(bd)) { | ||
1033 | pr_err("Could not register backlight device\n"); | ||
1034 | return PTR_ERR(bd); | ||
1035 | } | ||
1036 | |||
1037 | asus->backlight_device = bd; | ||
1038 | |||
1039 | bd->props.brightness = read_brightness(bd); | ||
1040 | bd->props.power = power; | ||
1041 | backlight_update_status(bd); | ||
1042 | |||
1043 | return 0; | ||
1044 | } | ||
1045 | |||
1046 | static void asus_wmi_backlight_exit(struct asus_wmi *asus) | ||
1047 | { | ||
1048 | if (asus->backlight_device) | ||
1049 | backlight_device_unregister(asus->backlight_device); | ||
1050 | |||
1051 | asus->backlight_device = NULL; | ||
1052 | } | ||
1053 | |||
1054 | static void asus_wmi_notify(u32 value, void *context) | ||
1055 | { | ||
1056 | struct asus_wmi *asus = context; | ||
1057 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
1058 | union acpi_object *obj; | ||
1059 | acpi_status status; | ||
1060 | int code; | ||
1061 | int orig_code; | ||
1062 | |||
1063 | status = wmi_get_event_data(value, &response); | ||
1064 | if (status != AE_OK) { | ||
1065 | pr_err("bad event status 0x%x\n", status); | ||
1066 | return; | ||
1067 | } | ||
1068 | |||
1069 | obj = (union acpi_object *)response.pointer; | ||
1070 | |||
1071 | if (!obj || obj->type != ACPI_TYPE_INTEGER) | ||
1072 | goto exit; | ||
1073 | |||
1074 | code = obj->integer.value; | ||
1075 | orig_code = code; | ||
1076 | |||
1077 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | ||
1078 | code = NOTIFY_BRNUP_MIN; | ||
1079 | else if (code >= NOTIFY_BRNDOWN_MIN && | ||
1080 | code <= NOTIFY_BRNDOWN_MAX) | ||
1081 | code = NOTIFY_BRNDOWN_MIN; | ||
1082 | |||
1083 | if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) { | ||
1084 | if (!acpi_video_backlight_support()) | ||
1085 | asus_wmi_backlight_notify(asus, orig_code); | ||
1086 | } else if (!sparse_keymap_report_event(asus->inputdev, code, 1, true)) | ||
1087 | pr_info("Unknown key %x pressed\n", code); | ||
1088 | |||
1089 | exit: | ||
1090 | kfree(obj); | ||
1091 | } | ||
1092 | |||
1093 | /* | ||
1094 | * Sys helpers | ||
1095 | */ | ||
/*
 * Parse a user-supplied integer with "%i" semantics (decimal, octal or
 * hex). Returns @count on success, 0 when @count is zero (nothing to
 * parse), or -EINVAL when @buf does not start with an integer.
 */
static int parse_arg(const char *buf, unsigned long count, int *val)
{
	if (!count)
		return 0;

	return (sscanf(buf, "%i", val) == 1) ? (int)count : -EINVAL;
}
1104 | |||
1105 | static ssize_t store_sys_wmi(struct asus_wmi *asus, int devid, | ||
1106 | const char *buf, size_t count) | ||
1107 | { | ||
1108 | u32 retval; | ||
1109 | int rv, err, value; | ||
1110 | |||
1111 | value = asus_wmi_get_devstate_simple(asus, devid); | ||
1112 | if (value == -ENODEV) /* Check device presence */ | ||
1113 | return value; | ||
1114 | |||
1115 | rv = parse_arg(buf, count, &value); | ||
1116 | err = asus_wmi_set_devstate(devid, value, &retval); | ||
1117 | |||
1118 | if (err < 0) | ||
1119 | return err; | ||
1120 | |||
1121 | return rv; | ||
1122 | } | ||
1123 | |||
1124 | static ssize_t show_sys_wmi(struct asus_wmi *asus, int devid, char *buf) | ||
1125 | { | ||
1126 | int value = asus_wmi_get_devstate_simple(asus, devid); | ||
1127 | |||
1128 | if (value < 0) | ||
1129 | return value; | ||
1130 | |||
1131 | return sprintf(buf, "%d\n", value); | ||
1132 | } | ||
1133 | |||
/*
 * Generate show_<_name>/store_<_name> sysfs handlers plus a
 * dev_attr_<_name> device_attribute bound to the WMI device id _cm,
 * forwarding to the generic show_sys_wmi()/store_sys_wmi() helpers.
 */
#define ASUS_WMI_CREATE_DEVICE_ATTR(_name, _mode, _cm)			\
	static ssize_t show_##_name(struct device *dev,			\
				    struct device_attribute *attr,	\
				    char *buf)				\
	{								\
		struct asus_wmi *asus = dev_get_drvdata(dev);		\
									\
		return show_sys_wmi(asus, _cm, buf);			\
	}								\
	static ssize_t store_##_name(struct device *dev,		\
				     struct device_attribute *attr,	\
				     const char *buf, size_t count)	\
	{								\
		struct asus_wmi *asus = dev_get_drvdata(dev);		\
									\
		return store_sys_wmi(asus, _cm, buf, count);		\
	}								\
	static struct device_attribute dev_attr_##_name = {		\
		.attr = {						\
			.name = __stringify(_name),			\
			.mode = _mode },				\
		.show = show_##_name,					\
		.store = store_##_name,					\
	}
1158 | |||
/* Per-device enable/disable toggles exposed in sysfs (root-writable). */
ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD);
ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA);
ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
1162 | |||
1163 | static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, | ||
1164 | const char *buf, size_t count) | ||
1165 | { | ||
1166 | int value; | ||
1167 | |||
1168 | if (!count || sscanf(buf, "%i", &value) != 1) | ||
1169 | return -EINVAL; | ||
1170 | if (value < 0 || value > 2) | ||
1171 | return -EINVAL; | ||
1172 | |||
1173 | return asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL); | ||
1174 | } | ||
1175 | |||
1176 | static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv); | ||
1177 | |||
/* Attributes attached to the platform device (NULL-terminated). */
static struct attribute *platform_attributes[] = {
	&dev_attr_cpufv.attr,
	&dev_attr_camera.attr,
	&dev_attr_cardr.attr,
	&dev_attr_touchpad.attr,
	NULL
};
1185 | |||
1186 | static mode_t asus_sysfs_is_visible(struct kobject *kobj, | ||
1187 | struct attribute *attr, int idx) | ||
1188 | { | ||
1189 | struct device *dev = container_of(kobj, struct device, kobj); | ||
1190 | struct platform_device *pdev = to_platform_device(dev); | ||
1191 | struct asus_wmi *asus = platform_get_drvdata(pdev); | ||
1192 | bool ok = true; | ||
1193 | int devid = -1; | ||
1194 | |||
1195 | if (attr == &dev_attr_camera.attr) | ||
1196 | devid = ASUS_WMI_DEVID_CAMERA; | ||
1197 | else if (attr == &dev_attr_cardr.attr) | ||
1198 | devid = ASUS_WMI_DEVID_CARDREADER; | ||
1199 | else if (attr == &dev_attr_touchpad.attr) | ||
1200 | devid = ASUS_WMI_DEVID_TOUCHPAD; | ||
1201 | |||
1202 | if (devid != -1) | ||
1203 | ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); | ||
1204 | |||
1205 | return ok ? attr->mode : 0; | ||
1206 | } | ||
1207 | |||
/* Platform sysfs group; visibility decided per-attribute at runtime. */
static struct attribute_group platform_attribute_group = {
	.is_visible = asus_sysfs_is_visible,
	.attrs = platform_attributes
};
1212 | |||
/* Remove the platform sysfs attribute group. */
static void asus_wmi_sysfs_exit(struct platform_device *device)
{
	sysfs_remove_group(&device->dev.kobj, &platform_attribute_group);
}
1217 | |||
/* Create the platform sysfs attribute group; returns 0 or -errno. */
static int asus_wmi_sysfs_init(struct platform_device *device)
{
	return sysfs_create_group(&device->dev.kobj, &platform_attribute_group);
}
1222 | |||
1223 | /* | ||
1224 | * Platform device | ||
1225 | */ | ||
/*
 * Probe the firmware interface (INIT/SPEC/SFUN methods, DSTS method id)
 * and create the platform sysfs attributes. Returns 0 on success or
 * -ENODEV when no usable DSTS method can be found.
 */
static int __init asus_wmi_platform_init(struct asus_wmi *asus)
{
	int rv;

	/* INIT enable hotkeys on some models */
	if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_INIT, 0, 0, &rv))
		pr_info("Initialization: %#x", rv);

	/* We don't know yet what to do with this version... */
	if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) {
		pr_info("BIOS WMI version: %d.%d", rv >> 8, rv & 0xFF);
		asus->spec = rv;
	}

	/*
	 * The SFUN method probably allows the original driver to get the list
	 * of features supported by a given model. For now, 0x0100 or 0x0800
	 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
	 * The significance of others is yet to be found.
	 */
	if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SFUN, 0, 0, &rv)) {
		pr_info("SFUN value: %#x", rv);
		asus->sfun = rv;
	}

	/*
	 * Eee PC and Notebooks seems to have different method_id for DSTS,
	 * but it may also be related to the BIOS's SPEC.
	 * Note, on most Eeepc, there is no way to check if a method exists
	 * or not, while on notebooks, they return 0xFFFFFFFE on failure,
	 * but once again, SPEC may probably be used for that kind of things.
	 * DSTS is tried first, then the DSTS2 fallback.
	 */
	if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, 0, 0, NULL))
		asus->dsts_id = ASUS_WMI_METHODID_DSTS;
	else if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, 0, 0, NULL))
		asus->dsts_id = ASUS_WMI_METHODID_DSTS2;

	if (!asus->dsts_id) {
		pr_err("Can't find DSTS");
		return -ENODEV;
	}

	return asus_wmi_sysfs_init(asus->platform_device);
}
1270 | |||
/* Undo asus_wmi_platform_init(): remove the platform sysfs group. */
static void asus_wmi_platform_exit(struct asus_wmi *asus)
{
	asus_wmi_sysfs_exit(asus->platform_device);
}
1275 | |||
1276 | /* | ||
1277 | * debugfs | ||
1278 | */ | ||
/* One debugfs file: its name and the seq_file show routine backing it. */
struct asus_wmi_debugfs_node {
	struct asus_wmi *asus;	/* filled in at registration time */
	char *name;		/* debugfs file name */
	int (*show) (struct seq_file *m, void *data);
};
1284 | |||
1285 | static int show_dsts(struct seq_file *m, void *data) | ||
1286 | { | ||
1287 | struct asus_wmi *asus = m->private; | ||
1288 | int err; | ||
1289 | u32 retval = -1; | ||
1290 | |||
1291 | err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval); | ||
1292 | |||
1293 | if (err < 0) | ||
1294 | return err; | ||
1295 | |||
1296 | seq_printf(m, "DSTS(%#x) = %#x\n", asus->debug.dev_id, retval); | ||
1297 | |||
1298 | return 0; | ||
1299 | } | ||
1300 | |||
1301 | static int show_devs(struct seq_file *m, void *data) | ||
1302 | { | ||
1303 | struct asus_wmi *asus = m->private; | ||
1304 | int err; | ||
1305 | u32 retval = -1; | ||
1306 | |||
1307 | err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param, | ||
1308 | &retval); | ||
1309 | |||
1310 | if (err < 0) | ||
1311 | return err; | ||
1312 | |||
1313 | seq_printf(m, "DEVS(%#x, %#x) = %#x\n", asus->debug.dev_id, | ||
1314 | asus->debug.ctrl_param, retval); | ||
1315 | |||
1316 | return 0; | ||
1317 | } | ||
1318 | |||
1319 | static int show_call(struct seq_file *m, void *data) | ||
1320 | { | ||
1321 | struct asus_wmi *asus = m->private; | ||
1322 | struct bios_args args = { | ||
1323 | .arg0 = asus->debug.dev_id, | ||
1324 | .arg1 = asus->debug.ctrl_param, | ||
1325 | }; | ||
1326 | struct acpi_buffer input = { (acpi_size) sizeof(args), &args }; | ||
1327 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
1328 | union acpi_object *obj; | ||
1329 | acpi_status status; | ||
1330 | |||
1331 | status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, | ||
1332 | 1, asus->debug.method_id, | ||
1333 | &input, &output); | ||
1334 | |||
1335 | if (ACPI_FAILURE(status)) | ||
1336 | return -EIO; | ||
1337 | |||
1338 | obj = (union acpi_object *)output.pointer; | ||
1339 | if (obj && obj->type == ACPI_TYPE_INTEGER) | ||
1340 | seq_printf(m, "%#x(%#x, %#x) = %#x\n", asus->debug.method_id, | ||
1341 | asus->debug.dev_id, asus->debug.ctrl_param, | ||
1342 | (u32) obj->integer.value); | ||
1343 | else | ||
1344 | seq_printf(m, "%#x(%#x, %#x) = t:%d\n", asus->debug.method_id, | ||
1345 | asus->debug.dev_id, asus->debug.ctrl_param, | ||
1346 | obj ? obj->type : -1); | ||
1347 | |||
1348 | kfree(obj); | ||
1349 | |||
1350 | return 0; | ||
1351 | } | ||
1352 | |||
/* debugfs files created under <driver-name>/; .asus filled in at init. */
static struct asus_wmi_debugfs_node asus_wmi_debug_files[] = {
	{NULL, "devs", show_devs},
	{NULL, "dsts", show_dsts},
	{NULL, "call", show_call},
};
1358 | |||
/* debugfs open: bind the node's show routine through single_open(). */
static int asus_wmi_debugfs_open(struct inode *inode, struct file *file)
{
	struct asus_wmi_debugfs_node *node = inode->i_private;

	return single_open(file, node->show, node->asus);
}
1365 | |||
/* Standard seq_file plumbing shared by all three debugfs files. */
static const struct file_operations asus_wmi_debugfs_io_ops = {
	.owner = THIS_MODULE,
	.open = asus_wmi_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1373 | |||
/* Remove the whole debugfs tree (safe on a partially-built tree). */
static void asus_wmi_debugfs_exit(struct asus_wmi *asus)
{
	debugfs_remove_recursive(asus->debug.root);
}
1378 | |||
/*
 * Build the debugfs tree: three writable x32 parameters (method_id,
 * dev_id, ctrl_param) plus the devs/dsts/call files that use them.
 * Any failure removes whatever was already created and returns -ENOMEM.
 */
static int asus_wmi_debugfs_init(struct asus_wmi *asus)
{
	struct dentry *dent;
	int i;

	asus->debug.root = debugfs_create_dir(asus->driver->name, NULL);
	if (!asus->debug.root) {
		pr_err("failed to create debugfs directory");
		goto error_debugfs;
	}

	dent = debugfs_create_x32("method_id", S_IRUGO | S_IWUSR,
				  asus->debug.root, &asus->debug.method_id);
	if (!dent)
		goto error_debugfs;

	dent = debugfs_create_x32("dev_id", S_IRUGO | S_IWUSR,
				  asus->debug.root, &asus->debug.dev_id);
	if (!dent)
		goto error_debugfs;

	dent = debugfs_create_x32("ctrl_param", S_IRUGO | S_IWUSR,
				  asus->debug.root, &asus->debug.ctrl_param);
	if (!dent)
		goto error_debugfs;

	for (i = 0; i < ARRAY_SIZE(asus_wmi_debug_files); i++) {
		struct asus_wmi_debugfs_node *node = &asus_wmi_debug_files[i];

		/* give the show routine access to this driver instance */
		node->asus = asus;
		dent = debugfs_create_file(node->name, S_IFREG | S_IRUGO,
					   asus->debug.root, node,
					   &asus_wmi_debugfs_io_ops);
		if (!dent) {
			pr_err("failed to create debug file: %s\n", node->name);
			goto error_debugfs;
		}
	}

	return 0;

error_debugfs:
	asus_wmi_debugfs_exit(asus);
	return -ENOMEM;
}
1424 | |||
1425 | /* | ||
1426 | * WMI Driver | ||
1427 | */ | ||
/*
 * Platform-device probe: allocate the driver state and bring every
 * subsystem up in order (platform sysfs, input, hwmon, leds, rfkill,
 * backlight, WMI notify handler, debugfs).
 *
 * The fail_* labels mirror that order exactly: each label unwinds the
 * subsystems initialized before the failing step, so the order of the
 * labels must be kept in sync with the init sequence above them.
 */
static int asus_wmi_add(struct platform_device *pdev)
{
	struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver);
	struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv);
	struct asus_wmi *asus;
	acpi_status status;
	int err;

	asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL);
	if (!asus)
		return -ENOMEM;

	asus->driver = wdrv;
	asus->platform_device = pdev;
	wdrv->platform_device = pdev;
	platform_set_drvdata(asus->platform_device, asus);

	/* let the specific driver (eeepc-wmi/asus-nb-wmi) tweak its flags */
	if (wdrv->quirks)
		wdrv->quirks(asus->driver);

	err = asus_wmi_platform_init(asus);
	if (err)
		goto fail_platform;

	err = asus_wmi_input_init(asus);
	if (err)
		goto fail_input;

	err = asus_wmi_hwmon_init(asus);
	if (err)
		goto fail_hwmon;

	err = asus_wmi_led_init(asus);
	if (err)
		goto fail_leds;

	err = asus_wmi_rfkill_init(asus);
	if (err)
		goto fail_rfkill;

	/* only drive the backlight when the ACPI video driver does not */
	if (!acpi_video_backlight_support()) {
		err = asus_wmi_backlight_init(asus);
		if (err && err != -ENODEV)
			goto fail_backlight;
	} else
		pr_info("Backlight controlled by ACPI video driver\n");

	status = wmi_install_notify_handler(asus->driver->event_guid,
					    asus_wmi_notify, asus);
	if (ACPI_FAILURE(status)) {
		pr_err("Unable to register notify handler - %d\n", status);
		err = -ENODEV;
		goto fail_wmi_handler;
	}

	err = asus_wmi_debugfs_init(asus);
	if (err)
		goto fail_debugfs;

	return 0;

fail_debugfs:
	wmi_remove_notify_handler(asus->driver->event_guid);
fail_wmi_handler:
	asus_wmi_backlight_exit(asus);
fail_backlight:
	asus_wmi_rfkill_exit(asus);
fail_rfkill:
	asus_wmi_led_exit(asus);
fail_leds:
	asus_wmi_hwmon_exit(asus);
fail_hwmon:
	asus_wmi_input_exit(asus);
fail_input:
	asus_wmi_platform_exit(asus);
fail_platform:
	kfree(asus);
	return err;
}
1507 | |||
/*
 * asus_wmi_remove - platform remove callback; undo everything set up in
 * asus_wmi_add() and free the per-device state.
 *
 * NOTE(review): the teardown order here (input before hwmon/led/rfkill)
 * differs from the reverse-of-init order used on the asus_wmi_add()
 * failure path; confirm no sub-driver depends on the other order.
 */
static int asus_wmi_remove(struct platform_device *device)
{
	struct asus_wmi *asus;

	asus = platform_get_drvdata(device);
	/* Stop WMI events first so no notify races the teardown below. */
	wmi_remove_notify_handler(asus->driver->event_guid);
	asus_wmi_backlight_exit(asus);
	asus_wmi_input_exit(asus);
	asus_wmi_hwmon_exit(asus);
	asus_wmi_led_exit(asus);
	asus_wmi_rfkill_exit(asus);
	asus_wmi_debugfs_exit(asus);
	asus_wmi_platform_exit(asus);

	kfree(asus);
	return 0;
}
1525 | |||
1526 | /* | ||
1527 | * Platform driver - hibernate/resume callbacks | ||
1528 | */ | ||
1529 | static int asus_hotk_thaw(struct device *device) | ||
1530 | { | ||
1531 | struct asus_wmi *asus = dev_get_drvdata(device); | ||
1532 | |||
1533 | if (asus->wlan.rfkill) { | ||
1534 | bool wlan; | ||
1535 | |||
1536 | /* | ||
1537 | * Work around bios bug - acpi _PTS turns off the wireless led | ||
1538 | * during suspend. Normally it restores it on resume, but | ||
1539 | * we should kick it ourselves in case hibernation is aborted. | ||
1540 | */ | ||
1541 | wlan = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN); | ||
1542 | asus_wmi_set_devstate(ASUS_WMI_DEVID_WLAN, wlan, NULL); | ||
1543 | } | ||
1544 | |||
1545 | return 0; | ||
1546 | } | ||
1547 | |||
1548 | static int asus_hotk_restore(struct device *device) | ||
1549 | { | ||
1550 | struct asus_wmi *asus = dev_get_drvdata(device); | ||
1551 | int bl; | ||
1552 | |||
1553 | /* Refresh both wlan rfkill state and pci hotplug */ | ||
1554 | if (asus->wlan.rfkill) | ||
1555 | asus_rfkill_hotplug(asus); | ||
1556 | |||
1557 | if (asus->bluetooth.rfkill) { | ||
1558 | bl = !asus_wmi_get_devstate_simple(asus, | ||
1559 | ASUS_WMI_DEVID_BLUETOOTH); | ||
1560 | rfkill_set_sw_state(asus->bluetooth.rfkill, bl); | ||
1561 | } | ||
1562 | if (asus->wimax.rfkill) { | ||
1563 | bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WIMAX); | ||
1564 | rfkill_set_sw_state(asus->wimax.rfkill, bl); | ||
1565 | } | ||
1566 | if (asus->wwan3g.rfkill) { | ||
1567 | bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G); | ||
1568 | rfkill_set_sw_state(asus->wwan3g.rfkill, bl); | ||
1569 | } | ||
1570 | |||
1571 | return 0; | ||
1572 | } | ||
1573 | |||
/* Hibernation-path callbacks only; the suspend/resume fields are unset. */
static const struct dev_pm_ops asus_pm_ops = {
	.thaw = asus_hotk_thaw,
	.restore = asus_hotk_restore,
};
1578 | |||
1579 | static int asus_wmi_probe(struct platform_device *pdev) | ||
1580 | { | ||
1581 | struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver); | ||
1582 | struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv); | ||
1583 | int ret; | ||
1584 | |||
1585 | if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) { | ||
1586 | pr_warning("Management GUID not found\n"); | ||
1587 | return -ENODEV; | ||
1588 | } | ||
1589 | |||
1590 | if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) { | ||
1591 | pr_warning("Event GUID not found\n"); | ||
1592 | return -ENODEV; | ||
1593 | } | ||
1594 | |||
1595 | if (wdrv->probe) { | ||
1596 | ret = wdrv->probe(pdev); | ||
1597 | if (ret) | ||
1598 | return ret; | ||
1599 | } | ||
1600 | |||
1601 | return asus_wmi_add(pdev); | ||
1602 | } | ||
1603 | |||
1604 | static bool used; | ||
1605 | |||
1606 | int asus_wmi_register_driver(struct asus_wmi_driver *driver) | ||
1607 | { | ||
1608 | struct platform_driver *platform_driver; | ||
1609 | struct platform_device *platform_device; | ||
1610 | |||
1611 | if (used) | ||
1612 | return -EBUSY; | ||
1613 | |||
1614 | platform_driver = &driver->platform_driver; | ||
1615 | platform_driver->remove = asus_wmi_remove; | ||
1616 | platform_driver->driver.owner = driver->owner; | ||
1617 | platform_driver->driver.name = driver->name; | ||
1618 | platform_driver->driver.pm = &asus_pm_ops; | ||
1619 | |||
1620 | platform_device = platform_create_bundle(platform_driver, | ||
1621 | asus_wmi_probe, | ||
1622 | NULL, 0, NULL, 0); | ||
1623 | if (IS_ERR(platform_device)) | ||
1624 | return PTR_ERR(platform_device); | ||
1625 | |||
1626 | used = true; | ||
1627 | return 0; | ||
1628 | } | ||
1629 | EXPORT_SYMBOL_GPL(asus_wmi_register_driver); | ||
1630 | |||
/*
 * asus_wmi_unregister_driver - tear down a previously registered driver.
 *
 * Unregisters the platform device and driver created by
 * asus_wmi_register_driver() and releases the single-instance slot.
 */
void asus_wmi_unregister_driver(struct asus_wmi_driver *driver)
{
	platform_device_unregister(driver->platform_device);
	platform_driver_unregister(&driver->platform_driver);
	used = false;
}
EXPORT_SYMBOL_GPL(asus_wmi_unregister_driver);
1638 | |||
1639 | static int __init asus_wmi_init(void) | ||
1640 | { | ||
1641 | if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) { | ||
1642 | pr_info("Asus Management GUID not found"); | ||
1643 | return -ENODEV; | ||
1644 | } | ||
1645 | |||
1646 | pr_info("ASUS WMI generic driver loaded"); | ||
1647 | return 0; | ||
1648 | } | ||
1649 | |||
1650 | static void __exit asus_wmi_exit(void) | ||
1651 | { | ||
1652 | pr_info("ASUS WMI generic driver unloaded"); | ||
1653 | } | ||
1654 | |||
1655 | module_init(asus_wmi_init); | ||
1656 | module_exit(asus_wmi_exit); | ||
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h new file mode 100644 index 000000000000..c044522c8766 --- /dev/null +++ b/drivers/platform/x86/asus-wmi.h | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * Asus PC WMI hotkey driver | ||
3 | * | ||
4 | * Copyright(C) 2010 Intel Corporation. | ||
5 | * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com> | ||
6 | * | ||
7 | * Portions based on wistron_btns.c: | ||
8 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> | ||
9 | * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org> | ||
10 | * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
25 | */ | ||
26 | |||
27 | #ifndef _ASUS_WMI_H_ | ||
28 | #define _ASUS_WMI_H_ | ||
29 | |||
30 | #include <linux/platform_device.h> | ||
31 | |||
32 | struct module; | ||
33 | struct key_entry; | ||
34 | struct asus_wmi; | ||
35 | |||
/*
 * asus_wmi_driver - per-vendor glue handed to the generic asus-wmi core.
 *
 * A vendor front-end fills this in and passes it to
 * asus_wmi_register_driver().
 */
struct asus_wmi_driver {
	bool hotplug_wireless;	/* enable wireless hotplug handling -- TODO confirm core semantics */

	const char *name;	/* platform driver/device name */
	struct module *owner;	/* module backing the front-end */

	const char *event_guid;	/* WMI event GUID to listen on; may be NULL */

	const struct key_entry *keymap;	/* sparse keymap for hotkey events */
	const char *input_name;
	const char *input_phys;

	/* Optional vendor hooks: pre-init probe and driver-struct quirks. */
	int (*probe) (struct platform_device *device);
	void (*quirks) (struct asus_wmi_driver *driver);

	/* Filled in by the core during registration. */
	struct platform_driver platform_driver;
	struct platform_device *platform_device;
};
54 | |||
55 | int asus_wmi_register_driver(struct asus_wmi_driver *driver); | ||
56 | void asus_wmi_unregister_driver(struct asus_wmi_driver *driver); | ||
57 | |||
58 | #endif /* !_ASUS_WMI_H_ */ | ||
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index eb95878fa583..c16a27641ced 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c | |||
@@ -201,7 +201,7 @@ static bool extra_features; | |||
201 | * into 0x4F and read a few bytes from the output, like so: | 201 | * into 0x4F and read a few bytes from the output, like so: |
202 | * u8 writeData = 0x33; | 202 | * u8 writeData = 0x33; |
203 | * ec_transaction(0x4F, &writeData, 1, buffer, 32, 0); | 203 | * ec_transaction(0x4F, &writeData, 1, buffer, 32, 0); |
204 | * That address is labled "fan1 table information" in the service manual. | 204 | * That address is labelled "fan1 table information" in the service manual. |
205 | * It should be clear which value in 'buffer' changes). This seems to be | 205 | * It should be clear which value in 'buffer' changes). This seems to be |
206 | * related to fan speed. It isn't a proper 'realtime' fan speed value | 206 | * related to fan speed. It isn't a proper 'realtime' fan speed value |
207 | * though, because physically stopping or speeding up the fan doesn't | 207 | * though, because physically stopping or speeding up the fan doesn't |
@@ -275,7 +275,7 @@ static int set_backlight_level(int level) | |||
275 | 275 | ||
276 | ec_write(BACKLIGHT_LEVEL_ADDR, level); | 276 | ec_write(BACKLIGHT_LEVEL_ADDR, level); |
277 | 277 | ||
278 | return 1; | 278 | return 0; |
279 | } | 279 | } |
280 | 280 | ||
281 | static int get_backlight_level(void) | 281 | static int get_backlight_level(void) |
@@ -763,7 +763,7 @@ static int dmi_check_cb(const struct dmi_system_id *id) | |||
763 | printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s'\n", | 763 | printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s'\n", |
764 | id->ident); | 764 | id->ident); |
765 | extra_features = false; | 765 | extra_features = false; |
766 | return 0; | 766 | return 1; |
767 | } | 767 | } |
768 | 768 | ||
769 | static int dmi_check_cb_extra(const struct dmi_system_id *id) | 769 | static int dmi_check_cb_extra(const struct dmi_system_id *id) |
@@ -772,7 +772,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id) | |||
772 | "enabling extra features\n", | 772 | "enabling extra features\n", |
773 | id->ident); | 773 | id->ident); |
774 | extra_features = true; | 774 | extra_features = true; |
775 | return 0; | 775 | return 1; |
776 | } | 776 | } |
777 | 777 | ||
778 | static struct dmi_system_id __initdata compal_dmi_table[] = { | 778 | static struct dmi_system_id __initdata compal_dmi_table[] = { |
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c new file mode 100644 index 000000000000..0ed84573ae1f --- /dev/null +++ b/drivers/platform/x86/dell-wmi-aio.c | |||
@@ -0,0 +1,171 @@ | |||
1 | /* | ||
2 | * WMI hotkeys support for Dell All-In-One series | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/input.h> | ||
25 | #include <linux/input/sparse-keymap.h> | ||
26 | #include <acpi/acpi_drivers.h> | ||
27 | #include <linux/acpi.h> | ||
28 | #include <linux/string.h> | ||
29 | |||
30 | MODULE_DESCRIPTION("WMI hotkeys driver for Dell All-In-One series"); | ||
31 | MODULE_LICENSE("GPL"); | ||
32 | |||
33 | #define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4" | ||
34 | #define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8" | ||
35 | |||
/* All known AIO event GUIDs, probed in order; NULL-terminated. */
static const char *dell_wmi_aio_guids[] = {
	EVENT_GUID1,
	EVENT_GUID2,
	NULL
};

MODULE_ALIAS("wmi:"EVENT_GUID1);
MODULE_ALIAS("wmi:"EVENT_GUID2);

/* WMI scancode -> input keycode mapping for the AIO hotkeys. */
static const struct key_entry dell_wmi_aio_keymap[] = {
	{ KE_KEY, 0xc0, { KEY_VOLUMEUP } },
	{ KE_KEY, 0xc1, { KEY_VOLUMEDOWN } },
	{ KE_END, 0 }
};

/* Single global input device; created in dell_wmi_aio_input_setup(). */
static struct input_dev *dell_wmi_aio_input_dev;
52 | |||
/*
 * dell_wmi_aio_notify - WMI event callback.
 *
 * Fetches the event data for @value and forwards the scancode to the
 * sparse keymap.  Firmware may deliver the scancode either as an ACPI
 * integer or (on broken machines) as the first byte of a buffer.
 */
static void dell_wmi_aio_notify(u32 value, void *context)
{
	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	status = wmi_get_event_data(value, &response);
	if (status != AE_OK) {
		pr_info("bad event status 0x%x\n", status);
		return;
	}

	obj = (union acpi_object *)response.pointer;
	if (obj) {
		unsigned int scancode;

		switch (obj->type) {
		case ACPI_TYPE_INTEGER:
			/* Most All-In-One correctly return integer scancode */
			scancode = obj->integer.value;
			sparse_keymap_report_event(dell_wmi_aio_input_dev,
				scancode, 1, true);
			break;
		case ACPI_TYPE_BUFFER:
			/* Broken machines return the scancode in a buffer */
			if (obj->buffer.pointer && obj->buffer.length > 0) {
				scancode = obj->buffer.pointer[0];
				sparse_keymap_report_event(
					dell_wmi_aio_input_dev,
					scancode, 1, true);
			}
			break;
		}
	}
	/* obj points into response.pointer, allocated by ACPICA; free it. */
	kfree(obj);
}
89 | |||
/*
 * dell_wmi_aio_input_setup - allocate and register the hotkey input device.
 *
 * On failure the keymap and device are released in reverse order of
 * acquisition (device is still unregistered here, so input_free_device
 * is the correct release).  Returns 0 or a negative errno.
 */
static int __init dell_wmi_aio_input_setup(void)
{
	int err;

	dell_wmi_aio_input_dev = input_allocate_device();

	if (!dell_wmi_aio_input_dev)
		return -ENOMEM;

	dell_wmi_aio_input_dev->name = "Dell AIO WMI hotkeys";
	dell_wmi_aio_input_dev->phys = "wmi/input0";
	dell_wmi_aio_input_dev->id.bustype = BUS_HOST;

	err = sparse_keymap_setup(dell_wmi_aio_input_dev,
			dell_wmi_aio_keymap, NULL);
	if (err) {
		pr_err("Unable to setup input device keymap\n");
		goto err_free_dev;
	}
	err = input_register_device(dell_wmi_aio_input_dev);
	if (err) {
		pr_info("Unable to register input device\n");
		goto err_free_keymap;
	}
	return 0;

err_free_keymap:
	sparse_keymap_free(dell_wmi_aio_input_dev);
err_free_dev:
	input_free_device(dell_wmi_aio_input_dev);
	return err;
}
122 | |||
123 | static const char *dell_wmi_aio_find(void) | ||
124 | { | ||
125 | int i; | ||
126 | |||
127 | for (i = 0; dell_wmi_aio_guids[i] != NULL; i++) | ||
128 | if (wmi_has_guid(dell_wmi_aio_guids[i])) | ||
129 | return dell_wmi_aio_guids[i]; | ||
130 | |||
131 | return NULL; | ||
132 | } | ||
133 | |||
134 | static int __init dell_wmi_aio_init(void) | ||
135 | { | ||
136 | int err; | ||
137 | const char *guid; | ||
138 | |||
139 | guid = dell_wmi_aio_find(); | ||
140 | if (!guid) { | ||
141 | pr_warning("No known WMI GUID found\n"); | ||
142 | return -ENXIO; | ||
143 | } | ||
144 | |||
145 | err = dell_wmi_aio_input_setup(); | ||
146 | if (err) | ||
147 | return err; | ||
148 | |||
149 | err = wmi_install_notify_handler(guid, dell_wmi_aio_notify, NULL); | ||
150 | if (err) { | ||
151 | pr_err("Unable to register notify handler - %d\n", err); | ||
152 | sparse_keymap_free(dell_wmi_aio_input_dev); | ||
153 | input_unregister_device(dell_wmi_aio_input_dev); | ||
154 | return err; | ||
155 | } | ||
156 | |||
157 | return 0; | ||
158 | } | ||
159 | |||
/*
 * dell_wmi_aio_exit - module exit: undo dell_wmi_aio_init().
 *
 * Re-discovers the GUID used at init time (the GUID set cannot change
 * at runtime), removes the notify handler, then releases the keymap
 * and the registered input device.
 */
static void __exit dell_wmi_aio_exit(void)
{
	const char *guid;

	guid = dell_wmi_aio_find();
	wmi_remove_notify_handler(guid);
	sparse_keymap_free(dell_wmi_aio_input_dev);
	input_unregister_device(dell_wmi_aio_input_dev);
}
169 | |||
170 | module_init(dell_wmi_aio_init); | ||
171 | module_exit(dell_wmi_aio_exit); | ||
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 6605beac0d0e..5f2dd386152b 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c | |||
@@ -1322,7 +1322,7 @@ static void cmsg_quirk(struct eeepc_laptop *eeepc, int cm, const char *name) | |||
1322 | { | 1322 | { |
1323 | int dummy; | 1323 | int dummy; |
1324 | 1324 | ||
1325 | /* Some BIOSes do not report cm although it is avaliable. | 1325 | /* Some BIOSes do not report cm although it is available. |
1326 | Check if cm_getv[cm] works and, if yes, assume cm should be set. */ | 1326 | Check if cm_getv[cm] works and, if yes, assume cm should be set. */ |
1327 | if (!(eeepc->cm_supported & (1 << cm)) | 1327 | if (!(eeepc->cm_supported & (1 << cm)) |
1328 | && !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) { | 1328 | && !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) { |
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 4d38f98aa976..0ddc434fb93b 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Eee PC WMI hotkey driver | 2 | * Eee PC WMI hotkey driver |
3 | * | 3 | * |
4 | * Copyright(C) 2010 Intel Corporation. | 4 | * Copyright(C) 2010 Intel Corporation. |
5 | * Copyright(C) 2010 Corentin Chary <corentin.chary@gmail.com> | 5 | * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com> |
6 | * | 6 | * |
7 | * Portions based on wistron_btns.c: | 7 | * Portions based on wistron_btns.c: |
8 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> | 8 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> |
@@ -29,841 +29,57 @@ | |||
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/types.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/input.h> | 32 | #include <linux/input.h> |
35 | #include <linux/input/sparse-keymap.h> | 33 | #include <linux/input/sparse-keymap.h> |
36 | #include <linux/fb.h> | 34 | #include <linux/dmi.h> |
37 | #include <linux/backlight.h> | ||
38 | #include <linux/leds.h> | ||
39 | #include <linux/rfkill.h> | ||
40 | #include <linux/debugfs.h> | ||
41 | #include <linux/seq_file.h> | ||
42 | #include <linux/platform_device.h> | ||
43 | #include <acpi/acpi_bus.h> | 35 | #include <acpi/acpi_bus.h> |
44 | #include <acpi/acpi_drivers.h> | 36 | |
37 | #include "asus-wmi.h" | ||
45 | 38 | ||
46 | #define EEEPC_WMI_FILE "eeepc-wmi" | 39 | #define EEEPC_WMI_FILE "eeepc-wmi" |
47 | 40 | ||
48 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 41 | MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>"); |
49 | MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); | 42 | MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); |
50 | MODULE_LICENSE("GPL"); | 43 | MODULE_LICENSE("GPL"); |
51 | 44 | ||
52 | #define EEEPC_ACPI_HID "ASUS010" /* old _HID used in eeepc-laptop */ | 45 | #define EEEPC_ACPI_HID "ASUS010" /* old _HID used in eeepc-laptop */ |
53 | 46 | ||
54 | #define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" | 47 | #define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" |
55 | #define EEEPC_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66" | ||
56 | 48 | ||
57 | MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); | 49 | MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); |
58 | MODULE_ALIAS("wmi:"EEEPC_WMI_MGMT_GUID); | ||
59 | |||
60 | #define NOTIFY_BRNUP_MIN 0x11 | ||
61 | #define NOTIFY_BRNUP_MAX 0x1f | ||
62 | #define NOTIFY_BRNDOWN_MIN 0x20 | ||
63 | #define NOTIFY_BRNDOWN_MAX 0x2e | ||
64 | 50 | ||
65 | #define EEEPC_WMI_METHODID_DEVS 0x53564544 | 51 | static bool hotplug_wireless; |
66 | #define EEEPC_WMI_METHODID_DSTS 0x53544344 | ||
67 | #define EEEPC_WMI_METHODID_CFVS 0x53564643 | ||
68 | 52 | ||
69 | #define EEEPC_WMI_DEVID_BACKLIGHT 0x00050012 | 53 | module_param(hotplug_wireless, bool, 0444); |
70 | #define EEEPC_WMI_DEVID_TPDLED 0x00100011 | 54 | MODULE_PARM_DESC(hotplug_wireless, |
71 | #define EEEPC_WMI_DEVID_WLAN 0x00010011 | 55 | "Enable hotplug for wireless device. " |
72 | #define EEEPC_WMI_DEVID_BLUETOOTH 0x00010013 | 56 | "If your laptop needs that, please report to " |
73 | #define EEEPC_WMI_DEVID_WWAN3G 0x00010019 | 57 | "acpi4asus-user@lists.sourceforge.net."); |
74 | 58 | ||
75 | static const struct key_entry eeepc_wmi_keymap[] = { | 59 | static const struct key_entry eeepc_wmi_keymap[] = { |
76 | /* Sleep already handled via generic ACPI code */ | 60 | /* Sleep already handled via generic ACPI code */ |
77 | { KE_KEY, 0x5d, { KEY_WLAN } }, | ||
78 | { KE_KEY, 0x32, { KEY_MUTE } }, | ||
79 | { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, | ||
80 | { KE_KEY, 0x30, { KEY_VOLUMEUP } }, | 61 | { KE_KEY, 0x30, { KEY_VOLUMEUP } }, |
81 | { KE_IGNORE, NOTIFY_BRNDOWN_MIN, { KEY_BRIGHTNESSDOWN } }, | 62 | { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, |
82 | { KE_IGNORE, NOTIFY_BRNUP_MIN, { KEY_BRIGHTNESSUP } }, | 63 | { KE_KEY, 0x32, { KEY_MUTE } }, |
64 | { KE_KEY, 0x5c, { KEY_F15 } }, /* Power Gear key */ | ||
65 | { KE_KEY, 0x5d, { KEY_WLAN } }, | ||
66 | { KE_KEY, 0x6b, { KEY_TOUCHPAD_TOGGLE } }, /* Toggle Touchpad */ | ||
67 | { KE_KEY, 0x82, { KEY_CAMERA } }, | ||
68 | { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } }, | ||
69 | { KE_KEY, 0x88, { KEY_WLAN } }, | ||
83 | { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, | 70 | { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, |
84 | { KE_KEY, 0x6b, { KEY_F13 } }, /* Disable Touchpad */ | 71 | { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ |
85 | { KE_KEY, 0xe1, { KEY_F14 } }, | 72 | { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ |
86 | { KE_KEY, 0xe9, { KEY_DISPLAY_OFF } }, | 73 | { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } }, |
87 | { KE_KEY, 0xe0, { KEY_PROG1 } }, | 74 | { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, |
88 | { KE_KEY, 0x5c, { KEY_F15 } }, | 75 | { KE_KEY, 0xec, { KEY_CAMERA_UP } }, |
76 | { KE_KEY, 0xed, { KEY_CAMERA_DOWN } }, | ||
77 | { KE_KEY, 0xee, { KEY_CAMERA_LEFT } }, | ||
78 | { KE_KEY, 0xef, { KEY_CAMERA_RIGHT } }, | ||
89 | { KE_END, 0}, | 79 | { KE_END, 0}, |
90 | }; | 80 | }; |
91 | 81 | ||
92 | struct bios_args { | 82 | static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level, |
93 | u32 dev_id; | ||
94 | u32 ctrl_param; | ||
95 | }; | ||
96 | |||
97 | /* | ||
98 | * eeepc-wmi/ - debugfs root directory | ||
99 | * dev_id - current dev_id | ||
100 | * ctrl_param - current ctrl_param | ||
101 | * devs - call DEVS(dev_id, ctrl_param) and print result | ||
102 | * dsts - call DSTS(dev_id) and print result | ||
103 | */ | ||
104 | struct eeepc_wmi_debug { | ||
105 | struct dentry *root; | ||
106 | u32 dev_id; | ||
107 | u32 ctrl_param; | ||
108 | }; | ||
109 | |||
110 | struct eeepc_wmi { | ||
111 | struct input_dev *inputdev; | ||
112 | struct backlight_device *backlight_device; | ||
113 | struct platform_device *platform_device; | ||
114 | |||
115 | struct led_classdev tpd_led; | ||
116 | int tpd_led_wk; | ||
117 | struct workqueue_struct *led_workqueue; | ||
118 | struct work_struct tpd_led_work; | ||
119 | |||
120 | struct rfkill *wlan_rfkill; | ||
121 | struct rfkill *bluetooth_rfkill; | ||
122 | struct rfkill *wwan3g_rfkill; | ||
123 | |||
124 | struct eeepc_wmi_debug debug; | ||
125 | }; | ||
126 | |||
127 | /* Only used in eeepc_wmi_init() and eeepc_wmi_exit() */ | ||
128 | static struct platform_device *platform_device; | ||
129 | |||
130 | static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc) | ||
131 | { | ||
132 | int err; | ||
133 | |||
134 | eeepc->inputdev = input_allocate_device(); | ||
135 | if (!eeepc->inputdev) | ||
136 | return -ENOMEM; | ||
137 | |||
138 | eeepc->inputdev->name = "Eee PC WMI hotkeys"; | ||
139 | eeepc->inputdev->phys = EEEPC_WMI_FILE "/input0"; | ||
140 | eeepc->inputdev->id.bustype = BUS_HOST; | ||
141 | eeepc->inputdev->dev.parent = &eeepc->platform_device->dev; | ||
142 | |||
143 | err = sparse_keymap_setup(eeepc->inputdev, eeepc_wmi_keymap, NULL); | ||
144 | if (err) | ||
145 | goto err_free_dev; | ||
146 | |||
147 | err = input_register_device(eeepc->inputdev); | ||
148 | if (err) | ||
149 | goto err_free_keymap; | ||
150 | |||
151 | return 0; | ||
152 | |||
153 | err_free_keymap: | ||
154 | sparse_keymap_free(eeepc->inputdev); | ||
155 | err_free_dev: | ||
156 | input_free_device(eeepc->inputdev); | ||
157 | return err; | ||
158 | } | ||
159 | |||
160 | static void eeepc_wmi_input_exit(struct eeepc_wmi *eeepc) | ||
161 | { | ||
162 | if (eeepc->inputdev) { | ||
163 | sparse_keymap_free(eeepc->inputdev); | ||
164 | input_unregister_device(eeepc->inputdev); | ||
165 | } | ||
166 | |||
167 | eeepc->inputdev = NULL; | ||
168 | } | ||
169 | |||
170 | static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *retval) | ||
171 | { | ||
172 | struct acpi_buffer input = { (acpi_size)sizeof(u32), &dev_id }; | ||
173 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
174 | union acpi_object *obj; | ||
175 | acpi_status status; | ||
176 | u32 tmp; | ||
177 | |||
178 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, | ||
179 | 1, EEEPC_WMI_METHODID_DSTS, &input, &output); | ||
180 | |||
181 | if (ACPI_FAILURE(status)) | ||
182 | return status; | ||
183 | |||
184 | obj = (union acpi_object *)output.pointer; | ||
185 | if (obj && obj->type == ACPI_TYPE_INTEGER) | ||
186 | tmp = (u32)obj->integer.value; | ||
187 | else | ||
188 | tmp = 0; | ||
189 | |||
190 | if (retval) | ||
191 | *retval = tmp; | ||
192 | |||
193 | kfree(obj); | ||
194 | |||
195 | return status; | ||
196 | |||
197 | } | ||
198 | |||
199 | static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param, | ||
200 | u32 *retval) | ||
201 | { | ||
202 | struct bios_args args = { | ||
203 | .dev_id = dev_id, | ||
204 | .ctrl_param = ctrl_param, | ||
205 | }; | ||
206 | struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; | ||
207 | acpi_status status; | ||
208 | |||
209 | if (!retval) { | ||
210 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, 1, | ||
211 | EEEPC_WMI_METHODID_DEVS, | ||
212 | &input, NULL); | ||
213 | } else { | ||
214 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
215 | union acpi_object *obj; | ||
216 | u32 tmp; | ||
217 | |||
218 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, 1, | ||
219 | EEEPC_WMI_METHODID_DEVS, | ||
220 | &input, &output); | ||
221 | |||
222 | if (ACPI_FAILURE(status)) | ||
223 | return status; | ||
224 | |||
225 | obj = (union acpi_object *)output.pointer; | ||
226 | if (obj && obj->type == ACPI_TYPE_INTEGER) | ||
227 | tmp = (u32)obj->integer.value; | ||
228 | else | ||
229 | tmp = 0; | ||
230 | |||
231 | *retval = tmp; | ||
232 | |||
233 | kfree(obj); | ||
234 | } | ||
235 | |||
236 | return status; | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * LEDs | ||
241 | */ | ||
242 | /* | ||
243 | * These functions actually update the LED's, and are called from a | ||
244 | * workqueue. By doing this as separate work rather than when the LED | ||
245 | * subsystem asks, we avoid messing with the Eeepc ACPI stuff during a | ||
246 | * potentially bad time, such as a timer interrupt. | ||
247 | */ | ||
248 | static void tpd_led_update(struct work_struct *work) | ||
249 | { | ||
250 | int ctrl_param; | ||
251 | struct eeepc_wmi *eeepc; | ||
252 | |||
253 | eeepc = container_of(work, struct eeepc_wmi, tpd_led_work); | ||
254 | |||
255 | ctrl_param = eeepc->tpd_led_wk; | ||
256 | eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_TPDLED, ctrl_param, NULL); | ||
257 | } | ||
258 | |||
259 | static void tpd_led_set(struct led_classdev *led_cdev, | ||
260 | enum led_brightness value) | ||
261 | { | ||
262 | struct eeepc_wmi *eeepc; | ||
263 | |||
264 | eeepc = container_of(led_cdev, struct eeepc_wmi, tpd_led); | ||
265 | |||
266 | eeepc->tpd_led_wk = !!value; | ||
267 | queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work); | ||
268 | } | ||
269 | |||
270 | static int read_tpd_state(struct eeepc_wmi *eeepc) | ||
271 | { | ||
272 | u32 retval; | ||
273 | acpi_status status; | ||
274 | |||
275 | status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_TPDLED, &retval); | ||
276 | |||
277 | if (ACPI_FAILURE(status)) | ||
278 | return -1; | ||
279 | else if (!retval || retval == 0x00060000) | ||
280 | /* | ||
281 | * if touchpad led is present, DSTS will set some bits, | ||
282 | * usually 0x00020000. | ||
283 | * 0x00060000 means that the device is not supported | ||
284 | */ | ||
285 | return -ENODEV; | ||
286 | else | ||
287 | /* Status is stored in the first bit */ | ||
288 | return retval & 0x1; | ||
289 | } | ||
290 | |||
291 | static enum led_brightness tpd_led_get(struct led_classdev *led_cdev) | ||
292 | { | ||
293 | struct eeepc_wmi *eeepc; | ||
294 | |||
295 | eeepc = container_of(led_cdev, struct eeepc_wmi, tpd_led); | ||
296 | |||
297 | return read_tpd_state(eeepc); | ||
298 | } | ||
299 | |||
300 | static int eeepc_wmi_led_init(struct eeepc_wmi *eeepc) | ||
301 | { | ||
302 | int rv; | ||
303 | |||
304 | if (read_tpd_state(eeepc) < 0) | ||
305 | return 0; | ||
306 | |||
307 | eeepc->led_workqueue = create_singlethread_workqueue("led_workqueue"); | ||
308 | if (!eeepc->led_workqueue) | ||
309 | return -ENOMEM; | ||
310 | INIT_WORK(&eeepc->tpd_led_work, tpd_led_update); | ||
311 | |||
312 | eeepc->tpd_led.name = "eeepc::touchpad"; | ||
313 | eeepc->tpd_led.brightness_set = tpd_led_set; | ||
314 | eeepc->tpd_led.brightness_get = tpd_led_get; | ||
315 | eeepc->tpd_led.max_brightness = 1; | ||
316 | |||
317 | rv = led_classdev_register(&eeepc->platform_device->dev, | ||
318 | &eeepc->tpd_led); | ||
319 | if (rv) { | ||
320 | destroy_workqueue(eeepc->led_workqueue); | ||
321 | return rv; | ||
322 | } | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static void eeepc_wmi_led_exit(struct eeepc_wmi *eeepc) | ||
328 | { | ||
329 | if (eeepc->tpd_led.dev) | ||
330 | led_classdev_unregister(&eeepc->tpd_led); | ||
331 | if (eeepc->led_workqueue) | ||
332 | destroy_workqueue(eeepc->led_workqueue); | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * Rfkill devices | ||
337 | */ | ||
338 | static int eeepc_rfkill_set(void *data, bool blocked) | ||
339 | { | ||
340 | int dev_id = (unsigned long)data; | ||
341 | u32 ctrl_param = !blocked; | ||
342 | |||
343 | return eeepc_wmi_set_devstate(dev_id, ctrl_param, NULL); | ||
344 | } | ||
345 | |||
346 | static void eeepc_rfkill_query(struct rfkill *rfkill, void *data) | ||
347 | { | ||
348 | int dev_id = (unsigned long)data; | ||
349 | u32 retval; | ||
350 | acpi_status status; | ||
351 | |||
352 | status = eeepc_wmi_get_devstate(dev_id, &retval); | ||
353 | |||
354 | if (ACPI_FAILURE(status)) | ||
355 | return ; | ||
356 | |||
357 | rfkill_set_sw_state(rfkill, !(retval & 0x1)); | ||
358 | } | ||
359 | |||
360 | static const struct rfkill_ops eeepc_rfkill_ops = { | ||
361 | .set_block = eeepc_rfkill_set, | ||
362 | .query = eeepc_rfkill_query, | ||
363 | }; | ||
364 | |||
365 | static int eeepc_new_rfkill(struct eeepc_wmi *eeepc, | ||
366 | struct rfkill **rfkill, | ||
367 | const char *name, | ||
368 | enum rfkill_type type, int dev_id) | ||
369 | { | ||
370 | int result; | ||
371 | u32 retval; | ||
372 | acpi_status status; | ||
373 | |||
374 | status = eeepc_wmi_get_devstate(dev_id, &retval); | ||
375 | |||
376 | if (ACPI_FAILURE(status)) | ||
377 | return -1; | ||
378 | |||
379 | /* If the device is present, DSTS will always set some bits | ||
380 | * 0x00070000 - 1110000000000000000 - device supported | ||
381 | * 0x00060000 - 1100000000000000000 - not supported | ||
382 | * 0x00020000 - 0100000000000000000 - device supported | ||
383 | * 0x00010000 - 0010000000000000000 - not supported / special mode ? | ||
384 | */ | ||
385 | if (!retval || retval == 0x00060000) | ||
386 | return -ENODEV; | ||
387 | |||
388 | *rfkill = rfkill_alloc(name, &eeepc->platform_device->dev, type, | ||
389 | &eeepc_rfkill_ops, (void *)(long)dev_id); | ||
390 | |||
391 | if (!*rfkill) | ||
392 | return -EINVAL; | ||
393 | |||
394 | rfkill_init_sw_state(*rfkill, !(retval & 0x1)); | ||
395 | result = rfkill_register(*rfkill); | ||
396 | if (result) { | ||
397 | rfkill_destroy(*rfkill); | ||
398 | *rfkill = NULL; | ||
399 | return result; | ||
400 | } | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | static void eeepc_wmi_rfkill_exit(struct eeepc_wmi *eeepc) | ||
405 | { | ||
406 | if (eeepc->wlan_rfkill) { | ||
407 | rfkill_unregister(eeepc->wlan_rfkill); | ||
408 | rfkill_destroy(eeepc->wlan_rfkill); | ||
409 | eeepc->wlan_rfkill = NULL; | ||
410 | } | ||
411 | if (eeepc->bluetooth_rfkill) { | ||
412 | rfkill_unregister(eeepc->bluetooth_rfkill); | ||
413 | rfkill_destroy(eeepc->bluetooth_rfkill); | ||
414 | eeepc->bluetooth_rfkill = NULL; | ||
415 | } | ||
416 | if (eeepc->wwan3g_rfkill) { | ||
417 | rfkill_unregister(eeepc->wwan3g_rfkill); | ||
418 | rfkill_destroy(eeepc->wwan3g_rfkill); | ||
419 | eeepc->wwan3g_rfkill = NULL; | ||
420 | } | ||
421 | } | ||
422 | |||
423 | static int eeepc_wmi_rfkill_init(struct eeepc_wmi *eeepc) | ||
424 | { | ||
425 | int result = 0; | ||
426 | |||
427 | result = eeepc_new_rfkill(eeepc, &eeepc->wlan_rfkill, | ||
428 | "eeepc-wlan", RFKILL_TYPE_WLAN, | ||
429 | EEEPC_WMI_DEVID_WLAN); | ||
430 | |||
431 | if (result && result != -ENODEV) | ||
432 | goto exit; | ||
433 | |||
434 | result = eeepc_new_rfkill(eeepc, &eeepc->bluetooth_rfkill, | ||
435 | "eeepc-bluetooth", RFKILL_TYPE_BLUETOOTH, | ||
436 | EEEPC_WMI_DEVID_BLUETOOTH); | ||
437 | |||
438 | if (result && result != -ENODEV) | ||
439 | goto exit; | ||
440 | |||
441 | result = eeepc_new_rfkill(eeepc, &eeepc->wwan3g_rfkill, | ||
442 | "eeepc-wwan3g", RFKILL_TYPE_WWAN, | ||
443 | EEEPC_WMI_DEVID_WWAN3G); | ||
444 | |||
445 | if (result && result != -ENODEV) | ||
446 | goto exit; | ||
447 | |||
448 | exit: | ||
449 | if (result && result != -ENODEV) | ||
450 | eeepc_wmi_rfkill_exit(eeepc); | ||
451 | |||
452 | if (result == -ENODEV) | ||
453 | result = 0; | ||
454 | |||
455 | return result; | ||
456 | } | ||
457 | |||
458 | /* | ||
459 | * Backlight | ||
460 | */ | ||
461 | static int read_brightness(struct backlight_device *bd) | ||
462 | { | ||
463 | u32 retval; | ||
464 | acpi_status status; | ||
465 | |||
466 | status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &retval); | ||
467 | |||
468 | if (ACPI_FAILURE(status)) | ||
469 | return -1; | ||
470 | else | ||
471 | return retval & 0xFF; | ||
472 | } | ||
473 | |||
474 | static int update_bl_status(struct backlight_device *bd) | ||
475 | { | ||
476 | |||
477 | u32 ctrl_param; | ||
478 | acpi_status status; | ||
479 | |||
480 | ctrl_param = bd->props.brightness; | ||
481 | |||
482 | status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT, | ||
483 | ctrl_param, NULL); | ||
484 | |||
485 | if (ACPI_FAILURE(status)) | ||
486 | return -1; | ||
487 | else | ||
488 | return 0; | ||
489 | } | ||
490 | |||
491 | static const struct backlight_ops eeepc_wmi_bl_ops = { | ||
492 | .get_brightness = read_brightness, | ||
493 | .update_status = update_bl_status, | ||
494 | }; | ||
495 | |||
496 | static int eeepc_wmi_backlight_notify(struct eeepc_wmi *eeepc, int code) | ||
497 | { | ||
498 | struct backlight_device *bd = eeepc->backlight_device; | ||
499 | int old = bd->props.brightness; | ||
500 | int new = old; | ||
501 | |||
502 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | ||
503 | new = code - NOTIFY_BRNUP_MIN + 1; | ||
504 | else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) | ||
505 | new = code - NOTIFY_BRNDOWN_MIN; | ||
506 | |||
507 | bd->props.brightness = new; | ||
508 | backlight_update_status(bd); | ||
509 | backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY); | ||
510 | |||
511 | return old; | ||
512 | } | ||
513 | |||
514 | static int eeepc_wmi_backlight_init(struct eeepc_wmi *eeepc) | ||
515 | { | ||
516 | struct backlight_device *bd; | ||
517 | struct backlight_properties props; | ||
518 | |||
519 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
520 | props.max_brightness = 15; | ||
521 | bd = backlight_device_register(EEEPC_WMI_FILE, | ||
522 | &eeepc->platform_device->dev, eeepc, | ||
523 | &eeepc_wmi_bl_ops, &props); | ||
524 | if (IS_ERR(bd)) { | ||
525 | pr_err("Could not register backlight device\n"); | ||
526 | return PTR_ERR(bd); | ||
527 | } | ||
528 | |||
529 | eeepc->backlight_device = bd; | ||
530 | |||
531 | bd->props.brightness = read_brightness(bd); | ||
532 | bd->props.power = FB_BLANK_UNBLANK; | ||
533 | backlight_update_status(bd); | ||
534 | |||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | static void eeepc_wmi_backlight_exit(struct eeepc_wmi *eeepc) | ||
539 | { | ||
540 | if (eeepc->backlight_device) | ||
541 | backlight_device_unregister(eeepc->backlight_device); | ||
542 | |||
543 | eeepc->backlight_device = NULL; | ||
544 | } | ||
545 | |||
546 | static void eeepc_wmi_notify(u32 value, void *context) | ||
547 | { | ||
548 | struct eeepc_wmi *eeepc = context; | ||
549 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
550 | union acpi_object *obj; | ||
551 | acpi_status status; | ||
552 | int code; | ||
553 | int orig_code; | ||
554 | |||
555 | status = wmi_get_event_data(value, &response); | ||
556 | if (status != AE_OK) { | ||
557 | pr_err("bad event status 0x%x\n", status); | ||
558 | return; | ||
559 | } | ||
560 | |||
561 | obj = (union acpi_object *)response.pointer; | ||
562 | |||
563 | if (obj && obj->type == ACPI_TYPE_INTEGER) { | ||
564 | code = obj->integer.value; | ||
565 | orig_code = code; | ||
566 | |||
567 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | ||
568 | code = NOTIFY_BRNUP_MIN; | ||
569 | else if (code >= NOTIFY_BRNDOWN_MIN && | ||
570 | code <= NOTIFY_BRNDOWN_MAX) | ||
571 | code = NOTIFY_BRNDOWN_MIN; | ||
572 | |||
573 | if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) { | ||
574 | if (!acpi_video_backlight_support()) | ||
575 | eeepc_wmi_backlight_notify(eeepc, orig_code); | ||
576 | } | ||
577 | |||
578 | if (!sparse_keymap_report_event(eeepc->inputdev, | ||
579 | code, 1, true)) | ||
580 | pr_info("Unknown key %x pressed\n", code); | ||
581 | } | ||
582 | |||
583 | kfree(obj); | ||
584 | } | ||
585 | |||
586 | static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, | ||
587 | const char *buf, size_t count) | ||
588 | { | ||
589 | int value; | ||
590 | struct acpi_buffer input = { (acpi_size)sizeof(value), &value }; | ||
591 | acpi_status status; | ||
592 | |||
593 | if (!count || sscanf(buf, "%i", &value) != 1) | ||
594 | return -EINVAL; | ||
595 | if (value < 0 || value > 2) | ||
596 | return -EINVAL; | ||
597 | |||
598 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, | ||
599 | 1, EEEPC_WMI_METHODID_CFVS, &input, NULL); | ||
600 | |||
601 | if (ACPI_FAILURE(status)) | ||
602 | return -EIO; | ||
603 | else | ||
604 | return count; | ||
605 | } | ||
606 | |||
607 | static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv); | ||
608 | |||
609 | static struct attribute *platform_attributes[] = { | ||
610 | &dev_attr_cpufv.attr, | ||
611 | NULL | ||
612 | }; | ||
613 | |||
614 | static struct attribute_group platform_attribute_group = { | ||
615 | .attrs = platform_attributes | ||
616 | }; | ||
617 | |||
618 | static void eeepc_wmi_sysfs_exit(struct platform_device *device) | ||
619 | { | ||
620 | sysfs_remove_group(&device->dev.kobj, &platform_attribute_group); | ||
621 | } | ||
622 | |||
623 | static int eeepc_wmi_sysfs_init(struct platform_device *device) | ||
624 | { | ||
625 | return sysfs_create_group(&device->dev.kobj, &platform_attribute_group); | ||
626 | } | ||
627 | |||
628 | /* | ||
629 | * Platform device | ||
630 | */ | ||
631 | static int __init eeepc_wmi_platform_init(struct eeepc_wmi *eeepc) | ||
632 | { | ||
633 | int err; | ||
634 | |||
635 | eeepc->platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1); | ||
636 | if (!eeepc->platform_device) | ||
637 | return -ENOMEM; | ||
638 | platform_set_drvdata(eeepc->platform_device, eeepc); | ||
639 | |||
640 | err = platform_device_add(eeepc->platform_device); | ||
641 | if (err) | ||
642 | goto fail_platform_device; | ||
643 | |||
644 | err = eeepc_wmi_sysfs_init(eeepc->platform_device); | ||
645 | if (err) | ||
646 | goto fail_sysfs; | ||
647 | return 0; | ||
648 | |||
649 | fail_sysfs: | ||
650 | platform_device_del(eeepc->platform_device); | ||
651 | fail_platform_device: | ||
652 | platform_device_put(eeepc->platform_device); | ||
653 | return err; | ||
654 | } | ||
655 | |||
656 | static void eeepc_wmi_platform_exit(struct eeepc_wmi *eeepc) | ||
657 | { | ||
658 | eeepc_wmi_sysfs_exit(eeepc->platform_device); | ||
659 | platform_device_unregister(eeepc->platform_device); | ||
660 | } | ||
661 | |||
662 | /* | ||
663 | * debugfs | ||
664 | */ | ||
665 | struct eeepc_wmi_debugfs_node { | ||
666 | struct eeepc_wmi *eeepc; | ||
667 | char *name; | ||
668 | int (*show)(struct seq_file *m, void *data); | ||
669 | }; | ||
670 | |||
671 | static int show_dsts(struct seq_file *m, void *data) | ||
672 | { | ||
673 | struct eeepc_wmi *eeepc = m->private; | ||
674 | acpi_status status; | ||
675 | u32 retval = -1; | ||
676 | |||
677 | status = eeepc_wmi_get_devstate(eeepc->debug.dev_id, &retval); | ||
678 | |||
679 | if (ACPI_FAILURE(status)) | ||
680 | return -EIO; | ||
681 | |||
682 | seq_printf(m, "DSTS(%x) = %x\n", eeepc->debug.dev_id, retval); | ||
683 | |||
684 | return 0; | ||
685 | } | ||
686 | |||
687 | static int show_devs(struct seq_file *m, void *data) | ||
688 | { | ||
689 | struct eeepc_wmi *eeepc = m->private; | ||
690 | acpi_status status; | ||
691 | u32 retval = -1; | ||
692 | |||
693 | status = eeepc_wmi_set_devstate(eeepc->debug.dev_id, | ||
694 | eeepc->debug.ctrl_param, &retval); | ||
695 | if (ACPI_FAILURE(status)) | ||
696 | return -EIO; | ||
697 | |||
698 | seq_printf(m, "DEVS(%x, %x) = %x\n", eeepc->debug.dev_id, | ||
699 | eeepc->debug.ctrl_param, retval); | ||
700 | |||
701 | return 0; | ||
702 | } | ||
703 | |||
704 | static struct eeepc_wmi_debugfs_node eeepc_wmi_debug_files[] = { | ||
705 | { NULL, "devs", show_devs }, | ||
706 | { NULL, "dsts", show_dsts }, | ||
707 | }; | ||
708 | |||
709 | static int eeepc_wmi_debugfs_open(struct inode *inode, struct file *file) | ||
710 | { | ||
711 | struct eeepc_wmi_debugfs_node *node = inode->i_private; | ||
712 | |||
713 | return single_open(file, node->show, node->eeepc); | ||
714 | } | ||
715 | |||
716 | static const struct file_operations eeepc_wmi_debugfs_io_ops = { | ||
717 | .owner = THIS_MODULE, | ||
718 | .open = eeepc_wmi_debugfs_open, | ||
719 | .read = seq_read, | ||
720 | .llseek = seq_lseek, | ||
721 | .release = single_release, | ||
722 | }; | ||
723 | |||
724 | static void eeepc_wmi_debugfs_exit(struct eeepc_wmi *eeepc) | ||
725 | { | ||
726 | debugfs_remove_recursive(eeepc->debug.root); | ||
727 | } | ||
728 | |||
729 | static int eeepc_wmi_debugfs_init(struct eeepc_wmi *eeepc) | ||
730 | { | ||
731 | struct dentry *dent; | ||
732 | int i; | ||
733 | |||
734 | eeepc->debug.root = debugfs_create_dir(EEEPC_WMI_FILE, NULL); | ||
735 | if (!eeepc->debug.root) { | ||
736 | pr_err("failed to create debugfs directory"); | ||
737 | goto error_debugfs; | ||
738 | } | ||
739 | |||
740 | dent = debugfs_create_x32("dev_id", S_IRUGO|S_IWUSR, | ||
741 | eeepc->debug.root, &eeepc->debug.dev_id); | ||
742 | if (!dent) | ||
743 | goto error_debugfs; | ||
744 | |||
745 | dent = debugfs_create_x32("ctrl_param", S_IRUGO|S_IWUSR, | ||
746 | eeepc->debug.root, &eeepc->debug.ctrl_param); | ||
747 | if (!dent) | ||
748 | goto error_debugfs; | ||
749 | |||
750 | for (i = 0; i < ARRAY_SIZE(eeepc_wmi_debug_files); i++) { | ||
751 | struct eeepc_wmi_debugfs_node *node = &eeepc_wmi_debug_files[i]; | ||
752 | |||
753 | node->eeepc = eeepc; | ||
754 | dent = debugfs_create_file(node->name, S_IFREG | S_IRUGO, | ||
755 | eeepc->debug.root, node, | ||
756 | &eeepc_wmi_debugfs_io_ops); | ||
757 | if (!dent) { | ||
758 | pr_err("failed to create debug file: %s\n", node->name); | ||
759 | goto error_debugfs; | ||
760 | } | ||
761 | } | ||
762 | |||
763 | return 0; | ||
764 | |||
765 | error_debugfs: | ||
766 | eeepc_wmi_debugfs_exit(eeepc); | ||
767 | return -ENOMEM; | ||
768 | } | ||
769 | |||
770 | /* | ||
771 | * WMI Driver | ||
772 | */ | ||
773 | static struct platform_device * __init eeepc_wmi_add(void) | ||
774 | { | ||
775 | struct eeepc_wmi *eeepc; | ||
776 | acpi_status status; | ||
777 | int err; | ||
778 | |||
779 | eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL); | ||
780 | if (!eeepc) | ||
781 | return ERR_PTR(-ENOMEM); | ||
782 | |||
783 | /* | ||
784 | * Register the platform device first. It is used as a parent for the | ||
785 | * sub-devices below. | ||
786 | */ | ||
787 | err = eeepc_wmi_platform_init(eeepc); | ||
788 | if (err) | ||
789 | goto fail_platform; | ||
790 | |||
791 | err = eeepc_wmi_input_init(eeepc); | ||
792 | if (err) | ||
793 | goto fail_input; | ||
794 | |||
795 | err = eeepc_wmi_led_init(eeepc); | ||
796 | if (err) | ||
797 | goto fail_leds; | ||
798 | |||
799 | err = eeepc_wmi_rfkill_init(eeepc); | ||
800 | if (err) | ||
801 | goto fail_rfkill; | ||
802 | |||
803 | if (!acpi_video_backlight_support()) { | ||
804 | err = eeepc_wmi_backlight_init(eeepc); | ||
805 | if (err) | ||
806 | goto fail_backlight; | ||
807 | } else | ||
808 | pr_info("Backlight controlled by ACPI video driver\n"); | ||
809 | |||
810 | status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID, | ||
811 | eeepc_wmi_notify, eeepc); | ||
812 | if (ACPI_FAILURE(status)) { | ||
813 | pr_err("Unable to register notify handler - %d\n", | ||
814 | status); | ||
815 | err = -ENODEV; | ||
816 | goto fail_wmi_handler; | ||
817 | } | ||
818 | |||
819 | err = eeepc_wmi_debugfs_init(eeepc); | ||
820 | if (err) | ||
821 | goto fail_debugfs; | ||
822 | |||
823 | return eeepc->platform_device; | ||
824 | |||
825 | fail_debugfs: | ||
826 | wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID); | ||
827 | fail_wmi_handler: | ||
828 | eeepc_wmi_backlight_exit(eeepc); | ||
829 | fail_backlight: | ||
830 | eeepc_wmi_rfkill_exit(eeepc); | ||
831 | fail_rfkill: | ||
832 | eeepc_wmi_led_exit(eeepc); | ||
833 | fail_leds: | ||
834 | eeepc_wmi_input_exit(eeepc); | ||
835 | fail_input: | ||
836 | eeepc_wmi_platform_exit(eeepc); | ||
837 | fail_platform: | ||
838 | kfree(eeepc); | ||
839 | return ERR_PTR(err); | ||
840 | } | ||
841 | |||
842 | static int eeepc_wmi_remove(struct platform_device *device) | ||
843 | { | ||
844 | struct eeepc_wmi *eeepc; | ||
845 | |||
846 | eeepc = platform_get_drvdata(device); | ||
847 | wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID); | ||
848 | eeepc_wmi_backlight_exit(eeepc); | ||
849 | eeepc_wmi_input_exit(eeepc); | ||
850 | eeepc_wmi_led_exit(eeepc); | ||
851 | eeepc_wmi_rfkill_exit(eeepc); | ||
852 | eeepc_wmi_debugfs_exit(eeepc); | ||
853 | eeepc_wmi_platform_exit(eeepc); | ||
854 | |||
855 | kfree(eeepc); | ||
856 | return 0; | ||
857 | } | ||
858 | |||
859 | static struct platform_driver platform_driver = { | ||
860 | .driver = { | ||
861 | .name = EEEPC_WMI_FILE, | ||
862 | .owner = THIS_MODULE, | ||
863 | }, | ||
864 | }; | ||
865 | |||
866 | static acpi_status __init eeepc_wmi_parse_device(acpi_handle handle, u32 level, | ||
867 | void *context, void **retval) | 83 | void *context, void **retval) |
868 | { | 84 | { |
869 | pr_warning("Found legacy ATKD device (%s)", EEEPC_ACPI_HID); | 85 | pr_warning("Found legacy ATKD device (%s)", EEEPC_ACPI_HID); |
@@ -871,7 +87,7 @@ static acpi_status __init eeepc_wmi_parse_device(acpi_handle handle, u32 level, | |||
871 | return AE_CTRL_TERMINATE; | 87 | return AE_CTRL_TERMINATE; |
872 | } | 88 | } |
873 | 89 | ||
874 | static int __init eeepc_wmi_check_atkd(void) | 90 | static int eeepc_wmi_check_atkd(void) |
875 | { | 91 | { |
876 | acpi_status status; | 92 | acpi_status status; |
877 | bool found = false; | 93 | bool found = false; |
@@ -884,16 +100,8 @@ static int __init eeepc_wmi_check_atkd(void) | |||
884 | return -1; | 100 | return -1; |
885 | } | 101 | } |
886 | 102 | ||
887 | static int __init eeepc_wmi_init(void) | 103 | static int eeepc_wmi_probe(struct platform_device *pdev) |
888 | { | 104 | { |
889 | int err; | ||
890 | |||
891 | if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID) || | ||
892 | !wmi_has_guid(EEEPC_WMI_MGMT_GUID)) { | ||
893 | pr_warning("No known WMI GUID found\n"); | ||
894 | return -ENODEV; | ||
895 | } | ||
896 | |||
897 | if (eeepc_wmi_check_atkd()) { | 105 | if (eeepc_wmi_check_atkd()) { |
898 | pr_warning("WMI device present, but legacy ATKD device is also " | 106 | pr_warning("WMI device present, but legacy ATKD device is also " |
899 | "present and enabled."); | 107 | "present and enabled."); |
@@ -901,33 +109,59 @@ static int __init eeepc_wmi_init(void) | |||
901 | "acpi_osi=\"!Windows 2009\""); | 109 | "acpi_osi=\"!Windows 2009\""); |
902 | pr_warning("Can't load eeepc-wmi, use default acpi_osi " | 110 | pr_warning("Can't load eeepc-wmi, use default acpi_osi " |
903 | "(preferred) or eeepc-laptop"); | 111 | "(preferred) or eeepc-laptop"); |
904 | return -ENODEV; | 112 | return -EBUSY; |
905 | } | 113 | } |
114 | return 0; | ||
115 | } | ||
906 | 116 | ||
907 | platform_device = eeepc_wmi_add(); | 117 | static void eeepc_dmi_check(struct asus_wmi_driver *driver) |
908 | if (IS_ERR(platform_device)) { | 118 | { |
909 | err = PTR_ERR(platform_device); | 119 | const char *model; |
910 | goto fail_eeepc_wmi; | 120 | |
911 | } | 121 | model = dmi_get_system_info(DMI_PRODUCT_NAME); |
122 | if (!model) | ||
123 | return; | ||
912 | 124 | ||
913 | err = platform_driver_register(&platform_driver); | 125 | /* |
914 | if (err) { | 126 | * Whitelist for wlan hotplug |
915 | pr_warning("Unable to register platform driver\n"); | 127 | * |
916 | goto fail_platform_driver; | 128 | * Asus 1000H needs the current hotplug code to handle |
129 | * Fn+F2 correctly. We may add other Asus here later, but | ||
130 | * it seems that most of the laptops supported by asus-wmi | ||
131 | * don't need to be on this list | ||
132 | */ | ||
133 | if (strcmp(model, "1000H") == 0) { | ||
134 | driver->hotplug_wireless = true; | ||
135 | pr_info("wlan hotplug enabled\n"); | ||
917 | } | 136 | } |
137 | } | ||
138 | |||
139 | static void eeepc_wmi_quirks(struct asus_wmi_driver *driver) | ||
140 | { | ||
141 | driver->hotplug_wireless = hotplug_wireless; | ||
142 | eeepc_dmi_check(driver); | ||
143 | } | ||
144 | |||
145 | static struct asus_wmi_driver asus_wmi_driver = { | ||
146 | .name = EEEPC_WMI_FILE, | ||
147 | .owner = THIS_MODULE, | ||
148 | .event_guid = EEEPC_WMI_EVENT_GUID, | ||
149 | .keymap = eeepc_wmi_keymap, | ||
150 | .input_name = "Eee PC WMI hotkeys", | ||
151 | .input_phys = EEEPC_WMI_FILE "/input0", | ||
152 | .probe = eeepc_wmi_probe, | ||
153 | .quirks = eeepc_wmi_quirks, | ||
154 | }; | ||
918 | 155 | ||
919 | return 0; | ||
920 | 156 | ||
921 | fail_platform_driver: | 157 | static int __init eeepc_wmi_init(void) |
922 | eeepc_wmi_remove(platform_device); | 158 | { |
923 | fail_eeepc_wmi: | 159 | return asus_wmi_register_driver(&asus_wmi_driver); |
924 | return err; | ||
925 | } | 160 | } |
926 | 161 | ||
927 | static void __exit eeepc_wmi_exit(void) | 162 | static void __exit eeepc_wmi_exit(void) |
928 | { | 163 | { |
929 | eeepc_wmi_remove(platform_device); | 164 | asus_wmi_unregister_driver(&asus_wmi_driver); |
930 | platform_driver_unregister(&platform_driver); | ||
931 | } | 165 | } |
932 | 166 | ||
933 | module_init(eeepc_wmi_init); | 167 | module_init(eeepc_wmi_init); |
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 9e05af9c41cb..1bc4a7539ba9 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * HP WMI hotkeys | 2 | * HP WMI hotkeys |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Red Hat <mjg@redhat.com> | 4 | * Copyright (C) 2008 Red Hat <mjg@redhat.com> |
5 | * Copyright (C) 2010, 2011 Anssi Hannula <anssi.hannula@iki.fi> | ||
5 | * | 6 | * |
6 | * Portions based on wistron_btns.c: | 7 | * Portions based on wistron_btns.c: |
7 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> | 8 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> |
@@ -51,6 +52,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); | |||
51 | #define HPWMI_HARDWARE_QUERY 0x4 | 52 | #define HPWMI_HARDWARE_QUERY 0x4 |
52 | #define HPWMI_WIRELESS_QUERY 0x5 | 53 | #define HPWMI_WIRELESS_QUERY 0x5 |
53 | #define HPWMI_HOTKEY_QUERY 0xc | 54 | #define HPWMI_HOTKEY_QUERY 0xc |
55 | #define HPWMI_WIRELESS2_QUERY 0x1b | ||
54 | 56 | ||
55 | #define PREFIX "HP WMI: " | 57 | #define PREFIX "HP WMI: " |
56 | #define UNIMP "Unimplemented " | 58 | #define UNIMP "Unimplemented " |
@@ -86,7 +88,46 @@ struct bios_args { | |||
86 | struct bios_return { | 88 | struct bios_return { |
87 | u32 sigpass; | 89 | u32 sigpass; |
88 | u32 return_code; | 90 | u32 return_code; |
89 | u32 value; | 91 | }; |
92 | |||
93 | enum hp_return_value { | ||
94 | HPWMI_RET_WRONG_SIGNATURE = 0x02, | ||
95 | HPWMI_RET_UNKNOWN_COMMAND = 0x03, | ||
96 | HPWMI_RET_UNKNOWN_CMDTYPE = 0x04, | ||
97 | HPWMI_RET_INVALID_PARAMETERS = 0x05, | ||
98 | }; | ||
99 | |||
100 | enum hp_wireless2_bits { | ||
101 | HPWMI_POWER_STATE = 0x01, | ||
102 | HPWMI_POWER_SOFT = 0x02, | ||
103 | HPWMI_POWER_BIOS = 0x04, | ||
104 | HPWMI_POWER_HARD = 0x08, | ||
105 | }; | ||
106 | |||
107 | #define IS_HWBLOCKED(x) ((x & (HPWMI_POWER_BIOS | HPWMI_POWER_HARD)) \ | ||
108 | != (HPWMI_POWER_BIOS | HPWMI_POWER_HARD)) | ||
109 | #define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT) | ||
110 | |||
111 | struct bios_rfkill2_device_state { | ||
112 | u8 radio_type; | ||
113 | u8 bus_type; | ||
114 | u16 vendor_id; | ||
115 | u16 product_id; | ||
116 | u16 subsys_vendor_id; | ||
117 | u16 subsys_product_id; | ||
118 | u8 rfkill_id; | ||
119 | u8 power; | ||
120 | u8 unknown[4]; | ||
121 | }; | ||
122 | |||
123 | /* 7 devices fit into the 128 byte buffer */ | ||
124 | #define HPWMI_MAX_RFKILL2_DEVICES 7 | ||
125 | |||
126 | struct bios_rfkill2_state { | ||
127 | u8 unknown[7]; | ||
128 | u8 count; | ||
129 | u8 pad[8]; | ||
130 | struct bios_rfkill2_device_state device[HPWMI_MAX_RFKILL2_DEVICES]; | ||
90 | }; | 131 | }; |
91 | 132 | ||
92 | static const struct key_entry hp_wmi_keymap[] = { | 133 | static const struct key_entry hp_wmi_keymap[] = { |
@@ -108,6 +149,15 @@ static struct rfkill *wifi_rfkill; | |||
108 | static struct rfkill *bluetooth_rfkill; | 149 | static struct rfkill *bluetooth_rfkill; |
109 | static struct rfkill *wwan_rfkill; | 150 | static struct rfkill *wwan_rfkill; |
110 | 151 | ||
152 | struct rfkill2_device { | ||
153 | u8 id; | ||
154 | int num; | ||
155 | struct rfkill *rfkill; | ||
156 | }; | ||
157 | |||
158 | static int rfkill2_count; | ||
159 | static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES]; | ||
160 | |||
111 | static const struct dev_pm_ops hp_wmi_pm_ops = { | 161 | static const struct dev_pm_ops hp_wmi_pm_ops = { |
112 | .resume = hp_wmi_resume_handler, | 162 | .resume = hp_wmi_resume_handler, |
113 | .restore = hp_wmi_resume_handler, | 163 | .restore = hp_wmi_resume_handler, |
@@ -129,7 +179,8 @@ static struct platform_driver hp_wmi_driver = { | |||
129 | * query: The commandtype -> What should be queried | 179 | * query: The commandtype -> What should be queried |
130 | * write: The command -> 0 read, 1 write, 3 ODM specific | 180 | * write: The command -> 0 read, 1 write, 3 ODM specific |
131 | * buffer: Buffer used as input and/or output | 181 | * buffer: Buffer used as input and/or output |
132 | * buffersize: Size of buffer | 182 | * insize: Size of input buffer |
183 | * outsize: Size of output buffer | ||
133 | * | 184 | * |
134 | * returns zero on success | 185 | * returns zero on success |
135 | * an HP WMI query specific error code (which is positive) | 186 | * an HP WMI query specific error code (which is positive) |
@@ -140,25 +191,29 @@ static struct platform_driver hp_wmi_driver = { | |||
140 | * size. E.g. Battery info query (0x7) is defined to have 1 byte input | 191 | * size. E.g. Battery info query (0x7) is defined to have 1 byte input |
141 | * and 128 byte output. The caller would do: | 192 | * and 128 byte output. The caller would do: |
142 | * buffer = kzalloc(128, GFP_KERNEL); | 193 | * buffer = kzalloc(128, GFP_KERNEL); |
143 | * ret = hp_wmi_perform_query(0x7, 0, buffer, 128) | 194 | * ret = hp_wmi_perform_query(0x7, 0, buffer, 1, 128) |
144 | */ | 195 | */ |
145 | static int hp_wmi_perform_query(int query, int write, u32 *buffer, | 196 | static int hp_wmi_perform_query(int query, int write, void *buffer, |
146 | int buffersize) | 197 | int insize, int outsize) |
147 | { | 198 | { |
148 | struct bios_return bios_return; | 199 | struct bios_return *bios_return; |
149 | acpi_status status; | 200 | int actual_outsize; |
150 | union acpi_object *obj; | 201 | union acpi_object *obj; |
151 | struct bios_args args = { | 202 | struct bios_args args = { |
152 | .signature = 0x55434553, | 203 | .signature = 0x55434553, |
153 | .command = write ? 0x2 : 0x1, | 204 | .command = write ? 0x2 : 0x1, |
154 | .commandtype = query, | 205 | .commandtype = query, |
155 | .datasize = buffersize, | 206 | .datasize = insize, |
156 | .data = *buffer, | 207 | .data = 0, |
157 | }; | 208 | }; |
158 | struct acpi_buffer input = { sizeof(struct bios_args), &args }; | 209 | struct acpi_buffer input = { sizeof(struct bios_args), &args }; |
159 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | 210 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; |
160 | 211 | ||
161 | status = wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output); | 212 | if (WARN_ON(insize > sizeof(args.data))) |
213 | return -EINVAL; | ||
214 | memcpy(&args.data, buffer, insize); | ||
215 | |||
216 | wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output); | ||
162 | 217 | ||
163 | obj = output.pointer; | 218 | obj = output.pointer; |
164 | 219 | ||
@@ -169,10 +224,26 @@ static int hp_wmi_perform_query(int query, int write, u32 *buffer, | |||
169 | return -EINVAL; | 224 | return -EINVAL; |
170 | } | 225 | } |
171 | 226 | ||
172 | bios_return = *((struct bios_return *)obj->buffer.pointer); | 227 | bios_return = (struct bios_return *)obj->buffer.pointer; |
173 | 228 | ||
174 | memcpy(buffer, &bios_return.value, sizeof(bios_return.value)); | 229 | if (bios_return->return_code) { |
230 | if (bios_return->return_code != HPWMI_RET_UNKNOWN_CMDTYPE) | ||
231 | printk(KERN_WARNING PREFIX "query 0x%x returned " | ||
232 | "error 0x%x\n", | ||
233 | query, bios_return->return_code); | ||
234 | kfree(obj); | ||
235 | return bios_return->return_code; | ||
236 | } | ||
237 | |||
238 | if (!outsize) { | ||
239 | /* ignore output data */ | ||
240 | kfree(obj); | ||
241 | return 0; | ||
242 | } | ||
175 | 243 | ||
244 | actual_outsize = min(outsize, (int)(obj->buffer.length - sizeof(*bios_return))); | ||
245 | memcpy(buffer, obj->buffer.pointer + sizeof(*bios_return), actual_outsize); | ||
246 | memset(buffer + actual_outsize, 0, outsize - actual_outsize); | ||
176 | kfree(obj); | 247 | kfree(obj); |
177 | return 0; | 248 | return 0; |
178 | } | 249 | } |
@@ -181,7 +252,7 @@ static int hp_wmi_display_state(void) | |||
181 | { | 252 | { |
182 | int state = 0; | 253 | int state = 0; |
183 | int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state, | 254 | int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state, |
184 | sizeof(state)); | 255 | sizeof(state), sizeof(state)); |
185 | if (ret) | 256 | if (ret) |
186 | return -EINVAL; | 257 | return -EINVAL; |
187 | return state; | 258 | return state; |
@@ -191,7 +262,7 @@ static int hp_wmi_hddtemp_state(void) | |||
191 | { | 262 | { |
192 | int state = 0; | 263 | int state = 0; |
193 | int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state, | 264 | int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state, |
194 | sizeof(state)); | 265 | sizeof(state), sizeof(state)); |
195 | if (ret) | 266 | if (ret) |
196 | return -EINVAL; | 267 | return -EINVAL; |
197 | return state; | 268 | return state; |
@@ -201,7 +272,7 @@ static int hp_wmi_als_state(void) | |||
201 | { | 272 | { |
202 | int state = 0; | 273 | int state = 0; |
203 | int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state, | 274 | int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state, |
204 | sizeof(state)); | 275 | sizeof(state), sizeof(state)); |
205 | if (ret) | 276 | if (ret) |
206 | return -EINVAL; | 277 | return -EINVAL; |
207 | return state; | 278 | return state; |
@@ -211,7 +282,7 @@ static int hp_wmi_dock_state(void) | |||
211 | { | 282 | { |
212 | int state = 0; | 283 | int state = 0; |
213 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, | 284 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, |
214 | sizeof(state)); | 285 | sizeof(state), sizeof(state)); |
215 | 286 | ||
216 | if (ret) | 287 | if (ret) |
217 | return -EINVAL; | 288 | return -EINVAL; |
@@ -223,7 +294,7 @@ static int hp_wmi_tablet_state(void) | |||
223 | { | 294 | { |
224 | int state = 0; | 295 | int state = 0; |
225 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, | 296 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, |
226 | sizeof(state)); | 297 | sizeof(state), sizeof(state)); |
227 | if (ret) | 298 | if (ret) |
228 | return ret; | 299 | return ret; |
229 | 300 | ||
@@ -237,7 +308,7 @@ static int hp_wmi_set_block(void *data, bool blocked) | |||
237 | int ret; | 308 | int ret; |
238 | 309 | ||
239 | ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, | 310 | ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, |
240 | &query, sizeof(query)); | 311 | &query, sizeof(query), 0); |
241 | if (ret) | 312 | if (ret) |
242 | return -EINVAL; | 313 | return -EINVAL; |
243 | return 0; | 314 | return 0; |
@@ -252,7 +323,8 @@ static bool hp_wmi_get_sw_state(enum hp_wmi_radio r) | |||
252 | int wireless = 0; | 323 | int wireless = 0; |
253 | int mask; | 324 | int mask; |
254 | hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, | 325 | hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, |
255 | &wireless, sizeof(wireless)); | 326 | &wireless, sizeof(wireless), |
327 | sizeof(wireless)); | ||
256 | /* TBD: Pass error */ | 328 | /* TBD: Pass error */ |
257 | 329 | ||
258 | mask = 0x200 << (r * 8); | 330 | mask = 0x200 << (r * 8); |
@@ -268,7 +340,8 @@ static bool hp_wmi_get_hw_state(enum hp_wmi_radio r) | |||
268 | int wireless = 0; | 340 | int wireless = 0; |
269 | int mask; | 341 | int mask; |
270 | hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, | 342 | hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, |
271 | &wireless, sizeof(wireless)); | 343 | &wireless, sizeof(wireless), |
344 | sizeof(wireless)); | ||
272 | /* TBD: Pass error */ | 345 | /* TBD: Pass error */ |
273 | 346 | ||
274 | mask = 0x800 << (r * 8); | 347 | mask = 0x800 << (r * 8); |
@@ -279,6 +352,51 @@ static bool hp_wmi_get_hw_state(enum hp_wmi_radio r) | |||
279 | return true; | 352 | return true; |
280 | } | 353 | } |
281 | 354 | ||
355 | static int hp_wmi_rfkill2_set_block(void *data, bool blocked) | ||
356 | { | ||
357 | int rfkill_id = (int)(long)data; | ||
358 | char buffer[4] = { 0x01, 0x00, rfkill_id, !blocked }; | ||
359 | |||
360 | if (hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 1, | ||
361 | buffer, sizeof(buffer), 0)) | ||
362 | return -EINVAL; | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | static const struct rfkill_ops hp_wmi_rfkill2_ops = { | ||
367 | .set_block = hp_wmi_rfkill2_set_block, | ||
368 | }; | ||
369 | |||
370 | static int hp_wmi_rfkill2_refresh(void) | ||
371 | { | ||
372 | int err, i; | ||
373 | struct bios_rfkill2_state state; | ||
374 | |||
375 | err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 0, &state, | ||
376 | 0, sizeof(state)); | ||
377 | if (err) | ||
378 | return err; | ||
379 | |||
380 | for (i = 0; i < rfkill2_count; i++) { | ||
381 | int num = rfkill2[i].num; | ||
382 | struct bios_rfkill2_device_state *devstate; | ||
383 | devstate = &state.device[num]; | ||
384 | |||
385 | if (num >= state.count || | ||
386 | devstate->rfkill_id != rfkill2[i].id) { | ||
387 | printk(KERN_WARNING PREFIX "power configuration of " | ||
388 | "the wireless devices unexpectedly changed\n"); | ||
389 | continue; | ||
390 | } | ||
391 | |||
392 | rfkill_set_states(rfkill2[i].rfkill, | ||
393 | IS_SWBLOCKED(devstate->power), | ||
394 | IS_HWBLOCKED(devstate->power)); | ||
395 | } | ||
396 | |||
397 | return 0; | ||
398 | } | ||
399 | |||
282 | static ssize_t show_display(struct device *dev, struct device_attribute *attr, | 400 | static ssize_t show_display(struct device *dev, struct device_attribute *attr, |
283 | char *buf) | 401 | char *buf) |
284 | { | 402 | { |
@@ -329,7 +447,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr, | |||
329 | { | 447 | { |
330 | u32 tmp = simple_strtoul(buf, NULL, 10); | 448 | u32 tmp = simple_strtoul(buf, NULL, 10); |
331 | int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp, | 449 | int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp, |
332 | sizeof(tmp)); | 450 | sizeof(tmp), sizeof(tmp)); |
333 | if (ret) | 451 | if (ret) |
334 | return -EINVAL; | 452 | return -EINVAL; |
335 | 453 | ||
@@ -402,6 +520,7 @@ static void hp_wmi_notify(u32 value, void *context) | |||
402 | case HPWMI_BEZEL_BUTTON: | 520 | case HPWMI_BEZEL_BUTTON: |
403 | ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, | 521 | ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, |
404 | &key_code, | 522 | &key_code, |
523 | sizeof(key_code), | ||
405 | sizeof(key_code)); | 524 | sizeof(key_code)); |
406 | if (ret) | 525 | if (ret) |
407 | break; | 526 | break; |
@@ -412,6 +531,11 @@ static void hp_wmi_notify(u32 value, void *context) | |||
412 | key_code); | 531 | key_code); |
413 | break; | 532 | break; |
414 | case HPWMI_WIRELESS: | 533 | case HPWMI_WIRELESS: |
534 | if (rfkill2_count) { | ||
535 | hp_wmi_rfkill2_refresh(); | ||
536 | break; | ||
537 | } | ||
538 | |||
415 | if (wifi_rfkill) | 539 | if (wifi_rfkill) |
416 | rfkill_set_states(wifi_rfkill, | 540 | rfkill_set_states(wifi_rfkill, |
417 | hp_wmi_get_sw_state(HPWMI_WIFI), | 541 | hp_wmi_get_sw_state(HPWMI_WIFI), |
@@ -502,32 +626,16 @@ static void cleanup_sysfs(struct platform_device *device) | |||
502 | device_remove_file(&device->dev, &dev_attr_tablet); | 626 | device_remove_file(&device->dev, &dev_attr_tablet); |
503 | } | 627 | } |
504 | 628 | ||
505 | static int __devinit hp_wmi_bios_setup(struct platform_device *device) | 629 | static int __devinit hp_wmi_rfkill_setup(struct platform_device *device) |
506 | { | 630 | { |
507 | int err; | 631 | int err; |
508 | int wireless = 0; | 632 | int wireless = 0; |
509 | 633 | ||
510 | err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless, | 634 | err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless, |
511 | sizeof(wireless)); | 635 | sizeof(wireless), sizeof(wireless)); |
512 | if (err) | 636 | if (err) |
513 | return err; | 637 | return err; |
514 | 638 | ||
515 | err = device_create_file(&device->dev, &dev_attr_display); | ||
516 | if (err) | ||
517 | goto add_sysfs_error; | ||
518 | err = device_create_file(&device->dev, &dev_attr_hddtemp); | ||
519 | if (err) | ||
520 | goto add_sysfs_error; | ||
521 | err = device_create_file(&device->dev, &dev_attr_als); | ||
522 | if (err) | ||
523 | goto add_sysfs_error; | ||
524 | err = device_create_file(&device->dev, &dev_attr_dock); | ||
525 | if (err) | ||
526 | goto add_sysfs_error; | ||
527 | err = device_create_file(&device->dev, &dev_attr_tablet); | ||
528 | if (err) | ||
529 | goto add_sysfs_error; | ||
530 | |||
531 | if (wireless & 0x1) { | 639 | if (wireless & 0x1) { |
532 | wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, | 640 | wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, |
533 | RFKILL_TYPE_WLAN, | 641 | RFKILL_TYPE_WLAN, |
@@ -573,14 +681,131 @@ static int __devinit hp_wmi_bios_setup(struct platform_device *device) | |||
573 | return 0; | 681 | return 0; |
574 | register_wwan_err: | 682 | register_wwan_err: |
575 | rfkill_destroy(wwan_rfkill); | 683 | rfkill_destroy(wwan_rfkill); |
684 | wwan_rfkill = NULL; | ||
576 | if (bluetooth_rfkill) | 685 | if (bluetooth_rfkill) |
577 | rfkill_unregister(bluetooth_rfkill); | 686 | rfkill_unregister(bluetooth_rfkill); |
578 | register_bluetooth_error: | 687 | register_bluetooth_error: |
579 | rfkill_destroy(bluetooth_rfkill); | 688 | rfkill_destroy(bluetooth_rfkill); |
689 | bluetooth_rfkill = NULL; | ||
580 | if (wifi_rfkill) | 690 | if (wifi_rfkill) |
581 | rfkill_unregister(wifi_rfkill); | 691 | rfkill_unregister(wifi_rfkill); |
582 | register_wifi_error: | 692 | register_wifi_error: |
583 | rfkill_destroy(wifi_rfkill); | 693 | rfkill_destroy(wifi_rfkill); |
694 | wifi_rfkill = NULL; | ||
695 | return err; | ||
696 | } | ||
697 | |||
698 | static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device) | ||
699 | { | ||
700 | int err, i; | ||
701 | struct bios_rfkill2_state state; | ||
702 | err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 0, &state, | ||
703 | 0, sizeof(state)); | ||
704 | if (err) | ||
705 | return err; | ||
706 | |||
707 | if (state.count > HPWMI_MAX_RFKILL2_DEVICES) { | ||
708 | printk(KERN_WARNING PREFIX "unable to parse 0x1b query output\n"); | ||
709 | return -EINVAL; | ||
710 | } | ||
711 | |||
712 | for (i = 0; i < state.count; i++) { | ||
713 | struct rfkill *rfkill; | ||
714 | enum rfkill_type type; | ||
715 | char *name; | ||
716 | switch (state.device[i].radio_type) { | ||
717 | case HPWMI_WIFI: | ||
718 | type = RFKILL_TYPE_WLAN; | ||
719 | name = "hp-wifi"; | ||
720 | break; | ||
721 | case HPWMI_BLUETOOTH: | ||
722 | type = RFKILL_TYPE_BLUETOOTH; | ||
723 | name = "hp-bluetooth"; | ||
724 | break; | ||
725 | case HPWMI_WWAN: | ||
726 | type = RFKILL_TYPE_WWAN; | ||
727 | name = "hp-wwan"; | ||
728 | break; | ||
729 | default: | ||
730 | printk(KERN_WARNING PREFIX "unknown device type 0x%x\n", | ||
731 | state.device[i].radio_type); | ||
732 | continue; | ||
733 | } | ||
734 | |||
735 | if (!state.device[i].vendor_id) { | ||
736 | printk(KERN_WARNING PREFIX "zero device %d while %d " | ||
737 | "reported\n", i, state.count); | ||
738 | continue; | ||
739 | } | ||
740 | |||
741 | rfkill = rfkill_alloc(name, &device->dev, type, | ||
742 | &hp_wmi_rfkill2_ops, (void *)(long)i); | ||
743 | if (!rfkill) { | ||
744 | err = -ENOMEM; | ||
745 | goto fail; | ||
746 | } | ||
747 | |||
748 | rfkill2[rfkill2_count].id = state.device[i].rfkill_id; | ||
749 | rfkill2[rfkill2_count].num = i; | ||
750 | rfkill2[rfkill2_count].rfkill = rfkill; | ||
751 | |||
752 | rfkill_init_sw_state(rfkill, | ||
753 | IS_SWBLOCKED(state.device[i].power)); | ||
754 | rfkill_set_hw_state(rfkill, | ||
755 | IS_HWBLOCKED(state.device[i].power)); | ||
756 | |||
757 | if (!(state.device[i].power & HPWMI_POWER_BIOS)) | ||
758 | printk(KERN_INFO PREFIX "device %s blocked by BIOS\n", | ||
759 | name); | ||
760 | |||
761 | err = rfkill_register(rfkill); | ||
762 | if (err) { | ||
763 | rfkill_destroy(rfkill); | ||
764 | goto fail; | ||
765 | } | ||
766 | |||
767 | rfkill2_count++; | ||
768 | } | ||
769 | |||
770 | return 0; | ||
771 | fail: | ||
772 | for (; rfkill2_count > 0; rfkill2_count--) { | ||
773 | rfkill_unregister(rfkill2[rfkill2_count - 1].rfkill); | ||
774 | rfkill_destroy(rfkill2[rfkill2_count - 1].rfkill); | ||
775 | } | ||
776 | return err; | ||
777 | } | ||
778 | |||
779 | static int __devinit hp_wmi_bios_setup(struct platform_device *device) | ||
780 | { | ||
781 | int err; | ||
782 | |||
783 | /* clear detected rfkill devices */ | ||
784 | wifi_rfkill = NULL; | ||
785 | bluetooth_rfkill = NULL; | ||
786 | wwan_rfkill = NULL; | ||
787 | rfkill2_count = 0; | ||
788 | |||
789 | if (hp_wmi_rfkill_setup(device)) | ||
790 | hp_wmi_rfkill2_setup(device); | ||
791 | |||
792 | err = device_create_file(&device->dev, &dev_attr_display); | ||
793 | if (err) | ||
794 | goto add_sysfs_error; | ||
795 | err = device_create_file(&device->dev, &dev_attr_hddtemp); | ||
796 | if (err) | ||
797 | goto add_sysfs_error; | ||
798 | err = device_create_file(&device->dev, &dev_attr_als); | ||
799 | if (err) | ||
800 | goto add_sysfs_error; | ||
801 | err = device_create_file(&device->dev, &dev_attr_dock); | ||
802 | if (err) | ||
803 | goto add_sysfs_error; | ||
804 | err = device_create_file(&device->dev, &dev_attr_tablet); | ||
805 | if (err) | ||
806 | goto add_sysfs_error; | ||
807 | return 0; | ||
808 | |||
584 | add_sysfs_error: | 809 | add_sysfs_error: |
585 | cleanup_sysfs(device); | 810 | cleanup_sysfs(device); |
586 | return err; | 811 | return err; |
@@ -588,8 +813,14 @@ add_sysfs_error: | |||
588 | 813 | ||
589 | static int __exit hp_wmi_bios_remove(struct platform_device *device) | 814 | static int __exit hp_wmi_bios_remove(struct platform_device *device) |
590 | { | 815 | { |
816 | int i; | ||
591 | cleanup_sysfs(device); | 817 | cleanup_sysfs(device); |
592 | 818 | ||
819 | for (i = 0; i < rfkill2_count; i++) { | ||
820 | rfkill_unregister(rfkill2[i].rfkill); | ||
821 | rfkill_destroy(rfkill2[i].rfkill); | ||
822 | } | ||
823 | |||
593 | if (wifi_rfkill) { | 824 | if (wifi_rfkill) { |
594 | rfkill_unregister(wifi_rfkill); | 825 | rfkill_unregister(wifi_rfkill); |
595 | rfkill_destroy(wifi_rfkill); | 826 | rfkill_destroy(wifi_rfkill); |
@@ -622,6 +853,9 @@ static int hp_wmi_resume_handler(struct device *device) | |||
622 | input_sync(hp_wmi_input_dev); | 853 | input_sync(hp_wmi_input_dev); |
623 | } | 854 | } |
624 | 855 | ||
856 | if (rfkill2_count) | ||
857 | hp_wmi_rfkill2_refresh(); | ||
858 | |||
625 | if (wifi_rfkill) | 859 | if (wifi_rfkill) |
626 | rfkill_set_states(wifi_rfkill, | 860 | rfkill_set_states(wifi_rfkill, |
627 | hp_wmi_get_sw_state(HPWMI_WIFI), | 861 | hp_wmi_get_sw_state(HPWMI_WIFI), |
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 114d95247cdf..21b101899bae 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
@@ -459,6 +459,8 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) | |||
459 | if (test_bit(vpc_bit, &vpc1)) { | 459 | if (test_bit(vpc_bit, &vpc1)) { |
460 | if (vpc_bit == 9) | 460 | if (vpc_bit == 9) |
461 | ideapad_sync_rfk_state(adevice); | 461 | ideapad_sync_rfk_state(adevice); |
462 | else if (vpc_bit == 4) | ||
463 | read_ec_data(handle, 0x12, &vpc2); | ||
462 | else | 464 | else |
463 | ideapad_input_report(priv, vpc_bit); | 465 | ideapad_input_report(priv, vpc_bit); |
464 | } | 466 | } |
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 1294a39373ba..85c8ad43c0c5 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c | |||
@@ -1111,7 +1111,7 @@ static int ips_monitor(void *data) | |||
1111 | last_msecs = jiffies_to_msecs(jiffies); | 1111 | last_msecs = jiffies_to_msecs(jiffies); |
1112 | expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD); | 1112 | expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD); |
1113 | 1113 | ||
1114 | __set_current_state(TASK_UNINTERRUPTIBLE); | 1114 | __set_current_state(TASK_INTERRUPTIBLE); |
1115 | mod_timer(&timer, expire); | 1115 | mod_timer(&timer, expire); |
1116 | schedule(); | 1116 | schedule(); |
1117 | 1117 | ||
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c new file mode 100644 index 000000000000..213e79ba68d5 --- /dev/null +++ b/drivers/platform/x86/intel_mid_powerbtn.c | |||
@@ -0,0 +1,148 @@ | |||
1 | /* | ||
2 | * Power button driver for Medfield. | ||
3 | * | ||
4 | * Copyright (C) 2010 Intel Corp | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; version 2 of the License. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/input.h> | ||
26 | #include <asm/intel_scu_ipc.h> | ||
27 | |||
28 | #define DRIVER_NAME "msic_power_btn" | ||
29 | |||
30 | #define MSIC_IRQ_STAT 0x02 | ||
31 | #define MSIC_IRQ_PB (1 << 0) | ||
32 | #define MSIC_PB_CONFIG 0x3e | ||
33 | #define MSIC_PB_STATUS 0x3f | ||
34 | #define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */ | ||
35 | |||
36 | struct mfld_pb_priv { | ||
37 | struct input_dev *input; | ||
38 | unsigned int irq; | ||
39 | }; | ||
40 | |||
41 | static irqreturn_t mfld_pb_isr(int irq, void *dev_id) | ||
42 | { | ||
43 | struct mfld_pb_priv *priv = dev_id; | ||
44 | int ret; | ||
45 | u8 pbstat; | ||
46 | |||
47 | ret = intel_scu_ipc_ioread8(MSIC_PB_STATUS, &pbstat); | ||
48 | if (ret < 0) | ||
49 | return IRQ_HANDLED; | ||
50 | |||
51 | input_event(priv->input, EV_KEY, KEY_POWER, !(pbstat & MSIC_PB_LEVEL)); | ||
52 | input_sync(priv->input); | ||
53 | |||
54 | return IRQ_HANDLED; | ||
55 | } | ||
56 | |||
57 | static int __devinit mfld_pb_probe(struct platform_device *pdev) | ||
58 | { | ||
59 | struct mfld_pb_priv *priv; | ||
60 | struct input_dev *input; | ||
61 | int irq; | ||
62 | int error; | ||
63 | |||
64 | irq = platform_get_irq(pdev, 0); | ||
65 | if (irq < 0) | ||
66 | return -EINVAL; | ||
67 | |||
68 | priv = kzalloc(sizeof(struct mfld_pb_priv), GFP_KERNEL); | ||
69 | input = input_allocate_device(); | ||
70 | if (!priv || !input) { | ||
71 | error = -ENOMEM; | ||
72 | goto err_free_mem; | ||
73 | } | ||
74 | |||
75 | priv->input = input; | ||
76 | priv->irq = irq; | ||
77 | |||
78 | input->name = pdev->name; | ||
79 | input->phys = "power-button/input0"; | ||
80 | input->id.bustype = BUS_HOST; | ||
81 | input->dev.parent = &pdev->dev; | ||
82 | |||
83 | input_set_capability(input, EV_KEY, KEY_POWER); | ||
84 | |||
85 | error = request_threaded_irq(priv->irq, NULL, mfld_pb_isr, | ||
86 | 0, DRIVER_NAME, priv); | ||
87 | if (error) { | ||
88 | dev_err(&pdev->dev, | ||
89 | "unable to request irq %d for mfld power button\n", | ||
90 | irq); | ||
91 | goto err_free_mem; | ||
92 | } | ||
93 | |||
94 | error = input_register_device(input); | ||
95 | if (error) { | ||
96 | dev_err(&pdev->dev, | ||
97 | "unable to register input dev, error %d\n", error); | ||
98 | goto err_free_irq; | ||
99 | } | ||
100 | |||
101 | platform_set_drvdata(pdev, priv); | ||
102 | return 0; | ||
103 | |||
104 | err_free_irq: | ||
105 | free_irq(priv->irq, priv); | ||
106 | err_free_mem: | ||
107 | input_free_device(input); | ||
108 | kfree(priv); | ||
109 | return error; | ||
110 | } | ||
111 | |||
112 | static int __devexit mfld_pb_remove(struct platform_device *pdev) | ||
113 | { | ||
114 | struct mfld_pb_priv *priv = platform_get_drvdata(pdev); | ||
115 | |||
116 | free_irq(priv->irq, priv); | ||
117 | input_unregister_device(priv->input); | ||
118 | kfree(priv); | ||
119 | |||
120 | platform_set_drvdata(pdev, NULL); | ||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static struct platform_driver mfld_pb_driver = { | ||
125 | .driver = { | ||
126 | .name = DRIVER_NAME, | ||
127 | .owner = THIS_MODULE, | ||
128 | }, | ||
129 | .probe = mfld_pb_probe, | ||
130 | .remove = __devexit_p(mfld_pb_remove), | ||
131 | }; | ||
132 | |||
133 | static int __init mfld_pb_init(void) | ||
134 | { | ||
135 | return platform_driver_register(&mfld_pb_driver); | ||
136 | } | ||
137 | module_init(mfld_pb_init); | ||
138 | |||
139 | static void __exit mfld_pb_exit(void) | ||
140 | { | ||
141 | platform_driver_unregister(&mfld_pb_driver); | ||
142 | } | ||
143 | module_exit(mfld_pb_exit); | ||
144 | |||
145 | MODULE_AUTHOR("Hong Liu <hong.liu@intel.com>"); | ||
146 | MODULE_DESCRIPTION("Intel Medfield Power Button Driver"); | ||
147 | MODULE_LICENSE("GPL v2"); | ||
148 | MODULE_ALIAS("platform:" DRIVER_NAME); | ||
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c new file mode 100644 index 000000000000..6c12db503161 --- /dev/null +++ b/drivers/platform/x86/intel_mid_thermal.c | |||
@@ -0,0 +1,576 @@ | |||
1 | /* | ||
2 | * intel_mid_thermal.c - Intel MID platform thermal driver | ||
3 | * | ||
4 | * Copyright (C) 2011 Intel Corporation | ||
5 | * | ||
6 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; version 2 of the License. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
20 | * | ||
21 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
22 | * Author: Durgadoss R <durgadoss.r@intel.com> | ||
23 | */ | ||
24 | |||
25 | #define pr_fmt(fmt) "intel_mid_thermal: " fmt | ||
26 | |||
27 | #include <linux/module.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/err.h> | ||
30 | #include <linux/param.h> | ||
31 | #include <linux/device.h> | ||
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/pm.h> | ||
35 | #include <linux/thermal.h> | ||
36 | |||
37 | #include <asm/intel_scu_ipc.h> | ||
38 | |||
39 | /* Number of thermal sensors */ | ||
40 | #define MSIC_THERMAL_SENSORS 4 | ||
41 | |||
42 | /* ADC1 - thermal registers */ | ||
43 | #define MSIC_THERM_ADC1CNTL1 0x1C0 | ||
44 | #define MSIC_ADC_ENBL 0x10 | ||
45 | #define MSIC_ADC_START 0x08 | ||
46 | |||
47 | #define MSIC_THERM_ADC1CNTL3 0x1C2 | ||
48 | #define MSIC_ADCTHERM_ENBL 0x04 | ||
49 | #define MSIC_ADCRRDATA_ENBL 0x05 | ||
50 | #define MSIC_CHANL_MASK_VAL 0x0F | ||
51 | |||
52 | #define MSIC_STOPBIT_MASK 16 | ||
53 | #define MSIC_ADCTHERM_MASK 4 | ||
54 | #define ADC_CHANLS_MAX 15 /* Number of ADC channels */ | ||
55 | #define ADC_LOOP_MAX (ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS) | ||
56 | |||
57 | /* ADC channel code values */ | ||
58 | #define SKIN_SENSOR0_CODE 0x08 | ||
59 | #define SKIN_SENSOR1_CODE 0x09 | ||
60 | #define SYS_SENSOR_CODE 0x0A | ||
61 | #define MSIC_DIE_SENSOR_CODE 0x03 | ||
62 | |||
63 | #define SKIN_THERM_SENSOR0 0 | ||
64 | #define SKIN_THERM_SENSOR1 1 | ||
65 | #define SYS_THERM_SENSOR2 2 | ||
66 | #define MSIC_DIE_THERM_SENSOR3 3 | ||
67 | |||
68 | /* ADC code range */ | ||
69 | #define ADC_MAX 977 | ||
70 | #define ADC_MIN 162 | ||
71 | #define ADC_VAL0C 887 | ||
72 | #define ADC_VAL20C 720 | ||
73 | #define ADC_VAL40C 508 | ||
74 | #define ADC_VAL60C 315 | ||
75 | |||
76 | /* ADC base addresses */ | ||
77 | #define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */ | ||
78 | #define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */ | ||
79 | |||
80 | /* MSIC die attributes */ | ||
81 | #define MSIC_DIE_ADC_MIN 488 | ||
82 | #define MSIC_DIE_ADC_MAX 1004 | ||
83 | |||
84 | /* This holds the address of the first free ADC channel, | ||
85 | * among the 15 channels | ||
86 | */ | ||
87 | static int channel_index; | ||
88 | |||
89 | struct platform_info { | ||
90 | struct platform_device *pdev; | ||
91 | struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS]; | ||
92 | }; | ||
93 | |||
94 | struct thermal_device_info { | ||
95 | unsigned int chnl_addr; | ||
96 | int direct; | ||
97 | /* This holds the current temperature in millidegree celsius */ | ||
98 | long curr_temp; | ||
99 | }; | ||
100 | |||
101 | /** | ||
102 | * to_msic_die_temp - converts adc_val to msic_die temperature | ||
103 | * @adc_val: ADC value to be converted | ||
104 | * | ||
105 | * Can sleep | ||
106 | */ | ||
107 | static int to_msic_die_temp(uint16_t adc_val) | ||
108 | { | ||
109 | return (368 * (adc_val) / 1000) - 220; | ||
110 | } | ||
111 | |||
112 | /** | ||
113 | * is_valid_adc - checks whether the adc code is within the defined range | ||
114 | * @min: minimum value for the sensor | ||
115 | * @max: maximum value for the sensor | ||
116 | * | ||
117 | * Can sleep | ||
118 | */ | ||
119 | static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max) | ||
120 | { | ||
121 | return (adc_val >= min) && (adc_val <= max); | ||
122 | } | ||
123 | |||
124 | /** | ||
125 | * adc_to_temp - converts the ADC code to temperature in C | ||
126 | * @direct: true if ths channel is direct index | ||
127 | * @adc_val: the adc_val that needs to be converted | ||
128 | * @tp: temperature return value | ||
129 | * | ||
130 | * Linear approximation is used to covert the skin adc value into temperature. | ||
131 | * This technique is used to avoid very long look-up table to get | ||
132 | * the appropriate temp value from ADC value. | ||
133 | * The adc code vs sensor temp curve is split into five parts | ||
134 | * to achieve very close approximate temp value with less than | ||
135 | * 0.5C error | ||
136 | */ | ||
137 | static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp) | ||
138 | { | ||
139 | int temp; | ||
140 | |||
141 | /* Direct conversion for die temperature */ | ||
142 | if (direct) { | ||
143 | if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) { | ||
144 | *tp = to_msic_die_temp(adc_val) * 1000; | ||
145 | return 0; | ||
146 | } | ||
147 | return -ERANGE; | ||
148 | } | ||
149 | |||
150 | if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX)) | ||
151 | return -ERANGE; | ||
152 | |||
153 | /* Linear approximation for skin temperature */ | ||
154 | if (adc_val > ADC_VAL0C) | ||
155 | temp = 177 - (adc_val/5); | ||
156 | else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C)) | ||
157 | temp = 111 - (adc_val/8); | ||
158 | else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C)) | ||
159 | temp = 92 - (adc_val/10); | ||
160 | else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C)) | ||
161 | temp = 91 - (adc_val/10); | ||
162 | else | ||
163 | temp = 112 - (adc_val/6); | ||
164 | |||
165 | /* Convert temperature in celsius to milli degree celsius */ | ||
166 | *tp = temp * 1000; | ||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | /** | ||
171 | * mid_read_temp - read sensors for temperature | ||
172 | * @temp: holds the current temperature for the sensor after reading | ||
173 | * | ||
174 | * reads the adc_code from the channel and converts it to real | ||
175 | * temperature. The converted value is stored in temp. | ||
176 | * | ||
177 | * Can sleep | ||
178 | */ | ||
179 | static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp) | ||
180 | { | ||
181 | struct thermal_device_info *td_info = tzd->devdata; | ||
182 | uint16_t adc_val, addr; | ||
183 | uint8_t data = 0; | ||
184 | int ret; | ||
185 | unsigned long curr_temp; | ||
186 | |||
187 | |||
188 | addr = td_info->chnl_addr; | ||
189 | |||
190 | /* Enable the msic for conversion before reading */ | ||
191 | ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCRRDATA_ENBL); | ||
192 | if (ret) | ||
193 | return ret; | ||
194 | |||
195 | /* Re-toggle the RRDATARD bit (temporary workaround) */ | ||
196 | ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCTHERM_ENBL); | ||
197 | if (ret) | ||
198 | return ret; | ||
199 | |||
200 | /* Read the higher bits of data */ | ||
201 | ret = intel_scu_ipc_ioread8(addr, &data); | ||
202 | if (ret) | ||
203 | return ret; | ||
204 | |||
205 | /* Shift bits to accomodate the lower two data bits */ | ||
206 | adc_val = (data << 2); | ||
207 | addr++; | ||
208 | |||
209 | ret = intel_scu_ipc_ioread8(addr, &data);/* Read lower bits */ | ||
210 | if (ret) | ||
211 | return ret; | ||
212 | |||
213 | /* Adding lower two bits to the higher bits */ | ||
214 | data &= 03; | ||
215 | adc_val += data; | ||
216 | |||
217 | /* Convert ADC value to temperature */ | ||
218 | ret = adc_to_temp(td_info->direct, adc_val, &curr_temp); | ||
219 | if (ret == 0) | ||
220 | *temp = td_info->curr_temp = curr_temp; | ||
221 | return ret; | ||
222 | } | ||
223 | |||
224 | /** | ||
225 | * configure_adc - enables/disables the ADC for conversion | ||
226 | * @val: zero: disables the ADC non-zero:enables the ADC | ||
227 | * | ||
228 | * Enable/Disable the ADC depending on the argument | ||
229 | * | ||
230 | * Can sleep | ||
231 | */ | ||
232 | static int configure_adc(int val) | ||
233 | { | ||
234 | int ret; | ||
235 | uint8_t data; | ||
236 | |||
237 | ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data); | ||
238 | if (ret) | ||
239 | return ret; | ||
240 | |||
241 | if (val) { | ||
242 | /* Enable and start the ADC */ | ||
243 | data |= (MSIC_ADC_ENBL | MSIC_ADC_START); | ||
244 | } else { | ||
245 | /* Just stop the ADC */ | ||
246 | data &= (~MSIC_ADC_START); | ||
247 | } | ||
248 | |||
249 | return intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL1, data); | ||
250 | } | ||
251 | |||
252 | /** | ||
253 | * set_up_therm_channel - enable thermal channel for conversion | ||
254 | * @base_addr: index of free msic ADC channel | ||
255 | * | ||
256 | * Enable all the three channels for conversion | ||
257 | * | ||
258 | * Can sleep | ||
259 | */ | ||
260 | static int set_up_therm_channel(u16 base_addr) | ||
261 | { | ||
262 | int ret; | ||
263 | |||
264 | /* Enable all the sensor channels */ | ||
265 | ret = intel_scu_ipc_iowrite8(base_addr, SKIN_SENSOR0_CODE); | ||
266 | if (ret) | ||
267 | return ret; | ||
268 | |||
269 | ret = intel_scu_ipc_iowrite8(base_addr + 1, SKIN_SENSOR1_CODE); | ||
270 | if (ret) | ||
271 | return ret; | ||
272 | |||
273 | ret = intel_scu_ipc_iowrite8(base_addr + 2, SYS_SENSOR_CODE); | ||
274 | if (ret) | ||
275 | return ret; | ||
276 | |||
277 | /* Since this is the last channel, set the stop bit | ||
278 | to 1 by ORing the DIE_SENSOR_CODE with 0x10 */ | ||
279 | ret = intel_scu_ipc_iowrite8(base_addr + 3, | ||
280 | (MSIC_DIE_SENSOR_CODE | 0x10)); | ||
281 | if (ret) | ||
282 | return ret; | ||
283 | |||
284 | /* Enable ADC and start it */ | ||
285 | return configure_adc(1); | ||
286 | } | ||
287 | |||
288 | /** | ||
289 | * reset_stopbit - sets the stop bit to 0 on the given channel | ||
290 | * @addr: address of the channel | ||
291 | * | ||
292 | * Can sleep | ||
293 | */ | ||
294 | static int reset_stopbit(uint16_t addr) | ||
295 | { | ||
296 | int ret; | ||
297 | uint8_t data; | ||
298 | ret = intel_scu_ipc_ioread8(addr, &data); | ||
299 | if (ret) | ||
300 | return ret; | ||
301 | /* Set the stop bit to zero */ | ||
302 | return intel_scu_ipc_iowrite8(addr, (data & 0xEF)); | ||
303 | } | ||
304 | |||
305 | /** | ||
306 | * find_free_channel - finds an empty channel for conversion | ||
307 | * | ||
308 | * If the ADC is not enabled then start using 0th channel | ||
309 | * itself. Otherwise find an empty channel by looking for a | ||
310 | * channel in which the stopbit is set to 1. returns the index | ||
311 | * of the first free channel if succeeds or an error code. | ||
312 | * | ||
313 | * Context: can sleep | ||
314 | * | ||
315 | * FIXME: Ultimately the channel allocator will move into the intel_scu_ipc | ||
316 | * code. | ||
317 | */ | ||
318 | static int find_free_channel(void) | ||
319 | { | ||
320 | int ret; | ||
321 | int i; | ||
322 | uint8_t data; | ||
323 | |||
324 | /* check whether ADC is enabled */ | ||
325 | ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data); | ||
326 | if (ret) | ||
327 | return ret; | ||
328 | |||
329 | if ((data & MSIC_ADC_ENBL) == 0) | ||
330 | return 0; | ||
331 | |||
332 | /* ADC is already enabled; Looking for an empty channel */ | ||
333 | for (i = 0; i < ADC_CHANLS_MAX; i++) { | ||
334 | ret = intel_scu_ipc_ioread8(ADC_CHNL_START_ADDR + i, &data); | ||
335 | if (ret) | ||
336 | return ret; | ||
337 | |||
338 | if (data & MSIC_STOPBIT_MASK) { | ||
339 | ret = i; | ||
340 | break; | ||
341 | } | ||
342 | } | ||
343 | return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret; | ||
344 | } | ||
345 | |||
346 | /** | ||
347 | * mid_initialize_adc - initializing the ADC | ||
348 | * @dev: our device structure | ||
349 | * | ||
350 | * Initialize the ADC for reading thermistor values. Can sleep. | ||
351 | */ | ||
352 | static int mid_initialize_adc(struct device *dev) | ||
353 | { | ||
354 | u8 data; | ||
355 | u16 base_addr; | ||
356 | int ret; | ||
357 | |||
358 | /* | ||
359 | * Ensure that adctherm is disabled before we | ||
360 | * initialize the ADC | ||
361 | */ | ||
362 | ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL3, &data); | ||
363 | if (ret) | ||
364 | return ret; | ||
365 | |||
366 | if (data & MSIC_ADCTHERM_MASK) | ||
367 | dev_warn(dev, "ADCTHERM already set"); | ||
368 | |||
369 | /* Index of the first channel in which the stop bit is set */ | ||
370 | channel_index = find_free_channel(); | ||
371 | if (channel_index < 0) { | ||
372 | dev_err(dev, "No free ADC channels"); | ||
373 | return channel_index; | ||
374 | } | ||
375 | |||
376 | base_addr = ADC_CHNL_START_ADDR + channel_index; | ||
377 | |||
378 | if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) { | ||
379 | /* Reset stop bit for channels other than 0 and 12 */ | ||
380 | ret = reset_stopbit(base_addr); | ||
381 | if (ret) | ||
382 | return ret; | ||
383 | |||
384 | /* Index of the first free channel */ | ||
385 | base_addr++; | ||
386 | channel_index++; | ||
387 | } | ||
388 | |||
389 | ret = set_up_therm_channel(base_addr); | ||
390 | if (ret) { | ||
391 | dev_err(dev, "unable to enable ADC"); | ||
392 | return ret; | ||
393 | } | ||
394 | dev_dbg(dev, "ADC initialization successful"); | ||
395 | return ret; | ||
396 | } | ||
397 | |||
398 | /** | ||
399 | * initialize_sensor - sets default temp and timer ranges | ||
400 | * @index: index of the sensor | ||
401 | * | ||
402 | * Context: can sleep | ||
403 | */ | ||
404 | static struct thermal_device_info *initialize_sensor(int index) | ||
405 | { | ||
406 | struct thermal_device_info *td_info = | ||
407 | kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL); | ||
408 | |||
409 | if (!td_info) | ||
410 | return NULL; | ||
411 | |||
412 | /* Set the base addr of the channel for this sensor */ | ||
413 | td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index); | ||
414 | /* Sensor 3 is direct conversion */ | ||
415 | if (index == 3) | ||
416 | td_info->direct = 1; | ||
417 | return td_info; | ||
418 | } | ||
419 | |||
420 | /** | ||
421 | * mid_thermal_resume - resume routine | ||
422 | * @pdev: platform device structure | ||
423 | * | ||
424 | * mid thermal resume: re-initializes the adc. Can sleep. | ||
425 | */ | ||
426 | static int mid_thermal_resume(struct platform_device *pdev) | ||
427 | { | ||
428 | return mid_initialize_adc(&pdev->dev); | ||
429 | } | ||
430 | |||
431 | /** | ||
432 | * mid_thermal_suspend - suspend routine | ||
433 | * @pdev: platform device structure | ||
434 | * | ||
435 | * mid thermal suspend implements the suspend functionality | ||
436 | * by stopping the ADC. Can sleep. | ||
437 | */ | ||
438 | static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg) | ||
439 | { | ||
440 | /* | ||
441 | * This just stops the ADC and does not disable it. | ||
442 | * temporary workaround until we have a generic ADC driver. | ||
443 | * If 0 is passed, it disables the ADC. | ||
444 | */ | ||
445 | return configure_adc(0); | ||
446 | } | ||
447 | |||
/**
 * read_curr_temp - thermal framework get_temp hook
 * @tzd: thermal zone being queried
 * @temp: filled with the current temperature on success
 *
 * Thin wrapper around mid_read_temp(); warns if invoked without a zone.
 * Can sleep.
 */
static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
{
	WARN_ON(!tzd);
	return mid_read_temp(tzd, temp);
}
459 | |||
/*
 * Can't be const -- presumably because thermal_zone_device_register()
 * takes a non-const ops pointer on this kernel; confirm before
 * constifying.
 */
static struct thermal_zone_device_ops tzd_ops = {
	.get_temp = read_curr_temp,
};
464 | |||
465 | |||
466 | /** | ||
467 | * mid_thermal_probe - mfld thermal initialize | ||
468 | * @pdev: platform device structure | ||
469 | * | ||
470 | * mid thermal probe initializes the hardware and registers | ||
471 | * all the sensors with the generic thermal framework. Can sleep. | ||
472 | */ | ||
473 | static int mid_thermal_probe(struct platform_device *pdev) | ||
474 | { | ||
475 | static char *name[MSIC_THERMAL_SENSORS] = { | ||
476 | "skin0", "skin1", "sys", "msicdie" | ||
477 | }; | ||
478 | |||
479 | int ret; | ||
480 | int i; | ||
481 | struct platform_info *pinfo; | ||
482 | |||
483 | pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL); | ||
484 | if (!pinfo) | ||
485 | return -ENOMEM; | ||
486 | |||
487 | /* Initializing the hardware */ | ||
488 | ret = mid_initialize_adc(&pdev->dev); | ||
489 | if (ret) { | ||
490 | dev_err(&pdev->dev, "ADC init failed"); | ||
491 | kfree(pinfo); | ||
492 | return ret; | ||
493 | } | ||
494 | |||
495 | /* Register each sensor with the generic thermal framework*/ | ||
496 | for (i = 0; i < MSIC_THERMAL_SENSORS; i++) { | ||
497 | pinfo->tzd[i] = thermal_zone_device_register(name[i], | ||
498 | 0, initialize_sensor(i), | ||
499 | &tzd_ops, 0, 0, 0, 0); | ||
500 | if (IS_ERR(pinfo->tzd[i])) | ||
501 | goto reg_fail; | ||
502 | } | ||
503 | |||
504 | pinfo->pdev = pdev; | ||
505 | platform_set_drvdata(pdev, pinfo); | ||
506 | return 0; | ||
507 | |||
508 | reg_fail: | ||
509 | ret = PTR_ERR(pinfo->tzd[i]); | ||
510 | while (--i >= 0) | ||
511 | thermal_zone_device_unregister(pinfo->tzd[i]); | ||
512 | configure_adc(0); | ||
513 | kfree(pinfo); | ||
514 | return ret; | ||
515 | } | ||
516 | |||
517 | /** | ||
518 | * mid_thermal_remove - mfld thermal finalize | ||
519 | * @dev: platform device structure | ||
520 | * | ||
521 | * MLFD thermal remove unregisters all the sensors from the generic | ||
522 | * thermal framework. Can sleep. | ||
523 | */ | ||
524 | static int mid_thermal_remove(struct platform_device *pdev) | ||
525 | { | ||
526 | int i; | ||
527 | struct platform_info *pinfo = platform_get_drvdata(pdev); | ||
528 | |||
529 | for (i = 0; i < MSIC_THERMAL_SENSORS; i++) | ||
530 | thermal_zone_device_unregister(pinfo->tzd[i]); | ||
531 | |||
532 | platform_set_drvdata(pdev, NULL); | ||
533 | |||
534 | /* Stop the ADC */ | ||
535 | return configure_adc(0); | ||
536 | } | ||
537 | |||
538 | /********************************************************************* | ||
539 | * Driver initialisation and finalization | ||
540 | *********************************************************************/ | ||
541 | |||
542 | #define DRIVER_NAME "msic_sensor" | ||
543 | |||
/* Matched against the platform device name ("msic_sensor") at bind time */
static const struct platform_device_id therm_id_table[] = {
	{ DRIVER_NAME, 1 },
	{ }	/* sentinel */
};
548 | |||
static struct platform_driver mid_thermal_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
	},
	.probe = mid_thermal_probe,
	.suspend = mid_thermal_suspend,
	.resume = mid_thermal_resume,
	/*
	 * NOTE(review): wrapped in __devexit_p() although
	 * mid_thermal_remove above is not marked __devexit -- harmless,
	 * but confirm the intended section annotations.
	 */
	.remove = __devexit_p(mid_thermal_remove),
	.id_table = therm_id_table,
};
560 | |||
/* Register the platform driver at module load */
static int __init mid_thermal_module_init(void)
{
	return platform_driver_register(&mid_thermal_driver);
}
565 | |||
/* Unregister the platform driver at module unload */
static void __exit mid_thermal_module_exit(void)
{
	platform_driver_unregister(&mid_thermal_driver);
}
570 | |||
/* Standard module entry/exit points and metadata */
module_init(mid_thermal_module_init);
module_exit(mid_thermal_module_exit);

MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>");
MODULE_DESCRIPTION("Intel Medfield Platform Thermal Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c index 61433d492862..d653104b59cb 100644 --- a/drivers/platform/x86/intel_pmic_gpio.c +++ b/drivers/platform/x86/intel_pmic_gpio.c | |||
@@ -257,9 +257,11 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) | |||
257 | } | 257 | } |
258 | 258 | ||
259 | for (i = 0; i < 8; i++) { | 259 | for (i = 0; i < 8; i++) { |
260 | set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, | 260 | irq_set_chip_and_handler_name(i + pg->irq_base, |
261 | handle_simple_irq, "demux"); | 261 | &pmic_irqchip, |
262 | set_irq_chip_data(i + pg->irq_base, pg); | 262 | handle_simple_irq, |
263 | "demux"); | ||
264 | irq_set_chip_data(i + pg->irq_base, pg); | ||
263 | } | 265 | } |
264 | return 0; | 266 | return 0; |
265 | err: | 267 | err: |
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c index 2b11a33325e6..bde47e9080cd 100644 --- a/drivers/platform/x86/intel_rar_register.c +++ b/drivers/platform/x86/intel_rar_register.c | |||
@@ -485,7 +485,7 @@ EXPORT_SYMBOL(rar_lock); | |||
485 | * | 485 | * |
486 | * The register_rar function is to used by other device drivers | 486 | * The register_rar function is to used by other device drivers |
487 | * to ensure that this driver is ready. As we cannot be sure of | 487 | * to ensure that this driver is ready. As we cannot be sure of |
488 | * the compile/execute order of drivers in ther kernel, it is | 488 | * the compile/execute order of drivers in the kernel, it is |
489 | * best to give this driver a callback function to call when | 489 | * best to give this driver a callback function to call when |
490 | * it is ready to give out addresses. The callback function | 490 | * it is ready to give out addresses. The callback function |
491 | * would have those steps that continue the initialization of | 491 | * would have those steps that continue the initialization of |
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index a91d510a798b..940accbe28d3 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * as published by the Free Software Foundation; version 2 | 9 | * as published by the Free Software Foundation; version 2 |
10 | * of the License. | 10 | * of the License. |
11 | * | 11 | * |
12 | * SCU runing in ARC processor communicates with other entity running in IA | 12 | * SCU running in ARC processor communicates with other entity running in IA |
13 | * core through IPC mechanism which in turn messaging between IA core ad SCU. | 13 | * core through IPC mechanism which in turn messaging between IA core ad SCU. |
14 | * SCU has two IPC mechanism IPC-1 and IPC-2. IPC-1 is used between IA32 and | 14 | * SCU has two IPC mechanism IPC-1 and IPC-2. IPC-1 is used between IA32 and |
15 | * SCU where IPC-2 is used between P-Unit and SCU. This driver delas with | 15 | * SCU where IPC-2 is used between P-Unit and SCU. This driver delas with |
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index 142d38579314..23fb2afda00b 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c | |||
@@ -51,6 +51,8 @@ | |||
51 | * laptop as MSI S270. YMMV. | 51 | * laptop as MSI S270. YMMV. |
52 | */ | 52 | */ |
53 | 53 | ||
54 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
55 | |||
54 | #include <linux/module.h> | 56 | #include <linux/module.h> |
55 | #include <linux/kernel.h> | 57 | #include <linux/kernel.h> |
56 | #include <linux/init.h> | 58 | #include <linux/init.h> |
@@ -60,6 +62,8 @@ | |||
60 | #include <linux/platform_device.h> | 62 | #include <linux/platform_device.h> |
61 | #include <linux/rfkill.h> | 63 | #include <linux/rfkill.h> |
62 | #include <linux/i8042.h> | 64 | #include <linux/i8042.h> |
65 | #include <linux/input.h> | ||
66 | #include <linux/input/sparse-keymap.h> | ||
63 | 67 | ||
64 | #define MSI_DRIVER_VERSION "0.5" | 68 | #define MSI_DRIVER_VERSION "0.5" |
65 | 69 | ||
@@ -78,6 +82,9 @@ | |||
78 | #define MSI_STANDARD_EC_SCM_LOAD_ADDRESS 0x2d | 82 | #define MSI_STANDARD_EC_SCM_LOAD_ADDRESS 0x2d |
79 | #define MSI_STANDARD_EC_SCM_LOAD_MASK (1 << 0) | 83 | #define MSI_STANDARD_EC_SCM_LOAD_MASK (1 << 0) |
80 | 84 | ||
85 | #define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4 | ||
86 | #define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4) | ||
87 | |||
81 | static int msi_laptop_resume(struct platform_device *device); | 88 | static int msi_laptop_resume(struct platform_device *device); |
82 | 89 | ||
83 | #define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f | 90 | #define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f |
@@ -90,6 +97,14 @@ static int auto_brightness; | |||
90 | module_param(auto_brightness, int, 0); | 97 | module_param(auto_brightness, int, 0); |
91 | MODULE_PARM_DESC(auto_brightness, "Enable automatic brightness control (0: disabled; 1: enabled; 2: don't touch)"); | 98 | MODULE_PARM_DESC(auto_brightness, "Enable automatic brightness control (0: disabled; 1: enabled; 2: don't touch)"); |
92 | 99 | ||
100 | static const struct key_entry msi_laptop_keymap[] = { | ||
101 | {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} }, /* Touch Pad On */ | ||
102 | {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },/* Touch Pad Off */ | ||
103 | {KE_END, 0} | ||
104 | }; | ||
105 | |||
106 | static struct input_dev *msi_laptop_input_dev; | ||
107 | |||
93 | static bool old_ec_model; | 108 | static bool old_ec_model; |
94 | static int wlan_s, bluetooth_s, threeg_s; | 109 | static int wlan_s, bluetooth_s, threeg_s; |
95 | static int threeg_exists; | 110 | static int threeg_exists; |
@@ -432,8 +447,7 @@ static struct platform_device *msipf_device; | |||
432 | 447 | ||
433 | static int dmi_check_cb(const struct dmi_system_id *id) | 448 | static int dmi_check_cb(const struct dmi_system_id *id) |
434 | { | 449 | { |
435 | printk(KERN_INFO "msi-laptop: Identified laptop model '%s'.\n", | 450 | pr_info("Identified laptop model '%s'.\n", id->ident); |
436 | id->ident); | ||
437 | return 1; | 451 | return 1; |
438 | } | 452 | } |
439 | 453 | ||
@@ -605,6 +619,21 @@ static void msi_update_rfkill(struct work_struct *ignored) | |||
605 | } | 619 | } |
606 | static DECLARE_DELAYED_WORK(msi_rfkill_work, msi_update_rfkill); | 620 | static DECLARE_DELAYED_WORK(msi_rfkill_work, msi_update_rfkill); |
607 | 621 | ||
622 | static void msi_send_touchpad_key(struct work_struct *ignored) | ||
623 | { | ||
624 | u8 rdata; | ||
625 | int result; | ||
626 | |||
627 | result = ec_read(MSI_STANDARD_EC_TOUCHPAD_ADDRESS, &rdata); | ||
628 | if (result < 0) | ||
629 | return; | ||
630 | |||
631 | sparse_keymap_report_event(msi_laptop_input_dev, | ||
632 | (rdata & MSI_STANDARD_EC_TOUCHPAD_MASK) ? | ||
633 | KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF, 1, true); | ||
634 | } | ||
635 | static DECLARE_DELAYED_WORK(msi_touchpad_work, msi_send_touchpad_key); | ||
636 | |||
608 | static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, | 637 | static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, |
609 | struct serio *port) | 638 | struct serio *port) |
610 | { | 639 | { |
@@ -613,12 +642,17 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, | |||
613 | if (str & 0x20) | 642 | if (str & 0x20) |
614 | return false; | 643 | return false; |
615 | 644 | ||
616 | /* 0x54 wwan, 0x62 bluetooth, 0x76 wlan*/ | 645 | /* 0x54 wwan, 0x62 bluetooth, 0x76 wlan, 0xE4 touchpad toggle*/ |
617 | if (unlikely(data == 0xe0)) { | 646 | if (unlikely(data == 0xe0)) { |
618 | extended = true; | 647 | extended = true; |
619 | return false; | 648 | return false; |
620 | } else if (unlikely(extended)) { | 649 | } else if (unlikely(extended)) { |
650 | extended = false; | ||
621 | switch (data) { | 651 | switch (data) { |
652 | case 0xE4: | ||
653 | schedule_delayed_work(&msi_touchpad_work, | ||
654 | round_jiffies_relative(0.5 * HZ)); | ||
655 | break; | ||
622 | case 0x54: | 656 | case 0x54: |
623 | case 0x62: | 657 | case 0x62: |
624 | case 0x76: | 658 | case 0x76: |
@@ -626,7 +660,6 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, | |||
626 | round_jiffies_relative(0.5 * HZ)); | 660 | round_jiffies_relative(0.5 * HZ)); |
627 | break; | 661 | break; |
628 | } | 662 | } |
629 | extended = false; | ||
630 | } | 663 | } |
631 | 664 | ||
632 | return false; | 665 | return false; |
@@ -731,6 +764,42 @@ static int msi_laptop_resume(struct platform_device *device) | |||
731 | return 0; | 764 | return 0; |
732 | } | 765 | } |
733 | 766 | ||
767 | static int __init msi_laptop_input_setup(void) | ||
768 | { | ||
769 | int err; | ||
770 | |||
771 | msi_laptop_input_dev = input_allocate_device(); | ||
772 | if (!msi_laptop_input_dev) | ||
773 | return -ENOMEM; | ||
774 | |||
775 | msi_laptop_input_dev->name = "MSI Laptop hotkeys"; | ||
776 | msi_laptop_input_dev->phys = "msi-laptop/input0"; | ||
777 | msi_laptop_input_dev->id.bustype = BUS_HOST; | ||
778 | |||
779 | err = sparse_keymap_setup(msi_laptop_input_dev, | ||
780 | msi_laptop_keymap, NULL); | ||
781 | if (err) | ||
782 | goto err_free_dev; | ||
783 | |||
784 | err = input_register_device(msi_laptop_input_dev); | ||
785 | if (err) | ||
786 | goto err_free_keymap; | ||
787 | |||
788 | return 0; | ||
789 | |||
790 | err_free_keymap: | ||
791 | sparse_keymap_free(msi_laptop_input_dev); | ||
792 | err_free_dev: | ||
793 | input_free_device(msi_laptop_input_dev); | ||
794 | return err; | ||
795 | } | ||
796 | |||
797 | static void msi_laptop_input_destroy(void) | ||
798 | { | ||
799 | sparse_keymap_free(msi_laptop_input_dev); | ||
800 | input_unregister_device(msi_laptop_input_dev); | ||
801 | } | ||
802 | |||
734 | static int load_scm_model_init(struct platform_device *sdev) | 803 | static int load_scm_model_init(struct platform_device *sdev) |
735 | { | 804 | { |
736 | u8 data; | 805 | u8 data; |
@@ -759,16 +828,23 @@ static int load_scm_model_init(struct platform_device *sdev) | |||
759 | if (result < 0) | 828 | if (result < 0) |
760 | goto fail_rfkill; | 829 | goto fail_rfkill; |
761 | 830 | ||
831 | /* setup input device */ | ||
832 | result = msi_laptop_input_setup(); | ||
833 | if (result) | ||
834 | goto fail_input; | ||
835 | |||
762 | result = i8042_install_filter(msi_laptop_i8042_filter); | 836 | result = i8042_install_filter(msi_laptop_i8042_filter); |
763 | if (result) { | 837 | if (result) { |
764 | printk(KERN_ERR | 838 | pr_err("Unable to install key filter\n"); |
765 | "msi-laptop: Unable to install key filter\n"); | ||
766 | goto fail_filter; | 839 | goto fail_filter; |
767 | } | 840 | } |
768 | 841 | ||
769 | return 0; | 842 | return 0; |
770 | 843 | ||
771 | fail_filter: | 844 | fail_filter: |
845 | msi_laptop_input_destroy(); | ||
846 | |||
847 | fail_input: | ||
772 | rfkill_cleanup(); | 848 | rfkill_cleanup(); |
773 | 849 | ||
774 | fail_rfkill: | 850 | fail_rfkill: |
@@ -799,7 +875,7 @@ static int __init msi_init(void) | |||
799 | /* Register backlight stuff */ | 875 | /* Register backlight stuff */ |
800 | 876 | ||
801 | if (acpi_video_backlight_support()) { | 877 | if (acpi_video_backlight_support()) { |
802 | printk(KERN_INFO "MSI: Brightness ignored, must be controlled " | 878 | pr_info("Brightness ignored, must be controlled " |
803 | "by ACPI video driver\n"); | 879 | "by ACPI video driver\n"); |
804 | } else { | 880 | } else { |
805 | struct backlight_properties props; | 881 | struct backlight_properties props; |
@@ -854,7 +930,7 @@ static int __init msi_init(void) | |||
854 | if (auto_brightness != 2) | 930 | if (auto_brightness != 2) |
855 | set_auto_brightness(auto_brightness); | 931 | set_auto_brightness(auto_brightness); |
856 | 932 | ||
857 | printk(KERN_INFO "msi-laptop: driver "MSI_DRIVER_VERSION" successfully loaded.\n"); | 933 | pr_info("driver "MSI_DRIVER_VERSION" successfully loaded.\n"); |
858 | 934 | ||
859 | return 0; | 935 | return 0; |
860 | 936 | ||
@@ -886,6 +962,7 @@ static void __exit msi_cleanup(void) | |||
886 | { | 962 | { |
887 | if (load_scm_model) { | 963 | if (load_scm_model) { |
888 | i8042_remove_filter(msi_laptop_i8042_filter); | 964 | i8042_remove_filter(msi_laptop_i8042_filter); |
965 | msi_laptop_input_destroy(); | ||
889 | cancel_delayed_work_sync(&msi_rfkill_work); | 966 | cancel_delayed_work_sync(&msi_rfkill_work); |
890 | rfkill_cleanup(); | 967 | rfkill_cleanup(); |
891 | } | 968 | } |
@@ -901,7 +978,7 @@ static void __exit msi_cleanup(void) | |||
901 | if (auto_brightness != 2) | 978 | if (auto_brightness != 2) |
902 | set_auto_brightness(1); | 979 | set_auto_brightness(1); |
903 | 980 | ||
904 | printk(KERN_INFO "msi-laptop: driver unloaded.\n"); | 981 | pr_info("driver unloaded.\n"); |
905 | } | 982 | } |
906 | 983 | ||
907 | module_init(msi_init); | 984 | module_init(msi_init); |
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c new file mode 100644 index 000000000000..de434c6dc2d6 --- /dev/null +++ b/drivers/platform/x86/samsung-laptop.c | |||
@@ -0,0 +1,832 @@ | |||
1 | /* | ||
2 | * Samsung Laptop driver | ||
3 | * | ||
4 | * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de) | ||
5 | * Copyright (C) 2009,2011 Novell Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | */ | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/pci.h> | ||
19 | #include <linux/backlight.h> | ||
20 | #include <linux/fb.h> | ||
21 | #include <linux/dmi.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/rfkill.h> | ||
24 | |||
25 | /* | ||
26 | * This driver is needed because a number of Samsung laptops do not hook | ||
27 | * their control settings through ACPI. So we have to poke around in the | ||
28 | * BIOS to do things like brightness values, and "special" key controls. | ||
29 | */ | ||
30 | |||
31 | /* | ||
32 | * We have 0 - 8 as valid brightness levels. The specs say that level 0 should | ||
33 | * be reserved by the BIOS (which really doesn't make much sense), we tell | ||
34 | * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8 | ||
35 | */ | ||
36 | #define MAX_BRIGHT 0x07 | ||
37 | |||
38 | |||
39 | #define SABI_IFACE_MAIN 0x00 | ||
40 | #define SABI_IFACE_SUB 0x02 | ||
41 | #define SABI_IFACE_COMPLETE 0x04 | ||
42 | #define SABI_IFACE_DATA 0x05 | ||
43 | |||
/*
 * Structure to get data back to the calling function.  The buffer is
 * sized for a full 20-byte SABI reply, but sabi_get_command() currently
 * only fills in the first four bytes.
 */
struct sabi_retval {
	u8 retval[20];
};
48 | |||
/*
 * Byte offsets into the SABI header of the values needed to talk to
 * the BIOS; they differ between BIOS flavours (see sabi_configs[]).
 */
struct sabi_header_offsets {
	u8 port;	/* location of the I/O port used to trigger commands */
	u8 re_mem;	/* value written to re-protect interface memory */
	u8 iface_func;	/* value written to execute the queued command */
	u8 en_mem;	/* value written to enable writes to interface memory */
	u8 data_offset;	/* assumed: offset of the data area -- TODO confirm */
	u8 data_segment;/* assumed: segment of the data area -- TODO confirm */
};
57 | |||
58 | struct sabi_commands { | ||
59 | /* | ||
60 | * Brightness is 0 - 8, as described above. | ||
61 | * Value 0 is for the BIOS to use | ||
62 | */ | ||
63 | u8 get_brightness; | ||
64 | u8 set_brightness; | ||
65 | |||
66 | /* | ||
67 | * first byte: | ||
68 | * 0x00 - wireless is off | ||
69 | * 0x01 - wireless is on | ||
70 | * second byte: | ||
71 | * 0x02 - 3G is off | ||
72 | * 0x03 - 3G is on | ||
73 | * TODO, verify 3G is correct, that doesn't seem right... | ||
74 | */ | ||
75 | u8 get_wireless_button; | ||
76 | u8 set_wireless_button; | ||
77 | |||
78 | /* 0 is off, 1 is on */ | ||
79 | u8 get_backlight; | ||
80 | u8 set_backlight; | ||
81 | |||
82 | /* | ||
83 | * 0x80 or 0x00 - no action | ||
84 | * 0x81 - recovery key pressed | ||
85 | */ | ||
86 | u8 get_recovery_mode; | ||
87 | u8 set_recovery_mode; | ||
88 | |||
89 | /* | ||
90 | * on seclinux: 0 is low, 1 is high, | ||
91 | * on swsmi: 0 is normal, 1 is silent, 2 is turbo | ||
92 | */ | ||
93 | u8 get_performance_level; | ||
94 | u8 set_performance_level; | ||
95 | |||
96 | /* | ||
97 | * Tell the BIOS that Linux is running on this machine. | ||
98 | * 81 is on, 80 is off | ||
99 | */ | ||
100 | u8 set_linux; | ||
101 | }; | ||
102 | |||
/* A named performance level and the raw value the BIOS uses for it */
struct sabi_performance_level {
	const char *name;
	u8 value;
};
107 | |||
/*
 * Per-BIOS-flavour description: magic string, main function number,
 * header layout, command numbers and performance levels all differ
 * between the "SECLINUX" and "SwSmi@" interfaces.
 */
struct sabi_config {
	/* presumably matched against BIOS memory to select a config --
	 * the lookup code is outside this view; confirm against caller */
	const char *test_string;
	u16 main_function;	/* written to SABI_IFACE_MAIN per command */
	const struct sabi_header_offsets header_offsets;
	const struct sabi_commands commands;
	const struct sabi_performance_level performance_levels[4];
	u8 min_brightness;	/* lowest raw level the BIOS accepts */
	u8 max_brightness;
};
117 | |||
118 | static const struct sabi_config sabi_configs[] = { | ||
119 | { | ||
120 | .test_string = "SECLINUX", | ||
121 | |||
122 | .main_function = 0x4c49, | ||
123 | |||
124 | .header_offsets = { | ||
125 | .port = 0x00, | ||
126 | .re_mem = 0x02, | ||
127 | .iface_func = 0x03, | ||
128 | .en_mem = 0x04, | ||
129 | .data_offset = 0x05, | ||
130 | .data_segment = 0x07, | ||
131 | }, | ||
132 | |||
133 | .commands = { | ||
134 | .get_brightness = 0x00, | ||
135 | .set_brightness = 0x01, | ||
136 | |||
137 | .get_wireless_button = 0x02, | ||
138 | .set_wireless_button = 0x03, | ||
139 | |||
140 | .get_backlight = 0x04, | ||
141 | .set_backlight = 0x05, | ||
142 | |||
143 | .get_recovery_mode = 0x06, | ||
144 | .set_recovery_mode = 0x07, | ||
145 | |||
146 | .get_performance_level = 0x08, | ||
147 | .set_performance_level = 0x09, | ||
148 | |||
149 | .set_linux = 0x0a, | ||
150 | }, | ||
151 | |||
152 | .performance_levels = { | ||
153 | { | ||
154 | .name = "silent", | ||
155 | .value = 0, | ||
156 | }, | ||
157 | { | ||
158 | .name = "normal", | ||
159 | .value = 1, | ||
160 | }, | ||
161 | { }, | ||
162 | }, | ||
163 | .min_brightness = 1, | ||
164 | .max_brightness = 8, | ||
165 | }, | ||
166 | { | ||
167 | .test_string = "SwSmi@", | ||
168 | |||
169 | .main_function = 0x5843, | ||
170 | |||
171 | .header_offsets = { | ||
172 | .port = 0x00, | ||
173 | .re_mem = 0x04, | ||
174 | .iface_func = 0x02, | ||
175 | .en_mem = 0x03, | ||
176 | .data_offset = 0x05, | ||
177 | .data_segment = 0x07, | ||
178 | }, | ||
179 | |||
180 | .commands = { | ||
181 | .get_brightness = 0x10, | ||
182 | .set_brightness = 0x11, | ||
183 | |||
184 | .get_wireless_button = 0x12, | ||
185 | .set_wireless_button = 0x13, | ||
186 | |||
187 | .get_backlight = 0x2d, | ||
188 | .set_backlight = 0x2e, | ||
189 | |||
190 | .get_recovery_mode = 0xff, | ||
191 | .set_recovery_mode = 0xff, | ||
192 | |||
193 | .get_performance_level = 0x31, | ||
194 | .set_performance_level = 0x32, | ||
195 | |||
196 | .set_linux = 0xff, | ||
197 | }, | ||
198 | |||
199 | .performance_levels = { | ||
200 | { | ||
201 | .name = "normal", | ||
202 | .value = 0, | ||
203 | }, | ||
204 | { | ||
205 | .name = "silent", | ||
206 | .value = 1, | ||
207 | }, | ||
208 | { | ||
209 | .name = "overclock", | ||
210 | .value = 2, | ||
211 | }, | ||
212 | { }, | ||
213 | }, | ||
214 | .min_brightness = 0, | ||
215 | .max_brightness = 8, | ||
216 | }, | ||
217 | { }, | ||
218 | }; | ||
219 | |||
/* Config entry (from sabi_configs[]) matching this machine's BIOS */
static const struct sabi_config *sabi_config;

static void __iomem *sabi;		/* mapping of the SABI header */
static void __iomem *sabi_iface;	/* mapping of the command/data area */
static void __iomem *f0000_segment;	/* legacy F0000h BIOS segment (set up elsewhere) */
static struct backlight_device *backlight_device;
static struct mutex sabi_mutex;		/* serializes all SABI transactions */
static struct platform_device *sdev;
static struct rfkill *rfk;

static int force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force,
	"Disable the DMI check and forces the driver to be loaded");

/*
 * NOTE(review): module_param(..., bool, ...) with int storage is
 * accepted by older kernels but newer ones require a real bool --
 * confirm the target kernel version.
 */
static int debug;
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
238 | |||
/*
 * sabi_get_command() - execute a SABI command and read back its reply
 * @command:	command number taken from sabi_config->commands
 * @sretval:	output; filled with the first four reply bytes on success
 *
 * One mailbox transaction: unprotect the interface memory, queue the
 * command, trigger it via the I/O port, re-protect the memory, then
 * check the completion/data bytes.  Serialized against
 * sabi_set_command() by sabi_mutex; can sleep.
 *
 * Returns 0 on success, or -EINVAL when the BIOS did not acknowledge
 * (completion flag != 0xaa, or the data byte is the 0xff failure
 * marker).
 */
static int sabi_get_command(u8 command, struct sabi_retval *sretval)
{
	int retval = 0;
	u16 port = readw(sabi + sabi_config->header_offsets.port);
	u8 complete, iface_data;

	mutex_lock(&sabi_mutex);

	/* enable memory to be able to write to it */
	outb(readb(sabi + sabi_config->header_offsets.en_mem), port);

	/* write out the command */
	writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
	writew(command, sabi_iface + SABI_IFACE_SUB);
	writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
	outb(readb(sabi + sabi_config->header_offsets.iface_func), port);

	/* write protect memory to make it safe */
	outb(readb(sabi + sabi_config->header_offsets.re_mem), port);

	/* see if the command actually succeeded */
	complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
	iface_data = readb(sabi_iface + SABI_IFACE_DATA);
	if (complete != 0xaa || iface_data == 0xff) {
		pr_warn("SABI get command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
		        command, complete, iface_data);
		retval = -EINVAL;
		goto exit;
	}
	/*
	 * Save off the data into a structure so the caller use it.
	 * Right now we only want the first 4 bytes,
	 * There are commands that need more, but not for the ones we
	 * currently care about.
	 */
	sretval->retval[0] = readb(sabi_iface + SABI_IFACE_DATA);
	sretval->retval[1] = readb(sabi_iface + SABI_IFACE_DATA + 1);
	sretval->retval[2] = readb(sabi_iface + SABI_IFACE_DATA + 2);
	sretval->retval[3] = readb(sabi_iface + SABI_IFACE_DATA + 3);

exit:
	mutex_unlock(&sabi_mutex);
	return retval;

}
284 | |||
/*
 * sabi_set_command() - execute a SABI command carrying one data byte
 * @command:	command number taken from sabi_config->commands
 * @data:	payload byte placed in the interface data area
 *
 * Same mailbox sequence as sabi_get_command(), except a data byte is
 * written before triggering and no reply data is read back.
 * Serialized by sabi_mutex; can sleep.
 *
 * Returns 0 on success or -EINVAL when the BIOS did not acknowledge.
 */
static int sabi_set_command(u8 command, u8 data)
{
	int retval = 0;
	u16 port = readw(sabi + sabi_config->header_offsets.port);
	u8 complete, iface_data;

	mutex_lock(&sabi_mutex);

	/* enable memory to be able to write to it */
	outb(readb(sabi + sabi_config->header_offsets.en_mem), port);

	/* write out the command */
	writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
	writew(command, sabi_iface + SABI_IFACE_SUB);
	writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
	writeb(data, sabi_iface + SABI_IFACE_DATA);
	outb(readb(sabi + sabi_config->header_offsets.iface_func), port);

	/* write protect memory to make it safe */
	outb(readb(sabi + sabi_config->header_offsets.re_mem), port);

	/* see if the command actually succeeded */
	complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
	iface_data = readb(sabi_iface + SABI_IFACE_DATA);
	if (complete != 0xaa || iface_data == 0xff) {
		pr_warn("SABI set command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
		       command, complete, iface_data);
		retval = -EINVAL;
	}

	mutex_unlock(&sabi_mutex);
	return retval;
}
318 | |||
319 | static void test_backlight(void) | ||
320 | { | ||
321 | struct sabi_retval sretval; | ||
322 | |||
323 | sabi_get_command(sabi_config->commands.get_backlight, &sretval); | ||
324 | printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); | ||
325 | |||
326 | sabi_set_command(sabi_config->commands.set_backlight, 0); | ||
327 | printk(KERN_DEBUG "backlight should be off\n"); | ||
328 | |||
329 | sabi_get_command(sabi_config->commands.get_backlight, &sretval); | ||
330 | printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); | ||
331 | |||
332 | msleep(1000); | ||
333 | |||
334 | sabi_set_command(sabi_config->commands.set_backlight, 1); | ||
335 | printk(KERN_DEBUG "backlight should be on\n"); | ||
336 | |||
337 | sabi_get_command(sabi_config->commands.get_backlight, &sretval); | ||
338 | printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); | ||
339 | } | ||
340 | |||
341 | static void test_wireless(void) | ||
342 | { | ||
343 | struct sabi_retval sretval; | ||
344 | |||
345 | sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); | ||
346 | printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); | ||
347 | |||
348 | sabi_set_command(sabi_config->commands.set_wireless_button, 0); | ||
349 | printk(KERN_DEBUG "wireless led should be off\n"); | ||
350 | |||
351 | sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); | ||
352 | printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); | ||
353 | |||
354 | msleep(1000); | ||
355 | |||
356 | sabi_set_command(sabi_config->commands.set_wireless_button, 1); | ||
357 | printk(KERN_DEBUG "wireless led should be on\n"); | ||
358 | |||
359 | sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); | ||
360 | printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); | ||
361 | } | ||
362 | |||
363 | static u8 read_brightness(void) | ||
364 | { | ||
365 | struct sabi_retval sretval; | ||
366 | int user_brightness = 0; | ||
367 | int retval; | ||
368 | |||
369 | retval = sabi_get_command(sabi_config->commands.get_brightness, | ||
370 | &sretval); | ||
371 | if (!retval) { | ||
372 | user_brightness = sretval.retval[0]; | ||
373 | if (user_brightness != 0) | ||
374 | user_brightness -= sabi_config->min_brightness; | ||
375 | } | ||
376 | return user_brightness; | ||
377 | } | ||
378 | |||
379 | static void set_brightness(u8 user_brightness) | ||
380 | { | ||
381 | u8 user_level = user_brightness - sabi_config->min_brightness; | ||
382 | |||
383 | sabi_set_command(sabi_config->commands.set_brightness, user_level); | ||
384 | } | ||
385 | |||
/* backlight_ops hook: report the current brightness to the core. */
static int get_brightness(struct backlight_device *bd)
{
	return read_brightness();
}
390 | |||
391 | static int update_status(struct backlight_device *bd) | ||
392 | { | ||
393 | set_brightness(bd->props.brightness); | ||
394 | |||
395 | if (bd->props.power == FB_BLANK_UNBLANK) | ||
396 | sabi_set_command(sabi_config->commands.set_backlight, 1); | ||
397 | else | ||
398 | sabi_set_command(sabi_config->commands.set_backlight, 0); | ||
399 | return 0; | ||
400 | } | ||
401 | |||
/* Hooks wiring the SABI brightness helpers into the backlight core. */
static const struct backlight_ops backlight_ops = {
	.get_brightness = get_brightness,
	.update_status = update_status,
};
406 | |||
407 | static int rfkill_set(void *data, bool blocked) | ||
408 | { | ||
409 | /* Do something with blocked...*/ | ||
410 | /* | ||
411 | * blocked == false is on | ||
412 | * blocked == true is off | ||
413 | */ | ||
414 | if (blocked) | ||
415 | sabi_set_command(sabi_config->commands.set_wireless_button, 0); | ||
416 | else | ||
417 | sabi_set_command(sabi_config->commands.set_wireless_button, 1); | ||
418 | |||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | static struct rfkill_ops rfkill_ops = { | ||
423 | .set_block = rfkill_set, | ||
424 | }; | ||
425 | |||
426 | static int init_wireless(struct platform_device *sdev) | ||
427 | { | ||
428 | int retval; | ||
429 | |||
430 | rfk = rfkill_alloc("samsung-wifi", &sdev->dev, RFKILL_TYPE_WLAN, | ||
431 | &rfkill_ops, NULL); | ||
432 | if (!rfk) | ||
433 | return -ENOMEM; | ||
434 | |||
435 | retval = rfkill_register(rfk); | ||
436 | if (retval) { | ||
437 | rfkill_destroy(rfk); | ||
438 | return -ENODEV; | ||
439 | } | ||
440 | |||
441 | return 0; | ||
442 | } | ||
443 | |||
/* Tear down the rfkill switch created by init_wireless(). */
static void destroy_wireless(void)
{
	rfkill_unregister(rfk);
	rfkill_destroy(rfk);
}
449 | |||
450 | static ssize_t get_performance_level(struct device *dev, | ||
451 | struct device_attribute *attr, char *buf) | ||
452 | { | ||
453 | struct sabi_retval sretval; | ||
454 | int retval; | ||
455 | int i; | ||
456 | |||
457 | /* Read the state */ | ||
458 | retval = sabi_get_command(sabi_config->commands.get_performance_level, | ||
459 | &sretval); | ||
460 | if (retval) | ||
461 | return retval; | ||
462 | |||
463 | /* The logic is backwards, yeah, lots of fun... */ | ||
464 | for (i = 0; sabi_config->performance_levels[i].name; ++i) { | ||
465 | if (sretval.retval[0] == sabi_config->performance_levels[i].value) | ||
466 | return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name); | ||
467 | } | ||
468 | return sprintf(buf, "%s\n", "unknown"); | ||
469 | } | ||
470 | |||
471 | static ssize_t set_performance_level(struct device *dev, | ||
472 | struct device_attribute *attr, const char *buf, | ||
473 | size_t count) | ||
474 | { | ||
475 | if (count >= 1) { | ||
476 | int i; | ||
477 | for (i = 0; sabi_config->performance_levels[i].name; ++i) { | ||
478 | const struct sabi_performance_level *level = | ||
479 | &sabi_config->performance_levels[i]; | ||
480 | if (!strncasecmp(level->name, buf, strlen(level->name))) { | ||
481 | sabi_set_command(sabi_config->commands.set_performance_level, | ||
482 | level->value); | ||
483 | break; | ||
484 | } | ||
485 | } | ||
486 | if (!sabi_config->performance_levels[i].name) | ||
487 | return -EINVAL; | ||
488 | } | ||
489 | return count; | ||
490 | } | ||
/* sysfs attribute: /sys/devices/platform/samsung/performance_level */
static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
		   get_performance_level, set_performance_level);
493 | |||
494 | |||
495 | static int __init dmi_check_cb(const struct dmi_system_id *id) | ||
496 | { | ||
497 | pr_info("found laptop model '%s'\n", | ||
498 | id->ident); | ||
499 | return 1; | ||
500 | } | ||
501 | |||
/*
 * Samsung laptops known to speak SABI.  Each entry matches on vendor
 * plus product/board name; dmi_check_cb just logs the hit.  The table
 * gates module load in samsung_init() unless force=1 is given.
 */
static struct dmi_system_id __initdata samsung_dmi_table[] = {
	{
		.ident = "N128",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				"SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "N128"),
			DMI_MATCH(DMI_BOARD_NAME, "N128"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "N130",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				"SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
			DMI_MATCH(DMI_BOARD_NAME, "N130"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "X125",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				"SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X125"),
			DMI_MATCH(DMI_BOARD_NAME, "X125"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "X120/X170",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				"SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"),
			DMI_MATCH(DMI_BOARD_NAME, "X120/X170"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "NC10",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				"SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
			DMI_MATCH(DMI_BOARD_NAME, "NC10"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "NP-Q45",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				"SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
			DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "X360",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				"SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
			DMI_MATCH(DMI_BOARD_NAME, "X360"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "R518",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				"SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "R518"),
			DMI_MATCH(DMI_BOARD_NAME, "R518"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "R519/R719",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				"SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"),
			DMI_MATCH(DMI_BOARD_NAME, "R519/R719"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "N150/N210/N220",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				"SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
			DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "N150P/N210P/N220P",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				"SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"),
			DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "R530/R730",
		.matches = {
		      DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
		      DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"),
		      DMI_MATCH(DMI_BOARD_NAME, "R530/R730"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "NF110/NF210/NF310",
		.matches = {
		      DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
		      DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
		      DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "N145P/N250P/N260P",
		.matches = {
		      DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
		      DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
		      DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "R70/R71",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				"SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"),
			DMI_MATCH(DMI_BOARD_NAME, "R70/R71"),
		},
		.callback = dmi_check_cb,
	},
	{
		.ident = "P460",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "P460"),
			DMI_MATCH(DMI_BOARD_NAME, "P460"),
		},
		.callback = dmi_check_cb,
	},
	{ },
};
MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
662 | |||
663 | static int find_signature(void __iomem *memcheck, const char *testStr) | ||
664 | { | ||
665 | int i = 0; | ||
666 | int loca; | ||
667 | |||
668 | for (loca = 0; loca < 0xffff; loca++) { | ||
669 | char temp = readb(memcheck + loca); | ||
670 | |||
671 | if (temp == testStr[i]) { | ||
672 | if (i == strlen(testStr)-1) | ||
673 | break; | ||
674 | ++i; | ||
675 | } else { | ||
676 | i = 0; | ||
677 | } | ||
678 | } | ||
679 | return loca; | ||
680 | } | ||
681 | |||
682 | static int __init samsung_init(void) | ||
683 | { | ||
684 | struct backlight_properties props; | ||
685 | struct sabi_retval sretval; | ||
686 | unsigned int ifaceP; | ||
687 | int i; | ||
688 | int loca; | ||
689 | int retval; | ||
690 | |||
691 | mutex_init(&sabi_mutex); | ||
692 | |||
693 | if (!force && !dmi_check_system(samsung_dmi_table)) | ||
694 | return -ENODEV; | ||
695 | |||
696 | f0000_segment = ioremap_nocache(0xf0000, 0xffff); | ||
697 | if (!f0000_segment) { | ||
698 | pr_err("Can't map the segment at 0xf0000\n"); | ||
699 | return -EINVAL; | ||
700 | } | ||
701 | |||
702 | /* Try to find one of the signatures in memory to find the header */ | ||
703 | for (i = 0; sabi_configs[i].test_string != 0; ++i) { | ||
704 | sabi_config = &sabi_configs[i]; | ||
705 | loca = find_signature(f0000_segment, sabi_config->test_string); | ||
706 | if (loca != 0xffff) | ||
707 | break; | ||
708 | } | ||
709 | |||
710 | if (loca == 0xffff) { | ||
711 | pr_err("This computer does not support SABI\n"); | ||
712 | goto error_no_signature; | ||
713 | } | ||
714 | |||
715 | /* point to the SMI port Number */ | ||
716 | loca += 1; | ||
717 | sabi = (f0000_segment + loca); | ||
718 | |||
719 | if (debug) { | ||
720 | printk(KERN_DEBUG "This computer supports SABI==%x\n", | ||
721 | loca + 0xf0000 - 6); | ||
722 | printk(KERN_DEBUG "SABI header:\n"); | ||
723 | printk(KERN_DEBUG " SMI Port Number = 0x%04x\n", | ||
724 | readw(sabi + sabi_config->header_offsets.port)); | ||
725 | printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n", | ||
726 | readb(sabi + sabi_config->header_offsets.iface_func)); | ||
727 | printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n", | ||
728 | readb(sabi + sabi_config->header_offsets.en_mem)); | ||
729 | printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n", | ||
730 | readb(sabi + sabi_config->header_offsets.re_mem)); | ||
731 | printk(KERN_DEBUG " SABI data offset = 0x%04x\n", | ||
732 | readw(sabi + sabi_config->header_offsets.data_offset)); | ||
733 | printk(KERN_DEBUG " SABI data segment = 0x%04x\n", | ||
734 | readw(sabi + sabi_config->header_offsets.data_segment)); | ||
735 | } | ||
736 | |||
737 | /* Get a pointer to the SABI Interface */ | ||
738 | ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4; | ||
739 | ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff; | ||
740 | sabi_iface = ioremap_nocache(ifaceP, 16); | ||
741 | if (!sabi_iface) { | ||
742 | pr_err("Can't remap %x\n", ifaceP); | ||
743 | goto exit; | ||
744 | } | ||
745 | if (debug) { | ||
746 | printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP); | ||
747 | printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface); | ||
748 | |||
749 | test_backlight(); | ||
750 | test_wireless(); | ||
751 | |||
752 | retval = sabi_get_command(sabi_config->commands.get_brightness, | ||
753 | &sretval); | ||
754 | printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]); | ||
755 | } | ||
756 | |||
757 | /* Turn on "Linux" mode in the BIOS */ | ||
758 | if (sabi_config->commands.set_linux != 0xff) { | ||
759 | retval = sabi_set_command(sabi_config->commands.set_linux, | ||
760 | 0x81); | ||
761 | if (retval) { | ||
762 | pr_warn("Linux mode was not set!\n"); | ||
763 | goto error_no_platform; | ||
764 | } | ||
765 | } | ||
766 | |||
767 | /* knock up a platform device to hang stuff off of */ | ||
768 | sdev = platform_device_register_simple("samsung", -1, NULL, 0); | ||
769 | if (IS_ERR(sdev)) | ||
770 | goto error_no_platform; | ||
771 | |||
772 | /* create a backlight device to talk to this one */ | ||
773 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
774 | props.max_brightness = sabi_config->max_brightness; | ||
775 | backlight_device = backlight_device_register("samsung", &sdev->dev, | ||
776 | NULL, &backlight_ops, | ||
777 | &props); | ||
778 | if (IS_ERR(backlight_device)) | ||
779 | goto error_no_backlight; | ||
780 | |||
781 | backlight_device->props.brightness = read_brightness(); | ||
782 | backlight_device->props.power = FB_BLANK_UNBLANK; | ||
783 | backlight_update_status(backlight_device); | ||
784 | |||
785 | retval = init_wireless(sdev); | ||
786 | if (retval) | ||
787 | goto error_no_rfk; | ||
788 | |||
789 | retval = device_create_file(&sdev->dev, &dev_attr_performance_level); | ||
790 | if (retval) | ||
791 | goto error_file_create; | ||
792 | |||
793 | exit: | ||
794 | return 0; | ||
795 | |||
796 | error_file_create: | ||
797 | destroy_wireless(); | ||
798 | |||
799 | error_no_rfk: | ||
800 | backlight_device_unregister(backlight_device); | ||
801 | |||
802 | error_no_backlight: | ||
803 | platform_device_unregister(sdev); | ||
804 | |||
805 | error_no_platform: | ||
806 | iounmap(sabi_iface); | ||
807 | |||
808 | error_no_signature: | ||
809 | iounmap(f0000_segment); | ||
810 | return -EINVAL; | ||
811 | } | ||
812 | |||
/*
 * Module exit: undo samsung_init() — restore the BIOS to non-Linux
 * mode, remove the sysfs attribute, unregister the backlight and
 * rfkill devices, drop both iomappings and finally unregister the
 * platform device.
 */
static void __exit samsung_exit(void)
{
	/* Turn off "Linux" mode in the BIOS */
	if (sabi_config->commands.set_linux != 0xff)
		sabi_set_command(sabi_config->commands.set_linux, 0x80);

	device_remove_file(&sdev->dev, &dev_attr_performance_level);
	backlight_device_unregister(backlight_device);
	destroy_wireless();
	iounmap(sabi_iface);
	iounmap(f0000_segment);
	platform_device_unregister(sdev);
}
826 | |||
/* Module entry/exit points and metadata. */
module_init(samsung_init);
module_exit(samsung_exit);

MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
MODULE_DESCRIPTION("Samsung Backlight driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 13d8d63bcca9..e642f5f29504 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
@@ -71,8 +71,9 @@ | |||
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | #define DRV_PFX "sony-laptop: " | 73 | #define DRV_PFX "sony-laptop: " |
74 | #define dprintk(msg...) do { \ | 74 | #define dprintk(msg...) do { \ |
75 | if (debug) printk(KERN_WARNING DRV_PFX msg); \ | 75 | if (debug) \ |
76 | pr_warn(DRV_PFX msg); \ | ||
76 | } while (0) | 77 | } while (0) |
77 | 78 | ||
78 | #define SONY_LAPTOP_DRIVER_VERSION "0.6" | 79 | #define SONY_LAPTOP_DRIVER_VERSION "0.6" |
@@ -124,6 +125,19 @@ MODULE_PARM_DESC(minor, | |||
124 | "default is -1 (automatic)"); | 125 | "default is -1 (automatic)"); |
125 | #endif | 126 | #endif |
126 | 127 | ||
128 | static int kbd_backlight; /* = 1 */ | ||
129 | module_param(kbd_backlight, int, 0444); | ||
130 | MODULE_PARM_DESC(kbd_backlight, | ||
131 | "set this to 0 to disable keyboard backlight, " | ||
132 | "1 to enable it (default: 0)"); | ||
133 | |||
134 | static int kbd_backlight_timeout; /* = 0 */ | ||
135 | module_param(kbd_backlight_timeout, int, 0444); | ||
136 | MODULE_PARM_DESC(kbd_backlight_timeout, | ||
137 | "set this to 0 to set the default 10 seconds timeout, " | ||
138 | "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " | ||
139 | "(default: 0)"); | ||
140 | |||
127 | enum sony_nc_rfkill { | 141 | enum sony_nc_rfkill { |
128 | SONY_WIFI, | 142 | SONY_WIFI, |
129 | SONY_BLUETOOTH, | 143 | SONY_BLUETOOTH, |
@@ -402,7 +416,7 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device) | |||
402 | error = kfifo_alloc(&sony_laptop_input.fifo, | 416 | error = kfifo_alloc(&sony_laptop_input.fifo, |
403 | SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); | 417 | SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); |
404 | if (error) { | 418 | if (error) { |
405 | printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); | 419 | pr_err(DRV_PFX "kfifo_alloc failed\n"); |
406 | goto err_dec_users; | 420 | goto err_dec_users; |
407 | } | 421 | } |
408 | 422 | ||
@@ -591,7 +605,7 @@ struct sony_nc_value { | |||
591 | int value; /* current setting */ | 605 | int value; /* current setting */ |
592 | int valid; /* Has ever been set */ | 606 | int valid; /* Has ever been set */ |
593 | int debug; /* active only in debug mode ? */ | 607 | int debug; /* active only in debug mode ? */ |
594 | struct device_attribute devattr; /* sysfs atribute */ | 608 | struct device_attribute devattr; /* sysfs attribute */ |
595 | }; | 609 | }; |
596 | 610 | ||
597 | #define SNC_HANDLE_NAMES(_name, _values...) \ | 611 | #define SNC_HANDLE_NAMES(_name, _values...) \ |
@@ -686,7 +700,7 @@ static int acpi_callgetfunc(acpi_handle handle, char *name, int *result) | |||
686 | return 0; | 700 | return 0; |
687 | } | 701 | } |
688 | 702 | ||
689 | printk(KERN_WARNING DRV_PFX "acpi_callreadfunc failed\n"); | 703 | pr_warn(DRV_PFX "acpi_callreadfunc failed\n"); |
690 | 704 | ||
691 | return -1; | 705 | return -1; |
692 | } | 706 | } |
@@ -712,7 +726,7 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value, | |||
712 | if (status == AE_OK) { | 726 | if (status == AE_OK) { |
713 | if (result != NULL) { | 727 | if (result != NULL) { |
714 | if (out_obj.type != ACPI_TYPE_INTEGER) { | 728 | if (out_obj.type != ACPI_TYPE_INTEGER) { |
715 | printk(KERN_WARNING DRV_PFX "acpi_evaluate_object bad " | 729 | pr_warn(DRV_PFX "acpi_evaluate_object bad " |
716 | "return type\n"); | 730 | "return type\n"); |
717 | return -1; | 731 | return -1; |
718 | } | 732 | } |
@@ -721,34 +735,103 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value, | |||
721 | return 0; | 735 | return 0; |
722 | } | 736 | } |
723 | 737 | ||
724 | printk(KERN_WARNING DRV_PFX "acpi_evaluate_object failed\n"); | 738 | pr_warn(DRV_PFX "acpi_evaluate_object failed\n"); |
725 | 739 | ||
726 | return -1; | 740 | return -1; |
727 | } | 741 | } |
728 | 742 | ||
729 | static int sony_find_snc_handle(int handle) | 743 | struct sony_nc_handles { |
744 | u16 cap[0x10]; | ||
745 | struct device_attribute devattr; | ||
746 | }; | ||
747 | |||
748 | static struct sony_nc_handles *handles; | ||
749 | |||
750 | static ssize_t sony_nc_handles_show(struct device *dev, | ||
751 | struct device_attribute *attr, char *buffer) | ||
752 | { | ||
753 | ssize_t len = 0; | ||
754 | int i; | ||
755 | |||
756 | for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { | ||
757 | len += snprintf(buffer + len, PAGE_SIZE - len, "0x%.4x ", | ||
758 | handles->cap[i]); | ||
759 | } | ||
760 | len += snprintf(buffer + len, PAGE_SIZE - len, "\n"); | ||
761 | |||
762 | return len; | ||
763 | } | ||
764 | |||
765 | static int sony_nc_handles_setup(struct platform_device *pd) | ||
730 | { | 766 | { |
731 | int i; | 767 | int i; |
732 | int result; | 768 | int result; |
733 | 769 | ||
734 | for (i = 0x20; i < 0x30; i++) { | 770 | handles = kzalloc(sizeof(*handles), GFP_KERNEL); |
735 | acpi_callsetfunc(sony_nc_acpi_handle, "SN00", i, &result); | 771 | if (!handles) |
736 | if (result == handle) | 772 | return -ENOMEM; |
737 | return i-0x20; | 773 | |
774 | sysfs_attr_init(&handles->devattr.attr); | ||
775 | handles->devattr.attr.name = "handles"; | ||
776 | handles->devattr.attr.mode = S_IRUGO; | ||
777 | handles->devattr.show = sony_nc_handles_show; | ||
778 | |||
779 | for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { | ||
780 | if (!acpi_callsetfunc(sony_nc_acpi_handle, | ||
781 | "SN00", i + 0x20, &result)) { | ||
782 | dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n", | ||
783 | result, i); | ||
784 | handles->cap[i] = result; | ||
785 | } | ||
786 | } | ||
787 | |||
788 | /* allow reading capabilities via sysfs */ | ||
789 | if (device_create_file(&pd->dev, &handles->devattr)) { | ||
790 | kfree(handles); | ||
791 | handles = NULL; | ||
792 | return -1; | ||
793 | } | ||
794 | |||
795 | return 0; | ||
796 | } | ||
797 | |||
798 | static int sony_nc_handles_cleanup(struct platform_device *pd) | ||
799 | { | ||
800 | if (handles) { | ||
801 | device_remove_file(&pd->dev, &handles->devattr); | ||
802 | kfree(handles); | ||
803 | handles = NULL; | ||
738 | } | 804 | } |
805 | return 0; | ||
806 | } | ||
739 | 807 | ||
808 | static int sony_find_snc_handle(int handle) | ||
809 | { | ||
810 | int i; | ||
811 | for (i = 0; i < 0x10; i++) { | ||
812 | if (handles->cap[i] == handle) { | ||
813 | dprintk("found handle 0x%.4x (offset: 0x%.2x)\n", | ||
814 | handle, i); | ||
815 | return i; | ||
816 | } | ||
817 | } | ||
818 | dprintk("handle 0x%.4x not found\n", handle); | ||
740 | return -1; | 819 | return -1; |
741 | } | 820 | } |
742 | 821 | ||
743 | static int sony_call_snc_handle(int handle, int argument, int *result) | 822 | static int sony_call_snc_handle(int handle, int argument, int *result) |
744 | { | 823 | { |
824 | int ret = 0; | ||
745 | int offset = sony_find_snc_handle(handle); | 825 | int offset = sony_find_snc_handle(handle); |
746 | 826 | ||
747 | if (offset < 0) | 827 | if (offset < 0) |
748 | return -1; | 828 | return -1; |
749 | 829 | ||
750 | return acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument, | 830 | ret = acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument, |
751 | result); | 831 | result); |
832 | dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", offset | argument, | ||
833 | *result); | ||
834 | return ret; | ||
752 | } | 835 | } |
753 | 836 | ||
754 | /* | 837 | /* |
@@ -857,11 +940,39 @@ static int sony_backlight_get_brightness(struct backlight_device *bd) | |||
857 | return value - 1; | 940 | return value - 1; |
858 | } | 941 | } |
859 | 942 | ||
860 | static struct backlight_device *sony_backlight_device; | 943 | static int sony_nc_get_brightness_ng(struct backlight_device *bd) |
944 | { | ||
945 | int result; | ||
946 | int *handle = (int *)bl_get_data(bd); | ||
947 | |||
948 | sony_call_snc_handle(*handle, 0x0200, &result); | ||
949 | |||
950 | return result & 0xff; | ||
951 | } | ||
952 | |||
953 | static int sony_nc_update_status_ng(struct backlight_device *bd) | ||
954 | { | ||
955 | int value, result; | ||
956 | int *handle = (int *)bl_get_data(bd); | ||
957 | |||
958 | value = bd->props.brightness; | ||
959 | sony_call_snc_handle(*handle, 0x0100 | (value << 16), &result); | ||
960 | |||
961 | return sony_nc_get_brightness_ng(bd); | ||
962 | } | ||
963 | |||
861 | static const struct backlight_ops sony_backlight_ops = { | 964 | static const struct backlight_ops sony_backlight_ops = { |
965 | .options = BL_CORE_SUSPENDRESUME, | ||
862 | .update_status = sony_backlight_update_status, | 966 | .update_status = sony_backlight_update_status, |
863 | .get_brightness = sony_backlight_get_brightness, | 967 | .get_brightness = sony_backlight_get_brightness, |
864 | }; | 968 | }; |
969 | static const struct backlight_ops sony_backlight_ng_ops = { | ||
970 | .options = BL_CORE_SUSPENDRESUME, | ||
971 | .update_status = sony_nc_update_status_ng, | ||
972 | .get_brightness = sony_nc_get_brightness_ng, | ||
973 | }; | ||
974 | static int backlight_ng_handle; | ||
975 | static struct backlight_device *sony_backlight_device; | ||
865 | 976 | ||
866 | /* | 977 | /* |
867 | * New SNC-only Vaios event mapping to driver known keys | 978 | * New SNC-only Vaios event mapping to driver known keys |
@@ -972,7 +1083,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event) | |||
972 | } | 1083 | } |
973 | 1084 | ||
974 | if (!key_event->data) | 1085 | if (!key_event->data) |
975 | printk(KERN_INFO DRV_PFX | 1086 | pr_info(DRV_PFX |
976 | "Unknown event: 0x%x 0x%x\n", | 1087 | "Unknown event: 0x%x 0x%x\n", |
977 | key_handle, | 1088 | key_handle, |
978 | ev); | 1089 | ev); |
@@ -996,7 +1107,7 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level, | |||
996 | struct acpi_device_info *info; | 1107 | struct acpi_device_info *info; |
997 | 1108 | ||
998 | if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) { | 1109 | if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) { |
999 | printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n", | 1110 | pr_warn(DRV_PFX "method: name: %4.4s, args %X\n", |
1000 | (char *)&info->name, info->param_count); | 1111 | (char *)&info->name, info->param_count); |
1001 | 1112 | ||
1002 | kfree(info); | 1113 | kfree(info); |
@@ -1037,7 +1148,7 @@ static int sony_nc_resume(struct acpi_device *device) | |||
1037 | ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, | 1148 | ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, |
1038 | item->value, NULL); | 1149 | item->value, NULL); |
1039 | if (ret < 0) { | 1150 | if (ret < 0) { |
1040 | printk("%s: %d\n", __func__, ret); | 1151 | pr_err(DRV_PFX "%s: %d\n", __func__, ret); |
1041 | break; | 1152 | break; |
1042 | } | 1153 | } |
1043 | } | 1154 | } |
@@ -1054,11 +1165,6 @@ static int sony_nc_resume(struct acpi_device *device) | |||
1054 | sony_nc_function_setup(device); | 1165 | sony_nc_function_setup(device); |
1055 | } | 1166 | } |
1056 | 1167 | ||
1057 | /* set the last requested brightness level */ | ||
1058 | if (sony_backlight_device && | ||
1059 | sony_backlight_update_status(sony_backlight_device) < 0) | ||
1060 | printk(KERN_WARNING DRV_PFX "unable to restore brightness level\n"); | ||
1061 | |||
1062 | /* re-read rfkill state */ | 1168 | /* re-read rfkill state */ |
1063 | sony_nc_rfkill_update(); | 1169 | sony_nc_rfkill_update(); |
1064 | 1170 | ||
@@ -1206,12 +1312,12 @@ static void sony_nc_rfkill_setup(struct acpi_device *device) | |||
1206 | 1312 | ||
1207 | device_enum = (union acpi_object *) buffer.pointer; | 1313 | device_enum = (union acpi_object *) buffer.pointer; |
1208 | if (!device_enum) { | 1314 | if (!device_enum) { |
1209 | pr_err("Invalid SN06 return object\n"); | 1315 | pr_err(DRV_PFX "No SN06 return object."); |
1210 | goto out_no_enum; | 1316 | goto out_no_enum; |
1211 | } | 1317 | } |
1212 | if (device_enum->type != ACPI_TYPE_BUFFER) { | 1318 | if (device_enum->type != ACPI_TYPE_BUFFER) { |
1213 | pr_err("Invalid SN06 return object type 0x%.2x\n", | 1319 | pr_err(DRV_PFX "Invalid SN06 return object 0x%.2x\n", |
1214 | device_enum->type); | 1320 | device_enum->type); |
1215 | goto out_no_enum; | 1321 | goto out_no_enum; |
1216 | } | 1322 | } |
1217 | 1323 | ||
@@ -1245,6 +1351,209 @@ out_no_enum: | |||
1245 | return; | 1351 | return; |
1246 | } | 1352 | } |
1247 | 1353 | ||
1354 | /* Keyboard backlight feature */ | ||
1355 | #define KBDBL_HANDLER 0x137 | ||
1356 | #define KBDBL_PRESENT 0xB00 | ||
1357 | #define SET_MODE 0xC00 | ||
1358 | #define SET_TIMEOUT 0xE00 | ||
1359 | |||
1360 | struct kbd_backlight { | ||
1361 | int mode; | ||
1362 | int timeout; | ||
1363 | struct device_attribute mode_attr; | ||
1364 | struct device_attribute timeout_attr; | ||
1365 | }; | ||
1366 | |||
1367 | static struct kbd_backlight *kbdbl_handle; | ||
1368 | |||
1369 | static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value) | ||
1370 | { | ||
1371 | int result; | ||
1372 | |||
1373 | if (value > 1) | ||
1374 | return -EINVAL; | ||
1375 | |||
1376 | if (sony_call_snc_handle(KBDBL_HANDLER, | ||
1377 | (value << 0x10) | SET_MODE, &result)) | ||
1378 | return -EIO; | ||
1379 | |||
1380 | kbdbl_handle->mode = value; | ||
1381 | |||
1382 | return 0; | ||
1383 | } | ||
1384 | |||
1385 | static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev, | ||
1386 | struct device_attribute *attr, | ||
1387 | const char *buffer, size_t count) | ||
1388 | { | ||
1389 | int ret = 0; | ||
1390 | unsigned long value; | ||
1391 | |||
1392 | if (count > 31) | ||
1393 | return -EINVAL; | ||
1394 | |||
1395 | if (strict_strtoul(buffer, 10, &value)) | ||
1396 | return -EINVAL; | ||
1397 | |||
1398 | ret = __sony_nc_kbd_backlight_mode_set(value); | ||
1399 | if (ret < 0) | ||
1400 | return ret; | ||
1401 | |||
1402 | return count; | ||
1403 | } | ||
1404 | |||
1405 | static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev, | ||
1406 | struct device_attribute *attr, char *buffer) | ||
1407 | { | ||
1408 | ssize_t count = 0; | ||
1409 | count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->mode); | ||
1410 | return count; | ||
1411 | } | ||
1412 | |||
1413 | static int __sony_nc_kbd_backlight_timeout_set(u8 value) | ||
1414 | { | ||
1415 | int result; | ||
1416 | |||
1417 | if (value > 3) | ||
1418 | return -EINVAL; | ||
1419 | |||
1420 | if (sony_call_snc_handle(KBDBL_HANDLER, | ||
1421 | (value << 0x10) | SET_TIMEOUT, &result)) | ||
1422 | return -EIO; | ||
1423 | |||
1424 | kbdbl_handle->timeout = value; | ||
1425 | |||
1426 | return 0; | ||
1427 | } | ||
1428 | |||
1429 | static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev, | ||
1430 | struct device_attribute *attr, | ||
1431 | const char *buffer, size_t count) | ||
1432 | { | ||
1433 | int ret = 0; | ||
1434 | unsigned long value; | ||
1435 | |||
1436 | if (count > 31) | ||
1437 | return -EINVAL; | ||
1438 | |||
1439 | if (strict_strtoul(buffer, 10, &value)) | ||
1440 | return -EINVAL; | ||
1441 | |||
1442 | ret = __sony_nc_kbd_backlight_timeout_set(value); | ||
1443 | if (ret < 0) | ||
1444 | return ret; | ||
1445 | |||
1446 | return count; | ||
1447 | } | ||
1448 | |||
1449 | static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev, | ||
1450 | struct device_attribute *attr, char *buffer) | ||
1451 | { | ||
1452 | ssize_t count = 0; | ||
1453 | count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->timeout); | ||
1454 | return count; | ||
1455 | } | ||
1456 | |||
1457 | static int sony_nc_kbd_backlight_setup(struct platform_device *pd) | ||
1458 | { | ||
1459 | int result; | ||
1460 | |||
1461 | if (sony_call_snc_handle(0x137, KBDBL_PRESENT, &result)) | ||
1462 | return 0; | ||
1463 | if (!(result & 0x02)) | ||
1464 | return 0; | ||
1465 | |||
1466 | kbdbl_handle = kzalloc(sizeof(*kbdbl_handle), GFP_KERNEL); | ||
1467 | if (!kbdbl_handle) | ||
1468 | return -ENOMEM; | ||
1469 | |||
1470 | sysfs_attr_init(&kbdbl_handle->mode_attr.attr); | ||
1471 | kbdbl_handle->mode_attr.attr.name = "kbd_backlight"; | ||
1472 | kbdbl_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR; | ||
1473 | kbdbl_handle->mode_attr.show = sony_nc_kbd_backlight_mode_show; | ||
1474 | kbdbl_handle->mode_attr.store = sony_nc_kbd_backlight_mode_store; | ||
1475 | |||
1476 | sysfs_attr_init(&kbdbl_handle->timeout_attr.attr); | ||
1477 | kbdbl_handle->timeout_attr.attr.name = "kbd_backlight_timeout"; | ||
1478 | kbdbl_handle->timeout_attr.attr.mode = S_IRUGO | S_IWUSR; | ||
1479 | kbdbl_handle->timeout_attr.show = sony_nc_kbd_backlight_timeout_show; | ||
1480 | kbdbl_handle->timeout_attr.store = sony_nc_kbd_backlight_timeout_store; | ||
1481 | |||
1482 | if (device_create_file(&pd->dev, &kbdbl_handle->mode_attr)) | ||
1483 | goto outkzalloc; | ||
1484 | |||
1485 | if (device_create_file(&pd->dev, &kbdbl_handle->timeout_attr)) | ||
1486 | goto outmode; | ||
1487 | |||
1488 | __sony_nc_kbd_backlight_mode_set(kbd_backlight); | ||
1489 | __sony_nc_kbd_backlight_timeout_set(kbd_backlight_timeout); | ||
1490 | |||
1491 | return 0; | ||
1492 | |||
1493 | outmode: | ||
1494 | device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); | ||
1495 | outkzalloc: | ||
1496 | kfree(kbdbl_handle); | ||
1497 | kbdbl_handle = NULL; | ||
1498 | return -1; | ||
1499 | } | ||
1500 | |||
1501 | static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd) | ||
1502 | { | ||
1503 | if (kbdbl_handle) { | ||
1504 | device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); | ||
1505 | device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr); | ||
1506 | kfree(kbdbl_handle); | ||
1507 | } | ||
1508 | return 0; | ||
1509 | } | ||
1510 | |||
1511 | static void sony_nc_backlight_setup(void) | ||
1512 | { | ||
1513 | acpi_handle unused; | ||
1514 | int max_brightness = 0; | ||
1515 | const struct backlight_ops *ops = NULL; | ||
1516 | struct backlight_properties props; | ||
1517 | |||
1518 | if (sony_find_snc_handle(0x12f) != -1) { | ||
1519 | backlight_ng_handle = 0x12f; | ||
1520 | ops = &sony_backlight_ng_ops; | ||
1521 | max_brightness = 0xff; | ||
1522 | |||
1523 | } else if (sony_find_snc_handle(0x137) != -1) { | ||
1524 | backlight_ng_handle = 0x137; | ||
1525 | ops = &sony_backlight_ng_ops; | ||
1526 | max_brightness = 0xff; | ||
1527 | |||
1528 | } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", | ||
1529 | &unused))) { | ||
1530 | ops = &sony_backlight_ops; | ||
1531 | max_brightness = SONY_MAX_BRIGHTNESS - 1; | ||
1532 | |||
1533 | } else | ||
1534 | return; | ||
1535 | |||
1536 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
1537 | props.type = BACKLIGHT_PLATFORM; | ||
1538 | props.max_brightness = max_brightness; | ||
1539 | sony_backlight_device = backlight_device_register("sony", NULL, | ||
1540 | &backlight_ng_handle, | ||
1541 | ops, &props); | ||
1542 | |||
1543 | if (IS_ERR(sony_backlight_device)) { | ||
1544 | pr_warning(DRV_PFX "unable to register backlight device\n"); | ||
1545 | sony_backlight_device = NULL; | ||
1546 | } else | ||
1547 | sony_backlight_device->props.brightness = | ||
1548 | ops->get_brightness(sony_backlight_device); | ||
1549 | } | ||
1550 | |||
1551 | static void sony_nc_backlight_cleanup(void) | ||
1552 | { | ||
1553 | if (sony_backlight_device) | ||
1554 | backlight_device_unregister(sony_backlight_device); | ||
1555 | } | ||
1556 | |||
1248 | static int sony_nc_add(struct acpi_device *device) | 1557 | static int sony_nc_add(struct acpi_device *device) |
1249 | { | 1558 | { |
1250 | acpi_status status; | 1559 | acpi_status status; |
@@ -1252,8 +1561,8 @@ static int sony_nc_add(struct acpi_device *device) | |||
1252 | acpi_handle handle; | 1561 | acpi_handle handle; |
1253 | struct sony_nc_value *item; | 1562 | struct sony_nc_value *item; |
1254 | 1563 | ||
1255 | printk(KERN_INFO DRV_PFX "%s v%s.\n", | 1564 | pr_info(DRV_PFX "%s v%s.\n", SONY_NC_DRIVER_NAME, |
1256 | SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION); | 1565 | SONY_LAPTOP_DRIVER_VERSION); |
1257 | 1566 | ||
1258 | sony_nc_acpi_device = device; | 1567 | sony_nc_acpi_device = device; |
1259 | strcpy(acpi_device_class(device), "sony/hotkey"); | 1568 | strcpy(acpi_device_class(device), "sony/hotkey"); |
@@ -1269,13 +1578,18 @@ static int sony_nc_add(struct acpi_device *device) | |||
1269 | goto outwalk; | 1578 | goto outwalk; |
1270 | } | 1579 | } |
1271 | 1580 | ||
1581 | result = sony_pf_add(); | ||
1582 | if (result) | ||
1583 | goto outpresent; | ||
1584 | |||
1272 | if (debug) { | 1585 | if (debug) { |
1273 | status = acpi_walk_namespace(ACPI_TYPE_METHOD, sony_nc_acpi_handle, | 1586 | status = acpi_walk_namespace(ACPI_TYPE_METHOD, |
1274 | 1, sony_walk_callback, NULL, NULL, NULL); | 1587 | sony_nc_acpi_handle, 1, sony_walk_callback, |
1588 | NULL, NULL, NULL); | ||
1275 | if (ACPI_FAILURE(status)) { | 1589 | if (ACPI_FAILURE(status)) { |
1276 | printk(KERN_WARNING DRV_PFX "unable to walk acpi resources\n"); | 1590 | pr_warn(DRV_PFX "unable to walk acpi resources\n"); |
1277 | result = -ENODEV; | 1591 | result = -ENODEV; |
1278 | goto outwalk; | 1592 | goto outpresent; |
1279 | } | 1593 | } |
1280 | } | 1594 | } |
1281 | 1595 | ||
@@ -1288,6 +1602,12 @@ static int sony_nc_add(struct acpi_device *device) | |||
1288 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", | 1602 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", |
1289 | &handle))) { | 1603 | &handle))) { |
1290 | dprintk("Doing SNC setup\n"); | 1604 | dprintk("Doing SNC setup\n"); |
1605 | result = sony_nc_handles_setup(sony_pf_device); | ||
1606 | if (result) | ||
1607 | goto outpresent; | ||
1608 | result = sony_nc_kbd_backlight_setup(sony_pf_device); | ||
1609 | if (result) | ||
1610 | goto outsnc; | ||
1291 | sony_nc_function_setup(device); | 1611 | sony_nc_function_setup(device); |
1292 | sony_nc_rfkill_setup(device); | 1612 | sony_nc_rfkill_setup(device); |
1293 | } | 1613 | } |
@@ -1295,40 +1615,17 @@ static int sony_nc_add(struct acpi_device *device) | |||
1295 | /* setup input devices and helper fifo */ | 1615 | /* setup input devices and helper fifo */ |
1296 | result = sony_laptop_setup_input(device); | 1616 | result = sony_laptop_setup_input(device); |
1297 | if (result) { | 1617 | if (result) { |
1298 | printk(KERN_ERR DRV_PFX | 1618 | pr_err(DRV_PFX "Unable to create input devices.\n"); |
1299 | "Unable to create input devices.\n"); | 1619 | goto outkbdbacklight; |
1300 | goto outwalk; | ||
1301 | } | 1620 | } |
1302 | 1621 | ||
1303 | if (acpi_video_backlight_support()) { | 1622 | if (acpi_video_backlight_support()) { |
1304 | printk(KERN_INFO DRV_PFX "brightness ignored, must be " | 1623 | pr_info(DRV_PFX "brightness ignored, must be " |
1305 | "controlled by ACPI video driver\n"); | 1624 | "controlled by ACPI video driver\n"); |
1306 | } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", | 1625 | } else { |
1307 | &handle))) { | 1626 | sony_nc_backlight_setup(); |
1308 | struct backlight_properties props; | ||
1309 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
1310 | props.type = BACKLIGHT_PLATFORM; | ||
1311 | props.max_brightness = SONY_MAX_BRIGHTNESS - 1; | ||
1312 | sony_backlight_device = backlight_device_register("sony", NULL, | ||
1313 | NULL, | ||
1314 | &sony_backlight_ops, | ||
1315 | &props); | ||
1316 | |||
1317 | if (IS_ERR(sony_backlight_device)) { | ||
1318 | printk(KERN_WARNING DRV_PFX "unable to register backlight device\n"); | ||
1319 | sony_backlight_device = NULL; | ||
1320 | } else { | ||
1321 | sony_backlight_device->props.brightness = | ||
1322 | sony_backlight_get_brightness | ||
1323 | (sony_backlight_device); | ||
1324 | } | ||
1325 | |||
1326 | } | 1627 | } |
1327 | 1628 | ||
1328 | result = sony_pf_add(); | ||
1329 | if (result) | ||
1330 | goto outbacklight; | ||
1331 | |||
1332 | /* create sony_pf sysfs attributes related to the SNC device */ | 1629 | /* create sony_pf sysfs attributes related to the SNC device */ |
1333 | for (item = sony_nc_values; item->name; ++item) { | 1630 | for (item = sony_nc_values; item->name; ++item) { |
1334 | 1631 | ||
@@ -1374,14 +1671,19 @@ static int sony_nc_add(struct acpi_device *device) | |||
1374 | for (item = sony_nc_values; item->name; ++item) { | 1671 | for (item = sony_nc_values; item->name; ++item) { |
1375 | device_remove_file(&sony_pf_device->dev, &item->devattr); | 1672 | device_remove_file(&sony_pf_device->dev, &item->devattr); |
1376 | } | 1673 | } |
1377 | sony_pf_remove(); | 1674 | sony_nc_backlight_cleanup(); |
1378 | |||
1379 | outbacklight: | ||
1380 | if (sony_backlight_device) | ||
1381 | backlight_device_unregister(sony_backlight_device); | ||
1382 | 1675 | ||
1383 | sony_laptop_remove_input(); | 1676 | sony_laptop_remove_input(); |
1384 | 1677 | ||
1678 | outkbdbacklight: | ||
1679 | sony_nc_kbd_backlight_cleanup(sony_pf_device); | ||
1680 | |||
1681 | outsnc: | ||
1682 | sony_nc_handles_cleanup(sony_pf_device); | ||
1683 | |||
1684 | outpresent: | ||
1685 | sony_pf_remove(); | ||
1686 | |||
1385 | outwalk: | 1687 | outwalk: |
1386 | sony_nc_rfkill_cleanup(); | 1688 | sony_nc_rfkill_cleanup(); |
1387 | return result; | 1689 | return result; |
@@ -1391,8 +1693,7 @@ static int sony_nc_remove(struct acpi_device *device, int type) | |||
1391 | { | 1693 | { |
1392 | struct sony_nc_value *item; | 1694 | struct sony_nc_value *item; |
1393 | 1695 | ||
1394 | if (sony_backlight_device) | 1696 | sony_nc_backlight_cleanup(); |
1395 | backlight_device_unregister(sony_backlight_device); | ||
1396 | 1697 | ||
1397 | sony_nc_acpi_device = NULL; | 1698 | sony_nc_acpi_device = NULL; |
1398 | 1699 | ||
@@ -1400,6 +1701,8 @@ static int sony_nc_remove(struct acpi_device *device, int type) | |||
1400 | device_remove_file(&sony_pf_device->dev, &item->devattr); | 1701 | device_remove_file(&sony_pf_device->dev, &item->devattr); |
1401 | } | 1702 | } |
1402 | 1703 | ||
1704 | sony_nc_kbd_backlight_cleanup(sony_pf_device); | ||
1705 | sony_nc_handles_cleanup(sony_pf_device); | ||
1403 | sony_pf_remove(); | 1706 | sony_pf_remove(); |
1404 | sony_laptop_remove_input(); | 1707 | sony_laptop_remove_input(); |
1405 | sony_nc_rfkill_cleanup(); | 1708 | sony_nc_rfkill_cleanup(); |
@@ -1438,7 +1741,6 @@ static struct acpi_driver sony_nc_driver = { | |||
1438 | #define SONYPI_DEVICE_TYPE1 0x00000001 | 1741 | #define SONYPI_DEVICE_TYPE1 0x00000001 |
1439 | #define SONYPI_DEVICE_TYPE2 0x00000002 | 1742 | #define SONYPI_DEVICE_TYPE2 0x00000002 |
1440 | #define SONYPI_DEVICE_TYPE3 0x00000004 | 1743 | #define SONYPI_DEVICE_TYPE3 0x00000004 |
1441 | #define SONYPI_DEVICE_TYPE4 0x00000008 | ||
1442 | 1744 | ||
1443 | #define SONYPI_TYPE1_OFFSET 0x04 | 1745 | #define SONYPI_TYPE1_OFFSET 0x04 |
1444 | #define SONYPI_TYPE2_OFFSET 0x12 | 1746 | #define SONYPI_TYPE2_OFFSET 0x12 |
@@ -1584,8 +1886,8 @@ static struct sonypi_event sonypi_blueev[] = { | |||
1584 | 1886 | ||
1585 | /* The set of possible wireless events */ | 1887 | /* The set of possible wireless events */ |
1586 | static struct sonypi_event sonypi_wlessev[] = { | 1888 | static struct sonypi_event sonypi_wlessev[] = { |
1587 | { 0x59, SONYPI_EVENT_WIRELESS_ON }, | 1889 | { 0x59, SONYPI_EVENT_IGNORE }, |
1588 | { 0x5a, SONYPI_EVENT_WIRELESS_OFF }, | 1890 | { 0x5a, SONYPI_EVENT_IGNORE }, |
1589 | { 0, 0 } | 1891 | { 0, 0 } |
1590 | }; | 1892 | }; |
1591 | 1893 | ||
@@ -1842,7 +2144,7 @@ out: | |||
1842 | if (pcidev) | 2144 | if (pcidev) |
1843 | pci_dev_put(pcidev); | 2145 | pci_dev_put(pcidev); |
1844 | 2146 | ||
1845 | printk(KERN_INFO DRV_PFX "detected Type%d model\n", | 2147 | pr_info(DRV_PFX "detected Type%d model\n", |
1846 | dev->model == SONYPI_DEVICE_TYPE1 ? 1 : | 2148 | dev->model == SONYPI_DEVICE_TYPE1 ? 1 : |
1847 | dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); | 2149 | dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); |
1848 | } | 2150 | } |
@@ -1890,7 +2192,7 @@ static int __sony_pic_camera_ready(void) | |||
1890 | static int __sony_pic_camera_off(void) | 2192 | static int __sony_pic_camera_off(void) |
1891 | { | 2193 | { |
1892 | if (!camera) { | 2194 | if (!camera) { |
1893 | printk(KERN_WARNING DRV_PFX "camera control not enabled\n"); | 2195 | pr_warn(DRV_PFX "camera control not enabled\n"); |
1894 | return -ENODEV; | 2196 | return -ENODEV; |
1895 | } | 2197 | } |
1896 | 2198 | ||
@@ -1910,7 +2212,7 @@ static int __sony_pic_camera_on(void) | |||
1910 | int i, j, x; | 2212 | int i, j, x; |
1911 | 2213 | ||
1912 | if (!camera) { | 2214 | if (!camera) { |
1913 | printk(KERN_WARNING DRV_PFX "camera control not enabled\n"); | 2215 | pr_warn(DRV_PFX "camera control not enabled\n"); |
1914 | return -ENODEV; | 2216 | return -ENODEV; |
1915 | } | 2217 | } |
1916 | 2218 | ||
@@ -1933,7 +2235,7 @@ static int __sony_pic_camera_on(void) | |||
1933 | } | 2235 | } |
1934 | 2236 | ||
1935 | if (j == 0) { | 2237 | if (j == 0) { |
1936 | printk(KERN_WARNING DRV_PFX "failed to power on camera\n"); | 2238 | pr_warn(DRV_PFX "failed to power on camera\n"); |
1937 | return -ENODEV; | 2239 | return -ENODEV; |
1938 | } | 2240 | } |
1939 | 2241 | ||
@@ -1989,7 +2291,7 @@ int sony_pic_camera_command(int command, u8 value) | |||
1989 | ITERATIONS_SHORT); | 2291 | ITERATIONS_SHORT); |
1990 | break; | 2292 | break; |
1991 | default: | 2293 | default: |
1992 | printk(KERN_ERR DRV_PFX "sony_pic_camera_command invalid: %d\n", | 2294 | pr_err(DRV_PFX "sony_pic_camera_command invalid: %d\n", |
1993 | command); | 2295 | command); |
1994 | break; | 2296 | break; |
1995 | } | 2297 | } |
@@ -2396,7 +2698,7 @@ static int sonypi_compat_init(void) | |||
2396 | error = | 2698 | error = |
2397 | kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); | 2699 | kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); |
2398 | if (error) { | 2700 | if (error) { |
2399 | printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); | 2701 | pr_err(DRV_PFX "kfifo_alloc failed\n"); |
2400 | return error; | 2702 | return error; |
2401 | } | 2703 | } |
2402 | 2704 | ||
@@ -2406,11 +2708,11 @@ static int sonypi_compat_init(void) | |||
2406 | sonypi_misc_device.minor = minor; | 2708 | sonypi_misc_device.minor = minor; |
2407 | error = misc_register(&sonypi_misc_device); | 2709 | error = misc_register(&sonypi_misc_device); |
2408 | if (error) { | 2710 | if (error) { |
2409 | printk(KERN_ERR DRV_PFX "misc_register failed\n"); | 2711 | pr_err(DRV_PFX "misc_register failed\n"); |
2410 | goto err_free_kfifo; | 2712 | goto err_free_kfifo; |
2411 | } | 2713 | } |
2412 | if (minor == -1) | 2714 | if (minor == -1) |
2413 | printk(KERN_INFO DRV_PFX "device allocated minor is %d\n", | 2715 | pr_info(DRV_PFX "device allocated minor is %d\n", |
2414 | sonypi_misc_device.minor); | 2716 | sonypi_misc_device.minor); |
2415 | 2717 | ||
2416 | return 0; | 2718 | return 0; |
@@ -2470,8 +2772,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context) | |||
2470 | } | 2772 | } |
2471 | for (i = 0; i < p->interrupt_count; i++) { | 2773 | for (i = 0; i < p->interrupt_count; i++) { |
2472 | if (!p->interrupts[i]) { | 2774 | if (!p->interrupts[i]) { |
2473 | printk(KERN_WARNING DRV_PFX | 2775 | pr_warn(DRV_PFX "Invalid IRQ %d\n", |
2474 | "Invalid IRQ %d\n", | ||
2475 | p->interrupts[i]); | 2776 | p->interrupts[i]); |
2476 | continue; | 2777 | continue; |
2477 | } | 2778 | } |
@@ -2510,7 +2811,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context) | |||
2510 | ioport->io2.address_length); | 2811 | ioport->io2.address_length); |
2511 | } | 2812 | } |
2512 | else { | 2813 | else { |
2513 | printk(KERN_ERR DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n"); | 2814 | pr_err(DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n"); |
2514 | return AE_ERROR; | 2815 | return AE_ERROR; |
2515 | } | 2816 | } |
2516 | return AE_OK; | 2817 | return AE_OK; |
@@ -2538,7 +2839,7 @@ static int sony_pic_possible_resources(struct acpi_device *device) | |||
2538 | dprintk("Evaluating _STA\n"); | 2839 | dprintk("Evaluating _STA\n"); |
2539 | result = acpi_bus_get_status(device); | 2840 | result = acpi_bus_get_status(device); |
2540 | if (result) { | 2841 | if (result) { |
2541 | printk(KERN_WARNING DRV_PFX "Unable to read status\n"); | 2842 | pr_warn(DRV_PFX "Unable to read status\n"); |
2542 | goto end; | 2843 | goto end; |
2543 | } | 2844 | } |
2544 | 2845 | ||
@@ -2554,8 +2855,7 @@ static int sony_pic_possible_resources(struct acpi_device *device) | |||
2554 | status = acpi_walk_resources(device->handle, METHOD_NAME__PRS, | 2855 | status = acpi_walk_resources(device->handle, METHOD_NAME__PRS, |
2555 | sony_pic_read_possible_resource, &spic_dev); | 2856 | sony_pic_read_possible_resource, &spic_dev); |
2556 | if (ACPI_FAILURE(status)) { | 2857 | if (ACPI_FAILURE(status)) { |
2557 | printk(KERN_WARNING DRV_PFX | 2858 | pr_warn(DRV_PFX "Failure evaluating %s\n", |
2558 | "Failure evaluating %s\n", | ||
2559 | METHOD_NAME__PRS); | 2859 | METHOD_NAME__PRS); |
2560 | result = -ENODEV; | 2860 | result = -ENODEV; |
2561 | } | 2861 | } |
@@ -2669,7 +2969,7 @@ static int sony_pic_enable(struct acpi_device *device, | |||
2669 | 2969 | ||
2670 | /* check for total failure */ | 2970 | /* check for total failure */ |
2671 | if (ACPI_FAILURE(status)) { | 2971 | if (ACPI_FAILURE(status)) { |
2672 | printk(KERN_ERR DRV_PFX "Error evaluating _SRS\n"); | 2972 | pr_err(DRV_PFX "Error evaluating _SRS\n"); |
2673 | result = -ENODEV; | 2973 | result = -ENODEV; |
2674 | goto end; | 2974 | goto end; |
2675 | } | 2975 | } |
@@ -2725,6 +3025,9 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id) | |||
2725 | if (ev == dev->event_types[i].events[j].data) { | 3025 | if (ev == dev->event_types[i].events[j].data) { |
2726 | device_event = | 3026 | device_event = |
2727 | dev->event_types[i].events[j].event; | 3027 | dev->event_types[i].events[j].event; |
3028 | /* some events may require ignoring */ | ||
3029 | if (!device_event) | ||
3030 | return IRQ_HANDLED; | ||
2728 | goto found; | 3031 | goto found; |
2729 | } | 3032 | } |
2730 | } | 3033 | } |
@@ -2744,7 +3047,6 @@ found: | |||
2744 | sony_laptop_report_input_event(device_event); | 3047 | sony_laptop_report_input_event(device_event); |
2745 | acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event); | 3048 | acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event); |
2746 | sonypi_compat_report_event(device_event); | 3049 | sonypi_compat_report_event(device_event); |
2747 | |||
2748 | return IRQ_HANDLED; | 3050 | return IRQ_HANDLED; |
2749 | } | 3051 | } |
2750 | 3052 | ||
@@ -2759,7 +3061,7 @@ static int sony_pic_remove(struct acpi_device *device, int type) | |||
2759 | struct sony_pic_irq *irq, *tmp_irq; | 3061 | struct sony_pic_irq *irq, *tmp_irq; |
2760 | 3062 | ||
2761 | if (sony_pic_disable(device)) { | 3063 | if (sony_pic_disable(device)) { |
2762 | printk(KERN_ERR DRV_PFX "Couldn't disable device.\n"); | 3064 | pr_err(DRV_PFX "Couldn't disable device.\n"); |
2763 | return -ENXIO; | 3065 | return -ENXIO; |
2764 | } | 3066 | } |
2765 | 3067 | ||
@@ -2799,8 +3101,8 @@ static int sony_pic_add(struct acpi_device *device) | |||
2799 | struct sony_pic_ioport *io, *tmp_io; | 3101 | struct sony_pic_ioport *io, *tmp_io; |
2800 | struct sony_pic_irq *irq, *tmp_irq; | 3102 | struct sony_pic_irq *irq, *tmp_irq; |
2801 | 3103 | ||
2802 | printk(KERN_INFO DRV_PFX "%s v%s.\n", | 3104 | pr_info(DRV_PFX "%s v%s.\n", SONY_PIC_DRIVER_NAME, |
2803 | SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION); | 3105 | SONY_LAPTOP_DRIVER_VERSION); |
2804 | 3106 | ||
2805 | spic_dev.acpi_dev = device; | 3107 | spic_dev.acpi_dev = device; |
2806 | strcpy(acpi_device_class(device), "sony/hotkey"); | 3108 | strcpy(acpi_device_class(device), "sony/hotkey"); |
@@ -2810,16 +3112,14 @@ static int sony_pic_add(struct acpi_device *device) | |||
2810 | /* read _PRS resources */ | 3112 | /* read _PRS resources */ |
2811 | result = sony_pic_possible_resources(device); | 3113 | result = sony_pic_possible_resources(device); |
2812 | if (result) { | 3114 | if (result) { |
2813 | printk(KERN_ERR DRV_PFX | 3115 | pr_err(DRV_PFX "Unable to read possible resources.\n"); |
2814 | "Unable to read possible resources.\n"); | ||
2815 | goto err_free_resources; | 3116 | goto err_free_resources; |
2816 | } | 3117 | } |
2817 | 3118 | ||
2818 | /* setup input devices and helper fifo */ | 3119 | /* setup input devices and helper fifo */ |
2819 | result = sony_laptop_setup_input(device); | 3120 | result = sony_laptop_setup_input(device); |
2820 | if (result) { | 3121 | if (result) { |
2821 | printk(KERN_ERR DRV_PFX | 3122 | pr_err(DRV_PFX "Unable to create input devices.\n"); |
2822 | "Unable to create input devices.\n"); | ||
2823 | goto err_free_resources; | 3123 | goto err_free_resources; |
2824 | } | 3124 | } |
2825 | 3125 | ||
@@ -2829,7 +3129,7 @@ static int sony_pic_add(struct acpi_device *device) | |||
2829 | /* request io port */ | 3129 | /* request io port */ |
2830 | list_for_each_entry_reverse(io, &spic_dev.ioports, list) { | 3130 | list_for_each_entry_reverse(io, &spic_dev.ioports, list) { |
2831 | if (request_region(io->io1.minimum, io->io1.address_length, | 3131 | if (request_region(io->io1.minimum, io->io1.address_length, |
2832 | "Sony Programable I/O Device")) { | 3132 | "Sony Programmable I/O Device")) { |
2833 | dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n", | 3133 | dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n", |
2834 | io->io1.minimum, io->io1.maximum, | 3134 | io->io1.minimum, io->io1.maximum, |
2835 | io->io1.address_length); | 3135 | io->io1.address_length); |
@@ -2837,7 +3137,7 @@ static int sony_pic_add(struct acpi_device *device) | |||
2837 | if (io->io2.minimum) { | 3137 | if (io->io2.minimum) { |
2838 | if (request_region(io->io2.minimum, | 3138 | if (request_region(io->io2.minimum, |
2839 | io->io2.address_length, | 3139 | io->io2.address_length, |
2840 | "Sony Programable I/O Device")) { | 3140 | "Sony Programmable I/O Device")) { |
2841 | dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n", | 3141 | dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n", |
2842 | io->io2.minimum, io->io2.maximum, | 3142 | io->io2.minimum, io->io2.maximum, |
2843 | io->io2.address_length); | 3143 | io->io2.address_length); |
@@ -2860,7 +3160,7 @@ static int sony_pic_add(struct acpi_device *device) | |||
2860 | } | 3160 | } |
2861 | } | 3161 | } |
2862 | if (!spic_dev.cur_ioport) { | 3162 | if (!spic_dev.cur_ioport) { |
2863 | printk(KERN_ERR DRV_PFX "Failed to request_region.\n"); | 3163 | pr_err(DRV_PFX "Failed to request_region.\n"); |
2864 | result = -ENODEV; | 3164 | result = -ENODEV; |
2865 | goto err_remove_compat; | 3165 | goto err_remove_compat; |
2866 | } | 3166 | } |
@@ -2880,7 +3180,7 @@ static int sony_pic_add(struct acpi_device *device) | |||
2880 | } | 3180 | } |
2881 | } | 3181 | } |
2882 | if (!spic_dev.cur_irq) { | 3182 | if (!spic_dev.cur_irq) { |
2883 | printk(KERN_ERR DRV_PFX "Failed to request_irq.\n"); | 3183 | pr_err(DRV_PFX "Failed to request_irq.\n"); |
2884 | result = -ENODEV; | 3184 | result = -ENODEV; |
2885 | goto err_release_region; | 3185 | goto err_release_region; |
2886 | } | 3186 | } |
@@ -2888,7 +3188,7 @@ static int sony_pic_add(struct acpi_device *device) | |||
2888 | /* set resource status _SRS */ | 3188 | /* set resource status _SRS */ |
2889 | result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq); | 3189 | result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq); |
2890 | if (result) { | 3190 | if (result) { |
2891 | printk(KERN_ERR DRV_PFX "Couldn't enable device.\n"); | 3191 | pr_err(DRV_PFX "Couldn't enable device.\n"); |
2892 | goto err_free_irq; | 3192 | goto err_free_irq; |
2893 | } | 3193 | } |
2894 | 3194 | ||
@@ -2997,8 +3297,7 @@ static int __init sony_laptop_init(void) | |||
2997 | if (!no_spic && dmi_check_system(sonypi_dmi_table)) { | 3297 | if (!no_spic && dmi_check_system(sonypi_dmi_table)) { |
2998 | result = acpi_bus_register_driver(&sony_pic_driver); | 3298 | result = acpi_bus_register_driver(&sony_pic_driver); |
2999 | if (result) { | 3299 | if (result) { |
3000 | printk(KERN_ERR DRV_PFX | 3300 | pr_err(DRV_PFX "Unable to register SPIC driver."); |
3001 | "Unable to register SPIC driver."); | ||
3002 | goto out; | 3301 | goto out; |
3003 | } | 3302 | } |
3004 | spic_drv_registered = 1; | 3303 | spic_drv_registered = 1; |
@@ -3006,7 +3305,7 @@ static int __init sony_laptop_init(void) | |||
3006 | 3305 | ||
3007 | result = acpi_bus_register_driver(&sony_nc_driver); | 3306 | result = acpi_bus_register_driver(&sony_nc_driver); |
3008 | if (result) { | 3307 | if (result) { |
3009 | printk(KERN_ERR DRV_PFX "Unable to register SNC driver."); | 3308 | pr_err(DRV_PFX "Unable to register SNC driver."); |
3010 | goto out_unregister_pic; | 3309 | goto out_unregister_pic; |
3011 | } | 3310 | } |
3012 | 3311 | ||
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 947bdcaa0ce9..a08561f5349e 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -2407,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, | |||
2407 | * This code is supposed to duplicate the IBM firmware behaviour: | 2407 | * This code is supposed to duplicate the IBM firmware behaviour: |
2408 | * - Pressing MUTE issues mute hotkey message, even when already mute | 2408 | * - Pressing MUTE issues mute hotkey message, even when already mute |
2409 | * - Pressing Volume up/down issues volume up/down hotkey messages, | 2409 | * - Pressing Volume up/down issues volume up/down hotkey messages, |
2410 | * even when already at maximum or minumum volume | 2410 | * even when already at maximum or minimum volume |
2411 | * - The act of unmuting issues volume up/down notification, | 2411 | * - The act of unmuting issues volume up/down notification, |
2412 | * depending which key was used to unmute | 2412 | * depending which key was used to unmute |
2413 | * | 2413 | * |
@@ -2990,7 +2990,7 @@ static void tpacpi_send_radiosw_update(void) | |||
2990 | * rfkill input events, or we will race the rfkill core input | 2990 | * rfkill input events, or we will race the rfkill core input |
2991 | * handler. | 2991 | * handler. |
2992 | * | 2992 | * |
2993 | * tpacpi_inputdev_send_mutex works as a syncronization point | 2993 | * tpacpi_inputdev_send_mutex works as a synchronization point |
2994 | * for the above. | 2994 | * for the above. |
2995 | * | 2995 | * |
2996 | * We optimize to avoid numerous calls to hotkey_get_wlsw. | 2996 | * We optimize to avoid numerous calls to hotkey_get_wlsw. |
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c new file mode 100644 index 000000000000..c1372ed9d2e9 --- /dev/null +++ b/drivers/platform/x86/xo15-ebook.c | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * OLPC XO-1.5 ebook switch driver | ||
3 | * (based on generic ACPI button driver) | ||
4 | * | ||
5 | * Copyright (C) 2009 Paul Fox <pgf@laptop.org> | ||
6 | * Copyright (C) 2010 One Laptop per Child | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or (at | ||
11 | * your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/input.h> | ||
19 | #include <acpi/acpi_bus.h> | ||
20 | #include <acpi/acpi_drivers.h> | ||
21 | |||
22 | #define MODULE_NAME "xo15-ebook" | ||
23 | #define PREFIX MODULE_NAME ": " | ||
24 | |||
25 | #define XO15_EBOOK_CLASS MODULE_NAME | ||
26 | #define XO15_EBOOK_TYPE_UNKNOWN 0x00 | ||
27 | #define XO15_EBOOK_NOTIFY_STATUS 0x80 | ||
28 | |||
29 | #define XO15_EBOOK_SUBCLASS "ebook" | ||
30 | #define XO15_EBOOK_HID "XO15EBK" | ||
31 | #define XO15_EBOOK_DEVICE_NAME "EBook Switch" | ||
32 | |||
33 | ACPI_MODULE_NAME(MODULE_NAME); | ||
34 | |||
35 | MODULE_DESCRIPTION("OLPC XO-1.5 ebook switch driver"); | ||
36 | MODULE_LICENSE("GPL"); | ||
37 | |||
38 | static const struct acpi_device_id ebook_device_ids[] = { | ||
39 | { XO15_EBOOK_HID, 0 }, | ||
40 | { "", 0 }, | ||
41 | }; | ||
42 | MODULE_DEVICE_TABLE(acpi, ebook_device_ids); | ||
43 | |||
44 | struct ebook_switch { | ||
45 | struct input_dev *input; | ||
46 | char phys[32]; /* for input device */ | ||
47 | }; | ||
48 | |||
49 | static int ebook_send_state(struct acpi_device *device) | ||
50 | { | ||
51 | struct ebook_switch *button = acpi_driver_data(device); | ||
52 | unsigned long long state; | ||
53 | acpi_status status; | ||
54 | |||
55 | status = acpi_evaluate_integer(device->handle, "EBK", NULL, &state); | ||
56 | if (ACPI_FAILURE(status)) | ||
57 | return -EIO; | ||
58 | |||
59 | /* input layer checks if event is redundant */ | ||
60 | input_report_switch(button->input, SW_TABLET_MODE, !state); | ||
61 | input_sync(button->input); | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static void ebook_switch_notify(struct acpi_device *device, u32 event) | ||
66 | { | ||
67 | switch (event) { | ||
68 | case ACPI_FIXED_HARDWARE_EVENT: | ||
69 | case XO15_EBOOK_NOTIFY_STATUS: | ||
70 | ebook_send_state(device); | ||
71 | break; | ||
72 | default: | ||
73 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
74 | "Unsupported event [0x%x]\n", event)); | ||
75 | break; | ||
76 | } | ||
77 | } | ||
78 | |||
79 | static int ebook_switch_resume(struct acpi_device *device) | ||
80 | { | ||
81 | return ebook_send_state(device); | ||
82 | } | ||
83 | |||
84 | static int ebook_switch_add(struct acpi_device *device) | ||
85 | { | ||
86 | struct ebook_switch *button; | ||
87 | struct input_dev *input; | ||
88 | const char *hid = acpi_device_hid(device); | ||
89 | char *name, *class; | ||
90 | int error; | ||
91 | |||
92 | button = kzalloc(sizeof(struct ebook_switch), GFP_KERNEL); | ||
93 | if (!button) | ||
94 | return -ENOMEM; | ||
95 | |||
96 | device->driver_data = button; | ||
97 | |||
98 | button->input = input = input_allocate_device(); | ||
99 | if (!input) { | ||
100 | error = -ENOMEM; | ||
101 | goto err_free_button; | ||
102 | } | ||
103 | |||
104 | name = acpi_device_name(device); | ||
105 | class = acpi_device_class(device); | ||
106 | |||
107 | if (strcmp(hid, XO15_EBOOK_HID)) { | ||
108 | printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid); | ||
109 | error = -ENODEV; | ||
110 | goto err_free_input; | ||
111 | } | ||
112 | |||
113 | strcpy(name, XO15_EBOOK_DEVICE_NAME); | ||
114 | sprintf(class, "%s/%s", XO15_EBOOK_CLASS, XO15_EBOOK_SUBCLASS); | ||
115 | |||
116 | snprintf(button->phys, sizeof(button->phys), "%s/button/input0", hid); | ||
117 | |||
118 | input->name = name; | ||
119 | input->phys = button->phys; | ||
120 | input->id.bustype = BUS_HOST; | ||
121 | input->dev.parent = &device->dev; | ||
122 | |||
123 | input->evbit[0] = BIT_MASK(EV_SW); | ||
124 | set_bit(SW_TABLET_MODE, input->swbit); | ||
125 | |||
126 | error = input_register_device(input); | ||
127 | if (error) | ||
128 | goto err_free_input; | ||
129 | |||
130 | ebook_send_state(device); | ||
131 | |||
132 | if (device->wakeup.flags.valid) { | ||
133 | /* Button's GPE is run-wake GPE */ | ||
134 | acpi_enable_gpe(device->wakeup.gpe_device, | ||
135 | device->wakeup.gpe_number); | ||
136 | device_set_wakeup_enable(&device->dev, true); | ||
137 | } | ||
138 | |||
139 | return 0; | ||
140 | |||
141 | err_free_input: | ||
142 | input_free_device(input); | ||
143 | err_free_button: | ||
144 | kfree(button); | ||
145 | return error; | ||
146 | } | ||
147 | |||
148 | static int ebook_switch_remove(struct acpi_device *device, int type) | ||
149 | { | ||
150 | struct ebook_switch *button = acpi_driver_data(device); | ||
151 | |||
152 | input_unregister_device(button->input); | ||
153 | kfree(button); | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | static struct acpi_driver xo15_ebook_driver = { | ||
158 | .name = MODULE_NAME, | ||
159 | .class = XO15_EBOOK_CLASS, | ||
160 | .ids = ebook_device_ids, | ||
161 | .ops = { | ||
162 | .add = ebook_switch_add, | ||
163 | .resume = ebook_switch_resume, | ||
164 | .remove = ebook_switch_remove, | ||
165 | .notify = ebook_switch_notify, | ||
166 | }, | ||
167 | }; | ||
168 | |||
169 | static int __init xo15_ebook_init(void) | ||
170 | { | ||
171 | return acpi_bus_register_driver(&xo15_ebook_driver); | ||
172 | } | ||
173 | |||
174 | static void __exit xo15_ebook_exit(void) | ||
175 | { | ||
176 | acpi_bus_unregister_driver(&xo15_ebook_driver); | ||
177 | } | ||
178 | |||
179 | module_init(xo15_ebook_init); | ||
180 | module_exit(xo15_ebook_exit); | ||
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c index 2a9ab89f83b8..e5ced3a4c1ed 100644 --- a/drivers/power/z2_battery.c +++ b/drivers/power/z2_battery.c | |||
@@ -215,8 +215,8 @@ static int __devinit z2_batt_probe(struct i2c_client *client, | |||
215 | if (ret) | 215 | if (ret) |
216 | goto err2; | 216 | goto err2; |
217 | 217 | ||
218 | set_irq_type(gpio_to_irq(info->charge_gpio), | 218 | irq_set_irq_type(gpio_to_irq(info->charge_gpio), |
219 | IRQ_TYPE_EDGE_BOTH); | 219 | IRQ_TYPE_EDGE_BOTH); |
220 | ret = request_irq(gpio_to_irq(info->charge_gpio), | 220 | ret = request_irq(gpio_to_irq(info->charge_gpio), |
221 | z2_charge_switch_irq, IRQF_DISABLED, | 221 | z2_charge_switch_irq, IRQF_DISABLED, |
222 | "AC Detect", charger); | 222 | "AC Detect", charger); |
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index de75f67f4cc3..b9f29e0d4295 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig | |||
@@ -126,7 +126,7 @@ config REGULATOR_MAX8998 | |||
126 | and S5PC1XX chips to control VCC_CORE and VCC_USIM voltages. | 126 | and S5PC1XX chips to control VCC_CORE and VCC_USIM voltages. |
127 | 127 | ||
128 | config REGULATOR_TWL4030 | 128 | config REGULATOR_TWL4030 |
129 | bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC" | 129 | bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 PMIC" |
130 | depends on TWL4030_CORE | 130 | depends on TWL4030_CORE |
131 | help | 131 | help |
132 | This driver supports the voltage regulators provided by | 132 | This driver supports the voltage regulators provided by |
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index 2dec589a8908..b1d77946e9c6 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c | |||
@@ -206,29 +206,6 @@ static int ab3100_enable_regulator(struct regulator_dev *reg) | |||
206 | return err; | 206 | return err; |
207 | } | 207 | } |
208 | 208 | ||
209 | /* Per-regulator power on delay from spec */ | ||
210 | switch (abreg->regreg) { | ||
211 | case AB3100_LDO_A: /* Fallthrough */ | ||
212 | case AB3100_LDO_C: /* Fallthrough */ | ||
213 | case AB3100_LDO_D: /* Fallthrough */ | ||
214 | case AB3100_LDO_E: /* Fallthrough */ | ||
215 | case AB3100_LDO_H: /* Fallthrough */ | ||
216 | case AB3100_LDO_K: | ||
217 | udelay(200); | ||
218 | break; | ||
219 | case AB3100_LDO_F: | ||
220 | udelay(600); | ||
221 | break; | ||
222 | case AB3100_LDO_G: | ||
223 | udelay(400); | ||
224 | break; | ||
225 | case AB3100_BUCK: | ||
226 | mdelay(1); | ||
227 | break; | ||
228 | default: | ||
229 | break; | ||
230 | } | ||
231 | |||
232 | return 0; | 209 | return 0; |
233 | } | 210 | } |
234 | 211 | ||
@@ -450,11 +427,37 @@ static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg) | |||
450 | return abreg->plfdata->external_voltage; | 427 | return abreg->plfdata->external_voltage; |
451 | } | 428 | } |
452 | 429 | ||
430 | static int ab3100_enable_time_regulator(struct regulator_dev *reg) | ||
431 | { | ||
432 | struct ab3100_regulator *abreg = reg->reg_data; | ||
433 | |||
434 | /* Per-regulator power on delay from spec */ | ||
435 | switch (abreg->regreg) { | ||
436 | case AB3100_LDO_A: /* Fallthrough */ | ||
437 | case AB3100_LDO_C: /* Fallthrough */ | ||
438 | case AB3100_LDO_D: /* Fallthrough */ | ||
439 | case AB3100_LDO_E: /* Fallthrough */ | ||
440 | case AB3100_LDO_H: /* Fallthrough */ | ||
441 | case AB3100_LDO_K: | ||
442 | return 200; | ||
443 | case AB3100_LDO_F: | ||
444 | return 600; | ||
445 | case AB3100_LDO_G: | ||
446 | return 400; | ||
447 | case AB3100_BUCK: | ||
448 | return 1000; | ||
449 | default: | ||
450 | break; | ||
451 | } | ||
452 | return 0; | ||
453 | } | ||
454 | |||
453 | static struct regulator_ops regulator_ops_fixed = { | 455 | static struct regulator_ops regulator_ops_fixed = { |
454 | .enable = ab3100_enable_regulator, | 456 | .enable = ab3100_enable_regulator, |
455 | .disable = ab3100_disable_regulator, | 457 | .disable = ab3100_disable_regulator, |
456 | .is_enabled = ab3100_is_enabled_regulator, | 458 | .is_enabled = ab3100_is_enabled_regulator, |
457 | .get_voltage = ab3100_get_voltage_regulator, | 459 | .get_voltage = ab3100_get_voltage_regulator, |
460 | .enable_time = ab3100_enable_time_regulator, | ||
458 | }; | 461 | }; |
459 | 462 | ||
460 | static struct regulator_ops regulator_ops_variable = { | 463 | static struct regulator_ops regulator_ops_variable = { |
@@ -464,6 +467,7 @@ static struct regulator_ops regulator_ops_variable = { | |||
464 | .get_voltage = ab3100_get_voltage_regulator, | 467 | .get_voltage = ab3100_get_voltage_regulator, |
465 | .set_voltage = ab3100_set_voltage_regulator, | 468 | .set_voltage = ab3100_set_voltage_regulator, |
466 | .list_voltage = ab3100_list_voltage_regulator, | 469 | .list_voltage = ab3100_list_voltage_regulator, |
470 | .enable_time = ab3100_enable_time_regulator, | ||
467 | }; | 471 | }; |
468 | 472 | ||
469 | static struct regulator_ops regulator_ops_variable_sleepable = { | 473 | static struct regulator_ops regulator_ops_variable_sleepable = { |
@@ -474,6 +478,7 @@ static struct regulator_ops regulator_ops_variable_sleepable = { | |||
474 | .set_voltage = ab3100_set_voltage_regulator, | 478 | .set_voltage = ab3100_set_voltage_regulator, |
475 | .set_suspend_voltage = ab3100_set_suspend_voltage_regulator, | 479 | .set_suspend_voltage = ab3100_set_suspend_voltage_regulator, |
476 | .list_voltage = ab3100_list_voltage_regulator, | 480 | .list_voltage = ab3100_list_voltage_regulator, |
481 | .enable_time = ab3100_enable_time_regulator, | ||
477 | }; | 482 | }; |
478 | 483 | ||
479 | /* | 484 | /* |
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c index d9a052c53aec..02f3c2333c83 100644 --- a/drivers/regulator/ab8500.c +++ b/drivers/regulator/ab8500.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * AB8500 peripheral regulators | 9 | * AB8500 peripheral regulators |
10 | * | 10 | * |
11 | * AB8500 supports the following regulators: | 11 | * AB8500 supports the following regulators: |
12 | * VAUX1/2/3, VINTCORE, VTVOUT, VAUDIO, VAMIC1/2, VDMIC, VANA | 12 | * VAUX1/2/3, VINTCORE, VTVOUT, VUSB, VAUDIO, VAMIC1/2, VDMIC, VANA |
13 | */ | 13 | */ |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
@@ -38,6 +38,7 @@ | |||
38 | * @voltage_mask: mask to control regulator voltage | 38 | * @voltage_mask: mask to control regulator voltage |
39 | * @voltages: supported voltage table | 39 | * @voltages: supported voltage table |
40 | * @voltages_len: number of supported voltages for the regulator | 40 | * @voltages_len: number of supported voltages for the regulator |
41 | * @delay: startup/set voltage delay in us | ||
41 | */ | 42 | */ |
42 | struct ab8500_regulator_info { | 43 | struct ab8500_regulator_info { |
43 | struct device *dev; | 44 | struct device *dev; |
@@ -55,6 +56,7 @@ struct ab8500_regulator_info { | |||
55 | u8 voltage_mask; | 56 | u8 voltage_mask; |
56 | int const *voltages; | 57 | int const *voltages; |
57 | int voltages_len; | 58 | int voltages_len; |
59 | unsigned int delay; | ||
58 | }; | 60 | }; |
59 | 61 | ||
60 | /* voltage tables for the vauxn/vintcore supplies */ | 62 | /* voltage tables for the vauxn/vintcore supplies */ |
@@ -290,6 +292,29 @@ static int ab8500_regulator_set_voltage(struct regulator_dev *rdev, | |||
290 | return ret; | 292 | return ret; |
291 | } | 293 | } |
292 | 294 | ||
295 | static int ab8500_regulator_enable_time(struct regulator_dev *rdev) | ||
296 | { | ||
297 | struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); | ||
298 | |||
299 | return info->delay; | ||
300 | } | ||
301 | |||
302 | static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev, | ||
303 | unsigned int old_sel, | ||
304 | unsigned int new_sel) | ||
305 | { | ||
306 | struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); | ||
307 | int ret; | ||
308 | |||
309 | /* If the regulator isn't on, it won't take time here */ | ||
310 | ret = ab8500_regulator_is_enabled(rdev); | ||
311 | if (ret < 0) | ||
312 | return ret; | ||
313 | if (!ret) | ||
314 | return 0; | ||
315 | return info->delay; | ||
316 | } | ||
317 | |||
293 | static struct regulator_ops ab8500_regulator_ops = { | 318 | static struct regulator_ops ab8500_regulator_ops = { |
294 | .enable = ab8500_regulator_enable, | 319 | .enable = ab8500_regulator_enable, |
295 | .disable = ab8500_regulator_disable, | 320 | .disable = ab8500_regulator_disable, |
@@ -297,6 +322,8 @@ static struct regulator_ops ab8500_regulator_ops = { | |||
297 | .get_voltage = ab8500_regulator_get_voltage, | 322 | .get_voltage = ab8500_regulator_get_voltage, |
298 | .set_voltage = ab8500_regulator_set_voltage, | 323 | .set_voltage = ab8500_regulator_set_voltage, |
299 | .list_voltage = ab8500_list_voltage, | 324 | .list_voltage = ab8500_list_voltage, |
325 | .enable_time = ab8500_regulator_enable_time, | ||
326 | .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel, | ||
300 | }; | 327 | }; |
301 | 328 | ||
302 | static int ab8500_fixed_get_voltage(struct regulator_dev *rdev) | 329 | static int ab8500_fixed_get_voltage(struct regulator_dev *rdev) |
@@ -317,6 +344,8 @@ static struct regulator_ops ab8500_regulator_fixed_ops = { | |||
317 | .is_enabled = ab8500_regulator_is_enabled, | 344 | .is_enabled = ab8500_regulator_is_enabled, |
318 | .get_voltage = ab8500_fixed_get_voltage, | 345 | .get_voltage = ab8500_fixed_get_voltage, |
319 | .list_voltage = ab8500_list_voltage, | 346 | .list_voltage = ab8500_list_voltage, |
347 | .enable_time = ab8500_regulator_enable_time, | ||
348 | .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel, | ||
320 | }; | 349 | }; |
321 | 350 | ||
322 | static struct ab8500_regulator_info | 351 | static struct ab8500_regulator_info |
@@ -426,12 +455,28 @@ static struct ab8500_regulator_info | |||
426 | .owner = THIS_MODULE, | 455 | .owner = THIS_MODULE, |
427 | .n_voltages = 1, | 456 | .n_voltages = 1, |
428 | }, | 457 | }, |
458 | .delay = 10000, | ||
429 | .fixed_uV = 2000000, | 459 | .fixed_uV = 2000000, |
430 | .update_bank = 0x03, | 460 | .update_bank = 0x03, |
431 | .update_reg = 0x80, | 461 | .update_reg = 0x80, |
432 | .update_mask = 0x82, | 462 | .update_mask = 0x82, |
433 | .update_val_enable = 0x02, | 463 | .update_val_enable = 0x02, |
434 | }, | 464 | }, |
465 | [AB8500_LDO_USB] = { | ||
466 | .desc = { | ||
467 | .name = "LDO-USB", | ||
468 | .ops = &ab8500_regulator_fixed_ops, | ||
469 | .type = REGULATOR_VOLTAGE, | ||
470 | .id = AB8500_LDO_USB, | ||
471 | .owner = THIS_MODULE, | ||
472 | .n_voltages = 1, | ||
473 | }, | ||
474 | .fixed_uV = 3300000, | ||
475 | .update_bank = 0x03, | ||
476 | .update_reg = 0x82, | ||
477 | .update_mask = 0x03, | ||
478 | .update_val_enable = 0x01, | ||
479 | }, | ||
435 | [AB8500_LDO_AUDIO] = { | 480 | [AB8500_LDO_AUDIO] = { |
436 | .desc = { | 481 | .desc = { |
437 | .name = "LDO-AUDIO", | 482 | .name = "LDO-AUDIO", |
@@ -511,6 +556,186 @@ static struct ab8500_regulator_info | |||
511 | 556 | ||
512 | }; | 557 | }; |
513 | 558 | ||
559 | struct ab8500_reg_init { | ||
560 | u8 bank; | ||
561 | u8 addr; | ||
562 | u8 mask; | ||
563 | }; | ||
564 | |||
565 | #define REG_INIT(_id, _bank, _addr, _mask) \ | ||
566 | [_id] = { \ | ||
567 | .bank = _bank, \ | ||
568 | .addr = _addr, \ | ||
569 | .mask = _mask, \ | ||
570 | } | ||
571 | |||
572 | static struct ab8500_reg_init ab8500_reg_init[] = { | ||
573 | /* | ||
574 | * 0x30, VanaRequestCtrl | ||
575 | * 0x0C, VpllRequestCtrl | ||
576 | * 0xc0, VextSupply1RequestCtrl | ||
577 | */ | ||
578 | REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xfc), | ||
579 | /* | ||
580 | * 0x03, VextSupply2RequestCtrl | ||
581 | * 0x0c, VextSupply3RequestCtrl | ||
582 | * 0x30, Vaux1RequestCtrl | ||
583 | * 0xc0, Vaux2RequestCtrl | ||
584 | */ | ||
585 | REG_INIT(AB8500_REGUREQUESTCTRL3, 0x03, 0x05, 0xff), | ||
586 | /* | ||
587 | * 0x03, Vaux3RequestCtrl | ||
588 | * 0x04, SwHPReq | ||
589 | */ | ||
590 | REG_INIT(AB8500_REGUREQUESTCTRL4, 0x03, 0x06, 0x07), | ||
591 | /* | ||
592 | * 0x08, VanaSysClkReq1HPValid | ||
593 | * 0x20, Vaux1SysClkReq1HPValid | ||
594 | * 0x40, Vaux2SysClkReq1HPValid | ||
595 | * 0x80, Vaux3SysClkReq1HPValid | ||
596 | */ | ||
597 | REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xe8), | ||
598 | /* | ||
599 | * 0x10, VextSupply1SysClkReq1HPValid | ||
600 | * 0x20, VextSupply2SysClkReq1HPValid | ||
601 | * 0x40, VextSupply3SysClkReq1HPValid | ||
602 | */ | ||
603 | REG_INIT(AB8500_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x70), | ||
604 | /* | ||
605 | * 0x08, VanaHwHPReq1Valid | ||
606 | * 0x20, Vaux1HwHPReq1Valid | ||
607 | * 0x40, Vaux2HwHPReq1Valid | ||
608 | * 0x80, Vaux3HwHPReq1Valid | ||
609 | */ | ||
610 | REG_INIT(AB8500_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xe8), | ||
611 | /* | ||
612 | * 0x01, VextSupply1HwHPReq1Valid | ||
613 | * 0x02, VextSupply2HwHPReq1Valid | ||
614 | * 0x04, VextSupply3HwHPReq1Valid | ||
615 | */ | ||
616 | REG_INIT(AB8500_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x07), | ||
617 | /* | ||
618 | * 0x08, VanaHwHPReq2Valid | ||
619 | * 0x20, Vaux1HwHPReq2Valid | ||
620 | * 0x40, Vaux2HwHPReq2Valid | ||
621 | * 0x80, Vaux3HwHPReq2Valid | ||
622 | */ | ||
623 | REG_INIT(AB8500_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xe8), | ||
624 | /* | ||
625 | * 0x01, VextSupply1HwHPReq2Valid | ||
626 | * 0x02, VextSupply2HwHPReq2Valid | ||
627 | * 0x04, VextSupply3HwHPReq2Valid | ||
628 | */ | ||
629 | REG_INIT(AB8500_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x07), | ||
630 | /* | ||
631 | * 0x20, VanaSwHPReqValid | ||
632 | * 0x80, Vaux1SwHPReqValid | ||
633 | */ | ||
634 | REG_INIT(AB8500_REGUSWHPREQVALID1, 0x03, 0x0d, 0xa0), | ||
635 | /* | ||
636 | * 0x01, Vaux2SwHPReqValid | ||
637 | * 0x02, Vaux3SwHPReqValid | ||
638 | * 0x04, VextSupply1SwHPReqValid | ||
639 | * 0x08, VextSupply2SwHPReqValid | ||
640 | * 0x10, VextSupply3SwHPReqValid | ||
641 | */ | ||
642 | REG_INIT(AB8500_REGUSWHPREQVALID2, 0x03, 0x0e, 0x1f), | ||
643 | /* | ||
644 | * 0x02, SysClkReq2Valid1 | ||
645 | * ... | ||
646 | * 0x80, SysClkReq8Valid1 | ||
647 | */ | ||
648 | REG_INIT(AB8500_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe), | ||
649 | /* | ||
650 | * 0x02, SysClkReq2Valid2 | ||
651 | * ... | ||
652 | * 0x80, SysClkReq8Valid2 | ||
653 | */ | ||
654 | REG_INIT(AB8500_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe), | ||
655 | /* | ||
656 | * 0x02, VTVoutEna | ||
657 | * 0x04, Vintcore12Ena | ||
658 | * 0x38, Vintcore12Sel | ||
659 | * 0x40, Vintcore12LP | ||
660 | * 0x80, VTVoutLP | ||
661 | */ | ||
662 | REG_INIT(AB8500_REGUMISC1, 0x03, 0x80, 0xfe), | ||
663 | /* | ||
664 | * 0x02, VaudioEna | ||
665 | * 0x04, VdmicEna | ||
666 | * 0x08, Vamic1Ena | ||
667 | * 0x10, Vamic2Ena | ||
668 | */ | ||
669 | REG_INIT(AB8500_VAUDIOSUPPLY, 0x03, 0x83, 0x1e), | ||
670 | /* | ||
671 | * 0x01, Vamic1_dzout | ||
672 | * 0x02, Vamic2_dzout | ||
673 | */ | ||
674 | REG_INIT(AB8500_REGUCTRL1VAMIC, 0x03, 0x84, 0x03), | ||
675 | /* | ||
676 | * 0x0c, VanaRegu | ||
677 | * 0x03, VpllRegu | ||
678 | */ | ||
679 | REG_INIT(AB8500_VPLLVANAREGU, 0x04, 0x06, 0x0f), | ||
680 | /* | ||
681 | * 0x01, VrefDDREna | ||
682 | * 0x02, VrefDDRSleepMode | ||
683 | */ | ||
684 | REG_INIT(AB8500_VREFDDR, 0x04, 0x07, 0x03), | ||
685 | /* | ||
686 | * 0x03, VextSupply1Regu | ||
687 | * 0x0c, VextSupply2Regu | ||
688 | * 0x30, VextSupply3Regu | ||
689 | * 0x40, ExtSupply2Bypass | ||
690 | * 0x80, ExtSupply3Bypass | ||
691 | */ | ||
692 | REG_INIT(AB8500_EXTSUPPLYREGU, 0x04, 0x08, 0xff), | ||
693 | /* | ||
694 | * 0x03, Vaux1Regu | ||
695 | * 0x0c, Vaux2Regu | ||
696 | */ | ||
697 | REG_INIT(AB8500_VAUX12REGU, 0x04, 0x09, 0x0f), | ||
698 | /* | ||
699 | * 0x03, Vaux3Regu | ||
700 | */ | ||
701 | REG_INIT(AB8500_VRF1VAUX3REGU, 0x04, 0x0a, 0x03), | ||
702 | /* | ||
703 | * 0x3f, Vsmps1Sel1 | ||
704 | */ | ||
705 | REG_INIT(AB8500_VSMPS1SEL1, 0x04, 0x13, 0x3f), | ||
706 | /* | ||
707 | * 0x0f, Vaux1Sel | ||
708 | */ | ||
709 | REG_INIT(AB8500_VAUX1SEL, 0x04, 0x1f, 0x0f), | ||
710 | /* | ||
711 | * 0x0f, Vaux2Sel | ||
712 | */ | ||
713 | REG_INIT(AB8500_VAUX2SEL, 0x04, 0x20, 0x0f), | ||
714 | /* | ||
715 | * 0x07, Vaux3Sel | ||
716 | */ | ||
717 | REG_INIT(AB8500_VRF1VAUX3SEL, 0x04, 0x21, 0x07), | ||
718 | /* | ||
719 | * 0x01, VextSupply12LP | ||
720 | */ | ||
721 | REG_INIT(AB8500_REGUCTRL2SPARE, 0x04, 0x22, 0x01), | ||
722 | /* | ||
723 | * 0x04, Vaux1Disch | ||
724 | * 0x08, Vaux2Disch | ||
725 | * 0x10, Vaux3Disch | ||
726 | * 0x20, Vintcore12Disch | ||
727 | * 0x40, VTVoutDisch | ||
728 | * 0x80, VaudioDisch | ||
729 | */ | ||
730 | REG_INIT(AB8500_REGUCTRLDISCH, 0x04, 0x43, 0xfc), | ||
731 | /* | ||
732 | * 0x02, VanaDisch | ||
733 | * 0x04, VdmicPullDownEna | ||
734 | * 0x10, VdmicDisch | ||
735 | */ | ||
736 | REG_INIT(AB8500_REGUCTRLDISCH2, 0x04, 0x44, 0x16), | ||
737 | }; | ||
738 | |||
514 | static __devinit int ab8500_regulator_probe(struct platform_device *pdev) | 739 | static __devinit int ab8500_regulator_probe(struct platform_device *pdev) |
515 | { | 740 | { |
516 | struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); | 741 | struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); |
@@ -529,10 +754,51 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev) | |||
529 | 754 | ||
530 | /* make sure the platform data has the correct size */ | 755 | /* make sure the platform data has the correct size */ |
531 | if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) { | 756 | if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) { |
532 | dev_err(&pdev->dev, "platform configuration error\n"); | 757 | dev_err(&pdev->dev, "Configuration error: size mismatch.\n"); |
533 | return -EINVAL; | 758 | return -EINVAL; |
534 | } | 759 | } |
535 | 760 | ||
761 | /* initialize registers */ | ||
762 | for (i = 0; i < pdata->num_regulator_reg_init; i++) { | ||
763 | int id; | ||
764 | u8 value; | ||
765 | |||
766 | id = pdata->regulator_reg_init[i].id; | ||
767 | value = pdata->regulator_reg_init[i].value; | ||
768 | |||
769 | /* check for configuration errors */ | ||
770 | if (id >= AB8500_NUM_REGULATOR_REGISTERS) { | ||
771 | dev_err(&pdev->dev, | ||
772 | "Configuration error: id outside range.\n"); | ||
773 | return -EINVAL; | ||
774 | } | ||
775 | if (value & ~ab8500_reg_init[id].mask) { | ||
776 | dev_err(&pdev->dev, | ||
777 | "Configuration error: value outside mask.\n"); | ||
778 | return -EINVAL; | ||
779 | } | ||
780 | |||
781 | /* initialize register */ | ||
782 | err = abx500_mask_and_set_register_interruptible(&pdev->dev, | ||
783 | ab8500_reg_init[id].bank, | ||
784 | ab8500_reg_init[id].addr, | ||
785 | ab8500_reg_init[id].mask, | ||
786 | value); | ||
787 | if (err < 0) { | ||
788 | dev_err(&pdev->dev, | ||
789 | "Failed to initialize 0x%02x, 0x%02x.\n", | ||
790 | ab8500_reg_init[id].bank, | ||
791 | ab8500_reg_init[id].addr); | ||
792 | return err; | ||
793 | } | ||
794 | dev_vdbg(&pdev->dev, | ||
795 | " init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n", | ||
796 | ab8500_reg_init[id].bank, | ||
797 | ab8500_reg_init[id].addr, | ||
798 | ab8500_reg_init[id].mask, | ||
799 | value); | ||
800 | } | ||
801 | |||
536 | /* register all regulators */ | 802 | /* register all regulators */ |
537 | for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { | 803 | for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { |
538 | struct ab8500_regulator_info *info = NULL; | 804 | struct ab8500_regulator_info *info = NULL; |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 9fa20957847d..3ffc6979d164 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -1629,6 +1629,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, | |||
1629 | int min_uV, int max_uV) | 1629 | int min_uV, int max_uV) |
1630 | { | 1630 | { |
1631 | int ret; | 1631 | int ret; |
1632 | int delay = 0; | ||
1632 | unsigned int selector; | 1633 | unsigned int selector; |
1633 | 1634 | ||
1634 | trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV); | 1635 | trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV); |
@@ -1662,6 +1663,22 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, | |||
1662 | } | 1663 | } |
1663 | } | 1664 | } |
1664 | 1665 | ||
1666 | /* | ||
1667 | * If we can't obtain the old selector there is not enough | ||
1668 | * info to call set_voltage_time_sel(). | ||
1669 | */ | ||
1670 | if (rdev->desc->ops->set_voltage_time_sel && | ||
1671 | rdev->desc->ops->get_voltage_sel) { | ||
1672 | unsigned int old_selector = 0; | ||
1673 | |||
1674 | ret = rdev->desc->ops->get_voltage_sel(rdev); | ||
1675 | if (ret < 0) | ||
1676 | return ret; | ||
1677 | old_selector = ret; | ||
1678 | delay = rdev->desc->ops->set_voltage_time_sel(rdev, | ||
1679 | old_selector, selector); | ||
1680 | } | ||
1681 | |||
1665 | if (best_val != INT_MAX) { | 1682 | if (best_val != INT_MAX) { |
1666 | ret = rdev->desc->ops->set_voltage_sel(rdev, selector); | 1683 | ret = rdev->desc->ops->set_voltage_sel(rdev, selector); |
1667 | selector = best_val; | 1684 | selector = best_val; |
@@ -1672,6 +1689,14 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, | |||
1672 | ret = -EINVAL; | 1689 | ret = -EINVAL; |
1673 | } | 1690 | } |
1674 | 1691 | ||
1692 | /* Insert any necessary delays */ | ||
1693 | if (delay >= 1000) { | ||
1694 | mdelay(delay / 1000); | ||
1695 | udelay(delay % 1000); | ||
1696 | } else if (delay) { | ||
1697 | udelay(delay); | ||
1698 | } | ||
1699 | |||
1675 | if (ret == 0) | 1700 | if (ret == 0) |
1676 | _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, | 1701 | _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, |
1677 | NULL); | 1702 | NULL); |
@@ -1740,6 +1765,51 @@ out: | |||
1740 | EXPORT_SYMBOL_GPL(regulator_set_voltage); | 1765 | EXPORT_SYMBOL_GPL(regulator_set_voltage); |
1741 | 1766 | ||
1742 | /** | 1767 | /** |
1768 | * regulator_set_voltage_time - get raise/fall time | ||
1769 | * @regulator: regulator source | ||
1770 | * @old_uV: starting voltage in microvolts | ||
1771 | * @new_uV: target voltage in microvolts | ||
1772 | * | ||
1773 | * Provided with the starting and ending voltage, this function attempts to | ||
1774 | * calculate the time in microseconds required to rise or fall to this new | ||
1775 | * voltage. | ||
1776 | */ | ||
1777 | int regulator_set_voltage_time(struct regulator *regulator, | ||
1778 | int old_uV, int new_uV) | ||
1779 | { | ||
1780 | struct regulator_dev *rdev = regulator->rdev; | ||
1781 | struct regulator_ops *ops = rdev->desc->ops; | ||
1782 | int old_sel = -1; | ||
1783 | int new_sel = -1; | ||
1784 | int voltage; | ||
1785 | int i; | ||
1786 | |||
1787 | /* Currently requires operations to do this */ | ||
1788 | if (!ops->list_voltage || !ops->set_voltage_time_sel | ||
1789 | || !rdev->desc->n_voltages) | ||
1790 | return -EINVAL; | ||
1791 | |||
1792 | for (i = 0; i < rdev->desc->n_voltages; i++) { | ||
1793 | /* We only look for exact voltage matches here */ | ||
1794 | voltage = regulator_list_voltage(regulator, i); | ||
1795 | if (voltage < 0) | ||
1796 | return -EINVAL; | ||
1797 | if (voltage == 0) | ||
1798 | continue; | ||
1799 | if (voltage == old_uV) | ||
1800 | old_sel = i; | ||
1801 | if (voltage == new_uV) | ||
1802 | new_sel = i; | ||
1803 | } | ||
1804 | |||
1805 | if (old_sel < 0 || new_sel < 0) | ||
1806 | return -EINVAL; | ||
1807 | |||
1808 | return ops->set_voltage_time_sel(rdev, old_sel, new_sel); | ||
1809 | } | ||
1810 | EXPORT_SYMBOL_GPL(regulator_set_voltage_time); | ||
1811 | |||
1812 | /** | ||
1743 | * regulator_sync_voltage - re-apply last regulator output voltage | 1813 | * regulator_sync_voltage - re-apply last regulator output voltage |
1744 | * @regulator: regulator source | 1814 | * @regulator: regulator source |
1745 | * | 1815 | * |
@@ -2565,8 +2635,11 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, | |||
2565 | init_data->consumer_supplies[i].dev, | 2635 | init_data->consumer_supplies[i].dev, |
2566 | init_data->consumer_supplies[i].dev_name, | 2636 | init_data->consumer_supplies[i].dev_name, |
2567 | init_data->consumer_supplies[i].supply); | 2637 | init_data->consumer_supplies[i].supply); |
2568 | if (ret < 0) | 2638 | if (ret < 0) { |
2639 | dev_err(dev, "Failed to set supply %s\n", | ||
2640 | init_data->consumer_supplies[i].supply); | ||
2569 | goto unset_supplies; | 2641 | goto unset_supplies; |
2642 | } | ||
2570 | } | 2643 | } |
2571 | 2644 | ||
2572 | list_add(&rdev->list, ®ulator_list); | 2645 | list_add(&rdev->list, ®ulator_list); |
@@ -2653,6 +2726,47 @@ out: | |||
2653 | EXPORT_SYMBOL_GPL(regulator_suspend_prepare); | 2726 | EXPORT_SYMBOL_GPL(regulator_suspend_prepare); |
2654 | 2727 | ||
2655 | /** | 2728 | /** |
2729 | * regulator_suspend_finish - resume regulators from system wide suspend | ||
2730 | * | ||
2731 | * Turn on regulators that might be turned off by regulator_suspend_prepare | ||
2732 | * and that should be turned on according to the regulators properties. | ||
2733 | */ | ||
2734 | int regulator_suspend_finish(void) | ||
2735 | { | ||
2736 | struct regulator_dev *rdev; | ||
2737 | int ret = 0, error; | ||
2738 | |||
2739 | mutex_lock(®ulator_list_mutex); | ||
2740 | list_for_each_entry(rdev, ®ulator_list, list) { | ||
2741 | struct regulator_ops *ops = rdev->desc->ops; | ||
2742 | |||
2743 | mutex_lock(&rdev->mutex); | ||
2744 | if ((rdev->use_count > 0 || rdev->constraints->always_on) && | ||
2745 | ops->enable) { | ||
2746 | error = ops->enable(rdev); | ||
2747 | if (error) | ||
2748 | ret = error; | ||
2749 | } else { | ||
2750 | if (!has_full_constraints) | ||
2751 | goto unlock; | ||
2752 | if (!ops->disable) | ||
2753 | goto unlock; | ||
2754 | if (ops->is_enabled && !ops->is_enabled(rdev)) | ||
2755 | goto unlock; | ||
2756 | |||
2757 | error = ops->disable(rdev); | ||
2758 | if (error) | ||
2759 | ret = error; | ||
2760 | } | ||
2761 | unlock: | ||
2762 | mutex_unlock(&rdev->mutex); | ||
2763 | } | ||
2764 | mutex_unlock(®ulator_list_mutex); | ||
2765 | return ret; | ||
2766 | } | ||
2767 | EXPORT_SYMBOL_GPL(regulator_suspend_finish); | ||
2768 | |||
2769 | /** | ||
2656 | * regulator_has_full_constraints - the system has fully specified constraints | 2770 | * regulator_has_full_constraints - the system has fully specified constraints |
2657 | * | 2771 | * |
2658 | * Calling this function will cause the regulator API to disable all | 2772 | * Calling this function will cause the regulator API to disable all |
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c index 01ef7e9903bb..77e0cfb30b23 100644 --- a/drivers/regulator/max8997.c +++ b/drivers/regulator/max8997.c | |||
@@ -1185,6 +1185,7 @@ static const struct platform_device_id max8997_pmic_id[] = { | |||
1185 | { "max8997-pmic", 0}, | 1185 | { "max8997-pmic", 0}, |
1186 | { }, | 1186 | { }, |
1187 | }; | 1187 | }; |
1188 | MODULE_DEVICE_TABLE(platform, max8997_pmic_id); | ||
1188 | 1189 | ||
1189 | static struct platform_driver max8997_pmic_driver = { | 1190 | static struct platform_driver max8997_pmic_driver = { |
1190 | .driver = { | 1191 | .driver = { |
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c index 0ec49ca527a8..43410266f993 100644 --- a/drivers/regulator/max8998.c +++ b/drivers/regulator/max8998.c | |||
@@ -887,6 +887,7 @@ static const struct platform_device_id max8998_pmic_id[] = { | |||
887 | { "lp3974-pmic", TYPE_LP3974 }, | 887 | { "lp3974-pmic", TYPE_LP3974 }, |
888 | { } | 888 | { } |
889 | }; | 889 | }; |
890 | MODULE_DEVICE_TABLE(platform, max8998_pmic_id); | ||
890 | 891 | ||
891 | static struct platform_driver max8998_pmic_driver = { | 892 | static struct platform_driver max8998_pmic_driver = { |
892 | .driver = { | 893 | .driver = { |
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c index 176a6be5a8ce..9166aa0a9df7 100644 --- a/drivers/regulator/tps6524x-regulator.c +++ b/drivers/regulator/tps6524x-regulator.c | |||
@@ -596,7 +596,7 @@ static struct regulator_ops regulator_ops = { | |||
596 | .get_current_limit = get_current_limit, | 596 | .get_current_limit = get_current_limit, |
597 | }; | 597 | }; |
598 | 598 | ||
599 | static int __devexit pmic_remove(struct spi_device *spi) | 599 | static int pmic_remove(struct spi_device *spi) |
600 | { | 600 | { |
601 | struct tps6524x *hw = spi_get_drvdata(spi); | 601 | struct tps6524x *hw = spi_get_drvdata(spi); |
602 | int i; | 602 | int i; |
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index 06df898842c0..e93453b1b978 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c | |||
@@ -565,9 +565,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev) | |||
565 | } | 565 | } |
566 | 566 | ||
567 | irq = platform_get_irq_byname(pdev, "UV"); | 567 | irq = platform_get_irq_byname(pdev, "UV"); |
568 | ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq, | 568 | ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq, |
569 | IRQF_TRIGGER_RISING, dcdc->name, | 569 | IRQF_TRIGGER_RISING, dcdc->name, dcdc); |
570 | dcdc); | ||
571 | if (ret != 0) { | 570 | if (ret != 0) { |
572 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", | 571 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", |
573 | irq, ret); | 572 | irq, ret); |
@@ -575,9 +574,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev) | |||
575 | } | 574 | } |
576 | 575 | ||
577 | irq = platform_get_irq_byname(pdev, "HC"); | 576 | irq = platform_get_irq_byname(pdev, "HC"); |
578 | ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_oc_irq, | 577 | ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq, |
579 | IRQF_TRIGGER_RISING, dcdc->name, | 578 | IRQF_TRIGGER_RISING, dcdc->name, dcdc); |
580 | dcdc); | ||
581 | if (ret != 0) { | 579 | if (ret != 0) { |
582 | dev_err(&pdev->dev, "Failed to request HC IRQ %d: %d\n", | 580 | dev_err(&pdev->dev, "Failed to request HC IRQ %d: %d\n", |
583 | irq, ret); | 581 | irq, ret); |
@@ -589,7 +587,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev) | |||
589 | return 0; | 587 | return 0; |
590 | 588 | ||
591 | err_uv: | 589 | err_uv: |
592 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); | 590 | free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); |
593 | err_regulator: | 591 | err_regulator: |
594 | regulator_unregister(dcdc->regulator); | 592 | regulator_unregister(dcdc->regulator); |
595 | err: | 593 | err: |
@@ -606,8 +604,8 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev) | |||
606 | 604 | ||
607 | platform_set_drvdata(pdev, NULL); | 605 | platform_set_drvdata(pdev, NULL); |
608 | 606 | ||
609 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc); | 607 | free_irq(platform_get_irq_byname(pdev, "HC"), dcdc); |
610 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); | 608 | free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); |
611 | regulator_unregister(dcdc->regulator); | 609 | regulator_unregister(dcdc->regulator); |
612 | if (dcdc->dvs_gpio) | 610 | if (dcdc->dvs_gpio) |
613 | gpio_free(dcdc->dvs_gpio); | 611 | gpio_free(dcdc->dvs_gpio); |
@@ -756,9 +754,8 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev) | |||
756 | } | 754 | } |
757 | 755 | ||
758 | irq = platform_get_irq_byname(pdev, "UV"); | 756 | irq = platform_get_irq_byname(pdev, "UV"); |
759 | ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq, | 757 | ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq, |
760 | IRQF_TRIGGER_RISING, dcdc->name, | 758 | IRQF_TRIGGER_RISING, dcdc->name, dcdc); |
761 | dcdc); | ||
762 | if (ret != 0) { | 759 | if (ret != 0) { |
763 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", | 760 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", |
764 | irq, ret); | 761 | irq, ret); |
@@ -783,7 +780,7 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev) | |||
783 | 780 | ||
784 | platform_set_drvdata(pdev, NULL); | 781 | platform_set_drvdata(pdev, NULL); |
785 | 782 | ||
786 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); | 783 | free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); |
787 | regulator_unregister(dcdc->regulator); | 784 | regulator_unregister(dcdc->regulator); |
788 | kfree(dcdc); | 785 | kfree(dcdc); |
789 | 786 | ||
@@ -885,9 +882,9 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev) | |||
885 | } | 882 | } |
886 | 883 | ||
887 | irq = platform_get_irq_byname(pdev, "UV"); | 884 | irq = platform_get_irq_byname(pdev, "UV"); |
888 | ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq, | 885 | ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq, |
889 | IRQF_TRIGGER_RISING, dcdc->name, | 886 | IRQF_TRIGGER_RISING, dcdc->name, |
890 | dcdc); | 887 | dcdc); |
891 | if (ret != 0) { | 888 | if (ret != 0) { |
892 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", | 889 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", |
893 | irq, ret); | 890 | irq, ret); |
@@ -908,11 +905,10 @@ err: | |||
908 | static __devexit int wm831x_boostp_remove(struct platform_device *pdev) | 905 | static __devexit int wm831x_boostp_remove(struct platform_device *pdev) |
909 | { | 906 | { |
910 | struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); | 907 | struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); |
911 | struct wm831x *wm831x = dcdc->wm831x; | ||
912 | 908 | ||
913 | platform_set_drvdata(pdev, NULL); | 909 | platform_set_drvdata(pdev, NULL); |
914 | 910 | ||
915 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); | 911 | free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); |
916 | regulator_unregister(dcdc->regulator); | 912 | regulator_unregister(dcdc->regulator); |
917 | kfree(dcdc); | 913 | kfree(dcdc); |
918 | 914 | ||
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c index 6c446cd6ad54..01f27c7f4236 100644 --- a/drivers/regulator/wm831x-isink.c +++ b/drivers/regulator/wm831x-isink.c | |||
@@ -198,9 +198,8 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev) | |||
198 | } | 198 | } |
199 | 199 | ||
200 | irq = platform_get_irq(pdev, 0); | 200 | irq = platform_get_irq(pdev, 0); |
201 | ret = wm831x_request_irq(wm831x, irq, wm831x_isink_irq, | 201 | ret = request_threaded_irq(irq, NULL, wm831x_isink_irq, |
202 | IRQF_TRIGGER_RISING, isink->name, | 202 | IRQF_TRIGGER_RISING, isink->name, isink); |
203 | isink); | ||
204 | if (ret != 0) { | 203 | if (ret != 0) { |
205 | dev_err(&pdev->dev, "Failed to request ISINK IRQ %d: %d\n", | 204 | dev_err(&pdev->dev, "Failed to request ISINK IRQ %d: %d\n", |
206 | irq, ret); | 205 | irq, ret); |
@@ -221,11 +220,10 @@ err: | |||
221 | static __devexit int wm831x_isink_remove(struct platform_device *pdev) | 220 | static __devexit int wm831x_isink_remove(struct platform_device *pdev) |
222 | { | 221 | { |
223 | struct wm831x_isink *isink = platform_get_drvdata(pdev); | 222 | struct wm831x_isink *isink = platform_get_drvdata(pdev); |
224 | struct wm831x *wm831x = isink->wm831x; | ||
225 | 223 | ||
226 | platform_set_drvdata(pdev, NULL); | 224 | platform_set_drvdata(pdev, NULL); |
227 | 225 | ||
228 | wm831x_free_irq(wm831x, platform_get_irq(pdev, 0), isink); | 226 | free_irq(platform_get_irq(pdev, 0), isink); |
229 | 227 | ||
230 | regulator_unregister(isink->regulator); | 228 | regulator_unregister(isink->regulator); |
231 | kfree(isink); | 229 | kfree(isink); |
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c index c94fc5b7cd5b..2220cf8defb1 100644 --- a/drivers/regulator/wm831x-ldo.c +++ b/drivers/regulator/wm831x-ldo.c | |||
@@ -354,9 +354,9 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev) | |||
354 | } | 354 | } |
355 | 355 | ||
356 | irq = platform_get_irq_byname(pdev, "UV"); | 356 | irq = platform_get_irq_byname(pdev, "UV"); |
357 | ret = wm831x_request_irq(wm831x, irq, wm831x_ldo_uv_irq, | 357 | ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq, |
358 | IRQF_TRIGGER_RISING, ldo->name, | 358 | IRQF_TRIGGER_RISING, ldo->name, |
359 | ldo); | 359 | ldo); |
360 | if (ret != 0) { | 360 | if (ret != 0) { |
361 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", | 361 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", |
362 | irq, ret); | 362 | irq, ret); |
@@ -377,11 +377,10 @@ err: | |||
377 | static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev) | 377 | static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev) |
378 | { | 378 | { |
379 | struct wm831x_ldo *ldo = platform_get_drvdata(pdev); | 379 | struct wm831x_ldo *ldo = platform_get_drvdata(pdev); |
380 | struct wm831x *wm831x = ldo->wm831x; | ||
381 | 380 | ||
382 | platform_set_drvdata(pdev, NULL); | 381 | platform_set_drvdata(pdev, NULL); |
383 | 382 | ||
384 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo); | 383 | free_irq(platform_get_irq_byname(pdev, "UV"), ldo); |
385 | regulator_unregister(ldo->regulator); | 384 | regulator_unregister(ldo->regulator); |
386 | kfree(ldo); | 385 | kfree(ldo); |
387 | 386 | ||
@@ -619,9 +618,8 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev) | |||
619 | } | 618 | } |
620 | 619 | ||
621 | irq = platform_get_irq_byname(pdev, "UV"); | 620 | irq = platform_get_irq_byname(pdev, "UV"); |
622 | ret = wm831x_request_irq(wm831x, irq, wm831x_ldo_uv_irq, | 621 | ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq, |
623 | IRQF_TRIGGER_RISING, ldo->name, | 622 | IRQF_TRIGGER_RISING, ldo->name, ldo); |
624 | ldo); | ||
625 | if (ret != 0) { | 623 | if (ret != 0) { |
626 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", | 624 | dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", |
627 | irq, ret); | 625 | irq, ret); |
@@ -642,9 +640,8 @@ err: | |||
642 | static __devexit int wm831x_aldo_remove(struct platform_device *pdev) | 640 | static __devexit int wm831x_aldo_remove(struct platform_device *pdev) |
643 | { | 641 | { |
644 | struct wm831x_ldo *ldo = platform_get_drvdata(pdev); | 642 | struct wm831x_ldo *ldo = platform_get_drvdata(pdev); |
645 | struct wm831x *wm831x = ldo->wm831x; | ||
646 | 643 | ||
647 | wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo); | 644 | free_irq(platform_get_irq_byname(pdev, "UV"), ldo); |
648 | regulator_unregister(ldo->regulator); | 645 | regulator_unregister(ldo->regulator); |
649 | kfree(ldo); | 646 | kfree(ldo); |
650 | 647 | ||
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index e55dc1ac83ab..6ac55fd48413 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c | |||
@@ -782,11 +782,11 @@ static void sh_rtc_set_irq_wake(struct device *dev, int enabled) | |||
782 | struct platform_device *pdev = to_platform_device(dev); | 782 | struct platform_device *pdev = to_platform_device(dev); |
783 | struct sh_rtc *rtc = platform_get_drvdata(pdev); | 783 | struct sh_rtc *rtc = platform_get_drvdata(pdev); |
784 | 784 | ||
785 | set_irq_wake(rtc->periodic_irq, enabled); | 785 | irq_set_irq_wake(rtc->periodic_irq, enabled); |
786 | 786 | ||
787 | if (rtc->carry_irq > 0) { | 787 | if (rtc->carry_irq > 0) { |
788 | set_irq_wake(rtc->carry_irq, enabled); | 788 | irq_set_irq_wake(rtc->carry_irq, enabled); |
789 | set_irq_wake(rtc->alarm_irq, enabled); | 789 | irq_set_irq_wake(rtc->alarm_irq, enabled); |
790 | } | 790 | } |
791 | } | 791 | } |
792 | 792 | ||
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 5833afbf08d7..c6ca115c71df 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c | |||
@@ -63,7 +63,7 @@ void intc_set_prio_level(unsigned int irq, unsigned int level) | |||
63 | 63 | ||
64 | static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) | 64 | static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) |
65 | { | 65 | { |
66 | generic_handle_irq((unsigned int)get_irq_data(irq)); | 66 | generic_handle_irq((unsigned int)irq_get_handler_data(irq)); |
67 | } | 67 | } |
68 | 68 | ||
69 | static void __init intc_register_irq(struct intc_desc *desc, | 69 | static void __init intc_register_irq(struct intc_desc *desc, |
@@ -116,9 +116,9 @@ static void __init intc_register_irq(struct intc_desc *desc, | |||
116 | irq_data = irq_get_irq_data(irq); | 116 | irq_data = irq_get_irq_data(irq); |
117 | 117 | ||
118 | disable_irq_nosync(irq); | 118 | disable_irq_nosync(irq); |
119 | set_irq_chip_and_handler_name(irq, &d->chip, | 119 | irq_set_chip_and_handler_name(irq, &d->chip, handle_level_irq, |
120 | handle_level_irq, "level"); | 120 | "level"); |
121 | set_irq_chip_data(irq, (void *)data[primary]); | 121 | irq_set_chip_data(irq, (void *)data[primary]); |
122 | 122 | ||
123 | /* | 123 | /* |
124 | * set priority level | 124 | * set priority level |
@@ -340,9 +340,9 @@ int __init register_intc_controller(struct intc_desc *desc) | |||
340 | vect2->enum_id = 0; | 340 | vect2->enum_id = 0; |
341 | 341 | ||
342 | /* redirect these interrupts to the first one */ | 342 | /* redirect these interrupts to the first one */ |
343 | set_irq_chip(irq2, &dummy_irq_chip); | 343 | irq_set_chip(irq2, &dummy_irq_chip); |
344 | set_irq_chained_handler(irq2, intc_redirect_irq); | 344 | irq_set_chained_handler(irq2, intc_redirect_irq); |
345 | set_irq_data(irq2, (void *)irq); | 345 | irq_set_handler_data(irq2, (void *)irq); |
346 | } | 346 | } |
347 | } | 347 | } |
348 | 348 | ||
@@ -387,19 +387,16 @@ static int intc_suspend(void) | |||
387 | /* enable wakeup irqs belonging to this intc controller */ | 387 | /* enable wakeup irqs belonging to this intc controller */ |
388 | for_each_active_irq(irq) { | 388 | for_each_active_irq(irq) { |
389 | struct irq_data *data; | 389 | struct irq_data *data; |
390 | struct irq_desc *desc; | ||
391 | struct irq_chip *chip; | 390 | struct irq_chip *chip; |
392 | 391 | ||
393 | data = irq_get_irq_data(irq); | 392 | data = irq_get_irq_data(irq); |
394 | chip = irq_data_get_irq_chip(data); | 393 | chip = irq_data_get_irq_chip(data); |
395 | if (chip != &d->chip) | 394 | if (chip != &d->chip) |
396 | continue; | 395 | continue; |
397 | desc = irq_to_desc(irq); | 396 | if (irqd_is_wakeup_set(data)) |
398 | if ((desc->status & IRQ_WAKEUP)) | ||
399 | chip->irq_enable(data); | 397 | chip->irq_enable(data); |
400 | } | 398 | } |
401 | } | 399 | } |
402 | |||
403 | return 0; | 400 | return 0; |
404 | } | 401 | } |
405 | 402 | ||
@@ -412,7 +409,6 @@ static void intc_resume(void) | |||
412 | 409 | ||
413 | for_each_active_irq(irq) { | 410 | for_each_active_irq(irq) { |
414 | struct irq_data *data; | 411 | struct irq_data *data; |
415 | struct irq_desc *desc; | ||
416 | struct irq_chip *chip; | 412 | struct irq_chip *chip; |
417 | 413 | ||
418 | data = irq_get_irq_data(irq); | 414 | data = irq_get_irq_data(irq); |
@@ -423,8 +419,7 @@ static void intc_resume(void) | |||
423 | */ | 419 | */ |
424 | if (chip != &d->chip) | 420 | if (chip != &d->chip) |
425 | continue; | 421 | continue; |
426 | desc = irq_to_desc(irq); | 422 | if (irqd_irq_disabled(data)) |
427 | if (desc->status & IRQ_DISABLED) | ||
428 | chip->irq_disable(data); | 423 | chip->irq_disable(data); |
429 | else | 424 | else |
430 | chip->irq_enable(data); | 425 | chip->irq_enable(data); |
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c index 4e0ff7181164..ce5f81d7cc6b 100644 --- a/drivers/sh/intc/virq.c +++ b/drivers/sh/intc/virq.c | |||
@@ -110,7 +110,7 @@ static void intc_virq_handler(unsigned int irq, struct irq_desc *desc) | |||
110 | { | 110 | { |
111 | struct irq_data *data = irq_get_irq_data(irq); | 111 | struct irq_data *data = irq_get_irq_data(irq); |
112 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 112 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
113 | struct intc_virq_list *entry, *vlist = irq_data_get_irq_data(data); | 113 | struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data); |
114 | struct intc_desc_int *d = get_intc_desc(irq); | 114 | struct intc_desc_int *d = get_intc_desc(irq); |
115 | 115 | ||
116 | chip->irq_mask_ack(data); | 116 | chip->irq_mask_ack(data); |
@@ -118,7 +118,7 @@ static void intc_virq_handler(unsigned int irq, struct irq_desc *desc) | |||
118 | for_each_virq(entry, vlist) { | 118 | for_each_virq(entry, vlist) { |
119 | unsigned long addr, handle; | 119 | unsigned long addr, handle; |
120 | 120 | ||
121 | handle = (unsigned long)get_irq_data(entry->irq); | 121 | handle = (unsigned long)irq_get_handler_data(entry->irq); |
122 | addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); | 122 | addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); |
123 | 123 | ||
124 | if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) | 124 | if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) |
@@ -229,13 +229,13 @@ restart: | |||
229 | 229 | ||
230 | intc_irq_xlate_set(irq, entry->enum_id, d); | 230 | intc_irq_xlate_set(irq, entry->enum_id, d); |
231 | 231 | ||
232 | set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq), | 232 | irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq), |
233 | handle_simple_irq, "virq"); | 233 | handle_simple_irq, "virq"); |
234 | set_irq_chip_data(irq, get_irq_chip_data(entry->pirq)); | 234 | irq_set_chip_data(irq, irq_get_chip_data(entry->pirq)); |
235 | 235 | ||
236 | set_irq_data(irq, (void *)entry->handle); | 236 | irq_set_handler_data(irq, (void *)entry->handle); |
237 | 237 | ||
238 | set_irq_chained_handler(entry->pirq, intc_virq_handler); | 238 | irq_set_chained_handler(entry->pirq, intc_virq_handler); |
239 | add_virq_to_pirq(entry->pirq, irq); | 239 | add_virq_to_pirq(entry->pirq, irq); |
240 | 240 | ||
241 | radix_tree_tag_clear(&d->tree, entry->enum_id, | 241 | radix_tree_tag_clear(&d->tree, entry->enum_id, |
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c index e3556ff43bb9..ac5bbc8722e5 100644 --- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c +++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c | |||
@@ -341,7 +341,7 @@ int bcmsdh_register_oob_intr(void *dhdp) | |||
341 | if (error) | 341 | if (error) |
342 | return -ENODEV; | 342 | return -ENODEV; |
343 | 343 | ||
344 | set_irq_wake(sdhcinfo->oob_irq, 1); | 344 | irq_set_irq_wake(sdhcinfo->oob_irq, 1); |
345 | sdhcinfo->oob_irq_registered = true; | 345 | sdhcinfo->oob_irq_registered = true; |
346 | } | 346 | } |
347 | 347 | ||
@@ -352,7 +352,7 @@ void bcmsdh_unregister_oob_intr(void) | |||
352 | { | 352 | { |
353 | SDLX_MSG(("%s: Enter\n", __func__)); | 353 | SDLX_MSG(("%s: Enter\n", __func__)); |
354 | 354 | ||
355 | set_irq_wake(sdhcinfo->oob_irq, 0); | 355 | irq_set_irq_wake(sdhcinfo->oob_irq, 0); |
356 | disable_irq(sdhcinfo->oob_irq); /* just in case.. */ | 356 | disable_irq(sdhcinfo->oob_irq); /* just in case.. */ |
357 | free_irq(sdhcinfo->oob_irq, NULL); | 357 | free_irq(sdhcinfo->oob_irq, NULL); |
358 | sdhcinfo->oob_irq_registered = false; | 358 | sdhcinfo->oob_irq_registered = false; |
diff --git a/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c b/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c index ea9b733c3926..21cdb0637beb 100644 --- a/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c +++ b/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c | |||
@@ -597,7 +597,7 @@ static int cy_as_hal_configure_interrupts(void *dev_p) | |||
597 | int result; | 597 | int result; |
598 | int irq_pin = AST_INT; | 598 | int irq_pin = AST_INT; |
599 | 599 | ||
600 | set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW); | 600 | irq_set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW); |
601 | 601 | ||
602 | /* | 602 | /* |
603 | * for shared IRQS must provide non NULL device ptr | 603 | * for shared IRQS must provide non NULL device ptr |
diff --git a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c index 842cd9214a5e..289729daba80 100644 --- a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c +++ b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c | |||
@@ -1191,7 +1191,7 @@ static int cyasblkdev_add_disks(int bus_num, | |||
1191 | bd->user_disk_1->first_minor = (devidx + 1) << CYASBLKDEV_SHIFT; | 1191 | bd->user_disk_1->first_minor = (devidx + 1) << CYASBLKDEV_SHIFT; |
1192 | bd->user_disk_1->minors = 8; | 1192 | bd->user_disk_1->minors = 8; |
1193 | bd->user_disk_1->fops = &cyasblkdev_bdops; | 1193 | bd->user_disk_1->fops = &cyasblkdev_bdops; |
1194 | bd->user_disk_0->events = DISK_EVENT_MEDIA_CHANGE; | 1194 | bd->user_disk_1->events = DISK_EVENT_MEDIA_CHANGE; |
1195 | bd->user_disk_1->private_data = bd; | 1195 | bd->user_disk_1->private_data = bd; |
1196 | bd->user_disk_1->queue = bd->queue.queue; | 1196 | bd->user_disk_1->queue = bd->queue.queue; |
1197 | bd->dbgprn_flags = DBGPRN_RD_RQ; | 1197 | bd->dbgprn_flags = DBGPRN_RD_RQ; |
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index c35f1a73bc8b..52fdf60bdbe2 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c | |||
@@ -178,7 +178,7 @@ static int __init xen_hvc_init(void) | |||
178 | if (xencons_irq < 0) | 178 | if (xencons_irq < 0) |
179 | xencons_irq = 0; /* NO_IRQ */ | 179 | xencons_irq = 0; /* NO_IRQ */ |
180 | else | 180 | else |
181 | set_irq_noprobe(xencons_irq); | 181 | irq_set_noprobe(xencons_irq); |
182 | 182 | ||
183 | hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256); | 183 | hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256); |
184 | if (IS_ERR(hp)) | 184 | if (IS_ERR(hp)) |
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c index 2e7fc9cee9cc..b906f11f7c1a 100644 --- a/drivers/tty/serial/msm_serial_hs.c +++ b/drivers/tty/serial/msm_serial_hs.c | |||
@@ -1644,7 +1644,7 @@ static int __devinit msm_hs_probe(struct platform_device *pdev) | |||
1644 | if (unlikely(uport->irq < 0)) | 1644 | if (unlikely(uport->irq < 0)) |
1645 | return -ENXIO; | 1645 | return -ENXIO; |
1646 | 1646 | ||
1647 | if (unlikely(set_irq_wake(uport->irq, 1))) | 1647 | if (unlikely(irq_set_irq_wake(uport->irq, 1))) |
1648 | return -ENXIO; | 1648 | return -ENXIO; |
1649 | 1649 | ||
1650 | if (pdata == NULL || pdata->rx_wakeup_irq < 0) | 1650 | if (pdata == NULL || pdata->rx_wakeup_irq < 0) |
@@ -1658,7 +1658,7 @@ static int __devinit msm_hs_probe(struct platform_device *pdev) | |||
1658 | if (unlikely(msm_uport->rx_wakeup.irq < 0)) | 1658 | if (unlikely(msm_uport->rx_wakeup.irq < 0)) |
1659 | return -ENXIO; | 1659 | return -ENXIO; |
1660 | 1660 | ||
1661 | if (unlikely(set_irq_wake(msm_uport->rx_wakeup.irq, 1))) | 1661 | if (unlikely(irq_set_irq_wake(msm_uport->rx_wakeup.irq, 1))) |
1662 | return -ENXIO; | 1662 | return -ENXIO; |
1663 | } | 1663 | } |
1664 | 1664 | ||
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 38193f4e980e..44e4deb362e1 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c | |||
@@ -3832,7 +3832,7 @@ static int oxu_drv_probe(struct platform_device *pdev) | |||
3832 | return -EBUSY; | 3832 | return -EBUSY; |
3833 | } | 3833 | } |
3834 | 3834 | ||
3835 | ret = set_irq_type(irq, IRQF_TRIGGER_FALLING); | 3835 | ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING); |
3836 | if (ret) { | 3836 | if (ret) { |
3837 | dev_err(&pdev->dev, "error setting irq type\n"); | 3837 | dev_err(&pdev->dev, "error setting irq type\n"); |
3838 | ret = -EFAULT; | 3838 | ret = -EFAULT; |
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 2ba3b070ed0b..c47aac4a1f98 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c | |||
@@ -943,7 +943,7 @@ static void tusb_musb_enable(struct musb *musb) | |||
943 | musb_writel(tbase, TUSB_INT_CTRL_CONF, | 943 | musb_writel(tbase, TUSB_INT_CTRL_CONF, |
944 | TUSB_INT_CTRL_CONF_INT_RELCYC(0)); | 944 | TUSB_INT_CTRL_CONF_INT_RELCYC(0)); |
945 | 945 | ||
946 | set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); | 946 | irq_set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); |
947 | 947 | ||
948 | /* maybe force into the Default-A OTG state machine */ | 948 | /* maybe force into the Default-A OTG state machine */ |
949 | if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) | 949 | if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) |
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c index f885c868a04d..aa250cebecd2 100644 --- a/drivers/vlynq/vlynq.c +++ b/drivers/vlynq/vlynq.c | |||
@@ -135,40 +135,40 @@ static void vlynq_reset(struct vlynq_device *dev) | |||
135 | msleep(5); | 135 | msleep(5); |
136 | } | 136 | } |
137 | 137 | ||
138 | static void vlynq_irq_unmask(unsigned int irq) | 138 | static void vlynq_irq_unmask(struct irq_data *d) |
139 | { | 139 | { |
140 | u32 val; | 140 | struct vlynq_device *dev = irq_data_get_irq_chip_data(d); |
141 | struct vlynq_device *dev = get_irq_chip_data(irq); | ||
142 | int virq; | 141 | int virq; |
142 | u32 val; | ||
143 | 143 | ||
144 | BUG_ON(!dev); | 144 | BUG_ON(!dev); |
145 | virq = irq - dev->irq_start; | 145 | virq = d->irq - dev->irq_start; |
146 | val = readl(&dev->remote->int_device[virq >> 2]); | 146 | val = readl(&dev->remote->int_device[virq >> 2]); |
147 | val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq); | 147 | val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq); |
148 | writel(val, &dev->remote->int_device[virq >> 2]); | 148 | writel(val, &dev->remote->int_device[virq >> 2]); |
149 | } | 149 | } |
150 | 150 | ||
151 | static void vlynq_irq_mask(unsigned int irq) | 151 | static void vlynq_irq_mask(struct irq_data *d) |
152 | { | 152 | { |
153 | u32 val; | 153 | struct vlynq_device *dev = irq_data_get_irq_chip_data(d); |
154 | struct vlynq_device *dev = get_irq_chip_data(irq); | ||
155 | int virq; | 154 | int virq; |
155 | u32 val; | ||
156 | 156 | ||
157 | BUG_ON(!dev); | 157 | BUG_ON(!dev); |
158 | virq = irq - dev->irq_start; | 158 | virq = d->irq - dev->irq_start; |
159 | val = readl(&dev->remote->int_device[virq >> 2]); | 159 | val = readl(&dev->remote->int_device[virq >> 2]); |
160 | val &= ~(VINT_ENABLE << VINT_OFFSET(virq)); | 160 | val &= ~(VINT_ENABLE << VINT_OFFSET(virq)); |
161 | writel(val, &dev->remote->int_device[virq >> 2]); | 161 | writel(val, &dev->remote->int_device[virq >> 2]); |
162 | } | 162 | } |
163 | 163 | ||
164 | static int vlynq_irq_type(unsigned int irq, unsigned int flow_type) | 164 | static int vlynq_irq_type(struct irq_data *d, unsigned int flow_type) |
165 | { | 165 | { |
166 | u32 val; | 166 | struct vlynq_device *dev = irq_data_get_irq_chip_data(d); |
167 | struct vlynq_device *dev = get_irq_chip_data(irq); | ||
168 | int virq; | 167 | int virq; |
168 | u32 val; | ||
169 | 169 | ||
170 | BUG_ON(!dev); | 170 | BUG_ON(!dev); |
171 | virq = irq - dev->irq_start; | 171 | virq = d->irq - dev->irq_start; |
172 | val = readl(&dev->remote->int_device[virq >> 2]); | 172 | val = readl(&dev->remote->int_device[virq >> 2]); |
173 | switch (flow_type & IRQ_TYPE_SENSE_MASK) { | 173 | switch (flow_type & IRQ_TYPE_SENSE_MASK) { |
174 | case IRQ_TYPE_EDGE_RISING: | 174 | case IRQ_TYPE_EDGE_RISING: |
@@ -192,10 +192,9 @@ static int vlynq_irq_type(unsigned int irq, unsigned int flow_type) | |||
192 | return 0; | 192 | return 0; |
193 | } | 193 | } |
194 | 194 | ||
195 | static void vlynq_local_ack(unsigned int irq) | 195 | static void vlynq_local_ack(struct irq_data *d) |
196 | { | 196 | { |
197 | struct vlynq_device *dev = get_irq_chip_data(irq); | 197 | struct vlynq_device *dev = irq_data_get_irq_chip_data(d); |
198 | |||
199 | u32 status = readl(&dev->local->status); | 198 | u32 status = readl(&dev->local->status); |
200 | 199 | ||
201 | pr_debug("%s: local status: 0x%08x\n", | 200 | pr_debug("%s: local status: 0x%08x\n", |
@@ -203,10 +202,9 @@ static void vlynq_local_ack(unsigned int irq) | |||
203 | writel(status, &dev->local->status); | 202 | writel(status, &dev->local->status); |
204 | } | 203 | } |
205 | 204 | ||
206 | static void vlynq_remote_ack(unsigned int irq) | 205 | static void vlynq_remote_ack(struct irq_data *d) |
207 | { | 206 | { |
208 | struct vlynq_device *dev = get_irq_chip_data(irq); | 207 | struct vlynq_device *dev = irq_data_get_irq_chip_data(d); |
209 | |||
210 | u32 status = readl(&dev->remote->status); | 208 | u32 status = readl(&dev->remote->status); |
211 | 209 | ||
212 | pr_debug("%s: remote status: 0x%08x\n", | 210 | pr_debug("%s: remote status: 0x%08x\n", |
@@ -238,23 +236,23 @@ static irqreturn_t vlynq_irq(int irq, void *dev_id) | |||
238 | 236 | ||
239 | static struct irq_chip vlynq_irq_chip = { | 237 | static struct irq_chip vlynq_irq_chip = { |
240 | .name = "vlynq", | 238 | .name = "vlynq", |
241 | .unmask = vlynq_irq_unmask, | 239 | .irq_unmask = vlynq_irq_unmask, |
242 | .mask = vlynq_irq_mask, | 240 | .irq_mask = vlynq_irq_mask, |
243 | .set_type = vlynq_irq_type, | 241 | .irq_set_type = vlynq_irq_type, |
244 | }; | 242 | }; |
245 | 243 | ||
246 | static struct irq_chip vlynq_local_chip = { | 244 | static struct irq_chip vlynq_local_chip = { |
247 | .name = "vlynq local error", | 245 | .name = "vlynq local error", |
248 | .unmask = vlynq_irq_unmask, | 246 | .irq_unmask = vlynq_irq_unmask, |
249 | .mask = vlynq_irq_mask, | 247 | .irq_mask = vlynq_irq_mask, |
250 | .ack = vlynq_local_ack, | 248 | .irq_ack = vlynq_local_ack, |
251 | }; | 249 | }; |
252 | 250 | ||
253 | static struct irq_chip vlynq_remote_chip = { | 251 | static struct irq_chip vlynq_remote_chip = { |
254 | .name = "vlynq remote error", | 252 | .name = "vlynq remote error", |
255 | .unmask = vlynq_irq_unmask, | 253 | .irq_unmask = vlynq_irq_unmask, |
256 | .mask = vlynq_irq_mask, | 254 | .irq_mask = vlynq_irq_mask, |
257 | .ack = vlynq_remote_ack, | 255 | .irq_ack = vlynq_remote_ack, |
258 | }; | 256 | }; |
259 | 257 | ||
260 | static int vlynq_setup_irq(struct vlynq_device *dev) | 258 | static int vlynq_setup_irq(struct vlynq_device *dev) |
@@ -291,17 +289,17 @@ static int vlynq_setup_irq(struct vlynq_device *dev) | |||
291 | for (i = dev->irq_start; i <= dev->irq_end; i++) { | 289 | for (i = dev->irq_start; i <= dev->irq_end; i++) { |
292 | virq = i - dev->irq_start; | 290 | virq = i - dev->irq_start; |
293 | if (virq == dev->local_irq) { | 291 | if (virq == dev->local_irq) { |
294 | set_irq_chip_and_handler(i, &vlynq_local_chip, | 292 | irq_set_chip_and_handler(i, &vlynq_local_chip, |
295 | handle_level_irq); | 293 | handle_level_irq); |
296 | set_irq_chip_data(i, dev); | 294 | irq_set_chip_data(i, dev); |
297 | } else if (virq == dev->remote_irq) { | 295 | } else if (virq == dev->remote_irq) { |
298 | set_irq_chip_and_handler(i, &vlynq_remote_chip, | 296 | irq_set_chip_and_handler(i, &vlynq_remote_chip, |
299 | handle_level_irq); | 297 | handle_level_irq); |
300 | set_irq_chip_data(i, dev); | 298 | irq_set_chip_data(i, dev); |
301 | } else { | 299 | } else { |
302 | set_irq_chip_and_handler(i, &vlynq_irq_chip, | 300 | irq_set_chip_and_handler(i, &vlynq_irq_chip, |
303 | handle_simple_irq); | 301 | handle_simple_irq); |
304 | set_irq_chip_data(i, dev); | 302 | irq_set_chip_data(i, dev); |
305 | writel(0, &dev->remote->int_device[virq >> 2]); | 303 | writel(0, &dev->remote->int_device[virq >> 2]); |
306 | } | 304 | } |
307 | } | 305 | } |
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c index 95921b77cf86..2f4fa02744a5 100644 --- a/drivers/w1/masters/ds1wm.c +++ b/drivers/w1/masters/ds1wm.c | |||
@@ -368,9 +368,9 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
368 | ds1wm_data->active_high = plat->active_high; | 368 | ds1wm_data->active_high = plat->active_high; |
369 | 369 | ||
370 | if (res->flags & IORESOURCE_IRQ_HIGHEDGE) | 370 | if (res->flags & IORESOURCE_IRQ_HIGHEDGE) |
371 | set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); | 371 | irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); |
372 | if (res->flags & IORESOURCE_IRQ_LOWEDGE) | 372 | if (res->flags & IORESOURCE_IRQ_LOWEDGE) |
373 | set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); | 373 | irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); |
374 | 374 | ||
375 | ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED, | 375 | ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED, |
376 | "ds1wm", ds1wm_data); | 376 | "ds1wm", ds1wm_data); |
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c index 596ba604e78d..51b5551b4e3f 100644 --- a/drivers/watchdog/davinci_wdt.c +++ b/drivers/watchdog/davinci_wdt.c | |||
@@ -202,7 +202,6 @@ static struct miscdevice davinci_wdt_miscdev = { | |||
202 | static int __devinit davinci_wdt_probe(struct platform_device *pdev) | 202 | static int __devinit davinci_wdt_probe(struct platform_device *pdev) |
203 | { | 203 | { |
204 | int ret = 0, size; | 204 | int ret = 0, size; |
205 | struct resource *res; | ||
206 | struct device *dev = &pdev->dev; | 205 | struct device *dev = &pdev->dev; |
207 | 206 | ||
208 | wdt_clk = clk_get(dev, NULL); | 207 | wdt_clk = clk_get(dev, NULL); |
@@ -216,31 +215,31 @@ static int __devinit davinci_wdt_probe(struct platform_device *pdev) | |||
216 | 215 | ||
217 | dev_info(dev, "heartbeat %d sec\n", heartbeat); | 216 | dev_info(dev, "heartbeat %d sec\n", heartbeat); |
218 | 217 | ||
219 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 218 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
220 | if (res == NULL) { | 219 | if (wdt_mem == NULL) { |
221 | dev_err(dev, "failed to get memory region resource\n"); | 220 | dev_err(dev, "failed to get memory region resource\n"); |
222 | return -ENOENT; | 221 | return -ENOENT; |
223 | } | 222 | } |
224 | 223 | ||
225 | size = resource_size(res); | 224 | size = resource_size(wdt_mem); |
226 | wdt_mem = request_mem_region(res->start, size, pdev->name); | 225 | if (!request_mem_region(wdt_mem->start, size, pdev->name)) { |
227 | |||
228 | if (wdt_mem == NULL) { | ||
229 | dev_err(dev, "failed to get memory region\n"); | 226 | dev_err(dev, "failed to get memory region\n"); |
230 | return -ENOENT; | 227 | return -ENOENT; |
231 | } | 228 | } |
232 | 229 | ||
233 | wdt_base = ioremap(res->start, size); | 230 | wdt_base = ioremap(wdt_mem->start, size); |
234 | if (!wdt_base) { | 231 | if (!wdt_base) { |
235 | dev_err(dev, "failed to map memory region\n"); | 232 | dev_err(dev, "failed to map memory region\n"); |
233 | release_mem_region(wdt_mem->start, size); | ||
234 | wdt_mem = NULL; | ||
236 | return -ENOMEM; | 235 | return -ENOMEM; |
237 | } | 236 | } |
238 | 237 | ||
239 | ret = misc_register(&davinci_wdt_miscdev); | 238 | ret = misc_register(&davinci_wdt_miscdev); |
240 | if (ret < 0) { | 239 | if (ret < 0) { |
241 | dev_err(dev, "cannot register misc device\n"); | 240 | dev_err(dev, "cannot register misc device\n"); |
242 | release_resource(wdt_mem); | 241 | release_mem_region(wdt_mem->start, size); |
243 | kfree(wdt_mem); | 242 | wdt_mem = NULL; |
244 | } else { | 243 | } else { |
245 | set_bit(WDT_DEVICE_INITED, &wdt_status); | 244 | set_bit(WDT_DEVICE_INITED, &wdt_status); |
246 | } | 245 | } |
@@ -253,8 +252,7 @@ static int __devexit davinci_wdt_remove(struct platform_device *pdev) | |||
253 | { | 252 | { |
254 | misc_deregister(&davinci_wdt_miscdev); | 253 | misc_deregister(&davinci_wdt_miscdev); |
255 | if (wdt_mem) { | 254 | if (wdt_mem) { |
256 | release_resource(wdt_mem); | 255 | release_mem_region(wdt_mem->start, resource_size(wdt_mem)); |
257 | kfree(wdt_mem); | ||
258 | wdt_mem = NULL; | 256 | wdt_mem = NULL; |
259 | } | 257 | } |
260 | 258 | ||
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index 7a82ce5a6337..73ba2fd8e591 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c | |||
@@ -270,7 +270,6 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) | |||
270 | { | 270 | { |
271 | int ret = 0; | 271 | int ret = 0; |
272 | int size; | 272 | int size; |
273 | struct resource *res; | ||
274 | struct device *dev = &pdev->dev; | 273 | struct device *dev = &pdev->dev; |
275 | struct max63xx_timeout *table; | 274 | struct max63xx_timeout *table; |
276 | 275 | ||
@@ -294,21 +293,19 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) | |||
294 | 293 | ||
295 | max63xx_pdev = pdev; | 294 | max63xx_pdev = pdev; |
296 | 295 | ||
297 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 296 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
298 | if (res == NULL) { | 297 | if (wdt_mem == NULL) { |
299 | dev_err(dev, "failed to get memory region resource\n"); | 298 | dev_err(dev, "failed to get memory region resource\n"); |
300 | return -ENOENT; | 299 | return -ENOENT; |
301 | } | 300 | } |
302 | 301 | ||
303 | size = resource_size(res); | 302 | size = resource_size(wdt_mem); |
304 | wdt_mem = request_mem_region(res->start, size, pdev->name); | 303 | if (!request_mem_region(wdt_mem->start, size, pdev->name)) { |
305 | |||
306 | if (wdt_mem == NULL) { | ||
307 | dev_err(dev, "failed to get memory region\n"); | 304 | dev_err(dev, "failed to get memory region\n"); |
308 | return -ENOENT; | 305 | return -ENOENT; |
309 | } | 306 | } |
310 | 307 | ||
311 | wdt_base = ioremap(res->start, size); | 308 | wdt_base = ioremap(wdt_mem->start, size); |
312 | if (!wdt_base) { | 309 | if (!wdt_base) { |
313 | dev_err(dev, "failed to map memory region\n"); | 310 | dev_err(dev, "failed to map memory region\n"); |
314 | ret = -ENOMEM; | 311 | ret = -ENOMEM; |
@@ -326,8 +323,8 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) | |||
326 | out_unmap: | 323 | out_unmap: |
327 | iounmap(wdt_base); | 324 | iounmap(wdt_base); |
328 | out_request: | 325 | out_request: |
329 | release_resource(wdt_mem); | 326 | release_mem_region(wdt_mem->start, size); |
330 | kfree(wdt_mem); | 327 | wdt_mem = NULL; |
331 | 328 | ||
332 | return ret; | 329 | return ret; |
333 | } | 330 | } |
@@ -336,8 +333,7 @@ static int __devexit max63xx_wdt_remove(struct platform_device *pdev) | |||
336 | { | 333 | { |
337 | misc_deregister(&max63xx_wdt_miscdev); | 334 | misc_deregister(&max63xx_wdt_miscdev); |
338 | if (wdt_mem) { | 335 | if (wdt_mem) { |
339 | release_resource(wdt_mem); | 336 | release_mem_region(wdt_mem->start, resource_size(wdt_mem)); |
340 | kfree(wdt_mem); | ||
341 | wdt_mem = NULL; | 337 | wdt_mem = NULL; |
342 | } | 338 | } |
343 | 339 | ||
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c index 267377a5a83e..afa78a54711e 100644 --- a/drivers/watchdog/nv_tco.c +++ b/drivers/watchdog/nv_tco.c | |||
@@ -302,7 +302,7 @@ MODULE_DEVICE_TABLE(pci, tco_pci_tbl); | |||
302 | * Init & exit routines | 302 | * Init & exit routines |
303 | */ | 303 | */ |
304 | 304 | ||
305 | static unsigned char __init nv_tco_getdevice(void) | 305 | static unsigned char __devinit nv_tco_getdevice(void) |
306 | { | 306 | { |
307 | struct pci_dev *dev = NULL; | 307 | struct pci_dev *dev = NULL; |
308 | u32 val; | 308 | u32 val; |
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index c7cf4cbf8ab3..614933225560 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c | |||
@@ -254,7 +254,6 @@ static struct miscdevice pnx4008_wdt_miscdev = { | |||
254 | static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) | 254 | static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) |
255 | { | 255 | { |
256 | int ret = 0, size; | 256 | int ret = 0, size; |
257 | struct resource *res; | ||
258 | 257 | ||
259 | if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) | 258 | if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) |
260 | heartbeat = DEFAULT_HEARTBEAT; | 259 | heartbeat = DEFAULT_HEARTBEAT; |
@@ -262,42 +261,42 @@ static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) | |||
262 | printk(KERN_INFO MODULE_NAME | 261 | printk(KERN_INFO MODULE_NAME |
263 | "PNX4008 Watchdog Timer: heartbeat %d sec\n", heartbeat); | 262 | "PNX4008 Watchdog Timer: heartbeat %d sec\n", heartbeat); |
264 | 263 | ||
265 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 264 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
266 | if (res == NULL) { | 265 | if (wdt_mem == NULL) { |
267 | printk(KERN_INFO MODULE_NAME | 266 | printk(KERN_INFO MODULE_NAME |
268 | "failed to get memory region resouce\n"); | 267 | "failed to get memory region resouce\n"); |
269 | return -ENOENT; | 268 | return -ENOENT; |
270 | } | 269 | } |
271 | 270 | ||
272 | size = resource_size(res); | 271 | size = resource_size(wdt_mem); |
273 | wdt_mem = request_mem_region(res->start, size, pdev->name); | ||
274 | 272 | ||
275 | if (wdt_mem == NULL) { | 273 | if (!request_mem_region(wdt_mem->start, size, pdev->name)) { |
276 | printk(KERN_INFO MODULE_NAME "failed to get memory region\n"); | 274 | printk(KERN_INFO MODULE_NAME "failed to get memory region\n"); |
277 | return -ENOENT; | 275 | return -ENOENT; |
278 | } | 276 | } |
279 | wdt_base = (void __iomem *)IO_ADDRESS(res->start); | 277 | wdt_base = (void __iomem *)IO_ADDRESS(wdt_mem->start); |
280 | 278 | ||
281 | wdt_clk = clk_get(&pdev->dev, NULL); | 279 | wdt_clk = clk_get(&pdev->dev, NULL); |
282 | if (IS_ERR(wdt_clk)) { | 280 | if (IS_ERR(wdt_clk)) { |
283 | ret = PTR_ERR(wdt_clk); | 281 | ret = PTR_ERR(wdt_clk); |
284 | release_resource(wdt_mem); | 282 | release_mem_region(wdt_mem->start, size); |
285 | kfree(wdt_mem); | 283 | wdt_mem = NULL; |
286 | goto out; | 284 | goto out; |
287 | } | 285 | } |
288 | 286 | ||
289 | ret = clk_enable(wdt_clk); | 287 | ret = clk_enable(wdt_clk); |
290 | if (ret) { | 288 | if (ret) { |
291 | release_resource(wdt_mem); | 289 | release_mem_region(wdt_mem->start, size); |
292 | kfree(wdt_mem); | 290 | wdt_mem = NULL; |
291 | clk_put(wdt_clk); | ||
293 | goto out; | 292 | goto out; |
294 | } | 293 | } |
295 | 294 | ||
296 | ret = misc_register(&pnx4008_wdt_miscdev); | 295 | ret = misc_register(&pnx4008_wdt_miscdev); |
297 | if (ret < 0) { | 296 | if (ret < 0) { |
298 | printk(KERN_ERR MODULE_NAME "cannot register misc device\n"); | 297 | printk(KERN_ERR MODULE_NAME "cannot register misc device\n"); |
299 | release_resource(wdt_mem); | 298 | release_mem_region(wdt_mem->start, size); |
300 | kfree(wdt_mem); | 299 | wdt_mem = NULL; |
301 | clk_disable(wdt_clk); | 300 | clk_disable(wdt_clk); |
302 | clk_put(wdt_clk); | 301 | clk_put(wdt_clk); |
303 | } else { | 302 | } else { |
@@ -320,8 +319,7 @@ static int __devexit pnx4008_wdt_remove(struct platform_device *pdev) | |||
320 | clk_put(wdt_clk); | 319 | clk_put(wdt_clk); |
321 | 320 | ||
322 | if (wdt_mem) { | 321 | if (wdt_mem) { |
323 | release_resource(wdt_mem); | 322 | release_mem_region(wdt_mem->start, resource_size(wdt_mem)); |
324 | kfree(wdt_mem); | ||
325 | wdt_mem = NULL; | 323 | wdt_mem = NULL; |
326 | } | 324 | } |
327 | return 0; | 325 | return 0; |
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 25b39bf35925..f7f5aa00df60 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c | |||
@@ -402,7 +402,6 @@ static inline void s3c2410wdt_cpufreq_deregister(void) | |||
402 | 402 | ||
403 | static int __devinit s3c2410wdt_probe(struct platform_device *pdev) | 403 | static int __devinit s3c2410wdt_probe(struct platform_device *pdev) |
404 | { | 404 | { |
405 | struct resource *res; | ||
406 | struct device *dev; | 405 | struct device *dev; |
407 | unsigned int wtcon; | 406 | unsigned int wtcon; |
408 | int started = 0; | 407 | int started = 0; |
@@ -416,20 +415,19 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) | |||
416 | 415 | ||
417 | /* get the memory region for the watchdog timer */ | 416 | /* get the memory region for the watchdog timer */ |
418 | 417 | ||
419 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 418 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
420 | if (res == NULL) { | 419 | if (wdt_mem == NULL) { |
421 | dev_err(dev, "no memory resource specified\n"); | 420 | dev_err(dev, "no memory resource specified\n"); |
422 | return -ENOENT; | 421 | return -ENOENT; |
423 | } | 422 | } |
424 | 423 | ||
425 | size = resource_size(res); | 424 | size = resource_size(wdt_mem); |
426 | wdt_mem = request_mem_region(res->start, size, pdev->name); | 425 | if (!request_mem_region(wdt_mem->start, size, pdev->name)) { |
427 | if (wdt_mem == NULL) { | ||
428 | dev_err(dev, "failed to get memory region\n"); | 426 | dev_err(dev, "failed to get memory region\n"); |
429 | return -EBUSY; | 427 | return -EBUSY; |
430 | } | 428 | } |
431 | 429 | ||
432 | wdt_base = ioremap(res->start, size); | 430 | wdt_base = ioremap(wdt_mem->start, size); |
433 | if (wdt_base == NULL) { | 431 | if (wdt_base == NULL) { |
434 | dev_err(dev, "failed to ioremap() region\n"); | 432 | dev_err(dev, "failed to ioremap() region\n"); |
435 | ret = -EINVAL; | 433 | ret = -EINVAL; |
@@ -524,8 +522,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) | |||
524 | iounmap(wdt_base); | 522 | iounmap(wdt_base); |
525 | 523 | ||
526 | err_req: | 524 | err_req: |
527 | release_resource(wdt_mem); | 525 | release_mem_region(wdt_mem->start, size); |
528 | kfree(wdt_mem); | 526 | wdt_mem = NULL; |
529 | 527 | ||
530 | return ret; | 528 | return ret; |
531 | } | 529 | } |
@@ -545,8 +543,7 @@ static int __devexit s3c2410wdt_remove(struct platform_device *dev) | |||
545 | 543 | ||
546 | iounmap(wdt_base); | 544 | iounmap(wdt_base); |
547 | 545 | ||
548 | release_resource(wdt_mem); | 546 | release_mem_region(wdt_mem->start, resource_size(wdt_mem)); |
549 | kfree(wdt_mem); | ||
550 | wdt_mem = NULL; | 547 | wdt_mem = NULL; |
551 | return 0; | 548 | return 0; |
552 | } | 549 | } |
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index 100b114e3c3c..bf16ffb4d21e 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/init.h> | 48 | #include <linux/init.h> |
49 | #include <linux/jiffies.h> | 49 | #include <linux/jiffies.h> |
50 | #include <linux/uaccess.h> | 50 | #include <linux/uaccess.h> |
51 | #include <linux/kernel.h> | ||
51 | 52 | ||
52 | #define PFX "SoftDog: " | 53 | #define PFX "SoftDog: " |
53 | 54 | ||
@@ -75,6 +76,11 @@ MODULE_PARM_DESC(soft_noboot, | |||
75 | "Softdog action, set to 1 to ignore reboots, 0 to reboot " | 76 | "Softdog action, set to 1 to ignore reboots, 0 to reboot " |
76 | "(default depends on ONLY_TESTING)"); | 77 | "(default depends on ONLY_TESTING)"); |
77 | 78 | ||
79 | static int soft_panic; | ||
80 | module_param(soft_panic, int, 0); | ||
81 | MODULE_PARM_DESC(soft_panic, | ||
82 | "Softdog action, set to 1 to panic, 0 to reboot (default=0)"); | ||
83 | |||
78 | /* | 84 | /* |
79 | * Our timer | 85 | * Our timer |
80 | */ | 86 | */ |
@@ -98,7 +104,10 @@ static void watchdog_fire(unsigned long data) | |||
98 | 104 | ||
99 | if (soft_noboot) | 105 | if (soft_noboot) |
100 | printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n"); | 106 | printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n"); |
101 | else { | 107 | else if (soft_panic) { |
108 | printk(KERN_CRIT PFX "Initiating panic.\n"); | ||
109 | panic("Software Watchdog Timer expired."); | ||
110 | } else { | ||
102 | printk(KERN_CRIT PFX "Initiating system reboot.\n"); | 111 | printk(KERN_CRIT PFX "Initiating system reboot.\n"); |
103 | emergency_restart(); | 112 | emergency_restart(); |
104 | printk(KERN_CRIT PFX "Reboot didn't ?????\n"); | 113 | printk(KERN_CRIT PFX "Reboot didn't ?????\n"); |
@@ -267,7 +276,8 @@ static struct notifier_block softdog_notifier = { | |||
267 | }; | 276 | }; |
268 | 277 | ||
269 | static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 " | 278 | static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 " |
270 | "initialized. soft_noboot=%d soft_margin=%d sec (nowayout= %d)\n"; | 279 | "initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d " |
280 | "(nowayout= %d)\n"; | ||
271 | 281 | ||
272 | static int __init watchdog_init(void) | 282 | static int __init watchdog_init(void) |
273 | { | 283 | { |
@@ -298,7 +308,7 @@ static int __init watchdog_init(void) | |||
298 | return ret; | 308 | return ret; |
299 | } | 309 | } |
300 | 310 | ||
301 | printk(banner, soft_noboot, soft_margin, nowayout); | 311 | printk(banner, soft_noboot, soft_margin, soft_panic, nowayout); |
302 | 312 | ||
303 | return 0; | 313 | return 0; |
304 | } | 314 | } |
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index 1bc493848ed4..87e0527669d8 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #define PFX TCO_MODULE_NAME ": " | 42 | #define PFX TCO_MODULE_NAME ": " |
43 | 43 | ||
44 | /* internal variables */ | 44 | /* internal variables */ |
45 | static u32 tcobase_phys; | ||
45 | static void __iomem *tcobase; | 46 | static void __iomem *tcobase; |
46 | static unsigned int pm_iobase; | 47 | static unsigned int pm_iobase; |
47 | static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ | 48 | static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ |
@@ -305,10 +306,18 @@ static unsigned char __devinit sp5100_tco_setupdevice(void) | |||
305 | /* Low three bits of BASE0 are reserved. */ | 306 | /* Low three bits of BASE0 are reserved. */ |
306 | val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8); | 307 | val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8); |
307 | 308 | ||
309 | if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE, | ||
310 | "SP5100 TCO")) { | ||
311 | printk(KERN_ERR PFX "mmio address 0x%04x already in use\n", | ||
312 | val); | ||
313 | goto unreg_region; | ||
314 | } | ||
315 | tcobase_phys = val; | ||
316 | |||
308 | tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE); | 317 | tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE); |
309 | if (tcobase == 0) { | 318 | if (tcobase == 0) { |
310 | printk(KERN_ERR PFX "failed to get tcobase address\n"); | 319 | printk(KERN_ERR PFX "failed to get tcobase address\n"); |
311 | goto unreg_region; | 320 | goto unreg_mem_region; |
312 | } | 321 | } |
313 | 322 | ||
314 | /* Enable watchdog decode bit */ | 323 | /* Enable watchdog decode bit */ |
@@ -346,7 +355,8 @@ static unsigned char __devinit sp5100_tco_setupdevice(void) | |||
346 | /* Done */ | 355 | /* Done */ |
347 | return 1; | 356 | return 1; |
348 | 357 | ||
349 | iounmap(tcobase); | 358 | unreg_mem_region: |
359 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | ||
350 | unreg_region: | 360 | unreg_region: |
351 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 361 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
352 | exit: | 362 | exit: |
@@ -401,6 +411,7 @@ static int __devinit sp5100_tco_init(struct platform_device *dev) | |||
401 | 411 | ||
402 | exit: | 412 | exit: |
403 | iounmap(tcobase); | 413 | iounmap(tcobase); |
414 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | ||
404 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 415 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
405 | return ret; | 416 | return ret; |
406 | } | 417 | } |
@@ -414,6 +425,7 @@ static void __devexit sp5100_tco_cleanup(void) | |||
414 | /* Deregister */ | 425 | /* Deregister */ |
415 | misc_deregister(&sp5100_tco_miscdev); | 426 | misc_deregister(&sp5100_tco_miscdev); |
416 | iounmap(tcobase); | 427 | iounmap(tcobase); |
428 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | ||
417 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 429 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
418 | } | 430 | } |
419 | 431 | ||
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 02b5a9c05cfa..036343ba204e 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -122,7 +122,7 @@ static struct irq_chip xen_pirq_chip; | |||
122 | /* Get info for IRQ */ | 122 | /* Get info for IRQ */ |
123 | static struct irq_info *info_for_irq(unsigned irq) | 123 | static struct irq_info *info_for_irq(unsigned irq) |
124 | { | 124 | { |
125 | return get_irq_data(irq); | 125 | return irq_get_handler_data(irq); |
126 | } | 126 | } |
127 | 127 | ||
128 | /* Constructors for packed IRQ information. */ | 128 | /* Constructors for packed IRQ information. */ |
@@ -403,7 +403,7 @@ static void xen_irq_init(unsigned irq) | |||
403 | 403 | ||
404 | info->type = IRQT_UNBOUND; | 404 | info->type = IRQT_UNBOUND; |
405 | 405 | ||
406 | set_irq_data(irq, info); | 406 | irq_set_handler_data(irq, info); |
407 | 407 | ||
408 | list_add_tail(&info->list, &xen_irq_list_head); | 408 | list_add_tail(&info->list, &xen_irq_list_head); |
409 | } | 409 | } |
@@ -458,11 +458,11 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) | |||
458 | 458 | ||
459 | static void xen_free_irq(unsigned irq) | 459 | static void xen_free_irq(unsigned irq) |
460 | { | 460 | { |
461 | struct irq_info *info = get_irq_data(irq); | 461 | struct irq_info *info = irq_get_handler_data(irq); |
462 | 462 | ||
463 | list_del(&info->list); | 463 | list_del(&info->list); |
464 | 464 | ||
465 | set_irq_data(irq, NULL); | 465 | irq_set_handler_data(irq, NULL); |
466 | 466 | ||
467 | kfree(info); | 467 | kfree(info); |
468 | 468 | ||
@@ -585,7 +585,7 @@ static void ack_pirq(struct irq_data *data) | |||
585 | { | 585 | { |
586 | int evtchn = evtchn_from_irq(data->irq); | 586 | int evtchn = evtchn_from_irq(data->irq); |
587 | 587 | ||
588 | move_native_irq(data->irq); | 588 | irq_move_irq(data); |
589 | 589 | ||
590 | if (VALID_EVTCHN(evtchn)) { | 590 | if (VALID_EVTCHN(evtchn)) { |
591 | mask_evtchn(evtchn); | 591 | mask_evtchn(evtchn); |
@@ -639,8 +639,8 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
639 | if (irq < 0) | 639 | if (irq < 0) |
640 | goto out; | 640 | goto out; |
641 | 641 | ||
642 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, | 642 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, |
643 | handle_level_irq, name); | 643 | name); |
644 | 644 | ||
645 | irq_op.irq = irq; | 645 | irq_op.irq = irq; |
646 | irq_op.vector = 0; | 646 | irq_op.vector = 0; |
@@ -690,8 +690,8 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | |||
690 | if (irq == -1) | 690 | if (irq == -1) |
691 | goto out; | 691 | goto out; |
692 | 692 | ||
693 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, | 693 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, |
694 | handle_level_irq, name); | 694 | name); |
695 | 695 | ||
696 | xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0); | 696 | xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0); |
697 | ret = irq_set_msi_desc(irq, msidesc); | 697 | ret = irq_set_msi_desc(irq, msidesc); |
@@ -772,7 +772,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
772 | if (irq == -1) | 772 | if (irq == -1) |
773 | goto out; | 773 | goto out; |
774 | 774 | ||
775 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | 775 | irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, |
776 | handle_fasteoi_irq, "event"); | 776 | handle_fasteoi_irq, "event"); |
777 | 777 | ||
778 | xen_irq_info_evtchn_init(irq, evtchn); | 778 | xen_irq_info_evtchn_init(irq, evtchn); |
@@ -799,7 +799,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
799 | if (irq < 0) | 799 | if (irq < 0) |
800 | goto out; | 800 | goto out; |
801 | 801 | ||
802 | set_irq_chip_and_handler_name(irq, &xen_percpu_chip, | 802 | irq_set_chip_and_handler_name(irq, &xen_percpu_chip, |
803 | handle_percpu_irq, "ipi"); | 803 | handle_percpu_irq, "ipi"); |
804 | 804 | ||
805 | bind_ipi.vcpu = cpu; | 805 | bind_ipi.vcpu = cpu; |
@@ -848,7 +848,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
848 | if (irq == -1) | 848 | if (irq == -1) |
849 | goto out; | 849 | goto out; |
850 | 850 | ||
851 | set_irq_chip_and_handler_name(irq, &xen_percpu_chip, | 851 | irq_set_chip_and_handler_name(irq, &xen_percpu_chip, |
852 | handle_percpu_irq, "virq"); | 852 | handle_percpu_irq, "virq"); |
853 | 853 | ||
854 | bind_virq.virq = virq; | 854 | bind_virq.virq = virq; |
@@ -1339,7 +1339,7 @@ static void ack_dynirq(struct irq_data *data) | |||
1339 | { | 1339 | { |
1340 | int evtchn = evtchn_from_irq(data->irq); | 1340 | int evtchn = evtchn_from_irq(data->irq); |
1341 | 1341 | ||
1342 | move_masked_irq(data->irq); | 1342 | irq_move_masked_irq(data); |
1343 | 1343 | ||
1344 | if (VALID_EVTCHN(evtchn)) | 1344 | if (VALID_EVTCHN(evtchn)) |
1345 | unmask_evtchn(evtchn); | 1345 | unmask_evtchn(evtchn); |
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 017ce600fbc6..b0f9e8fb0052 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -273,7 +273,7 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) | |||
273 | map->vma->vm_start + map->notify.addr; | 273 | map->vma->vm_start + map->notify.addr; |
274 | err = copy_to_user(tmp, &err, 1); | 274 | err = copy_to_user(tmp, &err, 1); |
275 | if (err) | 275 | if (err) |
276 | return err; | 276 | return -EFAULT; |
277 | map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; | 277 | map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; |
278 | } else if (pgno >= offset && pgno < offset + pages) { | 278 | } else if (pgno >= offset && pgno < offset + pages) { |
279 | uint8_t *tmp = kmap(map->pages[pgno]); | 279 | uint8_t *tmp = kmap(map->pages[pgno]); |
@@ -662,7 +662,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
662 | if (map->flags) { | 662 | if (map->flags) { |
663 | if ((vma->vm_flags & VM_WRITE) && | 663 | if ((vma->vm_flags & VM_WRITE) && |
664 | (map->flags & GNTMAP_readonly)) | 664 | (map->flags & GNTMAP_readonly)) |
665 | return -EINVAL; | 665 | goto out_unlock_put; |
666 | } else { | 666 | } else { |
667 | map->flags = GNTMAP_host_map; | 667 | map->flags = GNTMAP_host_map; |
668 | if (!(vma->vm_flags & VM_WRITE)) | 668 | if (!(vma->vm_flags & VM_WRITE)) |
@@ -700,6 +700,8 @@ unlock_out: | |||
700 | spin_unlock(&priv->lock); | 700 | spin_unlock(&priv->lock); |
701 | return err; | 701 | return err; |
702 | 702 | ||
703 | out_unlock_put: | ||
704 | spin_unlock(&priv->lock); | ||
703 | out_put_map: | 705 | out_put_map: |
704 | if (use_ptemod) | 706 | if (use_ptemod) |
705 | map->vma = NULL; | 707 | map->vma = NULL; |