diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-03 11:06:56 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-03 11:06:56 -0400 |
commit | 4046136afbd1038d776bad9c59e1e4cca78186fb (patch) | |
tree | 1888ca7bd978c0bba891ac9ee51224fd06d1162e /drivers | |
parent | b55a0ff8df92646696c858a8fea4dbf38509f202 (diff) | |
parent | a100d88df1e924e5c9678fabf054d1bae7ab74fb (diff) |
Merge tag 'char-misc-3.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc into next
Pull char/misc driver patches from Greg KH:
"Here is the big char / misc driver update for 3.16-rc1.
Lots of different driver updates for a variety of different drivers
and minor driver subsystems.
All have been in linux-next with no reported issues"
* tag 'char-misc-3.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (79 commits)
hv: use correct order when freeing monitor_pages
spmi: of: fixup generic SPMI devicetree binding example
applicom: dereferencing NULL on error path
misc: genwqe: fix uninitialized return value in genwqe_free_sync_sgl()
miscdevice.h: Simple syntax fix to make pointers consistent.
MAINTAINERS: Add miscdevice.h to file list for char/misc drivers.
mcb: Add support for shared PCI IRQs
drivers: Remove duplicate conditionally included subdirs
misc: atmel_pwm: only build for supported platforms
mei: me: move probe quirk to cfg structure
mei: add per device configuration
mei: me: read H_CSR after asserting reset
mei: me: drop harmful wait optimization
mei: me: fix hw ready reset flow
mei: fix memory leak of mei_clients array
uio: fix vma io range check in mmap
drivers: uio_dmem_genirq: Fix memory leak in uio_dmem_genirq_probe()
w1: do not unlock unheld list_mutex in __w1_remove_master_device()
w1: optional bundling of netlink kernel replies
connector: allow multiple messages to be sent in one packet
...
Diffstat (limited to 'drivers')
54 files changed, 2653 insertions, 718 deletions
diff --git a/drivers/Makefile b/drivers/Makefile index 1a1790e4de6a..f98b50d8251d 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
@@ -83,7 +83,6 @@ obj-$(CONFIG_PCCARD) += pcmcia/ | |||
83 | obj-$(CONFIG_DIO) += dio/ | 83 | obj-$(CONFIG_DIO) += dio/ |
84 | obj-$(CONFIG_SBUS) += sbus/ | 84 | obj-$(CONFIG_SBUS) += sbus/ |
85 | obj-$(CONFIG_ZORRO) += zorro/ | 85 | obj-$(CONFIG_ZORRO) += zorro/ |
86 | obj-$(CONFIG_MAC) += macintosh/ | ||
87 | obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/ | 86 | obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/ |
88 | obj-$(CONFIG_PARIDE) += block/paride/ | 87 | obj-$(CONFIG_PARIDE) += block/paride/ |
89 | obj-$(CONFIG_TC) += tc/ | 88 | obj-$(CONFIG_TC) += tc/ |
@@ -141,7 +140,6 @@ obj-y += clk/ | |||
141 | 140 | ||
142 | obj-$(CONFIG_MAILBOX) += mailbox/ | 141 | obj-$(CONFIG_MAILBOX) += mailbox/ |
143 | obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ | 142 | obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ |
144 | obj-$(CONFIG_NFC) += nfc/ | ||
145 | obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ | 143 | obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ |
146 | obj-$(CONFIG_REMOTEPROC) += remoteproc/ | 144 | obj-$(CONFIG_REMOTEPROC) += remoteproc/ |
147 | obj-$(CONFIG_RPMSG) += rpmsg/ | 145 | obj-$(CONFIG_RPMSG) += rpmsg/ |
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index 974321a2508d..14790304b84b 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c | |||
@@ -345,7 +345,6 @@ out: | |||
345 | free_irq(apbs[i].irq, &dummy); | 345 | free_irq(apbs[i].irq, &dummy); |
346 | iounmap(apbs[i].RamIO); | 346 | iounmap(apbs[i].RamIO); |
347 | } | 347 | } |
348 | pci_disable_device(dev); | ||
349 | return ret; | 348 | return ret; |
350 | } | 349 | } |
351 | 350 | ||
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index b14f1d36f897..f612d68629dc 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c | |||
@@ -43,6 +43,8 @@ static struct cn_dev cdev; | |||
43 | static int cn_already_initialized; | 43 | static int cn_already_initialized; |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * Sends mult (multiple) cn_msg at a time. | ||
47 | * | ||
46 | * msg->seq and msg->ack are used to determine message genealogy. | 48 | * msg->seq and msg->ack are used to determine message genealogy. |
47 | * When someone sends message it puts there locally unique sequence | 49 | * When someone sends message it puts there locally unique sequence |
48 | * and random acknowledge numbers. Sequence number may be copied into | 50 | * and random acknowledge numbers. Sequence number may be copied into |
@@ -62,10 +64,13 @@ static int cn_already_initialized; | |||
62 | * the acknowledgement number in the original message + 1, then it is | 64 | * the acknowledgement number in the original message + 1, then it is |
63 | * a new message. | 65 | * a new message. |
64 | * | 66 | * |
67 | * If msg->len != len, then additional cn_msg messages are expected following | ||
68 | * the first msg. | ||
69 | * | ||
65 | * The message is sent to, the portid if given, the group if given, both if | 70 | * The message is sent to, the portid if given, the group if given, both if |
66 | * both, or if both are zero then the group is looked up and sent there. | 71 | * both, or if both are zero then the group is looked up and sent there. |
67 | */ | 72 | */ |
68 | int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, | 73 | int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group, |
69 | gfp_t gfp_mask) | 74 | gfp_t gfp_mask) |
70 | { | 75 | { |
71 | struct cn_callback_entry *__cbq; | 76 | struct cn_callback_entry *__cbq; |
@@ -98,7 +103,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, | |||
98 | if (!portid && !netlink_has_listeners(dev->nls, group)) | 103 | if (!portid && !netlink_has_listeners(dev->nls, group)) |
99 | return -ESRCH; | 104 | return -ESRCH; |
100 | 105 | ||
101 | size = sizeof(*msg) + msg->len; | 106 | size = sizeof(*msg) + len; |
102 | 107 | ||
103 | skb = nlmsg_new(size, gfp_mask); | 108 | skb = nlmsg_new(size, gfp_mask); |
104 | if (!skb) | 109 | if (!skb) |
@@ -121,6 +126,14 @@ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, | |||
121 | gfp_mask); | 126 | gfp_mask); |
122 | return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT)); | 127 | return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT)); |
123 | } | 128 | } |
129 | EXPORT_SYMBOL_GPL(cn_netlink_send_mult); | ||
130 | |||
131 | /* same as cn_netlink_send_mult except msg->len is used for len */ | ||
132 | int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, | ||
133 | gfp_t gfp_mask) | ||
134 | { | ||
135 | return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask); | ||
136 | } | ||
124 | EXPORT_SYMBOL_GPL(cn_netlink_send); | 137 | EXPORT_SYMBOL_GPL(cn_netlink_send); |
125 | 138 | ||
126 | /* | 139 | /* |
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index be56e8ac95e6..aebde489c291 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig | |||
@@ -28,13 +28,13 @@ config EXTCON_ADC_JACK | |||
28 | Say Y here to enable extcon device driver based on ADC values. | 28 | Say Y here to enable extcon device driver based on ADC values. |
29 | 29 | ||
30 | config EXTCON_MAX14577 | 30 | config EXTCON_MAX14577 |
31 | tristate "MAX14577 EXTCON Support" | 31 | tristate "MAX14577/77836 EXTCON Support" |
32 | depends on MFD_MAX14577 | 32 | depends on MFD_MAX14577 |
33 | select IRQ_DOMAIN | 33 | select IRQ_DOMAIN |
34 | select REGMAP_I2C | 34 | select REGMAP_I2C |
35 | help | 35 | help |
36 | If you say yes here you get support for the MUIC device of | 36 | If you say yes here you get support for the MUIC device of |
37 | Maxim MAX14577 PMIC. The MAX14577 MUIC is a USB port accessory | 37 | Maxim MAX14577/77836. The MAX14577/77836 MUIC is a USB port accessory |
38 | detector and switch. | 38 | detector and switch. |
39 | 39 | ||
40 | config EXTCON_MAX77693 | 40 | config EXTCON_MAX77693 |
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c index e23f1c2e5053..e18f95be3733 100644 --- a/drivers/extcon/extcon-adc-jack.c +++ b/drivers/extcon/extcon-adc-jack.c | |||
@@ -39,7 +39,7 @@ | |||
39 | * @chan: iio channel being queried. | 39 | * @chan: iio channel being queried. |
40 | */ | 40 | */ |
41 | struct adc_jack_data { | 41 | struct adc_jack_data { |
42 | struct extcon_dev edev; | 42 | struct extcon_dev *edev; |
43 | 43 | ||
44 | const char **cable_names; | 44 | const char **cable_names; |
45 | int num_cables; | 45 | int num_cables; |
@@ -64,7 +64,7 @@ static void adc_jack_handler(struct work_struct *work) | |||
64 | 64 | ||
65 | ret = iio_read_channel_raw(data->chan, &adc_val); | 65 | ret = iio_read_channel_raw(data->chan, &adc_val); |
66 | if (ret < 0) { | 66 | if (ret < 0) { |
67 | dev_err(&data->edev.dev, "read channel() error: %d\n", ret); | 67 | dev_err(&data->edev->dev, "read channel() error: %d\n", ret); |
68 | return; | 68 | return; |
69 | } | 69 | } |
70 | 70 | ||
@@ -80,7 +80,7 @@ static void adc_jack_handler(struct work_struct *work) | |||
80 | } | 80 | } |
81 | /* if no def has met, it means state = 0 (no cables attached) */ | 81 | /* if no def has met, it means state = 0 (no cables attached) */ |
82 | 82 | ||
83 | extcon_set_state(&data->edev, state); | 83 | extcon_set_state(data->edev, state); |
84 | } | 84 | } |
85 | 85 | ||
86 | static irqreturn_t adc_jack_irq_thread(int irq, void *_data) | 86 | static irqreturn_t adc_jack_irq_thread(int irq, void *_data) |
@@ -102,33 +102,33 @@ static int adc_jack_probe(struct platform_device *pdev) | |||
102 | if (!data) | 102 | if (!data) |
103 | return -ENOMEM; | 103 | return -ENOMEM; |
104 | 104 | ||
105 | data->edev.name = pdata->name; | ||
106 | |||
107 | if (!pdata->cable_names) { | 105 | if (!pdata->cable_names) { |
108 | err = -EINVAL; | ||
109 | dev_err(&pdev->dev, "error: cable_names not defined.\n"); | 106 | dev_err(&pdev->dev, "error: cable_names not defined.\n"); |
110 | goto out; | 107 | return -EINVAL; |
111 | } | 108 | } |
112 | 109 | ||
113 | data->edev.dev.parent = &pdev->dev; | 110 | data->edev = devm_extcon_dev_allocate(&pdev->dev, pdata->cable_names); |
114 | data->edev.supported_cable = pdata->cable_names; | 111 | if (IS_ERR(data->edev)) { |
112 | dev_err(&pdev->dev, "failed to allocate extcon device\n"); | ||
113 | return -ENOMEM; | ||
114 | } | ||
115 | data->edev->dev.parent = &pdev->dev; | ||
116 | data->edev->name = pdata->name; | ||
115 | 117 | ||
116 | /* Check the length of array and set num_cables */ | 118 | /* Check the length of array and set num_cables */ |
117 | for (i = 0; data->edev.supported_cable[i]; i++) | 119 | for (i = 0; data->edev->supported_cable[i]; i++) |
118 | ; | 120 | ; |
119 | if (i == 0 || i > SUPPORTED_CABLE_MAX) { | 121 | if (i == 0 || i > SUPPORTED_CABLE_MAX) { |
120 | err = -EINVAL; | ||
121 | dev_err(&pdev->dev, "error: pdata->cable_names size = %d\n", | 122 | dev_err(&pdev->dev, "error: pdata->cable_names size = %d\n", |
122 | i - 1); | 123 | i - 1); |
123 | goto out; | 124 | return -EINVAL; |
124 | } | 125 | } |
125 | data->num_cables = i; | 126 | data->num_cables = i; |
126 | 127 | ||
127 | if (!pdata->adc_conditions || | 128 | if (!pdata->adc_conditions || |
128 | !pdata->adc_conditions[0].state) { | 129 | !pdata->adc_conditions[0].state) { |
129 | err = -EINVAL; | ||
130 | dev_err(&pdev->dev, "error: adc_conditions not defined.\n"); | 130 | dev_err(&pdev->dev, "error: adc_conditions not defined.\n"); |
131 | goto out; | 131 | return -EINVAL; |
132 | } | 132 | } |
133 | data->adc_conditions = pdata->adc_conditions; | 133 | data->adc_conditions = pdata->adc_conditions; |
134 | 134 | ||
@@ -138,10 +138,8 @@ static int adc_jack_probe(struct platform_device *pdev) | |||
138 | data->num_conditions = i; | 138 | data->num_conditions = i; |
139 | 139 | ||
140 | data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel); | 140 | data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel); |
141 | if (IS_ERR(data->chan)) { | 141 | if (IS_ERR(data->chan)) |
142 | err = PTR_ERR(data->chan); | 142 | return PTR_ERR(data->chan); |
143 | goto out; | ||
144 | } | ||
145 | 143 | ||
146 | data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms); | 144 | data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms); |
147 | 145 | ||
@@ -149,15 +147,14 @@ static int adc_jack_probe(struct platform_device *pdev) | |||
149 | 147 | ||
150 | platform_set_drvdata(pdev, data); | 148 | platform_set_drvdata(pdev, data); |
151 | 149 | ||
152 | err = extcon_dev_register(&data->edev); | 150 | err = devm_extcon_dev_register(&pdev->dev, data->edev); |
153 | if (err) | 151 | if (err) |
154 | goto out; | 152 | return err; |
155 | 153 | ||
156 | data->irq = platform_get_irq(pdev, 0); | 154 | data->irq = platform_get_irq(pdev, 0); |
157 | if (!data->irq) { | 155 | if (!data->irq) { |
158 | dev_err(&pdev->dev, "platform_get_irq failed\n"); | 156 | dev_err(&pdev->dev, "platform_get_irq failed\n"); |
159 | err = -ENODEV; | 157 | return -ENODEV; |
160 | goto err_irq; | ||
161 | } | 158 | } |
162 | 159 | ||
163 | err = request_any_context_irq(data->irq, adc_jack_irq_thread, | 160 | err = request_any_context_irq(data->irq, adc_jack_irq_thread, |
@@ -165,15 +162,10 @@ static int adc_jack_probe(struct platform_device *pdev) | |||
165 | 162 | ||
166 | if (err < 0) { | 163 | if (err < 0) { |
167 | dev_err(&pdev->dev, "error: irq %d\n", data->irq); | 164 | dev_err(&pdev->dev, "error: irq %d\n", data->irq); |
168 | goto err_irq; | 165 | return err; |
169 | } | 166 | } |
170 | 167 | ||
171 | return 0; | 168 | return 0; |
172 | |||
173 | err_irq: | ||
174 | extcon_dev_unregister(&data->edev); | ||
175 | out: | ||
176 | return err; | ||
177 | } | 169 | } |
178 | 170 | ||
179 | static int adc_jack_remove(struct platform_device *pdev) | 171 | static int adc_jack_remove(struct platform_device *pdev) |
@@ -182,7 +174,6 @@ static int adc_jack_remove(struct platform_device *pdev) | |||
182 | 174 | ||
183 | free_irq(data->irq, data); | 175 | free_irq(data->irq, data); |
184 | cancel_work_sync(&data->handler.work); | 176 | cancel_work_sync(&data->handler.work); |
185 | extcon_dev_unregister(&data->edev); | ||
186 | 177 | ||
187 | return 0; | 178 | return 0; |
188 | } | 179 | } |
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c index 98a14f6143a7..6c84e3d12043 100644 --- a/drivers/extcon/extcon-arizona.c +++ b/drivers/extcon/extcon-arizona.c | |||
@@ -91,7 +91,7 @@ struct arizona_extcon_info { | |||
91 | 91 | ||
92 | int hpdet_ip; | 92 | int hpdet_ip; |
93 | 93 | ||
94 | struct extcon_dev edev; | 94 | struct extcon_dev *edev; |
95 | }; | 95 | }; |
96 | 96 | ||
97 | static const struct arizona_micd_config micd_default_modes[] = { | 97 | static const struct arizona_micd_config micd_default_modes[] = { |
@@ -546,7 +546,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data) | |||
546 | } | 546 | } |
547 | 547 | ||
548 | /* If the cable was removed while measuring ignore the result */ | 548 | /* If the cable was removed while measuring ignore the result */ |
549 | ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL); | 549 | ret = extcon_get_cable_state_(info->edev, ARIZONA_CABLE_MECHANICAL); |
550 | if (ret < 0) { | 550 | if (ret < 0) { |
551 | dev_err(arizona->dev, "Failed to check cable state: %d\n", | 551 | dev_err(arizona->dev, "Failed to check cable state: %d\n", |
552 | ret); | 552 | ret); |
@@ -581,7 +581,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data) | |||
581 | else | 581 | else |
582 | report = ARIZONA_CABLE_HEADPHONE; | 582 | report = ARIZONA_CABLE_HEADPHONE; |
583 | 583 | ||
584 | ret = extcon_set_cable_state_(&info->edev, report, true); | 584 | ret = extcon_set_cable_state_(info->edev, report, true); |
585 | if (ret != 0) | 585 | if (ret != 0) |
586 | dev_err(arizona->dev, "Failed to report HP/line: %d\n", | 586 | dev_err(arizona->dev, "Failed to report HP/line: %d\n", |
587 | ret); | 587 | ret); |
@@ -664,7 +664,7 @@ err: | |||
664 | ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); | 664 | ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); |
665 | 665 | ||
666 | /* Just report headphone */ | 666 | /* Just report headphone */ |
667 | ret = extcon_update_state(&info->edev, | 667 | ret = extcon_update_state(info->edev, |
668 | 1 << ARIZONA_CABLE_HEADPHONE, | 668 | 1 << ARIZONA_CABLE_HEADPHONE, |
669 | 1 << ARIZONA_CABLE_HEADPHONE); | 669 | 1 << ARIZONA_CABLE_HEADPHONE); |
670 | if (ret != 0) | 670 | if (ret != 0) |
@@ -723,7 +723,7 @@ err: | |||
723 | ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); | 723 | ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); |
724 | 724 | ||
725 | /* Just report headphone */ | 725 | /* Just report headphone */ |
726 | ret = extcon_update_state(&info->edev, | 726 | ret = extcon_update_state(info->edev, |
727 | 1 << ARIZONA_CABLE_HEADPHONE, | 727 | 1 << ARIZONA_CABLE_HEADPHONE, |
728 | 1 << ARIZONA_CABLE_HEADPHONE); | 728 | 1 << ARIZONA_CABLE_HEADPHONE); |
729 | if (ret != 0) | 729 | if (ret != 0) |
@@ -764,7 +764,7 @@ static void arizona_micd_detect(struct work_struct *work) | |||
764 | mutex_lock(&info->lock); | 764 | mutex_lock(&info->lock); |
765 | 765 | ||
766 | /* If the cable was removed while measuring ignore the result */ | 766 | /* If the cable was removed while measuring ignore the result */ |
767 | ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL); | 767 | ret = extcon_get_cable_state_(info->edev, ARIZONA_CABLE_MECHANICAL); |
768 | if (ret < 0) { | 768 | if (ret < 0) { |
769 | dev_err(arizona->dev, "Failed to check cable state: %d\n", | 769 | dev_err(arizona->dev, "Failed to check cable state: %d\n", |
770 | ret); | 770 | ret); |
@@ -812,7 +812,7 @@ static void arizona_micd_detect(struct work_struct *work) | |||
812 | if (info->detecting && (val & ARIZONA_MICD_LVL_8)) { | 812 | if (info->detecting && (val & ARIZONA_MICD_LVL_8)) { |
813 | arizona_identify_headphone(info); | 813 | arizona_identify_headphone(info); |
814 | 814 | ||
815 | ret = extcon_update_state(&info->edev, | 815 | ret = extcon_update_state(info->edev, |
816 | 1 << ARIZONA_CABLE_MICROPHONE, | 816 | 1 << ARIZONA_CABLE_MICROPHONE, |
817 | 1 << ARIZONA_CABLE_MICROPHONE); | 817 | 1 << ARIZONA_CABLE_MICROPHONE); |
818 | 818 | ||
@@ -999,7 +999,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data) | |||
999 | 999 | ||
1000 | if (info->last_jackdet == present) { | 1000 | if (info->last_jackdet == present) { |
1001 | dev_dbg(arizona->dev, "Detected jack\n"); | 1001 | dev_dbg(arizona->dev, "Detected jack\n"); |
1002 | ret = extcon_set_cable_state_(&info->edev, | 1002 | ret = extcon_set_cable_state_(info->edev, |
1003 | ARIZONA_CABLE_MECHANICAL, true); | 1003 | ARIZONA_CABLE_MECHANICAL, true); |
1004 | 1004 | ||
1005 | if (ret != 0) | 1005 | if (ret != 0) |
@@ -1038,7 +1038,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data) | |||
1038 | info->micd_ranges[i].key, 0); | 1038 | info->micd_ranges[i].key, 0); |
1039 | input_sync(info->input); | 1039 | input_sync(info->input); |
1040 | 1040 | ||
1041 | ret = extcon_update_state(&info->edev, 0xffffffff, 0); | 1041 | ret = extcon_update_state(info->edev, 0xffffffff, 0); |
1042 | if (ret != 0) | 1042 | if (ret != 0) |
1043 | dev_err(arizona->dev, "Removal report failed: %d\n", | 1043 | dev_err(arizona->dev, "Removal report failed: %d\n", |
1044 | ret); | 1044 | ret); |
@@ -1105,15 +1105,14 @@ static int arizona_extcon_probe(struct platform_device *pdev) | |||
1105 | info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); | 1105 | info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); |
1106 | if (!info) { | 1106 | if (!info) { |
1107 | dev_err(&pdev->dev, "Failed to allocate memory\n"); | 1107 | dev_err(&pdev->dev, "Failed to allocate memory\n"); |
1108 | ret = -ENOMEM; | 1108 | return -ENOMEM; |
1109 | goto err; | ||
1110 | } | 1109 | } |
1111 | 1110 | ||
1112 | info->micvdd = devm_regulator_get(arizona->dev, "MICVDD"); | 1111 | info->micvdd = devm_regulator_get(arizona->dev, "MICVDD"); |
1113 | if (IS_ERR(info->micvdd)) { | 1112 | if (IS_ERR(info->micvdd)) { |
1114 | ret = PTR_ERR(info->micvdd); | 1113 | ret = PTR_ERR(info->micvdd); |
1115 | dev_err(arizona->dev, "Failed to get MICVDD: %d\n", ret); | 1114 | dev_err(arizona->dev, "Failed to get MICVDD: %d\n", ret); |
1116 | goto err; | 1115 | return ret; |
1117 | } | 1116 | } |
1118 | 1117 | ||
1119 | mutex_init(&info->lock); | 1118 | mutex_init(&info->lock); |
@@ -1151,15 +1150,19 @@ static int arizona_extcon_probe(struct platform_device *pdev) | |||
1151 | break; | 1150 | break; |
1152 | } | 1151 | } |
1153 | 1152 | ||
1154 | info->edev.name = "Headset Jack"; | 1153 | info->edev = devm_extcon_dev_allocate(&pdev->dev, arizona_cable); |
1155 | info->edev.dev.parent = arizona->dev; | 1154 | if (IS_ERR(info->edev)) { |
1156 | info->edev.supported_cable = arizona_cable; | 1155 | dev_err(&pdev->dev, "failed to allocate extcon device\n"); |
1156 | return -ENOMEM; | ||
1157 | } | ||
1158 | info->edev->name = "Headset Jack"; | ||
1159 | info->edev->dev.parent = arizona->dev; | ||
1157 | 1160 | ||
1158 | ret = extcon_dev_register(&info->edev); | 1161 | ret = devm_extcon_dev_register(&pdev->dev, info->edev); |
1159 | if (ret < 0) { | 1162 | if (ret < 0) { |
1160 | dev_err(arizona->dev, "extcon_dev_register() failed: %d\n", | 1163 | dev_err(arizona->dev, "extcon_dev_register() failed: %d\n", |
1161 | ret); | 1164 | ret); |
1162 | goto err; | 1165 | return ret; |
1163 | } | 1166 | } |
1164 | 1167 | ||
1165 | info->input = devm_input_allocate_device(&pdev->dev); | 1168 | info->input = devm_input_allocate_device(&pdev->dev); |
@@ -1410,8 +1413,6 @@ err_rise: | |||
1410 | err_input: | 1413 | err_input: |
1411 | err_register: | 1414 | err_register: |
1412 | pm_runtime_disable(&pdev->dev); | 1415 | pm_runtime_disable(&pdev->dev); |
1413 | extcon_dev_unregister(&info->edev); | ||
1414 | err: | ||
1415 | return ret; | 1416 | return ret; |
1416 | } | 1417 | } |
1417 | 1418 | ||
@@ -1445,7 +1446,6 @@ static int arizona_extcon_remove(struct platform_device *pdev) | |||
1445 | regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE, | 1446 | regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE, |
1446 | ARIZONA_JD1_ENA, 0); | 1447 | ARIZONA_JD1_ENA, 0); |
1447 | arizona_clk32k_disable(arizona); | 1448 | arizona_clk32k_disable(arizona); |
1448 | extcon_dev_unregister(&info->edev); | ||
1449 | 1449 | ||
1450 | return 0; | 1450 | return 0; |
1451 | } | 1451 | } |
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c index 7ab21aa6eaa1..18d42c0e4581 100644 --- a/drivers/extcon/extcon-class.c +++ b/drivers/extcon/extcon-class.c | |||
@@ -565,6 +565,100 @@ static void dummy_sysfs_dev_release(struct device *dev) | |||
565 | { | 565 | { |
566 | } | 566 | } |
567 | 567 | ||
568 | /* | ||
569 | * extcon_dev_allocate() - Allocate the memory of extcon device. | ||
570 | * @supported_cable: Array of supported cable names ending with NULL. | ||
571 | * If supported_cable is NULL, cable name related APIs | ||
572 | * are disabled. | ||
573 | * | ||
574 | * This function allocates the memory for extcon device without allocating | ||
575 | * memory in each extcon provider driver and initialize default setting for | ||
576 | * extcon device. | ||
577 | * | ||
578 | * Return the pointer of extcon device if success or ERR_PTR(err) if fail | ||
579 | */ | ||
580 | struct extcon_dev *extcon_dev_allocate(const char **supported_cable) | ||
581 | { | ||
582 | struct extcon_dev *edev; | ||
583 | |||
584 | edev = kzalloc(sizeof(*edev), GFP_KERNEL); | ||
585 | if (!edev) | ||
586 | return ERR_PTR(-ENOMEM); | ||
587 | |||
588 | edev->max_supported = 0; | ||
589 | edev->supported_cable = supported_cable; | ||
590 | |||
591 | return edev; | ||
592 | } | ||
593 | |||
594 | /* | ||
595 | * extcon_dev_free() - Free the memory of extcon device. | ||
596 | * @edev: the extcon device to free | ||
597 | */ | ||
598 | void extcon_dev_free(struct extcon_dev *edev) | ||
599 | { | ||
600 | kfree(edev); | ||
601 | } | ||
602 | EXPORT_SYMBOL_GPL(extcon_dev_free); | ||
603 | |||
604 | static int devm_extcon_dev_match(struct device *dev, void *res, void *data) | ||
605 | { | ||
606 | struct extcon_dev **r = res; | ||
607 | |||
608 | if (WARN_ON(!r || !*r)) | ||
609 | return 0; | ||
610 | |||
611 | return *r == data; | ||
612 | } | ||
613 | |||
614 | static void devm_extcon_dev_release(struct device *dev, void *res) | ||
615 | { | ||
616 | extcon_dev_free(*(struct extcon_dev **)res); | ||
617 | } | ||
618 | |||
619 | /** | ||
620 | * devm_extcon_dev_allocate - Allocate managed extcon device | ||
621 | * @dev: device owning the extcon device being created | ||
622 | * @supported_cable: Array of supported cable names ending with NULL. | ||
623 | * If supported_cable is NULL, cable name related APIs | ||
624 | * are disabled. | ||
625 | * | ||
626 | * This function manages automatically the memory of extcon device using device | ||
627 | * resource management and simplify the control of freeing the memory of extcon | ||
628 | * device. | ||
629 | * | ||
630 | * Returns the pointer memory of allocated extcon_dev if success | ||
631 | * or ERR_PTR(err) if fail | ||
632 | */ | ||
633 | struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, | ||
634 | const char **supported_cable) | ||
635 | { | ||
636 | struct extcon_dev **ptr, *edev; | ||
637 | |||
638 | ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL); | ||
639 | if (!ptr) | ||
640 | return ERR_PTR(-ENOMEM); | ||
641 | |||
642 | edev = extcon_dev_allocate(supported_cable); | ||
643 | if (IS_ERR(edev)) { | ||
644 | devres_free(ptr); | ||
645 | return edev; | ||
646 | } | ||
647 | |||
648 | *ptr = edev; | ||
649 | devres_add(dev, ptr); | ||
650 | |||
651 | return edev; | ||
652 | } | ||
653 | EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate); | ||
654 | |||
655 | void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev) | ||
656 | { | ||
657 | WARN_ON(devres_release(dev, devm_extcon_dev_release, | ||
658 | devm_extcon_dev_match, edev)); | ||
659 | } | ||
660 | EXPORT_SYMBOL_GPL(devm_extcon_dev_free); | ||
661 | |||
568 | /** | 662 | /** |
569 | * extcon_dev_register() - Register a new extcon device | 663 | * extcon_dev_register() - Register a new extcon device |
570 | * @edev : the new extcon device (should be allocated before calling) | 664 | * @edev : the new extcon device (should be allocated before calling) |
@@ -819,6 +913,63 @@ void extcon_dev_unregister(struct extcon_dev *edev) | |||
819 | } | 913 | } |
820 | EXPORT_SYMBOL_GPL(extcon_dev_unregister); | 914 | EXPORT_SYMBOL_GPL(extcon_dev_unregister); |
821 | 915 | ||
916 | static void devm_extcon_dev_unreg(struct device *dev, void *res) | ||
917 | { | ||
918 | extcon_dev_unregister(*(struct extcon_dev **)res); | ||
919 | } | ||
920 | |||
921 | /** | ||
922 | * devm_extcon_dev_register() - Resource-managed extcon_dev_register() | ||
923 | * @dev: device to allocate extcon device | ||
924 | * @edev: the new extcon device to register | ||
925 | * | ||
926 | * Managed extcon_dev_register() function. If extcon device is attached with | ||
927 | * this function, that extcon device is automatically unregistered on driver | ||
928 | * detach. Internally this function calls extcon_dev_register() function. | ||
929 | * To get more information, refer that function. | ||
930 | * | ||
931 | * If extcon device is registered with this function and the device needs to be | ||
932 | * unregistered separately, devm_extcon_dev_unregister() should be used. | ||
933 | * | ||
934 | * Returns 0 if success or negative error number if failure. | ||
935 | */ | ||
936 | int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev) | ||
937 | { | ||
938 | struct extcon_dev **ptr; | ||
939 | int ret; | ||
940 | |||
941 | ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL); | ||
942 | if (!ptr) | ||
943 | return -ENOMEM; | ||
944 | |||
945 | ret = extcon_dev_register(edev); | ||
946 | if (ret) { | ||
947 | devres_free(ptr); | ||
948 | return ret; | ||
949 | } | ||
950 | |||
951 | *ptr = edev; | ||
952 | devres_add(dev, ptr); | ||
953 | |||
954 | return 0; | ||
955 | } | ||
956 | EXPORT_SYMBOL_GPL(devm_extcon_dev_register); | ||
957 | |||
958 | /** | ||
959 | * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister() | ||
960 | * @dev: device the extcon belongs to | ||
961 | * @edev: the extcon device to unregister | ||
962 | * | ||
963 | * Unregister extcon device that is registered with devm_extcon_dev_register() | ||
964 | * function. | ||
965 | */ | ||
966 | void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev) | ||
967 | { | ||
968 | WARN_ON(devres_release(dev, devm_extcon_dev_unreg, | ||
969 | devm_extcon_dev_match, edev)); | ||
970 | } | ||
971 | EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister); | ||
972 | |||
822 | #ifdef CONFIG_OF | 973 | #ifdef CONFIG_OF |
823 | /* | 974 | /* |
824 | * extcon_get_edev_by_phandle - Get the extcon device from devicetree | 975 | * extcon_get_edev_by_phandle - Get the extcon device from devicetree |
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c index 13d522255d81..645b28356819 100644 --- a/drivers/extcon/extcon-gpio.c +++ b/drivers/extcon/extcon-gpio.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #include <linux/extcon/extcon-gpio.h> | 32 | #include <linux/extcon/extcon-gpio.h> |
33 | 33 | ||
34 | struct gpio_extcon_data { | 34 | struct gpio_extcon_data { |
35 | struct extcon_dev edev; | 35 | struct extcon_dev *edev; |
36 | unsigned gpio; | 36 | unsigned gpio; |
37 | bool gpio_active_low; | 37 | bool gpio_active_low; |
38 | const char *state_on; | 38 | const char *state_on; |
@@ -53,7 +53,7 @@ static void gpio_extcon_work(struct work_struct *work) | |||
53 | state = gpio_get_value(data->gpio); | 53 | state = gpio_get_value(data->gpio); |
54 | if (data->gpio_active_low) | 54 | if (data->gpio_active_low) |
55 | state = !state; | 55 | state = !state; |
56 | extcon_set_state(&data->edev, state); | 56 | extcon_set_state(data->edev, state); |
57 | } | 57 | } |
58 | 58 | ||
59 | static irqreturn_t gpio_irq_handler(int irq, void *dev_id) | 59 | static irqreturn_t gpio_irq_handler(int irq, void *dev_id) |
@@ -67,9 +67,10 @@ static irqreturn_t gpio_irq_handler(int irq, void *dev_id) | |||
67 | 67 | ||
68 | static ssize_t extcon_gpio_print_state(struct extcon_dev *edev, char *buf) | 68 | static ssize_t extcon_gpio_print_state(struct extcon_dev *edev, char *buf) |
69 | { | 69 | { |
70 | struct gpio_extcon_data *extcon_data = | 70 | struct device *dev = edev->dev.parent; |
71 | container_of(edev, struct gpio_extcon_data, edev); | 71 | struct gpio_extcon_data *extcon_data = dev_get_drvdata(dev); |
72 | const char *state; | 72 | const char *state; |
73 | |||
73 | if (extcon_get_state(edev)) | 74 | if (extcon_get_state(edev)) |
74 | state = extcon_data->state_on; | 75 | state = extcon_data->state_on; |
75 | else | 76 | else |
@@ -98,15 +99,21 @@ static int gpio_extcon_probe(struct platform_device *pdev) | |||
98 | if (!extcon_data) | 99 | if (!extcon_data) |
99 | return -ENOMEM; | 100 | return -ENOMEM; |
100 | 101 | ||
101 | extcon_data->edev.name = pdata->name; | 102 | extcon_data->edev = devm_extcon_dev_allocate(&pdev->dev, NULL); |
102 | extcon_data->edev.dev.parent = &pdev->dev; | 103 | if (IS_ERR(extcon_data->edev)) { |
104 | dev_err(&pdev->dev, "failed to allocate extcon device\n"); | ||
105 | return -ENOMEM; | ||
106 | } | ||
107 | extcon_data->edev->name = pdata->name; | ||
108 | extcon_data->edev->dev.parent = &pdev->dev; | ||
109 | |||
103 | extcon_data->gpio = pdata->gpio; | 110 | extcon_data->gpio = pdata->gpio; |
104 | extcon_data->gpio_active_low = pdata->gpio_active_low; | 111 | extcon_data->gpio_active_low = pdata->gpio_active_low; |
105 | extcon_data->state_on = pdata->state_on; | 112 | extcon_data->state_on = pdata->state_on; |
106 | extcon_data->state_off = pdata->state_off; | 113 | extcon_data->state_off = pdata->state_off; |
107 | extcon_data->check_on_resume = pdata->check_on_resume; | 114 | extcon_data->check_on_resume = pdata->check_on_resume; |
108 | if (pdata->state_on && pdata->state_off) | 115 | if (pdata->state_on && pdata->state_off) |
109 | extcon_data->edev.print_state = extcon_gpio_print_state; | 116 | extcon_data->edev->print_state = extcon_gpio_print_state; |
110 | 117 | ||
111 | ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN, | 118 | ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN, |
112 | pdev->name); | 119 | pdev->name); |
@@ -121,34 +128,27 @@ static int gpio_extcon_probe(struct platform_device *pdev) | |||
121 | msecs_to_jiffies(pdata->debounce); | 128 | msecs_to_jiffies(pdata->debounce); |
122 | } | 129 | } |
123 | 130 | ||
124 | ret = extcon_dev_register(&extcon_data->edev); | 131 | ret = devm_extcon_dev_register(&pdev->dev, extcon_data->edev); |
125 | if (ret < 0) | 132 | if (ret < 0) |
126 | return ret; | 133 | return ret; |
127 | 134 | ||
128 | INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work); | 135 | INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work); |
129 | 136 | ||
130 | extcon_data->irq = gpio_to_irq(extcon_data->gpio); | 137 | extcon_data->irq = gpio_to_irq(extcon_data->gpio); |
131 | if (extcon_data->irq < 0) { | 138 | if (extcon_data->irq < 0) |
132 | ret = extcon_data->irq; | 139 | return extcon_data->irq; |
133 | goto err; | ||
134 | } | ||
135 | 140 | ||
136 | ret = request_any_context_irq(extcon_data->irq, gpio_irq_handler, | 141 | ret = request_any_context_irq(extcon_data->irq, gpio_irq_handler, |
137 | pdata->irq_flags, pdev->name, | 142 | pdata->irq_flags, pdev->name, |
138 | extcon_data); | 143 | extcon_data); |
139 | if (ret < 0) | 144 | if (ret < 0) |
140 | goto err; | 145 | return ret; |
141 | 146 | ||
142 | platform_set_drvdata(pdev, extcon_data); | 147 | platform_set_drvdata(pdev, extcon_data); |
143 | /* Perform initial detection */ | 148 | /* Perform initial detection */ |
144 | gpio_extcon_work(&extcon_data->work.work); | 149 | gpio_extcon_work(&extcon_data->work.work); |
145 | 150 | ||
146 | return 0; | 151 | return 0; |
147 | |||
148 | err: | ||
149 | extcon_dev_unregister(&extcon_data->edev); | ||
150 | |||
151 | return ret; | ||
152 | } | 152 | } |
153 | 153 | ||
154 | static int gpio_extcon_remove(struct platform_device *pdev) | 154 | static int gpio_extcon_remove(struct platform_device *pdev) |
@@ -157,7 +157,6 @@ static int gpio_extcon_remove(struct platform_device *pdev) | |||
157 | 157 | ||
158 | cancel_delayed_work_sync(&extcon_data->work); | 158 | cancel_delayed_work_sync(&extcon_data->work); |
159 | free_irq(extcon_data->irq, extcon_data); | 159 | free_irq(extcon_data->irq, extcon_data); |
160 | extcon_dev_unregister(&extcon_data->edev); | ||
161 | 160 | ||
162 | return 0; | 161 | return 0; |
163 | } | 162 | } |
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c index 3846941801b8..d49e891b5675 100644 --- a/drivers/extcon/extcon-max14577.c +++ b/drivers/extcon/extcon-max14577.c | |||
@@ -1,8 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * extcon-max14577.c - MAX14577 extcon driver to support MAX14577 MUIC | 2 | * extcon-max14577.c - MAX14577/77836 extcon driver to support MUIC |
3 | * | 3 | * |
4 | * Copyright (C) 2013 Samsung Electrnoics | 4 | * Copyright (C) 2013,2014 Samsung Electrnoics |
5 | * Chanwoo Choi <cw00.choi@samsung.com> | 5 | * Chanwoo Choi <cw00.choi@samsung.com> |
6 | * Krzysztof Kozlowski <k.kozlowski@samsung.com> | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
@@ -24,7 +25,6 @@ | |||
24 | #include <linux/mfd/max14577-private.h> | 25 | #include <linux/mfd/max14577-private.h> |
25 | #include <linux/extcon.h> | 26 | #include <linux/extcon.h> |
26 | 27 | ||
27 | #define DEV_NAME "max14577-muic" | ||
28 | #define DELAY_MS_DEFAULT 17000 /* unit: millisecond */ | 28 | #define DELAY_MS_DEFAULT 17000 /* unit: millisecond */ |
29 | 29 | ||
30 | enum max14577_muic_adc_debounce_time { | 30 | enum max14577_muic_adc_debounce_time { |
@@ -40,6 +40,42 @@ enum max14577_muic_status { | |||
40 | MAX14577_MUIC_STATUS_END, | 40 | MAX14577_MUIC_STATUS_END, |
41 | }; | 41 | }; |
42 | 42 | ||
43 | /** | ||
44 | * struct max14577_muic_irq | ||
45 | * @irq: the index of irq list of MUIC device. | ||
46 | * @name: the name of irq. | ||
47 | * @virq: the virtual irq to use irq domain | ||
48 | */ | ||
49 | struct max14577_muic_irq { | ||
50 | unsigned int irq; | ||
51 | const char *name; | ||
52 | unsigned int virq; | ||
53 | }; | ||
54 | |||
55 | static struct max14577_muic_irq max14577_muic_irqs[] = { | ||
56 | { MAX14577_IRQ_INT1_ADC, "muic-ADC" }, | ||
57 | { MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" }, | ||
58 | { MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" }, | ||
59 | { MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" }, | ||
60 | { MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" }, | ||
61 | { MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" }, | ||
62 | { MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" }, | ||
63 | { MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" }, | ||
64 | }; | ||
65 | |||
66 | static struct max14577_muic_irq max77836_muic_irqs[] = { | ||
67 | { MAX14577_IRQ_INT1_ADC, "muic-ADC" }, | ||
68 | { MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" }, | ||
69 | { MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" }, | ||
70 | { MAX77836_IRQ_INT1_ADC1K, "muic-ADC1K" }, | ||
71 | { MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" }, | ||
72 | { MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" }, | ||
73 | { MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" }, | ||
74 | { MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" }, | ||
75 | { MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" }, | ||
76 | { MAX77836_IRQ_INT2_VIDRM, "muic-VIDRM" }, | ||
77 | }; | ||
78 | |||
43 | struct max14577_muic_info { | 79 | struct max14577_muic_info { |
44 | struct device *dev; | 80 | struct device *dev; |
45 | struct max14577 *max14577; | 81 | struct max14577 *max14577; |
@@ -48,6 +84,8 @@ struct max14577_muic_info { | |||
48 | int prev_chg_type; | 84 | int prev_chg_type; |
49 | u8 status[MAX14577_MUIC_STATUS_END]; | 85 | u8 status[MAX14577_MUIC_STATUS_END]; |
50 | 86 | ||
87 | struct max14577_muic_irq *muic_irqs; | ||
88 | unsigned int muic_irqs_num; | ||
51 | bool irq_adc; | 89 | bool irq_adc; |
52 | bool irq_chg; | 90 | bool irq_chg; |
53 | struct work_struct irq_work; | 91 | struct work_struct irq_work; |
@@ -74,29 +112,6 @@ enum max14577_muic_cable_group { | |||
74 | MAX14577_CABLE_GROUP_CHG, | 112 | MAX14577_CABLE_GROUP_CHG, |
75 | }; | 113 | }; |
76 | 114 | ||
77 | /** | ||
78 | * struct max14577_muic_irq | ||
79 | * @irq: the index of irq list of MUIC device. | ||
80 | * @name: the name of irq. | ||
81 | * @virq: the virtual irq to use irq domain | ||
82 | */ | ||
83 | struct max14577_muic_irq { | ||
84 | unsigned int irq; | ||
85 | const char *name; | ||
86 | unsigned int virq; | ||
87 | }; | ||
88 | |||
89 | static struct max14577_muic_irq muic_irqs[] = { | ||
90 | { MAX14577_IRQ_INT1_ADC, "muic-ADC" }, | ||
91 | { MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" }, | ||
92 | { MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" }, | ||
93 | { MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" }, | ||
94 | { MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" }, | ||
95 | { MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" }, | ||
96 | { MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" }, | ||
97 | { MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" }, | ||
98 | }; | ||
99 | |||
100 | /* Define supported accessory type */ | 115 | /* Define supported accessory type */ |
101 | enum max14577_muic_acc_type { | 116 | enum max14577_muic_acc_type { |
102 | MAX14577_MUIC_ADC_GROUND = 0x0, | 117 | MAX14577_MUIC_ADC_GROUND = 0x0, |
@@ -528,21 +543,12 @@ static void max14577_muic_irq_work(struct work_struct *work) | |||
528 | return; | 543 | return; |
529 | } | 544 | } |
530 | 545 | ||
531 | static irqreturn_t max14577_muic_irq_handler(int irq, void *data) | 546 | /* |
547 | * Sets irq_adc or irq_chg in max14577_muic_info and returns 1. | ||
548 | * Returns 0 if irq_type does not match registered IRQ for this device type. | ||
549 | */ | ||
550 | static int max14577_parse_irq(struct max14577_muic_info *info, int irq_type) | ||
532 | { | 551 | { |
533 | struct max14577_muic_info *info = data; | ||
534 | int i, irq_type = -1; | ||
535 | |||
536 | /* | ||
537 | * We may be called multiple times for different nested IRQ-s. | ||
538 | * Including changes in INT1_ADC and INT2_CGHTYP at once. | ||
539 | * However we only need to know whether it was ADC, charger | ||
540 | * or both interrupts so decode IRQ and turn on proper flags. | ||
541 | */ | ||
542 | for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) | ||
543 | if (irq == muic_irqs[i].virq) | ||
544 | irq_type = muic_irqs[i].irq; | ||
545 | |||
546 | switch (irq_type) { | 552 | switch (irq_type) { |
547 | case MAX14577_IRQ_INT1_ADC: | 553 | case MAX14577_IRQ_INT1_ADC: |
548 | case MAX14577_IRQ_INT1_ADCLOW: | 554 | case MAX14577_IRQ_INT1_ADCLOW: |
@@ -550,7 +556,7 @@ static irqreturn_t max14577_muic_irq_handler(int irq, void *data) | |||
550 | /* Handle all of accessory except for | 556 | /* Handle all of accessory except for |
551 | type of charger accessory */ | 557 | type of charger accessory */ |
552 | info->irq_adc = true; | 558 | info->irq_adc = true; |
553 | break; | 559 | return 1; |
554 | case MAX14577_IRQ_INT2_CHGTYP: | 560 | case MAX14577_IRQ_INT2_CHGTYP: |
555 | case MAX14577_IRQ_INT2_CHGDETRUN: | 561 | case MAX14577_IRQ_INT2_CHGDETRUN: |
556 | case MAX14577_IRQ_INT2_DCDTMR: | 562 | case MAX14577_IRQ_INT2_DCDTMR: |
@@ -558,8 +564,62 @@ static irqreturn_t max14577_muic_irq_handler(int irq, void *data) | |||
558 | case MAX14577_IRQ_INT2_VBVOLT: | 564 | case MAX14577_IRQ_INT2_VBVOLT: |
559 | /* Handle charger accessory */ | 565 | /* Handle charger accessory */ |
560 | info->irq_chg = true; | 566 | info->irq_chg = true; |
567 | return 1; | ||
568 | default: | ||
569 | return 0; | ||
570 | } | ||
571 | } | ||
572 | |||
573 | /* | ||
574 | * Sets irq_adc or irq_chg in max14577_muic_info and returns 1. | ||
575 | * Returns 0 if irq_type does not match registered IRQ for this device type. | ||
576 | */ | ||
577 | static int max77836_parse_irq(struct max14577_muic_info *info, int irq_type) | ||
578 | { | ||
579 | /* First check common max14577 interrupts */ | ||
580 | if (max14577_parse_irq(info, irq_type)) | ||
581 | return 1; | ||
582 | |||
583 | switch (irq_type) { | ||
584 | case MAX77836_IRQ_INT1_ADC1K: | ||
585 | info->irq_adc = true; | ||
586 | return 1; | ||
587 | case MAX77836_IRQ_INT2_VIDRM: | ||
588 | /* Handle charger accessory */ | ||
589 | info->irq_chg = true; | ||
590 | return 1; | ||
591 | default: | ||
592 | return 0; | ||
593 | } | ||
594 | } | ||
595 | |||
596 | static irqreturn_t max14577_muic_irq_handler(int irq, void *data) | ||
597 | { | ||
598 | struct max14577_muic_info *info = data; | ||
599 | int i, irq_type = -1; | ||
600 | bool irq_parsed; | ||
601 | |||
602 | /* | ||
603 | * We may be called multiple times for different nested IRQ-s. | ||
604 | * Including changes in INT1_ADC and INT2_CGHTYP at once. | ||
605 | * However we only need to know whether it was ADC, charger | ||
606 | * or both interrupts so decode IRQ and turn on proper flags. | ||
607 | */ | ||
608 | for (i = 0; i < info->muic_irqs_num; i++) | ||
609 | if (irq == info->muic_irqs[i].virq) | ||
610 | irq_type = info->muic_irqs[i].irq; | ||
611 | |||
612 | switch (info->max14577->dev_type) { | ||
613 | case MAXIM_DEVICE_TYPE_MAX77836: | ||
614 | irq_parsed = max77836_parse_irq(info, irq_type); | ||
561 | break; | 615 | break; |
616 | case MAXIM_DEVICE_TYPE_MAX14577: | ||
562 | default: | 617 | default: |
618 | irq_parsed = max14577_parse_irq(info, irq_type); | ||
619 | break; | ||
620 | } | ||
621 | |||
622 | if (!irq_parsed) { | ||
563 | dev_err(info->dev, "muic interrupt: irq %d occurred, skipped\n", | 623 | dev_err(info->dev, "muic interrupt: irq %d occurred, skipped\n", |
564 | irq_type); | 624 | irq_type); |
565 | return IRQ_HANDLED; | 625 | return IRQ_HANDLED; |
@@ -644,13 +704,24 @@ static int max14577_muic_probe(struct platform_device *pdev) | |||
644 | 704 | ||
645 | INIT_WORK(&info->irq_work, max14577_muic_irq_work); | 705 | INIT_WORK(&info->irq_work, max14577_muic_irq_work); |
646 | 706 | ||
707 | switch (max14577->dev_type) { | ||
708 | case MAXIM_DEVICE_TYPE_MAX77836: | ||
709 | info->muic_irqs = max77836_muic_irqs; | ||
710 | info->muic_irqs_num = ARRAY_SIZE(max77836_muic_irqs); | ||
711 | break; | ||
712 | case MAXIM_DEVICE_TYPE_MAX14577: | ||
713 | default: | ||
714 | info->muic_irqs = max14577_muic_irqs; | ||
715 | info->muic_irqs_num = ARRAY_SIZE(max14577_muic_irqs); | ||
716 | } | ||
717 | |||
647 | /* Support irq domain for max14577 MUIC device */ | 718 | /* Support irq domain for max14577 MUIC device */ |
648 | for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) { | 719 | for (i = 0; i < info->muic_irqs_num; i++) { |
649 | struct max14577_muic_irq *muic_irq = &muic_irqs[i]; | 720 | struct max14577_muic_irq *muic_irq = &info->muic_irqs[i]; |
650 | unsigned int virq = 0; | 721 | unsigned int virq = 0; |
651 | 722 | ||
652 | virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq); | 723 | virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq); |
653 | if (!virq) | 724 | if (virq <= 0) |
654 | return -EINVAL; | 725 | return -EINVAL; |
655 | muic_irq->virq = virq; | 726 | muic_irq->virq = virq; |
656 | 727 | ||
@@ -668,14 +739,16 @@ static int max14577_muic_probe(struct platform_device *pdev) | |||
668 | } | 739 | } |
669 | 740 | ||
670 | /* Initialize extcon device */ | 741 | /* Initialize extcon device */ |
671 | info->edev = devm_kzalloc(&pdev->dev, sizeof(*info->edev), GFP_KERNEL); | 742 | info->edev = devm_extcon_dev_allocate(&pdev->dev, |
672 | if (!info->edev) { | 743 | max14577_extcon_cable); |
744 | if (IS_ERR(info->edev)) { | ||
673 | dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); | 745 | dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); |
674 | return -ENOMEM; | 746 | return -ENOMEM; |
675 | } | 747 | } |
676 | info->edev->name = DEV_NAME; | 748 | |
677 | info->edev->supported_cable = max14577_extcon_cable; | 749 | info->edev->name = dev_name(&pdev->dev); |
678 | ret = extcon_dev_register(info->edev); | 750 | |
751 | ret = devm_extcon_dev_register(&pdev->dev, info->edev); | ||
679 | if (ret) { | 752 | if (ret) { |
680 | dev_err(&pdev->dev, "failed to register extcon device\n"); | 753 | dev_err(&pdev->dev, "failed to register extcon device\n"); |
681 | return ret; | 754 | return ret; |
@@ -694,7 +767,7 @@ static int max14577_muic_probe(struct platform_device *pdev) | |||
694 | MAX14577_REG_DEVICEID, &id); | 767 | MAX14577_REG_DEVICEID, &id); |
695 | if (ret < 0) { | 768 | if (ret < 0) { |
696 | dev_err(&pdev->dev, "failed to read revision number\n"); | 769 | dev_err(&pdev->dev, "failed to read revision number\n"); |
697 | goto err_extcon; | 770 | return ret; |
698 | } | 771 | } |
699 | dev_info(info->dev, "device ID : 0x%x\n", id); | 772 | dev_info(info->dev, "device ID : 0x%x\n", id); |
700 | 773 | ||
@@ -710,19 +783,10 @@ static int max14577_muic_probe(struct platform_device *pdev) | |||
710 | * driver should notify cable state to upper layer. | 783 | * driver should notify cable state to upper layer. |
711 | */ | 784 | */ |
712 | INIT_DELAYED_WORK(&info->wq_detcable, max14577_muic_detect_cable_wq); | 785 | INIT_DELAYED_WORK(&info->wq_detcable, max14577_muic_detect_cable_wq); |
713 | ret = queue_delayed_work(system_power_efficient_wq, &info->wq_detcable, | 786 | queue_delayed_work(system_power_efficient_wq, &info->wq_detcable, |
714 | delay_jiffies); | 787 | delay_jiffies); |
715 | if (ret < 0) { | ||
716 | dev_err(&pdev->dev, | ||
717 | "failed to schedule delayed work for cable detect\n"); | ||
718 | goto err_extcon; | ||
719 | } | ||
720 | 788 | ||
721 | return ret; | 789 | return ret; |
722 | |||
723 | err_extcon: | ||
724 | extcon_dev_unregister(info->edev); | ||
725 | return ret; | ||
726 | } | 790 | } |
727 | 791 | ||
728 | static int max14577_muic_remove(struct platform_device *pdev) | 792 | static int max14577_muic_remove(struct platform_device *pdev) |
@@ -730,23 +794,30 @@ static int max14577_muic_remove(struct platform_device *pdev) | |||
730 | struct max14577_muic_info *info = platform_get_drvdata(pdev); | 794 | struct max14577_muic_info *info = platform_get_drvdata(pdev); |
731 | 795 | ||
732 | cancel_work_sync(&info->irq_work); | 796 | cancel_work_sync(&info->irq_work); |
733 | extcon_dev_unregister(info->edev); | ||
734 | 797 | ||
735 | return 0; | 798 | return 0; |
736 | } | 799 | } |
737 | 800 | ||
801 | static const struct platform_device_id max14577_muic_id[] = { | ||
802 | { "max14577-muic", MAXIM_DEVICE_TYPE_MAX14577, }, | ||
803 | { "max77836-muic", MAXIM_DEVICE_TYPE_MAX77836, }, | ||
804 | { } | ||
805 | }; | ||
806 | MODULE_DEVICE_TABLE(platform, max14577_muic_id); | ||
807 | |||
738 | static struct platform_driver max14577_muic_driver = { | 808 | static struct platform_driver max14577_muic_driver = { |
739 | .driver = { | 809 | .driver = { |
740 | .name = DEV_NAME, | 810 | .name = "max14577-muic", |
741 | .owner = THIS_MODULE, | 811 | .owner = THIS_MODULE, |
742 | }, | 812 | }, |
743 | .probe = max14577_muic_probe, | 813 | .probe = max14577_muic_probe, |
744 | .remove = max14577_muic_remove, | 814 | .remove = max14577_muic_remove, |
815 | .id_table = max14577_muic_id, | ||
745 | }; | 816 | }; |
746 | 817 | ||
747 | module_platform_driver(max14577_muic_driver); | 818 | module_platform_driver(max14577_muic_driver); |
748 | 819 | ||
749 | MODULE_DESCRIPTION("MAXIM 14577 Extcon driver"); | 820 | MODULE_DESCRIPTION("Maxim 14577/77836 Extcon driver"); |
750 | MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>"); | 821 | MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>, Krzysztof Kozlowski <k.kozlowski@samsung.com>"); |
751 | MODULE_LICENSE("GPL"); | 822 | MODULE_LICENSE("GPL"); |
752 | MODULE_ALIAS("platform:extcon-max14577"); | 823 | MODULE_ALIAS("platform:extcon-max14577"); |
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c index da268fbc901b..2c7c3e191591 100644 --- a/drivers/extcon/extcon-max77693.c +++ b/drivers/extcon/extcon-max77693.c | |||
@@ -1175,25 +1175,24 @@ static int max77693_muic_probe(struct platform_device *pdev) | |||
1175 | } | 1175 | } |
1176 | 1176 | ||
1177 | /* Initialize extcon device */ | 1177 | /* Initialize extcon device */ |
1178 | info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev), | 1178 | info->edev = devm_extcon_dev_allocate(&pdev->dev, |
1179 | GFP_KERNEL); | 1179 | max77693_extcon_cable); |
1180 | if (!info->edev) { | 1180 | if (IS_ERR(info->edev)) { |
1181 | dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); | 1181 | dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); |
1182 | ret = -ENOMEM; | 1182 | ret = -ENOMEM; |
1183 | goto err_irq; | 1183 | goto err_irq; |
1184 | } | 1184 | } |
1185 | info->edev->name = DEV_NAME; | 1185 | info->edev->name = DEV_NAME; |
1186 | info->edev->dev.parent = &pdev->dev; | 1186 | info->edev->dev.parent = &pdev->dev; |
1187 | info->edev->supported_cable = max77693_extcon_cable; | 1187 | |
1188 | ret = extcon_dev_register(info->edev); | 1188 | ret = devm_extcon_dev_register(&pdev->dev, info->edev); |
1189 | if (ret) { | 1189 | if (ret) { |
1190 | dev_err(&pdev->dev, "failed to register extcon device\n"); | 1190 | dev_err(&pdev->dev, "failed to register extcon device\n"); |
1191 | goto err_irq; | 1191 | goto err_irq; |
1192 | } | 1192 | } |
1193 | 1193 | ||
1194 | |||
1195 | /* Initialize MUIC register by using platform data or default data */ | 1194 | /* Initialize MUIC register by using platform data or default data */ |
1196 | if (pdata->muic_data) { | 1195 | if (pdata && pdata->muic_data) { |
1197 | init_data = pdata->muic_data->init_data; | 1196 | init_data = pdata->muic_data->init_data; |
1198 | num_init_data = pdata->muic_data->num_init_data; | 1197 | num_init_data = pdata->muic_data->num_init_data; |
1199 | } else { | 1198 | } else { |
@@ -1226,7 +1225,7 @@ static int max77693_muic_probe(struct platform_device *pdev) | |||
1226 | = init_data[i].data; | 1225 | = init_data[i].data; |
1227 | } | 1226 | } |
1228 | 1227 | ||
1229 | if (pdata->muic_data) { | 1228 | if (pdata && pdata->muic_data) { |
1230 | struct max77693_muic_platform_data *muic_pdata | 1229 | struct max77693_muic_platform_data *muic_pdata |
1231 | = pdata->muic_data; | 1230 | = pdata->muic_data; |
1232 | 1231 | ||
@@ -1267,7 +1266,7 @@ static int max77693_muic_probe(struct platform_device *pdev) | |||
1267 | MAX77693_MUIC_REG_ID, &id); | 1266 | MAX77693_MUIC_REG_ID, &id); |
1268 | if (ret < 0) { | 1267 | if (ret < 0) { |
1269 | dev_err(&pdev->dev, "failed to read revision number\n"); | 1268 | dev_err(&pdev->dev, "failed to read revision number\n"); |
1270 | goto err_extcon; | 1269 | goto err_irq; |
1271 | } | 1270 | } |
1272 | dev_info(info->dev, "device ID : 0x%x\n", id); | 1271 | dev_info(info->dev, "device ID : 0x%x\n", id); |
1273 | 1272 | ||
@@ -1283,12 +1282,11 @@ static int max77693_muic_probe(struct platform_device *pdev) | |||
1283 | * driver should notify cable state to upper layer. | 1282 | * driver should notify cable state to upper layer. |
1284 | */ | 1283 | */ |
1285 | INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq); | 1284 | INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq); |
1286 | schedule_delayed_work(&info->wq_detcable, delay_jiffies); | 1285 | queue_delayed_work(system_power_efficient_wq, &info->wq_detcable, |
1286 | delay_jiffies); | ||
1287 | 1287 | ||
1288 | return ret; | 1288 | return ret; |
1289 | 1289 | ||
1290 | err_extcon: | ||
1291 | extcon_dev_unregister(info->edev); | ||
1292 | err_irq: | 1290 | err_irq: |
1293 | while (--i >= 0) | 1291 | while (--i >= 0) |
1294 | free_irq(muic_irqs[i].virq, info); | 1292 | free_irq(muic_irqs[i].virq, info); |
@@ -1304,7 +1302,6 @@ static int max77693_muic_remove(struct platform_device *pdev) | |||
1304 | free_irq(muic_irqs[i].virq, info); | 1302 | free_irq(muic_irqs[i].virq, info); |
1305 | cancel_work_sync(&info->irq_work); | 1303 | cancel_work_sync(&info->irq_work); |
1306 | input_unregister_device(info->dock); | 1304 | input_unregister_device(info->dock); |
1307 | extcon_dev_unregister(info->edev); | ||
1308 | 1305 | ||
1309 | return 0; | 1306 | return 0; |
1310 | } | 1307 | } |
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c index 6a00464658c5..d9f7f1baaa03 100644 --- a/drivers/extcon/extcon-max8997.c +++ b/drivers/extcon/extcon-max8997.c | |||
@@ -699,23 +699,22 @@ static int max8997_muic_probe(struct platform_device *pdev) | |||
699 | } | 699 | } |
700 | 700 | ||
701 | /* External connector */ | 701 | /* External connector */ |
702 | info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev), | 702 | info->edev = devm_extcon_dev_allocate(&pdev->dev, max8997_extcon_cable); |
703 | GFP_KERNEL); | 703 | if (IS_ERR(info->edev)) { |
704 | if (!info->edev) { | ||
705 | dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); | 704 | dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); |
706 | ret = -ENOMEM; | 705 | ret = -ENOMEM; |
707 | goto err_irq; | 706 | goto err_irq; |
708 | } | 707 | } |
709 | info->edev->name = DEV_NAME; | 708 | info->edev->name = DEV_NAME; |
710 | info->edev->dev.parent = &pdev->dev; | 709 | info->edev->dev.parent = &pdev->dev; |
711 | info->edev->supported_cable = max8997_extcon_cable; | 710 | |
712 | ret = extcon_dev_register(info->edev); | 711 | ret = devm_extcon_dev_register(&pdev->dev, info->edev); |
713 | if (ret) { | 712 | if (ret) { |
714 | dev_err(&pdev->dev, "failed to register extcon device\n"); | 713 | dev_err(&pdev->dev, "failed to register extcon device\n"); |
715 | goto err_irq; | 714 | goto err_irq; |
716 | } | 715 | } |
717 | 716 | ||
718 | if (pdata->muic_pdata) { | 717 | if (pdata && pdata->muic_pdata) { |
719 | struct max8997_muic_platform_data *muic_pdata | 718 | struct max8997_muic_platform_data *muic_pdata |
720 | = pdata->muic_pdata; | 719 | = pdata->muic_pdata; |
721 | 720 | ||
@@ -770,7 +769,8 @@ static int max8997_muic_probe(struct platform_device *pdev) | |||
770 | * driver should notify cable state to upper layer. | 769 | * driver should notify cable state to upper layer. |
771 | */ | 770 | */ |
772 | INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq); | 771 | INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq); |
773 | schedule_delayed_work(&info->wq_detcable, delay_jiffies); | 772 | queue_delayed_work(system_power_efficient_wq, &info->wq_detcable, |
773 | delay_jiffies); | ||
774 | 774 | ||
775 | return 0; | 775 | return 0; |
776 | 776 | ||
@@ -789,8 +789,6 @@ static int max8997_muic_remove(struct platform_device *pdev) | |||
789 | free_irq(muic_irqs[i].virq, info); | 789 | free_irq(muic_irqs[i].virq, info); |
790 | cancel_work_sync(&info->irq_work); | 790 | cancel_work_sync(&info->irq_work); |
791 | 791 | ||
792 | extcon_dev_unregister(info->edev); | ||
793 | |||
794 | return 0; | 792 | return 0; |
795 | } | 793 | } |
796 | 794 | ||
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index ddff2b72f0a8..7417ce84eb2d 100644 --- a/drivers/extcon/extcon-palmas.c +++ b/drivers/extcon/extcon-palmas.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/slab.h> | ||
26 | #include <linux/err.h> | 27 | #include <linux/err.h> |
27 | #include <linux/mfd/palmas.h> | 28 | #include <linux/mfd/palmas.h> |
28 | #include <linux/of.h> | 29 | #include <linux/of.h> |
@@ -56,7 +57,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb) | |||
56 | if (vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS) { | 57 | if (vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS) { |
57 | if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) { | 58 | if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) { |
58 | palmas_usb->linkstat = PALMAS_USB_STATE_VBUS; | 59 | palmas_usb->linkstat = PALMAS_USB_STATE_VBUS; |
59 | extcon_set_cable_state(&palmas_usb->edev, "USB", true); | 60 | extcon_set_cable_state(palmas_usb->edev, "USB", true); |
60 | dev_info(palmas_usb->dev, "USB cable is attached\n"); | 61 | dev_info(palmas_usb->dev, "USB cable is attached\n"); |
61 | } else { | 62 | } else { |
62 | dev_dbg(palmas_usb->dev, | 63 | dev_dbg(palmas_usb->dev, |
@@ -65,7 +66,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb) | |||
65 | } else if (!(vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS)) { | 66 | } else if (!(vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS)) { |
66 | if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) { | 67 | if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) { |
67 | palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; | 68 | palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; |
68 | extcon_set_cable_state(&palmas_usb->edev, "USB", false); | 69 | extcon_set_cable_state(palmas_usb->edev, "USB", false); |
69 | dev_info(palmas_usb->dev, "USB cable is detached\n"); | 70 | dev_info(palmas_usb->dev, "USB cable is detached\n"); |
70 | } else { | 71 | } else { |
71 | dev_dbg(palmas_usb->dev, | 72 | dev_dbg(palmas_usb->dev, |
@@ -92,7 +93,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb) | |||
92 | PALMAS_USB_ID_INT_LATCH_CLR, | 93 | PALMAS_USB_ID_INT_LATCH_CLR, |
93 | PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND); | 94 | PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND); |
94 | palmas_usb->linkstat = PALMAS_USB_STATE_ID; | 95 | palmas_usb->linkstat = PALMAS_USB_STATE_ID; |
95 | extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true); | 96 | extcon_set_cable_state(palmas_usb->edev, "USB-HOST", true); |
96 | dev_info(palmas_usb->dev, "USB-HOST cable is attached\n"); | 97 | dev_info(palmas_usb->dev, "USB-HOST cable is attached\n"); |
97 | } else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) && | 98 | } else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) && |
98 | (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) { | 99 | (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) { |
@@ -100,17 +101,17 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb) | |||
100 | PALMAS_USB_ID_INT_LATCH_CLR, | 101 | PALMAS_USB_ID_INT_LATCH_CLR, |
101 | PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT); | 102 | PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT); |
102 | palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; | 103 | palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; |
103 | extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", false); | 104 | extcon_set_cable_state(palmas_usb->edev, "USB-HOST", false); |
104 | dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); | 105 | dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); |
105 | } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) && | 106 | } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) && |
106 | (!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) { | 107 | (!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) { |
107 | palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; | 108 | palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; |
108 | extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", false); | 109 | extcon_set_cable_state(palmas_usb->edev, "USB-HOST", false); |
109 | dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); | 110 | dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); |
110 | } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) && | 111 | } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) && |
111 | (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) { | 112 | (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) { |
112 | palmas_usb->linkstat = PALMAS_USB_STATE_ID; | 113 | palmas_usb->linkstat = PALMAS_USB_STATE_ID; |
113 | extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true); | 114 | extcon_set_cable_state(palmas_usb->edev, "USB-HOST", true); |
114 | dev_info(palmas_usb->dev, " USB-HOST cable is attached\n"); | 115 | dev_info(palmas_usb->dev, " USB-HOST cable is attached\n"); |
115 | } | 116 | } |
116 | 117 | ||
@@ -186,13 +187,20 @@ static int palmas_usb_probe(struct platform_device *pdev) | |||
186 | 187 | ||
187 | platform_set_drvdata(pdev, palmas_usb); | 188 | platform_set_drvdata(pdev, palmas_usb); |
188 | 189 | ||
189 | palmas_usb->edev.supported_cable = palmas_extcon_cable; | 190 | palmas_usb->edev = devm_extcon_dev_allocate(&pdev->dev, |
190 | palmas_usb->edev.dev.parent = palmas_usb->dev; | 191 | palmas_extcon_cable); |
191 | palmas_usb->edev.mutually_exclusive = mutually_exclusive; | 192 | if (IS_ERR(palmas_usb->edev)) { |
193 | dev_err(&pdev->dev, "failed to allocate extcon device\n"); | ||
194 | return -ENOMEM; | ||
195 | } | ||
196 | palmas_usb->edev->name = kstrdup(node->name, GFP_KERNEL); | ||
197 | palmas_usb->edev->dev.parent = palmas_usb->dev; | ||
198 | palmas_usb->edev->mutually_exclusive = mutually_exclusive; | ||
192 | 199 | ||
193 | status = extcon_dev_register(&palmas_usb->edev); | 200 | status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev); |
194 | if (status) { | 201 | if (status) { |
195 | dev_err(&pdev->dev, "failed to register extcon device\n"); | 202 | dev_err(&pdev->dev, "failed to register extcon device\n"); |
203 | kfree(palmas_usb->edev->name); | ||
196 | return status; | 204 | return status; |
197 | } | 205 | } |
198 | 206 | ||
@@ -206,7 +214,8 @@ static int palmas_usb_probe(struct platform_device *pdev) | |||
206 | if (status < 0) { | 214 | if (status < 0) { |
207 | dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", | 215 | dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", |
208 | palmas_usb->id_irq, status); | 216 | palmas_usb->id_irq, status); |
209 | goto fail_extcon; | 217 | kfree(palmas_usb->edev->name); |
218 | return status; | ||
210 | } | 219 | } |
211 | } | 220 | } |
212 | 221 | ||
@@ -220,25 +229,21 @@ static int palmas_usb_probe(struct platform_device *pdev) | |||
220 | if (status < 0) { | 229 | if (status < 0) { |
221 | dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", | 230 | dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", |
222 | palmas_usb->vbus_irq, status); | 231 | palmas_usb->vbus_irq, status); |
223 | goto fail_extcon; | 232 | kfree(palmas_usb->edev->name); |
233 | return status; | ||
224 | } | 234 | } |
225 | } | 235 | } |
226 | 236 | ||
227 | palmas_enable_irq(palmas_usb); | 237 | palmas_enable_irq(palmas_usb); |
228 | device_set_wakeup_capable(&pdev->dev, true); | 238 | device_set_wakeup_capable(&pdev->dev, true); |
229 | return 0; | 239 | return 0; |
230 | |||
231 | fail_extcon: | ||
232 | extcon_dev_unregister(&palmas_usb->edev); | ||
233 | |||
234 | return status; | ||
235 | } | 240 | } |
236 | 241 | ||
237 | static int palmas_usb_remove(struct platform_device *pdev) | 242 | static int palmas_usb_remove(struct platform_device *pdev) |
238 | { | 243 | { |
239 | struct palmas_usb *palmas_usb = platform_get_drvdata(pdev); | 244 | struct palmas_usb *palmas_usb = platform_get_drvdata(pdev); |
240 | 245 | ||
241 | extcon_dev_unregister(&palmas_usb->edev); | 246 | kfree(palmas_usb->edev->name); |
242 | 247 | ||
243 | return 0; | 248 | return 0; |
244 | } | 249 | } |
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 602ca86a6488..284cf66489f4 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c | |||
@@ -471,18 +471,26 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle) | |||
471 | } | 471 | } |
472 | EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl); | 472 | EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl); |
473 | 473 | ||
474 | static void reset_channel_cb(void *arg) | ||
475 | { | ||
476 | struct vmbus_channel *channel = arg; | ||
477 | |||
478 | channel->onchannel_callback = NULL; | ||
479 | } | ||
480 | |||
474 | static void vmbus_close_internal(struct vmbus_channel *channel) | 481 | static void vmbus_close_internal(struct vmbus_channel *channel) |
475 | { | 482 | { |
476 | struct vmbus_channel_close_channel *msg; | 483 | struct vmbus_channel_close_channel *msg; |
477 | int ret; | 484 | int ret; |
478 | unsigned long flags; | ||
479 | 485 | ||
480 | channel->state = CHANNEL_OPEN_STATE; | 486 | channel->state = CHANNEL_OPEN_STATE; |
481 | channel->sc_creation_callback = NULL; | 487 | channel->sc_creation_callback = NULL; |
482 | /* Stop callback and cancel the timer asap */ | 488 | /* Stop callback and cancel the timer asap */ |
483 | spin_lock_irqsave(&channel->inbound_lock, flags); | 489 | if (channel->target_cpu != smp_processor_id()) |
484 | channel->onchannel_callback = NULL; | 490 | smp_call_function_single(channel->target_cpu, reset_channel_cb, |
485 | spin_unlock_irqrestore(&channel->inbound_lock, flags); | 491 | channel, true); |
492 | else | ||
493 | reset_channel_cb(channel); | ||
486 | 494 | ||
487 | /* Send a closing message */ | 495 | /* Send a closing message */ |
488 | 496 | ||
@@ -674,8 +682,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, | |||
674 | u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, | 682 | u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, |
675 | multi_pagebuffer->len); | 683 | multi_pagebuffer->len); |
676 | 684 | ||
677 | 685 | if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT) | |
678 | if ((pfncount < 0) || (pfncount > MAX_MULTIPAGE_BUFFER_COUNT)) | ||
679 | return -EINVAL; | 686 | return -EINVAL; |
680 | 687 | ||
681 | /* | 688 | /* |
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index fa920469bf10..6c8b032cacba 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c | |||
@@ -149,6 +149,7 @@ static struct vmbus_channel *alloc_channel(void) | |||
149 | spin_lock_init(&channel->sc_lock); | 149 | spin_lock_init(&channel->sc_lock); |
150 | 150 | ||
151 | INIT_LIST_HEAD(&channel->sc_list); | 151 | INIT_LIST_HEAD(&channel->sc_list); |
152 | INIT_LIST_HEAD(&channel->percpu_list); | ||
152 | 153 | ||
153 | channel->controlwq = create_workqueue("hv_vmbus_ctl"); | 154 | channel->controlwq = create_workqueue("hv_vmbus_ctl"); |
154 | if (!channel->controlwq) { | 155 | if (!channel->controlwq) { |
@@ -188,7 +189,20 @@ static void free_channel(struct vmbus_channel *channel) | |||
188 | queue_work(vmbus_connection.work_queue, &channel->work); | 189 | queue_work(vmbus_connection.work_queue, &channel->work); |
189 | } | 190 | } |
190 | 191 | ||
192 | static void percpu_channel_enq(void *arg) | ||
193 | { | ||
194 | struct vmbus_channel *channel = arg; | ||
195 | int cpu = smp_processor_id(); | ||
196 | |||
197 | list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]); | ||
198 | } | ||
191 | 199 | ||
200 | static void percpu_channel_deq(void *arg) | ||
201 | { | ||
202 | struct vmbus_channel *channel = arg; | ||
203 | |||
204 | list_del(&channel->percpu_list); | ||
205 | } | ||
192 | 206 | ||
193 | /* | 207 | /* |
194 | * vmbus_process_rescind_offer - | 208 | * vmbus_process_rescind_offer - |
@@ -210,6 +224,12 @@ static void vmbus_process_rescind_offer(struct work_struct *work) | |||
210 | msg.header.msgtype = CHANNELMSG_RELID_RELEASED; | 224 | msg.header.msgtype = CHANNELMSG_RELID_RELEASED; |
211 | vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released)); | 225 | vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released)); |
212 | 226 | ||
227 | if (channel->target_cpu != smp_processor_id()) | ||
228 | smp_call_function_single(channel->target_cpu, | ||
229 | percpu_channel_deq, channel, true); | ||
230 | else | ||
231 | percpu_channel_deq(channel); | ||
232 | |||
213 | if (channel->primary_channel == NULL) { | 233 | if (channel->primary_channel == NULL) { |
214 | spin_lock_irqsave(&vmbus_connection.channel_lock, flags); | 234 | spin_lock_irqsave(&vmbus_connection.channel_lock, flags); |
215 | list_del(&channel->listentry); | 235 | list_del(&channel->listentry); |
@@ -245,6 +265,7 @@ static void vmbus_process_offer(struct work_struct *work) | |||
245 | work); | 265 | work); |
246 | struct vmbus_channel *channel; | 266 | struct vmbus_channel *channel; |
247 | bool fnew = true; | 267 | bool fnew = true; |
268 | bool enq = false; | ||
248 | int ret; | 269 | int ret; |
249 | unsigned long flags; | 270 | unsigned long flags; |
250 | 271 | ||
@@ -264,12 +285,22 @@ static void vmbus_process_offer(struct work_struct *work) | |||
264 | } | 285 | } |
265 | } | 286 | } |
266 | 287 | ||
267 | if (fnew) | 288 | if (fnew) { |
268 | list_add_tail(&newchannel->listentry, | 289 | list_add_tail(&newchannel->listentry, |
269 | &vmbus_connection.chn_list); | 290 | &vmbus_connection.chn_list); |
291 | enq = true; | ||
292 | } | ||
270 | 293 | ||
271 | spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); | 294 | spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); |
272 | 295 | ||
296 | if (enq) { | ||
297 | if (newchannel->target_cpu != smp_processor_id()) | ||
298 | smp_call_function_single(newchannel->target_cpu, | ||
299 | percpu_channel_enq, | ||
300 | newchannel, true); | ||
301 | else | ||
302 | percpu_channel_enq(newchannel); | ||
303 | } | ||
273 | if (!fnew) { | 304 | if (!fnew) { |
274 | /* | 305 | /* |
275 | * Check to see if this is a sub-channel. | 306 | * Check to see if this is a sub-channel. |
@@ -282,6 +313,14 @@ static void vmbus_process_offer(struct work_struct *work) | |||
282 | spin_lock_irqsave(&channel->sc_lock, flags); | 313 | spin_lock_irqsave(&channel->sc_lock, flags); |
283 | list_add_tail(&newchannel->sc_list, &channel->sc_list); | 314 | list_add_tail(&newchannel->sc_list, &channel->sc_list); |
284 | spin_unlock_irqrestore(&channel->sc_lock, flags); | 315 | spin_unlock_irqrestore(&channel->sc_lock, flags); |
316 | |||
317 | if (newchannel->target_cpu != smp_processor_id()) | ||
318 | smp_call_function_single(newchannel->target_cpu, | ||
319 | percpu_channel_enq, | ||
320 | newchannel, true); | ||
321 | else | ||
322 | percpu_channel_enq(newchannel); | ||
323 | |||
285 | newchannel->state = CHANNEL_OPEN_STATE; | 324 | newchannel->state = CHANNEL_OPEN_STATE; |
286 | if (channel->sc_creation_callback != NULL) | 325 | if (channel->sc_creation_callback != NULL) |
287 | channel->sc_creation_callback(newchannel); | 326 | channel->sc_creation_callback(newchannel); |
@@ -365,7 +404,7 @@ static u32 next_vp; | |||
365 | * performance critical channels (IDE, SCSI and Network) will be uniformly | 404 | * performance critical channels (IDE, SCSI and Network) will be uniformly |
366 | * distributed across all available CPUs. | 405 | * distributed across all available CPUs. |
367 | */ | 406 | */ |
368 | static u32 get_vp_index(uuid_le *type_guid) | 407 | static void init_vp_index(struct vmbus_channel *channel, uuid_le *type_guid) |
369 | { | 408 | { |
370 | u32 cur_cpu; | 409 | u32 cur_cpu; |
371 | int i; | 410 | int i; |
@@ -387,10 +426,13 @@ static u32 get_vp_index(uuid_le *type_guid) | |||
387 | * Also if the channel is not a performance critical | 426 | * Also if the channel is not a performance critical |
388 | * channel, bind it to cpu 0. | 427 | * channel, bind it to cpu 0. |
389 | */ | 428 | */ |
390 | return 0; | 429 | channel->target_cpu = 0; |
430 | channel->target_vp = 0; | ||
431 | return; | ||
391 | } | 432 | } |
392 | cur_cpu = (++next_vp % max_cpus); | 433 | cur_cpu = (++next_vp % max_cpus); |
393 | return hv_context.vp_index[cur_cpu]; | 434 | channel->target_cpu = cur_cpu; |
435 | channel->target_vp = hv_context.vp_index[cur_cpu]; | ||
394 | } | 436 | } |
395 | 437 | ||
396 | /* | 438 | /* |
@@ -438,7 +480,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr) | |||
438 | offer->connection_id; | 480 | offer->connection_id; |
439 | } | 481 | } |
440 | 482 | ||
441 | newchannel->target_vp = get_vp_index(&offer->offer.if_type); | 483 | init_vp_index(newchannel, &offer->offer.if_type); |
442 | 484 | ||
443 | memcpy(&newchannel->offermsg, offer, | 485 | memcpy(&newchannel->offermsg, offer, |
444 | sizeof(struct vmbus_channel_offer_channel)); | 486 | sizeof(struct vmbus_channel_offer_channel)); |
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 2e7801af466e..e84f4526eb36 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c | |||
@@ -224,8 +224,8 @@ cleanup: | |||
224 | vmbus_connection.int_page = NULL; | 224 | vmbus_connection.int_page = NULL; |
225 | } | 225 | } |
226 | 226 | ||
227 | free_pages((unsigned long)vmbus_connection.monitor_pages[0], 1); | 227 | free_pages((unsigned long)vmbus_connection.monitor_pages[0], 0); |
228 | free_pages((unsigned long)vmbus_connection.monitor_pages[1], 1); | 228 | free_pages((unsigned long)vmbus_connection.monitor_pages[1], 0); |
229 | vmbus_connection.monitor_pages[0] = NULL; | 229 | vmbus_connection.monitor_pages[0] = NULL; |
230 | vmbus_connection.monitor_pages[1] = NULL; | 230 | vmbus_connection.monitor_pages[1] = NULL; |
231 | 231 | ||
@@ -234,6 +234,28 @@ cleanup: | |||
234 | return ret; | 234 | return ret; |
235 | } | 235 | } |
236 | 236 | ||
237 | /* | ||
238 | * Map the given relid to the corresponding channel based on the | ||
239 | * per-cpu list of channels that have been affinitized to this CPU. | ||
240 | * This will be used in the channel callback path as we can do this | ||
241 | * mapping in a lock-free fashion. | ||
242 | */ | ||
243 | static struct vmbus_channel *pcpu_relid2channel(u32 relid) | ||
244 | { | ||
245 | struct vmbus_channel *channel; | ||
246 | struct vmbus_channel *found_channel = NULL; | ||
247 | int cpu = smp_processor_id(); | ||
248 | struct list_head *pcpu_head = &hv_context.percpu_list[cpu]; | ||
249 | |||
250 | list_for_each_entry(channel, pcpu_head, percpu_list) { | ||
251 | if (channel->offermsg.child_relid == relid) { | ||
252 | found_channel = channel; | ||
253 | break; | ||
254 | } | ||
255 | } | ||
256 | |||
257 | return found_channel; | ||
258 | } | ||
237 | 259 | ||
238 | /* | 260 | /* |
239 | * relid2channel - Get the channel object given its | 261 | * relid2channel - Get the channel object given its |
@@ -277,7 +299,6 @@ struct vmbus_channel *relid2channel(u32 relid) | |||
277 | static void process_chn_event(u32 relid) | 299 | static void process_chn_event(u32 relid) |
278 | { | 300 | { |
279 | struct vmbus_channel *channel; | 301 | struct vmbus_channel *channel; |
280 | unsigned long flags; | ||
281 | void *arg; | 302 | void *arg; |
282 | bool read_state; | 303 | bool read_state; |
283 | u32 bytes_to_read; | 304 | u32 bytes_to_read; |
@@ -286,7 +307,7 @@ static void process_chn_event(u32 relid) | |||
286 | * Find the channel based on this relid and invokes the | 307 | * Find the channel based on this relid and invokes the |
287 | * channel callback to process the event | 308 | * channel callback to process the event |
288 | */ | 309 | */ |
289 | channel = relid2channel(relid); | 310 | channel = pcpu_relid2channel(relid); |
290 | 311 | ||
291 | if (!channel) { | 312 | if (!channel) { |
292 | pr_err("channel not found for relid - %u\n", relid); | 313 | pr_err("channel not found for relid - %u\n", relid); |
@@ -296,13 +317,12 @@ static void process_chn_event(u32 relid) | |||
296 | /* | 317 | /* |
297 | * A channel once created is persistent even when there | 318 | * A channel once created is persistent even when there |
298 | * is no driver handling the device. An unloading driver | 319 | * is no driver handling the device. An unloading driver |
299 | * sets the onchannel_callback to NULL under the | 320 | * sets the onchannel_callback to NULL on the same CPU |
300 | * protection of the channel inbound_lock. Thus, checking | 321 | * as where this interrupt is handled (in an interrupt context). |
301 | * and invoking the driver specific callback takes care of | 322 | * Thus, checking and invoking the driver specific callback takes |
302 | * orderly unloading of the driver. | 323 | * care of orderly unloading of the driver. |
303 | */ | 324 | */ |
304 | 325 | ||
305 | spin_lock_irqsave(&channel->inbound_lock, flags); | ||
306 | if (channel->onchannel_callback != NULL) { | 326 | if (channel->onchannel_callback != NULL) { |
307 | arg = channel->channel_callback_context; | 327 | arg = channel->channel_callback_context; |
308 | read_state = channel->batched_reading; | 328 | read_state = channel->batched_reading; |
@@ -327,7 +347,6 @@ static void process_chn_event(u32 relid) | |||
327 | pr_err("no channel callback for relid - %u\n", relid); | 347 | pr_err("no channel callback for relid - %u\n", relid); |
328 | } | 348 | } |
329 | 349 | ||
330 | spin_unlock_irqrestore(&channel->inbound_lock, flags); | ||
331 | } | 350 | } |
332 | 351 | ||
333 | /* | 352 | /* |
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index bcb49502c3bf..edfc8488cb03 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c | |||
@@ -383,6 +383,8 @@ void hv_synic_init(void *arg) | |||
383 | */ | 383 | */ |
384 | rdmsrl(HV_X64_MSR_VP_INDEX, vp_index); | 384 | rdmsrl(HV_X64_MSR_VP_INDEX, vp_index); |
385 | hv_context.vp_index[cpu] = (u32)vp_index; | 385 | hv_context.vp_index[cpu] = (u32)vp_index; |
386 | |||
387 | INIT_LIST_HEAD(&hv_context.percpu_list[cpu]); | ||
386 | return; | 388 | return; |
387 | } | 389 | } |
388 | 390 | ||
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 7e6d78dc9437..5e90c5d771a7 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
20 | 20 | ||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/jiffies.h> | ||
22 | #include <linux/mman.h> | 23 | #include <linux/mman.h> |
23 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
24 | #include <linux/init.h> | 25 | #include <linux/init.h> |
@@ -459,6 +460,11 @@ static bool do_hot_add; | |||
459 | */ | 460 | */ |
460 | static uint pressure_report_delay = 45; | 461 | static uint pressure_report_delay = 45; |
461 | 462 | ||
463 | /* | ||
464 | * The last time we posted a pressure report to host. | ||
465 | */ | ||
466 | static unsigned long last_post_time; | ||
467 | |||
462 | module_param(hot_add, bool, (S_IRUGO | S_IWUSR)); | 468 | module_param(hot_add, bool, (S_IRUGO | S_IWUSR)); |
463 | MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add"); | 469 | MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add"); |
464 | 470 | ||
@@ -542,6 +548,7 @@ struct hv_dynmem_device { | |||
542 | 548 | ||
543 | static struct hv_dynmem_device dm_device; | 549 | static struct hv_dynmem_device dm_device; |
544 | 550 | ||
551 | static void post_status(struct hv_dynmem_device *dm); | ||
545 | #ifdef CONFIG_MEMORY_HOTPLUG | 552 | #ifdef CONFIG_MEMORY_HOTPLUG |
546 | 553 | ||
547 | static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size) | 554 | static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size) |
@@ -612,7 +619,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size, | |||
612 | * have not been "onlined" within the allowed time. | 619 | * have not been "onlined" within the allowed time. |
613 | */ | 620 | */ |
614 | wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ); | 621 | wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ); |
615 | 622 | post_status(&dm_device); | |
616 | } | 623 | } |
617 | 624 | ||
618 | return; | 625 | return; |
@@ -951,11 +958,17 @@ static void post_status(struct hv_dynmem_device *dm) | |||
951 | { | 958 | { |
952 | struct dm_status status; | 959 | struct dm_status status; |
953 | struct sysinfo val; | 960 | struct sysinfo val; |
961 | unsigned long now = jiffies; | ||
962 | unsigned long last_post = last_post_time; | ||
954 | 963 | ||
955 | if (pressure_report_delay > 0) { | 964 | if (pressure_report_delay > 0) { |
956 | --pressure_report_delay; | 965 | --pressure_report_delay; |
957 | return; | 966 | return; |
958 | } | 967 | } |
968 | |||
969 | if (!time_after(now, (last_post_time + HZ))) | ||
970 | return; | ||
971 | |||
959 | si_meminfo(&val); | 972 | si_meminfo(&val); |
960 | memset(&status, 0, sizeof(struct dm_status)); | 973 | memset(&status, 0, sizeof(struct dm_status)); |
961 | status.hdr.type = DM_STATUS_REPORT; | 974 | status.hdr.type = DM_STATUS_REPORT; |
@@ -983,6 +996,14 @@ static void post_status(struct hv_dynmem_device *dm) | |||
983 | if (status.hdr.trans_id != atomic_read(&trans_id)) | 996 | if (status.hdr.trans_id != atomic_read(&trans_id)) |
984 | return; | 997 | return; |
985 | 998 | ||
999 | /* | ||
1000 | * If the last post time that we sampled has changed, | ||
1001 | * we have raced, don't post the status. | ||
1002 | */ | ||
1003 | if (last_post != last_post_time) | ||
1004 | return; | ||
1005 | |||
1006 | last_post_time = jiffies; | ||
986 | vmbus_sendpacket(dm->dev->channel, &status, | 1007 | vmbus_sendpacket(dm->dev->channel, &status, |
987 | sizeof(struct dm_status), | 1008 | sizeof(struct dm_status), |
988 | (unsigned long)NULL, | 1009 | (unsigned long)NULL, |
@@ -1117,7 +1138,7 @@ static void balloon_up(struct work_struct *dummy) | |||
1117 | 1138 | ||
1118 | if (ret == -EAGAIN) | 1139 | if (ret == -EAGAIN) |
1119 | msleep(20); | 1140 | msleep(20); |
1120 | 1141 | post_status(&dm_device); | |
1121 | } while (ret == -EAGAIN); | 1142 | } while (ret == -EAGAIN); |
1122 | 1143 | ||
1123 | if (ret) { | 1144 | if (ret) { |
@@ -1144,8 +1165,10 @@ static void balloon_down(struct hv_dynmem_device *dm, | |||
1144 | struct dm_unballoon_response resp; | 1165 | struct dm_unballoon_response resp; |
1145 | int i; | 1166 | int i; |
1146 | 1167 | ||
1147 | for (i = 0; i < range_count; i++) | 1168 | for (i = 0; i < range_count; i++) { |
1148 | free_balloon_pages(dm, &range_array[i]); | 1169 | free_balloon_pages(dm, &range_array[i]); |
1170 | post_status(&dm_device); | ||
1171 | } | ||
1149 | 1172 | ||
1150 | if (req->more_pages == 1) | 1173 | if (req->more_pages == 1) |
1151 | return; | 1174 | return; |
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 860134da8039..18d1a8404cbc 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h | |||
@@ -510,6 +510,11 @@ struct hv_context { | |||
510 | * basis. | 510 | * basis. |
511 | */ | 511 | */ |
512 | struct tasklet_struct *event_dpc[NR_CPUS]; | 512 | struct tasklet_struct *event_dpc[NR_CPUS]; |
513 | /* | ||
514 | * To optimize the mapping of relid to channel, maintain | ||
515 | * per-cpu list of the channels based on their CPU affinity. | ||
516 | */ | ||
517 | struct list_head percpu_list[NR_CPUS]; | ||
513 | }; | 518 | }; |
514 | 519 | ||
515 | extern struct hv_context hv_context; | 520 | extern struct hv_context hv_context; |
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c index bbe12932d404..9018ab83517a 100644 --- a/drivers/mcb/mcb-core.c +++ b/drivers/mcb/mcb-core.c | |||
@@ -183,14 +183,14 @@ EXPORT_SYMBOL_GPL(mcb_device_register); | |||
183 | * | 183 | * |
184 | * Allocate a new @mcb_bus. | 184 | * Allocate a new @mcb_bus. |
185 | */ | 185 | */ |
186 | struct mcb_bus *mcb_alloc_bus(void) | 186 | struct mcb_bus *mcb_alloc_bus(struct device *carrier) |
187 | { | 187 | { |
188 | struct mcb_bus *bus; | 188 | struct mcb_bus *bus; |
189 | int bus_nr; | 189 | int bus_nr; |
190 | 190 | ||
191 | bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL); | 191 | bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL); |
192 | if (!bus) | 192 | if (!bus) |
193 | return NULL; | 193 | return ERR_PTR(-ENOMEM); |
194 | 194 | ||
195 | bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL); | 195 | bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL); |
196 | if (bus_nr < 0) { | 196 | if (bus_nr < 0) { |
@@ -200,7 +200,7 @@ struct mcb_bus *mcb_alloc_bus(void) | |||
200 | 200 | ||
201 | INIT_LIST_HEAD(&bus->children); | 201 | INIT_LIST_HEAD(&bus->children); |
202 | bus->bus_nr = bus_nr; | 202 | bus->bus_nr = bus_nr; |
203 | 203 | bus->carrier = carrier; | |
204 | return bus; | 204 | return bus; |
205 | } | 205 | } |
206 | EXPORT_SYMBOL_GPL(mcb_alloc_bus); | 206 | EXPORT_SYMBOL_GPL(mcb_alloc_bus); |
@@ -378,6 +378,13 @@ void mcb_release_mem(struct resource *mem) | |||
378 | } | 378 | } |
379 | EXPORT_SYMBOL_GPL(mcb_release_mem); | 379 | EXPORT_SYMBOL_GPL(mcb_release_mem); |
380 | 380 | ||
381 | static int __mcb_get_irq(struct mcb_device *dev) | ||
382 | { | ||
383 | struct resource *irq = &dev->irq; | ||
384 | |||
385 | return irq->start; | ||
386 | } | ||
387 | |||
381 | /** | 388 | /** |
382 | * mcb_get_irq() - Get device's IRQ number | 389 | * mcb_get_irq() - Get device's IRQ number |
383 | * @dev: The @mcb_device the IRQ is for | 390 | * @dev: The @mcb_device the IRQ is for |
@@ -386,9 +393,12 @@ EXPORT_SYMBOL_GPL(mcb_release_mem); | |||
386 | */ | 393 | */ |
387 | int mcb_get_irq(struct mcb_device *dev) | 394 | int mcb_get_irq(struct mcb_device *dev) |
388 | { | 395 | { |
389 | struct resource *irq = &dev->irq; | 396 | struct mcb_bus *bus = dev->bus; |
390 | 397 | ||
391 | return irq->start; | 398 | if (bus->get_irq) |
399 | return bus->get_irq(dev); | ||
400 | |||
401 | return __mcb_get_irq(dev); | ||
392 | } | 402 | } |
393 | EXPORT_SYMBOL_GPL(mcb_get_irq); | 403 | EXPORT_SYMBOL_GPL(mcb_get_irq); |
394 | 404 | ||
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c index 99c742cbfb5b..b59181965643 100644 --- a/drivers/mcb/mcb-pci.c +++ b/drivers/mcb/mcb-pci.c | |||
@@ -20,6 +20,15 @@ struct priv { | |||
20 | void __iomem *base; | 20 | void __iomem *base; |
21 | }; | 21 | }; |
22 | 22 | ||
23 | static int mcb_pci_get_irq(struct mcb_device *mdev) | ||
24 | { | ||
25 | struct mcb_bus *mbus = mdev->bus; | ||
26 | struct device *dev = mbus->carrier; | ||
27 | struct pci_dev *pdev = to_pci_dev(dev); | ||
28 | |||
29 | return pdev->irq; | ||
30 | } | ||
31 | |||
23 | static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 32 | static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
24 | { | 33 | { |
25 | struct priv *priv; | 34 | struct priv *priv; |
@@ -67,7 +76,13 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
67 | 76 | ||
68 | pci_set_drvdata(pdev, priv); | 77 | pci_set_drvdata(pdev, priv); |
69 | 78 | ||
70 | priv->bus = mcb_alloc_bus(); | 79 | priv->bus = mcb_alloc_bus(&pdev->dev); |
80 | if (IS_ERR(priv->bus)) { | ||
81 | ret = PTR_ERR(priv->bus); | ||
82 | goto err_drvdata; | ||
83 | } | ||
84 | |||
85 | priv->bus->get_irq = mcb_pci_get_irq; | ||
71 | 86 | ||
72 | ret = chameleon_parse_cells(priv->bus, mapbase, priv->base); | 87 | ret = chameleon_parse_cells(priv->bus, mapbase, priv->base); |
73 | if (ret < 0) | 88 | if (ret < 0) |
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index f04ac62dd76b..6deb8a11c12f 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
@@ -331,15 +331,15 @@ config MFD_88PM860X | |||
331 | battery-charger under the corresponding menus. | 331 | battery-charger under the corresponding menus. |
332 | 332 | ||
333 | config MFD_MAX14577 | 333 | config MFD_MAX14577 |
334 | bool "Maxim Semiconductor MAX14577 MUIC + Charger Support" | 334 | bool "Maxim Semiconductor MAX14577/77836 MUIC + Charger Support" |
335 | depends on I2C=y | 335 | depends on I2C=y |
336 | select MFD_CORE | 336 | select MFD_CORE |
337 | select REGMAP_I2C | 337 | select REGMAP_I2C |
338 | select REGMAP_IRQ | 338 | select REGMAP_IRQ |
339 | select IRQ_DOMAIN | 339 | select IRQ_DOMAIN |
340 | help | 340 | help |
341 | Say yes here to add support for Maxim Semiconductor MAX14577. | 341 | Say yes here to add support for Maxim Semiconductor MAX14577 and |
342 | This is a Micro-USB IC with Charger controls on chip. | 342 | MAX77836 Micro-USB ICs with battery charger. |
343 | This driver provides common support for accessing the device; | 343 | This driver provides common support for accessing the device; |
344 | additional drivers must be enabled in order to use the functionality | 344 | additional drivers must be enabled in order to use the functionality |
345 | of the device. | 345 | of the device. |
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c index 5f13cefe8def..484d372a4892 100644 --- a/drivers/mfd/max14577.c +++ b/drivers/mfd/max14577.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * max14577.c - mfd core driver for the Maxim 14577 | 2 | * max14577.c - mfd core driver for the Maxim 14577/77836 |
3 | * | 3 | * |
4 | * Copyright (C) 2013 Samsung Electrnoics | 4 | * Copyright (C) 2014 Samsung Electrnoics |
5 | * Chanwoo Choi <cw00.choi@samsung.com> | 5 | * Chanwoo Choi <cw00.choi@samsung.com> |
6 | * Krzysztof Kozlowski <k.kozlowski@samsung.com> | 6 | * Krzysztof Kozlowski <k.kozlowski@samsung.com> |
7 | * | 7 | * |
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/of_device.h> | ||
24 | #include <linux/mfd/core.h> | 25 | #include <linux/mfd/core.h> |
25 | #include <linux/mfd/max14577.h> | 26 | #include <linux/mfd/max14577.h> |
26 | #include <linux/mfd/max14577-private.h> | 27 | #include <linux/mfd/max14577-private.h> |
@@ -37,7 +38,38 @@ static struct mfd_cell max14577_devs[] = { | |||
37 | { .name = "max14577-charger", }, | 38 | { .name = "max14577-charger", }, |
38 | }; | 39 | }; |
39 | 40 | ||
40 | static bool max14577_volatile_reg(struct device *dev, unsigned int reg) | 41 | static struct mfd_cell max77836_devs[] = { |
42 | { | ||
43 | .name = "max77836-muic", | ||
44 | .of_compatible = "maxim,max77836-muic", | ||
45 | }, | ||
46 | { | ||
47 | .name = "max77836-regulator", | ||
48 | .of_compatible = "maxim,max77836-regulator", | ||
49 | }, | ||
50 | { | ||
51 | .name = "max77836-charger", | ||
52 | .of_compatible = "maxim,max77836-charger", | ||
53 | }, | ||
54 | { | ||
55 | .name = "max77836-battery", | ||
56 | .of_compatible = "maxim,max77836-battery", | ||
57 | }, | ||
58 | }; | ||
59 | |||
60 | static struct of_device_id max14577_dt_match[] = { | ||
61 | { | ||
62 | .compatible = "maxim,max14577", | ||
63 | .data = (void *)MAXIM_DEVICE_TYPE_MAX14577, | ||
64 | }, | ||
65 | { | ||
66 | .compatible = "maxim,max77836", | ||
67 | .data = (void *)MAXIM_DEVICE_TYPE_MAX77836, | ||
68 | }, | ||
69 | {}, | ||
70 | }; | ||
71 | |||
72 | static bool max14577_muic_volatile_reg(struct device *dev, unsigned int reg) | ||
41 | { | 73 | { |
42 | switch (reg) { | 74 | switch (reg) { |
43 | case MAX14577_REG_INT1 ... MAX14577_REG_STATUS3: | 75 | case MAX14577_REG_INT1 ... MAX14577_REG_STATUS3: |
@@ -48,49 +80,221 @@ static bool max14577_volatile_reg(struct device *dev, unsigned int reg) | |||
48 | return false; | 80 | return false; |
49 | } | 81 | } |
50 | 82 | ||
51 | static const struct regmap_config max14577_regmap_config = { | 83 | static bool max77836_muic_volatile_reg(struct device *dev, unsigned int reg) |
84 | { | ||
85 | /* Any max14577 volatile registers are also max77836 volatile. */ | ||
86 | if (max14577_muic_volatile_reg(dev, reg)) | ||
87 | return true; | ||
88 | |||
89 | switch (reg) { | ||
90 | case MAX77836_FG_REG_VCELL_MSB ... MAX77836_FG_REG_SOC_LSB: | ||
91 | case MAX77836_FG_REG_CRATE_MSB ... MAX77836_FG_REG_CRATE_LSB: | ||
92 | case MAX77836_FG_REG_STATUS_H ... MAX77836_FG_REG_STATUS_L: | ||
93 | case MAX77836_PMIC_REG_INTSRC: | ||
94 | case MAX77836_PMIC_REG_TOPSYS_INT: | ||
95 | case MAX77836_PMIC_REG_TOPSYS_STAT: | ||
96 | return true; | ||
97 | default: | ||
98 | break; | ||
99 | } | ||
100 | return false; | ||
101 | } | ||
102 | |||
103 | static const struct regmap_config max14577_muic_regmap_config = { | ||
52 | .reg_bits = 8, | 104 | .reg_bits = 8, |
53 | .val_bits = 8, | 105 | .val_bits = 8, |
54 | .volatile_reg = max14577_volatile_reg, | 106 | .volatile_reg = max14577_muic_volatile_reg, |
55 | .max_register = MAX14577_REG_END, | 107 | .max_register = MAX14577_REG_END, |
56 | }; | 108 | }; |
57 | 109 | ||
110 | static const struct regmap_config max77836_pmic_regmap_config = { | ||
111 | .reg_bits = 8, | ||
112 | .val_bits = 8, | ||
113 | .volatile_reg = max77836_muic_volatile_reg, | ||
114 | .max_register = MAX77836_PMIC_REG_END, | ||
115 | }; | ||
116 | |||
58 | static const struct regmap_irq max14577_irqs[] = { | 117 | static const struct regmap_irq max14577_irqs[] = { |
59 | /* INT1 interrupts */ | 118 | /* INT1 interrupts */ |
60 | { .reg_offset = 0, .mask = INT1_ADC_MASK, }, | 119 | { .reg_offset = 0, .mask = MAX14577_INT1_ADC_MASK, }, |
61 | { .reg_offset = 0, .mask = INT1_ADCLOW_MASK, }, | 120 | { .reg_offset = 0, .mask = MAX14577_INT1_ADCLOW_MASK, }, |
62 | { .reg_offset = 0, .mask = INT1_ADCERR_MASK, }, | 121 | { .reg_offset = 0, .mask = MAX14577_INT1_ADCERR_MASK, }, |
63 | /* INT2 interrupts */ | 122 | /* INT2 interrupts */ |
64 | { .reg_offset = 1, .mask = INT2_CHGTYP_MASK, }, | 123 | { .reg_offset = 1, .mask = MAX14577_INT2_CHGTYP_MASK, }, |
65 | { .reg_offset = 1, .mask = INT2_CHGDETRUN_MASK, }, | 124 | { .reg_offset = 1, .mask = MAX14577_INT2_CHGDETRUN_MASK, }, |
66 | { .reg_offset = 1, .mask = INT2_DCDTMR_MASK, }, | 125 | { .reg_offset = 1, .mask = MAX14577_INT2_DCDTMR_MASK, }, |
67 | { .reg_offset = 1, .mask = INT2_DBCHG_MASK, }, | 126 | { .reg_offset = 1, .mask = MAX14577_INT2_DBCHG_MASK, }, |
68 | { .reg_offset = 1, .mask = INT2_VBVOLT_MASK, }, | 127 | { .reg_offset = 1, .mask = MAX14577_INT2_VBVOLT_MASK, }, |
69 | /* INT3 interrupts */ | 128 | /* INT3 interrupts */ |
70 | { .reg_offset = 2, .mask = INT3_EOC_MASK, }, | 129 | { .reg_offset = 2, .mask = MAX14577_INT3_EOC_MASK, }, |
71 | { .reg_offset = 2, .mask = INT3_CGMBC_MASK, }, | 130 | { .reg_offset = 2, .mask = MAX14577_INT3_CGMBC_MASK, }, |
72 | { .reg_offset = 2, .mask = INT3_OVP_MASK, }, | 131 | { .reg_offset = 2, .mask = MAX14577_INT3_OVP_MASK, }, |
73 | { .reg_offset = 2, .mask = INT3_MBCCHGERR_MASK, }, | 132 | { .reg_offset = 2, .mask = MAX14577_INT3_MBCCHGERR_MASK, }, |
74 | }; | 133 | }; |
75 | 134 | ||
76 | static const struct regmap_irq_chip max14577_irq_chip = { | 135 | static const struct regmap_irq_chip max14577_irq_chip = { |
77 | .name = "max14577", | 136 | .name = "max14577", |
78 | .status_base = MAX14577_REG_INT1, | 137 | .status_base = MAX14577_REG_INT1, |
79 | .mask_base = MAX14577_REG_INTMASK1, | 138 | .mask_base = MAX14577_REG_INTMASK1, |
80 | .mask_invert = 1, | 139 | .mask_invert = true, |
81 | .num_regs = 3, | 140 | .num_regs = 3, |
82 | .irqs = max14577_irqs, | 141 | .irqs = max14577_irqs, |
83 | .num_irqs = ARRAY_SIZE(max14577_irqs), | 142 | .num_irqs = ARRAY_SIZE(max14577_irqs), |
84 | }; | 143 | }; |
85 | 144 | ||
145 | static const struct regmap_irq max77836_muic_irqs[] = { | ||
146 | /* INT1 interrupts */ | ||
147 | { .reg_offset = 0, .mask = MAX14577_INT1_ADC_MASK, }, | ||
148 | { .reg_offset = 0, .mask = MAX14577_INT1_ADCLOW_MASK, }, | ||
149 | { .reg_offset = 0, .mask = MAX14577_INT1_ADCERR_MASK, }, | ||
150 | { .reg_offset = 0, .mask = MAX77836_INT1_ADC1K_MASK, }, | ||
151 | /* INT2 interrupts */ | ||
152 | { .reg_offset = 1, .mask = MAX14577_INT2_CHGTYP_MASK, }, | ||
153 | { .reg_offset = 1, .mask = MAX14577_INT2_CHGDETRUN_MASK, }, | ||
154 | { .reg_offset = 1, .mask = MAX14577_INT2_DCDTMR_MASK, }, | ||
155 | { .reg_offset = 1, .mask = MAX14577_INT2_DBCHG_MASK, }, | ||
156 | { .reg_offset = 1, .mask = MAX14577_INT2_VBVOLT_MASK, }, | ||
157 | { .reg_offset = 1, .mask = MAX77836_INT2_VIDRM_MASK, }, | ||
158 | /* INT3 interrupts */ | ||
159 | { .reg_offset = 2, .mask = MAX14577_INT3_EOC_MASK, }, | ||
160 | { .reg_offset = 2, .mask = MAX14577_INT3_CGMBC_MASK, }, | ||
161 | { .reg_offset = 2, .mask = MAX14577_INT3_OVP_MASK, }, | ||
162 | { .reg_offset = 2, .mask = MAX14577_INT3_MBCCHGERR_MASK, }, | ||
163 | }; | ||
164 | |||
165 | static const struct regmap_irq_chip max77836_muic_irq_chip = { | ||
166 | .name = "max77836-muic", | ||
167 | .status_base = MAX14577_REG_INT1, | ||
168 | .mask_base = MAX14577_REG_INTMASK1, | ||
169 | .mask_invert = true, | ||
170 | .num_regs = 3, | ||
171 | .irqs = max77836_muic_irqs, | ||
172 | .num_irqs = ARRAY_SIZE(max77836_muic_irqs), | ||
173 | }; | ||
174 | |||
175 | static const struct regmap_irq max77836_pmic_irqs[] = { | ||
176 | { .reg_offset = 0, .mask = MAX77836_TOPSYS_INT_T120C_MASK, }, | ||
177 | { .reg_offset = 0, .mask = MAX77836_TOPSYS_INT_T140C_MASK, }, | ||
178 | }; | ||
179 | |||
180 | static const struct regmap_irq_chip max77836_pmic_irq_chip = { | ||
181 | .name = "max77836-pmic", | ||
182 | .status_base = MAX77836_PMIC_REG_TOPSYS_INT, | ||
183 | .mask_base = MAX77836_PMIC_REG_TOPSYS_INT_MASK, | ||
184 | .mask_invert = false, | ||
185 | .num_regs = 1, | ||
186 | .irqs = max77836_pmic_irqs, | ||
187 | .num_irqs = ARRAY_SIZE(max77836_pmic_irqs), | ||
188 | }; | ||
189 | |||
190 | static void max14577_print_dev_type(struct max14577 *max14577) | ||
191 | { | ||
192 | u8 reg_data, vendor_id, device_id; | ||
193 | int ret; | ||
194 | |||
195 | ret = max14577_read_reg(max14577->regmap, MAX14577_REG_DEVICEID, | ||
196 | ®_data); | ||
197 | if (ret) { | ||
198 | dev_err(max14577->dev, | ||
199 | "Failed to read DEVICEID register: %d\n", ret); | ||
200 | return; | ||
201 | } | ||
202 | |||
203 | vendor_id = ((reg_data & DEVID_VENDORID_MASK) >> | ||
204 | DEVID_VENDORID_SHIFT); | ||
205 | device_id = ((reg_data & DEVID_DEVICEID_MASK) >> | ||
206 | DEVID_DEVICEID_SHIFT); | ||
207 | |||
208 | dev_info(max14577->dev, "Device type: %u (ID: 0x%x, vendor: 0x%x)\n", | ||
209 | max14577->dev_type, device_id, vendor_id); | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * Max77836 specific initialization code for driver probe. | ||
214 | * Adds new I2C dummy device, regmap and regmap IRQ chip. | ||
215 | * Unmasks Interrupt Source register. | ||
216 | * | ||
217 | * On success returns 0. | ||
218 | * On failure returns errno and reverts any changes done so far (e.g. remove | ||
219 | * I2C dummy device), except masking the INT SRC register. | ||
220 | */ | ||
221 | static int max77836_init(struct max14577 *max14577) | ||
222 | { | ||
223 | int ret; | ||
224 | u8 intsrc_mask; | ||
225 | |||
226 | max14577->i2c_pmic = i2c_new_dummy(max14577->i2c->adapter, | ||
227 | I2C_ADDR_PMIC); | ||
228 | if (!max14577->i2c_pmic) { | ||
229 | dev_err(max14577->dev, "Failed to register PMIC I2C device\n"); | ||
230 | return -ENODEV; | ||
231 | } | ||
232 | i2c_set_clientdata(max14577->i2c_pmic, max14577); | ||
233 | |||
234 | max14577->regmap_pmic = devm_regmap_init_i2c(max14577->i2c_pmic, | ||
235 | &max77836_pmic_regmap_config); | ||
236 | if (IS_ERR(max14577->regmap_pmic)) { | ||
237 | ret = PTR_ERR(max14577->regmap_pmic); | ||
238 | dev_err(max14577->dev, "Failed to allocate PMIC register map: %d\n", | ||
239 | ret); | ||
240 | goto err; | ||
241 | } | ||
242 | |||
243 | /* Un-mask MAX77836 Interrupt Source register */ | ||
244 | ret = max14577_read_reg(max14577->regmap_pmic, | ||
245 | MAX77836_PMIC_REG_INTSRC_MASK, &intsrc_mask); | ||
246 | if (ret < 0) { | ||
247 | dev_err(max14577->dev, "Failed to read PMIC register\n"); | ||
248 | goto err; | ||
249 | } | ||
250 | |||
251 | intsrc_mask &= ~(MAX77836_INTSRC_MASK_TOP_INT_MASK); | ||
252 | intsrc_mask &= ~(MAX77836_INTSRC_MASK_MUIC_CHG_INT_MASK); | ||
253 | ret = max14577_write_reg(max14577->regmap_pmic, | ||
254 | MAX77836_PMIC_REG_INTSRC_MASK, intsrc_mask); | ||
255 | if (ret < 0) { | ||
256 | dev_err(max14577->dev, "Failed to write PMIC register\n"); | ||
257 | goto err; | ||
258 | } | ||
259 | |||
260 | ret = regmap_add_irq_chip(max14577->regmap_pmic, max14577->irq, | ||
261 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_SHARED, | ||
262 | 0, &max77836_pmic_irq_chip, | ||
263 | &max14577->irq_data_pmic); | ||
264 | if (ret != 0) { | ||
265 | dev_err(max14577->dev, "Failed to request PMIC IRQ %d: %d\n", | ||
266 | max14577->irq, ret); | ||
267 | goto err; | ||
268 | } | ||
269 | |||
270 | return 0; | ||
271 | |||
272 | err: | ||
273 | i2c_unregister_device(max14577->i2c_pmic); | ||
274 | |||
275 | return ret; | ||
276 | } | ||
277 | |||
278 | /* | ||
279 | * Max77836 specific de-initialization code for driver remove. | ||
280 | */ | ||
281 | static void max77836_remove(struct max14577 *max14577) | ||
282 | { | ||
283 | regmap_del_irq_chip(max14577->irq, max14577->irq_data_pmic); | ||
284 | i2c_unregister_device(max14577->i2c_pmic); | ||
285 | } | ||
286 | |||
86 | static int max14577_i2c_probe(struct i2c_client *i2c, | 287 | static int max14577_i2c_probe(struct i2c_client *i2c, |
87 | const struct i2c_device_id *id) | 288 | const struct i2c_device_id *id) |
88 | { | 289 | { |
89 | struct max14577 *max14577; | 290 | struct max14577 *max14577; |
90 | struct max14577_platform_data *pdata = dev_get_platdata(&i2c->dev); | 291 | struct max14577_platform_data *pdata = dev_get_platdata(&i2c->dev); |
91 | struct device_node *np = i2c->dev.of_node; | 292 | struct device_node *np = i2c->dev.of_node; |
92 | u8 reg_data; | ||
93 | int ret = 0; | 293 | int ret = 0; |
294 | const struct regmap_irq_chip *irq_chip; | ||
295 | struct mfd_cell *mfd_devs; | ||
296 | unsigned int mfd_devs_size; | ||
297 | int irq_flags; | ||
94 | 298 | ||
95 | if (np) { | 299 | if (np) { |
96 | pdata = devm_kzalloc(&i2c->dev, sizeof(*pdata), GFP_KERNEL); | 300 | pdata = devm_kzalloc(&i2c->dev, sizeof(*pdata), GFP_KERNEL); |
@@ -113,7 +317,8 @@ static int max14577_i2c_probe(struct i2c_client *i2c, | |||
113 | max14577->i2c = i2c; | 317 | max14577->i2c = i2c; |
114 | max14577->irq = i2c->irq; | 318 | max14577->irq = i2c->irq; |
115 | 319 | ||
116 | max14577->regmap = devm_regmap_init_i2c(i2c, &max14577_regmap_config); | 320 | max14577->regmap = devm_regmap_init_i2c(i2c, |
321 | &max14577_muic_regmap_config); | ||
117 | if (IS_ERR(max14577->regmap)) { | 322 | if (IS_ERR(max14577->regmap)) { |
118 | ret = PTR_ERR(max14577->regmap); | 323 | ret = PTR_ERR(max14577->regmap); |
119 | dev_err(max14577->dev, "Failed to allocate register map: %d\n", | 324 | dev_err(max14577->dev, "Failed to allocate register map: %d\n", |
@@ -121,23 +326,36 @@ static int max14577_i2c_probe(struct i2c_client *i2c, | |||
121 | return ret; | 326 | return ret; |
122 | } | 327 | } |
123 | 328 | ||
124 | ret = max14577_read_reg(max14577->regmap, MAX14577_REG_DEVICEID, | 329 | if (np) { |
125 | ®_data); | 330 | const struct of_device_id *of_id; |
126 | if (ret) { | 331 | |
127 | dev_err(max14577->dev, "Device not found on this channel: %d\n", | 332 | of_id = of_match_device(max14577_dt_match, &i2c->dev); |
128 | ret); | 333 | if (of_id) |
129 | return ret; | 334 | max14577->dev_type = (unsigned int)of_id->data; |
335 | } else { | ||
336 | max14577->dev_type = id->driver_data; | ||
337 | } | ||
338 | |||
339 | max14577_print_dev_type(max14577); | ||
340 | |||
341 | switch (max14577->dev_type) { | ||
342 | case MAXIM_DEVICE_TYPE_MAX77836: | ||
343 | irq_chip = &max77836_muic_irq_chip; | ||
344 | mfd_devs = max77836_devs; | ||
345 | mfd_devs_size = ARRAY_SIZE(max77836_devs); | ||
346 | irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_SHARED; | ||
347 | break; | ||
348 | case MAXIM_DEVICE_TYPE_MAX14577: | ||
349 | default: | ||
350 | irq_chip = &max14577_irq_chip; | ||
351 | mfd_devs = max14577_devs; | ||
352 | mfd_devs_size = ARRAY_SIZE(max14577_devs); | ||
353 | irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; | ||
354 | break; | ||
130 | } | 355 | } |
131 | max14577->vendor_id = ((reg_data & DEVID_VENDORID_MASK) >> | ||
132 | DEVID_VENDORID_SHIFT); | ||
133 | max14577->device_id = ((reg_data & DEVID_DEVICEID_MASK) >> | ||
134 | DEVID_DEVICEID_SHIFT); | ||
135 | dev_info(max14577->dev, "Device ID: 0x%x, vendor: 0x%x\n", | ||
136 | max14577->device_id, max14577->vendor_id); | ||
137 | 356 | ||
138 | ret = regmap_add_irq_chip(max14577->regmap, max14577->irq, | 357 | ret = regmap_add_irq_chip(max14577->regmap, max14577->irq, |
139 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 0, | 358 | irq_flags, 0, irq_chip, |
140 | &max14577_irq_chip, | ||
141 | &max14577->irq_data); | 359 | &max14577->irq_data); |
142 | if (ret != 0) { | 360 | if (ret != 0) { |
143 | dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n", | 361 | dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n", |
@@ -145,8 +363,15 @@ static int max14577_i2c_probe(struct i2c_client *i2c, | |||
145 | return ret; | 363 | return ret; |
146 | } | 364 | } |
147 | 365 | ||
148 | ret = mfd_add_devices(max14577->dev, -1, max14577_devs, | 366 | /* Max77836 specific initialization code (additional regmap) */ |
149 | ARRAY_SIZE(max14577_devs), NULL, 0, | 367 | if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836) { |
368 | ret = max77836_init(max14577); | ||
369 | if (ret < 0) | ||
370 | goto err_max77836; | ||
371 | } | ||
372 | |||
373 | ret = mfd_add_devices(max14577->dev, -1, mfd_devs, | ||
374 | mfd_devs_size, NULL, 0, | ||
150 | regmap_irq_get_domain(max14577->irq_data)); | 375 | regmap_irq_get_domain(max14577->irq_data)); |
151 | if (ret < 0) | 376 | if (ret < 0) |
152 | goto err_mfd; | 377 | goto err_mfd; |
@@ -156,6 +381,9 @@ static int max14577_i2c_probe(struct i2c_client *i2c, | |||
156 | return 0; | 381 | return 0; |
157 | 382 | ||
158 | err_mfd: | 383 | err_mfd: |
384 | if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836) | ||
385 | max77836_remove(max14577); | ||
386 | err_max77836: | ||
159 | regmap_del_irq_chip(max14577->irq, max14577->irq_data); | 387 | regmap_del_irq_chip(max14577->irq, max14577->irq_data); |
160 | 388 | ||
161 | return ret; | 389 | return ret; |
@@ -167,12 +395,15 @@ static int max14577_i2c_remove(struct i2c_client *i2c) | |||
167 | 395 | ||
168 | mfd_remove_devices(max14577->dev); | 396 | mfd_remove_devices(max14577->dev); |
169 | regmap_del_irq_chip(max14577->irq, max14577->irq_data); | 397 | regmap_del_irq_chip(max14577->irq, max14577->irq_data); |
398 | if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836) | ||
399 | max77836_remove(max14577); | ||
170 | 400 | ||
171 | return 0; | 401 | return 0; |
172 | } | 402 | } |
173 | 403 | ||
174 | static const struct i2c_device_id max14577_i2c_id[] = { | 404 | static const struct i2c_device_id max14577_i2c_id[] = { |
175 | { "max14577", 0 }, | 405 | { "max14577", MAXIM_DEVICE_TYPE_MAX14577, }, |
406 | { "max77836", MAXIM_DEVICE_TYPE_MAX77836, }, | ||
176 | { } | 407 | { } |
177 | }; | 408 | }; |
178 | MODULE_DEVICE_TABLE(i2c, max14577_i2c_id); | 409 | MODULE_DEVICE_TABLE(i2c, max14577_i2c_id); |
@@ -215,11 +446,6 @@ static int max14577_resume(struct device *dev) | |||
215 | } | 446 | } |
216 | #endif /* CONFIG_PM_SLEEP */ | 447 | #endif /* CONFIG_PM_SLEEP */ |
217 | 448 | ||
218 | static struct of_device_id max14577_dt_match[] = { | ||
219 | { .compatible = "maxim,max14577", }, | ||
220 | {}, | ||
221 | }; | ||
222 | |||
223 | static SIMPLE_DEV_PM_OPS(max14577_pm, max14577_suspend, max14577_resume); | 449 | static SIMPLE_DEV_PM_OPS(max14577_pm, max14577_suspend, max14577_resume); |
224 | 450 | ||
225 | static struct i2c_driver max14577_i2c_driver = { | 451 | static struct i2c_driver max14577_i2c_driver = { |
@@ -236,6 +462,9 @@ static struct i2c_driver max14577_i2c_driver = { | |||
236 | 462 | ||
237 | static int __init max14577_i2c_init(void) | 463 | static int __init max14577_i2c_init(void) |
238 | { | 464 | { |
465 | BUILD_BUG_ON(ARRAY_SIZE(max14577_i2c_id) != MAXIM_DEVICE_TYPE_NUM); | ||
466 | BUILD_BUG_ON(ARRAY_SIZE(max14577_dt_match) != MAXIM_DEVICE_TYPE_NUM); | ||
467 | |||
239 | return i2c_add_driver(&max14577_i2c_driver); | 468 | return i2c_add_driver(&max14577_i2c_driver); |
240 | } | 469 | } |
241 | subsys_initcall(max14577_i2c_init); | 470 | subsys_initcall(max14577_i2c_init); |
@@ -247,5 +476,5 @@ static void __exit max14577_i2c_exit(void) | |||
247 | module_exit(max14577_i2c_exit); | 476 | module_exit(max14577_i2c_exit); |
248 | 477 | ||
249 | MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>, Krzysztof Kozlowski <k.kozlowski@samsung.com>"); | 478 | MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>, Krzysztof Kozlowski <k.kozlowski@samsung.com>"); |
250 | MODULE_DESCRIPTION("MAXIM 14577 multi-function core driver"); | 479 | MODULE_DESCRIPTION("Maxim 14577/77836 multi-function core driver"); |
251 | MODULE_LICENSE("GPL"); | 480 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index d9663ef90ce8..a43d0c467274 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -54,6 +54,7 @@ config AD525X_DPOT_SPI | |||
54 | config ATMEL_PWM | 54 | config ATMEL_PWM |
55 | tristate "Atmel AT32/AT91 PWM support" | 55 | tristate "Atmel AT32/AT91 PWM support" |
56 | depends on HAVE_CLK | 56 | depends on HAVE_CLK |
57 | depends on AVR32 || AT91SAM9263 || AT91SAM9RL || AT91SAM9G45 | ||
57 | help | 58 | help |
58 | This option enables device driver support for the PWM channels | 59 | This option enables device driver support for the PWM channels |
59 | on certain Atmel processors. Pulse Width Modulation is used for | 60 | on certain Atmel processors. Pulse Width Modulation is used for |
@@ -200,7 +201,7 @@ config ICS932S401 | |||
200 | 201 | ||
201 | config ATMEL_SSC | 202 | config ATMEL_SSC |
202 | tristate "Device driver for Atmel SSC peripheral" | 203 | tristate "Device driver for Atmel SSC peripheral" |
203 | depends on HAS_IOMEM | 204 | depends on HAS_IOMEM && (AVR32 || ARCH_AT91 || COMPILE_TEST) |
204 | ---help--- | 205 | ---help--- |
205 | This option enables device driver support for Atmel Synchronized | 206 | This option enables device driver support for Atmel Synchronized |
206 | Serial Communication peripheral (SSC). | 207 | Serial Communication peripheral (SSC). |
@@ -468,7 +469,7 @@ config BMP085_SPI | |||
468 | config PCH_PHUB | 469 | config PCH_PHUB |
469 | tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB" | 470 | tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB" |
470 | select GENERIC_NET_UTILS | 471 | select GENERIC_NET_UTILS |
471 | depends on PCI | 472 | depends on PCI && (X86_32 || COMPILE_TEST) |
472 | help | 473 | help |
473 | This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of | 474 | This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of |
474 | Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded | 475 | Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded |
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c index b7ebf8021d99..c72e96b523ed 100644 --- a/drivers/misc/arm-charlcd.c +++ b/drivers/misc/arm-charlcd.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
14 | #include <linux/of.h> | ||
14 | #include <linux/completion.h> | 15 | #include <linux/completion.h> |
15 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
16 | #include <linux/io.h> | 17 | #include <linux/io.h> |
@@ -366,11 +367,17 @@ static const struct dev_pm_ops charlcd_pm_ops = { | |||
366 | .resume = charlcd_resume, | 367 | .resume = charlcd_resume, |
367 | }; | 368 | }; |
368 | 369 | ||
370 | static const struct of_device_id charlcd_match[] = { | ||
371 | { .compatible = "arm,versatile-lcd", }, | ||
372 | {} | ||
373 | }; | ||
374 | |||
369 | static struct platform_driver charlcd_driver = { | 375 | static struct platform_driver charlcd_driver = { |
370 | .driver = { | 376 | .driver = { |
371 | .name = DRIVERNAME, | 377 | .name = DRIVERNAME, |
372 | .owner = THIS_MODULE, | 378 | .owner = THIS_MODULE, |
373 | .pm = &charlcd_pm_ops, | 379 | .pm = &charlcd_pm_ops, |
380 | .of_match_table = of_match_ptr(charlcd_match), | ||
374 | }, | 381 | }, |
375 | .remove = __exit_p(charlcd_remove), | 382 | .remove = __exit_p(charlcd_remove), |
376 | }; | 383 | }; |
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c index 6a672f9ef522..b909fb30232a 100644 --- a/drivers/misc/ds1682.c +++ b/drivers/misc/ds1682.c | |||
@@ -85,7 +85,6 @@ static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr, | |||
85 | { | 85 | { |
86 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); | 86 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); |
87 | struct i2c_client *client = to_i2c_client(dev); | 87 | struct i2c_client *client = to_i2c_client(dev); |
88 | char *endp; | ||
89 | u64 val; | 88 | u64 val; |
90 | __le32 val_le; | 89 | __le32 val_le; |
91 | int rc; | 90 | int rc; |
@@ -93,8 +92,8 @@ static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr, | |||
93 | dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name); | 92 | dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name); |
94 | 93 | ||
95 | /* Decode input */ | 94 | /* Decode input */ |
96 | val = simple_strtoull(buf, &endp, 0); | 95 | rc = kstrtoull(buf, 0, &val); |
97 | if (buf == endp) { | 96 | if (rc < 0) { |
98 | dev_dbg(dev, "input string not a number\n"); | 97 | dev_dbg(dev, "input string not a number\n"); |
99 | return -EINVAL; | 98 | return -EINVAL; |
100 | } | 99 | } |
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c index 50d2096ea1c7..0a33ade64109 100644 --- a/drivers/misc/genwqe/card_debugfs.c +++ b/drivers/misc/genwqe/card_debugfs.c | |||
@@ -348,7 +348,7 @@ int genwqe_init_debugfs(struct genwqe_dev *cd) | |||
348 | char name[64]; | 348 | char name[64]; |
349 | unsigned int i; | 349 | unsigned int i; |
350 | 350 | ||
351 | sprintf(card_name, "%s%u_card", GENWQE_DEVNAME, cd->card_idx); | 351 | sprintf(card_name, "%s%d_card", GENWQE_DEVNAME, cd->card_idx); |
352 | 352 | ||
353 | root = debugfs_create_dir(card_name, cd->debugfs_genwqe); | 353 | root = debugfs_create_dir(card_name, cd->debugfs_genwqe); |
354 | if (!root) { | 354 | if (!root) { |
@@ -454,7 +454,7 @@ int genwqe_init_debugfs(struct genwqe_dev *cd) | |||
454 | } | 454 | } |
455 | 455 | ||
456 | for (i = 0; i < GENWQE_MAX_VFS; i++) { | 456 | for (i = 0; i < GENWQE_MAX_VFS; i++) { |
457 | sprintf(name, "vf%d_jobtimeout_msec", i); | 457 | sprintf(name, "vf%u_jobtimeout_msec", i); |
458 | 458 | ||
459 | file = debugfs_create_u32(name, 0666, root, | 459 | file = debugfs_create_u32(name, 0666, root, |
460 | &cd->vf_jobtimeout_msec[i]); | 460 | &cd->vf_jobtimeout_msec[i]); |
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index c00adfaa6279..62cc6bb3f62e 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c | |||
@@ -454,7 +454,7 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, | |||
454 | */ | 454 | */ |
455 | int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl) | 455 | int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl) |
456 | { | 456 | { |
457 | int rc; | 457 | int rc = 0; |
458 | struct pci_dev *pci_dev = cd->pci_dev; | 458 | struct pci_dev *pci_dev = cd->pci_dev; |
459 | 459 | ||
460 | if (sgl->fpage) { | 460 | if (sgl->fpage) { |
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index b8deb3455480..0d6234db00fa 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c | |||
@@ -111,8 +111,6 @@ int mei_amthif_host_init(struct mei_device *dev) | |||
111 | return ret; | 111 | return ret; |
112 | } | 112 | } |
113 | 113 | ||
114 | cl->state = MEI_FILE_CONNECTING; | ||
115 | |||
116 | ret = mei_cl_connect(cl, NULL); | 114 | ret = mei_cl_connect(cl, NULL); |
117 | 115 | ||
118 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | 116 | dev->iamthif_state = MEI_IAMTHIF_IDLE; |
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index ddc5ac92a200..0e993ef28b94 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c | |||
@@ -247,7 +247,7 @@ static int ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, | |||
247 | return id; | 247 | return id; |
248 | 248 | ||
249 | if (length > dev->me_clients[id].props.max_msg_length) | 249 | if (length > dev->me_clients[id].props.max_msg_length) |
250 | return -EINVAL; | 250 | return -EFBIG; |
251 | 251 | ||
252 | cb = mei_io_cb_init(cl, NULL); | 252 | cb = mei_io_cb_init(cl, NULL); |
253 | if (!cb) | 253 | if (!cb) |
@@ -427,8 +427,6 @@ int mei_cl_enable_device(struct mei_cl_device *device) | |||
427 | 427 | ||
428 | mutex_lock(&dev->device_lock); | 428 | mutex_lock(&dev->device_lock); |
429 | 429 | ||
430 | cl->state = MEI_FILE_CONNECTING; | ||
431 | |||
432 | err = mei_cl_connect(cl, NULL); | 430 | err = mei_cl_connect(cl, NULL); |
433 | if (err < 0) { | 431 | if (err < 0) { |
434 | mutex_unlock(&dev->device_lock); | 432 | mutex_unlock(&dev->device_lock); |
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 8c078b808cd3..59d20c599b16 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/wait.h> | 19 | #include <linux/wait.h> |
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/pm_runtime.h> | ||
21 | 22 | ||
22 | #include <linux/mei.h> | 23 | #include <linux/mei.h> |
23 | 24 | ||
@@ -415,6 +416,10 @@ void mei_host_client_init(struct work_struct *work) | |||
415 | dev->reset_count = 0; | 416 | dev->reset_count = 0; |
416 | 417 | ||
417 | mutex_unlock(&dev->device_lock); | 418 | mutex_unlock(&dev->device_lock); |
419 | |||
420 | pm_runtime_mark_last_busy(&dev->pdev->dev); | ||
421 | dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n"); | ||
422 | pm_runtime_autosuspend(&dev->pdev->dev); | ||
418 | } | 423 | } |
419 | 424 | ||
420 | /** | 425 | /** |
@@ -425,6 +430,12 @@ void mei_host_client_init(struct work_struct *work) | |||
425 | */ | 430 | */ |
426 | bool mei_hbuf_acquire(struct mei_device *dev) | 431 | bool mei_hbuf_acquire(struct mei_device *dev) |
427 | { | 432 | { |
433 | if (mei_pg_state(dev) == MEI_PG_ON || | ||
434 | dev->pg_event == MEI_PG_EVENT_WAIT) { | ||
435 | dev_dbg(&dev->pdev->dev, "device is in pg\n"); | ||
436 | return false; | ||
437 | } | ||
438 | |||
428 | if (!dev->hbuf_is_ready) { | 439 | if (!dev->hbuf_is_ready) { |
429 | dev_dbg(&dev->pdev->dev, "hbuf is not ready\n"); | 440 | dev_dbg(&dev->pdev->dev, "hbuf is not ready\n"); |
430 | return false; | 441 | return false; |
@@ -460,9 +471,18 @@ int mei_cl_disconnect(struct mei_cl *cl) | |||
460 | if (cl->state != MEI_FILE_DISCONNECTING) | 471 | if (cl->state != MEI_FILE_DISCONNECTING) |
461 | return 0; | 472 | return 0; |
462 | 473 | ||
474 | rets = pm_runtime_get(&dev->pdev->dev); | ||
475 | if (rets < 0 && rets != -EINPROGRESS) { | ||
476 | pm_runtime_put_noidle(&dev->pdev->dev); | ||
477 | cl_err(dev, cl, "rpm: get failed %d\n", rets); | ||
478 | return rets; | ||
479 | } | ||
480 | |||
463 | cb = mei_io_cb_init(cl, NULL); | 481 | cb = mei_io_cb_init(cl, NULL); |
464 | if (!cb) | 482 | if (!cb) { |
465 | return -ENOMEM; | 483 | rets = -ENOMEM; |
484 | goto free; | ||
485 | } | ||
466 | 486 | ||
467 | cb->fop_type = MEI_FOP_CLOSE; | 487 | cb->fop_type = MEI_FOP_CLOSE; |
468 | if (mei_hbuf_acquire(dev)) { | 488 | if (mei_hbuf_acquire(dev)) { |
@@ -494,8 +514,7 @@ int mei_cl_disconnect(struct mei_cl *cl) | |||
494 | cl_err(dev, cl, "wrong status client disconnect.\n"); | 514 | cl_err(dev, cl, "wrong status client disconnect.\n"); |
495 | 515 | ||
496 | if (err) | 516 | if (err) |
497 | cl_dbg(dev, cl, "wait failed disconnect err=%08x\n", | 517 | cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err); |
498 | err); | ||
499 | 518 | ||
500 | cl_err(dev, cl, "failed to disconnect from FW client.\n"); | 519 | cl_err(dev, cl, "failed to disconnect from FW client.\n"); |
501 | } | 520 | } |
@@ -503,6 +522,10 @@ int mei_cl_disconnect(struct mei_cl *cl) | |||
503 | mei_io_list_flush(&dev->ctrl_rd_list, cl); | 522 | mei_io_list_flush(&dev->ctrl_rd_list, cl); |
504 | mei_io_list_flush(&dev->ctrl_wr_list, cl); | 523 | mei_io_list_flush(&dev->ctrl_wr_list, cl); |
505 | free: | 524 | free: |
525 | cl_dbg(dev, cl, "rpm: autosuspend\n"); | ||
526 | pm_runtime_mark_last_busy(&dev->pdev->dev); | ||
527 | pm_runtime_put_autosuspend(&dev->pdev->dev); | ||
528 | |||
506 | mei_io_cb_free(cb); | 529 | mei_io_cb_free(cb); |
507 | return rets; | 530 | return rets; |
508 | } | 531 | } |
@@ -557,6 +580,13 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file) | |||
557 | 580 | ||
558 | dev = cl->dev; | 581 | dev = cl->dev; |
559 | 582 | ||
583 | rets = pm_runtime_get(&dev->pdev->dev); | ||
584 | if (rets < 0 && rets != -EINPROGRESS) { | ||
585 | pm_runtime_put_noidle(&dev->pdev->dev); | ||
586 | cl_err(dev, cl, "rpm: get failed %d\n", rets); | ||
587 | return rets; | ||
588 | } | ||
589 | |||
560 | cb = mei_io_cb_init(cl, file); | 590 | cb = mei_io_cb_init(cl, file); |
561 | if (!cb) { | 591 | if (!cb) { |
562 | rets = -ENOMEM; | 592 | rets = -ENOMEM; |
@@ -567,6 +597,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file) | |||
567 | 597 | ||
568 | /* run hbuf acquire last so we don't have to undo */ | 598 | /* run hbuf acquire last so we don't have to undo */ |
569 | if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) { | 599 | if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) { |
600 | cl->state = MEI_FILE_CONNECTING; | ||
570 | if (mei_hbm_cl_connect_req(dev, cl)) { | 601 | if (mei_hbm_cl_connect_req(dev, cl)) { |
571 | rets = -ENODEV; | 602 | rets = -ENODEV; |
572 | goto out; | 603 | goto out; |
@@ -596,6 +627,10 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file) | |||
596 | rets = cl->status; | 627 | rets = cl->status; |
597 | 628 | ||
598 | out: | 629 | out: |
630 | cl_dbg(dev, cl, "rpm: autosuspend\n"); | ||
631 | pm_runtime_mark_last_busy(&dev->pdev->dev); | ||
632 | pm_runtime_put_autosuspend(&dev->pdev->dev); | ||
633 | |||
599 | mei_io_cb_free(cb); | 634 | mei_io_cb_free(cb); |
600 | return rets; | 635 | return rets; |
601 | } | 636 | } |
@@ -713,23 +748,31 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length) | |||
713 | return -ENOTTY; | 748 | return -ENOTTY; |
714 | } | 749 | } |
715 | 750 | ||
751 | rets = pm_runtime_get(&dev->pdev->dev); | ||
752 | if (rets < 0 && rets != -EINPROGRESS) { | ||
753 | pm_runtime_put_noidle(&dev->pdev->dev); | ||
754 | cl_err(dev, cl, "rpm: get failed %d\n", rets); | ||
755 | return rets; | ||
756 | } | ||
757 | |||
716 | cb = mei_io_cb_init(cl, NULL); | 758 | cb = mei_io_cb_init(cl, NULL); |
717 | if (!cb) | 759 | if (!cb) { |
718 | return -ENOMEM; | 760 | rets = -ENOMEM; |
761 | goto out; | ||
762 | } | ||
719 | 763 | ||
720 | /* always allocate at least client max message */ | 764 | /* always allocate at least client max message */ |
721 | length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length); | 765 | length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length); |
722 | rets = mei_io_cb_alloc_resp_buf(cb, length); | 766 | rets = mei_io_cb_alloc_resp_buf(cb, length); |
723 | if (rets) | 767 | if (rets) |
724 | goto err; | 768 | goto out; |
725 | 769 | ||
726 | cb->fop_type = MEI_FOP_READ; | 770 | cb->fop_type = MEI_FOP_READ; |
727 | if (mei_hbuf_acquire(dev)) { | 771 | if (mei_hbuf_acquire(dev)) { |
728 | if (mei_hbm_cl_flow_control_req(dev, cl)) { | 772 | rets = mei_hbm_cl_flow_control_req(dev, cl); |
729 | cl_err(dev, cl, "flow control send failed\n"); | 773 | if (rets < 0) |
730 | rets = -ENODEV; | 774 | goto out; |
731 | goto err; | 775 | |
732 | } | ||
733 | list_add_tail(&cb->list, &dev->read_list.list); | 776 | list_add_tail(&cb->list, &dev->read_list.list); |
734 | } else { | 777 | } else { |
735 | list_add_tail(&cb->list, &dev->ctrl_wr_list.list); | 778 | list_add_tail(&cb->list, &dev->ctrl_wr_list.list); |
@@ -737,9 +780,14 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length) | |||
737 | 780 | ||
738 | cl->read_cb = cb; | 781 | cl->read_cb = cb; |
739 | 782 | ||
740 | return rets; | 783 | out: |
741 | err: | 784 | cl_dbg(dev, cl, "rpm: autosuspend\n"); |
742 | mei_io_cb_free(cb); | 785 | pm_runtime_mark_last_busy(&dev->pdev->dev); |
786 | pm_runtime_put_autosuspend(&dev->pdev->dev); | ||
787 | |||
788 | if (rets) | ||
789 | mei_io_cb_free(cb); | ||
790 | |||
743 | return rets; | 791 | return rets; |
744 | } | 792 | } |
745 | 793 | ||
@@ -776,7 +824,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
776 | return rets; | 824 | return rets; |
777 | 825 | ||
778 | if (rets == 0) { | 826 | if (rets == 0) { |
779 | cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); | 827 | cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); |
780 | return 0; | 828 | return 0; |
781 | } | 829 | } |
782 | 830 | ||
@@ -856,6 +904,12 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) | |||
856 | 904 | ||
857 | cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size); | 905 | cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size); |
858 | 906 | ||
907 | rets = pm_runtime_get(&dev->pdev->dev); | ||
908 | if (rets < 0 && rets != -EINPROGRESS) { | ||
909 | pm_runtime_put_noidle(&dev->pdev->dev); | ||
910 | cl_err(dev, cl, "rpm: get failed %d\n", rets); | ||
911 | return rets; | ||
912 | } | ||
859 | 913 | ||
860 | cb->fop_type = MEI_FOP_WRITE; | 914 | cb->fop_type = MEI_FOP_WRITE; |
861 | cb->buf_idx = 0; | 915 | cb->buf_idx = 0; |
@@ -926,6 +980,10 @@ out: | |||
926 | 980 | ||
927 | rets = buf->size; | 981 | rets = buf->size; |
928 | err: | 982 | err: |
983 | cl_dbg(dev, cl, "rpm: autosuspend\n"); | ||
984 | pm_runtime_mark_last_busy(&dev->pdev->dev); | ||
985 | pm_runtime_put_autosuspend(&dev->pdev->dev); | ||
986 | |||
929 | return rets; | 987 | return rets; |
930 | } | 988 | } |
931 | 989 | ||
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 4960288e543a..804106209d76 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c | |||
@@ -14,10 +14,12 @@ | |||
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/export.h> | ||
17 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
18 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
19 | #include <linux/wait.h> | 20 | #include <linux/wait.h> |
20 | #include <linux/mei.h> | 21 | #include <linux/mei.h> |
22 | #include <linux/pm_runtime.h> | ||
21 | 23 | ||
22 | #include "mei_dev.h" | 24 | #include "mei_dev.h" |
23 | #include "hbm.h" | 25 | #include "hbm.h" |
@@ -58,6 +60,34 @@ static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status) | |||
58 | } | 60 | } |
59 | 61 | ||
60 | /** | 62 | /** |
63 | * mei_hbm_idle - set hbm to idle state | ||
64 | * | ||
65 | * @dev: the device structure | ||
66 | */ | ||
67 | void mei_hbm_idle(struct mei_device *dev) | ||
68 | { | ||
69 | dev->init_clients_timer = 0; | ||
70 | dev->hbm_state = MEI_HBM_IDLE; | ||
71 | } | ||
72 | |||
73 | /** | ||
74 | * mei_hbm_reset - reset hbm counters and book keeping data structurs | ||
75 | * | ||
76 | * @dev: the device structure | ||
77 | */ | ||
78 | void mei_hbm_reset(struct mei_device *dev) | ||
79 | { | ||
80 | dev->me_clients_num = 0; | ||
81 | dev->me_client_presentation_num = 0; | ||
82 | dev->me_client_index = 0; | ||
83 | |||
84 | kfree(dev->me_clients); | ||
85 | dev->me_clients = NULL; | ||
86 | |||
87 | mei_hbm_idle(dev); | ||
88 | } | ||
89 | |||
90 | /** | ||
61 | * mei_hbm_me_cl_allocate - allocates storage for me clients | 91 | * mei_hbm_me_cl_allocate - allocates storage for me clients |
62 | * | 92 | * |
63 | * @dev: the device structure | 93 | * @dev: the device structure |
@@ -69,9 +99,7 @@ static int mei_hbm_me_cl_allocate(struct mei_device *dev) | |||
69 | struct mei_me_client *clients; | 99 | struct mei_me_client *clients; |
70 | int b; | 100 | int b; |
71 | 101 | ||
72 | dev->me_clients_num = 0; | 102 | mei_hbm_reset(dev); |
73 | dev->me_client_presentation_num = 0; | ||
74 | dev->me_client_index = 0; | ||
75 | 103 | ||
76 | /* count how many ME clients we have */ | 104 | /* count how many ME clients we have */ |
77 | for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) | 105 | for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) |
@@ -80,9 +108,6 @@ static int mei_hbm_me_cl_allocate(struct mei_device *dev) | |||
80 | if (dev->me_clients_num == 0) | 108 | if (dev->me_clients_num == 0) |
81 | return 0; | 109 | return 0; |
82 | 110 | ||
83 | kfree(dev->me_clients); | ||
84 | dev->me_clients = NULL; | ||
85 | |||
86 | dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%ld.\n", | 111 | dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%ld.\n", |
87 | dev->me_clients_num * sizeof(struct mei_me_client)); | 112 | dev->me_clients_num * sizeof(struct mei_me_client)); |
88 | /* allocate storage for ME clients representation */ | 113 | /* allocate storage for ME clients representation */ |
@@ -133,17 +158,6 @@ bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf) | |||
133 | } | 158 | } |
134 | 159 | ||
135 | 160 | ||
136 | /** | ||
137 | * mei_hbm_idle - set hbm to idle state | ||
138 | * | ||
139 | * @dev: the device structure | ||
140 | */ | ||
141 | void mei_hbm_idle(struct mei_device *dev) | ||
142 | { | ||
143 | dev->init_clients_timer = 0; | ||
144 | dev->hbm_state = MEI_HBM_IDLE; | ||
145 | } | ||
146 | |||
147 | int mei_hbm_start_wait(struct mei_device *dev) | 161 | int mei_hbm_start_wait(struct mei_device *dev) |
148 | { | 162 | { |
149 | int ret; | 163 | int ret; |
@@ -289,6 +303,34 @@ static int mei_hbm_prop_req(struct mei_device *dev) | |||
289 | return 0; | 303 | return 0; |
290 | } | 304 | } |
291 | 305 | ||
306 | /* | ||
307 | * mei_hbm_pg - sends pg command | ||
308 | * | ||
309 | * @dev: the device structure | ||
310 | * @pg_cmd: the pg command code | ||
311 | * | ||
312 | * This function returns -EIO on write failure | ||
313 | */ | ||
314 | int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) | ||
315 | { | ||
316 | struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; | ||
317 | struct hbm_power_gate *req; | ||
318 | const size_t len = sizeof(struct hbm_power_gate); | ||
319 | int ret; | ||
320 | |||
321 | mei_hbm_hdr(mei_hdr, len); | ||
322 | |||
323 | req = (struct hbm_power_gate *)dev->wr_msg.data; | ||
324 | memset(req, 0, len); | ||
325 | req->hbm_cmd = pg_cmd; | ||
326 | |||
327 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); | ||
328 | if (ret) | ||
329 | dev_err(&dev->pdev->dev, "power gate command write failed.\n"); | ||
330 | return ret; | ||
331 | } | ||
332 | EXPORT_SYMBOL_GPL(mei_hbm_pg); | ||
333 | |||
292 | /** | 334 | /** |
293 | * mei_hbm_stop_req - send stop request message | 335 | * mei_hbm_stop_req - send stop request message |
294 | * | 336 | * |
@@ -701,6 +743,27 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
701 | mei_hbm_cl_flow_control_res(dev, flow_control); | 743 | mei_hbm_cl_flow_control_res(dev, flow_control); |
702 | break; | 744 | break; |
703 | 745 | ||
746 | case MEI_PG_ISOLATION_ENTRY_RES_CMD: | ||
747 | dev_dbg(&dev->pdev->dev, "power gate isolation entry response received\n"); | ||
748 | dev->pg_event = MEI_PG_EVENT_RECEIVED; | ||
749 | if (waitqueue_active(&dev->wait_pg)) | ||
750 | wake_up(&dev->wait_pg); | ||
751 | break; | ||
752 | |||
753 | case MEI_PG_ISOLATION_EXIT_REQ_CMD: | ||
754 | dev_dbg(&dev->pdev->dev, "power gate isolation exit request received\n"); | ||
755 | dev->pg_event = MEI_PG_EVENT_RECEIVED; | ||
756 | if (waitqueue_active(&dev->wait_pg)) | ||
757 | wake_up(&dev->wait_pg); | ||
758 | else | ||
759 | /* | ||
760 | * If the driver is not waiting on this then | ||
761 | * this is HW initiated exit from PG. | ||
762 | * Start runtime pm resume sequence to exit from PG. | ||
763 | */ | ||
764 | pm_request_resume(&dev->pdev->dev); | ||
765 | break; | ||
766 | |||
704 | case HOST_CLIENT_PROPERTIES_RES_CMD: | 767 | case HOST_CLIENT_PROPERTIES_RES_CMD: |
705 | dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n"); | 768 | dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n"); |
706 | 769 | ||
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h index 20e8782711c0..683eb2835cec 100644 --- a/drivers/misc/mei/hbm.h +++ b/drivers/misc/mei/hbm.h | |||
@@ -50,6 +50,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) | |||
50 | } | 50 | } |
51 | 51 | ||
52 | void mei_hbm_idle(struct mei_device *dev); | 52 | void mei_hbm_idle(struct mei_device *dev); |
53 | void mei_hbm_reset(struct mei_device *dev); | ||
53 | int mei_hbm_start_req(struct mei_device *dev); | 54 | int mei_hbm_start_req(struct mei_device *dev); |
54 | int mei_hbm_start_wait(struct mei_device *dev); | 55 | int mei_hbm_start_wait(struct mei_device *dev); |
55 | int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); | 56 | int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); |
@@ -57,6 +58,7 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl); | |||
57 | int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl); | 58 | int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl); |
58 | int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl); | 59 | int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl); |
59 | bool mei_hbm_version_is_supported(struct mei_device *dev); | 60 | bool mei_hbm_version_is_supported(struct mei_device *dev); |
61 | int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd); | ||
60 | 62 | ||
61 | #endif /* _MEI_HBM_H_ */ | 63 | #endif /* _MEI_HBM_H_ */ |
62 | 64 | ||
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index cabc04383685..a7856c0ac576 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h | |||
@@ -133,6 +133,8 @@ | |||
133 | #define ME_CB_RW 8 | 133 | #define ME_CB_RW 8 |
134 | /* ME_CSR_HA - ME Control Status Host Access register (read only) */ | 134 | /* ME_CSR_HA - ME Control Status Host Access register (read only) */ |
135 | #define ME_CSR_HA 0xC | 135 | #define ME_CSR_HA 0xC |
136 | /* H_HGC_CSR - PGI register */ | ||
137 | #define H_HPG_CSR 0x10 | ||
136 | 138 | ||
137 | 139 | ||
138 | /* register bits of H_CSR (Host Control Status register) */ | 140 | /* register bits of H_CSR (Host Control Status register) */ |
@@ -162,6 +164,8 @@ access to ME_CBD */ | |||
162 | #define ME_CBWP_HRA 0x00FF0000 | 164 | #define ME_CBWP_HRA 0x00FF0000 |
163 | /* ME CB Read Pointer HRA - host read only access to ME_CBRP */ | 165 | /* ME CB Read Pointer HRA - host read only access to ME_CBRP */ |
164 | #define ME_CBRP_HRA 0x0000FF00 | 166 | #define ME_CBRP_HRA 0x0000FF00 |
167 | /* ME Power Gate Isolation Capability HRA - host ready only access */ | ||
168 | #define ME_PGIC_HRA 0x00000040 | ||
165 | /* ME Reset HRA - host read only access to ME_RST */ | 169 | /* ME Reset HRA - host read only access to ME_RST */ |
166 | #define ME_RST_HRA 0x00000010 | 170 | #define ME_RST_HRA 0x00000010 |
167 | /* ME Ready HRA - host read only access to ME_RDY */ | 171 | /* ME Ready HRA - host read only access to ME_RDY */ |
@@ -173,4 +177,9 @@ access to ME_CBD */ | |||
173 | /* ME Interrupt Enable HRA - host read only access to ME_IE */ | 177 | /* ME Interrupt Enable HRA - host read only access to ME_IE */ |
174 | #define ME_IE_HRA 0x00000001 | 178 | #define ME_IE_HRA 0x00000001 |
175 | 179 | ||
180 | |||
181 | /* register bits - H_HPG_CSR */ | ||
182 | #define H_HPG_CSR_PGIHEXR 0x00000001 | ||
183 | #define H_HPG_CSR_PGI 0x00000002 | ||
184 | |||
176 | #endif /* _MEI_HW_MEI_REGS_H_ */ | 185 | #endif /* _MEI_HW_MEI_REGS_H_ */ |
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 8dbdaaef1af5..6a2d272cea43 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c | |||
@@ -109,10 +109,27 @@ static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr) | |||
109 | */ | 109 | */ |
110 | static void mei_me_hw_config(struct mei_device *dev) | 110 | static void mei_me_hw_config(struct mei_device *dev) |
111 | { | 111 | { |
112 | struct mei_me_hw *hw = to_me_hw(dev); | ||
112 | u32 hcsr = mei_hcsr_read(to_me_hw(dev)); | 113 | u32 hcsr = mei_hcsr_read(to_me_hw(dev)); |
113 | /* Doesn't change in runtime */ | 114 | /* Doesn't change in runtime */ |
114 | dev->hbuf_depth = (hcsr & H_CBD) >> 24; | 115 | dev->hbuf_depth = (hcsr & H_CBD) >> 24; |
116 | |||
117 | hw->pg_state = MEI_PG_OFF; | ||
118 | } | ||
119 | |||
120 | /** | ||
121 | * mei_me_pg_state - translate internal pg state | ||
122 | * to the mei power gating state | ||
123 | * | ||
124 | * @hw - me hardware | ||
125 | * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise | ||
126 | */ | ||
127 | static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev) | ||
128 | { | ||
129 | struct mei_me_hw *hw = to_me_hw(dev); | ||
130 | return hw->pg_state; | ||
115 | } | 131 | } |
132 | |||
116 | /** | 133 | /** |
117 | * mei_clear_interrupts - clear and stop interrupts | 134 | * mei_clear_interrupts - clear and stop interrupts |
118 | * | 135 | * |
@@ -164,6 +181,9 @@ static void mei_me_hw_reset_release(struct mei_device *dev) | |||
164 | hcsr |= H_IG; | 181 | hcsr |= H_IG; |
165 | hcsr &= ~H_RST; | 182 | hcsr &= ~H_RST; |
166 | mei_hcsr_set(hw, hcsr); | 183 | mei_hcsr_set(hw, hcsr); |
184 | |||
185 | /* complete this write before we set host ready on another CPU */ | ||
186 | mmiowb(); | ||
167 | } | 187 | } |
168 | /** | 188 | /** |
169 | * mei_me_hw_reset - resets fw via mei csr register. | 189 | * mei_me_hw_reset - resets fw via mei csr register. |
@@ -183,8 +203,21 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) | |||
183 | else | 203 | else |
184 | hcsr &= ~H_IE; | 204 | hcsr &= ~H_IE; |
185 | 205 | ||
206 | dev->recvd_hw_ready = false; | ||
186 | mei_me_reg_write(hw, H_CSR, hcsr); | 207 | mei_me_reg_write(hw, H_CSR, hcsr); |
187 | 208 | ||
209 | /* | ||
210 | * Host reads the H_CSR once to ensure that the | ||
211 | * posted write to H_CSR completes. | ||
212 | */ | ||
213 | hcsr = mei_hcsr_read(hw); | ||
214 | |||
215 | if ((hcsr & H_RST) == 0) | ||
216 | dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr); | ||
217 | |||
218 | if ((hcsr & H_RDY) == H_RDY) | ||
219 | dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr); | ||
220 | |||
188 | if (intr_enable == false) | 221 | if (intr_enable == false) |
189 | mei_me_hw_reset_release(dev); | 222 | mei_me_hw_reset_release(dev); |
190 | 223 | ||
@@ -201,6 +234,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) | |||
201 | static void mei_me_host_set_ready(struct mei_device *dev) | 234 | static void mei_me_host_set_ready(struct mei_device *dev) |
202 | { | 235 | { |
203 | struct mei_me_hw *hw = to_me_hw(dev); | 236 | struct mei_me_hw *hw = to_me_hw(dev); |
237 | hw->host_hw_state = mei_hcsr_read(hw); | ||
204 | hw->host_hw_state |= H_IE | H_IG | H_RDY; | 238 | hw->host_hw_state |= H_IE | H_IG | H_RDY; |
205 | mei_hcsr_set(hw, hw->host_hw_state); | 239 | mei_hcsr_set(hw, hw->host_hw_state); |
206 | } | 240 | } |
@@ -233,10 +267,7 @@ static bool mei_me_hw_is_ready(struct mei_device *dev) | |||
233 | static int mei_me_hw_ready_wait(struct mei_device *dev) | 267 | static int mei_me_hw_ready_wait(struct mei_device *dev) |
234 | { | 268 | { |
235 | int err; | 269 | int err; |
236 | if (mei_me_hw_is_ready(dev)) | ||
237 | return 0; | ||
238 | 270 | ||
239 | dev->recvd_hw_ready = false; | ||
240 | mutex_unlock(&dev->device_lock); | 271 | mutex_unlock(&dev->device_lock); |
241 | err = wait_event_interruptible_timeout(dev->wait_hw_ready, | 272 | err = wait_event_interruptible_timeout(dev->wait_hw_ready, |
242 | dev->recvd_hw_ready, | 273 | dev->recvd_hw_ready, |
@@ -431,6 +462,144 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, | |||
431 | } | 462 | } |
432 | 463 | ||
433 | /** | 464 | /** |
465 | * mei_me_pg_enter - write pg enter register to mei device. | ||
466 | * | ||
467 | * @dev: the device structure | ||
468 | */ | ||
469 | static void mei_me_pg_enter(struct mei_device *dev) | ||
470 | { | ||
471 | struct mei_me_hw *hw = to_me_hw(dev); | ||
472 | u32 reg = mei_me_reg_read(hw, H_HPG_CSR); | ||
473 | reg |= H_HPG_CSR_PGI; | ||
474 | mei_me_reg_write(hw, H_HPG_CSR, reg); | ||
475 | } | ||
476 | |||
477 | /** | ||
478 | * mei_me_pg_enter - write pg enter register to mei device. | ||
479 | * | ||
480 | * @dev: the device structure | ||
481 | */ | ||
482 | static void mei_me_pg_exit(struct mei_device *dev) | ||
483 | { | ||
484 | struct mei_me_hw *hw = to_me_hw(dev); | ||
485 | u32 reg = mei_me_reg_read(hw, H_HPG_CSR); | ||
486 | |||
487 | WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n"); | ||
488 | |||
489 | reg |= H_HPG_CSR_PGIHEXR; | ||
490 | mei_me_reg_write(hw, H_HPG_CSR, reg); | ||
491 | } | ||
492 | |||
493 | /** | ||
494 | * mei_me_pg_set_sync - perform pg entry procedure | ||
495 | * | ||
496 | * @dev: the device structure | ||
497 | * | ||
498 | * returns 0 on success an error code otherwise | ||
499 | */ | ||
500 | int mei_me_pg_set_sync(struct mei_device *dev) | ||
501 | { | ||
502 | struct mei_me_hw *hw = to_me_hw(dev); | ||
503 | unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); | ||
504 | int ret; | ||
505 | |||
506 | dev->pg_event = MEI_PG_EVENT_WAIT; | ||
507 | |||
508 | ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD); | ||
509 | if (ret) | ||
510 | return ret; | ||
511 | |||
512 | mutex_unlock(&dev->device_lock); | ||
513 | wait_event_timeout(dev->wait_pg, | ||
514 | dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); | ||
515 | mutex_lock(&dev->device_lock); | ||
516 | |||
517 | if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { | ||
518 | mei_me_pg_enter(dev); | ||
519 | ret = 0; | ||
520 | } else { | ||
521 | ret = -ETIME; | ||
522 | } | ||
523 | |||
524 | dev->pg_event = MEI_PG_EVENT_IDLE; | ||
525 | hw->pg_state = MEI_PG_ON; | ||
526 | |||
527 | return ret; | ||
528 | } | ||
529 | |||
530 | /** | ||
531 | * mei_me_pg_unset_sync - perform pg exit procedure | ||
532 | * | ||
533 | * @dev: the device structure | ||
534 | * | ||
535 | * returns 0 on success an error code otherwise | ||
536 | */ | ||
537 | int mei_me_pg_unset_sync(struct mei_device *dev) | ||
538 | { | ||
539 | struct mei_me_hw *hw = to_me_hw(dev); | ||
540 | unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); | ||
541 | int ret; | ||
542 | |||
543 | if (dev->pg_event == MEI_PG_EVENT_RECEIVED) | ||
544 | goto reply; | ||
545 | |||
546 | dev->pg_event = MEI_PG_EVENT_WAIT; | ||
547 | |||
548 | mei_me_pg_exit(dev); | ||
549 | |||
550 | mutex_unlock(&dev->device_lock); | ||
551 | wait_event_timeout(dev->wait_pg, | ||
552 | dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); | ||
553 | mutex_lock(&dev->device_lock); | ||
554 | |||
555 | reply: | ||
556 | if (dev->pg_event == MEI_PG_EVENT_RECEIVED) | ||
557 | ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD); | ||
558 | else | ||
559 | ret = -ETIME; | ||
560 | |||
561 | dev->pg_event = MEI_PG_EVENT_IDLE; | ||
562 | hw->pg_state = MEI_PG_OFF; | ||
563 | |||
564 | return ret; | ||
565 | } | ||
566 | |||
567 | /** | ||
568 | * mei_me_pg_is_enabled - detect if PG is supported by HW | ||
569 | * | ||
570 | * @dev: the device structure | ||
571 | * | ||
572 | * returns: true is pg supported, false otherwise | ||
573 | */ | ||
574 | static bool mei_me_pg_is_enabled(struct mei_device *dev) | ||
575 | { | ||
576 | struct mei_me_hw *hw = to_me_hw(dev); | ||
577 | u32 reg = mei_me_reg_read(hw, ME_CSR_HA); | ||
578 | |||
579 | if ((reg & ME_PGIC_HRA) == 0) | ||
580 | goto notsupported; | ||
581 | |||
582 | if (dev->version.major_version < HBM_MAJOR_VERSION_PGI) | ||
583 | goto notsupported; | ||
584 | |||
585 | if (dev->version.major_version == HBM_MAJOR_VERSION_PGI && | ||
586 | dev->version.minor_version < HBM_MINOR_VERSION_PGI) | ||
587 | goto notsupported; | ||
588 | |||
589 | return true; | ||
590 | |||
591 | notsupported: | ||
592 | dev_dbg(&dev->pdev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n", | ||
593 | !!(reg & ME_PGIC_HRA), | ||
594 | dev->version.major_version, | ||
595 | dev->version.minor_version, | ||
596 | HBM_MAJOR_VERSION_PGI, | ||
597 | HBM_MINOR_VERSION_PGI); | ||
598 | |||
599 | return false; | ||
600 | } | ||
601 | |||
602 | /** | ||
434 | * mei_me_irq_quick_handler - The ISR of the MEI device | 603 | * mei_me_irq_quick_handler - The ISR of the MEI device |
435 | * | 604 | * |
436 | * @irq: The irq number | 605 | * @irq: The irq number |
@@ -491,14 +660,13 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | |||
491 | /* check if we need to start the dev */ | 660 | /* check if we need to start the dev */ |
492 | if (!mei_host_is_ready(dev)) { | 661 | if (!mei_host_is_ready(dev)) { |
493 | if (mei_hw_is_ready(dev)) { | 662 | if (mei_hw_is_ready(dev)) { |
663 | mei_me_hw_reset_release(dev); | ||
494 | dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); | 664 | dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); |
495 | 665 | ||
496 | dev->recvd_hw_ready = true; | 666 | dev->recvd_hw_ready = true; |
497 | wake_up_interruptible(&dev->wait_hw_ready); | 667 | wake_up_interruptible(&dev->wait_hw_ready); |
498 | } else { | 668 | } else { |
499 | 669 | dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n"); | |
500 | dev_dbg(&dev->pdev->dev, "Reset Completed.\n"); | ||
501 | mei_me_hw_reset_release(dev); | ||
502 | } | 670 | } |
503 | goto end; | 671 | goto end; |
504 | } | 672 | } |
@@ -524,9 +692,15 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | |||
524 | 692 | ||
525 | dev->hbuf_is_ready = mei_hbuf_is_ready(dev); | 693 | dev->hbuf_is_ready = mei_hbuf_is_ready(dev); |
526 | 694 | ||
527 | rets = mei_irq_write_handler(dev, &complete_list); | 695 | /* |
528 | 696 | * During PG handshake only allowed write is the replay to the | |
529 | dev->hbuf_is_ready = mei_hbuf_is_ready(dev); | 697 | * PG exit message, so block calling write function |
698 | * if the pg state is not idle | ||
699 | */ | ||
700 | if (dev->pg_event == MEI_PG_EVENT_IDLE) { | ||
701 | rets = mei_irq_write_handler(dev, &complete_list); | ||
702 | dev->hbuf_is_ready = mei_hbuf_is_ready(dev); | ||
703 | } | ||
530 | 704 | ||
531 | mei_irq_compl_handler(dev, &complete_list); | 705 | mei_irq_compl_handler(dev, &complete_list); |
532 | 706 | ||
@@ -535,8 +709,65 @@ end: | |||
535 | mutex_unlock(&dev->device_lock); | 709 | mutex_unlock(&dev->device_lock); |
536 | return IRQ_HANDLED; | 710 | return IRQ_HANDLED; |
537 | } | 711 | } |
712 | |||
713 | /** | ||
714 | * mei_me_fw_status - retrieve fw status from the pci config space | ||
715 | * | ||
716 | * @dev: the device structure | ||
717 | * @fw_status: fw status registers storage | ||
718 | * | ||
719 | * returns 0 on success an error code otherwise | ||
720 | */ | ||
721 | static int mei_me_fw_status(struct mei_device *dev, | ||
722 | struct mei_fw_status *fw_status) | ||
723 | { | ||
724 | const u32 pci_cfg_reg[] = {PCI_CFG_HFS_1, PCI_CFG_HFS_2}; | ||
725 | int i; | ||
726 | |||
727 | if (!fw_status) | ||
728 | return -EINVAL; | ||
729 | |||
730 | switch (dev->pdev->device) { | ||
731 | case MEI_DEV_ID_IBXPK_1: | ||
732 | case MEI_DEV_ID_IBXPK_2: | ||
733 | case MEI_DEV_ID_CPT_1: | ||
734 | case MEI_DEV_ID_PBG_1: | ||
735 | case MEI_DEV_ID_PPT_1: | ||
736 | case MEI_DEV_ID_PPT_2: | ||
737 | case MEI_DEV_ID_PPT_3: | ||
738 | case MEI_DEV_ID_LPT_H: | ||
739 | case MEI_DEV_ID_LPT_W: | ||
740 | case MEI_DEV_ID_LPT_LP: | ||
741 | case MEI_DEV_ID_LPT_HR: | ||
742 | case MEI_DEV_ID_WPT_LP: | ||
743 | fw_status->count = 2; | ||
744 | break; | ||
745 | case MEI_DEV_ID_ICH10_1: | ||
746 | case MEI_DEV_ID_ICH10_2: | ||
747 | case MEI_DEV_ID_ICH10_3: | ||
748 | case MEI_DEV_ID_ICH10_4: | ||
749 | fw_status->count = 1; | ||
750 | break; | ||
751 | default: | ||
752 | fw_status->count = 0; | ||
753 | break; | ||
754 | } | ||
755 | |||
756 | for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) { | ||
757 | int ret; | ||
758 | ret = pci_read_config_dword(dev->pdev, | ||
759 | pci_cfg_reg[i], &fw_status->status[i]); | ||
760 | if (ret) | ||
761 | return ret; | ||
762 | } | ||
763 | return 0; | ||
764 | } | ||
765 | |||
538 | static const struct mei_hw_ops mei_me_hw_ops = { | 766 | static const struct mei_hw_ops mei_me_hw_ops = { |
539 | 767 | ||
768 | .pg_state = mei_me_pg_state, | ||
769 | |||
770 | .fw_status = mei_me_fw_status, | ||
540 | .host_is_ready = mei_me_host_is_ready, | 771 | .host_is_ready = mei_me_host_is_ready, |
541 | 772 | ||
542 | .hw_is_ready = mei_me_hw_is_ready, | 773 | .hw_is_ready = mei_me_hw_is_ready, |
@@ -544,6 +775,8 @@ static const struct mei_hw_ops mei_me_hw_ops = { | |||
544 | .hw_config = mei_me_hw_config, | 775 | .hw_config = mei_me_hw_config, |
545 | .hw_start = mei_me_hw_start, | 776 | .hw_start = mei_me_hw_start, |
546 | 777 | ||
778 | .pg_is_enabled = mei_me_pg_is_enabled, | ||
779 | |||
547 | .intr_clear = mei_me_intr_clear, | 780 | .intr_clear = mei_me_intr_clear, |
548 | .intr_enable = mei_me_intr_enable, | 781 | .intr_enable = mei_me_intr_enable, |
549 | .intr_disable = mei_me_intr_disable, | 782 | .intr_disable = mei_me_intr_disable, |
@@ -559,14 +792,81 @@ static const struct mei_hw_ops mei_me_hw_ops = { | |||
559 | .read = mei_me_read_slots | 792 | .read = mei_me_read_slots |
560 | }; | 793 | }; |
561 | 794 | ||
795 | static bool mei_me_fw_type_nm(struct pci_dev *pdev) | ||
796 | { | ||
797 | u32 reg; | ||
798 | pci_read_config_dword(pdev, PCI_CFG_HFS_2, ®); | ||
799 | /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ | ||
800 | return (reg & 0x600) == 0x200; | ||
801 | } | ||
802 | |||
803 | #define MEI_CFG_FW_NM \ | ||
804 | .quirk_probe = mei_me_fw_type_nm | ||
805 | |||
806 | static bool mei_me_fw_type_sps(struct pci_dev *pdev) | ||
807 | { | ||
808 | u32 reg; | ||
809 | /* Read ME FW Status check for SPS Firmware */ | ||
810 | pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); | ||
811 | /* if bits [19:16] = 15, running SPS Firmware */ | ||
812 | return (reg & 0xf0000) == 0xf0000; | ||
813 | } | ||
814 | |||
815 | #define MEI_CFG_FW_SPS \ | ||
816 | .quirk_probe = mei_me_fw_type_sps | ||
817 | |||
818 | |||
819 | #define MEI_CFG_LEGACY_HFS \ | ||
820 | .fw_status.count = 0 | ||
821 | |||
822 | #define MEI_CFG_ICH_HFS \ | ||
823 | .fw_status.count = 1, \ | ||
824 | .fw_status.status[0] = PCI_CFG_HFS_1 | ||
825 | |||
826 | #define MEI_CFG_PCH_HFS \ | ||
827 | .fw_status.count = 2, \ | ||
828 | .fw_status.status[0] = PCI_CFG_HFS_1, \ | ||
829 | .fw_status.status[1] = PCI_CFG_HFS_2 | ||
830 | |||
831 | |||
832 | /* ICH Legacy devices */ | ||
833 | const struct mei_cfg mei_me_legacy_cfg = { | ||
834 | MEI_CFG_LEGACY_HFS, | ||
835 | }; | ||
836 | |||
837 | /* ICH devices */ | ||
838 | const struct mei_cfg mei_me_ich_cfg = { | ||
839 | MEI_CFG_ICH_HFS, | ||
840 | }; | ||
841 | |||
842 | /* PCH devices */ | ||
843 | const struct mei_cfg mei_me_pch_cfg = { | ||
844 | MEI_CFG_PCH_HFS, | ||
845 | }; | ||
846 | |||
847 | |||
848 | /* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */ | ||
849 | const struct mei_cfg mei_me_pch_cpt_pbg_cfg = { | ||
850 | MEI_CFG_PCH_HFS, | ||
851 | MEI_CFG_FW_NM, | ||
852 | }; | ||
853 | |||
854 | /* PCH Lynx Point with quirk for SPS Firmware exclusion */ | ||
855 | const struct mei_cfg mei_me_lpt_cfg = { | ||
856 | MEI_CFG_PCH_HFS, | ||
857 | MEI_CFG_FW_SPS, | ||
858 | }; | ||
859 | |||
562 | /** | 860 | /** |
563 | * mei_me_dev_init - allocates and initializes the mei device structure | 861 | * mei_me_dev_init - allocates and initializes the mei device structure |
564 | * | 862 | * |
565 | * @pdev: The pci device structure | 863 | * @pdev: The pci device structure |
864 | * @cfg: per device generation config | ||
566 | * | 865 | * |
567 | * returns The mei_device_device pointer on success, NULL on failure. | 866 | * returns The mei_device_device pointer on success, NULL on failure. |
568 | */ | 867 | */ |
569 | struct mei_device *mei_me_dev_init(struct pci_dev *pdev) | 868 | struct mei_device *mei_me_dev_init(struct pci_dev *pdev, |
869 | const struct mei_cfg *cfg) | ||
570 | { | 870 | { |
571 | struct mei_device *dev; | 871 | struct mei_device *dev; |
572 | 872 | ||
@@ -575,7 +875,7 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev) | |||
575 | if (!dev) | 875 | if (!dev) |
576 | return NULL; | 876 | return NULL; |
577 | 877 | ||
578 | mei_device_init(dev); | 878 | mei_device_init(dev, cfg); |
579 | 879 | ||
580 | dev->ops = &mei_me_hw_ops; | 880 | dev->ops = &mei_me_hw_ops; |
581 | 881 | ||
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index 893d5119fa9b..12b0f4bbe1f1 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #include "mei_dev.h" | 24 | #include "mei_dev.h" |
25 | #include "client.h" | 25 | #include "client.h" |
26 | 26 | ||
27 | #define MEI_ME_RPM_TIMEOUT 500 /* ms */ | ||
28 | |||
27 | struct mei_me_hw { | 29 | struct mei_me_hw { |
28 | void __iomem *mem_addr; | 30 | void __iomem *mem_addr; |
29 | /* | 31 | /* |
@@ -31,11 +33,22 @@ struct mei_me_hw { | |||
31 | */ | 33 | */ |
32 | u32 host_hw_state; | 34 | u32 host_hw_state; |
33 | u32 me_hw_state; | 35 | u32 me_hw_state; |
36 | enum mei_pg_state pg_state; | ||
34 | }; | 37 | }; |
35 | 38 | ||
36 | #define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) | 39 | #define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) |
37 | 40 | ||
38 | struct mei_device *mei_me_dev_init(struct pci_dev *pdev); | 41 | extern const struct mei_cfg mei_me_legacy_cfg; |
42 | extern const struct mei_cfg mei_me_ich_cfg; | ||
43 | extern const struct mei_cfg mei_me_pch_cfg; | ||
44 | extern const struct mei_cfg mei_me_pch_cpt_pbg_cfg; | ||
45 | extern const struct mei_cfg mei_me_lpt_cfg; | ||
46 | |||
47 | struct mei_device *mei_me_dev_init(struct pci_dev *pdev, | ||
48 | const struct mei_cfg *cfg); | ||
49 | |||
50 | int mei_me_pg_set_sync(struct mei_device *dev); | ||
51 | int mei_me_pg_unset_sync(struct mei_device *dev); | ||
39 | 52 | ||
40 | irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id); | 53 | irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id); |
41 | irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); | 54 | irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); |
diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h index 7283c24c1af1..f19229c4e655 100644 --- a/drivers/misc/mei/hw-txe-regs.h +++ b/drivers/misc/mei/hw-txe-regs.h | |||
@@ -89,7 +89,7 @@ enum { | |||
89 | # define PCI_CFG_TXE_FW_STS0_ERR_CODE_MSK 0x0000F000 | 89 | # define PCI_CFG_TXE_FW_STS0_ERR_CODE_MSK 0x0000F000 |
90 | # define PCI_CFG_TXE_FW_STS0_OP_MODE_MSK 0x000F0000 | 90 | # define PCI_CFG_TXE_FW_STS0_OP_MODE_MSK 0x000F0000 |
91 | # define PCI_CFG_TXE_FW_STS0_RST_CNT_MSK 0x00F00000 | 91 | # define PCI_CFG_TXE_FW_STS0_RST_CNT_MSK 0x00F00000 |
92 | 92 | #define PCI_CFG_TXE_FW_STS1 0x48 | |
93 | 93 | ||
94 | #define IPC_BASE_ADDR 0x80400 /* SeC IPC Base Address */ | 94 | #define IPC_BASE_ADDR 0x80400 /* SeC IPC Base Address */ |
95 | 95 | ||
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c index f60182a52f96..93273783dec5 100644 --- a/drivers/misc/mei/hw-txe.c +++ b/drivers/misc/mei/hw-txe.c | |||
@@ -158,7 +158,7 @@ static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req) | |||
158 | dev_dbg(&dev->pdev->dev, "Aliveness current=%d request=%d\n", | 158 | dev_dbg(&dev->pdev->dev, "Aliveness current=%d request=%d\n", |
159 | hw->aliveness, req); | 159 | hw->aliveness, req); |
160 | if (do_req) { | 160 | if (do_req) { |
161 | hw->recvd_aliveness = false; | 161 | dev->pg_event = MEI_PG_EVENT_WAIT; |
162 | mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req); | 162 | mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req); |
163 | } | 163 | } |
164 | return do_req; | 164 | return do_req; |
@@ -213,6 +213,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) | |||
213 | do { | 213 | do { |
214 | hw->aliveness = mei_txe_aliveness_get(dev); | 214 | hw->aliveness = mei_txe_aliveness_get(dev); |
215 | if (hw->aliveness == expected) { | 215 | if (hw->aliveness == expected) { |
216 | dev->pg_event = MEI_PG_EVENT_IDLE; | ||
216 | dev_dbg(&dev->pdev->dev, | 217 | dev_dbg(&dev->pdev->dev, |
217 | "aliveness settled after %d msecs\n", t); | 218 | "aliveness settled after %d msecs\n", t); |
218 | return t; | 219 | return t; |
@@ -223,6 +224,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) | |||
223 | t += MSEC_PER_SEC / 5; | 224 | t += MSEC_PER_SEC / 5; |
224 | } while (t < SEC_ALIVENESS_WAIT_TIMEOUT); | 225 | } while (t < SEC_ALIVENESS_WAIT_TIMEOUT); |
225 | 226 | ||
227 | dev->pg_event = MEI_PG_EVENT_IDLE; | ||
226 | dev_err(&dev->pdev->dev, "aliveness timed out\n"); | 228 | dev_err(&dev->pdev->dev, "aliveness timed out\n"); |
227 | return -ETIME; | 229 | return -ETIME; |
228 | } | 230 | } |
@@ -249,19 +251,22 @@ static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected) | |||
249 | return 0; | 251 | return 0; |
250 | 252 | ||
251 | mutex_unlock(&dev->device_lock); | 253 | mutex_unlock(&dev->device_lock); |
252 | err = wait_event_timeout(hw->wait_aliveness, | 254 | err = wait_event_timeout(hw->wait_aliveness_resp, |
253 | hw->recvd_aliveness, timeout); | 255 | dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); |
254 | mutex_lock(&dev->device_lock); | 256 | mutex_lock(&dev->device_lock); |
255 | 257 | ||
256 | hw->aliveness = mei_txe_aliveness_get(dev); | 258 | hw->aliveness = mei_txe_aliveness_get(dev); |
257 | ret = hw->aliveness == expected ? 0 : -ETIME; | 259 | ret = hw->aliveness == expected ? 0 : -ETIME; |
258 | 260 | ||
259 | if (ret) | 261 | if (ret) |
260 | dev_err(&dev->pdev->dev, "aliveness timed out"); | 262 | dev_warn(&dev->pdev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n", |
263 | err, hw->aliveness, dev->pg_event); | ||
261 | else | 264 | else |
262 | dev_dbg(&dev->pdev->dev, "aliveness settled after %d msecs\n", | 265 | dev_dbg(&dev->pdev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n", |
263 | jiffies_to_msecs(timeout - err)); | 266 | jiffies_to_msecs(timeout - err), |
264 | hw->recvd_aliveness = false; | 267 | hw->aliveness, dev->pg_event); |
268 | |||
269 | dev->pg_event = MEI_PG_EVENT_IDLE; | ||
265 | return ret; | 270 | return ret; |
266 | } | 271 | } |
267 | 272 | ||
@@ -280,6 +285,32 @@ int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req) | |||
280 | } | 285 | } |
281 | 286 | ||
282 | /** | 287 | /** |
288 | * mei_txe_pg_is_enabled - detect if PG is supported by HW | ||
289 | * | ||
290 | * @dev: the device structure | ||
291 | * | ||
292 | * returns: true is pg supported, false otherwise | ||
293 | */ | ||
294 | static bool mei_txe_pg_is_enabled(struct mei_device *dev) | ||
295 | { | ||
296 | return true; | ||
297 | } | ||
298 | |||
299 | /** | ||
300 | * mei_txe_pg_state - translate aliveness register value | ||
301 | * to the mei power gating state | ||
302 | * | ||
303 | * @dev: the device structure | ||
304 | * | ||
305 | * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise | ||
306 | */ | ||
307 | static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev) | ||
308 | { | ||
309 | struct mei_txe_hw *hw = to_txe_hw(dev); | ||
310 | return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON; | ||
311 | } | ||
312 | |||
313 | /** | ||
283 | * mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt | 314 | * mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt |
284 | * | 315 | * |
285 | * @dev: the device structure | 316 | * @dev: the device structure |
@@ -589,7 +620,10 @@ static int mei_txe_write(struct mei_device *dev, | |||
589 | mei_txe_input_ready_interrupt_enable(dev); | 620 | mei_txe_input_ready_interrupt_enable(dev); |
590 | 621 | ||
591 | if (!mei_txe_is_input_ready(dev)) { | 622 | if (!mei_txe_is_input_ready(dev)) { |
592 | dev_err(&dev->pdev->dev, "Input is not ready"); | 623 | struct mei_fw_status fw_status; |
624 | mei_fw_status(dev, &fw_status); | ||
625 | dev_err(&dev->pdev->dev, "Input is not ready " FW_STS_FMT "\n", | ||
626 | FW_STS_PRM(fw_status)); | ||
593 | return -EAGAIN; | 627 | return -EAGAIN; |
594 | } | 628 | } |
595 | 629 | ||
@@ -960,9 +994,9 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) | |||
960 | /* Clear the interrupt cause */ | 994 | /* Clear the interrupt cause */ |
961 | dev_dbg(&dev->pdev->dev, | 995 | dev_dbg(&dev->pdev->dev, |
962 | "Aliveness Interrupt: Status: %d\n", hw->aliveness); | 996 | "Aliveness Interrupt: Status: %d\n", hw->aliveness); |
963 | hw->recvd_aliveness = true; | 997 | dev->pg_event = MEI_PG_EVENT_RECEIVED; |
964 | if (waitqueue_active(&hw->wait_aliveness)) | 998 | if (waitqueue_active(&hw->wait_aliveness_resp)) |
965 | wake_up(&hw->wait_aliveness); | 999 | wake_up(&hw->wait_aliveness_resp); |
966 | } | 1000 | } |
967 | 1001 | ||
968 | 1002 | ||
@@ -1008,15 +1042,51 @@ end: | |||
1008 | return IRQ_HANDLED; | 1042 | return IRQ_HANDLED; |
1009 | } | 1043 | } |
1010 | 1044 | ||
1045 | |||
1046 | /** | ||
1047 | * mei_txe_fw_status - retrieve fw status from the pci config space | ||
1048 | * | ||
1049 | * @dev: the device structure | ||
1050 | * @fw_status: fw status registers storage | ||
1051 | * | ||
1052 | * returns: 0 on success an error code otherwise | ||
1053 | */ | ||
1054 | static int mei_txe_fw_status(struct mei_device *dev, | ||
1055 | struct mei_fw_status *fw_status) | ||
1056 | { | ||
1057 | const u32 pci_cfg_reg[] = {PCI_CFG_TXE_FW_STS0, PCI_CFG_TXE_FW_STS1}; | ||
1058 | int i; | ||
1059 | |||
1060 | if (!fw_status) | ||
1061 | return -EINVAL; | ||
1062 | |||
1063 | fw_status->count = 2; | ||
1064 | |||
1065 | for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) { | ||
1066 | int ret; | ||
1067 | ret = pci_read_config_dword(dev->pdev, | ||
1068 | pci_cfg_reg[i], &fw_status->status[i]); | ||
1069 | if (ret) | ||
1070 | return ret; | ||
1071 | } | ||
1072 | |||
1073 | return 0; | ||
1074 | } | ||
1075 | |||
1011 | static const struct mei_hw_ops mei_txe_hw_ops = { | 1076 | static const struct mei_hw_ops mei_txe_hw_ops = { |
1012 | 1077 | ||
1078 | .fw_status = mei_txe_fw_status, | ||
1013 | .host_is_ready = mei_txe_host_is_ready, | 1079 | .host_is_ready = mei_txe_host_is_ready, |
1014 | 1080 | ||
1081 | .pg_state = mei_txe_pg_state, | ||
1082 | |||
1015 | .hw_is_ready = mei_txe_hw_is_ready, | 1083 | .hw_is_ready = mei_txe_hw_is_ready, |
1016 | .hw_reset = mei_txe_hw_reset, | 1084 | .hw_reset = mei_txe_hw_reset, |
1017 | .hw_config = mei_txe_hw_config, | 1085 | .hw_config = mei_txe_hw_config, |
1018 | .hw_start = mei_txe_hw_start, | 1086 | .hw_start = mei_txe_hw_start, |
1019 | 1087 | ||
1088 | .pg_is_enabled = mei_txe_pg_is_enabled, | ||
1089 | |||
1020 | .intr_clear = mei_txe_intr_clear, | 1090 | .intr_clear = mei_txe_intr_clear, |
1021 | .intr_enable = mei_txe_intr_enable, | 1091 | .intr_enable = mei_txe_intr_enable, |
1022 | .intr_disable = mei_txe_intr_disable, | 1092 | .intr_disable = mei_txe_intr_disable, |
@@ -1034,14 +1104,27 @@ static const struct mei_hw_ops mei_txe_hw_ops = { | |||
1034 | 1104 | ||
1035 | }; | 1105 | }; |
1036 | 1106 | ||
1107 | #define MEI_CFG_TXE_FW_STS \ | ||
1108 | .fw_status.count = 2, \ | ||
1109 | .fw_status.status[0] = PCI_CFG_TXE_FW_STS0, \ | ||
1110 | .fw_status.status[1] = PCI_CFG_TXE_FW_STS1 | ||
1111 | |||
1112 | const struct mei_cfg mei_txe_cfg = { | ||
1113 | MEI_CFG_TXE_FW_STS, | ||
1114 | }; | ||
1115 | |||
1116 | |||
1037 | /** | 1117 | /** |
1038 | * mei_txe_dev_init - allocates and initializes txe hardware specific structure | 1118 | * mei_txe_dev_init - allocates and initializes txe hardware specific structure |
1039 | * | 1119 | * |
1040 | * @pdev - pci device | 1120 | * @pdev - pci device |
1121 | * @cfg - per device generation config | ||
1122 | * | ||
1041 | * returns struct mei_device * on success or NULL; | 1123 | * returns struct mei_device * on success or NULL; |
1042 | * | 1124 | * |
1043 | */ | 1125 | */ |
1044 | struct mei_device *mei_txe_dev_init(struct pci_dev *pdev) | 1126 | struct mei_device *mei_txe_dev_init(struct pci_dev *pdev, |
1127 | const struct mei_cfg *cfg) | ||
1045 | { | 1128 | { |
1046 | struct mei_device *dev; | 1129 | struct mei_device *dev; |
1047 | struct mei_txe_hw *hw; | 1130 | struct mei_txe_hw *hw; |
@@ -1051,11 +1134,11 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev) | |||
1051 | if (!dev) | 1134 | if (!dev) |
1052 | return NULL; | 1135 | return NULL; |
1053 | 1136 | ||
1054 | mei_device_init(dev); | 1137 | mei_device_init(dev, cfg); |
1055 | 1138 | ||
1056 | hw = to_txe_hw(dev); | 1139 | hw = to_txe_hw(dev); |
1057 | 1140 | ||
1058 | init_waitqueue_head(&hw->wait_aliveness); | 1141 | init_waitqueue_head(&hw->wait_aliveness_resp); |
1059 | 1142 | ||
1060 | dev->ops = &mei_txe_hw_ops; | 1143 | dev->ops = &mei_txe_hw_ops; |
1061 | 1144 | ||
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h index 0812d98633a4..e244af79167f 100644 --- a/drivers/misc/mei/hw-txe.h +++ b/drivers/misc/mei/hw-txe.h | |||
@@ -22,6 +22,8 @@ | |||
22 | #include "hw.h" | 22 | #include "hw.h" |
23 | #include "hw-txe-regs.h" | 23 | #include "hw-txe-regs.h" |
24 | 24 | ||
25 | #define MEI_TXI_RPM_TIMEOUT 500 /* ms */ | ||
26 | |||
25 | /* Flatten Hierarchy interrupt cause */ | 27 | /* Flatten Hierarchy interrupt cause */ |
26 | #define TXE_INTR_READINESS_BIT 0 /* HISR_INT_0_STS */ | 28 | #define TXE_INTR_READINESS_BIT 0 /* HISR_INT_0_STS */ |
27 | #define TXE_INTR_READINESS HISR_INT_0_STS | 29 | #define TXE_INTR_READINESS HISR_INT_0_STS |
@@ -35,12 +37,11 @@ | |||
35 | /** | 37 | /** |
36 | * struct mei_txe_hw - txe hardware specifics | 38 | * struct mei_txe_hw - txe hardware specifics |
37 | * | 39 | * |
38 | * @mem_addr: SeC and BRIDGE bars | 40 | * @mem_addr: SeC and BRIDGE bars |
39 | * @aliveness: aliveness (power gating) state of the hardware | 41 | * @aliveness: aliveness (power gating) state of the hardware |
40 | * @readiness: readiness state of the hardware | 42 | * @readiness: readiness state of the hardware |
41 | * @wait_aliveness: aliveness wait queue | 43 | * @wait_aliveness_resp: aliveness wait queue |
42 | * @recvd_aliveness: aliveness interrupt was recived | 44 | * @intr_cause: translated interrupt cause |
43 | * @intr_cause: translated interrupt cause | ||
44 | */ | 45 | */ |
45 | struct mei_txe_hw { | 46 | struct mei_txe_hw { |
46 | void __iomem *mem_addr[NUM_OF_MEM_BARS]; | 47 | void __iomem *mem_addr[NUM_OF_MEM_BARS]; |
@@ -48,8 +49,7 @@ struct mei_txe_hw { | |||
48 | u32 readiness; | 49 | u32 readiness; |
49 | u32 slots; | 50 | u32 slots; |
50 | 51 | ||
51 | wait_queue_head_t wait_aliveness; | 52 | wait_queue_head_t wait_aliveness_resp; |
52 | bool recvd_aliveness; | ||
53 | 53 | ||
54 | unsigned long intr_cause; | 54 | unsigned long intr_cause; |
55 | }; | 55 | }; |
@@ -61,7 +61,10 @@ static inline struct mei_device *hw_txe_to_mei(struct mei_txe_hw *hw) | |||
61 | return container_of((void *)hw, struct mei_device, hw); | 61 | return container_of((void *)hw, struct mei_device, hw); |
62 | } | 62 | } |
63 | 63 | ||
64 | struct mei_device *mei_txe_dev_init(struct pci_dev *pdev); | 64 | extern const struct mei_cfg mei_txe_cfg; |
65 | |||
66 | struct mei_device *mei_txe_dev_init(struct pci_dev *pdev, | ||
67 | const struct mei_cfg *cfg); | ||
65 | 68 | ||
66 | irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id); | 69 | irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id); |
67 | irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id); | 70 | irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id); |
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 6b476ab49b2e..dd448e58cc87 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h | |||
@@ -31,14 +31,21 @@ | |||
31 | #define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ | 31 | #define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ |
32 | #define MEI_IAMTHIF_READ_TIMER 10 /* HPS */ | 32 | #define MEI_IAMTHIF_READ_TIMER 10 /* HPS */ |
33 | 33 | ||
34 | #define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ | ||
34 | #define MEI_HBM_TIMEOUT 1 /* 1 second */ | 35 | #define MEI_HBM_TIMEOUT 1 /* 1 second */ |
35 | 36 | ||
36 | /* | 37 | /* |
37 | * MEI Version | 38 | * MEI Version |
38 | */ | 39 | */ |
39 | #define HBM_MINOR_VERSION 0 | 40 | #define HBM_MINOR_VERSION 1 |
40 | #define HBM_MAJOR_VERSION 1 | 41 | #define HBM_MAJOR_VERSION 1 |
41 | 42 | ||
43 | /* | ||
44 | * MEI version with PGI support | ||
45 | */ | ||
46 | #define HBM_MINOR_VERSION_PGI 1 | ||
47 | #define HBM_MAJOR_VERSION_PGI 1 | ||
48 | |||
42 | /* Host bus message command opcode */ | 49 | /* Host bus message command opcode */ |
43 | #define MEI_HBM_CMD_OP_MSK 0x7f | 50 | #define MEI_HBM_CMD_OP_MSK 0x7f |
44 | /* Host bus message command RESPONSE */ | 51 | /* Host bus message command RESPONSE */ |
@@ -69,6 +76,11 @@ | |||
69 | 76 | ||
70 | #define MEI_FLOW_CONTROL_CMD 0x08 | 77 | #define MEI_FLOW_CONTROL_CMD 0x08 |
71 | 78 | ||
79 | #define MEI_PG_ISOLATION_ENTRY_REQ_CMD 0x0a | ||
80 | #define MEI_PG_ISOLATION_ENTRY_RES_CMD 0x8a | ||
81 | #define MEI_PG_ISOLATION_EXIT_REQ_CMD 0x0b | ||
82 | #define MEI_PG_ISOLATION_EXIT_RES_CMD 0x8b | ||
83 | |||
72 | /* | 84 | /* |
73 | * MEI Stop Reason | 85 | * MEI Stop Reason |
74 | * used by hbm_host_stop_request.reason | 86 | * used by hbm_host_stop_request.reason |
@@ -208,6 +220,17 @@ struct hbm_props_response { | |||
208 | } __packed; | 220 | } __packed; |
209 | 221 | ||
210 | /** | 222 | /** |
223 | * struct hbm_power_gate - power gate request/response | ||
224 | * | ||
225 | * @hbm_cmd - bus message command header | ||
226 | * @reserved[3] | ||
227 | */ | ||
228 | struct hbm_power_gate { | ||
229 | u8 hbm_cmd; | ||
230 | u8 reserved[3]; | ||
231 | } __packed; | ||
232 | |||
233 | /** | ||
211 | * struct hbm_client_connect_request - connect/disconnect request | 234 | * struct hbm_client_connect_request - connect/disconnect request |
212 | * | 235 | * |
213 | * @hbm_cmd - bus message command header | 236 | * @hbm_cmd - bus message command header |
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 4460975c0eef..006929222481 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c | |||
@@ -74,9 +74,13 @@ int mei_reset(struct mei_device *dev) | |||
74 | if (state != MEI_DEV_INITIALIZING && | 74 | if (state != MEI_DEV_INITIALIZING && |
75 | state != MEI_DEV_DISABLED && | 75 | state != MEI_DEV_DISABLED && |
76 | state != MEI_DEV_POWER_DOWN && | 76 | state != MEI_DEV_POWER_DOWN && |
77 | state != MEI_DEV_POWER_UP) | 77 | state != MEI_DEV_POWER_UP) { |
78 | dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n", | 78 | struct mei_fw_status fw_status; |
79 | mei_dev_state_str(state)); | 79 | mei_fw_status(dev, &fw_status); |
80 | dev_warn(&dev->pdev->dev, | ||
81 | "unexpected reset: dev_state = %s " FW_STS_FMT "\n", | ||
82 | mei_dev_state_str(state), FW_STS_PRM(fw_status)); | ||
83 | } | ||
80 | 84 | ||
81 | /* we're already in reset, cancel the init timer | 85 | /* we're already in reset, cancel the init timer |
82 | * if the reset was called due the hbm protocol error | 86 | * if the reset was called due the hbm protocol error |
@@ -118,8 +122,8 @@ int mei_reset(struct mei_device *dev) | |||
118 | mei_amthif_reset_params(dev); | 122 | mei_amthif_reset_params(dev); |
119 | } | 123 | } |
120 | 124 | ||
125 | mei_hbm_reset(dev); | ||
121 | 126 | ||
122 | dev->me_clients_num = 0; | ||
123 | dev->rd_msg_hdr = 0; | 127 | dev->rd_msg_hdr = 0; |
124 | dev->wd_pending = false; | 128 | dev->wd_pending = false; |
125 | 129 | ||
@@ -303,15 +307,58 @@ void mei_stop(struct mei_device *dev) | |||
303 | } | 307 | } |
304 | EXPORT_SYMBOL_GPL(mei_stop); | 308 | EXPORT_SYMBOL_GPL(mei_stop); |
305 | 309 | ||
310 | /** | ||
311 | * mei_write_is_idle - check if the write queues are idle | ||
312 | * | ||
313 | * @dev: the device structure | ||
314 | * | ||
315 | * returns true of there is no pending write | ||
316 | */ | ||
317 | bool mei_write_is_idle(struct mei_device *dev) | ||
318 | { | ||
319 | bool idle = (dev->dev_state == MEI_DEV_ENABLED && | ||
320 | list_empty(&dev->ctrl_wr_list.list) && | ||
321 | list_empty(&dev->write_list.list)); | ||
306 | 322 | ||
323 | dev_dbg(&dev->pdev->dev, "write pg: is idle[%d] state=%s ctrl=%d write=%d\n", | ||
324 | idle, | ||
325 | mei_dev_state_str(dev->dev_state), | ||
326 | list_empty(&dev->ctrl_wr_list.list), | ||
327 | list_empty(&dev->write_list.list)); | ||
307 | 328 | ||
308 | void mei_device_init(struct mei_device *dev) | 329 | return idle; |
330 | } | ||
331 | EXPORT_SYMBOL_GPL(mei_write_is_idle); | ||
332 | |||
333 | int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status) | ||
334 | { | ||
335 | int i; | ||
336 | const struct mei_fw_status *fw_src = &dev->cfg->fw_status; | ||
337 | |||
338 | if (!fw_status) | ||
339 | return -EINVAL; | ||
340 | |||
341 | fw_status->count = fw_src->count; | ||
342 | for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { | ||
343 | int ret; | ||
344 | ret = pci_read_config_dword(dev->pdev, | ||
345 | fw_src->status[i], &fw_status->status[i]); | ||
346 | if (ret) | ||
347 | return ret; | ||
348 | } | ||
349 | |||
350 | return 0; | ||
351 | } | ||
352 | EXPORT_SYMBOL_GPL(mei_fw_status); | ||
353 | |||
354 | void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg) | ||
309 | { | 355 | { |
310 | /* setup our list array */ | 356 | /* setup our list array */ |
311 | INIT_LIST_HEAD(&dev->file_list); | 357 | INIT_LIST_HEAD(&dev->file_list); |
312 | INIT_LIST_HEAD(&dev->device_list); | 358 | INIT_LIST_HEAD(&dev->device_list); |
313 | mutex_init(&dev->device_lock); | 359 | mutex_init(&dev->device_lock); |
314 | init_waitqueue_head(&dev->wait_hw_ready); | 360 | init_waitqueue_head(&dev->wait_hw_ready); |
361 | init_waitqueue_head(&dev->wait_pg); | ||
315 | init_waitqueue_head(&dev->wait_recvd_msg); | 362 | init_waitqueue_head(&dev->wait_recvd_msg); |
316 | init_waitqueue_head(&dev->wait_stop_wd); | 363 | init_waitqueue_head(&dev->wait_stop_wd); |
317 | dev->dev_state = MEI_DEV_INITIALIZING; | 364 | dev->dev_state = MEI_DEV_INITIALIZING; |
@@ -340,6 +387,9 @@ void mei_device_init(struct mei_device *dev) | |||
340 | * 0: Reserved for MEI Bus Message communications | 387 | * 0: Reserved for MEI Bus Message communications |
341 | */ | 388 | */ |
342 | bitmap_set(dev->host_clients_map, 0, 1); | 389 | bitmap_set(dev->host_clients_map, 0, 1); |
390 | |||
391 | dev->pg_event = MEI_PG_EVENT_IDLE; | ||
392 | dev->cfg = cfg; | ||
343 | } | 393 | } |
344 | EXPORT_SYMBOL_GPL(mei_device_init); | 394 | EXPORT_SYMBOL_GPL(mei_device_init); |
345 | 395 | ||
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 147413145c97..66f0a1a06451 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c | |||
@@ -467,7 +467,6 @@ static int mei_ioctl_connect_client(struct file *file, | |||
467 | } | 467 | } |
468 | 468 | ||
469 | cl->me_client_id = dev->me_clients[i].client_id; | 469 | cl->me_client_id = dev->me_clients[i].client_id; |
470 | cl->state = MEI_FILE_CONNECTING; | ||
471 | 470 | ||
472 | dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n", | 471 | dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n", |
473 | cl->me_client_id); | 472 | cl->me_client_id); |
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 94a516716d22..5c7e990e2f22 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h | |||
@@ -153,6 +153,20 @@ struct mei_msg_data { | |||
153 | unsigned char *data; | 153 | unsigned char *data; |
154 | }; | 154 | }; |
155 | 155 | ||
156 | /* Maximum number of processed FW status registers */ | ||
157 | #define MEI_FW_STATUS_MAX 2 | ||
158 | |||
159 | /* | ||
160 | * struct mei_fw_status - storage of FW status data | ||
161 | * | ||
162 | * @count - number of actually available elements in array | ||
163 | * @status - FW status registers | ||
164 | */ | ||
165 | struct mei_fw_status { | ||
166 | int count; | ||
167 | u32 status[MEI_FW_STATUS_MAX]; | ||
168 | }; | ||
169 | |||
156 | /** | 170 | /** |
157 | * struct mei_me_client - representation of me (fw) client | 171 | * struct mei_me_client - representation of me (fw) client |
158 | * | 172 | * |
@@ -213,6 +227,7 @@ struct mei_cl { | |||
213 | 227 | ||
214 | /** struct mei_hw_ops | 228 | /** struct mei_hw_ops |
215 | * | 229 | * |
230 | * @fw_status - read FW status from PCI config space | ||
216 | * @host_is_ready - query for host readiness | 231 | * @host_is_ready - query for host readiness |
217 | 232 | ||
218 | * @hw_is_ready - query if hw is ready | 233 | * @hw_is_ready - query if hw is ready |
@@ -220,6 +235,9 @@ struct mei_cl { | |||
220 | * @hw_start - start hw after reset | 235 | * @hw_start - start hw after reset |
221 | * @hw_config - configure hw | 236 | * @hw_config - configure hw |
222 | 237 | ||
238 | * @pg_state - power gating state of the device | ||
239 | * @pg_is_enabled - is power gating enabled | ||
240 | |||
223 | * @intr_clear - clear pending interrupts | 241 | * @intr_clear - clear pending interrupts |
224 | * @intr_enable - enable interrupts | 242 | * @intr_enable - enable interrupts |
225 | * @intr_disable - disable interrupts | 243 | * @intr_disable - disable interrupts |
@@ -237,6 +255,8 @@ struct mei_cl { | |||
237 | */ | 255 | */ |
238 | struct mei_hw_ops { | 256 | struct mei_hw_ops { |
239 | 257 | ||
258 | int (*fw_status)(struct mei_device *dev, | ||
259 | struct mei_fw_status *fw_status); | ||
240 | bool (*host_is_ready)(struct mei_device *dev); | 260 | bool (*host_is_ready)(struct mei_device *dev); |
241 | 261 | ||
242 | bool (*hw_is_ready)(struct mei_device *dev); | 262 | bool (*hw_is_ready)(struct mei_device *dev); |
@@ -244,6 +264,9 @@ struct mei_hw_ops { | |||
244 | int (*hw_start)(struct mei_device *dev); | 264 | int (*hw_start)(struct mei_device *dev); |
245 | void (*hw_config)(struct mei_device *dev); | 265 | void (*hw_config)(struct mei_device *dev); |
246 | 266 | ||
267 | enum mei_pg_state (*pg_state)(struct mei_device *dev); | ||
268 | bool (*pg_is_enabled)(struct mei_device *dev); | ||
269 | |||
247 | void (*intr_clear)(struct mei_device *dev); | 270 | void (*intr_clear)(struct mei_device *dev); |
248 | void (*intr_enable)(struct mei_device *dev); | 271 | void (*intr_enable)(struct mei_device *dev); |
249 | void (*intr_disable)(struct mei_device *dev); | 272 | void (*intr_disable)(struct mei_device *dev); |
@@ -331,16 +354,61 @@ struct mei_cl_device { | |||
331 | void *priv_data; | 354 | void *priv_data; |
332 | }; | 355 | }; |
333 | 356 | ||
357 | |||
358 | /** | ||
359 | * enum mei_pg_event - power gating transition events | ||
360 | * | ||
361 | * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition | ||
362 | * @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete | ||
363 | * @MEI_PG_EVENT_RECEIVED: the driver received pg event | ||
364 | */ | ||
365 | enum mei_pg_event { | ||
366 | MEI_PG_EVENT_IDLE, | ||
367 | MEI_PG_EVENT_WAIT, | ||
368 | MEI_PG_EVENT_RECEIVED, | ||
369 | }; | ||
370 | |||
371 | /** | ||
372 | * enum mei_pg_state - device internal power gating state | ||
373 | * | ||
374 | * @MEI_PG_OFF: device is not power gated - it is active | ||
375 | * @MEI_PG_ON: device is power gated - it is in lower power state | ||
376 | */ | ||
377 | enum mei_pg_state { | ||
378 | MEI_PG_OFF = 0, | ||
379 | MEI_PG_ON = 1, | ||
380 | }; | ||
381 | |||
382 | /* | ||
383 | * mei_cfg | ||
384 | * | ||
385 | * @fw_status - FW status | ||
386 | * @quirk_probe - device exclusion quirk | ||
387 | */ | ||
388 | struct mei_cfg { | ||
389 | const struct mei_fw_status fw_status; | ||
390 | bool (*quirk_probe)(struct pci_dev *pdev); | ||
391 | }; | ||
392 | |||
393 | |||
394 | #define MEI_PCI_DEVICE(dev, cfg) \ | ||
395 | .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ | ||
396 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \ | ||
397 | .driver_data = (kernel_ulong_t)&(cfg) | ||
398 | |||
399 | |||
334 | /** | 400 | /** |
335 | * struct mei_device - MEI private device struct | 401 | * struct mei_device - MEI private device struct |
336 | 402 | ||
337 | * @reset_count - limits the number of consecutive resets | 403 | * @reset_count - limits the number of consecutive resets |
338 | * @hbm_state - state of host bus message protocol | 404 | * @hbm_state - state of host bus message protocol |
405 | * @pg_event - power gating event | ||
339 | * @mem_addr - mem mapped base register address | 406 | * @mem_addr - mem mapped base register address |
340 | 407 | ||
341 | * @hbuf_depth - depth of hardware host/write buffer is slots | 408 | * @hbuf_depth - depth of hardware host/write buffer is slots |
342 | * @hbuf_is_ready - query if the host host/write buffer is ready | 409 | * @hbuf_is_ready - query if the host host/write buffer is ready |
343 | * @wr_msg - the buffer for hbm control messages | 410 | * @wr_msg - the buffer for hbm control messages |
411 | * @cfg - per device generation config and ops | ||
344 | */ | 412 | */ |
345 | struct mei_device { | 413 | struct mei_device { |
346 | struct pci_dev *pdev; /* pointer to pci device struct */ | 414 | struct pci_dev *pdev; /* pointer to pci device struct */ |
@@ -371,6 +439,7 @@ struct mei_device { | |||
371 | * waiting queue for receive message from FW | 439 | * waiting queue for receive message from FW |
372 | */ | 440 | */ |
373 | wait_queue_head_t wait_hw_ready; | 441 | wait_queue_head_t wait_hw_ready; |
442 | wait_queue_head_t wait_pg; | ||
374 | wait_queue_head_t wait_recvd_msg; | 443 | wait_queue_head_t wait_recvd_msg; |
375 | wait_queue_head_t wait_stop_wd; | 444 | wait_queue_head_t wait_stop_wd; |
376 | 445 | ||
@@ -382,6 +451,14 @@ struct mei_device { | |||
382 | enum mei_hbm_state hbm_state; | 451 | enum mei_hbm_state hbm_state; |
383 | u16 init_clients_timer; | 452 | u16 init_clients_timer; |
384 | 453 | ||
454 | /* | ||
455 | * Power Gating support | ||
456 | */ | ||
457 | enum mei_pg_event pg_event; | ||
458 | #ifdef CONFIG_PM_RUNTIME | ||
459 | struct dev_pm_domain pg_domain; | ||
460 | #endif /* CONFIG_PM_RUNTIME */ | ||
461 | |||
385 | unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */ | 462 | unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */ |
386 | u32 rd_msg_hdr; | 463 | u32 rd_msg_hdr; |
387 | 464 | ||
@@ -442,6 +519,7 @@ struct mei_device { | |||
442 | 519 | ||
443 | 520 | ||
444 | const struct mei_hw_ops *ops; | 521 | const struct mei_hw_ops *ops; |
522 | const struct mei_cfg *cfg; | ||
445 | char hw[0] __aligned(sizeof(void *)); | 523 | char hw[0] __aligned(sizeof(void *)); |
446 | }; | 524 | }; |
447 | 525 | ||
@@ -474,7 +552,7 @@ static inline u32 mei_slots2data(int slots) | |||
474 | /* | 552 | /* |
475 | * mei init function prototypes | 553 | * mei init function prototypes |
476 | */ | 554 | */ |
477 | void mei_device_init(struct mei_device *dev); | 555 | void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg); |
478 | int mei_reset(struct mei_device *dev); | 556 | int mei_reset(struct mei_device *dev); |
479 | int mei_start(struct mei_device *dev); | 557 | int mei_start(struct mei_device *dev); |
480 | int mei_restart(struct mei_device *dev); | 558 | int mei_restart(struct mei_device *dev); |
@@ -553,10 +631,22 @@ void mei_watchdog_unregister(struct mei_device *dev); | |||
553 | * Register Access Function | 631 | * Register Access Function |
554 | */ | 632 | */ |
555 | 633 | ||
634 | |||
556 | static inline void mei_hw_config(struct mei_device *dev) | 635 | static inline void mei_hw_config(struct mei_device *dev) |
557 | { | 636 | { |
558 | dev->ops->hw_config(dev); | 637 | dev->ops->hw_config(dev); |
559 | } | 638 | } |
639 | |||
640 | static inline enum mei_pg_state mei_pg_state(struct mei_device *dev) | ||
641 | { | ||
642 | return dev->ops->pg_state(dev); | ||
643 | } | ||
644 | |||
645 | static inline bool mei_pg_is_enabled(struct mei_device *dev) | ||
646 | { | ||
647 | return dev->ops->pg_is_enabled(dev); | ||
648 | } | ||
649 | |||
560 | static inline int mei_hw_reset(struct mei_device *dev, bool enable) | 650 | static inline int mei_hw_reset(struct mei_device *dev, bool enable) |
561 | { | 651 | { |
562 | return dev->ops->hw_reset(dev, enable); | 652 | return dev->ops->hw_reset(dev, enable); |
@@ -629,8 +719,17 @@ static inline int mei_count_full_read_slots(struct mei_device *dev) | |||
629 | return dev->ops->rdbuf_full_slots(dev); | 719 | return dev->ops->rdbuf_full_slots(dev); |
630 | } | 720 | } |
631 | 721 | ||
722 | int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status); | ||
723 | |||
724 | #define FW_STS_FMT "%08X %08X" | ||
725 | #define FW_STS_PRM(fw_status) \ | ||
726 | (fw_status).count > 0 ? (fw_status).status[0] : 0xDEADBEEF, \ | ||
727 | (fw_status).count > 1 ? (fw_status).status[1] : 0xDEADBEEF | ||
728 | |||
632 | bool mei_hbuf_acquire(struct mei_device *dev); | 729 | bool mei_hbuf_acquire(struct mei_device *dev); |
633 | 730 | ||
731 | bool mei_write_is_idle(struct mei_device *dev); | ||
732 | |||
634 | #if IS_ENABLED(CONFIG_DEBUG_FS) | 733 | #if IS_ENABLED(CONFIG_DEBUG_FS) |
635 | int mei_dbgfs_register(struct mei_device *dev, const char *name); | 734 | int mei_dbgfs_register(struct mei_device *dev, const char *name); |
636 | void mei_dbgfs_deregister(struct mei_device *dev); | 735 | void mei_dbgfs_deregister(struct mei_device *dev); |
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 95889e2e31ff..1b46c64a649f 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <linux/interrupt.h> | 33 | #include <linux/interrupt.h> |
34 | #include <linux/miscdevice.h> | 34 | #include <linux/miscdevice.h> |
35 | 35 | ||
36 | #include <linux/pm_runtime.h> | ||
37 | |||
36 | #include <linux/mei.h> | 38 | #include <linux/mei.h> |
37 | 39 | ||
38 | #include "mei_dev.h" | 40 | #include "mei_dev.h" |
@@ -42,42 +44,44 @@ | |||
42 | 44 | ||
43 | /* mei_pci_tbl - PCI Device ID Table */ | 45 | /* mei_pci_tbl - PCI Device ID Table */ |
44 | static const struct pci_device_id mei_me_pci_tbl[] = { | 46 | static const struct pci_device_id mei_me_pci_tbl[] = { |
45 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)}, | 47 | {MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)}, |
46 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)}, | 48 | {MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)}, |
47 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)}, | 49 | {MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)}, |
48 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)}, | 50 | {MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)}, |
49 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)}, | 51 | {MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)}, |
50 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)}, | 52 | {MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)}, |
51 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)}, | 53 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)}, |
52 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)}, | 54 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)}, |
53 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)}, | 55 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)}, |
54 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)}, | 56 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)}, |
55 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)}, | 57 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)}, |
56 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)}, | 58 | |
57 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)}, | 59 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)}, |
58 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)}, | 60 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)}, |
59 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)}, | 61 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)}, |
60 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)}, | 62 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)}, |
61 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)}, | 63 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)}, |
62 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)}, | 64 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)}, |
63 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)}, | 65 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)}, |
64 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)}, | 66 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)}, |
65 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)}, | 67 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)}, |
66 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)}, | 68 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)}, |
67 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)}, | 69 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)}, |
68 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)}, | 70 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)}, |
69 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)}, | 71 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)}, |
70 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)}, | 72 | |
71 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)}, | 73 | {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)}, |
72 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)}, | 74 | {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)}, |
73 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)}, | 75 | {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)}, |
74 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)}, | 76 | {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)}, |
75 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)}, | 77 | {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)}, |
76 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_H)}, | 78 | {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)}, |
77 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)}, | 79 | {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)}, |
78 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)}, | 80 | {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_lpt_cfg)}, |
79 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_HR)}, | 81 | {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_lpt_cfg)}, |
80 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_WPT_LP)}, | 82 | {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)}, |
83 | {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)}, | ||
84 | {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)}, | ||
81 | 85 | ||
82 | /* required last entry */ | 86 | /* required last entry */ |
83 | {0, } | 87 | {0, } |
@@ -85,44 +89,33 @@ static const struct pci_device_id mei_me_pci_tbl[] = { | |||
85 | 89 | ||
86 | MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl); | 90 | MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl); |
87 | 91 | ||
92 | #ifdef CONFIG_PM_RUNTIME | ||
93 | static inline void mei_me_set_pm_domain(struct mei_device *dev); | ||
94 | static inline void mei_me_unset_pm_domain(struct mei_device *dev); | ||
95 | #else | ||
96 | static inline void mei_me_set_pm_domain(struct mei_device *dev) {} | ||
97 | static inline void mei_me_unset_pm_domain(struct mei_device *dev) {} | ||
98 | #endif /* CONFIG_PM_RUNTIME */ | ||
99 | |||
88 | /** | 100 | /** |
89 | * mei_quirk_probe - probe for devices that doesn't valid ME interface | 101 | * mei_quirk_probe - probe for devices that doesn't valid ME interface |
90 | * | 102 | * |
91 | * @pdev: PCI device structure | 103 | * @pdev: PCI device structure |
92 | * @ent: entry into pci_device_table | 104 | * @cfg: per generation config |
93 | * | 105 | * |
94 | * returns true if ME Interface is valid, false otherwise | 106 | * returns true if ME Interface is valid, false otherwise |
95 | */ | 107 | */ |
96 | static bool mei_me_quirk_probe(struct pci_dev *pdev, | 108 | static bool mei_me_quirk_probe(struct pci_dev *pdev, |
97 | const struct pci_device_id *ent) | 109 | const struct mei_cfg *cfg) |
98 | { | 110 | { |
99 | u32 reg; | 111 | if (cfg->quirk_probe && cfg->quirk_probe(pdev)) { |
100 | /* Cougar Point || Patsburg */ | 112 | dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); |
101 | if (ent->device == MEI_DEV_ID_CPT_1 || | 113 | return false; |
102 | ent->device == MEI_DEV_ID_PBG_1) { | ||
103 | pci_read_config_dword(pdev, PCI_CFG_HFS_2, ®); | ||
104 | /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ | ||
105 | if ((reg & 0x600) == 0x200) | ||
106 | goto no_mei; | ||
107 | } | ||
108 | |||
109 | /* Lynx Point */ | ||
110 | if (ent->device == MEI_DEV_ID_LPT_H || | ||
111 | ent->device == MEI_DEV_ID_LPT_W || | ||
112 | ent->device == MEI_DEV_ID_LPT_HR) { | ||
113 | /* Read ME FW Status check for SPS Firmware */ | ||
114 | pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); | ||
115 | /* if bits [19:16] = 15, running SPS Firmware */ | ||
116 | if ((reg & 0xf0000) == 0xf0000) | ||
117 | goto no_mei; | ||
118 | } | 114 | } |
119 | 115 | ||
120 | return true; | 116 | return true; |
121 | |||
122 | no_mei: | ||
123 | dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); | ||
124 | return false; | ||
125 | } | 117 | } |
118 | |||
126 | /** | 119 | /** |
127 | * mei_probe - Device Initialization Routine | 120 | * mei_probe - Device Initialization Routine |
128 | * | 121 | * |
@@ -133,15 +126,14 @@ no_mei: | |||
133 | */ | 126 | */ |
134 | static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 127 | static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
135 | { | 128 | { |
129 | const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); | ||
136 | struct mei_device *dev; | 130 | struct mei_device *dev; |
137 | struct mei_me_hw *hw; | 131 | struct mei_me_hw *hw; |
138 | int err; | 132 | int err; |
139 | 133 | ||
140 | 134 | ||
141 | if (!mei_me_quirk_probe(pdev, ent)) { | 135 | if (!mei_me_quirk_probe(pdev, cfg)) |
142 | err = -ENODEV; | 136 | return -ENODEV; |
143 | goto end; | ||
144 | } | ||
145 | 137 | ||
146 | /* enable pci dev */ | 138 | /* enable pci dev */ |
147 | err = pci_enable_device(pdev); | 139 | err = pci_enable_device(pdev); |
@@ -173,7 +165,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
173 | 165 | ||
174 | 166 | ||
175 | /* allocates and initializes the mei dev structure */ | 167 | /* allocates and initializes the mei dev structure */ |
176 | dev = mei_me_dev_init(pdev); | 168 | dev = mei_me_dev_init(pdev, cfg); |
177 | if (!dev) { | 169 | if (!dev) { |
178 | err = -ENOMEM; | 170 | err = -ENOMEM; |
179 | goto release_regions; | 171 | goto release_regions; |
@@ -212,6 +204,9 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
212 | goto release_irq; | 204 | goto release_irq; |
213 | } | 205 | } |
214 | 206 | ||
207 | pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT); | ||
208 | pm_runtime_use_autosuspend(&pdev->dev); | ||
209 | |||
215 | err = mei_register(dev); | 210 | err = mei_register(dev); |
216 | if (err) | 211 | if (err) |
217 | goto release_irq; | 212 | goto release_irq; |
@@ -220,6 +215,17 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
220 | 215 | ||
221 | schedule_delayed_work(&dev->timer_work, HZ); | 216 | schedule_delayed_work(&dev->timer_work, HZ); |
222 | 217 | ||
218 | /* | ||
219 | * For not wake-able HW runtime pm framework | ||
220 | * can't be used on pci device level. | ||
221 | * Use domain runtime pm callbacks instead. | ||
222 | */ | ||
223 | if (!pci_dev_run_wake(pdev)) | ||
224 | mei_me_set_pm_domain(dev); | ||
225 | |||
226 | if (mei_pg_is_enabled(dev)) | ||
227 | pm_runtime_put_noidle(&pdev->dev); | ||
228 | |||
223 | dev_dbg(&pdev->dev, "initialization successful.\n"); | 229 | dev_dbg(&pdev->dev, "initialization successful.\n"); |
224 | 230 | ||
225 | return 0; | 231 | return 0; |
@@ -259,12 +265,18 @@ static void mei_me_remove(struct pci_dev *pdev) | |||
259 | if (!dev) | 265 | if (!dev) |
260 | return; | 266 | return; |
261 | 267 | ||
268 | if (mei_pg_is_enabled(dev)) | ||
269 | pm_runtime_get_noresume(&pdev->dev); | ||
270 | |||
262 | hw = to_me_hw(dev); | 271 | hw = to_me_hw(dev); |
263 | 272 | ||
264 | 273 | ||
265 | dev_dbg(&pdev->dev, "stop\n"); | 274 | dev_dbg(&pdev->dev, "stop\n"); |
266 | mei_stop(dev); | 275 | mei_stop(dev); |
267 | 276 | ||
277 | if (!pci_dev_run_wake(pdev)) | ||
278 | mei_me_unset_pm_domain(dev); | ||
279 | |||
268 | /* disable interrupts */ | 280 | /* disable interrupts */ |
269 | mei_disable_interrupts(dev); | 281 | mei_disable_interrupts(dev); |
270 | 282 | ||
@@ -343,12 +355,120 @@ static int mei_me_pci_resume(struct device *device) | |||
343 | 355 | ||
344 | return 0; | 356 | return 0; |
345 | } | 357 | } |
358 | #endif /* CONFIG_PM_SLEEP */ | ||
359 | |||
360 | #ifdef CONFIG_PM_RUNTIME | ||
361 | static int mei_me_pm_runtime_idle(struct device *device) | ||
362 | { | ||
363 | struct pci_dev *pdev = to_pci_dev(device); | ||
364 | struct mei_device *dev; | ||
365 | |||
366 | dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n"); | ||
367 | |||
368 | dev = pci_get_drvdata(pdev); | ||
369 | if (!dev) | ||
370 | return -ENODEV; | ||
371 | if (mei_write_is_idle(dev)) | ||
372 | pm_schedule_suspend(device, MEI_ME_RPM_TIMEOUT * 2); | ||
373 | |||
374 | return -EBUSY; | ||
375 | } | ||
376 | |||
377 | static int mei_me_pm_runtime_suspend(struct device *device) | ||
378 | { | ||
379 | struct pci_dev *pdev = to_pci_dev(device); | ||
380 | struct mei_device *dev; | ||
381 | int ret; | ||
382 | |||
383 | dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n"); | ||
384 | |||
385 | dev = pci_get_drvdata(pdev); | ||
386 | if (!dev) | ||
387 | return -ENODEV; | ||
388 | |||
389 | mutex_lock(&dev->device_lock); | ||
390 | |||
391 | if (mei_write_is_idle(dev)) | ||
392 | ret = mei_me_pg_set_sync(dev); | ||
393 | else | ||
394 | ret = -EAGAIN; | ||
395 | |||
396 | mutex_unlock(&dev->device_lock); | ||
397 | |||
398 | dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret); | ||
399 | |||
400 | return ret; | ||
401 | } | ||
402 | |||
403 | static int mei_me_pm_runtime_resume(struct device *device) | ||
404 | { | ||
405 | struct pci_dev *pdev = to_pci_dev(device); | ||
406 | struct mei_device *dev; | ||
407 | int ret; | ||
408 | |||
409 | dev_dbg(&pdev->dev, "rpm: me: runtime resume\n"); | ||
410 | |||
411 | dev = pci_get_drvdata(pdev); | ||
412 | if (!dev) | ||
413 | return -ENODEV; | ||
414 | |||
415 | mutex_lock(&dev->device_lock); | ||
416 | |||
417 | ret = mei_me_pg_unset_sync(dev); | ||
418 | |||
419 | mutex_unlock(&dev->device_lock); | ||
420 | |||
421 | dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret); | ||
422 | |||
423 | return ret; | ||
424 | } | ||
425 | |||
426 | /** | ||
427 | * mei_me_set_pm_domain - fill and set pm domian stucture for device | ||
428 | * | ||
429 | * @dev: mei_device | ||
430 | */ | ||
431 | static inline void mei_me_set_pm_domain(struct mei_device *dev) | ||
432 | { | ||
433 | struct pci_dev *pdev = dev->pdev; | ||
434 | |||
435 | if (pdev->dev.bus && pdev->dev.bus->pm) { | ||
436 | dev->pg_domain.ops = *pdev->dev.bus->pm; | ||
437 | |||
438 | dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend; | ||
439 | dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume; | ||
440 | dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle; | ||
441 | |||
442 | pdev->dev.pm_domain = &dev->pg_domain; | ||
443 | } | ||
444 | } | ||
445 | |||
446 | /** | ||
447 | * mei_me_unset_pm_domain - clean pm domian stucture for device | ||
448 | * | ||
449 | * @dev: mei_device | ||
450 | */ | ||
451 | static inline void mei_me_unset_pm_domain(struct mei_device *dev) | ||
452 | { | ||
453 | /* stop using pm callbacks if any */ | ||
454 | dev->pdev->dev.pm_domain = NULL; | ||
455 | } | ||
456 | #endif /* CONFIG_PM_RUNTIME */ | ||
457 | |||
458 | #ifdef CONFIG_PM | ||
459 | static const struct dev_pm_ops mei_me_pm_ops = { | ||
460 | SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend, | ||
461 | mei_me_pci_resume) | ||
462 | SET_RUNTIME_PM_OPS( | ||
463 | mei_me_pm_runtime_suspend, | ||
464 | mei_me_pm_runtime_resume, | ||
465 | mei_me_pm_runtime_idle) | ||
466 | }; | ||
346 | 467 | ||
347 | static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume); | ||
348 | #define MEI_ME_PM_OPS (&mei_me_pm_ops) | 468 | #define MEI_ME_PM_OPS (&mei_me_pm_ops) |
349 | #else | 469 | #else |
350 | #define MEI_ME_PM_OPS NULL | 470 | #define MEI_ME_PM_OPS NULL |
351 | #endif /* CONFIG_PM_SLEEP */ | 471 | #endif /* CONFIG_PM */ |
352 | /* | 472 | /* |
353 | * PCI driver structure | 473 | * PCI driver structure |
354 | */ | 474 | */ |
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c index ad3adb009a1e..2343c6236df9 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/jiffies.h> | 27 | #include <linux/jiffies.h> |
28 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
29 | #include <linux/workqueue.h> | 29 | #include <linux/workqueue.h> |
30 | #include <linux/pm_runtime.h> | ||
30 | 31 | ||
31 | #include <linux/mei.h> | 32 | #include <linux/mei.h> |
32 | 33 | ||
@@ -35,11 +36,18 @@ | |||
35 | #include "hw-txe.h" | 36 | #include "hw-txe.h" |
36 | 37 | ||
37 | static const struct pci_device_id mei_txe_pci_tbl[] = { | 38 | static const struct pci_device_id mei_txe_pci_tbl[] = { |
38 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F18)}, /* Baytrail */ | 39 | {MEI_PCI_DEVICE(0x0F18, mei_txe_cfg)}, /* Baytrail */ |
39 | {0, } | 40 | {0, } |
40 | }; | 41 | }; |
41 | MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl); | 42 | MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl); |
42 | 43 | ||
44 | #ifdef CONFIG_PM_RUNTIME | ||
45 | static inline void mei_txe_set_pm_domain(struct mei_device *dev); | ||
46 | static inline void mei_txe_unset_pm_domain(struct mei_device *dev); | ||
47 | #else | ||
48 | static inline void mei_txe_set_pm_domain(struct mei_device *dev) {} | ||
49 | static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {} | ||
50 | #endif /* CONFIG_PM_RUNTIME */ | ||
43 | 51 | ||
44 | static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw) | 52 | static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw) |
45 | { | 53 | { |
@@ -61,6 +69,7 @@ static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw) | |||
61 | */ | 69 | */ |
62 | static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 70 | static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
63 | { | 71 | { |
72 | const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); | ||
64 | struct mei_device *dev; | 73 | struct mei_device *dev; |
65 | struct mei_txe_hw *hw; | 74 | struct mei_txe_hw *hw; |
66 | int err; | 75 | int err; |
@@ -91,7 +100,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
91 | } | 100 | } |
92 | 101 | ||
93 | /* allocates and initializes the mei dev structure */ | 102 | /* allocates and initializes the mei dev structure */ |
94 | dev = mei_txe_dev_init(pdev); | 103 | dev = mei_txe_dev_init(pdev, cfg); |
95 | if (!dev) { | 104 | if (!dev) { |
96 | err = -ENOMEM; | 105 | err = -ENOMEM; |
97 | goto release_regions; | 106 | goto release_regions; |
@@ -137,12 +146,25 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
137 | goto release_irq; | 146 | goto release_irq; |
138 | } | 147 | } |
139 | 148 | ||
149 | pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT); | ||
150 | pm_runtime_use_autosuspend(&pdev->dev); | ||
151 | |||
140 | err = mei_register(dev); | 152 | err = mei_register(dev); |
141 | if (err) | 153 | if (err) |
142 | goto release_irq; | 154 | goto release_irq; |
143 | 155 | ||
144 | pci_set_drvdata(pdev, dev); | 156 | pci_set_drvdata(pdev, dev); |
145 | 157 | ||
158 | /* | ||
159 | * For not wake-able HW runtime pm framework | ||
160 | * can't be used on pci device level. | ||
161 | * Use domain runtime pm callbacks instead. | ||
162 | */ | ||
163 | if (!pci_dev_run_wake(pdev)) | ||
164 | mei_txe_set_pm_domain(dev); | ||
165 | |||
166 | pm_runtime_put_noidle(&pdev->dev); | ||
167 | |||
146 | return 0; | 168 | return 0; |
147 | 169 | ||
148 | release_irq: | 170 | release_irq: |
@@ -187,10 +209,15 @@ static void mei_txe_remove(struct pci_dev *pdev) | |||
187 | return; | 209 | return; |
188 | } | 210 | } |
189 | 211 | ||
212 | pm_runtime_get_noresume(&pdev->dev); | ||
213 | |||
190 | hw = to_txe_hw(dev); | 214 | hw = to_txe_hw(dev); |
191 | 215 | ||
192 | mei_stop(dev); | 216 | mei_stop(dev); |
193 | 217 | ||
218 | if (!pci_dev_run_wake(pdev)) | ||
219 | mei_txe_unset_pm_domain(dev); | ||
220 | |||
194 | /* disable interrupts */ | 221 | /* disable interrupts */ |
195 | mei_disable_interrupts(dev); | 222 | mei_disable_interrupts(dev); |
196 | free_irq(pdev->irq, dev); | 223 | free_irq(pdev->irq, dev); |
@@ -265,15 +292,131 @@ static int mei_txe_pci_resume(struct device *device) | |||
265 | 292 | ||
266 | return err; | 293 | return err; |
267 | } | 294 | } |
295 | #endif /* CONFIG_PM_SLEEP */ | ||
296 | |||
297 | #ifdef CONFIG_PM_RUNTIME | ||
298 | static int mei_txe_pm_runtime_idle(struct device *device) | ||
299 | { | ||
300 | struct pci_dev *pdev = to_pci_dev(device); | ||
301 | struct mei_device *dev; | ||
302 | |||
303 | dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n"); | ||
304 | |||
305 | dev = pci_get_drvdata(pdev); | ||
306 | if (!dev) | ||
307 | return -ENODEV; | ||
308 | if (mei_write_is_idle(dev)) | ||
309 | pm_schedule_suspend(device, MEI_TXI_RPM_TIMEOUT * 2); | ||
310 | |||
311 | return -EBUSY; | ||
312 | } | ||
313 | static int mei_txe_pm_runtime_suspend(struct device *device) | ||
314 | { | ||
315 | struct pci_dev *pdev = to_pci_dev(device); | ||
316 | struct mei_device *dev; | ||
317 | int ret; | ||
318 | |||
319 | dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n"); | ||
320 | |||
321 | dev = pci_get_drvdata(pdev); | ||
322 | if (!dev) | ||
323 | return -ENODEV; | ||
324 | |||
325 | mutex_lock(&dev->device_lock); | ||
326 | |||
327 | if (mei_write_is_idle(dev)) | ||
328 | ret = mei_txe_aliveness_set_sync(dev, 0); | ||
329 | else | ||
330 | ret = -EAGAIN; | ||
331 | |||
332 | /* | ||
333 | * If everything is okay we're about to enter PCI low | ||
334 | * power state (D3) therefor we need to disable the | ||
335 | * interrupts towards host. | ||
336 | * However if device is not wakeable we do not enter | ||
337 | * D-low state and we need to keep the interrupt kicking | ||
338 | */ | ||
339 | if (!ret && pci_dev_run_wake(pdev)) | ||
340 | mei_disable_interrupts(dev); | ||
341 | |||
342 | dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); | ||
343 | |||
344 | mutex_unlock(&dev->device_lock); | ||
345 | return ret; | ||
346 | } | ||
347 | |||
348 | static int mei_txe_pm_runtime_resume(struct device *device) | ||
349 | { | ||
350 | struct pci_dev *pdev = to_pci_dev(device); | ||
351 | struct mei_device *dev; | ||
352 | int ret; | ||
353 | |||
354 | dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n"); | ||
355 | |||
356 | dev = pci_get_drvdata(pdev); | ||
357 | if (!dev) | ||
358 | return -ENODEV; | ||
359 | |||
360 | mutex_lock(&dev->device_lock); | ||
361 | |||
362 | mei_enable_interrupts(dev); | ||
363 | |||
364 | ret = mei_txe_aliveness_set_sync(dev, 1); | ||
365 | |||
366 | mutex_unlock(&dev->device_lock); | ||
367 | |||
368 | dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret); | ||
369 | |||
370 | return ret; | ||
371 | } | ||
372 | |||
373 | /** | ||
374 | * mei_txe_set_pm_domain - fill and set pm domian stucture for device | ||
375 | * | ||
376 | * @dev: mei_device | ||
377 | */ | ||
378 | static inline void mei_txe_set_pm_domain(struct mei_device *dev) | ||
379 | { | ||
380 | struct pci_dev *pdev = dev->pdev; | ||
381 | |||
382 | if (pdev->dev.bus && pdev->dev.bus->pm) { | ||
383 | dev->pg_domain.ops = *pdev->dev.bus->pm; | ||
384 | |||
385 | dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend; | ||
386 | dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume; | ||
387 | dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle; | ||
388 | |||
389 | pdev->dev.pm_domain = &dev->pg_domain; | ||
390 | } | ||
391 | } | ||
268 | 392 | ||
269 | static SIMPLE_DEV_PM_OPS(mei_txe_pm_ops, | 393 | /** |
270 | mei_txe_pci_suspend, | 394 | * mei_txe_unset_pm_domain - clean pm domian stucture for device |
271 | mei_txe_pci_resume); | 395 | * |
396 | * @dev: mei_device | ||
397 | */ | ||
398 | static inline void mei_txe_unset_pm_domain(struct mei_device *dev) | ||
399 | { | ||
400 | /* stop using pm callbacks if any */ | ||
401 | dev->pdev->dev.pm_domain = NULL; | ||
402 | } | ||
403 | #endif /* CONFIG_PM_RUNTIME */ | ||
404 | |||
405 | #ifdef CONFIG_PM | ||
406 | static const struct dev_pm_ops mei_txe_pm_ops = { | ||
407 | SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend, | ||
408 | mei_txe_pci_resume) | ||
409 | SET_RUNTIME_PM_OPS( | ||
410 | mei_txe_pm_runtime_suspend, | ||
411 | mei_txe_pm_runtime_resume, | ||
412 | mei_txe_pm_runtime_idle) | ||
413 | }; | ||
272 | 414 | ||
273 | #define MEI_TXE_PM_OPS (&mei_txe_pm_ops) | 415 | #define MEI_TXE_PM_OPS (&mei_txe_pm_ops) |
274 | #else | 416 | #else |
275 | #define MEI_TXE_PM_OPS NULL | 417 | #define MEI_TXE_PM_OPS NULL |
276 | #endif /* CONFIG_PM_SLEEP */ | 418 | #endif /* CONFIG_PM */ |
419 | |||
277 | /* | 420 | /* |
278 | * PCI driver structure | 421 | * PCI driver structure |
279 | */ | 422 | */ |
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c index ebf1cbc198fd..a84a664dfccb 100644 --- a/drivers/misc/mei/wd.c +++ b/drivers/misc/mei/wd.c | |||
@@ -84,8 +84,6 @@ int mei_wd_host_init(struct mei_device *dev) | |||
84 | return ret; | 84 | return ret; |
85 | } | 85 | } |
86 | 86 | ||
87 | cl->state = MEI_FILE_CONNECTING; | ||
88 | |||
89 | ret = mei_cl_connect(cl, NULL); | 87 | ret = mei_cl_connect(cl, NULL); |
90 | 88 | ||
91 | if (ret) { | 89 | if (ret) { |
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 903eb37f047a..f0cc9e6dac3a 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig | |||
@@ -266,11 +266,12 @@ config REGULATOR_LP8788 | |||
266 | This driver supports LP8788 voltage regulator chip. | 266 | This driver supports LP8788 voltage regulator chip. |
267 | 267 | ||
268 | config REGULATOR_MAX14577 | 268 | config REGULATOR_MAX14577 |
269 | tristate "Maxim 14577 regulator" | 269 | tristate "Maxim 14577/77836 regulator" |
270 | depends on MFD_MAX14577 | 270 | depends on MFD_MAX14577 |
271 | help | 271 | help |
272 | This driver controls a Maxim 14577 regulator via I2C bus. | 272 | This driver controls a Maxim MAX14577/77836 regulator via I2C bus. |
273 | The regulators include safeout LDO and current regulator 'CHARGER'. | 273 | The MAX14577 regulators include safeout LDO and charger current |
274 | regulator. The MAX77836 has two additional LDOs. | ||
274 | 275 | ||
275 | config REGULATOR_MAX1586 | 276 | config REGULATOR_MAX1586 |
276 | tristate "Maxim 1586/1587 voltage regulator" | 277 | tristate "Maxim 1586/1587 voltage regulator" |
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c index ed60baaeceec..5d9c605cf534 100644 --- a/drivers/regulator/max14577.c +++ b/drivers/regulator/max14577.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * max14577.c - Regulator driver for the Maxim 14577 | 2 | * max14577.c - Regulator driver for the Maxim 14577/77836 |
3 | * | 3 | * |
4 | * Copyright (C) 2013,2014 Samsung Electronics | 4 | * Copyright (C) 2013,2014 Samsung Electronics |
5 | * Krzysztof Kozlowski <k.kozlowski@samsung.com> | 5 | * Krzysztof Kozlowski <k.kozlowski@samsung.com> |
@@ -22,6 +22,42 @@ | |||
22 | #include <linux/mfd/max14577-private.h> | 22 | #include <linux/mfd/max14577-private.h> |
23 | #include <linux/regulator/of_regulator.h> | 23 | #include <linux/regulator/of_regulator.h> |
24 | 24 | ||
25 | /* | ||
26 | * Valid limits of current for max14577 and max77836 chargers. | ||
27 | * They must correspond to MBCICHWRCL and MBCICHWRCH fields in CHGCTRL4 | ||
28 | * register for given chipset. | ||
29 | */ | ||
30 | struct maxim_charger_current { | ||
31 | /* Minimal current, set in CHGCTRL4/MBCICHWRCL, uA */ | ||
32 | unsigned int min; | ||
33 | /* | ||
34 | * Minimal current when high setting is active, | ||
35 | * set in CHGCTRL4/MBCICHWRCH, uA | ||
36 | */ | ||
37 | unsigned int high_start; | ||
38 | /* Value of one step in high setting, uA */ | ||
39 | unsigned int high_step; | ||
40 | /* Maximum current of high setting, uA */ | ||
41 | unsigned int max; | ||
42 | }; | ||
43 | |||
44 | /* Table of valid charger currents for different Maxim chipsets */ | ||
45 | static const struct maxim_charger_current maxim_charger_currents[] = { | ||
46 | [MAXIM_DEVICE_TYPE_UNKNOWN] = { 0, 0, 0, 0 }, | ||
47 | [MAXIM_DEVICE_TYPE_MAX14577] = { | ||
48 | .min = MAX14577_REGULATOR_CURRENT_LIMIT_MIN, | ||
49 | .high_start = MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START, | ||
50 | .high_step = MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP, | ||
51 | .max = MAX14577_REGULATOR_CURRENT_LIMIT_MAX, | ||
52 | }, | ||
53 | [MAXIM_DEVICE_TYPE_MAX77836] = { | ||
54 | .min = MAX77836_REGULATOR_CURRENT_LIMIT_MIN, | ||
55 | .high_start = MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_START, | ||
56 | .high_step = MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_STEP, | ||
57 | .max = MAX77836_REGULATOR_CURRENT_LIMIT_MAX, | ||
58 | }, | ||
59 | }; | ||
60 | |||
25 | static int max14577_reg_is_enabled(struct regulator_dev *rdev) | 61 | static int max14577_reg_is_enabled(struct regulator_dev *rdev) |
26 | { | 62 | { |
27 | int rid = rdev_get_id(rdev); | 63 | int rid = rdev_get_id(rdev); |
@@ -47,6 +83,9 @@ static int max14577_reg_get_current_limit(struct regulator_dev *rdev) | |||
47 | { | 83 | { |
48 | u8 reg_data; | 84 | u8 reg_data; |
49 | struct regmap *rmap = rdev->regmap; | 85 | struct regmap *rmap = rdev->regmap; |
86 | struct max14577 *max14577 = rdev_get_drvdata(rdev); | ||
87 | const struct maxim_charger_current *limits = | ||
88 | &maxim_charger_currents[max14577->dev_type]; | ||
50 | 89 | ||
51 | if (rdev_get_id(rdev) != MAX14577_CHARGER) | 90 | if (rdev_get_id(rdev) != MAX14577_CHARGER) |
52 | return -EINVAL; | 91 | return -EINVAL; |
@@ -54,12 +93,11 @@ static int max14577_reg_get_current_limit(struct regulator_dev *rdev) | |||
54 | max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, ®_data); | 93 | max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, ®_data); |
55 | 94 | ||
56 | if ((reg_data & CHGCTRL4_MBCICHWRCL_MASK) == 0) | 95 | if ((reg_data & CHGCTRL4_MBCICHWRCL_MASK) == 0) |
57 | return MAX14577_REGULATOR_CURRENT_LIMIT_MIN; | 96 | return limits->min; |
58 | 97 | ||
59 | reg_data = ((reg_data & CHGCTRL4_MBCICHWRCH_MASK) >> | 98 | reg_data = ((reg_data & CHGCTRL4_MBCICHWRCH_MASK) >> |
60 | CHGCTRL4_MBCICHWRCH_SHIFT); | 99 | CHGCTRL4_MBCICHWRCH_SHIFT); |
61 | return MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START + | 100 | return limits->high_start + reg_data * limits->high_step; |
62 | reg_data * MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP; | ||
63 | } | 101 | } |
64 | 102 | ||
65 | static int max14577_reg_set_current_limit(struct regulator_dev *rdev, | 103 | static int max14577_reg_set_current_limit(struct regulator_dev *rdev, |
@@ -67,33 +105,39 @@ static int max14577_reg_set_current_limit(struct regulator_dev *rdev, | |||
67 | { | 105 | { |
68 | int i, current_bits = 0xf; | 106 | int i, current_bits = 0xf; |
69 | u8 reg_data; | 107 | u8 reg_data; |
108 | struct max14577 *max14577 = rdev_get_drvdata(rdev); | ||
109 | const struct maxim_charger_current *limits = | ||
110 | &maxim_charger_currents[max14577->dev_type]; | ||
70 | 111 | ||
71 | if (rdev_get_id(rdev) != MAX14577_CHARGER) | 112 | if (rdev_get_id(rdev) != MAX14577_CHARGER) |
72 | return -EINVAL; | 113 | return -EINVAL; |
73 | 114 | ||
74 | if (min_uA > MAX14577_REGULATOR_CURRENT_LIMIT_MAX || | 115 | if (min_uA > limits->max || max_uA < limits->min) |
75 | max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_MIN) | ||
76 | return -EINVAL; | 116 | return -EINVAL; |
77 | 117 | ||
78 | if (max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START) { | 118 | if (max_uA < limits->high_start) { |
79 | /* Less than 200 mA, so set 90mA (turn only Low Bit off) */ | 119 | /* |
120 | * Less than high_start, | ||
121 | * so set the minimal current (turn only Low Bit off) | ||
122 | */ | ||
80 | u8 reg_data = 0x0 << CHGCTRL4_MBCICHWRCL_SHIFT; | 123 | u8 reg_data = 0x0 << CHGCTRL4_MBCICHWRCL_SHIFT; |
81 | return max14577_update_reg(rdev->regmap, | 124 | return max14577_update_reg(rdev->regmap, |
82 | MAX14577_CHG_REG_CHG_CTRL4, | 125 | MAX14577_CHG_REG_CHG_CTRL4, |
83 | CHGCTRL4_MBCICHWRCL_MASK, reg_data); | 126 | CHGCTRL4_MBCICHWRCL_MASK, reg_data); |
84 | } | 127 | } |
85 | 128 | ||
86 | /* max_uA is in range: <LIMIT_HIGH_START, inifinite>, so search for | 129 | /* |
87 | * valid current starting from LIMIT_MAX. */ | 130 | * max_uA is in range: <high_start, inifinite>, so search for |
88 | for (i = MAX14577_REGULATOR_CURRENT_LIMIT_MAX; | 131 | * valid current starting from maximum current. |
89 | i >= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START; | 132 | */ |
90 | i -= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP) { | 133 | for (i = limits->max; i >= limits->high_start; i -= limits->high_step) { |
91 | if (i <= max_uA) | 134 | if (i <= max_uA) |
92 | break; | 135 | break; |
93 | current_bits--; | 136 | current_bits--; |
94 | } | 137 | } |
95 | BUG_ON(current_bits < 0); /* Cannot happen */ | 138 | BUG_ON(current_bits < 0); /* Cannot happen */ |
96 | /* Turn Low Bit on (use range 200mA-950 mA) */ | 139 | |
140 | /* Turn Low Bit on (use range high_start-max)... */ | ||
97 | reg_data = 0x1 << CHGCTRL4_MBCICHWRCL_SHIFT; | 141 | reg_data = 0x1 << CHGCTRL4_MBCICHWRCL_SHIFT; |
98 | /* and set proper High Bits */ | 142 | /* and set proper High Bits */ |
99 | reg_data |= current_bits << CHGCTRL4_MBCICHWRCH_SHIFT; | 143 | reg_data |= current_bits << CHGCTRL4_MBCICHWRCH_SHIFT; |
@@ -118,7 +162,7 @@ static struct regulator_ops max14577_charger_ops = { | |||
118 | .set_current_limit = max14577_reg_set_current_limit, | 162 | .set_current_limit = max14577_reg_set_current_limit, |
119 | }; | 163 | }; |
120 | 164 | ||
121 | static const struct regulator_desc supported_regulators[] = { | 165 | static const struct regulator_desc max14577_supported_regulators[] = { |
122 | [MAX14577_SAFEOUT] = { | 166 | [MAX14577_SAFEOUT] = { |
123 | .name = "SAFEOUT", | 167 | .name = "SAFEOUT", |
124 | .id = MAX14577_SAFEOUT, | 168 | .id = MAX14577_SAFEOUT, |
@@ -141,16 +185,88 @@ static const struct regulator_desc supported_regulators[] = { | |||
141 | }, | 185 | }, |
142 | }; | 186 | }; |
143 | 187 | ||
188 | static struct regulator_ops max77836_ldo_ops = { | ||
189 | .is_enabled = regulator_is_enabled_regmap, | ||
190 | .enable = regulator_enable_regmap, | ||
191 | .disable = regulator_disable_regmap, | ||
192 | .list_voltage = regulator_list_voltage_linear, | ||
193 | .map_voltage = regulator_map_voltage_linear, | ||
194 | .get_voltage_sel = regulator_get_voltage_sel_regmap, | ||
195 | .set_voltage_sel = regulator_set_voltage_sel_regmap, | ||
196 | /* TODO: add .set_suspend_mode */ | ||
197 | }; | ||
198 | |||
199 | static const struct regulator_desc max77836_supported_regulators[] = { | ||
200 | [MAX14577_SAFEOUT] = { | ||
201 | .name = "SAFEOUT", | ||
202 | .id = MAX14577_SAFEOUT, | ||
203 | .ops = &max14577_safeout_ops, | ||
204 | .type = REGULATOR_VOLTAGE, | ||
205 | .owner = THIS_MODULE, | ||
206 | .n_voltages = 1, | ||
207 | .min_uV = MAX14577_REGULATOR_SAFEOUT_VOLTAGE, | ||
208 | .enable_reg = MAX14577_REG_CONTROL2, | ||
209 | .enable_mask = CTRL2_SFOUTORD_MASK, | ||
210 | }, | ||
211 | [MAX14577_CHARGER] = { | ||
212 | .name = "CHARGER", | ||
213 | .id = MAX14577_CHARGER, | ||
214 | .ops = &max14577_charger_ops, | ||
215 | .type = REGULATOR_CURRENT, | ||
216 | .owner = THIS_MODULE, | ||
217 | .enable_reg = MAX14577_CHG_REG_CHG_CTRL2, | ||
218 | .enable_mask = CHGCTRL2_MBCHOSTEN_MASK, | ||
219 | }, | ||
220 | [MAX77836_LDO1] = { | ||
221 | .name = "LDO1", | ||
222 | .id = MAX77836_LDO1, | ||
223 | .ops = &max77836_ldo_ops, | ||
224 | .type = REGULATOR_VOLTAGE, | ||
225 | .owner = THIS_MODULE, | ||
226 | .n_voltages = MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM, | ||
227 | .min_uV = MAX77836_REGULATOR_LDO_VOLTAGE_MIN, | ||
228 | .uV_step = MAX77836_REGULATOR_LDO_VOLTAGE_STEP, | ||
229 | .enable_reg = MAX77836_LDO_REG_CNFG1_LDO1, | ||
230 | .enable_mask = MAX77836_CNFG1_LDO_PWRMD_MASK, | ||
231 | .vsel_reg = MAX77836_LDO_REG_CNFG1_LDO1, | ||
232 | .vsel_mask = MAX77836_CNFG1_LDO_TV_MASK, | ||
233 | }, | ||
234 | [MAX77836_LDO2] = { | ||
235 | .name = "LDO2", | ||
236 | .id = MAX77836_LDO2, | ||
237 | .ops = &max77836_ldo_ops, | ||
238 | .type = REGULATOR_VOLTAGE, | ||
239 | .owner = THIS_MODULE, | ||
240 | .n_voltages = MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM, | ||
241 | .min_uV = MAX77836_REGULATOR_LDO_VOLTAGE_MIN, | ||
242 | .uV_step = MAX77836_REGULATOR_LDO_VOLTAGE_STEP, | ||
243 | .enable_reg = MAX77836_LDO_REG_CNFG1_LDO2, | ||
244 | .enable_mask = MAX77836_CNFG1_LDO_PWRMD_MASK, | ||
245 | .vsel_reg = MAX77836_LDO_REG_CNFG1_LDO2, | ||
246 | .vsel_mask = MAX77836_CNFG1_LDO_TV_MASK, | ||
247 | }, | ||
248 | }; | ||
249 | |||
144 | #ifdef CONFIG_OF | 250 | #ifdef CONFIG_OF |
145 | static struct of_regulator_match max14577_regulator_matches[] = { | 251 | static struct of_regulator_match max14577_regulator_matches[] = { |
146 | { .name = "SAFEOUT", }, | 252 | { .name = "SAFEOUT", }, |
147 | { .name = "CHARGER", }, | 253 | { .name = "CHARGER", }, |
148 | }; | 254 | }; |
149 | 255 | ||
150 | static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) | 256 | static struct of_regulator_match max77836_regulator_matches[] = { |
257 | { .name = "SAFEOUT", }, | ||
258 | { .name = "CHARGER", }, | ||
259 | { .name = "LDO1", }, | ||
260 | { .name = "LDO2", }, | ||
261 | }; | ||
262 | |||
263 | static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev, | ||
264 | enum maxim_device_type dev_type) | ||
151 | { | 265 | { |
152 | int ret; | 266 | int ret; |
153 | struct device_node *np; | 267 | struct device_node *np; |
268 | struct of_regulator_match *regulator_matches; | ||
269 | unsigned int regulator_matches_size; | ||
154 | 270 | ||
155 | np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators"); | 271 | np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators"); |
156 | if (!np) { | 272 | if (!np) { |
@@ -158,8 +274,19 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) | |||
158 | return -EINVAL; | 274 | return -EINVAL; |
159 | } | 275 | } |
160 | 276 | ||
161 | ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches, | 277 | switch (dev_type) { |
162 | MAX14577_REG_MAX); | 278 | case MAXIM_DEVICE_TYPE_MAX77836: |
279 | regulator_matches = max77836_regulator_matches; | ||
280 | regulator_matches_size = ARRAY_SIZE(max77836_regulator_matches); | ||
281 | break; | ||
282 | case MAXIM_DEVICE_TYPE_MAX14577: | ||
283 | default: | ||
284 | regulator_matches = max14577_regulator_matches; | ||
285 | regulator_matches_size = ARRAY_SIZE(max14577_regulator_matches); | ||
286 | } | ||
287 | |||
288 | ret = of_regulator_match(&pdev->dev, np, regulator_matches, | ||
289 | regulator_matches_size); | ||
163 | if (ret < 0) | 290 | if (ret < 0) |
164 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); | 291 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); |
165 | else | 292 | else |
@@ -170,31 +297,74 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) | |||
170 | return ret; | 297 | return ret; |
171 | } | 298 | } |
172 | 299 | ||
173 | static inline struct regulator_init_data *match_init_data(int index) | 300 | static inline struct regulator_init_data *match_init_data(int index, |
301 | enum maxim_device_type dev_type) | ||
174 | { | 302 | { |
175 | return max14577_regulator_matches[index].init_data; | 303 | switch (dev_type) { |
304 | case MAXIM_DEVICE_TYPE_MAX77836: | ||
305 | return max77836_regulator_matches[index].init_data; | ||
306 | |||
307 | case MAXIM_DEVICE_TYPE_MAX14577: | ||
308 | default: | ||
309 | return max14577_regulator_matches[index].init_data; | ||
310 | } | ||
176 | } | 311 | } |
177 | 312 | ||
178 | static inline struct device_node *match_of_node(int index) | 313 | static inline struct device_node *match_of_node(int index, |
314 | enum maxim_device_type dev_type) | ||
179 | { | 315 | { |
180 | return max14577_regulator_matches[index].of_node; | 316 | switch (dev_type) { |
317 | case MAXIM_DEVICE_TYPE_MAX77836: | ||
318 | return max77836_regulator_matches[index].of_node; | ||
319 | |||
320 | case MAXIM_DEVICE_TYPE_MAX14577: | ||
321 | default: | ||
322 | return max14577_regulator_matches[index].of_node; | ||
323 | } | ||
181 | } | 324 | } |
182 | #else /* CONFIG_OF */ | 325 | #else /* CONFIG_OF */ |
183 | static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) | 326 | static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev, |
327 | enum maxim_device_type dev_type) | ||
184 | { | 328 | { |
185 | return 0; | 329 | return 0; |
186 | } | 330 | } |
187 | static inline struct regulator_init_data *match_init_data(int index) | 331 | static inline struct regulator_init_data *match_init_data(int index, |
332 | enum maxim_device_type dev_type) | ||
188 | { | 333 | { |
189 | return NULL; | 334 | return NULL; |
190 | } | 335 | } |
191 | 336 | ||
192 | static inline struct device_node *match_of_node(int index) | 337 | static inline struct device_node *match_of_node(int index, |
338 | enum maxim_device_type dev_type) | ||
193 | { | 339 | { |
194 | return NULL; | 340 | return NULL; |
195 | } | 341 | } |
196 | #endif /* CONFIG_OF */ | 342 | #endif /* CONFIG_OF */ |
197 | 343 | ||
344 | /** | ||
345 | * Registers for regulators of max77836 use different I2C slave addresses so | ||
346 | * different regmaps must be used for them. | ||
347 | * | ||
348 | * Returns proper regmap for accessing regulator passed by id. | ||
349 | */ | ||
350 | static struct regmap *max14577_get_regmap(struct max14577 *max14577, | ||
351 | int reg_id) | ||
352 | { | ||
353 | switch (max14577->dev_type) { | ||
354 | case MAXIM_DEVICE_TYPE_MAX77836: | ||
355 | switch (reg_id) { | ||
356 | case MAX77836_SAFEOUT ... MAX77836_CHARGER: | ||
357 | return max14577->regmap; | ||
358 | default: | ||
359 | /* MAX77836_LDO1 ... MAX77836_LDO2 */ | ||
360 | return max14577->regmap_pmic; | ||
361 | } | ||
362 | |||
363 | case MAXIM_DEVICE_TYPE_MAX14577: | ||
364 | default: | ||
365 | return max14577->regmap; | ||
366 | } | ||
367 | } | ||
198 | 368 | ||
199 | static int max14577_regulator_probe(struct platform_device *pdev) | 369 | static int max14577_regulator_probe(struct platform_device *pdev) |
200 | { | 370 | { |
@@ -202,15 +372,29 @@ static int max14577_regulator_probe(struct platform_device *pdev) | |||
202 | struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev); | 372 | struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev); |
203 | int i, ret; | 373 | int i, ret; |
204 | struct regulator_config config = {}; | 374 | struct regulator_config config = {}; |
375 | const struct regulator_desc *supported_regulators; | ||
376 | unsigned int supported_regulators_size; | ||
377 | enum maxim_device_type dev_type = max14577->dev_type; | ||
205 | 378 | ||
206 | ret = max14577_regulator_dt_parse_pdata(pdev); | 379 | ret = max14577_regulator_dt_parse_pdata(pdev, dev_type); |
207 | if (ret) | 380 | if (ret) |
208 | return ret; | 381 | return ret; |
209 | 382 | ||
383 | switch (dev_type) { | ||
384 | case MAXIM_DEVICE_TYPE_MAX77836: | ||
385 | supported_regulators = max77836_supported_regulators; | ||
386 | supported_regulators_size = ARRAY_SIZE(max77836_supported_regulators); | ||
387 | break; | ||
388 | case MAXIM_DEVICE_TYPE_MAX14577: | ||
389 | default: | ||
390 | supported_regulators = max14577_supported_regulators; | ||
391 | supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators); | ||
392 | } | ||
393 | |||
210 | config.dev = &pdev->dev; | 394 | config.dev = &pdev->dev; |
211 | config.regmap = max14577->regmap; | 395 | config.driver_data = max14577; |
212 | 396 | ||
213 | for (i = 0; i < ARRAY_SIZE(supported_regulators); i++) { | 397 | for (i = 0; i < supported_regulators_size; i++) { |
214 | struct regulator_dev *regulator; | 398 | struct regulator_dev *regulator; |
215 | /* | 399 | /* |
216 | * Index of supported_regulators[] is also the id and must | 400 | * Index of supported_regulators[] is also the id and must |
@@ -220,17 +404,19 @@ static int max14577_regulator_probe(struct platform_device *pdev) | |||
220 | config.init_data = pdata->regulators[i].initdata; | 404 | config.init_data = pdata->regulators[i].initdata; |
221 | config.of_node = pdata->regulators[i].of_node; | 405 | config.of_node = pdata->regulators[i].of_node; |
222 | } else { | 406 | } else { |
223 | config.init_data = match_init_data(i); | 407 | config.init_data = match_init_data(i, dev_type); |
224 | config.of_node = match_of_node(i); | 408 | config.of_node = match_of_node(i, dev_type); |
225 | } | 409 | } |
410 | config.regmap = max14577_get_regmap(max14577, | ||
411 | supported_regulators[i].id); | ||
226 | 412 | ||
227 | regulator = devm_regulator_register(&pdev->dev, | 413 | regulator = devm_regulator_register(&pdev->dev, |
228 | &supported_regulators[i], &config); | 414 | &supported_regulators[i], &config); |
229 | if (IS_ERR(regulator)) { | 415 | if (IS_ERR(regulator)) { |
230 | ret = PTR_ERR(regulator); | 416 | ret = PTR_ERR(regulator); |
231 | dev_err(&pdev->dev, | 417 | dev_err(&pdev->dev, |
232 | "Regulator init failed for ID %d with error: %d\n", | 418 | "Regulator init failed for %d/%s with error: %d\n", |
233 | i, ret); | 419 | i, supported_regulators[i].name, ret); |
234 | return ret; | 420 | return ret; |
235 | } | 421 | } |
236 | } | 422 | } |
@@ -238,20 +424,41 @@ static int max14577_regulator_probe(struct platform_device *pdev) | |||
238 | return ret; | 424 | return ret; |
239 | } | 425 | } |
240 | 426 | ||
427 | static const struct platform_device_id max14577_regulator_id[] = { | ||
428 | { "max14577-regulator", MAXIM_DEVICE_TYPE_MAX14577, }, | ||
429 | { "max77836-regulator", MAXIM_DEVICE_TYPE_MAX77836, }, | ||
430 | { } | ||
431 | }; | ||
432 | MODULE_DEVICE_TABLE(platform, max14577_regulator_id); | ||
433 | |||
241 | static struct platform_driver max14577_regulator_driver = { | 434 | static struct platform_driver max14577_regulator_driver = { |
242 | .driver = { | 435 | .driver = { |
243 | .owner = THIS_MODULE, | 436 | .owner = THIS_MODULE, |
244 | .name = "max14577-regulator", | 437 | .name = "max14577-regulator", |
245 | }, | 438 | }, |
246 | .probe = max14577_regulator_probe, | 439 | .probe = max14577_regulator_probe, |
440 | .id_table = max14577_regulator_id, | ||
247 | }; | 441 | }; |
248 | 442 | ||
249 | static int __init max14577_regulator_init(void) | 443 | static int __init max14577_regulator_init(void) |
250 | { | 444 | { |
445 | /* Check for valid values for charger */ | ||
251 | BUILD_BUG_ON(MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START + | 446 | BUILD_BUG_ON(MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START + |
252 | MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP * 0xf != | 447 | MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP * 0xf != |
253 | MAX14577_REGULATOR_CURRENT_LIMIT_MAX); | 448 | MAX14577_REGULATOR_CURRENT_LIMIT_MAX); |
254 | BUILD_BUG_ON(ARRAY_SIZE(supported_regulators) != MAX14577_REG_MAX); | 449 | BUILD_BUG_ON(MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_START + |
450 | MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_STEP * 0xf != | ||
451 | MAX77836_REGULATOR_CURRENT_LIMIT_MAX); | ||
452 | /* Valid charger current values must be provided for each chipset */ | ||
453 | BUILD_BUG_ON(ARRAY_SIZE(maxim_charger_currents) != MAXIM_DEVICE_TYPE_NUM); | ||
454 | |||
455 | BUILD_BUG_ON(ARRAY_SIZE(max14577_supported_regulators) != MAX14577_REGULATOR_NUM); | ||
456 | BUILD_BUG_ON(ARRAY_SIZE(max77836_supported_regulators) != MAX77836_REGULATOR_NUM); | ||
457 | |||
458 | BUILD_BUG_ON(MAX77836_REGULATOR_LDO_VOLTAGE_MIN + | ||
459 | (MAX77836_REGULATOR_LDO_VOLTAGE_STEP * | ||
460 | (MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM - 1)) != | ||
461 | MAX77836_REGULATOR_LDO_VOLTAGE_MAX); | ||
255 | 462 | ||
256 | return platform_driver_register(&max14577_regulator_driver); | 463 | return platform_driver_register(&max14577_regulator_driver); |
257 | } | 464 | } |
@@ -264,6 +471,6 @@ static void __exit max14577_regulator_exit(void) | |||
264 | module_exit(max14577_regulator_exit); | 471 | module_exit(max14577_regulator_exit); |
265 | 472 | ||
266 | MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>"); | 473 | MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>"); |
267 | MODULE_DESCRIPTION("MAXIM 14577 regulator driver"); | 474 | MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver"); |
268 | MODULE_LICENSE("GPL"); | 475 | MODULE_LICENSE("GPL"); |
269 | MODULE_ALIAS("platform:max14577-regulator"); | 476 | MODULE_ALIAS("platform:max14577-regulator"); |
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index a673e5b6a2e0..e371f5af11f5 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c | |||
@@ -655,7 +655,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma) | |||
655 | 655 | ||
656 | if (mem->addr & ~PAGE_MASK) | 656 | if (mem->addr & ~PAGE_MASK) |
657 | return -ENODEV; | 657 | return -ENODEV; |
658 | if (vma->vm_end - vma->vm_start > mem->size) | 658 | if (vma->vm_end - vma->vm_start > PAGE_ALIGN(mem->size)) |
659 | return -EINVAL; | 659 | return -EINVAL; |
660 | 660 | ||
661 | vma->vm_ops = &uio_physical_vm_ops; | 661 | vma->vm_ops = &uio_physical_vm_ops; |
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c index 1270f3b26139..8d0bba469566 100644 --- a/drivers/uio/uio_dmem_genirq.c +++ b/drivers/uio/uio_dmem_genirq.c | |||
@@ -204,7 +204,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev) | |||
204 | ret = platform_get_irq(pdev, 0); | 204 | ret = platform_get_irq(pdev, 0); |
205 | if (ret < 0) { | 205 | if (ret < 0) { |
206 | dev_err(&pdev->dev, "failed to get IRQ\n"); | 206 | dev_err(&pdev->dev, "failed to get IRQ\n"); |
207 | goto bad0; | 207 | goto bad1; |
208 | } | 208 | } |
209 | uioinfo->irq = ret; | 209 | uioinfo->irq = ret; |
210 | } | 210 | } |
@@ -275,6 +275,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev) | |||
275 | ret = uio_register_device(&pdev->dev, priv->uioinfo); | 275 | ret = uio_register_device(&pdev->dev, priv->uioinfo); |
276 | if (ret) { | 276 | if (ret) { |
277 | dev_err(&pdev->dev, "unable to register uio device\n"); | 277 | dev_err(&pdev->dev, "unable to register uio device\n"); |
278 | pm_runtime_disable(&pdev->dev); | ||
278 | goto bad1; | 279 | goto bad1; |
279 | } | 280 | } |
280 | 281 | ||
@@ -282,7 +283,6 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev) | |||
282 | return 0; | 283 | return 0; |
283 | bad1: | 284 | bad1: |
284 | kfree(priv); | 285 | kfree(priv); |
285 | pm_runtime_disable(&pdev->dev); | ||
286 | bad0: | 286 | bad0: |
287 | /* kfree uioinfo for OF */ | 287 | /* kfree uioinfo for OF */ |
288 | if (pdev->dev.of_node) | 288 | if (pdev->dev.of_node) |
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index ff52618cafbe..5d7341520544 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c | |||
@@ -1078,6 +1078,8 @@ static void w1_search_process(struct w1_master *dev, u8 search_type) | |||
1078 | * w1_process_callbacks() - execute each dev->async_list callback entry | 1078 | * w1_process_callbacks() - execute each dev->async_list callback entry |
1079 | * @dev: w1_master device | 1079 | * @dev: w1_master device |
1080 | * | 1080 | * |
1081 | * The w1 master list_mutex must be held. | ||
1082 | * | ||
1081 | * Return: 1 if there were commands to executed 0 otherwise | 1083 | * Return: 1 if there were commands to executed 0 otherwise |
1082 | */ | 1084 | */ |
1083 | int w1_process_callbacks(struct w1_master *dev) | 1085 | int w1_process_callbacks(struct w1_master *dev) |
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h index 734dab7fc687..56a49ba41d83 100644 --- a/drivers/w1/w1.h +++ b/drivers/w1/w1.h | |||
@@ -203,7 +203,6 @@ enum w1_master_flags { | |||
203 | * @search_id: allows continuing a search | 203 | * @search_id: allows continuing a search |
204 | * @refcnt: reference count | 204 | * @refcnt: reference count |
205 | * @priv: private data storage | 205 | * @priv: private data storage |
206 | * @priv_size: size allocated | ||
207 | * @enable_pullup: allows a strong pullup | 206 | * @enable_pullup: allows a strong pullup |
208 | * @pullup_duration: time for the next strong pullup | 207 | * @pullup_duration: time for the next strong pullup |
209 | * @flags: one of w1_master_flags | 208 | * @flags: one of w1_master_flags |
@@ -214,7 +213,6 @@ enum w1_master_flags { | |||
214 | * @dev: sysfs device | 213 | * @dev: sysfs device |
215 | * @bus_master: io operations available | 214 | * @bus_master: io operations available |
216 | * @seq: sequence number used for netlink broadcasts | 215 | * @seq: sequence number used for netlink broadcasts |
217 | * @portid: destination for the current netlink command | ||
218 | */ | 216 | */ |
219 | struct w1_master | 217 | struct w1_master |
220 | { | 218 | { |
@@ -241,7 +239,6 @@ struct w1_master | |||
241 | atomic_t refcnt; | 239 | atomic_t refcnt; |
242 | 240 | ||
243 | void *priv; | 241 | void *priv; |
244 | int priv_size; | ||
245 | 242 | ||
246 | /** 5V strong pullup enabled flag, 1 enabled, zero disabled. */ | 243 | /** 5V strong pullup enabled flag, 1 enabled, zero disabled. */ |
247 | int enable_pullup; | 244 | int enable_pullup; |
@@ -260,11 +257,6 @@ struct w1_master | |||
260 | struct w1_bus_master *bus_master; | 257 | struct w1_bus_master *bus_master; |
261 | 258 | ||
262 | u32 seq; | 259 | u32 seq; |
263 | /* port id to send netlink responses to. The value is temporarily | ||
264 | * stored here while processing a message, set after locking the | ||
265 | * mutex, zero before unlocking the mutex. | ||
266 | */ | ||
267 | u32 portid; | ||
268 | }; | 260 | }; |
269 | 261 | ||
270 | /** | 262 | /** |
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c index 9b084db739c7..728039d2efe1 100644 --- a/drivers/w1/w1_int.c +++ b/drivers/w1/w1_int.c | |||
@@ -219,9 +219,13 @@ void __w1_remove_master_device(struct w1_master *dev) | |||
219 | 219 | ||
220 | if (msleep_interruptible(1000)) | 220 | if (msleep_interruptible(1000)) |
221 | flush_signals(current); | 221 | flush_signals(current); |
222 | mutex_lock(&dev->list_mutex); | ||
222 | w1_process_callbacks(dev); | 223 | w1_process_callbacks(dev); |
224 | mutex_unlock(&dev->list_mutex); | ||
223 | } | 225 | } |
226 | mutex_lock(&dev->list_mutex); | ||
224 | w1_process_callbacks(dev); | 227 | w1_process_callbacks(dev); |
228 | mutex_unlock(&dev->list_mutex); | ||
225 | 229 | ||
226 | memset(&msg, 0, sizeof(msg)); | 230 | memset(&msg, 0, sizeof(msg)); |
227 | msg.id.mst.id = dev->id; | 231 | msg.id.mst.id = dev->id; |
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c index a02704a59321..351a2978ba72 100644 --- a/drivers/w1/w1_netlink.c +++ b/drivers/w1/w1_netlink.c | |||
@@ -29,51 +29,247 @@ | |||
29 | #include "w1_netlink.h" | 29 | #include "w1_netlink.h" |
30 | 30 | ||
31 | #if defined(CONFIG_W1_CON) && (defined(CONFIG_CONNECTOR) || (defined(CONFIG_CONNECTOR_MODULE) && defined(CONFIG_W1_MODULE))) | 31 | #if defined(CONFIG_W1_CON) && (defined(CONFIG_CONNECTOR) || (defined(CONFIG_CONNECTOR_MODULE) && defined(CONFIG_W1_MODULE))) |
32 | void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg) | 32 | |
33 | #define MIN(a, b) (((a) < (b)) ? (a) : (b)) | ||
34 | |||
35 | /* Bundle together everything required to process a request in one memory | ||
36 | * allocation. | ||
37 | */ | ||
38 | struct w1_cb_block { | ||
39 | atomic_t refcnt; | ||
40 | u32 portid; /* Sending process port ID */ | ||
41 | /* maximum value for first_cn->len */ | ||
42 | u16 maxlen; | ||
43 | /* pointers to building up the reply message */ | ||
44 | struct cn_msg *first_cn; /* fixed once the structure is populated */ | ||
45 | struct cn_msg *cn; /* advances as cn_msg is appeneded */ | ||
46 | struct w1_netlink_msg *msg; /* advances as w1_netlink_msg is appened */ | ||
47 | struct w1_netlink_cmd *cmd; /* advances as cmds are appened */ | ||
48 | struct w1_netlink_msg *cur_msg; /* currently message being processed */ | ||
49 | /* copy of the original request follows */ | ||
50 | struct cn_msg request_cn; | ||
51 | /* followed by variable length: | ||
52 | * cn_msg, data (w1_netlink_msg and w1_netlink_cmd) | ||
53 | * one or more struct w1_cb_node | ||
54 | * reply first_cn, data (w1_netlink_msg and w1_netlink_cmd) | ||
55 | */ | ||
56 | }; | ||
57 | struct w1_cb_node { | ||
58 | struct w1_async_cmd async; | ||
59 | /* pointers within w1_cb_block and cn data */ | ||
60 | struct w1_cb_block *block; | ||
61 | struct w1_netlink_msg *msg; | ||
62 | struct w1_slave *sl; | ||
63 | struct w1_master *dev; | ||
64 | }; | ||
65 | |||
66 | /** | ||
67 | * w1_reply_len() - calculate current reply length, compare to maxlen | ||
68 | * @block: block to calculate | ||
69 | * | ||
70 | * Calculates the current message length including possible multiple | ||
71 | * cn_msg and data, excludes the first sizeof(struct cn_msg). Direclty | ||
72 | * compariable to maxlen and usable to send the message. | ||
73 | */ | ||
74 | static u16 w1_reply_len(struct w1_cb_block *block) | ||
75 | { | ||
76 | if (!block->cn) | ||
77 | return 0; | ||
78 | return (u8 *)block->cn - (u8 *)block->first_cn + block->cn->len; | ||
79 | } | ||
80 | |||
81 | static void w1_unref_block(struct w1_cb_block *block) | ||
82 | { | ||
83 | if (atomic_sub_return(1, &block->refcnt) == 0) { | ||
84 | u16 len = w1_reply_len(block); | ||
85 | if (len) { | ||
86 | cn_netlink_send_mult(block->first_cn, len, | ||
87 | block->portid, 0, GFP_KERNEL); | ||
88 | } | ||
89 | kfree(block); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | /** | ||
94 | * w1_reply_make_space() - send message if needed to make space | ||
95 | * @block: block to make space on | ||
96 | * @space: how many bytes requested | ||
97 | * | ||
98 | * Verify there is enough room left for the caller to add "space" bytes to the | ||
99 | * message, if there isn't send the message and reset. | ||
100 | */ | ||
101 | static void w1_reply_make_space(struct w1_cb_block *block, u16 space) | ||
102 | { | ||
103 | u16 len = w1_reply_len(block); | ||
104 | if (len + space >= block->maxlen) { | ||
105 | cn_netlink_send_mult(block->first_cn, len, block->portid, 0, GFP_KERNEL); | ||
106 | block->first_cn->len = 0; | ||
107 | block->cn = NULL; | ||
108 | block->msg = NULL; | ||
109 | block->cmd = NULL; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | /* Early send when replies aren't bundled. */ | ||
114 | static void w1_netlink_check_send(struct w1_cb_block *block) | ||
115 | { | ||
116 | if (!(block->request_cn.flags & W1_CN_BUNDLE) && block->cn) | ||
117 | w1_reply_make_space(block, block->maxlen); | ||
118 | } | ||
119 | |||
120 | /** | ||
121 | * w1_netlink_setup_msg() - prepare to write block->msg | ||
122 | * @block: block to operate on | ||
123 | * @ack: determines if cn can be reused | ||
124 | * | ||
125 | * block->cn will be setup with the correct ack, advancing if needed | ||
126 | * block->cn->len does not include space for block->msg | ||
127 | * block->msg advances but remains uninitialized | ||
128 | */ | ||
129 | static void w1_netlink_setup_msg(struct w1_cb_block *block, u32 ack) | ||
130 | { | ||
131 | if (block->cn && block->cn->ack == ack) { | ||
132 | block->msg = (struct w1_netlink_msg *)(block->cn->data + block->cn->len); | ||
133 | } else { | ||
134 | /* advance or set to data */ | ||
135 | if (block->cn) | ||
136 | block->cn = (struct cn_msg *)(block->cn->data + | ||
137 | block->cn->len); | ||
138 | else | ||
139 | block->cn = block->first_cn; | ||
140 | |||
141 | memcpy(block->cn, &block->request_cn, sizeof(*block->cn)); | ||
142 | block->cn->len = 0; | ||
143 | block->cn->ack = ack; | ||
144 | block->msg = (struct w1_netlink_msg *)block->cn->data; | ||
145 | } | ||
146 | } | ||
147 | |||
148 | /* Append cmd to msg, include cmd->data as well. This is because | ||
149 | * any following data goes with the command and in the case of a read is | ||
150 | * the results. | ||
151 | */ | ||
152 | static void w1_netlink_queue_cmd(struct w1_cb_block *block, | ||
153 | struct w1_netlink_cmd *cmd) | ||
154 | { | ||
155 | u32 space; | ||
156 | w1_reply_make_space(block, sizeof(struct cn_msg) + | ||
157 | sizeof(struct w1_netlink_msg) + sizeof(*cmd) + cmd->len); | ||
158 | |||
159 | /* There's a status message sent after each command, so no point | ||
160 | * in trying to bundle this cmd after an existing one, because | ||
161 | * there won't be one. Allocate and copy over a new cn_msg. | ||
162 | */ | ||
163 | w1_netlink_setup_msg(block, block->request_cn.seq + 1); | ||
164 | memcpy(block->msg, block->cur_msg, sizeof(*block->msg)); | ||
165 | block->cn->len += sizeof(*block->msg); | ||
166 | block->msg->len = 0; | ||
167 | block->cmd = (struct w1_netlink_cmd *)(block->msg->data); | ||
168 | |||
169 | space = sizeof(*cmd) + cmd->len; | ||
170 | if (block->cmd != cmd) | ||
171 | memcpy(block->cmd, cmd, space); | ||
172 | block->cn->len += space; | ||
173 | block->msg->len += space; | ||
174 | } | ||
175 | |||
176 | /* Append req_msg and req_cmd, no other commands and no data from req_cmd are | ||
177 | * copied. | ||
178 | */ | ||
179 | static void w1_netlink_queue_status(struct w1_cb_block *block, | ||
180 | struct w1_netlink_msg *req_msg, struct w1_netlink_cmd *req_cmd, | ||
181 | int error) | ||
33 | { | 182 | { |
34 | char buf[sizeof(struct cn_msg) + sizeof(struct w1_netlink_msg)]; | 183 | u16 space = sizeof(struct cn_msg) + sizeof(*req_msg) + sizeof(*req_cmd); |
35 | struct cn_msg *m = (struct cn_msg *)buf; | 184 | w1_reply_make_space(block, space); |
36 | struct w1_netlink_msg *w = (struct w1_netlink_msg *)(m+1); | 185 | w1_netlink_setup_msg(block, block->request_cn.ack); |
186 | |||
187 | memcpy(block->msg, req_msg, sizeof(*req_msg)); | ||
188 | block->cn->len += sizeof(*req_msg); | ||
189 | block->msg->len = 0; | ||
190 | block->msg->status = (u8)-error; | ||
191 | if (req_cmd) { | ||
192 | struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)block->msg->data; | ||
193 | memcpy(cmd, req_cmd, sizeof(*cmd)); | ||
194 | block->cn->len += sizeof(*cmd); | ||
195 | block->msg->len += sizeof(*cmd); | ||
196 | cmd->len = 0; | ||
197 | } | ||
198 | w1_netlink_check_send(block); | ||
199 | } | ||
37 | 200 | ||
38 | memset(buf, 0, sizeof(buf)); | 201 | /** |
202 | * w1_netlink_send_error() - sends the error message now | ||
203 | * @cn: original cn_msg | ||
204 | * @msg: original w1_netlink_msg | ||
205 | * @portid: where to send it | ||
206 | * @error: error status | ||
207 | * | ||
208 | * Use when a block isn't available to queue the message to and cn, msg | ||
209 | * might not be contiguous. | ||
210 | */ | ||
211 | static void w1_netlink_send_error(struct cn_msg *cn, struct w1_netlink_msg *msg, | ||
212 | int portid, int error) | ||
213 | { | ||
214 | struct { | ||
215 | struct cn_msg cn; | ||
216 | struct w1_netlink_msg msg; | ||
217 | } packet; | ||
218 | memcpy(&packet.cn, cn, sizeof(packet.cn)); | ||
219 | memcpy(&packet.msg, msg, sizeof(packet.msg)); | ||
220 | packet.cn.len = sizeof(packet.msg); | ||
221 | packet.msg.len = 0; | ||
222 | packet.msg.status = (u8)-error; | ||
223 | cn_netlink_send(&packet.cn, portid, 0, GFP_KERNEL); | ||
224 | } | ||
225 | |||
226 | /** | ||
227 | * w1_netlink_send() - sends w1 netlink notifications | ||
228 | * @dev: w1_master the even is associated with or for | ||
229 | * @msg: w1_netlink_msg message to be sent | ||
230 | * | ||
231 | * This are notifications generated from the kernel. | ||
232 | */ | ||
233 | void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg) | ||
234 | { | ||
235 | struct { | ||
236 | struct cn_msg cn; | ||
237 | struct w1_netlink_msg msg; | ||
238 | } packet; | ||
239 | memset(&packet, 0, sizeof(packet)); | ||
39 | 240 | ||
40 | m->id.idx = CN_W1_IDX; | 241 | packet.cn.id.idx = CN_W1_IDX; |
41 | m->id.val = CN_W1_VAL; | 242 | packet.cn.id.val = CN_W1_VAL; |
42 | 243 | ||
43 | m->seq = dev->seq++; | 244 | packet.cn.seq = dev->seq++; |
44 | m->len = sizeof(struct w1_netlink_msg); | 245 | packet.cn.len = sizeof(*msg); |
45 | 246 | ||
46 | memcpy(w, msg, sizeof(struct w1_netlink_msg)); | 247 | memcpy(&packet.msg, msg, sizeof(*msg)); |
248 | packet.msg.len = 0; | ||
47 | 249 | ||
48 | cn_netlink_send(m, dev->portid, 0, GFP_KERNEL); | 250 | cn_netlink_send(&packet.cn, 0, 0, GFP_KERNEL); |
49 | } | 251 | } |
50 | 252 | ||
51 | static void w1_send_slave(struct w1_master *dev, u64 rn) | 253 | static void w1_send_slave(struct w1_master *dev, u64 rn) |
52 | { | 254 | { |
53 | struct cn_msg *msg = dev->priv; | 255 | struct w1_cb_block *block = dev->priv; |
54 | struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1); | 256 | struct w1_netlink_cmd *cache_cmd = block->cmd; |
55 | struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1); | ||
56 | int avail; | ||
57 | u64 *data; | 257 | u64 *data; |
58 | 258 | ||
59 | avail = dev->priv_size - cmd->len; | 259 | w1_reply_make_space(block, sizeof(*data)); |
60 | 260 | ||
61 | if (avail < 8) { | 261 | /* Add cmd back if the packet was sent */ |
62 | msg->ack++; | 262 | if (!block->cmd) { |
63 | cn_netlink_send(msg, dev->portid, 0, GFP_KERNEL); | 263 | cache_cmd->len = 0; |
64 | 264 | w1_netlink_queue_cmd(block, cache_cmd); | |
65 | msg->len = sizeof(struct w1_netlink_msg) + | ||
66 | sizeof(struct w1_netlink_cmd); | ||
67 | hdr->len = sizeof(struct w1_netlink_cmd); | ||
68 | cmd->len = 0; | ||
69 | } | 265 | } |
70 | 266 | ||
71 | data = (void *)(cmd + 1) + cmd->len; | 267 | data = (u64 *)(block->cmd->data + block->cmd->len); |
72 | 268 | ||
73 | *data = rn; | 269 | *data = rn; |
74 | cmd->len += 8; | 270 | block->cn->len += sizeof(*data); |
75 | hdr->len += 8; | 271 | block->msg->len += sizeof(*data); |
76 | msg->len += 8; | 272 | block->cmd->len += sizeof(*data); |
77 | } | 273 | } |
78 | 274 | ||
79 | static void w1_found_send_slave(struct w1_master *dev, u64 rn) | 275 | static void w1_found_send_slave(struct w1_master *dev, u64 rn) |
@@ -85,40 +281,15 @@ static void w1_found_send_slave(struct w1_master *dev, u64 rn) | |||
85 | } | 281 | } |
86 | 282 | ||
87 | /* Get the current slave list, or search (with or without alarm) */ | 283 | /* Get the current slave list, or search (with or without alarm) */ |
88 | static int w1_get_slaves(struct w1_master *dev, | 284 | static int w1_get_slaves(struct w1_master *dev, struct w1_netlink_cmd *req_cmd) |
89 | struct cn_msg *req_msg, struct w1_netlink_msg *req_hdr, | ||
90 | struct w1_netlink_cmd *req_cmd) | ||
91 | { | 285 | { |
92 | struct cn_msg *msg; | ||
93 | struct w1_netlink_msg *hdr; | ||
94 | struct w1_netlink_cmd *cmd; | ||
95 | struct w1_slave *sl; | 286 | struct w1_slave *sl; |
96 | 287 | ||
97 | msg = kzalloc(PAGE_SIZE, GFP_KERNEL); | 288 | req_cmd->len = 0; |
98 | if (!msg) | 289 | w1_netlink_queue_cmd(dev->priv, req_cmd); |
99 | return -ENOMEM; | ||
100 | |||
101 | msg->id = req_msg->id; | ||
102 | msg->seq = req_msg->seq; | ||
103 | msg->ack = 0; | ||
104 | msg->len = sizeof(struct w1_netlink_msg) + | ||
105 | sizeof(struct w1_netlink_cmd); | ||
106 | |||
107 | hdr = (struct w1_netlink_msg *)(msg + 1); | ||
108 | cmd = (struct w1_netlink_cmd *)(hdr + 1); | ||
109 | |||
110 | hdr->type = W1_MASTER_CMD; | ||
111 | hdr->id = req_hdr->id; | ||
112 | hdr->len = sizeof(struct w1_netlink_cmd); | ||
113 | |||
114 | cmd->cmd = req_cmd->cmd; | ||
115 | cmd->len = 0; | ||
116 | |||
117 | dev->priv = msg; | ||
118 | dev->priv_size = PAGE_SIZE - msg->len - sizeof(struct cn_msg); | ||
119 | 290 | ||
120 | if (req_cmd->cmd == W1_CMD_LIST_SLAVES) { | 291 | if (req_cmd->cmd == W1_CMD_LIST_SLAVES) { |
121 | __u64 rn; | 292 | u64 rn; |
122 | mutex_lock(&dev->list_mutex); | 293 | mutex_lock(&dev->list_mutex); |
123 | list_for_each_entry(sl, &dev->slist, w1_slave_entry) { | 294 | list_for_each_entry(sl, &dev->slist, w1_slave_entry) { |
124 | memcpy(&rn, &sl->reg_num, sizeof(rn)); | 295 | memcpy(&rn, &sl->reg_num, sizeof(rn)); |
@@ -126,73 +297,26 @@ static int w1_get_slaves(struct w1_master *dev, | |||
126 | } | 297 | } |
127 | mutex_unlock(&dev->list_mutex); | 298 | mutex_unlock(&dev->list_mutex); |
128 | } else { | 299 | } else { |
129 | w1_search_process_cb(dev, cmd->cmd == W1_CMD_ALARM_SEARCH ? | 300 | w1_search_process_cb(dev, req_cmd->cmd == W1_CMD_ALARM_SEARCH ? |
130 | W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave); | 301 | W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave); |
131 | } | 302 | } |
132 | 303 | ||
133 | msg->ack = 0; | ||
134 | cn_netlink_send(msg, dev->portid, 0, GFP_KERNEL); | ||
135 | |||
136 | dev->priv = NULL; | ||
137 | dev->priv_size = 0; | ||
138 | |||
139 | kfree(msg); | ||
140 | |||
141 | return 0; | 304 | return 0; |
142 | } | 305 | } |
143 | 306 | ||
144 | static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr, | 307 | static int w1_process_command_io(struct w1_master *dev, |
145 | struct w1_netlink_cmd *cmd, u32 portid) | 308 | struct w1_netlink_cmd *cmd) |
146 | { | ||
147 | void *data; | ||
148 | struct w1_netlink_msg *h; | ||
149 | struct w1_netlink_cmd *c; | ||
150 | struct cn_msg *cm; | ||
151 | int err; | ||
152 | |||
153 | data = kzalloc(sizeof(struct cn_msg) + | ||
154 | sizeof(struct w1_netlink_msg) + | ||
155 | sizeof(struct w1_netlink_cmd) + | ||
156 | cmd->len, GFP_KERNEL); | ||
157 | if (!data) | ||
158 | return -ENOMEM; | ||
159 | |||
160 | cm = (struct cn_msg *)(data); | ||
161 | h = (struct w1_netlink_msg *)(cm + 1); | ||
162 | c = (struct w1_netlink_cmd *)(h + 1); | ||
163 | |||
164 | memcpy(cm, msg, sizeof(struct cn_msg)); | ||
165 | memcpy(h, hdr, sizeof(struct w1_netlink_msg)); | ||
166 | memcpy(c, cmd, sizeof(struct w1_netlink_cmd)); | ||
167 | |||
168 | cm->ack = msg->seq+1; | ||
169 | cm->len = sizeof(struct w1_netlink_msg) + | ||
170 | sizeof(struct w1_netlink_cmd) + cmd->len; | ||
171 | |||
172 | h->len = sizeof(struct w1_netlink_cmd) + cmd->len; | ||
173 | |||
174 | memcpy(c->data, cmd->data, c->len); | ||
175 | |||
176 | err = cn_netlink_send(cm, portid, 0, GFP_KERNEL); | ||
177 | |||
178 | kfree(data); | ||
179 | |||
180 | return err; | ||
181 | } | ||
182 | |||
183 | static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg, | ||
184 | struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) | ||
185 | { | 309 | { |
186 | int err = 0; | 310 | int err = 0; |
187 | 311 | ||
188 | switch (cmd->cmd) { | 312 | switch (cmd->cmd) { |
189 | case W1_CMD_TOUCH: | 313 | case W1_CMD_TOUCH: |
190 | w1_touch_block(dev, cmd->data, cmd->len); | 314 | w1_touch_block(dev, cmd->data, cmd->len); |
191 | w1_send_read_reply(msg, hdr, cmd, dev->portid); | 315 | w1_netlink_queue_cmd(dev->priv, cmd); |
192 | break; | 316 | break; |
193 | case W1_CMD_READ: | 317 | case W1_CMD_READ: |
194 | w1_read_block(dev, cmd->data, cmd->len); | 318 | w1_read_block(dev, cmd->data, cmd->len); |
195 | w1_send_read_reply(msg, hdr, cmd, dev->portid); | 319 | w1_netlink_queue_cmd(dev->priv, cmd); |
196 | break; | 320 | break; |
197 | case W1_CMD_WRITE: | 321 | case W1_CMD_WRITE: |
198 | w1_write_block(dev, cmd->data, cmd->len); | 322 | w1_write_block(dev, cmd->data, cmd->len); |
@@ -206,14 +330,13 @@ static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg, | |||
206 | } | 330 | } |
207 | 331 | ||
208 | static int w1_process_command_addremove(struct w1_master *dev, | 332 | static int w1_process_command_addremove(struct w1_master *dev, |
209 | struct cn_msg *msg, struct w1_netlink_msg *hdr, | ||
210 | struct w1_netlink_cmd *cmd) | 333 | struct w1_netlink_cmd *cmd) |
211 | { | 334 | { |
212 | struct w1_slave *sl; | 335 | struct w1_slave *sl; |
213 | int err = 0; | 336 | int err = 0; |
214 | struct w1_reg_num *id; | 337 | struct w1_reg_num *id; |
215 | 338 | ||
216 | if (cmd->len != 8) | 339 | if (cmd->len != sizeof(*id)) |
217 | return -EINVAL; | 340 | return -EINVAL; |
218 | 341 | ||
219 | id = (struct w1_reg_num *)cmd->data; | 342 | id = (struct w1_reg_num *)cmd->data; |
@@ -241,7 +364,6 @@ static int w1_process_command_addremove(struct w1_master *dev, | |||
241 | } | 364 | } |
242 | 365 | ||
243 | static int w1_process_command_master(struct w1_master *dev, | 366 | static int w1_process_command_master(struct w1_master *dev, |
244 | struct cn_msg *req_msg, struct w1_netlink_msg *req_hdr, | ||
245 | struct w1_netlink_cmd *req_cmd) | 367 | struct w1_netlink_cmd *req_cmd) |
246 | { | 368 | { |
247 | int err = -EINVAL; | 369 | int err = -EINVAL; |
@@ -254,13 +376,13 @@ static int w1_process_command_master(struct w1_master *dev, | |||
254 | case W1_CMD_ALARM_SEARCH: | 376 | case W1_CMD_ALARM_SEARCH: |
255 | case W1_CMD_LIST_SLAVES: | 377 | case W1_CMD_LIST_SLAVES: |
256 | mutex_unlock(&dev->bus_mutex); | 378 | mutex_unlock(&dev->bus_mutex); |
257 | err = w1_get_slaves(dev, req_msg, req_hdr, req_cmd); | 379 | err = w1_get_slaves(dev, req_cmd); |
258 | mutex_lock(&dev->bus_mutex); | 380 | mutex_lock(&dev->bus_mutex); |
259 | break; | 381 | break; |
260 | case W1_CMD_READ: | 382 | case W1_CMD_READ: |
261 | case W1_CMD_WRITE: | 383 | case W1_CMD_WRITE: |
262 | case W1_CMD_TOUCH: | 384 | case W1_CMD_TOUCH: |
263 | err = w1_process_command_io(dev, req_msg, req_hdr, req_cmd); | 385 | err = w1_process_command_io(dev, req_cmd); |
264 | break; | 386 | break; |
265 | case W1_CMD_RESET: | 387 | case W1_CMD_RESET: |
266 | err = w1_reset_bus(dev); | 388 | err = w1_reset_bus(dev); |
@@ -269,8 +391,7 @@ static int w1_process_command_master(struct w1_master *dev, | |||
269 | case W1_CMD_SLAVE_REMOVE: | 391 | case W1_CMD_SLAVE_REMOVE: |
270 | mutex_unlock(&dev->bus_mutex); | 392 | mutex_unlock(&dev->bus_mutex); |
271 | mutex_lock(&dev->mutex); | 393 | mutex_lock(&dev->mutex); |
272 | err = w1_process_command_addremove(dev, req_msg, req_hdr, | 394 | err = w1_process_command_addremove(dev, req_cmd); |
273 | req_cmd); | ||
274 | mutex_unlock(&dev->mutex); | 395 | mutex_unlock(&dev->mutex); |
275 | mutex_lock(&dev->bus_mutex); | 396 | mutex_lock(&dev->bus_mutex); |
276 | break; | 397 | break; |
@@ -282,22 +403,21 @@ static int w1_process_command_master(struct w1_master *dev, | |||
282 | return err; | 403 | return err; |
283 | } | 404 | } |
284 | 405 | ||
285 | static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg, | 406 | static int w1_process_command_slave(struct w1_slave *sl, |
286 | struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) | 407 | struct w1_netlink_cmd *cmd) |
287 | { | 408 | { |
288 | dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n", | 409 | dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n", |
289 | __func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id, | 410 | __func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id, |
290 | sl->reg_num.crc, cmd->cmd, cmd->len); | 411 | sl->reg_num.crc, cmd->cmd, cmd->len); |
291 | 412 | ||
292 | return w1_process_command_io(sl->master, msg, hdr, cmd); | 413 | return w1_process_command_io(sl->master, cmd); |
293 | } | 414 | } |
294 | 415 | ||
295 | static int w1_process_command_root(struct cn_msg *msg, | 416 | static int w1_process_command_root(struct cn_msg *req_cn, u32 portid) |
296 | struct w1_netlink_msg *mcmd, u32 portid) | ||
297 | { | 417 | { |
298 | struct w1_master *m; | 418 | struct w1_master *dev; |
299 | struct cn_msg *cn; | 419 | struct cn_msg *cn; |
300 | struct w1_netlink_msg *w; | 420 | struct w1_netlink_msg *msg; |
301 | u32 *id; | 421 | u32 *id; |
302 | 422 | ||
303 | cn = kmalloc(PAGE_SIZE, GFP_KERNEL); | 423 | cn = kmalloc(PAGE_SIZE, GFP_KERNEL); |
@@ -307,32 +427,30 @@ static int w1_process_command_root(struct cn_msg *msg, | |||
307 | cn->id.idx = CN_W1_IDX; | 427 | cn->id.idx = CN_W1_IDX; |
308 | cn->id.val = CN_W1_VAL; | 428 | cn->id.val = CN_W1_VAL; |
309 | 429 | ||
310 | cn->seq = msg->seq; | 430 | cn->seq = req_cn->seq; |
311 | cn->ack = 1; | 431 | cn->ack = req_cn->seq + 1; |
312 | cn->len = sizeof(struct w1_netlink_msg); | 432 | cn->len = sizeof(struct w1_netlink_msg); |
313 | w = (struct w1_netlink_msg *)(cn + 1); | 433 | msg = (struct w1_netlink_msg *)cn->data; |
314 | 434 | ||
315 | w->type = W1_LIST_MASTERS; | 435 | msg->type = W1_LIST_MASTERS; |
316 | w->status = 0; | 436 | msg->status = 0; |
317 | w->len = 0; | 437 | msg->len = 0; |
318 | id = (u32 *)(w + 1); | 438 | id = (u32 *)msg->data; |
319 | 439 | ||
320 | mutex_lock(&w1_mlock); | 440 | mutex_lock(&w1_mlock); |
321 | list_for_each_entry(m, &w1_masters, w1_master_entry) { | 441 | list_for_each_entry(dev, &w1_masters, w1_master_entry) { |
322 | if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) { | 442 | if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) { |
323 | cn_netlink_send(cn, portid, 0, GFP_KERNEL); | 443 | cn_netlink_send(cn, portid, 0, GFP_KERNEL); |
324 | cn->ack++; | ||
325 | cn->len = sizeof(struct w1_netlink_msg); | 444 | cn->len = sizeof(struct w1_netlink_msg); |
326 | w->len = 0; | 445 | msg->len = 0; |
327 | id = (u32 *)(w + 1); | 446 | id = (u32 *)msg->data; |
328 | } | 447 | } |
329 | 448 | ||
330 | *id = m->id; | 449 | *id = dev->id; |
331 | w->len += sizeof(*id); | 450 | msg->len += sizeof(*id); |
332 | cn->len += sizeof(*id); | 451 | cn->len += sizeof(*id); |
333 | id++; | 452 | id++; |
334 | } | 453 | } |
335 | cn->ack = 0; | ||
336 | cn_netlink_send(cn, portid, 0, GFP_KERNEL); | 454 | cn_netlink_send(cn, portid, 0, GFP_KERNEL); |
337 | mutex_unlock(&w1_mlock); | 455 | mutex_unlock(&w1_mlock); |
338 | 456 | ||
@@ -340,100 +458,44 @@ static int w1_process_command_root(struct cn_msg *msg, | |||
340 | return 0; | 458 | return 0; |
341 | } | 459 | } |
342 | 460 | ||
343 | static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rmsg, | ||
344 | struct w1_netlink_cmd *rcmd, int portid, int error) | ||
345 | { | ||
346 | struct cn_msg *cmsg; | ||
347 | struct w1_netlink_msg *msg; | ||
348 | struct w1_netlink_cmd *cmd; | ||
349 | |||
350 | cmsg = kzalloc(sizeof(*msg) + sizeof(*cmd) + sizeof(*cmsg), GFP_KERNEL); | ||
351 | if (!cmsg) | ||
352 | return -ENOMEM; | ||
353 | |||
354 | msg = (struct w1_netlink_msg *)(cmsg + 1); | ||
355 | cmd = (struct w1_netlink_cmd *)(msg + 1); | ||
356 | |||
357 | memcpy(cmsg, rcmsg, sizeof(*cmsg)); | ||
358 | cmsg->len = sizeof(*msg); | ||
359 | |||
360 | memcpy(msg, rmsg, sizeof(*msg)); | ||
361 | msg->len = 0; | ||
362 | msg->status = (short)-error; | ||
363 | |||
364 | if (rcmd) { | ||
365 | memcpy(cmd, rcmd, sizeof(*cmd)); | ||
366 | cmd->len = 0; | ||
367 | msg->len += sizeof(*cmd); | ||
368 | cmsg->len += sizeof(*cmd); | ||
369 | } | ||
370 | |||
371 | error = cn_netlink_send(cmsg, portid, 0, GFP_KERNEL); | ||
372 | kfree(cmsg); | ||
373 | |||
374 | return error; | ||
375 | } | ||
376 | |||
377 | /* Bundle together a reference count, the full message, and broken out | ||
378 | * commands to be executed on each w1 master kthread in one memory allocation. | ||
379 | */ | ||
380 | struct w1_cb_block { | ||
381 | atomic_t refcnt; | ||
382 | u32 portid; /* Sending process port ID */ | ||
383 | struct cn_msg msg; | ||
384 | /* cn_msg data */ | ||
385 | /* one or more variable length struct w1_cb_node */ | ||
386 | }; | ||
387 | struct w1_cb_node { | ||
388 | struct w1_async_cmd async; | ||
389 | /* pointers within w1_cb_block and msg data */ | ||
390 | struct w1_cb_block *block; | ||
391 | struct w1_netlink_msg *m; | ||
392 | struct w1_slave *sl; | ||
393 | struct w1_master *dev; | ||
394 | }; | ||
395 | |||
396 | static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd) | 461 | static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd) |
397 | { | 462 | { |
398 | struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node, | 463 | struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node, |
399 | async); | 464 | async); |
400 | u16 mlen = node->m->len; | 465 | u16 mlen = node->msg->len; |
401 | u8 *cmd_data = node->m->data; | 466 | u16 len; |
402 | int err = 0; | 467 | int err = 0; |
403 | struct w1_slave *sl = node->sl; | 468 | struct w1_slave *sl = node->sl; |
404 | struct w1_netlink_cmd *cmd = NULL; | 469 | struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)node->msg->data; |
405 | 470 | ||
406 | mutex_lock(&dev->bus_mutex); | 471 | mutex_lock(&dev->bus_mutex); |
407 | dev->portid = node->block->portid; | 472 | dev->priv = node->block; |
408 | if (sl && w1_reset_select_slave(sl)) | 473 | if (sl && w1_reset_select_slave(sl)) |
409 | err = -ENODEV; | 474 | err = -ENODEV; |
475 | node->block->cur_msg = node->msg; | ||
410 | 476 | ||
411 | while (mlen && !err) { | 477 | while (mlen && !err) { |
412 | cmd = (struct w1_netlink_cmd *)cmd_data; | ||
413 | |||
414 | if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) { | 478 | if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) { |
415 | err = -E2BIG; | 479 | err = -E2BIG; |
416 | break; | 480 | break; |
417 | } | 481 | } |
418 | 482 | ||
419 | if (sl) | 483 | if (sl) |
420 | err = w1_process_command_slave(sl, &node->block->msg, | 484 | err = w1_process_command_slave(sl, cmd); |
421 | node->m, cmd); | ||
422 | else | 485 | else |
423 | err = w1_process_command_master(dev, &node->block->msg, | 486 | err = w1_process_command_master(dev, cmd); |
424 | node->m, cmd); | 487 | w1_netlink_check_send(node->block); |
425 | 488 | ||
426 | w1_netlink_send_error(&node->block->msg, node->m, cmd, | 489 | w1_netlink_queue_status(node->block, node->msg, cmd, err); |
427 | node->block->portid, err); | ||
428 | err = 0; | 490 | err = 0; |
429 | 491 | ||
430 | cmd_data += cmd->len + sizeof(struct w1_netlink_cmd); | 492 | len = sizeof(*cmd) + cmd->len; |
431 | mlen -= cmd->len + sizeof(struct w1_netlink_cmd); | 493 | cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len); |
494 | mlen -= len; | ||
432 | } | 495 | } |
433 | 496 | ||
434 | if (!cmd || err) | 497 | if (!cmd || err) |
435 | w1_netlink_send_error(&node->block->msg, node->m, cmd, | 498 | w1_netlink_queue_status(node->block, node->msg, cmd, err); |
436 | node->block->portid, err); | ||
437 | 499 | ||
438 | /* ref taken in w1_search_slave or w1_search_master_id when building | 500 | /* ref taken in w1_search_slave or w1_search_master_id when building |
439 | * the block | 501 | * the block |
@@ -442,99 +504,186 @@ static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd) | |||
442 | w1_unref_slave(sl); | 504 | w1_unref_slave(sl); |
443 | else | 505 | else |
444 | atomic_dec(&dev->refcnt); | 506 | atomic_dec(&dev->refcnt); |
445 | dev->portid = 0; | 507 | dev->priv = NULL; |
446 | mutex_unlock(&dev->bus_mutex); | 508 | mutex_unlock(&dev->bus_mutex); |
447 | 509 | ||
448 | mutex_lock(&dev->list_mutex); | 510 | mutex_lock(&dev->list_mutex); |
449 | list_del(&async_cmd->async_entry); | 511 | list_del(&async_cmd->async_entry); |
450 | mutex_unlock(&dev->list_mutex); | 512 | mutex_unlock(&dev->list_mutex); |
451 | 513 | ||
452 | if (atomic_sub_return(1, &node->block->refcnt) == 0) | 514 | w1_unref_block(node->block); |
453 | kfree(node->block); | 515 | } |
516 | |||
517 | static void w1_list_count_cmds(struct w1_netlink_msg *msg, int *cmd_count, | ||
518 | u16 *slave_len) | ||
519 | { | ||
520 | struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)msg->data; | ||
521 | u16 mlen = msg->len; | ||
522 | u16 len; | ||
523 | int slave_list = 0; | ||
524 | while (mlen) { | ||
525 | if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) | ||
526 | break; | ||
527 | |||
528 | switch (cmd->cmd) { | ||
529 | case W1_CMD_SEARCH: | ||
530 | case W1_CMD_ALARM_SEARCH: | ||
531 | case W1_CMD_LIST_SLAVES: | ||
532 | ++slave_list; | ||
533 | } | ||
534 | ++*cmd_count; | ||
535 | len = sizeof(*cmd) + cmd->len; | ||
536 | cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len); | ||
537 | mlen -= len; | ||
538 | } | ||
539 | |||
540 | if (slave_list) { | ||
541 | struct w1_master *dev = w1_search_master_id(msg->id.mst.id); | ||
542 | if (dev) { | ||
543 | /* Bytes, and likely an overstimate, and if it isn't | ||
544 | * the results can still be split between packets. | ||
545 | */ | ||
546 | *slave_len += sizeof(struct w1_reg_num) * slave_list * | ||
547 | (dev->slave_count + dev->max_slave_count); | ||
548 | /* search incremented it */ | ||
549 | atomic_dec(&dev->refcnt); | ||
550 | } | ||
551 | } | ||
454 | } | 552 | } |
455 | 553 | ||
456 | static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) | 554 | static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp) |
457 | { | 555 | { |
458 | struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1); | 556 | struct w1_netlink_msg *msg = (struct w1_netlink_msg *)(cn + 1); |
459 | struct w1_slave *sl; | 557 | struct w1_slave *sl; |
460 | struct w1_master *dev; | 558 | struct w1_master *dev; |
461 | u16 msg_len; | 559 | u16 msg_len; |
560 | u16 slave_len = 0; | ||
462 | int err = 0; | 561 | int err = 0; |
463 | struct w1_cb_block *block = NULL; | 562 | struct w1_cb_block *block = NULL; |
464 | struct w1_cb_node *node = NULL; | 563 | struct w1_cb_node *node = NULL; |
465 | int node_count = 0; | 564 | int node_count = 0; |
565 | int cmd_count = 0; | ||
566 | |||
567 | /* If any unknown flag is set let the application know, that way | ||
568 | * applications can detect the absence of features in kernels that | ||
569 | * don't know about them. http://lwn.net/Articles/587527/ | ||
570 | */ | ||
571 | if (cn->flags & ~(W1_CN_BUNDLE)) { | ||
572 | w1_netlink_send_error(cn, msg, nsp->portid, -EINVAL); | ||
573 | return; | ||
574 | } | ||
466 | 575 | ||
467 | /* Count the number of master or slave commands there are to allocate | 576 | /* Count the number of master or slave commands there are to allocate |
468 | * space for one cb_node each. | 577 | * space for one cb_node each. |
469 | */ | 578 | */ |
470 | msg_len = msg->len; | 579 | msg_len = cn->len; |
471 | while (msg_len && !err) { | 580 | while (msg_len && !err) { |
472 | if (m->len + sizeof(struct w1_netlink_msg) > msg_len) { | 581 | if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) { |
473 | err = -E2BIG; | 582 | err = -E2BIG; |
474 | break; | 583 | break; |
475 | } | 584 | } |
476 | 585 | ||
477 | if (m->type == W1_MASTER_CMD || m->type == W1_SLAVE_CMD) | 586 | /* count messages for nodes and allocate any additional space |
587 | * required for slave lists | ||
588 | */ | ||
589 | if (msg->type == W1_MASTER_CMD || msg->type == W1_SLAVE_CMD) { | ||
478 | ++node_count; | 590 | ++node_count; |
591 | w1_list_count_cmds(msg, &cmd_count, &slave_len); | ||
592 | } | ||
479 | 593 | ||
480 | msg_len -= sizeof(struct w1_netlink_msg) + m->len; | 594 | msg_len -= sizeof(struct w1_netlink_msg) + msg->len; |
481 | m = (struct w1_netlink_msg *)(((u8 *)m) + | 595 | msg = (struct w1_netlink_msg *)(((u8 *)msg) + |
482 | sizeof(struct w1_netlink_msg) + m->len); | 596 | sizeof(struct w1_netlink_msg) + msg->len); |
483 | } | 597 | } |
484 | m = (struct w1_netlink_msg *)(msg + 1); | 598 | msg = (struct w1_netlink_msg *)(cn + 1); |
485 | if (node_count) { | 599 | if (node_count) { |
486 | /* msg->len doesn't include itself */ | 600 | int size; |
487 | long size = sizeof(struct w1_cb_block) + msg->len + | 601 | u16 reply_size = sizeof(*cn) + cn->len + slave_len; |
488 | node_count*sizeof(struct w1_cb_node); | 602 | if (cn->flags & W1_CN_BUNDLE) { |
489 | block = kmalloc(size, GFP_KERNEL); | 603 | /* bundling duplicates some of the messages |
604 | reply_size += 2 * cmd_count * (sizeof(struct cn_msg) + | ||
605 | sizeof(struct w1_netlink_msg) + | ||
606 | sizeof(struct w1_netlink_cmd)); | ||
607 | } | ||
608 | reply_size = MIN(CONNECTOR_MAX_MSG_SIZE, reply_size); | ||
609 | |||
610 | /* allocate space for the block, a copy of the original message, | ||
611 | * one node per cmd to point into the original message, | ||
612 | * space for replies which is the original message size plus | ||
613 | * space for any list slave data and status messages | ||
614 | * cn->len doesn't include itself which is part of the block | ||
615 | * */ | ||
616 | size = /* block + original message */ | ||
617 | sizeof(struct w1_cb_block) + sizeof(*cn) + cn->len + | ||
618 | /* space for nodes */ | ||
619 | node_count * sizeof(struct w1_cb_node) + | ||
620 | /* replies */ | ||
621 | sizeof(struct cn_msg) + reply_size; | ||
622 | block = kzalloc(size, GFP_KERNEL); | ||
490 | if (!block) { | 623 | if (!block) { |
491 | w1_netlink_send_error(msg, m, NULL, nsp->portid, | 624 | /* if the system is already out of memory, |
492 | -ENOMEM); | 625 | * (A) will this work, and (B) would it be better |
626 | * to not try? | ||
627 | */ | ||
628 | w1_netlink_send_error(cn, msg, nsp->portid, -ENOMEM); | ||
493 | return; | 629 | return; |
494 | } | 630 | } |
495 | atomic_set(&block->refcnt, 1); | 631 | atomic_set(&block->refcnt, 1); |
496 | block->portid = nsp->portid; | 632 | block->portid = nsp->portid; |
497 | memcpy(&block->msg, msg, sizeof(*msg) + msg->len); | 633 | memcpy(&block->request_cn, cn, sizeof(*cn) + cn->len); |
498 | node = (struct w1_cb_node *)((u8 *)block->msg.data + msg->len); | 634 | node = (struct w1_cb_node *)(block->request_cn.data + cn->len); |
635 | |||
636 | /* Sneaky, when not bundling, reply_size is the allocated space | ||
637 | * required for the reply, cn_msg isn't part of maxlen so | ||
638 | * it should be reply_size - sizeof(struct cn_msg), however | ||
639 | * when checking if there is enough space, w1_reply_make_space | ||
640 | * is called with the full message size including cn_msg, | ||
641 | * because it isn't known at that time if an additional cn_msg | ||
642 | * will need to be allocated. So an extra cn_msg is added | ||
643 | * above in "size". | ||
644 | */ | ||
645 | block->maxlen = reply_size; | ||
646 | block->first_cn = (struct cn_msg *)(node + node_count); | ||
647 | memset(block->first_cn, 0, sizeof(*block->first_cn)); | ||
499 | } | 648 | } |
500 | 649 | ||
501 | msg_len = msg->len; | 650 | msg_len = cn->len; |
502 | while (msg_len && !err) { | 651 | while (msg_len && !err) { |
503 | 652 | ||
504 | dev = NULL; | 653 | dev = NULL; |
505 | sl = NULL; | 654 | sl = NULL; |
506 | 655 | ||
507 | if (m->len + sizeof(struct w1_netlink_msg) > msg_len) { | 656 | if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) { |
508 | err = -E2BIG; | 657 | err = -E2BIG; |
509 | break; | 658 | break; |
510 | } | 659 | } |
511 | 660 | ||
512 | /* execute on this thread, no need to process later */ | 661 | /* execute on this thread, no need to process later */ |
513 | if (m->type == W1_LIST_MASTERS) { | 662 | if (msg->type == W1_LIST_MASTERS) { |
514 | err = w1_process_command_root(msg, m, nsp->portid); | 663 | err = w1_process_command_root(cn, nsp->portid); |
515 | goto out_cont; | 664 | goto out_cont; |
516 | } | 665 | } |
517 | 666 | ||
518 | /* All following message types require additional data, | 667 | /* All following message types require additional data, |
519 | * check here before references are taken. | 668 | * check here before references are taken. |
520 | */ | 669 | */ |
521 | if (!m->len) { | 670 | if (!msg->len) { |
522 | err = -EPROTO; | 671 | err = -EPROTO; |
523 | goto out_cont; | 672 | goto out_cont; |
524 | } | 673 | } |
525 | 674 | ||
526 | /* both search calls take reference counts */ | 675 | /* both search calls take references */ |
527 | if (m->type == W1_MASTER_CMD) { | 676 | if (msg->type == W1_MASTER_CMD) { |
528 | dev = w1_search_master_id(m->id.mst.id); | 677 | dev = w1_search_master_id(msg->id.mst.id); |
529 | } else if (m->type == W1_SLAVE_CMD) { | 678 | } else if (msg->type == W1_SLAVE_CMD) { |
530 | sl = w1_search_slave((struct w1_reg_num *)m->id.id); | 679 | sl = w1_search_slave((struct w1_reg_num *)msg->id.id); |
531 | if (sl) | 680 | if (sl) |
532 | dev = sl->master; | 681 | dev = sl->master; |
533 | } else { | 682 | } else { |
534 | printk(KERN_NOTICE | 683 | printk(KERN_NOTICE |
535 | "%s: msg: %x.%x, wrong type: %u, len: %u.\n", | 684 | "%s: cn: %x.%x, wrong type: %u, len: %u.\n", |
536 | __func__, msg->id.idx, msg->id.val, | 685 | __func__, cn->id.idx, cn->id.val, |
537 | m->type, m->len); | 686 | msg->type, msg->len); |
538 | err = -EPROTO; | 687 | err = -EPROTO; |
539 | goto out_cont; | 688 | goto out_cont; |
540 | } | 689 | } |
@@ -549,8 +698,8 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) | |||
549 | atomic_inc(&block->refcnt); | 698 | atomic_inc(&block->refcnt); |
550 | node->async.cb = w1_process_cb; | 699 | node->async.cb = w1_process_cb; |
551 | node->block = block; | 700 | node->block = block; |
552 | node->m = (struct w1_netlink_msg *)((u8 *)&block->msg + | 701 | node->msg = (struct w1_netlink_msg *)((u8 *)&block->request_cn + |
553 | (size_t)((u8 *)m - (u8 *)msg)); | 702 | (size_t)((u8 *)msg - (u8 *)cn)); |
554 | node->sl = sl; | 703 | node->sl = sl; |
555 | node->dev = dev; | 704 | node->dev = dev; |
556 | 705 | ||
@@ -561,11 +710,15 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) | |||
561 | ++node; | 710 | ++node; |
562 | 711 | ||
563 | out_cont: | 712 | out_cont: |
713 | /* Can't queue because that modifies block and another | ||
714 | * thread could be processing the messages by now and | ||
715 | * there isn't a lock, send directly. | ||
716 | */ | ||
564 | if (err) | 717 | if (err) |
565 | w1_netlink_send_error(msg, m, NULL, nsp->portid, err); | 718 | w1_netlink_send_error(cn, msg, nsp->portid, err); |
566 | msg_len -= sizeof(struct w1_netlink_msg) + m->len; | 719 | msg_len -= sizeof(struct w1_netlink_msg) + msg->len; |
567 | m = (struct w1_netlink_msg *)(((u8 *)m) + | 720 | msg = (struct w1_netlink_msg *)(((u8 *)msg) + |
568 | sizeof(struct w1_netlink_msg) + m->len); | 721 | sizeof(struct w1_netlink_msg) + msg->len); |
569 | 722 | ||
570 | /* | 723 | /* |
571 | * Let's allow requests for nonexisting devices. | 724 | * Let's allow requests for nonexisting devices. |
@@ -573,8 +726,8 @@ out_cont: | |||
573 | if (err == -ENODEV) | 726 | if (err == -ENODEV) |
574 | err = 0; | 727 | err = 0; |
575 | } | 728 | } |
576 | if (block && atomic_sub_return(1, &block->refcnt) == 0) | 729 | if (block) |
577 | kfree(block); | 730 | w1_unref_block(block); |
578 | } | 731 | } |
579 | 732 | ||
580 | int w1_init_netlink(void) | 733 | int w1_init_netlink(void) |
@@ -591,7 +744,7 @@ void w1_fini_netlink(void) | |||
591 | cn_del_callback(&w1_id); | 744 | cn_del_callback(&w1_id); |
592 | } | 745 | } |
593 | #else | 746 | #else |
594 | void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg) | 747 | void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *cn) |
595 | { | 748 | { |
596 | } | 749 | } |
597 | 750 | ||
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h index 1e9504e67650..c99a9ce05e62 100644 --- a/drivers/w1/w1_netlink.h +++ b/drivers/w1/w1_netlink.h | |||
@@ -28,6 +28,17 @@ | |||
28 | #include "w1.h" | 28 | #include "w1.h" |
29 | 29 | ||
30 | /** | 30 | /** |
31 | * enum w1_cn_msg_flags - bitfield flags for struct cn_msg.flags | ||
32 | * | ||
33 | * @W1_CN_BUNDLE: Request bundling replies into fewer messages. Be prepared | ||
34 | * to handle multiple struct cn_msg, struct w1_netlink_msg, and | ||
35 | * struct w1_netlink_cmd in one packet. | ||
36 | */ | ||
37 | enum w1_cn_msg_flags { | ||
38 | W1_CN_BUNDLE = 1, | ||
39 | }; | ||
40 | |||
41 | /** | ||
31 | * enum w1_netlink_message_types - message type | 42 | * enum w1_netlink_message_types - message type |
32 | * | 43 | * |
33 | * @W1_SLAVE_ADD: notification that a slave device was added | 44 | * @W1_SLAVE_ADD: notification that a slave device was added |
@@ -49,6 +60,19 @@ enum w1_netlink_message_types { | |||
49 | W1_LIST_MASTERS, | 60 | W1_LIST_MASTERS, |
50 | }; | 61 | }; |
51 | 62 | ||
63 | /** | ||
64 | * struct w1_netlink_msg - holds w1 message type, id, and result | ||
65 | * | ||
66 | * @type: one of enum w1_netlink_message_types | ||
67 | * @status: kernel feedback for success 0 or errno failure value | ||
68 | * @len: length of data following w1_netlink_msg | ||
69 | * @id: union holding master bus id (msg.id) and slave device id (id[8]). | ||
70 | * @data: start address of any following data | ||
71 | * | ||
72 | * The base message structure for w1 messages over netlink. | ||
73 | * The netlink connector data sequence is, struct nlmsghdr, struct cn_msg, | ||
74 | * then one or more struct w1_netlink_msg (each with optional data). | ||
75 | */ | ||
52 | struct w1_netlink_msg | 76 | struct w1_netlink_msg |
53 | { | 77 | { |
54 | __u8 type; | 78 | __u8 type; |
@@ -66,6 +90,7 @@ struct w1_netlink_msg | |||
66 | 90 | ||
67 | /** | 91 | /** |
68 | * enum w1_commands - commands available for master or slave operations | 92 | * enum w1_commands - commands available for master or slave operations |
93 | * | ||
69 | * @W1_CMD_READ: read len bytes | 94 | * @W1_CMD_READ: read len bytes |
70 | * @W1_CMD_WRITE: write len bytes | 95 | * @W1_CMD_WRITE: write len bytes |
71 | * @W1_CMD_SEARCH: initiate a standard search, returns only the slave | 96 | * @W1_CMD_SEARCH: initiate a standard search, returns only the slave |
@@ -93,6 +118,17 @@ enum w1_commands { | |||
93 | W1_CMD_MAX | 118 | W1_CMD_MAX |
94 | }; | 119 | }; |
95 | 120 | ||
121 | /** | ||
122 | * struct w1_netlink_cmd - holds the command and data | ||
123 | * | ||
124 | * @cmd: one of enum w1_commands | ||
125 | * @res: reserved | ||
126 | * @len: length of data following w1_netlink_cmd | ||
127 | * @data: start address of any following data | ||
128 | * | ||
129 | * One or more struct w1_netlink_cmd is placed starting at w1_netlink_msg.data | ||
130 | * each with optional data. | ||
131 | */ | ||
96 | struct w1_netlink_cmd | 132 | struct w1_netlink_cmd |
97 | { | 133 | { |
98 | __u8 cmd; | 134 | __u8 cmd; |