diff options
Diffstat (limited to 'drivers')
263 files changed, 7421 insertions, 3250 deletions
diff --git a/drivers/Makefile b/drivers/Makefile index d05d81b19b50..7183b6af5dac 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
@@ -119,7 +119,7 @@ obj-$(CONFIG_SGI_SN) += sn/ | |||
119 | obj-y += firmware/ | 119 | obj-y += firmware/ |
120 | obj-$(CONFIG_CRYPTO) += crypto/ | 120 | obj-$(CONFIG_CRYPTO) += crypto/ |
121 | obj-$(CONFIG_SUPERH) += sh/ | 121 | obj-$(CONFIG_SUPERH) += sh/ |
122 | obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += sh/ | 122 | obj-$(CONFIG_ARCH_SHMOBILE) += sh/ |
123 | ifndef CONFIG_ARCH_USES_GETTIMEOFFSET | 123 | ifndef CONFIG_ARCH_USES_GETTIMEOFFSET |
124 | obj-y += clocksource/ | 124 | obj-y += clocksource/ |
125 | endif | 125 | endif |
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index ab686b310100..a34a22841002 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -47,6 +47,23 @@ config ACPI_SLEEP | |||
47 | depends on SUSPEND || HIBERNATION | 47 | depends on SUSPEND || HIBERNATION |
48 | default y | 48 | default y |
49 | 49 | ||
50 | config ACPI_PROCFS_POWER | ||
51 | bool "Deprecated power /proc/acpi directories" | ||
52 | depends on PROC_FS | ||
53 | help | ||
54 | For backwards compatibility, this option allows | ||
55 | deprecated power /proc/acpi/ directories to exist, even when | ||
56 | they have been replaced by functions in /sys. | ||
57 | The deprecated directories (and their replacements) include: | ||
58 | /proc/acpi/battery/* (/sys/class/power_supply/*) | ||
59 | /proc/acpi/ac_adapter/* (sys/class/power_supply/*) | ||
60 | This option has no effect on /proc/acpi/ directories | ||
61 | and functions, which do not yet exist in /sys | ||
62 | This option, together with the proc directories, will be | ||
63 | deleted in the future. | ||
64 | |||
65 | Say N to delete power /proc/acpi/ directories that have moved to /sys/ | ||
66 | |||
50 | config ACPI_EC_DEBUGFS | 67 | config ACPI_EC_DEBUGFS |
51 | tristate "EC read/write access through /sys/kernel/debug/ec" | 68 | tristate "EC read/write access through /sys/kernel/debug/ec" |
52 | default n | 69 | default n |
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 0331f91d56e6..ea55e0179f81 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile | |||
@@ -39,14 +39,16 @@ acpi-y += processor_core.o | |||
39 | acpi-y += ec.o | 39 | acpi-y += ec.o |
40 | acpi-$(CONFIG_ACPI_DOCK) += dock.o | 40 | acpi-$(CONFIG_ACPI_DOCK) += dock.o |
41 | acpi-y += pci_root.o pci_link.o pci_irq.o | 41 | acpi-y += pci_root.o pci_link.o pci_irq.o |
42 | acpi-$(CONFIG_X86_INTEL_LPSS) += acpi_lpss.o | 42 | acpi-y += acpi_lpss.o |
43 | acpi-y += acpi_platform.o | 43 | acpi-y += acpi_platform.o |
44 | acpi-y += acpi_pnp.o | ||
44 | acpi-y += power.o | 45 | acpi-y += power.o |
45 | acpi-y += event.o | 46 | acpi-y += event.o |
46 | acpi-y += sysfs.o | 47 | acpi-y += sysfs.o |
47 | acpi-$(CONFIG_X86) += acpi_cmos_rtc.o | 48 | acpi-$(CONFIG_X86) += acpi_cmos_rtc.o |
48 | acpi-$(CONFIG_DEBUG_FS) += debugfs.o | 49 | acpi-$(CONFIG_DEBUG_FS) += debugfs.o |
49 | acpi-$(CONFIG_ACPI_NUMA) += numa.o | 50 | acpi-$(CONFIG_ACPI_NUMA) += numa.o |
51 | acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o | ||
50 | ifdef CONFIG_ACPI_VIDEO | 52 | ifdef CONFIG_ACPI_VIDEO |
51 | acpi-y += video_detect.o | 53 | acpi-y += video_detect.o |
52 | endif | 54 | endif |
@@ -62,9 +64,9 @@ obj-$(CONFIG_ACPI_FAN) += fan.o | |||
62 | obj-$(CONFIG_ACPI_VIDEO) += video.o | 64 | obj-$(CONFIG_ACPI_VIDEO) += video.o |
63 | obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o | 65 | obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o |
64 | obj-$(CONFIG_ACPI_PROCESSOR) += processor.o | 66 | obj-$(CONFIG_ACPI_PROCESSOR) += processor.o |
65 | obj-$(CONFIG_ACPI_CONTAINER) += container.o | 67 | obj-y += container.o |
66 | obj-$(CONFIG_ACPI_THERMAL) += thermal.o | 68 | obj-$(CONFIG_ACPI_THERMAL) += thermal.o |
67 | obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o | 69 | obj-y += acpi_memhotplug.o |
68 | obj-$(CONFIG_ACPI_BATTERY) += battery.o | 70 | obj-$(CONFIG_ACPI_BATTERY) += battery.o |
69 | obj-$(CONFIG_ACPI_SBS) += sbshc.o | 71 | obj-$(CONFIG_ACPI_SBS) += sbshc.o |
70 | obj-$(CONFIG_ACPI_SBS) += sbs.o | 72 | obj-$(CONFIG_ACPI_SBS) += sbs.o |
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 2c01c1da29ce..c67f6f5ad611 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c | |||
@@ -52,11 +52,39 @@ MODULE_AUTHOR("Paul Diefenbaugh"); | |||
52 | MODULE_DESCRIPTION("ACPI AC Adapter Driver"); | 52 | MODULE_DESCRIPTION("ACPI AC Adapter Driver"); |
53 | MODULE_LICENSE("GPL"); | 53 | MODULE_LICENSE("GPL"); |
54 | 54 | ||
55 | static int acpi_ac_add(struct acpi_device *device); | ||
56 | static int acpi_ac_remove(struct acpi_device *device); | ||
57 | static void acpi_ac_notify(struct acpi_device *device, u32 event); | ||
58 | |||
59 | static const struct acpi_device_id ac_device_ids[] = { | ||
60 | {"ACPI0003", 0}, | ||
61 | {"", 0}, | ||
62 | }; | ||
63 | MODULE_DEVICE_TABLE(acpi, ac_device_ids); | ||
64 | |||
65 | #ifdef CONFIG_PM_SLEEP | ||
66 | static int acpi_ac_resume(struct device *dev); | ||
67 | #endif | ||
68 | static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); | ||
69 | |||
55 | static int ac_sleep_before_get_state_ms; | 70 | static int ac_sleep_before_get_state_ms; |
56 | 71 | ||
72 | static struct acpi_driver acpi_ac_driver = { | ||
73 | .name = "ac", | ||
74 | .class = ACPI_AC_CLASS, | ||
75 | .ids = ac_device_ids, | ||
76 | .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, | ||
77 | .ops = { | ||
78 | .add = acpi_ac_add, | ||
79 | .remove = acpi_ac_remove, | ||
80 | .notify = acpi_ac_notify, | ||
81 | }, | ||
82 | .drv.pm = &acpi_ac_pm, | ||
83 | }; | ||
84 | |||
57 | struct acpi_ac { | 85 | struct acpi_ac { |
58 | struct power_supply charger; | 86 | struct power_supply charger; |
59 | struct platform_device *pdev; | 87 | struct acpi_device * device; |
60 | unsigned long long state; | 88 | unsigned long long state; |
61 | struct notifier_block battery_nb; | 89 | struct notifier_block battery_nb; |
62 | }; | 90 | }; |
@@ -69,10 +97,12 @@ struct acpi_ac { | |||
69 | 97 | ||
70 | static int acpi_ac_get_state(struct acpi_ac *ac) | 98 | static int acpi_ac_get_state(struct acpi_ac *ac) |
71 | { | 99 | { |
72 | acpi_status status; | 100 | acpi_status status = AE_OK; |
73 | acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev); | 101 | |
102 | if (!ac) | ||
103 | return -EINVAL; | ||
74 | 104 | ||
75 | status = acpi_evaluate_integer(handle, "_PSR", NULL, | 105 | status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, |
76 | &ac->state); | 106 | &ac->state); |
77 | if (ACPI_FAILURE(status)) { | 107 | if (ACPI_FAILURE(status)) { |
78 | ACPI_EXCEPTION((AE_INFO, status, | 108 | ACPI_EXCEPTION((AE_INFO, status, |
@@ -117,10 +147,9 @@ static enum power_supply_property ac_props[] = { | |||
117 | Driver Model | 147 | Driver Model |
118 | -------------------------------------------------------------------------- */ | 148 | -------------------------------------------------------------------------- */ |
119 | 149 | ||
120 | static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) | 150 | static void acpi_ac_notify(struct acpi_device *device, u32 event) |
121 | { | 151 | { |
122 | struct acpi_ac *ac = data; | 152 | struct acpi_ac *ac = acpi_driver_data(device); |
123 | struct acpi_device *adev; | ||
124 | 153 | ||
125 | if (!ac) | 154 | if (!ac) |
126 | return; | 155 | return; |
@@ -143,11 +172,10 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) | |||
143 | msleep(ac_sleep_before_get_state_ms); | 172 | msleep(ac_sleep_before_get_state_ms); |
144 | 173 | ||
145 | acpi_ac_get_state(ac); | 174 | acpi_ac_get_state(ac); |
146 | adev = ACPI_COMPANION(&ac->pdev->dev); | 175 | acpi_bus_generate_netlink_event(device->pnp.device_class, |
147 | acpi_bus_generate_netlink_event(adev->pnp.device_class, | 176 | dev_name(&device->dev), event, |
148 | dev_name(&ac->pdev->dev), | 177 | (u32) ac->state); |
149 | event, (u32) ac->state); | 178 | acpi_notifier_call_chain(device, event, (u32) ac->state); |
150 | acpi_notifier_call_chain(adev, event, (u32) ac->state); | ||
151 | kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); | 179 | kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); |
152 | } | 180 | } |
153 | 181 | ||
@@ -192,49 +220,39 @@ static struct dmi_system_id ac_dmi_table[] = { | |||
192 | {}, | 220 | {}, |
193 | }; | 221 | }; |
194 | 222 | ||
195 | static int acpi_ac_probe(struct platform_device *pdev) | 223 | static int acpi_ac_add(struct acpi_device *device) |
196 | { | 224 | { |
197 | int result = 0; | 225 | int result = 0; |
198 | struct acpi_ac *ac = NULL; | 226 | struct acpi_ac *ac = NULL; |
199 | struct acpi_device *adev; | ||
200 | 227 | ||
201 | if (!pdev) | ||
202 | return -EINVAL; | ||
203 | 228 | ||
204 | adev = ACPI_COMPANION(&pdev->dev); | 229 | if (!device) |
205 | if (!adev) | 230 | return -EINVAL; |
206 | return -ENODEV; | ||
207 | 231 | ||
208 | ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); | 232 | ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); |
209 | if (!ac) | 233 | if (!ac) |
210 | return -ENOMEM; | 234 | return -ENOMEM; |
211 | 235 | ||
212 | strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); | 236 | ac->device = device; |
213 | strcpy(acpi_device_class(adev), ACPI_AC_CLASS); | 237 | strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME); |
214 | ac->pdev = pdev; | 238 | strcpy(acpi_device_class(device), ACPI_AC_CLASS); |
215 | platform_set_drvdata(pdev, ac); | 239 | device->driver_data = ac; |
216 | 240 | ||
217 | result = acpi_ac_get_state(ac); | 241 | result = acpi_ac_get_state(ac); |
218 | if (result) | 242 | if (result) |
219 | goto end; | 243 | goto end; |
220 | 244 | ||
221 | ac->charger.name = acpi_device_bid(adev); | 245 | ac->charger.name = acpi_device_bid(device); |
222 | ac->charger.type = POWER_SUPPLY_TYPE_MAINS; | 246 | ac->charger.type = POWER_SUPPLY_TYPE_MAINS; |
223 | ac->charger.properties = ac_props; | 247 | ac->charger.properties = ac_props; |
224 | ac->charger.num_properties = ARRAY_SIZE(ac_props); | 248 | ac->charger.num_properties = ARRAY_SIZE(ac_props); |
225 | ac->charger.get_property = get_ac_property; | 249 | ac->charger.get_property = get_ac_property; |
226 | result = power_supply_register(&pdev->dev, &ac->charger); | 250 | result = power_supply_register(&ac->device->dev, &ac->charger); |
227 | if (result) | 251 | if (result) |
228 | goto end; | 252 | goto end; |
229 | 253 | ||
230 | result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), | ||
231 | ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac); | ||
232 | if (result) { | ||
233 | power_supply_unregister(&ac->charger); | ||
234 | goto end; | ||
235 | } | ||
236 | printk(KERN_INFO PREFIX "%s [%s] (%s)\n", | 254 | printk(KERN_INFO PREFIX "%s [%s] (%s)\n", |
237 | acpi_device_name(adev), acpi_device_bid(adev), | 255 | acpi_device_name(device), acpi_device_bid(device), |
238 | ac->state ? "on-line" : "off-line"); | 256 | ac->state ? "on-line" : "off-line"); |
239 | 257 | ||
240 | ac->battery_nb.notifier_call = acpi_ac_battery_notify; | 258 | ac->battery_nb.notifier_call = acpi_ac_battery_notify; |
@@ -256,7 +274,7 @@ static int acpi_ac_resume(struct device *dev) | |||
256 | if (!dev) | 274 | if (!dev) |
257 | return -EINVAL; | 275 | return -EINVAL; |
258 | 276 | ||
259 | ac = platform_get_drvdata(to_platform_device(dev)); | 277 | ac = acpi_driver_data(to_acpi_device(dev)); |
260 | if (!ac) | 278 | if (!ac) |
261 | return -EINVAL; | 279 | return -EINVAL; |
262 | 280 | ||
@@ -270,19 +288,17 @@ static int acpi_ac_resume(struct device *dev) | |||
270 | #else | 288 | #else |
271 | #define acpi_ac_resume NULL | 289 | #define acpi_ac_resume NULL |
272 | #endif | 290 | #endif |
273 | static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume); | ||
274 | 291 | ||
275 | static int acpi_ac_remove(struct platform_device *pdev) | 292 | static int acpi_ac_remove(struct acpi_device *device) |
276 | { | 293 | { |
277 | struct acpi_ac *ac; | 294 | struct acpi_ac *ac = NULL; |
295 | |||
278 | 296 | ||
279 | if (!pdev) | 297 | if (!device || !acpi_driver_data(device)) |
280 | return -EINVAL; | 298 | return -EINVAL; |
281 | 299 | ||
282 | acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), | 300 | ac = acpi_driver_data(device); |
283 | ACPI_ALL_NOTIFY, acpi_ac_notify_handler); | ||
284 | 301 | ||
285 | ac = platform_get_drvdata(pdev); | ||
286 | if (ac->charger.dev) | 302 | if (ac->charger.dev) |
287 | power_supply_unregister(&ac->charger); | 303 | power_supply_unregister(&ac->charger); |
288 | unregister_acpi_notifier(&ac->battery_nb); | 304 | unregister_acpi_notifier(&ac->battery_nb); |
@@ -292,23 +308,6 @@ static int acpi_ac_remove(struct platform_device *pdev) | |||
292 | return 0; | 308 | return 0; |
293 | } | 309 | } |
294 | 310 | ||
295 | static const struct acpi_device_id acpi_ac_match[] = { | ||
296 | { "ACPI0003", 0 }, | ||
297 | { } | ||
298 | }; | ||
299 | MODULE_DEVICE_TABLE(acpi, acpi_ac_match); | ||
300 | |||
301 | static struct platform_driver acpi_ac_driver = { | ||
302 | .probe = acpi_ac_probe, | ||
303 | .remove = acpi_ac_remove, | ||
304 | .driver = { | ||
305 | .name = "acpi-ac", | ||
306 | .owner = THIS_MODULE, | ||
307 | .pm = &acpi_ac_pm_ops, | ||
308 | .acpi_match_table = ACPI_PTR(acpi_ac_match), | ||
309 | }, | ||
310 | }; | ||
311 | |||
312 | static int __init acpi_ac_init(void) | 311 | static int __init acpi_ac_init(void) |
313 | { | 312 | { |
314 | int result; | 313 | int result; |
@@ -316,7 +315,7 @@ static int __init acpi_ac_init(void) | |||
316 | if (acpi_disabled) | 315 | if (acpi_disabled) |
317 | return -ENODEV; | 316 | return -ENODEV; |
318 | 317 | ||
319 | result = platform_driver_register(&acpi_ac_driver); | 318 | result = acpi_bus_register_driver(&acpi_ac_driver); |
320 | if (result < 0) | 319 | if (result < 0) |
321 | return -ENODEV; | 320 | return -ENODEV; |
322 | 321 | ||
@@ -325,7 +324,7 @@ static int __init acpi_ac_init(void) | |||
325 | 324 | ||
326 | static void __exit acpi_ac_exit(void) | 325 | static void __exit acpi_ac_exit(void) |
327 | { | 326 | { |
328 | platform_driver_unregister(&acpi_ac_driver); | 327 | acpi_bus_unregister_driver(&acpi_ac_driver); |
329 | } | 328 | } |
330 | module_init(acpi_ac_init); | 329 | module_init(acpi_ac_init); |
331 | module_exit(acpi_ac_exit); | 330 | module_exit(acpi_ac_exit); |
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c index 961b45d18a5d..2da8660262e5 100644 --- a/drivers/acpi/acpi_cmos_rtc.c +++ b/drivers/acpi/acpi_cmos_rtc.c | |||
@@ -68,7 +68,7 @@ static int acpi_install_cmos_rtc_space_handler(struct acpi_device *adev, | |||
68 | return -ENODEV; | 68 | return -ENODEV; |
69 | } | 69 | } |
70 | 70 | ||
71 | return 0; | 71 | return 1; |
72 | } | 72 | } |
73 | 73 | ||
74 | static void acpi_remove_cmos_rtc_space_handler(struct acpi_device *adev) | 74 | static void acpi_remove_cmos_rtc_space_handler(struct acpi_device *adev) |
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index c4a5d87ede7e..185334114d71 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c | |||
@@ -220,13 +220,13 @@ static int __init extlog_init(void) | |||
220 | goto err; | 220 | goto err; |
221 | } | 221 | } |
222 | 222 | ||
223 | extlog_l1_hdr = acpi_os_map_memory(l1_dirbase, l1_hdr_size); | 223 | extlog_l1_hdr = acpi_os_map_iomem(l1_dirbase, l1_hdr_size); |
224 | l1_head = (struct extlog_l1_head *)extlog_l1_hdr; | 224 | l1_head = (struct extlog_l1_head *)extlog_l1_hdr; |
225 | l1_size = l1_head->total_len; | 225 | l1_size = l1_head->total_len; |
226 | l1_percpu_entry = l1_head->entries; | 226 | l1_percpu_entry = l1_head->entries; |
227 | elog_base = l1_head->elog_base; | 227 | elog_base = l1_head->elog_base; |
228 | elog_size = l1_head->elog_len; | 228 | elog_size = l1_head->elog_len; |
229 | acpi_os_unmap_memory(extlog_l1_hdr, l1_hdr_size); | 229 | acpi_os_unmap_iomem(extlog_l1_hdr, l1_hdr_size); |
230 | release_mem_region(l1_dirbase, l1_hdr_size); | 230 | release_mem_region(l1_dirbase, l1_hdr_size); |
231 | 231 | ||
232 | /* remap L1 header again based on completed information */ | 232 | /* remap L1 header again based on completed information */ |
@@ -237,7 +237,7 @@ static int __init extlog_init(void) | |||
237 | (unsigned long long)l1_dirbase + l1_size); | 237 | (unsigned long long)l1_dirbase + l1_size); |
238 | goto err; | 238 | goto err; |
239 | } | 239 | } |
240 | extlog_l1_addr = acpi_os_map_memory(l1_dirbase, l1_size); | 240 | extlog_l1_addr = acpi_os_map_iomem(l1_dirbase, l1_size); |
241 | l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size); | 241 | l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size); |
242 | 242 | ||
243 | /* remap elog table */ | 243 | /* remap elog table */ |
@@ -248,7 +248,7 @@ static int __init extlog_init(void) | |||
248 | (unsigned long long)elog_base + elog_size); | 248 | (unsigned long long)elog_base + elog_size); |
249 | goto err_release_l1_dir; | 249 | goto err_release_l1_dir; |
250 | } | 250 | } |
251 | elog_addr = acpi_os_map_memory(elog_base, elog_size); | 251 | elog_addr = acpi_os_map_iomem(elog_base, elog_size); |
252 | 252 | ||
253 | rc = -ENOMEM; | 253 | rc = -ENOMEM; |
254 | /* allocate buffer to save elog record */ | 254 | /* allocate buffer to save elog record */ |
@@ -270,11 +270,11 @@ static int __init extlog_init(void) | |||
270 | 270 | ||
271 | err_release_elog: | 271 | err_release_elog: |
272 | if (elog_addr) | 272 | if (elog_addr) |
273 | acpi_os_unmap_memory(elog_addr, elog_size); | 273 | acpi_os_unmap_iomem(elog_addr, elog_size); |
274 | release_mem_region(elog_base, elog_size); | 274 | release_mem_region(elog_base, elog_size); |
275 | err_release_l1_dir: | 275 | err_release_l1_dir: |
276 | if (extlog_l1_addr) | 276 | if (extlog_l1_addr) |
277 | acpi_os_unmap_memory(extlog_l1_addr, l1_size); | 277 | acpi_os_unmap_iomem(extlog_l1_addr, l1_size); |
278 | release_mem_region(l1_dirbase, l1_size); | 278 | release_mem_region(l1_dirbase, l1_size); |
279 | err: | 279 | err: |
280 | pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n"); | 280 | pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n"); |
@@ -287,9 +287,9 @@ static void __exit extlog_exit(void) | |||
287 | mce_unregister_decode_chain(&extlog_mce_dec); | 287 | mce_unregister_decode_chain(&extlog_mce_dec); |
288 | ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN; | 288 | ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN; |
289 | if (extlog_l1_addr) | 289 | if (extlog_l1_addr) |
290 | acpi_os_unmap_memory(extlog_l1_addr, l1_size); | 290 | acpi_os_unmap_iomem(extlog_l1_addr, l1_size); |
291 | if (elog_addr) | 291 | if (elog_addr) |
292 | acpi_os_unmap_memory(elog_addr, elog_size); | 292 | acpi_os_unmap_iomem(elog_addr, elog_size); |
293 | release_mem_region(elog_base, elog_size); | 293 | release_mem_region(elog_base, elog_size); |
294 | release_mem_region(l1_dirbase, l1_size); | 294 | release_mem_region(l1_dirbase, l1_size); |
295 | kfree(elog_buf); | 295 | kfree(elog_buf); |
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 69e29f409d4c..51069b260518 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
@@ -19,15 +19,21 @@ | |||
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/platform_data/clk-lpss.h> | 20 | #include <linux/platform_data/clk-lpss.h> |
21 | #include <linux/pm_runtime.h> | 21 | #include <linux/pm_runtime.h> |
22 | #include <linux/delay.h> | ||
22 | 23 | ||
23 | #include "internal.h" | 24 | #include "internal.h" |
24 | 25 | ||
25 | ACPI_MODULE_NAME("acpi_lpss"); | 26 | ACPI_MODULE_NAME("acpi_lpss"); |
26 | 27 | ||
28 | #ifdef CONFIG_X86_INTEL_LPSS | ||
29 | |||
30 | #define LPSS_ADDR(desc) ((unsigned long)&desc) | ||
31 | |||
27 | #define LPSS_CLK_SIZE 0x04 | 32 | #define LPSS_CLK_SIZE 0x04 |
28 | #define LPSS_LTR_SIZE 0x18 | 33 | #define LPSS_LTR_SIZE 0x18 |
29 | 34 | ||
30 | /* Offsets relative to LPSS_PRIVATE_OFFSET */ | 35 | /* Offsets relative to LPSS_PRIVATE_OFFSET */ |
36 | #define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16)) | ||
31 | #define LPSS_GENERAL 0x08 | 37 | #define LPSS_GENERAL 0x08 |
32 | #define LPSS_GENERAL_LTR_MODE_SW BIT(2) | 38 | #define LPSS_GENERAL_LTR_MODE_SW BIT(2) |
33 | #define LPSS_GENERAL_UART_RTS_OVRD BIT(3) | 39 | #define LPSS_GENERAL_UART_RTS_OVRD BIT(3) |
@@ -43,6 +49,8 @@ ACPI_MODULE_NAME("acpi_lpss"); | |||
43 | #define LPSS_TX_INT 0x20 | 49 | #define LPSS_TX_INT 0x20 |
44 | #define LPSS_TX_INT_MASK BIT(1) | 50 | #define LPSS_TX_INT_MASK BIT(1) |
45 | 51 | ||
52 | #define LPSS_PRV_REG_COUNT 9 | ||
53 | |||
46 | struct lpss_shared_clock { | 54 | struct lpss_shared_clock { |
47 | const char *name; | 55 | const char *name; |
48 | unsigned long rate; | 56 | unsigned long rate; |
@@ -57,7 +65,9 @@ struct lpss_device_desc { | |||
57 | bool ltr_required; | 65 | bool ltr_required; |
58 | unsigned int prv_offset; | 66 | unsigned int prv_offset; |
59 | size_t prv_size_override; | 67 | size_t prv_size_override; |
68 | bool clk_divider; | ||
60 | bool clk_gate; | 69 | bool clk_gate; |
70 | bool save_ctx; | ||
61 | struct lpss_shared_clock *shared_clock; | 71 | struct lpss_shared_clock *shared_clock; |
62 | void (*setup)(struct lpss_private_data *pdata); | 72 | void (*setup)(struct lpss_private_data *pdata); |
63 | }; | 73 | }; |
@@ -72,6 +82,7 @@ struct lpss_private_data { | |||
72 | resource_size_t mmio_size; | 82 | resource_size_t mmio_size; |
73 | struct clk *clk; | 83 | struct clk *clk; |
74 | const struct lpss_device_desc *dev_desc; | 84 | const struct lpss_device_desc *dev_desc; |
85 | u32 prv_reg_ctx[LPSS_PRV_REG_COUNT]; | ||
75 | }; | 86 | }; |
76 | 87 | ||
77 | static void lpss_uart_setup(struct lpss_private_data *pdata) | 88 | static void lpss_uart_setup(struct lpss_private_data *pdata) |
@@ -92,6 +103,14 @@ static struct lpss_device_desc lpt_dev_desc = { | |||
92 | .clk_required = true, | 103 | .clk_required = true, |
93 | .prv_offset = 0x800, | 104 | .prv_offset = 0x800, |
94 | .ltr_required = true, | 105 | .ltr_required = true, |
106 | .clk_divider = true, | ||
107 | .clk_gate = true, | ||
108 | }; | ||
109 | |||
110 | static struct lpss_device_desc lpt_i2c_dev_desc = { | ||
111 | .clk_required = true, | ||
112 | .prv_offset = 0x800, | ||
113 | .ltr_required = true, | ||
95 | .clk_gate = true, | 114 | .clk_gate = true, |
96 | }; | 115 | }; |
97 | 116 | ||
@@ -99,6 +118,7 @@ static struct lpss_device_desc lpt_uart_dev_desc = { | |||
99 | .clk_required = true, | 118 | .clk_required = true, |
100 | .prv_offset = 0x800, | 119 | .prv_offset = 0x800, |
101 | .ltr_required = true, | 120 | .ltr_required = true, |
121 | .clk_divider = true, | ||
102 | .clk_gate = true, | 122 | .clk_gate = true, |
103 | .setup = lpss_uart_setup, | 123 | .setup = lpss_uart_setup, |
104 | }; | 124 | }; |
@@ -116,32 +136,25 @@ static struct lpss_shared_clock pwm_clock = { | |||
116 | 136 | ||
117 | static struct lpss_device_desc byt_pwm_dev_desc = { | 137 | static struct lpss_device_desc byt_pwm_dev_desc = { |
118 | .clk_required = true, | 138 | .clk_required = true, |
139 | .save_ctx = true, | ||
119 | .shared_clock = &pwm_clock, | 140 | .shared_clock = &pwm_clock, |
120 | }; | 141 | }; |
121 | 142 | ||
122 | static struct lpss_shared_clock uart_clock = { | ||
123 | .name = "uart_clk", | ||
124 | .rate = 44236800, | ||
125 | }; | ||
126 | |||
127 | static struct lpss_device_desc byt_uart_dev_desc = { | 143 | static struct lpss_device_desc byt_uart_dev_desc = { |
128 | .clk_required = true, | 144 | .clk_required = true, |
129 | .prv_offset = 0x800, | 145 | .prv_offset = 0x800, |
146 | .clk_divider = true, | ||
130 | .clk_gate = true, | 147 | .clk_gate = true, |
131 | .shared_clock = &uart_clock, | 148 | .save_ctx = true, |
132 | .setup = lpss_uart_setup, | 149 | .setup = lpss_uart_setup, |
133 | }; | 150 | }; |
134 | 151 | ||
135 | static struct lpss_shared_clock spi_clock = { | ||
136 | .name = "spi_clk", | ||
137 | .rate = 50000000, | ||
138 | }; | ||
139 | |||
140 | static struct lpss_device_desc byt_spi_dev_desc = { | 152 | static struct lpss_device_desc byt_spi_dev_desc = { |
141 | .clk_required = true, | 153 | .clk_required = true, |
142 | .prv_offset = 0x400, | 154 | .prv_offset = 0x400, |
155 | .clk_divider = true, | ||
143 | .clk_gate = true, | 156 | .clk_gate = true, |
144 | .shared_clock = &spi_clock, | 157 | .save_ctx = true, |
145 | }; | 158 | }; |
146 | 159 | ||
147 | static struct lpss_device_desc byt_sdio_dev_desc = { | 160 | static struct lpss_device_desc byt_sdio_dev_desc = { |
@@ -156,43 +169,52 @@ static struct lpss_shared_clock i2c_clock = { | |||
156 | static struct lpss_device_desc byt_i2c_dev_desc = { | 169 | static struct lpss_device_desc byt_i2c_dev_desc = { |
157 | .clk_required = true, | 170 | .clk_required = true, |
158 | .prv_offset = 0x800, | 171 | .prv_offset = 0x800, |
172 | .save_ctx = true, | ||
159 | .shared_clock = &i2c_clock, | 173 | .shared_clock = &i2c_clock, |
160 | }; | 174 | }; |
161 | 175 | ||
176 | #else | ||
177 | |||
178 | #define LPSS_ADDR(desc) (0UL) | ||
179 | |||
180 | #endif /* CONFIG_X86_INTEL_LPSS */ | ||
181 | |||
162 | static const struct acpi_device_id acpi_lpss_device_ids[] = { | 182 | static const struct acpi_device_id acpi_lpss_device_ids[] = { |
163 | /* Generic LPSS devices */ | 183 | /* Generic LPSS devices */ |
164 | { "INTL9C60", (unsigned long)&lpss_dma_desc }, | 184 | { "INTL9C60", LPSS_ADDR(lpss_dma_desc) }, |
165 | 185 | ||
166 | /* Lynxpoint LPSS devices */ | 186 | /* Lynxpoint LPSS devices */ |
167 | { "INT33C0", (unsigned long)&lpt_dev_desc }, | 187 | { "INT33C0", LPSS_ADDR(lpt_dev_desc) }, |
168 | { "INT33C1", (unsigned long)&lpt_dev_desc }, | 188 | { "INT33C1", LPSS_ADDR(lpt_dev_desc) }, |
169 | { "INT33C2", (unsigned long)&lpt_dev_desc }, | 189 | { "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) }, |
170 | { "INT33C3", (unsigned long)&lpt_dev_desc }, | 190 | { "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) }, |
171 | { "INT33C4", (unsigned long)&lpt_uart_dev_desc }, | 191 | { "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) }, |
172 | { "INT33C5", (unsigned long)&lpt_uart_dev_desc }, | 192 | { "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) }, |
173 | { "INT33C6", (unsigned long)&lpt_sdio_dev_desc }, | 193 | { "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) }, |
174 | { "INT33C7", }, | 194 | { "INT33C7", }, |
175 | 195 | ||
176 | /* BayTrail LPSS devices */ | 196 | /* BayTrail LPSS devices */ |
177 | { "80860F09", (unsigned long)&byt_pwm_dev_desc }, | 197 | { "80860F09", LPSS_ADDR(byt_pwm_dev_desc) }, |
178 | { "80860F0A", (unsigned long)&byt_uart_dev_desc }, | 198 | { "80860F0A", LPSS_ADDR(byt_uart_dev_desc) }, |
179 | { "80860F0E", (unsigned long)&byt_spi_dev_desc }, | 199 | { "80860F0E", LPSS_ADDR(byt_spi_dev_desc) }, |
180 | { "80860F14", (unsigned long)&byt_sdio_dev_desc }, | 200 | { "80860F14", LPSS_ADDR(byt_sdio_dev_desc) }, |
181 | { "80860F41", (unsigned long)&byt_i2c_dev_desc }, | 201 | { "80860F41", LPSS_ADDR(byt_i2c_dev_desc) }, |
182 | { "INT33B2", }, | 202 | { "INT33B2", }, |
183 | 203 | ||
184 | { "INT3430", (unsigned long)&lpt_dev_desc }, | 204 | { "INT3430", LPSS_ADDR(lpt_dev_desc) }, |
185 | { "INT3431", (unsigned long)&lpt_dev_desc }, | 205 | { "INT3431", LPSS_ADDR(lpt_dev_desc) }, |
186 | { "INT3432", (unsigned long)&lpt_dev_desc }, | 206 | { "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) }, |
187 | { "INT3433", (unsigned long)&lpt_dev_desc }, | 207 | { "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) }, |
188 | { "INT3434", (unsigned long)&lpt_uart_dev_desc }, | 208 | { "INT3434", LPSS_ADDR(lpt_uart_dev_desc) }, |
189 | { "INT3435", (unsigned long)&lpt_uart_dev_desc }, | 209 | { "INT3435", LPSS_ADDR(lpt_uart_dev_desc) }, |
190 | { "INT3436", (unsigned long)&lpt_sdio_dev_desc }, | 210 | { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) }, |
191 | { "INT3437", }, | 211 | { "INT3437", }, |
192 | 212 | ||
193 | { } | 213 | { } |
194 | }; | 214 | }; |
195 | 215 | ||
216 | #ifdef CONFIG_X86_INTEL_LPSS | ||
217 | |||
196 | static int is_memory(struct acpi_resource *res, void *not_used) | 218 | static int is_memory(struct acpi_resource *res, void *not_used) |
197 | { | 219 | { |
198 | struct resource r; | 220 | struct resource r; |
@@ -212,9 +234,11 @@ static int register_device_clock(struct acpi_device *adev, | |||
212 | { | 234 | { |
213 | const struct lpss_device_desc *dev_desc = pdata->dev_desc; | 235 | const struct lpss_device_desc *dev_desc = pdata->dev_desc; |
214 | struct lpss_shared_clock *shared_clock = dev_desc->shared_clock; | 236 | struct lpss_shared_clock *shared_clock = dev_desc->shared_clock; |
237 | const char *devname = dev_name(&adev->dev); | ||
215 | struct clk *clk = ERR_PTR(-ENODEV); | 238 | struct clk *clk = ERR_PTR(-ENODEV); |
216 | struct lpss_clk_data *clk_data; | 239 | struct lpss_clk_data *clk_data; |
217 | const char *parent; | 240 | const char *parent, *clk_name; |
241 | void __iomem *prv_base; | ||
218 | 242 | ||
219 | if (!lpss_clk_dev) | 243 | if (!lpss_clk_dev) |
220 | lpt_register_clock_device(); | 244 | lpt_register_clock_device(); |
@@ -225,7 +249,7 @@ static int register_device_clock(struct acpi_device *adev, | |||
225 | 249 | ||
226 | if (dev_desc->clkdev_name) { | 250 | if (dev_desc->clkdev_name) { |
227 | clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name, | 251 | clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name, |
228 | dev_name(&adev->dev)); | 252 | devname); |
229 | return 0; | 253 | return 0; |
230 | } | 254 | } |
231 | 255 | ||
@@ -234,6 +258,7 @@ static int register_device_clock(struct acpi_device *adev, | |||
234 | return -ENODATA; | 258 | return -ENODATA; |
235 | 259 | ||
236 | parent = clk_data->name; | 260 | parent = clk_data->name; |
261 | prv_base = pdata->mmio_base + dev_desc->prv_offset; | ||
237 | 262 | ||
238 | if (shared_clock) { | 263 | if (shared_clock) { |
239 | clk = shared_clock->clk; | 264 | clk = shared_clock->clk; |
@@ -247,16 +272,41 @@ static int register_device_clock(struct acpi_device *adev, | |||
247 | } | 272 | } |
248 | 273 | ||
249 | if (dev_desc->clk_gate) { | 274 | if (dev_desc->clk_gate) { |
250 | clk = clk_register_gate(NULL, dev_name(&adev->dev), parent, 0, | 275 | clk = clk_register_gate(NULL, devname, parent, 0, |
251 | pdata->mmio_base + dev_desc->prv_offset, | 276 | prv_base, 0, 0, NULL); |
252 | 0, 0, NULL); | 277 | parent = devname; |
253 | pdata->clk = clk; | 278 | } |
279 | |||
280 | if (dev_desc->clk_divider) { | ||
281 | /* Prevent division by zero */ | ||
282 | if (!readl(prv_base)) | ||
283 | writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base); | ||
284 | |||
285 | clk_name = kasprintf(GFP_KERNEL, "%s-div", devname); | ||
286 | if (!clk_name) | ||
287 | return -ENOMEM; | ||
288 | clk = clk_register_fractional_divider(NULL, clk_name, parent, | ||
289 | 0, prv_base, | ||
290 | 1, 15, 16, 15, 0, NULL); | ||
291 | parent = clk_name; | ||
292 | |||
293 | clk_name = kasprintf(GFP_KERNEL, "%s-update", devname); | ||
294 | if (!clk_name) { | ||
295 | kfree(parent); | ||
296 | return -ENOMEM; | ||
297 | } | ||
298 | clk = clk_register_gate(NULL, clk_name, parent, | ||
299 | CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, | ||
300 | prv_base, 31, 0, NULL); | ||
301 | kfree(parent); | ||
302 | kfree(clk_name); | ||
254 | } | 303 | } |
255 | 304 | ||
256 | if (IS_ERR(clk)) | 305 | if (IS_ERR(clk)) |
257 | return PTR_ERR(clk); | 306 | return PTR_ERR(clk); |
258 | 307 | ||
259 | clk_register_clkdev(clk, NULL, dev_name(&adev->dev)); | 308 | pdata->clk = clk; |
309 | clk_register_clkdev(clk, NULL, devname); | ||
260 | return 0; | 310 | return 0; |
261 | } | 311 | } |
262 | 312 | ||
@@ -267,12 +317,14 @@ static int acpi_lpss_create_device(struct acpi_device *adev, | |||
267 | struct lpss_private_data *pdata; | 317 | struct lpss_private_data *pdata; |
268 | struct resource_list_entry *rentry; | 318 | struct resource_list_entry *rentry; |
269 | struct list_head resource_list; | 319 | struct list_head resource_list; |
320 | struct platform_device *pdev; | ||
270 | int ret; | 321 | int ret; |
271 | 322 | ||
272 | dev_desc = (struct lpss_device_desc *)id->driver_data; | 323 | dev_desc = (struct lpss_device_desc *)id->driver_data; |
273 | if (!dev_desc) | 324 | if (!dev_desc) { |
274 | return acpi_create_platform_device(adev, id); | 325 | pdev = acpi_create_platform_device(adev); |
275 | 326 | return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; | |
327 | } | ||
276 | pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); | 328 | pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); |
277 | if (!pdata) | 329 | if (!pdata) |
278 | return -ENOMEM; | 330 | return -ENOMEM; |
@@ -322,10 +374,13 @@ static int acpi_lpss_create_device(struct acpi_device *adev, | |||
322 | dev_desc->setup(pdata); | 374 | dev_desc->setup(pdata); |
323 | 375 | ||
324 | adev->driver_data = pdata; | 376 | adev->driver_data = pdata; |
325 | ret = acpi_create_platform_device(adev, id); | 377 | pdev = acpi_create_platform_device(adev); |
326 | if (ret > 0) | 378 | if (!IS_ERR_OR_NULL(pdev)) { |
327 | return ret; | 379 | device_enable_async_suspend(&pdev->dev); |
380 | return 1; | ||
381 | } | ||
328 | 382 | ||
383 | ret = PTR_ERR(pdev); | ||
329 | adev->driver_data = NULL; | 384 | adev->driver_data = NULL; |
330 | 385 | ||
331 | err_out: | 386 | err_out: |
@@ -449,6 +504,126 @@ static void acpi_lpss_set_ltr(struct device *dev, s32 val) | |||
449 | } | 504 | } |
450 | } | 505 | } |
451 | 506 | ||
507 | #ifdef CONFIG_PM | ||
508 | /** | ||
509 | * acpi_lpss_save_ctx() - Save the private registers of LPSS device | ||
510 | * @dev: LPSS device | ||
511 | * | ||
512 | * Most LPSS devices have private registers which may loose their context when | ||
513 | * the device is powered down. acpi_lpss_save_ctx() saves those registers into | ||
514 | * prv_reg_ctx array. | ||
515 | */ | ||
516 | static void acpi_lpss_save_ctx(struct device *dev) | ||
517 | { | ||
518 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); | ||
519 | unsigned int i; | ||
520 | |||
521 | for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { | ||
522 | unsigned long offset = i * sizeof(u32); | ||
523 | |||
524 | pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset); | ||
525 | dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n", | ||
526 | pdata->prv_reg_ctx[i], offset); | ||
527 | } | ||
528 | } | ||
529 | |||
530 | /** | ||
531 | * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device | ||
532 | * @dev: LPSS device | ||
533 | * | ||
534 | * Restores the registers that were previously stored with acpi_lpss_save_ctx(). | ||
535 | */ | ||
536 | static void acpi_lpss_restore_ctx(struct device *dev) | ||
537 | { | ||
538 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); | ||
539 | unsigned int i; | ||
540 | |||
541 | /* | ||
542 | * The following delay is needed or the subsequent write operations may | ||
543 | * fail. The LPSS devices are actually PCI devices and the PCI spec | ||
544 | * expects 10ms delay before the device can be accessed after D3 to D0 | ||
545 | * transition. | ||
546 | */ | ||
547 | msleep(10); | ||
548 | |||
549 | for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { | ||
550 | unsigned long offset = i * sizeof(u32); | ||
551 | |||
552 | __lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset); | ||
553 | dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n", | ||
554 | pdata->prv_reg_ctx[i], offset); | ||
555 | } | ||
556 | } | ||
557 | |||
558 | #ifdef CONFIG_PM_SLEEP | ||
559 | static int acpi_lpss_suspend_late(struct device *dev) | ||
560 | { | ||
561 | int ret = pm_generic_suspend_late(dev); | ||
562 | |||
563 | if (ret) | ||
564 | return ret; | ||
565 | |||
566 | acpi_lpss_save_ctx(dev); | ||
567 | return acpi_dev_suspend_late(dev); | ||
568 | } | ||
569 | |||
570 | static int acpi_lpss_restore_early(struct device *dev) | ||
571 | { | ||
572 | int ret = acpi_dev_resume_early(dev); | ||
573 | |||
574 | if (ret) | ||
575 | return ret; | ||
576 | |||
577 | acpi_lpss_restore_ctx(dev); | ||
578 | return pm_generic_resume_early(dev); | ||
579 | } | ||
580 | #endif /* CONFIG_PM_SLEEP */ | ||
581 | |||
582 | #ifdef CONFIG_PM_RUNTIME | ||
583 | static int acpi_lpss_runtime_suspend(struct device *dev) | ||
584 | { | ||
585 | int ret = pm_generic_runtime_suspend(dev); | ||
586 | |||
587 | if (ret) | ||
588 | return ret; | ||
589 | |||
590 | acpi_lpss_save_ctx(dev); | ||
591 | return acpi_dev_runtime_suspend(dev); | ||
592 | } | ||
593 | |||
594 | static int acpi_lpss_runtime_resume(struct device *dev) | ||
595 | { | ||
596 | int ret = acpi_dev_runtime_resume(dev); | ||
597 | |||
598 | if (ret) | ||
599 | return ret; | ||
600 | |||
601 | acpi_lpss_restore_ctx(dev); | ||
602 | return pm_generic_runtime_resume(dev); | ||
603 | } | ||
604 | #endif /* CONFIG_PM_RUNTIME */ | ||
605 | #endif /* CONFIG_PM */ | ||
606 | |||
607 | static struct dev_pm_domain acpi_lpss_pm_domain = { | ||
608 | .ops = { | ||
609 | #ifdef CONFIG_PM_SLEEP | ||
610 | .suspend_late = acpi_lpss_suspend_late, | ||
611 | .restore_early = acpi_lpss_restore_early, | ||
612 | .prepare = acpi_subsys_prepare, | ||
613 | .complete = acpi_subsys_complete, | ||
614 | .suspend = acpi_subsys_suspend, | ||
615 | .resume_early = acpi_subsys_resume_early, | ||
616 | .freeze = acpi_subsys_freeze, | ||
617 | .poweroff = acpi_subsys_suspend, | ||
618 | .poweroff_late = acpi_subsys_suspend_late, | ||
619 | #endif | ||
620 | #ifdef CONFIG_PM_RUNTIME | ||
621 | .runtime_suspend = acpi_lpss_runtime_suspend, | ||
622 | .runtime_resume = acpi_lpss_runtime_resume, | ||
623 | #endif | ||
624 | }, | ||
625 | }; | ||
626 | |||
452 | static int acpi_lpss_platform_notify(struct notifier_block *nb, | 627 | static int acpi_lpss_platform_notify(struct notifier_block *nb, |
453 | unsigned long action, void *data) | 628 | unsigned long action, void *data) |
454 | { | 629 | { |
@@ -456,7 +631,6 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, | |||
456 | struct lpss_private_data *pdata; | 631 | struct lpss_private_data *pdata; |
457 | struct acpi_device *adev; | 632 | struct acpi_device *adev; |
458 | const struct acpi_device_id *id; | 633 | const struct acpi_device_id *id; |
459 | int ret = 0; | ||
460 | 634 | ||
461 | id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev); | 635 | id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev); |
462 | if (!id || !id->driver_data) | 636 | if (!id || !id->driver_data) |
@@ -466,7 +640,7 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, | |||
466 | return 0; | 640 | return 0; |
467 | 641 | ||
468 | pdata = acpi_driver_data(adev); | 642 | pdata = acpi_driver_data(adev); |
469 | if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required) | 643 | if (!pdata || !pdata->mmio_base) |
470 | return 0; | 644 | return 0; |
471 | 645 | ||
472 | if (pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) { | 646 | if (pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) { |
@@ -474,12 +648,27 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, | |||
474 | return 0; | 648 | return 0; |
475 | } | 649 | } |
476 | 650 | ||
477 | if (action == BUS_NOTIFY_ADD_DEVICE) | 651 | switch (action) { |
478 | ret = sysfs_create_group(&pdev->dev.kobj, &lpss_attr_group); | 652 | case BUS_NOTIFY_BOUND_DRIVER: |
479 | else if (action == BUS_NOTIFY_DEL_DEVICE) | 653 | if (pdata->dev_desc->save_ctx) |
480 | sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); | 654 | pdev->dev.pm_domain = &acpi_lpss_pm_domain; |
655 | break; | ||
656 | case BUS_NOTIFY_UNBOUND_DRIVER: | ||
657 | if (pdata->dev_desc->save_ctx) | ||
658 | pdev->dev.pm_domain = NULL; | ||
659 | break; | ||
660 | case BUS_NOTIFY_ADD_DEVICE: | ||
661 | if (pdata->dev_desc->ltr_required) | ||
662 | return sysfs_create_group(&pdev->dev.kobj, | ||
663 | &lpss_attr_group); | ||
664 | case BUS_NOTIFY_DEL_DEVICE: | ||
665 | if (pdata->dev_desc->ltr_required) | ||
666 | sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); | ||
667 | default: | ||
668 | break; | ||
669 | } | ||
481 | 670 | ||
482 | return ret; | 671 | return 0; |
483 | } | 672 | } |
484 | 673 | ||
485 | static struct notifier_block acpi_lpss_nb = { | 674 | static struct notifier_block acpi_lpss_nb = { |
@@ -518,3 +707,16 @@ void __init acpi_lpss_init(void) | |||
518 | acpi_scan_add_handler(&lpss_handler); | 707 | acpi_scan_add_handler(&lpss_handler); |
519 | } | 708 | } |
520 | } | 709 | } |
710 | |||
711 | #else | ||
712 | |||
713 | static struct acpi_scan_handler lpss_handler = { | ||
714 | .ids = acpi_lpss_device_ids, | ||
715 | }; | ||
716 | |||
717 | void __init acpi_lpss_init(void) | ||
718 | { | ||
719 | acpi_scan_add_handler(&lpss_handler); | ||
720 | } | ||
721 | |||
722 | #endif /* CONFIG_X86_INTEL_LPSS */ | ||
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index b67be85ff0fc..23e2319ead41 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c | |||
@@ -44,6 +44,13 @@ | |||
44 | 44 | ||
45 | ACPI_MODULE_NAME("acpi_memhotplug"); | 45 | ACPI_MODULE_NAME("acpi_memhotplug"); |
46 | 46 | ||
47 | static const struct acpi_device_id memory_device_ids[] = { | ||
48 | {ACPI_MEMORY_DEVICE_HID, 0}, | ||
49 | {"", 0}, | ||
50 | }; | ||
51 | |||
52 | #ifdef CONFIG_ACPI_HOTPLUG_MEMORY | ||
53 | |||
47 | /* Memory Device States */ | 54 | /* Memory Device States */ |
48 | #define MEMORY_INVALID_STATE 0 | 55 | #define MEMORY_INVALID_STATE 0 |
49 | #define MEMORY_POWER_ON_STATE 1 | 56 | #define MEMORY_POWER_ON_STATE 1 |
@@ -53,11 +60,6 @@ static int acpi_memory_device_add(struct acpi_device *device, | |||
53 | const struct acpi_device_id *not_used); | 60 | const struct acpi_device_id *not_used); |
54 | static void acpi_memory_device_remove(struct acpi_device *device); | 61 | static void acpi_memory_device_remove(struct acpi_device *device); |
55 | 62 | ||
56 | static const struct acpi_device_id memory_device_ids[] = { | ||
57 | {ACPI_MEMORY_DEVICE_HID, 0}, | ||
58 | {"", 0}, | ||
59 | }; | ||
60 | |||
61 | static struct acpi_scan_handler memory_device_handler = { | 63 | static struct acpi_scan_handler memory_device_handler = { |
62 | .ids = memory_device_ids, | 64 | .ids = memory_device_ids, |
63 | .attach = acpi_memory_device_add, | 65 | .attach = acpi_memory_device_add, |
@@ -364,9 +366,11 @@ static bool __initdata acpi_no_memhotplug; | |||
364 | 366 | ||
365 | void __init acpi_memory_hotplug_init(void) | 367 | void __init acpi_memory_hotplug_init(void) |
366 | { | 368 | { |
367 | if (acpi_no_memhotplug) | 369 | if (acpi_no_memhotplug) { |
370 | memory_device_handler.attach = NULL; | ||
371 | acpi_scan_add_handler(&memory_device_handler); | ||
368 | return; | 372 | return; |
369 | 373 | } | |
370 | acpi_scan_add_handler_with_hotplug(&memory_device_handler, "memory"); | 374 | acpi_scan_add_handler_with_hotplug(&memory_device_handler, "memory"); |
371 | } | 375 | } |
372 | 376 | ||
@@ -376,3 +380,16 @@ static int __init disable_acpi_memory_hotplug(char *str) | |||
376 | return 1; | 380 | return 1; |
377 | } | 381 | } |
378 | __setup("acpi_no_memhotplug", disable_acpi_memory_hotplug); | 382 | __setup("acpi_no_memhotplug", disable_acpi_memory_hotplug); |
383 | |||
384 | #else | ||
385 | |||
386 | static struct acpi_scan_handler memory_device_handler = { | ||
387 | .ids = memory_device_ids, | ||
388 | }; | ||
389 | |||
390 | void __init acpi_memory_hotplug_init(void) | ||
391 | { | ||
392 | acpi_scan_add_handler(&memory_device_handler); | ||
393 | } | ||
394 | |||
395 | #endif /* CONFIG_ACPI_HOTPLUG_MEMORY */ | ||
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 37d73024b82e..f148a0580e04 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c | |||
@@ -156,12 +156,13 @@ static int power_saving_thread(void *data) | |||
156 | 156 | ||
157 | while (!kthread_should_stop()) { | 157 | while (!kthread_should_stop()) { |
158 | int cpu; | 158 | int cpu; |
159 | u64 expire_time; | 159 | unsigned long expire_time; |
160 | 160 | ||
161 | try_to_freeze(); | 161 | try_to_freeze(); |
162 | 162 | ||
163 | /* round robin to cpus */ | 163 | /* round robin to cpus */ |
164 | if (last_jiffies + round_robin_time * HZ < jiffies) { | 164 | expire_time = last_jiffies + round_robin_time * HZ; |
165 | if (time_before(expire_time, jiffies)) { | ||
165 | last_jiffies = jiffies; | 166 | last_jiffies = jiffies; |
166 | round_robin_cpu(tsk_index); | 167 | round_robin_cpu(tsk_index); |
167 | } | 168 | } |
@@ -200,7 +201,7 @@ static int power_saving_thread(void *data) | |||
200 | CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); | 201 | CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); |
201 | local_irq_enable(); | 202 | local_irq_enable(); |
202 | 203 | ||
203 | if (jiffies > expire_time) { | 204 | if (time_before(expire_time, jiffies)) { |
204 | do_sleep = 1; | 205 | do_sleep = 1; |
205 | break; | 206 | break; |
206 | } | 207 | } |
@@ -215,8 +216,15 @@ static int power_saving_thread(void *data) | |||
215 | * borrow CPU time from this CPU and cause RT task use > 95% | 216 | * borrow CPU time from this CPU and cause RT task use > 95% |
216 | * CPU time. To make 'avoid starvation' work, takes a nap here. | 217 | * CPU time. To make 'avoid starvation' work, takes a nap here. |
217 | */ | 218 | */ |
218 | if (do_sleep) | 219 | if (unlikely(do_sleep)) |
219 | schedule_timeout_killable(HZ * idle_pct / 100); | 220 | schedule_timeout_killable(HZ * idle_pct / 100); |
221 | |||
222 | /* If an external event has set the need_resched flag, then | ||
223 | * we need to deal with it, or this loop will continue to | ||
224 | * spin without calling __mwait(). | ||
225 | */ | ||
226 | if (unlikely(need_resched())) | ||
227 | schedule(); | ||
220 | } | 228 | } |
221 | 229 | ||
222 | exit_round_robin(tsk_index); | 230 | exit_round_robin(tsk_index); |
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index dbfe49e5fd63..2bf9082f7523 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c | |||
@@ -22,28 +22,16 @@ | |||
22 | 22 | ||
23 | ACPI_MODULE_NAME("platform"); | 23 | ACPI_MODULE_NAME("platform"); |
24 | 24 | ||
25 | /* | 25 | static const struct acpi_device_id forbidden_id_list[] = { |
26 | * The following ACPI IDs are known to be suitable for representing as | 26 | {"PNP0000", 0}, /* PIC */ |
27 | * platform devices. | 27 | {"PNP0100", 0}, /* Timer */ |
28 | */ | 28 | {"PNP0200", 0}, /* AT DMA Controller */ |
29 | static const struct acpi_device_id acpi_platform_device_ids[] = { | 29 | {"", 0}, |
30 | |||
31 | { "PNP0D40" }, | ||
32 | { "ACPI0003" }, | ||
33 | { "VPC2004" }, | ||
34 | { "BCM4752" }, | ||
35 | |||
36 | /* Intel Smart Sound Technology */ | ||
37 | { "INT33C8" }, | ||
38 | { "80860F28" }, | ||
39 | |||
40 | { } | ||
41 | }; | 30 | }; |
42 | 31 | ||
43 | /** | 32 | /** |
44 | * acpi_create_platform_device - Create platform device for ACPI device node | 33 | * acpi_create_platform_device - Create platform device for ACPI device node |
45 | * @adev: ACPI device node to create a platform device for. | 34 | * @adev: ACPI device node to create a platform device for. |
46 | * @id: ACPI device ID used to match @adev. | ||
47 | * | 35 | * |
48 | * Check if the given @adev can be represented as a platform device and, if | 36 | * Check if the given @adev can be represented as a platform device and, if |
49 | * that's the case, create and register a platform device, populate its common | 37 | * that's the case, create and register a platform device, populate its common |
@@ -51,8 +39,7 @@ static const struct acpi_device_id acpi_platform_device_ids[] = { | |||
51 | * | 39 | * |
52 | * Name of the platform device will be the same as @adev's. | 40 | * Name of the platform device will be the same as @adev's. |
53 | */ | 41 | */ |
54 | int acpi_create_platform_device(struct acpi_device *adev, | 42 | struct platform_device *acpi_create_platform_device(struct acpi_device *adev) |
55 | const struct acpi_device_id *id) | ||
56 | { | 43 | { |
57 | struct platform_device *pdev = NULL; | 44 | struct platform_device *pdev = NULL; |
58 | struct acpi_device *acpi_parent; | 45 | struct acpi_device *acpi_parent; |
@@ -64,19 +51,22 @@ int acpi_create_platform_device(struct acpi_device *adev, | |||
64 | 51 | ||
65 | /* If the ACPI node already has a physical device attached, skip it. */ | 52 | /* If the ACPI node already has a physical device attached, skip it. */ |
66 | if (adev->physical_node_count) | 53 | if (adev->physical_node_count) |
67 | return 0; | 54 | return NULL; |
55 | |||
56 | if (!acpi_match_device_ids(adev, forbidden_id_list)) | ||
57 | return ERR_PTR(-EINVAL); | ||
68 | 58 | ||
69 | INIT_LIST_HEAD(&resource_list); | 59 | INIT_LIST_HEAD(&resource_list); |
70 | count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); | 60 | count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); |
71 | if (count < 0) { | 61 | if (count < 0) { |
72 | return 0; | 62 | return NULL; |
73 | } else if (count > 0) { | 63 | } else if (count > 0) { |
74 | resources = kmalloc(count * sizeof(struct resource), | 64 | resources = kmalloc(count * sizeof(struct resource), |
75 | GFP_KERNEL); | 65 | GFP_KERNEL); |
76 | if (!resources) { | 66 | if (!resources) { |
77 | dev_err(&adev->dev, "No memory for resources\n"); | 67 | dev_err(&adev->dev, "No memory for resources\n"); |
78 | acpi_dev_free_resource_list(&resource_list); | 68 | acpi_dev_free_resource_list(&resource_list); |
79 | return -ENOMEM; | 69 | return ERR_PTR(-ENOMEM); |
80 | } | 70 | } |
81 | count = 0; | 71 | count = 0; |
82 | list_for_each_entry(rentry, &resource_list, node) | 72 | list_for_each_entry(rentry, &resource_list, node) |
@@ -113,25 +103,13 @@ int acpi_create_platform_device(struct acpi_device *adev, | |||
113 | pdevinfo.num_res = count; | 103 | pdevinfo.num_res = count; |
114 | pdevinfo.acpi_node.companion = adev; | 104 | pdevinfo.acpi_node.companion = adev; |
115 | pdev = platform_device_register_full(&pdevinfo); | 105 | pdev = platform_device_register_full(&pdevinfo); |
116 | if (IS_ERR(pdev)) { | 106 | if (IS_ERR(pdev)) |
117 | dev_err(&adev->dev, "platform device creation failed: %ld\n", | 107 | dev_err(&adev->dev, "platform device creation failed: %ld\n", |
118 | PTR_ERR(pdev)); | 108 | PTR_ERR(pdev)); |
119 | pdev = NULL; | 109 | else |
120 | } else { | ||
121 | dev_dbg(&adev->dev, "created platform device %s\n", | 110 | dev_dbg(&adev->dev, "created platform device %s\n", |
122 | dev_name(&pdev->dev)); | 111 | dev_name(&pdev->dev)); |
123 | } | ||
124 | 112 | ||
125 | kfree(resources); | 113 | kfree(resources); |
126 | return 1; | 114 | return pdev; |
127 | } | ||
128 | |||
129 | static struct acpi_scan_handler platform_handler = { | ||
130 | .ids = acpi_platform_device_ids, | ||
131 | .attach = acpi_create_platform_device, | ||
132 | }; | ||
133 | |||
134 | void __init acpi_platform_init(void) | ||
135 | { | ||
136 | acpi_scan_add_handler(&platform_handler); | ||
137 | } | 115 | } |
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c new file mode 100644 index 000000000000..6703c1fd993a --- /dev/null +++ b/drivers/acpi/acpi_pnp.c | |||
@@ -0,0 +1,395 @@ | |||
1 | /* | ||
2 | * ACPI support for PNP bus type | ||
3 | * | ||
4 | * Copyright (C) 2014, Intel Corporation | ||
5 | * Authors: Zhang Rui <rui.zhang@intel.com> | ||
6 | * Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/acpi.h> | ||
14 | #include <linux/module.h> | ||
15 | |||
16 | static const struct acpi_device_id acpi_pnp_device_ids[] = { | ||
17 | /* pata_isapnp */ | ||
18 | {"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */ | ||
19 | /* floppy */ | ||
20 | {"PNP0700"}, | ||
21 | /* ipmi_si */ | ||
22 | {"IPI0001"}, | ||
23 | /* tpm_inf_pnp */ | ||
24 | {"IFX0101"}, /* Infineon TPMs */ | ||
25 | {"IFX0102"}, /* Infineon TPMs */ | ||
26 | /*tpm_tis */ | ||
27 | {"PNP0C31"}, /* TPM */ | ||
28 | {"ATM1200"}, /* Atmel */ | ||
29 | {"IFX0102"}, /* Infineon */ | ||
30 | {"BCM0101"}, /* Broadcom */ | ||
31 | {"BCM0102"}, /* Broadcom */ | ||
32 | {"NSC1200"}, /* National */ | ||
33 | {"ICO0102"}, /* Intel */ | ||
34 | /* ide */ | ||
35 | {"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */ | ||
36 | /* ns558 */ | ||
37 | {"ASB16fd"}, /* AdLib NSC16 */ | ||
38 | {"AZT3001"}, /* AZT1008 */ | ||
39 | {"CDC0001"}, /* Opl3-SAx */ | ||
40 | {"CSC0001"}, /* CS4232 */ | ||
41 | {"CSC000f"}, /* CS4236 */ | ||
42 | {"CSC0101"}, /* CS4327 */ | ||
43 | {"CTL7001"}, /* SB16 */ | ||
44 | {"CTL7002"}, /* AWE64 */ | ||
45 | {"CTL7005"}, /* Vibra16 */ | ||
46 | {"ENS2020"}, /* SoundscapeVIVO */ | ||
47 | {"ESS0001"}, /* ES1869 */ | ||
48 | {"ESS0005"}, /* ES1878 */ | ||
49 | {"ESS6880"}, /* ES688 */ | ||
50 | {"IBM0012"}, /* CS4232 */ | ||
51 | {"OPT0001"}, /* OPTi Audio16 */ | ||
52 | {"YMH0006"}, /* Opl3-SA */ | ||
53 | {"YMH0022"}, /* Opl3-SAx */ | ||
54 | {"PNPb02f"}, /* Generic */ | ||
55 | /* i8042 kbd */ | ||
56 | {"PNP0300"}, | ||
57 | {"PNP0301"}, | ||
58 | {"PNP0302"}, | ||
59 | {"PNP0303"}, | ||
60 | {"PNP0304"}, | ||
61 | {"PNP0305"}, | ||
62 | {"PNP0306"}, | ||
63 | {"PNP0309"}, | ||
64 | {"PNP030a"}, | ||
65 | {"PNP030b"}, | ||
66 | {"PNP0320"}, | ||
67 | {"PNP0343"}, | ||
68 | {"PNP0344"}, | ||
69 | {"PNP0345"}, | ||
70 | {"CPQA0D7"}, | ||
71 | /* i8042 aux */ | ||
72 | {"AUI0200"}, | ||
73 | {"FJC6000"}, | ||
74 | {"FJC6001"}, | ||
75 | {"PNP0f03"}, | ||
76 | {"PNP0f0b"}, | ||
77 | {"PNP0f0e"}, | ||
78 | {"PNP0f12"}, | ||
79 | {"PNP0f13"}, | ||
80 | {"PNP0f19"}, | ||
81 | {"PNP0f1c"}, | ||
82 | {"SYN0801"}, | ||
83 | /* fcpnp */ | ||
84 | {"AVM0900"}, | ||
85 | /* radio-cadet */ | ||
86 | {"MSM0c24"}, /* ADS Cadet AM/FM Radio Card */ | ||
87 | /* radio-gemtek */ | ||
88 | {"ADS7183"}, /* AOpen FX-3D/Pro Radio */ | ||
89 | /* radio-sf16fmr2 */ | ||
90 | {"MFRad13"}, /* tuner subdevice of SF16-FMD2 */ | ||
91 | /* ene_ir */ | ||
92 | {"ENE0100"}, | ||
93 | {"ENE0200"}, | ||
94 | {"ENE0201"}, | ||
95 | {"ENE0202"}, | ||
96 | /* fintek-cir */ | ||
97 | {"FIT0002"}, /* CIR */ | ||
98 | /* ite-cir */ | ||
99 | {"ITE8704"}, /* Default model */ | ||
100 | {"ITE8713"}, /* CIR found in EEEBox 1501U */ | ||
101 | {"ITE8708"}, /* Bridged IT8512 */ | ||
102 | {"ITE8709"}, /* SRAM-Bridged IT8512 */ | ||
103 | /* nuvoton-cir */ | ||
104 | {"WEC0530"}, /* CIR */ | ||
105 | {"NTN0530"}, /* CIR for new chip's pnp id */ | ||
106 | /* Winbond CIR */ | ||
107 | {"WEC1022"}, | ||
108 | /* wbsd */ | ||
109 | {"WEC0517"}, | ||
110 | {"WEC0518"}, | ||
111 | /* Winbond CIR */ | ||
112 | {"TCM5090"}, /* 3Com Etherlink III (TP) */ | ||
113 | {"TCM5091"}, /* 3Com Etherlink III */ | ||
114 | {"TCM5094"}, /* 3Com Etherlink III (combo) */ | ||
115 | {"TCM5095"}, /* 3Com Etherlink III (TPO) */ | ||
116 | {"TCM5098"}, /* 3Com Etherlink III (TPC) */ | ||
117 | {"PNP80f7"}, /* 3Com Etherlink III compatible */ | ||
118 | {"PNP80f8"}, /* 3Com Etherlink III compatible */ | ||
119 | /* nsc-ircc */ | ||
120 | {"NSC6001"}, | ||
121 | {"HWPC224"}, | ||
122 | {"IBM0071"}, | ||
123 | /* smsc-ircc2 */ | ||
124 | {"SMCf010"}, | ||
125 | /* sb1000 */ | ||
126 | {"GIC1000"}, | ||
127 | /* parport_pc */ | ||
128 | {"PNP0400"}, /* Standard LPT Printer Port */ | ||
129 | {"PNP0401"}, /* ECP Printer Port */ | ||
130 | /* apple-gmux */ | ||
131 | {"APP000B"}, | ||
132 | /* fujitsu-laptop.c */ | ||
133 | {"FUJ02bf"}, | ||
134 | {"FUJ02B1"}, | ||
135 | {"FUJ02E3"}, | ||
136 | /* system */ | ||
137 | {"PNP0c02"}, /* General ID for reserving resources */ | ||
138 | {"PNP0c01"}, /* memory controller */ | ||
139 | /* rtc_cmos */ | ||
140 | {"PNP0b00"}, | ||
141 | {"PNP0b01"}, | ||
142 | {"PNP0b02"}, | ||
143 | /* c6xdigio */ | ||
144 | {"PNP0400"}, /* Standard LPT Printer Port */ | ||
145 | {"PNP0401"}, /* ECP Printer Port */ | ||
146 | /* ni_atmio.c */ | ||
147 | {"NIC1900"}, | ||
148 | {"NIC2400"}, | ||
149 | {"NIC2500"}, | ||
150 | {"NIC2600"}, | ||
151 | {"NIC2700"}, | ||
152 | /* serial */ | ||
153 | {"AAC000F"}, /* Archtek America Corp. Archtek SmartLink Modem 3334BT Plug & Play */ | ||
154 | {"ADC0001"}, /* Anchor Datacomm BV. SXPro 144 External Data Fax Modem Plug & Play */ | ||
155 | {"ADC0002"}, /* SXPro 288 External Data Fax Modem Plug & Play */ | ||
156 | {"AEI0250"}, /* PROLiNK 1456VH ISA PnP K56flex Fax Modem */ | ||
157 | {"AEI1240"}, /* Actiontec ISA PNP 56K X2 Fax Modem */ | ||
158 | {"AKY1021"}, /* Rockwell 56K ACF II Fax+Data+Voice Modem */ | ||
159 | {"AZT4001"}, /* AZT3005 PnP SOUND DEVICE */ | ||
160 | {"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */ | ||
161 | {"BRI0A49"}, /* Boca Complete Ofc Communicator 14.4 Data-FAX */ | ||
162 | {"BRI1400"}, /* Boca Research 33,600 ACF Modem */ | ||
163 | {"BRI3400"}, /* Boca 33.6 Kbps Internal FD34FSVD */ | ||
164 | {"BRI0A49"}, /* Boca 33.6 Kbps Internal FD34FSVD */ | ||
165 | {"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */ | ||
166 | {"CPI4050"}, /* Computer Peripherals Inc. EuroViVa CommCenter-33.6 SP PnP */ | ||
167 | {"CTL3001"}, /* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */ | ||
168 | {"CTL3011"}, /* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */ | ||
169 | {"DAV0336"}, /* Davicom ISA 33.6K Modem */ | ||
170 | {"DMB1032"}, /* Creative Modem Blaster Flash56 DI5601-1 */ | ||
171 | {"DMB2001"}, /* Creative Modem Blaster V.90 DI5660 */ | ||
172 | {"ETT0002"}, /* E-Tech CyberBULLET PC56RVP */ | ||
173 | {"FUJ0202"}, /* Fujitsu 33600 PnP-I2 R Plug & Play */ | ||
174 | {"FUJ0205"}, /* Fujitsu FMV-FX431 Plug & Play */ | ||
175 | {"FUJ0206"}, /* Fujitsu 33600 PnP-I4 R Plug & Play */ | ||
176 | {"FUJ0209"}, /* Fujitsu Fax Voice 33600 PNP-I5 R Plug & Play */ | ||
177 | {"GVC000F"}, /* Archtek SmartLink Modem 3334BT Plug & Play */ | ||
178 | {"GVC0303"}, /* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */ | ||
179 | {"HAY0001"}, /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ | ||
180 | {"HAY000C"}, /* Hayes Optima 336 V.34 + FAX + Voice PnP */ | ||
181 | {"HAY000D"}, /* Hayes Optima 336B V.34 + FAX + Voice PnP */ | ||
182 | {"HAY5670"}, /* Hayes Accura 56K Ext Fax Modem PnP */ | ||
183 | {"HAY5674"}, /* Hayes Accura 56K Ext Fax Modem PnP */ | ||
184 | {"HAY5675"}, /* Hayes Accura 56K Fax Modem PnP */ | ||
185 | {"HAYF000"}, /* Hayes 288, V.34 + FAX */ | ||
186 | {"HAYF001"}, /* Hayes Optima 288 V.34 + FAX + Voice, Plug & Play */ | ||
187 | {"IBM0033"}, /* IBM Thinkpad 701 Internal Modem Voice */ | ||
188 | {"PNP4972"}, /* Intermec CV60 touchscreen port */ | ||
189 | {"IXDC801"}, /* Intertex 28k8 33k6 Voice EXT PnP */ | ||
190 | {"IXDC901"}, /* Intertex 33k6 56k Voice EXT PnP */ | ||
191 | {"IXDD801"}, /* Intertex 28k8 33k6 Voice SP EXT PnP */ | ||
192 | {"IXDD901"}, /* Intertex 33k6 56k Voice SP EXT PnP */ | ||
193 | {"IXDF401"}, /* Intertex 28k8 33k6 Voice SP INT PnP */ | ||
194 | {"IXDF801"}, /* Intertex 28k8 33k6 Voice SP EXT PnP */ | ||
195 | {"IXDF901"}, /* Intertex 33k6 56k Voice SP EXT PnP */ | ||
196 | {"KOR4522"}, /* KORTEX 28800 Externe PnP */ | ||
197 | {"KORF661"}, /* KXPro 33.6 Vocal ASVD PnP */ | ||
198 | {"LAS4040"}, /* LASAT Internet 33600 PnP */ | ||
199 | {"LAS4540"}, /* Lasat Safire 560 PnP */ | ||
200 | {"LAS5440"}, /* Lasat Safire 336 PnP */ | ||
201 | {"MNP0281"}, /* Microcom TravelPorte FAST V.34 Plug & Play */ | ||
202 | {"MNP0336"}, /* Microcom DeskPorte V.34 FAST or FAST+ Plug & Play */ | ||
203 | {"MNP0339"}, /* Microcom DeskPorte FAST EP 28.8 Plug & Play */ | ||
204 | {"MNP0342"}, /* Microcom DeskPorte 28.8P Plug & Play */ | ||
205 | {"MNP0500"}, /* Microcom DeskPorte FAST ES 28.8 Plug & Play */ | ||
206 | {"MNP0501"}, /* Microcom DeskPorte FAST ES 28.8 Plug & Play */ | ||
207 | {"MNP0502"}, /* Microcom DeskPorte 28.8S Internal Plug & Play */ | ||
208 | {"MOT1105"}, /* Motorola BitSURFR Plug & Play */ | ||
209 | {"MOT1111"}, /* Motorola TA210 Plug & Play */ | ||
210 | {"MOT1114"}, /* Motorola HMTA 200 (ISDN) Plug & Play */ | ||
211 | {"MOT1115"}, /* Motorola BitSURFR Plug & Play */ | ||
212 | {"MOT1190"}, /* Motorola Lifestyle 28.8 Internal */ | ||
213 | {"MOT1501"}, /* Motorola V.3400 Plug & Play */ | ||
214 | {"MOT1502"}, /* Motorola Lifestyle 28.8 V.34 Plug & Play */ | ||
215 | {"MOT1505"}, /* Motorola Power 28.8 V.34 Plug & Play */ | ||
216 | {"MOT1509"}, /* Motorola ModemSURFR External 28.8 Plug & Play */ | ||
217 | {"MOT150A"}, /* Motorola Premier 33.6 Desktop Plug & Play */ | ||
218 | {"MOT150F"}, /* Motorola VoiceSURFR 56K External PnP */ | ||
219 | {"MOT1510"}, /* Motorola ModemSURFR 56K External PnP */ | ||
220 | {"MOT1550"}, /* Motorola ModemSURFR 56K Internal PnP */ | ||
221 | {"MOT1560"}, /* Motorola ModemSURFR Internal 28.8 Plug & Play */ | ||
222 | {"MOT1580"}, /* Motorola Premier 33.6 Internal Plug & Play */ | ||
223 | {"MOT15B0"}, /* Motorola OnlineSURFR 28.8 Internal Plug & Play */ | ||
224 | {"MOT15F0"}, /* Motorola VoiceSURFR 56K Internal PnP */ | ||
225 | {"MVX00A1"}, /* Deskline K56 Phone System PnP */ | ||
226 | {"MVX00F2"}, /* PC Rider K56 Phone System PnP */ | ||
227 | {"nEC8241"}, /* NEC 98NOTE SPEAKER PHONE FAX MODEM(33600bps) */ | ||
228 | {"PMC2430"}, /* Pace 56 Voice Internal Plug & Play Modem */ | ||
229 | {"PNP0500"}, /* Generic standard PC COM port */ | ||
230 | {"PNP0501"}, /* Generic 16550A-compatible COM port */ | ||
231 | {"PNPC000"}, /* Compaq 14400 Modem */ | ||
232 | {"PNPC001"}, /* Compaq 2400/9600 Modem */ | ||
233 | {"PNPC031"}, /* Dial-Up Networking Serial Cable between 2 PCs */ | ||
234 | {"PNPC032"}, /* Dial-Up Networking Parallel Cable between 2 PCs */ | ||
235 | {"PNPC100"}, /* Standard 9600 bps Modem */ | ||
236 | {"PNPC101"}, /* Standard 14400 bps Modem */ | ||
237 | {"PNPC102"}, /* Standard 28800 bps Modem */ | ||
238 | {"PNPC103"}, /* Standard Modem */ | ||
239 | {"PNPC104"}, /* Standard 9600 bps Modem */ | ||
240 | {"PNPC105"}, /* Standard 14400 bps Modem */ | ||
241 | {"PNPC106"}, /* Standard 28800 bps Modem */ | ||
242 | {"PNPC107"}, /* Standard Modem */ | ||
243 | {"PNPC108"}, /* Standard 9600 bps Modem */ | ||
244 | {"PNPC109"}, /* Standard 14400 bps Modem */ | ||
245 | {"PNPC10A"}, /* Standard 28800 bps Modem */ | ||
246 | {"PNPC10B"}, /* Standard Modem */ | ||
247 | {"PNPC10C"}, /* Standard 9600 bps Modem */ | ||
248 | {"PNPC10D"}, /* Standard 14400 bps Modem */ | ||
249 | {"PNPC10E"}, /* Standard 28800 bps Modem */ | ||
250 | {"PNPC10F"}, /* Standard Modem */ | ||
251 | {"PNP2000"}, /* Standard PCMCIA Card Modem */ | ||
252 | {"ROK0030"}, /* Rockwell 33.6 DPF Internal PnP, Modular Technology 33.6 Internal PnP */ | ||
253 | {"ROK0100"}, /* KORTEX 14400 Externe PnP */ | ||
254 | {"ROK4120"}, /* Rockwell 28.8 */ | ||
255 | {"ROK4920"}, /* Viking 28.8 INTERNAL Fax+Data+Voice PnP */ | ||
256 | {"RSS00A0"}, /* Rockwell 33.6 DPF External PnP, BT Prologue 33.6 External PnP, Modular Technology 33.6 External PnP */ | ||
257 | {"RSS0262"}, /* Viking 56K FAX INT */ | ||
258 | {"RSS0250"}, /* K56 par,VV,Voice,Speakphone,AudioSpan,PnP */ | ||
259 | {"SUP1310"}, /* SupraExpress 28.8 Data/Fax PnP modem */ | ||
260 | {"SUP1381"}, /* SupraExpress 336i PnP Voice Modem */ | ||
261 | {"SUP1421"}, /* SupraExpress 33.6 Data/Fax PnP modem */ | ||
262 | {"SUP1590"}, /* SupraExpress 33.6 Data/Fax PnP modem */ | ||
263 | {"SUP1620"}, /* SupraExpress 336i Sp ASVD */ | ||
264 | {"SUP1760"}, /* SupraExpress 33.6 Data/Fax PnP modem */ | ||
265 | {"SUP2171"}, /* SupraExpress 56i Sp Intl */ | ||
266 | {"TEX0011"}, /* Phoebe Micro 33.6 Data Fax 1433VQH Plug & Play */ | ||
267 | {"UAC000F"}, /* Archtek SmartLink Modem 3334BT Plug & Play */ | ||
268 | {"USR0000"}, /* 3Com Corp. Gateway Telepath IIvi 33.6 */ | ||
269 | {"USR0002"}, /* U.S. Robotics Sporster 33.6K Fax INT PnP */ | ||
270 | {"USR0004"}, /* Sportster Vi 14.4 PnP FAX Voicemail */ | ||
271 | {"USR0006"}, /* U.S. Robotics 33.6K Voice INT PnP */ | ||
272 | {"USR0007"}, /* U.S. Robotics 33.6K Voice EXT PnP */ | ||
273 | {"USR0009"}, /* U.S. Robotics Courier V.Everything INT PnP */ | ||
274 | {"USR2002"}, /* U.S. Robotics 33.6K Voice INT PnP */ | ||
275 | {"USR2070"}, /* U.S. Robotics 56K Voice INT PnP */ | ||
276 | {"USR2080"}, /* U.S. Robotics 56K Voice EXT PnP */ | ||
277 | {"USR3031"}, /* U.S. Robotics 56K FAX INT */ | ||
278 | {"USR3050"}, /* U.S. Robotics 56K FAX INT */ | ||
279 | {"USR3070"}, /* U.S. Robotics 56K Voice INT PnP */ | ||
280 | {"USR3080"}, /* U.S. Robotics 56K Voice EXT PnP */ | ||
281 | {"USR3090"}, /* U.S. Robotics 56K Voice INT PnP */ | ||
282 | {"USR9100"}, /* U.S. Robotics 56K Message */ | ||
283 | {"USR9160"}, /* U.S. Robotics 56K FAX EXT PnP */ | ||
284 | {"USR9170"}, /* U.S. Robotics 56K FAX INT PnP */ | ||
285 | {"USR9180"}, /* U.S. Robotics 56K Voice EXT PnP */ | ||
286 | {"USR9190"}, /* U.S. Robotics 56K Voice INT PnP */ | ||
287 | {"WACFXXX"}, /* Wacom tablets */ | ||
288 | {"FPI2002"}, /* Compaq touchscreen */ | ||
289 | {"FUJ02B2"}, /* Fujitsu Stylistic touchscreens */ | ||
290 | {"FUJ02B3"}, | ||
291 | {"FUJ02B4"}, /* Fujitsu Stylistic LT touchscreens */ | ||
292 | {"FUJ02B6"}, /* Passive Fujitsu Stylistic touchscreens */ | ||
293 | {"FUJ02B7"}, | ||
294 | {"FUJ02B8"}, | ||
295 | {"FUJ02B9"}, | ||
296 | {"FUJ02BC"}, | ||
297 | {"FUJ02E5"}, /* Fujitsu Wacom Tablet PC device */ | ||
298 | {"FUJ02E6"}, /* Fujitsu P-series tablet PC device */ | ||
299 | {"FUJ02E7"}, /* Fujitsu Wacom 2FGT Tablet PC device */ | ||
300 | {"FUJ02E9"}, /* Fujitsu Wacom 1FGT Tablet PC device */ | ||
301 | {"LTS0001"}, /* LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in disguise) */ | ||
302 | {"WCI0003"}, /* Rockwell's (PORALiNK) 33600 INT PNP */ | ||
303 | {"WEC1022"}, /* Winbond CIR port, should not be probed. We should keep track of it to prevent the legacy serial driver from probing it */ | ||
304 | /* scl200wdt */ | ||
305 | {"NSC0800"}, /* National Semiconductor PC87307/PC97307 watchdog component */ | ||
306 | /* mpu401 */ | ||
307 | {"PNPb006"}, | ||
308 | /* cs423x-pnpbios */ | ||
309 | {"CSC0100"}, | ||
310 | {"CSC0000"}, | ||
311 | {"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */ | ||
312 | /* es18xx-pnpbios */ | ||
313 | {"ESS1869"}, | ||
314 | {"ESS1879"}, | ||
315 | /* snd-opl3sa2-pnpbios */ | ||
316 | {"YMH0021"}, | ||
317 | {"NMX2210"}, /* Gateway Solo 2500 */ | ||
318 | {""}, | ||
319 | }; | ||
320 | |||
321 | /* True iff c is an uppercase hexadecimal digit ('0'-'9' or 'A'-'F'); caller upcases first */ | ||
321 | static bool is_hex_digit(char c) | ||
322 | { | ||
323 | return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'F'); /* was "c >= 0": accepted every char below ':' */ | ||
324 | } | ||
325 | |||
326 | static bool matching_id(char *idstr, char *list_id) | ||
327 | { | ||
328 | int i; | ||
329 | |||
330 | if (memcmp(idstr, list_id, 3)) /* 3-character vendor prefix must match exactly */ | ||
331 | return false; | ||
332 | |||
333 | for (i = 3; i < 7; i++) { /* then compare the four hex digits of the product number */ | ||
334 | char c = toupper(idstr[i]); | ||
335 | |||
336 | if (!is_hex_digit(c) | ||
337 | || (list_id[i] != 'X' && c != toupper(list_id[i]))) /* 'X' in the table entry is a wildcard digit */ | ||
338 | return false; | ||
339 | } | ||
340 | return true; /* all seven positions matched (case-insensitively) */ | ||
341 | } | ||
342 | |||
343 | static bool acpi_pnp_match(char *idstr, const struct acpi_device_id **matchid) | ||
344 | { | ||
345 | const struct acpi_device_id *devid; | ||
346 | |||
347 | for (devid = acpi_pnp_device_ids; devid->id[0]; devid++) /* table ends with an empty-id sentinel */ | ||
348 | if (matching_id(idstr, (char *)devid->id)) { | ||
349 | if (matchid) /* matchid is optional; report which table entry matched */ | ||
350 | *matchid = devid; | ||
351 | |||
352 | return true; | ||
353 | } | ||
354 | |||
355 | return false; /* idstr is not a known PNP device ID */ | ||
356 | } | ||
357 | |||
358 | static int acpi_pnp_attach(struct acpi_device *adev, | ||
359 | const struct acpi_device_id *id) | ||
360 | { | ||
361 | return 1; /* unconditionally claim the device; NOTE(review): presumably PNP-bus enumeration happens elsewhere — confirm */ | ||
362 | } | ||
363 | |||
364 | static struct acpi_scan_handler acpi_pnp_handler = { | ||
365 | .ids = acpi_pnp_device_ids, | ||
366 | .match = acpi_pnp_match, /* custom matcher: supports 'X' wildcard hex digits */ | ||
367 | .attach = acpi_pnp_attach, | ||
368 | }; | ||
369 | |||
370 | /* | ||
371 | * For CMOS RTC devices, the PNP ACPI scan handler does not work, because | ||
372 | * there is a CMOS RTC ACPI scan handler installed already, so we need to | ||
373 | * check those devices and enumerate them to the PNP bus directly. | ||
374 | */ | ||
375 | /* Nonzero iff adev is one of the CMOS RTC PNP IDs (see block comment above) */ | ||
375 | static int is_cmos_rtc_device(struct acpi_device *adev) | ||
376 | { | ||
377 | static const struct acpi_device_id ids[] = { /* static const: don't rebuild the table on every call */ | ||
378 | { "PNP0B00" }, | ||
379 | { "PNP0B01" }, | ||
380 | { "PNP0B02" }, | ||
381 | {""}, /* empty-id sentinel terminates the list */ | ||
382 | }; | ||
383 | return !acpi_match_device_ids(adev, ids); /* acpi_match_device_ids() returns 0 on a match */ | ||
384 | } | ||
385 | |||
386 | bool acpi_is_pnp_device(struct acpi_device *adev) | ||
387 | { | ||
388 | return adev->handler == &acpi_pnp_handler || is_cmos_rtc_device(adev); /* CMOS RTCs are owned by another scan handler but are still PNP devices */ | ||
389 | } | ||
390 | EXPORT_SYMBOL_GPL(acpi_is_pnp_device); | ||
391 | |||
392 | void __init acpi_pnp_init(void) | ||
393 | { | ||
394 | acpi_scan_add_handler(&acpi_pnp_handler); /* register the PNP handler with the ACPI scan core at boot */ | ||
395 | } | ||
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index b06f5f55ada9..1c085742644f 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c | |||
@@ -268,7 +268,7 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
268 | pr->apic_id = apic_id; | 268 | pr->apic_id = apic_id; |
269 | 269 | ||
270 | cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); | 270 | cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); |
271 | if (!cpu0_initialized) { | 271 | if (!cpu0_initialized && !acpi_lapic) { |
272 | cpu0_initialized = 1; | 272 | cpu0_initialized = 1; |
273 | /* Handle UP system running SMP kernel, with no LAPIC in MADT */ | 273 | /* Handle UP system running SMP kernel, with no LAPIC in MADT */ |
274 | if ((cpu_index == -1) && (num_online_cpus() == 1)) | 274 | if ((cpu_index == -1) && (num_online_cpus() == 1)) |
@@ -405,7 +405,6 @@ static int acpi_processor_add(struct acpi_device *device, | |||
405 | goto err; | 405 | goto err; |
406 | 406 | ||
407 | pr->dev = dev; | 407 | pr->dev = dev; |
408 | dev->offline = pr->flags.need_hotplug_init; | ||
409 | 408 | ||
410 | /* Trigger the processor driver's .probe() if present. */ | 409 | /* Trigger the processor driver's .probe() if present. */ |
411 | if (device_attach(dev) >= 0) | 410 | if (device_attach(dev) >= 0) |
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index b7ed86a20427..8bb43f06e11f 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile | |||
@@ -135,6 +135,7 @@ acpi-y += \ | |||
135 | rsxface.o | 135 | rsxface.o |
136 | 136 | ||
137 | acpi-y += \ | 137 | acpi-y += \ |
138 | tbdata.o \ | ||
138 | tbfadt.o \ | 139 | tbfadt.o \ |
139 | tbfind.o \ | 140 | tbfind.o \ |
140 | tbinstal.o \ | 141 | tbinstal.o \ |
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h new file mode 100644 index 000000000000..8698ffba6f39 --- /dev/null +++ b/drivers/acpi/acpica/acapps.h | |||
@@ -0,0 +1,170 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: acapps - common include for ACPI applications/tools | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2014, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #ifndef _ACAPPS | ||
45 | #define _ACAPPS | ||
46 | |||
47 | /* Common info for tool signons */ | ||
48 | |||
49 | #define ACPICA_NAME "Intel ACPI Component Architecture" | ||
50 | #define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2014 Intel Corporation" | ||
51 | |||
52 | #if ACPI_MACHINE_WIDTH == 64 | ||
53 | #define ACPI_WIDTH "-64" | ||
54 | |||
55 | #elif ACPI_MACHINE_WIDTH == 32 | ||
56 | #define ACPI_WIDTH "-32" | ||
57 | |||
58 | #else | ||
59 | #error unknown ACPI_MACHINE_WIDTH | ||
60 | #define ACPI_WIDTH "-??" | ||
61 | |||
62 | #endif | ||
63 | |||
64 | /* Macros for signons and file headers */ | ||
65 | |||
66 | #define ACPI_COMMON_SIGNON(utility_name) \ | ||
67 | "\n%s\n%s version %8.8X%s [%s]\n%s\n\n", \ | ||
68 | ACPICA_NAME, \ | ||
69 | utility_name, ((u32) ACPI_CA_VERSION), ACPI_WIDTH, __DATE__, \ | ||
70 | ACPICA_COPYRIGHT | ||
71 | |||
72 | #define ACPI_COMMON_HEADER(utility_name, prefix) \ | ||
73 | "%s%s\n%s%s version %8.8X%s [%s]\n%s%s\n%s\n", \ | ||
74 | prefix, ACPICA_NAME, \ | ||
75 | prefix, utility_name, ((u32) ACPI_CA_VERSION), ACPI_WIDTH, __DATE__, \ | ||
76 | prefix, ACPICA_COPYRIGHT, \ | ||
77 | prefix | ||
78 | |||
79 | /* Macros for usage messages */ | ||
80 | |||
81 | #define ACPI_USAGE_HEADER(usage) \ | ||
82 | printf ("Usage: %s\nOptions:\n", usage); | ||
83 | |||
84 | #define ACPI_OPTION(name, description) \ | ||
85 | printf (" %-18s%s\n", name, description); | ||
86 | |||
87 | #define FILE_SUFFIX_DISASSEMBLY "dsl" | ||
88 | #define ACPI_TABLE_FILE_SUFFIX ".dat" | ||
89 | |||
90 | /* | ||
91 | * getopt | ||
92 | */ | ||
93 | int acpi_getopt(int argc, char **argv, char *opts); | ||
94 | |||
95 | int acpi_getopt_argument(int argc, char **argv); | ||
96 | |||
97 | extern int acpi_gbl_optind; | ||
98 | extern int acpi_gbl_opterr; | ||
99 | extern int acpi_gbl_sub_opt_char; | ||
100 | extern char *acpi_gbl_optarg; | ||
101 | |||
102 | /* | ||
103 | * cmfsize - Common get file size function | ||
104 | */ | ||
105 | u32 cm_get_file_size(FILE * file); | ||
106 | |||
107 | #ifndef ACPI_DUMP_APP | ||
108 | /* | ||
109 | * adisasm | ||
110 | */ | ||
111 | acpi_status | ||
112 | ad_aml_disassemble(u8 out_to_file, | ||
113 | char *filename, char *prefix, char **out_filename); | ||
114 | |||
115 | void ad_print_statistics(void); | ||
116 | |||
117 | acpi_status ad_find_dsdt(u8 **dsdt_ptr, u32 *dsdt_length); | ||
118 | |||
119 | void ad_dump_tables(void); | ||
120 | |||
121 | acpi_status ad_get_local_tables(void); | ||
122 | |||
123 | acpi_status | ||
124 | ad_parse_table(struct acpi_table_header *table, | ||
125 | acpi_owner_id * owner_id, u8 load_table, u8 external); | ||
126 | |||
127 | acpi_status ad_display_tables(char *filename, struct acpi_table_header *table); | ||
128 | |||
129 | acpi_status ad_display_statistics(void); | ||
130 | |||
131 | /* | ||
132 | * adwalk | ||
133 | */ | ||
134 | void | ||
135 | acpi_dm_cross_reference_namespace(union acpi_parse_object *parse_tree_root, | ||
136 | struct acpi_namespace_node *namespace_root, | ||
137 | acpi_owner_id owner_id); | ||
138 | |||
139 | void acpi_dm_dump_tree(union acpi_parse_object *origin); | ||
140 | |||
141 | void acpi_dm_find_orphan_methods(union acpi_parse_object *origin); | ||
142 | |||
143 | void | ||
144 | acpi_dm_finish_namespace_load(union acpi_parse_object *parse_tree_root, | ||
145 | struct acpi_namespace_node *namespace_root, | ||
146 | acpi_owner_id owner_id); | ||
147 | |||
148 | void | ||
149 | acpi_dm_convert_resource_indexes(union acpi_parse_object *parse_tree_root, | ||
150 | struct acpi_namespace_node *namespace_root); | ||
151 | |||
152 | /* | ||
153 | * adfile | ||
154 | */ | ||
155 | acpi_status ad_initialize(void); | ||
156 | |||
157 | char *fl_generate_filename(char *input_filename, char *suffix); | ||
158 | |||
159 | acpi_status | ||
160 | fl_split_input_pathname(char *input_path, | ||
161 | char **out_directory_path, char **out_filename); | ||
162 | |||
163 | char *ad_generate_filename(char *prefix, char *table_id); | ||
164 | |||
165 | void | ||
166 | ad_write_table(struct acpi_table_header *table, | ||
167 | u32 length, char *table_name, char *oem_table_id); | ||
168 | #endif | ||
169 | |||
170 | #endif /* _ACAPPS */ | ||
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index 68ec61fff188..7a7811a9fc26 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h | |||
@@ -104,9 +104,10 @@ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info); | |||
104 | */ | 104 | */ |
105 | acpi_status | 105 | acpi_status |
106 | acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | 106 | acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, |
107 | struct acpi_generic_address *gpe_block_address, | 107 | u64 address, |
108 | u8 space_id, | ||
108 | u32 register_count, | 109 | u32 register_count, |
109 | u8 gpe_block_base_number, | 110 | u16 gpe_block_base_number, |
110 | u32 interrupt_number, | 111 | u32 interrupt_number, |
111 | struct acpi_gpe_block_info **return_gpe_block); | 112 | struct acpi_gpe_block_info **return_gpe_block); |
112 | 113 | ||
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 49bbc71fad54..115eedcade1e 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h | |||
@@ -44,144 +44,14 @@ | |||
44 | #ifndef __ACGLOBAL_H__ | 44 | #ifndef __ACGLOBAL_H__ |
45 | #define __ACGLOBAL_H__ | 45 | #define __ACGLOBAL_H__ |
46 | 46 | ||
47 | /* | ||
48 | * Ensure that the globals are actually defined and initialized only once. | ||
49 | * | ||
50 | * The use of these macros allows a single list of globals (here) in order | ||
51 | * to simplify maintenance of the code. | ||
52 | */ | ||
53 | #ifdef DEFINE_ACPI_GLOBALS | ||
54 | #define ACPI_GLOBAL(type,name) \ | ||
55 | extern type name; \ | ||
56 | type name | ||
57 | |||
58 | #define ACPI_INIT_GLOBAL(type,name,value) \ | ||
59 | type name=value | ||
60 | |||
61 | #else | ||
62 | #define ACPI_GLOBAL(type,name) \ | ||
63 | extern type name | ||
64 | |||
65 | #define ACPI_INIT_GLOBAL(type,name,value) \ | ||
66 | extern type name | ||
67 | #endif | ||
68 | |||
69 | #ifdef DEFINE_ACPI_GLOBALS | ||
70 | |||
71 | /* Public globals, available from outside ACPICA subsystem */ | ||
72 | |||
73 | /***************************************************************************** | 47 | /***************************************************************************** |
74 | * | 48 | * |
75 | * Runtime configuration (static defaults that can be overriden at runtime) | 49 | * Globals related to the ACPI tables |
76 | * | 50 | * |
77 | ****************************************************************************/ | 51 | ****************************************************************************/ |
78 | 52 | ||
79 | /* | 53 | /* Master list of all ACPI tables that were found in the RSDT/XSDT */ |
80 | * Enable "slack" in the AML interpreter? Default is FALSE, and the | ||
81 | * interpreter strictly follows the ACPI specification. Setting to TRUE | ||
82 | * allows the interpreter to ignore certain errors and/or bad AML constructs. | ||
83 | * | ||
84 | * Currently, these features are enabled by this flag: | ||
85 | * | ||
86 | * 1) Allow "implicit return" of last value in a control method | ||
87 | * 2) Allow access beyond the end of an operation region | ||
88 | * 3) Allow access to uninitialized locals/args (auto-init to integer 0) | ||
89 | * 4) Allow ANY object type to be a source operand for the Store() operator | ||
90 | * 5) Allow unresolved references (invalid target name) in package objects | ||
91 | * 6) Enable warning messages for behavior that is not ACPI spec compliant | ||
92 | */ | ||
93 | ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_interpreter_slack, FALSE); | ||
94 | 54 | ||
95 | /* | ||
96 | * Automatically serialize all methods that create named objects? Default | ||
97 | * is TRUE, meaning that all non_serialized methods are scanned once at | ||
98 | * table load time to determine those that create named objects. Methods | ||
99 | * that create named objects are marked Serialized in order to prevent | ||
100 | * possible run-time problems if they are entered by more than one thread. | ||
101 | */ | ||
102 | ACPI_INIT_GLOBAL(u8, acpi_gbl_auto_serialize_methods, TRUE); | ||
103 | |||
104 | /* | ||
105 | * Create the predefined _OSI method in the namespace? Default is TRUE | ||
106 | * because ACPI CA is fully compatible with other ACPI implementations. | ||
107 | * Changing this will revert ACPI CA (and machine ASL) to pre-OSI behavior. | ||
108 | */ | ||
109 | ACPI_INIT_GLOBAL(u8, acpi_gbl_create_osi_method, TRUE); | ||
110 | |||
111 | /* | ||
112 | * Optionally use default values for the ACPI register widths. Set this to | ||
113 | * TRUE to use the defaults, if an FADT contains incorrect widths/lengths. | ||
114 | */ | ||
115 | ACPI_INIT_GLOBAL(u8, acpi_gbl_use_default_register_widths, TRUE); | ||
116 | |||
117 | /* | ||
118 | * Optionally enable output from the AML Debug Object. | ||
119 | */ | ||
120 | ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_aml_debug_object, FALSE); | ||
121 | |||
122 | /* | ||
123 | * Optionally copy the entire DSDT to local memory (instead of simply | ||
124 | * mapping it.) There are some BIOSs that corrupt or replace the original | ||
125 | * DSDT, creating the need for this option. Default is FALSE, do not copy | ||
126 | * the DSDT. | ||
127 | */ | ||
128 | ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, FALSE); | ||
129 | |||
130 | /* | ||
131 | * Optionally ignore an XSDT if present and use the RSDT instead. | ||
132 | * Although the ACPI specification requires that an XSDT be used instead | ||
133 | * of the RSDT, the XSDT has been found to be corrupt or ill-formed on | ||
134 | * some machines. Default behavior is to use the XSDT if present. | ||
135 | */ | ||
136 | ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE); | ||
137 | |||
138 | /* | ||
139 | * Optionally use 32-bit FADT addresses if and when there is a conflict | ||
140 | * (address mismatch) between the 32-bit and 64-bit versions of the | ||
141 | * address. Although ACPICA adheres to the ACPI specification which | ||
142 | * requires the use of the corresponding 64-bit address if it is non-zero, | ||
143 | * some machines have been found to have a corrupted non-zero 64-bit | ||
144 | * address. Default is FALSE, do not favor the 32-bit addresses. | ||
145 | */ | ||
146 | ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE); | ||
147 | |||
148 | /* | ||
149 | * Optionally truncate I/O addresses to 16 bits. Provides compatibility | ||
150 | * with other ACPI implementations. NOTE: During ACPICA initialization, | ||
151 | * this value is set to TRUE if any Windows OSI strings have been | ||
152 | * requested by the BIOS. | ||
153 | */ | ||
154 | ACPI_INIT_GLOBAL(u8, acpi_gbl_truncate_io_addresses, FALSE); | ||
155 | |||
156 | /* | ||
157 | * Disable runtime checking and repair of values returned by control methods. | ||
158 | * Use only if the repair is causing a problem on a particular machine. | ||
159 | */ | ||
160 | ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_auto_repair, FALSE); | ||
161 | |||
162 | /* | ||
163 | * Optionally do not load any SSDTs from the RSDT/XSDT during initialization. | ||
164 | * This can be useful for debugging ACPI problems on some machines. | ||
165 | */ | ||
166 | ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_ssdt_table_load, FALSE); | ||
167 | |||
168 | /* | ||
169 | * We keep track of the latest version of Windows that has been requested by | ||
170 | * the BIOS. | ||
171 | */ | ||
172 | ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0); | ||
173 | |||
174 | #endif /* DEFINE_ACPI_GLOBALS */ | ||
175 | |||
176 | /***************************************************************************** | ||
177 | * | ||
178 | * ACPI Table globals | ||
179 | * | ||
180 | ****************************************************************************/ | ||
181 | |||
182 | /* | ||
183 | * Master list of all ACPI tables that were found in the RSDT/XSDT. | ||
184 | */ | ||
185 | ACPI_GLOBAL(struct acpi_table_list, acpi_gbl_root_table_list); | 55 | ACPI_GLOBAL(struct acpi_table_list, acpi_gbl_root_table_list); |
186 | 56 | ||
187 | /* DSDT information. Used to check for DSDT corruption */ | 57 | /* DSDT information. Used to check for DSDT corruption */ |
@@ -279,7 +149,6 @@ ACPI_GLOBAL(acpi_exception_handler, acpi_gbl_exception_handler); | |||
279 | ACPI_GLOBAL(acpi_init_handler, acpi_gbl_init_handler); | 149 | ACPI_GLOBAL(acpi_init_handler, acpi_gbl_init_handler); |
280 | ACPI_GLOBAL(acpi_table_handler, acpi_gbl_table_handler); | 150 | ACPI_GLOBAL(acpi_table_handler, acpi_gbl_table_handler); |
281 | ACPI_GLOBAL(void *, acpi_gbl_table_handler_context); | 151 | ACPI_GLOBAL(void *, acpi_gbl_table_handler_context); |
282 | ACPI_GLOBAL(struct acpi_walk_state *, acpi_gbl_breakpoint_walk); | ||
283 | ACPI_GLOBAL(acpi_interface_handler, acpi_gbl_interface_handler); | 152 | ACPI_GLOBAL(acpi_interface_handler, acpi_gbl_interface_handler); |
284 | ACPI_GLOBAL(struct acpi_sci_handler_info *, acpi_gbl_sci_handler_list); | 153 | ACPI_GLOBAL(struct acpi_sci_handler_info *, acpi_gbl_sci_handler_list); |
285 | 154 | ||
@@ -296,7 +165,6 @@ ACPI_GLOBAL(u8, acpi_gbl_reg_methods_executed); | |||
296 | /* Misc */ | 165 | /* Misc */ |
297 | 166 | ||
298 | ACPI_GLOBAL(u32, acpi_gbl_original_mode); | 167 | ACPI_GLOBAL(u32, acpi_gbl_original_mode); |
299 | ACPI_GLOBAL(u32, acpi_gbl_rsdp_original_location); | ||
300 | ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count); | 168 | ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count); |
301 | ACPI_GLOBAL(u32, acpi_gbl_ps_find_count); | 169 | ACPI_GLOBAL(u32, acpi_gbl_ps_find_count); |
302 | ACPI_GLOBAL(u16, acpi_gbl_pm1_enable_register_save); | 170 | ACPI_GLOBAL(u16, acpi_gbl_pm1_enable_register_save); |
@@ -483,11 +351,6 @@ ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc); | |||
483 | ACPI_GLOBAL(u32, acpi_gbl_num_nodes); | 351 | ACPI_GLOBAL(u32, acpi_gbl_num_nodes); |
484 | ACPI_GLOBAL(u32, acpi_gbl_num_objects); | 352 | ACPI_GLOBAL(u32, acpi_gbl_num_objects); |
485 | 353 | ||
486 | ACPI_GLOBAL(u32, acpi_gbl_size_of_parse_tree); | ||
487 | ACPI_GLOBAL(u32, acpi_gbl_size_of_method_trees); | ||
488 | ACPI_GLOBAL(u32, acpi_gbl_size_of_node_entries); | ||
489 | ACPI_GLOBAL(u32, acpi_gbl_size_of_acpi_objects); | ||
490 | |||
491 | #endif /* ACPI_DEBUGGER */ | 354 | #endif /* ACPI_DEBUGGER */ |
492 | 355 | ||
493 | /***************************************************************************** | 356 | /***************************************************************************** |
@@ -509,5 +372,6 @@ ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL); | |||
509 | ****************************************************************************/ | 372 | ****************************************************************************/ |
510 | 373 | ||
511 | extern const struct ah_predefined_name asl_predefined_info[]; | 374 | extern const struct ah_predefined_name asl_predefined_info[]; |
375 | extern const struct ah_device_id asl_device_ids[]; | ||
512 | 376 | ||
513 | #endif /* __ACGLOBAL_H__ */ | 377 | #endif /* __ACGLOBAL_H__ */ |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 52a21dafb540..91f801a2e689 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
@@ -450,9 +450,9 @@ struct acpi_gpe_event_info { | |||
450 | struct acpi_gpe_register_info { | 450 | struct acpi_gpe_register_info { |
451 | struct acpi_generic_address status_address; /* Address of status reg */ | 451 | struct acpi_generic_address status_address; /* Address of status reg */ |
452 | struct acpi_generic_address enable_address; /* Address of enable reg */ | 452 | struct acpi_generic_address enable_address; /* Address of enable reg */ |
453 | u16 base_gpe_number; /* Base GPE number for this register */ | ||
453 | u8 enable_for_wake; /* GPEs to keep enabled when sleeping */ | 454 | u8 enable_for_wake; /* GPEs to keep enabled when sleeping */ |
454 | u8 enable_for_run; /* GPEs to keep enabled when running */ | 455 | u8 enable_for_run; /* GPEs to keep enabled when running */ |
455 | u8 base_gpe_number; /* Base GPE number for this register */ | ||
456 | }; | 456 | }; |
457 | 457 | ||
458 | /* | 458 | /* |
@@ -466,11 +466,12 @@ struct acpi_gpe_block_info { | |||
466 | struct acpi_gpe_xrupt_info *xrupt_block; /* Backpointer to interrupt block */ | 466 | struct acpi_gpe_xrupt_info *xrupt_block; /* Backpointer to interrupt block */ |
467 | struct acpi_gpe_register_info *register_info; /* One per GPE register pair */ | 467 | struct acpi_gpe_register_info *register_info; /* One per GPE register pair */ |
468 | struct acpi_gpe_event_info *event_info; /* One for each GPE */ | 468 | struct acpi_gpe_event_info *event_info; /* One for each GPE */ |
469 | struct acpi_generic_address block_address; /* Base address of the block */ | 469 | u64 address; /* Base address of the block */ |
470 | u32 register_count; /* Number of register pairs in block */ | 470 | u32 register_count; /* Number of register pairs in block */ |
471 | u16 gpe_count; /* Number of individual GPEs in block */ | 471 | u16 gpe_count; /* Number of individual GPEs in block */ |
472 | u8 block_base_number; /* Base GPE number for this block */ | 472 | u16 block_base_number; /* Base GPE number for this block */ |
473 | u8 initialized; /* TRUE if this block is initialized */ | 473 | u8 space_id; |
474 | u8 initialized; /* TRUE if this block is initialized */ | ||
474 | }; | 475 | }; |
475 | 476 | ||
476 | /* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */ | 477 | /* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */ |
@@ -733,7 +734,8 @@ union acpi_parse_value { | |||
733 | #define ACPI_DASM_MATCHOP 0x06 /* Parent opcode is a Match() operator */ | 734 | #define ACPI_DASM_MATCHOP 0x06 /* Parent opcode is a Match() operator */ |
734 | #define ACPI_DASM_LNOT_PREFIX 0x07 /* Start of a Lnot_equal (etc.) pair of opcodes */ | 735 | #define ACPI_DASM_LNOT_PREFIX 0x07 /* Start of a Lnot_equal (etc.) pair of opcodes */ |
735 | #define ACPI_DASM_LNOT_SUFFIX 0x08 /* End of a Lnot_equal (etc.) pair of opcodes */ | 736 | #define ACPI_DASM_LNOT_SUFFIX 0x08 /* End of a Lnot_equal (etc.) pair of opcodes */ |
736 | #define ACPI_DASM_IGNORE 0x09 /* Not used at this time */ | 737 | #define ACPI_DASM_HID_STRING 0x09 /* String is a _HID or _CID */ |
738 | #define ACPI_DASM_IGNORE 0x0A /* Not used at this time */ | ||
737 | 739 | ||
738 | /* | 740 | /* |
739 | * Generic operation (for example: If, While, Store) | 741 | * Generic operation (for example: If, While, Store) |
@@ -1147,4 +1149,9 @@ struct ah_predefined_name { | |||
1147 | #endif | 1149 | #endif |
1148 | }; | 1150 | }; |
1149 | 1151 | ||
1152 | struct ah_device_id { | ||
1153 | char *name; | ||
1154 | char *description; | ||
1155 | }; | ||
1156 | |||
1150 | #endif /* __ACLOCAL_H__ */ | 1157 | #endif /* __ACLOCAL_H__ */ |
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index a48d713e9599..bd08817cafd8 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h | |||
@@ -586,6 +586,10 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { | |||
586 | {{"_LID", METHOD_0ARGS, | 586 | {{"_LID", METHOD_0ARGS, |
587 | METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, | 587 | METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, |
588 | 588 | ||
589 | {{"_LPD", METHOD_0ARGS, | ||
590 | METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int(rev), n Pkg (2 Int) */ | ||
591 | PACKAGE_INFO(ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_INTEGER, 2, 0, 0, 0), | ||
592 | |||
589 | {{"_MAT", METHOD_0ARGS, | 593 | {{"_MAT", METHOD_0ARGS, |
590 | METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, | 594 | METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, |
591 | 595 | ||
@@ -698,12 +702,6 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { | |||
698 | METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */ | 702 | METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */ |
699 | PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0), | 703 | PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0), |
700 | 704 | ||
701 | {{"_PRP", METHOD_0ARGS, | ||
702 | METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each: 1 Str, 1 Int/Str/Pkg */ | ||
703 | PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_STRING, 1, | ||
704 | ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | | ||
705 | ACPI_RTYPE_PACKAGE | ACPI_RTYPE_REFERENCE, 1, 0), | ||
706 | |||
707 | {{"_PRS", METHOD_0ARGS, | 705 | {{"_PRS", METHOD_0ARGS, |
708 | METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, | 706 | METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, |
709 | 707 | ||
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index 5fa4b2027697..f14882788eee 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h | |||
@@ -54,6 +54,31 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp); | |||
54 | u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length); | 54 | u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length); |
55 | 55 | ||
56 | /* | 56 | /* |
57 | * tbdata - table data structure management | ||
58 | */ | ||
59 | acpi_status acpi_tb_get_next_root_index(u32 *table_index); | ||
60 | |||
61 | void | ||
62 | acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc, | ||
63 | acpi_physical_address address, | ||
64 | u8 flags, struct acpi_table_header *table); | ||
65 | |||
66 | acpi_status | ||
67 | acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc, | ||
68 | acpi_physical_address address, u8 flags); | ||
69 | |||
70 | void acpi_tb_release_temp_table(struct acpi_table_desc *table_desc); | ||
71 | |||
72 | acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc); | ||
73 | |||
74 | acpi_status | ||
75 | acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature); | ||
76 | |||
77 | u8 acpi_tb_is_table_loaded(u32 table_index); | ||
78 | |||
79 | void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded); | ||
80 | |||
81 | /* | ||
57 | * tbfadt - FADT parse/convert/validate | 82 | * tbfadt - FADT parse/convert/validate |
58 | */ | 83 | */ |
59 | void acpi_tb_parse_fadt(u32 table_index); | 84 | void acpi_tb_parse_fadt(u32 table_index); |
@@ -72,22 +97,32 @@ acpi_tb_find_table(char *signature, | |||
72 | */ | 97 | */ |
73 | acpi_status acpi_tb_resize_root_table_list(void); | 98 | acpi_status acpi_tb_resize_root_table_list(void); |
74 | 99 | ||
75 | acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc); | 100 | acpi_status acpi_tb_validate_table(struct acpi_table_desc *table_desc); |
101 | |||
102 | void acpi_tb_invalidate_table(struct acpi_table_desc *table_desc); | ||
103 | |||
104 | void acpi_tb_override_table(struct acpi_table_desc *old_table_desc); | ||
76 | 105 | ||
77 | struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header | 106 | acpi_status |
78 | *table_header, | 107 | acpi_tb_acquire_table(struct acpi_table_desc *table_desc, |
79 | struct acpi_table_desc | 108 | struct acpi_table_header **table_ptr, |
80 | *table_desc); | 109 | u32 *table_length, u8 *table_flags); |
110 | |||
111 | void | ||
112 | acpi_tb_release_table(struct acpi_table_header *table, | ||
113 | u32 table_length, u8 table_flags); | ||
81 | 114 | ||
82 | acpi_status | 115 | acpi_status |
83 | acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index); | 116 | acpi_tb_install_standard_table(acpi_physical_address address, |
117 | u8 flags, | ||
118 | u8 reload, u8 override, u32 *table_index); | ||
84 | 119 | ||
85 | acpi_status | 120 | acpi_status |
86 | acpi_tb_store_table(acpi_physical_address address, | 121 | acpi_tb_store_table(acpi_physical_address address, |
87 | struct acpi_table_header *table, | 122 | struct acpi_table_header *table, |
88 | u32 length, u8 flags, u32 *table_index); | 123 | u32 length, u8 flags, u32 *table_index); |
89 | 124 | ||
90 | void acpi_tb_delete_table(struct acpi_table_desc *table_desc); | 125 | void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc); |
91 | 126 | ||
92 | void acpi_tb_terminate(void); | 127 | void acpi_tb_terminate(void); |
93 | 128 | ||
@@ -99,10 +134,6 @@ acpi_status acpi_tb_release_owner_id(u32 table_index); | |||
99 | 134 | ||
100 | acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id); | 135 | acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id); |
101 | 136 | ||
102 | u8 acpi_tb_is_table_loaded(u32 table_index); | ||
103 | |||
104 | void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded); | ||
105 | |||
106 | /* | 137 | /* |
107 | * tbutils - table manager utilities | 138 | * tbutils - table manager utilities |
108 | */ | 139 | */ |
@@ -124,8 +155,13 @@ void acpi_tb_check_dsdt_header(void); | |||
124 | struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index); | 155 | struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index); |
125 | 156 | ||
126 | void | 157 | void |
127 | acpi_tb_install_table(acpi_physical_address address, | 158 | acpi_tb_install_table_with_override(u32 table_index, |
128 | char *signature, u32 table_index); | 159 | struct acpi_table_desc *new_table_desc, |
160 | u8 override); | ||
161 | |||
162 | acpi_status | ||
163 | acpi_tb_install_fixed_table(acpi_physical_address address, | ||
164 | char *signature, u32 table_index); | ||
129 | 165 | ||
130 | acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address); | 166 | acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address); |
131 | 167 | ||
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index ceeec0b7ccb1..1e256c5bda20 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h | |||
@@ -176,8 +176,7 @@ acpi_status acpi_ut_init_globals(void); | |||
176 | 176 | ||
177 | char *acpi_ut_get_mutex_name(u32 mutex_id); | 177 | char *acpi_ut_get_mutex_name(u32 mutex_id); |
178 | 178 | ||
179 | const char *acpi_ut_get_notify_name(u32 notify_value); | 179 | const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type); |
180 | |||
181 | #endif | 180 | #endif |
182 | 181 | ||
183 | char *acpi_ut_get_type_name(acpi_object_type type); | 182 | char *acpi_ut_get_type_name(acpi_object_type type); |
@@ -737,4 +736,11 @@ acpi_ut_method_error(const char *module_name, | |||
737 | struct acpi_namespace_node *node, | 736 | struct acpi_namespace_node *node, |
738 | const char *path, acpi_status lookup_status); | 737 | const char *path, acpi_status lookup_status); |
739 | 738 | ||
739 | /* | ||
740 | * Utility functions for ACPI names and IDs | ||
741 | */ | ||
742 | const struct ah_predefined_name *acpi_ah_match_predefined_name(char *nameseg); | ||
743 | |||
744 | const struct ah_device_id *acpi_ah_match_hardware_id(char *hid); | ||
745 | |||
740 | #endif /* _ACUTILS_H */ | 746 | #endif /* _ACUTILS_H */ |
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 955f83da68a5..48f70013b488 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
@@ -383,7 +383,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
383 | if (!(gpe_register_info->enable_for_run | | 383 | if (!(gpe_register_info->enable_for_run | |
384 | gpe_register_info->enable_for_wake)) { | 384 | gpe_register_info->enable_for_wake)) { |
385 | ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, | 385 | ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, |
386 | "Ignore disabled registers for GPE%02X-GPE%02X: " | 386 | "Ignore disabled registers for GPE %02X-%02X: " |
387 | "RunEnable=%02X, WakeEnable=%02X\n", | 387 | "RunEnable=%02X, WakeEnable=%02X\n", |
388 | gpe_register_info-> | 388 | gpe_register_info-> |
389 | base_gpe_number, | 389 | base_gpe_number, |
@@ -416,7 +416,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
416 | } | 416 | } |
417 | 417 | ||
418 | ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, | 418 | ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, |
419 | "Read registers for GPE%02X-GPE%02X: Status=%02X, Enable=%02X, " | 419 | "Read registers for GPE %02X-%02X: Status=%02X, Enable=%02X, " |
420 | "RunEnable=%02X, WakeEnable=%02X\n", | 420 | "RunEnable=%02X, WakeEnable=%02X\n", |
421 | gpe_register_info->base_gpe_number, | 421 | gpe_register_info->base_gpe_number, |
422 | gpe_register_info->base_gpe_number + | 422 | gpe_register_info->base_gpe_number + |
@@ -706,7 +706,8 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, | |||
706 | status = acpi_hw_clear_gpe(gpe_event_info); | 706 | status = acpi_hw_clear_gpe(gpe_event_info); |
707 | if (ACPI_FAILURE(status)) { | 707 | if (ACPI_FAILURE(status)) { |
708 | ACPI_EXCEPTION((AE_INFO, status, | 708 | ACPI_EXCEPTION((AE_INFO, status, |
709 | "Unable to clear GPE%02X", gpe_number)); | 709 | "Unable to clear GPE %02X", |
710 | gpe_number)); | ||
710 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | 711 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); |
711 | } | 712 | } |
712 | } | 713 | } |
@@ -723,7 +724,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, | |||
723 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); | 724 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); |
724 | if (ACPI_FAILURE(status)) { | 725 | if (ACPI_FAILURE(status)) { |
725 | ACPI_EXCEPTION((AE_INFO, status, | 726 | ACPI_EXCEPTION((AE_INFO, status, |
726 | "Unable to disable GPE%02X", gpe_number)); | 727 | "Unable to disable GPE %02X", gpe_number)); |
727 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); | 728 | return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); |
728 | } | 729 | } |
729 | 730 | ||
@@ -764,7 +765,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, | |||
764 | gpe_event_info); | 765 | gpe_event_info); |
765 | if (ACPI_FAILURE(status)) { | 766 | if (ACPI_FAILURE(status)) { |
766 | ACPI_EXCEPTION((AE_INFO, status, | 767 | ACPI_EXCEPTION((AE_INFO, status, |
767 | "Unable to queue handler for GPE%02X - event disabled", | 768 | "Unable to queue handler for GPE %02X - event disabled", |
768 | gpe_number)); | 769 | gpe_number)); |
769 | } | 770 | } |
770 | break; | 771 | break; |
@@ -776,7 +777,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, | |||
776 | * a GPE to be enabled if it has no handler or method. | 777 | * a GPE to be enabled if it has no handler or method. |
777 | */ | 778 | */ |
778 | ACPI_ERROR((AE_INFO, | 779 | ACPI_ERROR((AE_INFO, |
779 | "No handler or method for GPE%02X, disabling event", | 780 | "No handler or method for GPE %02X, disabling event", |
780 | gpe_number)); | 781 | gpe_number)); |
781 | 782 | ||
782 | break; | 783 | break; |
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index caaed3c673fd..d86699eea33c 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c | |||
@@ -252,21 +252,17 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) | |||
252 | 252 | ||
253 | /* Init the register_info for this GPE register (8 GPEs) */ | 253 | /* Init the register_info for this GPE register (8 GPEs) */ |
254 | 254 | ||
255 | this_register->base_gpe_number = | 255 | this_register->base_gpe_number = (u16) |
256 | (u8) (gpe_block->block_base_number + | 256 | (gpe_block->block_base_number + |
257 | (i * ACPI_GPE_REGISTER_WIDTH)); | 257 | (i * ACPI_GPE_REGISTER_WIDTH)); |
258 | 258 | ||
259 | this_register->status_address.address = | 259 | this_register->status_address.address = gpe_block->address + i; |
260 | gpe_block->block_address.address + i; | ||
261 | 260 | ||
262 | this_register->enable_address.address = | 261 | this_register->enable_address.address = |
263 | gpe_block->block_address.address + i + | 262 | gpe_block->address + i + gpe_block->register_count; |
264 | gpe_block->register_count; | ||
265 | 263 | ||
266 | this_register->status_address.space_id = | 264 | this_register->status_address.space_id = gpe_block->space_id; |
267 | gpe_block->block_address.space_id; | 265 | this_register->enable_address.space_id = gpe_block->space_id; |
268 | this_register->enable_address.space_id = | ||
269 | gpe_block->block_address.space_id; | ||
270 | this_register->status_address.bit_width = | 266 | this_register->status_address.bit_width = |
271 | ACPI_GPE_REGISTER_WIDTH; | 267 | ACPI_GPE_REGISTER_WIDTH; |
272 | this_register->enable_address.bit_width = | 268 | this_register->enable_address.bit_width = |
@@ -334,9 +330,10 @@ error_exit: | |||
334 | 330 | ||
335 | acpi_status | 331 | acpi_status |
336 | acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | 332 | acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, |
337 | struct acpi_generic_address *gpe_block_address, | 333 | u64 address, |
334 | u8 space_id, | ||
338 | u32 register_count, | 335 | u32 register_count, |
339 | u8 gpe_block_base_number, | 336 | u16 gpe_block_base_number, |
340 | u32 interrupt_number, | 337 | u32 interrupt_number, |
341 | struct acpi_gpe_block_info **return_gpe_block) | 338 | struct acpi_gpe_block_info **return_gpe_block) |
342 | { | 339 | { |
@@ -359,15 +356,14 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | |||
359 | 356 | ||
360 | /* Initialize the new GPE block */ | 357 | /* Initialize the new GPE block */ |
361 | 358 | ||
359 | gpe_block->address = address; | ||
360 | gpe_block->space_id = space_id; | ||
362 | gpe_block->node = gpe_device; | 361 | gpe_block->node = gpe_device; |
363 | gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); | 362 | gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); |
364 | gpe_block->initialized = FALSE; | 363 | gpe_block->initialized = FALSE; |
365 | gpe_block->register_count = register_count; | 364 | gpe_block->register_count = register_count; |
366 | gpe_block->block_base_number = gpe_block_base_number; | 365 | gpe_block->block_base_number = gpe_block_base_number; |
367 | 366 | ||
368 | ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address, | ||
369 | sizeof(struct acpi_generic_address)); | ||
370 | |||
371 | /* | 367 | /* |
372 | * Create the register_info and event_info sub-structures | 368 | * Create the register_info and event_info sub-structures |
373 | * Note: disables and clears all GPEs in the block | 369 | * Note: disables and clears all GPEs in the block |
@@ -408,12 +404,14 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, | |||
408 | } | 404 | } |
409 | 405 | ||
410 | ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, | 406 | ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, |
411 | " Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X\n", | 407 | " Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X%s\n", |
412 | (u32)gpe_block->block_base_number, | 408 | (u32)gpe_block->block_base_number, |
413 | (u32)(gpe_block->block_base_number + | 409 | (u32)(gpe_block->block_base_number + |
414 | (gpe_block->gpe_count - 1)), | 410 | (gpe_block->gpe_count - 1)), |
415 | gpe_device->name.ascii, gpe_block->register_count, | 411 | gpe_device->name.ascii, gpe_block->register_count, |
416 | interrupt_number)); | 412 | interrupt_number, |
413 | interrupt_number == | ||
414 | acpi_gbl_FADT.sci_interrupt ? " (SCI)" : "")); | ||
417 | 415 | ||
418 | /* Update global count of currently available GPEs */ | 416 | /* Update global count of currently available GPEs */ |
419 | 417 | ||
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index ae779c1e871d..49fc7effd961 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c | |||
@@ -131,8 +131,10 @@ acpi_status acpi_ev_gpe_initialize(void) | |||
131 | /* Install GPE Block 0 */ | 131 | /* Install GPE Block 0 */ |
132 | 132 | ||
133 | status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, | 133 | status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, |
134 | &acpi_gbl_FADT.xgpe0_block, | 134 | acpi_gbl_FADT.xgpe0_block. |
135 | register_count0, 0, | 135 | address, |
136 | acpi_gbl_FADT.xgpe0_block. | ||
137 | space_id, register_count0, 0, | ||
136 | acpi_gbl_FADT.sci_interrupt, | 138 | acpi_gbl_FADT.sci_interrupt, |
137 | &acpi_gbl_gpe_fadt_blocks[0]); | 139 | &acpi_gbl_gpe_fadt_blocks[0]); |
138 | 140 | ||
@@ -169,8 +171,10 @@ acpi_status acpi_ev_gpe_initialize(void) | |||
169 | 171 | ||
170 | status = | 172 | status = |
171 | acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, | 173 | acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, |
172 | &acpi_gbl_FADT.xgpe1_block, | 174 | acpi_gbl_FADT.xgpe1_block. |
173 | register_count1, | 175 | address, |
176 | acpi_gbl_FADT.xgpe1_block. | ||
177 | space_id, register_count1, | ||
174 | acpi_gbl_FADT.gpe1_base, | 178 | acpi_gbl_FADT.gpe1_base, |
175 | acpi_gbl_FADT. | 179 | acpi_gbl_FADT. |
176 | sci_interrupt, | 180 | sci_interrupt, |
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index 5d594eb2e5ec..24ea3424981b 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c | |||
@@ -167,7 +167,8 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node, | |||
167 | "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n", | 167 | "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n", |
168 | acpi_ut_get_node_name(node), | 168 | acpi_ut_get_node_name(node), |
169 | acpi_ut_get_type_name(node->type), notify_value, | 169 | acpi_ut_get_type_name(node->type), notify_value, |
170 | acpi_ut_get_notify_name(notify_value), node)); | 170 | acpi_ut_get_notify_name(notify_value, ACPI_TYPE_ANY), |
171 | node)); | ||
171 | 172 | ||
172 | status = acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch, | 173 | status = acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch, |
173 | info); | 174 | info); |
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c index 4d8a709c1fc4..29630e303829 100644 --- a/drivers/acpi/acpica/evsci.c +++ b/drivers/acpi/acpica/evsci.c | |||
@@ -117,7 +117,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context) | |||
117 | ACPI_FUNCTION_TRACE(ev_sci_xrupt_handler); | 117 | ACPI_FUNCTION_TRACE(ev_sci_xrupt_handler); |
118 | 118 | ||
119 | /* | 119 | /* |
120 | * We are guaranteed by the ACPI CA initialization/shutdown code that | 120 | * We are guaranteed by the ACPICA initialization/shutdown code that |
121 | * if this interrupt handler is installed, ACPI is enabled. | 121 | * if this interrupt handler is installed, ACPI is enabled. |
122 | */ | 122 | */ |
123 | 123 | ||
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index a734b27da061..11e5803b8b41 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c | |||
@@ -239,7 +239,7 @@ acpi_remove_notify_handler(acpi_handle device, | |||
239 | union acpi_operand_object *obj_desc; | 239 | union acpi_operand_object *obj_desc; |
240 | union acpi_operand_object *handler_obj; | 240 | union acpi_operand_object *handler_obj; |
241 | union acpi_operand_object *previous_handler_obj; | 241 | union acpi_operand_object *previous_handler_obj; |
242 | acpi_status status; | 242 | acpi_status status = AE_OK; |
243 | u32 i; | 243 | u32 i; |
244 | 244 | ||
245 | ACPI_FUNCTION_TRACE(acpi_remove_notify_handler); | 245 | ACPI_FUNCTION_TRACE(acpi_remove_notify_handler); |
@@ -251,20 +251,17 @@ acpi_remove_notify_handler(acpi_handle device, | |||
251 | return_ACPI_STATUS(AE_BAD_PARAMETER); | 251 | return_ACPI_STATUS(AE_BAD_PARAMETER); |
252 | } | 252 | } |
253 | 253 | ||
254 | /* Make sure all deferred notify tasks are completed */ | ||
255 | |||
256 | acpi_os_wait_events_complete(); | ||
257 | |||
258 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
259 | if (ACPI_FAILURE(status)) { | ||
260 | return_ACPI_STATUS(status); | ||
261 | } | ||
262 | |||
263 | /* Root Object. Global handlers are removed here */ | 254 | /* Root Object. Global handlers are removed here */ |
264 | 255 | ||
265 | if (device == ACPI_ROOT_OBJECT) { | 256 | if (device == ACPI_ROOT_OBJECT) { |
266 | for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) { | 257 | for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) { |
267 | if (handler_type & (i + 1)) { | 258 | if (handler_type & (i + 1)) { |
259 | status = | ||
260 | acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
261 | if (ACPI_FAILURE(status)) { | ||
262 | return_ACPI_STATUS(status); | ||
263 | } | ||
264 | |||
268 | if (!acpi_gbl_global_notify[i].handler || | 265 | if (!acpi_gbl_global_notify[i].handler || |
269 | (acpi_gbl_global_notify[i].handler != | 266 | (acpi_gbl_global_notify[i].handler != |
270 | handler)) { | 267 | handler)) { |
@@ -277,31 +274,40 @@ acpi_remove_notify_handler(acpi_handle device, | |||
277 | 274 | ||
278 | acpi_gbl_global_notify[i].handler = NULL; | 275 | acpi_gbl_global_notify[i].handler = NULL; |
279 | acpi_gbl_global_notify[i].context = NULL; | 276 | acpi_gbl_global_notify[i].context = NULL; |
277 | |||
278 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
279 | |||
280 | /* Make sure all deferred notify tasks are completed */ | ||
281 | |||
282 | acpi_os_wait_events_complete(); | ||
280 | } | 283 | } |
281 | } | 284 | } |
282 | 285 | ||
283 | goto unlock_and_exit; | 286 | return_ACPI_STATUS(AE_OK); |
284 | } | 287 | } |
285 | 288 | ||
286 | /* All other objects: Are Notifies allowed on this object? */ | 289 | /* All other objects: Are Notifies allowed on this object? */ |
287 | 290 | ||
288 | if (!acpi_ev_is_notify_object(node)) { | 291 | if (!acpi_ev_is_notify_object(node)) { |
289 | status = AE_TYPE; | 292 | return_ACPI_STATUS(AE_TYPE); |
290 | goto unlock_and_exit; | ||
291 | } | 293 | } |
292 | 294 | ||
293 | /* Must have an existing internal object */ | 295 | /* Must have an existing internal object */ |
294 | 296 | ||
295 | obj_desc = acpi_ns_get_attached_object(node); | 297 | obj_desc = acpi_ns_get_attached_object(node); |
296 | if (!obj_desc) { | 298 | if (!obj_desc) { |
297 | status = AE_NOT_EXIST; | 299 | return_ACPI_STATUS(AE_NOT_EXIST); |
298 | goto unlock_and_exit; | ||
299 | } | 300 | } |
300 | 301 | ||
301 | /* Internal object exists. Find the handler and remove it */ | 302 | /* Internal object exists. Find the handler and remove it */ |
302 | 303 | ||
303 | for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) { | 304 | for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) { |
304 | if (handler_type & (i + 1)) { | 305 | if (handler_type & (i + 1)) { |
306 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
307 | if (ACPI_FAILURE(status)) { | ||
308 | return_ACPI_STATUS(status); | ||
309 | } | ||
310 | |||
305 | handler_obj = obj_desc->common_notify.notify_list[i]; | 311 | handler_obj = obj_desc->common_notify.notify_list[i]; |
306 | previous_handler_obj = NULL; | 312 | previous_handler_obj = NULL; |
307 | 313 | ||
@@ -329,10 +335,17 @@ acpi_remove_notify_handler(acpi_handle device, | |||
329 | handler_obj->notify.next[i]; | 335 | handler_obj->notify.next[i]; |
330 | } | 336 | } |
331 | 337 | ||
338 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
339 | |||
340 | /* Make sure all deferred notify tasks are completed */ | ||
341 | |||
342 | acpi_os_wait_events_complete(); | ||
332 | acpi_ut_remove_reference(handler_obj); | 343 | acpi_ut_remove_reference(handler_obj); |
333 | } | 344 | } |
334 | } | 345 | } |
335 | 346 | ||
347 | return_ACPI_STATUS(status); | ||
348 | |||
336 | unlock_and_exit: | 349 | unlock_and_exit: |
337 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | 350 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); |
338 | return_ACPI_STATUS(status); | 351 | return_ACPI_STATUS(status); |
@@ -457,6 +470,8 @@ exit: | |||
457 | return_ACPI_STATUS(status); | 470 | return_ACPI_STATUS(status); |
458 | } | 471 | } |
459 | 472 | ||
473 | ACPI_EXPORT_SYMBOL(acpi_install_sci_handler) | ||
474 | |||
460 | /******************************************************************************* | 475 | /******************************************************************************* |
461 | * | 476 | * |
462 | * FUNCTION: acpi_remove_sci_handler | 477 | * FUNCTION: acpi_remove_sci_handler |
@@ -468,7 +483,6 @@ exit: | |||
468 | * DESCRIPTION: Remove a handler for a System Control Interrupt. | 483 | * DESCRIPTION: Remove a handler for a System Control Interrupt. |
469 | * | 484 | * |
470 | ******************************************************************************/ | 485 | ******************************************************************************/ |
471 | |||
472 | acpi_status acpi_remove_sci_handler(acpi_sci_handler address) | 486 | acpi_status acpi_remove_sci_handler(acpi_sci_handler address) |
473 | { | 487 | { |
474 | struct acpi_sci_handler_info *prev_sci_handler; | 488 | struct acpi_sci_handler_info *prev_sci_handler; |
@@ -522,6 +536,8 @@ unlock_and_exit: | |||
522 | return_ACPI_STATUS(status); | 536 | return_ACPI_STATUS(status); |
523 | } | 537 | } |
524 | 538 | ||
539 | ACPI_EXPORT_SYMBOL(acpi_remove_sci_handler) | ||
540 | |||
525 | /******************************************************************************* | 541 | /******************************************************************************* |
526 | * | 542 | * |
527 | * FUNCTION: acpi_install_global_event_handler | 543 | * FUNCTION: acpi_install_global_event_handler |
@@ -537,7 +553,6 @@ unlock_and_exit: | |||
537 | * Can be used to update event counters, etc. | 553 | * Can be used to update event counters, etc. |
538 | * | 554 | * |
539 | ******************************************************************************/ | 555 | ******************************************************************************/ |
540 | |||
541 | acpi_status | 556 | acpi_status |
542 | acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context) | 557 | acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context) |
543 | { | 558 | { |
@@ -840,10 +855,6 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, | |||
840 | return_ACPI_STATUS(AE_BAD_PARAMETER); | 855 | return_ACPI_STATUS(AE_BAD_PARAMETER); |
841 | } | 856 | } |
842 | 857 | ||
843 | /* Make sure all deferred GPE tasks are completed */ | ||
844 | |||
845 | acpi_os_wait_events_complete(); | ||
846 | |||
847 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | 858 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); |
848 | if (ACPI_FAILURE(status)) { | 859 | if (ACPI_FAILURE(status)) { |
849 | return_ACPI_STATUS(status); | 860 | return_ACPI_STATUS(status); |
@@ -895,9 +906,17 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, | |||
895 | (void)acpi_ev_add_gpe_reference(gpe_event_info); | 906 | (void)acpi_ev_add_gpe_reference(gpe_event_info); |
896 | } | 907 | } |
897 | 908 | ||
909 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
910 | (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
911 | |||
912 | /* Make sure all deferred GPE tasks are completed */ | ||
913 | |||
914 | acpi_os_wait_events_complete(); | ||
915 | |||
898 | /* Now we can free the handler object */ | 916 | /* Now we can free the handler object */ |
899 | 917 | ||
900 | ACPI_FREE(handler); | 918 | ACPI_FREE(handler); |
919 | return_ACPI_STATUS(status); | ||
901 | 920 | ||
902 | unlock_and_exit: | 921 | unlock_and_exit: |
903 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 922 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 20a1392ffe06..cb534faf5369 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c | |||
@@ -599,9 +599,10 @@ acpi_install_gpe_block(acpi_handle gpe_device, | |||
599 | * For user-installed GPE Block Devices, the gpe_block_base_number | 599 | * For user-installed GPE Block Devices, the gpe_block_base_number |
600 | * is always zero | 600 | * is always zero |
601 | */ | 601 | */ |
602 | status = | 602 | status = acpi_ev_create_gpe_block(node, gpe_block_address->address, |
603 | acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0, | 603 | gpe_block_address->space_id, |
604 | interrupt_number, &gpe_block); | 604 | register_count, 0, interrupt_number, |
605 | &gpe_block); | ||
605 | if (ACPI_FAILURE(status)) { | 606 | if (ACPI_FAILURE(status)) { |
606 | goto unlock_and_exit; | 607 | goto unlock_and_exit; |
607 | } | 608 | } |
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index 8ba1464efd11..7d2949420db7 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c | |||
@@ -343,16 +343,14 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, | |||
343 | struct acpi_walk_state *walk_state) | 343 | struct acpi_walk_state *walk_state) |
344 | { | 344 | { |
345 | union acpi_operand_object *ddb_handle; | 345 | union acpi_operand_object *ddb_handle; |
346 | struct acpi_table_header *table_header; | ||
346 | struct acpi_table_header *table; | 347 | struct acpi_table_header *table; |
347 | struct acpi_table_desc table_desc; | ||
348 | u32 table_index; | 348 | u32 table_index; |
349 | acpi_status status; | 349 | acpi_status status; |
350 | u32 length; | 350 | u32 length; |
351 | 351 | ||
352 | ACPI_FUNCTION_TRACE(ex_load_op); | 352 | ACPI_FUNCTION_TRACE(ex_load_op); |
353 | 353 | ||
354 | ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc)); | ||
355 | |||
356 | /* Source Object can be either an op_region or a Buffer/Field */ | 354 | /* Source Object can be either an op_region or a Buffer/Field */ |
357 | 355 | ||
358 | switch (obj_desc->common.type) { | 356 | switch (obj_desc->common.type) { |
@@ -380,17 +378,17 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, | |||
380 | 378 | ||
381 | /* Get the table header first so we can get the table length */ | 379 | /* Get the table header first so we can get the table length */ |
382 | 380 | ||
383 | table = ACPI_ALLOCATE(sizeof(struct acpi_table_header)); | 381 | table_header = ACPI_ALLOCATE(sizeof(struct acpi_table_header)); |
384 | if (!table) { | 382 | if (!table_header) { |
385 | return_ACPI_STATUS(AE_NO_MEMORY); | 383 | return_ACPI_STATUS(AE_NO_MEMORY); |
386 | } | 384 | } |
387 | 385 | ||
388 | status = | 386 | status = |
389 | acpi_ex_region_read(obj_desc, | 387 | acpi_ex_region_read(obj_desc, |
390 | sizeof(struct acpi_table_header), | 388 | sizeof(struct acpi_table_header), |
391 | ACPI_CAST_PTR(u8, table)); | 389 | ACPI_CAST_PTR(u8, table_header)); |
392 | length = table->length; | 390 | length = table_header->length; |
393 | ACPI_FREE(table); | 391 | ACPI_FREE(table_header); |
394 | 392 | ||
395 | if (ACPI_FAILURE(status)) { | 393 | if (ACPI_FAILURE(status)) { |
396 | return_ACPI_STATUS(status); | 394 | return_ACPI_STATUS(status); |
@@ -420,22 +418,19 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, | |||
420 | 418 | ||
421 | /* Allocate a buffer for the table */ | 419 | /* Allocate a buffer for the table */ |
422 | 420 | ||
423 | table_desc.pointer = ACPI_ALLOCATE(length); | 421 | table = ACPI_ALLOCATE(length); |
424 | if (!table_desc.pointer) { | 422 | if (!table) { |
425 | return_ACPI_STATUS(AE_NO_MEMORY); | 423 | return_ACPI_STATUS(AE_NO_MEMORY); |
426 | } | 424 | } |
427 | 425 | ||
428 | /* Read the entire table */ | 426 | /* Read the entire table */ |
429 | 427 | ||
430 | status = acpi_ex_region_read(obj_desc, length, | 428 | status = acpi_ex_region_read(obj_desc, length, |
431 | ACPI_CAST_PTR(u8, | 429 | ACPI_CAST_PTR(u8, table)); |
432 | table_desc.pointer)); | ||
433 | if (ACPI_FAILURE(status)) { | 430 | if (ACPI_FAILURE(status)) { |
434 | ACPI_FREE(table_desc.pointer); | 431 | ACPI_FREE(table); |
435 | return_ACPI_STATUS(status); | 432 | return_ACPI_STATUS(status); |
436 | } | 433 | } |
437 | |||
438 | table_desc.address = obj_desc->region.address; | ||
439 | break; | 434 | break; |
440 | 435 | ||
441 | case ACPI_TYPE_BUFFER: /* Buffer or resolved region_field */ | 436 | case ACPI_TYPE_BUFFER: /* Buffer or resolved region_field */ |
@@ -452,10 +447,10 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, | |||
452 | 447 | ||
453 | /* Get the actual table length from the table header */ | 448 | /* Get the actual table length from the table header */ |
454 | 449 | ||
455 | table = | 450 | table_header = |
456 | ACPI_CAST_PTR(struct acpi_table_header, | 451 | ACPI_CAST_PTR(struct acpi_table_header, |
457 | obj_desc->buffer.pointer); | 452 | obj_desc->buffer.pointer); |
458 | length = table->length; | 453 | length = table_header->length; |
459 | 454 | ||
460 | /* Table cannot extend beyond the buffer */ | 455 | /* Table cannot extend beyond the buffer */ |
461 | 456 | ||
@@ -470,13 +465,12 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, | |||
470 | * Copy the table from the buffer because the buffer could be modified | 465 | * Copy the table from the buffer because the buffer could be modified |
471 | * or even deleted in the future | 466 | * or even deleted in the future |
472 | */ | 467 | */ |
473 | table_desc.pointer = ACPI_ALLOCATE(length); | 468 | table = ACPI_ALLOCATE(length); |
474 | if (!table_desc.pointer) { | 469 | if (!table) { |
475 | return_ACPI_STATUS(AE_NO_MEMORY); | 470 | return_ACPI_STATUS(AE_NO_MEMORY); |
476 | } | 471 | } |
477 | 472 | ||
478 | ACPI_MEMCPY(table_desc.pointer, table, length); | 473 | ACPI_MEMCPY(table, table_header, length); |
479 | table_desc.address = ACPI_TO_INTEGER(table_desc.pointer); | ||
480 | break; | 474 | break; |
481 | 475 | ||
482 | default: | 476 | default: |
@@ -484,27 +478,32 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, | |||
484 | return_ACPI_STATUS(AE_AML_OPERAND_TYPE); | 478 | return_ACPI_STATUS(AE_AML_OPERAND_TYPE); |
485 | } | 479 | } |
486 | 480 | ||
487 | /* Validate table checksum (will not get validated in tb_add_table) */ | 481 | /* Install the new table into the local data structures */ |
488 | |||
489 | status = acpi_tb_verify_checksum(table_desc.pointer, length); | ||
490 | if (ACPI_FAILURE(status)) { | ||
491 | ACPI_FREE(table_desc.pointer); | ||
492 | return_ACPI_STATUS(status); | ||
493 | } | ||
494 | |||
495 | /* Complete the table descriptor */ | ||
496 | 482 | ||
497 | table_desc.length = length; | 483 | ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:")); |
498 | table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED; | 484 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); |
499 | 485 | ||
500 | /* Install the new table into the local data structures */ | 486 | status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table), |
487 | ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL, | ||
488 | TRUE, TRUE, &table_index); | ||
501 | 489 | ||
502 | status = acpi_tb_add_table(&table_desc, &table_index); | 490 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); |
503 | if (ACPI_FAILURE(status)) { | 491 | if (ACPI_FAILURE(status)) { |
504 | 492 | ||
505 | /* Delete allocated table buffer */ | 493 | /* Delete allocated table buffer */ |
506 | 494 | ||
507 | acpi_tb_delete_table(&table_desc); | 495 | ACPI_FREE(table); |
496 | return_ACPI_STATUS(status); | ||
497 | } | ||
498 | |||
499 | /* | ||
500 | * Note: Now table is "INSTALLED", it must be validated before | ||
501 | * loading. | ||
502 | */ | ||
503 | status = | ||
504 | acpi_tb_validate_table(&acpi_gbl_root_table_list. | ||
505 | tables[table_index]); | ||
506 | if (ACPI_FAILURE(status)) { | ||
508 | return_ACPI_STATUS(status); | 507 | return_ACPI_STATUS(status); |
509 | } | 508 | } |
510 | 509 | ||
@@ -536,9 +535,6 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, | |||
536 | return_ACPI_STATUS(status); | 535 | return_ACPI_STATUS(status); |
537 | } | 536 | } |
538 | 537 | ||
539 | ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:")); | ||
540 | acpi_tb_print_table_header(0, table_desc.pointer); | ||
541 | |||
542 | /* Remove the reference by added by acpi_ex_store above */ | 538 | /* Remove the reference by added by acpi_ex_store above */ |
543 | 539 | ||
544 | acpi_ut_remove_reference(ddb_handle); | 540 | acpi_ut_remove_reference(ddb_handle); |
@@ -546,8 +542,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, | |||
546 | /* Invoke table handler if present */ | 542 | /* Invoke table handler if present */ |
547 | 543 | ||
548 | if (acpi_gbl_table_handler) { | 544 | if (acpi_gbl_table_handler) { |
549 | (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, | 545 | (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table, |
550 | table_desc.pointer, | ||
551 | acpi_gbl_table_handler_context); | 546 | acpi_gbl_table_handler_context); |
552 | } | 547 | } |
553 | 548 | ||
@@ -576,6 +571,13 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle) | |||
576 | ACPI_FUNCTION_TRACE(ex_unload_table); | 571 | ACPI_FUNCTION_TRACE(ex_unload_table); |
577 | 572 | ||
578 | /* | 573 | /* |
574 | * Temporarily emit a warning so that the ASL for the machine can be | ||
575 | * hopefully obtained. This is to say that the Unload() operator is | ||
576 | * extremely rare if not completely unused. | ||
577 | */ | ||
578 | ACPI_WARNING((AE_INFO, "Received request to unload an ACPI table")); | ||
579 | |||
580 | /* | ||
579 | * Validate the handle | 581 | * Validate the handle |
580 | * Although the handle is partially validated in acpi_ex_reconfiguration() | 582 | * Although the handle is partially validated in acpi_ex_reconfiguration() |
581 | * when it calls acpi_ex_resolve_operands(), the handle is more completely | 583 | * when it calls acpi_ex_resolve_operands(), the handle is more completely |
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index 973fdae00f94..925202acc3e4 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c | |||
@@ -134,9 +134,11 @@ static struct acpi_exdump_info acpi_ex_dump_method[9] = { | |||
134 | {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.aml_start), "Aml Start"} | 134 | {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.aml_start), "Aml Start"} |
135 | }; | 135 | }; |
136 | 136 | ||
137 | static struct acpi_exdump_info acpi_ex_dump_mutex[5] = { | 137 | static struct acpi_exdump_info acpi_ex_dump_mutex[6] = { |
138 | {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL}, | 138 | {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL}, |
139 | {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"}, | 139 | {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"}, |
140 | {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.original_sync_level), | ||
141 | "Original Sync Level"}, | ||
140 | {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"}, | 142 | {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"}, |
141 | {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth), | 143 | {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth), |
142 | "Acquire Depth"}, | 144 | "Acquire Depth"}, |
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c index e701d8c33dbf..6aade8e1d2a1 100644 --- a/drivers/acpi/acpica/hwpci.c +++ b/drivers/acpi/acpica/hwpci.c | |||
@@ -140,11 +140,12 @@ acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id, | |||
140 | /* Walk the list, updating the PCI device/function/bus numbers */ | 140 | /* Walk the list, updating the PCI device/function/bus numbers */ |
141 | 141 | ||
142 | status = acpi_hw_process_pci_list(pci_id, list_head); | 142 | status = acpi_hw_process_pci_list(pci_id, list_head); |
143 | } | ||
144 | 143 | ||
145 | /* Always delete the list */ | 144 | /* Delete the list */ |
145 | |||
146 | acpi_hw_delete_pci_list(list_head); | ||
147 | } | ||
146 | 148 | ||
147 | acpi_hw_delete_pci_list(list_head); | ||
148 | return_ACPI_STATUS(status); | 149 | return_ACPI_STATUS(status); |
149 | } | 150 | } |
150 | 151 | ||
@@ -187,6 +188,10 @@ acpi_hw_build_pci_list(acpi_handle root_pci_device, | |||
187 | while (1) { | 188 | while (1) { |
188 | status = acpi_get_parent(current_device, &parent_device); | 189 | status = acpi_get_parent(current_device, &parent_device); |
189 | if (ACPI_FAILURE(status)) { | 190 | if (ACPI_FAILURE(status)) { |
191 | |||
192 | /* Must delete the list before exit */ | ||
193 | |||
194 | acpi_hw_delete_pci_list(*return_list_head); | ||
190 | return (status); | 195 | return (status); |
191 | } | 196 | } |
192 | 197 | ||
@@ -199,6 +204,10 @@ acpi_hw_build_pci_list(acpi_handle root_pci_device, | |||
199 | 204 | ||
200 | list_element = ACPI_ALLOCATE(sizeof(struct acpi_pci_device)); | 205 | list_element = ACPI_ALLOCATE(sizeof(struct acpi_pci_device)); |
201 | if (!list_element) { | 206 | if (!list_element) { |
207 | |||
208 | /* Must delete the list before exit */ | ||
209 | |||
210 | acpi_hw_delete_pci_list(*return_list_head); | ||
202 | return (AE_NO_MEMORY); | 211 | return (AE_NO_MEMORY); |
203 | } | 212 | } |
204 | 213 | ||
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c index 75d369050657..049d9c22a0f9 100644 --- a/drivers/acpi/acpica/rscreate.c +++ b/drivers/acpi/acpica/rscreate.c | |||
@@ -72,6 +72,8 @@ acpi_buffer_to_resource(u8 *aml_buffer, | |||
72 | void *resource; | 72 | void *resource; |
73 | void *current_resource_ptr; | 73 | void *current_resource_ptr; |
74 | 74 | ||
75 | ACPI_FUNCTION_TRACE(acpi_buffer_to_resource); | ||
76 | |||
75 | /* | 77 | /* |
76 | * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag | 78 | * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag |
77 | * is not required here. | 79 | * is not required here. |
@@ -85,7 +87,7 @@ acpi_buffer_to_resource(u8 *aml_buffer, | |||
85 | status = AE_OK; | 87 | status = AE_OK; |
86 | } | 88 | } |
87 | if (ACPI_FAILURE(status)) { | 89 | if (ACPI_FAILURE(status)) { |
88 | return (status); | 90 | return_ACPI_STATUS(status); |
89 | } | 91 | } |
90 | 92 | ||
91 | /* Allocate a buffer for the converted resource */ | 93 | /* Allocate a buffer for the converted resource */ |
@@ -93,7 +95,7 @@ acpi_buffer_to_resource(u8 *aml_buffer, | |||
93 | resource = ACPI_ALLOCATE_ZEROED(list_size_needed); | 95 | resource = ACPI_ALLOCATE_ZEROED(list_size_needed); |
94 | current_resource_ptr = resource; | 96 | current_resource_ptr = resource; |
95 | if (!resource) { | 97 | if (!resource) { |
96 | return (AE_NO_MEMORY); | 98 | return_ACPI_STATUS(AE_NO_MEMORY); |
97 | } | 99 | } |
98 | 100 | ||
99 | /* Perform the AML-to-Resource conversion */ | 101 | /* Perform the AML-to-Resource conversion */ |
@@ -110,9 +112,11 @@ acpi_buffer_to_resource(u8 *aml_buffer, | |||
110 | *resource_ptr = resource; | 112 | *resource_ptr = resource; |
111 | } | 113 | } |
112 | 114 | ||
113 | return (status); | 115 | return_ACPI_STATUS(status); |
114 | } | 116 | } |
115 | 117 | ||
118 | ACPI_EXPORT_SYMBOL(acpi_buffer_to_resource) | ||
119 | |||
116 | /******************************************************************************* | 120 | /******************************************************************************* |
117 | * | 121 | * |
118 | * FUNCTION: acpi_rs_create_resource_list | 122 | * FUNCTION: acpi_rs_create_resource_list |
@@ -130,10 +134,9 @@ acpi_buffer_to_resource(u8 *aml_buffer, | |||
130 | * of device resources. | 134 | * of device resources. |
131 | * | 135 | * |
132 | ******************************************************************************/ | 136 | ******************************************************************************/ |
133 | |||
134 | acpi_status | 137 | acpi_status |
135 | acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer, | 138 | acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer, |
136 | struct acpi_buffer * output_buffer) | 139 | struct acpi_buffer *output_buffer) |
137 | { | 140 | { |
138 | 141 | ||
139 | acpi_status status; | 142 | acpi_status status; |
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c new file mode 100644 index 000000000000..f499c10ceb4a --- /dev/null +++ b/drivers/acpi/acpica/tbdata.c | |||
@@ -0,0 +1,760 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Module Name: tbdata - Table manager data structure functions | ||
4 | * | ||
5 | *****************************************************************************/ | ||
6 | |||
7 | /* | ||
8 | * Copyright (C) 2000 - 2014, Intel Corp. | ||
9 | * All rights reserved. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions | ||
13 | * are met: | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions, and the following disclaimer, | ||
16 | * without modification. | ||
17 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer | ||
18 | * substantially similar to the "NO WARRANTY" disclaimer below | ||
19 | * ("Disclaimer") and any redistribution must be conditioned upon | ||
20 | * including a substantially similar Disclaimer requirement for further | ||
21 | * binary redistribution. | ||
22 | * 3. Neither the names of the above-listed copyright holders nor the names | ||
23 | * of any contributors may be used to endorse or promote products derived | ||
24 | * from this software without specific prior written permission. | ||
25 | * | ||
26 | * Alternatively, this software may be distributed under the terms of the | ||
27 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
28 | * Software Foundation. | ||
29 | * | ||
30 | * NO WARRANTY | ||
31 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
32 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
33 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR | ||
34 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
35 | * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
36 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
37 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
38 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | ||
39 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | ||
40 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
41 | * POSSIBILITY OF SUCH DAMAGES. | ||
42 | */ | ||
43 | |||
44 | #include <acpi/acpi.h> | ||
45 | #include "accommon.h" | ||
46 | #include "acnamesp.h" | ||
47 | #include "actables.h" | ||
48 | |||
49 | #define _COMPONENT ACPI_TABLES | ||
50 | ACPI_MODULE_NAME("tbdata") | ||
51 | |||
52 | /******************************************************************************* | ||
53 | * | ||
54 | * FUNCTION: acpi_tb_init_table_descriptor | ||
55 | * | ||
56 | * PARAMETERS: table_desc - Table descriptor | ||
57 | * address - Physical address of the table | ||
58 | * flags - Allocation flags of the table | ||
59 | * table - Pointer to the table | ||
60 | * | ||
61 | * RETURN: None | ||
62 | * | ||
63 | * DESCRIPTION: Initialize a new table descriptor | ||
64 | * | ||
65 | ******************************************************************************/ | ||
66 | void | ||
67 | acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc, | ||
68 | acpi_physical_address address, | ||
69 | u8 flags, struct acpi_table_header *table) | ||
70 | { | ||
71 | |||
72 | /* | ||
73 | * Initialize the table descriptor. Set the pointer to NULL, since the | ||
74 | * table is not fully mapped at this time. | ||
75 | */ | ||
76 | ACPI_MEMSET(table_desc, 0, sizeof(struct acpi_table_desc)); | ||
77 | table_desc->address = address; | ||
78 | table_desc->length = table->length; | ||
79 | table_desc->flags = flags; | ||
80 | ACPI_MOVE_32_TO_32(table_desc->signature.ascii, table->signature); | ||
81 | } | ||
82 | |||
83 | /******************************************************************************* | ||
84 | * | ||
85 | * FUNCTION: acpi_tb_acquire_table | ||
86 | * | ||
87 | * PARAMETERS: table_desc - Table descriptor | ||
88 | * table_ptr - Where table is returned | ||
89 | * table_length - Where table length is returned | ||
90 | * table_flags - Where table allocation flags are returned | ||
91 | * | ||
92 | * RETURN: Status | ||
93 | * | ||
94 | * DESCRIPTION: Acquire an ACPI table. It can be used for tables not | ||
95 | * maintained in the acpi_gbl_root_table_list. | ||
96 | * | ||
97 | ******************************************************************************/ | ||
98 | |||
99 | acpi_status | ||
100 | acpi_tb_acquire_table(struct acpi_table_desc *table_desc, | ||
101 | struct acpi_table_header **table_ptr, | ||
102 | u32 *table_length, u8 *table_flags) | ||
103 | { | ||
104 | struct acpi_table_header *table = NULL; | ||
105 | |||
106 | switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) { | ||
107 | case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL: | ||
108 | |||
109 | table = | ||
110 | acpi_os_map_memory(table_desc->address, table_desc->length); | ||
111 | break; | ||
112 | |||
113 | case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL: | ||
114 | case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL: | ||
115 | |||
116 | table = | ||
117 | ACPI_CAST_PTR(struct acpi_table_header, | ||
118 | table_desc->address); | ||
119 | break; | ||
120 | |||
121 | default: | ||
122 | |||
123 | break; | ||
124 | } | ||
125 | |||
126 | /* Table is not valid yet */ | ||
127 | |||
128 | if (!table) { | ||
129 | return (AE_NO_MEMORY); | ||
130 | } | ||
131 | |||
132 | /* Fill the return values */ | ||
133 | |||
134 | *table_ptr = table; | ||
135 | *table_length = table_desc->length; | ||
136 | *table_flags = table_desc->flags; | ||
137 | return (AE_OK); | ||
138 | } | ||
139 | |||
140 | /******************************************************************************* | ||
141 | * | ||
142 | * FUNCTION: acpi_tb_release_table | ||
143 | * | ||
144 | * PARAMETERS: table - Pointer for the table | ||
145 | * table_length - Length for the table | ||
146 | * table_flags - Allocation flags for the table | ||
147 | * | ||
148 | * RETURN: None | ||
149 | * | ||
150 | * DESCRIPTION: Release a table. The inverse of acpi_tb_acquire_table(). | ||
151 | * | ||
152 | ******************************************************************************/ | ||
153 | |||
154 | void | ||
155 | acpi_tb_release_table(struct acpi_table_header *table, | ||
156 | u32 table_length, u8 table_flags) | ||
157 | { | ||
158 | |||
159 | switch (table_flags & ACPI_TABLE_ORIGIN_MASK) { | ||
160 | case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL: | ||
161 | |||
162 | acpi_os_unmap_memory(table, table_length); | ||
163 | break; | ||
164 | |||
165 | case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL: | ||
166 | case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL: | ||
167 | default: | ||
168 | |||
169 | break; | ||
170 | } | ||
171 | } | ||
172 | |||
173 | /******************************************************************************* | ||
174 | * | ||
175 | * FUNCTION: acpi_tb_acquire_temp_table | ||
176 | * | ||
177 | * PARAMETERS: table_desc - Table descriptor to be acquired | ||
178 | * address - Address of the table | ||
179 | * flags - Allocation flags of the table | ||
180 | * | ||
181 | * RETURN: Status | ||
182 | * | ||
183 | * DESCRIPTION: This function validates the table header to obtain the length | ||
184 | * of a table and fills the table descriptor to make its state as | ||
185 | * "INSTALLED". Such a table descriptor is only used for verified | ||
186 | * installation. | ||
187 | * | ||
188 | ******************************************************************************/ | ||
189 | |||
190 | acpi_status | ||
191 | acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc, | ||
192 | acpi_physical_address address, u8 flags) | ||
193 | { | ||
194 | struct acpi_table_header *table_header; | ||
195 | |||
196 | switch (flags & ACPI_TABLE_ORIGIN_MASK) { | ||
197 | case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL: | ||
198 | |||
199 | /* Get the length of the full table from the header */ | ||
200 | |||
201 | table_header = | ||
202 | acpi_os_map_memory(address, | ||
203 | sizeof(struct acpi_table_header)); | ||
204 | if (!table_header) { | ||
205 | return (AE_NO_MEMORY); | ||
206 | } | ||
207 | |||
208 | acpi_tb_init_table_descriptor(table_desc, address, flags, | ||
209 | table_header); | ||
210 | acpi_os_unmap_memory(table_header, | ||
211 | sizeof(struct acpi_table_header)); | ||
212 | return (AE_OK); | ||
213 | |||
214 | case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL: | ||
215 | case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL: | ||
216 | |||
217 | table_header = ACPI_CAST_PTR(struct acpi_table_header, address); | ||
218 | if (!table_header) { | ||
219 | return (AE_NO_MEMORY); | ||
220 | } | ||
221 | |||
222 | acpi_tb_init_table_descriptor(table_desc, address, flags, | ||
223 | table_header); | ||
224 | return (AE_OK); | ||
225 | |||
226 | default: | ||
227 | |||
228 | break; | ||
229 | } | ||
230 | |||
231 | /* Table is not valid yet */ | ||
232 | |||
233 | return (AE_NO_MEMORY); | ||
234 | } | ||
235 | |||
236 | /******************************************************************************* | ||
237 | * | ||
238 | * FUNCTION: acpi_tb_release_temp_table | ||
239 | * | ||
240 | * PARAMETERS: table_desc - Table descriptor to be released | ||
241 | * | ||
242 | * RETURN: Status | ||
243 | * | ||
244 | * DESCRIPTION: The inverse of acpi_tb_acquire_temp_table(). | ||
245 | * | ||
246 | *****************************************************************************/ | ||
247 | |||
248 | void acpi_tb_release_temp_table(struct acpi_table_desc *table_desc) | ||
249 | { | ||
250 | |||
251 | /* | ||
252 | * Note that the .Address is maintained by the callers of | ||
253 | * acpi_tb_acquire_temp_table(), thus do not invoke acpi_tb_uninstall_table() | ||
254 | * where .Address will be freed. | ||
255 | */ | ||
256 | acpi_tb_invalidate_table(table_desc); | ||
257 | } | ||
258 | |||
259 | /****************************************************************************** | ||
260 | * | ||
261 | * FUNCTION: acpi_tb_validate_table | ||
262 | * | ||
263 | * PARAMETERS: table_desc - Table descriptor | ||
264 | * | ||
265 | * RETURN: Status | ||
266 | * | ||
267 | * DESCRIPTION: This function is called to validate the table, the returned | ||
268 | * table descriptor is in "VALIDATED" state. | ||
269 | * | ||
270 | *****************************************************************************/ | ||
271 | |||
272 | acpi_status acpi_tb_validate_table(struct acpi_table_desc *table_desc) | ||
273 | { | ||
274 | acpi_status status = AE_OK; | ||
275 | |||
276 | ACPI_FUNCTION_TRACE(tb_validate_table); | ||
277 | |||
278 | /* Validate the table if necessary */ | ||
279 | |||
280 | if (!table_desc->pointer) { | ||
281 | status = acpi_tb_acquire_table(table_desc, &table_desc->pointer, | ||
282 | &table_desc->length, | ||
283 | &table_desc->flags); | ||
284 | if (!table_desc->pointer) { | ||
285 | status = AE_NO_MEMORY; | ||
286 | } | ||
287 | } | ||
288 | |||
289 | return_ACPI_STATUS(status); | ||
290 | } | ||
291 | |||
292 | /******************************************************************************* | ||
293 | * | ||
294 | * FUNCTION: acpi_tb_invalidate_table | ||
295 | * | ||
296 | * PARAMETERS: table_desc - Table descriptor | ||
297 | * | ||
298 | * RETURN: None | ||
299 | * | ||
300 | * DESCRIPTION: Invalidate one internal ACPI table, this is the inverse of | ||
301 | * acpi_tb_validate_table(). | ||
302 | * | ||
303 | ******************************************************************************/ | ||
304 | |||
305 | void acpi_tb_invalidate_table(struct acpi_table_desc *table_desc) | ||
306 | { | ||
307 | |||
308 | ACPI_FUNCTION_TRACE(tb_invalidate_table); | ||
309 | |||
310 | /* Table must be validated */ | ||
311 | |||
312 | if (!table_desc->pointer) { | ||
313 | return_VOID; | ||
314 | } | ||
315 | |||
316 | acpi_tb_release_table(table_desc->pointer, table_desc->length, | ||
317 | table_desc->flags); | ||
318 | table_desc->pointer = NULL; | ||
319 | |||
320 | return_VOID; | ||
321 | } | ||
322 | |||
323 | /****************************************************************************** | ||
324 | * | ||
325 | * FUNCTION: acpi_tb_validate_temp_table | ||
326 | * | ||
327 | * PARAMETERS: table_desc - Table descriptor | ||
328 | * | ||
329 | * RETURN: Status | ||
330 | * | ||
331 | * DESCRIPTION: This function is called to validate the table, the returned | ||
332 | * table descriptor is in "VALIDATED" state. | ||
333 | * | ||
334 | *****************************************************************************/ | ||
335 | |||
336 | acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc) | ||
337 | { | ||
338 | |||
339 | if (!table_desc->pointer && !acpi_gbl_verify_table_checksum) { | ||
340 | /* | ||
341 | * Only validates the header of the table. | ||
342 | * Note that Length contains the size of the mapping after invoking | ||
343 | * this work around, this value is required by | ||
344 | * acpi_tb_release_temp_table(). | ||
345 | * We can do this because in acpi_init_table_descriptor(), the Length | ||
346 | * field of the installed descriptor is filled with the actual | ||
347 | * table length obtaining from the table header. | ||
348 | */ | ||
349 | table_desc->length = sizeof(struct acpi_table_header); | ||
350 | } | ||
351 | |||
352 | return (acpi_tb_validate_table(table_desc)); | ||
353 | } | ||
354 | |||
355 | /****************************************************************************** | ||
356 | * | ||
357 | * FUNCTION: acpi_tb_verify_temp_table | ||
358 | * | ||
359 | * PARAMETERS: table_desc - Table descriptor | ||
360 | * signature - Table signature to verify | ||
361 | * | ||
362 | * RETURN: Status | ||
363 | * | ||
364 | * DESCRIPTION: This function is called to validate and verify the table, the | ||
365 | * returned table descriptor is in "VALIDATED" state. | ||
366 | * | ||
367 | *****************************************************************************/ | ||
368 | |||
369 | acpi_status | ||
370 | acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature) | ||
371 | { | ||
372 | acpi_status status = AE_OK; | ||
373 | |||
374 | ACPI_FUNCTION_TRACE(tb_verify_temp_table); | ||
375 | |||
376 | /* Validate the table */ | ||
377 | |||
378 | status = acpi_tb_validate_temp_table(table_desc); | ||
379 | if (ACPI_FAILURE(status)) { | ||
380 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
381 | } | ||
382 | |||
383 | /* If a particular signature is expected (DSDT/FACS), it must match */ | ||
384 | |||
385 | if (signature && !ACPI_COMPARE_NAME(&table_desc->signature, signature)) { | ||
386 | ACPI_BIOS_ERROR((AE_INFO, | ||
387 | "Invalid signature 0x%X for ACPI table, expected [%s]", | ||
388 | table_desc->signature.integer, signature)); | ||
389 | status = AE_BAD_SIGNATURE; | ||
390 | goto invalidate_and_exit; | ||
391 | } | ||
392 | |||
393 | /* Verify the checksum */ | ||
394 | |||
395 | if (acpi_gbl_verify_table_checksum) { | ||
396 | status = | ||
397 | acpi_tb_verify_checksum(table_desc->pointer, | ||
398 | table_desc->length); | ||
399 | if (ACPI_FAILURE(status)) { | ||
400 | ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, | ||
401 | "%4.4s " ACPI_PRINTF_UINT | ||
402 | " Attempted table install failed", | ||
403 | acpi_ut_valid_acpi_name(table_desc-> | ||
404 | signature. | ||
405 | ascii) ? | ||
406 | table_desc->signature.ascii : "????", | ||
407 | ACPI_FORMAT_TO_UINT(table_desc-> | ||
408 | address))); | ||
409 | goto invalidate_and_exit; | ||
410 | } | ||
411 | } | ||
412 | |||
413 | return_ACPI_STATUS(AE_OK); | ||
414 | |||
415 | invalidate_and_exit: | ||
416 | acpi_tb_invalidate_table(table_desc); | ||
417 | return_ACPI_STATUS(status); | ||
418 | } | ||
419 | |||
420 | /******************************************************************************* | ||
421 | * | ||
422 | * FUNCTION: acpi_tb_resize_root_table_list | ||
423 | * | ||
424 | * PARAMETERS: None | ||
425 | * | ||
426 | * RETURN: Status | ||
427 | * | ||
428 | * DESCRIPTION: Expand the size of global table array | ||
429 | * | ||
430 | ******************************************************************************/ | ||
431 | |||
432 | acpi_status acpi_tb_resize_root_table_list(void) | ||
433 | { | ||
434 | struct acpi_table_desc *tables; | ||
435 | u32 table_count; | ||
436 | |||
437 | ACPI_FUNCTION_TRACE(tb_resize_root_table_list); | ||
438 | |||
439 | /* allow_resize flag is a parameter to acpi_initialize_tables */ | ||
440 | |||
441 | if (!(acpi_gbl_root_table_list.flags & ACPI_ROOT_ALLOW_RESIZE)) { | ||
442 | ACPI_ERROR((AE_INFO, | ||
443 | "Resize of Root Table Array is not allowed")); | ||
444 | return_ACPI_STATUS(AE_SUPPORT); | ||
445 | } | ||
446 | |||
447 | /* Increase the Table Array size */ | ||
448 | |||
449 | if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { | ||
450 | table_count = acpi_gbl_root_table_list.max_table_count; | ||
451 | } else { | ||
452 | table_count = acpi_gbl_root_table_list.current_table_count; | ||
453 | } | ||
454 | |||
455 | tables = ACPI_ALLOCATE_ZEROED(((acpi_size) table_count + | ||
456 | ACPI_ROOT_TABLE_SIZE_INCREMENT) * | ||
457 | sizeof(struct acpi_table_desc)); | ||
458 | if (!tables) { | ||
459 | ACPI_ERROR((AE_INFO, | ||
460 | "Could not allocate new root table array")); | ||
461 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
462 | } | ||
463 | |||
464 | /* Copy and free the previous table array */ | ||
465 | |||
466 | if (acpi_gbl_root_table_list.tables) { | ||
467 | ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, | ||
468 | (acpi_size) table_count * | ||
469 | sizeof(struct acpi_table_desc)); | ||
470 | |||
471 | if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { | ||
472 | ACPI_FREE(acpi_gbl_root_table_list.tables); | ||
473 | } | ||
474 | } | ||
475 | |||
476 | acpi_gbl_root_table_list.tables = tables; | ||
477 | acpi_gbl_root_table_list.max_table_count = | ||
478 | table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT; | ||
479 | acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED; | ||
480 | |||
481 | return_ACPI_STATUS(AE_OK); | ||
482 | } | ||
483 | |||
484 | /******************************************************************************* | ||
485 | * | ||
486 | * FUNCTION: acpi_tb_get_next_root_index | ||
487 | * | ||
488 | * PARAMETERS: table_index - Where table index is returned | ||
489 | * | ||
490 | * RETURN: Status and table index. | ||
491 | * | ||
492 | * DESCRIPTION: Allocate a new ACPI table entry to the global table list | ||
493 | * | ||
494 | ******************************************************************************/ | ||
495 | |||
496 | acpi_status acpi_tb_get_next_root_index(u32 *table_index) | ||
497 | { | ||
498 | acpi_status status; | ||
499 | |||
500 | /* Ensure that there is room for the table in the Root Table List */ | ||
501 | |||
502 | if (acpi_gbl_root_table_list.current_table_count >= | ||
503 | acpi_gbl_root_table_list.max_table_count) { | ||
504 | status = acpi_tb_resize_root_table_list(); | ||
505 | if (ACPI_FAILURE(status)) { | ||
506 | return (status); | ||
507 | } | ||
508 | } | ||
509 | |||
510 | *table_index = acpi_gbl_root_table_list.current_table_count; | ||
511 | acpi_gbl_root_table_list.current_table_count++; | ||
512 | return (AE_OK); | ||
513 | } | ||
514 | |||
515 | /******************************************************************************* | ||
516 | * | ||
517 | * FUNCTION: acpi_tb_terminate | ||
518 | * | ||
519 | * PARAMETERS: None | ||
520 | * | ||
521 | * RETURN: None | ||
522 | * | ||
523 | * DESCRIPTION: Delete all internal ACPI tables | ||
524 | * | ||
525 | ******************************************************************************/ | ||
526 | |||
527 | void acpi_tb_terminate(void) | ||
528 | { | ||
529 | u32 i; | ||
530 | |||
531 | ACPI_FUNCTION_TRACE(tb_terminate); | ||
532 | |||
533 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
534 | |||
535 | /* Delete the individual tables */ | ||
536 | |||
537 | for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) { | ||
538 | acpi_tb_uninstall_table(&acpi_gbl_root_table_list.tables[i]); | ||
539 | } | ||
540 | |||
541 | /* | ||
542 | * Delete the root table array if allocated locally. Array cannot be | ||
543 | * mapped, so we don't need to check for that flag. | ||
544 | */ | ||
545 | if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { | ||
546 | ACPI_FREE(acpi_gbl_root_table_list.tables); | ||
547 | } | ||
548 | |||
549 | acpi_gbl_root_table_list.tables = NULL; | ||
550 | acpi_gbl_root_table_list.flags = 0; | ||
551 | acpi_gbl_root_table_list.current_table_count = 0; | ||
552 | |||
553 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n")); | ||
554 | |||
555 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
556 | return_VOID; | ||
557 | } | ||
558 | |||
559 | /******************************************************************************* | ||
560 | * | ||
561 | * FUNCTION: acpi_tb_delete_namespace_by_owner | ||
562 | * | ||
563 | * PARAMETERS: table_index - Table index | ||
564 | * | ||
565 | * RETURN: Status | ||
566 | * | ||
567 | * DESCRIPTION: Delete all namespace objects created when this table was loaded. | ||
568 | * | ||
569 | ******************************************************************************/ | ||
570 | |||
571 | acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index) | ||
572 | { | ||
573 | acpi_owner_id owner_id; | ||
574 | acpi_status status; | ||
575 | |||
576 | ACPI_FUNCTION_TRACE(tb_delete_namespace_by_owner); | ||
577 | |||
578 | status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
579 | if (ACPI_FAILURE(status)) { | ||
580 | return_ACPI_STATUS(status); | ||
581 | } | ||
582 | |||
583 | if (table_index >= acpi_gbl_root_table_list.current_table_count) { | ||
584 | |||
585 | /* The table index does not exist */ | ||
586 | |||
587 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
588 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
589 | } | ||
590 | |||
591 | /* Get the owner ID for this table, used to delete namespace nodes */ | ||
592 | |||
593 | owner_id = acpi_gbl_root_table_list.tables[table_index].owner_id; | ||
594 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
595 | |||
596 | /* | ||
597 | * Need to acquire the namespace writer lock to prevent interference | ||
598 | * with any concurrent namespace walks. The interpreter must be | ||
599 | * released during the deletion since the acquisition of the deletion | ||
600 | * lock may block, and also since the execution of a namespace walk | ||
601 | * must be allowed to use the interpreter. | ||
602 | */ | ||
603 | (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); | ||
604 | status = acpi_ut_acquire_write_lock(&acpi_gbl_namespace_rw_lock); | ||
605 | |||
606 | acpi_ns_delete_namespace_by_owner(owner_id); | ||
607 | if (ACPI_FAILURE(status)) { | ||
608 | return_ACPI_STATUS(status); | ||
609 | } | ||
610 | |||
611 | acpi_ut_release_write_lock(&acpi_gbl_namespace_rw_lock); | ||
612 | |||
613 | status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); | ||
614 | return_ACPI_STATUS(status); | ||
615 | } | ||
616 | |||
617 | /******************************************************************************* | ||
618 | * | ||
619 | * FUNCTION: acpi_tb_allocate_owner_id | ||
620 | * | ||
621 | * PARAMETERS: table_index - Table index | ||
622 | * | ||
623 | * RETURN: Status | ||
624 | * | ||
625 | * DESCRIPTION: Allocates owner_id in table_desc | ||
626 | * | ||
627 | ******************************************************************************/ | ||
628 | |||
629 | acpi_status acpi_tb_allocate_owner_id(u32 table_index) | ||
630 | { | ||
631 | acpi_status status = AE_BAD_PARAMETER; | ||
632 | |||
633 | ACPI_FUNCTION_TRACE(tb_allocate_owner_id); | ||
634 | |||
635 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
636 | if (table_index < acpi_gbl_root_table_list.current_table_count) { | ||
637 | status = | ||
638 | acpi_ut_allocate_owner_id(& | ||
639 | (acpi_gbl_root_table_list. | ||
640 | tables[table_index].owner_id)); | ||
641 | } | ||
642 | |||
643 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
644 | return_ACPI_STATUS(status); | ||
645 | } | ||
646 | |||
647 | /******************************************************************************* | ||
648 | * | ||
649 | * FUNCTION: acpi_tb_release_owner_id | ||
650 | * | ||
651 | * PARAMETERS: table_index - Table index | ||
652 | * | ||
653 | * RETURN: Status | ||
654 | * | ||
655 | * DESCRIPTION: Releases owner_id in table_desc | ||
656 | * | ||
657 | ******************************************************************************/ | ||
658 | |||
659 | acpi_status acpi_tb_release_owner_id(u32 table_index) | ||
660 | { | ||
661 | acpi_status status = AE_BAD_PARAMETER; | ||
662 | |||
663 | ACPI_FUNCTION_TRACE(tb_release_owner_id); | ||
664 | |||
665 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
666 | if (table_index < acpi_gbl_root_table_list.current_table_count) { | ||
667 | acpi_ut_release_owner_id(& | ||
668 | (acpi_gbl_root_table_list. | ||
669 | tables[table_index].owner_id)); | ||
670 | status = AE_OK; | ||
671 | } | ||
672 | |||
673 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
674 | return_ACPI_STATUS(status); | ||
675 | } | ||
676 | |||
677 | /******************************************************************************* | ||
678 | * | ||
679 | * FUNCTION: acpi_tb_get_owner_id | ||
680 | * | ||
681 | * PARAMETERS: table_index - Table index | ||
682 | * owner_id - Where the table owner_id is returned | ||
683 | * | ||
684 | * RETURN: Status | ||
685 | * | ||
686 | * DESCRIPTION: returns owner_id for the ACPI table | ||
687 | * | ||
688 | ******************************************************************************/ | ||
689 | |||
690 | acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id * owner_id) | ||
691 | { | ||
692 | acpi_status status = AE_BAD_PARAMETER; | ||
693 | |||
694 | ACPI_FUNCTION_TRACE(tb_get_owner_id); | ||
695 | |||
696 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
697 | if (table_index < acpi_gbl_root_table_list.current_table_count) { | ||
698 | *owner_id = | ||
699 | acpi_gbl_root_table_list.tables[table_index].owner_id; | ||
700 | status = AE_OK; | ||
701 | } | ||
702 | |||
703 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
704 | return_ACPI_STATUS(status); | ||
705 | } | ||
706 | |||
707 | /******************************************************************************* | ||
708 | * | ||
709 | * FUNCTION: acpi_tb_is_table_loaded | ||
710 | * | ||
711 | * PARAMETERS: table_index - Index into the root table | ||
712 | * | ||
713 | * RETURN: Table Loaded Flag | ||
714 | * | ||
715 | ******************************************************************************/ | ||
716 | |||
717 | u8 acpi_tb_is_table_loaded(u32 table_index) | ||
718 | { | ||
719 | u8 is_loaded = FALSE; | ||
720 | |||
721 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
722 | if (table_index < acpi_gbl_root_table_list.current_table_count) { | ||
723 | is_loaded = (u8) | ||
724 | (acpi_gbl_root_table_list.tables[table_index].flags & | ||
725 | ACPI_TABLE_IS_LOADED); | ||
726 | } | ||
727 | |||
728 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
729 | return (is_loaded); | ||
730 | } | ||
731 | |||
732 | /******************************************************************************* | ||
733 | * | ||
734 | * FUNCTION: acpi_tb_set_table_loaded_flag | ||
735 | * | ||
736 | * PARAMETERS: table_index - Table index | ||
737 | * is_loaded - TRUE if table is loaded, FALSE otherwise | ||
738 | * | ||
739 | * RETURN: None | ||
740 | * | ||
741 | * DESCRIPTION: Sets the table loaded flag to either TRUE or FALSE. | ||
742 | * | ||
743 | ******************************************************************************/ | ||
744 | |||
745 | void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded) | ||
746 | { | ||
747 | |||
748 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
749 | if (table_index < acpi_gbl_root_table_list.current_table_count) { | ||
750 | if (is_loaded) { | ||
751 | acpi_gbl_root_table_list.tables[table_index].flags |= | ||
752 | ACPI_TABLE_IS_LOADED; | ||
753 | } else { | ||
754 | acpi_gbl_root_table_list.tables[table_index].flags &= | ||
755 | ~ACPI_TABLE_IS_LOADED; | ||
756 | } | ||
757 | } | ||
758 | |||
759 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
760 | } | ||
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index ec14588254d4..41519a958083 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c | |||
@@ -52,7 +52,8 @@ ACPI_MODULE_NAME("tbfadt") | |||
52 | static void | 52 | static void |
53 | acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, | 53 | acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, |
54 | u8 space_id, | 54 | u8 space_id, |
55 | u8 byte_width, u64 address, char *register_name); | 55 | u8 byte_width, |
56 | u64 address, char *register_name, u8 flags); | ||
56 | 57 | ||
57 | static void acpi_tb_convert_fadt(void); | 58 | static void acpi_tb_convert_fadt(void); |
58 | 59 | ||
@@ -69,13 +70,14 @@ typedef struct acpi_fadt_info { | |||
69 | u16 address32; | 70 | u16 address32; |
70 | u16 length; | 71 | u16 length; |
71 | u8 default_length; | 72 | u8 default_length; |
72 | u8 type; | 73 | u8 flags; |
73 | 74 | ||
74 | } acpi_fadt_info; | 75 | } acpi_fadt_info; |
75 | 76 | ||
76 | #define ACPI_FADT_OPTIONAL 0 | 77 | #define ACPI_FADT_OPTIONAL 0 |
77 | #define ACPI_FADT_REQUIRED 1 | 78 | #define ACPI_FADT_REQUIRED 1 |
78 | #define ACPI_FADT_SEPARATE_LENGTH 2 | 79 | #define ACPI_FADT_SEPARATE_LENGTH 2 |
80 | #define ACPI_FADT_GPE_REGISTER 4 | ||
79 | 81 | ||
80 | static struct acpi_fadt_info fadt_info_table[] = { | 82 | static struct acpi_fadt_info fadt_info_table[] = { |
81 | {"Pm1aEventBlock", | 83 | {"Pm1aEventBlock", |
@@ -125,14 +127,14 @@ static struct acpi_fadt_info fadt_info_table[] = { | |||
125 | ACPI_FADT_OFFSET(gpe0_block), | 127 | ACPI_FADT_OFFSET(gpe0_block), |
126 | ACPI_FADT_OFFSET(gpe0_block_length), | 128 | ACPI_FADT_OFFSET(gpe0_block_length), |
127 | 0, | 129 | 0, |
128 | ACPI_FADT_SEPARATE_LENGTH}, | 130 | ACPI_FADT_SEPARATE_LENGTH | ACPI_FADT_GPE_REGISTER}, |
129 | 131 | ||
130 | {"Gpe1Block", | 132 | {"Gpe1Block", |
131 | ACPI_FADT_OFFSET(xgpe1_block), | 133 | ACPI_FADT_OFFSET(xgpe1_block), |
132 | ACPI_FADT_OFFSET(gpe1_block), | 134 | ACPI_FADT_OFFSET(gpe1_block), |
133 | ACPI_FADT_OFFSET(gpe1_block_length), | 135 | ACPI_FADT_OFFSET(gpe1_block_length), |
134 | 0, | 136 | 0, |
135 | ACPI_FADT_SEPARATE_LENGTH} | 137 | ACPI_FADT_SEPARATE_LENGTH | ACPI_FADT_GPE_REGISTER} |
136 | }; | 138 | }; |
137 | 139 | ||
138 | #define ACPI_FADT_INFO_ENTRIES \ | 140 | #define ACPI_FADT_INFO_ENTRIES \ |
@@ -189,19 +191,29 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = { | |||
189 | static void | 191 | static void |
190 | acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, | 192 | acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, |
191 | u8 space_id, | 193 | u8 space_id, |
192 | u8 byte_width, u64 address, char *register_name) | 194 | u8 byte_width, |
195 | u64 address, char *register_name, u8 flags) | ||
193 | { | 196 | { |
194 | u8 bit_width; | 197 | u8 bit_width; |
195 | 198 | ||
196 | /* Bit width field in the GAS is only one byte long, 255 max */ | 199 | /* |
197 | 200 | * Bit width field in the GAS is only one byte long, 255 max. | |
201 | * Check for bit_width overflow in GAS. | ||
202 | */ | ||
198 | bit_width = (u8)(byte_width * 8); | 203 | bit_width = (u8)(byte_width * 8); |
199 | 204 | if (byte_width > 31) { /* (31*8)=248, (32*8)=256 */ | |
200 | if (byte_width > 31) { /* (31*8)=248 */ | 205 | /* |
201 | ACPI_ERROR((AE_INFO, | 206 | * No error for GPE blocks, because we do not use the bit_width |
202 | "%s - 32-bit FADT register is too long (%u bytes, %u bits) " | 207 | * for GPEs, the legacy length (byte_width) is used instead to |
203 | "to convert to GAS struct - 255 bits max, truncating", | 208 | * allow for a large number of GPEs. |
204 | register_name, byte_width, (byte_width * 8))); | 209 | */ |
210 | if (!(flags & ACPI_FADT_GPE_REGISTER)) { | ||
211 | ACPI_ERROR((AE_INFO, | ||
212 | "%s - 32-bit FADT register is too long (%u bytes, %u bits) " | ||
213 | "to convert to GAS struct - 255 bits max, truncating", | ||
214 | register_name, byte_width, | ||
215 | (byte_width * 8))); | ||
216 | } | ||
205 | 217 | ||
206 | bit_width = 255; | 218 | bit_width = 255; |
207 | } | 219 | } |
@@ -332,15 +344,15 @@ void acpi_tb_parse_fadt(u32 table_index) | |||
332 | 344 | ||
333 | /* Obtain the DSDT and FACS tables via their addresses within the FADT */ | 345 | /* Obtain the DSDT and FACS tables via their addresses within the FADT */ |
334 | 346 | ||
335 | acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt, | 347 | acpi_tb_install_fixed_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt, |
336 | ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT); | 348 | ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT); |
337 | 349 | ||
338 | /* If Hardware Reduced flag is set, there is no FACS */ | 350 | /* If Hardware Reduced flag is set, there is no FACS */ |
339 | 351 | ||
340 | if (!acpi_gbl_reduced_hardware) { | 352 | if (!acpi_gbl_reduced_hardware) { |
341 | acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT. | 353 | acpi_tb_install_fixed_table((acpi_physical_address) |
342 | Xfacs, ACPI_SIG_FACS, | 354 | acpi_gbl_FADT.Xfacs, ACPI_SIG_FACS, |
343 | ACPI_TABLE_INDEX_FACS); | 355 | ACPI_TABLE_INDEX_FACS); |
344 | } | 356 | } |
345 | } | 357 | } |
346 | 358 | ||
@@ -450,6 +462,7 @@ static void acpi_tb_convert_fadt(void) | |||
450 | struct acpi_generic_address *address64; | 462 | struct acpi_generic_address *address64; |
451 | u32 address32; | 463 | u32 address32; |
452 | u8 length; | 464 | u8 length; |
465 | u8 flags; | ||
453 | u32 i; | 466 | u32 i; |
454 | 467 | ||
455 | /* | 468 | /* |
@@ -515,6 +528,7 @@ static void acpi_tb_convert_fadt(void) | |||
515 | fadt_info_table[i].length); | 528 | fadt_info_table[i].length); |
516 | 529 | ||
517 | name = fadt_info_table[i].name; | 530 | name = fadt_info_table[i].name; |
531 | flags = fadt_info_table[i].flags; | ||
518 | 532 | ||
519 | /* | 533 | /* |
520 | * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X" | 534 | * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X" |
@@ -554,7 +568,7 @@ static void acpi_tb_convert_fadt(void) | |||
554 | [i]. | 568 | [i]. |
555 | length), | 569 | length), |
556 | (u64)address32, | 570 | (u64)address32, |
557 | name); | 571 | name, flags); |
558 | } else if (address64->address != (u64)address32) { | 572 | } else if (address64->address != (u64)address32) { |
559 | 573 | ||
560 | /* Address mismatch */ | 574 | /* Address mismatch */ |
@@ -582,7 +596,8 @@ static void acpi_tb_convert_fadt(void) | |||
582 | length), | 596 | length), |
583 | (u64) | 597 | (u64) |
584 | address32, | 598 | address32, |
585 | name); | 599 | name, |
600 | flags); | ||
586 | } | 601 | } |
587 | } | 602 | } |
588 | } | 603 | } |
@@ -603,7 +618,7 @@ static void acpi_tb_convert_fadt(void) | |||
603 | address64->bit_width)); | 618 | address64->bit_width)); |
604 | } | 619 | } |
605 | 620 | ||
606 | if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) { | 621 | if (fadt_info_table[i].flags & ACPI_FADT_REQUIRED) { |
607 | /* | 622 | /* |
608 | * Field is required (Pm1a_event, Pm1a_control). | 623 | * Field is required (Pm1a_event, Pm1a_control). |
609 | * Both the address and length must be non-zero. | 624 | * Both the address and length must be non-zero. |
@@ -617,7 +632,7 @@ static void acpi_tb_convert_fadt(void) | |||
617 | address), | 632 | address), |
618 | length)); | 633 | length)); |
619 | } | 634 | } |
620 | } else if (fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH) { | 635 | } else if (fadt_info_table[i].flags & ACPI_FADT_SEPARATE_LENGTH) { |
621 | /* | 636 | /* |
622 | * Field is optional (Pm2_control, GPE0, GPE1) AND has its own | 637 | * Field is optional (Pm2_control, GPE0, GPE1) AND has its own |
623 | * length field. If present, both the address and length must | 638 | * length field. If present, both the address and length must |
@@ -726,7 +741,7 @@ static void acpi_tb_setup_fadt_registers(void) | |||
726 | (fadt_pm_info_table[i]. | 741 | (fadt_pm_info_table[i]. |
727 | register_num * | 742 | register_num * |
728 | pm1_register_byte_width), | 743 | pm1_register_byte_width), |
729 | "PmRegisters"); | 744 | "PmRegisters", 0); |
730 | } | 745 | } |
731 | } | 746 | } |
732 | } | 747 | } |
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index c12003947bd5..cb947700206c 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c | |||
@@ -99,8 +99,8 @@ acpi_tb_find_table(char *signature, | |||
99 | /* Table is not currently mapped, map it */ | 99 | /* Table is not currently mapped, map it */ |
100 | 100 | ||
101 | status = | 101 | status = |
102 | acpi_tb_verify_table(&acpi_gbl_root_table_list. | 102 | acpi_tb_validate_table(&acpi_gbl_root_table_list. |
103 | tables[i]); | 103 | tables[i]); |
104 | if (ACPI_FAILURE(status)) { | 104 | if (ACPI_FAILURE(status)) { |
105 | return_ACPI_STATUS(status); | 105 | return_ACPI_STATUS(status); |
106 | } | 106 | } |
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index e3040947e9a0..755b90c40ddf 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c | |||
@@ -43,688 +43,483 @@ | |||
43 | 43 | ||
44 | #include <acpi/acpi.h> | 44 | #include <acpi/acpi.h> |
45 | #include "accommon.h" | 45 | #include "accommon.h" |
46 | #include "acnamesp.h" | ||
47 | #include "actables.h" | 46 | #include "actables.h" |
48 | 47 | ||
49 | #define _COMPONENT ACPI_TABLES | 48 | #define _COMPONENT ACPI_TABLES |
50 | ACPI_MODULE_NAME("tbinstal") | 49 | ACPI_MODULE_NAME("tbinstal") |
51 | 50 | ||
52 | /****************************************************************************** | 51 | /* Local prototypes */ |
52 | static u8 | ||
53 | acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index); | ||
54 | |||
55 | /******************************************************************************* | ||
53 | * | 56 | * |
54 | * FUNCTION: acpi_tb_verify_table | 57 | * FUNCTION: acpi_tb_compare_tables |
55 | * | 58 | * |
56 | * PARAMETERS: table_desc - table | 59 | * PARAMETERS: table_desc - Table 1 descriptor to be compared |
60 | * table_index - Index of table 2 to be compared | ||
57 | * | 61 | * |
58 | * RETURN: Status | 62 | * RETURN: TRUE if both tables are identical. |
59 | * | 63 | * |
60 | * DESCRIPTION: this function is called to verify and map table | 64 | * DESCRIPTION: This function compares a table with another table that has |
65 | * already been installed in the root table list. | ||
61 | * | 66 | * |
62 | *****************************************************************************/ | 67 | ******************************************************************************/ |
63 | acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc) | 68 | |
69 | static u8 | ||
70 | acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index) | ||
64 | { | 71 | { |
65 | acpi_status status = AE_OK; | 72 | acpi_status status = AE_OK; |
73 | u8 is_identical; | ||
74 | struct acpi_table_header *table; | ||
75 | u32 table_length; | ||
76 | u8 table_flags; | ||
66 | 77 | ||
67 | ACPI_FUNCTION_TRACE(tb_verify_table); | 78 | status = |
68 | 79 | acpi_tb_acquire_table(&acpi_gbl_root_table_list.tables[table_index], | |
69 | /* Map the table if necessary */ | 80 | &table, &table_length, &table_flags); |
70 | 81 | if (ACPI_FAILURE(status)) { | |
71 | if (!table_desc->pointer) { | 82 | return (FALSE); |
72 | if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) == | ||
73 | ACPI_TABLE_ORIGIN_MAPPED) { | ||
74 | table_desc->pointer = | ||
75 | acpi_os_map_memory(table_desc->address, | ||
76 | table_desc->length); | ||
77 | } | ||
78 | if (!table_desc->pointer) { | ||
79 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
80 | } | ||
81 | } | 83 | } |
82 | 84 | ||
83 | /* Always calculate checksum, ignore bad checksum if requested */ | 85 | /* |
86 | * Check for a table match on the entire table length, | ||
87 | * not just the header. | ||
88 | */ | ||
89 | is_identical = (u8)((table_desc->length != table_length || | ||
90 | ACPI_MEMCMP(table_desc->pointer, table, | ||
91 | table_length)) ? FALSE : TRUE); | ||
84 | 92 | ||
85 | status = | 93 | /* Release the acquired table */ |
86 | acpi_tb_verify_checksum(table_desc->pointer, table_desc->length); | ||
87 | 94 | ||
88 | return_ACPI_STATUS(status); | 95 | acpi_tb_release_table(table, table_length, table_flags); |
96 | return (is_identical); | ||
89 | } | 97 | } |
90 | 98 | ||
91 | /******************************************************************************* | 99 | /******************************************************************************* |
92 | * | 100 | * |
93 | * FUNCTION: acpi_tb_add_table | 101 | * FUNCTION: acpi_tb_install_table_with_override |
94 | * | 102 | * |
95 | * PARAMETERS: table_desc - Table descriptor | 103 | * PARAMETERS: table_index - Index into root table array |
96 | * table_index - Where the table index is returned | 104 | * new_table_desc - New table descriptor to install |
105 | * override - Whether override should be performed | ||
97 | * | 106 | * |
98 | * RETURN: Status | 107 | * RETURN: None |
99 | * | 108 | * |
100 | * DESCRIPTION: This function is called to add an ACPI table. It is used to | 109 | * DESCRIPTION: Install an ACPI table into the global data structure. The |
101 | * dynamically load tables via the Load and load_table AML | 110 | * table override mechanism is called to allow the host |
102 | * operators. | 111 | * OS to replace any table before it is installed in the root |
112 | * table array. | ||
103 | * | 113 | * |
104 | ******************************************************************************/ | 114 | ******************************************************************************/ |
105 | 115 | ||
106 | acpi_status | 116 | void |
107 | acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index) | 117 | acpi_tb_install_table_with_override(u32 table_index, |
118 | struct acpi_table_desc *new_table_desc, | ||
119 | u8 override) | ||
108 | { | 120 | { |
109 | u32 i; | ||
110 | acpi_status status = AE_OK; | ||
111 | 121 | ||
112 | ACPI_FUNCTION_TRACE(tb_add_table); | 122 | if (table_index >= acpi_gbl_root_table_list.current_table_count) { |
113 | 123 | return; | |
114 | if (!table_desc->pointer) { | ||
115 | status = acpi_tb_verify_table(table_desc); | ||
116 | if (ACPI_FAILURE(status) || !table_desc->pointer) { | ||
117 | return_ACPI_STATUS(status); | ||
118 | } | ||
119 | } | 124 | } |
120 | 125 | ||
121 | /* | 126 | /* |
122 | * Validate the incoming table signature. | 127 | * ACPI Table Override: |
123 | * | 128 | * |
124 | * 1) Originally, we checked the table signature for "SSDT" or "PSDT". | 129 | * Before we install the table, let the host OS override it with a new |
125 | * 2) We added support for OEMx tables, signature "OEM". | 130 | * one if desired. Any table within the RSDT/XSDT can be replaced, |
126 | * 3) Valid tables were encountered with a null signature, so we just | 131 | * including the DSDT which is pointed to by the FADT. |
127 | * gave up on validating the signature, (05/2008). | ||
128 | * 4) We encountered non-AML tables such as the MADT, which caused | ||
129 | * interpreter errors and kernel faults. So now, we once again allow | ||
130 | * only "SSDT", "OEMx", and now, also a null signature. (05/2011). | ||
131 | */ | 132 | */ |
132 | if ((table_desc->pointer->signature[0] != 0x00) && | 133 | if (override) { |
133 | (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT)) | 134 | acpi_tb_override_table(new_table_desc); |
134 | && (ACPI_STRNCMP(table_desc->pointer->signature, "OEM", 3))) { | ||
135 | ACPI_BIOS_ERROR((AE_INFO, | ||
136 | "Table has invalid signature [%4.4s] (0x%8.8X), " | ||
137 | "must be SSDT or OEMx", | ||
138 | acpi_ut_valid_acpi_name(table_desc->pointer-> | ||
139 | signature) ? | ||
140 | table_desc->pointer->signature : "????", | ||
141 | *(u32 *)table_desc->pointer->signature)); | ||
142 | |||
143 | return_ACPI_STATUS(AE_BAD_SIGNATURE); | ||
144 | } | 135 | } |
145 | 136 | ||
146 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | 137 | acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list. |
138 | tables[table_index], | ||
139 | new_table_desc->address, | ||
140 | new_table_desc->flags, | ||
141 | new_table_desc->pointer); | ||
147 | 142 | ||
148 | /* Check if table is already registered */ | 143 | acpi_tb_print_table_header(new_table_desc->address, |
144 | new_table_desc->pointer); | ||
149 | 145 | ||
150 | for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { | 146 | /* Set the global integer width (based upon revision of the DSDT) */ |
151 | if (!acpi_gbl_root_table_list.tables[i].pointer) { | ||
152 | status = | ||
153 | acpi_tb_verify_table(&acpi_gbl_root_table_list. | ||
154 | tables[i]); | ||
155 | if (ACPI_FAILURE(status) | ||
156 | || !acpi_gbl_root_table_list.tables[i].pointer) { | ||
157 | continue; | ||
158 | } | ||
159 | } | ||
160 | 147 | ||
161 | /* | 148 | if (table_index == ACPI_TABLE_INDEX_DSDT) { |
162 | * Check for a table match on the entire table length, | 149 | acpi_ut_set_integer_width(new_table_desc->pointer->revision); |
163 | * not just the header. | ||
164 | */ | ||
165 | if (table_desc->length != | ||
166 | acpi_gbl_root_table_list.tables[i].length) { | ||
167 | continue; | ||
168 | } | ||
169 | |||
170 | if (ACPI_MEMCMP(table_desc->pointer, | ||
171 | acpi_gbl_root_table_list.tables[i].pointer, | ||
172 | acpi_gbl_root_table_list.tables[i].length)) { | ||
173 | continue; | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * Note: the current mechanism does not unregister a table if it is | ||
178 | * dynamically unloaded. The related namespace entries are deleted, | ||
179 | * but the table remains in the root table list. | ||
180 | * | ||
181 | * The assumption here is that the number of different tables that | ||
182 | * will be loaded is actually small, and there is minimal overhead | ||
183 | * in just keeping the table in case it is needed again. | ||
184 | * | ||
185 | * If this assumption changes in the future (perhaps on large | ||
186 | * machines with many table load/unload operations), tables will | ||
187 | * need to be unregistered when they are unloaded, and slots in the | ||
188 | * root table list should be reused when empty. | ||
189 | */ | ||
190 | |||
191 | /* | ||
192 | * Table is already registered. | ||
193 | * We can delete the table that was passed as a parameter. | ||
194 | */ | ||
195 | acpi_tb_delete_table(table_desc); | ||
196 | *table_index = i; | ||
197 | |||
198 | if (acpi_gbl_root_table_list.tables[i]. | ||
199 | flags & ACPI_TABLE_IS_LOADED) { | ||
200 | |||
201 | /* Table is still loaded, this is an error */ | ||
202 | |||
203 | status = AE_ALREADY_EXISTS; | ||
204 | goto release; | ||
205 | } else { | ||
206 | /* Table was unloaded, allow it to be reloaded */ | ||
207 | |||
208 | table_desc->pointer = | ||
209 | acpi_gbl_root_table_list.tables[i].pointer; | ||
210 | table_desc->address = | ||
211 | acpi_gbl_root_table_list.tables[i].address; | ||
212 | status = AE_OK; | ||
213 | goto print_header; | ||
214 | } | ||
215 | } | 150 | } |
216 | |||
217 | /* | ||
218 | * ACPI Table Override: | ||
219 | * Allow the host to override dynamically loaded tables. | ||
220 | * NOTE: the table is fully mapped at this point, and the mapping will | ||
221 | * be deleted by tb_table_override if the table is actually overridden. | ||
222 | */ | ||
223 | (void)acpi_tb_table_override(table_desc->pointer, table_desc); | ||
224 | |||
225 | /* Add the table to the global root table list */ | ||
226 | |||
227 | status = acpi_tb_store_table(table_desc->address, table_desc->pointer, | ||
228 | table_desc->length, table_desc->flags, | ||
229 | table_index); | ||
230 | if (ACPI_FAILURE(status)) { | ||
231 | goto release; | ||
232 | } | ||
233 | |||
234 | print_header: | ||
235 | acpi_tb_print_table_header(table_desc->address, table_desc->pointer); | ||
236 | |||
237 | release: | ||
238 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
239 | return_ACPI_STATUS(status); | ||
240 | } | 151 | } |
241 | 152 | ||
242 | /******************************************************************************* | 153 | /******************************************************************************* |
243 | * | 154 | * |
244 | * FUNCTION: acpi_tb_table_override | 155 | * FUNCTION: acpi_tb_install_fixed_table |
245 | * | 156 | * |
246 | * PARAMETERS: table_header - Header for the original table | 157 | * PARAMETERS: address - Physical address of DSDT or FACS |
247 | * table_desc - Table descriptor initialized for the | 158 | * signature - Table signature, NULL if no need to |
248 | * original table. May or may not be mapped. | 159 | * match |
160 | * table_index - Index into root table array | ||
249 | * | 161 | * |
250 | * RETURN: Pointer to the entire new table. NULL if table not overridden. | 162 | * RETURN: Status |
251 | * If overridden, installs the new table within the input table | ||
252 | * descriptor. | ||
253 | * | 163 | * |
254 | * DESCRIPTION: Attempt table override by calling the OSL override functions. | 164 | * DESCRIPTION: Install a fixed ACPI table (DSDT/FACS) into the global data |
255 | * Note: If the table is overridden, then the entire new table | 165 | * structure. |
256 | * is mapped and returned by this function. | ||
257 | * | 166 | * |
258 | ******************************************************************************/ | 167 | ******************************************************************************/ |
259 | 168 | ||
260 | struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header | 169 | acpi_status |
261 | *table_header, | 170 | acpi_tb_install_fixed_table(acpi_physical_address address, |
262 | struct acpi_table_desc | 171 | char *signature, u32 table_index) |
263 | *table_desc) | ||
264 | { | 172 | { |
173 | struct acpi_table_desc new_table_desc; | ||
265 | acpi_status status; | 174 | acpi_status status; |
266 | struct acpi_table_header *new_table = NULL; | ||
267 | acpi_physical_address new_address = 0; | ||
268 | u32 new_table_length = 0; | ||
269 | u8 new_flags; | ||
270 | char *override_type; | ||
271 | 175 | ||
272 | /* (1) Attempt logical override (returns a logical address) */ | 176 | ACPI_FUNCTION_TRACE(tb_install_fixed_table); |
273 | 177 | ||
274 | status = acpi_os_table_override(table_header, &new_table); | 178 | if (!address) { |
275 | if (ACPI_SUCCESS(status) && new_table) { | 179 | ACPI_ERROR((AE_INFO, |
276 | new_address = ACPI_PTR_TO_PHYSADDR(new_table); | 180 | "Null physical address for ACPI table [%s]", |
277 | new_table_length = new_table->length; | 181 | signature)); |
278 | new_flags = ACPI_TABLE_ORIGIN_OVERRIDE; | 182 | return (AE_NO_MEMORY); |
279 | override_type = "Logical"; | ||
280 | goto finish_override; | ||
281 | } | 183 | } |
282 | 184 | ||
283 | /* (2) Attempt physical override (returns a physical address) */ | 185 | /* Fill a table descriptor for validation */ |
284 | 186 | ||
285 | status = acpi_os_physical_table_override(table_header, | 187 | status = acpi_tb_acquire_temp_table(&new_table_desc, address, |
286 | &new_address, | 188 | ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL); |
287 | &new_table_length); | 189 | if (ACPI_FAILURE(status)) { |
288 | if (ACPI_SUCCESS(status) && new_address && new_table_length) { | 190 | ACPI_ERROR((AE_INFO, "Could not acquire table length at %p", |
289 | 191 | ACPI_CAST_PTR(void, address))); | |
290 | /* Map the entire new table */ | 192 | return_ACPI_STATUS(status); |
291 | |||
292 | new_table = acpi_os_map_memory(new_address, new_table_length); | ||
293 | if (!new_table) { | ||
294 | ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, | ||
295 | "%4.4s " ACPI_PRINTF_UINT | ||
296 | " Attempted physical table override failed", | ||
297 | table_header->signature, | ||
298 | ACPI_FORMAT_TO_UINT(table_desc-> | ||
299 | address))); | ||
300 | return (NULL); | ||
301 | } | ||
302 | |||
303 | override_type = "Physical"; | ||
304 | new_flags = ACPI_TABLE_ORIGIN_MAPPED; | ||
305 | goto finish_override; | ||
306 | } | 193 | } |
307 | 194 | ||
308 | return (NULL); /* There was no override */ | 195 | /* Validate and verify a table before installation */ |
309 | |||
310 | finish_override: | ||
311 | |||
312 | ACPI_INFO((AE_INFO, "%4.4s " ACPI_PRINTF_UINT | ||
313 | " %s table override, new table: " ACPI_PRINTF_UINT, | ||
314 | table_header->signature, | ||
315 | ACPI_FORMAT_TO_UINT(table_desc->address), | ||
316 | override_type, ACPI_FORMAT_TO_UINT(new_table))); | ||
317 | 196 | ||
318 | /* We can now unmap/delete the original table (if fully mapped) */ | 197 | status = acpi_tb_verify_temp_table(&new_table_desc, signature); |
198 | if (ACPI_FAILURE(status)) { | ||
199 | goto release_and_exit; | ||
200 | } | ||
319 | 201 | ||
320 | acpi_tb_delete_table(table_desc); | 202 | acpi_tb_install_table_with_override(table_index, &new_table_desc, TRUE); |
321 | 203 | ||
322 | /* Setup descriptor for the new table */ | 204 | release_and_exit: |
323 | 205 | ||
324 | table_desc->address = new_address; | 206 | /* Release the temporary table descriptor */ |
325 | table_desc->pointer = new_table; | ||
326 | table_desc->length = new_table_length; | ||
327 | table_desc->flags = new_flags; | ||
328 | 207 | ||
329 | return (new_table); | 208 | acpi_tb_release_temp_table(&new_table_desc); |
209 | return_ACPI_STATUS(status); | ||
330 | } | 210 | } |
331 | 211 | ||
332 | /******************************************************************************* | 212 | /******************************************************************************* |
333 | * | 213 | * |
334 | * FUNCTION: acpi_tb_resize_root_table_list | 214 | * FUNCTION: acpi_tb_install_standard_table |
335 | * | 215 | * |
336 | * PARAMETERS: None | 216 | * PARAMETERS: address - Address of the table (might be a virtual |
217 | * address depending on the table_flags) | ||
218 | * flags - Flags for the table | ||
219 | * reload - Whether reload should be performed | ||
220 | * override - Whether override should be performed | ||
221 | * table_index - Where the table index is returned | ||
337 | * | 222 | * |
338 | * RETURN: Status | 223 | * RETURN: Status |
339 | * | 224 | * |
340 | * DESCRIPTION: Expand the size of global table array | 225 | * DESCRIPTION: This function is called to install an ACPI table that is |
226 | * neither DSDT nor FACS (a "standard" table.) | ||
227 | * When this function is called by "Load" or "LoadTable" opcodes, | ||
228 | * or by acpi_load_table() API, the "Reload" parameter is set. | ||
229 | * After sucessfully returning from this function, table is | ||
230 | * "INSTALLED" but not "VALIDATED". | ||
341 | * | 231 | * |
342 | ******************************************************************************/ | 232 | ******************************************************************************/ |
343 | 233 | ||
344 | acpi_status acpi_tb_resize_root_table_list(void) | 234 | acpi_status |
235 | acpi_tb_install_standard_table(acpi_physical_address address, | ||
236 | u8 flags, | ||
237 | u8 reload, u8 override, u32 *table_index) | ||
345 | { | 238 | { |
346 | struct acpi_table_desc *tables; | 239 | u32 i; |
347 | u32 table_count; | 240 | acpi_status status = AE_OK; |
348 | 241 | struct acpi_table_desc new_table_desc; | |
349 | ACPI_FUNCTION_TRACE(tb_resize_root_table_list); | ||
350 | |||
351 | /* allow_resize flag is a parameter to acpi_initialize_tables */ | ||
352 | 242 | ||
353 | if (!(acpi_gbl_root_table_list.flags & ACPI_ROOT_ALLOW_RESIZE)) { | 243 | ACPI_FUNCTION_TRACE(tb_install_standard_table); |
354 | ACPI_ERROR((AE_INFO, | ||
355 | "Resize of Root Table Array is not allowed")); | ||
356 | return_ACPI_STATUS(AE_SUPPORT); | ||
357 | } | ||
358 | 244 | ||
359 | /* Increase the Table Array size */ | 245 | /* Acquire a temporary table descriptor for validation */ |
360 | 246 | ||
361 | if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { | 247 | status = acpi_tb_acquire_temp_table(&new_table_desc, address, flags); |
362 | table_count = acpi_gbl_root_table_list.max_table_count; | 248 | if (ACPI_FAILURE(status)) { |
363 | } else { | 249 | ACPI_ERROR((AE_INFO, "Could not acquire table length at %p", |
364 | table_count = acpi_gbl_root_table_list.current_table_count; | 250 | ACPI_CAST_PTR(void, address))); |
251 | return_ACPI_STATUS(status); | ||
365 | } | 252 | } |
366 | 253 | ||
367 | tables = ACPI_ALLOCATE_ZEROED(((acpi_size) table_count + | 254 | /* |
368 | ACPI_ROOT_TABLE_SIZE_INCREMENT) * | 255 | * Optionally do not load any SSDTs from the RSDT/XSDT. This can |
369 | sizeof(struct acpi_table_desc)); | 256 | * be useful for debugging ACPI problems on some machines. |
370 | if (!tables) { | 257 | */ |
371 | ACPI_ERROR((AE_INFO, | 258 | if (!reload && |
372 | "Could not allocate new root table array")); | 259 | acpi_gbl_disable_ssdt_table_install && |
373 | return_ACPI_STATUS(AE_NO_MEMORY); | 260 | ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) { |
261 | ACPI_INFO((AE_INFO, "Ignoring installation of %4.4s at %p", | ||
262 | new_table_desc.signature.ascii, ACPI_CAST_PTR(void, | ||
263 | address))); | ||
264 | goto release_and_exit; | ||
374 | } | 265 | } |
375 | 266 | ||
376 | /* Copy and free the previous table array */ | 267 | /* Validate and verify a table before installation */ |
377 | |||
378 | if (acpi_gbl_root_table_list.tables) { | ||
379 | ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, | ||
380 | (acpi_size) table_count * | ||
381 | sizeof(struct acpi_table_desc)); | ||
382 | 268 | ||
383 | if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { | 269 | status = acpi_tb_verify_temp_table(&new_table_desc, NULL); |
384 | ACPI_FREE(acpi_gbl_root_table_list.tables); | 270 | if (ACPI_FAILURE(status)) { |
385 | } | 271 | goto release_and_exit; |
386 | } | 272 | } |
387 | 273 | ||
388 | acpi_gbl_root_table_list.tables = tables; | 274 | if (reload) { |
389 | acpi_gbl_root_table_list.max_table_count = | 275 | /* |
390 | table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT; | 276 | * Validate the incoming table signature. |
391 | acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED; | 277 | * |
392 | 278 | * 1) Originally, we checked the table signature for "SSDT" or "PSDT". | |
393 | return_ACPI_STATUS(AE_OK); | 279 | * 2) We added support for OEMx tables, signature "OEM". |
394 | } | 280 | * 3) Valid tables were encountered with a null signature, so we just |
395 | 281 | * gave up on validating the signature, (05/2008). | |
396 | /******************************************************************************* | 282 | * 4) We encountered non-AML tables such as the MADT, which caused |
397 | * | 283 | * interpreter errors and kernel faults. So now, we once again allow |
398 | * FUNCTION: acpi_tb_store_table | 284 | * only "SSDT", "OEMx", and now, also a null signature. (05/2011). |
399 | * | 285 | */ |
400 | * PARAMETERS: address - Table address | 286 | if ((new_table_desc.signature.ascii[0] != 0x00) && |
401 | * table - Table header | 287 | (!ACPI_COMPARE_NAME |
402 | * length - Table length | 288 | (&new_table_desc.signature, ACPI_SIG_SSDT)) |
403 | * flags - flags | 289 | && (ACPI_STRNCMP(new_table_desc.signature.ascii, "OEM", 3))) |
404 | * | 290 | { |
405 | * RETURN: Status and table index. | 291 | ACPI_BIOS_ERROR((AE_INFO, |
406 | * | 292 | "Table has invalid signature [%4.4s] (0x%8.8X), " |
407 | * DESCRIPTION: Add an ACPI table to the global table list | 293 | "must be SSDT or OEMx", |
408 | * | 294 | acpi_ut_valid_acpi_name(new_table_desc. |
409 | ******************************************************************************/ | 295 | signature. |
296 | ascii) ? | ||
297 | new_table_desc.signature. | ||
298 | ascii : "????", | ||
299 | new_table_desc.signature.integer)); | ||
300 | |||
301 | status = AE_BAD_SIGNATURE; | ||
302 | goto release_and_exit; | ||
303 | } | ||
410 | 304 | ||
411 | acpi_status | 305 | /* Check if table is already registered */ |
412 | acpi_tb_store_table(acpi_physical_address address, | ||
413 | struct acpi_table_header *table, | ||
414 | u32 length, u8 flags, u32 *table_index) | ||
415 | { | ||
416 | acpi_status status; | ||
417 | struct acpi_table_desc *new_table; | ||
418 | 306 | ||
419 | /* Ensure that there is room for the table in the Root Table List */ | 307 | for (i = 0; i < acpi_gbl_root_table_list.current_table_count; |
308 | ++i) { | ||
309 | /* | ||
310 | * Check for a table match on the entire table length, | ||
311 | * not just the header. | ||
312 | */ | ||
313 | if (!acpi_tb_compare_tables(&new_table_desc, i)) { | ||
314 | continue; | ||
315 | } | ||
420 | 316 | ||
421 | if (acpi_gbl_root_table_list.current_table_count >= | 317 | /* |
422 | acpi_gbl_root_table_list.max_table_count) { | 318 | * Note: the current mechanism does not unregister a table if it is |
423 | status = acpi_tb_resize_root_table_list(); | 319 | * dynamically unloaded. The related namespace entries are deleted, |
424 | if (ACPI_FAILURE(status)) { | 320 | * but the table remains in the root table list. |
425 | return (status); | 321 | * |
322 | * The assumption here is that the number of different tables that | ||
323 | * will be loaded is actually small, and there is minimal overhead | ||
324 | * in just keeping the table in case it is needed again. | ||
325 | * | ||
326 | * If this assumption changes in the future (perhaps on large | ||
327 | * machines with many table load/unload operations), tables will | ||
328 | * need to be unregistered when they are unloaded, and slots in the | ||
329 | * root table list should be reused when empty. | ||
330 | */ | ||
331 | if (acpi_gbl_root_table_list.tables[i]. | ||
332 | flags & ACPI_TABLE_IS_LOADED) { | ||
333 | |||
334 | /* Table is still loaded, this is an error */ | ||
335 | |||
336 | status = AE_ALREADY_EXISTS; | ||
337 | goto release_and_exit; | ||
338 | } else { | ||
339 | /* | ||
340 | * Table was unloaded, allow it to be reloaded. | ||
341 | * As we are going to return AE_OK to the caller, we should | ||
342 | * take the responsibility of freeing the input descriptor. | ||
343 | * Refill the input descriptor to ensure | ||
344 | * acpi_tb_install_table_with_override() can be called again to | ||
345 | * indicate the re-installation. | ||
346 | */ | ||
347 | acpi_tb_uninstall_table(&new_table_desc); | ||
348 | *table_index = i; | ||
349 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
350 | return_ACPI_STATUS(AE_OK); | ||
351 | } | ||
426 | } | 352 | } |
427 | } | 353 | } |
428 | 354 | ||
429 | new_table = | 355 | /* Add the table to the global root table list */ |
430 | &acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list. | ||
431 | current_table_count]; | ||
432 | |||
433 | /* Initialize added table */ | ||
434 | |||
435 | new_table->address = address; | ||
436 | new_table->pointer = table; | ||
437 | new_table->length = length; | ||
438 | new_table->owner_id = 0; | ||
439 | new_table->flags = flags; | ||
440 | |||
441 | ACPI_MOVE_32_TO_32(&new_table->signature, table->signature); | ||
442 | |||
443 | *table_index = acpi_gbl_root_table_list.current_table_count; | ||
444 | acpi_gbl_root_table_list.current_table_count++; | ||
445 | return (AE_OK); | ||
446 | } | ||
447 | |||
448 | /******************************************************************************* | ||
449 | * | ||
450 | * FUNCTION: acpi_tb_delete_table | ||
451 | * | ||
452 | * PARAMETERS: table_index - Table index | ||
453 | * | ||
454 | * RETURN: None | ||
455 | * | ||
456 | * DESCRIPTION: Delete one internal ACPI table | ||
457 | * | ||
458 | ******************************************************************************/ | ||
459 | 356 | ||
460 | void acpi_tb_delete_table(struct acpi_table_desc *table_desc) | 357 | status = acpi_tb_get_next_root_index(&i); |
461 | { | 358 | if (ACPI_FAILURE(status)) { |
462 | /* Table must be mapped or allocated */ | 359 | goto release_and_exit; |
463 | if (!table_desc->pointer) { | ||
464 | return; | ||
465 | } | 360 | } |
466 | switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) { | ||
467 | case ACPI_TABLE_ORIGIN_MAPPED: | ||
468 | |||
469 | acpi_os_unmap_memory(table_desc->pointer, table_desc->length); | ||
470 | break; | ||
471 | |||
472 | case ACPI_TABLE_ORIGIN_ALLOCATED: | ||
473 | 361 | ||
474 | ACPI_FREE(table_desc->pointer); | 362 | *table_index = i; |
475 | break; | 363 | acpi_tb_install_table_with_override(i, &new_table_desc, override); |
476 | 364 | ||
477 | /* Not mapped or allocated, there is nothing we can do */ | 365 | release_and_exit: |
478 | 366 | ||
479 | default: | 367 | /* Release the temporary table descriptor */ |
480 | 368 | ||
481 | return; | 369 | acpi_tb_release_temp_table(&new_table_desc); |
482 | } | 370 | return_ACPI_STATUS(status); |
483 | |||
484 | table_desc->pointer = NULL; | ||
485 | } | 371 | } |
486 | 372 | ||
487 | /******************************************************************************* | 373 | /******************************************************************************* |
488 | * | 374 | * |
489 | * FUNCTION: acpi_tb_terminate | 375 | * FUNCTION: acpi_tb_override_table |
490 | * | 376 | * |
491 | * PARAMETERS: None | 377 | * PARAMETERS: old_table_desc - Validated table descriptor to be |
378 | * overridden | ||
492 | * | 379 | * |
493 | * RETURN: None | 380 | * RETURN: None |
494 | * | 381 | * |
495 | * DESCRIPTION: Delete all internal ACPI tables | 382 | * DESCRIPTION: Attempt table override by calling the OSL override functions. |
383 | * Note: If the table is overridden, then the entire new table | ||
384 | * is acquired and returned by this function. | ||
385 | * Before/after invocation, the table descriptor is in a state | ||
386 | * that is "VALIDATED". | ||
496 | * | 387 | * |
497 | ******************************************************************************/ | 388 | ******************************************************************************/ |
498 | 389 | ||
499 | void acpi_tb_terminate(void) | 390 | void acpi_tb_override_table(struct acpi_table_desc *old_table_desc) |
500 | { | 391 | { |
501 | u32 i; | 392 | acpi_status status; |
502 | 393 | char *override_type; | |
503 | ACPI_FUNCTION_TRACE(tb_terminate); | 394 | struct acpi_table_desc new_table_desc; |
504 | 395 | struct acpi_table_header *table; | |
505 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | 396 | acpi_physical_address address; |
506 | 397 | u32 length; | |
507 | /* Delete the individual tables */ | ||
508 | 398 | ||
509 | for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) { | 399 | /* (1) Attempt logical override (returns a logical address) */ |
510 | acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]); | ||
511 | } | ||
512 | 400 | ||
513 | /* | 401 | status = acpi_os_table_override(old_table_desc->pointer, &table); |
514 | * Delete the root table array if allocated locally. Array cannot be | 402 | if (ACPI_SUCCESS(status) && table) { |
515 | * mapped, so we don't need to check for that flag. | 403 | acpi_tb_acquire_temp_table(&new_table_desc, |
516 | */ | 404 | ACPI_PTR_TO_PHYSADDR(table), |
517 | if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { | 405 | ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL); |
518 | ACPI_FREE(acpi_gbl_root_table_list.tables); | 406 | override_type = "Logical"; |
407 | goto finish_override; | ||
519 | } | 408 | } |
520 | 409 | ||
521 | acpi_gbl_root_table_list.tables = NULL; | 410 | /* (2) Attempt physical override (returns a physical address) */ |
522 | acpi_gbl_root_table_list.flags = 0; | ||
523 | acpi_gbl_root_table_list.current_table_count = 0; | ||
524 | 411 | ||
525 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n")); | 412 | status = acpi_os_physical_table_override(old_table_desc->pointer, |
526 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 413 | &address, &length); |
414 | if (ACPI_SUCCESS(status) && address && length) { | ||
415 | acpi_tb_acquire_temp_table(&new_table_desc, address, | ||
416 | ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL); | ||
417 | override_type = "Physical"; | ||
418 | goto finish_override; | ||
419 | } | ||
527 | 420 | ||
528 | return_VOID; | 421 | return; /* There was no override */ |
529 | } | ||
530 | 422 | ||
531 | /******************************************************************************* | 423 | finish_override: |
532 | * | ||
533 | * FUNCTION: acpi_tb_delete_namespace_by_owner | ||
534 | * | ||
535 | * PARAMETERS: table_index - Table index | ||
536 | * | ||
537 | * RETURN: Status | ||
538 | * | ||
539 | * DESCRIPTION: Delete all namespace objects created when this table was loaded. | ||
540 | * | ||
541 | ******************************************************************************/ | ||
542 | |||
543 | acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index) | ||
544 | { | ||
545 | acpi_owner_id owner_id; | ||
546 | acpi_status status; | ||
547 | 424 | ||
548 | ACPI_FUNCTION_TRACE(tb_delete_namespace_by_owner); | 425 | /* Validate and verify a table before overriding */ |
549 | 426 | ||
550 | status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | 427 | status = acpi_tb_verify_temp_table(&new_table_desc, NULL); |
551 | if (ACPI_FAILURE(status)) { | 428 | if (ACPI_FAILURE(status)) { |
552 | return_ACPI_STATUS(status); | 429 | return; |
553 | } | 430 | } |
554 | 431 | ||
555 | if (table_index >= acpi_gbl_root_table_list.current_table_count) { | 432 | ACPI_INFO((AE_INFO, "%4.4s " ACPI_PRINTF_UINT |
556 | 433 | " %s table override, new table: " ACPI_PRINTF_UINT, | |
557 | /* The table index does not exist */ | 434 | old_table_desc->signature.ascii, |
558 | 435 | ACPI_FORMAT_TO_UINT(old_table_desc->address), | |
559 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 436 | override_type, ACPI_FORMAT_TO_UINT(new_table_desc.address))); |
560 | return_ACPI_STATUS(AE_NOT_EXIST); | ||
561 | } | ||
562 | 437 | ||
563 | /* Get the owner ID for this table, used to delete namespace nodes */ | 438 | /* We can now uninstall the original table */ |
564 | 439 | ||
565 | owner_id = acpi_gbl_root_table_list.tables[table_index].owner_id; | 440 | acpi_tb_uninstall_table(old_table_desc); |
566 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
567 | 441 | ||
568 | /* | 442 | /* |
569 | * Need to acquire the namespace writer lock to prevent interference | 443 | * Replace the original table descriptor and keep its state as |
570 | * with any concurrent namespace walks. The interpreter must be | 444 | * "VALIDATED". |
571 | * released during the deletion since the acquisition of the deletion | ||
572 | * lock may block, and also since the execution of a namespace walk | ||
573 | * must be allowed to use the interpreter. | ||
574 | */ | 445 | */ |
575 | (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); | 446 | acpi_tb_init_table_descriptor(old_table_desc, new_table_desc.address, |
576 | status = acpi_ut_acquire_write_lock(&acpi_gbl_namespace_rw_lock); | 447 | new_table_desc.flags, |
448 | new_table_desc.pointer); | ||
449 | acpi_tb_validate_temp_table(old_table_desc); | ||
577 | 450 | ||
578 | acpi_ns_delete_namespace_by_owner(owner_id); | 451 | /* Release the temporary table descriptor */ |
579 | if (ACPI_FAILURE(status)) { | ||
580 | return_ACPI_STATUS(status); | ||
581 | } | ||
582 | 452 | ||
583 | acpi_ut_release_write_lock(&acpi_gbl_namespace_rw_lock); | 453 | acpi_tb_release_temp_table(&new_table_desc); |
584 | |||
585 | status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); | ||
586 | return_ACPI_STATUS(status); | ||
587 | } | 454 | } |
588 | 455 | ||
589 | /******************************************************************************* | 456 | /******************************************************************************* |
590 | * | 457 | * |
591 | * FUNCTION: acpi_tb_allocate_owner_id | 458 | * FUNCTION: acpi_tb_store_table |
592 | * | 459 | * |
593 | * PARAMETERS: table_index - Table index | 460 | * PARAMETERS: address - Table address |
461 | * table - Table header | ||
462 | * length - Table length | ||
463 | * flags - Install flags | ||
464 | * table_index - Where the table index is returned | ||
594 | * | 465 | * |
595 | * RETURN: Status | 466 | * RETURN: Status and table index. |
596 | * | 467 | * |
597 | * DESCRIPTION: Allocates owner_id in table_desc | 468 | * DESCRIPTION: Add an ACPI table to the global table list |
598 | * | 469 | * |
599 | ******************************************************************************/ | 470 | ******************************************************************************/ |
600 | 471 | ||
601 | acpi_status acpi_tb_allocate_owner_id(u32 table_index) | 472 | acpi_status |
473 | acpi_tb_store_table(acpi_physical_address address, | ||
474 | struct acpi_table_header * table, | ||
475 | u32 length, u8 flags, u32 *table_index) | ||
602 | { | 476 | { |
603 | acpi_status status = AE_BAD_PARAMETER; | 477 | acpi_status status; |
604 | 478 | struct acpi_table_desc *table_desc; | |
605 | ACPI_FUNCTION_TRACE(tb_allocate_owner_id); | ||
606 | 479 | ||
607 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | 480 | status = acpi_tb_get_next_root_index(table_index); |
608 | if (table_index < acpi_gbl_root_table_list.current_table_count) { | 481 | if (ACPI_FAILURE(status)) { |
609 | status = acpi_ut_allocate_owner_id | 482 | return (status); |
610 | (&(acpi_gbl_root_table_list.tables[table_index].owner_id)); | ||
611 | } | 483 | } |
612 | 484 | ||
613 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 485 | /* Initialize added table */ |
614 | return_ACPI_STATUS(status); | ||
615 | } | ||
616 | |||
617 | /******************************************************************************* | ||
618 | * | ||
619 | * FUNCTION: acpi_tb_release_owner_id | ||
620 | * | ||
621 | * PARAMETERS: table_index - Table index | ||
622 | * | ||
623 | * RETURN: Status | ||
624 | * | ||
625 | * DESCRIPTION: Releases owner_id in table_desc | ||
626 | * | ||
627 | ******************************************************************************/ | ||
628 | |||
629 | acpi_status acpi_tb_release_owner_id(u32 table_index) | ||
630 | { | ||
631 | acpi_status status = AE_BAD_PARAMETER; | ||
632 | |||
633 | ACPI_FUNCTION_TRACE(tb_release_owner_id); | ||
634 | |||
635 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
636 | if (table_index < acpi_gbl_root_table_list.current_table_count) { | ||
637 | acpi_ut_release_owner_id(& | ||
638 | (acpi_gbl_root_table_list. | ||
639 | tables[table_index].owner_id)); | ||
640 | status = AE_OK; | ||
641 | } | ||
642 | 486 | ||
643 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 487 | table_desc = &acpi_gbl_root_table_list.tables[*table_index]; |
644 | return_ACPI_STATUS(status); | 488 | acpi_tb_init_table_descriptor(table_desc, address, flags, table); |
489 | table_desc->pointer = table; | ||
490 | return (AE_OK); | ||
645 | } | 491 | } |
646 | 492 | ||
647 | /******************************************************************************* | 493 | /******************************************************************************* |
648 | * | 494 | * |
649 | * FUNCTION: acpi_tb_get_owner_id | 495 | * FUNCTION: acpi_tb_uninstall_table |
650 | * | 496 | * |
651 | * PARAMETERS: table_index - Table index | 497 | * PARAMETERS: table_desc - Table descriptor |
652 | * owner_id - Where the table owner_id is returned | ||
653 | * | 498 | * |
654 | * RETURN: Status | 499 | * RETURN: None |
655 | * | 500 | * |
656 | * DESCRIPTION: returns owner_id for the ACPI table | 501 | * DESCRIPTION: Delete one internal ACPI table |
657 | * | 502 | * |
658 | ******************************************************************************/ | 503 | ******************************************************************************/ |
659 | 504 | ||
660 | acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id) | 505 | void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc) |
661 | { | 506 | { |
662 | acpi_status status = AE_BAD_PARAMETER; | ||
663 | |||
664 | ACPI_FUNCTION_TRACE(tb_get_owner_id); | ||
665 | 507 | ||
666 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | 508 | ACPI_FUNCTION_TRACE(tb_uninstall_table); |
667 | if (table_index < acpi_gbl_root_table_list.current_table_count) { | ||
668 | *owner_id = | ||
669 | acpi_gbl_root_table_list.tables[table_index].owner_id; | ||
670 | status = AE_OK; | ||
671 | } | ||
672 | 509 | ||
673 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 510 | /* Table must be installed */ |
674 | return_ACPI_STATUS(status); | ||
675 | } | ||
676 | |||
677 | /******************************************************************************* | ||
678 | * | ||
679 | * FUNCTION: acpi_tb_is_table_loaded | ||
680 | * | ||
681 | * PARAMETERS: table_index - Table index | ||
682 | * | ||
683 | * RETURN: Table Loaded Flag | ||
684 | * | ||
685 | ******************************************************************************/ | ||
686 | 511 | ||
687 | u8 acpi_tb_is_table_loaded(u32 table_index) | 512 | if (!table_desc->address) { |
688 | { | 513 | return_VOID; |
689 | u8 is_loaded = FALSE; | ||
690 | |||
691 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
692 | if (table_index < acpi_gbl_root_table_list.current_table_count) { | ||
693 | is_loaded = (u8) | ||
694 | (acpi_gbl_root_table_list.tables[table_index].flags & | ||
695 | ACPI_TABLE_IS_LOADED); | ||
696 | } | 514 | } |
697 | 515 | ||
698 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 516 | acpi_tb_invalidate_table(table_desc); |
699 | return (is_loaded); | ||
700 | } | ||
701 | |||
702 | /******************************************************************************* | ||
703 | * | ||
704 | * FUNCTION: acpi_tb_set_table_loaded_flag | ||
705 | * | ||
706 | * PARAMETERS: table_index - Table index | ||
707 | * is_loaded - TRUE if table is loaded, FALSE otherwise | ||
708 | * | ||
709 | * RETURN: None | ||
710 | * | ||
711 | * DESCRIPTION: Sets the table loaded flag to either TRUE or FALSE. | ||
712 | * | ||
713 | ******************************************************************************/ | ||
714 | |||
715 | void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded) | ||
716 | { | ||
717 | 517 | ||
718 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | 518 | if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) == |
719 | if (table_index < acpi_gbl_root_table_list.current_table_count) { | 519 | ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL) { |
720 | if (is_loaded) { | 520 | ACPI_FREE(ACPI_CAST_PTR(void, table_desc->address)); |
721 | acpi_gbl_root_table_list.tables[table_index].flags |= | ||
722 | ACPI_TABLE_IS_LOADED; | ||
723 | } else { | ||
724 | acpi_gbl_root_table_list.tables[table_index].flags &= | ||
725 | ~ACPI_TABLE_IS_LOADED; | ||
726 | } | ||
727 | } | 521 | } |
728 | 522 | ||
729 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 523 | table_desc->address = ACPI_PTR_TO_PHYSADDR(NULL); |
524 | return_VOID; | ||
730 | } | 525 | } |
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index a4702eee91a8..6b1ca9991b90 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c | |||
@@ -49,8 +49,6 @@ | |||
49 | ACPI_MODULE_NAME("tbutils") | 49 | ACPI_MODULE_NAME("tbutils") |
50 | 50 | ||
51 | /* Local prototypes */ | 51 | /* Local prototypes */ |
52 | static acpi_status acpi_tb_validate_xsdt(acpi_physical_address address); | ||
53 | |||
54 | static acpi_physical_address | 52 | static acpi_physical_address |
55 | acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size); | 53 | acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size); |
56 | 54 | ||
@@ -178,9 +176,13 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index) | |||
178 | } | 176 | } |
179 | 177 | ||
180 | ACPI_MEMCPY(new_table, table_desc->pointer, table_desc->length); | 178 | ACPI_MEMCPY(new_table, table_desc->pointer, table_desc->length); |
181 | acpi_tb_delete_table(table_desc); | 179 | acpi_tb_uninstall_table(table_desc); |
182 | table_desc->pointer = new_table; | 180 | |
183 | table_desc->flags = ACPI_TABLE_ORIGIN_ALLOCATED; | 181 | acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list. |
182 | tables[ACPI_TABLE_INDEX_DSDT], | ||
183 | ACPI_PTR_TO_PHYSADDR(new_table), | ||
184 | ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL, | ||
185 | new_table); | ||
184 | 186 | ||
185 | ACPI_INFO((AE_INFO, | 187 | ACPI_INFO((AE_INFO, |
186 | "Forced DSDT copy: length 0x%05X copied locally, original unmapped", | 188 | "Forced DSDT copy: length 0x%05X copied locally, original unmapped", |
@@ -191,116 +193,6 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index) | |||
191 | 193 | ||
192 | /******************************************************************************* | 194 | /******************************************************************************* |
193 | * | 195 | * |
194 | * FUNCTION: acpi_tb_install_table | ||
195 | * | ||
196 | * PARAMETERS: address - Physical address of DSDT or FACS | ||
197 | * signature - Table signature, NULL if no need to | ||
198 | * match | ||
199 | * table_index - Index into root table array | ||
200 | * | ||
201 | * RETURN: None | ||
202 | * | ||
203 | * DESCRIPTION: Install an ACPI table into the global data structure. The | ||
204 | * table override mechanism is called to allow the host | ||
205 | * OS to replace any table before it is installed in the root | ||
206 | * table array. | ||
207 | * | ||
208 | ******************************************************************************/ | ||
209 | |||
210 | void | ||
211 | acpi_tb_install_table(acpi_physical_address address, | ||
212 | char *signature, u32 table_index) | ||
213 | { | ||
214 | struct acpi_table_header *table; | ||
215 | struct acpi_table_header *final_table; | ||
216 | struct acpi_table_desc *table_desc; | ||
217 | |||
218 | if (!address) { | ||
219 | ACPI_ERROR((AE_INFO, | ||
220 | "Null physical address for ACPI table [%s]", | ||
221 | signature)); | ||
222 | return; | ||
223 | } | ||
224 | |||
225 | /* Map just the table header */ | ||
226 | |||
227 | table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); | ||
228 | if (!table) { | ||
229 | ACPI_ERROR((AE_INFO, | ||
230 | "Could not map memory for table [%s] at %p", | ||
231 | signature, ACPI_CAST_PTR(void, address))); | ||
232 | return; | ||
233 | } | ||
234 | |||
235 | /* If a particular signature is expected (DSDT/FACS), it must match */ | ||
236 | |||
237 | if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) { | ||
238 | ACPI_BIOS_ERROR((AE_INFO, | ||
239 | "Invalid signature 0x%X for ACPI table, expected [%s]", | ||
240 | *ACPI_CAST_PTR(u32, table->signature), | ||
241 | signature)); | ||
242 | goto unmap_and_exit; | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Initialize the table entry. Set the pointer to NULL, since the | ||
247 | * table is not fully mapped at this time. | ||
248 | */ | ||
249 | table_desc = &acpi_gbl_root_table_list.tables[table_index]; | ||
250 | |||
251 | table_desc->address = address; | ||
252 | table_desc->pointer = NULL; | ||
253 | table_desc->length = table->length; | ||
254 | table_desc->flags = ACPI_TABLE_ORIGIN_MAPPED; | ||
255 | ACPI_MOVE_32_TO_32(table_desc->signature.ascii, table->signature); | ||
256 | |||
257 | /* | ||
258 | * ACPI Table Override: | ||
259 | * | ||
260 | * Before we install the table, let the host OS override it with a new | ||
261 | * one if desired. Any table within the RSDT/XSDT can be replaced, | ||
262 | * including the DSDT which is pointed to by the FADT. | ||
263 | * | ||
264 | * NOTE: If the table is overridden, then final_table will contain a | ||
265 | * mapped pointer to the full new table. If the table is not overridden, | ||
266 | * or if there has been a physical override, then the table will be | ||
267 | * fully mapped later (in verify table). In any case, we must | ||
268 | * unmap the header that was mapped above. | ||
269 | */ | ||
270 | final_table = acpi_tb_table_override(table, table_desc); | ||
271 | if (!final_table) { | ||
272 | final_table = table; /* There was no override */ | ||
273 | } | ||
274 | |||
275 | acpi_tb_print_table_header(table_desc->address, final_table); | ||
276 | |||
277 | /* Set the global integer width (based upon revision of the DSDT) */ | ||
278 | |||
279 | if (table_index == ACPI_TABLE_INDEX_DSDT) { | ||
280 | acpi_ut_set_integer_width(final_table->revision); | ||
281 | } | ||
282 | |||
283 | /* | ||
284 | * If we have a physical override during this early loading of the ACPI | ||
285 | * tables, unmap the table for now. It will be mapped again later when | ||
286 | * it is actually used. This supports very early loading of ACPI tables, | ||
287 | * before virtual memory is fully initialized and running within the | ||
288 | * host OS. Note: A logical override has the ACPI_TABLE_ORIGIN_OVERRIDE | ||
289 | * flag set and will not be deleted below. | ||
290 | */ | ||
291 | if (final_table != table) { | ||
292 | acpi_tb_delete_table(table_desc); | ||
293 | } | ||
294 | |||
295 | unmap_and_exit: | ||
296 | |||
297 | /* Always unmap the table header that we mapped above */ | ||
298 | |||
299 | acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); | ||
300 | } | ||
301 | |||
302 | /******************************************************************************* | ||
303 | * | ||
304 | * FUNCTION: acpi_tb_get_root_table_entry | 196 | * FUNCTION: acpi_tb_get_root_table_entry |
305 | * | 197 | * |
306 | * PARAMETERS: table_entry - Pointer to the RSDT/XSDT table entry | 198 | * PARAMETERS: table_entry - Pointer to the RSDT/XSDT table entry |
@@ -357,87 +249,6 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size) | |||
357 | 249 | ||
358 | /******************************************************************************* | 250 | /******************************************************************************* |
359 | * | 251 | * |
360 | * FUNCTION: acpi_tb_validate_xsdt | ||
361 | * | ||
362 | * PARAMETERS: address - Physical address of the XSDT (from RSDP) | ||
363 | * | ||
364 | * RETURN: Status. AE_OK if the table appears to be valid. | ||
365 | * | ||
366 | * DESCRIPTION: Validate an XSDT to ensure that it is of minimum size and does | ||
367 | * not contain any NULL entries. A problem that is seen in the | ||
368 | * field is that the XSDT exists, but is actually useless because | ||
369 | * of one or more (or all) NULL entries. | ||
370 | * | ||
371 | ******************************************************************************/ | ||
372 | |||
373 | static acpi_status acpi_tb_validate_xsdt(acpi_physical_address xsdt_address) | ||
374 | { | ||
375 | struct acpi_table_header *table; | ||
376 | u8 *next_entry; | ||
377 | acpi_physical_address address; | ||
378 | u32 length; | ||
379 | u32 entry_count; | ||
380 | acpi_status status; | ||
381 | u32 i; | ||
382 | |||
383 | /* Get the XSDT length */ | ||
384 | |||
385 | table = | ||
386 | acpi_os_map_memory(xsdt_address, sizeof(struct acpi_table_header)); | ||
387 | if (!table) { | ||
388 | return (AE_NO_MEMORY); | ||
389 | } | ||
390 | |||
391 | length = table->length; | ||
392 | acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); | ||
393 | |||
394 | /* | ||
395 | * Minimum XSDT length is the size of the standard ACPI header | ||
396 | * plus one physical address entry | ||
397 | */ | ||
398 | if (length < (sizeof(struct acpi_table_header) + ACPI_XSDT_ENTRY_SIZE)) { | ||
399 | return (AE_INVALID_TABLE_LENGTH); | ||
400 | } | ||
401 | |||
402 | /* Map the entire XSDT */ | ||
403 | |||
404 | table = acpi_os_map_memory(xsdt_address, length); | ||
405 | if (!table) { | ||
406 | return (AE_NO_MEMORY); | ||
407 | } | ||
408 | |||
409 | /* Get the number of entries and pointer to first entry */ | ||
410 | |||
411 | status = AE_OK; | ||
412 | next_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header)); | ||
413 | entry_count = (u32)((table->length - sizeof(struct acpi_table_header)) / | ||
414 | ACPI_XSDT_ENTRY_SIZE); | ||
415 | |||
416 | /* Validate each entry (physical address) within the XSDT */ | ||
417 | |||
418 | for (i = 0; i < entry_count; i++) { | ||
419 | address = | ||
420 | acpi_tb_get_root_table_entry(next_entry, | ||
421 | ACPI_XSDT_ENTRY_SIZE); | ||
422 | if (!address) { | ||
423 | |||
424 | /* Detected a NULL entry, XSDT is invalid */ | ||
425 | |||
426 | status = AE_NULL_ENTRY; | ||
427 | break; | ||
428 | } | ||
429 | |||
430 | next_entry += ACPI_XSDT_ENTRY_SIZE; | ||
431 | } | ||
432 | |||
433 | /* Unmap table */ | ||
434 | |||
435 | acpi_os_unmap_memory(table, length); | ||
436 | return (status); | ||
437 | } | ||
438 | |||
439 | /******************************************************************************* | ||
440 | * | ||
441 | * FUNCTION: acpi_tb_parse_root_table | 252 | * FUNCTION: acpi_tb_parse_root_table |
442 | * | 253 | * |
443 | * PARAMETERS: rsdp - Pointer to the RSDP | 254 | * PARAMETERS: rsdp - Pointer to the RSDP |
@@ -464,6 +275,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) | |||
464 | u32 length; | 275 | u32 length; |
465 | u8 *table_entry; | 276 | u8 *table_entry; |
466 | acpi_status status; | 277 | acpi_status status; |
278 | u32 table_index; | ||
467 | 279 | ||
468 | ACPI_FUNCTION_TRACE(tb_parse_root_table); | 280 | ACPI_FUNCTION_TRACE(tb_parse_root_table); |
469 | 281 | ||
@@ -502,25 +314,6 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) | |||
502 | */ | 314 | */ |
503 | acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp)); | 315 | acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp)); |
504 | 316 | ||
505 | /* | ||
506 | * If it is present and used, validate the XSDT for access/size | ||
507 | * and ensure that all table entries are at least non-NULL | ||
508 | */ | ||
509 | if (table_entry_size == ACPI_XSDT_ENTRY_SIZE) { | ||
510 | status = acpi_tb_validate_xsdt(address); | ||
511 | if (ACPI_FAILURE(status)) { | ||
512 | ACPI_BIOS_WARNING((AE_INFO, | ||
513 | "XSDT is invalid (%s), using RSDT", | ||
514 | acpi_format_exception(status))); | ||
515 | |||
516 | /* Fall back to the RSDT */ | ||
517 | |||
518 | address = | ||
519 | (acpi_physical_address) rsdp->rsdt_physical_address; | ||
520 | table_entry_size = ACPI_RSDT_ENTRY_SIZE; | ||
521 | } | ||
522 | } | ||
523 | |||
524 | /* Map the RSDT/XSDT table header to get the full table length */ | 317 | /* Map the RSDT/XSDT table header to get the full table length */ |
525 | 318 | ||
526 | table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); | 319 | table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); |
@@ -573,55 +366,36 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) | |||
573 | /* Initialize the root table array from the RSDT/XSDT */ | 366 | /* Initialize the root table array from the RSDT/XSDT */ |
574 | 367 | ||
575 | for (i = 0; i < table_count; i++) { | 368 | for (i = 0; i < table_count; i++) { |
576 | if (acpi_gbl_root_table_list.current_table_count >= | ||
577 | acpi_gbl_root_table_list.max_table_count) { | ||
578 | |||
579 | /* There is no more room in the root table array, attempt resize */ | ||
580 | |||
581 | status = acpi_tb_resize_root_table_list(); | ||
582 | if (ACPI_FAILURE(status)) { | ||
583 | ACPI_WARNING((AE_INFO, | ||
584 | "Truncating %u table entries!", | ||
585 | (unsigned) (table_count - | ||
586 | (acpi_gbl_root_table_list. | ||
587 | current_table_count - | ||
588 | 2)))); | ||
589 | break; | ||
590 | } | ||
591 | } | ||
592 | 369 | ||
593 | /* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */ | 370 | /* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */ |
594 | 371 | ||
595 | acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list. | 372 | address = |
596 | current_table_count].address = | ||
597 | acpi_tb_get_root_table_entry(table_entry, table_entry_size); | 373 | acpi_tb_get_root_table_entry(table_entry, table_entry_size); |
598 | 374 | ||
599 | table_entry += table_entry_size; | 375 | /* Skip NULL entries in RSDT/XSDT */ |
600 | acpi_gbl_root_table_list.current_table_count++; | ||
601 | } | ||
602 | |||
603 | /* | ||
604 | * It is not possible to map more than one entry in some environments, | ||
605 | * so unmap the root table here before mapping other tables | ||
606 | */ | ||
607 | acpi_os_unmap_memory(table, length); | ||
608 | 376 | ||
609 | /* | 377 | if (!address) { |
610 | * Complete the initialization of the root table array by examining | 378 | goto next_table; |
611 | * the header of each table | 379 | } |
612 | */ | ||
613 | for (i = 2; i < acpi_gbl_root_table_list.current_table_count; i++) { | ||
614 | acpi_tb_install_table(acpi_gbl_root_table_list.tables[i]. | ||
615 | address, NULL, i); | ||
616 | 380 | ||
617 | /* Special case for FADT - validate it then get the DSDT and FACS */ | 381 | status = acpi_tb_install_standard_table(address, |
382 | ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, | ||
383 | FALSE, TRUE, | ||
384 | &table_index); | ||
618 | 385 | ||
619 | if (ACPI_COMPARE_NAME | 386 | if (ACPI_SUCCESS(status) && |
620 | (&acpi_gbl_root_table_list.tables[i].signature, | 387 | ACPI_COMPARE_NAME(&acpi_gbl_root_table_list. |
621 | ACPI_SIG_FADT)) { | 388 | tables[table_index].signature, |
622 | acpi_tb_parse_fadt(i); | 389 | ACPI_SIG_FADT)) { |
390 | acpi_tb_parse_fadt(table_index); | ||
623 | } | 391 | } |
392 | |||
393 | next_table: | ||
394 | |||
395 | table_entry += table_entry_size; | ||
624 | } | 396 | } |
625 | 397 | ||
398 | acpi_os_unmap_memory(table, length); | ||
399 | |||
626 | return_ACPI_STATUS(AE_OK); | 400 | return_ACPI_STATUS(AE_OK); |
627 | } | 401 | } |
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index a1593159d9ea..6482b0ded652 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c | |||
@@ -206,8 +206,8 @@ acpi_status | |||
206 | acpi_get_table_header(char *signature, | 206 | acpi_get_table_header(char *signature, |
207 | u32 instance, struct acpi_table_header *out_table_header) | 207 | u32 instance, struct acpi_table_header *out_table_header) |
208 | { | 208 | { |
209 | u32 i; | 209 | u32 i; |
210 | u32 j; | 210 | u32 j; |
211 | struct acpi_table_header *header; | 211 | struct acpi_table_header *header; |
212 | 212 | ||
213 | /* Parameter validation */ | 213 | /* Parameter validation */ |
@@ -233,7 +233,7 @@ acpi_get_table_header(char *signature, | |||
233 | if (!acpi_gbl_root_table_list.tables[i].pointer) { | 233 | if (!acpi_gbl_root_table_list.tables[i].pointer) { |
234 | if ((acpi_gbl_root_table_list.tables[i].flags & | 234 | if ((acpi_gbl_root_table_list.tables[i].flags & |
235 | ACPI_TABLE_ORIGIN_MASK) == | 235 | ACPI_TABLE_ORIGIN_MASK) == |
236 | ACPI_TABLE_ORIGIN_MAPPED) { | 236 | ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL) { |
237 | header = | 237 | header = |
238 | acpi_os_map_memory(acpi_gbl_root_table_list. | 238 | acpi_os_map_memory(acpi_gbl_root_table_list. |
239 | tables[i].address, | 239 | tables[i].address, |
@@ -321,8 +321,8 @@ acpi_get_table_with_size(char *signature, | |||
321 | u32 instance, struct acpi_table_header **out_table, | 321 | u32 instance, struct acpi_table_header **out_table, |
322 | acpi_size *tbl_size) | 322 | acpi_size *tbl_size) |
323 | { | 323 | { |
324 | u32 i; | 324 | u32 i; |
325 | u32 j; | 325 | u32 j; |
326 | acpi_status status; | 326 | acpi_status status; |
327 | 327 | ||
328 | /* Parameter validation */ | 328 | /* Parameter validation */ |
@@ -346,7 +346,7 @@ acpi_get_table_with_size(char *signature, | |||
346 | } | 346 | } |
347 | 347 | ||
348 | status = | 348 | status = |
349 | acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]); | 349 | acpi_tb_validate_table(&acpi_gbl_root_table_list.tables[i]); |
350 | if (ACPI_SUCCESS(status)) { | 350 | if (ACPI_SUCCESS(status)) { |
351 | *out_table = acpi_gbl_root_table_list.tables[i].pointer; | 351 | *out_table = acpi_gbl_root_table_list.tables[i].pointer; |
352 | *tbl_size = acpi_gbl_root_table_list.tables[i].length; | 352 | *tbl_size = acpi_gbl_root_table_list.tables[i].length; |
@@ -390,7 +390,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table) | |||
390 | * | 390 | * |
391 | ******************************************************************************/ | 391 | ******************************************************************************/ |
392 | acpi_status | 392 | acpi_status |
393 | acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table) | 393 | acpi_get_table_by_index(u32 table_index, struct acpi_table_header ** table) |
394 | { | 394 | { |
395 | acpi_status status; | 395 | acpi_status status; |
396 | 396 | ||
@@ -416,8 +416,8 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table) | |||
416 | /* Table is not mapped, map it */ | 416 | /* Table is not mapped, map it */ |
417 | 417 | ||
418 | status = | 418 | status = |
419 | acpi_tb_verify_table(&acpi_gbl_root_table_list. | 419 | acpi_tb_validate_table(&acpi_gbl_root_table_list. |
420 | tables[table_index]); | 420 | tables[table_index]); |
421 | if (ACPI_FAILURE(status)) { | 421 | if (ACPI_FAILURE(status)) { |
422 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 422 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); |
423 | return_ACPI_STATUS(status); | 423 | return_ACPI_STATUS(status); |
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index 0909420fc776..ab5308b81aa8 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c | |||
@@ -117,7 +117,7 @@ static acpi_status acpi_tb_load_namespace(void) | |||
117 | tables[ACPI_TABLE_INDEX_DSDT].signature), | 117 | tables[ACPI_TABLE_INDEX_DSDT].signature), |
118 | ACPI_SIG_DSDT) | 118 | ACPI_SIG_DSDT) |
119 | || | 119 | || |
120 | ACPI_FAILURE(acpi_tb_verify_table | 120 | ACPI_FAILURE(acpi_tb_validate_table |
121 | (&acpi_gbl_root_table_list. | 121 | (&acpi_gbl_root_table_list. |
122 | tables[ACPI_TABLE_INDEX_DSDT]))) { | 122 | tables[ACPI_TABLE_INDEX_DSDT]))) { |
123 | status = AE_NO_ACPI_TABLES; | 123 | status = AE_NO_ACPI_TABLES; |
@@ -128,7 +128,7 @@ static acpi_status acpi_tb_load_namespace(void) | |||
128 | * Save the DSDT pointer for simple access. This is the mapped memory | 128 | * Save the DSDT pointer for simple access. This is the mapped memory |
129 | * address. We must take care here because the address of the .Tables | 129 | * address. We must take care here because the address of the .Tables |
130 | * array can change dynamically as tables are loaded at run-time. Note: | 130 | * array can change dynamically as tables are loaded at run-time. Note: |
131 | * .Pointer field is not validated until after call to acpi_tb_verify_table. | 131 | * .Pointer field is not validated until after call to acpi_tb_validate_table. |
132 | */ | 132 | */ |
133 | acpi_gbl_DSDT = | 133 | acpi_gbl_DSDT = |
134 | acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer; | 134 | acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer; |
@@ -174,24 +174,11 @@ static acpi_status acpi_tb_load_namespace(void) | |||
174 | (acpi_gbl_root_table_list.tables[i]. | 174 | (acpi_gbl_root_table_list.tables[i]. |
175 | signature), ACPI_SIG_PSDT)) | 175 | signature), ACPI_SIG_PSDT)) |
176 | || | 176 | || |
177 | ACPI_FAILURE(acpi_tb_verify_table | 177 | ACPI_FAILURE(acpi_tb_validate_table |
178 | (&acpi_gbl_root_table_list.tables[i]))) { | 178 | (&acpi_gbl_root_table_list.tables[i]))) { |
179 | continue; | 179 | continue; |
180 | } | 180 | } |
181 | 181 | ||
182 | /* | ||
183 | * Optionally do not load any SSDTs from the RSDT/XSDT. This can | ||
184 | * be useful for debugging ACPI problems on some machines. | ||
185 | */ | ||
186 | if (acpi_gbl_disable_ssdt_table_load) { | ||
187 | ACPI_INFO((AE_INFO, "Ignoring %4.4s at %p", | ||
188 | acpi_gbl_root_table_list.tables[i].signature. | ||
189 | ascii, ACPI_CAST_PTR(void, | ||
190 | acpi_gbl_root_table_list. | ||
191 | tables[i].address))); | ||
192 | continue; | ||
193 | } | ||
194 | |||
195 | /* Ignore errors while loading tables, get as many as possible */ | 182 | /* Ignore errors while loading tables, get as many as possible */ |
196 | 183 | ||
197 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | 184 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); |
@@ -208,6 +195,45 @@ unlock_and_exit: | |||
208 | 195 | ||
209 | /******************************************************************************* | 196 | /******************************************************************************* |
210 | * | 197 | * |
198 | * FUNCTION: acpi_install_table | ||
199 | * | ||
200 | * PARAMETERS: address - Address of the ACPI table to be installed. | ||
201 | * physical - Whether the address is a physical table | ||
202 | * address or not | ||
203 | * | ||
204 | * RETURN: Status | ||
205 | * | ||
206 | * DESCRIPTION: Dynamically install an ACPI table. | ||
207 | * Note: This function should only be invoked after | ||
208 | * acpi_initialize_tables() and before acpi_load_tables(). | ||
209 | * | ||
210 | ******************************************************************************/ | ||
211 | |||
212 | acpi_status __init | ||
213 | acpi_install_table(acpi_physical_address address, u8 physical) | ||
214 | { | ||
215 | acpi_status status; | ||
216 | u8 flags; | ||
217 | u32 table_index; | ||
218 | |||
219 | ACPI_FUNCTION_TRACE(acpi_install_table); | ||
220 | |||
221 | if (physical) { | ||
222 | flags = ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL; | ||
223 | } else { | ||
224 | flags = ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL; | ||
225 | } | ||
226 | |||
227 | status = acpi_tb_install_standard_table(address, flags, | ||
228 | FALSE, FALSE, &table_index); | ||
229 | |||
230 | return_ACPI_STATUS(status); | ||
231 | } | ||
232 | |||
233 | ACPI_EXPORT_SYMBOL_INIT(acpi_install_table) | ||
234 | |||
235 | /******************************************************************************* | ||
236 | * | ||
211 | * FUNCTION: acpi_load_table | 237 | * FUNCTION: acpi_load_table |
212 | * | 238 | * |
213 | * PARAMETERS: table - Pointer to a buffer containing the ACPI | 239 | * PARAMETERS: table - Pointer to a buffer containing the ACPI |
@@ -222,11 +248,9 @@ unlock_and_exit: | |||
222 | * to ensure that the table is not deleted or unmapped. | 248 | * to ensure that the table is not deleted or unmapped. |
223 | * | 249 | * |
224 | ******************************************************************************/ | 250 | ******************************************************************************/ |
225 | |||
226 | acpi_status acpi_load_table(struct acpi_table_header *table) | 251 | acpi_status acpi_load_table(struct acpi_table_header *table) |
227 | { | 252 | { |
228 | acpi_status status; | 253 | acpi_status status; |
229 | struct acpi_table_desc table_desc; | ||
230 | u32 table_index; | 254 | u32 table_index; |
231 | 255 | ||
232 | ACPI_FUNCTION_TRACE(acpi_load_table); | 256 | ACPI_FUNCTION_TRACE(acpi_load_table); |
@@ -237,14 +261,6 @@ acpi_status acpi_load_table(struct acpi_table_header *table) | |||
237 | return_ACPI_STATUS(AE_BAD_PARAMETER); | 261 | return_ACPI_STATUS(AE_BAD_PARAMETER); |
238 | } | 262 | } |
239 | 263 | ||
240 | /* Init local table descriptor */ | ||
241 | |||
242 | ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc)); | ||
243 | table_desc.address = ACPI_PTR_TO_PHYSADDR(table); | ||
244 | table_desc.pointer = table; | ||
245 | table_desc.length = table->length; | ||
246 | table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN; | ||
247 | |||
248 | /* Must acquire the interpreter lock during this operation */ | 264 | /* Must acquire the interpreter lock during this operation */ |
249 | 265 | ||
250 | status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); | 266 | status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); |
@@ -255,7 +271,24 @@ acpi_status acpi_load_table(struct acpi_table_header *table) | |||
255 | /* Install the table and load it into the namespace */ | 271 | /* Install the table and load it into the namespace */ |
256 | 272 | ||
257 | ACPI_INFO((AE_INFO, "Host-directed Dynamic ACPI Table Load:")); | 273 | ACPI_INFO((AE_INFO, "Host-directed Dynamic ACPI Table Load:")); |
258 | status = acpi_tb_add_table(&table_desc, &table_index); | 274 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); |
275 | |||
276 | status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table), | ||
277 | ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL, | ||
278 | TRUE, FALSE, &table_index); | ||
279 | |||
280 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
281 | if (ACPI_FAILURE(status)) { | ||
282 | goto unlock_and_exit; | ||
283 | } | ||
284 | |||
285 | /* | ||
286 | * Note: Now table is "INSTALLED", it must be validated before | ||
287 | * using. | ||
288 | */ | ||
289 | status = | ||
290 | acpi_tb_validate_table(&acpi_gbl_root_table_list. | ||
291 | tables[table_index]); | ||
259 | if (ACPI_FAILURE(status)) { | 292 | if (ACPI_FAILURE(status)) { |
260 | goto unlock_and_exit; | 293 | goto unlock_and_exit; |
261 | } | 294 | } |
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index fbfa9eca011f..90ec37c473c6 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c | |||
@@ -462,7 +462,7 @@ char *acpi_ut_get_mutex_name(u32 mutex_id) | |||
462 | 462 | ||
463 | /* Names for Notify() values, used for debug output */ | 463 | /* Names for Notify() values, used for debug output */ |
464 | 464 | ||
465 | static const char *acpi_gbl_notify_value_names[ACPI_NOTIFY_MAX + 1] = { | 465 | static const char *acpi_gbl_generic_notify[ACPI_NOTIFY_MAX + 1] = { |
466 | /* 00 */ "Bus Check", | 466 | /* 00 */ "Bus Check", |
467 | /* 01 */ "Device Check", | 467 | /* 01 */ "Device Check", |
468 | /* 02 */ "Device Wake", | 468 | /* 02 */ "Device Wake", |
@@ -473,23 +473,75 @@ static const char *acpi_gbl_notify_value_names[ACPI_NOTIFY_MAX + 1] = { | |||
473 | /* 07 */ "Power Fault", | 473 | /* 07 */ "Power Fault", |
474 | /* 08 */ "Capabilities Check", | 474 | /* 08 */ "Capabilities Check", |
475 | /* 09 */ "Device PLD Check", | 475 | /* 09 */ "Device PLD Check", |
476 | /* 10 */ "Reserved", | 476 | /* 0A */ "Reserved", |
477 | /* 11 */ "System Locality Update", | 477 | /* 0B */ "System Locality Update", |
478 | /* 12 */ "Shutdown Request" | 478 | /* 0C */ "Shutdown Request" |
479 | }; | 479 | }; |
480 | 480 | ||
481 | const char *acpi_ut_get_notify_name(u32 notify_value) | 481 | static const char *acpi_gbl_device_notify[4] = { |
482 | /* 80 */ "Status Change", | ||
483 | /* 81 */ "Information Change", | ||
484 | /* 82 */ "Device-Specific Change", | ||
485 | /* 83 */ "Device-Specific Change" | ||
486 | }; | ||
487 | |||
488 | static const char *acpi_gbl_processor_notify[4] = { | ||
489 | /* 80 */ "Performance Capability Change", | ||
490 | /* 81 */ "C-State Change", | ||
491 | /* 82 */ "Throttling Capability Change", | ||
492 | /* 83 */ "Device-Specific Change" | ||
493 | }; | ||
494 | |||
495 | static const char *acpi_gbl_thermal_notify[4] = { | ||
496 | /* 80 */ "Thermal Status Change", | ||
497 | /* 81 */ "Thermal Trip Point Change", | ||
498 | /* 82 */ "Thermal Device List Change", | ||
499 | /* 83 */ "Thermal Relationship Change" | ||
500 | }; | ||
501 | |||
502 | const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type) | ||
482 | { | 503 | { |
483 | 504 | ||
505 | /* 00 - 0C are common to all object types */ | ||
506 | |||
484 | if (notify_value <= ACPI_NOTIFY_MAX) { | 507 | if (notify_value <= ACPI_NOTIFY_MAX) { |
485 | return (acpi_gbl_notify_value_names[notify_value]); | 508 | return (acpi_gbl_generic_notify[notify_value]); |
486 | } else if (notify_value <= ACPI_MAX_SYS_NOTIFY) { | 509 | } |
510 | |||
511 | /* 0D - 7F are reserved */ | ||
512 | |||
513 | if (notify_value <= ACPI_MAX_SYS_NOTIFY) { | ||
487 | return ("Reserved"); | 514 | return ("Reserved"); |
488 | } else if (notify_value <= ACPI_MAX_DEVICE_SPECIFIC_NOTIFY) { | ||
489 | return ("Device Specific"); | ||
490 | } else { | ||
491 | return ("Hardware Specific"); | ||
492 | } | 515 | } |
516 | |||
517 | /* 80 - 83 are per-object-type */ | ||
518 | |||
519 | if (notify_value <= 0x83) { | ||
520 | switch (type) { | ||
521 | case ACPI_TYPE_ANY: | ||
522 | case ACPI_TYPE_DEVICE: | ||
523 | return (acpi_gbl_device_notify[notify_value - 0x80]); | ||
524 | |||
525 | case ACPI_TYPE_PROCESSOR: | ||
526 | return (acpi_gbl_processor_notify[notify_value - 0x80]); | ||
527 | |||
528 | case ACPI_TYPE_THERMAL: | ||
529 | return (acpi_gbl_thermal_notify[notify_value - 0x80]); | ||
530 | |||
531 | default: | ||
532 | return ("Target object type does not support notifies"); | ||
533 | } | ||
534 | } | ||
535 | |||
536 | /* 84 - BF are device-specific */ | ||
537 | |||
538 | if (notify_value <= ACPI_MAX_DEVICE_SPECIFIC_NOTIFY) { | ||
539 | return ("Device-Specific"); | ||
540 | } | ||
541 | |||
542 | /* C0 and above are hardware-specific */ | ||
543 | |||
544 | return ("Hardware-Specific"); | ||
493 | } | 545 | } |
494 | #endif | 546 | #endif |
495 | 547 | ||
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index f3abeae9d2f8..d69be3cb3fae 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c | |||
@@ -55,28 +55,7 @@ ACPI_MODULE_NAME("utglobal") | |||
55 | * Static global variable initialization. | 55 | * Static global variable initialization. |
56 | * | 56 | * |
57 | ******************************************************************************/ | 57 | ******************************************************************************/ |
58 | /* Debug output control masks */ | ||
59 | u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT; | ||
60 | |||
61 | u32 acpi_dbg_layer = 0; | ||
62 | |||
63 | /* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */ | ||
64 | |||
65 | struct acpi_table_fadt acpi_gbl_FADT; | ||
66 | u32 acpi_gbl_trace_flags; | ||
67 | acpi_name acpi_gbl_trace_method_name; | ||
68 | u8 acpi_gbl_system_awake_and_running; | ||
69 | u32 acpi_current_gpe_count; | ||
70 | |||
71 | /* | ||
72 | * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning | ||
73 | * that the ACPI hardware is no longer required. A flag in the FADT indicates | ||
74 | * a reduced HW machine, and that flag is duplicated here for convenience. | ||
75 | */ | ||
76 | u8 acpi_gbl_reduced_hardware; | ||
77 | |||
78 | /* Various state name strings */ | 58 | /* Various state name strings */ |
79 | |||
80 | const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = { | 59 | const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = { |
81 | "\\_S0_", | 60 | "\\_S0_", |
82 | "\\_S1_", | 61 | "\\_S1_", |
@@ -337,7 +316,6 @@ acpi_status acpi_ut_init_globals(void) | |||
337 | acpi_gbl_acpi_hardware_present = TRUE; | 316 | acpi_gbl_acpi_hardware_present = TRUE; |
338 | acpi_gbl_last_owner_id_index = 0; | 317 | acpi_gbl_last_owner_id_index = 0; |
339 | acpi_gbl_next_owner_id_offset = 0; | 318 | acpi_gbl_next_owner_id_offset = 0; |
340 | acpi_gbl_trace_method_name = 0; | ||
341 | acpi_gbl_trace_dbg_level = 0; | 319 | acpi_gbl_trace_dbg_level = 0; |
342 | acpi_gbl_trace_dbg_layer = 0; | 320 | acpi_gbl_trace_dbg_layer = 0; |
343 | acpi_gbl_debugger_configuration = DEBUGGER_THREADING; | 321 | acpi_gbl_debugger_configuration = DEBUGGER_THREADING; |
@@ -377,9 +355,7 @@ acpi_status acpi_ut_init_globals(void) | |||
377 | acpi_gbl_disable_mem_tracking = FALSE; | 355 | acpi_gbl_disable_mem_tracking = FALSE; |
378 | #endif | 356 | #endif |
379 | 357 | ||
380 | #ifdef ACPI_DEBUGGER | 358 | ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = FALSE); |
381 | acpi_gbl_db_terminate_threads = FALSE; | ||
382 | #endif | ||
383 | 359 | ||
384 | return_ACPI_STATUS(AE_OK); | 360 | return_ACPI_STATUS(AE_OK); |
385 | } | 361 | } |
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c index 77219336c7e0..6dc54b3c28b0 100644 --- a/drivers/acpi/acpica/utstring.c +++ b/drivers/acpi/acpica/utstring.c | |||
@@ -353,7 +353,7 @@ void acpi_ut_print_string(char *string, u16 max_length) | |||
353 | } | 353 | } |
354 | 354 | ||
355 | acpi_os_printf("\""); | 355 | acpi_os_printf("\""); |
356 | for (i = 0; string[i] && (i < max_length); i++) { | 356 | for (i = 0; (i < max_length) && string[i]; i++) { |
357 | 357 | ||
358 | /* Escape sequences */ | 358 | /* Escape sequences */ |
359 | 359 | ||
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index edd861102f1b..88ef77f3cf88 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c | |||
@@ -53,6 +53,7 @@ ACPI_MODULE_NAME("utxferror") | |||
53 | * This module is used for the in-kernel ACPICA as well as the ACPICA | 53 | * This module is used for the in-kernel ACPICA as well as the ACPICA |
54 | * tools/applications. | 54 | * tools/applications. |
55 | */ | 55 | */ |
56 | #ifndef ACPI_NO_ERROR_MESSAGES /* Entire module */ | ||
56 | /******************************************************************************* | 57 | /******************************************************************************* |
57 | * | 58 | * |
58 | * FUNCTION: acpi_error | 59 | * FUNCTION: acpi_error |
@@ -249,3 +250,4 @@ acpi_bios_warning(const char *module_name, | |||
249 | } | 250 | } |
250 | 251 | ||
251 | ACPI_EXPORT_SYMBOL(acpi_bios_warning) | 252 | ACPI_EXPORT_SYMBOL(acpi_bios_warning) |
253 | #endif /* ACPI_NO_ERROR_MESSAGES */ | ||
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index 1be6f5564485..a095d4f858da 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c | |||
@@ -202,7 +202,7 @@ static void check_vendor_extension(u64 paddr, | |||
202 | 202 | ||
203 | if (!offset) | 203 | if (!offset) |
204 | return; | 204 | return; |
205 | v = acpi_os_map_memory(paddr + offset, sizeof(*v)); | 205 | v = acpi_os_map_iomem(paddr + offset, sizeof(*v)); |
206 | if (!v) | 206 | if (!v) |
207 | return; | 207 | return; |
208 | sbdf = v->pcie_sbdf; | 208 | sbdf = v->pcie_sbdf; |
@@ -210,7 +210,7 @@ static void check_vendor_extension(u64 paddr, | |||
210 | sbdf >> 24, (sbdf >> 16) & 0xff, | 210 | sbdf >> 24, (sbdf >> 16) & 0xff, |
211 | (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7, | 211 | (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7, |
212 | v->vendor_id, v->device_id, v->rev_id); | 212 | v->vendor_id, v->device_id, v->rev_id); |
213 | acpi_os_unmap_memory(v, sizeof(*v)); | 213 | acpi_os_unmap_iomem(v, sizeof(*v)); |
214 | } | 214 | } |
215 | 215 | ||
216 | static void *einj_get_parameter_address(void) | 216 | static void *einj_get_parameter_address(void) |
@@ -236,7 +236,7 @@ static void *einj_get_parameter_address(void) | |||
236 | if (pa_v5) { | 236 | if (pa_v5) { |
237 | struct set_error_type_with_address *v5param; | 237 | struct set_error_type_with_address *v5param; |
238 | 238 | ||
239 | v5param = acpi_os_map_memory(pa_v5, sizeof(*v5param)); | 239 | v5param = acpi_os_map_iomem(pa_v5, sizeof(*v5param)); |
240 | if (v5param) { | 240 | if (v5param) { |
241 | acpi5 = 1; | 241 | acpi5 = 1; |
242 | check_vendor_extension(pa_v5, v5param); | 242 | check_vendor_extension(pa_v5, v5param); |
@@ -246,11 +246,11 @@ static void *einj_get_parameter_address(void) | |||
246 | if (param_extension && pa_v4) { | 246 | if (param_extension && pa_v4) { |
247 | struct einj_parameter *v4param; | 247 | struct einj_parameter *v4param; |
248 | 248 | ||
249 | v4param = acpi_os_map_memory(pa_v4, sizeof(*v4param)); | 249 | v4param = acpi_os_map_iomem(pa_v4, sizeof(*v4param)); |
250 | if (!v4param) | 250 | if (!v4param) |
251 | return NULL; | 251 | return NULL; |
252 | if (v4param->reserved1 || v4param->reserved2) { | 252 | if (v4param->reserved1 || v4param->reserved2) { |
253 | acpi_os_unmap_memory(v4param, sizeof(*v4param)); | 253 | acpi_os_unmap_iomem(v4param, sizeof(*v4param)); |
254 | return NULL; | 254 | return NULL; |
255 | } | 255 | } |
256 | return v4param; | 256 | return v4param; |
@@ -794,7 +794,7 @@ err_unmap: | |||
794 | sizeof(struct set_error_type_with_address) : | 794 | sizeof(struct set_error_type_with_address) : |
795 | sizeof(struct einj_parameter); | 795 | sizeof(struct einj_parameter); |
796 | 796 | ||
797 | acpi_os_unmap_memory(einj_param, size); | 797 | acpi_os_unmap_iomem(einj_param, size); |
798 | } | 798 | } |
799 | apei_exec_post_unmap_gars(&ctx); | 799 | apei_exec_post_unmap_gars(&ctx); |
800 | err_release: | 800 | err_release: |
@@ -816,7 +816,7 @@ static void __exit einj_exit(void) | |||
816 | sizeof(struct set_error_type_with_address) : | 816 | sizeof(struct set_error_type_with_address) : |
817 | sizeof(struct einj_parameter); | 817 | sizeof(struct einj_parameter); |
818 | 818 | ||
819 | acpi_os_unmap_memory(einj_param, size); | 819 | acpi_os_unmap_iomem(einj_param, size); |
820 | } | 820 | } |
821 | einj_exec_ctx_init(&ctx); | 821 | einj_exec_ctx_init(&ctx); |
822 | apei_exec_post_unmap_gars(&ctx); | 822 | apei_exec_post_unmap_gars(&ctx); |
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 9a2c63b20050..e48fc98e71c4 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -36,6 +36,12 @@ | |||
36 | #include <linux/suspend.h> | 36 | #include <linux/suspend.h> |
37 | #include <asm/unaligned.h> | 37 | #include <asm/unaligned.h> |
38 | 38 | ||
39 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
40 | #include <linux/proc_fs.h> | ||
41 | #include <linux/seq_file.h> | ||
42 | #include <asm/uaccess.h> | ||
43 | #endif | ||
44 | |||
39 | #include <linux/acpi.h> | 45 | #include <linux/acpi.h> |
40 | #include <linux/power_supply.h> | 46 | #include <linux/power_supply.h> |
41 | 47 | ||
@@ -50,6 +56,10 @@ | |||
50 | /* Battery power unit: 0 means mW, 1 means mA */ | 56 | /* Battery power unit: 0 means mW, 1 means mA */ |
51 | #define ACPI_BATTERY_POWER_UNIT_MA 1 | 57 | #define ACPI_BATTERY_POWER_UNIT_MA 1 |
52 | 58 | ||
59 | #define ACPI_BATTERY_STATE_DISCHARGING 0x1 | ||
60 | #define ACPI_BATTERY_STATE_CHARGING 0x2 | ||
61 | #define ACPI_BATTERY_STATE_CRITICAL 0x4 | ||
62 | |||
53 | #define _COMPONENT ACPI_BATTERY_COMPONENT | 63 | #define _COMPONENT ACPI_BATTERY_COMPONENT |
54 | 64 | ||
55 | ACPI_MODULE_NAME("battery"); | 65 | ACPI_MODULE_NAME("battery"); |
@@ -64,6 +74,19 @@ static unsigned int cache_time = 1000; | |||
64 | module_param(cache_time, uint, 0644); | 74 | module_param(cache_time, uint, 0644); |
65 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); | 75 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); |
66 | 76 | ||
77 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
78 | extern struct proc_dir_entry *acpi_lock_battery_dir(void); | ||
79 | extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir); | ||
80 | |||
81 | enum acpi_battery_files { | ||
82 | info_tag = 0, | ||
83 | state_tag, | ||
84 | alarm_tag, | ||
85 | ACPI_BATTERY_NUMFILES, | ||
86 | }; | ||
87 | |||
88 | #endif | ||
89 | |||
67 | static const struct acpi_device_id battery_device_ids[] = { | 90 | static const struct acpi_device_id battery_device_ids[] = { |
68 | {"PNP0C0A", 0}, | 91 | {"PNP0C0A", 0}, |
69 | {"", 0}, | 92 | {"", 0}, |
@@ -150,7 +173,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery); | |||
150 | 173 | ||
151 | static int acpi_battery_is_charged(struct acpi_battery *battery) | 174 | static int acpi_battery_is_charged(struct acpi_battery *battery) |
152 | { | 175 | { |
153 | /* either charging or discharging */ | 176 | /* charging, discharging or critically low */ |
154 | if (battery->state != 0) | 177 | if (battery->state != 0) |
155 | return 0; | 178 | return 0; |
156 | 179 | ||
@@ -185,9 +208,9 @@ static int acpi_battery_get_property(struct power_supply *psy, | |||
185 | return -ENODEV; | 208 | return -ENODEV; |
186 | switch (psp) { | 209 | switch (psp) { |
187 | case POWER_SUPPLY_PROP_STATUS: | 210 | case POWER_SUPPLY_PROP_STATUS: |
188 | if (battery->state & 0x01) | 211 | if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) |
189 | val->intval = POWER_SUPPLY_STATUS_DISCHARGING; | 212 | val->intval = POWER_SUPPLY_STATUS_DISCHARGING; |
190 | else if (battery->state & 0x02) | 213 | else if (battery->state & ACPI_BATTERY_STATE_CHARGING) |
191 | val->intval = POWER_SUPPLY_STATUS_CHARGING; | 214 | val->intval = POWER_SUPPLY_STATUS_CHARGING; |
192 | else if (acpi_battery_is_charged(battery)) | 215 | else if (acpi_battery_is_charged(battery)) |
193 | val->intval = POWER_SUPPLY_STATUS_FULL; | 216 | val->intval = POWER_SUPPLY_STATUS_FULL; |
@@ -250,6 +273,17 @@ static int acpi_battery_get_property(struct power_supply *psy, | |||
250 | else | 273 | else |
251 | val->intval = 0; | 274 | val->intval = 0; |
252 | break; | 275 | break; |
276 | case POWER_SUPPLY_PROP_CAPACITY_LEVEL: | ||
277 | if (battery->state & ACPI_BATTERY_STATE_CRITICAL) | ||
278 | val->intval = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; | ||
279 | else if (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) && | ||
280 | (battery->capacity_now <= battery->alarm)) | ||
281 | val->intval = POWER_SUPPLY_CAPACITY_LEVEL_LOW; | ||
282 | else if (acpi_battery_is_charged(battery)) | ||
283 | val->intval = POWER_SUPPLY_CAPACITY_LEVEL_FULL; | ||
284 | else | ||
285 | val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; | ||
286 | break; | ||
253 | case POWER_SUPPLY_PROP_MODEL_NAME: | 287 | case POWER_SUPPLY_PROP_MODEL_NAME: |
254 | val->strval = battery->model_number; | 288 | val->strval = battery->model_number; |
255 | break; | 289 | break; |
@@ -277,6 +311,7 @@ static enum power_supply_property charge_battery_props[] = { | |||
277 | POWER_SUPPLY_PROP_CHARGE_FULL, | 311 | POWER_SUPPLY_PROP_CHARGE_FULL, |
278 | POWER_SUPPLY_PROP_CHARGE_NOW, | 312 | POWER_SUPPLY_PROP_CHARGE_NOW, |
279 | POWER_SUPPLY_PROP_CAPACITY, | 313 | POWER_SUPPLY_PROP_CAPACITY, |
314 | POWER_SUPPLY_PROP_CAPACITY_LEVEL, | ||
280 | POWER_SUPPLY_PROP_MODEL_NAME, | 315 | POWER_SUPPLY_PROP_MODEL_NAME, |
281 | POWER_SUPPLY_PROP_MANUFACTURER, | 316 | POWER_SUPPLY_PROP_MANUFACTURER, |
282 | POWER_SUPPLY_PROP_SERIAL_NUMBER, | 317 | POWER_SUPPLY_PROP_SERIAL_NUMBER, |
@@ -294,11 +329,20 @@ static enum power_supply_property energy_battery_props[] = { | |||
294 | POWER_SUPPLY_PROP_ENERGY_FULL, | 329 | POWER_SUPPLY_PROP_ENERGY_FULL, |
295 | POWER_SUPPLY_PROP_ENERGY_NOW, | 330 | POWER_SUPPLY_PROP_ENERGY_NOW, |
296 | POWER_SUPPLY_PROP_CAPACITY, | 331 | POWER_SUPPLY_PROP_CAPACITY, |
332 | POWER_SUPPLY_PROP_CAPACITY_LEVEL, | ||
297 | POWER_SUPPLY_PROP_MODEL_NAME, | 333 | POWER_SUPPLY_PROP_MODEL_NAME, |
298 | POWER_SUPPLY_PROP_MANUFACTURER, | 334 | POWER_SUPPLY_PROP_MANUFACTURER, |
299 | POWER_SUPPLY_PROP_SERIAL_NUMBER, | 335 | POWER_SUPPLY_PROP_SERIAL_NUMBER, |
300 | }; | 336 | }; |
301 | 337 | ||
338 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
339 | inline char *acpi_battery_units(struct acpi_battery *battery) | ||
340 | { | ||
341 | return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ? | ||
342 | "mA" : "mW"; | ||
343 | } | ||
344 | #endif | ||
345 | |||
302 | /* -------------------------------------------------------------------------- | 346 | /* -------------------------------------------------------------------------- |
303 | Battery Management | 347 | Battery Management |
304 | -------------------------------------------------------------------------- */ | 348 | -------------------------------------------------------------------------- */ |
@@ -578,7 +622,8 @@ static int sysfs_add_battery(struct acpi_battery *battery) | |||
578 | battery->bat.type = POWER_SUPPLY_TYPE_BATTERY; | 622 | battery->bat.type = POWER_SUPPLY_TYPE_BATTERY; |
579 | battery->bat.get_property = acpi_battery_get_property; | 623 | battery->bat.get_property = acpi_battery_get_property; |
580 | 624 | ||
581 | result = power_supply_register(&battery->device->dev, &battery->bat); | 625 | result = power_supply_register_no_ws(&battery->device->dev, &battery->bat); |
626 | |||
582 | if (result) | 627 | if (result) |
583 | return result; | 628 | return result; |
584 | return device_create_file(battery->bat.dev, &alarm_attr); | 629 | return device_create_file(battery->bat.dev, &alarm_attr); |
@@ -669,7 +714,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery) | |||
669 | } | 714 | } |
670 | } | 715 | } |
671 | 716 | ||
672 | static int acpi_battery_update(struct acpi_battery *battery) | 717 | static int acpi_battery_update(struct acpi_battery *battery, bool resume) |
673 | { | 718 | { |
674 | int result, old_present = acpi_battery_present(battery); | 719 | int result, old_present = acpi_battery_present(battery); |
675 | result = acpi_battery_get_status(battery); | 720 | result = acpi_battery_get_status(battery); |
@@ -680,6 +725,10 @@ static int acpi_battery_update(struct acpi_battery *battery) | |||
680 | battery->update_time = 0; | 725 | battery->update_time = 0; |
681 | return 0; | 726 | return 0; |
682 | } | 727 | } |
728 | |||
729 | if (resume) | ||
730 | return 0; | ||
731 | |||
683 | if (!battery->update_time || | 732 | if (!battery->update_time || |
684 | old_present != acpi_battery_present(battery)) { | 733 | old_present != acpi_battery_present(battery)) { |
685 | result = acpi_battery_get_info(battery); | 734 | result = acpi_battery_get_info(battery); |
@@ -693,7 +742,19 @@ static int acpi_battery_update(struct acpi_battery *battery) | |||
693 | return result; | 742 | return result; |
694 | } | 743 | } |
695 | result = acpi_battery_get_state(battery); | 744 | result = acpi_battery_get_state(battery); |
745 | if (result) | ||
746 | return result; | ||
696 | acpi_battery_quirks(battery); | 747 | acpi_battery_quirks(battery); |
748 | |||
749 | /* | ||
750 | * Wake up the system if the battery is critically low | ||
751 | * or lower than the alarm level | ||
752 | */ | ||
753 | if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) || | ||
754 | (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) && | ||
755 | (battery->capacity_now <= battery->alarm))) | ||
756 | pm_wakeup_event(&battery->device->dev, 0); | ||
757 | |||
697 | return result; | 758 | return result; |
698 | } | 759 | } |
699 | 760 | ||
@@ -717,6 +778,279 @@ static void acpi_battery_refresh(struct acpi_battery *battery) | |||
717 | } | 778 | } |
718 | 779 | ||
719 | /* -------------------------------------------------------------------------- | 780 | /* -------------------------------------------------------------------------- |
781 | FS Interface (/proc) | ||
782 | -------------------------------------------------------------------------- */ | ||
783 | |||
784 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
785 | static struct proc_dir_entry *acpi_battery_dir; | ||
786 | |||
787 | static int acpi_battery_print_info(struct seq_file *seq, int result) | ||
788 | { | ||
789 | struct acpi_battery *battery = seq->private; | ||
790 | |||
791 | if (result) | ||
792 | goto end; | ||
793 | |||
794 | seq_printf(seq, "present: %s\n", | ||
795 | acpi_battery_present(battery) ? "yes" : "no"); | ||
796 | if (!acpi_battery_present(battery)) | ||
797 | goto end; | ||
798 | if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) | ||
799 | seq_printf(seq, "design capacity: unknown\n"); | ||
800 | else | ||
801 | seq_printf(seq, "design capacity: %d %sh\n", | ||
802 | battery->design_capacity, | ||
803 | acpi_battery_units(battery)); | ||
804 | |||
805 | if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN) | ||
806 | seq_printf(seq, "last full capacity: unknown\n"); | ||
807 | else | ||
808 | seq_printf(seq, "last full capacity: %d %sh\n", | ||
809 | battery->full_charge_capacity, | ||
810 | acpi_battery_units(battery)); | ||
811 | |||
812 | seq_printf(seq, "battery technology: %srechargeable\n", | ||
813 | (!battery->technology)?"non-":""); | ||
814 | |||
815 | if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN) | ||
816 | seq_printf(seq, "design voltage: unknown\n"); | ||
817 | else | ||
818 | seq_printf(seq, "design voltage: %d mV\n", | ||
819 | battery->design_voltage); | ||
820 | seq_printf(seq, "design capacity warning: %d %sh\n", | ||
821 | battery->design_capacity_warning, | ||
822 | acpi_battery_units(battery)); | ||
823 | seq_printf(seq, "design capacity low: %d %sh\n", | ||
824 | battery->design_capacity_low, | ||
825 | acpi_battery_units(battery)); | ||
826 | seq_printf(seq, "cycle count: %i\n", battery->cycle_count); | ||
827 | seq_printf(seq, "capacity granularity 1: %d %sh\n", | ||
828 | battery->capacity_granularity_1, | ||
829 | acpi_battery_units(battery)); | ||
830 | seq_printf(seq, "capacity granularity 2: %d %sh\n", | ||
831 | battery->capacity_granularity_2, | ||
832 | acpi_battery_units(battery)); | ||
833 | seq_printf(seq, "model number: %s\n", battery->model_number); | ||
834 | seq_printf(seq, "serial number: %s\n", battery->serial_number); | ||
835 | seq_printf(seq, "battery type: %s\n", battery->type); | ||
836 | seq_printf(seq, "OEM info: %s\n", battery->oem_info); | ||
837 | end: | ||
838 | if (result) | ||
839 | seq_printf(seq, "ERROR: Unable to read battery info\n"); | ||
840 | return result; | ||
841 | } | ||
842 | |||
843 | static int acpi_battery_print_state(struct seq_file *seq, int result) | ||
844 | { | ||
845 | struct acpi_battery *battery = seq->private; | ||
846 | |||
847 | if (result) | ||
848 | goto end; | ||
849 | |||
850 | seq_printf(seq, "present: %s\n", | ||
851 | acpi_battery_present(battery) ? "yes" : "no"); | ||
852 | if (!acpi_battery_present(battery)) | ||
853 | goto end; | ||
854 | |||
855 | seq_printf(seq, "capacity state: %s\n", | ||
856 | (battery->state & 0x04) ? "critical" : "ok"); | ||
857 | if ((battery->state & 0x01) && (battery->state & 0x02)) | ||
858 | seq_printf(seq, | ||
859 | "charging state: charging/discharging\n"); | ||
860 | else if (battery->state & 0x01) | ||
861 | seq_printf(seq, "charging state: discharging\n"); | ||
862 | else if (battery->state & 0x02) | ||
863 | seq_printf(seq, "charging state: charging\n"); | ||
864 | else | ||
865 | seq_printf(seq, "charging state: charged\n"); | ||
866 | |||
867 | if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) | ||
868 | seq_printf(seq, "present rate: unknown\n"); | ||
869 | else | ||
870 | seq_printf(seq, "present rate: %d %s\n", | ||
871 | battery->rate_now, acpi_battery_units(battery)); | ||
872 | |||
873 | if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN) | ||
874 | seq_printf(seq, "remaining capacity: unknown\n"); | ||
875 | else | ||
876 | seq_printf(seq, "remaining capacity: %d %sh\n", | ||
877 | battery->capacity_now, acpi_battery_units(battery)); | ||
878 | if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN) | ||
879 | seq_printf(seq, "present voltage: unknown\n"); | ||
880 | else | ||
881 | seq_printf(seq, "present voltage: %d mV\n", | ||
882 | battery->voltage_now); | ||
883 | end: | ||
884 | if (result) | ||
885 | seq_printf(seq, "ERROR: Unable to read battery state\n"); | ||
886 | |||
887 | return result; | ||
888 | } | ||
889 | |||
890 | static int acpi_battery_print_alarm(struct seq_file *seq, int result) | ||
891 | { | ||
892 | struct acpi_battery *battery = seq->private; | ||
893 | |||
894 | if (result) | ||
895 | goto end; | ||
896 | |||
897 | if (!acpi_battery_present(battery)) { | ||
898 | seq_printf(seq, "present: no\n"); | ||
899 | goto end; | ||
900 | } | ||
901 | seq_printf(seq, "alarm: "); | ||
902 | if (!battery->alarm) | ||
903 | seq_printf(seq, "unsupported\n"); | ||
904 | else | ||
905 | seq_printf(seq, "%u %sh\n", battery->alarm, | ||
906 | acpi_battery_units(battery)); | ||
907 | end: | ||
908 | if (result) | ||
909 | seq_printf(seq, "ERROR: Unable to read battery alarm\n"); | ||
910 | return result; | ||
911 | } | ||
912 | |||
913 | static ssize_t acpi_battery_write_alarm(struct file *file, | ||
914 | const char __user * buffer, | ||
915 | size_t count, loff_t * ppos) | ||
916 | { | ||
917 | int result = 0; | ||
918 | char alarm_string[12] = { '\0' }; | ||
919 | struct seq_file *m = file->private_data; | ||
920 | struct acpi_battery *battery = m->private; | ||
921 | |||
922 | if (!battery || (count > sizeof(alarm_string) - 1)) | ||
923 | return -EINVAL; | ||
924 | if (!acpi_battery_present(battery)) { | ||
925 | result = -ENODEV; | ||
926 | goto end; | ||
927 | } | ||
928 | if (copy_from_user(alarm_string, buffer, count)) { | ||
929 | result = -EFAULT; | ||
930 | goto end; | ||
931 | } | ||
932 | alarm_string[count] = '\0'; | ||
933 | battery->alarm = simple_strtol(alarm_string, NULL, 0); | ||
934 | result = acpi_battery_set_alarm(battery); | ||
935 | end: | ||
936 | if (!result) | ||
937 | return count; | ||
938 | return result; | ||
939 | } | ||
940 | |||
941 | typedef int(*print_func)(struct seq_file *seq, int result); | ||
942 | |||
943 | static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = { | ||
944 | acpi_battery_print_info, | ||
945 | acpi_battery_print_state, | ||
946 | acpi_battery_print_alarm, | ||
947 | }; | ||
948 | |||
949 | static int acpi_battery_read(int fid, struct seq_file *seq) | ||
950 | { | ||
951 | struct acpi_battery *battery = seq->private; | ||
952 | int result = acpi_battery_update(battery, false); | ||
953 | return acpi_print_funcs[fid](seq, result); | ||
954 | } | ||
955 | |||
956 | #define DECLARE_FILE_FUNCTIONS(_name) \ | ||
957 | static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \ | ||
958 | { \ | ||
959 | return acpi_battery_read(_name##_tag, seq); \ | ||
960 | } \ | ||
961 | static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \ | ||
962 | { \ | ||
963 | return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \ | ||
964 | } | ||
965 | |||
966 | DECLARE_FILE_FUNCTIONS(info); | ||
967 | DECLARE_FILE_FUNCTIONS(state); | ||
968 | DECLARE_FILE_FUNCTIONS(alarm); | ||
969 | |||
970 | #undef DECLARE_FILE_FUNCTIONS | ||
971 | |||
972 | #define FILE_DESCRIPTION_RO(_name) \ | ||
973 | { \ | ||
974 | .name = __stringify(_name), \ | ||
975 | .mode = S_IRUGO, \ | ||
976 | .ops = { \ | ||
977 | .open = acpi_battery_##_name##_open_fs, \ | ||
978 | .read = seq_read, \ | ||
979 | .llseek = seq_lseek, \ | ||
980 | .release = single_release, \ | ||
981 | .owner = THIS_MODULE, \ | ||
982 | }, \ | ||
983 | } | ||
984 | |||
985 | #define FILE_DESCRIPTION_RW(_name) \ | ||
986 | { \ | ||
987 | .name = __stringify(_name), \ | ||
988 | .mode = S_IFREG | S_IRUGO | S_IWUSR, \ | ||
989 | .ops = { \ | ||
990 | .open = acpi_battery_##_name##_open_fs, \ | ||
991 | .read = seq_read, \ | ||
992 | .llseek = seq_lseek, \ | ||
993 | .write = acpi_battery_write_##_name, \ | ||
994 | .release = single_release, \ | ||
995 | .owner = THIS_MODULE, \ | ||
996 | }, \ | ||
997 | } | ||
998 | |||
999 | static const struct battery_file { | ||
1000 | struct file_operations ops; | ||
1001 | umode_t mode; | ||
1002 | const char *name; | ||
1003 | } acpi_battery_file[] = { | ||
1004 | FILE_DESCRIPTION_RO(info), | ||
1005 | FILE_DESCRIPTION_RO(state), | ||
1006 | FILE_DESCRIPTION_RW(alarm), | ||
1007 | }; | ||
1008 | |||
1009 | #undef FILE_DESCRIPTION_RO | ||
1010 | #undef FILE_DESCRIPTION_RW | ||
1011 | |||
1012 | static int acpi_battery_add_fs(struct acpi_device *device) | ||
1013 | { | ||
1014 | struct proc_dir_entry *entry = NULL; | ||
1015 | int i; | ||
1016 | |||
1017 | printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded," | ||
1018 | " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); | ||
1019 | if (!acpi_device_dir(device)) { | ||
1020 | acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), | ||
1021 | acpi_battery_dir); | ||
1022 | if (!acpi_device_dir(device)) | ||
1023 | return -ENODEV; | ||
1024 | } | ||
1025 | |||
1026 | for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) { | ||
1027 | entry = proc_create_data(acpi_battery_file[i].name, | ||
1028 | acpi_battery_file[i].mode, | ||
1029 | acpi_device_dir(device), | ||
1030 | &acpi_battery_file[i].ops, | ||
1031 | acpi_driver_data(device)); | ||
1032 | if (!entry) | ||
1033 | return -ENODEV; | ||
1034 | } | ||
1035 | return 0; | ||
1036 | } | ||
1037 | |||
1038 | static void acpi_battery_remove_fs(struct acpi_device *device) | ||
1039 | { | ||
1040 | int i; | ||
1041 | if (!acpi_device_dir(device)) | ||
1042 | return; | ||
1043 | for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) | ||
1044 | remove_proc_entry(acpi_battery_file[i].name, | ||
1045 | acpi_device_dir(device)); | ||
1046 | |||
1047 | remove_proc_entry(acpi_device_bid(device), acpi_battery_dir); | ||
1048 | acpi_device_dir(device) = NULL; | ||
1049 | } | ||
1050 | |||
1051 | #endif | ||
1052 | |||
1053 | /* -------------------------------------------------------------------------- | ||
720 | Driver Interface | 1054 | Driver Interface |
721 | -------------------------------------------------------------------------- */ | 1055 | -------------------------------------------------------------------------- */ |
722 | 1056 | ||
@@ -730,7 +1064,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event) | |||
730 | old = battery->bat.dev; | 1064 | old = battery->bat.dev; |
731 | if (event == ACPI_BATTERY_NOTIFY_INFO) | 1065 | if (event == ACPI_BATTERY_NOTIFY_INFO) |
732 | acpi_battery_refresh(battery); | 1066 | acpi_battery_refresh(battery); |
733 | acpi_battery_update(battery); | 1067 | acpi_battery_update(battery, false); |
734 | acpi_bus_generate_netlink_event(device->pnp.device_class, | 1068 | acpi_bus_generate_netlink_event(device->pnp.device_class, |
735 | dev_name(&device->dev), event, | 1069 | dev_name(&device->dev), event, |
736 | acpi_battery_present(battery)); | 1070 | acpi_battery_present(battery)); |
@@ -745,13 +1079,27 @@ static int battery_notify(struct notifier_block *nb, | |||
745 | { | 1079 | { |
746 | struct acpi_battery *battery = container_of(nb, struct acpi_battery, | 1080 | struct acpi_battery *battery = container_of(nb, struct acpi_battery, |
747 | pm_nb); | 1081 | pm_nb); |
1082 | int result; | ||
1083 | |||
748 | switch (mode) { | 1084 | switch (mode) { |
749 | case PM_POST_HIBERNATION: | 1085 | case PM_POST_HIBERNATION: |
750 | case PM_POST_SUSPEND: | 1086 | case PM_POST_SUSPEND: |
751 | if (battery->bat.dev) { | 1087 | if (!acpi_battery_present(battery)) |
752 | sysfs_remove_battery(battery); | 1088 | return 0; |
753 | sysfs_add_battery(battery); | 1089 | |
754 | } | 1090 | if (!battery->bat.dev) { |
1091 | result = acpi_battery_get_info(battery); | ||
1092 | if (result) | ||
1093 | return result; | ||
1094 | |||
1095 | result = sysfs_add_battery(battery); | ||
1096 | if (result) | ||
1097 | return result; | ||
1098 | } else | ||
1099 | acpi_battery_refresh(battery); | ||
1100 | |||
1101 | acpi_battery_init_alarm(battery); | ||
1102 | acpi_battery_get_state(battery); | ||
755 | break; | 1103 | break; |
756 | } | 1104 | } |
757 | 1105 | ||
@@ -787,9 +1135,18 @@ static int acpi_battery_add(struct acpi_device *device) | |||
787 | mutex_init(&battery->sysfs_lock); | 1135 | mutex_init(&battery->sysfs_lock); |
788 | if (acpi_has_method(battery->device->handle, "_BIX")) | 1136 | if (acpi_has_method(battery->device->handle, "_BIX")) |
789 | set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); | 1137 | set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); |
790 | result = acpi_battery_update(battery); | 1138 | result = acpi_battery_update(battery, false); |
791 | if (result) | 1139 | if (result) |
792 | goto fail; | 1140 | goto fail; |
1141 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
1142 | result = acpi_battery_add_fs(device); | ||
1143 | #endif | ||
1144 | if (result) { | ||
1145 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
1146 | acpi_battery_remove_fs(device); | ||
1147 | #endif | ||
1148 | goto fail; | ||
1149 | } | ||
793 | 1150 | ||
794 | printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", | 1151 | printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", |
795 | ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), | 1152 | ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), |
@@ -798,6 +1155,8 @@ static int acpi_battery_add(struct acpi_device *device) | |||
798 | battery->pm_nb.notifier_call = battery_notify; | 1155 | battery->pm_nb.notifier_call = battery_notify; |
799 | register_pm_notifier(&battery->pm_nb); | 1156 | register_pm_notifier(&battery->pm_nb); |
800 | 1157 | ||
1158 | device_init_wakeup(&device->dev, 1); | ||
1159 | |||
801 | return result; | 1160 | return result; |
802 | 1161 | ||
803 | fail: | 1162 | fail: |
@@ -814,8 +1173,12 @@ static int acpi_battery_remove(struct acpi_device *device) | |||
814 | 1173 | ||
815 | if (!device || !acpi_driver_data(device)) | 1174 | if (!device || !acpi_driver_data(device)) |
816 | return -EINVAL; | 1175 | return -EINVAL; |
1176 | device_init_wakeup(&device->dev, 0); | ||
817 | battery = acpi_driver_data(device); | 1177 | battery = acpi_driver_data(device); |
818 | unregister_pm_notifier(&battery->pm_nb); | 1178 | unregister_pm_notifier(&battery->pm_nb); |
1179 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
1180 | acpi_battery_remove_fs(device); | ||
1181 | #endif | ||
819 | sysfs_remove_battery(battery); | 1182 | sysfs_remove_battery(battery); |
820 | mutex_destroy(&battery->lock); | 1183 | mutex_destroy(&battery->lock); |
821 | mutex_destroy(&battery->sysfs_lock); | 1184 | mutex_destroy(&battery->sysfs_lock); |
@@ -837,7 +1200,7 @@ static int acpi_battery_resume(struct device *dev) | |||
837 | return -EINVAL; | 1200 | return -EINVAL; |
838 | 1201 | ||
839 | battery->update_time = 0; | 1202 | battery->update_time = 0; |
840 | acpi_battery_update(battery); | 1203 | acpi_battery_update(battery, true); |
841 | return 0; | 1204 | return 0; |
842 | } | 1205 | } |
843 | #else | 1206 | #else |
@@ -866,7 +1229,19 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) | |||
866 | 1229 | ||
867 | if (dmi_check_system(bat_dmi_table)) | 1230 | if (dmi_check_system(bat_dmi_table)) |
868 | battery_bix_broken_package = 1; | 1231 | battery_bix_broken_package = 1; |
869 | acpi_bus_register_driver(&acpi_battery_driver); | 1232 | |
1233 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
1234 | acpi_battery_dir = acpi_lock_battery_dir(); | ||
1235 | if (!acpi_battery_dir) | ||
1236 | return; | ||
1237 | #endif | ||
1238 | if (acpi_bus_register_driver(&acpi_battery_driver) < 0) { | ||
1239 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
1240 | acpi_unlock_battery_dir(acpi_battery_dir); | ||
1241 | #endif | ||
1242 | return; | ||
1243 | } | ||
1244 | return; | ||
870 | } | 1245 | } |
871 | 1246 | ||
872 | static int __init acpi_battery_init(void) | 1247 | static int __init acpi_battery_init(void) |
@@ -878,6 +1253,9 @@ static int __init acpi_battery_init(void) | |||
878 | static void __exit acpi_battery_exit(void) | 1253 | static void __exit acpi_battery_exit(void) |
879 | { | 1254 | { |
880 | acpi_bus_unregister_driver(&acpi_battery_driver); | 1255 | acpi_bus_unregister_driver(&acpi_battery_driver); |
1256 | #ifdef CONFIG_ACPI_PROCFS_POWER | ||
1257 | acpi_unlock_battery_dir(acpi_battery_dir); | ||
1258 | #endif | ||
881 | } | 1259 | } |
882 | 1260 | ||
883 | module_init(acpi_battery_init); | 1261 | module_init(acpi_battery_init); |
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index afec4526c48a..3d8413d02a97 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c | |||
@@ -314,6 +314,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
314 | DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), | 314 | DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), |
315 | }, | 315 | }, |
316 | }, | 316 | }, |
317 | { | ||
318 | .callback = dmi_disable_osi_win8, | ||
319 | .ident = "Dell Inspiron 7737", | ||
320 | .matches = { | ||
321 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
322 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"), | ||
323 | }, | ||
324 | }, | ||
317 | 325 | ||
318 | /* | 326 | /* |
319 | * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. | 327 | * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. |
@@ -374,6 +382,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
374 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"), | 382 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"), |
375 | }, | 383 | }, |
376 | }, | 384 | }, |
385 | /* | ||
386 | * Without this, this EEEpc exports a non-working WMI interface, with | ||
387 | * this it exports a working "good old" eeepc_laptop interface, fixing | ||
388 | * both brightness control, and rfkill not working. | ||
389 | */ | ||
390 | { | ||
391 | .callback = dmi_enable_osi_linux, | ||
392 | .ident = "Asus EEE PC 1015PX", | ||
393 | .matches = { | ||
394 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."), | ||
395 | DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"), | ||
396 | }, | ||
397 | }, | ||
377 | {} | 398 | {} |
378 | }; | 399 | }; |
379 | 400 | ||
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index cf925c4f36b7..c5bc8cfe09fa 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -52,6 +52,12 @@ struct proc_dir_entry *acpi_root_dir; | |||
52 | EXPORT_SYMBOL(acpi_root_dir); | 52 | EXPORT_SYMBOL(acpi_root_dir); |
53 | 53 | ||
54 | #ifdef CONFIG_X86 | 54 | #ifdef CONFIG_X86 |
55 | #ifdef CONFIG_ACPI_CUSTOM_DSDT | ||
56 | static inline int set_copy_dsdt(const struct dmi_system_id *id) | ||
57 | { | ||
58 | return 0; | ||
59 | } | ||
60 | #else | ||
55 | static int set_copy_dsdt(const struct dmi_system_id *id) | 61 | static int set_copy_dsdt(const struct dmi_system_id *id) |
56 | { | 62 | { |
57 | printk(KERN_NOTICE "%s detected - " | 63 | printk(KERN_NOTICE "%s detected - " |
@@ -59,6 +65,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id) | |||
59 | acpi_gbl_copy_dsdt_locally = 1; | 65 | acpi_gbl_copy_dsdt_locally = 1; |
60 | return 0; | 66 | return 0; |
61 | } | 67 | } |
68 | #endif | ||
62 | 69 | ||
63 | static struct dmi_system_id dsdt_dmi_table[] __initdata = { | 70 | static struct dmi_system_id dsdt_dmi_table[] __initdata = { |
64 | /* | 71 | /* |
@@ -132,6 +139,21 @@ void acpi_bus_private_data_handler(acpi_handle handle, | |||
132 | } | 139 | } |
133 | EXPORT_SYMBOL(acpi_bus_private_data_handler); | 140 | EXPORT_SYMBOL(acpi_bus_private_data_handler); |
134 | 141 | ||
142 | int acpi_bus_attach_private_data(acpi_handle handle, void *data) | ||
143 | { | ||
144 | acpi_status status; | ||
145 | |||
146 | status = acpi_attach_data(handle, | ||
147 | acpi_bus_private_data_handler, data); | ||
148 | if (ACPI_FAILURE(status)) { | ||
149 | acpi_handle_debug(handle, "Error attaching device data\n"); | ||
150 | return -ENODEV; | ||
151 | } | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | EXPORT_SYMBOL_GPL(acpi_bus_attach_private_data); | ||
156 | |||
135 | int acpi_bus_get_private_data(acpi_handle handle, void **data) | 157 | int acpi_bus_get_private_data(acpi_handle handle, void **data) |
136 | { | 158 | { |
137 | acpi_status status; | 159 | acpi_status status; |
@@ -140,15 +162,20 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data) | |||
140 | return -EINVAL; | 162 | return -EINVAL; |
141 | 163 | ||
142 | status = acpi_get_data(handle, acpi_bus_private_data_handler, data); | 164 | status = acpi_get_data(handle, acpi_bus_private_data_handler, data); |
143 | if (ACPI_FAILURE(status) || !*data) { | 165 | if (ACPI_FAILURE(status)) { |
144 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n", | 166 | acpi_handle_debug(handle, "No context for object\n"); |
145 | handle)); | ||
146 | return -ENODEV; | 167 | return -ENODEV; |
147 | } | 168 | } |
148 | 169 | ||
149 | return 0; | 170 | return 0; |
150 | } | 171 | } |
151 | EXPORT_SYMBOL(acpi_bus_get_private_data); | 172 | EXPORT_SYMBOL_GPL(acpi_bus_get_private_data); |
173 | |||
174 | void acpi_bus_detach_private_data(acpi_handle handle) | ||
175 | { | ||
176 | acpi_detach_data(handle, acpi_bus_private_data_handler); | ||
177 | } | ||
178 | EXPORT_SYMBOL_GPL(acpi_bus_detach_private_data); | ||
152 | 179 | ||
153 | void acpi_bus_no_hotplug(acpi_handle handle) | 180 | void acpi_bus_no_hotplug(acpi_handle handle) |
154 | { | 181 | { |
@@ -340,16 +367,18 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) | |||
340 | { | 367 | { |
341 | struct acpi_device *adev; | 368 | struct acpi_device *adev; |
342 | struct acpi_driver *driver; | 369 | struct acpi_driver *driver; |
343 | acpi_status status; | ||
344 | u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; | 370 | u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; |
371 | bool hotplug_event = false; | ||
345 | 372 | ||
346 | switch (type) { | 373 | switch (type) { |
347 | case ACPI_NOTIFY_BUS_CHECK: | 374 | case ACPI_NOTIFY_BUS_CHECK: |
348 | acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n"); | 375 | acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n"); |
376 | hotplug_event = true; | ||
349 | break; | 377 | break; |
350 | 378 | ||
351 | case ACPI_NOTIFY_DEVICE_CHECK: | 379 | case ACPI_NOTIFY_DEVICE_CHECK: |
352 | acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n"); | 380 | acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n"); |
381 | hotplug_event = true; | ||
353 | break; | 382 | break; |
354 | 383 | ||
355 | case ACPI_NOTIFY_DEVICE_WAKE: | 384 | case ACPI_NOTIFY_DEVICE_WAKE: |
@@ -358,6 +387,7 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) | |||
358 | 387 | ||
359 | case ACPI_NOTIFY_EJECT_REQUEST: | 388 | case ACPI_NOTIFY_EJECT_REQUEST: |
360 | acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n"); | 389 | acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n"); |
390 | hotplug_event = true; | ||
361 | break; | 391 | break; |
362 | 392 | ||
363 | case ACPI_NOTIFY_DEVICE_CHECK_LIGHT: | 393 | case ACPI_NOTIFY_DEVICE_CHECK_LIGHT: |
@@ -393,16 +423,9 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) | |||
393 | (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS)) | 423 | (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS)) |
394 | driver->ops.notify(adev, type); | 424 | driver->ops.notify(adev, type); |
395 | 425 | ||
396 | switch (type) { | 426 | if (hotplug_event && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type))) |
397 | case ACPI_NOTIFY_BUS_CHECK: | 427 | return; |
398 | case ACPI_NOTIFY_DEVICE_CHECK: | 428 | |
399 | case ACPI_NOTIFY_EJECT_REQUEST: | ||
400 | status = acpi_hotplug_schedule(adev, type); | ||
401 | if (ACPI_SUCCESS(status)) | ||
402 | return; | ||
403 | default: | ||
404 | break; | ||
405 | } | ||
406 | acpi_bus_put_acpi_device(adev); | 429 | acpi_bus_put_acpi_device(adev); |
407 | return; | 430 | return; |
408 | 431 | ||
@@ -466,6 +489,9 @@ void __init acpi_early_init(void) | |||
466 | 489 | ||
467 | printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION); | 490 | printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION); |
468 | 491 | ||
492 | /* It's safe to verify table checksums during late stage */ | ||
493 | acpi_gbl_verify_table_checksum = TRUE; | ||
494 | |||
469 | /* enable workarounds, unless strict ACPI spec. compliance */ | 495 | /* enable workarounds, unless strict ACPI spec. compliance */ |
470 | if (!acpi_strict) | 496 | if (!acpi_strict) |
471 | acpi_gbl_enable_interpreter_slack = TRUE; | 497 | acpi_gbl_enable_interpreter_slack = TRUE; |
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c new file mode 100644 index 000000000000..6c9ee68e46fb --- /dev/null +++ b/drivers/acpi/cm_sbs.c | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or (at | ||
7 | * your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
12 | * General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
17 | * | ||
18 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/acpi.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/proc_fs.h> | ||
27 | #include <linux/seq_file.h> | ||
28 | #include <acpi/acpi_bus.h> | ||
29 | #include <acpi/acpi_drivers.h> | ||
30 | |||
31 | #define PREFIX "ACPI: " | ||
32 | |||
33 | ACPI_MODULE_NAME("cm_sbs"); | ||
34 | #define ACPI_AC_CLASS "ac_adapter" | ||
35 | #define ACPI_BATTERY_CLASS "battery" | ||
36 | #define _COMPONENT ACPI_SBS_COMPONENT | ||
37 | static struct proc_dir_entry *acpi_ac_dir; | ||
38 | static struct proc_dir_entry *acpi_battery_dir; | ||
39 | |||
40 | static DEFINE_MUTEX(cm_sbs_mutex); | ||
41 | |||
42 | static int lock_ac_dir_cnt; | ||
43 | static int lock_battery_dir_cnt; | ||
44 | |||
45 | struct proc_dir_entry *acpi_lock_ac_dir(void) | ||
46 | { | ||
47 | mutex_lock(&cm_sbs_mutex); | ||
48 | if (!acpi_ac_dir) | ||
49 | acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir); | ||
50 | if (acpi_ac_dir) { | ||
51 | lock_ac_dir_cnt++; | ||
52 | } else { | ||
53 | printk(KERN_ERR PREFIX | ||
54 | "Cannot create %s\n", ACPI_AC_CLASS); | ||
55 | } | ||
56 | mutex_unlock(&cm_sbs_mutex); | ||
57 | return acpi_ac_dir; | ||
58 | } | ||
59 | EXPORT_SYMBOL(acpi_lock_ac_dir); | ||
60 | |||
61 | void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param) | ||
62 | { | ||
63 | mutex_lock(&cm_sbs_mutex); | ||
64 | if (acpi_ac_dir_param) | ||
65 | lock_ac_dir_cnt--; | ||
66 | if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) { | ||
67 | remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir); | ||
68 | acpi_ac_dir = NULL; | ||
69 | } | ||
70 | mutex_unlock(&cm_sbs_mutex); | ||
71 | } | ||
72 | EXPORT_SYMBOL(acpi_unlock_ac_dir); | ||
73 | |||
74 | struct proc_dir_entry *acpi_lock_battery_dir(void) | ||
75 | { | ||
76 | mutex_lock(&cm_sbs_mutex); | ||
77 | if (!acpi_battery_dir) { | ||
78 | acpi_battery_dir = | ||
79 | proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir); | ||
80 | } | ||
81 | if (acpi_battery_dir) { | ||
82 | lock_battery_dir_cnt++; | ||
83 | } else { | ||
84 | printk(KERN_ERR PREFIX | ||
85 | "Cannot create %s\n", ACPI_BATTERY_CLASS); | ||
86 | } | ||
87 | mutex_unlock(&cm_sbs_mutex); | ||
88 | return acpi_battery_dir; | ||
89 | } | ||
90 | EXPORT_SYMBOL(acpi_lock_battery_dir); | ||
91 | |||
92 | void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param) | ||
93 | { | ||
94 | mutex_lock(&cm_sbs_mutex); | ||
95 | if (acpi_battery_dir_param) | ||
96 | lock_battery_dir_cnt--; | ||
97 | if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param | ||
98 | && acpi_battery_dir) { | ||
99 | remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir); | ||
100 | acpi_battery_dir = NULL; | ||
101 | } | ||
102 | mutex_unlock(&cm_sbs_mutex); | ||
103 | return; | ||
104 | } | ||
105 | EXPORT_SYMBOL(acpi_unlock_battery_dir); | ||
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index 63119d09b354..76f7cff64594 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c | |||
@@ -41,6 +41,8 @@ static const struct acpi_device_id container_device_ids[] = { | |||
41 | {"", 0}, | 41 | {"", 0}, |
42 | }; | 42 | }; |
43 | 43 | ||
44 | #ifdef CONFIG_ACPI_CONTAINER | ||
45 | |||
44 | static int acpi_container_offline(struct container_dev *cdev) | 46 | static int acpi_container_offline(struct container_dev *cdev) |
45 | { | 47 | { |
46 | struct acpi_device *adev = ACPI_COMPANION(&cdev->dev); | 48 | struct acpi_device *adev = ACPI_COMPANION(&cdev->dev); |
@@ -109,5 +111,18 @@ static struct acpi_scan_handler container_handler = { | |||
109 | 111 | ||
110 | void __init acpi_container_init(void) | 112 | void __init acpi_container_init(void) |
111 | { | 113 | { |
114 | acpi_scan_add_handler(&container_handler); | ||
115 | } | ||
116 | |||
117 | #else | ||
118 | |||
119 | static struct acpi_scan_handler container_handler = { | ||
120 | .ids = container_device_ids, | ||
121 | }; | ||
122 | |||
123 | void __init acpi_container_init(void) | ||
124 | { | ||
112 | acpi_scan_add_handler_with_hotplug(&container_handler, "container"); | 125 | acpi_scan_add_handler_with_hotplug(&container_handler, "container"); |
113 | } | 126 | } |
127 | |||
128 | #endif /* CONFIG_ACPI_CONTAINER */ | ||
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index d047739f3380..49a51277f81d 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c | |||
@@ -900,18 +900,47 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early); | |||
900 | */ | 900 | */ |
901 | int acpi_subsys_prepare(struct device *dev) | 901 | int acpi_subsys_prepare(struct device *dev) |
902 | { | 902 | { |
903 | /* | 903 | struct acpi_device *adev = ACPI_COMPANION(dev); |
904 | * Devices having power.ignore_children set may still be necessary for | 904 | u32 sys_target; |
905 | * suspending their children in the next phase of device suspend. | 905 | int ret, state; |
906 | */ | 906 | |
907 | if (dev->power.ignore_children) | 907 | ret = pm_generic_prepare(dev); |
908 | pm_runtime_resume(dev); | 908 | if (ret < 0) |
909 | return ret; | ||
910 | |||
911 | if (!adev || !pm_runtime_suspended(dev) | ||
912 | || device_may_wakeup(dev) != !!adev->wakeup.prepare_count) | ||
913 | return 0; | ||
914 | |||
915 | sys_target = acpi_target_system_state(); | ||
916 | if (sys_target == ACPI_STATE_S0) | ||
917 | return 1; | ||
909 | 918 | ||
910 | return pm_generic_prepare(dev); | 919 | if (adev->power.flags.dsw_present) |
920 | return 0; | ||
921 | |||
922 | ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state); | ||
923 | return !ret && state == adev->power.state; | ||
911 | } | 924 | } |
912 | EXPORT_SYMBOL_GPL(acpi_subsys_prepare); | 925 | EXPORT_SYMBOL_GPL(acpi_subsys_prepare); |
913 | 926 | ||
914 | /** | 927 | /** |
928 | * acpi_subsys_complete - Finalize device's resume during system resume. | ||
929 | * @dev: Device to handle. | ||
930 | */ | ||
931 | void acpi_subsys_complete(struct device *dev) | ||
932 | { | ||
933 | /* | ||
934 | * If the device had been runtime-suspended before the system went into | ||
935 | * the sleep state it is going out of and it has never been resumed till | ||
936 | * now, resume it in case the firmware powered it up. | ||
937 | */ | ||
938 | if (dev->power.direct_complete) | ||
939 | pm_request_resume(dev); | ||
940 | } | ||
941 | EXPORT_SYMBOL_GPL(acpi_subsys_complete); | ||
942 | |||
943 | /** | ||
915 | * acpi_subsys_suspend - Run the device driver's suspend callback. | 944 | * acpi_subsys_suspend - Run the device driver's suspend callback. |
916 | * @dev: Device to handle. | 945 | * @dev: Device to handle. |
917 | * | 946 | * |
@@ -923,6 +952,7 @@ int acpi_subsys_suspend(struct device *dev) | |||
923 | pm_runtime_resume(dev); | 952 | pm_runtime_resume(dev); |
924 | return pm_generic_suspend(dev); | 953 | return pm_generic_suspend(dev); |
925 | } | 954 | } |
955 | EXPORT_SYMBOL_GPL(acpi_subsys_suspend); | ||
926 | 956 | ||
927 | /** | 957 | /** |
928 | * acpi_subsys_suspend_late - Suspend device using ACPI. | 958 | * acpi_subsys_suspend_late - Suspend device using ACPI. |
@@ -968,6 +998,7 @@ int acpi_subsys_freeze(struct device *dev) | |||
968 | pm_runtime_resume(dev); | 998 | pm_runtime_resume(dev); |
969 | return pm_generic_freeze(dev); | 999 | return pm_generic_freeze(dev); |
970 | } | 1000 | } |
1001 | EXPORT_SYMBOL_GPL(acpi_subsys_freeze); | ||
971 | 1002 | ||
972 | #endif /* CONFIG_PM_SLEEP */ | 1003 | #endif /* CONFIG_PM_SLEEP */ |
973 | 1004 | ||
@@ -979,6 +1010,7 @@ static struct dev_pm_domain acpi_general_pm_domain = { | |||
979 | #endif | 1010 | #endif |
980 | #ifdef CONFIG_PM_SLEEP | 1011 | #ifdef CONFIG_PM_SLEEP |
981 | .prepare = acpi_subsys_prepare, | 1012 | .prepare = acpi_subsys_prepare, |
1013 | .complete = acpi_subsys_complete, | ||
982 | .suspend = acpi_subsys_suspend, | 1014 | .suspend = acpi_subsys_suspend, |
983 | .suspend_late = acpi_subsys_suspend_late, | 1015 | .suspend_late = acpi_subsys_suspend_late, |
984 | .resume_early = acpi_subsys_resume_early, | 1016 | .resume_early = acpi_subsys_resume_early, |
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 957391306cbf..7de5b603f272 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
@@ -30,12 +30,10 @@ void acpi_pci_root_init(void); | |||
30 | void acpi_pci_link_init(void); | 30 | void acpi_pci_link_init(void); |
31 | void acpi_processor_init(void); | 31 | void acpi_processor_init(void); |
32 | void acpi_platform_init(void); | 32 | void acpi_platform_init(void); |
33 | void acpi_pnp_init(void); | ||
33 | int acpi_sysfs_init(void); | 34 | int acpi_sysfs_init(void); |
34 | #ifdef CONFIG_ACPI_CONTAINER | ||
35 | void acpi_container_init(void); | 35 | void acpi_container_init(void); |
36 | #else | 36 | void acpi_memory_hotplug_init(void); |
37 | static inline void acpi_container_init(void) {} | ||
38 | #endif | ||
39 | #ifdef CONFIG_ACPI_DOCK | 37 | #ifdef CONFIG_ACPI_DOCK |
40 | void register_dock_dependent_device(struct acpi_device *adev, | 38 | void register_dock_dependent_device(struct acpi_device *adev, |
41 | acpi_handle dshandle); | 39 | acpi_handle dshandle); |
@@ -47,11 +45,6 @@ static inline void register_dock_dependent_device(struct acpi_device *adev, | |||
47 | static inline int dock_notify(struct acpi_device *adev, u32 event) { return -ENODEV; } | 45 | static inline int dock_notify(struct acpi_device *adev, u32 event) { return -ENODEV; } |
48 | static inline void acpi_dock_add(struct acpi_device *adev) {} | 46 | static inline void acpi_dock_add(struct acpi_device *adev) {} |
49 | #endif | 47 | #endif |
50 | #ifdef CONFIG_ACPI_HOTPLUG_MEMORY | ||
51 | void acpi_memory_hotplug_init(void); | ||
52 | #else | ||
53 | static inline void acpi_memory_hotplug_init(void) {} | ||
54 | #endif | ||
55 | #ifdef CONFIG_X86 | 48 | #ifdef CONFIG_X86 |
56 | void acpi_cmos_rtc_init(void); | 49 | void acpi_cmos_rtc_init(void); |
57 | #else | 50 | #else |
@@ -72,11 +65,7 @@ int acpi_debugfs_init(void); | |||
72 | #else | 65 | #else |
73 | static inline void acpi_debugfs_init(void) { return; } | 66 | static inline void acpi_debugfs_init(void) { return; } |
74 | #endif | 67 | #endif |
75 | #ifdef CONFIG_X86_INTEL_LPSS | ||
76 | void acpi_lpss_init(void); | 68 | void acpi_lpss_init(void); |
77 | #else | ||
78 | static inline void acpi_lpss_init(void) {} | ||
79 | #endif | ||
80 | 69 | ||
81 | acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src); | 70 | acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src); |
82 | bool acpi_queue_hotplug_work(struct work_struct *work); | 71 | bool acpi_queue_hotplug_work(struct work_struct *work); |
@@ -180,8 +169,7 @@ static inline void suspend_nvs_restore(void) {} | |||
180 | -------------------------------------------------------------------------- */ | 169 | -------------------------------------------------------------------------- */ |
181 | struct platform_device; | 170 | struct platform_device; |
182 | 171 | ||
183 | int acpi_create_platform_device(struct acpi_device *adev, | 172 | struct platform_device *acpi_create_platform_device(struct acpi_device *adev); |
184 | const struct acpi_device_id *id); | ||
185 | 173 | ||
186 | /*-------------------------------------------------------------------------- | 174 | /*-------------------------------------------------------------------------- |
187 | Video | 175 | Video |
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c index de4fe03873c5..85287b8fe3aa 100644 --- a/drivers/acpi/nvs.c +++ b/drivers/acpi/nvs.c | |||
@@ -139,8 +139,8 @@ void suspend_nvs_free(void) | |||
139 | iounmap(entry->kaddr); | 139 | iounmap(entry->kaddr); |
140 | entry->unmap = false; | 140 | entry->unmap = false; |
141 | } else { | 141 | } else { |
142 | acpi_os_unmap_memory(entry->kaddr, | 142 | acpi_os_unmap_iomem(entry->kaddr, |
143 | entry->size); | 143 | entry->size); |
144 | } | 144 | } |
145 | entry->kaddr = NULL; | 145 | entry->kaddr = NULL; |
146 | } | 146 | } |
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 6776c599816f..147bc1b91b42 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -355,7 +355,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) | |||
355 | } | 355 | } |
356 | 356 | ||
357 | void __iomem *__init_refok | 357 | void __iomem *__init_refok |
358 | acpi_os_map_memory(acpi_physical_address phys, acpi_size size) | 358 | acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) |
359 | { | 359 | { |
360 | struct acpi_ioremap *map; | 360 | struct acpi_ioremap *map; |
361 | void __iomem *virt; | 361 | void __iomem *virt; |
@@ -401,10 +401,17 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size) | |||
401 | 401 | ||
402 | list_add_tail_rcu(&map->list, &acpi_ioremaps); | 402 | list_add_tail_rcu(&map->list, &acpi_ioremaps); |
403 | 403 | ||
404 | out: | 404 | out: |
405 | mutex_unlock(&acpi_ioremap_lock); | 405 | mutex_unlock(&acpi_ioremap_lock); |
406 | return map->virt + (phys - map->phys); | 406 | return map->virt + (phys - map->phys); |
407 | } | 407 | } |
408 | EXPORT_SYMBOL_GPL(acpi_os_map_iomem); | ||
409 | |||
410 | void *__init_refok | ||
411 | acpi_os_map_memory(acpi_physical_address phys, acpi_size size) | ||
412 | { | ||
413 | return (void *)acpi_os_map_iomem(phys, size); | ||
414 | } | ||
408 | EXPORT_SYMBOL_GPL(acpi_os_map_memory); | 415 | EXPORT_SYMBOL_GPL(acpi_os_map_memory); |
409 | 416 | ||
410 | static void acpi_os_drop_map_ref(struct acpi_ioremap *map) | 417 | static void acpi_os_drop_map_ref(struct acpi_ioremap *map) |
@@ -422,7 +429,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map) | |||
422 | } | 429 | } |
423 | } | 430 | } |
424 | 431 | ||
425 | void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size) | 432 | void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size) |
426 | { | 433 | { |
427 | struct acpi_ioremap *map; | 434 | struct acpi_ioremap *map; |
428 | 435 | ||
@@ -443,6 +450,12 @@ void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size) | |||
443 | 450 | ||
444 | acpi_os_map_cleanup(map); | 451 | acpi_os_map_cleanup(map); |
445 | } | 452 | } |
453 | EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem); | ||
454 | |||
455 | void __ref acpi_os_unmap_memory(void *virt, acpi_size size) | ||
456 | { | ||
457 | return acpi_os_unmap_iomem((void __iomem *)virt, size); | ||
458 | } | ||
446 | EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); | 459 | EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); |
447 | 460 | ||
448 | void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size) | 461 | void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size) |
@@ -464,7 +477,7 @@ int acpi_os_map_generic_address(struct acpi_generic_address *gas) | |||
464 | if (!addr || !gas->bit_width) | 477 | if (!addr || !gas->bit_width) |
465 | return -EINVAL; | 478 | return -EINVAL; |
466 | 479 | ||
467 | virt = acpi_os_map_memory(addr, gas->bit_width / 8); | 480 | virt = acpi_os_map_iomem(addr, gas->bit_width / 8); |
468 | if (!virt) | 481 | if (!virt) |
469 | return -EIO; | 482 | return -EIO; |
470 | 483 | ||
@@ -1770,16 +1783,15 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object) | |||
1770 | } | 1783 | } |
1771 | #endif | 1784 | #endif |
1772 | 1785 | ||
1773 | static int __init acpi_no_auto_ssdt_setup(char *s) | 1786 | static int __init acpi_no_static_ssdt_setup(char *s) |
1774 | { | 1787 | { |
1775 | printk(KERN_NOTICE PREFIX "SSDT auto-load disabled\n"); | 1788 | acpi_gbl_disable_ssdt_table_install = TRUE; |
1789 | pr_info("ACPI: static SSDT installation disabled\n"); | ||
1776 | 1790 | ||
1777 | acpi_gbl_disable_ssdt_table_load = TRUE; | 1791 | return 0; |
1778 | |||
1779 | return 1; | ||
1780 | } | 1792 | } |
1781 | 1793 | ||
1782 | __setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup); | 1794 | early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup); |
1783 | 1795 | ||
1784 | static int __init acpi_disable_return_repair(char *s) | 1796 | static int __init acpi_disable_return_repair(char *s) |
1785 | { | 1797 | { |
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 7f70f3182d50..4fcbd670415c 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c | |||
@@ -121,6 +121,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, | |||
121 | struct acpi_processor *pr = per_cpu(processors, cpu); | 121 | struct acpi_processor *pr = per_cpu(processors, cpu); |
122 | struct acpi_device *device; | 122 | struct acpi_device *device; |
123 | 123 | ||
124 | /* | ||
125 | * CPU_STARTING and CPU_DYING must not sleep. Return here since | ||
126 | * acpi_bus_get_device() may sleep. | ||
127 | */ | ||
128 | if (action == CPU_STARTING || action == CPU_DYING) | ||
129 | return NOTIFY_DONE; | ||
130 | |||
124 | if (!pr || acpi_bus_get_device(pr->handle, &device)) | 131 | if (!pr || acpi_bus_get_device(pr->handle, &device)) |
125 | return NOTIFY_DONE; | 132 | return NOTIFY_DONE; |
126 | 133 | ||
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 7efe546a8c42..f775fa0d850f 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -84,7 +84,7 @@ EXPORT_SYMBOL_GPL(acpi_initialize_hp_context); | |||
84 | 84 | ||
85 | int acpi_scan_add_handler(struct acpi_scan_handler *handler) | 85 | int acpi_scan_add_handler(struct acpi_scan_handler *handler) |
86 | { | 86 | { |
87 | if (!handler || !handler->attach) | 87 | if (!handler) |
88 | return -EINVAL; | 88 | return -EINVAL; |
89 | 89 | ||
90 | list_add_tail(&handler->list_node, &acpi_scan_handlers_list); | 90 | list_add_tail(&handler->list_node, &acpi_scan_handlers_list); |
@@ -1551,9 +1551,13 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) | |||
1551 | */ | 1551 | */ |
1552 | if (acpi_has_method(device->handle, "_PSC")) | 1552 | if (acpi_has_method(device->handle, "_PSC")) |
1553 | device->power.flags.explicit_get = 1; | 1553 | device->power.flags.explicit_get = 1; |
1554 | |||
1554 | if (acpi_has_method(device->handle, "_IRC")) | 1555 | if (acpi_has_method(device->handle, "_IRC")) |
1555 | device->power.flags.inrush_current = 1; | 1556 | device->power.flags.inrush_current = 1; |
1556 | 1557 | ||
1558 | if (acpi_has_method(device->handle, "_DSW")) | ||
1559 | device->power.flags.dsw_present = 1; | ||
1560 | |||
1557 | /* | 1561 | /* |
1558 | * Enumerate supported power management states | 1562 | * Enumerate supported power management states |
1559 | */ | 1563 | */ |
@@ -1793,8 +1797,10 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, | |||
1793 | return; | 1797 | return; |
1794 | } | 1798 | } |
1795 | 1799 | ||
1796 | if (info->valid & ACPI_VALID_HID) | 1800 | if (info->valid & ACPI_VALID_HID) { |
1797 | acpi_add_id(pnp, info->hardware_id.string); | 1801 | acpi_add_id(pnp, info->hardware_id.string); |
1802 | pnp->type.platform_id = 1; | ||
1803 | } | ||
1798 | if (info->valid & ACPI_VALID_CID) { | 1804 | if (info->valid & ACPI_VALID_CID) { |
1799 | cid_list = &info->compatible_id_list; | 1805 | cid_list = &info->compatible_id_list; |
1800 | for (i = 0; i < cid_list->count; i++) | 1806 | for (i = 0; i < cid_list->count; i++) |
@@ -1973,6 +1979,9 @@ static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler, | |||
1973 | { | 1979 | { |
1974 | const struct acpi_device_id *devid; | 1980 | const struct acpi_device_id *devid; |
1975 | 1981 | ||
1982 | if (handler->match) | ||
1983 | return handler->match(idstr, matchid); | ||
1984 | |||
1976 | for (devid = handler->ids; devid->id[0]; devid++) | 1985 | for (devid = handler->ids; devid->id[0]; devid++) |
1977 | if (!strcmp((char *)devid->id, idstr)) { | 1986 | if (!strcmp((char *)devid->id, idstr)) { |
1978 | if (matchid) | 1987 | if (matchid) |
@@ -2061,6 +2070,44 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used, | |||
2061 | return AE_OK; | 2070 | return AE_OK; |
2062 | } | 2071 | } |
2063 | 2072 | ||
2073 | static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data) | ||
2074 | { | ||
2075 | bool *is_spi_i2c_slave_p = data; | ||
2076 | |||
2077 | if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) | ||
2078 | return 1; | ||
2079 | |||
2080 | /* | ||
2081 | * Devices connected to a UART still need to be enumerated on | ||
2082 | * the platform bus | ||
2083 | */ | ||
2084 | if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART) | ||
2085 | *is_spi_i2c_slave_p = true; | ||
2086 | |||
2087 | /* no need to do more checking */ | ||
2088 | return -1; | ||
2089 | } | ||
2090 | |||
2091 | static void acpi_default_enumeration(struct acpi_device *device) | ||
2092 | { | ||
2093 | struct list_head resource_list; | ||
2094 | bool is_spi_i2c_slave = false; | ||
2095 | |||
2096 | if (!device->pnp.type.platform_id || device->handler) | ||
2097 | return; | ||
2098 | |||
2099 | /* | ||
2100 | * Do not enumerate SPI/I2C slaves as they will be enumerated by their | ||
2101 | * respective parents. | ||
2102 | */ | ||
2103 | INIT_LIST_HEAD(&resource_list); | ||
2104 | acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave, | ||
2105 | &is_spi_i2c_slave); | ||
2106 | acpi_dev_free_resource_list(&resource_list); | ||
2107 | if (!is_spi_i2c_slave) | ||
2108 | acpi_create_platform_device(device); | ||
2109 | } | ||
2110 | |||
2064 | static int acpi_scan_attach_handler(struct acpi_device *device) | 2111 | static int acpi_scan_attach_handler(struct acpi_device *device) |
2065 | { | 2112 | { |
2066 | struct acpi_hardware_id *hwid; | 2113 | struct acpi_hardware_id *hwid; |
@@ -2072,6 +2119,10 @@ static int acpi_scan_attach_handler(struct acpi_device *device) | |||
2072 | 2119 | ||
2073 | handler = acpi_scan_match_handler(hwid->id, &devid); | 2120 | handler = acpi_scan_match_handler(hwid->id, &devid); |
2074 | if (handler) { | 2121 | if (handler) { |
2122 | if (!handler->attach) { | ||
2123 | device->pnp.type.platform_id = 0; | ||
2124 | continue; | ||
2125 | } | ||
2075 | device->handler = handler; | 2126 | device->handler = handler; |
2076 | ret = handler->attach(device, devid); | 2127 | ret = handler->attach(device, devid); |
2077 | if (ret > 0) | 2128 | if (ret > 0) |
@@ -2082,6 +2133,9 @@ static int acpi_scan_attach_handler(struct acpi_device *device) | |||
2082 | break; | 2133 | break; |
2083 | } | 2134 | } |
2084 | } | 2135 | } |
2136 | if (!ret) | ||
2137 | acpi_default_enumeration(device); | ||
2138 | |||
2085 | return ret; | 2139 | return ret; |
2086 | } | 2140 | } |
2087 | 2141 | ||
@@ -2241,11 +2295,11 @@ int __init acpi_scan_init(void) | |||
2241 | acpi_pci_root_init(); | 2295 | acpi_pci_root_init(); |
2242 | acpi_pci_link_init(); | 2296 | acpi_pci_link_init(); |
2243 | acpi_processor_init(); | 2297 | acpi_processor_init(); |
2244 | acpi_platform_init(); | ||
2245 | acpi_lpss_init(); | 2298 | acpi_lpss_init(); |
2246 | acpi_cmos_rtc_init(); | 2299 | acpi_cmos_rtc_init(); |
2247 | acpi_container_init(); | 2300 | acpi_container_init(); |
2248 | acpi_memory_hotplug_init(); | 2301 | acpi_memory_hotplug_init(); |
2302 | acpi_pnp_init(); | ||
2249 | 2303 | ||
2250 | mutex_lock(&acpi_scan_lock); | 2304 | mutex_lock(&acpi_scan_lock); |
2251 | /* | 2305 | /* |
@@ -2259,12 +2313,16 @@ int __init acpi_scan_init(void) | |||
2259 | if (result) | 2313 | if (result) |
2260 | goto out; | 2314 | goto out; |
2261 | 2315 | ||
2262 | result = acpi_bus_scan_fixed(); | 2316 | /* Fixed feature devices do not exist on HW-reduced platform */ |
2263 | if (result) { | 2317 | if (!acpi_gbl_reduced_hardware) { |
2264 | acpi_detach_data(acpi_root->handle, acpi_scan_drop_device); | 2318 | result = acpi_bus_scan_fixed(); |
2265 | acpi_device_del(acpi_root); | 2319 | if (result) { |
2266 | put_device(&acpi_root->dev); | 2320 | acpi_detach_data(acpi_root->handle, |
2267 | goto out; | 2321 | acpi_scan_drop_device); |
2322 | acpi_device_del(acpi_root); | ||
2323 | put_device(&acpi_root->dev); | ||
2324 | goto out; | ||
2325 | } | ||
2268 | } | 2326 | } |
2269 | 2327 | ||
2270 | acpi_update_all_gpes(); | 2328 | acpi_update_all_gpes(); |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index c40fb2e81bbc..c11e3795431b 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -89,6 +89,7 @@ u32 acpi_target_system_state(void) | |||
89 | { | 89 | { |
90 | return acpi_target_sleep_state; | 90 | return acpi_target_sleep_state; |
91 | } | 91 | } |
92 | EXPORT_SYMBOL_GPL(acpi_target_system_state); | ||
92 | 93 | ||
93 | static bool pwr_btn_event_pending; | 94 | static bool pwr_btn_event_pending; |
94 | 95 | ||
@@ -611,6 +612,22 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = { | |||
611 | .recover = acpi_pm_finish, | 612 | .recover = acpi_pm_finish, |
612 | }; | 613 | }; |
613 | 614 | ||
615 | static int acpi_freeze_begin(void) | ||
616 | { | ||
617 | acpi_scan_lock_acquire(); | ||
618 | return 0; | ||
619 | } | ||
620 | |||
621 | static void acpi_freeze_end(void) | ||
622 | { | ||
623 | acpi_scan_lock_release(); | ||
624 | } | ||
625 | |||
626 | static const struct platform_freeze_ops acpi_freeze_ops = { | ||
627 | .begin = acpi_freeze_begin, | ||
628 | .end = acpi_freeze_end, | ||
629 | }; | ||
630 | |||
614 | static void acpi_sleep_suspend_setup(void) | 631 | static void acpi_sleep_suspend_setup(void) |
615 | { | 632 | { |
616 | int i; | 633 | int i; |
@@ -621,7 +638,9 @@ static void acpi_sleep_suspend_setup(void) | |||
621 | 638 | ||
622 | suspend_set_ops(old_suspend_ordering ? | 639 | suspend_set_ops(old_suspend_ordering ? |
623 | &acpi_suspend_ops_old : &acpi_suspend_ops); | 640 | &acpi_suspend_ops_old : &acpi_suspend_ops); |
641 | freeze_set_ops(&acpi_freeze_ops); | ||
624 | } | 642 | } |
643 | |||
625 | #else /* !CONFIG_SUSPEND */ | 644 | #else /* !CONFIG_SUSPEND */ |
626 | static inline void acpi_sleep_suspend_setup(void) {} | 645 | static inline void acpi_sleep_suspend_setup(void) {} |
627 | #endif /* !CONFIG_SUSPEND */ | 646 | #endif /* !CONFIG_SUSPEND */ |
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 21782290df41..05550ba44d32 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c | |||
@@ -44,6 +44,12 @@ static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata; | |||
44 | 44 | ||
45 | static int acpi_apic_instance __initdata; | 45 | static int acpi_apic_instance __initdata; |
46 | 46 | ||
47 | /* | ||
48 | * Disable table checksum verification for the early stage due to the size | ||
49 | * limitation of the current x86 early mapping implementation. | ||
50 | */ | ||
51 | static bool acpi_verify_table_checksum __initdata = false; | ||
52 | |||
47 | void acpi_table_print_madt_entry(struct acpi_subtable_header *header) | 53 | void acpi_table_print_madt_entry(struct acpi_subtable_header *header) |
48 | { | 54 | { |
49 | if (!header) | 55 | if (!header) |
@@ -333,6 +339,14 @@ int __init acpi_table_init(void) | |||
333 | { | 339 | { |
334 | acpi_status status; | 340 | acpi_status status; |
335 | 341 | ||
342 | if (acpi_verify_table_checksum) { | ||
343 | pr_info("Early table checksum verification enabled\n"); | ||
344 | acpi_gbl_verify_table_checksum = TRUE; | ||
345 | } else { | ||
346 | pr_info("Early table checksum verification disabled\n"); | ||
347 | acpi_gbl_verify_table_checksum = FALSE; | ||
348 | } | ||
349 | |||
336 | status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); | 350 | status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); |
337 | if (ACPI_FAILURE(status)) | 351 | if (ACPI_FAILURE(status)) |
338 | return -EINVAL; | 352 | return -EINVAL; |
@@ -354,3 +368,12 @@ static int __init acpi_parse_apic_instance(char *str) | |||
354 | } | 368 | } |
355 | 369 | ||
356 | early_param("acpi_apic_instance", acpi_parse_apic_instance); | 370 | early_param("acpi_apic_instance", acpi_parse_apic_instance); |
371 | |||
372 | static int __init acpi_force_table_verification_setup(char *s) | ||
373 | { | ||
374 | acpi_verify_table_checksum = true; | ||
375 | |||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | early_param("acpi_force_table_verification", acpi_force_table_verification_setup); | ||
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index c1e31a41f949..112817e963e0 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c | |||
@@ -925,13 +925,10 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz) | |||
925 | if (result) | 925 | if (result) |
926 | return result; | 926 | return result; |
927 | 927 | ||
928 | status = acpi_attach_data(tz->device->handle, | 928 | status = acpi_bus_attach_private_data(tz->device->handle, |
929 | acpi_bus_private_data_handler, | 929 | tz->thermal_zone); |
930 | tz->thermal_zone); | 930 | if (ACPI_FAILURE(status)) |
931 | if (ACPI_FAILURE(status)) { | ||
932 | pr_err(PREFIX "Error attaching device data\n"); | ||
933 | return -ENODEV; | 931 | return -ENODEV; |
934 | } | ||
935 | 932 | ||
936 | tz->tz_enabled = 1; | 933 | tz->tz_enabled = 1; |
937 | 934 | ||
@@ -946,7 +943,7 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) | |||
946 | sysfs_remove_link(&tz->thermal_zone->device.kobj, "device"); | 943 | sysfs_remove_link(&tz->thermal_zone->device.kobj, "device"); |
947 | thermal_zone_device_unregister(tz->thermal_zone); | 944 | thermal_zone_device_unregister(tz->thermal_zone); |
948 | tz->thermal_zone = NULL; | 945 | tz->thermal_zone = NULL; |
949 | acpi_detach_data(tz->device->handle, acpi_bus_private_data_handler); | 946 | acpi_bus_detach_private_data(tz->device->handle); |
950 | } | 947 | } |
951 | 948 | ||
952 | 949 | ||
@@ -1278,8 +1275,8 @@ static int __init acpi_thermal_init(void) | |||
1278 | 1275 | ||
1279 | static void __exit acpi_thermal_exit(void) | 1276 | static void __exit acpi_thermal_exit(void) |
1280 | { | 1277 | { |
1281 | destroy_workqueue(acpi_thermal_pm_queue); | ||
1282 | acpi_bus_unregister_driver(&acpi_thermal_driver); | 1278 | acpi_bus_unregister_driver(&acpi_thermal_driver); |
1279 | destroy_workqueue(acpi_thermal_pm_queue); | ||
1283 | 1280 | ||
1284 | return; | 1281 | return; |
1285 | } | 1282 | } |
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index bba526148583..07c8c5a5ee95 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/hardirq.h> | 31 | #include <linux/hardirq.h> |
32 | #include <linux/acpi.h> | 32 | #include <linux/acpi.h> |
33 | #include <linux/dynamic_debug.h> | ||
33 | 34 | ||
34 | #include "internal.h" | 35 | #include "internal.h" |
35 | 36 | ||
@@ -457,6 +458,24 @@ acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code, | |||
457 | EXPORT_SYMBOL(acpi_evaluate_ost); | 458 | EXPORT_SYMBOL(acpi_evaluate_ost); |
458 | 459 | ||
459 | /** | 460 | /** |
461 | * acpi_handle_path: Return the object path of handle | ||
462 | * | ||
463 | * Caller must free the returned buffer | ||
464 | */ | ||
465 | static char *acpi_handle_path(acpi_handle handle) | ||
466 | { | ||
467 | struct acpi_buffer buffer = { | ||
468 | .length = ACPI_ALLOCATE_BUFFER, | ||
469 | .pointer = NULL | ||
470 | }; | ||
471 | |||
472 | if (in_interrupt() || | ||
473 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK) | ||
474 | return NULL; | ||
475 | return buffer.pointer; | ||
476 | } | ||
477 | |||
478 | /** | ||
460 | * acpi_handle_printk: Print message with ACPI prefix and object path | 479 | * acpi_handle_printk: Print message with ACPI prefix and object path |
461 | * | 480 | * |
462 | * This function is called through acpi_handle_<level> macros and prints | 481 | * This function is called through acpi_handle_<level> macros and prints |
@@ -469,29 +488,50 @@ acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...) | |||
469 | { | 488 | { |
470 | struct va_format vaf; | 489 | struct va_format vaf; |
471 | va_list args; | 490 | va_list args; |
472 | struct acpi_buffer buffer = { | ||
473 | .length = ACPI_ALLOCATE_BUFFER, | ||
474 | .pointer = NULL | ||
475 | }; | ||
476 | const char *path; | 491 | const char *path; |
477 | 492 | ||
478 | va_start(args, fmt); | 493 | va_start(args, fmt); |
479 | vaf.fmt = fmt; | 494 | vaf.fmt = fmt; |
480 | vaf.va = &args; | 495 | vaf.va = &args; |
481 | 496 | ||
482 | if (in_interrupt() || | 497 | path = acpi_handle_path(handle); |
483 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK) | 498 | printk("%sACPI: %s: %pV", level, path ? path : "<n/a>" , &vaf); |
484 | path = "<n/a>"; | ||
485 | else | ||
486 | path = buffer.pointer; | ||
487 | |||
488 | printk("%sACPI: %s: %pV", level, path, &vaf); | ||
489 | 499 | ||
490 | va_end(args); | 500 | va_end(args); |
491 | kfree(buffer.pointer); | 501 | kfree(path); |
492 | } | 502 | } |
493 | EXPORT_SYMBOL(acpi_handle_printk); | 503 | EXPORT_SYMBOL(acpi_handle_printk); |
494 | 504 | ||
505 | #if defined(CONFIG_DYNAMIC_DEBUG) | ||
506 | /** | ||
507 | * __acpi_handle_debug: pr_debug with ACPI prefix and object path | ||
508 | * | ||
509 | * This function is called through acpi_handle_debug macro and debug | ||
510 | * prints a message with ACPI prefix and object path. This function | ||
511 | * acquires the global namespace mutex to obtain an object path. In | ||
512 | * interrupt context, it shows the object path as <n/a>. | ||
513 | */ | ||
514 | void | ||
515 | __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, | ||
516 | const char *fmt, ...) | ||
517 | { | ||
518 | struct va_format vaf; | ||
519 | va_list args; | ||
520 | const char *path; | ||
521 | |||
522 | va_start(args, fmt); | ||
523 | vaf.fmt = fmt; | ||
524 | vaf.va = &args; | ||
525 | |||
526 | path = acpi_handle_path(handle); | ||
527 | __dynamic_pr_debug(descriptor, "ACPI: %s: %pV", path ? path : "<n/a>", &vaf); | ||
528 | |||
529 | va_end(args); | ||
530 | kfree(path); | ||
531 | } | ||
532 | EXPORT_SYMBOL(__acpi_handle_debug); | ||
533 | #endif | ||
534 | |||
495 | /** | 535 | /** |
496 | * acpi_has_method: Check whether @handle has a method named @name | 536 | * acpi_has_method: Check whether @handle has a method named @name |
497 | * @handle: ACPI device handle | 537 | * @handle: ACPI device handle |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 8b6990e417ec..101fb090dcb9 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno Ducrot"); | |||
68 | MODULE_DESCRIPTION("ACPI Video Driver"); | 68 | MODULE_DESCRIPTION("ACPI Video Driver"); |
69 | MODULE_LICENSE("GPL"); | 69 | MODULE_LICENSE("GPL"); |
70 | 70 | ||
71 | static bool brightness_switch_enabled = 1; | 71 | static bool brightness_switch_enabled; |
72 | module_param(brightness_switch_enabled, bool, 0644); | 72 | module_param(brightness_switch_enabled, bool, 0644); |
73 | 73 | ||
74 | /* | 74 | /* |
@@ -150,6 +150,8 @@ struct acpi_video_enumerated_device { | |||
150 | 150 | ||
151 | struct acpi_video_bus { | 151 | struct acpi_video_bus { |
152 | struct acpi_device *device; | 152 | struct acpi_device *device; |
153 | bool backlight_registered; | ||
154 | bool backlight_notifier_registered; | ||
153 | u8 dos_setting; | 155 | u8 dos_setting; |
154 | struct acpi_video_enumerated_device *attached_array; | 156 | struct acpi_video_enumerated_device *attached_array; |
155 | u8 attached_count; | 157 | u8 attached_count; |
@@ -161,6 +163,7 @@ struct acpi_video_bus { | |||
161 | struct input_dev *input; | 163 | struct input_dev *input; |
162 | char phys[32]; /* for input device */ | 164 | char phys[32]; /* for input device */ |
163 | struct notifier_block pm_nb; | 165 | struct notifier_block pm_nb; |
166 | struct notifier_block backlight_nb; | ||
164 | }; | 167 | }; |
165 | 168 | ||
166 | struct acpi_video_device_flags { | 169 | struct acpi_video_device_flags { |
@@ -457,10 +460,10 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
457 | }, | 460 | }, |
458 | { | 461 | { |
459 | .callback = video_set_use_native_backlight, | 462 | .callback = video_set_use_native_backlight, |
460 | .ident = "ThinkPad T430s", | 463 | .ident = "ThinkPad T430 and T430s", |
461 | .matches = { | 464 | .matches = { |
462 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | 465 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
463 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"), | 466 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"), |
464 | }, | 467 | }, |
465 | }, | 468 | }, |
466 | { | 469 | { |
@@ -472,7 +475,15 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
472 | }, | 475 | }, |
473 | }, | 476 | }, |
474 | { | 477 | { |
475 | .callback = video_set_use_native_backlight, | 478 | .callback = video_set_use_native_backlight, |
479 | .ident = "ThinkPad W530", | ||
480 | .matches = { | ||
481 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
482 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"), | ||
483 | }, | ||
484 | }, | ||
485 | { | ||
486 | .callback = video_set_use_native_backlight, | ||
476 | .ident = "ThinkPad X1 Carbon", | 487 | .ident = "ThinkPad X1 Carbon", |
477 | .matches = { | 488 | .matches = { |
478 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | 489 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
@@ -488,6 +499,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
488 | }, | 499 | }, |
489 | }, | 500 | }, |
490 | { | 501 | { |
502 | .callback = video_set_use_native_backlight, | ||
503 | .ident = "Lenovo Yoga 2 11", | ||
504 | .matches = { | ||
505 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
506 | DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2 11"), | ||
507 | }, | ||
508 | }, | ||
509 | { | ||
491 | .callback = video_set_use_native_backlight, | 510 | .callback = video_set_use_native_backlight, |
492 | .ident = "Thinkpad Helix", | 511 | .ident = "Thinkpad Helix", |
493 | .matches = { | 512 | .matches = { |
@@ -500,7 +519,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
500 | .ident = "Dell Inspiron 7520", | 519 | .ident = "Dell Inspiron 7520", |
501 | .matches = { | 520 | .matches = { |
502 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | 521 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
503 | DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"), | 522 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"), |
504 | }, | 523 | }, |
505 | }, | 524 | }, |
506 | { | 525 | { |
@@ -513,6 +532,22 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
513 | }, | 532 | }, |
514 | { | 533 | { |
515 | .callback = video_set_use_native_backlight, | 534 | .callback = video_set_use_native_backlight, |
535 | .ident = "Acer Aspire 5742G", | ||
536 | .matches = { | ||
537 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
538 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"), | ||
539 | }, | ||
540 | }, | ||
541 | { | ||
542 | .callback = video_set_use_native_backlight, | ||
543 | .ident = "Acer Aspire V5-171", | ||
544 | .matches = { | ||
545 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
546 | DMI_MATCH(DMI_PRODUCT_NAME, "V5-171"), | ||
547 | }, | ||
548 | }, | ||
549 | { | ||
550 | .callback = video_set_use_native_backlight, | ||
516 | .ident = "Acer Aspire V5-431", | 551 | .ident = "Acer Aspire V5-431", |
517 | .matches = { | 552 | .matches = { |
518 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | 553 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
@@ -520,6 +555,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
520 | }, | 555 | }, |
521 | }, | 556 | }, |
522 | { | 557 | { |
558 | .callback = video_set_use_native_backlight, | ||
559 | .ident = "Acer Aspire V5-471G", | ||
560 | .matches = { | ||
561 | DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), | ||
562 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-471G"), | ||
563 | }, | ||
564 | }, | ||
565 | { | ||
523 | .callback = video_set_use_native_backlight, | 566 | .callback = video_set_use_native_backlight, |
524 | .ident = "HP ProBook 4340s", | 567 | .ident = "HP ProBook 4340s", |
525 | .matches = { | 568 | .matches = { |
@@ -571,6 +614,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
571 | }, | 614 | }, |
572 | { | 615 | { |
573 | .callback = video_set_use_native_backlight, | 616 | .callback = video_set_use_native_backlight, |
617 | .ident = "HP EliteBook 8470p", | ||
618 | .matches = { | ||
619 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
620 | DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8470p"), | ||
621 | }, | ||
622 | }, | ||
623 | { | ||
624 | .callback = video_set_use_native_backlight, | ||
574 | .ident = "HP EliteBook 8780w", | 625 | .ident = "HP EliteBook 8780w", |
575 | .matches = { | 626 | .matches = { |
576 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | 627 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), |
@@ -1650,88 +1701,92 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context, | |||
1650 | 1701 | ||
1651 | static void acpi_video_dev_register_backlight(struct acpi_video_device *device) | 1702 | static void acpi_video_dev_register_backlight(struct acpi_video_device *device) |
1652 | { | 1703 | { |
1653 | if (acpi_video_verify_backlight_support()) { | 1704 | struct backlight_properties props; |
1654 | struct backlight_properties props; | 1705 | struct pci_dev *pdev; |
1655 | struct pci_dev *pdev; | 1706 | acpi_handle acpi_parent; |
1656 | acpi_handle acpi_parent; | 1707 | struct device *parent = NULL; |
1657 | struct device *parent = NULL; | 1708 | int result; |
1658 | int result; | 1709 | static int count; |
1659 | static int count; | 1710 | char *name; |
1660 | char *name; | ||
1661 | |||
1662 | result = acpi_video_init_brightness(device); | ||
1663 | if (result) | ||
1664 | return; | ||
1665 | name = kasprintf(GFP_KERNEL, "acpi_video%d", count); | ||
1666 | if (!name) | ||
1667 | return; | ||
1668 | count++; | ||
1669 | 1711 | ||
1670 | acpi_get_parent(device->dev->handle, &acpi_parent); | 1712 | result = acpi_video_init_brightness(device); |
1713 | if (result) | ||
1714 | return; | ||
1715 | name = kasprintf(GFP_KERNEL, "acpi_video%d", count); | ||
1716 | if (!name) | ||
1717 | return; | ||
1718 | count++; | ||
1671 | 1719 | ||
1672 | pdev = acpi_get_pci_dev(acpi_parent); | 1720 | acpi_get_parent(device->dev->handle, &acpi_parent); |
1673 | if (pdev) { | ||
1674 | parent = &pdev->dev; | ||
1675 | pci_dev_put(pdev); | ||
1676 | } | ||
1677 | 1721 | ||
1678 | memset(&props, 0, sizeof(struct backlight_properties)); | 1722 | pdev = acpi_get_pci_dev(acpi_parent); |
1679 | props.type = BACKLIGHT_FIRMWARE; | 1723 | if (pdev) { |
1680 | props.max_brightness = device->brightness->count - 3; | 1724 | parent = &pdev->dev; |
1681 | device->backlight = backlight_device_register(name, | 1725 | pci_dev_put(pdev); |
1682 | parent, | 1726 | } |
1683 | device, | ||
1684 | &acpi_backlight_ops, | ||
1685 | &props); | ||
1686 | kfree(name); | ||
1687 | if (IS_ERR(device->backlight)) | ||
1688 | return; | ||
1689 | 1727 | ||
1690 | /* | 1728 | memset(&props, 0, sizeof(struct backlight_properties)); |
1691 | * Save current brightness level in case we have to restore it | 1729 | props.type = BACKLIGHT_FIRMWARE; |
1692 | * before acpi_video_device_lcd_set_level() is called next time. | 1730 | props.max_brightness = device->brightness->count - 3; |
1693 | */ | 1731 | device->backlight = backlight_device_register(name, |
1694 | device->backlight->props.brightness = | 1732 | parent, |
1695 | acpi_video_get_brightness(device->backlight); | 1733 | device, |
1734 | &acpi_backlight_ops, | ||
1735 | &props); | ||
1736 | kfree(name); | ||
1737 | if (IS_ERR(device->backlight)) | ||
1738 | return; | ||
1696 | 1739 | ||
1697 | device->cooling_dev = thermal_cooling_device_register("LCD", | 1740 | /* |
1698 | device->dev, &video_cooling_ops); | 1741 | * Save current brightness level in case we have to restore it |
1699 | if (IS_ERR(device->cooling_dev)) { | 1742 | * before acpi_video_device_lcd_set_level() is called next time. |
1700 | /* | 1743 | */ |
1701 | * Set cooling_dev to NULL so we don't crash trying to | 1744 | device->backlight->props.brightness = |
1702 | * free it. | 1745 | acpi_video_get_brightness(device->backlight); |
1703 | * Also, why the hell we are returning early and | ||
1704 | * not attempt to register video output if cooling | ||
1705 | * device registration failed? | ||
1706 | * -- dtor | ||
1707 | */ | ||
1708 | device->cooling_dev = NULL; | ||
1709 | return; | ||
1710 | } | ||
1711 | 1746 | ||
1712 | dev_info(&device->dev->dev, "registered as cooling_device%d\n", | 1747 | device->cooling_dev = thermal_cooling_device_register("LCD", |
1713 | device->cooling_dev->id); | 1748 | device->dev, &video_cooling_ops); |
1714 | result = sysfs_create_link(&device->dev->dev.kobj, | 1749 | if (IS_ERR(device->cooling_dev)) { |
1715 | &device->cooling_dev->device.kobj, | 1750 | /* |
1716 | "thermal_cooling"); | 1751 | * Set cooling_dev to NULL so we don't crash trying to free it. |
1717 | if (result) | 1752 | * Also, why the hell we are returning early and not attempt to |
1718 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | 1753 | * register video output if cooling device registration failed? |
1719 | result = sysfs_create_link(&device->cooling_dev->device.kobj, | 1754 | * -- dtor |
1720 | &device->dev->dev.kobj, "device"); | 1755 | */ |
1721 | if (result) | 1756 | device->cooling_dev = NULL; |
1722 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | 1757 | return; |
1723 | } | 1758 | } |
1759 | |||
1760 | dev_info(&device->dev->dev, "registered as cooling_device%d\n", | ||
1761 | device->cooling_dev->id); | ||
1762 | result = sysfs_create_link(&device->dev->dev.kobj, | ||
1763 | &device->cooling_dev->device.kobj, | ||
1764 | "thermal_cooling"); | ||
1765 | if (result) | ||
1766 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | ||
1767 | result = sysfs_create_link(&device->cooling_dev->device.kobj, | ||
1768 | &device->dev->dev.kobj, "device"); | ||
1769 | if (result) | ||
1770 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | ||
1724 | } | 1771 | } |
1725 | 1772 | ||
1726 | static int acpi_video_bus_register_backlight(struct acpi_video_bus *video) | 1773 | static int acpi_video_bus_register_backlight(struct acpi_video_bus *video) |
1727 | { | 1774 | { |
1728 | struct acpi_video_device *dev; | 1775 | struct acpi_video_device *dev; |
1729 | 1776 | ||
1777 | if (video->backlight_registered) | ||
1778 | return 0; | ||
1779 | |||
1780 | if (!acpi_video_verify_backlight_support()) | ||
1781 | return 0; | ||
1782 | |||
1730 | mutex_lock(&video->device_list_lock); | 1783 | mutex_lock(&video->device_list_lock); |
1731 | list_for_each_entry(dev, &video->video_device_list, entry) | 1784 | list_for_each_entry(dev, &video->video_device_list, entry) |
1732 | acpi_video_dev_register_backlight(dev); | 1785 | acpi_video_dev_register_backlight(dev); |
1733 | mutex_unlock(&video->device_list_lock); | 1786 | mutex_unlock(&video->device_list_lock); |
1734 | 1787 | ||
1788 | video->backlight_registered = true; | ||
1789 | |||
1735 | video->pm_nb.notifier_call = acpi_video_resume; | 1790 | video->pm_nb.notifier_call = acpi_video_resume; |
1736 | video->pm_nb.priority = 0; | 1791 | video->pm_nb.priority = 0; |
1737 | return register_pm_notifier(&video->pm_nb); | 1792 | return register_pm_notifier(&video->pm_nb); |
@@ -1759,13 +1814,20 @@ static void acpi_video_dev_unregister_backlight(struct acpi_video_device *device | |||
1759 | static int acpi_video_bus_unregister_backlight(struct acpi_video_bus *video) | 1814 | static int acpi_video_bus_unregister_backlight(struct acpi_video_bus *video) |
1760 | { | 1815 | { |
1761 | struct acpi_video_device *dev; | 1816 | struct acpi_video_device *dev; |
1762 | int error = unregister_pm_notifier(&video->pm_nb); | 1817 | int error; |
1818 | |||
1819 | if (!video->backlight_registered) | ||
1820 | return 0; | ||
1821 | |||
1822 | error = unregister_pm_notifier(&video->pm_nb); | ||
1763 | 1823 | ||
1764 | mutex_lock(&video->device_list_lock); | 1824 | mutex_lock(&video->device_list_lock); |
1765 | list_for_each_entry(dev, &video->video_device_list, entry) | 1825 | list_for_each_entry(dev, &video->video_device_list, entry) |
1766 | acpi_video_dev_unregister_backlight(dev); | 1826 | acpi_video_dev_unregister_backlight(dev); |
1767 | mutex_unlock(&video->device_list_lock); | 1827 | mutex_unlock(&video->device_list_lock); |
1768 | 1828 | ||
1829 | video->backlight_registered = false; | ||
1830 | |||
1769 | return error; | 1831 | return error; |
1770 | } | 1832 | } |
1771 | 1833 | ||
@@ -1859,6 +1921,56 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video) | |||
1859 | video->input = NULL; | 1921 | video->input = NULL; |
1860 | } | 1922 | } |
1861 | 1923 | ||
1924 | static int acpi_video_backlight_notify(struct notifier_block *nb, | ||
1925 | unsigned long val, void *bd) | ||
1926 | { | ||
1927 | struct backlight_device *backlight = bd; | ||
1928 | struct acpi_video_bus *video; | ||
1929 | |||
1930 | /* acpi_video_verify_backlight_support only cares about raw devices */ | ||
1931 | if (backlight->props.type != BACKLIGHT_RAW) | ||
1932 | return NOTIFY_DONE; | ||
1933 | |||
1934 | video = container_of(nb, struct acpi_video_bus, backlight_nb); | ||
1935 | |||
1936 | switch (val) { | ||
1937 | case BACKLIGHT_REGISTERED: | ||
1938 | if (!acpi_video_verify_backlight_support()) | ||
1939 | acpi_video_bus_unregister_backlight(video); | ||
1940 | break; | ||
1941 | case BACKLIGHT_UNREGISTERED: | ||
1942 | acpi_video_bus_register_backlight(video); | ||
1943 | break; | ||
1944 | } | ||
1945 | |||
1946 | return NOTIFY_OK; | ||
1947 | } | ||
1948 | |||
1949 | static int acpi_video_bus_add_backlight_notify_handler( | ||
1950 | struct acpi_video_bus *video) | ||
1951 | { | ||
1952 | int error; | ||
1953 | |||
1954 | video->backlight_nb.notifier_call = acpi_video_backlight_notify; | ||
1955 | video->backlight_nb.priority = 0; | ||
1956 | error = backlight_register_notifier(&video->backlight_nb); | ||
1957 | if (error == 0) | ||
1958 | video->backlight_notifier_registered = true; | ||
1959 | |||
1960 | return error; | ||
1961 | } | ||
1962 | |||
1963 | static int acpi_video_bus_remove_backlight_notify_handler( | ||
1964 | struct acpi_video_bus *video) | ||
1965 | { | ||
1966 | if (!video->backlight_notifier_registered) | ||
1967 | return 0; | ||
1968 | |||
1969 | video->backlight_notifier_registered = false; | ||
1970 | |||
1971 | return backlight_unregister_notifier(&video->backlight_nb); | ||
1972 | } | ||
1973 | |||
1862 | static int acpi_video_bus_put_devices(struct acpi_video_bus *video) | 1974 | static int acpi_video_bus_put_devices(struct acpi_video_bus *video) |
1863 | { | 1975 | { |
1864 | struct acpi_video_device *dev, *next; | 1976 | struct acpi_video_device *dev, *next; |
@@ -1940,6 +2052,7 @@ static int acpi_video_bus_add(struct acpi_device *device) | |||
1940 | 2052 | ||
1941 | acpi_video_bus_register_backlight(video); | 2053 | acpi_video_bus_register_backlight(video); |
1942 | acpi_video_bus_add_notify_handler(video); | 2054 | acpi_video_bus_add_notify_handler(video); |
2055 | acpi_video_bus_add_backlight_notify_handler(video); | ||
1943 | 2056 | ||
1944 | return 0; | 2057 | return 0; |
1945 | 2058 | ||
@@ -1963,6 +2076,7 @@ static int acpi_video_bus_remove(struct acpi_device *device) | |||
1963 | 2076 | ||
1964 | video = acpi_driver_data(device); | 2077 | video = acpi_driver_data(device); |
1965 | 2078 | ||
2079 | acpi_video_bus_remove_backlight_notify_handler(video); | ||
1966 | acpi_video_bus_remove_notify_handler(video); | 2080 | acpi_video_bus_remove_notify_handler(video); |
1967 | acpi_video_bus_unregister_backlight(video); | 2081 | acpi_video_bus_unregister_backlight(video); |
1968 | acpi_video_bus_put_devices(video); | 2082 | acpi_video_bus_put_devices(video); |
@@ -2053,6 +2167,20 @@ void acpi_video_unregister(void) | |||
2053 | } | 2167 | } |
2054 | EXPORT_SYMBOL(acpi_video_unregister); | 2168 | EXPORT_SYMBOL(acpi_video_unregister); |
2055 | 2169 | ||
2170 | void acpi_video_unregister_backlight(void) | ||
2171 | { | ||
2172 | struct acpi_video_bus *video; | ||
2173 | |||
2174 | if (!register_count) | ||
2175 | return; | ||
2176 | |||
2177 | mutex_lock(&video_list_lock); | ||
2178 | list_for_each_entry(video, &video_bus_head, entry) | ||
2179 | acpi_video_bus_unregister_backlight(video); | ||
2180 | mutex_unlock(&video_list_lock); | ||
2181 | } | ||
2182 | EXPORT_SYMBOL(acpi_video_unregister_backlight); | ||
2183 | |||
2056 | /* | 2184 | /* |
2057 | * This is kind of nasty. Hardware using Intel chipsets may require | 2185 | * This is kind of nasty. Hardware using Intel chipsets may require |
2058 | * the video opregion code to be run first in order to initialise | 2186 | * the video opregion code to be run first in order to initialise |
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index c2706047337f..0033fafc470b 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
@@ -815,7 +815,7 @@ config PATA_AT32 | |||
815 | 815 | ||
816 | config PATA_AT91 | 816 | config PATA_AT91 |
817 | tristate "PATA support for AT91SAM9260" | 817 | tristate "PATA support for AT91SAM9260" |
818 | depends on ARM && ARCH_AT91 | 818 | depends on ARM && SOC_AT91SAM9 |
819 | help | 819 | help |
820 | This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. | 820 | This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. |
821 | 821 | ||
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 71e15b73513d..60707814a84b 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -1115,6 +1115,17 @@ static bool ahci_broken_online(struct pci_dev *pdev) | |||
1115 | return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); | 1115 | return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); |
1116 | } | 1116 | } |
1117 | 1117 | ||
1118 | static bool ahci_broken_devslp(struct pci_dev *pdev) | ||
1119 | { | ||
1120 | /* device with broken DEVSLP but still showing SDS capability */ | ||
1121 | static const struct pci_device_id ids[] = { | ||
1122 | { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */ | ||
1123 | {} | ||
1124 | }; | ||
1125 | |||
1126 | return pci_match_id(ids, pdev); | ||
1127 | } | ||
1128 | |||
1118 | #ifdef CONFIG_ATA_ACPI | 1129 | #ifdef CONFIG_ATA_ACPI |
1119 | static void ahci_gtf_filter_workaround(struct ata_host *host) | 1130 | static void ahci_gtf_filter_workaround(struct ata_host *host) |
1120 | { | 1131 | { |
@@ -1364,6 +1375,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1364 | 1375 | ||
1365 | hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; | 1376 | hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; |
1366 | 1377 | ||
1378 | /* must set flag prior to save config in order to take effect */ | ||
1379 | if (ahci_broken_devslp(pdev)) | ||
1380 | hpriv->flags |= AHCI_HFLAG_NO_DEVSLP; | ||
1381 | |||
1367 | /* save initial config */ | 1382 | /* save initial config */ |
1368 | ahci_pci_save_initial_config(pdev, hpriv); | 1383 | ahci_pci_save_initial_config(pdev, hpriv); |
1369 | 1384 | ||
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index b5eb886da226..af63c75c2001 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h | |||
@@ -236,6 +236,7 @@ enum { | |||
236 | port start (wait until | 236 | port start (wait until |
237 | error-handling stage) */ | 237 | error-handling stage) */ |
238 | AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */ | 238 | AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */ |
239 | AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ | ||
239 | 240 | ||
240 | /* ap->flags bits */ | 241 | /* ap->flags bits */ |
241 | 242 | ||
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index 497c7abe1c7d..8befeb69eeb1 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c | |||
@@ -29,9 +29,25 @@ | |||
29 | #include "ahci.h" | 29 | #include "ahci.h" |
30 | 30 | ||
31 | enum { | 31 | enum { |
32 | PORT_PHY_CTL = 0x178, /* Port0 PHY Control */ | 32 | /* Timer 1-ms Register */ |
33 | PORT_PHY_CTL_PDDQ_LOC = 0x100000, /* PORT_PHY_CTL bits */ | 33 | IMX_TIMER1MS = 0x00e0, |
34 | HOST_TIMER1MS = 0xe0, /* Timer 1-ms */ | 34 | /* Port0 PHY Control Register */ |
35 | IMX_P0PHYCR = 0x0178, | ||
36 | IMX_P0PHYCR_TEST_PDDQ = 1 << 20, | ||
37 | IMX_P0PHYCR_CR_READ = 1 << 19, | ||
38 | IMX_P0PHYCR_CR_WRITE = 1 << 18, | ||
39 | IMX_P0PHYCR_CR_CAP_DATA = 1 << 17, | ||
40 | IMX_P0PHYCR_CR_CAP_ADDR = 1 << 16, | ||
41 | /* Port0 PHY Status Register */ | ||
42 | IMX_P0PHYSR = 0x017c, | ||
43 | IMX_P0PHYSR_CR_ACK = 1 << 18, | ||
44 | IMX_P0PHYSR_CR_DATA_OUT = 0xffff << 0, | ||
45 | /* Lane0 Output Status Register */ | ||
46 | IMX_LANE0_OUT_STAT = 0x2003, | ||
47 | IMX_LANE0_OUT_STAT_RX_PLL_STATE = 1 << 1, | ||
48 | /* Clock Reset Register */ | ||
49 | IMX_CLOCK_RESET = 0x7f3f, | ||
50 | IMX_CLOCK_RESET_RESET = 1 << 0, | ||
35 | }; | 51 | }; |
36 | 52 | ||
37 | enum ahci_imx_type { | 53 | enum ahci_imx_type { |
@@ -54,9 +70,149 @@ MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support | |||
54 | 70 | ||
55 | static void ahci_imx_host_stop(struct ata_host *host); | 71 | static void ahci_imx_host_stop(struct ata_host *host); |
56 | 72 | ||
73 | static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert) | ||
74 | { | ||
75 | int timeout = 10; | ||
76 | u32 crval; | ||
77 | u32 srval; | ||
78 | |||
79 | /* Assert or deassert the bit */ | ||
80 | crval = readl(mmio + IMX_P0PHYCR); | ||
81 | if (assert) | ||
82 | crval |= bit; | ||
83 | else | ||
84 | crval &= ~bit; | ||
85 | writel(crval, mmio + IMX_P0PHYCR); | ||
86 | |||
87 | /* Wait for the cr_ack signal */ | ||
88 | do { | ||
89 | srval = readl(mmio + IMX_P0PHYSR); | ||
90 | if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK) | ||
91 | break; | ||
92 | usleep_range(100, 200); | ||
93 | } while (--timeout); | ||
94 | |||
95 | return timeout ? 0 : -ETIMEDOUT; | ||
96 | } | ||
97 | |||
98 | static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio) | ||
99 | { | ||
100 | u32 crval = addr; | ||
101 | int ret; | ||
102 | |||
103 | /* Supply the address on cr_data_in */ | ||
104 | writel(crval, mmio + IMX_P0PHYCR); | ||
105 | |||
106 | /* Assert the cr_cap_addr signal */ | ||
107 | ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true); | ||
108 | if (ret) | ||
109 | return ret; | ||
110 | |||
111 | /* Deassert cr_cap_addr */ | ||
112 | ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false); | ||
113 | if (ret) | ||
114 | return ret; | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | static int imx_phy_reg_write(u16 val, void __iomem *mmio) | ||
120 | { | ||
121 | u32 crval = val; | ||
122 | int ret; | ||
123 | |||
124 | /* Supply the data on cr_data_in */ | ||
125 | writel(crval, mmio + IMX_P0PHYCR); | ||
126 | |||
127 | /* Assert the cr_cap_data signal */ | ||
128 | ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true); | ||
129 | if (ret) | ||
130 | return ret; | ||
131 | |||
132 | /* Deassert cr_cap_data */ | ||
133 | ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false); | ||
134 | if (ret) | ||
135 | return ret; | ||
136 | |||
137 | if (val & IMX_CLOCK_RESET_RESET) { | ||
138 | /* | ||
139 | * In case we're resetting the phy, it's unable to acknowledge, | ||
140 | * so we return immediately here. | ||
141 | */ | ||
142 | crval |= IMX_P0PHYCR_CR_WRITE; | ||
143 | writel(crval, mmio + IMX_P0PHYCR); | ||
144 | goto out; | ||
145 | } | ||
146 | |||
147 | /* Assert the cr_write signal */ | ||
148 | ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true); | ||
149 | if (ret) | ||
150 | return ret; | ||
151 | |||
152 | /* Deassert cr_write */ | ||
153 | ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false); | ||
154 | if (ret) | ||
155 | return ret; | ||
156 | |||
157 | out: | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | static int imx_phy_reg_read(u16 *val, void __iomem *mmio) | ||
162 | { | ||
163 | int ret; | ||
164 | |||
165 | /* Assert the cr_read signal */ | ||
166 | ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true); | ||
167 | if (ret) | ||
168 | return ret; | ||
169 | |||
170 | /* Capture the data from cr_data_out[] */ | ||
171 | *val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT; | ||
172 | |||
173 | /* Deassert cr_read */ | ||
174 | ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false); | ||
175 | if (ret) | ||
176 | return ret; | ||
177 | |||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | static int imx_sata_phy_reset(struct ahci_host_priv *hpriv) | ||
182 | { | ||
183 | void __iomem *mmio = hpriv->mmio; | ||
184 | int timeout = 10; | ||
185 | u16 val; | ||
186 | int ret; | ||
187 | |||
188 | /* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */ | ||
189 | ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio); | ||
190 | if (ret) | ||
191 | return ret; | ||
192 | ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio); | ||
193 | if (ret) | ||
194 | return ret; | ||
195 | |||
196 | /* Wait for PHY RX_PLL to be stable */ | ||
197 | do { | ||
198 | usleep_range(100, 200); | ||
199 | ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio); | ||
200 | if (ret) | ||
201 | return ret; | ||
202 | ret = imx_phy_reg_read(&val, mmio); | ||
203 | if (ret) | ||
204 | return ret; | ||
205 | if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE) | ||
206 | break; | ||
207 | } while (--timeout); | ||
208 | |||
209 | return timeout ? 0 : -ETIMEDOUT; | ||
210 | } | ||
211 | |||
57 | static int imx_sata_enable(struct ahci_host_priv *hpriv) | 212 | static int imx_sata_enable(struct ahci_host_priv *hpriv) |
58 | { | 213 | { |
59 | struct imx_ahci_priv *imxpriv = hpriv->plat_data; | 214 | struct imx_ahci_priv *imxpriv = hpriv->plat_data; |
215 | struct device *dev = &imxpriv->ahci_pdev->dev; | ||
60 | int ret; | 216 | int ret; |
61 | 217 | ||
62 | if (imxpriv->no_device) | 218 | if (imxpriv->no_device) |
@@ -101,6 +257,14 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv) | |||
101 | regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13, | 257 | regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13, |
102 | IMX6Q_GPR13_SATA_MPLL_CLK_EN, | 258 | IMX6Q_GPR13_SATA_MPLL_CLK_EN, |
103 | IMX6Q_GPR13_SATA_MPLL_CLK_EN); | 259 | IMX6Q_GPR13_SATA_MPLL_CLK_EN); |
260 | |||
261 | usleep_range(100, 200); | ||
262 | |||
263 | ret = imx_sata_phy_reset(hpriv); | ||
264 | if (ret) { | ||
265 | dev_err(dev, "failed to reset phy: %d\n", ret); | ||
266 | goto disable_regulator; | ||
267 | } | ||
104 | } | 268 | } |
105 | 269 | ||
106 | usleep_range(1000, 2000); | 270 | usleep_range(1000, 2000); |
@@ -156,8 +320,8 @@ static void ahci_imx_error_handler(struct ata_port *ap) | |||
156 | * without full reset once the pddq mode is enabled making it | 320 | * without full reset once the pddq mode is enabled making it |
157 | * impossible to use as part of libata LPM. | 321 | * impossible to use as part of libata LPM. |
158 | */ | 322 | */ |
159 | reg_val = readl(mmio + PORT_PHY_CTL); | 323 | reg_val = readl(mmio + IMX_P0PHYCR); |
160 | writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL); | 324 | writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR); |
161 | imx_sata_disable(hpriv); | 325 | imx_sata_disable(hpriv); |
162 | imxpriv->no_device = true; | 326 | imxpriv->no_device = true; |
163 | } | 327 | } |
@@ -217,6 +381,7 @@ static int imx_ahci_probe(struct platform_device *pdev) | |||
217 | if (!imxpriv) | 381 | if (!imxpriv) |
218 | return -ENOMEM; | 382 | return -ENOMEM; |
219 | 383 | ||
384 | imxpriv->ahci_pdev = pdev; | ||
220 | imxpriv->no_device = false; | 385 | imxpriv->no_device = false; |
221 | imxpriv->first_time = true; | 386 | imxpriv->first_time = true; |
222 | imxpriv->type = (enum ahci_imx_type)of_id->data; | 387 | imxpriv->type = (enum ahci_imx_type)of_id->data; |
@@ -248,7 +413,7 @@ static int imx_ahci_probe(struct platform_device *pdev) | |||
248 | 413 | ||
249 | /* | 414 | /* |
250 | * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL, | 415 | * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL, |
251 | * and IP vendor specific register HOST_TIMER1MS. | 416 | * and IP vendor specific register IMX_TIMER1MS. |
252 | * Configure CAP_SSS (support stagered spin up). | 417 | * Configure CAP_SSS (support stagered spin up). |
253 | * Implement the port0. | 418 | * Implement the port0. |
254 | * Get the ahb clock rate, and configure the TIMER1MS register. | 419 | * Get the ahb clock rate, and configure the TIMER1MS register. |
@@ -265,7 +430,7 @@ static int imx_ahci_probe(struct platform_device *pdev) | |||
265 | } | 430 | } |
266 | 431 | ||
267 | reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; | 432 | reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; |
268 | writel(reg_val, hpriv->mmio + HOST_TIMER1MS); | 433 | writel(reg_val, hpriv->mmio + IMX_TIMER1MS); |
269 | 434 | ||
270 | ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0); | 435 | ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0); |
271 | if (ret) | 436 | if (ret) |
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 6bd4f660b4e1..b9861453fc81 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
@@ -452,6 +452,13 @@ void ahci_save_initial_config(struct device *dev, | |||
452 | cap &= ~HOST_CAP_SNTF; | 452 | cap &= ~HOST_CAP_SNTF; |
453 | } | 453 | } |
454 | 454 | ||
455 | if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) { | ||
456 | dev_info(dev, | ||
457 | "controller can't do DEVSLP, turning off\n"); | ||
458 | cap2 &= ~HOST_CAP2_SDS; | ||
459 | cap2 &= ~HOST_CAP2_SADM; | ||
460 | } | ||
461 | |||
455 | if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) { | 462 | if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) { |
456 | dev_info(dev, "controller can do FBS, turning on CAP_FBS\n"); | 463 | dev_info(dev, "controller can do FBS, turning on CAP_FBS\n"); |
457 | cap |= HOST_CAP_FBS; | 464 | cap |= HOST_CAP_FBS; |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 943cc8b83e59..ea83828bfea9 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -6314,6 +6314,8 @@ int ata_host_activate(struct ata_host *host, int irq, | |||
6314 | static void ata_port_detach(struct ata_port *ap) | 6314 | static void ata_port_detach(struct ata_port *ap) |
6315 | { | 6315 | { |
6316 | unsigned long flags; | 6316 | unsigned long flags; |
6317 | struct ata_link *link; | ||
6318 | struct ata_device *dev; | ||
6317 | 6319 | ||
6318 | if (!ap->ops->error_handler) | 6320 | if (!ap->ops->error_handler) |
6319 | goto skip_eh; | 6321 | goto skip_eh; |
@@ -6333,6 +6335,13 @@ static void ata_port_detach(struct ata_port *ap) | |||
6333 | cancel_delayed_work_sync(&ap->hotplug_task); | 6335 | cancel_delayed_work_sync(&ap->hotplug_task); |
6334 | 6336 | ||
6335 | skip_eh: | 6337 | skip_eh: |
6338 | /* clean up zpodd on port removal */ | ||
6339 | ata_for_each_link(link, ap, HOST_FIRST) { | ||
6340 | ata_for_each_dev(dev, link, ALL) { | ||
6341 | if (zpodd_dev_enabled(dev)) | ||
6342 | zpodd_exit(dev); | ||
6343 | } | ||
6344 | } | ||
6336 | if (ap->pmp_link) { | 6345 | if (ap->pmp_link) { |
6337 | int i; | 6346 | int i; |
6338 | for (i = 0; i < SATA_PMP_MAX_PORTS; i++) | 6347 | for (i = 0; i < SATA_PMP_MAX_PORTS; i++) |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 86d5e4fb5b98..343ffad59377 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -479,7 +479,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn | |||
479 | TRACE_DEVICE(dev); | 479 | TRACE_DEVICE(dev); |
480 | TRACE_RESUME(0); | 480 | TRACE_RESUME(0); |
481 | 481 | ||
482 | if (dev->power.syscore) | 482 | if (dev->power.syscore || dev->power.direct_complete) |
483 | goto Out; | 483 | goto Out; |
484 | 484 | ||
485 | if (!dev->power.is_noirq_suspended) | 485 | if (!dev->power.is_noirq_suspended) |
@@ -605,7 +605,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn | |||
605 | TRACE_DEVICE(dev); | 605 | TRACE_DEVICE(dev); |
606 | TRACE_RESUME(0); | 606 | TRACE_RESUME(0); |
607 | 607 | ||
608 | if (dev->power.syscore) | 608 | if (dev->power.syscore || dev->power.direct_complete) |
609 | goto Out; | 609 | goto Out; |
610 | 610 | ||
611 | if (!dev->power.is_late_suspended) | 611 | if (!dev->power.is_late_suspended) |
@@ -735,6 +735,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
735 | if (dev->power.syscore) | 735 | if (dev->power.syscore) |
736 | goto Complete; | 736 | goto Complete; |
737 | 737 | ||
738 | if (dev->power.direct_complete) { | ||
739 | /* Match the pm_runtime_disable() in __device_suspend(). */ | ||
740 | pm_runtime_enable(dev); | ||
741 | goto Complete; | ||
742 | } | ||
743 | |||
738 | dpm_wait(dev->parent, async); | 744 | dpm_wait(dev->parent, async); |
739 | dpm_watchdog_set(&wd, dev); | 745 | dpm_watchdog_set(&wd, dev); |
740 | device_lock(dev); | 746 | device_lock(dev); |
@@ -1007,7 +1013,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a | |||
1007 | goto Complete; | 1013 | goto Complete; |
1008 | } | 1014 | } |
1009 | 1015 | ||
1010 | if (dev->power.syscore) | 1016 | if (dev->power.syscore || dev->power.direct_complete) |
1011 | goto Complete; | 1017 | goto Complete; |
1012 | 1018 | ||
1013 | dpm_wait_for_children(dev, async); | 1019 | dpm_wait_for_children(dev, async); |
@@ -1146,7 +1152,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as | |||
1146 | goto Complete; | 1152 | goto Complete; |
1147 | } | 1153 | } |
1148 | 1154 | ||
1149 | if (dev->power.syscore) | 1155 | if (dev->power.syscore || dev->power.direct_complete) |
1150 | goto Complete; | 1156 | goto Complete; |
1151 | 1157 | ||
1152 | dpm_wait_for_children(dev, async); | 1158 | dpm_wait_for_children(dev, async); |
@@ -1332,6 +1338,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
1332 | if (dev->power.syscore) | 1338 | if (dev->power.syscore) |
1333 | goto Complete; | 1339 | goto Complete; |
1334 | 1340 | ||
1341 | if (dev->power.direct_complete) { | ||
1342 | if (pm_runtime_status_suspended(dev)) { | ||
1343 | pm_runtime_disable(dev); | ||
1344 | if (pm_runtime_suspended_if_enabled(dev)) | ||
1345 | goto Complete; | ||
1346 | |||
1347 | pm_runtime_enable(dev); | ||
1348 | } | ||
1349 | dev->power.direct_complete = false; | ||
1350 | } | ||
1351 | |||
1335 | dpm_watchdog_set(&wd, dev); | 1352 | dpm_watchdog_set(&wd, dev); |
1336 | device_lock(dev); | 1353 | device_lock(dev); |
1337 | 1354 | ||
@@ -1382,10 +1399,19 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
1382 | 1399 | ||
1383 | End: | 1400 | End: |
1384 | if (!error) { | 1401 | if (!error) { |
1402 | struct device *parent = dev->parent; | ||
1403 | |||
1385 | dev->power.is_suspended = true; | 1404 | dev->power.is_suspended = true; |
1386 | if (dev->power.wakeup_path | 1405 | if (parent) { |
1387 | && dev->parent && !dev->parent->power.ignore_children) | 1406 | spin_lock_irq(&parent->power.lock); |
1388 | dev->parent->power.wakeup_path = true; | 1407 | |
1408 | dev->parent->power.direct_complete = false; | ||
1409 | if (dev->power.wakeup_path | ||
1410 | && !dev->parent->power.ignore_children) | ||
1411 | dev->parent->power.wakeup_path = true; | ||
1412 | |||
1413 | spin_unlock_irq(&parent->power.lock); | ||
1414 | } | ||
1389 | } | 1415 | } |
1390 | 1416 | ||
1391 | device_unlock(dev); | 1417 | device_unlock(dev); |
@@ -1487,7 +1513,7 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
1487 | { | 1513 | { |
1488 | int (*callback)(struct device *) = NULL; | 1514 | int (*callback)(struct device *) = NULL; |
1489 | char *info = NULL; | 1515 | char *info = NULL; |
1490 | int error = 0; | 1516 | int ret = 0; |
1491 | 1517 | ||
1492 | if (dev->power.syscore) | 1518 | if (dev->power.syscore) |
1493 | return 0; | 1519 | return 0; |
@@ -1523,17 +1549,27 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
1523 | callback = dev->driver->pm->prepare; | 1549 | callback = dev->driver->pm->prepare; |
1524 | } | 1550 | } |
1525 | 1551 | ||
1526 | if (callback) { | 1552 | if (callback) |
1527 | error = callback(dev); | 1553 | ret = callback(dev); |
1528 | suspend_report_result(callback, error); | ||
1529 | } | ||
1530 | 1554 | ||
1531 | device_unlock(dev); | 1555 | device_unlock(dev); |
1532 | 1556 | ||
1533 | if (error) | 1557 | if (ret < 0) { |
1558 | suspend_report_result(callback, ret); | ||
1534 | pm_runtime_put(dev); | 1559 | pm_runtime_put(dev); |
1535 | 1560 | return ret; | |
1536 | return error; | 1561 | } |
1562 | /* | ||
1563 | * A positive return value from ->prepare() means "this device appears | ||
1564 | * to be runtime-suspended and its state is fine, so if it really is | ||
1565 | * runtime-suspended, you can leave it in that state provided that you | ||
1566 | * will do the same thing with all of its descendants". This only | ||
1567 | * applies to suspend transitions, however. | ||
1568 | */ | ||
1569 | spin_lock_irq(&dev->power.lock); | ||
1570 | dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND; | ||
1571 | spin_unlock_irq(&dev->power.lock); | ||
1572 | return 0; | ||
1537 | } | 1573 | } |
1538 | 1574 | ||
1539 | /** | 1575 | /** |
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 25538675d59e..39412c15db70 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
@@ -394,6 +394,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); | |||
394 | * to keep the integrity of the internal data structures. Callers should ensure | 394 | * to keep the integrity of the internal data structures. Callers should ensure |
395 | * that this function is *NOT* called under RCU protection or in contexts where | 395 | * that this function is *NOT* called under RCU protection or in contexts where |
396 | * mutex cannot be locked. | 396 | * mutex cannot be locked. |
397 | * | ||
398 | * Return: | ||
399 | * 0: On success OR | ||
400 | * Duplicate OPPs (both freq and volt are same) and opp->available | ||
401 | * -EEXIST: Freq are same and volt are different OR | ||
402 | * Duplicate OPPs (both freq and volt are same) and !opp->available | ||
403 | * -ENOMEM: Memory allocation failure | ||
397 | */ | 404 | */ |
398 | int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | 405 | int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) |
399 | { | 406 | { |
@@ -443,15 +450,31 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | |||
443 | new_opp->u_volt = u_volt; | 450 | new_opp->u_volt = u_volt; |
444 | new_opp->available = true; | 451 | new_opp->available = true; |
445 | 452 | ||
446 | /* Insert new OPP in order of increasing frequency */ | 453 | /* |
454 | * Insert new OPP in order of increasing frequency | ||
455 | * and discard if already present | ||
456 | */ | ||
447 | head = &dev_opp->opp_list; | 457 | head = &dev_opp->opp_list; |
448 | list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { | 458 | list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { |
449 | if (new_opp->rate < opp->rate) | 459 | if (new_opp->rate <= opp->rate) |
450 | break; | 460 | break; |
451 | else | 461 | else |
452 | head = &opp->node; | 462 | head = &opp->node; |
453 | } | 463 | } |
454 | 464 | ||
465 | /* Duplicate OPPs ? */ | ||
466 | if (new_opp->rate == opp->rate) { | ||
467 | int ret = opp->available && new_opp->u_volt == opp->u_volt ? | ||
468 | 0 : -EEXIST; | ||
469 | |||
470 | dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", | ||
471 | __func__, opp->rate, opp->u_volt, opp->available, | ||
472 | new_opp->rate, new_opp->u_volt, new_opp->available); | ||
473 | mutex_unlock(&dev_opp_list_lock); | ||
474 | kfree(new_opp); | ||
475 | return ret; | ||
476 | } | ||
477 | |||
455 | list_add_rcu(&new_opp->node, head); | 478 | list_add_rcu(&new_opp->node, head); |
456 | mutex_unlock(&dev_opp_list_lock); | 479 | mutex_unlock(&dev_opp_list_lock); |
457 | 480 | ||
@@ -734,11 +757,9 @@ int of_init_opp_table(struct device *dev) | |||
734 | unsigned long freq = be32_to_cpup(val++) * 1000; | 757 | unsigned long freq = be32_to_cpup(val++) * 1000; |
735 | unsigned long volt = be32_to_cpup(val++); | 758 | unsigned long volt = be32_to_cpup(val++); |
736 | 759 | ||
737 | if (dev_pm_opp_add(dev, freq, volt)) { | 760 | if (dev_pm_opp_add(dev, freq, volt)) |
738 | dev_warn(dev, "%s: Failed to add OPP %ld\n", | 761 | dev_warn(dev, "%s: Failed to add OPP %ld\n", |
739 | __func__, freq); | 762 | __func__, freq); |
740 | continue; | ||
741 | } | ||
742 | nr -= 2; | 763 | nr -= 2; |
743 | } | 764 | } |
744 | 765 | ||
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 2d56f4113ae7..eb1bd2ecad8b 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
@@ -318,10 +318,16 @@ int device_init_wakeup(struct device *dev, bool enable) | |||
318 | { | 318 | { |
319 | int ret = 0; | 319 | int ret = 0; |
320 | 320 | ||
321 | if (!dev) | ||
322 | return -EINVAL; | ||
323 | |||
321 | if (enable) { | 324 | if (enable) { |
322 | device_set_wakeup_capable(dev, true); | 325 | device_set_wakeup_capable(dev, true); |
323 | ret = device_wakeup_enable(dev); | 326 | ret = device_wakeup_enable(dev); |
324 | } else { | 327 | } else { |
328 | if (dev->power.can_wakeup) | ||
329 | device_wakeup_disable(dev); | ||
330 | |||
325 | device_set_wakeup_capable(dev, false); | 331 | device_set_wakeup_capable(dev, false); |
326 | } | 332 | } |
327 | 333 | ||
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 6d8a87f252de..cb9b1f8326c3 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -144,11 +144,11 @@ static void virtblk_done(struct virtqueue *vq) | |||
144 | if (unlikely(virtqueue_is_broken(vq))) | 144 | if (unlikely(virtqueue_is_broken(vq))) |
145 | break; | 145 | break; |
146 | } while (!virtqueue_enable_cb(vq)); | 146 | } while (!virtqueue_enable_cb(vq)); |
147 | spin_unlock_irqrestore(&vblk->vq_lock, flags); | ||
148 | 147 | ||
149 | /* In case queue is stopped waiting for more buffers. */ | 148 | /* In case queue is stopped waiting for more buffers. */ |
150 | if (req_done) | 149 | if (req_done) |
151 | blk_mq_start_stopped_hw_queues(vblk->disk->queue); | 150 | blk_mq_start_stopped_hw_queues(vblk->disk->queue); |
151 | spin_unlock_irqrestore(&vblk->vq_lock, flags); | ||
152 | } | 152 | } |
153 | 153 | ||
154 | static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) | 154 | static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) |
@@ -202,8 +202,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) | |||
202 | err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num); | 202 | err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num); |
203 | if (err) { | 203 | if (err) { |
204 | virtqueue_kick(vblk->vq); | 204 | virtqueue_kick(vblk->vq); |
205 | spin_unlock_irqrestore(&vblk->vq_lock, flags); | ||
206 | blk_mq_stop_hw_queue(hctx); | 205 | blk_mq_stop_hw_queue(hctx); |
206 | spin_unlock_irqrestore(&vblk->vq_lock, flags); | ||
207 | /* Out of mem doesn't actually happen, since we fall back | 207 | /* Out of mem doesn't actually happen, since we fall back |
208 | * to direct descriptors */ | 208 | * to direct descriptors */ |
209 | if (err == -ENOMEM || err == -ENOSPC) | 209 | if (err == -ENOMEM || err == -ENOSPC) |
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index 293e2e0a0a87..00b73448b22e 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <linux/of.h> | 56 | #include <linux/of.h> |
57 | #include <linux/of_address.h> | 57 | #include <linux/of_address.h> |
58 | #include <linux/debugfs.h> | 58 | #include <linux/debugfs.h> |
59 | #include <linux/log2.h> | ||
59 | 60 | ||
60 | /* | 61 | /* |
61 | * DDR target is the same on all platforms. | 62 | * DDR target is the same on all platforms. |
@@ -222,12 +223,6 @@ static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus, | |||
222 | */ | 223 | */ |
223 | if ((u64)base < wend && end > wbase) | 224 | if ((u64)base < wend && end > wbase) |
224 | return 0; | 225 | return 0; |
225 | |||
226 | /* | ||
227 | * Check if target/attribute conflicts | ||
228 | */ | ||
229 | if (target == wtarget && attr == wattr) | ||
230 | return 0; | ||
231 | } | 226 | } |
232 | 227 | ||
233 | return 1; | 228 | return 1; |
@@ -266,6 +261,17 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus, | |||
266 | mbus->soc->win_cfg_offset(win); | 261 | mbus->soc->win_cfg_offset(win); |
267 | u32 ctrl, remap_addr; | 262 | u32 ctrl, remap_addr; |
268 | 263 | ||
264 | if (!is_power_of_2(size)) { | ||
265 | WARN(true, "Invalid MBus window size: 0x%zx\n", size); | ||
266 | return -EINVAL; | ||
267 | } | ||
268 | |||
269 | if ((base & (phys_addr_t)(size - 1)) != 0) { | ||
270 | WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base, | ||
271 | size); | ||
272 | return -EINVAL; | ||
273 | } | ||
274 | |||
269 | ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) | | 275 | ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) | |
270 | (attr << WIN_CTRL_ATTR_SHIFT) | | 276 | (attr << WIN_CTRL_ATTR_SHIFT) | |
271 | (target << WIN_CTRL_TGT_SHIFT) | | 277 | (target << WIN_CTRL_TGT_SHIFT) | |
@@ -413,6 +419,10 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v) | |||
413 | win, (unsigned long long)wbase, | 419 | win, (unsigned long long)wbase, |
414 | (unsigned long long)(wbase + wsize), wtarget, wattr); | 420 | (unsigned long long)(wbase + wsize), wtarget, wattr); |
415 | 421 | ||
422 | if (!is_power_of_2(wsize) || | ||
423 | ((wbase & (u64)(wsize - 1)) != 0)) | ||
424 | seq_puts(seq, " (Invalid base/size!!)"); | ||
425 | |||
416 | if (win < mbus->soc->num_remappable_wins) { | 426 | if (win < mbus->soc->num_remappable_wins) { |
417 | seq_printf(seq, " (remap %016llx)\n", | 427 | seq_printf(seq, " (remap %016llx)\n", |
418 | (unsigned long long)wremap); | 428 | (unsigned long long)wremap); |
diff --git a/drivers/char/random.c b/drivers/char/random.c index 6b75713d953a..102c50d38902 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -995,8 +995,11 @@ retry: | |||
995 | ibytes = min_t(size_t, ibytes, have_bytes - reserved); | 995 | ibytes = min_t(size_t, ibytes, have_bytes - reserved); |
996 | if (ibytes < min) | 996 | if (ibytes < min) |
997 | ibytes = 0; | 997 | ibytes = 0; |
998 | entropy_count = max_t(int, 0, | 998 | if (have_bytes >= ibytes + reserved) |
999 | entropy_count - (ibytes << (ENTROPY_SHIFT + 3))); | 999 | entropy_count -= ibytes << (ENTROPY_SHIFT + 3); |
1000 | else | ||
1001 | entropy_count = reserved << (ENTROPY_SHIFT + 3); | ||
1002 | |||
1000 | if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) | 1003 | if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) |
1001 | goto retry; | 1004 | goto retry; |
1002 | 1005 | ||
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c index b9a57fa4b710..565a9478cb94 100644 --- a/drivers/char/tpm/tpm_acpi.c +++ b/drivers/char/tpm/tpm_acpi.c | |||
@@ -95,7 +95,7 @@ int read_log(struct tpm_bios_log *log) | |||
95 | 95 | ||
96 | log->bios_event_log_end = log->bios_event_log + len; | 96 | log->bios_event_log_end = log->bios_event_log + len; |
97 | 97 | ||
98 | virt = acpi_os_map_memory(start, len); | 98 | virt = acpi_os_map_iomem(start, len); |
99 | if (!virt) { | 99 | if (!virt) { |
100 | kfree(log->bios_event_log); | 100 | kfree(log->bios_event_log); |
101 | printk("%s: ERROR - Unable to map memory\n", __func__); | 101 | printk("%s: ERROR - Unable to map memory\n", __func__); |
@@ -104,6 +104,6 @@ int read_log(struct tpm_bios_log *log) | |||
104 | 104 | ||
105 | memcpy_fromio(log->bios_event_log, virt, len); | 105 | memcpy_fromio(log->bios_event_log, virt, len); |
106 | 106 | ||
107 | acpi_os_unmap_memory(virt, len); | 107 | acpi_os_unmap_iomem(virt, len); |
108 | return 0; | 108 | return 0; |
109 | } | 109 | } |
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c index b3ea223585bd..61dcc8011ec7 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c | |||
@@ -328,13 +328,11 @@ int tpm_add_ppi(struct kobject *parent) | |||
328 | /* Cache TPM ACPI handle and version string */ | 328 | /* Cache TPM ACPI handle and version string */ |
329 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, | 329 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, |
330 | ppi_callback, NULL, NULL, &tpm_ppi_handle); | 330 | ppi_callback, NULL, NULL, &tpm_ppi_handle); |
331 | if (tpm_ppi_handle == NULL) | 331 | return tpm_ppi_handle ? sysfs_create_group(parent, &ppi_attr_grp) : 0; |
332 | return -ENODEV; | ||
333 | |||
334 | return sysfs_create_group(parent, &ppi_attr_grp); | ||
335 | } | 332 | } |
336 | 333 | ||
337 | void tpm_remove_ppi(struct kobject *parent) | 334 | void tpm_remove_ppi(struct kobject *parent) |
338 | { | 335 | { |
339 | sysfs_remove_group(parent, &ppi_attr_grp); | 336 | if (tpm_ppi_handle) |
337 | sysfs_remove_group(parent, &ppi_attr_grp); | ||
340 | } | 338 | } |
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 5f8a28735c96..0745059b1834 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile | |||
@@ -8,6 +8,7 @@ obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o | |||
8 | obj-$(CONFIG_COMMON_CLK) += clk-gate.o | 8 | obj-$(CONFIG_COMMON_CLK) += clk-gate.o |
9 | obj-$(CONFIG_COMMON_CLK) += clk-mux.o | 9 | obj-$(CONFIG_COMMON_CLK) += clk-mux.o |
10 | obj-$(CONFIG_COMMON_CLK) += clk-composite.o | 10 | obj-$(CONFIG_COMMON_CLK) += clk-composite.o |
11 | obj-$(CONFIG_COMMON_CLK) += clk-fractional-divider.o | ||
11 | 12 | ||
12 | # hardware specific clock types | 13 | # hardware specific clock types |
13 | # please keep this section sorted lexicographically by file/directory path name | 14 | # please keep this section sorted lexicographically by file/directory path name |
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c index c7607feb18dd..54a06526f64f 100644 --- a/drivers/clk/bcm/clk-kona-setup.c +++ b/drivers/clk/bcm/clk-kona-setup.c | |||
@@ -27,7 +27,7 @@ LIST_HEAD(ccu_list); /* The list of set up CCUs */ | |||
27 | 27 | ||
28 | static bool clk_requires_trigger(struct kona_clk *bcm_clk) | 28 | static bool clk_requires_trigger(struct kona_clk *bcm_clk) |
29 | { | 29 | { |
30 | struct peri_clk_data *peri = bcm_clk->peri; | 30 | struct peri_clk_data *peri = bcm_clk->u.peri; |
31 | struct bcm_clk_sel *sel; | 31 | struct bcm_clk_sel *sel; |
32 | struct bcm_clk_div *div; | 32 | struct bcm_clk_div *div; |
33 | 33 | ||
@@ -63,7 +63,7 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk) | |||
63 | u32 limit; | 63 | u32 limit; |
64 | 64 | ||
65 | BUG_ON(bcm_clk->type != bcm_clk_peri); | 65 | BUG_ON(bcm_clk->type != bcm_clk_peri); |
66 | peri = bcm_clk->peri; | 66 | peri = bcm_clk->u.peri; |
67 | name = bcm_clk->name; | 67 | name = bcm_clk->name; |
68 | range = bcm_clk->ccu->range; | 68 | range = bcm_clk->ccu->range; |
69 | 69 | ||
@@ -81,19 +81,19 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk) | |||
81 | 81 | ||
82 | div = &peri->div; | 82 | div = &peri->div; |
83 | if (divider_exists(div)) { | 83 | if (divider_exists(div)) { |
84 | if (div->offset > limit) { | 84 | if (div->u.s.offset > limit) { |
85 | pr_err("%s: bad divider offset for %s (%u > %u)\n", | 85 | pr_err("%s: bad divider offset for %s (%u > %u)\n", |
86 | __func__, name, div->offset, limit); | 86 | __func__, name, div->u.s.offset, limit); |
87 | return false; | 87 | return false; |
88 | } | 88 | } |
89 | } | 89 | } |
90 | 90 | ||
91 | div = &peri->pre_div; | 91 | div = &peri->pre_div; |
92 | if (divider_exists(div)) { | 92 | if (divider_exists(div)) { |
93 | if (div->offset > limit) { | 93 | if (div->u.s.offset > limit) { |
94 | pr_err("%s: bad pre-divider offset for %s " | 94 | pr_err("%s: bad pre-divider offset for %s " |
95 | "(%u > %u)\n", | 95 | "(%u > %u)\n", |
96 | __func__, name, div->offset, limit); | 96 | __func__, name, div->u.s.offset, limit); |
97 | return false; | 97 | return false; |
98 | } | 98 | } |
99 | } | 99 | } |
@@ -249,21 +249,22 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name, | |||
249 | { | 249 | { |
250 | if (divider_is_fixed(div)) { | 250 | if (divider_is_fixed(div)) { |
251 | /* Any fixed divider value but 0 is OK */ | 251 | /* Any fixed divider value but 0 is OK */ |
252 | if (div->fixed == 0) { | 252 | if (div->u.fixed == 0) { |
253 | pr_err("%s: bad %s fixed value 0 for %s\n", __func__, | 253 | pr_err("%s: bad %s fixed value 0 for %s\n", __func__, |
254 | field_name, clock_name); | 254 | field_name, clock_name); |
255 | return false; | 255 | return false; |
256 | } | 256 | } |
257 | return true; | 257 | return true; |
258 | } | 258 | } |
259 | if (!bitfield_valid(div->shift, div->width, field_name, clock_name)) | 259 | if (!bitfield_valid(div->u.s.shift, div->u.s.width, |
260 | field_name, clock_name)) | ||
260 | return false; | 261 | return false; |
261 | 262 | ||
262 | if (divider_has_fraction(div)) | 263 | if (divider_has_fraction(div)) |
263 | if (div->frac_width > div->width) { | 264 | if (div->u.s.frac_width > div->u.s.width) { |
264 | pr_warn("%s: bad %s fraction width for %s (%u > %u)\n", | 265 | pr_warn("%s: bad %s fraction width for %s (%u > %u)\n", |
265 | __func__, field_name, clock_name, | 266 | __func__, field_name, clock_name, |
266 | div->frac_width, div->width); | 267 | div->u.s.frac_width, div->u.s.width); |
267 | return false; | 268 | return false; |
268 | } | 269 | } |
269 | 270 | ||
@@ -278,7 +279,7 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name, | |||
278 | */ | 279 | */ |
279 | static bool kona_dividers_valid(struct kona_clk *bcm_clk) | 280 | static bool kona_dividers_valid(struct kona_clk *bcm_clk) |
280 | { | 281 | { |
281 | struct peri_clk_data *peri = bcm_clk->peri; | 282 | struct peri_clk_data *peri = bcm_clk->u.peri; |
282 | struct bcm_clk_div *div; | 283 | struct bcm_clk_div *div; |
283 | struct bcm_clk_div *pre_div; | 284 | struct bcm_clk_div *pre_div; |
284 | u32 limit; | 285 | u32 limit; |
@@ -295,7 +296,7 @@ static bool kona_dividers_valid(struct kona_clk *bcm_clk) | |||
295 | 296 | ||
296 | limit = BITS_PER_BYTE * sizeof(u32); | 297 | limit = BITS_PER_BYTE * sizeof(u32); |
297 | 298 | ||
298 | return div->frac_width + pre_div->frac_width <= limit; | 299 | return div->u.s.frac_width + pre_div->u.s.frac_width <= limit; |
299 | } | 300 | } |
300 | 301 | ||
301 | 302 | ||
@@ -328,7 +329,7 @@ peri_clk_data_valid(struct kona_clk *bcm_clk) | |||
328 | if (!peri_clk_data_offsets_valid(bcm_clk)) | 329 | if (!peri_clk_data_offsets_valid(bcm_clk)) |
329 | return false; | 330 | return false; |
330 | 331 | ||
331 | peri = bcm_clk->peri; | 332 | peri = bcm_clk->u.peri; |
332 | name = bcm_clk->name; | 333 | name = bcm_clk->name; |
333 | gate = &peri->gate; | 334 | gate = &peri->gate; |
334 | if (gate_exists(gate) && !gate_valid(gate, "gate", name)) | 335 | if (gate_exists(gate) && !gate_valid(gate, "gate", name)) |
@@ -588,12 +589,12 @@ static void bcm_clk_teardown(struct kona_clk *bcm_clk) | |||
588 | { | 589 | { |
589 | switch (bcm_clk->type) { | 590 | switch (bcm_clk->type) { |
590 | case bcm_clk_peri: | 591 | case bcm_clk_peri: |
591 | peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data); | 592 | peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data); |
592 | break; | 593 | break; |
593 | default: | 594 | default: |
594 | break; | 595 | break; |
595 | } | 596 | } |
596 | bcm_clk->data = NULL; | 597 | bcm_clk->u.data = NULL; |
597 | bcm_clk->type = bcm_clk_none; | 598 | bcm_clk->type = bcm_clk_none; |
598 | } | 599 | } |
599 | 600 | ||
@@ -644,7 +645,7 @@ struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name, | |||
644 | break; | 645 | break; |
645 | } | 646 | } |
646 | bcm_clk->type = type; | 647 | bcm_clk->type = type; |
647 | bcm_clk->data = data; | 648 | bcm_clk->u.data = data; |
648 | 649 | ||
649 | /* Make sure everything makes sense before we set it up */ | 650 | /* Make sure everything makes sense before we set it up */ |
650 | if (!kona_clk_valid(bcm_clk)) { | 651 | if (!kona_clk_valid(bcm_clk)) { |
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c index e3d339e08309..db11a87449f2 100644 --- a/drivers/clk/bcm/clk-kona.c +++ b/drivers/clk/bcm/clk-kona.c | |||
@@ -61,7 +61,7 @@ u64 do_div_round_closest(u64 dividend, unsigned long divisor) | |||
61 | /* Convert a divider into the scaled divisor value it represents. */ | 61 | /* Convert a divider into the scaled divisor value it represents. */ |
62 | static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div) | 62 | static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div) |
63 | { | 63 | { |
64 | return (u64)reg_div + ((u64)1 << div->frac_width); | 64 | return (u64)reg_div + ((u64)1 << div->u.s.frac_width); |
65 | } | 65 | } |
66 | 66 | ||
67 | /* | 67 | /* |
@@ -77,7 +77,7 @@ u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths) | |||
77 | BUG_ON(billionths >= BILLION); | 77 | BUG_ON(billionths >= BILLION); |
78 | 78 | ||
79 | combined = (u64)div_value * BILLION + billionths; | 79 | combined = (u64)div_value * BILLION + billionths; |
80 | combined <<= div->frac_width; | 80 | combined <<= div->u.s.frac_width; |
81 | 81 | ||
82 | return do_div_round_closest(combined, BILLION); | 82 | return do_div_round_closest(combined, BILLION); |
83 | } | 83 | } |
@@ -87,7 +87,7 @@ static inline u64 | |||
87 | scaled_div_min(struct bcm_clk_div *div) | 87 | scaled_div_min(struct bcm_clk_div *div) |
88 | { | 88 | { |
89 | if (divider_is_fixed(div)) | 89 | if (divider_is_fixed(div)) |
90 | return (u64)div->fixed; | 90 | return (u64)div->u.fixed; |
91 | 91 | ||
92 | return scaled_div_value(div, 0); | 92 | return scaled_div_value(div, 0); |
93 | } | 93 | } |
@@ -98,9 +98,9 @@ u64 scaled_div_max(struct bcm_clk_div *div) | |||
98 | u32 reg_div; | 98 | u32 reg_div; |
99 | 99 | ||
100 | if (divider_is_fixed(div)) | 100 | if (divider_is_fixed(div)) |
101 | return (u64)div->fixed; | 101 | return (u64)div->u.fixed; |
102 | 102 | ||
103 | reg_div = ((u32)1 << div->width) - 1; | 103 | reg_div = ((u32)1 << div->u.s.width) - 1; |
104 | 104 | ||
105 | return scaled_div_value(div, reg_div); | 105 | return scaled_div_value(div, reg_div); |
106 | } | 106 | } |
@@ -115,7 +115,7 @@ divider(struct bcm_clk_div *div, u64 scaled_div) | |||
115 | BUG_ON(scaled_div < scaled_div_min(div)); | 115 | BUG_ON(scaled_div < scaled_div_min(div)); |
116 | BUG_ON(scaled_div > scaled_div_max(div)); | 116 | BUG_ON(scaled_div > scaled_div_max(div)); |
117 | 117 | ||
118 | return (u32)(scaled_div - ((u64)1 << div->frac_width)); | 118 | return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width)); |
119 | } | 119 | } |
120 | 120 | ||
121 | /* Return a rate scaled for use when dividing by a scaled divisor. */ | 121 | /* Return a rate scaled for use when dividing by a scaled divisor. */ |
@@ -125,7 +125,7 @@ scale_rate(struct bcm_clk_div *div, u32 rate) | |||
125 | if (divider_is_fixed(div)) | 125 | if (divider_is_fixed(div)) |
126 | return (u64)rate; | 126 | return (u64)rate; |
127 | 127 | ||
128 | return (u64)rate << div->frac_width; | 128 | return (u64)rate << div->u.s.frac_width; |
129 | } | 129 | } |
130 | 130 | ||
131 | /* CCU access */ | 131 | /* CCU access */ |
@@ -398,14 +398,14 @@ static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div) | |||
398 | u32 reg_div; | 398 | u32 reg_div; |
399 | 399 | ||
400 | if (divider_is_fixed(div)) | 400 | if (divider_is_fixed(div)) |
401 | return (u64)div->fixed; | 401 | return (u64)div->u.fixed; |
402 | 402 | ||
403 | flags = ccu_lock(ccu); | 403 | flags = ccu_lock(ccu); |
404 | reg_val = __ccu_read(ccu, div->offset); | 404 | reg_val = __ccu_read(ccu, div->u.s.offset); |
405 | ccu_unlock(ccu, flags); | 405 | ccu_unlock(ccu, flags); |
406 | 406 | ||
407 | /* Extract the full divider field from the register value */ | 407 | /* Extract the full divider field from the register value */ |
408 | reg_div = bitfield_extract(reg_val, div->shift, div->width); | 408 | reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width); |
409 | 409 | ||
410 | /* Return the scaled divisor value it represents */ | 410 | /* Return the scaled divisor value it represents */ |
411 | return scaled_div_value(div, reg_div); | 411 | return scaled_div_value(div, reg_div); |
@@ -433,16 +433,17 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, | |||
433 | * state was defined in the device tree, we just find out | 433 | * state was defined in the device tree, we just find out |
434 | * what its current value is rather than updating it. | 434 | * what its current value is rather than updating it. |
435 | */ | 435 | */ |
436 | if (div->scaled_div == BAD_SCALED_DIV_VALUE) { | 436 | if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) { |
437 | reg_val = __ccu_read(ccu, div->offset); | 437 | reg_val = __ccu_read(ccu, div->u.s.offset); |
438 | reg_div = bitfield_extract(reg_val, div->shift, div->width); | 438 | reg_div = bitfield_extract(reg_val, div->u.s.shift, |
439 | div->scaled_div = scaled_div_value(div, reg_div); | 439 | div->u.s.width); |
440 | div->u.s.scaled_div = scaled_div_value(div, reg_div); | ||
440 | 441 | ||
441 | return 0; | 442 | return 0; |
442 | } | 443 | } |
443 | 444 | ||
444 | /* Convert the scaled divisor to the value we need to record */ | 445 | /* Convert the scaled divisor to the value we need to record */ |
445 | reg_div = divider(div, div->scaled_div); | 446 | reg_div = divider(div, div->u.s.scaled_div); |
446 | 447 | ||
447 | /* Clock needs to be enabled before changing the rate */ | 448 | /* Clock needs to be enabled before changing the rate */ |
448 | enabled = __is_clk_gate_enabled(ccu, gate); | 449 | enabled = __is_clk_gate_enabled(ccu, gate); |
@@ -452,9 +453,10 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, | |||
452 | } | 453 | } |
453 | 454 | ||
454 | /* Replace the divider value and record the result */ | 455 | /* Replace the divider value and record the result */ |
455 | reg_val = __ccu_read(ccu, div->offset); | 456 | reg_val = __ccu_read(ccu, div->u.s.offset); |
456 | reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div); | 457 | reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width, |
457 | __ccu_write(ccu, div->offset, reg_val); | 458 | reg_div); |
459 | __ccu_write(ccu, div->u.s.offset, reg_val); | ||
458 | 460 | ||
459 | /* If the trigger fails we still want to disable the gate */ | 461 | /* If the trigger fails we still want to disable the gate */ |
460 | if (!__clk_trigger(ccu, trig)) | 462 | if (!__clk_trigger(ccu, trig)) |
@@ -490,11 +492,11 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, | |||
490 | 492 | ||
491 | BUG_ON(divider_is_fixed(div)); | 493 | BUG_ON(divider_is_fixed(div)); |
492 | 494 | ||
493 | previous = div->scaled_div; | 495 | previous = div->u.s.scaled_div; |
494 | if (previous == scaled_div) | 496 | if (previous == scaled_div) |
495 | return 0; /* No change */ | 497 | return 0; /* No change */ |
496 | 498 | ||
497 | div->scaled_div = scaled_div; | 499 | div->u.s.scaled_div = scaled_div; |
498 | 500 | ||
499 | flags = ccu_lock(ccu); | 501 | flags = ccu_lock(ccu); |
500 | __ccu_write_enable(ccu); | 502 | __ccu_write_enable(ccu); |
@@ -505,7 +507,7 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, | |||
505 | ccu_unlock(ccu, flags); | 507 | ccu_unlock(ccu, flags); |
506 | 508 | ||
507 | if (ret) | 509 | if (ret) |
508 | div->scaled_div = previous; /* Revert the change */ | 510 | div->u.s.scaled_div = previous; /* Revert the change */ |
509 | 511 | ||
510 | return ret; | 512 | return ret; |
511 | 513 | ||
@@ -802,7 +804,7 @@ static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, | |||
802 | static int kona_peri_clk_enable(struct clk_hw *hw) | 804 | static int kona_peri_clk_enable(struct clk_hw *hw) |
803 | { | 805 | { |
804 | struct kona_clk *bcm_clk = to_kona_clk(hw); | 806 | struct kona_clk *bcm_clk = to_kona_clk(hw); |
805 | struct bcm_clk_gate *gate = &bcm_clk->peri->gate; | 807 | struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; |
806 | 808 | ||
807 | return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true); | 809 | return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true); |
808 | } | 810 | } |
@@ -810,7 +812,7 @@ static int kona_peri_clk_enable(struct clk_hw *hw) | |||
810 | static void kona_peri_clk_disable(struct clk_hw *hw) | 812 | static void kona_peri_clk_disable(struct clk_hw *hw) |
811 | { | 813 | { |
812 | struct kona_clk *bcm_clk = to_kona_clk(hw); | 814 | struct kona_clk *bcm_clk = to_kona_clk(hw); |
813 | struct bcm_clk_gate *gate = &bcm_clk->peri->gate; | 815 | struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; |
814 | 816 | ||
815 | (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false); | 817 | (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false); |
816 | } | 818 | } |
@@ -818,7 +820,7 @@ static void kona_peri_clk_disable(struct clk_hw *hw) | |||
818 | static int kona_peri_clk_is_enabled(struct clk_hw *hw) | 820 | static int kona_peri_clk_is_enabled(struct clk_hw *hw) |
819 | { | 821 | { |
820 | struct kona_clk *bcm_clk = to_kona_clk(hw); | 822 | struct kona_clk *bcm_clk = to_kona_clk(hw); |
821 | struct bcm_clk_gate *gate = &bcm_clk->peri->gate; | 823 | struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; |
822 | 824 | ||
823 | return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0; | 825 | return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0; |
824 | } | 826 | } |
@@ -827,7 +829,7 @@ static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw, | |||
827 | unsigned long parent_rate) | 829 | unsigned long parent_rate) |
828 | { | 830 | { |
829 | struct kona_clk *bcm_clk = to_kona_clk(hw); | 831 | struct kona_clk *bcm_clk = to_kona_clk(hw); |
830 | struct peri_clk_data *data = bcm_clk->peri; | 832 | struct peri_clk_data *data = bcm_clk->u.peri; |
831 | 833 | ||
832 | return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div, | 834 | return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div, |
833 | parent_rate); | 835 | parent_rate); |
@@ -837,20 +839,20 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate, | |||
837 | unsigned long *parent_rate) | 839 | unsigned long *parent_rate) |
838 | { | 840 | { |
839 | struct kona_clk *bcm_clk = to_kona_clk(hw); | 841 | struct kona_clk *bcm_clk = to_kona_clk(hw); |
840 | struct bcm_clk_div *div = &bcm_clk->peri->div; | 842 | struct bcm_clk_div *div = &bcm_clk->u.peri->div; |
841 | 843 | ||
842 | if (!divider_exists(div)) | 844 | if (!divider_exists(div)) |
843 | return __clk_get_rate(hw->clk); | 845 | return __clk_get_rate(hw->clk); |
844 | 846 | ||
845 | /* Quietly avoid a zero rate */ | 847 | /* Quietly avoid a zero rate */ |
846 | return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div, | 848 | return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div, |
847 | rate ? rate : 1, *parent_rate, NULL); | 849 | rate ? rate : 1, *parent_rate, NULL); |
848 | } | 850 | } |
849 | 851 | ||
850 | static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) | 852 | static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) |
851 | { | 853 | { |
852 | struct kona_clk *bcm_clk = to_kona_clk(hw); | 854 | struct kona_clk *bcm_clk = to_kona_clk(hw); |
853 | struct peri_clk_data *data = bcm_clk->peri; | 855 | struct peri_clk_data *data = bcm_clk->u.peri; |
854 | struct bcm_clk_sel *sel = &data->sel; | 856 | struct bcm_clk_sel *sel = &data->sel; |
855 | struct bcm_clk_trig *trig; | 857 | struct bcm_clk_trig *trig; |
856 | int ret; | 858 | int ret; |
@@ -884,7 +886,7 @@ static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) | |||
884 | static u8 kona_peri_clk_get_parent(struct clk_hw *hw) | 886 | static u8 kona_peri_clk_get_parent(struct clk_hw *hw) |
885 | { | 887 | { |
886 | struct kona_clk *bcm_clk = to_kona_clk(hw); | 888 | struct kona_clk *bcm_clk = to_kona_clk(hw); |
887 | struct peri_clk_data *data = bcm_clk->peri; | 889 | struct peri_clk_data *data = bcm_clk->u.peri; |
888 | u8 index; | 890 | u8 index; |
889 | 891 | ||
890 | index = selector_read_index(bcm_clk->ccu, &data->sel); | 892 | index = selector_read_index(bcm_clk->ccu, &data->sel); |
@@ -897,7 +899,7 @@ static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate, | |||
897 | unsigned long parent_rate) | 899 | unsigned long parent_rate) |
898 | { | 900 | { |
899 | struct kona_clk *bcm_clk = to_kona_clk(hw); | 901 | struct kona_clk *bcm_clk = to_kona_clk(hw); |
900 | struct peri_clk_data *data = bcm_clk->peri; | 902 | struct peri_clk_data *data = bcm_clk->u.peri; |
901 | struct bcm_clk_div *div = &data->div; | 903 | struct bcm_clk_div *div = &data->div; |
902 | u64 scaled_div = 0; | 904 | u64 scaled_div = 0; |
903 | int ret; | 905 | int ret; |
@@ -958,7 +960,7 @@ struct clk_ops kona_peri_clk_ops = { | |||
958 | static bool __peri_clk_init(struct kona_clk *bcm_clk) | 960 | static bool __peri_clk_init(struct kona_clk *bcm_clk) |
959 | { | 961 | { |
960 | struct ccu_data *ccu = bcm_clk->ccu; | 962 | struct ccu_data *ccu = bcm_clk->ccu; |
961 | struct peri_clk_data *peri = bcm_clk->peri; | 963 | struct peri_clk_data *peri = bcm_clk->u.peri; |
962 | const char *name = bcm_clk->name; | 964 | const char *name = bcm_clk->name; |
963 | struct bcm_clk_trig *trig; | 965 | struct bcm_clk_trig *trig; |
964 | 966 | ||
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h index 5e139adc3dc5..dee690951bb6 100644 --- a/drivers/clk/bcm/clk-kona.h +++ b/drivers/clk/bcm/clk-kona.h | |||
@@ -57,7 +57,7 @@ | |||
57 | #define divider_exists(div) FLAG_TEST(div, DIV, EXISTS) | 57 | #define divider_exists(div) FLAG_TEST(div, DIV, EXISTS) |
58 | #define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED) | 58 | #define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED) |
59 | #define divider_has_fraction(div) (!divider_is_fixed(div) && \ | 59 | #define divider_has_fraction(div) (!divider_is_fixed(div) && \ |
60 | (div)->frac_width > 0) | 60 | (div)->u.s.frac_width > 0) |
61 | 61 | ||
62 | #define selector_exists(sel) ((sel)->width != 0) | 62 | #define selector_exists(sel) ((sel)->width != 0) |
63 | #define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS) | 63 | #define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS) |
@@ -244,9 +244,9 @@ struct bcm_clk_div { | |||
244 | u32 frac_width; /* field fraction width */ | 244 | u32 frac_width; /* field fraction width */ |
245 | 245 | ||
246 | u64 scaled_div; /* scaled divider value */ | 246 | u64 scaled_div; /* scaled divider value */ |
247 | }; | 247 | } s; |
248 | u32 fixed; /* non-zero fixed divider value */ | 248 | u32 fixed; /* non-zero fixed divider value */ |
249 | }; | 249 | } u; |
250 | u32 flags; /* BCM_CLK_DIV_FLAGS_* below */ | 250 | u32 flags; /* BCM_CLK_DIV_FLAGS_* below */ |
251 | }; | 251 | }; |
252 | 252 | ||
@@ -263,28 +263,28 @@ struct bcm_clk_div { | |||
263 | /* A fixed (non-zero) divider */ | 263 | /* A fixed (non-zero) divider */ |
264 | #define FIXED_DIVIDER(_value) \ | 264 | #define FIXED_DIVIDER(_value) \ |
265 | { \ | 265 | { \ |
266 | .fixed = (_value), \ | 266 | .u.fixed = (_value), \ |
267 | .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \ | 267 | .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \ |
268 | } | 268 | } |
269 | 269 | ||
270 | /* A divider with an integral divisor */ | 270 | /* A divider with an integral divisor */ |
271 | #define DIVIDER(_offset, _shift, _width) \ | 271 | #define DIVIDER(_offset, _shift, _width) \ |
272 | { \ | 272 | { \ |
273 | .offset = (_offset), \ | 273 | .u.s.offset = (_offset), \ |
274 | .shift = (_shift), \ | 274 | .u.s.shift = (_shift), \ |
275 | .width = (_width), \ | 275 | .u.s.width = (_width), \ |
276 | .scaled_div = BAD_SCALED_DIV_VALUE, \ | 276 | .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \ |
277 | .flags = FLAG(DIV, EXISTS), \ | 277 | .flags = FLAG(DIV, EXISTS), \ |
278 | } | 278 | } |
279 | 279 | ||
280 | /* A divider whose divisor has an integer and fractional part */ | 280 | /* A divider whose divisor has an integer and fractional part */ |
281 | #define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \ | 281 | #define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \ |
282 | { \ | 282 | { \ |
283 | .offset = (_offset), \ | 283 | .u.s.offset = (_offset), \ |
284 | .shift = (_shift), \ | 284 | .u.s.shift = (_shift), \ |
285 | .width = (_width), \ | 285 | .u.s.width = (_width), \ |
286 | .frac_width = (_frac_width), \ | 286 | .u.s.frac_width = (_frac_width), \ |
287 | .scaled_div = BAD_SCALED_DIV_VALUE, \ | 287 | .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \ |
288 | .flags = FLAG(DIV, EXISTS), \ | 288 | .flags = FLAG(DIV, EXISTS), \ |
289 | } | 289 | } |
290 | 290 | ||
@@ -380,7 +380,7 @@ struct kona_clk { | |||
380 | union { | 380 | union { |
381 | void *data; | 381 | void *data; |
382 | struct peri_clk_data *peri; | 382 | struct peri_clk_data *peri; |
383 | }; | 383 | } u; |
384 | }; | 384 | }; |
385 | #define to_kona_clk(_hw) \ | 385 | #define to_kona_clk(_hw) \ |
386 | container_of(_hw, struct kona_clk, hw) | 386 | container_of(_hw, struct kona_clk, hw) |
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index ec22112e569f..3fbee4540228 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c | |||
@@ -144,6 +144,37 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div) | |||
144 | return true; | 144 | return true; |
145 | } | 145 | } |
146 | 146 | ||
147 | static int _round_up_table(const struct clk_div_table *table, int div) | ||
148 | { | ||
149 | const struct clk_div_table *clkt; | ||
150 | int up = INT_MAX; | ||
151 | |||
152 | for (clkt = table; clkt->div; clkt++) { | ||
153 | if (clkt->div == div) | ||
154 | return clkt->div; | ||
155 | else if (clkt->div < div) | ||
156 | continue; | ||
157 | |||
158 | if ((clkt->div - div) < (up - div)) | ||
159 | up = clkt->div; | ||
160 | } | ||
161 | |||
162 | return up; | ||
163 | } | ||
164 | |||
165 | static int _div_round_up(struct clk_divider *divider, | ||
166 | unsigned long parent_rate, unsigned long rate) | ||
167 | { | ||
168 | int div = DIV_ROUND_UP(parent_rate, rate); | ||
169 | |||
170 | if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) | ||
171 | div = __roundup_pow_of_two(div); | ||
172 | if (divider->table) | ||
173 | div = _round_up_table(divider->table, div); | ||
174 | |||
175 | return div; | ||
176 | } | ||
177 | |||
147 | static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, | 178 | static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, |
148 | unsigned long *best_parent_rate) | 179 | unsigned long *best_parent_rate) |
149 | { | 180 | { |
@@ -159,7 +190,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, | |||
159 | 190 | ||
160 | if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { | 191 | if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { |
161 | parent_rate = *best_parent_rate; | 192 | parent_rate = *best_parent_rate; |
162 | bestdiv = DIV_ROUND_UP(parent_rate, rate); | 193 | bestdiv = _div_round_up(divider, parent_rate, rate); |
163 | bestdiv = bestdiv == 0 ? 1 : bestdiv; | 194 | bestdiv = bestdiv == 0 ? 1 : bestdiv; |
164 | bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv; | 195 | bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv; |
165 | return bestdiv; | 196 | return bestdiv; |
@@ -219,6 +250,10 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, | |||
219 | u32 val; | 250 | u32 val; |
220 | 251 | ||
221 | div = DIV_ROUND_UP(parent_rate, rate); | 252 | div = DIV_ROUND_UP(parent_rate, rate); |
253 | |||
254 | if (!_is_valid_div(divider, div)) | ||
255 | return -EINVAL; | ||
256 | |||
222 | value = _get_val(divider, div); | 257 | value = _get_val(divider, div); |
223 | 258 | ||
224 | if (value > div_mask(divider)) | 259 | if (value > div_mask(divider)) |
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c new file mode 100644 index 000000000000..ede685ca0d20 --- /dev/null +++ b/drivers/clk/clk-fractional-divider.c | |||
@@ -0,0 +1,135 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Intel Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * Adjustable fractional divider clock implementation. | ||
9 | * Output rate = (m / n) * parent_rate. | ||
10 | */ | ||
11 | |||
12 | #include <linux/clk-provider.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/device.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/gcd.h> | ||
17 | |||
18 | #define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw) | ||
19 | |||
20 | static unsigned long clk_fd_recalc_rate(struct clk_hw *hw, | ||
21 | unsigned long parent_rate) | ||
22 | { | ||
23 | struct clk_fractional_divider *fd = to_clk_fd(hw); | ||
24 | unsigned long flags = 0; | ||
25 | u32 val, m, n; | ||
26 | u64 ret; | ||
27 | |||
28 | if (fd->lock) | ||
29 | spin_lock_irqsave(fd->lock, flags); | ||
30 | |||
31 | val = clk_readl(fd->reg); | ||
32 | |||
33 | if (fd->lock) | ||
34 | spin_unlock_irqrestore(fd->lock, flags); | ||
35 | |||
36 | m = (val & fd->mmask) >> fd->mshift; | ||
37 | n = (val & fd->nmask) >> fd->nshift; | ||
38 | |||
39 | ret = parent_rate * m; | ||
40 | do_div(ret, n); | ||
41 | |||
42 | return ret; | ||
43 | } | ||
44 | |||
45 | static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate, | ||
46 | unsigned long *prate) | ||
47 | { | ||
48 | struct clk_fractional_divider *fd = to_clk_fd(hw); | ||
49 | unsigned maxn = (fd->nmask >> fd->nshift) + 1; | ||
50 | unsigned div; | ||
51 | |||
52 | if (!rate || rate >= *prate) | ||
53 | return *prate; | ||
54 | |||
55 | div = gcd(*prate, rate); | ||
56 | |||
57 | while ((*prate / div) > maxn) { | ||
58 | div <<= 1; | ||
59 | rate <<= 1; | ||
60 | } | ||
61 | |||
62 | return rate; | ||
63 | } | ||
64 | |||
65 | static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate, | ||
66 | unsigned long parent_rate) | ||
67 | { | ||
68 | struct clk_fractional_divider *fd = to_clk_fd(hw); | ||
69 | unsigned long flags = 0; | ||
70 | unsigned long div; | ||
71 | unsigned n, m; | ||
72 | u32 val; | ||
73 | |||
74 | div = gcd(parent_rate, rate); | ||
75 | m = rate / div; | ||
76 | n = parent_rate / div; | ||
77 | |||
78 | if (fd->lock) | ||
79 | spin_lock_irqsave(fd->lock, flags); | ||
80 | |||
81 | val = clk_readl(fd->reg); | ||
82 | val &= ~(fd->mmask | fd->nmask); | ||
83 | val |= (m << fd->mshift) | (n << fd->nshift); | ||
84 | clk_writel(val, fd->reg); | ||
85 | |||
86 | if (fd->lock) | ||
87 | spin_unlock_irqrestore(fd->lock, flags); | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | const struct clk_ops clk_fractional_divider_ops = { | ||
93 | .recalc_rate = clk_fd_recalc_rate, | ||
94 | .round_rate = clk_fd_round_rate, | ||
95 | .set_rate = clk_fd_set_rate, | ||
96 | }; | ||
97 | EXPORT_SYMBOL_GPL(clk_fractional_divider_ops); | ||
98 | |||
99 | struct clk *clk_register_fractional_divider(struct device *dev, | ||
100 | const char *name, const char *parent_name, unsigned long flags, | ||
101 | void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, | ||
102 | u8 clk_divider_flags, spinlock_t *lock) | ||
103 | { | ||
104 | struct clk_fractional_divider *fd; | ||
105 | struct clk_init_data init; | ||
106 | struct clk *clk; | ||
107 | |||
108 | fd = kzalloc(sizeof(*fd), GFP_KERNEL); | ||
109 | if (!fd) { | ||
110 | dev_err(dev, "could not allocate fractional divider clk\n"); | ||
111 | return ERR_PTR(-ENOMEM); | ||
112 | } | ||
113 | |||
114 | init.name = name; | ||
115 | init.ops = &clk_fractional_divider_ops; | ||
116 | init.flags = flags | CLK_IS_BASIC; | ||
117 | init.parent_names = parent_name ? &parent_name : NULL; | ||
118 | init.num_parents = parent_name ? 1 : 0; | ||
119 | |||
120 | fd->reg = reg; | ||
121 | fd->mshift = mshift; | ||
122 | fd->mmask = (BIT(mwidth) - 1) << mshift; | ||
123 | fd->nshift = nshift; | ||
124 | fd->nmask = (BIT(nwidth) - 1) << nshift; | ||
125 | fd->flags = clk_divider_flags; | ||
126 | fd->lock = lock; | ||
127 | fd->hw.init = &init; | ||
128 | |||
129 | clk = clk_register(dev, &fd->hw); | ||
130 | if (IS_ERR(clk)) | ||
131 | kfree(fd); | ||
132 | |||
133 | return clk; | ||
134 | } | ||
135 | EXPORT_SYMBOL_GPL(clk_register_fractional_divider); | ||
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index dff0373f53c1..7cf2c093cc54 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
@@ -1984,9 +1984,28 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw) | |||
1984 | } | 1984 | } |
1985 | EXPORT_SYMBOL_GPL(__clk_register); | 1985 | EXPORT_SYMBOL_GPL(__clk_register); |
1986 | 1986 | ||
1987 | static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk) | 1987 | /** |
1988 | * clk_register - allocate a new clock, register it and return an opaque cookie | ||
1989 | * @dev: device that is registering this clock | ||
1990 | * @hw: link to hardware-specific clock data | ||
1991 | * | ||
1992 | * clk_register is the primary interface for populating the clock tree with new | ||
1993 | * clock nodes. It returns a pointer to the newly allocated struct clk which | ||
1994 | * cannot be dereferenced by driver code but may be used in conjuction with the | ||
1995 | * rest of the clock API. In the event of an error clk_register will return an | ||
1996 | * error code; drivers must test for an error code after calling clk_register. | ||
1997 | */ | ||
1998 | struct clk *clk_register(struct device *dev, struct clk_hw *hw) | ||
1988 | { | 1999 | { |
1989 | int i, ret; | 2000 | int i, ret; |
2001 | struct clk *clk; | ||
2002 | |||
2003 | clk = kzalloc(sizeof(*clk), GFP_KERNEL); | ||
2004 | if (!clk) { | ||
2005 | pr_err("%s: could not allocate clk\n", __func__); | ||
2006 | ret = -ENOMEM; | ||
2007 | goto fail_out; | ||
2008 | } | ||
1990 | 2009 | ||
1991 | clk->name = kstrdup(hw->init->name, GFP_KERNEL); | 2010 | clk->name = kstrdup(hw->init->name, GFP_KERNEL); |
1992 | if (!clk->name) { | 2011 | if (!clk->name) { |
@@ -2026,7 +2045,7 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk) | |||
2026 | 2045 | ||
2027 | ret = __clk_init(dev, clk); | 2046 | ret = __clk_init(dev, clk); |
2028 | if (!ret) | 2047 | if (!ret) |
2029 | return 0; | 2048 | return clk; |
2030 | 2049 | ||
2031 | fail_parent_names_copy: | 2050 | fail_parent_names_copy: |
2032 | while (--i >= 0) | 2051 | while (--i >= 0) |
@@ -2035,36 +2054,6 @@ fail_parent_names_copy: | |||
2035 | fail_parent_names: | 2054 | fail_parent_names: |
2036 | kfree(clk->name); | 2055 | kfree(clk->name); |
2037 | fail_name: | 2056 | fail_name: |
2038 | return ret; | ||
2039 | } | ||
2040 | |||
2041 | /** | ||
2042 | * clk_register - allocate a new clock, register it and return an opaque cookie | ||
2043 | * @dev: device that is registering this clock | ||
2044 | * @hw: link to hardware-specific clock data | ||
2045 | * | ||
2046 | * clk_register is the primary interface for populating the clock tree with new | ||
2047 | * clock nodes. It returns a pointer to the newly allocated struct clk which | ||
2048 | * cannot be dereferenced by driver code but may be used in conjuction with the | ||
2049 | * rest of the clock API. In the event of an error clk_register will return an | ||
2050 | * error code; drivers must test for an error code after calling clk_register. | ||
2051 | */ | ||
2052 | struct clk *clk_register(struct device *dev, struct clk_hw *hw) | ||
2053 | { | ||
2054 | int ret; | ||
2055 | struct clk *clk; | ||
2056 | |||
2057 | clk = kzalloc(sizeof(*clk), GFP_KERNEL); | ||
2058 | if (!clk) { | ||
2059 | pr_err("%s: could not allocate clk\n", __func__); | ||
2060 | ret = -ENOMEM; | ||
2061 | goto fail_out; | ||
2062 | } | ||
2063 | |||
2064 | ret = _clk_register(dev, hw, clk); | ||
2065 | if (!ret) | ||
2066 | return clk; | ||
2067 | |||
2068 | kfree(clk); | 2057 | kfree(clk); |
2069 | fail_out: | 2058 | fail_out: |
2070 | return ERR_PTR(ret); | 2059 | return ERR_PTR(ret); |
@@ -2151,9 +2140,10 @@ void clk_unregister(struct clk *clk) | |||
2151 | 2140 | ||
2152 | if (!hlist_empty(&clk->children)) { | 2141 | if (!hlist_empty(&clk->children)) { |
2153 | struct clk *child; | 2142 | struct clk *child; |
2143 | struct hlist_node *t; | ||
2154 | 2144 | ||
2155 | /* Reparent all children to the orphan list. */ | 2145 | /* Reparent all children to the orphan list. */ |
2156 | hlist_for_each_entry(child, &clk->children, child_node) | 2146 | hlist_for_each_entry_safe(child, t, &clk->children, child_node) |
2157 | clk_set_parent(child, NULL); | 2147 | clk_set_parent(child, NULL); |
2158 | } | 2148 | } |
2159 | 2149 | ||
@@ -2173,7 +2163,7 @@ EXPORT_SYMBOL_GPL(clk_unregister); | |||
2173 | 2163 | ||
2174 | static void devm_clk_release(struct device *dev, void *res) | 2164 | static void devm_clk_release(struct device *dev, void *res) |
2175 | { | 2165 | { |
2176 | clk_unregister(res); | 2166 | clk_unregister(*(struct clk **)res); |
2177 | } | 2167 | } |
2178 | 2168 | ||
2179 | /** | 2169 | /** |
@@ -2188,18 +2178,18 @@ static void devm_clk_release(struct device *dev, void *res) | |||
2188 | struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) | 2178 | struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) |
2189 | { | 2179 | { |
2190 | struct clk *clk; | 2180 | struct clk *clk; |
2191 | int ret; | 2181 | struct clk **clkp; |
2192 | 2182 | ||
2193 | clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL); | 2183 | clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); |
2194 | if (!clk) | 2184 | if (!clkp) |
2195 | return ERR_PTR(-ENOMEM); | 2185 | return ERR_PTR(-ENOMEM); |
2196 | 2186 | ||
2197 | ret = _clk_register(dev, hw, clk); | 2187 | clk = clk_register(dev, hw); |
2198 | if (!ret) { | 2188 | if (!IS_ERR(clk)) { |
2199 | devres_add(dev, clk); | 2189 | *clkp = clk; |
2190 | devres_add(dev, clkp); | ||
2200 | } else { | 2191 | } else { |
2201 | devres_free(clk); | 2192 | devres_free(clkp); |
2202 | clk = ERR_PTR(ret); | ||
2203 | } | 2193 | } |
2204 | 2194 | ||
2205 | return clk; | 2195 | return clk; |
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c index 2e5810c88d11..1f6324e29a80 100644 --- a/drivers/clk/shmobile/clk-mstp.c +++ b/drivers/clk/shmobile/clk-mstp.c | |||
@@ -156,6 +156,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name, | |||
156 | static void __init cpg_mstp_clocks_init(struct device_node *np) | 156 | static void __init cpg_mstp_clocks_init(struct device_node *np) |
157 | { | 157 | { |
158 | struct mstp_clock_group *group; | 158 | struct mstp_clock_group *group; |
159 | const char *idxname; | ||
159 | struct clk **clks; | 160 | struct clk **clks; |
160 | unsigned int i; | 161 | unsigned int i; |
161 | 162 | ||
@@ -184,6 +185,11 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) | |||
184 | for (i = 0; i < MSTP_MAX_CLOCKS; ++i) | 185 | for (i = 0; i < MSTP_MAX_CLOCKS; ++i) |
185 | clks[i] = ERR_PTR(-ENOENT); | 186 | clks[i] = ERR_PTR(-ENOENT); |
186 | 187 | ||
188 | if (of_find_property(np, "clock-indices", &i)) | ||
189 | idxname = "clock-indices"; | ||
190 | else | ||
191 | idxname = "renesas,clock-indices"; | ||
192 | |||
187 | for (i = 0; i < MSTP_MAX_CLOCKS; ++i) { | 193 | for (i = 0; i < MSTP_MAX_CLOCKS; ++i) { |
188 | const char *parent_name; | 194 | const char *parent_name; |
189 | const char *name; | 195 | const char *name; |
@@ -197,8 +203,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) | |||
197 | continue; | 203 | continue; |
198 | 204 | ||
199 | parent_name = of_clk_get_parent_name(np, i); | 205 | parent_name = of_clk_get_parent_name(np, i); |
200 | ret = of_property_read_u32_index(np, "renesas,clock-indices", i, | 206 | ret = of_property_read_u32_index(np, idxname, i, &clkidx); |
201 | &clkidx); | ||
202 | if (parent_name == NULL || ret < 0) | 207 | if (parent_name == NULL || ret < 0) |
203 | break; | 208 | break; |
204 | 209 | ||
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c index 88dafb5e9627..de6da957a09d 100644 --- a/drivers/clk/socfpga/clk-pll.c +++ b/drivers/clk/socfpga/clk-pll.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/clk-provider.h> | 20 | #include <linux/clk-provider.h> |
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <linux/of.h> | 22 | #include <linux/of.h> |
23 | #include <linux/of_address.h> | ||
23 | 24 | ||
24 | #include "clk.h" | 25 | #include "clk.h" |
25 | 26 | ||
@@ -43,6 +44,8 @@ | |||
43 | 44 | ||
44 | #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw) | 45 | #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw) |
45 | 46 | ||
47 | void __iomem *clk_mgr_base_addr; | ||
48 | |||
46 | static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, | 49 | static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, |
47 | unsigned long parent_rate) | 50 | unsigned long parent_rate) |
48 | { | 51 | { |
@@ -87,6 +90,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node, | |||
87 | const char *clk_name = node->name; | 90 | const char *clk_name = node->name; |
88 | const char *parent_name[SOCFPGA_MAX_PARENTS]; | 91 | const char *parent_name[SOCFPGA_MAX_PARENTS]; |
89 | struct clk_init_data init; | 92 | struct clk_init_data init; |
93 | struct device_node *clkmgr_np; | ||
90 | int rc; | 94 | int rc; |
91 | int i = 0; | 95 | int i = 0; |
92 | 96 | ||
@@ -96,6 +100,9 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node, | |||
96 | if (WARN_ON(!pll_clk)) | 100 | if (WARN_ON(!pll_clk)) |
97 | return NULL; | 101 | return NULL; |
98 | 102 | ||
103 | clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); | ||
104 | clk_mgr_base_addr = of_iomap(clkmgr_np, 0); | ||
105 | BUG_ON(!clk_mgr_base_addr); | ||
99 | pll_clk->hw.reg = clk_mgr_base_addr + reg; | 106 | pll_clk->hw.reg = clk_mgr_base_addr + reg; |
100 | 107 | ||
101 | of_property_read_string(node, "clock-output-names", &clk_name); | 108 | of_property_read_string(node, "clock-output-names", &clk_name); |
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c index 35a960a993f9..43db947e5f0e 100644 --- a/drivers/clk/socfpga/clk.c +++ b/drivers/clk/socfpga/clk.c | |||
@@ -17,28 +17,11 @@ | |||
17 | * You should have received a copy of the GNU General Public License | 17 | * You should have received a copy of the GNU General Public License |
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
19 | */ | 19 | */ |
20 | #include <linux/clk.h> | ||
21 | #include <linux/clkdev.h> | ||
22 | #include <linux/clk-provider.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <linux/of.h> | 20 | #include <linux/of.h> |
25 | #include <linux/of_address.h> | ||
26 | 21 | ||
27 | #include "clk.h" | 22 | #include "clk.h" |
28 | 23 | ||
29 | void __iomem *clk_mgr_base_addr; | 24 | CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init); |
30 | 25 | CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init); | |
31 | static const struct of_device_id socfpga_child_clocks[] __initconst = { | 26 | CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init); |
32 | { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, }, | ||
33 | { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, }, | ||
34 | { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, }, | ||
35 | {}, | ||
36 | }; | ||
37 | |||
38 | static void __init socfpga_clkmgr_init(struct device_node *node) | ||
39 | { | ||
40 | clk_mgr_base_addr = of_iomap(node, 0); | ||
41 | of_clk_init(socfpga_child_clocks); | ||
42 | } | ||
43 | CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init); | ||
44 | 27 | ||
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c index bca0a0badbfa..a886702f7c8b 100644 --- a/drivers/clk/st/clkgen-pll.c +++ b/drivers/clk/st/clkgen-pll.c | |||
@@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name, | |||
521 | gate->lock = odf_lock; | 521 | gate->lock = odf_lock; |
522 | 522 | ||
523 | div = kzalloc(sizeof(*div), GFP_KERNEL); | 523 | div = kzalloc(sizeof(*div), GFP_KERNEL); |
524 | if (!div) | 524 | if (!div) { |
525 | kfree(gate); | ||
525 | return ERR_PTR(-ENOMEM); | 526 | return ERR_PTR(-ENOMEM); |
527 | } | ||
526 | 528 | ||
527 | div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; | 529 | div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; |
528 | div->reg = reg + pll_data->odf[odf].offset; | 530 | div->reg = reg + pll_data->odf[odf].offset; |
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 0d20241e0770..6aad8abc69a2 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c | |||
@@ -58,9 +58,9 @@ | |||
58 | #define PLLDU_LFCON_SET_DIVN 600 | 58 | #define PLLDU_LFCON_SET_DIVN 600 |
59 | 59 | ||
60 | #define PLLE_BASE_DIVCML_SHIFT 24 | 60 | #define PLLE_BASE_DIVCML_SHIFT 24 |
61 | #define PLLE_BASE_DIVCML_WIDTH 4 | 61 | #define PLLE_BASE_DIVCML_MASK 0xf |
62 | #define PLLE_BASE_DIVP_SHIFT 16 | 62 | #define PLLE_BASE_DIVP_SHIFT 16 |
63 | #define PLLE_BASE_DIVP_WIDTH 7 | 63 | #define PLLE_BASE_DIVP_WIDTH 6 |
64 | #define PLLE_BASE_DIVN_SHIFT 8 | 64 | #define PLLE_BASE_DIVN_SHIFT 8 |
65 | #define PLLE_BASE_DIVN_WIDTH 8 | 65 | #define PLLE_BASE_DIVN_WIDTH 8 |
66 | #define PLLE_BASE_DIVM_SHIFT 0 | 66 | #define PLLE_BASE_DIVM_SHIFT 0 |
@@ -183,6 +183,14 @@ | |||
183 | #define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\ | 183 | #define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\ |
184 | mask(p->params->div_nmp->divp_width)) | 184 | mask(p->params->div_nmp->divp_width)) |
185 | 185 | ||
186 | #define divm_shift(p) (p)->params->div_nmp->divm_shift | ||
187 | #define divn_shift(p) (p)->params->div_nmp->divn_shift | ||
188 | #define divp_shift(p) (p)->params->div_nmp->divp_shift | ||
189 | |||
190 | #define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p)) | ||
191 | #define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p)) | ||
192 | #define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p)) | ||
193 | |||
186 | #define divm_max(p) (divm_mask(p)) | 194 | #define divm_max(p) (divm_mask(p)) |
187 | #define divn_max(p) (divn_mask(p)) | 195 | #define divn_max(p) (divn_mask(p)) |
188 | #define divp_max(p) (1 << (divp_mask(p))) | 196 | #define divp_max(p) (1 << (divp_mask(p))) |
@@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll, | |||
476 | } else { | 484 | } else { |
477 | val = pll_readl_base(pll); | 485 | val = pll_readl_base(pll); |
478 | 486 | ||
479 | val &= ~((divm_mask(pll) << div_nmp->divm_shift) | | 487 | val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) | |
480 | (divn_mask(pll) << div_nmp->divn_shift) | | 488 | divp_mask_shifted(pll)); |
481 | (divp_mask(pll) << div_nmp->divp_shift)); | ||
482 | 489 | ||
483 | val |= ((cfg->m << div_nmp->divm_shift) | | 490 | val |= (cfg->m << divm_shift(pll)) | |
484 | (cfg->n << div_nmp->divn_shift) | | 491 | (cfg->n << divn_shift(pll)) | |
485 | (cfg->p << div_nmp->divp_shift)); | 492 | (cfg->p << divp_shift(pll)); |
486 | 493 | ||
487 | pll_writel_base(val, pll); | 494 | pll_writel_base(val, pll); |
488 | } | 495 | } |
@@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw) | |||
730 | if (pll->params->flags & TEGRA_PLLE_CONFIGURE) { | 737 | if (pll->params->flags & TEGRA_PLLE_CONFIGURE) { |
731 | /* configure dividers */ | 738 | /* configure dividers */ |
732 | val = pll_readl_base(pll); | 739 | val = pll_readl_base(pll); |
733 | val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll)); | 740 | val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) | |
734 | val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT); | 741 | divm_mask_shifted(pll)); |
735 | val |= sel.m << pll->params->div_nmp->divm_shift; | 742 | val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT); |
736 | val |= sel.n << pll->params->div_nmp->divn_shift; | 743 | val |= sel.m << divm_shift(pll); |
737 | val |= sel.p << pll->params->div_nmp->divp_shift; | 744 | val |= sel.n << divn_shift(pll); |
745 | val |= sel.p << divp_shift(pll); | ||
738 | val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; | 746 | val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; |
739 | pll_writel_base(val, pll); | 747 | pll_writel_base(val, pll); |
740 | } | 748 | } |
@@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw) | |||
745 | pll_writel_misc(val, pll); | 753 | pll_writel_misc(val, pll); |
746 | 754 | ||
747 | val = readl(pll->clk_base + PLLE_SS_CTRL); | 755 | val = readl(pll->clk_base + PLLE_SS_CTRL); |
756 | val &= ~PLLE_SS_COEFFICIENTS_MASK; | ||
748 | val |= PLLE_SS_DISABLE; | 757 | val |= PLLE_SS_DISABLE; |
749 | writel(val, pll->clk_base + PLLE_SS_CTRL); | 758 | writel(val, pll->clk_base + PLLE_SS_CTRL); |
750 | 759 | ||
751 | val |= pll_readl_base(pll); | 760 | val = pll_readl_base(pll); |
752 | val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE); | 761 | val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE); |
753 | pll_writel_base(val, pll); | 762 | pll_writel_base(val, pll); |
754 | 763 | ||
@@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw) | |||
1292 | pll_writel(val, PLLE_SS_CTRL, pll); | 1301 | pll_writel(val, PLLE_SS_CTRL, pll); |
1293 | 1302 | ||
1294 | val = pll_readl_base(pll); | 1303 | val = pll_readl_base(pll); |
1295 | val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll)); | 1304 | val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) | |
1296 | val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT); | 1305 | divm_mask_shifted(pll)); |
1297 | val |= sel.m << pll->params->div_nmp->divm_shift; | 1306 | val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT); |
1298 | val |= sel.n << pll->params->div_nmp->divn_shift; | 1307 | val |= sel.m << divm_shift(pll); |
1308 | val |= sel.n << divn_shift(pll); | ||
1299 | val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; | 1309 | val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; |
1300 | pll_writel_base(val, pll); | 1310 | pll_writel_base(val, pll); |
1301 | udelay(1); | 1311 | udelay(1); |
@@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name, | |||
1410 | return clk; | 1420 | return clk; |
1411 | } | 1421 | } |
1412 | 1422 | ||
1423 | static struct div_nmp pll_e_nmp = { | ||
1424 | .divn_shift = PLLE_BASE_DIVN_SHIFT, | ||
1425 | .divn_width = PLLE_BASE_DIVN_WIDTH, | ||
1426 | .divm_shift = PLLE_BASE_DIVM_SHIFT, | ||
1427 | .divm_width = PLLE_BASE_DIVM_WIDTH, | ||
1428 | .divp_shift = PLLE_BASE_DIVP_SHIFT, | ||
1429 | .divp_width = PLLE_BASE_DIVP_WIDTH, | ||
1430 | }; | ||
1431 | |||
1413 | struct clk *tegra_clk_register_plle(const char *name, const char *parent_name, | 1432 | struct clk *tegra_clk_register_plle(const char *name, const char *parent_name, |
1414 | void __iomem *clk_base, void __iomem *pmc, | 1433 | void __iomem *clk_base, void __iomem *pmc, |
1415 | unsigned long flags, struct tegra_clk_pll_params *pll_params, | 1434 | unsigned long flags, struct tegra_clk_pll_params *pll_params, |
@@ -1420,6 +1439,10 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name, | |||
1420 | 1439 | ||
1421 | pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS; | 1440 | pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS; |
1422 | pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE; | 1441 | pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE; |
1442 | |||
1443 | if (!pll_params->div_nmp) | ||
1444 | pll_params->div_nmp = &pll_e_nmp; | ||
1445 | |||
1423 | pll = _tegra_init_pll(clk_base, pmc, pll_params, lock); | 1446 | pll = _tegra_init_pll(clk_base, pmc, pll_params, lock); |
1424 | if (IS_ERR(pll)) | 1447 | if (IS_ERR(pll)) |
1425 | return ERR_CAST(pll); | 1448 | return ERR_CAST(pll); |
@@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name, | |||
1557 | int m; | 1580 | int m; |
1558 | 1581 | ||
1559 | m = _pll_fixed_mdiv(pll_params, parent_rate); | 1582 | m = _pll_fixed_mdiv(pll_params, parent_rate); |
1560 | val = m << PLL_BASE_DIVM_SHIFT; | 1583 | val = m << divm_shift(pll); |
1561 | val |= (pll_params->vco_min / parent_rate) | 1584 | val |= (pll_params->vco_min / parent_rate) << divn_shift(pll); |
1562 | << PLL_BASE_DIVN_SHIFT; | ||
1563 | pll_writel_base(val, pll); | 1585 | pll_writel_base(val, pll); |
1564 | } | 1586 | } |
1565 | 1587 | ||
@@ -1718,7 +1740,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name, | |||
1718 | "pll_re_vco"); | 1740 | "pll_re_vco"); |
1719 | } else { | 1741 | } else { |
1720 | val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL); | 1742 | val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL); |
1721 | pll_writel(val, pll_params->aux_reg, pll); | 1743 | pll_writel(val_aux, pll_params->aux_reg, pll); |
1722 | } | 1744 | } |
1723 | 1745 | ||
1724 | clk = _tegra_clk_register_pll(pll, name, parent_name, flags, | 1746 | clk = _tegra_clk_register_pll(pll, name, parent_name, flags, |
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 00fdd1170284..a8d7ea14f183 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c | |||
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) | |||
100 | || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) { | 100 | || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) { |
101 | __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); | 101 | __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); |
102 | __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); | 102 | __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); |
103 | clk_disable_unprepare(tcd->clk); | 103 | clk_disable(tcd->clk); |
104 | } | 104 | } |
105 | 105 | ||
106 | switch (m) { | 106 | switch (m) { |
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) | |||
109 | * of oneshot, we get lower overhead and improved accuracy. | 109 | * of oneshot, we get lower overhead and improved accuracy. |
110 | */ | 110 | */ |
111 | case CLOCK_EVT_MODE_PERIODIC: | 111 | case CLOCK_EVT_MODE_PERIODIC: |
112 | clk_prepare_enable(tcd->clk); | 112 | clk_enable(tcd->clk); |
113 | 113 | ||
114 | /* slow clock, count up to RC, then irq and restart */ | 114 | /* slow clock, count up to RC, then irq and restart */ |
115 | __raw_writel(timer_clock | 115 | __raw_writel(timer_clock |
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) | |||
126 | break; | 126 | break; |
127 | 127 | ||
128 | case CLOCK_EVT_MODE_ONESHOT: | 128 | case CLOCK_EVT_MODE_ONESHOT: |
129 | clk_prepare_enable(tcd->clk); | 129 | clk_enable(tcd->clk); |
130 | 130 | ||
131 | /* slow clock, count up to RC, then irq and stop */ | 131 | /* slow clock, count up to RC, then irq and stop */ |
132 | __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | 132 | __raw_writel(timer_clock | ATMEL_TC_CPCSTOP |
@@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) | |||
194 | ret = clk_prepare_enable(t2_clk); | 194 | ret = clk_prepare_enable(t2_clk); |
195 | if (ret) | 195 | if (ret) |
196 | return ret; | 196 | return ret; |
197 | clk_disable_unprepare(t2_clk); | 197 | clk_disable(t2_clk); |
198 | 198 | ||
199 | clkevt.regs = tc->regs; | 199 | clkevt.regs = tc->regs; |
200 | clkevt.clk = t2_clk; | 200 | clkevt.clk = t2_clk; |
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c index b52e1c078b99..7f5374dbefd9 100644 --- a/drivers/clocksource/timer-marco.c +++ b/drivers/clocksource/timer-marco.c | |||
@@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce) | |||
199 | 199 | ||
200 | action->dev_id = ce; | 200 | action->dev_id = ce; |
201 | BUG_ON(setup_irq(ce->irq, action)); | 201 | BUG_ON(setup_irq(ce->irq, action)); |
202 | irq_set_affinity(action->irq, cpumask_of(cpu)); | 202 | irq_force_affinity(action->irq, cpumask_of(cpu)); |
203 | 203 | ||
204 | clockevents_register_device(ce); | 204 | clockevents_register_device(ce); |
205 | return 0; | 205 | return 0; |
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 1bf6bbac3e03..09b9129c7bd3 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c | |||
@@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
130 | return -ENOENT; | 130 | return -ENOENT; |
131 | } | 131 | } |
132 | 132 | ||
133 | cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0"); | 133 | cpu_reg = regulator_get_optional(cpu_dev, "cpu0"); |
134 | if (IS_ERR(cpu_reg)) { | 134 | if (IS_ERR(cpu_reg)) { |
135 | /* | 135 | /* |
136 | * If cpu0 regulator supply node is present, but regulator is | 136 | * If cpu0 regulator supply node is present, but regulator is |
@@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
145 | PTR_ERR(cpu_reg)); | 145 | PTR_ERR(cpu_reg)); |
146 | } | 146 | } |
147 | 147 | ||
148 | cpu_clk = devm_clk_get(cpu_dev, NULL); | 148 | cpu_clk = clk_get(cpu_dev, NULL); |
149 | if (IS_ERR(cpu_clk)) { | 149 | if (IS_ERR(cpu_clk)) { |
150 | ret = PTR_ERR(cpu_clk); | 150 | ret = PTR_ERR(cpu_clk); |
151 | pr_err("failed to get cpu0 clock: %d\n", ret); | 151 | pr_err("failed to get cpu0 clock: %d\n", ret); |
152 | goto out_put_node; | 152 | goto out_put_reg; |
153 | } | 153 | } |
154 | 154 | ||
155 | ret = of_init_opp_table(cpu_dev); | 155 | ret = of_init_opp_table(cpu_dev); |
156 | if (ret) { | 156 | if (ret) { |
157 | pr_err("failed to init OPP table: %d\n", ret); | 157 | pr_err("failed to init OPP table: %d\n", ret); |
158 | goto out_put_node; | 158 | goto out_put_clk; |
159 | } | 159 | } |
160 | 160 | ||
161 | ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); | 161 | ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); |
162 | if (ret) { | 162 | if (ret) { |
163 | pr_err("failed to init cpufreq table: %d\n", ret); | 163 | pr_err("failed to init cpufreq table: %d\n", ret); |
164 | goto out_put_node; | 164 | goto out_put_clk; |
165 | } | 165 | } |
166 | 166 | ||
167 | of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); | 167 | of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); |
@@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
216 | 216 | ||
217 | out_free_table: | 217 | out_free_table: |
218 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); | 218 | dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); |
219 | out_put_clk: | ||
220 | if (!IS_ERR(cpu_clk)) | ||
221 | clk_put(cpu_clk); | ||
222 | out_put_reg: | ||
223 | if (!IS_ERR(cpu_reg)) | ||
224 | regulator_put(cpu_reg); | ||
219 | out_put_node: | 225 | out_put_node: |
220 | of_node_put(np); | 226 | of_node_put(np); |
221 | return ret; | 227 | return ret; |
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index ba43991ba98a..e1c6433b16e0 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c | |||
@@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
366 | break; | 366 | break; |
367 | 367 | ||
368 | case CPUFREQ_GOV_LIMITS: | 368 | case CPUFREQ_GOV_LIMITS: |
369 | mutex_lock(&dbs_data->mutex); | ||
370 | if (!cpu_cdbs->cur_policy) { | ||
371 | mutex_unlock(&dbs_data->mutex); | ||
372 | break; | ||
373 | } | ||
369 | mutex_lock(&cpu_cdbs->timer_mutex); | 374 | mutex_lock(&cpu_cdbs->timer_mutex); |
370 | if (policy->max < cpu_cdbs->cur_policy->cur) | 375 | if (policy->max < cpu_cdbs->cur_policy->cur) |
371 | __cpufreq_driver_target(cpu_cdbs->cur_policy, | 376 | __cpufreq_driver_target(cpu_cdbs->cur_policy, |
@@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
375 | policy->min, CPUFREQ_RELATION_L); | 380 | policy->min, CPUFREQ_RELATION_L); |
376 | dbs_check_cpu(dbs_data, cpu); | 381 | dbs_check_cpu(dbs_data, cpu); |
377 | mutex_unlock(&cpu_cdbs->timer_mutex); | 382 | mutex_unlock(&cpu_cdbs->timer_mutex); |
383 | mutex_unlock(&dbs_data->mutex); | ||
378 | break; | 384 | break; |
379 | } | 385 | } |
380 | return 0; | 386 | return 0; |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 099967302bf2..db2e45b4808e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -37,12 +37,13 @@ | |||
37 | #define BYT_RATIOS 0x66a | 37 | #define BYT_RATIOS 0x66a |
38 | #define BYT_VIDS 0x66b | 38 | #define BYT_VIDS 0x66b |
39 | #define BYT_TURBO_RATIOS 0x66c | 39 | #define BYT_TURBO_RATIOS 0x66c |
40 | #define BYT_TURBO_VIDS 0x66d | ||
40 | 41 | ||
41 | 42 | ||
42 | #define FRAC_BITS 6 | 43 | #define FRAC_BITS 8 |
43 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) | 44 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) |
44 | #define fp_toint(X) ((X) >> FRAC_BITS) | 45 | #define fp_toint(X) ((X) >> FRAC_BITS) |
45 | #define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS) | 46 | |
46 | 47 | ||
47 | static inline int32_t mul_fp(int32_t x, int32_t y) | 48 | static inline int32_t mul_fp(int32_t x, int32_t y) |
48 | { | 49 | { |
@@ -58,8 +59,8 @@ struct sample { | |||
58 | int32_t core_pct_busy; | 59 | int32_t core_pct_busy; |
59 | u64 aperf; | 60 | u64 aperf; |
60 | u64 mperf; | 61 | u64 mperf; |
61 | unsigned long long tsc; | ||
62 | int freq; | 62 | int freq; |
63 | ktime_t time; | ||
63 | }; | 64 | }; |
64 | 65 | ||
65 | struct pstate_data { | 66 | struct pstate_data { |
@@ -70,8 +71,9 @@ struct pstate_data { | |||
70 | }; | 71 | }; |
71 | 72 | ||
72 | struct vid_data { | 73 | struct vid_data { |
73 | int32_t min; | 74 | int min; |
74 | int32_t max; | 75 | int max; |
76 | int turbo; | ||
75 | int32_t ratio; | 77 | int32_t ratio; |
76 | }; | 78 | }; |
77 | 79 | ||
@@ -96,9 +98,9 @@ struct cpudata { | |||
96 | struct vid_data vid; | 98 | struct vid_data vid; |
97 | struct _pid pid; | 99 | struct _pid pid; |
98 | 100 | ||
101 | ktime_t last_sample_time; | ||
99 | u64 prev_aperf; | 102 | u64 prev_aperf; |
100 | u64 prev_mperf; | 103 | u64 prev_mperf; |
101 | unsigned long long prev_tsc; | ||
102 | struct sample sample; | 104 | struct sample sample; |
103 | }; | 105 | }; |
104 | 106 | ||
@@ -198,7 +200,10 @@ static signed int pid_calc(struct _pid *pid, int32_t busy) | |||
198 | pid->last_err = fp_error; | 200 | pid->last_err = fp_error; |
199 | 201 | ||
200 | result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; | 202 | result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; |
201 | 203 | if (result >= 0) | |
204 | result = result + (1 << (FRAC_BITS-1)); | ||
205 | else | ||
206 | result = result - (1 << (FRAC_BITS-1)); | ||
202 | return (signed int)fp_toint(result); | 207 | return (signed int)fp_toint(result); |
203 | } | 208 | } |
204 | 209 | ||
@@ -359,14 +364,14 @@ static int byt_get_min_pstate(void) | |||
359 | { | 364 | { |
360 | u64 value; | 365 | u64 value; |
361 | rdmsrl(BYT_RATIOS, value); | 366 | rdmsrl(BYT_RATIOS, value); |
362 | return (value >> 8) & 0xFF; | 367 | return (value >> 8) & 0x3F; |
363 | } | 368 | } |
364 | 369 | ||
365 | static int byt_get_max_pstate(void) | 370 | static int byt_get_max_pstate(void) |
366 | { | 371 | { |
367 | u64 value; | 372 | u64 value; |
368 | rdmsrl(BYT_RATIOS, value); | 373 | rdmsrl(BYT_RATIOS, value); |
369 | return (value >> 16) & 0xFF; | 374 | return (value >> 16) & 0x3F; |
370 | } | 375 | } |
371 | 376 | ||
372 | static int byt_get_turbo_pstate(void) | 377 | static int byt_get_turbo_pstate(void) |
@@ -393,6 +398,9 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) | |||
393 | vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); | 398 | vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); |
394 | vid = fp_toint(vid_fp); | 399 | vid = fp_toint(vid_fp); |
395 | 400 | ||
401 | if (pstate > cpudata->pstate.max_pstate) | ||
402 | vid = cpudata->vid.turbo; | ||
403 | |||
396 | val |= vid; | 404 | val |= vid; |
397 | 405 | ||
398 | wrmsrl(MSR_IA32_PERF_CTL, val); | 406 | wrmsrl(MSR_IA32_PERF_CTL, val); |
@@ -402,13 +410,17 @@ static void byt_get_vid(struct cpudata *cpudata) | |||
402 | { | 410 | { |
403 | u64 value; | 411 | u64 value; |
404 | 412 | ||
413 | |||
405 | rdmsrl(BYT_VIDS, value); | 414 | rdmsrl(BYT_VIDS, value); |
406 | cpudata->vid.min = int_tofp((value >> 8) & 0x7f); | 415 | cpudata->vid.min = int_tofp((value >> 8) & 0x3f); |
407 | cpudata->vid.max = int_tofp((value >> 16) & 0x7f); | 416 | cpudata->vid.max = int_tofp((value >> 16) & 0x3f); |
408 | cpudata->vid.ratio = div_fp( | 417 | cpudata->vid.ratio = div_fp( |
409 | cpudata->vid.max - cpudata->vid.min, | 418 | cpudata->vid.max - cpudata->vid.min, |
410 | int_tofp(cpudata->pstate.max_pstate - | 419 | int_tofp(cpudata->pstate.max_pstate - |
411 | cpudata->pstate.min_pstate)); | 420 | cpudata->pstate.min_pstate)); |
421 | |||
422 | rdmsrl(BYT_TURBO_VIDS, value); | ||
423 | cpudata->vid.turbo = value & 0x7f; | ||
412 | } | 424 | } |
413 | 425 | ||
414 | 426 | ||
@@ -545,58 +557,48 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) | |||
545 | 557 | ||
546 | if (pstate_funcs.get_vid) | 558 | if (pstate_funcs.get_vid) |
547 | pstate_funcs.get_vid(cpu); | 559 | pstate_funcs.get_vid(cpu); |
548 | 560 | intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); | |
549 | /* | ||
550 | * goto max pstate so we don't slow up boot if we are built-in if we are | ||
551 | * a module we will take care of it during normal operation | ||
552 | */ | ||
553 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); | ||
554 | } | 561 | } |
555 | 562 | ||
556 | static inline void intel_pstate_calc_busy(struct cpudata *cpu, | 563 | static inline void intel_pstate_calc_busy(struct cpudata *cpu, |
557 | struct sample *sample) | 564 | struct sample *sample) |
558 | { | 565 | { |
559 | int32_t core_pct; | 566 | int64_t core_pct; |
560 | int32_t c0_pct; | 567 | int32_t rem; |
561 | 568 | ||
562 | core_pct = div_fp(int_tofp((sample->aperf)), | 569 | core_pct = int_tofp(sample->aperf) * int_tofp(100); |
563 | int_tofp((sample->mperf))); | 570 | core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem); |
564 | core_pct = mul_fp(core_pct, int_tofp(100)); | ||
565 | FP_ROUNDUP(core_pct); | ||
566 | 571 | ||
567 | c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc)); | 572 | if ((rem << 1) >= int_tofp(sample->mperf)) |
573 | core_pct += 1; | ||
568 | 574 | ||
569 | sample->freq = fp_toint( | 575 | sample->freq = fp_toint( |
570 | mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct)); | 576 | mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct)); |
571 | 577 | ||
572 | sample->core_pct_busy = mul_fp(core_pct, c0_pct); | 578 | sample->core_pct_busy = (int32_t)core_pct; |
573 | } | 579 | } |
574 | 580 | ||
575 | static inline void intel_pstate_sample(struct cpudata *cpu) | 581 | static inline void intel_pstate_sample(struct cpudata *cpu) |
576 | { | 582 | { |
577 | u64 aperf, mperf; | 583 | u64 aperf, mperf; |
578 | unsigned long long tsc; | ||
579 | 584 | ||
580 | rdmsrl(MSR_IA32_APERF, aperf); | 585 | rdmsrl(MSR_IA32_APERF, aperf); |
581 | rdmsrl(MSR_IA32_MPERF, mperf); | 586 | rdmsrl(MSR_IA32_MPERF, mperf); |
582 | tsc = native_read_tsc(); | ||
583 | 587 | ||
584 | aperf = aperf >> FRAC_BITS; | 588 | aperf = aperf >> FRAC_BITS; |
585 | mperf = mperf >> FRAC_BITS; | 589 | mperf = mperf >> FRAC_BITS; |
586 | tsc = tsc >> FRAC_BITS; | ||
587 | 590 | ||
591 | cpu->last_sample_time = cpu->sample.time; | ||
592 | cpu->sample.time = ktime_get(); | ||
588 | cpu->sample.aperf = aperf; | 593 | cpu->sample.aperf = aperf; |
589 | cpu->sample.mperf = mperf; | 594 | cpu->sample.mperf = mperf; |
590 | cpu->sample.tsc = tsc; | ||
591 | cpu->sample.aperf -= cpu->prev_aperf; | 595 | cpu->sample.aperf -= cpu->prev_aperf; |
592 | cpu->sample.mperf -= cpu->prev_mperf; | 596 | cpu->sample.mperf -= cpu->prev_mperf; |
593 | cpu->sample.tsc -= cpu->prev_tsc; | ||
594 | 597 | ||
595 | intel_pstate_calc_busy(cpu, &cpu->sample); | 598 | intel_pstate_calc_busy(cpu, &cpu->sample); |
596 | 599 | ||
597 | cpu->prev_aperf = aperf; | 600 | cpu->prev_aperf = aperf; |
598 | cpu->prev_mperf = mperf; | 601 | cpu->prev_mperf = mperf; |
599 | cpu->prev_tsc = tsc; | ||
600 | } | 602 | } |
601 | 603 | ||
602 | static inline void intel_pstate_set_sample_time(struct cpudata *cpu) | 604 | static inline void intel_pstate_set_sample_time(struct cpudata *cpu) |
@@ -610,13 +612,25 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) | |||
610 | 612 | ||
611 | static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) | 613 | static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) |
612 | { | 614 | { |
613 | int32_t core_busy, max_pstate, current_pstate; | 615 | int32_t core_busy, max_pstate, current_pstate, sample_ratio; |
616 | u32 duration_us; | ||
617 | u32 sample_time; | ||
614 | 618 | ||
615 | core_busy = cpu->sample.core_pct_busy; | 619 | core_busy = cpu->sample.core_pct_busy; |
616 | max_pstate = int_tofp(cpu->pstate.max_pstate); | 620 | max_pstate = int_tofp(cpu->pstate.max_pstate); |
617 | current_pstate = int_tofp(cpu->pstate.current_pstate); | 621 | current_pstate = int_tofp(cpu->pstate.current_pstate); |
618 | core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); | 622 | core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); |
619 | return FP_ROUNDUP(core_busy); | 623 | |
624 | sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC); | ||
625 | duration_us = (u32) ktime_us_delta(cpu->sample.time, | ||
626 | cpu->last_sample_time); | ||
627 | if (duration_us > sample_time * 3) { | ||
628 | sample_ratio = div_fp(int_tofp(sample_time), | ||
629 | int_tofp(duration_us)); | ||
630 | core_busy = mul_fp(core_busy, sample_ratio); | ||
631 | } | ||
632 | |||
633 | return core_busy; | ||
620 | } | 634 | } |
621 | 635 | ||
622 | static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) | 636 | static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) |
@@ -695,11 +709,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) | |||
695 | cpu = all_cpu_data[cpunum]; | 709 | cpu = all_cpu_data[cpunum]; |
696 | 710 | ||
697 | intel_pstate_get_cpu_pstates(cpu); | 711 | intel_pstate_get_cpu_pstates(cpu); |
698 | if (!cpu->pstate.current_pstate) { | ||
699 | all_cpu_data[cpunum] = NULL; | ||
700 | kfree(cpu); | ||
701 | return -ENODATA; | ||
702 | } | ||
703 | 712 | ||
704 | cpu->cpu = cpunum; | 713 | cpu->cpu = cpunum; |
705 | 714 | ||
@@ -710,7 +719,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) | |||
710 | cpu->timer.expires = jiffies + HZ/100; | 719 | cpu->timer.expires = jiffies + HZ/100; |
711 | intel_pstate_busy_pid_reset(cpu); | 720 | intel_pstate_busy_pid_reset(cpu); |
712 | intel_pstate_sample(cpu); | 721 | intel_pstate_sample(cpu); |
713 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); | ||
714 | 722 | ||
715 | add_timer_on(&cpu->timer, cpunum); | 723 | add_timer_on(&cpu->timer, cpunum); |
716 | 724 | ||
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index f0bc31f5db27..d4add8621944 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c | |||
@@ -62,7 +62,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy, | |||
62 | set_cpus_allowed_ptr(current, &cpus_allowed); | 62 | set_cpus_allowed_ptr(current, &cpus_allowed); |
63 | 63 | ||
64 | /* setting the cpu frequency */ | 64 | /* setting the cpu frequency */ |
65 | clk_set_rate(policy->clk, freq); | 65 | clk_set_rate(policy->clk, freq * 1000); |
66 | 66 | ||
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
@@ -92,7 +92,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
92 | i++) | 92 | i++) |
93 | loongson2_clockmod_table[i].frequency = (rate * i) / 8; | 93 | loongson2_clockmod_table[i].frequency = (rate * i) / 8; |
94 | 94 | ||
95 | ret = clk_set_rate(cpuclk, rate); | 95 | ret = clk_set_rate(cpuclk, rate * 1000); |
96 | if (ret) { | 96 | if (ret) { |
97 | clk_put(cpuclk); | 97 | clk_put(cpuclk); |
98 | return ret; | 98 | return ret; |
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index 97ccc31dbdd8..371e75d2348d 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm | |||
@@ -13,6 +13,12 @@ config ARM_BIG_LITTLE_CPUIDLE | |||
13 | define different C-states for little and big cores through the | 13 | define different C-states for little and big cores through the |
14 | multiple CPU idle drivers infrastructure. | 14 | multiple CPU idle drivers infrastructure. |
15 | 15 | ||
16 | config ARM_CLPS711X_CPUIDLE | ||
17 | bool "CPU Idle Driver for CLPS711X processors" | ||
18 | depends on ARCH_CLPS711X || COMPILE_TEST | ||
19 | help | ||
20 | Select this to enable cpuidle on Cirrus Logic CLPS711X SOCs. | ||
21 | |||
16 | config ARM_HIGHBANK_CPUIDLE | 22 | config ARM_HIGHBANK_CPUIDLE |
17 | bool "CPU Idle Driver for Calxeda processors" | 23 | bool "CPU Idle Driver for Calxeda processors" |
18 | depends on ARM_PSCI | 24 | depends on ARM_PSCI |
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index f71ae1b373c5..534fff575823 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile | |||
@@ -8,6 +8,7 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o | |||
8 | ################################################################################## | 8 | ################################################################################## |
9 | # ARM SoC drivers | 9 | # ARM SoC drivers |
10 | obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE) += cpuidle-big_little.o | 10 | obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE) += cpuidle-big_little.o |
11 | obj-$(CONFIG_ARM_CLPS711X_CPUIDLE) += cpuidle-clps711x.o | ||
11 | obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o | 12 | obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o |
12 | obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o | 13 | obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o |
13 | obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o | 14 | obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o |
diff --git a/drivers/cpuidle/cpuidle-clps711x.c b/drivers/cpuidle/cpuidle-clps711x.c new file mode 100644 index 000000000000..5243811daa6e --- /dev/null +++ b/drivers/cpuidle/cpuidle-clps711x.c | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * CLPS711X CPU idle driver | ||
3 | * | ||
4 | * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/cpuidle.h> | ||
13 | #include <linux/err.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | |||
18 | #define CLPS711X_CPUIDLE_NAME "clps711x-cpuidle" | ||
19 | |||
20 | static void __iomem *clps711x_halt; | ||
21 | |||
22 | static int clps711x_cpuidle_halt(struct cpuidle_device *dev, | ||
23 | struct cpuidle_driver *drv, int index) | ||
24 | { | ||
25 | writel(0xaa, clps711x_halt); | ||
26 | |||
27 | return index; | ||
28 | } | ||
29 | |||
30 | static struct cpuidle_driver clps711x_idle_driver = { | ||
31 | .name = CLPS711X_CPUIDLE_NAME, | ||
32 | .owner = THIS_MODULE, | ||
33 | .states[0] = { | ||
34 | .name = "HALT", | ||
35 | .desc = "CLPS711X HALT", | ||
36 | .enter = clps711x_cpuidle_halt, | ||
37 | .exit_latency = 1, | ||
38 | }, | ||
39 | .state_count = 1, | ||
40 | }; | ||
41 | |||
42 | static int __init clps711x_cpuidle_probe(struct platform_device *pdev) | ||
43 | { | ||
44 | struct resource *res; | ||
45 | |||
46 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
47 | clps711x_halt = devm_ioremap_resource(&pdev->dev, res); | ||
48 | if (IS_ERR(clps711x_halt)) | ||
49 | return PTR_ERR(clps711x_halt); | ||
50 | |||
51 | return cpuidle_register(&clps711x_idle_driver, NULL); | ||
52 | } | ||
53 | |||
54 | static struct platform_driver clps711x_cpuidle_driver = { | ||
55 | .driver = { | ||
56 | .name = CLPS711X_CPUIDLE_NAME, | ||
57 | .owner = THIS_MODULE, | ||
58 | }, | ||
59 | }; | ||
60 | module_platform_driver_probe(clps711x_cpuidle_driver, clps711x_cpuidle_probe); | ||
61 | |||
62 | MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>"); | ||
63 | MODULE_DESCRIPTION("CLPS711X CPU idle driver"); | ||
64 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 8236746e46bb..cb7019977c50 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -32,6 +32,7 @@ LIST_HEAD(cpuidle_detected_devices); | |||
32 | static int enabled_devices; | 32 | static int enabled_devices; |
33 | static int off __read_mostly; | 33 | static int off __read_mostly; |
34 | static int initialized __read_mostly; | 34 | static int initialized __read_mostly; |
35 | static bool use_deepest_state __read_mostly; | ||
35 | 36 | ||
36 | int cpuidle_disabled(void) | 37 | int cpuidle_disabled(void) |
37 | { | 38 | { |
@@ -65,23 +66,42 @@ int cpuidle_play_dead(void) | |||
65 | } | 66 | } |
66 | 67 | ||
67 | /** | 68 | /** |
68 | * cpuidle_enabled - check if the cpuidle framework is ready | 69 | * cpuidle_use_deepest_state - Enable/disable the "deepest idle" mode. |
69 | * @dev: cpuidle device for this cpu | 70 | * @enable: Whether enable or disable the feature. |
70 | * @drv: cpuidle driver for this cpu | 71 | * |
72 | * If the "deepest idle" mode is enabled, cpuidle will ignore the governor and | ||
73 | * always use the state with the greatest exit latency (out of the states that | ||
74 | * are not disabled). | ||
71 | * | 75 | * |
72 | * Return 0 on success, otherwise: | 76 | * This function can only be called after cpuidle_pause() to avoid races. |
73 | * -NODEV : the cpuidle framework is not available | ||
74 | * -EBUSY : the cpuidle framework is not initialized | ||
75 | */ | 77 | */ |
76 | int cpuidle_enabled(struct cpuidle_driver *drv, struct cpuidle_device *dev) | 78 | void cpuidle_use_deepest_state(bool enable) |
77 | { | 79 | { |
78 | if (off || !initialized) | 80 | use_deepest_state = enable; |
79 | return -ENODEV; | 81 | } |
80 | 82 | ||
81 | if (!drv || !dev || !dev->enabled) | 83 | /** |
82 | return -EBUSY; | 84 | * cpuidle_find_deepest_state - Find the state of the greatest exit latency. |
85 | * @drv: cpuidle driver for a given CPU. | ||
86 | * @dev: cpuidle device for a given CPU. | ||
87 | */ | ||
88 | static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, | ||
89 | struct cpuidle_device *dev) | ||
90 | { | ||
91 | unsigned int latency_req = 0; | ||
92 | int i, ret = CPUIDLE_DRIVER_STATE_START - 1; | ||
83 | 93 | ||
84 | return 0; | 94 | for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { |
95 | struct cpuidle_state *s = &drv->states[i]; | ||
96 | struct cpuidle_state_usage *su = &dev->states_usage[i]; | ||
97 | |||
98 | if (s->disabled || su->disable || s->exit_latency <= latency_req) | ||
99 | continue; | ||
100 | |||
101 | latency_req = s->exit_latency; | ||
102 | ret = i; | ||
103 | } | ||
104 | return ret; | ||
85 | } | 105 | } |
86 | 106 | ||
87 | /** | 107 | /** |
@@ -138,6 +158,15 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
138 | */ | 158 | */ |
139 | int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | 159 | int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) |
140 | { | 160 | { |
161 | if (off || !initialized) | ||
162 | return -ENODEV; | ||
163 | |||
164 | if (!drv || !dev || !dev->enabled) | ||
165 | return -EBUSY; | ||
166 | |||
167 | if (unlikely(use_deepest_state)) | ||
168 | return cpuidle_find_deepest_state(drv, dev); | ||
169 | |||
141 | return cpuidle_curr_governor->select(drv, dev); | 170 | return cpuidle_curr_governor->select(drv, dev); |
142 | } | 171 | } |
143 | 172 | ||
@@ -169,7 +198,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, | |||
169 | */ | 198 | */ |
170 | void cpuidle_reflect(struct cpuidle_device *dev, int index) | 199 | void cpuidle_reflect(struct cpuidle_device *dev, int index) |
171 | { | 200 | { |
172 | if (cpuidle_curr_governor->reflect) | 201 | if (cpuidle_curr_governor->reflect && !unlikely(use_deepest_state)) |
173 | cpuidle_curr_governor->reflect(dev, index); | 202 | cpuidle_curr_governor->reflect(dev, index); |
174 | } | 203 | } |
175 | 204 | ||
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 71b523293354..c4f80c15a48d 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -296,7 +296,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
296 | data->needs_update = 0; | 296 | data->needs_update = 0; |
297 | } | 297 | } |
298 | 298 | ||
299 | data->last_state_idx = 0; | 299 | data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1; |
300 | 300 | ||
301 | /* Special case when user has set very strict latency requirement */ | 301 | /* Special case when user has set very strict latency requirement */ |
302 | if (unlikely(latency_req == 0)) | 302 | if (unlikely(latency_req == 0)) |
@@ -311,13 +311,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
311 | data->bucket = which_bucket(data->next_timer_us); | 311 | data->bucket = which_bucket(data->next_timer_us); |
312 | 312 | ||
313 | /* | 313 | /* |
314 | * if the correction factor is 0 (eg first time init or cpu hotplug | ||
315 | * etc), we actually want to start out with a unity factor. | ||
316 | */ | ||
317 | if (data->correction_factor[data->bucket] == 0) | ||
318 | data->correction_factor[data->bucket] = RESOLUTION * DECAY; | ||
319 | |||
320 | /* | ||
321 | * Force the result of multiplication to be 64 bits even if both | 314 | * Force the result of multiplication to be 64 bits even if both |
322 | * operands are 32 bits. | 315 | * operands are 32 bits. |
323 | * Make sure to round up for half microseconds. | 316 | * Make sure to round up for half microseconds. |
@@ -466,9 +459,17 @@ static int menu_enable_device(struct cpuidle_driver *drv, | |||
466 | struct cpuidle_device *dev) | 459 | struct cpuidle_device *dev) |
467 | { | 460 | { |
468 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); | 461 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); |
462 | int i; | ||
469 | 463 | ||
470 | memset(data, 0, sizeof(struct menu_device)); | 464 | memset(data, 0, sizeof(struct menu_device)); |
471 | 465 | ||
466 | /* | ||
467 | * if the correction factor is 0 (eg first time init or cpu hotplug | ||
468 | * etc), we actually want to start out with a unity factor. | ||
469 | */ | ||
470 | for(i = 0; i < BUCKETS; i++) | ||
471 | data->correction_factor[i] = RESOLUTION * DECAY; | ||
472 | |||
472 | return 0; | 473 | return 0; |
473 | } | 474 | } |
474 | 475 | ||
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 9f25f5296029..0eabd81e1a90 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c | |||
@@ -16,9 +16,13 @@ | |||
16 | char *tmp; \ | 16 | char *tmp; \ |
17 | \ | 17 | \ |
18 | tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \ | 18 | tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \ |
19 | sprintf(tmp, format, param); \ | 19 | if (likely(tmp)) { \ |
20 | strcat(str, tmp); \ | 20 | sprintf(tmp, format, param); \ |
21 | kfree(tmp); \ | 21 | strcat(str, tmp); \ |
22 | kfree(tmp); \ | ||
23 | } else { \ | ||
24 | strcat(str, "kmalloc failure in SPRINTFCAT"); \ | ||
25 | } \ | ||
22 | } | 26 | } |
23 | 27 | ||
24 | static void report_jump_idx(u32 status, char *outstr) | 28 | static void report_jump_idx(u32 status, char *outstr) |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index a886713937fd..d5d30ed863ce 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref) | |||
1009 | dma_unmap_page(dev, unmap->addr[i], unmap->len, | 1009 | dma_unmap_page(dev, unmap->addr[i], unmap->len, |
1010 | DMA_BIDIRECTIONAL); | 1010 | DMA_BIDIRECTIONAL); |
1011 | } | 1011 | } |
1012 | cnt = unmap->map_cnt; | ||
1012 | mempool_free(unmap, __get_unmap_pool(cnt)->pool); | 1013 | mempool_free(unmap, __get_unmap_pool(cnt)->pool); |
1013 | } | 1014 | } |
1014 | 1015 | ||
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) | |||
1074 | memset(unmap, 0, sizeof(*unmap)); | 1075 | memset(unmap, 0, sizeof(*unmap)); |
1075 | kref_init(&unmap->kref); | 1076 | kref_init(&unmap->kref); |
1076 | unmap->dev = dev; | 1077 | unmap->dev = dev; |
1078 | unmap->map_cnt = nr; | ||
1077 | 1079 | ||
1078 | return unmap; | 1080 | return unmap; |
1079 | } | 1081 | } |
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index cfdbb92aae1d..7a740769c2fa 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
@@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1548 | /* Disable BLOCK interrupts as well */ | 1548 | /* Disable BLOCK interrupts as well */ |
1549 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | 1549 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
1550 | 1550 | ||
1551 | err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, | ||
1552 | IRQF_SHARED, "dw_dmac", dw); | ||
1553 | if (err) | ||
1554 | return err; | ||
1555 | |||
1556 | /* Create a pool of consistent memory blocks for hardware descriptors */ | 1551 | /* Create a pool of consistent memory blocks for hardware descriptors */ |
1557 | dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, | 1552 | dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, |
1558 | sizeof(struct dw_desc), 4, 0); | 1553 | sizeof(struct dw_desc), 4, 0); |
@@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1563 | 1558 | ||
1564 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); | 1559 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); |
1565 | 1560 | ||
1561 | err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, | ||
1562 | "dw_dmac", dw); | ||
1563 | if (err) | ||
1564 | return err; | ||
1565 | |||
1566 | INIT_LIST_HEAD(&dw->dma.channels); | 1566 | INIT_LIST_HEAD(&dw->dma.channels); |
1567 | for (i = 0; i < nr_channels; i++) { | 1567 | for (i = 0; i < nr_channels; i++) { |
1568 | struct dw_dma_chan *dwc = &dw->chan[i]; | 1568 | struct dw_dma_chan *dwc = &dw->chan[i]; |
@@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip) | |||
1667 | dw_dma_off(dw); | 1667 | dw_dma_off(dw); |
1668 | dma_async_device_unregister(&dw->dma); | 1668 | dma_async_device_unregister(&dw->dma); |
1669 | 1669 | ||
1670 | free_irq(chip->irq, dw); | ||
1670 | tasklet_kill(&dw->tasklet); | 1671 | tasklet_kill(&dw->tasklet); |
1671 | 1672 | ||
1672 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, | 1673 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 766b68ed505c..394cbc5c93e3 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan, | |||
191 | 191 | ||
192 | static void mv_chan_activate(struct mv_xor_chan *chan) | 192 | static void mv_chan_activate(struct mv_xor_chan *chan) |
193 | { | 193 | { |
194 | u32 activation; | ||
195 | |||
196 | dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); | 194 | dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); |
197 | activation = readl_relaxed(XOR_ACTIVATION(chan)); | 195 | |
198 | activation |= 0x1; | 196 | /* writel ensures all descriptors are flushed before activation */ |
199 | writel_relaxed(activation, XOR_ACTIVATION(chan)); | 197 | writel(BIT(0), XOR_ACTIVATION(chan)); |
200 | } | 198 | } |
201 | 199 | ||
202 | static char mv_chan_is_busy(struct mv_xor_chan *chan) | 200 | static char mv_chan_is_busy(struct mv_xor_chan *chan) |
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index ab26d46bbe15..5ebdfbc1051e 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c | |||
@@ -113,11 +113,9 @@ struct sa11x0_dma_phy { | |||
113 | struct sa11x0_dma_desc *txd_load; | 113 | struct sa11x0_dma_desc *txd_load; |
114 | unsigned sg_done; | 114 | unsigned sg_done; |
115 | struct sa11x0_dma_desc *txd_done; | 115 | struct sa11x0_dma_desc *txd_done; |
116 | #ifdef CONFIG_PM_SLEEP | ||
117 | u32 dbs[2]; | 116 | u32 dbs[2]; |
118 | u32 dbt[2]; | 117 | u32 dbt[2]; |
119 | u32 dcsr; | 118 | u32 dcsr; |
120 | #endif | ||
121 | }; | 119 | }; |
122 | 120 | ||
123 | struct sa11x0_dma_dev { | 121 | struct sa11x0_dma_dev { |
@@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev) | |||
984 | return 0; | 982 | return 0; |
985 | } | 983 | } |
986 | 984 | ||
987 | #ifdef CONFIG_PM_SLEEP | ||
988 | static int sa11x0_dma_suspend(struct device *dev) | 985 | static int sa11x0_dma_suspend(struct device *dev) |
989 | { | 986 | { |
990 | struct sa11x0_dma_dev *d = dev_get_drvdata(dev); | 987 | struct sa11x0_dma_dev *d = dev_get_drvdata(dev); |
@@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev) | |||
1054 | 1051 | ||
1055 | return 0; | 1052 | return 0; |
1056 | } | 1053 | } |
1057 | #endif | ||
1058 | 1054 | ||
1059 | static const struct dev_pm_ops sa11x0_dma_pm_ops = { | 1055 | static const struct dev_pm_ops sa11x0_dma_pm_ops = { |
1060 | .suspend_noirq = sa11x0_dma_suspend, | 1056 | .suspend_noirq = sa11x0_dma_suspend, |
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index c98764aeeec6..f477308b6e9c 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h | |||
@@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation) | |||
237 | 237 | ||
238 | #define LOCAL_BUS 0xffc0 | 238 | #define LOCAL_BUS 0xffc0 |
239 | 239 | ||
240 | /* arbitrarily chosen maximum range for physical DMA: 128 TB */ | 240 | /* OHCI-1394's default upper bound for physical DMA: 4 GB */ |
241 | #define FW_MAX_PHYSICAL_RANGE (128ULL << 40) | 241 | #define FW_MAX_PHYSICAL_RANGE (1ULL << 32) |
242 | 242 | ||
243 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); | 243 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); |
244 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); | 244 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); |
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 8db663219560..586f2f7f6993 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev, | |||
3716 | version >> 16, version & 0xff, ohci->card.index, | 3716 | version >> 16, version & 0xff, ohci->card.index, |
3717 | ohci->n_ir, ohci->n_it, ohci->quirks, | 3717 | ohci->n_ir, ohci->n_it, ohci->quirks, |
3718 | reg_read(ohci, OHCI1394_PhyUpperBound) ? | 3718 | reg_read(ohci, OHCI1394_PhyUpperBound) ? |
3719 | ", >4 GB phys DMA" : ""); | 3719 | ", physUB" : ""); |
3720 | 3720 | ||
3721 | return 0; | 3721 | return 0; |
3722 | 3722 | ||
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 3ee852c9925b..071c2c969eec 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c | |||
@@ -756,6 +756,7 @@ static const struct { | |||
756 | */ | 756 | */ |
757 | { ACPI_SIG_IBFT }, | 757 | { ACPI_SIG_IBFT }, |
758 | { "iBFT" }, | 758 | { "iBFT" }, |
759 | { "BIFT" }, /* Broadcom iSCSI Offload */ | ||
759 | }; | 760 | }; |
760 | 761 | ||
761 | static void __init acpi_find_ibft_region(void) | 762 | static void __init acpi_find_ibft_region(void) |
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c index e73c6755a5eb..70304220a479 100644 --- a/drivers/gpio/gpio-ich.c +++ b/drivers/gpio/gpio-ich.c | |||
@@ -305,6 +305,8 @@ static struct ichx_desc ich6_desc = { | |||
305 | 305 | ||
306 | .ngpio = 50, | 306 | .ngpio = 50, |
307 | .have_blink = true, | 307 | .have_blink = true, |
308 | .regs = ichx_regs, | ||
309 | .reglen = ichx_reglen, | ||
308 | }; | 310 | }; |
309 | 311 | ||
310 | /* Intel 3100 */ | 312 | /* Intel 3100 */ |
@@ -324,6 +326,8 @@ static struct ichx_desc i3100_desc = { | |||
324 | .uses_gpe0 = true, | 326 | .uses_gpe0 = true, |
325 | 327 | ||
326 | .ngpio = 50, | 328 | .ngpio = 50, |
329 | .regs = ichx_regs, | ||
330 | .reglen = ichx_reglen, | ||
327 | }; | 331 | }; |
328 | 332 | ||
329 | /* ICH7 and ICH8-based */ | 333 | /* ICH7 and ICH8-based */ |
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index 99a68310e7c0..3d53fd6880d1 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c | |||
@@ -894,9 +894,11 @@ static int mcp23s08_probe(struct spi_device *spi) | |||
894 | dev_err(&spi->dev, "invalid spi-present-mask\n"); | 894 | dev_err(&spi->dev, "invalid spi-present-mask\n"); |
895 | return -ENODEV; | 895 | return -ENODEV; |
896 | } | 896 | } |
897 | 897 | for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) { | |
898 | for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) | 898 | if ((spi_present_mask & (1 << addr))) |
899 | chips++; | ||
899 | pullups[addr] = 0; | 900 | pullups[addr] = 0; |
901 | } | ||
900 | } else { | 902 | } else { |
901 | type = spi_get_device_id(spi)->driver_data; | 903 | type = spi_get_device_id(spi)->driver_data; |
902 | pdata = dev_get_platdata(&spi->dev); | 904 | pdata = dev_get_platdata(&spi->dev); |
@@ -919,12 +921,12 @@ static int mcp23s08_probe(struct spi_device *spi) | |||
919 | pullups[addr] = pdata->chip[addr].pullups; | 921 | pullups[addr] = pdata->chip[addr].pullups; |
920 | } | 922 | } |
921 | 923 | ||
922 | if (!chips) | ||
923 | return -ENODEV; | ||
924 | |||
925 | base = pdata->base; | 924 | base = pdata->base; |
926 | } | 925 | } |
927 | 926 | ||
927 | if (!chips) | ||
928 | return -ENODEV; | ||
929 | |||
928 | data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08), | 930 | data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08), |
929 | GFP_KERNEL); | 931 | GFP_KERNEL); |
930 | if (!data) | 932 | if (!data) |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 96177eec0a0e..eedb023af27d 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev) | |||
1833 | flush_workqueue(dev_priv->wq); | 1833 | flush_workqueue(dev_priv->wq); |
1834 | 1834 | ||
1835 | mutex_lock(&dev->struct_mutex); | 1835 | mutex_lock(&dev->struct_mutex); |
1836 | i915_gem_free_all_phys_object(dev); | ||
1837 | i915_gem_cleanup_ringbuffer(dev); | 1836 | i915_gem_cleanup_ringbuffer(dev); |
1838 | i915_gem_context_fini(dev); | 1837 | i915_gem_context_fini(dev); |
1839 | WARN_ON(dev_priv->mm.aliasing_ppgtt); | 1838 | WARN_ON(dev_priv->mm.aliasing_ppgtt); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 108e1ec2fa4b..388c028e223c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -242,18 +242,6 @@ struct intel_ddi_plls { | |||
242 | #define WATCH_LISTS 0 | 242 | #define WATCH_LISTS 0 |
243 | #define WATCH_GTT 0 | 243 | #define WATCH_GTT 0 |
244 | 244 | ||
245 | #define I915_GEM_PHYS_CURSOR_0 1 | ||
246 | #define I915_GEM_PHYS_CURSOR_1 2 | ||
247 | #define I915_GEM_PHYS_OVERLAY_REGS 3 | ||
248 | #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) | ||
249 | |||
250 | struct drm_i915_gem_phys_object { | ||
251 | int id; | ||
252 | struct page **page_list; | ||
253 | drm_dma_handle_t *handle; | ||
254 | struct drm_i915_gem_object *cur_obj; | ||
255 | }; | ||
256 | |||
257 | struct opregion_header; | 245 | struct opregion_header; |
258 | struct opregion_acpi; | 246 | struct opregion_acpi; |
259 | struct opregion_swsci; | 247 | struct opregion_swsci; |
@@ -1187,9 +1175,6 @@ struct i915_gem_mm { | |||
1187 | /** Bit 6 swizzling required for Y tiling */ | 1175 | /** Bit 6 swizzling required for Y tiling */ |
1188 | uint32_t bit_6_swizzle_y; | 1176 | uint32_t bit_6_swizzle_y; |
1189 | 1177 | ||
1190 | /* storage for physical objects */ | ||
1191 | struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; | ||
1192 | |||
1193 | /* accounting, useful for userland debugging */ | 1178 | /* accounting, useful for userland debugging */ |
1194 | spinlock_t object_stat_lock; | 1179 | spinlock_t object_stat_lock; |
1195 | size_t object_memory; | 1180 | size_t object_memory; |
@@ -1769,7 +1754,7 @@ struct drm_i915_gem_object { | |||
1769 | struct drm_file *pin_filp; | 1754 | struct drm_file *pin_filp; |
1770 | 1755 | ||
1771 | /** for phy allocated objects */ | 1756 | /** for phy allocated objects */ |
1772 | struct drm_i915_gem_phys_object *phys_obj; | 1757 | drm_dma_handle_t *phys_handle; |
1773 | }; | 1758 | }; |
1774 | 1759 | ||
1775 | #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) | 1760 | #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) |
@@ -2204,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma); | |||
2204 | #define PIN_MAPPABLE 0x1 | 2189 | #define PIN_MAPPABLE 0x1 |
2205 | #define PIN_NONBLOCK 0x2 | 2190 | #define PIN_NONBLOCK 0x2 |
2206 | #define PIN_GLOBAL 0x4 | 2191 | #define PIN_GLOBAL 0x4 |
2192 | #define PIN_OFFSET_BIAS 0x8 | ||
2193 | #define PIN_OFFSET_MASK (~4095) | ||
2207 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, | 2194 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
2208 | struct i915_address_space *vm, | 2195 | struct i915_address_space *vm, |
2209 | uint32_t alignment, | 2196 | uint32_t alignment, |
2210 | unsigned flags); | 2197 | uint64_t flags); |
2211 | int __must_check i915_vma_unbind(struct i915_vma *vma); | 2198 | int __must_check i915_vma_unbind(struct i915_vma *vma); |
2212 | int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); | 2199 | int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); |
2213 | void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); | 2200 | void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); |
@@ -2334,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | |||
2334 | u32 alignment, | 2321 | u32 alignment, |
2335 | struct intel_ring_buffer *pipelined); | 2322 | struct intel_ring_buffer *pipelined); |
2336 | void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); | 2323 | void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); |
2337 | int i915_gem_attach_phys_object(struct drm_device *dev, | 2324 | int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, |
2338 | struct drm_i915_gem_object *obj, | ||
2339 | int id, | ||
2340 | int align); | 2325 | int align); |
2341 | void i915_gem_detach_phys_object(struct drm_device *dev, | ||
2342 | struct drm_i915_gem_object *obj); | ||
2343 | void i915_gem_free_all_phys_object(struct drm_device *dev); | ||
2344 | int i915_gem_open(struct drm_device *dev, struct drm_file *file); | 2326 | int i915_gem_open(struct drm_device *dev, struct drm_file *file); |
2345 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); | 2327 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
2346 | 2328 | ||
@@ -2465,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev, | |||
2465 | int min_size, | 2447 | int min_size, |
2466 | unsigned alignment, | 2448 | unsigned alignment, |
2467 | unsigned cache_level, | 2449 | unsigned cache_level, |
2450 | unsigned long start, | ||
2451 | unsigned long end, | ||
2468 | unsigned flags); | 2452 | unsigned flags); |
2469 | int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); | 2453 | int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); |
2470 | int i915_gem_evict_everything(struct drm_device *dev); | 2454 | int i915_gem_evict_everything(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2871ce75f438..3326770c9ed2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o | |||
43 | static __must_check int | 43 | static __must_check int |
44 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | 44 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, |
45 | bool readonly); | 45 | bool readonly); |
46 | static int i915_gem_phys_pwrite(struct drm_device *dev, | ||
47 | struct drm_i915_gem_object *obj, | ||
48 | struct drm_i915_gem_pwrite *args, | ||
49 | struct drm_file *file); | ||
50 | 46 | ||
51 | static void i915_gem_write_fence(struct drm_device *dev, int reg, | 47 | static void i915_gem_write_fence(struct drm_device *dev, int reg, |
52 | struct drm_i915_gem_object *obj); | 48 | struct drm_i915_gem_object *obj); |
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
209 | return 0; | 205 | return 0; |
210 | } | 206 | } |
211 | 207 | ||
208 | static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj) | ||
209 | { | ||
210 | drm_dma_handle_t *phys = obj->phys_handle; | ||
211 | |||
212 | if (!phys) | ||
213 | return; | ||
214 | |||
215 | if (obj->madv == I915_MADV_WILLNEED) { | ||
216 | struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; | ||
217 | char *vaddr = phys->vaddr; | ||
218 | int i; | ||
219 | |||
220 | for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { | ||
221 | struct page *page = shmem_read_mapping_page(mapping, i); | ||
222 | if (!IS_ERR(page)) { | ||
223 | char *dst = kmap_atomic(page); | ||
224 | memcpy(dst, vaddr, PAGE_SIZE); | ||
225 | drm_clflush_virt_range(dst, PAGE_SIZE); | ||
226 | kunmap_atomic(dst); | ||
227 | |||
228 | set_page_dirty(page); | ||
229 | mark_page_accessed(page); | ||
230 | page_cache_release(page); | ||
231 | } | ||
232 | vaddr += PAGE_SIZE; | ||
233 | } | ||
234 | i915_gem_chipset_flush(obj->base.dev); | ||
235 | } | ||
236 | |||
237 | #ifdef CONFIG_X86 | ||
238 | set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); | ||
239 | #endif | ||
240 | drm_pci_free(obj->base.dev, phys); | ||
241 | obj->phys_handle = NULL; | ||
242 | } | ||
243 | |||
244 | int | ||
245 | i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, | ||
246 | int align) | ||
247 | { | ||
248 | drm_dma_handle_t *phys; | ||
249 | struct address_space *mapping; | ||
250 | char *vaddr; | ||
251 | int i; | ||
252 | |||
253 | if (obj->phys_handle) { | ||
254 | if ((unsigned long)obj->phys_handle->vaddr & (align -1)) | ||
255 | return -EBUSY; | ||
256 | |||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | if (obj->madv != I915_MADV_WILLNEED) | ||
261 | return -EFAULT; | ||
262 | |||
263 | if (obj->base.filp == NULL) | ||
264 | return -EINVAL; | ||
265 | |||
266 | /* create a new object */ | ||
267 | phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); | ||
268 | if (!phys) | ||
269 | return -ENOMEM; | ||
270 | |||
271 | vaddr = phys->vaddr; | ||
272 | #ifdef CONFIG_X86 | ||
273 | set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE); | ||
274 | #endif | ||
275 | mapping = file_inode(obj->base.filp)->i_mapping; | ||
276 | for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { | ||
277 | struct page *page; | ||
278 | char *src; | ||
279 | |||
280 | page = shmem_read_mapping_page(mapping, i); | ||
281 | if (IS_ERR(page)) { | ||
282 | #ifdef CONFIG_X86 | ||
283 | set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); | ||
284 | #endif | ||
285 | drm_pci_free(obj->base.dev, phys); | ||
286 | return PTR_ERR(page); | ||
287 | } | ||
288 | |||
289 | src = kmap_atomic(page); | ||
290 | memcpy(vaddr, src, PAGE_SIZE); | ||
291 | kunmap_atomic(src); | ||
292 | |||
293 | mark_page_accessed(page); | ||
294 | page_cache_release(page); | ||
295 | |||
296 | vaddr += PAGE_SIZE; | ||
297 | } | ||
298 | |||
299 | obj->phys_handle = phys; | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static int | ||
304 | i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, | ||
305 | struct drm_i915_gem_pwrite *args, | ||
306 | struct drm_file *file_priv) | ||
307 | { | ||
308 | struct drm_device *dev = obj->base.dev; | ||
309 | void *vaddr = obj->phys_handle->vaddr + args->offset; | ||
310 | char __user *user_data = to_user_ptr(args->data_ptr); | ||
311 | |||
312 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { | ||
313 | unsigned long unwritten; | ||
314 | |||
315 | /* The physical object once assigned is fixed for the lifetime | ||
316 | * of the obj, so we can safely drop the lock and continue | ||
317 | * to access vaddr. | ||
318 | */ | ||
319 | mutex_unlock(&dev->struct_mutex); | ||
320 | unwritten = copy_from_user(vaddr, user_data, args->size); | ||
321 | mutex_lock(&dev->struct_mutex); | ||
322 | if (unwritten) | ||
323 | return -EFAULT; | ||
324 | } | ||
325 | |||
326 | i915_gem_chipset_flush(dev); | ||
327 | return 0; | ||
328 | } | ||
329 | |||
212 | void *i915_gem_object_alloc(struct drm_device *dev) | 330 | void *i915_gem_object_alloc(struct drm_device *dev) |
213 | { | 331 | { |
214 | struct drm_i915_private *dev_priv = dev->dev_private; | 332 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
921 | * pread/pwrite currently are reading and writing from the CPU | 1039 | * pread/pwrite currently are reading and writing from the CPU |
922 | * perspective, requiring manual detiling by the client. | 1040 | * perspective, requiring manual detiling by the client. |
923 | */ | 1041 | */ |
924 | if (obj->phys_obj) { | 1042 | if (obj->phys_handle) { |
925 | ret = i915_gem_phys_pwrite(dev, obj, args, file); | 1043 | ret = i915_gem_phys_pwrite(obj, args, file); |
926 | goto out; | 1044 | goto out; |
927 | } | 1045 | } |
928 | 1046 | ||
@@ -3208,12 +3326,14 @@ static struct i915_vma * | |||
3208 | i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, | 3326 | i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, |
3209 | struct i915_address_space *vm, | 3327 | struct i915_address_space *vm, |
3210 | unsigned alignment, | 3328 | unsigned alignment, |
3211 | unsigned flags) | 3329 | uint64_t flags) |
3212 | { | 3330 | { |
3213 | struct drm_device *dev = obj->base.dev; | 3331 | struct drm_device *dev = obj->base.dev; |
3214 | struct drm_i915_private *dev_priv = dev->dev_private; | 3332 | struct drm_i915_private *dev_priv = dev->dev_private; |
3215 | u32 size, fence_size, fence_alignment, unfenced_alignment; | 3333 | u32 size, fence_size, fence_alignment, unfenced_alignment; |
3216 | size_t gtt_max = | 3334 | unsigned long start = |
3335 | flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; | ||
3336 | unsigned long end = | ||
3217 | flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; | 3337 | flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; |
3218 | struct i915_vma *vma; | 3338 | struct i915_vma *vma; |
3219 | int ret; | 3339 | int ret; |
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, | |||
3242 | /* If the object is bigger than the entire aperture, reject it early | 3362 | /* If the object is bigger than the entire aperture, reject it early |
3243 | * before evicting everything in a vain attempt to find space. | 3363 | * before evicting everything in a vain attempt to find space. |
3244 | */ | 3364 | */ |
3245 | if (obj->base.size > gtt_max) { | 3365 | if (obj->base.size > end) { |
3246 | DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", | 3366 | DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n", |
3247 | obj->base.size, | 3367 | obj->base.size, |
3248 | flags & PIN_MAPPABLE ? "mappable" : "total", | 3368 | flags & PIN_MAPPABLE ? "mappable" : "total", |
3249 | gtt_max); | 3369 | end); |
3250 | return ERR_PTR(-E2BIG); | 3370 | return ERR_PTR(-E2BIG); |
3251 | } | 3371 | } |
3252 | 3372 | ||
@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, | |||
3263 | search_free: | 3383 | search_free: |
3264 | ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, | 3384 | ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, |
3265 | size, alignment, | 3385 | size, alignment, |
3266 | obj->cache_level, 0, gtt_max, | 3386 | obj->cache_level, |
3387 | start, end, | ||
3267 | DRM_MM_SEARCH_DEFAULT, | 3388 | DRM_MM_SEARCH_DEFAULT, |
3268 | DRM_MM_CREATE_DEFAULT); | 3389 | DRM_MM_CREATE_DEFAULT); |
3269 | if (ret) { | 3390 | if (ret) { |
3270 | ret = i915_gem_evict_something(dev, vm, size, alignment, | 3391 | ret = i915_gem_evict_something(dev, vm, size, alignment, |
3271 | obj->cache_level, flags); | 3392 | obj->cache_level, |
3393 | start, end, | ||
3394 | flags); | ||
3272 | if (ret == 0) | 3395 | if (ret == 0) |
3273 | goto search_free; | 3396 | goto search_free; |
3274 | 3397 | ||
@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
3828 | return ret; | 3951 | return ret; |
3829 | } | 3952 | } |
3830 | 3953 | ||
3954 | static bool | ||
3955 | i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) | ||
3956 | { | ||
3957 | struct drm_i915_gem_object *obj = vma->obj; | ||
3958 | |||
3959 | if (alignment && | ||
3960 | vma->node.start & (alignment - 1)) | ||
3961 | return true; | ||
3962 | |||
3963 | if (flags & PIN_MAPPABLE && !obj->map_and_fenceable) | ||
3964 | return true; | ||
3965 | |||
3966 | if (flags & PIN_OFFSET_BIAS && | ||
3967 | vma->node.start < (flags & PIN_OFFSET_MASK)) | ||
3968 | return true; | ||
3969 | |||
3970 | return false; | ||
3971 | } | ||
3972 | |||
3831 | int | 3973 | int |
3832 | i915_gem_object_pin(struct drm_i915_gem_object *obj, | 3974 | i915_gem_object_pin(struct drm_i915_gem_object *obj, |
3833 | struct i915_address_space *vm, | 3975 | struct i915_address_space *vm, |
3834 | uint32_t alignment, | 3976 | uint32_t alignment, |
3835 | unsigned flags) | 3977 | uint64_t flags) |
3836 | { | 3978 | { |
3837 | struct i915_vma *vma; | 3979 | struct i915_vma *vma; |
3838 | int ret; | 3980 | int ret; |
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, | |||
3845 | if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) | 3987 | if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) |
3846 | return -EBUSY; | 3988 | return -EBUSY; |
3847 | 3989 | ||
3848 | if ((alignment && | 3990 | if (i915_vma_misplaced(vma, alignment, flags)) { |
3849 | vma->node.start & (alignment - 1)) || | ||
3850 | (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) { | ||
3851 | WARN(vma->pin_count, | 3991 | WARN(vma->pin_count, |
3852 | "bo is already pinned with incorrect alignment:" | 3992 | "bo is already pinned with incorrect alignment:" |
3853 | " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," | 3993 | " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," |
3854 | " obj->map_and_fenceable=%d\n", | 3994 | " obj->map_and_fenceable=%d\n", |
3855 | i915_gem_obj_offset(obj, vm), alignment, | 3995 | i915_gem_obj_offset(obj, vm), alignment, |
3856 | flags & PIN_MAPPABLE, | 3996 | !!(flags & PIN_MAPPABLE), |
3857 | obj->map_and_fenceable); | 3997 | obj->map_and_fenceable); |
3858 | ret = i915_vma_unbind(vma); | 3998 | ret = i915_vma_unbind(vma); |
3859 | if (ret) | 3999 | if (ret) |
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
4163 | 4303 | ||
4164 | trace_i915_gem_object_destroy(obj); | 4304 | trace_i915_gem_object_destroy(obj); |
4165 | 4305 | ||
4166 | if (obj->phys_obj) | ||
4167 | i915_gem_detach_phys_object(dev, obj); | ||
4168 | |||
4169 | list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { | 4306 | list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { |
4170 | int ret; | 4307 | int ret; |
4171 | 4308 | ||
@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
4183 | } | 4320 | } |
4184 | } | 4321 | } |
4185 | 4322 | ||
4323 | i915_gem_object_detach_phys(obj); | ||
4324 | |||
4186 | /* Stolen objects don't hold a ref, but do hold pin count. Fix that up | 4325 | /* Stolen objects don't hold a ref, but do hold pin count. Fix that up |
4187 | * before progressing. */ | 4326 | * before progressing. */ |
4188 | if (obj->stolen) | 4327 | if (obj->stolen) |
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev) | |||
4646 | register_shrinker(&dev_priv->mm.inactive_shrinker); | 4785 | register_shrinker(&dev_priv->mm.inactive_shrinker); |
4647 | } | 4786 | } |
4648 | 4787 | ||
4649 | /* | ||
4650 | * Create a physically contiguous memory object for this object | ||
4651 | * e.g. for cursor + overlay regs | ||
4652 | */ | ||
4653 | static int i915_gem_init_phys_object(struct drm_device *dev, | ||
4654 | int id, int size, int align) | ||
4655 | { | ||
4656 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4657 | struct drm_i915_gem_phys_object *phys_obj; | ||
4658 | int ret; | ||
4659 | |||
4660 | if (dev_priv->mm.phys_objs[id - 1] || !size) | ||
4661 | return 0; | ||
4662 | |||
4663 | phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL); | ||
4664 | if (!phys_obj) | ||
4665 | return -ENOMEM; | ||
4666 | |||
4667 | phys_obj->id = id; | ||
4668 | |||
4669 | phys_obj->handle = drm_pci_alloc(dev, size, align); | ||
4670 | if (!phys_obj->handle) { | ||
4671 | ret = -ENOMEM; | ||
4672 | goto kfree_obj; | ||
4673 | } | ||
4674 | #ifdef CONFIG_X86 | ||
4675 | set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); | ||
4676 | #endif | ||
4677 | |||
4678 | dev_priv->mm.phys_objs[id - 1] = phys_obj; | ||
4679 | |||
4680 | return 0; | ||
4681 | kfree_obj: | ||
4682 | kfree(phys_obj); | ||
4683 | return ret; | ||
4684 | } | ||
4685 | |||
4686 | static void i915_gem_free_phys_object(struct drm_device *dev, int id) | ||
4687 | { | ||
4688 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4689 | struct drm_i915_gem_phys_object *phys_obj; | ||
4690 | |||
4691 | if (!dev_priv->mm.phys_objs[id - 1]) | ||
4692 | return; | ||
4693 | |||
4694 | phys_obj = dev_priv->mm.phys_objs[id - 1]; | ||
4695 | if (phys_obj->cur_obj) { | ||
4696 | i915_gem_detach_phys_object(dev, phys_obj->cur_obj); | ||
4697 | } | ||
4698 | |||
4699 | #ifdef CONFIG_X86 | ||
4700 | set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); | ||
4701 | #endif | ||
4702 | drm_pci_free(dev, phys_obj->handle); | ||
4703 | kfree(phys_obj); | ||
4704 | dev_priv->mm.phys_objs[id - 1] = NULL; | ||
4705 | } | ||
4706 | |||
4707 | void i915_gem_free_all_phys_object(struct drm_device *dev) | ||
4708 | { | ||
4709 | int i; | ||
4710 | |||
4711 | for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) | ||
4712 | i915_gem_free_phys_object(dev, i); | ||
4713 | } | ||
4714 | |||
4715 | void i915_gem_detach_phys_object(struct drm_device *dev, | ||
4716 | struct drm_i915_gem_object *obj) | ||
4717 | { | ||
4718 | struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; | ||
4719 | char *vaddr; | ||
4720 | int i; | ||
4721 | int page_count; | ||
4722 | |||
4723 | if (!obj->phys_obj) | ||
4724 | return; | ||
4725 | vaddr = obj->phys_obj->handle->vaddr; | ||
4726 | |||
4727 | page_count = obj->base.size / PAGE_SIZE; | ||
4728 | for (i = 0; i < page_count; i++) { | ||
4729 | struct page *page = shmem_read_mapping_page(mapping, i); | ||
4730 | if (!IS_ERR(page)) { | ||
4731 | char *dst = kmap_atomic(page); | ||
4732 | memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); | ||
4733 | kunmap_atomic(dst); | ||
4734 | |||
4735 | drm_clflush_pages(&page, 1); | ||
4736 | |||
4737 | set_page_dirty(page); | ||
4738 | mark_page_accessed(page); | ||
4739 | page_cache_release(page); | ||
4740 | } | ||
4741 | } | ||
4742 | i915_gem_chipset_flush(dev); | ||
4743 | |||
4744 | obj->phys_obj->cur_obj = NULL; | ||
4745 | obj->phys_obj = NULL; | ||
4746 | } | ||
4747 | |||
4748 | int | ||
4749 | i915_gem_attach_phys_object(struct drm_device *dev, | ||
4750 | struct drm_i915_gem_object *obj, | ||
4751 | int id, | ||
4752 | int align) | ||
4753 | { | ||
4754 | struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; | ||
4755 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4756 | int ret = 0; | ||
4757 | int page_count; | ||
4758 | int i; | ||
4759 | |||
4760 | if (id > I915_MAX_PHYS_OBJECT) | ||
4761 | return -EINVAL; | ||
4762 | |||
4763 | if (obj->phys_obj) { | ||
4764 | if (obj->phys_obj->id == id) | ||
4765 | return 0; | ||
4766 | i915_gem_detach_phys_object(dev, obj); | ||
4767 | } | ||
4768 | |||
4769 | /* create a new object */ | ||
4770 | if (!dev_priv->mm.phys_objs[id - 1]) { | ||
4771 | ret = i915_gem_init_phys_object(dev, id, | ||
4772 | obj->base.size, align); | ||
4773 | if (ret) { | ||
4774 | DRM_ERROR("failed to init phys object %d size: %zu\n", | ||
4775 | id, obj->base.size); | ||
4776 | return ret; | ||
4777 | } | ||
4778 | } | ||
4779 | |||
4780 | /* bind to the object */ | ||
4781 | obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; | ||
4782 | obj->phys_obj->cur_obj = obj; | ||
4783 | |||
4784 | page_count = obj->base.size / PAGE_SIZE; | ||
4785 | |||
4786 | for (i = 0; i < page_count; i++) { | ||
4787 | struct page *page; | ||
4788 | char *dst, *src; | ||
4789 | |||
4790 | page = shmem_read_mapping_page(mapping, i); | ||
4791 | if (IS_ERR(page)) | ||
4792 | return PTR_ERR(page); | ||
4793 | |||
4794 | src = kmap_atomic(page); | ||
4795 | dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); | ||
4796 | memcpy(dst, src, PAGE_SIZE); | ||
4797 | kunmap_atomic(src); | ||
4798 | |||
4799 | mark_page_accessed(page); | ||
4800 | page_cache_release(page); | ||
4801 | } | ||
4802 | |||
4803 | return 0; | ||
4804 | } | ||
4805 | |||
4806 | static int | ||
4807 | i915_gem_phys_pwrite(struct drm_device *dev, | ||
4808 | struct drm_i915_gem_object *obj, | ||
4809 | struct drm_i915_gem_pwrite *args, | ||
4810 | struct drm_file *file_priv) | ||
4811 | { | ||
4812 | void *vaddr = obj->phys_obj->handle->vaddr + args->offset; | ||
4813 | char __user *user_data = to_user_ptr(args->data_ptr); | ||
4814 | |||
4815 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { | ||
4816 | unsigned long unwritten; | ||
4817 | |||
4818 | /* The physical object once assigned is fixed for the lifetime | ||
4819 | * of the obj, so we can safely drop the lock and continue | ||
4820 | * to access vaddr. | ||
4821 | */ | ||
4822 | mutex_unlock(&dev->struct_mutex); | ||
4823 | unwritten = copy_from_user(vaddr, user_data, args->size); | ||
4824 | mutex_lock(&dev->struct_mutex); | ||
4825 | if (unwritten) | ||
4826 | return -EFAULT; | ||
4827 | } | ||
4828 | |||
4829 | i915_gem_chipset_flush(dev); | ||
4830 | return 0; | ||
4831 | } | ||
4832 | |||
4833 | void i915_gem_release(struct drm_device *dev, struct drm_file *file) | 4788 | void i915_gem_release(struct drm_device *dev, struct drm_file *file) |
4834 | { | 4789 | { |
4835 | struct drm_i915_file_private *file_priv = file->driver_priv; | 4790 | struct drm_i915_file_private *file_priv = file->driver_priv; |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 75fca63dc8c1..bbf4b12d842e 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind) | |||
68 | int | 68 | int |
69 | i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, | 69 | i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, |
70 | int min_size, unsigned alignment, unsigned cache_level, | 70 | int min_size, unsigned alignment, unsigned cache_level, |
71 | unsigned long start, unsigned long end, | ||
71 | unsigned flags) | 72 | unsigned flags) |
72 | { | 73 | { |
73 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
74 | struct list_head eviction_list, unwind_list; | 74 | struct list_head eviction_list, unwind_list; |
75 | struct i915_vma *vma; | 75 | struct i915_vma *vma; |
76 | int ret = 0; | 76 | int ret = 0; |
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, | |||
102 | */ | 102 | */ |
103 | 103 | ||
104 | INIT_LIST_HEAD(&unwind_list); | 104 | INIT_LIST_HEAD(&unwind_list); |
105 | if (flags & PIN_MAPPABLE) { | 105 | if (start != 0 || end != vm->total) { |
106 | BUG_ON(!i915_is_ggtt(vm)); | ||
107 | drm_mm_init_scan_with_range(&vm->mm, min_size, | 106 | drm_mm_init_scan_with_range(&vm->mm, min_size, |
108 | alignment, cache_level, 0, | 107 | alignment, cache_level, |
109 | dev_priv->gtt.mappable_end); | 108 | start, end); |
110 | } else | 109 | } else |
111 | drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); | 110 | drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); |
112 | 111 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2c9d9cbaf653..20fef6c50267 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -35,6 +35,9 @@ | |||
35 | 35 | ||
36 | #define __EXEC_OBJECT_HAS_PIN (1<<31) | 36 | #define __EXEC_OBJECT_HAS_PIN (1<<31) |
37 | #define __EXEC_OBJECT_HAS_FENCE (1<<30) | 37 | #define __EXEC_OBJECT_HAS_FENCE (1<<30) |
38 | #define __EXEC_OBJECT_NEEDS_BIAS (1<<28) | ||
39 | |||
40 | #define BATCH_OFFSET_BIAS (256*1024) | ||
38 | 41 | ||
39 | struct eb_vmas { | 42 | struct eb_vmas { |
40 | struct list_head vmas; | 43 | struct list_head vmas; |
@@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, | |||
545 | struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; | 548 | struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; |
546 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; | 549 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; |
547 | bool need_fence; | 550 | bool need_fence; |
548 | unsigned flags; | 551 | uint64_t flags; |
549 | int ret; | 552 | int ret; |
550 | 553 | ||
551 | flags = 0; | 554 | flags = 0; |
@@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, | |||
559 | 562 | ||
560 | if (entry->flags & EXEC_OBJECT_NEEDS_GTT) | 563 | if (entry->flags & EXEC_OBJECT_NEEDS_GTT) |
561 | flags |= PIN_GLOBAL; | 564 | flags |= PIN_GLOBAL; |
565 | if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) | ||
566 | flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; | ||
562 | 567 | ||
563 | ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); | 568 | ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); |
564 | if (ret) | 569 | if (ret) |
@@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, | |||
592 | return 0; | 597 | return 0; |
593 | } | 598 | } |
594 | 599 | ||
600 | static bool | ||
601 | eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access) | ||
602 | { | ||
603 | struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; | ||
604 | struct drm_i915_gem_object *obj = vma->obj; | ||
605 | bool need_fence, need_mappable; | ||
606 | |||
607 | need_fence = | ||
608 | has_fenced_gpu_access && | ||
609 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
610 | obj->tiling_mode != I915_TILING_NONE; | ||
611 | need_mappable = need_fence || need_reloc_mappable(vma); | ||
612 | |||
613 | WARN_ON((need_mappable || need_fence) && | ||
614 | !i915_is_ggtt(vma->vm)); | ||
615 | |||
616 | if (entry->alignment && | ||
617 | vma->node.start & (entry->alignment - 1)) | ||
618 | return true; | ||
619 | |||
620 | if (need_mappable && !obj->map_and_fenceable) | ||
621 | return true; | ||
622 | |||
623 | if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && | ||
624 | vma->node.start < BATCH_OFFSET_BIAS) | ||
625 | return true; | ||
626 | |||
627 | return false; | ||
628 | } | ||
629 | |||
595 | static int | 630 | static int |
596 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | 631 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, |
597 | struct list_head *vmas, | 632 | struct list_head *vmas, |
@@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
653 | 688 | ||
654 | /* Unbind any ill-fitting objects or pin. */ | 689 | /* Unbind any ill-fitting objects or pin. */ |
655 | list_for_each_entry(vma, vmas, exec_list) { | 690 | list_for_each_entry(vma, vmas, exec_list) { |
656 | struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; | ||
657 | bool need_fence, need_mappable; | ||
658 | |||
659 | obj = vma->obj; | ||
660 | |||
661 | if (!drm_mm_node_allocated(&vma->node)) | 691 | if (!drm_mm_node_allocated(&vma->node)) |
662 | continue; | 692 | continue; |
663 | 693 | ||
664 | need_fence = | 694 | if (eb_vma_misplaced(vma, has_fenced_gpu_access)) |
665 | has_fenced_gpu_access && | ||
666 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
667 | obj->tiling_mode != I915_TILING_NONE; | ||
668 | need_mappable = need_fence || need_reloc_mappable(vma); | ||
669 | |||
670 | WARN_ON((need_mappable || need_fence) && | ||
671 | !i915_is_ggtt(vma->vm)); | ||
672 | |||
673 | if ((entry->alignment && | ||
674 | vma->node.start & (entry->alignment - 1)) || | ||
675 | (need_mappable && !obj->map_and_fenceable)) | ||
676 | ret = i915_vma_unbind(vma); | 695 | ret = i915_vma_unbind(vma); |
677 | else | 696 | else |
678 | ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); | 697 | ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); |
@@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
773 | * relocations were valid. | 792 | * relocations were valid. |
774 | */ | 793 | */ |
775 | for (j = 0; j < exec[i].relocation_count; j++) { | 794 | for (j = 0; j < exec[i].relocation_count; j++) { |
776 | if (copy_to_user(&user_relocs[j].presumed_offset, | 795 | if (__copy_to_user(&user_relocs[j].presumed_offset, |
777 | &invalid_offset, | 796 | &invalid_offset, |
778 | sizeof(invalid_offset))) { | 797 | sizeof(invalid_offset))) { |
779 | ret = -EFAULT; | 798 | ret = -EFAULT; |
780 | mutex_lock(&dev->struct_mutex); | 799 | mutex_lock(&dev->struct_mutex); |
781 | goto err; | 800 | goto err; |
@@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, | |||
999 | return 0; | 1018 | return 0; |
1000 | } | 1019 | } |
1001 | 1020 | ||
1021 | static struct drm_i915_gem_object * | ||
1022 | eb_get_batch(struct eb_vmas *eb) | ||
1023 | { | ||
1024 | struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); | ||
1025 | |||
1026 | /* | ||
1027 | * SNA is doing fancy tricks with compressing batch buffers, which leads | ||
1028 | * to negative relocation deltas. Usually that works out ok since the | ||
1029 | * relocate address is still positive, except when the batch is placed | ||
1030 | * very low in the GTT. Ensure this doesn't happen. | ||
1031 | * | ||
1032 | * Note that actual hangs have only been observed on gen7, but for | ||
1033 | * paranoia do it everywhere. | ||
1034 | */ | ||
1035 | vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; | ||
1036 | |||
1037 | return vma->obj; | ||
1038 | } | ||
1039 | |||
1002 | static int | 1040 | static int |
1003 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | 1041 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, |
1004 | struct drm_file *file, | 1042 | struct drm_file *file, |
@@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1153 | goto err; | 1191 | goto err; |
1154 | 1192 | ||
1155 | /* take note of the batch buffer before we might reorder the lists */ | 1193 | /* take note of the batch buffer before we might reorder the lists */ |
1156 | batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj; | 1194 | batch_obj = eb_get_batch(eb); |
1157 | 1195 | ||
1158 | /* Move the objects en-masse into the GTT, evicting if necessary. */ | 1196 | /* Move the objects en-masse into the GTT, evicting if necessary. */ |
1159 | need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; | 1197 | need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
@@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1355 | 1393 | ||
1356 | ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); | 1394 | ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); |
1357 | if (!ret) { | 1395 | if (!ret) { |
1396 | struct drm_i915_gem_exec_object __user *user_exec_list = | ||
1397 | to_user_ptr(args->buffers_ptr); | ||
1398 | |||
1358 | /* Copy the new buffer offsets back to the user's exec list. */ | 1399 | /* Copy the new buffer offsets back to the user's exec list. */ |
1359 | for (i = 0; i < args->buffer_count; i++) | 1400 | for (i = 0; i < args->buffer_count; i++) { |
1360 | exec_list[i].offset = exec2_list[i].offset; | 1401 | ret = __copy_to_user(&user_exec_list[i].offset, |
1361 | /* ... and back out to userspace */ | 1402 | &exec2_list[i].offset, |
1362 | ret = copy_to_user(to_user_ptr(args->buffers_ptr), | 1403 | sizeof(user_exec_list[i].offset)); |
1363 | exec_list, | 1404 | if (ret) { |
1364 | sizeof(*exec_list) * args->buffer_count); | 1405 | ret = -EFAULT; |
1365 | if (ret) { | 1406 | DRM_DEBUG("failed to copy %d exec entries " |
1366 | ret = -EFAULT; | 1407 | "back to user (%d)\n", |
1367 | DRM_DEBUG("failed to copy %d exec entries " | 1408 | args->buffer_count, ret); |
1368 | "back to user (%d)\n", | 1409 | break; |
1369 | args->buffer_count, ret); | 1410 | } |
1370 | } | 1411 | } |
1371 | } | 1412 | } |
1372 | 1413 | ||
@@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
1412 | ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); | 1453 | ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); |
1413 | if (!ret) { | 1454 | if (!ret) { |
1414 | /* Copy the new buffer offsets back to the user's exec list. */ | 1455 | /* Copy the new buffer offsets back to the user's exec list. */ |
1415 | ret = copy_to_user(to_user_ptr(args->buffers_ptr), | 1456 | struct drm_i915_gem_exec_object2 *user_exec_list = |
1416 | exec2_list, | 1457 | to_user_ptr(args->buffers_ptr); |
1417 | sizeof(*exec2_list) * args->buffer_count); | 1458 | int i; |
1418 | if (ret) { | 1459 | |
1419 | ret = -EFAULT; | 1460 | for (i = 0; i < args->buffer_count; i++) { |
1420 | DRM_DEBUG("failed to copy %d exec entries " | 1461 | ret = __copy_to_user(&user_exec_list[i].offset, |
1421 | "back to user (%d)\n", | 1462 | &exec2_list[i].offset, |
1422 | args->buffer_count, ret); | 1463 | sizeof(user_exec_list[i].offset)); |
1464 | if (ret) { | ||
1465 | ret = -EFAULT; | ||
1466 | DRM_DEBUG("failed to copy %d exec entries " | ||
1467 | "back to user\n", | ||
1468 | args->buffer_count); | ||
1469 | break; | ||
1470 | } | ||
1423 | } | 1471 | } |
1424 | } | 1472 | } |
1425 | 1473 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 154b0f8bb88d..5deb22864c52 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -1089,7 +1089,9 @@ alloc: | |||
1089 | if (ret == -ENOSPC && !retried) { | 1089 | if (ret == -ENOSPC && !retried) { |
1090 | ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, | 1090 | ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, |
1091 | GEN6_PD_SIZE, GEN6_PD_ALIGN, | 1091 | GEN6_PD_SIZE, GEN6_PD_ALIGN, |
1092 | I915_CACHE_NONE, 0); | 1092 | I915_CACHE_NONE, |
1093 | 0, dev_priv->gtt.base.total, | ||
1094 | 0); | ||
1093 | if (ret) | 1095 | if (ret) |
1094 | return ret; | 1096 | return ret; |
1095 | 1097 | ||
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index fa486c5fbb02..aff4a113cda3 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -560,47 +560,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | |||
560 | 560 | ||
561 | dev_priv->vbt.edp_pps = *edp_pps; | 561 | dev_priv->vbt.edp_pps = *edp_pps; |
562 | 562 | ||
563 | dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 : | 563 | switch (edp_link_params->rate) { |
564 | DP_LINK_BW_1_62; | 564 | case EDP_RATE_1_62: |
565 | dev_priv->vbt.edp_rate = DP_LINK_BW_1_62; | ||
566 | break; | ||
567 | case EDP_RATE_2_7: | ||
568 | dev_priv->vbt.edp_rate = DP_LINK_BW_2_7; | ||
569 | break; | ||
570 | default: | ||
571 | DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n", | ||
572 | edp_link_params->rate); | ||
573 | break; | ||
574 | } | ||
575 | |||
565 | switch (edp_link_params->lanes) { | 576 | switch (edp_link_params->lanes) { |
566 | case 0: | 577 | case EDP_LANE_1: |
567 | dev_priv->vbt.edp_lanes = 1; | 578 | dev_priv->vbt.edp_lanes = 1; |
568 | break; | 579 | break; |
569 | case 1: | 580 | case EDP_LANE_2: |
570 | dev_priv->vbt.edp_lanes = 2; | 581 | dev_priv->vbt.edp_lanes = 2; |
571 | break; | 582 | break; |
572 | case 3: | 583 | case EDP_LANE_4: |
573 | default: | ||
574 | dev_priv->vbt.edp_lanes = 4; | 584 | dev_priv->vbt.edp_lanes = 4; |
575 | break; | 585 | break; |
586 | default: | ||
587 | DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n", | ||
588 | edp_link_params->lanes); | ||
589 | break; | ||
576 | } | 590 | } |
591 | |||
577 | switch (edp_link_params->preemphasis) { | 592 | switch (edp_link_params->preemphasis) { |
578 | case 0: | 593 | case EDP_PREEMPHASIS_NONE: |
579 | dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0; | 594 | dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0; |
580 | break; | 595 | break; |
581 | case 1: | 596 | case EDP_PREEMPHASIS_3_5dB: |
582 | dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; | 597 | dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; |
583 | break; | 598 | break; |
584 | case 2: | 599 | case EDP_PREEMPHASIS_6dB: |
585 | dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6; | 600 | dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6; |
586 | break; | 601 | break; |
587 | case 3: | 602 | case EDP_PREEMPHASIS_9_5dB: |
588 | dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; | 603 | dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; |
589 | break; | 604 | break; |
605 | default: | ||
606 | DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n", | ||
607 | edp_link_params->preemphasis); | ||
608 | break; | ||
590 | } | 609 | } |
610 | |||
591 | switch (edp_link_params->vswing) { | 611 | switch (edp_link_params->vswing) { |
592 | case 0: | 612 | case EDP_VSWING_0_4V: |
593 | dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400; | 613 | dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400; |
594 | break; | 614 | break; |
595 | case 1: | 615 | case EDP_VSWING_0_6V: |
596 | dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600; | 616 | dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600; |
597 | break; | 617 | break; |
598 | case 2: | 618 | case EDP_VSWING_0_8V: |
599 | dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800; | 619 | dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800; |
600 | break; | 620 | break; |
601 | case 3: | 621 | case EDP_VSWING_1_2V: |
602 | dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200; | 622 | dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200; |
603 | break; | 623 | break; |
624 | default: | ||
625 | DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n", | ||
626 | edp_link_params->vswing); | ||
627 | break; | ||
604 | } | 628 | } |
605 | } | 629 | } |
606 | 630 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 48aa516a1ac0..5b60e25baa32 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
7825 | addr = i915_gem_obj_ggtt_offset(obj); | 7825 | addr = i915_gem_obj_ggtt_offset(obj); |
7826 | } else { | 7826 | } else { |
7827 | int align = IS_I830(dev) ? 16 * 1024 : 256; | 7827 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
7828 | ret = i915_gem_attach_phys_object(dev, obj, | 7828 | ret = i915_gem_object_attach_phys(obj, align); |
7829 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, | ||
7830 | align); | ||
7831 | if (ret) { | 7829 | if (ret) { |
7832 | DRM_DEBUG_KMS("failed to attach phys object\n"); | 7830 | DRM_DEBUG_KMS("failed to attach phys object\n"); |
7833 | goto fail_locked; | 7831 | goto fail_locked; |
7834 | } | 7832 | } |
7835 | addr = obj->phys_obj->handle->busaddr; | 7833 | addr = obj->phys_handle->busaddr; |
7836 | } | 7834 | } |
7837 | 7835 | ||
7838 | if (IS_GEN2(dev)) | 7836 | if (IS_GEN2(dev)) |
@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
7840 | 7838 | ||
7841 | finish: | 7839 | finish: |
7842 | if (intel_crtc->cursor_bo) { | 7840 | if (intel_crtc->cursor_bo) { |
7843 | if (INTEL_INFO(dev)->cursor_needs_physical) { | 7841 | if (!INTEL_INFO(dev)->cursor_needs_physical) |
7844 | if (intel_crtc->cursor_bo != obj) | ||
7845 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | ||
7846 | } else | ||
7847 | i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); | 7842 | i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); |
7848 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); | 7843 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); |
7849 | } | 7844 | } |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5ca68aa9f237..2a00cb828d20 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -121,6 +121,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) | |||
121 | return max_link_bw; | 121 | return max_link_bw; |
122 | } | 122 | } |
123 | 123 | ||
124 | static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp) | ||
125 | { | ||
126 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
127 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
128 | u8 source_max, sink_max; | ||
129 | |||
130 | source_max = 4; | ||
131 | if (HAS_DDI(dev) && intel_dig_port->port == PORT_A && | ||
132 | (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0) | ||
133 | source_max = 2; | ||
134 | |||
135 | sink_max = drm_dp_max_lane_count(intel_dp->dpcd); | ||
136 | |||
137 | return min(source_max, sink_max); | ||
138 | } | ||
139 | |||
124 | /* | 140 | /* |
125 | * The units on the numbers in the next two are... bizarre. Examples will | 141 | * The units on the numbers in the next two are... bizarre. Examples will |
126 | * make it clearer; this one parallels an example in the eDP spec. | 142 | * make it clearer; this one parallels an example in the eDP spec. |
@@ -171,7 +187,7 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
171 | } | 187 | } |
172 | 188 | ||
173 | max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); | 189 | max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); |
174 | max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); | 190 | max_lanes = intel_dp_max_lane_count(intel_dp); |
175 | 191 | ||
176 | max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); | 192 | max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); |
177 | mode_rate = intel_dp_link_required(target_clock, 18); | 193 | mode_rate = intel_dp_link_required(target_clock, 18); |
@@ -751,8 +767,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
751 | struct intel_crtc *intel_crtc = encoder->new_crtc; | 767 | struct intel_crtc *intel_crtc = encoder->new_crtc; |
752 | struct intel_connector *intel_connector = intel_dp->attached_connector; | 768 | struct intel_connector *intel_connector = intel_dp->attached_connector; |
753 | int lane_count, clock; | 769 | int lane_count, clock; |
754 | int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); | 770 | int min_lane_count = 1; |
771 | int max_lane_count = intel_dp_max_lane_count(intel_dp); | ||
755 | /* Conveniently, the link BW constants become indices with a shift...*/ | 772 | /* Conveniently, the link BW constants become indices with a shift...*/ |
773 | int min_clock = 0; | ||
756 | int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; | 774 | int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; |
757 | int bpp, mode_rate; | 775 | int bpp, mode_rate; |
758 | static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; | 776 | static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; |
@@ -785,19 +803,38 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
785 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 | 803 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 |
786 | * bpc in between. */ | 804 | * bpc in between. */ |
787 | bpp = pipe_config->pipe_bpp; | 805 | bpp = pipe_config->pipe_bpp; |
788 | if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && | 806 | if (is_edp(intel_dp)) { |
789 | dev_priv->vbt.edp_bpp < bpp) { | 807 | if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) { |
790 | DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", | 808 | DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", |
791 | dev_priv->vbt.edp_bpp); | 809 | dev_priv->vbt.edp_bpp); |
792 | bpp = dev_priv->vbt.edp_bpp; | 810 | bpp = dev_priv->vbt.edp_bpp; |
811 | } | ||
812 | |||
813 | if (IS_BROADWELL(dev)) { | ||
814 | /* Yes, it's an ugly hack. */ | ||
815 | min_lane_count = max_lane_count; | ||
816 | DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n", | ||
817 | min_lane_count); | ||
818 | } else if (dev_priv->vbt.edp_lanes) { | ||
819 | min_lane_count = min(dev_priv->vbt.edp_lanes, | ||
820 | max_lane_count); | ||
821 | DRM_DEBUG_KMS("using min %u lanes per VBT\n", | ||
822 | min_lane_count); | ||
823 | } | ||
824 | |||
825 | if (dev_priv->vbt.edp_rate) { | ||
826 | min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock); | ||
827 | DRM_DEBUG_KMS("using min %02x link bw per VBT\n", | ||
828 | bws[min_clock]); | ||
829 | } | ||
793 | } | 830 | } |
794 | 831 | ||
795 | for (; bpp >= 6*3; bpp -= 2*3) { | 832 | for (; bpp >= 6*3; bpp -= 2*3) { |
796 | mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, | 833 | mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, |
797 | bpp); | 834 | bpp); |
798 | 835 | ||
799 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 836 | for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { |
800 | for (clock = 0; clock <= max_clock; clock++) { | 837 | for (clock = min_clock; clock <= max_clock; clock++) { |
801 | link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); | 838 | link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); |
802 | link_avail = intel_dp_max_data_rate(link_clock, | 839 | link_avail = intel_dp_max_data_rate(link_clock, |
803 | lane_count); | 840 | lane_count); |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fce4a0d93c0b..f73ba5e6b7a8 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -387,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
387 | height); | 387 | height); |
388 | } | 388 | } |
389 | 389 | ||
390 | /* No preferred mode marked by the EDID? Are there any modes? */ | ||
391 | if (!modes[i] && !list_empty(&connector->modes)) { | ||
392 | DRM_DEBUG_KMS("using first mode listed on connector %s\n", | ||
393 | drm_get_connector_name(connector)); | ||
394 | modes[i] = list_first_entry(&connector->modes, | ||
395 | struct drm_display_mode, | ||
396 | head); | ||
397 | } | ||
398 | |||
390 | /* last resort: use current mode */ | 399 | /* last resort: use current mode */ |
391 | if (!modes[i]) { | 400 | if (!modes[i]) { |
392 | /* | 401 | /* |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d8adc9104dca..129db0c7d835 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) | |||
193 | struct overlay_registers __iomem *regs; | 193 | struct overlay_registers __iomem *regs; |
194 | 194 | ||
195 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) | 195 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) |
196 | regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; | 196 | regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; |
197 | else | 197 | else |
198 | regs = io_mapping_map_wc(dev_priv->gtt.mappable, | 198 | regs = io_mapping_map_wc(dev_priv->gtt.mappable, |
199 | i915_gem_obj_ggtt_offset(overlay->reg_bo)); | 199 | i915_gem_obj_ggtt_offset(overlay->reg_bo)); |
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1340 | overlay->reg_bo = reg_bo; | 1340 | overlay->reg_bo = reg_bo; |
1341 | 1341 | ||
1342 | if (OVERLAY_NEEDS_PHYSICAL(dev)) { | 1342 | if (OVERLAY_NEEDS_PHYSICAL(dev)) { |
1343 | ret = i915_gem_attach_phys_object(dev, reg_bo, | 1343 | ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); |
1344 | I915_GEM_PHYS_OVERLAY_REGS, | ||
1345 | PAGE_SIZE); | ||
1346 | if (ret) { | 1344 | if (ret) { |
1347 | DRM_ERROR("failed to attach phys overlay regs\n"); | 1345 | DRM_ERROR("failed to attach phys overlay regs\n"); |
1348 | goto out_free_bo; | 1346 | goto out_free_bo; |
1349 | } | 1347 | } |
1350 | overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; | 1348 | overlay->flip_addr = reg_bo->phys_handle->busaddr; |
1351 | } else { | 1349 | } else { |
1352 | ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); | 1350 | ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); |
1353 | if (ret) { | 1351 | if (ret) { |
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) | |||
1428 | /* Cast to make sparse happy, but it's wc memory anyway, so | 1426 | /* Cast to make sparse happy, but it's wc memory anyway, so |
1429 | * equivalent to the wc io mapping on X86. */ | 1427 | * equivalent to the wc io mapping on X86. */ |
1430 | regs = (struct overlay_registers __iomem *) | 1428 | regs = (struct overlay_registers __iomem *) |
1431 | overlay->reg_bo->phys_obj->handle->vaddr; | 1429 | overlay->reg_bo->phys_handle->vaddr; |
1432 | else | 1430 | else |
1433 | regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, | 1431 | regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, |
1434 | i915_gem_obj_ggtt_offset(overlay->reg_bo)); | 1432 | i915_gem_obj_ggtt_offset(overlay->reg_bo)); |
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) | |||
1462 | error->dovsta = I915_READ(DOVSTA); | 1460 | error->dovsta = I915_READ(DOVSTA); |
1463 | error->isr = I915_READ(ISR); | 1461 | error->isr = I915_READ(ISR); |
1464 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) | 1462 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) |
1465 | error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; | 1463 | error->base = (__force long)overlay->reg_bo->phys_handle->vaddr; |
1466 | else | 1464 | else |
1467 | error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); | 1465 | error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); |
1468 | 1466 | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 0eead16aeda7..cb8cfb7e0974 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -492,6 +492,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, | |||
492 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 492 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
493 | u32 freq; | 493 | u32 freq; |
494 | unsigned long flags; | 494 | unsigned long flags; |
495 | u64 n; | ||
495 | 496 | ||
496 | if (!panel->backlight.present || pipe == INVALID_PIPE) | 497 | if (!panel->backlight.present || pipe == INVALID_PIPE) |
497 | return; | 498 | return; |
@@ -502,10 +503,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, | |||
502 | 503 | ||
503 | /* scale to hardware max, but be careful to not overflow */ | 504 | /* scale to hardware max, but be careful to not overflow */ |
504 | freq = panel->backlight.max; | 505 | freq = panel->backlight.max; |
505 | if (freq < max) | 506 | n = (u64)level * freq; |
506 | level = level * freq / max; | 507 | do_div(n, max); |
507 | else | 508 | level = n; |
508 | level = freq / max * level; | ||
509 | 509 | ||
510 | panel->backlight.level = level; | 510 | panel->backlight.level = level; |
511 | if (panel->backlight.device) | 511 | if (panel->backlight.device) |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 19e94c3edc19..d93dcf683e8c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -2095,6 +2095,43 @@ static void intel_print_wm_latency(struct drm_device *dev, | |||
2095 | } | 2095 | } |
2096 | } | 2096 | } |
2097 | 2097 | ||
2098 | static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, | ||
2099 | uint16_t wm[5], uint16_t min) | ||
2100 | { | ||
2101 | int level, max_level = ilk_wm_max_level(dev_priv->dev); | ||
2102 | |||
2103 | if (wm[0] >= min) | ||
2104 | return false; | ||
2105 | |||
2106 | wm[0] = max(wm[0], min); | ||
2107 | for (level = 1; level <= max_level; level++) | ||
2108 | wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); | ||
2109 | |||
2110 | return true; | ||
2111 | } | ||
2112 | |||
2113 | static void snb_wm_latency_quirk(struct drm_device *dev) | ||
2114 | { | ||
2115 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2116 | bool changed; | ||
2117 | |||
2118 | /* | ||
2119 | * The BIOS provided WM memory latency values are often | ||
2120 | * inadequate for high resolution displays. Adjust them. | ||
2121 | */ | ||
2122 | changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | | ||
2123 | ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | | ||
2124 | ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); | ||
2125 | |||
2126 | if (!changed) | ||
2127 | return; | ||
2128 | |||
2129 | DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); | ||
2130 | intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); | ||
2131 | intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); | ||
2132 | intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); | ||
2133 | } | ||
2134 | |||
2098 | static void ilk_setup_wm_latency(struct drm_device *dev) | 2135 | static void ilk_setup_wm_latency(struct drm_device *dev) |
2099 | { | 2136 | { |
2100 | struct drm_i915_private *dev_priv = dev->dev_private; | 2137 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -2112,6 +2149,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev) | |||
2112 | intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); | 2149 | intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); |
2113 | intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); | 2150 | intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); |
2114 | intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); | 2151 | intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); |
2152 | |||
2153 | if (IS_GEN6(dev)) | ||
2154 | snb_wm_latency_quirk(dev); | ||
2115 | } | 2155 | } |
2116 | 2156 | ||
2117 | static void ilk_compute_wm_parameters(struct drm_crtc *crtc, | 2157 | static void ilk_compute_wm_parameters(struct drm_crtc *crtc, |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index d27155adf5db..46be00d66df3 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -2424,8 +2424,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, | |||
2424 | if (ret < 0) | 2424 | if (ret < 0) |
2425 | goto err1; | 2425 | goto err1; |
2426 | 2426 | ||
2427 | ret = sysfs_create_link(&encoder->ddc.dev.kobj, | 2427 | ret = sysfs_create_link(&drm_connector->kdev->kobj, |
2428 | &drm_connector->kdev->kobj, | 2428 | &encoder->ddc.dev.kobj, |
2429 | encoder->ddc.dev.kobj.name); | 2429 | encoder->ddc.dev.kobj.name); |
2430 | if (ret < 0) | 2430 | if (ret < 0) |
2431 | goto err2; | 2431 | goto err2; |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index f729dc71d5be..d0c75779d3f6 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) | |||
185 | { | 185 | { |
186 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, | 186 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, |
187 | _MASKED_BIT_DISABLE(0xffff)); | 187 | _MASKED_BIT_DISABLE(0xffff)); |
188 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, | ||
189 | _MASKED_BIT_DISABLE(0xffff)); | ||
188 | /* something from same cacheline, but !FORCEWAKE_VLV */ | 190 | /* something from same cacheline, but !FORCEWAKE_VLV */ |
189 | __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); | 191 | __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); |
190 | } | 192 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c index 7762665ad8fd..876de9ac3793 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | |||
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, | |||
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | if (outp == 8) | 1011 | if (outp == 8) |
1012 | return false; | 1012 | return conf; |
1013 | 1013 | ||
1014 | data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1); | 1014 | data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1); |
1015 | if (data == 0x0000) | 1015 | if (data == 0x0000) |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c index 43fec17ea540..bbf117be572f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | |||
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line) | |||
40 | case 0x00: return 2; | 40 | case 0x00: return 2; |
41 | case 0x19: return 1; | 41 | case 0x19: return 1; |
42 | case 0x1c: return 0; | 42 | case 0x1c: return 0; |
43 | case 0x1e: return 2; | ||
43 | default: | 44 | default: |
44 | break; | 45 | break; |
45 | } | 46 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 630f6e84fc01..2c1e4aad7da3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c | |||
@@ -31,7 +31,6 @@ | |||
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/backlight.h> | 33 | #include <linux/backlight.h> |
34 | #include <linux/acpi.h> | ||
35 | 34 | ||
36 | #include "nouveau_drm.h" | 35 | #include "nouveau_drm.h" |
37 | #include "nouveau_reg.h" | 36 | #include "nouveau_reg.h" |
@@ -222,14 +221,6 @@ nouveau_backlight_init(struct drm_device *dev) | |||
222 | struct nouveau_device *device = nv_device(drm->device); | 221 | struct nouveau_device *device = nv_device(drm->device); |
223 | struct drm_connector *connector; | 222 | struct drm_connector *connector; |
224 | 223 | ||
225 | #ifdef CONFIG_ACPI | ||
226 | if (acpi_video_backlight_support()) { | ||
227 | NV_INFO(drm, "ACPI backlight interface available, " | ||
228 | "not registering our own\n"); | ||
229 | return 0; | ||
230 | } | ||
231 | #endif | ||
232 | |||
233 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 224 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
234 | if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && | 225 | if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && |
235 | connector->connector_type != DRM_MODE_CONNECTOR_eDP) | 226 | connector->connector_type != DRM_MODE_CONNECTOR_eDP) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 68528619834a..8149e7cf4303 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -1642,6 +1642,7 @@ struct radeon_vce { | |||
1642 | unsigned fb_version; | 1642 | unsigned fb_version; |
1643 | atomic_t handles[RADEON_MAX_VCE_HANDLES]; | 1643 | atomic_t handles[RADEON_MAX_VCE_HANDLES]; |
1644 | struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; | 1644 | struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; |
1645 | unsigned img_size[RADEON_MAX_VCE_HANDLES]; | ||
1645 | struct delayed_work idle_work; | 1646 | struct delayed_work idle_work; |
1646 | }; | 1647 | }; |
1647 | 1648 | ||
@@ -1655,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, | |||
1655 | uint32_t handle, struct radeon_fence **fence); | 1656 | uint32_t handle, struct radeon_fence **fence); |
1656 | void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp); | 1657 | void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp); |
1657 | void radeon_vce_note_usage(struct radeon_device *rdev); | 1658 | void radeon_vce_note_usage(struct radeon_device *rdev); |
1658 | int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi); | 1659 | int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size); |
1659 | int radeon_vce_cs_parse(struct radeon_cs_parser *p); | 1660 | int radeon_vce_cs_parse(struct radeon_cs_parser *p); |
1660 | bool radeon_vce_semaphore_emit(struct radeon_device *rdev, | 1661 | bool radeon_vce_semaphore_emit(struct radeon_device *rdev, |
1661 | struct radeon_ring *ring, | 1662 | struct radeon_ring *ring, |
@@ -2640,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); | |||
2640 | #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE)) | 2641 | #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE)) |
2641 | #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI)) | 2642 | #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI)) |
2642 | #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE)) | 2643 | #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE)) |
2643 | #define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI)) | 2644 | #define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \ |
2645 | (rdev->family == CHIP_MULLINS)) | ||
2644 | 2646 | ||
2645 | #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ | 2647 | #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ |
2646 | (rdev->ddev->pdev->device == 0x6850) || \ | 2648 | (rdev->ddev->pdev->device == 0x6850) || \ |
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b3633d9a5317..9ab30976287d 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) | |||
196 | } | 196 | } |
197 | } | 197 | } |
198 | 198 | ||
199 | if (!found) { | ||
200 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { | ||
201 | dhandle = ACPI_HANDLE(&pdev->dev); | ||
202 | if (!dhandle) | ||
203 | continue; | ||
204 | |||
205 | status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); | ||
206 | if (!ACPI_FAILURE(status)) { | ||
207 | found = true; | ||
208 | break; | ||
209 | } | ||
210 | } | ||
211 | } | ||
212 | |||
199 | if (!found) | 213 | if (!found) |
200 | return false; | 214 | return false; |
201 | 215 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 2b6e0ebcc13a..41ecf8a60611 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
152 | uint32_t domain = r->write_domain ? | 152 | uint32_t domain = r->write_domain ? |
153 | r->write_domain : r->read_domains; | 153 | r->write_domain : r->read_domains; |
154 | 154 | ||
155 | if (domain & RADEON_GEM_DOMAIN_CPU) { | ||
156 | DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid " | ||
157 | "for command submission\n"); | ||
158 | return -EINVAL; | ||
159 | } | ||
160 | |||
155 | p->relocs[i].domain = domain; | 161 | p->relocs[i].domain = domain; |
156 | if (domain == RADEON_GEM_DOMAIN_VRAM) | 162 | if (domain == RADEON_GEM_DOMAIN_VRAM) |
157 | domain |= RADEON_GEM_DOMAIN_GTT; | 163 | domain |= RADEON_GEM_DOMAIN_GTT; |
@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
342 | return -EINVAL; | 348 | return -EINVAL; |
343 | 349 | ||
344 | /* we only support VM on some SI+ rings */ | 350 | /* we only support VM on some SI+ rings */ |
345 | if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) && | 351 | if ((p->cs_flags & RADEON_CS_USE_VM) == 0) { |
346 | ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { | 352 | if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) { |
347 | DRM_ERROR("Ring %d requires VM!\n", p->ring); | 353 | DRM_ERROR("Ring %d requires VM!\n", p->ring); |
348 | return -EINVAL; | 354 | return -EINVAL; |
355 | } | ||
356 | } else { | ||
357 | if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) { | ||
358 | DRM_ERROR("VM not supported on ring %d!\n", | ||
359 | p->ring); | ||
360 | return -EINVAL; | ||
361 | } | ||
349 | } | 362 | } |
350 | } | 363 | } |
351 | 364 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0e770bbf7e29..14671406212f 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1533,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) | |||
1533 | 1533 | ||
1534 | radeon_restore_bios_scratch_regs(rdev); | 1534 | radeon_restore_bios_scratch_regs(rdev); |
1535 | 1535 | ||
1536 | if (fbcon) { | ||
1537 | radeon_fbdev_set_suspend(rdev, 0); | ||
1538 | console_unlock(); | ||
1539 | } | ||
1540 | |||
1541 | /* init dig PHYs, disp eng pll */ | 1536 | /* init dig PHYs, disp eng pll */ |
1542 | if (rdev->is_atom_bios) { | 1537 | if (rdev->is_atom_bios) { |
1543 | radeon_atom_encoder_init(rdev); | 1538 | radeon_atom_encoder_init(rdev); |
@@ -1562,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) | |||
1562 | } | 1557 | } |
1563 | 1558 | ||
1564 | drm_kms_helper_poll_enable(dev); | 1559 | drm_kms_helper_poll_enable(dev); |
1560 | |||
1561 | if (fbcon) { | ||
1562 | radeon_fbdev_set_suspend(rdev, 0); | ||
1563 | console_unlock(); | ||
1564 | } | ||
1565 | |||
1565 | return 0; | 1566 | return 0; |
1566 | } | 1567 | } |
1567 | 1568 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 408b6ac53f0b..356b733caafe 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -862,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, | |||
862 | unsigned *fb_div, unsigned *ref_div) | 862 | unsigned *fb_div, unsigned *ref_div) |
863 | { | 863 | { |
864 | /* limit reference * post divider to a maximum */ | 864 | /* limit reference * post divider to a maximum */ |
865 | ref_div_max = min(128 / post_div, ref_div_max); | 865 | ref_div_max = max(min(100 / post_div, ref_div_max), 1u); |
866 | 866 | ||
867 | /* get matching reference and feedback divider */ | 867 | /* get matching reference and feedback divider */ |
868 | *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); | 868 | *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); |
@@ -999,7 +999,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, | |||
999 | 999 | ||
1000 | /* avoid high jitter with small fractional dividers */ | 1000 | /* avoid high jitter with small fractional dividers */ |
1001 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { | 1001 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { |
1002 | fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60); | 1002 | fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50); |
1003 | if (fb_div < fb_div_min) { | 1003 | if (fb_div < fb_div_min) { |
1004 | unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); | 1004 | unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); |
1005 | fb_div *= tmp; | 1005 | fb_div *= tmp; |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0cc47f12d995..eaaedba04675 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) | |||
577 | return r; | 577 | return r; |
578 | } | 578 | } |
579 | 579 | ||
580 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); | 580 | if (rdev->accel_working) { |
581 | if (r) { | 581 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); |
582 | radeon_vm_fini(rdev, &fpriv->vm); | 582 | if (r) { |
583 | kfree(fpriv); | 583 | radeon_vm_fini(rdev, &fpriv->vm); |
584 | return r; | 584 | kfree(fpriv); |
585 | } | 585 | return r; |
586 | } | ||
586 | 587 | ||
587 | /* map the ib pool buffer read only into | 588 | /* map the ib pool buffer read only into |
588 | * virtual address space */ | 589 | * virtual address space */ |
589 | bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, | 590 | bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, |
590 | rdev->ring_tmp_bo.bo); | 591 | rdev->ring_tmp_bo.bo); |
591 | r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, | 592 | r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, |
592 | RADEON_VM_PAGE_READABLE | | 593 | RADEON_VM_PAGE_READABLE | |
593 | RADEON_VM_PAGE_SNOOPED); | 594 | RADEON_VM_PAGE_SNOOPED); |
594 | 595 | ||
595 | radeon_bo_unreserve(rdev->ring_tmp_bo.bo); | 596 | radeon_bo_unreserve(rdev->ring_tmp_bo.bo); |
596 | if (r) { | 597 | if (r) { |
597 | radeon_vm_fini(rdev, &fpriv->vm); | 598 | radeon_vm_fini(rdev, &fpriv->vm); |
598 | kfree(fpriv); | 599 | kfree(fpriv); |
599 | return r; | 600 | return r; |
601 | } | ||
600 | } | 602 | } |
601 | |||
602 | file_priv->driver_priv = fpriv; | 603 | file_priv->driver_priv = fpriv; |
603 | } | 604 | } |
604 | 605 | ||
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev, | |||
626 | struct radeon_bo_va *bo_va; | 627 | struct radeon_bo_va *bo_va; |
627 | int r; | 628 | int r; |
628 | 629 | ||
629 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); | 630 | if (rdev->accel_working) { |
630 | if (!r) { | 631 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); |
631 | bo_va = radeon_vm_bo_find(&fpriv->vm, | 632 | if (!r) { |
632 | rdev->ring_tmp_bo.bo); | 633 | bo_va = radeon_vm_bo_find(&fpriv->vm, |
633 | if (bo_va) | 634 | rdev->ring_tmp_bo.bo); |
634 | radeon_vm_bo_rmv(rdev, bo_va); | 635 | if (bo_va) |
635 | radeon_bo_unreserve(rdev->ring_tmp_bo.bo); | 636 | radeon_vm_bo_rmv(rdev, bo_va); |
637 | radeon_bo_unreserve(rdev->ring_tmp_bo.bo); | ||
638 | } | ||
636 | } | 639 | } |
637 | 640 | ||
638 | radeon_vm_fini(rdev, &fpriv->vm); | 641 | radeon_vm_fini(rdev, &fpriv->vm); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 19bec0dbfa38..4faa4d6f9bb4 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, | |||
458 | * into account. We don't want to disallow buffer moves | 458 | * into account. We don't want to disallow buffer moves |
459 | * completely. | 459 | * completely. |
460 | */ | 460 | */ |
461 | if (current_domain != RADEON_GEM_DOMAIN_CPU && | 461 | if ((lobj->alt_domain & current_domain) != 0 && |
462 | (domain & current_domain) == 0 && /* will be moved */ | 462 | (domain & current_domain) == 0 && /* will be moved */ |
463 | bytes_moved > bytes_moved_threshold) { | 463 | bytes_moved > bytes_moved_threshold) { |
464 | /* don't move it */ | 464 | /* don't move it */ |
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
699 | rbo = container_of(bo, struct radeon_bo, tbo); | 699 | rbo = container_of(bo, struct radeon_bo, tbo); |
700 | radeon_bo_check_tiling(rbo, 0, 0); | 700 | radeon_bo_check_tiling(rbo, 0, 0); |
701 | rdev = rbo->rdev; | 701 | rdev = rbo->rdev; |
702 | if (bo->mem.mem_type == TTM_PL_VRAM) { | 702 | if (bo->mem.mem_type != TTM_PL_VRAM) |
703 | size = bo->mem.num_pages << PAGE_SHIFT; | 703 | return 0; |
704 | offset = bo->mem.start << PAGE_SHIFT; | 704 | |
705 | if ((offset + size) > rdev->mc.visible_vram_size) { | 705 | size = bo->mem.num_pages << PAGE_SHIFT; |
706 | /* hurrah the memory is not visible ! */ | 706 | offset = bo->mem.start << PAGE_SHIFT; |
707 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); | 707 | if ((offset + size) <= rdev->mc.visible_vram_size) |
708 | rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; | 708 | return 0; |
709 | r = ttm_bo_validate(bo, &rbo->placement, false, false); | 709 | |
710 | if (unlikely(r != 0)) | 710 | /* hurrah the memory is not visible ! */ |
711 | return r; | 711 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); |
712 | offset = bo->mem.start << PAGE_SHIFT; | 712 | rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; |
713 | /* this should not happen */ | 713 | r = ttm_bo_validate(bo, &rbo->placement, false, false); |
714 | if ((offset + size) > rdev->mc.visible_vram_size) | 714 | if (unlikely(r == -ENOMEM)) { |
715 | return -EINVAL; | 715 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); |
716 | } | 716 | return ttm_bo_validate(bo, &rbo->placement, false, false); |
717 | } else if (unlikely(r != 0)) { | ||
718 | return r; | ||
717 | } | 719 | } |
720 | |||
721 | offset = bo->mem.start << PAGE_SHIFT; | ||
722 | /* this should never happen */ | ||
723 | if ((offset + size) > rdev->mc.visible_vram_size) | ||
724 | return -EINVAL; | ||
725 | |||
718 | return 0; | 726 | return 0; |
719 | } | 727 | } |
720 | 728 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f30b8426eee2..53d6e1bb48dc 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev, | |||
361 | struct drm_device *ddev = dev_get_drvdata(dev); | 361 | struct drm_device *ddev = dev_get_drvdata(dev); |
362 | struct radeon_device *rdev = ddev->dev_private; | 362 | struct radeon_device *rdev = ddev->dev_private; |
363 | 363 | ||
364 | /* Can't set profile when the card is off */ | ||
365 | if ((rdev->flags & RADEON_IS_PX) && | ||
366 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
367 | return -EINVAL; | ||
368 | |||
364 | mutex_lock(&rdev->pm.mutex); | 369 | mutex_lock(&rdev->pm.mutex); |
365 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 370 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
366 | if (strncmp("default", buf, strlen("default")) == 0) | 371 | if (strncmp("default", buf, strlen("default")) == 0) |
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev, | |||
409 | struct drm_device *ddev = dev_get_drvdata(dev); | 414 | struct drm_device *ddev = dev_get_drvdata(dev); |
410 | struct radeon_device *rdev = ddev->dev_private; | 415 | struct radeon_device *rdev = ddev->dev_private; |
411 | 416 | ||
417 | /* Can't set method when the card is off */ | ||
418 | if ((rdev->flags & RADEON_IS_PX) && | ||
419 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { | ||
420 | count = -EINVAL; | ||
421 | goto fail; | ||
422 | } | ||
423 | |||
412 | /* we don't support the legacy modes with dpm */ | 424 | /* we don't support the legacy modes with dpm */ |
413 | if (rdev->pm.pm_method == PM_METHOD_DPM) { | 425 | if (rdev->pm.pm_method == PM_METHOD_DPM) { |
414 | count = -EINVAL; | 426 | count = -EINVAL; |
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev, | |||
446 | struct radeon_device *rdev = ddev->dev_private; | 458 | struct radeon_device *rdev = ddev->dev_private; |
447 | enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; | 459 | enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; |
448 | 460 | ||
461 | if ((rdev->flags & RADEON_IS_PX) && | ||
462 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
463 | return snprintf(buf, PAGE_SIZE, "off\n"); | ||
464 | |||
449 | return snprintf(buf, PAGE_SIZE, "%s\n", | 465 | return snprintf(buf, PAGE_SIZE, "%s\n", |
450 | (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : | 466 | (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : |
451 | (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); | 467 | (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); |
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev, | |||
459 | struct drm_device *ddev = dev_get_drvdata(dev); | 475 | struct drm_device *ddev = dev_get_drvdata(dev); |
460 | struct radeon_device *rdev = ddev->dev_private; | 476 | struct radeon_device *rdev = ddev->dev_private; |
461 | 477 | ||
478 | /* Can't set dpm state when the card is off */ | ||
479 | if ((rdev->flags & RADEON_IS_PX) && | ||
480 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
481 | return -EINVAL; | ||
482 | |||
462 | mutex_lock(&rdev->pm.mutex); | 483 | mutex_lock(&rdev->pm.mutex); |
463 | if (strncmp("battery", buf, strlen("battery")) == 0) | 484 | if (strncmp("battery", buf, strlen("battery")) == 0) |
464 | rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; | 485 | rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; |
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, | |||
485 | struct radeon_device *rdev = ddev->dev_private; | 506 | struct radeon_device *rdev = ddev->dev_private; |
486 | enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; | 507 | enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; |
487 | 508 | ||
509 | if ((rdev->flags & RADEON_IS_PX) && | ||
510 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
511 | return snprintf(buf, PAGE_SIZE, "off\n"); | ||
512 | |||
488 | return snprintf(buf, PAGE_SIZE, "%s\n", | 513 | return snprintf(buf, PAGE_SIZE, "%s\n", |
489 | (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : | 514 | (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : |
490 | (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); | 515 | (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); |
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, | |||
500 | enum radeon_dpm_forced_level level; | 525 | enum radeon_dpm_forced_level level; |
501 | int ret = 0; | 526 | int ret = 0; |
502 | 527 | ||
528 | /* Can't force performance level when the card is off */ | ||
529 | if ((rdev->flags & RADEON_IS_PX) && | ||
530 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
531 | return -EINVAL; | ||
532 | |||
503 | mutex_lock(&rdev->pm.mutex); | 533 | mutex_lock(&rdev->pm.mutex); |
504 | if (strncmp("low", buf, strlen("low")) == 0) { | 534 | if (strncmp("low", buf, strlen("low")) == 0) { |
505 | level = RADEON_DPM_FORCED_LEVEL_LOW; | 535 | level = RADEON_DPM_FORCED_LEVEL_LOW; |
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, | |||
538 | char *buf) | 568 | char *buf) |
539 | { | 569 | { |
540 | struct radeon_device *rdev = dev_get_drvdata(dev); | 570 | struct radeon_device *rdev = dev_get_drvdata(dev); |
571 | struct drm_device *ddev = rdev->ddev; | ||
541 | int temp; | 572 | int temp; |
542 | 573 | ||
574 | /* Can't get temperature when the card is off */ | ||
575 | if ((rdev->flags & RADEON_IS_PX) && | ||
576 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
577 | return -EINVAL; | ||
578 | |||
543 | if (rdev->asic->pm.get_temperature) | 579 | if (rdev->asic->pm.get_temperature) |
544 | temp = radeon_get_temperature(rdev); | 580 | temp = radeon_get_temperature(rdev); |
545 | else | 581 | else |
@@ -1614,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) | |||
1614 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 1650 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
1615 | struct drm_device *dev = node->minor->dev; | 1651 | struct drm_device *dev = node->minor->dev; |
1616 | struct radeon_device *rdev = dev->dev_private; | 1652 | struct radeon_device *rdev = dev->dev_private; |
1653 | struct drm_device *ddev = rdev->ddev; | ||
1617 | 1654 | ||
1618 | if (rdev->pm.dpm_enabled) { | 1655 | if ((rdev->flags & RADEON_IS_PX) && |
1656 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { | ||
1657 | seq_printf(m, "PX asic powered off\n"); | ||
1658 | } else if (rdev->pm.dpm_enabled) { | ||
1619 | mutex_lock(&rdev->pm.mutex); | 1659 | mutex_lock(&rdev->pm.mutex); |
1620 | if (rdev->asic->dpm.debugfs_print_current_performance_level) | 1660 | if (rdev->asic->dpm.debugfs_print_current_performance_level) |
1621 | radeon_dpm_debugfs_print_current_performance_level(rdev, m); | 1661 | radeon_dpm_debugfs_print_current_performance_level(rdev, m); |
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index f73324c81491..3971d968af6c 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c | |||
@@ -443,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, | |||
443 | * @p: parser context | 443 | * @p: parser context |
444 | * @lo: address of lower dword | 444 | * @lo: address of lower dword |
445 | * @hi: address of higher dword | 445 | * @hi: address of higher dword |
446 | * @size: size of checker for relocation buffer | ||
446 | * | 447 | * |
447 | * Patch relocation inside command stream with real buffer address | 448 | * Patch relocation inside command stream with real buffer address |
448 | */ | 449 | */ |
449 | int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) | 450 | int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, |
451 | unsigned size) | ||
450 | { | 452 | { |
451 | struct radeon_cs_chunk *relocs_chunk; | 453 | struct radeon_cs_chunk *relocs_chunk; |
452 | uint64_t offset; | 454 | struct radeon_cs_reloc *reloc; |
455 | uint64_t start, end, offset; | ||
453 | unsigned idx; | 456 | unsigned idx; |
454 | 457 | ||
455 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; | 458 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
@@ -462,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) | |||
462 | return -EINVAL; | 465 | return -EINVAL; |
463 | } | 466 | } |
464 | 467 | ||
465 | offset += p->relocs_ptr[(idx / 4)]->gpu_offset; | 468 | reloc = p->relocs_ptr[(idx / 4)]; |
469 | start = reloc->gpu_offset; | ||
470 | end = start + radeon_bo_size(reloc->robj); | ||
471 | start += offset; | ||
466 | 472 | ||
467 | p->ib.ptr[lo] = offset & 0xFFFFFFFF; | 473 | p->ib.ptr[lo] = start & 0xFFFFFFFF; |
468 | p->ib.ptr[hi] = offset >> 32; | 474 | p->ib.ptr[hi] = start >> 32; |
475 | |||
476 | if (end <= start) { | ||
477 | DRM_ERROR("invalid reloc offset %llX!\n", offset); | ||
478 | return -EINVAL; | ||
479 | } | ||
480 | if ((end - start) < size) { | ||
481 | DRM_ERROR("buffer to small (%d / %d)!\n", | ||
482 | (unsigned)(end - start), size); | ||
483 | return -EINVAL; | ||
484 | } | ||
469 | 485 | ||
470 | return 0; | 486 | return 0; |
471 | } | 487 | } |
472 | 488 | ||
473 | /** | 489 | /** |
490 | * radeon_vce_validate_handle - validate stream handle | ||
491 | * | ||
492 | * @p: parser context | ||
493 | * @handle: handle to validate | ||
494 | * | ||
495 | * Validates the handle and return the found session index or -EINVAL | ||
496 | * we we don't have another free session index. | ||
497 | */ | ||
498 | int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) | ||
499 | { | ||
500 | unsigned i; | ||
501 | |||
502 | /* validate the handle */ | ||
503 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | ||
504 | if (atomic_read(&p->rdev->vce.handles[i]) == handle) | ||
505 | return i; | ||
506 | } | ||
507 | |||
508 | /* handle not found try to alloc a new one */ | ||
509 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | ||
510 | if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { | ||
511 | p->rdev->vce.filp[i] = p->filp; | ||
512 | p->rdev->vce.img_size[i] = 0; | ||
513 | return i; | ||
514 | } | ||
515 | } | ||
516 | |||
517 | DRM_ERROR("No more free VCE handles!\n"); | ||
518 | return -EINVAL; | ||
519 | } | ||
520 | |||
521 | /** | ||
474 | * radeon_vce_cs_parse - parse and validate the command stream | 522 | * radeon_vce_cs_parse - parse and validate the command stream |
475 | * | 523 | * |
476 | * @p: parser context | 524 | * @p: parser context |
@@ -478,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) | |||
478 | */ | 526 | */ |
479 | int radeon_vce_cs_parse(struct radeon_cs_parser *p) | 527 | int radeon_vce_cs_parse(struct radeon_cs_parser *p) |
480 | { | 528 | { |
481 | uint32_t handle = 0; | 529 | int session_idx = -1; |
482 | bool destroy = false; | 530 | bool destroyed = false; |
531 | uint32_t tmp, handle = 0; | ||
532 | uint32_t *size = &tmp; | ||
483 | int i, r; | 533 | int i, r; |
484 | 534 | ||
485 | while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { | 535 | while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { |
@@ -491,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) | |||
491 | return -EINVAL; | 541 | return -EINVAL; |
492 | } | 542 | } |
493 | 543 | ||
544 | if (destroyed) { | ||
545 | DRM_ERROR("No other command allowed after destroy!\n"); | ||
546 | return -EINVAL; | ||
547 | } | ||
548 | |||
494 | switch (cmd) { | 549 | switch (cmd) { |
495 | case 0x00000001: // session | 550 | case 0x00000001: // session |
496 | handle = radeon_get_ib_value(p, p->idx + 2); | 551 | handle = radeon_get_ib_value(p, p->idx + 2); |
552 | session_idx = radeon_vce_validate_handle(p, handle); | ||
553 | if (session_idx < 0) | ||
554 | return session_idx; | ||
555 | size = &p->rdev->vce.img_size[session_idx]; | ||
497 | break; | 556 | break; |
498 | 557 | ||
499 | case 0x00000002: // task info | 558 | case 0x00000002: // task info |
559 | break; | ||
560 | |||
500 | case 0x01000001: // create | 561 | case 0x01000001: // create |
562 | *size = radeon_get_ib_value(p, p->idx + 8) * | ||
563 | radeon_get_ib_value(p, p->idx + 10) * | ||
564 | 8 * 3 / 2; | ||
565 | break; | ||
566 | |||
501 | case 0x04000001: // config extension | 567 | case 0x04000001: // config extension |
502 | case 0x04000002: // pic control | 568 | case 0x04000002: // pic control |
503 | case 0x04000005: // rate control | 569 | case 0x04000005: // rate control |
@@ -506,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) | |||
506 | break; | 572 | break; |
507 | 573 | ||
508 | case 0x03000001: // encode | 574 | case 0x03000001: // encode |
509 | r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9); | 575 | r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, |
576 | *size); | ||
510 | if (r) | 577 | if (r) |
511 | return r; | 578 | return r; |
512 | 579 | ||
513 | r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11); | 580 | r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, |
581 | *size / 3); | ||
514 | if (r) | 582 | if (r) |
515 | return r; | 583 | return r; |
516 | break; | 584 | break; |
517 | 585 | ||
518 | case 0x02000001: // destroy | 586 | case 0x02000001: // destroy |
519 | destroy = true; | 587 | destroyed = true; |
520 | break; | 588 | break; |
521 | 589 | ||
522 | case 0x05000001: // context buffer | 590 | case 0x05000001: // context buffer |
591 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, | ||
592 | *size * 2); | ||
593 | if (r) | ||
594 | return r; | ||
595 | break; | ||
596 | |||
523 | case 0x05000004: // video bitstream buffer | 597 | case 0x05000004: // video bitstream buffer |
598 | tmp = radeon_get_ib_value(p, p->idx + 4); | ||
599 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, | ||
600 | tmp); | ||
601 | if (r) | ||
602 | return r; | ||
603 | break; | ||
604 | |||
524 | case 0x05000005: // feedback buffer | 605 | case 0x05000005: // feedback buffer |
525 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2); | 606 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, |
607 | 4096); | ||
526 | if (r) | 608 | if (r) |
527 | return r; | 609 | return r; |
528 | break; | 610 | break; |
@@ -532,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) | |||
532 | return -EINVAL; | 614 | return -EINVAL; |
533 | } | 615 | } |
534 | 616 | ||
617 | if (session_idx == -1) { | ||
618 | DRM_ERROR("no session command at start of IB\n"); | ||
619 | return -EINVAL; | ||
620 | } | ||
621 | |||
535 | p->idx += len / 4; | 622 | p->idx += len / 4; |
536 | } | 623 | } |
537 | 624 | ||
538 | if (destroy) { | 625 | if (destroyed) { |
539 | /* IB contains a destroy msg, free the handle */ | 626 | /* IB contains a destroy msg, free the handle */ |
540 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) | 627 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) |
541 | atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); | 628 | atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); |
542 | |||
543 | return 0; | ||
544 | } | ||
545 | |||
546 | /* create or encode, validate the handle */ | ||
547 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | ||
548 | if (atomic_read(&p->rdev->vce.handles[i]) == handle) | ||
549 | return 0; | ||
550 | } | 629 | } |
551 | 630 | ||
552 | /* handle not found try to alloc a new one */ | 631 | return 0; |
553 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | ||
554 | if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { | ||
555 | p->rdev->vce.filp[i] = p->filp; | ||
556 | return 0; | ||
557 | } | ||
558 | } | ||
559 | |||
560 | DRM_ERROR("No more free VCE handles!\n"); | ||
561 | return -EINVAL; | ||
562 | } | 632 | } |
563 | 633 | ||
564 | /** | 634 | /** |
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 2aae6ce49d32..1f426696de36 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, | |||
130 | struct list_head *head) | 130 | struct list_head *head) |
131 | { | 131 | { |
132 | struct radeon_cs_reloc *list; | 132 | struct radeon_cs_reloc *list; |
133 | unsigned i, idx, size; | 133 | unsigned i, idx; |
134 | 134 | ||
135 | size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc); | 135 | list = kmalloc_array(vm->max_pde_used + 1, |
136 | list = kmalloc(size, GFP_KERNEL); | 136 | sizeof(struct radeon_cs_reloc), GFP_KERNEL); |
137 | if (!list) | 137 | if (!list) |
138 | return NULL; | 138 | return NULL; |
139 | 139 | ||
@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, | |||
595 | ndw = 64; | 595 | ndw = 64; |
596 | 596 | ||
597 | /* assume the worst case */ | 597 | /* assume the worst case */ |
598 | ndw += vm->max_pde_used * 12; | 598 | ndw += vm->max_pde_used * 16; |
599 | 599 | ||
600 | /* update too big for an IB */ | 600 | /* update too big for an IB */ |
601 | if (ndw > 0xfffff) | 601 | if (ndw > 0xfffff) |
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 683532f84931..7321283602ce 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
@@ -107,8 +107,8 @@ | |||
107 | #define SPLL_CHG_STATUS (1 << 1) | 107 | #define SPLL_CHG_STATUS (1 << 1) |
108 | #define SPLL_CNTL_MODE 0x618 | 108 | #define SPLL_CNTL_MODE 0x618 |
109 | #define SPLL_SW_DIR_CONTROL (1 << 0) | 109 | #define SPLL_SW_DIR_CONTROL (1 << 0) |
110 | # define SPLL_REFCLK_SEL(x) ((x) << 8) | 110 | # define SPLL_REFCLK_SEL(x) ((x) << 26) |
111 | # define SPLL_REFCLK_SEL_MASK 0xFF00 | 111 | # define SPLL_REFCLK_SEL_MASK (3 << 26) |
112 | 112 | ||
113 | #define CG_SPLL_SPREAD_SPECTRUM 0x620 | 113 | #define CG_SPLL_SPREAD_SPECTRUM 0x620 |
114 | #define SSEN (1 << 0) | 114 | #define SSEN (1 << 0) |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index bc196f49ec53..4af0da96c2e2 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427 | |||
1053 | 1053 | ||
1054 | config SENSORS_NTC_THERMISTOR | 1054 | config SENSORS_NTC_THERMISTOR |
1055 | tristate "NTC thermistor support" | 1055 | tristate "NTC thermistor support" |
1056 | depends on (!OF && !IIO) || (OF && IIO) | 1056 | depends on !OF || IIO=n || IIO |
1057 | help | 1057 | help |
1058 | This driver supports NTC thermistors sensor reading and its | 1058 | This driver supports NTC thermistors sensor reading and its |
1059 | interpretation. The driver can also monitor the temperature and | 1059 | interpretation. The driver can also monitor the temperature and |
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c index 90ec1173b8a1..01723f04fe45 100644 --- a/drivers/hwmon/emc1403.c +++ b/drivers/hwmon/emc1403.c | |||
@@ -163,7 +163,7 @@ static ssize_t store_hyst(struct device *dev, | |||
163 | if (retval < 0) | 163 | if (retval < 0) |
164 | goto fail; | 164 | goto fail; |
165 | 165 | ||
166 | hyst = val - retval * 1000; | 166 | hyst = retval * 1000 - val; |
167 | hyst = DIV_ROUND_CLOSEST(hyst, 1000); | 167 | hyst = DIV_ROUND_CLOSEST(hyst, 1000); |
168 | if (hyst < 0 || hyst > 255) { | 168 | if (hyst < 0 || hyst > 255) { |
169 | retval = -ERANGE; | 169 | retval = -ERANGE; |
@@ -330,7 +330,7 @@ static int emc1403_detect(struct i2c_client *client, | |||
330 | } | 330 | } |
331 | 331 | ||
332 | id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG); | 332 | id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG); |
333 | if (id != 0x01) | 333 | if (id < 0x01 || id > 0x04) |
334 | return -ENODEV; | 334 | return -ENODEV; |
335 | 335 | ||
336 | return 0; | 336 | return 0; |
@@ -355,9 +355,9 @@ static int emc1403_probe(struct i2c_client *client, | |||
355 | if (id->driver_data) | 355 | if (id->driver_data) |
356 | data->groups[1] = &emc1404_group; | 356 | data->groups[1] = &emc1404_group; |
357 | 357 | ||
358 | hwmon_dev = hwmon_device_register_with_groups(&client->dev, | 358 | hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev, |
359 | client->name, data, | 359 | client->name, data, |
360 | data->groups); | 360 | data->groups); |
361 | if (IS_ERR(hwmon_dev)) | 361 | if (IS_ERR(hwmon_dev)) |
362 | return PTR_ERR(hwmon_dev); | 362 | return PTR_ERR(hwmon_dev); |
363 | 363 | ||
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 8a17f01e8672..e76feb86a1d4 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c | |||
@@ -44,6 +44,7 @@ struct ntc_compensation { | |||
44 | unsigned int ohm; | 44 | unsigned int ohm; |
45 | }; | 45 | }; |
46 | 46 | ||
47 | /* Order matters, ntc_match references the entries by index */ | ||
47 | static const struct platform_device_id ntc_thermistor_id[] = { | 48 | static const struct platform_device_id ntc_thermistor_id[] = { |
48 | { "ncp15wb473", TYPE_NCPXXWB473 }, | 49 | { "ncp15wb473", TYPE_NCPXXWB473 }, |
49 | { "ncp18wb473", TYPE_NCPXXWB473 }, | 50 | { "ncp18wb473", TYPE_NCPXXWB473 }, |
@@ -141,7 +142,7 @@ struct ntc_data { | |||
141 | char name[PLATFORM_NAME_SIZE]; | 142 | char name[PLATFORM_NAME_SIZE]; |
142 | }; | 143 | }; |
143 | 144 | ||
144 | #ifdef CONFIG_OF | 145 | #if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO) |
145 | static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) | 146 | static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) |
146 | { | 147 | { |
147 | struct iio_channel *channel = pdata->chan; | 148 | struct iio_channel *channel = pdata->chan; |
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) | |||
163 | 164 | ||
164 | static const struct of_device_id ntc_match[] = { | 165 | static const struct of_device_id ntc_match[] = { |
165 | { .compatible = "ntc,ncp15wb473", | 166 | { .compatible = "ntc,ncp15wb473", |
166 | .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, | 167 | .data = &ntc_thermistor_id[0] }, |
167 | { .compatible = "ntc,ncp18wb473", | 168 | { .compatible = "ntc,ncp18wb473", |
168 | .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, | 169 | .data = &ntc_thermistor_id[1] }, |
169 | { .compatible = "ntc,ncp21wb473", | 170 | { .compatible = "ntc,ncp21wb473", |
170 | .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, | 171 | .data = &ntc_thermistor_id[2] }, |
171 | { .compatible = "ntc,ncp03wb473", | 172 | { .compatible = "ntc,ncp03wb473", |
172 | .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, | 173 | .data = &ntc_thermistor_id[3] }, |
173 | { .compatible = "ntc,ncp15wl333", | 174 | { .compatible = "ntc,ncp15wl333", |
174 | .data = &ntc_thermistor_id[TYPE_NCPXXWL333] }, | 175 | .data = &ntc_thermistor_id[4] }, |
175 | { }, | 176 | { }, |
176 | }; | 177 | }; |
177 | MODULE_DEVICE_TABLE(of, ntc_match); | 178 | MODULE_DEVICE_TABLE(of, ntc_match); |
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev) | |||
223 | return NULL; | 224 | return NULL; |
224 | } | 225 | } |
225 | 226 | ||
227 | #define ntc_match NULL | ||
228 | |||
226 | static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata) | 229 | static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata) |
227 | { } | 230 | { } |
228 | #endif | 231 | #endif |
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index 22e92c3d3d07..3c20e4bd6dd1 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c | |||
@@ -422,6 +422,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) | |||
422 | */ | 422 | */ |
423 | dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR); | 423 | dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR); |
424 | 424 | ||
425 | /* enforce disabled interrupts (due to HW issues) */ | ||
426 | i2c_dw_disable_int(dev); | ||
427 | |||
425 | /* Enable the adapter */ | 428 | /* Enable the adapter */ |
426 | __i2c_dw_enable(dev, true); | 429 | __i2c_dw_enable(dev, true); |
427 | 430 | ||
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 28cbe1b2a2ec..32c85e9ecdae 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c | |||
@@ -999,7 +999,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) | |||
999 | 999 | ||
1000 | dev->virtbase = devm_ioremap(&adev->dev, adev->res.start, | 1000 | dev->virtbase = devm_ioremap(&adev->dev, adev->res.start, |
1001 | resource_size(&adev->res)); | 1001 | resource_size(&adev->res)); |
1002 | if (IS_ERR(dev->virtbase)) { | 1002 | if (!dev->virtbase) { |
1003 | ret = -ENOMEM; | 1003 | ret = -ENOMEM; |
1004 | goto err_no_mem; | 1004 | goto err_no_mem; |
1005 | } | 1005 | } |
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 1b4cf14f1106..2a5efb5b487c 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c | |||
@@ -479,7 +479,7 @@ static int qup_i2c_xfer(struct i2c_adapter *adap, | |||
479 | int ret, idx; | 479 | int ret, idx; |
480 | 480 | ||
481 | ret = pm_runtime_get_sync(qup->dev); | 481 | ret = pm_runtime_get_sync(qup->dev); |
482 | if (ret) | 482 | if (ret < 0) |
483 | goto out; | 483 | goto out; |
484 | 484 | ||
485 | writel(1, qup->base + QUP_SW_RESET); | 485 | writel(1, qup->base + QUP_SW_RESET); |
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index d4fa8eba6e9d..06d47aafbb79 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c | |||
@@ -561,6 +561,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, | |||
561 | 561 | ||
562 | ret = -EINVAL; | 562 | ret = -EINVAL; |
563 | for (i = 0; i < num; i++) { | 563 | for (i = 0; i < num; i++) { |
564 | /* This HW can't send STOP after address phase */ | ||
565 | if (msgs[i].len == 0) { | ||
566 | ret = -EOPNOTSUPP; | ||
567 | break; | ||
568 | } | ||
569 | |||
564 | /*-------------- spin lock -----------------*/ | 570 | /*-------------- spin lock -----------------*/ |
565 | spin_lock_irqsave(&priv->lock, flags); | 571 | spin_lock_irqsave(&priv->lock, flags); |
566 | 572 | ||
@@ -625,7 +631,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, | |||
625 | 631 | ||
626 | static u32 rcar_i2c_func(struct i2c_adapter *adap) | 632 | static u32 rcar_i2c_func(struct i2c_adapter *adap) |
627 | { | 633 | { |
628 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; | 634 | /* This HW can't do SMBUS_QUICK and NOSTART */ |
635 | return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); | ||
629 | } | 636 | } |
630 | 637 | ||
631 | static const struct i2c_algorithm rcar_i2c_algo = { | 638 | static const struct i2c_algorithm rcar_i2c_algo = { |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index ae4491062e41..bb3a9964f7e0 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
@@ -1276,10 +1276,10 @@ static int s3c24xx_i2c_resume(struct device *dev) | |||
1276 | struct platform_device *pdev = to_platform_device(dev); | 1276 | struct platform_device *pdev = to_platform_device(dev); |
1277 | struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); | 1277 | struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); |
1278 | 1278 | ||
1279 | i2c->suspended = 0; | ||
1280 | clk_prepare_enable(i2c->clk); | 1279 | clk_prepare_enable(i2c->clk); |
1281 | s3c24xx_i2c_init(i2c); | 1280 | s3c24xx_i2c_init(i2c); |
1282 | clk_disable_unprepare(i2c->clk); | 1281 | clk_disable_unprepare(i2c->clk); |
1282 | i2c->suspended = 0; | ||
1283 | 1283 | ||
1284 | return 0; | 1284 | return 0; |
1285 | } | 1285 | } |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 1b6dbe156a37..199c7896f081 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -48,6 +48,7 @@ | |||
48 | 48 | ||
49 | #include <linux/mlx4/driver.h> | 49 | #include <linux/mlx4/driver.h> |
50 | #include <linux/mlx4/cmd.h> | 50 | #include <linux/mlx4/cmd.h> |
51 | #include <linux/mlx4/qp.h> | ||
51 | 52 | ||
52 | #include "mlx4_ib.h" | 53 | #include "mlx4_ib.h" |
53 | #include "user.h" | 54 | #include "user.h" |
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event, | |||
1614 | } | 1615 | } |
1615 | #endif | 1616 | #endif |
1616 | 1617 | ||
1618 | #define MLX4_IB_INVALID_MAC ((u64)-1) | ||
1619 | static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, | ||
1620 | struct net_device *dev, | ||
1621 | int port) | ||
1622 | { | ||
1623 | u64 new_smac = 0; | ||
1624 | u64 release_mac = MLX4_IB_INVALID_MAC; | ||
1625 | struct mlx4_ib_qp *qp; | ||
1626 | |||
1627 | read_lock(&dev_base_lock); | ||
1628 | new_smac = mlx4_mac_to_u64(dev->dev_addr); | ||
1629 | read_unlock(&dev_base_lock); | ||
1630 | |||
1631 | mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); | ||
1632 | qp = ibdev->qp1_proxy[port - 1]; | ||
1633 | if (qp) { | ||
1634 | int new_smac_index; | ||
1635 | u64 old_smac = qp->pri.smac; | ||
1636 | struct mlx4_update_qp_params update_params; | ||
1637 | |||
1638 | if (new_smac == old_smac) | ||
1639 | goto unlock; | ||
1640 | |||
1641 | new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac); | ||
1642 | |||
1643 | if (new_smac_index < 0) | ||
1644 | goto unlock; | ||
1645 | |||
1646 | update_params.smac_index = new_smac_index; | ||
1647 | if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC, | ||
1648 | &update_params)) { | ||
1649 | release_mac = new_smac; | ||
1650 | goto unlock; | ||
1651 | } | ||
1652 | |||
1653 | qp->pri.smac = new_smac; | ||
1654 | qp->pri.smac_index = new_smac_index; | ||
1655 | |||
1656 | release_mac = old_smac; | ||
1657 | } | ||
1658 | |||
1659 | unlock: | ||
1660 | mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); | ||
1661 | if (release_mac != MLX4_IB_INVALID_MAC) | ||
1662 | mlx4_unregister_mac(ibdev->dev, port, release_mac); | ||
1663 | } | ||
1664 | |||
1617 | static void mlx4_ib_get_dev_addr(struct net_device *dev, | 1665 | static void mlx4_ib_get_dev_addr(struct net_device *dev, |
1618 | struct mlx4_ib_dev *ibdev, u8 port) | 1666 | struct mlx4_ib_dev *ibdev, u8 port) |
1619 | { | 1667 | { |
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) | |||
1689 | return 0; | 1737 | return 0; |
1690 | } | 1738 | } |
1691 | 1739 | ||
1692 | static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) | 1740 | static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, |
1741 | struct net_device *dev, | ||
1742 | unsigned long event) | ||
1743 | |||
1693 | { | 1744 | { |
1694 | struct mlx4_ib_iboe *iboe; | 1745 | struct mlx4_ib_iboe *iboe; |
1746 | int update_qps_port = -1; | ||
1695 | int port; | 1747 | int port; |
1696 | 1748 | ||
1697 | iboe = &ibdev->iboe; | 1749 | iboe = &ibdev->iboe; |
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) | |||
1719 | } | 1771 | } |
1720 | curr_master = iboe->masters[port - 1]; | 1772 | curr_master = iboe->masters[port - 1]; |
1721 | 1773 | ||
1774 | if (dev == iboe->netdevs[port - 1] && | ||
1775 | (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || | ||
1776 | event == NETDEV_UP || event == NETDEV_CHANGE)) | ||
1777 | update_qps_port = port; | ||
1778 | |||
1722 | if (curr_netdev) { | 1779 | if (curr_netdev) { |
1723 | port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? | 1780 | port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? |
1724 | IB_PORT_ACTIVE : IB_PORT_DOWN; | 1781 | IB_PORT_ACTIVE : IB_PORT_DOWN; |
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) | |||
1752 | } | 1809 | } |
1753 | 1810 | ||
1754 | spin_unlock(&iboe->lock); | 1811 | spin_unlock(&iboe->lock); |
1812 | |||
1813 | if (update_qps_port > 0) | ||
1814 | mlx4_ib_update_qps(ibdev, dev, update_qps_port); | ||
1755 | } | 1815 | } |
1756 | 1816 | ||
1757 | static int mlx4_ib_netdev_event(struct notifier_block *this, | 1817 | static int mlx4_ib_netdev_event(struct notifier_block *this, |
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, | |||
1764 | return NOTIFY_DONE; | 1824 | return NOTIFY_DONE; |
1765 | 1825 | ||
1766 | ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); | 1826 | ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); |
1767 | mlx4_ib_scan_netdevs(ibdev); | 1827 | mlx4_ib_scan_netdevs(ibdev, dev, event); |
1768 | 1828 | ||
1769 | return NOTIFY_DONE; | 1829 | return NOTIFY_DONE; |
1770 | } | 1830 | } |
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
2043 | goto err_map; | 2103 | goto err_map; |
2044 | 2104 | ||
2045 | for (i = 0; i < ibdev->num_ports; ++i) { | 2105 | for (i = 0; i < ibdev->num_ports; ++i) { |
2106 | mutex_init(&ibdev->qp1_proxy_lock[i]); | ||
2046 | if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == | 2107 | if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == |
2047 | IB_LINK_LAYER_ETHERNET) { | 2108 | IB_LINK_LAYER_ETHERNET) { |
2048 | err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); | 2109 | err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); |
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
2126 | for (i = 1 ; i <= ibdev->num_ports ; ++i) | 2187 | for (i = 1 ; i <= ibdev->num_ports ; ++i) |
2127 | reset_gid_table(ibdev, i); | 2188 | reset_gid_table(ibdev, i); |
2128 | rtnl_lock(); | 2189 | rtnl_lock(); |
2129 | mlx4_ib_scan_netdevs(ibdev); | 2190 | mlx4_ib_scan_netdevs(ibdev, NULL, 0); |
2130 | rtnl_unlock(); | 2191 | rtnl_unlock(); |
2131 | mlx4_ib_init_gid_table(ibdev); | 2192 | mlx4_ib_init_gid_table(ibdev); |
2132 | } | 2193 | } |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index f589522fddfd..66b0b7dbd9f4 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -522,6 +522,9 @@ struct mlx4_ib_dev { | |||
522 | int steer_qpn_count; | 522 | int steer_qpn_count; |
523 | int steer_qpn_base; | 523 | int steer_qpn_base; |
524 | int steering_support; | 524 | int steering_support; |
525 | struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS]; | ||
526 | /* lock when destroying qp1_proxy and getting netdev events */ | ||
527 | struct mutex qp1_proxy_lock[MLX4_MAX_PORTS]; | ||
525 | }; | 528 | }; |
526 | 529 | ||
527 | struct ib_event_work { | 530 | struct ib_event_work { |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 41308af4163c..dc57482ae7af 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp) | |||
1132 | if (is_qp0(dev, mqp)) | 1132 | if (is_qp0(dev, mqp)) |
1133 | mlx4_CLOSE_PORT(dev->dev, mqp->port); | 1133 | mlx4_CLOSE_PORT(dev->dev, mqp->port); |
1134 | 1134 | ||
1135 | if (dev->qp1_proxy[mqp->port - 1] == mqp) { | ||
1136 | mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]); | ||
1137 | dev->qp1_proxy[mqp->port - 1] = NULL; | ||
1138 | mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]); | ||
1139 | } | ||
1140 | |||
1135 | pd = get_pd(mqp); | 1141 | pd = get_pd(mqp); |
1136 | destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); | 1142 | destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); |
1137 | 1143 | ||
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1646 | err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); | 1652 | err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); |
1647 | if (err) | 1653 | if (err) |
1648 | return -EINVAL; | 1654 | return -EINVAL; |
1655 | if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) | ||
1656 | dev->qp1_proxy[qp->port - 1] = qp; | ||
1649 | } | 1657 | } |
1650 | } | 1658 | } |
1651 | } | 1659 | } |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index c98fdb185931..a1710465faaf 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <target/target_core_base.h> | 28 | #include <target/target_core_base.h> |
29 | #include <target/target_core_fabric.h> | 29 | #include <target/target_core_fabric.h> |
30 | #include <target/iscsi/iscsi_transport.h> | 30 | #include <target/iscsi/iscsi_transport.h> |
31 | #include <linux/semaphore.h> | ||
31 | 32 | ||
32 | #include "isert_proto.h" | 33 | #include "isert_proto.h" |
33 | #include "ib_isert.h" | 34 | #include "ib_isert.h" |
@@ -561,7 +562,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
561 | struct isert_device *device; | 562 | struct isert_device *device; |
562 | struct ib_device *ib_dev = cma_id->device; | 563 | struct ib_device *ib_dev = cma_id->device; |
563 | int ret = 0; | 564 | int ret = 0; |
564 | u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; | 565 | u8 pi_support; |
566 | |||
567 | spin_lock_bh(&np->np_thread_lock); | ||
568 | if (!np->enabled) { | ||
569 | spin_unlock_bh(&np->np_thread_lock); | ||
570 | pr_debug("iscsi_np is not enabled, reject connect request\n"); | ||
571 | return rdma_reject(cma_id, NULL, 0); | ||
572 | } | ||
573 | spin_unlock_bh(&np->np_thread_lock); | ||
565 | 574 | ||
566 | pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", | 575 | pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", |
567 | cma_id, cma_id->context); | 576 | cma_id, cma_id->context); |
@@ -652,6 +661,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
652 | goto out_mr; | 661 | goto out_mr; |
653 | } | 662 | } |
654 | 663 | ||
664 | pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; | ||
655 | if (pi_support && !device->pi_capable) { | 665 | if (pi_support && !device->pi_capable) { |
656 | pr_err("Protection information requested but not supported\n"); | 666 | pr_err("Protection information requested but not supported\n"); |
657 | ret = -EINVAL; | 667 | ret = -EINVAL; |
@@ -663,11 +673,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
663 | goto out_conn_dev; | 673 | goto out_conn_dev; |
664 | 674 | ||
665 | mutex_lock(&isert_np->np_accept_mutex); | 675 | mutex_lock(&isert_np->np_accept_mutex); |
666 | list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node); | 676 | list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list); |
667 | mutex_unlock(&isert_np->np_accept_mutex); | 677 | mutex_unlock(&isert_np->np_accept_mutex); |
668 | 678 | ||
669 | pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np); | 679 | pr_debug("isert_connect_request() up np_sem np: %p\n", np); |
670 | wake_up(&isert_np->np_accept_wq); | 680 | up(&isert_np->np_sem); |
671 | return 0; | 681 | return 0; |
672 | 682 | ||
673 | out_conn_dev: | 683 | out_conn_dev: |
@@ -2999,7 +3009,7 @@ isert_setup_np(struct iscsi_np *np, | |||
2999 | pr_err("Unable to allocate struct isert_np\n"); | 3009 | pr_err("Unable to allocate struct isert_np\n"); |
3000 | return -ENOMEM; | 3010 | return -ENOMEM; |
3001 | } | 3011 | } |
3002 | init_waitqueue_head(&isert_np->np_accept_wq); | 3012 | sema_init(&isert_np->np_sem, 0); |
3003 | mutex_init(&isert_np->np_accept_mutex); | 3013 | mutex_init(&isert_np->np_accept_mutex); |
3004 | INIT_LIST_HEAD(&isert_np->np_accept_list); | 3014 | INIT_LIST_HEAD(&isert_np->np_accept_list); |
3005 | init_completion(&isert_np->np_login_comp); | 3015 | init_completion(&isert_np->np_login_comp); |
@@ -3048,18 +3058,6 @@ out: | |||
3048 | } | 3058 | } |
3049 | 3059 | ||
3050 | static int | 3060 | static int |
3051 | isert_check_accept_queue(struct isert_np *isert_np) | ||
3052 | { | ||
3053 | int empty; | ||
3054 | |||
3055 | mutex_lock(&isert_np->np_accept_mutex); | ||
3056 | empty = list_empty(&isert_np->np_accept_list); | ||
3057 | mutex_unlock(&isert_np->np_accept_mutex); | ||
3058 | |||
3059 | return empty; | ||
3060 | } | ||
3061 | |||
3062 | static int | ||
3063 | isert_rdma_accept(struct isert_conn *isert_conn) | 3061 | isert_rdma_accept(struct isert_conn *isert_conn) |
3064 | { | 3062 | { |
3065 | struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; | 3063 | struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; |
@@ -3151,16 +3149,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) | |||
3151 | int max_accept = 0, ret; | 3149 | int max_accept = 0, ret; |
3152 | 3150 | ||
3153 | accept_wait: | 3151 | accept_wait: |
3154 | ret = wait_event_interruptible(isert_np->np_accept_wq, | 3152 | ret = down_interruptible(&isert_np->np_sem); |
3155 | !isert_check_accept_queue(isert_np) || | ||
3156 | np->np_thread_state == ISCSI_NP_THREAD_RESET); | ||
3157 | if (max_accept > 5) | 3153 | if (max_accept > 5) |
3158 | return -ENODEV; | 3154 | return -ENODEV; |
3159 | 3155 | ||
3160 | spin_lock_bh(&np->np_thread_lock); | 3156 | spin_lock_bh(&np->np_thread_lock); |
3161 | if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { | 3157 | if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { |
3162 | spin_unlock_bh(&np->np_thread_lock); | 3158 | spin_unlock_bh(&np->np_thread_lock); |
3163 | pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); | 3159 | pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); |
3164 | return -ENODEV; | 3160 | return -ENODEV; |
3165 | } | 3161 | } |
3166 | spin_unlock_bh(&np->np_thread_lock); | 3162 | spin_unlock_bh(&np->np_thread_lock); |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 4c072ae34c01..da6612e68000 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h | |||
@@ -182,7 +182,7 @@ struct isert_device { | |||
182 | }; | 182 | }; |
183 | 183 | ||
184 | struct isert_np { | 184 | struct isert_np { |
185 | wait_queue_head_t np_accept_wq; | 185 | struct semaphore np_sem; |
186 | struct rdma_cm_id *np_cm_id; | 186 | struct rdma_cm_id *np_cm_id; |
187 | struct mutex np_accept_mutex; | 187 | struct mutex np_accept_mutex; |
188 | struct list_head np_accept_list; | 188 | struct list_head np_accept_list; |
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 76842d7dc2e3..ffc7ad3a2c88 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig | |||
@@ -71,7 +71,7 @@ config KEYBOARD_ATKBD | |||
71 | default y | 71 | default y |
72 | select SERIO | 72 | select SERIO |
73 | select SERIO_LIBPS2 | 73 | select SERIO_LIBPS2 |
74 | select SERIO_I8042 if X86 | 74 | select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO |
75 | select SERIO_GSCPS2 if GSC | 75 | select SERIO_GSCPS2 if GSC |
76 | help | 76 | help |
77 | Say Y here if you want to use a standard AT or PS/2 keyboard. Usually | 77 | Say Y here if you want to use a standard AT or PS/2 keyboard. Usually |
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c index d8241ba0afa0..a15063bea700 100644 --- a/drivers/input/keyboard/pxa27x_keypad.c +++ b/drivers/input/keyboard/pxa27x_keypad.c | |||
@@ -111,6 +111,8 @@ struct pxa27x_keypad { | |||
111 | unsigned short keycodes[MAX_KEYPAD_KEYS]; | 111 | unsigned short keycodes[MAX_KEYPAD_KEYS]; |
112 | int rotary_rel_code[2]; | 112 | int rotary_rel_code[2]; |
113 | 113 | ||
114 | unsigned int row_shift; | ||
115 | |||
114 | /* state row bits of each column scan */ | 116 | /* state row bits of each column scan */ |
115 | uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS]; | 117 | uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS]; |
116 | uint32_t direct_key_state; | 118 | uint32_t direct_key_state; |
@@ -467,7 +469,8 @@ scan: | |||
467 | if ((bits_changed & (1 << row)) == 0) | 469 | if ((bits_changed & (1 << row)) == 0) |
468 | continue; | 470 | continue; |
469 | 471 | ||
470 | code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT); | 472 | code = MATRIX_SCAN_CODE(row, col, keypad->row_shift); |
473 | |||
471 | input_event(input_dev, EV_MSC, MSC_SCAN, code); | 474 | input_event(input_dev, EV_MSC, MSC_SCAN, code); |
472 | input_report_key(input_dev, keypad->keycodes[code], | 475 | input_report_key(input_dev, keypad->keycodes[code], |
473 | new_state[col] & (1 << row)); | 476 | new_state[col] & (1 << row)); |
@@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev) | |||
802 | goto failed_put_clk; | 805 | goto failed_put_clk; |
803 | } | 806 | } |
804 | 807 | ||
808 | keypad->row_shift = get_count_order(pdata->matrix_key_cols); | ||
809 | |||
805 | if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) || | 810 | if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) || |
806 | (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) { | 811 | (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) { |
807 | input_dev->evbit[0] |= BIT_MASK(EV_REL); | 812 | input_dev->evbit[0] |= BIT_MASK(EV_REL); |
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig index effa9c5f2c5c..6b8441f7bc32 100644 --- a/drivers/input/mouse/Kconfig +++ b/drivers/input/mouse/Kconfig | |||
@@ -17,7 +17,7 @@ config MOUSE_PS2 | |||
17 | default y | 17 | default y |
18 | select SERIO | 18 | select SERIO |
19 | select SERIO_LIBPS2 | 19 | select SERIO_LIBPS2 |
20 | select SERIO_I8042 if X86 | 20 | select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO |
21 | select SERIO_GSCPS2 if GSC | 21 | select SERIO_GSCPS2 if GSC |
22 | help | 22 | help |
23 | Say Y here if you have a PS/2 mouse connected to your system. This | 23 | Say Y here if you have a PS/2 mouse connected to your system. This |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index d68d33fb5ac2..c5ec703c727e 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -117,6 +117,31 @@ void synaptics_reset(struct psmouse *psmouse) | |||
117 | } | 117 | } |
118 | 118 | ||
119 | #ifdef CONFIG_MOUSE_PS2_SYNAPTICS | 119 | #ifdef CONFIG_MOUSE_PS2_SYNAPTICS |
120 | struct min_max_quirk { | ||
121 | const char * const *pnp_ids; | ||
122 | int x_min, x_max, y_min, y_max; | ||
123 | }; | ||
124 | |||
125 | static const struct min_max_quirk min_max_pnpid_table[] = { | ||
126 | { | ||
127 | (const char * const []){"LEN0033", NULL}, | ||
128 | 1024, 5052, 2258, 4832 | ||
129 | }, | ||
130 | { | ||
131 | (const char * const []){"LEN0035", "LEN0042", NULL}, | ||
132 | 1232, 5710, 1156, 4696 | ||
133 | }, | ||
134 | { | ||
135 | (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL}, | ||
136 | 1024, 5112, 2024, 4832 | ||
137 | }, | ||
138 | { | ||
139 | (const char * const []){"LEN2001", NULL}, | ||
140 | 1024, 5022, 2508, 4832 | ||
141 | }, | ||
142 | { } | ||
143 | }; | ||
144 | |||
120 | /* This list has been kindly provided by Synaptics. */ | 145 | /* This list has been kindly provided by Synaptics. */ |
121 | static const char * const topbuttonpad_pnp_ids[] = { | 146 | static const char * const topbuttonpad_pnp_ids[] = { |
122 | "LEN0017", | 147 | "LEN0017", |
@@ -129,7 +154,7 @@ static const char * const topbuttonpad_pnp_ids[] = { | |||
129 | "LEN002D", | 154 | "LEN002D", |
130 | "LEN002E", | 155 | "LEN002E", |
131 | "LEN0033", /* Helix */ | 156 | "LEN0033", /* Helix */ |
132 | "LEN0034", /* T431s, T540, X1 Carbon 2nd */ | 157 | "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */ |
133 | "LEN0035", /* X240 */ | 158 | "LEN0035", /* X240 */ |
134 | "LEN0036", /* T440 */ | 159 | "LEN0036", /* T440 */ |
135 | "LEN0037", | 160 | "LEN0037", |
@@ -142,7 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = { | |||
142 | "LEN0048", | 167 | "LEN0048", |
143 | "LEN0049", | 168 | "LEN0049", |
144 | "LEN2000", | 169 | "LEN2000", |
145 | "LEN2001", | 170 | "LEN2001", /* Edge E431 */ |
146 | "LEN2002", | 171 | "LEN2002", |
147 | "LEN2003", | 172 | "LEN2003", |
148 | "LEN2004", /* L440 */ | 173 | "LEN2004", /* L440 */ |
@@ -156,6 +181,18 @@ static const char * const topbuttonpad_pnp_ids[] = { | |||
156 | NULL | 181 | NULL |
157 | }; | 182 | }; |
158 | 183 | ||
184 | static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[]) | ||
185 | { | ||
186 | int i; | ||
187 | |||
188 | if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) | ||
189 | for (i = 0; ids[i]; i++) | ||
190 | if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i])) | ||
191 | return true; | ||
192 | |||
193 | return false; | ||
194 | } | ||
195 | |||
159 | /***************************************************************************** | 196 | /***************************************************************************** |
160 | * Synaptics communications functions | 197 | * Synaptics communications functions |
161 | ****************************************************************************/ | 198 | ****************************************************************************/ |
@@ -304,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse) | |||
304 | * Resolution is left zero if touchpad does not support the query | 341 | * Resolution is left zero if touchpad does not support the query |
305 | */ | 342 | */ |
306 | 343 | ||
307 | static const int *quirk_min_max; | ||
308 | |||
309 | static int synaptics_resolution(struct psmouse *psmouse) | 344 | static int synaptics_resolution(struct psmouse *psmouse) |
310 | { | 345 | { |
311 | struct synaptics_data *priv = psmouse->private; | 346 | struct synaptics_data *priv = psmouse->private; |
312 | unsigned char resp[3]; | 347 | unsigned char resp[3]; |
348 | int i; | ||
313 | 349 | ||
314 | if (quirk_min_max) { | 350 | for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) |
315 | priv->x_min = quirk_min_max[0]; | 351 | if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) { |
316 | priv->x_max = quirk_min_max[1]; | 352 | priv->x_min = min_max_pnpid_table[i].x_min; |
317 | priv->y_min = quirk_min_max[2]; | 353 | priv->x_max = min_max_pnpid_table[i].x_max; |
318 | priv->y_max = quirk_min_max[3]; | 354 | priv->y_min = min_max_pnpid_table[i].y_min; |
319 | return 0; | 355 | priv->y_max = min_max_pnpid_table[i].y_max; |
320 | } | 356 | return 0; |
357 | } | ||
321 | 358 | ||
322 | if (SYN_ID_MAJOR(priv->identity) < 4) | 359 | if (SYN_ID_MAJOR(priv->identity) < 4) |
323 | return 0; | 360 | return 0; |
@@ -1365,17 +1402,8 @@ static void set_input_params(struct psmouse *psmouse, | |||
1365 | 1402 | ||
1366 | if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { | 1403 | if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { |
1367 | __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); | 1404 | __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); |
1368 | /* See if this buttonpad has a top button area */ | 1405 | if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids)) |
1369 | if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) { | 1406 | __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); |
1370 | for (i = 0; topbuttonpad_pnp_ids[i]; i++) { | ||
1371 | if (strstr(psmouse->ps2dev.serio->firmware_id, | ||
1372 | topbuttonpad_pnp_ids[i])) { | ||
1373 | __set_bit(INPUT_PROP_TOPBUTTONPAD, | ||
1374 | dev->propbit); | ||
1375 | break; | ||
1376 | } | ||
1377 | } | ||
1378 | } | ||
1379 | /* Clickpads report only left button */ | 1407 | /* Clickpads report only left button */ |
1380 | __clear_bit(BTN_RIGHT, dev->keybit); | 1408 | __clear_bit(BTN_RIGHT, dev->keybit); |
1381 | __clear_bit(BTN_MIDDLE, dev->keybit); | 1409 | __clear_bit(BTN_MIDDLE, dev->keybit); |
@@ -1547,104 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = { | |||
1547 | { } | 1575 | { } |
1548 | }; | 1576 | }; |
1549 | 1577 | ||
1550 | static const struct dmi_system_id min_max_dmi_table[] __initconst = { | ||
1551 | #if defined(CONFIG_DMI) | ||
1552 | { | ||
1553 | /* Lenovo ThinkPad Helix */ | ||
1554 | .matches = { | ||
1555 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1556 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"), | ||
1557 | }, | ||
1558 | .driver_data = (int []){1024, 5052, 2258, 4832}, | ||
1559 | }, | ||
1560 | { | ||
1561 | /* Lenovo ThinkPad X240 */ | ||
1562 | .matches = { | ||
1563 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1564 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"), | ||
1565 | }, | ||
1566 | .driver_data = (int []){1232, 5710, 1156, 4696}, | ||
1567 | }, | ||
1568 | { | ||
1569 | /* Lenovo ThinkPad Edge E431 */ | ||
1570 | .matches = { | ||
1571 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1572 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"), | ||
1573 | }, | ||
1574 | .driver_data = (int []){1024, 5022, 2508, 4832}, | ||
1575 | }, | ||
1576 | { | ||
1577 | /* Lenovo ThinkPad T431s */ | ||
1578 | .matches = { | ||
1579 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1580 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"), | ||
1581 | }, | ||
1582 | .driver_data = (int []){1024, 5112, 2024, 4832}, | ||
1583 | }, | ||
1584 | { | ||
1585 | /* Lenovo ThinkPad T440s */ | ||
1586 | .matches = { | ||
1587 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1588 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"), | ||
1589 | }, | ||
1590 | .driver_data = (int []){1024, 5112, 2024, 4832}, | ||
1591 | }, | ||
1592 | { | ||
1593 | /* Lenovo ThinkPad L440 */ | ||
1594 | .matches = { | ||
1595 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1596 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"), | ||
1597 | }, | ||
1598 | .driver_data = (int []){1024, 5112, 2024, 4832}, | ||
1599 | }, | ||
1600 | { | ||
1601 | /* Lenovo ThinkPad T540p */ | ||
1602 | .matches = { | ||
1603 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1604 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"), | ||
1605 | }, | ||
1606 | .driver_data = (int []){1024, 5056, 2058, 4832}, | ||
1607 | }, | ||
1608 | { | ||
1609 | /* Lenovo ThinkPad L540 */ | ||
1610 | .matches = { | ||
1611 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1612 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"), | ||
1613 | }, | ||
1614 | .driver_data = (int []){1024, 5112, 2024, 4832}, | ||
1615 | }, | ||
1616 | { | ||
1617 | /* Lenovo Yoga S1 */ | ||
1618 | .matches = { | ||
1619 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1620 | DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, | ||
1621 | "ThinkPad S1 Yoga"), | ||
1622 | }, | ||
1623 | .driver_data = (int []){1232, 5710, 1156, 4696}, | ||
1624 | }, | ||
1625 | { | ||
1626 | /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */ | ||
1627 | .matches = { | ||
1628 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1629 | DMI_MATCH(DMI_PRODUCT_VERSION, | ||
1630 | "ThinkPad X1 Carbon 2nd"), | ||
1631 | }, | ||
1632 | .driver_data = (int []){1024, 5112, 2024, 4832}, | ||
1633 | }, | ||
1634 | #endif | ||
1635 | { } | ||
1636 | }; | ||
1637 | |||
1638 | void __init synaptics_module_init(void) | 1578 | void __init synaptics_module_init(void) |
1639 | { | 1579 | { |
1640 | const struct dmi_system_id *min_max_dmi; | ||
1641 | |||
1642 | impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); | 1580 | impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); |
1643 | broken_olpc_ec = dmi_check_system(olpc_dmi_table); | 1581 | broken_olpc_ec = dmi_check_system(olpc_dmi_table); |
1644 | |||
1645 | min_max_dmi = dmi_first_match(min_max_dmi_table); | ||
1646 | if (min_max_dmi) | ||
1647 | quirk_min_max = min_max_dmi->driver_data; | ||
1648 | } | 1582 | } |
1649 | 1583 | ||
1650 | static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) | 1584 | static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) |
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c index 762b08432de0..8b748d99b934 100644 --- a/drivers/input/serio/ambakmi.c +++ b/drivers/input/serio/ambakmi.c | |||
@@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io) | |||
79 | writeb(divisor, KMICLKDIV); | 79 | writeb(divisor, KMICLKDIV); |
80 | writeb(KMICR_EN, KMICR); | 80 | writeb(KMICR_EN, KMICR); |
81 | 81 | ||
82 | ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi); | 82 | ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050", |
83 | kmi); | ||
83 | if (ret) { | 84 | if (ret) { |
84 | printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq); | 85 | printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq); |
85 | writeb(0, KMICR); | 86 | writeb(0, KMICR); |
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 68edc9db2c64..b845e9370871 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig | |||
@@ -640,7 +640,7 @@ config TOUCHSCREEN_WM9713 | |||
640 | 640 | ||
641 | config TOUCHSCREEN_WM97XX_ATMEL | 641 | config TOUCHSCREEN_WM97XX_ATMEL |
642 | tristate "WM97xx Atmel accelerated touch" | 642 | tristate "WM97xx Atmel accelerated touch" |
643 | depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91) | 643 | depends on TOUCHSCREEN_WM97XX && AVR32 |
644 | help | 644 | help |
645 | Say Y here for support for streaming mode with WM97xx touchscreens | 645 | Say Y here for support for streaming mode with WM97xx touchscreens |
646 | on Atmel AT91 or AVR32 systems with an AC97C module. | 646 | on Atmel AT91 or AVR32 systems with an AC97C module. |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index c949520bd196..57068e8035b5 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -3999,7 +3999,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) | |||
3999 | iommu_flush_dte(iommu, devid); | 3999 | iommu_flush_dte(iommu, devid); |
4000 | if (devid != alias) { | 4000 | if (devid != alias) { |
4001 | irq_lookup_table[alias] = table; | 4001 | irq_lookup_table[alias] = table; |
4002 | set_dte_irq_entry(devid, table); | 4002 | set_dte_irq_entry(alias, table); |
4003 | iommu_flush_dte(iommu, alias); | 4003 | iommu_flush_dte(iommu, alias); |
4004 | } | 4004 | } |
4005 | 4005 | ||
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index b76c58dbe30c..0e08545d7298 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
@@ -788,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) | |||
788 | * per device. But we can enable the exclusion range per | 788 | * per device. But we can enable the exclusion range per |
789 | * device. This is done here | 789 | * device. This is done here |
790 | */ | 790 | */ |
791 | set_dev_entry_bit(m->devid, DEV_ENTRY_EX); | 791 | set_dev_entry_bit(devid, DEV_ENTRY_EX); |
792 | iommu->exclusion_start = m->range_start; | 792 | iommu->exclusion_start = m->range_start; |
793 | iommu->exclusion_length = m->range_length; | 793 | iommu->exclusion_length = m->range_length; |
794 | } | 794 | } |
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 5208828792e6..203b2e6a91cf 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c | |||
@@ -504,8 +504,10 @@ static void do_fault(struct work_struct *work) | |||
504 | 504 | ||
505 | write = !!(fault->flags & PPR_FAULT_WRITE); | 505 | write = !!(fault->flags & PPR_FAULT_WRITE); |
506 | 506 | ||
507 | down_read(&fault->state->mm->mmap_sem); | ||
507 | npages = get_user_pages(fault->state->task, fault->state->mm, | 508 | npages = get_user_pages(fault->state->task, fault->state->mm, |
508 | fault->address, 1, write, 0, &page, NULL); | 509 | fault->address, 1, write, 0, &page, NULL); |
510 | up_read(&fault->state->mm->mmap_sem); | ||
509 | 511 | ||
510 | if (npages == 1) { | 512 | if (npages == 1) { |
511 | put_page(page); | 513 | put_page(page); |
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 9380be7b1895..5f054c44b485 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
2178 | ti->num_discard_bios = 1; | 2178 | ti->num_discard_bios = 1; |
2179 | ti->discards_supported = true; | 2179 | ti->discards_supported = true; |
2180 | ti->discard_zeroes_data_unsupported = true; | 2180 | ti->discard_zeroes_data_unsupported = true; |
2181 | /* Discard bios must be split on a block boundary */ | ||
2182 | ti->split_discard_bios = true; | ||
2181 | 2183 | ||
2182 | cache->features = ca->features; | 2184 | cache->features = ca->features; |
2183 | ti->per_bio_data_size = get_per_bio_data_size(cache); | 2185 | ti->per_bio_data_size = get_per_bio_data_size(cache); |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 784695d22fde..53b213226c01 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/crypto.h> | 19 | #include <linux/crypto.h> |
20 | #include <linux/workqueue.h> | 20 | #include <linux/workqueue.h> |
21 | #include <linux/backing-dev.h> | 21 | #include <linux/backing-dev.h> |
22 | #include <linux/percpu.h> | ||
23 | #include <linux/atomic.h> | 22 | #include <linux/atomic.h> |
24 | #include <linux/scatterlist.h> | 23 | #include <linux/scatterlist.h> |
25 | #include <asm/page.h> | 24 | #include <asm/page.h> |
@@ -43,6 +42,7 @@ struct convert_context { | |||
43 | struct bvec_iter iter_out; | 42 | struct bvec_iter iter_out; |
44 | sector_t cc_sector; | 43 | sector_t cc_sector; |
45 | atomic_t cc_pending; | 44 | atomic_t cc_pending; |
45 | struct ablkcipher_request *req; | ||
46 | }; | 46 | }; |
47 | 47 | ||
48 | /* | 48 | /* |
@@ -111,15 +111,7 @@ struct iv_tcw_private { | |||
111 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; | 111 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; |
112 | 112 | ||
113 | /* | 113 | /* |
114 | * Duplicated per-CPU state for cipher. | 114 | * The fields in here must be read only after initialization. |
115 | */ | ||
116 | struct crypt_cpu { | ||
117 | struct ablkcipher_request *req; | ||
118 | }; | ||
119 | |||
120 | /* | ||
121 | * The fields in here must be read only after initialization, | ||
122 | * changing state should be in crypt_cpu. | ||
123 | */ | 115 | */ |
124 | struct crypt_config { | 116 | struct crypt_config { |
125 | struct dm_dev *dev; | 117 | struct dm_dev *dev; |
@@ -150,12 +142,6 @@ struct crypt_config { | |||
150 | sector_t iv_offset; | 142 | sector_t iv_offset; |
151 | unsigned int iv_size; | 143 | unsigned int iv_size; |
152 | 144 | ||
153 | /* | ||
154 | * Duplicated per cpu state. Access through | ||
155 | * per_cpu_ptr() only. | ||
156 | */ | ||
157 | struct crypt_cpu __percpu *cpu; | ||
158 | |||
159 | /* ESSIV: struct crypto_cipher *essiv_tfm */ | 145 | /* ESSIV: struct crypto_cipher *essiv_tfm */ |
160 | void *iv_private; | 146 | void *iv_private; |
161 | struct crypto_ablkcipher **tfms; | 147 | struct crypto_ablkcipher **tfms; |
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *); | |||
192 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); | 178 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); |
193 | static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); | 179 | static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); |
194 | 180 | ||
195 | static struct crypt_cpu *this_crypt_config(struct crypt_config *cc) | ||
196 | { | ||
197 | return this_cpu_ptr(cc->cpu); | ||
198 | } | ||
199 | |||
200 | /* | 181 | /* |
201 | * Use this to access cipher attributes that are the same for each CPU. | 182 | * Use this to access cipher attributes that are the same for each CPU. |
202 | */ | 183 | */ |
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, | |||
903 | static void crypt_alloc_req(struct crypt_config *cc, | 884 | static void crypt_alloc_req(struct crypt_config *cc, |
904 | struct convert_context *ctx) | 885 | struct convert_context *ctx) |
905 | { | 886 | { |
906 | struct crypt_cpu *this_cc = this_crypt_config(cc); | ||
907 | unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); | 887 | unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); |
908 | 888 | ||
909 | if (!this_cc->req) | 889 | if (!ctx->req) |
910 | this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); | 890 | ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); |
911 | 891 | ||
912 | ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]); | 892 | ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); |
913 | ablkcipher_request_set_callback(this_cc->req, | 893 | ablkcipher_request_set_callback(ctx->req, |
914 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, | 894 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, |
915 | kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); | 895 | kcryptd_async_done, dmreq_of_req(cc, ctx->req)); |
916 | } | 896 | } |
917 | 897 | ||
918 | /* | 898 | /* |
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc, | |||
921 | static int crypt_convert(struct crypt_config *cc, | 901 | static int crypt_convert(struct crypt_config *cc, |
922 | struct convert_context *ctx) | 902 | struct convert_context *ctx) |
923 | { | 903 | { |
924 | struct crypt_cpu *this_cc = this_crypt_config(cc); | ||
925 | int r; | 904 | int r; |
926 | 905 | ||
927 | atomic_set(&ctx->cc_pending, 1); | 906 | atomic_set(&ctx->cc_pending, 1); |
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc, | |||
932 | 911 | ||
933 | atomic_inc(&ctx->cc_pending); | 912 | atomic_inc(&ctx->cc_pending); |
934 | 913 | ||
935 | r = crypt_convert_block(cc, ctx, this_cc->req); | 914 | r = crypt_convert_block(cc, ctx, ctx->req); |
936 | 915 | ||
937 | switch (r) { | 916 | switch (r) { |
938 | /* async */ | 917 | /* async */ |
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc, | |||
941 | reinit_completion(&ctx->restart); | 920 | reinit_completion(&ctx->restart); |
942 | /* fall through*/ | 921 | /* fall through*/ |
943 | case -EINPROGRESS: | 922 | case -EINPROGRESS: |
944 | this_cc->req = NULL; | 923 | ctx->req = NULL; |
945 | ctx->cc_sector++; | 924 | ctx->cc_sector++; |
946 | continue; | 925 | continue; |
947 | 926 | ||
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, | |||
1040 | io->sector = sector; | 1019 | io->sector = sector; |
1041 | io->error = 0; | 1020 | io->error = 0; |
1042 | io->base_io = NULL; | 1021 | io->base_io = NULL; |
1022 | io->ctx.req = NULL; | ||
1043 | atomic_set(&io->io_pending, 0); | 1023 | atomic_set(&io->io_pending, 0); |
1044 | 1024 | ||
1045 | return io; | 1025 | return io; |
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io) | |||
1065 | if (!atomic_dec_and_test(&io->io_pending)) | 1045 | if (!atomic_dec_and_test(&io->io_pending)) |
1066 | return; | 1046 | return; |
1067 | 1047 | ||
1048 | if (io->ctx.req) | ||
1049 | mempool_free(io->ctx.req, cc->req_pool); | ||
1068 | mempool_free(io, cc->io_pool); | 1050 | mempool_free(io, cc->io_pool); |
1069 | 1051 | ||
1070 | if (likely(!base_io)) | 1052 | if (likely(!base_io)) |
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc) | |||
1492 | static void crypt_dtr(struct dm_target *ti) | 1474 | static void crypt_dtr(struct dm_target *ti) |
1493 | { | 1475 | { |
1494 | struct crypt_config *cc = ti->private; | 1476 | struct crypt_config *cc = ti->private; |
1495 | struct crypt_cpu *cpu_cc; | ||
1496 | int cpu; | ||
1497 | 1477 | ||
1498 | ti->private = NULL; | 1478 | ti->private = NULL; |
1499 | 1479 | ||
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti) | |||
1505 | if (cc->crypt_queue) | 1485 | if (cc->crypt_queue) |
1506 | destroy_workqueue(cc->crypt_queue); | 1486 | destroy_workqueue(cc->crypt_queue); |
1507 | 1487 | ||
1508 | if (cc->cpu) | ||
1509 | for_each_possible_cpu(cpu) { | ||
1510 | cpu_cc = per_cpu_ptr(cc->cpu, cpu); | ||
1511 | if (cpu_cc->req) | ||
1512 | mempool_free(cpu_cc->req, cc->req_pool); | ||
1513 | } | ||
1514 | |||
1515 | crypt_free_tfms(cc); | 1488 | crypt_free_tfms(cc); |
1516 | 1489 | ||
1517 | if (cc->bs) | 1490 | if (cc->bs) |
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti) | |||
1530 | if (cc->dev) | 1503 | if (cc->dev) |
1531 | dm_put_device(ti, cc->dev); | 1504 | dm_put_device(ti, cc->dev); |
1532 | 1505 | ||
1533 | if (cc->cpu) | ||
1534 | free_percpu(cc->cpu); | ||
1535 | |||
1536 | kzfree(cc->cipher); | 1506 | kzfree(cc->cipher); |
1537 | kzfree(cc->cipher_string); | 1507 | kzfree(cc->cipher_string); |
1538 | 1508 | ||
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti, | |||
1588 | if (tmp) | 1558 | if (tmp) |
1589 | DMWARN("Ignoring unexpected additional cipher options"); | 1559 | DMWARN("Ignoring unexpected additional cipher options"); |
1590 | 1560 | ||
1591 | cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)), | ||
1592 | __alignof__(struct crypt_cpu)); | ||
1593 | if (!cc->cpu) { | ||
1594 | ti->error = "Cannot allocate per cpu state"; | ||
1595 | goto bad_mem; | ||
1596 | } | ||
1597 | |||
1598 | /* | 1561 | /* |
1599 | * For compatibility with the original dm-crypt mapping format, if | 1562 | * For compatibility with the original dm-crypt mapping format, if |
1600 | * only the cipher name is supplied, use cbc-plain. | 1563 | * only the cipher name is supplied, use cbc-plain. |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index aa009e865871..ebfa411d1a7d 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, | |||
445 | else | 445 | else |
446 | m->saved_queue_if_no_path = queue_if_no_path; | 446 | m->saved_queue_if_no_path = queue_if_no_path; |
447 | m->queue_if_no_path = queue_if_no_path; | 447 | m->queue_if_no_path = queue_if_no_path; |
448 | if (!m->queue_if_no_path) | ||
449 | dm_table_run_md_queue_async(m->ti->table); | ||
450 | |||
451 | spin_unlock_irqrestore(&m->lock, flags); | 448 | spin_unlock_irqrestore(&m->lock, flags); |
452 | 449 | ||
450 | if (!queue_if_no_path) | ||
451 | dm_table_run_md_queue_async(m->ti->table); | ||
452 | |||
453 | return 0; | 453 | return 0; |
454 | } | 454 | } |
455 | 455 | ||
@@ -954,7 +954,7 @@ out: | |||
954 | */ | 954 | */ |
955 | static int reinstate_path(struct pgpath *pgpath) | 955 | static int reinstate_path(struct pgpath *pgpath) |
956 | { | 956 | { |
957 | int r = 0; | 957 | int r = 0, run_queue = 0; |
958 | unsigned long flags; | 958 | unsigned long flags; |
959 | struct multipath *m = pgpath->pg->m; | 959 | struct multipath *m = pgpath->pg->m; |
960 | 960 | ||
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath) | |||
978 | 978 | ||
979 | if (!m->nr_valid_paths++) { | 979 | if (!m->nr_valid_paths++) { |
980 | m->current_pgpath = NULL; | 980 | m->current_pgpath = NULL; |
981 | dm_table_run_md_queue_async(m->ti->table); | 981 | run_queue = 1; |
982 | } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { | 982 | } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { |
983 | if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) | 983 | if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) |
984 | m->pg_init_in_progress++; | 984 | m->pg_init_in_progress++; |
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath) | |||
991 | 991 | ||
992 | out: | 992 | out: |
993 | spin_unlock_irqrestore(&m->lock, flags); | 993 | spin_unlock_irqrestore(&m->lock, flags); |
994 | if (run_queue) | ||
995 | dm_table_run_md_queue_async(m->ti->table); | ||
994 | 996 | ||
995 | return r; | 997 | return r; |
996 | } | 998 | } |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 13abade76ad9..242ac2ea5f29 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -27,6 +27,9 @@ | |||
27 | #define MAPPING_POOL_SIZE 1024 | 27 | #define MAPPING_POOL_SIZE 1024 |
28 | #define PRISON_CELLS 1024 | 28 | #define PRISON_CELLS 1024 |
29 | #define COMMIT_PERIOD HZ | 29 | #define COMMIT_PERIOD HZ |
30 | #define NO_SPACE_TIMEOUT_SECS 60 | ||
31 | |||
32 | static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS; | ||
30 | 33 | ||
31 | DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, | 34 | DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, |
32 | "A percentage of time allocated for copy on write"); | 35 | "A percentage of time allocated for copy on write"); |
@@ -175,6 +178,7 @@ struct pool { | |||
175 | struct workqueue_struct *wq; | 178 | struct workqueue_struct *wq; |
176 | struct work_struct worker; | 179 | struct work_struct worker; |
177 | struct delayed_work waker; | 180 | struct delayed_work waker; |
181 | struct delayed_work no_space_timeout; | ||
178 | 182 | ||
179 | unsigned long last_commit_jiffies; | 183 | unsigned long last_commit_jiffies; |
180 | unsigned ref_count; | 184 | unsigned ref_count; |
@@ -935,7 +939,7 @@ static int commit(struct pool *pool) | |||
935 | { | 939 | { |
936 | int r; | 940 | int r; |
937 | 941 | ||
938 | if (get_pool_mode(pool) != PM_WRITE) | 942 | if (get_pool_mode(pool) >= PM_READ_ONLY) |
939 | return -EINVAL; | 943 | return -EINVAL; |
940 | 944 | ||
941 | r = dm_pool_commit_metadata(pool->pmd); | 945 | r = dm_pool_commit_metadata(pool->pmd); |
@@ -1590,6 +1594,20 @@ static void do_waker(struct work_struct *ws) | |||
1590 | queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); | 1594 | queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); |
1591 | } | 1595 | } |
1592 | 1596 | ||
1597 | /* | ||
1598 | * We're holding onto IO to allow userland time to react. After the | ||
1599 | * timeout either the pool will have been resized (and thus back in | ||
1600 | * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO. | ||
1601 | */ | ||
1602 | static void do_no_space_timeout(struct work_struct *ws) | ||
1603 | { | ||
1604 | struct pool *pool = container_of(to_delayed_work(ws), struct pool, | ||
1605 | no_space_timeout); | ||
1606 | |||
1607 | if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) | ||
1608 | set_pool_mode(pool, PM_READ_ONLY); | ||
1609 | } | ||
1610 | |||
1593 | /*----------------------------------------------------------------*/ | 1611 | /*----------------------------------------------------------------*/ |
1594 | 1612 | ||
1595 | struct noflush_work { | 1613 | struct noflush_work { |
@@ -1654,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
1654 | struct pool_c *pt = pool->ti->private; | 1672 | struct pool_c *pt = pool->ti->private; |
1655 | bool needs_check = dm_pool_metadata_needs_check(pool->pmd); | 1673 | bool needs_check = dm_pool_metadata_needs_check(pool->pmd); |
1656 | enum pool_mode old_mode = get_pool_mode(pool); | 1674 | enum pool_mode old_mode = get_pool_mode(pool); |
1675 | unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ; | ||
1657 | 1676 | ||
1658 | /* | 1677 | /* |
1659 | * Never allow the pool to transition to PM_WRITE mode if user | 1678 | * Never allow the pool to transition to PM_WRITE mode if user |
@@ -1715,6 +1734,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
1715 | pool->process_discard = process_discard; | 1734 | pool->process_discard = process_discard; |
1716 | pool->process_prepared_mapping = process_prepared_mapping; | 1735 | pool->process_prepared_mapping = process_prepared_mapping; |
1717 | pool->process_prepared_discard = process_prepared_discard_passdown; | 1736 | pool->process_prepared_discard = process_prepared_discard_passdown; |
1737 | |||
1738 | if (!pool->pf.error_if_no_space && no_space_timeout) | ||
1739 | queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); | ||
1718 | break; | 1740 | break; |
1719 | 1741 | ||
1720 | case PM_WRITE: | 1742 | case PM_WRITE: |
@@ -2100,6 +2122,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, | |||
2100 | 2122 | ||
2101 | INIT_WORK(&pool->worker, do_worker); | 2123 | INIT_WORK(&pool->worker, do_worker); |
2102 | INIT_DELAYED_WORK(&pool->waker, do_waker); | 2124 | INIT_DELAYED_WORK(&pool->waker, do_waker); |
2125 | INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); | ||
2103 | spin_lock_init(&pool->lock); | 2126 | spin_lock_init(&pool->lock); |
2104 | bio_list_init(&pool->deferred_flush_bios); | 2127 | bio_list_init(&pool->deferred_flush_bios); |
2105 | INIT_LIST_HEAD(&pool->prepared_mappings); | 2128 | INIT_LIST_HEAD(&pool->prepared_mappings); |
@@ -2662,6 +2685,7 @@ static void pool_postsuspend(struct dm_target *ti) | |||
2662 | struct pool *pool = pt->pool; | 2685 | struct pool *pool = pt->pool; |
2663 | 2686 | ||
2664 | cancel_delayed_work(&pool->waker); | 2687 | cancel_delayed_work(&pool->waker); |
2688 | cancel_delayed_work(&pool->no_space_timeout); | ||
2665 | flush_workqueue(pool->wq); | 2689 | flush_workqueue(pool->wq); |
2666 | (void) commit(pool); | 2690 | (void) commit(pool); |
2667 | } | 2691 | } |
@@ -3487,6 +3511,9 @@ static void dm_thin_exit(void) | |||
3487 | module_init(dm_thin_init); | 3511 | module_init(dm_thin_init); |
3488 | module_exit(dm_thin_exit); | 3512 | module_exit(dm_thin_exit); |
3489 | 3513 | ||
3514 | module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR); | ||
3515 | MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds"); | ||
3516 | |||
3490 | MODULE_DESCRIPTION(DM_NAME " thin provisioning target"); | 3517 | MODULE_DESCRIPTION(DM_NAME " thin provisioning target"); |
3491 | MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); | 3518 | MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); |
3492 | MODULE_LICENSE("GPL"); | 3519 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 8fda38d23e38..237b7e0ddc7a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -8516,7 +8516,8 @@ static int md_notify_reboot(struct notifier_block *this, | |||
8516 | if (mddev_trylock(mddev)) { | 8516 | if (mddev_trylock(mddev)) { |
8517 | if (mddev->pers) | 8517 | if (mddev->pers) |
8518 | __md_stop_writes(mddev); | 8518 | __md_stop_writes(mddev); |
8519 | mddev->safemode = 2; | 8519 | if (mddev->persistent) |
8520 | mddev->safemode = 2; | ||
8520 | mddev_unlock(mddev); | 8521 | mddev_unlock(mddev); |
8521 | } | 8522 | } |
8522 | need_delay = 1; | 8523 | need_delay = 1; |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 33fc408e5eac..cb882aae9e20 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -1172,6 +1172,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio) | |||
1172 | int max_sectors; | 1172 | int max_sectors; |
1173 | int sectors; | 1173 | int sectors; |
1174 | 1174 | ||
1175 | /* | ||
1176 | * Register the new request and wait if the reconstruction | ||
1177 | * thread has put up a bar for new requests. | ||
1178 | * Continue immediately if no resync is active currently. | ||
1179 | */ | ||
1180 | wait_barrier(conf); | ||
1181 | |||
1175 | sectors = bio_sectors(bio); | 1182 | sectors = bio_sectors(bio); |
1176 | while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && | 1183 | while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && |
1177 | bio->bi_iter.bi_sector < conf->reshape_progress && | 1184 | bio->bi_iter.bi_sector < conf->reshape_progress && |
@@ -1552,12 +1559,6 @@ static void make_request(struct mddev *mddev, struct bio *bio) | |||
1552 | 1559 | ||
1553 | md_write_start(mddev, bio); | 1560 | md_write_start(mddev, bio); |
1554 | 1561 | ||
1555 | /* | ||
1556 | * Register the new request and wait if the reconstruction | ||
1557 | * thread has put up a bar for new requests. | ||
1558 | * Continue immediately if no resync is active currently. | ||
1559 | */ | ||
1560 | wait_barrier(conf); | ||
1561 | 1562 | ||
1562 | do { | 1563 | do { |
1563 | 1564 | ||
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c index e8a1ce204036..cdd7c1b7259b 100644 --- a/drivers/media/i2c/ov7670.c +++ b/drivers/media/i2c/ov7670.c | |||
@@ -1109,7 +1109,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd, | |||
1109 | * windows that fall outside that. | 1109 | * windows that fall outside that. |
1110 | */ | 1110 | */ |
1111 | for (i = 0; i < n_win_sizes; i++) { | 1111 | for (i = 0; i < n_win_sizes; i++) { |
1112 | struct ov7670_win_size *win = &info->devtype->win_sizes[index]; | 1112 | struct ov7670_win_size *win = &info->devtype->win_sizes[i]; |
1113 | if (info->min_width && win->width < info->min_width) | 1113 | if (info->min_width && win->width < info->min_width) |
1114 | continue; | 1114 | continue; |
1115 | if (info->min_height && win->height < info->min_height) | 1115 | if (info->min_height && win->height < info->min_height) |
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index a4459301b5f8..ee0f57e01b56 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c | |||
@@ -1616,7 +1616,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state) | |||
1616 | if (ret < 0) | 1616 | if (ret < 0) |
1617 | return -EINVAL; | 1617 | return -EINVAL; |
1618 | 1618 | ||
1619 | node_ep = v4l2_of_get_next_endpoint(node, NULL); | 1619 | node_ep = of_graph_get_next_endpoint(node, NULL); |
1620 | if (!node_ep) { | 1620 | if (!node_ep) { |
1621 | dev_warn(dev, "no endpoint defined for node: %s\n", | 1621 | dev_warn(dev, "no endpoint defined for node: %s\n", |
1622 | node->full_name); | 1622 | node->full_name); |
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index d5a7a135f75d..703560fa5e73 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c | |||
@@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev, | |||
93 | struct media_entity *ent; | 93 | struct media_entity *ent; |
94 | struct media_entity_desc u_ent; | 94 | struct media_entity_desc u_ent; |
95 | 95 | ||
96 | memset(&u_ent, 0, sizeof(u_ent)); | ||
96 | if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id))) | 97 | if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id))) |
97 | return -EFAULT; | 98 | return -EFAULT; |
98 | 99 | ||
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index b4f12d00be05..656708252962 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c | |||
@@ -372,18 +372,32 @@ static int vpbe_stop_streaming(struct vb2_queue *vq) | |||
372 | { | 372 | { |
373 | struct vpbe_fh *fh = vb2_get_drv_priv(vq); | 373 | struct vpbe_fh *fh = vb2_get_drv_priv(vq); |
374 | struct vpbe_layer *layer = fh->layer; | 374 | struct vpbe_layer *layer = fh->layer; |
375 | struct vpbe_display *disp = fh->disp_dev; | ||
376 | unsigned long flags; | ||
375 | 377 | ||
376 | if (!vb2_is_streaming(vq)) | 378 | if (!vb2_is_streaming(vq)) |
377 | return 0; | 379 | return 0; |
378 | 380 | ||
379 | /* release all active buffers */ | 381 | /* release all active buffers */ |
382 | spin_lock_irqsave(&disp->dma_queue_lock, flags); | ||
383 | if (layer->cur_frm == layer->next_frm) { | ||
384 | vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR); | ||
385 | } else { | ||
386 | if (layer->cur_frm != NULL) | ||
387 | vb2_buffer_done(&layer->cur_frm->vb, | ||
388 | VB2_BUF_STATE_ERROR); | ||
389 | if (layer->next_frm != NULL) | ||
390 | vb2_buffer_done(&layer->next_frm->vb, | ||
391 | VB2_BUF_STATE_ERROR); | ||
392 | } | ||
393 | |||
380 | while (!list_empty(&layer->dma_queue)) { | 394 | while (!list_empty(&layer->dma_queue)) { |
381 | layer->next_frm = list_entry(layer->dma_queue.next, | 395 | layer->next_frm = list_entry(layer->dma_queue.next, |
382 | struct vpbe_disp_buffer, list); | 396 | struct vpbe_disp_buffer, list); |
383 | list_del(&layer->next_frm->list); | 397 | list_del(&layer->next_frm->list); |
384 | vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR); | 398 | vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR); |
385 | } | 399 | } |
386 | 400 | spin_unlock_irqrestore(&disp->dma_queue_lock, flags); | |
387 | return 0; | 401 | return 0; |
388 | } | 402 | } |
389 | 403 | ||
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index d762246eabf5..0379cb9f9a9c 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c | |||
@@ -734,6 +734,8 @@ static int vpfe_release(struct file *file) | |||
734 | } | 734 | } |
735 | vpfe_dev->io_usrs = 0; | 735 | vpfe_dev->io_usrs = 0; |
736 | vpfe_dev->numbuffers = config_params.numbuffers; | 736 | vpfe_dev->numbuffers = config_params.numbuffers; |
737 | videobuf_stop(&vpfe_dev->buffer_queue); | ||
738 | videobuf_mmap_free(&vpfe_dev->buffer_queue); | ||
737 | } | 739 | } |
738 | 740 | ||
739 | /* Decrement device usrs counter */ | 741 | /* Decrement device usrs counter */ |
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index 756da78bac23..8dea0b84a3ad 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c | |||
@@ -358,8 +358,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq) | |||
358 | 358 | ||
359 | common = &ch->common[VPIF_VIDEO_INDEX]; | 359 | common = &ch->common[VPIF_VIDEO_INDEX]; |
360 | 360 | ||
361 | /* Disable channel as per its device type and channel id */ | ||
362 | if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { | ||
363 | enable_channel0(0); | ||
364 | channel0_intr_enable(0); | ||
365 | } | ||
366 | if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) || | ||
367 | (2 == common->started)) { | ||
368 | enable_channel1(0); | ||
369 | channel1_intr_enable(0); | ||
370 | } | ||
371 | common->started = 0; | ||
372 | |||
361 | /* release all active buffers */ | 373 | /* release all active buffers */ |
362 | spin_lock_irqsave(&common->irqlock, flags); | 374 | spin_lock_irqsave(&common->irqlock, flags); |
375 | if (common->cur_frm == common->next_frm) { | ||
376 | vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR); | ||
377 | } else { | ||
378 | if (common->cur_frm != NULL) | ||
379 | vb2_buffer_done(&common->cur_frm->vb, | ||
380 | VB2_BUF_STATE_ERROR); | ||
381 | if (common->next_frm != NULL) | ||
382 | vb2_buffer_done(&common->next_frm->vb, | ||
383 | VB2_BUF_STATE_ERROR); | ||
384 | } | ||
385 | |||
363 | while (!list_empty(&common->dma_queue)) { | 386 | while (!list_empty(&common->dma_queue)) { |
364 | common->next_frm = list_entry(common->dma_queue.next, | 387 | common->next_frm = list_entry(common->dma_queue.next, |
365 | struct vpif_cap_buffer, list); | 388 | struct vpif_cap_buffer, list); |
@@ -933,17 +956,6 @@ static int vpif_release(struct file *filep) | |||
933 | if (fh->io_allowed[VPIF_VIDEO_INDEX]) { | 956 | if (fh->io_allowed[VPIF_VIDEO_INDEX]) { |
934 | /* Reset io_usrs member of channel object */ | 957 | /* Reset io_usrs member of channel object */ |
935 | common->io_usrs = 0; | 958 | common->io_usrs = 0; |
936 | /* Disable channel as per its device type and channel id */ | ||
937 | if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { | ||
938 | enable_channel0(0); | ||
939 | channel0_intr_enable(0); | ||
940 | } | ||
941 | if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) || | ||
942 | (2 == common->started)) { | ||
943 | enable_channel1(0); | ||
944 | channel1_intr_enable(0); | ||
945 | } | ||
946 | common->started = 0; | ||
947 | /* Free buffers allocated */ | 959 | /* Free buffers allocated */ |
948 | vb2_queue_release(&common->buffer_queue); | 960 | vb2_queue_release(&common->buffer_queue); |
949 | vb2_dma_contig_cleanup_ctx(common->alloc_ctx); | 961 | vb2_dma_contig_cleanup_ctx(common->alloc_ctx); |
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index 0ac841e35aa4..aed41edd0501 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c | |||
@@ -320,8 +320,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq) | |||
320 | 320 | ||
321 | common = &ch->common[VPIF_VIDEO_INDEX]; | 321 | common = &ch->common[VPIF_VIDEO_INDEX]; |
322 | 322 | ||
323 | /* Disable channel */ | ||
324 | if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { | ||
325 | enable_channel2(0); | ||
326 | channel2_intr_enable(0); | ||
327 | } | ||
328 | if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) || | ||
329 | (2 == common->started)) { | ||
330 | enable_channel3(0); | ||
331 | channel3_intr_enable(0); | ||
332 | } | ||
333 | common->started = 0; | ||
334 | |||
323 | /* release all active buffers */ | 335 | /* release all active buffers */ |
324 | spin_lock_irqsave(&common->irqlock, flags); | 336 | spin_lock_irqsave(&common->irqlock, flags); |
337 | if (common->cur_frm == common->next_frm) { | ||
338 | vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR); | ||
339 | } else { | ||
340 | if (common->cur_frm != NULL) | ||
341 | vb2_buffer_done(&common->cur_frm->vb, | ||
342 | VB2_BUF_STATE_ERROR); | ||
343 | if (common->next_frm != NULL) | ||
344 | vb2_buffer_done(&common->next_frm->vb, | ||
345 | VB2_BUF_STATE_ERROR); | ||
346 | } | ||
347 | |||
325 | while (!list_empty(&common->dma_queue)) { | 348 | while (!list_empty(&common->dma_queue)) { |
326 | common->next_frm = list_entry(common->dma_queue.next, | 349 | common->next_frm = list_entry(common->dma_queue.next, |
327 | struct vpif_disp_buffer, list); | 350 | struct vpif_disp_buffer, list); |
@@ -773,18 +796,6 @@ static int vpif_release(struct file *filep) | |||
773 | if (fh->io_allowed[VPIF_VIDEO_INDEX]) { | 796 | if (fh->io_allowed[VPIF_VIDEO_INDEX]) { |
774 | /* Reset io_usrs member of channel object */ | 797 | /* Reset io_usrs member of channel object */ |
775 | common->io_usrs = 0; | 798 | common->io_usrs = 0; |
776 | /* Disable channel */ | ||
777 | if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { | ||
778 | enable_channel2(0); | ||
779 | channel2_intr_enable(0); | ||
780 | } | ||
781 | if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) || | ||
782 | (2 == common->started)) { | ||
783 | enable_channel3(0); | ||
784 | channel3_intr_enable(0); | ||
785 | } | ||
786 | common->started = 0; | ||
787 | |||
788 | /* Free buffers allocated */ | 799 | /* Free buffers allocated */ |
789 | vb2_queue_release(&common->buffer_queue); | 800 | vb2_queue_release(&common->buffer_queue); |
790 | vb2_dma_contig_cleanup_ctx(common->alloc_ctx); | 801 | vb2_dma_contig_cleanup_ctx(common->alloc_ctx); |
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c index da2fc86cc524..25dbf5b05a96 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.c +++ b/drivers/media/platform/exynos4-is/fimc-core.c | |||
@@ -122,7 +122,7 @@ static struct fimc_fmt fimc_formats[] = { | |||
122 | }, { | 122 | }, { |
123 | .name = "YUV 4:2:2 planar, Y/Cb/Cr", | 123 | .name = "YUV 4:2:2 planar, Y/Cb/Cr", |
124 | .fourcc = V4L2_PIX_FMT_YUV422P, | 124 | .fourcc = V4L2_PIX_FMT_YUV422P, |
125 | .depth = { 12 }, | 125 | .depth = { 16 }, |
126 | .color = FIMC_FMT_YCBYCR422, | 126 | .color = FIMC_FMT_YCBYCR422, |
127 | .memplanes = 1, | 127 | .memplanes = 1, |
128 | .colplanes = 3, | 128 | .colplanes = 3, |
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c index 3aecaf465094..f0c9c42867de 100644 --- a/drivers/media/tuners/fc2580.c +++ b/drivers/media/tuners/fc2580.c | |||
@@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe) | |||
195 | 195 | ||
196 | f_ref = 2UL * priv->cfg->clock / r_val; | 196 | f_ref = 2UL * priv->cfg->clock / r_val; |
197 | n_val = div_u64_rem(f_vco, f_ref, &k_val); | 197 | n_val = div_u64_rem(f_vco, f_ref, &k_val); |
198 | k_val_reg = 1UL * k_val * (1 << 20) / f_ref; | 198 | k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref); |
199 | 199 | ||
200 | ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff)); | 200 | ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff)); |
201 | if (ret < 0) | 201 | if (ret < 0) |
@@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe) | |||
348 | if (ret < 0) | 348 | if (ret < 0) |
349 | goto err; | 349 | goto err; |
350 | 350 | ||
351 | ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \ | 351 | ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock * |
352 | fc2580_if_filter_lut[i].mul / 1000000000); | 352 | fc2580_if_filter_lut[i].mul, 1000000000)); |
353 | if (ret < 0) | 353 | if (ret < 0) |
354 | goto err; | 354 | goto err; |
355 | 355 | ||
diff --git a/drivers/media/tuners/fc2580_priv.h b/drivers/media/tuners/fc2580_priv.h index be38a9e637e0..646c99452136 100644 --- a/drivers/media/tuners/fc2580_priv.h +++ b/drivers/media/tuners/fc2580_priv.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #define FC2580_PRIV_H | 22 | #define FC2580_PRIV_H |
23 | 23 | ||
24 | #include "fc2580.h" | 24 | #include "fc2580.h" |
25 | #include <linux/math64.h> | ||
25 | 26 | ||
26 | struct fc2580_reg_val { | 27 | struct fc2580_reg_val { |
27 | u8 reg; | 28 | u8 reg; |
diff --git a/drivers/media/usb/dvb-usb-v2/Makefile b/drivers/media/usb/dvb-usb-v2/Makefile index 7407b8338ccf..bc38f03394cd 100644 --- a/drivers/media/usb/dvb-usb-v2/Makefile +++ b/drivers/media/usb/dvb-usb-v2/Makefile | |||
@@ -41,4 +41,3 @@ ccflags-y += -I$(srctree)/drivers/media/dvb-core | |||
41 | ccflags-y += -I$(srctree)/drivers/media/dvb-frontends | 41 | ccflags-y += -I$(srctree)/drivers/media/dvb-frontends |
42 | ccflags-y += -I$(srctree)/drivers/media/tuners | 42 | ccflags-y += -I$(srctree)/drivers/media/tuners |
43 | ccflags-y += -I$(srctree)/drivers/media/common | 43 | ccflags-y += -I$(srctree)/drivers/media/common |
44 | ccflags-y += -I$(srctree)/drivers/staging/media/rtl2832u_sdr | ||
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 61d196e8b3ab..dcbd392e6efc 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c | |||
@@ -24,7 +24,6 @@ | |||
24 | 24 | ||
25 | #include "rtl2830.h" | 25 | #include "rtl2830.h" |
26 | #include "rtl2832.h" | 26 | #include "rtl2832.h" |
27 | #include "rtl2832_sdr.h" | ||
28 | 27 | ||
29 | #include "qt1010.h" | 28 | #include "qt1010.h" |
30 | #include "mt2060.h" | 29 | #include "mt2060.h" |
@@ -36,6 +35,45 @@ | |||
36 | #include "tua9001.h" | 35 | #include "tua9001.h" |
37 | #include "r820t.h" | 36 | #include "r820t.h" |
38 | 37 | ||
38 | /* | ||
39 | * RTL2832_SDR module is in staging. That logic is added in order to avoid any | ||
40 | * hard dependency to drivers/staging/ directory as we want compile mainline | ||
41 | * driver even whole staging directory is missing. | ||
42 | */ | ||
43 | #include <media/v4l2-subdev.h> | ||
44 | |||
45 | #if IS_ENABLED(CONFIG_DVB_RTL2832_SDR) | ||
46 | struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe, | ||
47 | struct i2c_adapter *i2c, const struct rtl2832_config *cfg, | ||
48 | struct v4l2_subdev *sd); | ||
49 | #else | ||
50 | static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe, | ||
51 | struct i2c_adapter *i2c, const struct rtl2832_config *cfg, | ||
52 | struct v4l2_subdev *sd) | ||
53 | { | ||
54 | return NULL; | ||
55 | } | ||
56 | #endif | ||
57 | |||
58 | #ifdef CONFIG_MEDIA_ATTACH | ||
59 | #define dvb_attach_sdr(FUNCTION, ARGS...) ({ \ | ||
60 | void *__r = NULL; \ | ||
61 | typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ | ||
62 | if (__a) { \ | ||
63 | __r = (void *) __a(ARGS); \ | ||
64 | if (__r == NULL) \ | ||
65 | symbol_put(FUNCTION); \ | ||
66 | } \ | ||
67 | __r; \ | ||
68 | }) | ||
69 | |||
70 | #else | ||
71 | #define dvb_attach_sdr(FUNCTION, ARGS...) ({ \ | ||
72 | FUNCTION(ARGS); \ | ||
73 | }) | ||
74 | |||
75 | #endif | ||
76 | |||
39 | static int rtl28xxu_disable_rc; | 77 | static int rtl28xxu_disable_rc; |
40 | module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644); | 78 | module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644); |
41 | MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller"); | 79 | MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller"); |
@@ -908,7 +946,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) | |||
908 | adap->fe[0]->ops.tuner_ops.get_rf_strength; | 946 | adap->fe[0]->ops.tuner_ops.get_rf_strength; |
909 | 947 | ||
910 | /* attach SDR */ | 948 | /* attach SDR */ |
911 | dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, | 949 | dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, |
912 | &rtl28xxu_rtl2832_fc0012_config, NULL); | 950 | &rtl28xxu_rtl2832_fc0012_config, NULL); |
913 | break; | 951 | break; |
914 | case TUNER_RTL2832_FC0013: | 952 | case TUNER_RTL2832_FC0013: |
@@ -920,7 +958,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) | |||
920 | adap->fe[0]->ops.tuner_ops.get_rf_strength; | 958 | adap->fe[0]->ops.tuner_ops.get_rf_strength; |
921 | 959 | ||
922 | /* attach SDR */ | 960 | /* attach SDR */ |
923 | dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, | 961 | dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, |
924 | &rtl28xxu_rtl2832_fc0013_config, NULL); | 962 | &rtl28xxu_rtl2832_fc0013_config, NULL); |
925 | break; | 963 | break; |
926 | case TUNER_RTL2832_E4000: { | 964 | case TUNER_RTL2832_E4000: { |
@@ -951,7 +989,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) | |||
951 | i2c_set_adapdata(i2c_adap_internal, d); | 989 | i2c_set_adapdata(i2c_adap_internal, d); |
952 | 990 | ||
953 | /* attach SDR */ | 991 | /* attach SDR */ |
954 | dvb_attach(rtl2832_sdr_attach, adap->fe[0], | 992 | dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], |
955 | i2c_adap_internal, | 993 | i2c_adap_internal, |
956 | &rtl28xxu_rtl2832_e4000_config, sd); | 994 | &rtl28xxu_rtl2832_e4000_config, sd); |
957 | } | 995 | } |
@@ -982,7 +1020,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) | |||
982 | adap->fe[0]->ops.tuner_ops.get_rf_strength; | 1020 | adap->fe[0]->ops.tuner_ops.get_rf_strength; |
983 | 1021 | ||
984 | /* attach SDR */ | 1022 | /* attach SDR */ |
985 | dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, | 1023 | dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, |
986 | &rtl28xxu_rtl2832_r820t_config, NULL); | 1024 | &rtl28xxu_rtl2832_r820t_config, NULL); |
987 | break; | 1025 | break; |
988 | case TUNER_RTL2832_R828D: | 1026 | case TUNER_RTL2832_R828D: |
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index 7277dbd2afcd..ecbcb39feb71 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c | |||
@@ -1430,10 +1430,8 @@ static const struct usb_device_id device_table[] = { | |||
1430 | {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)}, | 1430 | {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)}, |
1431 | {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)}, | 1431 | {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)}, |
1432 | {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)}, | 1432 | {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)}, |
1433 | #if !IS_ENABLED(CONFIG_USB_SN9C102) | ||
1434 | {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)}, | 1433 | {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)}, |
1435 | {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)}, | 1434 | {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)}, |
1436 | #endif | ||
1437 | {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */ | 1435 | {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */ |
1438 | {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)}, | 1436 | {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)}, |
1439 | {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)}, | 1437 | {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)}, |
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 04b2daf567be..7e2411c36419 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c | |||
@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 { | |||
178 | 178 | ||
179 | static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) | 179 | static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) |
180 | { | 180 | { |
181 | if (get_user(kp->type, &up->type)) | ||
182 | return -EFAULT; | ||
183 | |||
181 | switch (kp->type) { | 184 | switch (kp->type) { |
182 | case V4L2_BUF_TYPE_VIDEO_CAPTURE: | 185 | case V4L2_BUF_TYPE_VIDEO_CAPTURE: |
183 | case V4L2_BUF_TYPE_VIDEO_OUTPUT: | 186 | case V4L2_BUF_TYPE_VIDEO_OUTPUT: |
@@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us | |||
204 | 207 | ||
205 | static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) | 208 | static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) |
206 | { | 209 | { |
207 | if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) || | 210 | if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32))) |
208 | get_user(kp->type, &up->type)) | 211 | return -EFAULT; |
209 | return -EFAULT; | ||
210 | return __get_v4l2_format32(kp, up); | 212 | return __get_v4l2_format32(kp, up); |
211 | } | 213 | } |
212 | 214 | ||
213 | static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) | 215 | static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) |
214 | { | 216 | { |
215 | if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || | 217 | if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || |
216 | copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt))) | 218 | copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format))) |
217 | return -EFAULT; | 219 | return -EFAULT; |
218 | return __get_v4l2_format32(&kp->format, &up->format); | 220 | return __get_v4l2_format32(&kp->format, &up->format); |
219 | } | 221 | } |
220 | 222 | ||
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c index 110c03627051..b59a17fb7c3e 100644 --- a/drivers/memory/mvebu-devbus.c +++ b/drivers/memory/mvebu-devbus.c | |||
@@ -108,8 +108,19 @@ static int devbus_set_timing_params(struct devbus *devbus, | |||
108 | node->full_name); | 108 | node->full_name); |
109 | return err; | 109 | return err; |
110 | } | 110 | } |
111 | /* Convert bit width to byte width */ | 111 | |
112 | r.bus_width /= 8; | 112 | /* |
113 | * The bus width is encoded into the register as 0 for 8 bits, | ||
114 | * and 1 for 16 bits, so we do the necessary conversion here. | ||
115 | */ | ||
116 | if (r.bus_width == 8) | ||
117 | r.bus_width = 0; | ||
118 | else if (r.bus_width == 16) | ||
119 | r.bus_width = 1; | ||
120 | else { | ||
121 | dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width); | ||
122 | return -EINVAL; | ||
123 | } | ||
113 | 124 | ||
114 | err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps", | 125 | err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps", |
115 | &r.badr_skew); | 126 | &r.badr_skew); |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 9f69e818b000..93580a47cc54 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) | |||
82 | } | 82 | } |
83 | 83 | ||
84 | /* Forward declaration */ | 84 | /* Forward declaration */ |
85 | static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); | 85 | static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], |
86 | bool strict_match); | ||
86 | static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp); | 87 | static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp); |
87 | static void rlb_src_unlink(struct bonding *bond, u32 index); | 88 | static void rlb_src_unlink(struct bonding *bond, u32 index); |
88 | static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, | 89 | static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, |
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[]) | |||
459 | 460 | ||
460 | bond->alb_info.rlb_promisc_timeout_counter = 0; | 461 | bond->alb_info.rlb_promisc_timeout_counter = 0; |
461 | 462 | ||
462 | alb_send_learning_packets(bond->curr_active_slave, addr); | 463 | alb_send_learning_packets(bond->curr_active_slave, addr, true); |
463 | } | 464 | } |
464 | 465 | ||
465 | /* slave being removed should not be active at this point | 466 | /* slave being removed should not be active at this point |
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) | |||
995 | /*********************** tlb/rlb shared functions *********************/ | 996 | /*********************** tlb/rlb shared functions *********************/ |
996 | 997 | ||
997 | static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], | 998 | static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], |
998 | u16 vid) | 999 | __be16 vlan_proto, u16 vid) |
999 | { | 1000 | { |
1000 | struct learning_pkt pkt; | 1001 | struct learning_pkt pkt; |
1001 | struct sk_buff *skb; | 1002 | struct sk_buff *skb; |
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], | |||
1021 | skb->dev = slave->dev; | 1022 | skb->dev = slave->dev; |
1022 | 1023 | ||
1023 | if (vid) { | 1024 | if (vid) { |
1024 | skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid); | 1025 | skb = vlan_put_tag(skb, vlan_proto, vid); |
1025 | if (!skb) { | 1026 | if (!skb) { |
1026 | pr_err("%s: Error: failed to insert VLAN tag\n", | 1027 | pr_err("%s: Error: failed to insert VLAN tag\n", |
1027 | slave->bond->dev->name); | 1028 | slave->bond->dev->name); |
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], | |||
1032 | dev_queue_xmit(skb); | 1033 | dev_queue_xmit(skb); |
1033 | } | 1034 | } |
1034 | 1035 | ||
1035 | 1036 | static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], | |
1036 | static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) | 1037 | bool strict_match) |
1037 | { | 1038 | { |
1038 | struct bonding *bond = bond_get_bond_by_slave(slave); | 1039 | struct bonding *bond = bond_get_bond_by_slave(slave); |
1039 | struct net_device *upper; | 1040 | struct net_device *upper; |
1040 | struct list_head *iter; | 1041 | struct list_head *iter; |
1041 | 1042 | ||
1042 | /* send untagged */ | 1043 | /* send untagged */ |
1043 | alb_send_lp_vid(slave, mac_addr, 0); | 1044 | alb_send_lp_vid(slave, mac_addr, 0, 0); |
1044 | 1045 | ||
1045 | /* loop through vlans and send one packet for each */ | 1046 | /* loop through vlans and send one packet for each */ |
1046 | rcu_read_lock(); | 1047 | rcu_read_lock(); |
1047 | netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { | 1048 | netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { |
1048 | if (upper->priv_flags & IFF_802_1Q_VLAN) | 1049 | if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) { |
1049 | alb_send_lp_vid(slave, mac_addr, | 1050 | if (strict_match && |
1050 | vlan_dev_vlan_id(upper)); | 1051 | ether_addr_equal_64bits(mac_addr, |
1052 | upper->dev_addr)) { | ||
1053 | alb_send_lp_vid(slave, mac_addr, | ||
1054 | vlan_dev_vlan_proto(upper), | ||
1055 | vlan_dev_vlan_id(upper)); | ||
1056 | } else if (!strict_match) { | ||
1057 | alb_send_lp_vid(slave, upper->dev_addr, | ||
1058 | vlan_dev_vlan_proto(upper), | ||
1059 | vlan_dev_vlan_id(upper)); | ||
1060 | } | ||
1061 | } | ||
1051 | } | 1062 | } |
1052 | rcu_read_unlock(); | 1063 | rcu_read_unlock(); |
1053 | } | 1064 | } |
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1, | |||
1107 | 1118 | ||
1108 | /* fasten the change in the switch */ | 1119 | /* fasten the change in the switch */ |
1109 | if (SLAVE_IS_OK(slave1)) { | 1120 | if (SLAVE_IS_OK(slave1)) { |
1110 | alb_send_learning_packets(slave1, slave1->dev->dev_addr); | 1121 | alb_send_learning_packets(slave1, slave1->dev->dev_addr, false); |
1111 | if (bond->alb_info.rlb_enabled) { | 1122 | if (bond->alb_info.rlb_enabled) { |
1112 | /* inform the clients that the mac address | 1123 | /* inform the clients that the mac address |
1113 | * has changed | 1124 | * has changed |
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1, | |||
1119 | } | 1130 | } |
1120 | 1131 | ||
1121 | if (SLAVE_IS_OK(slave2)) { | 1132 | if (SLAVE_IS_OK(slave2)) { |
1122 | alb_send_learning_packets(slave2, slave2->dev->dev_addr); | 1133 | alb_send_learning_packets(slave2, slave2->dev->dev_addr, false); |
1123 | if (bond->alb_info.rlb_enabled) { | 1134 | if (bond->alb_info.rlb_enabled) { |
1124 | /* inform the clients that the mac address | 1135 | /* inform the clients that the mac address |
1125 | * has changed | 1136 | * has changed |
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work) | |||
1490 | 1501 | ||
1491 | /* send learning packets */ | 1502 | /* send learning packets */ |
1492 | if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) { | 1503 | if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) { |
1504 | bool strict_match; | ||
1505 | |||
1493 | /* change of curr_active_slave involves swapping of mac addresses. | 1506 | /* change of curr_active_slave involves swapping of mac addresses. |
1494 | * in order to avoid this swapping from happening while | 1507 | * in order to avoid this swapping from happening while |
1495 | * sending the learning packets, the curr_slave_lock must be held for | 1508 | * sending the learning packets, the curr_slave_lock must be held for |
@@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work) | |||
1497 | */ | 1510 | */ |
1498 | read_lock(&bond->curr_slave_lock); | 1511 | read_lock(&bond->curr_slave_lock); |
1499 | 1512 | ||
1500 | bond_for_each_slave_rcu(bond, slave, iter) | 1513 | bond_for_each_slave_rcu(bond, slave, iter) { |
1501 | alb_send_learning_packets(slave, slave->dev->dev_addr); | 1514 | /* If updating current_active, use all currently |
1515 | * user mac addreses (!strict_match). Otherwise, only | ||
1516 | * use mac of the slave device. | ||
1517 | */ | ||
1518 | strict_match = (slave != bond->curr_active_slave); | ||
1519 | alb_send_learning_packets(slave, slave->dev->dev_addr, | ||
1520 | strict_match); | ||
1521 | } | ||
1502 | 1522 | ||
1503 | read_unlock(&bond->curr_slave_lock); | 1523 | read_unlock(&bond->curr_slave_lock); |
1504 | 1524 | ||
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave | |||
1721 | } else { | 1741 | } else { |
1722 | /* set the new_slave to the bond mac address */ | 1742 | /* set the new_slave to the bond mac address */ |
1723 | alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr); | 1743 | alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr); |
1724 | alb_send_learning_packets(new_slave, bond->dev->dev_addr); | 1744 | alb_send_learning_packets(new_slave, bond->dev->dev_addr, |
1745 | false); | ||
1725 | } | 1746 | } |
1726 | 1747 | ||
1727 | write_lock_bh(&bond->curr_slave_lock); | 1748 | write_lock_bh(&bond->curr_slave_lock); |
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) | |||
1764 | alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr); | 1785 | alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr); |
1765 | 1786 | ||
1766 | read_lock(&bond->lock); | 1787 | read_lock(&bond->lock); |
1767 | alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); | 1788 | alb_send_learning_packets(bond->curr_active_slave, |
1789 | bond_dev->dev_addr, false); | ||
1768 | if (bond->alb_info.rlb_enabled) { | 1790 | if (bond->alb_info.rlb_enabled) { |
1769 | /* inform clients mac address has changed */ | 1791 | /* inform clients mac address has changed */ |
1770 | rlb_req_update_slave_clients(bond, bond->curr_active_slave); | 1792 | rlb_req_update_slave_clients(bond, bond->curr_active_slave); |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 69aff72c8957..d3a67896d435 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip) | |||
2126 | */ | 2126 | */ |
2127 | static void bond_arp_send(struct net_device *slave_dev, int arp_op, | 2127 | static void bond_arp_send(struct net_device *slave_dev, int arp_op, |
2128 | __be32 dest_ip, __be32 src_ip, | 2128 | __be32 dest_ip, __be32 src_ip, |
2129 | struct bond_vlan_tag *inner, | 2129 | struct bond_vlan_tag *tags) |
2130 | struct bond_vlan_tag *outer) | ||
2131 | { | 2130 | { |
2132 | struct sk_buff *skb; | 2131 | struct sk_buff *skb; |
2132 | int i; | ||
2133 | 2133 | ||
2134 | pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n", | 2134 | pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n", |
2135 | arp_op, slave_dev->name, &dest_ip, &src_ip); | 2135 | arp_op, slave_dev->name, &dest_ip, &src_ip); |
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, | |||
2141 | net_err_ratelimited("ARP packet allocation failed\n"); | 2141 | net_err_ratelimited("ARP packet allocation failed\n"); |
2142 | return; | 2142 | return; |
2143 | } | 2143 | } |
2144 | if (outer->vlan_id) { | ||
2145 | if (inner->vlan_id) { | ||
2146 | pr_debug("inner tag: proto %X vid %X\n", | ||
2147 | ntohs(inner->vlan_proto), inner->vlan_id); | ||
2148 | skb = __vlan_put_tag(skb, inner->vlan_proto, | ||
2149 | inner->vlan_id); | ||
2150 | if (!skb) { | ||
2151 | net_err_ratelimited("failed to insert inner VLAN tag\n"); | ||
2152 | return; | ||
2153 | } | ||
2154 | } | ||
2155 | 2144 | ||
2156 | pr_debug("outer reg: proto %X vid %X\n", | 2145 | /* Go through all the tags backwards and add them to the packet */ |
2157 | ntohs(outer->vlan_proto), outer->vlan_id); | 2146 | for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) { |
2158 | skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id); | 2147 | if (!tags[i].vlan_id) |
2148 | continue; | ||
2149 | |||
2150 | pr_debug("inner tag: proto %X vid %X\n", | ||
2151 | ntohs(tags[i].vlan_proto), tags[i].vlan_id); | ||
2152 | skb = __vlan_put_tag(skb, tags[i].vlan_proto, | ||
2153 | tags[i].vlan_id); | ||
2154 | if (!skb) { | ||
2155 | net_err_ratelimited("failed to insert inner VLAN tag\n"); | ||
2156 | return; | ||
2157 | } | ||
2158 | } | ||
2159 | /* Set the outer tag */ | ||
2160 | if (tags[0].vlan_id) { | ||
2161 | pr_debug("outer tag: proto %X vid %X\n", | ||
2162 | ntohs(tags[0].vlan_proto), tags[0].vlan_id); | ||
2163 | skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id); | ||
2159 | if (!skb) { | 2164 | if (!skb) { |
2160 | net_err_ratelimited("failed to insert outer VLAN tag\n"); | 2165 | net_err_ratelimited("failed to insert outer VLAN tag\n"); |
2161 | return; | 2166 | return; |
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, | |||
2164 | arp_xmit(skb); | 2169 | arp_xmit(skb); |
2165 | } | 2170 | } |
2166 | 2171 | ||
2172 | /* Validate the device path between the @start_dev and the @end_dev. | ||
2173 | * The path is valid if the @end_dev is reachable through device | ||
2174 | * stacking. | ||
2175 | * When the path is validated, collect any vlan information in the | ||
2176 | * path. | ||
2177 | */ | ||
2178 | static bool bond_verify_device_path(struct net_device *start_dev, | ||
2179 | struct net_device *end_dev, | ||
2180 | struct bond_vlan_tag *tags) | ||
2181 | { | ||
2182 | struct net_device *upper; | ||
2183 | struct list_head *iter; | ||
2184 | int idx; | ||
2185 | |||
2186 | if (start_dev == end_dev) | ||
2187 | return true; | ||
2188 | |||
2189 | netdev_for_each_upper_dev_rcu(start_dev, upper, iter) { | ||
2190 | if (bond_verify_device_path(upper, end_dev, tags)) { | ||
2191 | if (is_vlan_dev(upper)) { | ||
2192 | idx = vlan_get_encap_level(upper); | ||
2193 | if (idx >= BOND_MAX_VLAN_ENCAP) | ||
2194 | return false; | ||
2195 | |||
2196 | tags[idx].vlan_proto = | ||
2197 | vlan_dev_vlan_proto(upper); | ||
2198 | tags[idx].vlan_id = vlan_dev_vlan_id(upper); | ||
2199 | } | ||
2200 | return true; | ||
2201 | } | ||
2202 | } | ||
2203 | |||
2204 | return false; | ||
2205 | } | ||
2167 | 2206 | ||
2168 | static void bond_arp_send_all(struct bonding *bond, struct slave *slave) | 2207 | static void bond_arp_send_all(struct bonding *bond, struct slave *slave) |
2169 | { | 2208 | { |
2170 | struct net_device *upper, *vlan_upper; | ||
2171 | struct list_head *iter, *vlan_iter; | ||
2172 | struct rtable *rt; | 2209 | struct rtable *rt; |
2173 | struct bond_vlan_tag inner, outer; | 2210 | struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP]; |
2174 | __be32 *targets = bond->params.arp_targets, addr; | 2211 | __be32 *targets = bond->params.arp_targets, addr; |
2175 | int i; | 2212 | int i; |
2213 | bool ret; | ||
2176 | 2214 | ||
2177 | for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { | 2215 | for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { |
2178 | pr_debug("basa: target %pI4\n", &targets[i]); | 2216 | pr_debug("basa: target %pI4\n", &targets[i]); |
2179 | inner.vlan_proto = 0; | 2217 | memset(tags, 0, sizeof(tags)); |
2180 | inner.vlan_id = 0; | ||
2181 | outer.vlan_proto = 0; | ||
2182 | outer.vlan_id = 0; | ||
2183 | 2218 | ||
2184 | /* Find out through which dev should the packet go */ | 2219 | /* Find out through which dev should the packet go */ |
2185 | rt = ip_route_output(dev_net(bond->dev), targets[i], 0, | 2220 | rt = ip_route_output(dev_net(bond->dev), targets[i], 0, |
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) | |||
2192 | net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", | 2227 | net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", |
2193 | bond->dev->name, | 2228 | bond->dev->name, |
2194 | &targets[i]); | 2229 | &targets[i]); |
2195 | bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer); | 2230 | bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], |
2231 | 0, tags); | ||
2196 | continue; | 2232 | continue; |
2197 | } | 2233 | } |
2198 | 2234 | ||
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) | |||
2201 | goto found; | 2237 | goto found; |
2202 | 2238 | ||
2203 | rcu_read_lock(); | 2239 | rcu_read_lock(); |
2204 | /* first we search only for vlan devices. for every vlan | 2240 | ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags); |
2205 | * found we verify its upper dev list, searching for the | ||
2206 | * rt->dst.dev. If found we save the tag of the vlan and | ||
2207 | * proceed to send the packet. | ||
2208 | */ | ||
2209 | netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper, | ||
2210 | vlan_iter) { | ||
2211 | if (!is_vlan_dev(vlan_upper)) | ||
2212 | continue; | ||
2213 | |||
2214 | if (vlan_upper == rt->dst.dev) { | ||
2215 | outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper); | ||
2216 | outer.vlan_id = vlan_dev_vlan_id(vlan_upper); | ||
2217 | rcu_read_unlock(); | ||
2218 | goto found; | ||
2219 | } | ||
2220 | netdev_for_each_all_upper_dev_rcu(vlan_upper, upper, | ||
2221 | iter) { | ||
2222 | if (upper == rt->dst.dev) { | ||
2223 | /* If the upper dev is a vlan dev too, | ||
2224 | * set the vlan tag to inner tag. | ||
2225 | */ | ||
2226 | if (is_vlan_dev(upper)) { | ||
2227 | inner.vlan_proto = vlan_dev_vlan_proto(upper); | ||
2228 | inner.vlan_id = vlan_dev_vlan_id(upper); | ||
2229 | } | ||
2230 | outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper); | ||
2231 | outer.vlan_id = vlan_dev_vlan_id(vlan_upper); | ||
2232 | rcu_read_unlock(); | ||
2233 | goto found; | ||
2234 | } | ||
2235 | } | ||
2236 | } | ||
2237 | |||
2238 | /* if the device we're looking for is not on top of any of | ||
2239 | * our upper vlans, then just search for any dev that | ||
2240 | * matches, and in case it's a vlan - save the id | ||
2241 | */ | ||
2242 | netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { | ||
2243 | if (upper == rt->dst.dev) { | ||
2244 | rcu_read_unlock(); | ||
2245 | goto found; | ||
2246 | } | ||
2247 | } | ||
2248 | rcu_read_unlock(); | 2241 | rcu_read_unlock(); |
2249 | 2242 | ||
2243 | if (ret) | ||
2244 | goto found; | ||
2245 | |||
2250 | /* Not our device - skip */ | 2246 | /* Not our device - skip */ |
2251 | pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", | 2247 | pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", |
2252 | bond->dev->name, &targets[i], | 2248 | bond->dev->name, &targets[i], |
@@ -2259,7 +2255,7 @@ found: | |||
2259 | addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); | 2255 | addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); |
2260 | ip_rt_put(rt); | 2256 | ip_rt_put(rt); |
2261 | bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], | 2257 | bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], |
2262 | addr, &inner, &outer); | 2258 | addr, tags); |
2263 | } | 2259 | } |
2264 | } | 2260 | } |
2265 | 2261 | ||
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 724e30fa20b9..832070298446 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c | |||
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = { | |||
125 | static const struct bond_opt_value bond_intmax_tbl[] = { | 125 | static const struct bond_opt_value bond_intmax_tbl[] = { |
126 | { "off", 0, BOND_VALFLAG_DEFAULT}, | 126 | { "off", 0, BOND_VALFLAG_DEFAULT}, |
127 | { "maxval", INT_MAX, BOND_VALFLAG_MAX}, | 127 | { "maxval", INT_MAX, BOND_VALFLAG_MAX}, |
128 | { NULL, -1, 0} | ||
128 | }; | 129 | }; |
129 | 130 | ||
130 | static const struct bond_opt_value bond_lacp_rate_tbl[] = { | 131 | static const struct bond_opt_value bond_lacp_rate_tbl[] = { |
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index b8bdd0acc8f3..00bea320e3b5 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -36,6 +36,7 @@ | |||
36 | 36 | ||
37 | #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" | 37 | #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" |
38 | 38 | ||
39 | #define BOND_MAX_VLAN_ENCAP 2 | ||
39 | #define BOND_MAX_ARP_TARGETS 16 | 40 | #define BOND_MAX_ARP_TARGETS 16 |
40 | 41 | ||
41 | #define BOND_DEFAULT_MIIMON 100 | 42 | #define BOND_DEFAULT_MIIMON 100 |
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig index 8ab7103d4f44..61ffc12d8fd8 100644 --- a/drivers/net/can/c_can/Kconfig +++ b/drivers/net/can/c_can/Kconfig | |||
@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM | |||
14 | SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com) | 14 | SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com) |
15 | boards like am335x, dm814x, dm813x and dm811x. | 15 | boards like am335x, dm814x, dm813x and dm811x. |
16 | 16 | ||
17 | config CAN_C_CAN_STRICT_FRAME_ORDERING | ||
18 | bool "Force a strict RX CAN frame order (may cause frame loss)" | ||
19 | ---help--- | ||
20 | The RX split buffer prevents packet reordering but can cause packet | ||
21 | loss. Only enable this option when you accept to lose CAN frames | ||
22 | in favour of getting the received CAN frames in the correct order. | ||
23 | |||
24 | config CAN_C_CAN_PCI | 17 | config CAN_C_CAN_PCI |
25 | tristate "Generic PCI Bus based C_CAN/D_CAN driver" | 18 | tristate "Generic PCI Bus based C_CAN/D_CAN driver" |
26 | depends on PCI | 19 | depends on PCI |
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index a2ca820b5373..95e04e2002da 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
@@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend) | |||
732 | static inline void c_can_rx_object_get(struct net_device *dev, | 732 | static inline void c_can_rx_object_get(struct net_device *dev, |
733 | struct c_can_priv *priv, u32 obj) | 733 | struct c_can_priv *priv, u32 obj) |
734 | { | 734 | { |
735 | #ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING | ||
736 | if (obj < C_CAN_MSG_RX_LOW_LAST) | ||
737 | c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW); | ||
738 | else | ||
739 | #endif | ||
740 | c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); | 735 | c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); |
741 | } | 736 | } |
742 | 737 | ||
743 | static inline void c_can_rx_finalize(struct net_device *dev, | 738 | static inline void c_can_rx_finalize(struct net_device *dev, |
744 | struct c_can_priv *priv, u32 obj) | 739 | struct c_can_priv *priv, u32 obj) |
745 | { | 740 | { |
746 | #ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING | ||
747 | if (obj < C_CAN_MSG_RX_LOW_LAST) | ||
748 | priv->rxmasked |= BIT(obj - 1); | ||
749 | else if (obj == C_CAN_MSG_RX_LOW_LAST) { | ||
750 | priv->rxmasked = 0; | ||
751 | /* activate all lower message objects */ | ||
752 | c_can_activate_all_lower_rx_msg_obj(dev, IF_RX); | ||
753 | } | ||
754 | #endif | ||
755 | if (priv->type != BOSCH_D_CAN) | 741 | if (priv->type != BOSCH_D_CAN) |
756 | c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT); | 742 | c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT); |
757 | } | 743 | } |
@@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv) | |||
799 | { | 785 | { |
800 | u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); | 786 | u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); |
801 | 787 | ||
802 | #ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING | ||
803 | pend &= ~priv->rxmasked; | ||
804 | #endif | ||
805 | return pend; | 788 | return pend; |
806 | } | 789 | } |
807 | 790 | ||
@@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv) | |||
814 | * has arrived. To work-around this issue, we keep two groups of message | 797 | * has arrived. To work-around this issue, we keep two groups of message |
815 | * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. | 798 | * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. |
816 | * | 799 | * |
817 | * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y | ||
818 | * | ||
819 | * To ensure in-order frame reception we use the following | ||
820 | * approach while re-activating a message object to receive further | ||
821 | * frames: | ||
822 | * - if the current message object number is lower than | ||
823 | * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing | ||
824 | * the INTPND bit. | ||
825 | * - if the current message object number is equal to | ||
826 | * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower | ||
827 | * receive message objects. | ||
828 | * - if the current message object number is greater than | ||
829 | * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of | ||
830 | * only this message object. | ||
831 | * | ||
832 | * This can cause packet loss! | ||
833 | * | ||
834 | * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n | ||
835 | * | ||
836 | * We clear the newdat bit right away. | 800 | * We clear the newdat bit right away. |
837 | * | 801 | * |
838 | * This can result in packet reordering when the readout is slow. | 802 | * This can result in packet reordering when the readout is slow. |
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index c540e3d12e3d..564933ae218c 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c | |||
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
551 | { | 551 | { |
552 | struct sja1000_priv *priv; | 552 | struct sja1000_priv *priv; |
553 | struct peak_pci_chan *chan; | 553 | struct peak_pci_chan *chan; |
554 | struct net_device *dev; | 554 | struct net_device *dev, *prev_dev; |
555 | void __iomem *cfg_base, *reg_base; | 555 | void __iomem *cfg_base, *reg_base; |
556 | u16 sub_sys_id, icr; | 556 | u16 sub_sys_id, icr; |
557 | int i, err, channels; | 557 | int i, err, channels; |
@@ -688,11 +688,13 @@ failure_remove_channels: | |||
688 | writew(0x0, cfg_base + PITA_ICR + 2); | 688 | writew(0x0, cfg_base + PITA_ICR + 2); |
689 | 689 | ||
690 | chan = NULL; | 690 | chan = NULL; |
691 | for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) { | 691 | for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) { |
692 | unregister_sja1000dev(dev); | ||
693 | free_sja1000dev(dev); | ||
694 | priv = netdev_priv(dev); | 692 | priv = netdev_priv(dev); |
695 | chan = priv->priv; | 693 | chan = priv->priv; |
694 | prev_dev = chan->prev_dev; | ||
695 | |||
696 | unregister_sja1000dev(dev); | ||
697 | free_sja1000dev(dev); | ||
696 | } | 698 | } |
697 | 699 | ||
698 | /* free any PCIeC resources too */ | 700 | /* free any PCIeC resources too */ |
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev) | |||
726 | 728 | ||
727 | /* Loop over all registered devices */ | 729 | /* Loop over all registered devices */ |
728 | while (1) { | 730 | while (1) { |
731 | struct net_device *prev_dev = chan->prev_dev; | ||
732 | |||
729 | dev_info(&pdev->dev, "removing device %s\n", dev->name); | 733 | dev_info(&pdev->dev, "removing device %s\n", dev->name); |
730 | unregister_sja1000dev(dev); | 734 | unregister_sja1000dev(dev); |
731 | free_sja1000dev(dev); | 735 | free_sja1000dev(dev); |
732 | dev = chan->prev_dev; | 736 | dev = prev_dev; |
733 | 737 | ||
734 | if (!dev) { | 738 | if (!dev) { |
735 | /* do that only for first channel */ | 739 | /* do that only for first channel */ |
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 39b26fe28d10..d7401017a3f1 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig | |||
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig" | |||
35 | source "drivers/net/ethernet/chelsio/Kconfig" | 35 | source "drivers/net/ethernet/chelsio/Kconfig" |
36 | source "drivers/net/ethernet/cirrus/Kconfig" | 36 | source "drivers/net/ethernet/cirrus/Kconfig" |
37 | source "drivers/net/ethernet/cisco/Kconfig" | 37 | source "drivers/net/ethernet/cisco/Kconfig" |
38 | |||
39 | config CX_ECAT | ||
40 | tristate "Beckhoff CX5020 EtherCAT master support" | ||
41 | depends on PCI | ||
42 | ---help--- | ||
43 | Driver for EtherCAT master module located on CCAT FPGA | ||
44 | that can be found on Beckhoff CX5020, and possibly other of CX | ||
45 | Beckhoff CX series industrial PCs. | ||
46 | |||
47 | To compile this driver as a module, choose M here. The module | ||
48 | will be called ec_bhf. | ||
49 | |||
38 | source "drivers/net/ethernet/davicom/Kconfig" | 50 | source "drivers/net/ethernet/davicom/Kconfig" |
39 | 51 | ||
40 | config DNET | 52 | config DNET |
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 545d0b3b9cb4..35190e36c456 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile | |||
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ | |||
21 | obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ | 21 | obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ |
22 | obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ | 22 | obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ |
23 | obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ | 23 | obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ |
24 | obj-$(CONFIG_CX_ECAT) += ec_bhf.o | ||
24 | obj-$(CONFIG_DM9000) += davicom/ | 25 | obj-$(CONFIG_DM9000) += davicom/ |
25 | obj-$(CONFIG_DNET) += dnet.o | 26 | obj-$(CONFIG_DNET) += dnet.o |
26 | obj-$(CONFIG_NET_VENDOR_DEC) += dec/ | 27 | obj-$(CONFIG_NET_VENDOR_DEC) += dec/ |
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile index d4a187e45369..3eff2fd3997e 100644 --- a/drivers/net/ethernet/altera/Makefile +++ b/drivers/net/ethernet/altera/Makefile | |||
@@ -5,3 +5,4 @@ | |||
5 | obj-$(CONFIG_ALTERA_TSE) += altera_tse.o | 5 | obj-$(CONFIG_ALTERA_TSE) += altera_tse.o |
6 | altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ | 6 | altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ |
7 | altera_msgdma.o altera_sgdma.o altera_utils.o | 7 | altera_msgdma.o altera_sgdma.o altera_utils.o |
8 | ccflags-y += -D__CHECK_ENDIAN__ | ||
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c index 4d1f2fdd5c32..0fb986ba3290 100644 --- a/drivers/net/ethernet/altera/altera_msgdma.c +++ b/drivers/net/ethernet/altera/altera_msgdma.c | |||
@@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv) | |||
37 | void msgdma_reset(struct altera_tse_private *priv) | 37 | void msgdma_reset(struct altera_tse_private *priv) |
38 | { | 38 | { |
39 | int counter; | 39 | int counter; |
40 | struct msgdma_csr *txcsr = | ||
41 | (struct msgdma_csr *)priv->tx_dma_csr; | ||
42 | struct msgdma_csr *rxcsr = | ||
43 | (struct msgdma_csr *)priv->rx_dma_csr; | ||
44 | 40 | ||
45 | /* Reset Rx mSGDMA */ | 41 | /* Reset Rx mSGDMA */ |
46 | iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); | 42 | csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, |
47 | iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control); | 43 | msgdma_csroffs(status)); |
44 | csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr, | ||
45 | msgdma_csroffs(control)); | ||
48 | 46 | ||
49 | counter = 0; | 47 | counter = 0; |
50 | while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { | 48 | while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { |
51 | if (tse_bit_is_clear(&rxcsr->status, | 49 | if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status), |
52 | MSGDMA_CSR_STAT_RESETTING)) | 50 | MSGDMA_CSR_STAT_RESETTING)) |
53 | break; | 51 | break; |
54 | udelay(1); | 52 | udelay(1); |
@@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv) | |||
59 | "TSE Rx mSGDMA resetting bit never cleared!\n"); | 57 | "TSE Rx mSGDMA resetting bit never cleared!\n"); |
60 | 58 | ||
61 | /* clear all status bits */ | 59 | /* clear all status bits */ |
62 | iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); | 60 | csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status)); |
63 | 61 | ||
64 | /* Reset Tx mSGDMA */ | 62 | /* Reset Tx mSGDMA */ |
65 | iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); | 63 | csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, |
66 | iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control); | 64 | msgdma_csroffs(status)); |
65 | |||
66 | csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr, | ||
67 | msgdma_csroffs(control)); | ||
67 | 68 | ||
68 | counter = 0; | 69 | counter = 0; |
69 | while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { | 70 | while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { |
70 | if (tse_bit_is_clear(&txcsr->status, | 71 | if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status), |
71 | MSGDMA_CSR_STAT_RESETTING)) | 72 | MSGDMA_CSR_STAT_RESETTING)) |
72 | break; | 73 | break; |
73 | udelay(1); | 74 | udelay(1); |
@@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv) | |||
78 | "TSE Tx mSGDMA resetting bit never cleared!\n"); | 79 | "TSE Tx mSGDMA resetting bit never cleared!\n"); |
79 | 80 | ||
80 | /* clear all status bits */ | 81 | /* clear all status bits */ |
81 | iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); | 82 | csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status)); |
82 | } | 83 | } |
83 | 84 | ||
84 | void msgdma_disable_rxirq(struct altera_tse_private *priv) | 85 | void msgdma_disable_rxirq(struct altera_tse_private *priv) |
85 | { | 86 | { |
86 | struct msgdma_csr *csr = priv->rx_dma_csr; | 87 | tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control), |
87 | tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); | 88 | MSGDMA_CSR_CTL_GLOBAL_INTR); |
88 | } | 89 | } |
89 | 90 | ||
90 | void msgdma_enable_rxirq(struct altera_tse_private *priv) | 91 | void msgdma_enable_rxirq(struct altera_tse_private *priv) |
91 | { | 92 | { |
92 | struct msgdma_csr *csr = priv->rx_dma_csr; | 93 | tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control), |
93 | tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); | 94 | MSGDMA_CSR_CTL_GLOBAL_INTR); |
94 | } | 95 | } |
95 | 96 | ||
96 | void msgdma_disable_txirq(struct altera_tse_private *priv) | 97 | void msgdma_disable_txirq(struct altera_tse_private *priv) |
97 | { | 98 | { |
98 | struct msgdma_csr *csr = priv->tx_dma_csr; | 99 | tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control), |
99 | tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); | 100 | MSGDMA_CSR_CTL_GLOBAL_INTR); |
100 | } | 101 | } |
101 | 102 | ||
102 | void msgdma_enable_txirq(struct altera_tse_private *priv) | 103 | void msgdma_enable_txirq(struct altera_tse_private *priv) |
103 | { | 104 | { |
104 | struct msgdma_csr *csr = priv->tx_dma_csr; | 105 | tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control), |
105 | tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); | 106 | MSGDMA_CSR_CTL_GLOBAL_INTR); |
106 | } | 107 | } |
107 | 108 | ||
108 | void msgdma_clear_rxirq(struct altera_tse_private *priv) | 109 | void msgdma_clear_rxirq(struct altera_tse_private *priv) |
109 | { | 110 | { |
110 | struct msgdma_csr *csr = priv->rx_dma_csr; | 111 | csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status)); |
111 | iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status); | ||
112 | } | 112 | } |
113 | 113 | ||
114 | void msgdma_clear_txirq(struct altera_tse_private *priv) | 114 | void msgdma_clear_txirq(struct altera_tse_private *priv) |
115 | { | 115 | { |
116 | struct msgdma_csr *csr = priv->tx_dma_csr; | 116 | csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status)); |
117 | iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status); | ||
118 | } | 117 | } |
119 | 118 | ||
120 | /* return 0 to indicate transmit is pending */ | 119 | /* return 0 to indicate transmit is pending */ |
121 | int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) | 120 | int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) |
122 | { | 121 | { |
123 | struct msgdma_extended_desc *desc = priv->tx_dma_desc; | 122 | csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc, |
124 | 123 | msgdma_descroffs(read_addr_lo)); | |
125 | iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo); | 124 | csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc, |
126 | iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi); | 125 | msgdma_descroffs(read_addr_hi)); |
127 | iowrite32(0, &desc->write_addr_lo); | 126 | csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo)); |
128 | iowrite32(0, &desc->write_addr_hi); | 127 | csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi)); |
129 | iowrite32(buffer->len, &desc->len); | 128 | csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len)); |
130 | iowrite32(0, &desc->burst_seq_num); | 129 | csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num)); |
131 | iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride); | 130 | csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc, |
132 | iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control); | 131 | msgdma_descroffs(stride)); |
132 | csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc, | ||
133 | msgdma_descroffs(control)); | ||
133 | return 0; | 134 | return 0; |
134 | } | 135 | } |
135 | 136 | ||
@@ -138,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) | |||
138 | u32 ready = 0; | 139 | u32 ready = 0; |
139 | u32 inuse; | 140 | u32 inuse; |
140 | u32 status; | 141 | u32 status; |
141 | struct msgdma_csr *txcsr = | ||
142 | (struct msgdma_csr *)priv->tx_dma_csr; | ||
143 | 142 | ||
144 | /* Get number of sent descriptors */ | 143 | /* Get number of sent descriptors */ |
145 | inuse = ioread32(&txcsr->rw_fill_level) & 0xffff; | 144 | inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level)) |
145 | & 0xffff; | ||
146 | 146 | ||
147 | if (inuse) { /* Tx FIFO is not empty */ | 147 | if (inuse) { /* Tx FIFO is not empty */ |
148 | ready = priv->tx_prod - priv->tx_cons - inuse - 1; | 148 | ready = priv->tx_prod - priv->tx_cons - inuse - 1; |
149 | } else { | 149 | } else { |
150 | /* Check for buffered last packet */ | 150 | /* Check for buffered last packet */ |
151 | status = ioread32(&txcsr->status); | 151 | status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); |
152 | if (status & MSGDMA_CSR_STAT_BUSY) | 152 | if (status & MSGDMA_CSR_STAT_BUSY) |
153 | ready = priv->tx_prod - priv->tx_cons - 1; | 153 | ready = priv->tx_prod - priv->tx_cons - 1; |
154 | else | 154 | else |
@@ -162,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) | |||
162 | void msgdma_add_rx_desc(struct altera_tse_private *priv, | 162 | void msgdma_add_rx_desc(struct altera_tse_private *priv, |
163 | struct tse_buffer *rxbuffer) | 163 | struct tse_buffer *rxbuffer) |
164 | { | 164 | { |
165 | struct msgdma_extended_desc *desc = priv->rx_dma_desc; | ||
166 | u32 len = priv->rx_dma_buf_sz; | 165 | u32 len = priv->rx_dma_buf_sz; |
167 | dma_addr_t dma_addr = rxbuffer->dma_addr; | 166 | dma_addr_t dma_addr = rxbuffer->dma_addr; |
168 | u32 control = (MSGDMA_DESC_CTL_END_ON_EOP | 167 | u32 control = (MSGDMA_DESC_CTL_END_ON_EOP |
@@ -172,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv, | |||
172 | | MSGDMA_DESC_CTL_TR_ERR_IRQ | 171 | | MSGDMA_DESC_CTL_TR_ERR_IRQ |
173 | | MSGDMA_DESC_CTL_GO); | 172 | | MSGDMA_DESC_CTL_GO); |
174 | 173 | ||
175 | iowrite32(0, &desc->read_addr_lo); | 174 | csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo)); |
176 | iowrite32(0, &desc->read_addr_hi); | 175 | csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi)); |
177 | iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo); | 176 | csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc, |
178 | iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi); | 177 | msgdma_descroffs(write_addr_lo)); |
179 | iowrite32(len, &desc->len); | 178 | csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc, |
180 | iowrite32(0, &desc->burst_seq_num); | 179 | msgdma_descroffs(write_addr_hi)); |
181 | iowrite32(0x00010001, &desc->stride); | 180 | csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len)); |
182 | iowrite32(control, &desc->control); | 181 | csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num)); |
182 | csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride)); | ||
183 | csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control)); | ||
183 | } | 184 | } |
184 | 185 | ||
185 | /* status is returned on upper 16 bits, | 186 | /* status is returned on upper 16 bits, |
@@ -190,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv) | |||
190 | u32 rxstatus = 0; | 191 | u32 rxstatus = 0; |
191 | u32 pktlength; | 192 | u32 pktlength; |
192 | u32 pktstatus; | 193 | u32 pktstatus; |
193 | struct msgdma_csr *rxcsr = | 194 | |
194 | (struct msgdma_csr *)priv->rx_dma_csr; | 195 | if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level)) |
195 | struct msgdma_response *rxresp = | 196 | & 0xffff) { |
196 | (struct msgdma_response *)priv->rx_dma_resp; | 197 | pktlength = csrrd32(priv->rx_dma_resp, |
197 | 198 | msgdma_respoffs(bytes_transferred)); | |
198 | if (ioread32(&rxcsr->resp_fill_level) & 0xffff) { | 199 | pktstatus = csrrd32(priv->rx_dma_resp, |
199 | pktlength = ioread32(&rxresp->bytes_transferred); | 200 | msgdma_respoffs(status)); |
200 | pktstatus = ioread32(&rxresp->status); | ||
201 | rxstatus = pktstatus; | 201 | rxstatus = pktstatus; |
202 | rxstatus = rxstatus << 16; | 202 | rxstatus = rxstatus << 16; |
203 | rxstatus |= (pktlength & 0xffff); | 203 | rxstatus |= (pktlength & 0xffff); |
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h index d7b59ba4019c..e335626e1b6b 100644 --- a/drivers/net/ethernet/altera/altera_msgdmahw.h +++ b/drivers/net/ethernet/altera/altera_msgdmahw.h | |||
@@ -17,15 +17,6 @@ | |||
17 | #ifndef __ALTERA_MSGDMAHW_H__ | 17 | #ifndef __ALTERA_MSGDMAHW_H__ |
18 | #define __ALTERA_MSGDMAHW_H__ | 18 | #define __ALTERA_MSGDMAHW_H__ |
19 | 19 | ||
20 | /* mSGDMA standard descriptor format | ||
21 | */ | ||
22 | struct msgdma_desc { | ||
23 | u32 read_addr; /* data buffer source address */ | ||
24 | u32 write_addr; /* data buffer destination address */ | ||
25 | u32 len; /* the number of bytes to transfer per descriptor */ | ||
26 | u32 control; /* characteristics of the transfer */ | ||
27 | }; | ||
28 | |||
29 | /* mSGDMA extended descriptor format | 20 | /* mSGDMA extended descriptor format |
30 | */ | 21 | */ |
31 | struct msgdma_extended_desc { | 22 | struct msgdma_extended_desc { |
@@ -159,6 +150,10 @@ struct msgdma_response { | |||
159 | u32 status; | 150 | u32 status; |
160 | }; | 151 | }; |
161 | 152 | ||
153 | #define msgdma_respoffs(a) (offsetof(struct msgdma_response, a)) | ||
154 | #define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a)) | ||
155 | #define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a)) | ||
156 | |||
162 | /* mSGDMA response register bit definitions | 157 | /* mSGDMA response register bit definitions |
163 | */ | 158 | */ |
164 | #define MSGDMA_RESP_EARLY_TERM BIT(8) | 159 | #define MSGDMA_RESP_EARLY_TERM BIT(8) |
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c index 9ce8630692b6..99cc56f451cf 100644 --- a/drivers/net/ethernet/altera/altera_sgdma.c +++ b/drivers/net/ethernet/altera/altera_sgdma.c | |||
@@ -20,8 +20,8 @@ | |||
20 | #include "altera_sgdmahw.h" | 20 | #include "altera_sgdmahw.h" |
21 | #include "altera_sgdma.h" | 21 | #include "altera_sgdma.h" |
22 | 22 | ||
23 | static void sgdma_setup_descrip(struct sgdma_descrip *desc, | 23 | static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, |
24 | struct sgdma_descrip *ndesc, | 24 | struct sgdma_descrip __iomem *ndesc, |
25 | dma_addr_t ndesc_phys, | 25 | dma_addr_t ndesc_phys, |
26 | dma_addr_t raddr, | 26 | dma_addr_t raddr, |
27 | dma_addr_t waddr, | 27 | dma_addr_t waddr, |
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc, | |||
31 | int wfixed); | 31 | int wfixed); |
32 | 32 | ||
33 | static int sgdma_async_write(struct altera_tse_private *priv, | 33 | static int sgdma_async_write(struct altera_tse_private *priv, |
34 | struct sgdma_descrip *desc); | 34 | struct sgdma_descrip __iomem *desc); |
35 | 35 | ||
36 | static int sgdma_async_read(struct altera_tse_private *priv); | 36 | static int sgdma_async_read(struct altera_tse_private *priv); |
37 | 37 | ||
38 | static dma_addr_t | 38 | static dma_addr_t |
39 | sgdma_txphysaddr(struct altera_tse_private *priv, | 39 | sgdma_txphysaddr(struct altera_tse_private *priv, |
40 | struct sgdma_descrip *desc); | 40 | struct sgdma_descrip __iomem *desc); |
41 | 41 | ||
42 | static dma_addr_t | 42 | static dma_addr_t |
43 | sgdma_rxphysaddr(struct altera_tse_private *priv, | 43 | sgdma_rxphysaddr(struct altera_tse_private *priv, |
44 | struct sgdma_descrip *desc); | 44 | struct sgdma_descrip __iomem *desc); |
45 | 45 | ||
46 | static int sgdma_txbusy(struct altera_tse_private *priv); | 46 | static int sgdma_txbusy(struct altera_tse_private *priv); |
47 | 47 | ||
@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv) | |||
79 | priv->rxdescphys = (dma_addr_t) 0; | 79 | priv->rxdescphys = (dma_addr_t) 0; |
80 | priv->txdescphys = (dma_addr_t) 0; | 80 | priv->txdescphys = (dma_addr_t) 0; |
81 | 81 | ||
82 | priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc, | 82 | priv->rxdescphys = dma_map_single(priv->device, |
83 | (void __force *)priv->rx_dma_desc, | ||
83 | priv->rxdescmem, DMA_BIDIRECTIONAL); | 84 | priv->rxdescmem, DMA_BIDIRECTIONAL); |
84 | 85 | ||
85 | if (dma_mapping_error(priv->device, priv->rxdescphys)) { | 86 | if (dma_mapping_error(priv->device, priv->rxdescphys)) { |
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv) | |||
88 | return -EINVAL; | 89 | return -EINVAL; |
89 | } | 90 | } |
90 | 91 | ||
91 | priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc, | 92 | priv->txdescphys = dma_map_single(priv->device, |
93 | (void __force *)priv->tx_dma_desc, | ||
92 | priv->txdescmem, DMA_TO_DEVICE); | 94 | priv->txdescmem, DMA_TO_DEVICE); |
93 | 95 | ||
94 | if (dma_mapping_error(priv->device, priv->txdescphys)) { | 96 | if (dma_mapping_error(priv->device, priv->txdescphys)) { |
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv) | |||
98 | } | 100 | } |
99 | 101 | ||
100 | /* Initialize descriptor memory to all 0's, sync memory to cache */ | 102 | /* Initialize descriptor memory to all 0's, sync memory to cache */ |
101 | memset(priv->tx_dma_desc, 0, priv->txdescmem); | 103 | memset_io(priv->tx_dma_desc, 0, priv->txdescmem); |
102 | memset(priv->rx_dma_desc, 0, priv->rxdescmem); | 104 | memset_io(priv->rx_dma_desc, 0, priv->rxdescmem); |
103 | 105 | ||
104 | dma_sync_single_for_device(priv->device, priv->txdescphys, | 106 | dma_sync_single_for_device(priv->device, priv->txdescphys, |
105 | priv->txdescmem, DMA_TO_DEVICE); | 107 | priv->txdescmem, DMA_TO_DEVICE); |
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv) | |||
126 | */ | 128 | */ |
127 | void sgdma_reset(struct altera_tse_private *priv) | 129 | void sgdma_reset(struct altera_tse_private *priv) |
128 | { | 130 | { |
129 | u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc; | ||
130 | u32 txdescriplen = priv->txdescmem; | ||
131 | u32 *prxdescripmem = (u32 *)priv->rx_dma_desc; | ||
132 | u32 rxdescriplen = priv->rxdescmem; | ||
133 | struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr; | ||
134 | struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr; | ||
135 | |||
136 | /* Initialize descriptor memory to 0 */ | 131 | /* Initialize descriptor memory to 0 */ |
137 | memset(ptxdescripmem, 0, txdescriplen); | 132 | memset_io(priv->tx_dma_desc, 0, priv->txdescmem); |
138 | memset(prxdescripmem, 0, rxdescriplen); | 133 | memset_io(priv->rx_dma_desc, 0, priv->rxdescmem); |
139 | 134 | ||
140 | iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control); | 135 | csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control)); |
141 | iowrite32(0, &ptxsgdma->control); | 136 | csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); |
142 | 137 | ||
143 | iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control); | 138 | csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control)); |
144 | iowrite32(0, &prxsgdma->control); | 139 | csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); |
145 | } | 140 | } |
146 | 141 | ||
147 | /* For SGDMA, interrupts remain enabled after initially enabling, | 142 | /* For SGDMA, interrupts remain enabled after initially enabling, |
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv) | |||
167 | 162 | ||
168 | void sgdma_clear_rxirq(struct altera_tse_private *priv) | 163 | void sgdma_clear_rxirq(struct altera_tse_private *priv) |
169 | { | 164 | { |
170 | struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; | 165 | tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control), |
171 | tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); | 166 | SGDMA_CTRLREG_CLRINT); |
172 | } | 167 | } |
173 | 168 | ||
174 | void sgdma_clear_txirq(struct altera_tse_private *priv) | 169 | void sgdma_clear_txirq(struct altera_tse_private *priv) |
175 | { | 170 | { |
176 | struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; | 171 | tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control), |
177 | tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); | 172 | SGDMA_CTRLREG_CLRINT); |
178 | } | 173 | } |
179 | 174 | ||
180 | /* transmits buffer through SGDMA. Returns number of buffers | 175 | /* transmits buffer through SGDMA. Returns number of buffers |
@@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv) | |||
184 | */ | 179 | */ |
185 | int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) | 180 | int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) |
186 | { | 181 | { |
187 | int pktstx = 0; | 182 | struct sgdma_descrip __iomem *descbase = |
188 | struct sgdma_descrip *descbase = | 183 | (struct sgdma_descrip __iomem *)priv->tx_dma_desc; |
189 | (struct sgdma_descrip *)priv->tx_dma_desc; | ||
190 | 184 | ||
191 | struct sgdma_descrip *cdesc = &descbase[0]; | 185 | struct sgdma_descrip __iomem *cdesc = &descbase[0]; |
192 | struct sgdma_descrip *ndesc = &descbase[1]; | 186 | struct sgdma_descrip __iomem *ndesc = &descbase[1]; |
193 | 187 | ||
194 | /* wait 'til the tx sgdma is ready for the next transmit request */ | 188 | /* wait 'til the tx sgdma is ready for the next transmit request */ |
195 | if (sgdma_txbusy(priv)) | 189 | if (sgdma_txbusy(priv)) |
@@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) | |||
205 | 0, /* read fixed */ | 199 | 0, /* read fixed */ |
206 | SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ | 200 | SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ |
207 | 201 | ||
208 | pktstx = sgdma_async_write(priv, cdesc); | 202 | sgdma_async_write(priv, cdesc); |
209 | 203 | ||
210 | /* enqueue the request to the pending transmit queue */ | 204 | /* enqueue the request to the pending transmit queue */ |
211 | queue_tx(priv, buffer); | 205 | queue_tx(priv, buffer); |
@@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) | |||
219 | u32 sgdma_tx_completions(struct altera_tse_private *priv) | 213 | u32 sgdma_tx_completions(struct altera_tse_private *priv) |
220 | { | 214 | { |
221 | u32 ready = 0; | 215 | u32 ready = 0; |
222 | struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc; | ||
223 | 216 | ||
224 | if (!sgdma_txbusy(priv) && | 217 | if (!sgdma_txbusy(priv) && |
225 | ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) && | 218 | ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control)) |
219 | & SGDMA_CONTROL_HW_OWNED) == 0) && | ||
226 | (dequeue_tx(priv))) { | 220 | (dequeue_tx(priv))) { |
227 | ready = 1; | 221 | ready = 1; |
228 | } | 222 | } |
@@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv, | |||
246 | */ | 240 | */ |
247 | u32 sgdma_rx_status(struct altera_tse_private *priv) | 241 | u32 sgdma_rx_status(struct altera_tse_private *priv) |
248 | { | 242 | { |
249 | struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; | 243 | struct sgdma_descrip __iomem *base = |
250 | struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc; | 244 | (struct sgdma_descrip __iomem *)priv->rx_dma_desc; |
251 | struct sgdma_descrip *desc = NULL; | 245 | struct sgdma_descrip __iomem *desc = NULL; |
252 | int pktsrx; | ||
253 | unsigned int rxstatus = 0; | ||
254 | unsigned int pktlength = 0; | ||
255 | unsigned int pktstatus = 0; | ||
256 | struct tse_buffer *rxbuffer = NULL; | 246 | struct tse_buffer *rxbuffer = NULL; |
247 | unsigned int rxstatus = 0; | ||
257 | 248 | ||
258 | u32 sts = ioread32(&csr->status); | 249 | u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)); |
259 | 250 | ||
260 | desc = &base[0]; | 251 | desc = &base[0]; |
261 | if (sts & SGDMA_STSREG_EOP) { | 252 | if (sts & SGDMA_STSREG_EOP) { |
253 | unsigned int pktlength = 0; | ||
254 | unsigned int pktstatus = 0; | ||
262 | dma_sync_single_for_cpu(priv->device, | 255 | dma_sync_single_for_cpu(priv->device, |
263 | priv->rxdescphys, | 256 | priv->rxdescphys, |
264 | priv->sgdmadesclen, | 257 | priv->sgdmadesclen, |
265 | DMA_FROM_DEVICE); | 258 | DMA_FROM_DEVICE); |
266 | 259 | ||
267 | pktlength = desc->bytes_xferred; | 260 | pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred)); |
268 | pktstatus = desc->status & 0x3f; | 261 | pktstatus = csrrd8(desc, sgdma_descroffs(status)); |
269 | rxstatus = pktstatus; | 262 | rxstatus = pktstatus & ~SGDMA_STATUS_EOP; |
270 | rxstatus = rxstatus << 16; | 263 | rxstatus = rxstatus << 16; |
271 | rxstatus |= (pktlength & 0xffff); | 264 | rxstatus |= (pktlength & 0xffff); |
272 | 265 | ||
273 | if (rxstatus) { | 266 | if (rxstatus) { |
274 | desc->status = 0; | 267 | csrwr8(0, desc, sgdma_descroffs(status)); |
275 | 268 | ||
276 | rxbuffer = dequeue_rx(priv); | 269 | rxbuffer = dequeue_rx(priv); |
277 | if (rxbuffer == NULL) | 270 | if (rxbuffer == NULL) |
@@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv) | |||
279 | "sgdma rx and rx queue empty!\n"); | 272 | "sgdma rx and rx queue empty!\n"); |
280 | 273 | ||
281 | /* Clear control */ | 274 | /* Clear control */ |
282 | iowrite32(0, &csr->control); | 275 | csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); |
283 | /* clear status */ | 276 | /* clear status */ |
284 | iowrite32(0xf, &csr->status); | 277 | csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status)); |
285 | 278 | ||
286 | /* kick the rx sgdma after reaping this descriptor */ | 279 | /* kick the rx sgdma after reaping this descriptor */ |
287 | pktsrx = sgdma_async_read(priv); | 280 | sgdma_async_read(priv); |
288 | 281 | ||
289 | } else { | 282 | } else { |
290 | /* If the SGDMA indicated an end of packet on recv, | 283 | /* If the SGDMA indicated an end of packet on recv, |
@@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv) | |||
298 | */ | 291 | */ |
299 | netdev_err(priv->dev, | 292 | netdev_err(priv->dev, |
300 | "SGDMA RX Error Info: %x, %x, %x\n", | 293 | "SGDMA RX Error Info: %x, %x, %x\n", |
301 | sts, desc->status, rxstatus); | 294 | sts, csrrd8(desc, sgdma_descroffs(status)), |
295 | rxstatus); | ||
302 | } | 296 | } |
303 | } else if (sts == 0) { | 297 | } else if (sts == 0) { |
304 | pktsrx = sgdma_async_read(priv); | 298 | sgdma_async_read(priv); |
305 | } | 299 | } |
306 | 300 | ||
307 | return rxstatus; | 301 | return rxstatus; |
@@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv) | |||
309 | 303 | ||
310 | 304 | ||
311 | /* Private functions */ | 305 | /* Private functions */ |
312 | static void sgdma_setup_descrip(struct sgdma_descrip *desc, | 306 | static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, |
313 | struct sgdma_descrip *ndesc, | 307 | struct sgdma_descrip __iomem *ndesc, |
314 | dma_addr_t ndesc_phys, | 308 | dma_addr_t ndesc_phys, |
315 | dma_addr_t raddr, | 309 | dma_addr_t raddr, |
316 | dma_addr_t waddr, | 310 | dma_addr_t waddr, |
@@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc, | |||
320 | int wfixed) | 314 | int wfixed) |
321 | { | 315 | { |
322 | /* Clear the next descriptor as not owned by hardware */ | 316 | /* Clear the next descriptor as not owned by hardware */ |
323 | u32 ctrl = ndesc->control; | 317 | |
318 | u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control)); | ||
324 | ctrl &= ~SGDMA_CONTROL_HW_OWNED; | 319 | ctrl &= ~SGDMA_CONTROL_HW_OWNED; |
325 | ndesc->control = ctrl; | 320 | csrwr8(ctrl, ndesc, sgdma_descroffs(control)); |
326 | 321 | ||
327 | ctrl = 0; | ||
328 | ctrl = SGDMA_CONTROL_HW_OWNED; | 322 | ctrl = SGDMA_CONTROL_HW_OWNED; |
329 | ctrl |= generate_eop; | 323 | ctrl |= generate_eop; |
330 | ctrl |= rfixed; | 324 | ctrl |= rfixed; |
331 | ctrl |= wfixed; | 325 | ctrl |= wfixed; |
332 | 326 | ||
333 | /* Channel is implicitly zero, initialized to 0 by default */ | 327 | /* Channel is implicitly zero, initialized to 0 by default */ |
334 | 328 | csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr)); | |
335 | desc->raddr = raddr; | 329 | csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr)); |
336 | desc->waddr = waddr; | 330 | |
337 | desc->next = lower_32_bits(ndesc_phys); | 331 | csrwr32(0, desc, sgdma_descroffs(pad1)); |
338 | desc->control = ctrl; | 332 | csrwr32(0, desc, sgdma_descroffs(pad2)); |
339 | desc->status = 0; | 333 | csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next)); |
340 | desc->rburst = 0; | 334 | |
341 | desc->wburst = 0; | 335 | csrwr8(ctrl, desc, sgdma_descroffs(control)); |
342 | desc->bytes = length; | 336 | csrwr8(0, desc, sgdma_descroffs(status)); |
343 | desc->bytes_xferred = 0; | 337 | csrwr8(0, desc, sgdma_descroffs(wburst)); |
338 | csrwr8(0, desc, sgdma_descroffs(rburst)); | ||
339 | csrwr16(length, desc, sgdma_descroffs(bytes)); | ||
340 | csrwr16(0, desc, sgdma_descroffs(bytes_xferred)); | ||
344 | } | 341 | } |
345 | 342 | ||
346 | /* If hardware is busy, don't restart async read. | 343 | /* If hardware is busy, don't restart async read. |
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc, | |||
351 | */ | 348 | */ |
352 | static int sgdma_async_read(struct altera_tse_private *priv) | 349 | static int sgdma_async_read(struct altera_tse_private *priv) |
353 | { | 350 | { |
354 | struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; | 351 | struct sgdma_descrip __iomem *descbase = |
355 | struct sgdma_descrip *descbase = | 352 | (struct sgdma_descrip __iomem *)priv->rx_dma_desc; |
356 | (struct sgdma_descrip *)priv->rx_dma_desc; | ||
357 | 353 | ||
358 | struct sgdma_descrip *cdesc = &descbase[0]; | 354 | struct sgdma_descrip __iomem *cdesc = &descbase[0]; |
359 | struct sgdma_descrip *ndesc = &descbase[1]; | 355 | struct sgdma_descrip __iomem *ndesc = &descbase[1]; |
360 | 356 | ||
361 | struct tse_buffer *rxbuffer = NULL; | 357 | struct tse_buffer *rxbuffer = NULL; |
362 | 358 | ||
@@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv) | |||
382 | priv->sgdmadesclen, | 378 | priv->sgdmadesclen, |
383 | DMA_TO_DEVICE); | 379 | DMA_TO_DEVICE); |
384 | 380 | ||
385 | iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), | 381 | csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), |
386 | &csr->next_descrip); | 382 | priv->rx_dma_csr, |
383 | sgdma_csroffs(next_descrip)); | ||
387 | 384 | ||
388 | iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START), | 385 | csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START), |
389 | &csr->control); | 386 | priv->rx_dma_csr, |
387 | sgdma_csroffs(control)); | ||
390 | 388 | ||
391 | return 1; | 389 | return 1; |
392 | } | 390 | } |
@@ -395,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv) | |||
395 | } | 393 | } |
396 | 394 | ||
397 | static int sgdma_async_write(struct altera_tse_private *priv, | 395 | static int sgdma_async_write(struct altera_tse_private *priv, |
398 | struct sgdma_descrip *desc) | 396 | struct sgdma_descrip __iomem *desc) |
399 | { | 397 | { |
400 | struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; | ||
401 | |||
402 | if (sgdma_txbusy(priv)) | 398 | if (sgdma_txbusy(priv)) |
403 | return 0; | 399 | return 0; |
404 | 400 | ||
405 | /* clear control and status */ | 401 | /* clear control and status */ |
406 | iowrite32(0, &csr->control); | 402 | csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); |
407 | iowrite32(0x1f, &csr->status); | 403 | csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status)); |
408 | 404 | ||
409 | dma_sync_single_for_device(priv->device, priv->txdescphys, | 405 | dma_sync_single_for_device(priv->device, priv->txdescphys, |
410 | priv->sgdmadesclen, DMA_TO_DEVICE); | 406 | priv->sgdmadesclen, DMA_TO_DEVICE); |
411 | 407 | ||
412 | iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)), | 408 | csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)), |
413 | &csr->next_descrip); | 409 | priv->tx_dma_csr, |
410 | sgdma_csroffs(next_descrip)); | ||
414 | 411 | ||
415 | iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START), | 412 | csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START), |
416 | &csr->control); | 413 | priv->tx_dma_csr, |
414 | sgdma_csroffs(control)); | ||
417 | 415 | ||
418 | return 1; | 416 | return 1; |
419 | } | 417 | } |
420 | 418 | ||
421 | static dma_addr_t | 419 | static dma_addr_t |
422 | sgdma_txphysaddr(struct altera_tse_private *priv, | 420 | sgdma_txphysaddr(struct altera_tse_private *priv, |
423 | struct sgdma_descrip *desc) | 421 | struct sgdma_descrip __iomem *desc) |
424 | { | 422 | { |
425 | dma_addr_t paddr = priv->txdescmem_busaddr; | 423 | dma_addr_t paddr = priv->txdescmem_busaddr; |
426 | uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc; | 424 | uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc; |
@@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv, | |||
429 | 427 | ||
430 | static dma_addr_t | 428 | static dma_addr_t |
431 | sgdma_rxphysaddr(struct altera_tse_private *priv, | 429 | sgdma_rxphysaddr(struct altera_tse_private *priv, |
432 | struct sgdma_descrip *desc) | 430 | struct sgdma_descrip __iomem *desc) |
433 | { | 431 | { |
434 | dma_addr_t paddr = priv->rxdescmem_busaddr; | 432 | dma_addr_t paddr = priv->rxdescmem_busaddr; |
435 | uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc; | 433 | uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc; |
@@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv) | |||
518 | */ | 516 | */ |
519 | static int sgdma_rxbusy(struct altera_tse_private *priv) | 517 | static int sgdma_rxbusy(struct altera_tse_private *priv) |
520 | { | 518 | { |
521 | struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; | 519 | return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)) |
522 | return ioread32(&csr->status) & SGDMA_STSREG_BUSY; | 520 | & SGDMA_STSREG_BUSY; |
523 | } | 521 | } |
524 | 522 | ||
525 | /* waits for the tx sgdma to finish it's current operation, returns 0 | 523 | /* waits for the tx sgdma to finish it's current operation, returns 0 |
@@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv) | |||
528 | static int sgdma_txbusy(struct altera_tse_private *priv) | 526 | static int sgdma_txbusy(struct altera_tse_private *priv) |
529 | { | 527 | { |
530 | int delay = 0; | 528 | int delay = 0; |
531 | struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; | ||
532 | 529 | ||
533 | /* if DMA is busy, wait for current transactino to finish */ | 530 | /* if DMA is busy, wait for current transactino to finish */ |
534 | while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100)) | 531 | while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) |
532 | & SGDMA_STSREG_BUSY) && (delay++ < 100)) | ||
535 | udelay(1); | 533 | udelay(1); |
536 | 534 | ||
537 | if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) { | 535 | if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) |
536 | & SGDMA_STSREG_BUSY) { | ||
538 | netdev_err(priv->dev, "timeout waiting for tx dma\n"); | 537 | netdev_err(priv->dev, "timeout waiting for tx dma\n"); |
539 | return 1; | 538 | return 1; |
540 | } | 539 | } |
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h index ba3334f35383..85bc33b218d9 100644 --- a/drivers/net/ethernet/altera/altera_sgdmahw.h +++ b/drivers/net/ethernet/altera/altera_sgdmahw.h | |||
@@ -19,16 +19,16 @@ | |||
19 | 19 | ||
20 | /* SGDMA descriptor structure */ | 20 | /* SGDMA descriptor structure */ |
21 | struct sgdma_descrip { | 21 | struct sgdma_descrip { |
22 | unsigned int raddr; /* address of data to be read */ | 22 | u32 raddr; /* address of data to be read */ |
23 | unsigned int pad1; | 23 | u32 pad1; |
24 | unsigned int waddr; | 24 | u32 waddr; |
25 | unsigned int pad2; | 25 | u32 pad2; |
26 | unsigned int next; | 26 | u32 next; |
27 | unsigned int pad3; | 27 | u32 pad3; |
28 | unsigned short bytes; | 28 | u16 bytes; |
29 | unsigned char rburst; | 29 | u8 rburst; |
30 | unsigned char wburst; | 30 | u8 wburst; |
31 | unsigned short bytes_xferred; /* 16 bits, bytes xferred */ | 31 | u16 bytes_xferred; /* 16 bits, bytes xferred */ |
32 | 32 | ||
33 | /* bit 0: error | 33 | /* bit 0: error |
34 | * bit 1: length error | 34 | * bit 1: length error |
@@ -39,7 +39,7 @@ struct sgdma_descrip { | |||
39 | * bit 6: reserved | 39 | * bit 6: reserved |
40 | * bit 7: status eop for recv case | 40 | * bit 7: status eop for recv case |
41 | */ | 41 | */ |
42 | unsigned char status; | 42 | u8 status; |
43 | 43 | ||
44 | /* bit 0: eop | 44 | /* bit 0: eop |
45 | * bit 1: read_fixed | 45 | * bit 1: read_fixed |
@@ -47,7 +47,7 @@ struct sgdma_descrip { | |||
47 | * bits 3,4,5,6: Channel (always 0) | 47 | * bits 3,4,5,6: Channel (always 0) |
48 | * bit 7: hardware owned | 48 | * bit 7: hardware owned |
49 | */ | 49 | */ |
50 | unsigned char control; | 50 | u8 control; |
51 | } __packed; | 51 | } __packed; |
52 | 52 | ||
53 | 53 | ||
@@ -101,6 +101,8 @@ struct sgdma_csr { | |||
101 | u32 pad3[3]; | 101 | u32 pad3[3]; |
102 | }; | 102 | }; |
103 | 103 | ||
104 | #define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a)) | ||
105 | #define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a)) | ||
104 | 106 | ||
105 | #define SGDMA_STSREG_ERR BIT(0) /* Error */ | 107 | #define SGDMA_STSREG_ERR BIT(0) /* Error */ |
106 | #define SGDMA_STSREG_EOP BIT(1) /* EOP */ | 108 | #define SGDMA_STSREG_EOP BIT(1) /* EOP */ |
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h index 465c4aabebbd..2adb24d4523c 100644 --- a/drivers/net/ethernet/altera/altera_tse.h +++ b/drivers/net/ethernet/altera/altera_tse.h | |||
@@ -357,6 +357,8 @@ struct altera_tse_mac { | |||
357 | u32 reserved5[42]; | 357 | u32 reserved5[42]; |
358 | }; | 358 | }; |
359 | 359 | ||
360 | #define tse_csroffs(a) (offsetof(struct altera_tse_mac, a)) | ||
361 | |||
360 | /* Transmit and Receive Command Registers Bit Definitions | 362 | /* Transmit and Receive Command Registers Bit Definitions |
361 | */ | 363 | */ |
362 | #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17) | 364 | #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17) |
@@ -487,4 +489,49 @@ struct altera_tse_private { | |||
487 | */ | 489 | */ |
488 | void altera_tse_set_ethtool_ops(struct net_device *); | 490 | void altera_tse_set_ethtool_ops(struct net_device *); |
489 | 491 | ||
492 | static inline | ||
493 | u32 csrrd32(void __iomem *mac, size_t offs) | ||
494 | { | ||
495 | void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); | ||
496 | return readl(paddr); | ||
497 | } | ||
498 | |||
499 | static inline | ||
500 | u16 csrrd16(void __iomem *mac, size_t offs) | ||
501 | { | ||
502 | void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); | ||
503 | return readw(paddr); | ||
504 | } | ||
505 | |||
506 | static inline | ||
507 | u8 csrrd8(void __iomem *mac, size_t offs) | ||
508 | { | ||
509 | void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); | ||
510 | return readb(paddr); | ||
511 | } | ||
512 | |||
513 | static inline | ||
514 | void csrwr32(u32 val, void __iomem *mac, size_t offs) | ||
515 | { | ||
516 | void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); | ||
517 | |||
518 | writel(val, paddr); | ||
519 | } | ||
520 | |||
521 | static inline | ||
522 | void csrwr16(u16 val, void __iomem *mac, size_t offs) | ||
523 | { | ||
524 | void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); | ||
525 | |||
526 | writew(val, paddr); | ||
527 | } | ||
528 | |||
529 | static inline | ||
530 | void csrwr8(u8 val, void __iomem *mac, size_t offs) | ||
531 | { | ||
532 | void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); | ||
533 | |||
534 | writeb(val, paddr); | ||
535 | } | ||
536 | |||
490 | #endif /* __ALTERA_TSE_H__ */ | 537 | #endif /* __ALTERA_TSE_H__ */ |
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c index 76133caffa78..54c25eff7952 100644 --- a/drivers/net/ethernet/altera/altera_tse_ethtool.c +++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c | |||
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, | |||
96 | u64 *buf) | 96 | u64 *buf) |
97 | { | 97 | { |
98 | struct altera_tse_private *priv = netdev_priv(dev); | 98 | struct altera_tse_private *priv = netdev_priv(dev); |
99 | struct altera_tse_mac *mac = priv->mac_dev; | ||
100 | u64 ext; | 99 | u64 ext; |
101 | 100 | ||
102 | buf[0] = ioread32(&mac->frames_transmitted_ok); | 101 | buf[0] = csrrd32(priv->mac_dev, |
103 | buf[1] = ioread32(&mac->frames_received_ok); | 102 | tse_csroffs(frames_transmitted_ok)); |
104 | buf[2] = ioread32(&mac->frames_check_sequence_errors); | 103 | buf[1] = csrrd32(priv->mac_dev, |
105 | buf[3] = ioread32(&mac->alignment_errors); | 104 | tse_csroffs(frames_received_ok)); |
105 | buf[2] = csrrd32(priv->mac_dev, | ||
106 | tse_csroffs(frames_check_sequence_errors)); | ||
107 | buf[3] = csrrd32(priv->mac_dev, | ||
108 | tse_csroffs(alignment_errors)); | ||
106 | 109 | ||
107 | /* Extended aOctetsTransmittedOK counter */ | 110 | /* Extended aOctetsTransmittedOK counter */ |
108 | ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32; | 111 | ext = (u64) csrrd32(priv->mac_dev, |
109 | ext |= ioread32(&mac->octets_transmitted_ok); | 112 | tse_csroffs(msb_octets_transmitted_ok)) << 32; |
113 | |||
114 | ext |= csrrd32(priv->mac_dev, | ||
115 | tse_csroffs(octets_transmitted_ok)); | ||
110 | buf[4] = ext; | 116 | buf[4] = ext; |
111 | 117 | ||
112 | /* Extended aOctetsReceivedOK counter */ | 118 | /* Extended aOctetsReceivedOK counter */ |
113 | ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32; | 119 | ext = (u64) csrrd32(priv->mac_dev, |
114 | ext |= ioread32(&mac->octets_received_ok); | 120 | tse_csroffs(msb_octets_received_ok)) << 32; |
121 | |||
122 | ext |= csrrd32(priv->mac_dev, | ||
123 | tse_csroffs(octets_received_ok)); | ||
115 | buf[5] = ext; | 124 | buf[5] = ext; |
116 | 125 | ||
117 | buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames); | 126 | buf[6] = csrrd32(priv->mac_dev, |
118 | buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames); | 127 | tse_csroffs(tx_pause_mac_ctrl_frames)); |
119 | buf[8] = ioread32(&mac->if_in_errors); | 128 | buf[7] = csrrd32(priv->mac_dev, |
120 | buf[9] = ioread32(&mac->if_out_errors); | 129 | tse_csroffs(rx_pause_mac_ctrl_frames)); |
121 | buf[10] = ioread32(&mac->if_in_ucast_pkts); | 130 | buf[8] = csrrd32(priv->mac_dev, |
122 | buf[11] = ioread32(&mac->if_in_multicast_pkts); | 131 | tse_csroffs(if_in_errors)); |
123 | buf[12] = ioread32(&mac->if_in_broadcast_pkts); | 132 | buf[9] = csrrd32(priv->mac_dev, |
124 | buf[13] = ioread32(&mac->if_out_discards); | 133 | tse_csroffs(if_out_errors)); |
125 | buf[14] = ioread32(&mac->if_out_ucast_pkts); | 134 | buf[10] = csrrd32(priv->mac_dev, |
126 | buf[15] = ioread32(&mac->if_out_multicast_pkts); | 135 | tse_csroffs(if_in_ucast_pkts)); |
127 | buf[16] = ioread32(&mac->if_out_broadcast_pkts); | 136 | buf[11] = csrrd32(priv->mac_dev, |
128 | buf[17] = ioread32(&mac->ether_stats_drop_events); | 137 | tse_csroffs(if_in_multicast_pkts)); |
138 | buf[12] = csrrd32(priv->mac_dev, | ||
139 | tse_csroffs(if_in_broadcast_pkts)); | ||
140 | buf[13] = csrrd32(priv->mac_dev, | ||
141 | tse_csroffs(if_out_discards)); | ||
142 | buf[14] = csrrd32(priv->mac_dev, | ||
143 | tse_csroffs(if_out_ucast_pkts)); | ||
144 | buf[15] = csrrd32(priv->mac_dev, | ||
145 | tse_csroffs(if_out_multicast_pkts)); | ||
146 | buf[16] = csrrd32(priv->mac_dev, | ||
147 | tse_csroffs(if_out_broadcast_pkts)); | ||
148 | buf[17] = csrrd32(priv->mac_dev, | ||
149 | tse_csroffs(ether_stats_drop_events)); | ||
129 | 150 | ||
130 | /* Extended etherStatsOctets counter */ | 151 | /* Extended etherStatsOctets counter */ |
131 | ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32; | 152 | ext = (u64) csrrd32(priv->mac_dev, |
132 | ext |= ioread32(&mac->ether_stats_octets); | 153 | tse_csroffs(msb_ether_stats_octets)) << 32; |
154 | ext |= csrrd32(priv->mac_dev, | ||
155 | tse_csroffs(ether_stats_octets)); | ||
133 | buf[18] = ext; | 156 | buf[18] = ext; |
134 | 157 | ||
135 | buf[19] = ioread32(&mac->ether_stats_pkts); | 158 | buf[19] = csrrd32(priv->mac_dev, |
136 | buf[20] = ioread32(&mac->ether_stats_undersize_pkts); | 159 | tse_csroffs(ether_stats_pkts)); |
137 | buf[21] = ioread32(&mac->ether_stats_oversize_pkts); | 160 | buf[20] = csrrd32(priv->mac_dev, |
138 | buf[22] = ioread32(&mac->ether_stats_pkts_64_octets); | 161 | tse_csroffs(ether_stats_undersize_pkts)); |
139 | buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets); | 162 | buf[21] = csrrd32(priv->mac_dev, |
140 | buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets); | 163 | tse_csroffs(ether_stats_oversize_pkts)); |
141 | buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets); | 164 | buf[22] = csrrd32(priv->mac_dev, |
142 | buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets); | 165 | tse_csroffs(ether_stats_pkts_64_octets)); |
143 | buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets); | 166 | buf[23] = csrrd32(priv->mac_dev, |
144 | buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets); | 167 | tse_csroffs(ether_stats_pkts_65to127_octets)); |
145 | buf[29] = ioread32(&mac->ether_stats_jabbers); | 168 | buf[24] = csrrd32(priv->mac_dev, |
146 | buf[30] = ioread32(&mac->ether_stats_fragments); | 169 | tse_csroffs(ether_stats_pkts_128to255_octets)); |
170 | buf[25] = csrrd32(priv->mac_dev, | ||
171 | tse_csroffs(ether_stats_pkts_256to511_octets)); | ||
172 | buf[26] = csrrd32(priv->mac_dev, | ||
173 | tse_csroffs(ether_stats_pkts_512to1023_octets)); | ||
174 | buf[27] = csrrd32(priv->mac_dev, | ||
175 | tse_csroffs(ether_stats_pkts_1024to1518_octets)); | ||
176 | buf[28] = csrrd32(priv->mac_dev, | ||
177 | tse_csroffs(ether_stats_pkts_1519tox_octets)); | ||
178 | buf[29] = csrrd32(priv->mac_dev, | ||
179 | tse_csroffs(ether_stats_jabbers)); | ||
180 | buf[30] = csrrd32(priv->mac_dev, | ||
181 | tse_csroffs(ether_stats_fragments)); | ||
147 | } | 182 | } |
148 | 183 | ||
149 | static int tse_sset_count(struct net_device *dev, int sset) | 184 | static int tse_sset_count(struct net_device *dev, int sset) |
@@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
178 | { | 213 | { |
179 | int i; | 214 | int i; |
180 | struct altera_tse_private *priv = netdev_priv(dev); | 215 | struct altera_tse_private *priv = netdev_priv(dev); |
181 | u32 *tse_mac_regs = (u32 *)priv->mac_dev; | ||
182 | u32 *buf = regbuf; | 216 | u32 *buf = regbuf; |
183 | 217 | ||
184 | /* Set version to a known value, so ethtool knows | 218 | /* Set version to a known value, so ethtool knows |
@@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
196 | regs->version = 1; | 230 | regs->version = 1; |
197 | 231 | ||
198 | for (i = 0; i < TSE_NUM_REGS; i++) | 232 | for (i = 0; i < TSE_NUM_REGS; i++) |
199 | buf[i] = ioread32(&tse_mac_regs[i]); | 233 | buf[i] = csrrd32(priv->mac_dev, i * 4); |
200 | } | 234 | } |
201 | 235 | ||
202 | static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 236 | static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index e44a4aeb9701..7330681574d2 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c | |||
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv) | |||
100 | */ | 100 | */ |
101 | static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | 101 | static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) |
102 | { | 102 | { |
103 | struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; | 103 | struct net_device *ndev = bus->priv; |
104 | unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; | 104 | struct altera_tse_private *priv = netdev_priv(ndev); |
105 | u32 data; | ||
106 | 105 | ||
107 | /* set MDIO address */ | 106 | /* set MDIO address */ |
108 | iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); | 107 | csrwr32((mii_id & 0x1f), priv->mac_dev, |
108 | tse_csroffs(mdio_phy0_addr)); | ||
109 | 109 | ||
110 | /* get the data */ | 110 | /* get the data */ |
111 | data = ioread32(&mdio_regs[regnum]) & 0xffff; | 111 | return csrrd32(priv->mac_dev, |
112 | return data; | 112 | tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff; |
113 | } | 113 | } |
114 | 114 | ||
115 | static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | 115 | static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, |
116 | u16 value) | 116 | u16 value) |
117 | { | 117 | { |
118 | struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; | 118 | struct net_device *ndev = bus->priv; |
119 | unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; | 119 | struct altera_tse_private *priv = netdev_priv(ndev); |
120 | 120 | ||
121 | /* set MDIO address */ | 121 | /* set MDIO address */ |
122 | iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); | 122 | csrwr32((mii_id & 0x1f), priv->mac_dev, |
123 | tse_csroffs(mdio_phy0_addr)); | ||
123 | 124 | ||
124 | /* write the data */ | 125 | /* write the data */ |
125 | iowrite32((u32) value, &mdio_regs[regnum]); | 126 | csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4); |
126 | return 0; | 127 | return 0; |
127 | } | 128 | } |
128 | 129 | ||
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) | |||
168 | for (i = 0; i < PHY_MAX_ADDR; i++) | 169 | for (i = 0; i < PHY_MAX_ADDR; i++) |
169 | mdio->irq[i] = PHY_POLL; | 170 | mdio->irq[i] = PHY_POLL; |
170 | 171 | ||
171 | mdio->priv = priv->mac_dev; | 172 | mdio->priv = dev; |
172 | mdio->parent = priv->device; | 173 | mdio->parent = priv->device; |
173 | 174 | ||
174 | ret = of_mdiobus_register(mdio, mdio_node); | 175 | ret = of_mdiobus_register(mdio, mdio_node); |
@@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
563 | unsigned int nopaged_len = skb_headlen(skb); | 564 | unsigned int nopaged_len = skb_headlen(skb); |
564 | enum netdev_tx ret = NETDEV_TX_OK; | 565 | enum netdev_tx ret = NETDEV_TX_OK; |
565 | dma_addr_t dma_addr; | 566 | dma_addr_t dma_addr; |
566 | int txcomplete = 0; | ||
567 | 567 | ||
568 | spin_lock_bh(&priv->tx_lock); | 568 | spin_lock_bh(&priv->tx_lock); |
569 | 569 | ||
@@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
599 | dma_sync_single_for_device(priv->device, buffer->dma_addr, | 599 | dma_sync_single_for_device(priv->device, buffer->dma_addr, |
600 | buffer->len, DMA_TO_DEVICE); | 600 | buffer->len, DMA_TO_DEVICE); |
601 | 601 | ||
602 | txcomplete = priv->dmaops->tx_buffer(priv, buffer); | 602 | priv->dmaops->tx_buffer(priv, buffer); |
603 | 603 | ||
604 | skb_tx_timestamp(skb); | 604 | skb_tx_timestamp(skb); |
605 | 605 | ||
@@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev) | |||
698 | struct altera_tse_private *priv = netdev_priv(dev); | 698 | struct altera_tse_private *priv = netdev_priv(dev); |
699 | struct phy_device *phydev = NULL; | 699 | struct phy_device *phydev = NULL; |
700 | char phy_id_fmt[MII_BUS_ID_SIZE + 3]; | 700 | char phy_id_fmt[MII_BUS_ID_SIZE + 3]; |
701 | int ret; | ||
702 | 701 | ||
703 | if (priv->phy_addr != POLL_PHY) { | 702 | if (priv->phy_addr != POLL_PHY) { |
704 | snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, | 703 | snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, |
@@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev) | |||
712 | netdev_err(dev, "Could not attach to PHY\n"); | 711 | netdev_err(dev, "Could not attach to PHY\n"); |
713 | 712 | ||
714 | } else { | 713 | } else { |
714 | int ret; | ||
715 | phydev = phy_find_first(priv->mdio); | 715 | phydev = phy_find_first(priv->mdio); |
716 | if (phydev == NULL) { | 716 | if (phydev == NULL) { |
717 | netdev_err(dev, "No PHY found\n"); | 717 | netdev_err(dev, "No PHY found\n"); |
@@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev) | |||
791 | 791 | ||
792 | static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) | 792 | static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) |
793 | { | 793 | { |
794 | struct altera_tse_mac *mac = priv->mac_dev; | ||
795 | u32 msb; | 794 | u32 msb; |
796 | u32 lsb; | 795 | u32 lsb; |
797 | 796 | ||
@@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) | |||
799 | lsb = ((addr[5] << 8) | addr[4]) & 0xffff; | 798 | lsb = ((addr[5] << 8) | addr[4]) & 0xffff; |
800 | 799 | ||
801 | /* Set primary MAC address */ | 800 | /* Set primary MAC address */ |
802 | iowrite32(msb, &mac->mac_addr_0); | 801 | csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0)); |
803 | iowrite32(lsb, &mac->mac_addr_1); | 802 | csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1)); |
804 | } | 803 | } |
805 | 804 | ||
806 | /* MAC software reset. | 805 | /* MAC software reset. |
@@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) | |||
811 | */ | 810 | */ |
812 | static int reset_mac(struct altera_tse_private *priv) | 811 | static int reset_mac(struct altera_tse_private *priv) |
813 | { | 812 | { |
814 | void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config; | ||
815 | int counter; | 813 | int counter; |
816 | u32 dat; | 814 | u32 dat; |
817 | 815 | ||
818 | dat = ioread32(cmd_cfg_reg); | 816 | dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); |
819 | dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); | 817 | dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); |
820 | dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET; | 818 | dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET; |
821 | iowrite32(dat, cmd_cfg_reg); | 819 | csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); |
822 | 820 | ||
823 | counter = 0; | 821 | counter = 0; |
824 | while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { | 822 | while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { |
825 | if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET)) | 823 | if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config), |
824 | MAC_CMDCFG_SW_RESET)) | ||
826 | break; | 825 | break; |
827 | udelay(1); | 826 | udelay(1); |
828 | } | 827 | } |
829 | 828 | ||
830 | if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { | 829 | if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { |
831 | dat = ioread32(cmd_cfg_reg); | 830 | dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); |
832 | dat &= ~MAC_CMDCFG_SW_RESET; | 831 | dat &= ~MAC_CMDCFG_SW_RESET; |
833 | iowrite32(dat, cmd_cfg_reg); | 832 | csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); |
834 | return -1; | 833 | return -1; |
835 | } | 834 | } |
836 | return 0; | 835 | return 0; |
@@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv) | |||
840 | */ | 839 | */ |
841 | static int init_mac(struct altera_tse_private *priv) | 840 | static int init_mac(struct altera_tse_private *priv) |
842 | { | 841 | { |
843 | struct altera_tse_mac *mac = priv->mac_dev; | ||
844 | unsigned int cmd = 0; | 842 | unsigned int cmd = 0; |
845 | u32 frm_length; | 843 | u32 frm_length; |
846 | 844 | ||
847 | /* Setup Rx FIFO */ | 845 | /* Setup Rx FIFO */ |
848 | iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, | 846 | csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, |
849 | &mac->rx_section_empty); | 847 | priv->mac_dev, tse_csroffs(rx_section_empty)); |
850 | iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full); | 848 | |
851 | iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty); | 849 | csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev, |
852 | iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full); | 850 | tse_csroffs(rx_section_full)); |
851 | |||
852 | csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev, | ||
853 | tse_csroffs(rx_almost_empty)); | ||
854 | |||
855 | csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev, | ||
856 | tse_csroffs(rx_almost_full)); | ||
853 | 857 | ||
854 | /* Setup Tx FIFO */ | 858 | /* Setup Tx FIFO */ |
855 | iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, | 859 | csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, |
856 | &mac->tx_section_empty); | 860 | priv->mac_dev, tse_csroffs(tx_section_empty)); |
857 | iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full); | 861 | |
858 | iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty); | 862 | csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev, |
859 | iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full); | 863 | tse_csroffs(tx_section_full)); |
864 | |||
865 | csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev, | ||
866 | tse_csroffs(tx_almost_empty)); | ||
867 | |||
868 | csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev, | ||
869 | tse_csroffs(tx_almost_full)); | ||
860 | 870 | ||
861 | /* MAC Address Configuration */ | 871 | /* MAC Address Configuration */ |
862 | tse_update_mac_addr(priv, priv->dev->dev_addr); | 872 | tse_update_mac_addr(priv, priv->dev->dev_addr); |
863 | 873 | ||
864 | /* MAC Function Configuration */ | 874 | /* MAC Function Configuration */ |
865 | frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; | 875 | frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; |
866 | iowrite32(frm_length, &mac->frm_length); | 876 | csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length)); |
867 | iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length); | 877 | |
878 | csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev, | ||
879 | tse_csroffs(tx_ipg_length)); | ||
868 | 880 | ||
869 | /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit | 881 | /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit |
870 | * start address | 882 | * start address |
871 | */ | 883 | */ |
872 | tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); | 884 | tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat), |
873 | tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | | 885 | ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); |
874 | ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); | 886 | |
887 | tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat), | ||
888 | ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | | ||
889 | ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); | ||
875 | 890 | ||
876 | /* Set the MAC options */ | 891 | /* Set the MAC options */ |
877 | cmd = ioread32(&mac->command_config); | 892 | cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config)); |
878 | cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */ | 893 | cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */ |
879 | cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */ | 894 | cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */ |
880 | cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames | 895 | cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames |
@@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv) | |||
889 | cmd &= ~MAC_CMDCFG_ETH_SPEED; | 904 | cmd &= ~MAC_CMDCFG_ETH_SPEED; |
890 | cmd &= ~MAC_CMDCFG_ENA_10; | 905 | cmd &= ~MAC_CMDCFG_ENA_10; |
891 | 906 | ||
892 | iowrite32(cmd, &mac->command_config); | 907 | csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config)); |
893 | 908 | ||
894 | iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta); | 909 | csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev, |
910 | tse_csroffs(pause_quanta)); | ||
895 | 911 | ||
896 | if (netif_msg_hw(priv)) | 912 | if (netif_msg_hw(priv)) |
897 | dev_dbg(priv->device, | 913 | dev_dbg(priv->device, |
@@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv) | |||
904 | */ | 920 | */ |
905 | static void tse_set_mac(struct altera_tse_private *priv, bool enable) | 921 | static void tse_set_mac(struct altera_tse_private *priv, bool enable) |
906 | { | 922 | { |
907 | struct altera_tse_mac *mac = priv->mac_dev; | 923 | u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config)); |
908 | u32 value = ioread32(&mac->command_config); | ||
909 | 924 | ||
910 | if (enable) | 925 | if (enable) |
911 | value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA; | 926 | value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA; |
912 | else | 927 | else |
913 | value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); | 928 | value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); |
914 | 929 | ||
915 | iowrite32(value, &mac->command_config); | 930 | csrwr32(value, priv->mac_dev, tse_csroffs(command_config)); |
916 | } | 931 | } |
917 | 932 | ||
918 | /* Change the MTU | 933 | /* Change the MTU |
@@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu) | |||
942 | static void altera_tse_set_mcfilter(struct net_device *dev) | 957 | static void altera_tse_set_mcfilter(struct net_device *dev) |
943 | { | 958 | { |
944 | struct altera_tse_private *priv = netdev_priv(dev); | 959 | struct altera_tse_private *priv = netdev_priv(dev); |
945 | struct altera_tse_mac *mac = priv->mac_dev; | ||
946 | int i; | 960 | int i; |
947 | struct netdev_hw_addr *ha; | 961 | struct netdev_hw_addr *ha; |
948 | 962 | ||
949 | /* clear the hash filter */ | 963 | /* clear the hash filter */ |
950 | for (i = 0; i < 64; i++) | 964 | for (i = 0; i < 64; i++) |
951 | iowrite32(0, &(mac->hash_table[i])); | 965 | csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4); |
952 | 966 | ||
953 | netdev_for_each_mc_addr(ha, dev) { | 967 | netdev_for_each_mc_addr(ha, dev) { |
954 | unsigned int hash = 0; | 968 | unsigned int hash = 0; |
@@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev) | |||
964 | 978 | ||
965 | hash = (hash << 1) | xor_bit; | 979 | hash = (hash << 1) | xor_bit; |
966 | } | 980 | } |
967 | iowrite32(1, &(mac->hash_table[hash])); | 981 | csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4); |
968 | } | 982 | } |
969 | } | 983 | } |
970 | 984 | ||
@@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev) | |||
972 | static void altera_tse_set_mcfilterall(struct net_device *dev) | 986 | static void altera_tse_set_mcfilterall(struct net_device *dev) |
973 | { | 987 | { |
974 | struct altera_tse_private *priv = netdev_priv(dev); | 988 | struct altera_tse_private *priv = netdev_priv(dev); |
975 | struct altera_tse_mac *mac = priv->mac_dev; | ||
976 | int i; | 989 | int i; |
977 | 990 | ||
978 | /* set the hash filter */ | 991 | /* set the hash filter */ |
979 | for (i = 0; i < 64; i++) | 992 | for (i = 0; i < 64; i++) |
980 | iowrite32(1, &(mac->hash_table[i])); | 993 | csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4); |
981 | } | 994 | } |
982 | 995 | ||
983 | /* Set or clear the multicast filter for this adaptor | 996 | /* Set or clear the multicast filter for this adaptor |
@@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev) | |||
985 | static void tse_set_rx_mode_hashfilter(struct net_device *dev) | 998 | static void tse_set_rx_mode_hashfilter(struct net_device *dev) |
986 | { | 999 | { |
987 | struct altera_tse_private *priv = netdev_priv(dev); | 1000 | struct altera_tse_private *priv = netdev_priv(dev); |
988 | struct altera_tse_mac *mac = priv->mac_dev; | ||
989 | 1001 | ||
990 | spin_lock(&priv->mac_cfg_lock); | 1002 | spin_lock(&priv->mac_cfg_lock); |
991 | 1003 | ||
992 | if (dev->flags & IFF_PROMISC) | 1004 | if (dev->flags & IFF_PROMISC) |
993 | tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); | 1005 | tse_set_bit(priv->mac_dev, tse_csroffs(command_config), |
1006 | MAC_CMDCFG_PROMIS_EN); | ||
994 | 1007 | ||
995 | if (dev->flags & IFF_ALLMULTI) | 1008 | if (dev->flags & IFF_ALLMULTI) |
996 | altera_tse_set_mcfilterall(dev); | 1009 | altera_tse_set_mcfilterall(dev); |
@@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev) | |||
1005 | static void tse_set_rx_mode(struct net_device *dev) | 1018 | static void tse_set_rx_mode(struct net_device *dev) |
1006 | { | 1019 | { |
1007 | struct altera_tse_private *priv = netdev_priv(dev); | 1020 | struct altera_tse_private *priv = netdev_priv(dev); |
1008 | struct altera_tse_mac *mac = priv->mac_dev; | ||
1009 | 1021 | ||
1010 | spin_lock(&priv->mac_cfg_lock); | 1022 | spin_lock(&priv->mac_cfg_lock); |
1011 | 1023 | ||
1012 | if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || | 1024 | if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || |
1013 | !netdev_mc_empty(dev) || !netdev_uc_empty(dev)) | 1025 | !netdev_mc_empty(dev) || !netdev_uc_empty(dev)) |
1014 | tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); | 1026 | tse_set_bit(priv->mac_dev, tse_csroffs(command_config), |
1027 | MAC_CMDCFG_PROMIS_EN); | ||
1015 | else | 1028 | else |
1016 | tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); | 1029 | tse_clear_bit(priv->mac_dev, tse_csroffs(command_config), |
1030 | MAC_CMDCFG_PROMIS_EN); | ||
1017 | 1031 | ||
1018 | spin_unlock(&priv->mac_cfg_lock); | 1032 | spin_unlock(&priv->mac_cfg_lock); |
1019 | } | 1033 | } |
@@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev) | |||
1362 | of_property_read_bool(pdev->dev.of_node, | 1376 | of_property_read_bool(pdev->dev.of_node, |
1363 | "altr,has-hash-multicast-filter"); | 1377 | "altr,has-hash-multicast-filter"); |
1364 | 1378 | ||
1379 | /* Set hash filter to not set for now until the | ||
1380 | * multicast filter receive issue is debugged | ||
1381 | */ | ||
1382 | priv->hash_filter = 0; | ||
1383 | |||
1365 | /* get supplemental address settings for this instance */ | 1384 | /* get supplemental address settings for this instance */ |
1366 | priv->added_unicast = | 1385 | priv->added_unicast = |
1367 | of_property_read_bool(pdev->dev.of_node, | 1386 | of_property_read_bool(pdev->dev.of_node, |
@@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev) | |||
1493 | return 0; | 1512 | return 0; |
1494 | } | 1513 | } |
1495 | 1514 | ||
1496 | struct altera_dmaops altera_dtype_sgdma = { | 1515 | static const struct altera_dmaops altera_dtype_sgdma = { |
1497 | .altera_dtype = ALTERA_DTYPE_SGDMA, | 1516 | .altera_dtype = ALTERA_DTYPE_SGDMA, |
1498 | .dmamask = 32, | 1517 | .dmamask = 32, |
1499 | .reset_dma = sgdma_reset, | 1518 | .reset_dma = sgdma_reset, |
@@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = { | |||
1512 | .start_rxdma = sgdma_start_rxdma, | 1531 | .start_rxdma = sgdma_start_rxdma, |
1513 | }; | 1532 | }; |
1514 | 1533 | ||
1515 | struct altera_dmaops altera_dtype_msgdma = { | 1534 | static const struct altera_dmaops altera_dtype_msgdma = { |
1516 | .altera_dtype = ALTERA_DTYPE_MSGDMA, | 1535 | .altera_dtype = ALTERA_DTYPE_MSGDMA, |
1517 | .dmamask = 64, | 1536 | .dmamask = 64, |
1518 | .reset_dma = msgdma_reset, | 1537 | .reset_dma = msgdma_reset, |
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c index 70fa13f486b2..d7eeb1713ad2 100644 --- a/drivers/net/ethernet/altera/altera_utils.c +++ b/drivers/net/ethernet/altera/altera_utils.c | |||
@@ -17,28 +17,28 @@ | |||
17 | #include "altera_tse.h" | 17 | #include "altera_tse.h" |
18 | #include "altera_utils.h" | 18 | #include "altera_utils.h" |
19 | 19 | ||
20 | void tse_set_bit(void __iomem *ioaddr, u32 bit_mask) | 20 | void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask) |
21 | { | 21 | { |
22 | u32 value = ioread32(ioaddr); | 22 | u32 value = csrrd32(ioaddr, offs); |
23 | value |= bit_mask; | 23 | value |= bit_mask; |
24 | iowrite32(value, ioaddr); | 24 | csrwr32(value, ioaddr, offs); |
25 | } | 25 | } |
26 | 26 | ||
27 | void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask) | 27 | void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask) |
28 | { | 28 | { |
29 | u32 value = ioread32(ioaddr); | 29 | u32 value = csrrd32(ioaddr, offs); |
30 | value &= ~bit_mask; | 30 | value &= ~bit_mask; |
31 | iowrite32(value, ioaddr); | 31 | csrwr32(value, ioaddr, offs); |
32 | } | 32 | } |
33 | 33 | ||
34 | int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask) | 34 | int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask) |
35 | { | 35 | { |
36 | u32 value = ioread32(ioaddr); | 36 | u32 value = csrrd32(ioaddr, offs); |
37 | return (value & bit_mask) ? 1 : 0; | 37 | return (value & bit_mask) ? 1 : 0; |
38 | } | 38 | } |
39 | 39 | ||
40 | int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask) | 40 | int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask) |
41 | { | 41 | { |
42 | u32 value = ioread32(ioaddr); | 42 | u32 value = csrrd32(ioaddr, offs); |
43 | return (value & bit_mask) ? 0 : 1; | 43 | return (value & bit_mask) ? 0 : 1; |
44 | } | 44 | } |
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h index ce1db36d3583..baf100ccf587 100644 --- a/drivers/net/ethernet/altera/altera_utils.h +++ b/drivers/net/ethernet/altera/altera_utils.h | |||
@@ -19,9 +19,9 @@ | |||
19 | #ifndef __ALTERA_UTILS_H__ | 19 | #ifndef __ALTERA_UTILS_H__ |
20 | #define __ALTERA_UTILS_H__ | 20 | #define __ALTERA_UTILS_H__ |
21 | 21 | ||
22 | void tse_set_bit(void __iomem *ioaddr, u32 bit_mask); | 22 | void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); |
23 | void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask); | 23 | void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); |
24 | int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask); | 24 | int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask); |
25 | int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask); | 25 | int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask); |
26 | 26 | ||
27 | #endif /* __ALTERA_UTILS_H__*/ | 27 | #endif /* __ALTERA_UTILS_H__*/ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b260913db236..3b0d43154e67 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | |||
10051 | #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) | 10051 | #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) |
10052 | #define BCM_5710_UNDI_FW_MF_MINOR (0x08) | 10052 | #define BCM_5710_UNDI_FW_MF_MINOR (0x08) |
10053 | #define BCM_5710_UNDI_FW_MF_VERS (0x05) | 10053 | #define BCM_5710_UNDI_FW_MF_VERS (0x05) |
10054 | #define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4)) | 10054 | #define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) |
10055 | #define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4)) | 10055 | #define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) |
10056 | static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) | 10056 | static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) |
10057 | { | 10057 | { |
10058 | u8 major, minor, version; | 10058 | u8 major, minor, version; |
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
10352 | /* Reset should be performed after BRB is emptied */ | 10352 | /* Reset should be performed after BRB is emptied */ |
10353 | if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { | 10353 | if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { |
10354 | u32 timer_count = 1000; | 10354 | u32 timer_count = 1000; |
10355 | bool need_write = true; | ||
10355 | 10356 | ||
10356 | /* Close the MAC Rx to prevent BRB from filling up */ | 10357 | /* Close the MAC Rx to prevent BRB from filling up */ |
10357 | bnx2x_prev_unload_close_mac(bp, &mac_vals); | 10358 | bnx2x_prev_unload_close_mac(bp, &mac_vals); |
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
10398 | * cleaning methods - might be redundant but harmless. | 10399 | * cleaning methods - might be redundant but harmless. |
10399 | */ | 10400 | */ |
10400 | if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { | 10401 | if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { |
10401 | bnx2x_prev_unload_undi_mf(bp); | 10402 | if (need_write) { |
10403 | bnx2x_prev_unload_undi_mf(bp); | ||
10404 | need_write = false; | ||
10405 | } | ||
10402 | } else if (prev_undi) { | 10406 | } else if (prev_undi) { |
10403 | /* If UNDI resides in memory, | 10407 | /* If UNDI resides in memory, |
10404 | * manually increment it | 10408 | * manually increment it |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 81cc2d9831c2..b8078d50261b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
@@ -2695,7 +2695,7 @@ out: | |||
2695 | bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); | 2695 | bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); |
2696 | } | 2696 | } |
2697 | 2697 | ||
2698 | return 0; | 2698 | return rc; |
2699 | } | 2699 | } |
2700 | 2700 | ||
2701 | int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) | 2701 | int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 0c067e8564dd..784c7155b98a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | |||
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) | |||
747 | out: | 747 | out: |
748 | bnx2x_vfpf_finalize(bp, &req->first_tlv); | 748 | bnx2x_vfpf_finalize(bp, &req->first_tlv); |
749 | 749 | ||
750 | return 0; | 750 | return rc; |
751 | } | 751 | } |
752 | 752 | ||
753 | /* request pf to config rss table for vf queues*/ | 753 | /* request pf to config rss table for vf queues*/ |
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c new file mode 100644 index 000000000000..4884205e56ee --- /dev/null +++ b/drivers/net/ethernet/ec_bhf.c | |||
@@ -0,0 +1,706 @@ | |||
1 | /* | ||
2 | * drivers/net/ethernet/beckhoff/ec_bhf.c | ||
3 | * | ||
4 | * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl> | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | /* This is a driver for EtherCAT master module present on CCAT FPGA. | ||
18 | * Those can be found on Bechhoff CX50xx industrial PCs. | ||
19 | */ | ||
20 | |||
21 | #if 0 | ||
22 | #define DEBUG | ||
23 | #endif | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/moduleparam.h> | ||
27 | #include <linux/pci.h> | ||
28 | #include <linux/init.h> | ||
29 | |||
30 | #include <linux/netdevice.h> | ||
31 | #include <linux/etherdevice.h> | ||
32 | #include <linux/ip.h> | ||
33 | #include <linux/skbuff.h> | ||
34 | #include <linux/hrtimer.h> | ||
35 | #include <linux/interrupt.h> | ||
36 | #include <linux/stat.h> | ||
37 | |||
38 | #define TIMER_INTERVAL_NSEC 20000 | ||
39 | |||
40 | #define INFO_BLOCK_SIZE 0x10 | ||
41 | #define INFO_BLOCK_TYPE 0x0 | ||
42 | #define INFO_BLOCK_REV 0x2 | ||
43 | #define INFO_BLOCK_BLK_CNT 0x4 | ||
44 | #define INFO_BLOCK_TX_CHAN 0x4 | ||
45 | #define INFO_BLOCK_RX_CHAN 0x5 | ||
46 | #define INFO_BLOCK_OFFSET 0x8 | ||
47 | |||
48 | #define EC_MII_OFFSET 0x4 | ||
49 | #define EC_FIFO_OFFSET 0x8 | ||
50 | #define EC_MAC_OFFSET 0xc | ||
51 | |||
52 | #define MAC_FRAME_ERR_CNT 0x0 | ||
53 | #define MAC_RX_ERR_CNT 0x1 | ||
54 | #define MAC_CRC_ERR_CNT 0x2 | ||
55 | #define MAC_LNK_LST_ERR_CNT 0x3 | ||
56 | #define MAC_TX_FRAME_CNT 0x10 | ||
57 | #define MAC_RX_FRAME_CNT 0x14 | ||
58 | #define MAC_TX_FIFO_LVL 0x20 | ||
59 | #define MAC_DROPPED_FRMS 0x28 | ||
60 | #define MAC_CONNECTED_CCAT_FLAG 0x78 | ||
61 | |||
62 | #define MII_MAC_ADDR 0x8 | ||
63 | #define MII_MAC_FILT_FLAG 0xe | ||
64 | #define MII_LINK_STATUS 0xf | ||
65 | |||
66 | #define FIFO_TX_REG 0x0 | ||
67 | #define FIFO_TX_RESET 0x8 | ||
68 | #define FIFO_RX_REG 0x10 | ||
69 | #define FIFO_RX_ADDR_VALID (1u << 31) | ||
70 | #define FIFO_RX_RESET 0x18 | ||
71 | |||
72 | #define DMA_CHAN_OFFSET 0x1000 | ||
73 | #define DMA_CHAN_SIZE 0x8 | ||
74 | |||
75 | #define DMA_WINDOW_SIZE_MASK 0xfffffffc | ||
76 | |||
77 | static struct pci_device_id ids[] = { | ||
78 | { PCI_DEVICE(0x15ec, 0x5000), }, | ||
79 | { 0, } | ||
80 | }; | ||
81 | MODULE_DEVICE_TABLE(pci, ids); | ||
82 | |||
83 | struct rx_header { | ||
84 | #define RXHDR_NEXT_ADDR_MASK 0xffffffu | ||
85 | #define RXHDR_NEXT_VALID (1u << 31) | ||
86 | __le32 next; | ||
87 | #define RXHDR_NEXT_RECV_FLAG 0x1 | ||
88 | __le32 recv; | ||
89 | #define RXHDR_LEN_MASK 0xfffu | ||
90 | __le16 len; | ||
91 | __le16 port; | ||
92 | __le32 reserved; | ||
93 | u8 timestamp[8]; | ||
94 | } __packed; | ||
95 | |||
96 | #define PKT_PAYLOAD_SIZE 0x7e8 | ||
97 | struct rx_desc { | ||
98 | struct rx_header header; | ||
99 | u8 data[PKT_PAYLOAD_SIZE]; | ||
100 | } __packed; | ||
101 | |||
102 | struct tx_header { | ||
103 | __le16 len; | ||
104 | #define TX_HDR_PORT_0 0x1 | ||
105 | #define TX_HDR_PORT_1 0x2 | ||
106 | u8 port; | ||
107 | u8 ts_enable; | ||
108 | #define TX_HDR_SENT 0x1 | ||
109 | __le32 sent; | ||
110 | u8 timestamp[8]; | ||
111 | } __packed; | ||
112 | |||
113 | struct tx_desc { | ||
114 | struct tx_header header; | ||
115 | u8 data[PKT_PAYLOAD_SIZE]; | ||
116 | } __packed; | ||
117 | |||
118 | #define FIFO_SIZE 64 | ||
119 | |||
120 | static long polling_frequency = TIMER_INTERVAL_NSEC; | ||
121 | |||
122 | struct bhf_dma { | ||
123 | u8 *buf; | ||
124 | size_t len; | ||
125 | dma_addr_t buf_phys; | ||
126 | |||
127 | u8 *alloc; | ||
128 | size_t alloc_len; | ||
129 | dma_addr_t alloc_phys; | ||
130 | }; | ||
131 | |||
132 | struct ec_bhf_priv { | ||
133 | struct net_device *net_dev; | ||
134 | |||
135 | struct pci_dev *dev; | ||
136 | |||
137 | void * __iomem io; | ||
138 | void * __iomem dma_io; | ||
139 | |||
140 | struct hrtimer hrtimer; | ||
141 | |||
142 | int tx_dma_chan; | ||
143 | int rx_dma_chan; | ||
144 | void * __iomem ec_io; | ||
145 | void * __iomem fifo_io; | ||
146 | void * __iomem mii_io; | ||
147 | void * __iomem mac_io; | ||
148 | |||
149 | struct bhf_dma rx_buf; | ||
150 | struct rx_desc *rx_descs; | ||
151 | int rx_dnext; | ||
152 | int rx_dcount; | ||
153 | |||
154 | struct bhf_dma tx_buf; | ||
155 | struct tx_desc *tx_descs; | ||
156 | int tx_dcount; | ||
157 | int tx_dnext; | ||
158 | |||
159 | u64 stat_rx_bytes; | ||
160 | u64 stat_tx_bytes; | ||
161 | }; | ||
162 | |||
163 | #define PRIV_TO_DEV(priv) (&(priv)->dev->dev) | ||
164 | |||
165 | #define ETHERCAT_MASTER_ID 0x14 | ||
166 | |||
167 | static void ec_bhf_print_status(struct ec_bhf_priv *priv) | ||
168 | { | ||
169 | struct device *dev = PRIV_TO_DEV(priv); | ||
170 | |||
171 | dev_dbg(dev, "Frame error counter: %d\n", | ||
172 | ioread8(priv->mac_io + MAC_FRAME_ERR_CNT)); | ||
173 | dev_dbg(dev, "RX error counter: %d\n", | ||
174 | ioread8(priv->mac_io + MAC_RX_ERR_CNT)); | ||
175 | dev_dbg(dev, "CRC error counter: %d\n", | ||
176 | ioread8(priv->mac_io + MAC_CRC_ERR_CNT)); | ||
177 | dev_dbg(dev, "TX frame counter: %d\n", | ||
178 | ioread32(priv->mac_io + MAC_TX_FRAME_CNT)); | ||
179 | dev_dbg(dev, "RX frame counter: %d\n", | ||
180 | ioread32(priv->mac_io + MAC_RX_FRAME_CNT)); | ||
181 | dev_dbg(dev, "TX fifo level: %d\n", | ||
182 | ioread8(priv->mac_io + MAC_TX_FIFO_LVL)); | ||
183 | dev_dbg(dev, "Dropped frames: %d\n", | ||
184 | ioread8(priv->mac_io + MAC_DROPPED_FRMS)); | ||
185 | dev_dbg(dev, "Connected with CCAT slot: %d\n", | ||
186 | ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG)); | ||
187 | dev_dbg(dev, "Link status: %d\n", | ||
188 | ioread8(priv->mii_io + MII_LINK_STATUS)); | ||
189 | } | ||
190 | |||
191 | static void ec_bhf_reset(struct ec_bhf_priv *priv) | ||
192 | { | ||
193 | iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT); | ||
194 | iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT); | ||
195 | iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT); | ||
196 | iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT); | ||
197 | iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT); | ||
198 | iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT); | ||
199 | iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS); | ||
200 | |||
201 | iowrite8(0, priv->fifo_io + FIFO_TX_RESET); | ||
202 | iowrite8(0, priv->fifo_io + FIFO_RX_RESET); | ||
203 | |||
204 | iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL); | ||
205 | } | ||
206 | |||
207 | static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc) | ||
208 | { | ||
209 | u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header); | ||
210 | u32 addr = (u8 *)desc - priv->tx_buf.buf; | ||
211 | |||
212 | iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG); | ||
213 | |||
214 | dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n"); | ||
215 | } | ||
216 | |||
217 | static int ec_bhf_desc_sent(struct tx_desc *desc) | ||
218 | { | ||
219 | return le32_to_cpu(desc->header.sent) & TX_HDR_SENT; | ||
220 | } | ||
221 | |||
222 | static void ec_bhf_process_tx(struct ec_bhf_priv *priv) | ||
223 | { | ||
224 | if (unlikely(netif_queue_stopped(priv->net_dev))) { | ||
225 | /* Make sure that we perceive changes to tx_dnext. */ | ||
226 | smp_rmb(); | ||
227 | |||
228 | if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) | ||
229 | netif_wake_queue(priv->net_dev); | ||
230 | } | ||
231 | } | ||
232 | |||
233 | static int ec_bhf_pkt_received(struct rx_desc *desc) | ||
234 | { | ||
235 | return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG; | ||
236 | } | ||
237 | |||
238 | static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc) | ||
239 | { | ||
240 | iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf), | ||
241 | priv->fifo_io + FIFO_RX_REG); | ||
242 | } | ||
243 | |||
244 | static void ec_bhf_process_rx(struct ec_bhf_priv *priv) | ||
245 | { | ||
246 | struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext]; | ||
247 | struct device *dev = PRIV_TO_DEV(priv); | ||
248 | |||
249 | while (ec_bhf_pkt_received(desc)) { | ||
250 | int pkt_size = (le16_to_cpu(desc->header.len) & | ||
251 | RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4; | ||
252 | u8 *data = desc->data; | ||
253 | struct sk_buff *skb; | ||
254 | |||
255 | skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size); | ||
256 | dev_dbg(dev, "Received packet, size: %d\n", pkt_size); | ||
257 | |||
258 | if (skb) { | ||
259 | memcpy(skb_put(skb, pkt_size), data, pkt_size); | ||
260 | skb->protocol = eth_type_trans(skb, priv->net_dev); | ||
261 | dev_dbg(dev, "Protocol type: %x\n", skb->protocol); | ||
262 | |||
263 | priv->stat_rx_bytes += pkt_size; | ||
264 | |||
265 | netif_rx(skb); | ||
266 | } else { | ||
267 | dev_err_ratelimited(dev, | ||
268 | "Couldn't allocate a skb_buff for a packet of size %u\n", | ||
269 | pkt_size); | ||
270 | } | ||
271 | |||
272 | desc->header.recv = 0; | ||
273 | |||
274 | ec_bhf_add_rx_desc(priv, desc); | ||
275 | |||
276 | priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount; | ||
277 | desc = &priv->rx_descs[priv->rx_dnext]; | ||
278 | } | ||
279 | |||
280 | } | ||
281 | |||
282 | static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer) | ||
283 | { | ||
284 | struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv, | ||
285 | hrtimer); | ||
286 | ec_bhf_process_rx(priv); | ||
287 | ec_bhf_process_tx(priv); | ||
288 | |||
289 | if (!netif_running(priv->net_dev)) | ||
290 | return HRTIMER_NORESTART; | ||
291 | |||
292 | hrtimer_forward_now(timer, ktime_set(0, polling_frequency)); | ||
293 | return HRTIMER_RESTART; | ||
294 | } | ||
295 | |||
296 | static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv) | ||
297 | { | ||
298 | struct device *dev = PRIV_TO_DEV(priv); | ||
299 | unsigned block_count, i; | ||
300 | void * __iomem ec_info; | ||
301 | |||
302 | dev_dbg(dev, "Info block:\n"); | ||
303 | dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io)); | ||
304 | dev_dbg(dev, "Revision of function: %x\n", | ||
305 | (unsigned)ioread16(priv->io + INFO_BLOCK_REV)); | ||
306 | |||
307 | block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT); | ||
308 | dev_dbg(dev, "Number of function blocks: %x\n", block_count); | ||
309 | |||
310 | for (i = 0; i < block_count; i++) { | ||
311 | u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE + | ||
312 | INFO_BLOCK_TYPE); | ||
313 | if (type == ETHERCAT_MASTER_ID) | ||
314 | break; | ||
315 | } | ||
316 | if (i == block_count) { | ||
317 | dev_err(dev, "EtherCAT master with DMA block not found\n"); | ||
318 | return -ENODEV; | ||
319 | } | ||
320 | dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i); | ||
321 | |||
322 | ec_info = priv->io + i * INFO_BLOCK_SIZE; | ||
323 | dev_dbg(dev, "EtherCAT master revision: %d\n", | ||
324 | ioread16(ec_info + INFO_BLOCK_REV)); | ||
325 | |||
326 | priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN); | ||
327 | dev_dbg(dev, "EtherCAT master tx dma channel: %d\n", | ||
328 | priv->tx_dma_chan); | ||
329 | |||
330 | priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN); | ||
331 | dev_dbg(dev, "EtherCAT master rx dma channel: %d\n", | ||
332 | priv->rx_dma_chan); | ||
333 | |||
334 | priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET); | ||
335 | priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET); | ||
336 | priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET); | ||
337 | priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET); | ||
338 | |||
339 | dev_dbg(dev, | ||
340 | "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n", | ||
341 | priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io); | ||
342 | |||
343 | return 0; | ||
344 | } | ||
345 | |||
346 | static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb, | ||
347 | struct net_device *net_dev) | ||
348 | { | ||
349 | struct ec_bhf_priv *priv = netdev_priv(net_dev); | ||
350 | struct tx_desc *desc; | ||
351 | unsigned len; | ||
352 | |||
353 | dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n"); | ||
354 | |||
355 | desc = &priv->tx_descs[priv->tx_dnext]; | ||
356 | |||
357 | skb_copy_and_csum_dev(skb, desc->data); | ||
358 | len = skb->len; | ||
359 | |||
360 | memset(&desc->header, 0, sizeof(desc->header)); | ||
361 | desc->header.len = cpu_to_le16(len); | ||
362 | desc->header.port = TX_HDR_PORT_0; | ||
363 | |||
364 | ec_bhf_send_packet(priv, desc); | ||
365 | |||
366 | priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount; | ||
367 | |||
368 | if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) { | ||
369 | /* Make sure that update updates to tx_dnext are perceived | ||
370 | * by timer routine. | ||
371 | */ | ||
372 | smp_wmb(); | ||
373 | |||
374 | netif_stop_queue(net_dev); | ||
375 | |||
376 | dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n"); | ||
377 | ec_bhf_print_status(priv); | ||
378 | } | ||
379 | |||
380 | priv->stat_tx_bytes += len; | ||
381 | |||
382 | dev_kfree_skb(skb); | ||
383 | |||
384 | return NETDEV_TX_OK; | ||
385 | } | ||
386 | |||
387 | static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, | ||
388 | struct bhf_dma *buf, | ||
389 | int channel, | ||
390 | int size) | ||
391 | { | ||
392 | int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET; | ||
393 | struct device *dev = PRIV_TO_DEV(priv); | ||
394 | u32 mask; | ||
395 | |||
396 | iowrite32(0xffffffff, priv->dma_io + offset); | ||
397 | |||
398 | mask = ioread32(priv->dma_io + offset); | ||
399 | mask &= DMA_WINDOW_SIZE_MASK; | ||
400 | dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel); | ||
401 | |||
402 | /* We want to allocate a chunk of memory that is: | ||
403 | * - aligned to the mask we just read | ||
404 | * - is of size 2^mask bytes (at most) | ||
405 | * In order to ensure that we will allocate buffer of | ||
406 | * 2 * 2^mask bytes. | ||
407 | */ | ||
408 | buf->len = min_t(int, ~mask + 1, size); | ||
409 | buf->alloc_len = 2 * buf->len; | ||
410 | |||
411 | dev_dbg(dev, "Allocating %d bytes for channel %d", | ||
412 | (int)buf->alloc_len, channel); | ||
413 | buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys, | ||
414 | GFP_KERNEL); | ||
415 | if (buf->alloc == NULL) { | ||
416 | dev_info(dev, "Failed to allocate buffer\n"); | ||
417 | return -ENOMEM; | ||
418 | } | ||
419 | |||
420 | buf->buf_phys = (buf->alloc_phys + buf->len) & mask; | ||
421 | buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys); | ||
422 | |||
423 | iowrite32(0, priv->dma_io + offset + 4); | ||
424 | iowrite32(buf->buf_phys, priv->dma_io + offset); | ||
425 | dev_dbg(dev, "Buffer: %x and read from dev: %x", | ||
426 | (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset)); | ||
427 | |||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv) | ||
432 | { | ||
433 | int i = 0; | ||
434 | |||
435 | priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc); | ||
436 | priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf; | ||
437 | priv->tx_dnext = 0; | ||
438 | |||
439 | for (i = 0; i < priv->tx_dcount; i++) | ||
440 | priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT); | ||
441 | } | ||
442 | |||
443 | static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv) | ||
444 | { | ||
445 | int i; | ||
446 | |||
447 | priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc); | ||
448 | priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf; | ||
449 | priv->rx_dnext = 0; | ||
450 | |||
451 | for (i = 0; i < priv->rx_dcount; i++) { | ||
452 | struct rx_desc *desc = &priv->rx_descs[i]; | ||
453 | u32 next; | ||
454 | |||
455 | if (i != priv->rx_dcount - 1) | ||
456 | next = (u8 *)(desc + 1) - priv->rx_buf.buf; | ||
457 | else | ||
458 | next = 0; | ||
459 | next |= RXHDR_NEXT_VALID; | ||
460 | desc->header.next = cpu_to_le32(next); | ||
461 | desc->header.recv = 0; | ||
462 | ec_bhf_add_rx_desc(priv, desc); | ||
463 | } | ||
464 | } | ||
465 | |||
466 | static int ec_bhf_open(struct net_device *net_dev) | ||
467 | { | ||
468 | struct ec_bhf_priv *priv = netdev_priv(net_dev); | ||
469 | struct device *dev = PRIV_TO_DEV(priv); | ||
470 | int err = 0; | ||
471 | |||
472 | dev_info(dev, "Opening device\n"); | ||
473 | |||
474 | ec_bhf_reset(priv); | ||
475 | |||
476 | err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan, | ||
477 | FIFO_SIZE * sizeof(struct rx_desc)); | ||
478 | if (err) { | ||
479 | dev_err(dev, "Failed to allocate rx buffer\n"); | ||
480 | goto out; | ||
481 | } | ||
482 | ec_bhf_setup_rx_descs(priv); | ||
483 | |||
484 | dev_info(dev, "RX buffer allocated, address: %x\n", | ||
485 | (unsigned)priv->rx_buf.buf_phys); | ||
486 | |||
487 | err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan, | ||
488 | FIFO_SIZE * sizeof(struct tx_desc)); | ||
489 | if (err) { | ||
490 | dev_err(dev, "Failed to allocate tx buffer\n"); | ||
491 | goto error_rx_free; | ||
492 | } | ||
493 | dev_dbg(dev, "TX buffer allocated, addres: %x\n", | ||
494 | (unsigned)priv->tx_buf.buf_phys); | ||
495 | |||
496 | iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG); | ||
497 | |||
498 | ec_bhf_setup_tx_descs(priv); | ||
499 | |||
500 | netif_start_queue(net_dev); | ||
501 | |||
502 | hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
503 | priv->hrtimer.function = ec_bhf_timer_fun; | ||
504 | hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), | ||
505 | HRTIMER_MODE_REL); | ||
506 | |||
507 | dev_info(PRIV_TO_DEV(priv), "Device open\n"); | ||
508 | |||
509 | ec_bhf_print_status(priv); | ||
510 | |||
511 | return 0; | ||
512 | |||
513 | error_rx_free: | ||
514 | dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc, | ||
515 | priv->rx_buf.alloc_len); | ||
516 | out: | ||
517 | return err; | ||
518 | } | ||
519 | |||
520 | static int ec_bhf_stop(struct net_device *net_dev) | ||
521 | { | ||
522 | struct ec_bhf_priv *priv = netdev_priv(net_dev); | ||
523 | struct device *dev = PRIV_TO_DEV(priv); | ||
524 | |||
525 | hrtimer_cancel(&priv->hrtimer); | ||
526 | |||
527 | ec_bhf_reset(priv); | ||
528 | |||
529 | netif_tx_disable(net_dev); | ||
530 | |||
531 | dma_free_coherent(dev, priv->tx_buf.alloc_len, | ||
532 | priv->tx_buf.alloc, priv->tx_buf.alloc_phys); | ||
533 | dma_free_coherent(dev, priv->rx_buf.alloc_len, | ||
534 | priv->rx_buf.alloc, priv->rx_buf.alloc_phys); | ||
535 | |||
536 | return 0; | ||
537 | } | ||
538 | |||
539 | static struct rtnl_link_stats64 * | ||
540 | ec_bhf_get_stats(struct net_device *net_dev, | ||
541 | struct rtnl_link_stats64 *stats) | ||
542 | { | ||
543 | struct ec_bhf_priv *priv = netdev_priv(net_dev); | ||
544 | |||
545 | stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) + | ||
546 | ioread8(priv->mac_io + MAC_CRC_ERR_CNT) + | ||
547 | ioread8(priv->mac_io + MAC_FRAME_ERR_CNT); | ||
548 | stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT); | ||
549 | stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT); | ||
550 | stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS); | ||
551 | |||
552 | stats->tx_bytes = priv->stat_tx_bytes; | ||
553 | stats->rx_bytes = priv->stat_rx_bytes; | ||
554 | |||
555 | return stats; | ||
556 | } | ||
557 | |||
558 | static const struct net_device_ops ec_bhf_netdev_ops = { | ||
559 | .ndo_start_xmit = ec_bhf_start_xmit, | ||
560 | .ndo_open = ec_bhf_open, | ||
561 | .ndo_stop = ec_bhf_stop, | ||
562 | .ndo_get_stats64 = ec_bhf_get_stats, | ||
563 | .ndo_change_mtu = eth_change_mtu, | ||
564 | .ndo_validate_addr = eth_validate_addr, | ||
565 | .ndo_set_mac_address = eth_mac_addr | ||
566 | }; | ||
567 | |||
568 | static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id) | ||
569 | { | ||
570 | struct net_device *net_dev; | ||
571 | struct ec_bhf_priv *priv; | ||
572 | void * __iomem dma_io; | ||
573 | void * __iomem io; | ||
574 | int err = 0; | ||
575 | |||
576 | err = pci_enable_device(dev); | ||
577 | if (err) | ||
578 | return err; | ||
579 | |||
580 | pci_set_master(dev); | ||
581 | |||
582 | err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); | ||
583 | if (err) { | ||
584 | dev_err(&dev->dev, | ||
585 | "Required dma mask not supported, failed to initialize device\n"); | ||
586 | err = -EIO; | ||
587 | goto err_disable_dev; | ||
588 | } | ||
589 | |||
590 | err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)); | ||
591 | if (err) { | ||
592 | dev_err(&dev->dev, | ||
593 | "Required dma mask not supported, failed to initialize device\n"); | ||
594 | goto err_disable_dev; | ||
595 | } | ||
596 | |||
597 | err = pci_request_regions(dev, "ec_bhf"); | ||
598 | if (err) { | ||
599 | dev_err(&dev->dev, "Failed to request pci memory regions\n"); | ||
600 | goto err_disable_dev; | ||
601 | } | ||
602 | |||
603 | io = pci_iomap(dev, 0, 0); | ||
604 | if (!io) { | ||
605 | dev_err(&dev->dev, "Failed to map pci card memory bar 0"); | ||
606 | err = -EIO; | ||
607 | goto err_release_regions; | ||
608 | } | ||
609 | |||
610 | dma_io = pci_iomap(dev, 2, 0); | ||
611 | if (!dma_io) { | ||
612 | dev_err(&dev->dev, "Failed to map pci card memory bar 2"); | ||
613 | err = -EIO; | ||
614 | goto err_unmap; | ||
615 | } | ||
616 | |||
617 | net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv)); | ||
618 | if (net_dev == 0) { | ||
619 | err = -ENOMEM; | ||
620 | goto err_unmap_dma_io; | ||
621 | } | ||
622 | |||
623 | pci_set_drvdata(dev, net_dev); | ||
624 | SET_NETDEV_DEV(net_dev, &dev->dev); | ||
625 | |||
626 | net_dev->features = 0; | ||
627 | net_dev->flags |= IFF_NOARP; | ||
628 | |||
629 | net_dev->netdev_ops = &ec_bhf_netdev_ops; | ||
630 | |||
631 | priv = netdev_priv(net_dev); | ||
632 | priv->net_dev = net_dev; | ||
633 | priv->io = io; | ||
634 | priv->dma_io = dma_io; | ||
635 | priv->dev = dev; | ||
636 | |||
637 | err = ec_bhf_setup_offsets(priv); | ||
638 | if (err < 0) | ||
639 | goto err_free_net_dev; | ||
640 | |||
641 | memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6); | ||
642 | |||
643 | dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n", | ||
644 | net_dev->dev_addr); | ||
645 | |||
646 | err = register_netdev(net_dev); | ||
647 | if (err < 0) | ||
648 | goto err_free_net_dev; | ||
649 | |||
650 | return 0; | ||
651 | |||
652 | err_free_net_dev: | ||
653 | free_netdev(net_dev); | ||
654 | err_unmap_dma_io: | ||
655 | pci_iounmap(dev, dma_io); | ||
656 | err_unmap: | ||
657 | pci_iounmap(dev, io); | ||
658 | err_release_regions: | ||
659 | pci_release_regions(dev); | ||
660 | err_disable_dev: | ||
661 | pci_clear_master(dev); | ||
662 | pci_disable_device(dev); | ||
663 | |||
664 | return err; | ||
665 | } | ||
666 | |||
667 | static void ec_bhf_remove(struct pci_dev *dev) | ||
668 | { | ||
669 | struct net_device *net_dev = pci_get_drvdata(dev); | ||
670 | struct ec_bhf_priv *priv = netdev_priv(net_dev); | ||
671 | |||
672 | unregister_netdev(net_dev); | ||
673 | free_netdev(net_dev); | ||
674 | |||
675 | pci_iounmap(dev, priv->dma_io); | ||
676 | pci_iounmap(dev, priv->io); | ||
677 | pci_release_regions(dev); | ||
678 | pci_clear_master(dev); | ||
679 | pci_disable_device(dev); | ||
680 | } | ||
681 | |||
682 | static struct pci_driver pci_driver = { | ||
683 | .name = "ec_bhf", | ||
684 | .id_table = ids, | ||
685 | .probe = ec_bhf_probe, | ||
686 | .remove = ec_bhf_remove, | ||
687 | }; | ||
688 | |||
689 | static int __init ec_bhf_init(void) | ||
690 | { | ||
691 | return pci_register_driver(&pci_driver); | ||
692 | } | ||
693 | |||
694 | static void __exit ec_bhf_exit(void) | ||
695 | { | ||
696 | pci_unregister_driver(&pci_driver); | ||
697 | } | ||
698 | |||
699 | module_init(ec_bhf_init); | ||
700 | module_exit(ec_bhf_exit); | ||
701 | |||
702 | module_param(polling_frequency, long, S_IRUGO); | ||
703 | MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns"); | ||
704 | |||
705 | MODULE_LICENSE("GPL"); | ||
706 | MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>"); | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a18645407d21..dc19bc5dec77 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev) | |||
4949 | if (status) | 4949 | if (status) |
4950 | goto err; | 4950 | goto err; |
4951 | 4951 | ||
4952 | /* On some BE3 FW versions, after a HW reset, | ||
4953 | * interrupts will remain disabled for each function. | ||
4954 | * So, explicitly enable interrupts | ||
4955 | */ | ||
4956 | be_intr_set(adapter, true); | ||
4957 | |||
4952 | /* tell fw we're ready to fire cmds */ | 4958 | /* tell fw we're ready to fire cmds */ |
4953 | status = be_cmd_fw_init(adapter); | 4959 | status = be_cmd_fw_init(adapter); |
4954 | if (status) | 4960 | if (status) |
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index b0c6050479eb..b78378cea5e3 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c | |||
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme, | |||
1988 | return idx; | 1988 | return idx; |
1989 | } | 1989 | } |
1990 | 1990 | ||
1991 | static void | 1991 | static int |
1992 | jme_fill_tx_map(struct pci_dev *pdev, | 1992 | jme_fill_tx_map(struct pci_dev *pdev, |
1993 | struct txdesc *txdesc, | 1993 | struct txdesc *txdesc, |
1994 | struct jme_buffer_info *txbi, | 1994 | struct jme_buffer_info *txbi, |
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev, | |||
2005 | len, | 2005 | len, |
2006 | PCI_DMA_TODEVICE); | 2006 | PCI_DMA_TODEVICE); |
2007 | 2007 | ||
2008 | if (unlikely(pci_dma_mapping_error(pdev, dmaaddr))) | ||
2009 | return -EINVAL; | ||
2010 | |||
2008 | pci_dma_sync_single_for_device(pdev, | 2011 | pci_dma_sync_single_for_device(pdev, |
2009 | dmaaddr, | 2012 | dmaaddr, |
2010 | len, | 2013 | len, |
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev, | |||
2021 | 2024 | ||
2022 | txbi->mapping = dmaaddr; | 2025 | txbi->mapping = dmaaddr; |
2023 | txbi->len = len; | 2026 | txbi->len = len; |
2027 | return 0; | ||
2024 | } | 2028 | } |
2025 | 2029 | ||
2026 | static void | 2030 | static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) |
2031 | { | ||
2032 | struct jme_ring *txring = &(jme->txring[0]); | ||
2033 | struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; | ||
2034 | int mask = jme->tx_ring_mask; | ||
2035 | int j; | ||
2036 | |||
2037 | for (j = 0 ; j < count ; j++) { | ||
2038 | ctxbi = txbi + ((startidx + j + 2) & (mask)); | ||
2039 | pci_unmap_page(jme->pdev, | ||
2040 | ctxbi->mapping, | ||
2041 | ctxbi->len, | ||
2042 | PCI_DMA_TODEVICE); | ||
2043 | |||
2044 | ctxbi->mapping = 0; | ||
2045 | ctxbi->len = 0; | ||
2046 | } | ||
2047 | |||
2048 | } | ||
2049 | |||
2050 | static int | ||
2027 | jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) | 2051 | jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) |
2028 | { | 2052 | { |
2029 | struct jme_ring *txring = &(jme->txring[0]); | 2053 | struct jme_ring *txring = &(jme->txring[0]); |
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) | |||
2034 | int mask = jme->tx_ring_mask; | 2058 | int mask = jme->tx_ring_mask; |
2035 | const struct skb_frag_struct *frag; | 2059 | const struct skb_frag_struct *frag; |
2036 | u32 len; | 2060 | u32 len; |
2061 | int ret = 0; | ||
2037 | 2062 | ||
2038 | for (i = 0 ; i < nr_frags ; ++i) { | 2063 | for (i = 0 ; i < nr_frags ; ++i) { |
2039 | frag = &skb_shinfo(skb)->frags[i]; | 2064 | frag = &skb_shinfo(skb)->frags[i]; |
2040 | ctxdesc = txdesc + ((idx + i + 2) & (mask)); | 2065 | ctxdesc = txdesc + ((idx + i + 2) & (mask)); |
2041 | ctxbi = txbi + ((idx + i + 2) & (mask)); | 2066 | ctxbi = txbi + ((idx + i + 2) & (mask)); |
2042 | 2067 | ||
2043 | jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, | 2068 | ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, |
2044 | skb_frag_page(frag), | 2069 | skb_frag_page(frag), |
2045 | frag->page_offset, skb_frag_size(frag), hidma); | 2070 | frag->page_offset, skb_frag_size(frag), hidma); |
2071 | if (ret) { | ||
2072 | jme_drop_tx_map(jme, idx, i); | ||
2073 | goto out; | ||
2074 | } | ||
2075 | |||
2046 | } | 2076 | } |
2047 | 2077 | ||
2048 | len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; | 2078 | len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; |
2049 | ctxdesc = txdesc + ((idx + 1) & (mask)); | 2079 | ctxdesc = txdesc + ((idx + 1) & (mask)); |
2050 | ctxbi = txbi + ((idx + 1) & (mask)); | 2080 | ctxbi = txbi + ((idx + 1) & (mask)); |
2051 | jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), | 2081 | ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), |
2052 | offset_in_page(skb->data), len, hidma); | 2082 | offset_in_page(skb->data), len, hidma); |
2083 | if (ret) | ||
2084 | jme_drop_tx_map(jme, idx, i); | ||
2085 | |||
2086 | out: | ||
2087 | return ret; | ||
2053 | 2088 | ||
2054 | } | 2089 | } |
2055 | 2090 | ||
2091 | |||
2056 | static int | 2092 | static int |
2057 | jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) | 2093 | jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) |
2058 | { | 2094 | { |
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) | |||
2131 | struct txdesc *txdesc; | 2167 | struct txdesc *txdesc; |
2132 | struct jme_buffer_info *txbi; | 2168 | struct jme_buffer_info *txbi; |
2133 | u8 flags; | 2169 | u8 flags; |
2170 | int ret = 0; | ||
2134 | 2171 | ||
2135 | txdesc = (struct txdesc *)txring->desc + idx; | 2172 | txdesc = (struct txdesc *)txring->desc + idx; |
2136 | txbi = txring->bufinf + idx; | 2173 | txbi = txring->bufinf + idx; |
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) | |||
2155 | if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) | 2192 | if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) |
2156 | jme_tx_csum(jme, skb, &flags); | 2193 | jme_tx_csum(jme, skb, &flags); |
2157 | jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); | 2194 | jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); |
2158 | jme_map_tx_skb(jme, skb, idx); | 2195 | ret = jme_map_tx_skb(jme, skb, idx); |
2196 | if (ret) | ||
2197 | return ret; | ||
2198 | |||
2159 | txdesc->desc1.flags = flags; | 2199 | txdesc->desc1.flags = flags; |
2160 | /* | 2200 | /* |
2161 | * Set tx buffer info after telling NIC to send | 2201 | * Set tx buffer info after telling NIC to send |
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
2228 | return NETDEV_TX_BUSY; | 2268 | return NETDEV_TX_BUSY; |
2229 | } | 2269 | } |
2230 | 2270 | ||
2231 | jme_fill_tx_desc(jme, skb, idx); | 2271 | if (jme_fill_tx_desc(jme, skb, idx)) |
2272 | return NETDEV_TX_OK; | ||
2232 | 2273 | ||
2233 | jwrite32(jme, JME_TXCS, jme->reg_txcs | | 2274 | jwrite32(jme, JME_TXCS, jme->reg_txcs | |
2234 | TXCS_SELECT_QUEUE0 | | 2275 | TXCS_SELECT_QUEUE0 | |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 78099eab7673..92d3249f63f1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = { | |||
1253 | }, | 1253 | }, |
1254 | { | 1254 | { |
1255 | .opcode = MLX4_CMD_UPDATE_QP, | 1255 | .opcode = MLX4_CMD_UPDATE_QP, |
1256 | .has_inbox = false, | 1256 | .has_inbox = true, |
1257 | .has_outbox = false, | 1257 | .has_outbox = false, |
1258 | .out_is_imm = false, | 1258 | .out_is_imm = false, |
1259 | .encode_slave_id = false, | 1259 | .encode_slave_id = false, |
1260 | .verify = NULL, | 1260 | .verify = NULL, |
1261 | .wrapper = mlx4_CMD_EPERM_wrapper | 1261 | .wrapper = mlx4_UPDATE_QP_wrapper |
1262 | }, | 1262 | }, |
1263 | { | 1263 | { |
1264 | .opcode = MLX4_CMD_GET_OP_REQ, | 1264 | .opcode = MLX4_CMD_GET_OP_REQ, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index f9c465101963..212cea440f90 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
@@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, | |||
1195 | struct mlx4_cmd_mailbox *outbox, | 1195 | struct mlx4_cmd_mailbox *outbox, |
1196 | struct mlx4_cmd_info *cmd); | 1196 | struct mlx4_cmd_info *cmd); |
1197 | 1197 | ||
1198 | int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, | ||
1199 | struct mlx4_vhcr *vhcr, | ||
1200 | struct mlx4_cmd_mailbox *inbox, | ||
1201 | struct mlx4_cmd_mailbox *outbox, | ||
1202 | struct mlx4_cmd_info *cmd); | ||
1203 | |||
1198 | int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, | 1204 | int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, |
1199 | struct mlx4_vhcr *vhcr, | 1205 | struct mlx4_vhcr *vhcr, |
1200 | struct mlx4_cmd_mailbox *inbox, | 1206 | struct mlx4_cmd_mailbox *inbox, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 61d64ebffd56..fbd32af89c7c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c | |||
@@ -389,6 +389,41 @@ err_icm: | |||
389 | 389 | ||
390 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); | 390 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); |
391 | 391 | ||
392 | #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC | ||
393 | int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, | ||
394 | enum mlx4_update_qp_attr attr, | ||
395 | struct mlx4_update_qp_params *params) | ||
396 | { | ||
397 | struct mlx4_cmd_mailbox *mailbox; | ||
398 | struct mlx4_update_qp_context *cmd; | ||
399 | u64 pri_addr_path_mask = 0; | ||
400 | int err = 0; | ||
401 | |||
402 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
403 | if (IS_ERR(mailbox)) | ||
404 | return PTR_ERR(mailbox); | ||
405 | |||
406 | cmd = (struct mlx4_update_qp_context *)mailbox->buf; | ||
407 | |||
408 | if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS)) | ||
409 | return -EINVAL; | ||
410 | |||
411 | if (attr & MLX4_UPDATE_QP_SMAC) { | ||
412 | pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX; | ||
413 | cmd->qp_context.pri_path.grh_mylmc = params->smac_index; | ||
414 | } | ||
415 | |||
416 | cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); | ||
417 | |||
418 | err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0, | ||
419 | MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, | ||
420 | MLX4_CMD_NATIVE); | ||
421 | |||
422 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
423 | return err; | ||
424 | } | ||
425 | EXPORT_SYMBOL_GPL(mlx4_update_qp); | ||
426 | |||
392 | void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) | 427 | void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) |
393 | { | 428 | { |
394 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | 429 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1c3fdd4a1f7d..8f1254a79832 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave, | |||
3895 | 3895 | ||
3896 | } | 3896 | } |
3897 | 3897 | ||
3898 | #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX) | ||
3899 | int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, | ||
3900 | struct mlx4_vhcr *vhcr, | ||
3901 | struct mlx4_cmd_mailbox *inbox, | ||
3902 | struct mlx4_cmd_mailbox *outbox, | ||
3903 | struct mlx4_cmd_info *cmd_info) | ||
3904 | { | ||
3905 | int err; | ||
3906 | u32 qpn = vhcr->in_modifier & 0xffffff; | ||
3907 | struct res_qp *rqp; | ||
3908 | u64 mac; | ||
3909 | unsigned port; | ||
3910 | u64 pri_addr_path_mask; | ||
3911 | struct mlx4_update_qp_context *cmd; | ||
3912 | int smac_index; | ||
3913 | |||
3914 | cmd = (struct mlx4_update_qp_context *)inbox->buf; | ||
3915 | |||
3916 | pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask); | ||
3917 | if (cmd->qp_mask || cmd->secondary_addr_path_mask || | ||
3918 | (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED)) | ||
3919 | return -EPERM; | ||
3920 | |||
3921 | /* Just change the smac for the QP */ | ||
3922 | err = get_res(dev, slave, qpn, RES_QP, &rqp); | ||
3923 | if (err) { | ||
3924 | mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave); | ||
3925 | return err; | ||
3926 | } | ||
3927 | |||
3928 | port = (rqp->sched_queue >> 6 & 1) + 1; | ||
3929 | smac_index = cmd->qp_context.pri_path.grh_mylmc; | ||
3930 | err = mac_find_smac_ix_in_slave(dev, slave, port, | ||
3931 | smac_index, &mac); | ||
3932 | if (err) { | ||
3933 | mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", | ||
3934 | qpn, smac_index); | ||
3935 | goto err_mac; | ||
3936 | } | ||
3937 | |||
3938 | err = mlx4_cmd(dev, inbox->dma, | ||
3939 | vhcr->in_modifier, 0, | ||
3940 | MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, | ||
3941 | MLX4_CMD_NATIVE); | ||
3942 | if (err) { | ||
3943 | mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn); | ||
3944 | goto err_mac; | ||
3945 | } | ||
3946 | |||
3947 | err_mac: | ||
3948 | put_res(dev, slave, qpn, RES_QP); | ||
3949 | return err; | ||
3950 | } | ||
3951 | |||
3898 | int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, | 3952 | int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, |
3899 | struct mlx4_vhcr *vhcr, | 3953 | struct mlx4_vhcr *vhcr, |
3900 | struct mlx4_cmd_mailbox *inbox, | 3954 | struct mlx4_cmd_mailbox *inbox, |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 7b52a88923ef..f785d01c7d12 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) | |||
1719 | tx_ring->producer; | 1719 | tx_ring->producer; |
1720 | } | 1720 | } |
1721 | 1721 | ||
1722 | static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter, | ||
1723 | struct net_device *netdev) | ||
1724 | { | ||
1725 | int err; | ||
1726 | |||
1727 | netdev->num_tx_queues = adapter->drv_tx_rings; | ||
1728 | netdev->real_num_tx_queues = adapter->drv_tx_rings; | ||
1729 | |||
1730 | err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings); | ||
1731 | if (err) | ||
1732 | netdev_err(netdev, "failed to set %d Tx queues\n", | ||
1733 | adapter->drv_tx_rings); | ||
1734 | |||
1735 | return err; | ||
1736 | } | ||
1737 | |||
1738 | struct qlcnic_nic_template { | 1722 | struct qlcnic_nic_template { |
1739 | int (*config_bridged_mode) (struct qlcnic_adapter *, u32); | 1723 | int (*config_bridged_mode) (struct qlcnic_adapter *, u32); |
1740 | int (*config_led) (struct qlcnic_adapter *, u32, u32); | 1724 | int (*config_led) (struct qlcnic_adapter *, u32, u32); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 0bc914859e38..7e55e88a81bf 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter) | |||
2206 | ahw->max_uc_count = count; | 2206 | ahw->max_uc_count = count; |
2207 | } | 2207 | } |
2208 | 2208 | ||
2209 | static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter, | ||
2210 | u8 tx_queues, u8 rx_queues) | ||
2211 | { | ||
2212 | struct net_device *netdev = adapter->netdev; | ||
2213 | int err = 0; | ||
2214 | |||
2215 | if (tx_queues) { | ||
2216 | err = netif_set_real_num_tx_queues(netdev, tx_queues); | ||
2217 | if (err) { | ||
2218 | netdev_err(netdev, "failed to set %d Tx queues\n", | ||
2219 | tx_queues); | ||
2220 | return err; | ||
2221 | } | ||
2222 | } | ||
2223 | |||
2224 | if (rx_queues) { | ||
2225 | err = netif_set_real_num_rx_queues(netdev, rx_queues); | ||
2226 | if (err) | ||
2227 | netdev_err(netdev, "failed to set %d Rx queues\n", | ||
2228 | rx_queues); | ||
2229 | } | ||
2230 | |||
2231 | return err; | ||
2232 | } | ||
2233 | |||
2209 | int | 2234 | int |
2210 | qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, | 2235 | qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, |
2211 | int pci_using_dac) | 2236 | int pci_using_dac) |
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, | |||
2269 | netdev->priv_flags |= IFF_UNICAST_FLT; | 2294 | netdev->priv_flags |= IFF_UNICAST_FLT; |
2270 | netdev->irq = adapter->msix_entries[0].vector; | 2295 | netdev->irq = adapter->msix_entries[0].vector; |
2271 | 2296 | ||
2272 | err = qlcnic_set_real_num_queues(adapter, netdev); | 2297 | err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings, |
2298 | adapter->drv_sds_rings); | ||
2273 | if (err) | 2299 | if (err) |
2274 | return err; | 2300 | return err; |
2275 | 2301 | ||
@@ -2943,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter) | |||
2943 | tx_ring->tx_stats.xmit_called, | 2969 | tx_ring->tx_stats.xmit_called, |
2944 | tx_ring->tx_stats.xmit_on, | 2970 | tx_ring->tx_stats.xmit_on, |
2945 | tx_ring->tx_stats.xmit_off); | 2971 | tx_ring->tx_stats.xmit_off); |
2972 | |||
2973 | if (tx_ring->crb_intr_mask) | ||
2974 | netdev_info(netdev, "crb_intr_mask=%d\n", | ||
2975 | readl(tx_ring->crb_intr_mask)); | ||
2976 | |||
2946 | netdev_info(netdev, | 2977 | netdev_info(netdev, |
2947 | "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n", | 2978 | "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n", |
2948 | readl(tx_ring->crb_intr_mask), | ||
2949 | readl(tx_ring->crb_cmd_producer), | 2979 | readl(tx_ring->crb_cmd_producer), |
2950 | tx_ring->producer, tx_ring->sw_consumer, | 2980 | tx_ring->producer, tx_ring->sw_consumer, |
2951 | le32_to_cpu(*(tx_ring->hw_consumer))); | 2981 | le32_to_cpu(*(tx_ring->hw_consumer))); |
@@ -3978,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt, | |||
3978 | int qlcnic_setup_rings(struct qlcnic_adapter *adapter) | 4008 | int qlcnic_setup_rings(struct qlcnic_adapter *adapter) |
3979 | { | 4009 | { |
3980 | struct net_device *netdev = adapter->netdev; | 4010 | struct net_device *netdev = adapter->netdev; |
4011 | u8 tx_rings, rx_rings; | ||
3981 | int err; | 4012 | int err; |
3982 | 4013 | ||
3983 | if (test_bit(__QLCNIC_RESETTING, &adapter->state)) | 4014 | if (test_bit(__QLCNIC_RESETTING, &adapter->state)) |
3984 | return -EBUSY; | 4015 | return -EBUSY; |
3985 | 4016 | ||
4017 | tx_rings = adapter->drv_tss_rings; | ||
4018 | rx_rings = adapter->drv_rss_rings; | ||
4019 | |||
3986 | netif_device_detach(netdev); | 4020 | netif_device_detach(netdev); |
4021 | |||
4022 | err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings); | ||
4023 | if (err) | ||
4024 | goto done; | ||
4025 | |||
3987 | if (netif_running(netdev)) | 4026 | if (netif_running(netdev)) |
3988 | __qlcnic_down(adapter, netdev); | 4027 | __qlcnic_down(adapter, netdev); |
3989 | 4028 | ||
@@ -4003,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter) | |||
4003 | return err; | 4042 | return err; |
4004 | } | 4043 | } |
4005 | 4044 | ||
4006 | netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings); | 4045 | /* Check if we need to update real_num_{tx|rx}_queues because |
4046 | * qlcnic_setup_intr() may change Tx/Rx rings size | ||
4047 | */ | ||
4048 | if ((tx_rings != adapter->drv_tx_rings) || | ||
4049 | (rx_rings != adapter->drv_sds_rings)) { | ||
4050 | err = qlcnic_set_real_num_queues(adapter, | ||
4051 | adapter->drv_tx_rings, | ||
4052 | adapter->drv_sds_rings); | ||
4053 | if (err) | ||
4054 | goto done; | ||
4055 | } | ||
4007 | 4056 | ||
4008 | if (qlcnic_83xx_check(adapter)) { | 4057 | if (qlcnic_83xx_check(adapter)) { |
4009 | qlcnic_83xx_initialize_nic(adapter, 1); | 4058 | qlcnic_83xx_initialize_nic(adapter, 1); |
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 32d969e857f7..89b83e59e1dc 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c | |||
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) | |||
156 | efx->net_dev->rx_cpu_rmap = NULL; | 156 | efx->net_dev->rx_cpu_rmap = NULL; |
157 | #endif | 157 | #endif |
158 | 158 | ||
159 | /* Disable MSI/MSI-X interrupts */ | 159 | if (EFX_INT_MODE_USE_MSI(efx)) { |
160 | efx_for_each_channel(channel, efx) | 160 | /* Disable MSI/MSI-X interrupts */ |
161 | free_irq(channel->irq, &efx->msi_context[channel->channel]); | 161 | efx_for_each_channel(channel, efx) |
162 | 162 | free_irq(channel->irq, | |
163 | /* Disable legacy interrupt */ | 163 | &efx->msi_context[channel->channel]); |
164 | if (efx->legacy_irq) | 164 | } else { |
165 | /* Disable legacy interrupt */ | ||
165 | free_irq(efx->legacy_irq, efx); | 166 | free_irq(efx->legacy_irq, efx); |
167 | } | ||
166 | } | 168 | } |
167 | 169 | ||
168 | /* Register dump */ | 170 | /* Register dump */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d940034acdd4..0f4841d2e8dc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev) | |||
1704 | if (ret) { | 1704 | if (ret) { |
1705 | pr_err("%s: Cannot attach to PHY (error: %d)\n", | 1705 | pr_err("%s: Cannot attach to PHY (error: %d)\n", |
1706 | __func__, ret); | 1706 | __func__, ret); |
1707 | goto phy_error; | 1707 | return ret; |
1708 | } | 1708 | } |
1709 | } | 1709 | } |
1710 | 1710 | ||
@@ -1779,8 +1779,6 @@ init_error: | |||
1779 | dma_desc_error: | 1779 | dma_desc_error: |
1780 | if (priv->phydev) | 1780 | if (priv->phydev) |
1781 | phy_disconnect(priv->phydev); | 1781 | phy_disconnect(priv->phydev); |
1782 | phy_error: | ||
1783 | clk_disable_unprepare(priv->stmmac_clk); | ||
1784 | 1782 | ||
1785 | return ret; | 1783 | return ret; |
1786 | } | 1784 | } |
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index df8d383acf48..b9ac20f42651 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c | |||
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp) | |||
246 | int i; | 246 | int i; |
247 | 247 | ||
248 | for (i = 0; i < N_TX_RINGS; i++) | 248 | for (i = 0; i < N_TX_RINGS; i++) |
249 | spin_lock(&cp->tx_lock[i]); | 249 | spin_lock_nested(&cp->tx_lock[i], i); |
250 | } | 250 | } |
251 | 251 | ||
252 | static inline void cas_lock_all(struct cas *cp) | 252 | static inline void cas_lock_all(struct cas *cp) |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 36aa109416c4..c331b7ebc812 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
1871 | mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); | 1871 | mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); |
1872 | phyid = be32_to_cpup(parp+1); | 1872 | phyid = be32_to_cpup(parp+1); |
1873 | mdio = of_find_device_by_node(mdio_node); | 1873 | mdio = of_find_device_by_node(mdio_node); |
1874 | 1874 | of_node_put(mdio_node); | |
1875 | if (strncmp(mdio->name, "gpio", 4) == 0) { | 1875 | if (!mdio) { |
1876 | /* GPIO bitbang MDIO driver attached */ | 1876 | pr_err("Missing mdio platform device\n"); |
1877 | struct mii_bus *bus = dev_get_drvdata(&mdio->dev); | 1877 | return -EINVAL; |
1878 | |||
1879 | snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), | ||
1880 | PHY_ID_FMT, bus->id, phyid); | ||
1881 | } else { | ||
1882 | /* davinci MDIO driver attached */ | ||
1883 | snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), | ||
1884 | PHY_ID_FMT, mdio->name, phyid); | ||
1885 | } | 1878 | } |
1879 | snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), | ||
1880 | PHY_ID_FMT, mdio->name, phyid); | ||
1886 | 1881 | ||
1887 | mac_addr = of_get_mac_address(slave_node); | 1882 | mac_addr = of_get_mac_address(slave_node); |
1888 | if (mac_addr) | 1883 | if (mac_addr) |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index b0e2865a6810..d53e299ae1d9 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -458,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change) | |||
458 | struct macvlan_dev *vlan = netdev_priv(dev); | 458 | struct macvlan_dev *vlan = netdev_priv(dev); |
459 | struct net_device *lowerdev = vlan->lowerdev; | 459 | struct net_device *lowerdev = vlan->lowerdev; |
460 | 460 | ||
461 | if (change & IFF_ALLMULTI) | 461 | if (dev->flags & IFF_UP) { |
462 | dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); | 462 | if (change & IFF_ALLMULTI) |
463 | dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); | ||
464 | } | ||
463 | } | 465 | } |
464 | 466 | ||
465 | static void macvlan_set_mac_lists(struct net_device *dev) | 467 | static void macvlan_set_mac_lists(struct net_device *dev) |
@@ -515,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; | |||
515 | #define MACVLAN_STATE_MASK \ | 517 | #define MACVLAN_STATE_MASK \ |
516 | ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) | 518 | ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) |
517 | 519 | ||
520 | static int macvlan_get_nest_level(struct net_device *dev) | ||
521 | { | ||
522 | return ((struct macvlan_dev *)netdev_priv(dev))->nest_level; | ||
523 | } | ||
524 | |||
518 | static void macvlan_set_lockdep_class_one(struct net_device *dev, | 525 | static void macvlan_set_lockdep_class_one(struct net_device *dev, |
519 | struct netdev_queue *txq, | 526 | struct netdev_queue *txq, |
520 | void *_unused) | 527 | void *_unused) |
@@ -525,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev, | |||
525 | 532 | ||
526 | static void macvlan_set_lockdep_class(struct net_device *dev) | 533 | static void macvlan_set_lockdep_class(struct net_device *dev) |
527 | { | 534 | { |
528 | lockdep_set_class(&dev->addr_list_lock, | 535 | lockdep_set_class_and_subclass(&dev->addr_list_lock, |
529 | &macvlan_netdev_addr_lock_key); | 536 | &macvlan_netdev_addr_lock_key, |
537 | macvlan_get_nest_level(dev)); | ||
530 | netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); | 538 | netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); |
531 | } | 539 | } |
532 | 540 | ||
@@ -721,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = { | |||
721 | .ndo_fdb_add = macvlan_fdb_add, | 729 | .ndo_fdb_add = macvlan_fdb_add, |
722 | .ndo_fdb_del = macvlan_fdb_del, | 730 | .ndo_fdb_del = macvlan_fdb_del, |
723 | .ndo_fdb_dump = ndo_dflt_fdb_dump, | 731 | .ndo_fdb_dump = ndo_dflt_fdb_dump, |
732 | .ndo_get_lock_subclass = macvlan_get_nest_level, | ||
724 | }; | 733 | }; |
725 | 734 | ||
726 | void macvlan_common_setup(struct net_device *dev) | 735 | void macvlan_common_setup(struct net_device *dev) |
@@ -849,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
849 | vlan->dev = dev; | 858 | vlan->dev = dev; |
850 | vlan->port = port; | 859 | vlan->port = port; |
851 | vlan->set_features = MACVLAN_FEATURES; | 860 | vlan->set_features = MACVLAN_FEATURES; |
861 | vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; | ||
852 | 862 | ||
853 | vlan->mode = MACVLAN_MODE_VEPA; | 863 | vlan->mode = MACVLAN_MODE_VEPA; |
854 | if (data && data[IFLA_MACVLAN_MODE]) | 864 | if (data && data[IFLA_MACVLAN_MODE]) |
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 9c4defdec67b..5f1a2250018f 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c | |||
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev) | |||
215 | if (pdev->dev.of_node) { | 215 | if (pdev->dev.of_node) { |
216 | pdata = mdio_gpio_of_get_data(pdev); | 216 | pdata = mdio_gpio_of_get_data(pdev); |
217 | bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); | 217 | bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); |
218 | if (bus_id < 0) { | ||
219 | dev_warn(&pdev->dev, "failed to get alias id\n"); | ||
220 | bus_id = 0; | ||
221 | } | ||
218 | } else { | 222 | } else { |
219 | pdata = dev_get_platdata(&pdev->dev); | 223 | pdata = dev_get_platdata(&pdev->dev); |
220 | bus_id = pdev->id; | 224 | bus_id = pdev->id; |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index a972056b2249..3bc079a67a3d 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work) | |||
715 | struct delayed_work *dwork = to_delayed_work(work); | 715 | struct delayed_work *dwork = to_delayed_work(work); |
716 | struct phy_device *phydev = | 716 | struct phy_device *phydev = |
717 | container_of(dwork, struct phy_device, state_queue); | 717 | container_of(dwork, struct phy_device, state_queue); |
718 | int needs_aneg = 0, do_suspend = 0; | 718 | bool needs_aneg = false, do_suspend = false, do_resume = false; |
719 | int err = 0; | 719 | int err = 0; |
720 | 720 | ||
721 | mutex_lock(&phydev->lock); | 721 | mutex_lock(&phydev->lock); |
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work) | |||
727 | case PHY_PENDING: | 727 | case PHY_PENDING: |
728 | break; | 728 | break; |
729 | case PHY_UP: | 729 | case PHY_UP: |
730 | needs_aneg = 1; | 730 | needs_aneg = true; |
731 | 731 | ||
732 | phydev->link_timeout = PHY_AN_TIMEOUT; | 732 | phydev->link_timeout = PHY_AN_TIMEOUT; |
733 | 733 | ||
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work) | |||
757 | phydev->adjust_link(phydev->attached_dev); | 757 | phydev->adjust_link(phydev->attached_dev); |
758 | 758 | ||
759 | } else if (0 == phydev->link_timeout--) | 759 | } else if (0 == phydev->link_timeout--) |
760 | needs_aneg = 1; | 760 | needs_aneg = true; |
761 | break; | 761 | break; |
762 | case PHY_NOLINK: | 762 | case PHY_NOLINK: |
763 | err = phy_read_status(phydev); | 763 | err = phy_read_status(phydev); |
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work) | |||
791 | netif_carrier_on(phydev->attached_dev); | 791 | netif_carrier_on(phydev->attached_dev); |
792 | } else { | 792 | } else { |
793 | if (0 == phydev->link_timeout--) | 793 | if (0 == phydev->link_timeout--) |
794 | needs_aneg = 1; | 794 | needs_aneg = true; |
795 | } | 795 | } |
796 | 796 | ||
797 | phydev->adjust_link(phydev->attached_dev); | 797 | phydev->adjust_link(phydev->attached_dev); |
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work) | |||
827 | phydev->link = 0; | 827 | phydev->link = 0; |
828 | netif_carrier_off(phydev->attached_dev); | 828 | netif_carrier_off(phydev->attached_dev); |
829 | phydev->adjust_link(phydev->attached_dev); | 829 | phydev->adjust_link(phydev->attached_dev); |
830 | do_suspend = 1; | 830 | do_suspend = true; |
831 | } | 831 | } |
832 | break; | 832 | break; |
833 | case PHY_RESUMING: | 833 | case PHY_RESUMING: |
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work) | |||
876 | } | 876 | } |
877 | phydev->adjust_link(phydev->attached_dev); | 877 | phydev->adjust_link(phydev->attached_dev); |
878 | } | 878 | } |
879 | do_resume = true; | ||
879 | break; | 880 | break; |
880 | } | 881 | } |
881 | 882 | ||
@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work) | |||
883 | 884 | ||
884 | if (needs_aneg) | 885 | if (needs_aneg) |
885 | err = phy_start_aneg(phydev); | 886 | err = phy_start_aneg(phydev); |
886 | 887 | else if (do_suspend) | |
887 | if (do_suspend) | ||
888 | phy_suspend(phydev); | 888 | phy_suspend(phydev); |
889 | else if (do_resume) | ||
890 | phy_resume(phydev); | ||
889 | 891 | ||
890 | if (err < 0) | 892 | if (err < 0) |
891 | phy_error(phydev); | 893 | phy_error(phydev); |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 0ce606624296..4987a1c6dc52 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | |||
614 | err = phy_init_hw(phydev); | 614 | err = phy_init_hw(phydev); |
615 | if (err) | 615 | if (err) |
616 | phy_detach(phydev); | 616 | phy_detach(phydev); |
617 | 617 | else | |
618 | phy_resume(phydev); | 618 | phy_resume(phydev); |
619 | 619 | ||
620 | return err; | 620 | return err; |
621 | } | 621 | } |
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index c9f3281506af..2e025ddcef21 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf) | |||
120 | cdc_ncm_unbind(dev, intf); | 120 | cdc_ncm_unbind(dev, intf); |
121 | } | 121 | } |
122 | 122 | ||
123 | /* verify that the ethernet protocol is IPv4 or IPv6 */ | ||
124 | static bool is_ip_proto(__be16 proto) | ||
125 | { | ||
126 | switch (proto) { | ||
127 | case htons(ETH_P_IP): | ||
128 | case htons(ETH_P_IPV6): | ||
129 | return true; | ||
130 | } | ||
131 | return false; | ||
132 | } | ||
123 | 133 | ||
124 | static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) | 134 | static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) |
125 | { | 135 | { |
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb | |||
128 | struct cdc_ncm_ctx *ctx = info->ctx; | 138 | struct cdc_ncm_ctx *ctx = info->ctx; |
129 | __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); | 139 | __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); |
130 | u16 tci = 0; | 140 | u16 tci = 0; |
141 | bool is_ip; | ||
131 | u8 *c; | 142 | u8 *c; |
132 | 143 | ||
133 | if (!ctx) | 144 | if (!ctx) |
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb | |||
137 | if (skb->len <= ETH_HLEN) | 148 | if (skb->len <= ETH_HLEN) |
138 | goto error; | 149 | goto error; |
139 | 150 | ||
151 | /* Some applications using e.g. packet sockets will | ||
152 | * bypass the VLAN acceleration and create tagged | ||
153 | * ethernet frames directly. We primarily look for | ||
154 | * the accelerated out-of-band tag, but fall back if | ||
155 | * required | ||
156 | */ | ||
157 | skb_reset_mac_header(skb); | ||
158 | if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN && | ||
159 | __vlan_get_tag(skb, &tci) == 0) { | ||
160 | is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); | ||
161 | skb_pull(skb, VLAN_ETH_HLEN); | ||
162 | } else { | ||
163 | is_ip = is_ip_proto(eth_hdr(skb)->h_proto); | ||
164 | skb_pull(skb, ETH_HLEN); | ||
165 | } | ||
166 | |||
140 | /* mapping VLANs to MBIM sessions: | 167 | /* mapping VLANs to MBIM sessions: |
141 | * no tag => IPS session <0> | 168 | * no tag => IPS session <0> |
142 | * 1 - 255 => IPS session <vlanid> | 169 | * 1 - 255 => IPS session <vlanid> |
143 | * 256 - 511 => DSS session <vlanid - 256> | 170 | * 256 - 511 => DSS session <vlanid - 256> |
144 | * 512 - 4095 => unsupported, drop | 171 | * 512 - 4095 => unsupported, drop |
145 | */ | 172 | */ |
146 | vlan_get_tag(skb, &tci); | ||
147 | |||
148 | switch (tci & 0x0f00) { | 173 | switch (tci & 0x0f00) { |
149 | case 0x0000: /* VLAN ID 0 - 255 */ | 174 | case 0x0000: /* VLAN ID 0 - 255 */ |
150 | /* verify that datagram is IPv4 or IPv6 */ | 175 | if (!is_ip) |
151 | skb_reset_mac_header(skb); | ||
152 | switch (eth_hdr(skb)->h_proto) { | ||
153 | case htons(ETH_P_IP): | ||
154 | case htons(ETH_P_IPV6): | ||
155 | break; | ||
156 | default: | ||
157 | goto error; | 176 | goto error; |
158 | } | ||
159 | c = (u8 *)&sign; | 177 | c = (u8 *)&sign; |
160 | c[3] = tci; | 178 | c[3] = tci; |
161 | break; | 179 | break; |
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb | |||
169 | "unsupported tci=0x%04x\n", tci); | 187 | "unsupported tci=0x%04x\n", tci); |
170 | goto error; | 188 | goto error; |
171 | } | 189 | } |
172 | skb_pull(skb, ETH_HLEN); | ||
173 | } | 190 | } |
174 | 191 | ||
175 | spin_lock_bh(&ctx->mtx); | 192 | spin_lock_bh(&ctx->mtx); |
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci) | |||
204 | return; | 221 | return; |
205 | 222 | ||
206 | /* need to send the NA on the VLAN dev, if any */ | 223 | /* need to send the NA on the VLAN dev, if any */ |
207 | if (tci) | 224 | rcu_read_lock(); |
225 | if (tci) { | ||
208 | netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q), | 226 | netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q), |
209 | tci); | 227 | tci); |
210 | else | 228 | if (!netdev) { |
229 | rcu_read_unlock(); | ||
230 | return; | ||
231 | } | ||
232 | } else { | ||
211 | netdev = dev->net; | 233 | netdev = dev->net; |
212 | if (!netdev) | 234 | } |
213 | return; | 235 | dev_hold(netdev); |
236 | rcu_read_unlock(); | ||
214 | 237 | ||
215 | in6_dev = in6_dev_get(netdev); | 238 | in6_dev = in6_dev_get(netdev); |
216 | if (!in6_dev) | 239 | if (!in6_dev) |
217 | return; | 240 | goto out; |
218 | is_router = !!in6_dev->cnf.forwarding; | 241 | is_router = !!in6_dev->cnf.forwarding; |
219 | in6_dev_put(in6_dev); | 242 | in6_dev_put(in6_dev); |
220 | 243 | ||
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci) | |||
224 | true /* solicited */, | 247 | true /* solicited */, |
225 | false /* override */, | 248 | false /* override */, |
226 | true /* inc_opt */); | 249 | true /* inc_opt */); |
250 | out: | ||
251 | dev_put(netdev); | ||
227 | } | 252 | } |
228 | 253 | ||
229 | static bool is_neigh_solicit(u8 *buf, size_t len) | 254 | static bool is_neigh_solicit(u8 *buf, size_t len) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index f46cd0250e48..5627917c5ff7 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c | |||
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) | |||
95 | 95 | ||
96 | if ((vif->type == NL80211_IFTYPE_AP || | 96 | if ((vif->type == NL80211_IFTYPE_AP || |
97 | vif->type == NL80211_IFTYPE_MESH_POINT) && | 97 | vif->type == NL80211_IFTYPE_MESH_POINT) && |
98 | bss_conf->enable_beacon) | 98 | bss_conf->enable_beacon) { |
99 | priv->reconfig_beacon = true; | 99 | priv->reconfig_beacon = true; |
100 | priv->rearm_ani = true; | ||
101 | } | ||
100 | 102 | ||
101 | if (bss_conf->assoc) { | 103 | if (bss_conf->assoc) { |
102 | priv->rearm_ani = true; | 104 | priv->rearm_ani = true; |
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, | |||
257 | 259 | ||
258 | ath9k_htc_ps_wakeup(priv); | 260 | ath9k_htc_ps_wakeup(priv); |
259 | 261 | ||
262 | ath9k_htc_stop_ani(priv); | ||
260 | del_timer_sync(&priv->tx.cleanup_timer); | 263 | del_timer_sync(&priv->tx.cleanup_timer); |
261 | ath9k_htc_tx_drain(priv); | 264 | ath9k_htc_tx_drain(priv); |
262 | 265 | ||
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index afb3d15e38ff..be1985296bdc 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | |||
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp) | |||
4948 | if (!err) { | 4948 | if (!err) { |
4949 | /* only set 2G bandwidth using bw_cap command */ | 4949 | /* only set 2G bandwidth using bw_cap command */ |
4950 | band_bwcap.band = cpu_to_le32(WLC_BAND_2G); | 4950 | band_bwcap.band = cpu_to_le32(WLC_BAND_2G); |
4951 | band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT); | 4951 | band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ); |
4952 | err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap, | 4952 | err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap, |
4953 | sizeof(band_bwcap)); | 4953 | sizeof(band_bwcap)); |
4954 | } else { | 4954 | } else { |
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index fa858d548d13..0489314425cb 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c | |||
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) | |||
611 | bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); | 611 | bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); |
612 | 612 | ||
613 | if (IWL_MVM_BT_COEX_CORUNNING) { | 613 | if (IWL_MVM_BT_COEX_CORUNNING) { |
614 | bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 | | 614 | bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 | |
615 | BT_VALID_CORUN_LUT_40); | 615 | BT_VALID_CORUN_LUT_40); |
616 | bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); | 616 | bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); |
617 | } | 617 | } |
618 | 618 | ||
619 | if (IWL_MVM_BT_COEX_MPLUT) { | 619 | if (IWL_MVM_BT_COEX_MPLUT) { |
620 | bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT); | 620 | bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT); |
621 | bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); | 621 | bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); |
622 | } | 622 | } |
623 | 623 | ||
624 | if (mvm->cfg->bt_shared_single_ant) | 624 | if (mvm->cfg->bt_shared_single_ant) |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 9426905de6b2..d73a89ecd78a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | |||
@@ -183,9 +183,9 @@ enum iwl_scan_type { | |||
183 | * this number of packets were received (typically 1) | 183 | * this number of packets were received (typically 1) |
184 | * @passive2active: is auto switching from passive to active during scan allowed | 184 | * @passive2active: is auto switching from passive to active during scan allowed |
185 | * @rxchain_sel_flags: RXON_RX_CHAIN_* | 185 | * @rxchain_sel_flags: RXON_RX_CHAIN_* |
186 | * @max_out_time: in usecs, max out of serving channel time | 186 | * @max_out_time: in TUs, max out of serving channel time |
187 | * @suspend_time: how long to pause scan when returning to service channel: | 187 | * @suspend_time: how long to pause scan when returning to service channel: |
188 | * bits 0-19: beacon interal in usecs (suspend before executing) | 188 | * bits 0-19: beacon interal in TUs (suspend before executing) |
189 | * bits 20-23: reserved | 189 | * bits 20-23: reserved |
190 | * bits 24-31: number of beacons (suspend between channels) | 190 | * bits 24-31: number of beacons (suspend between channels) |
191 | * @rxon_flags: RXON_FLG_* | 191 | * @rxon_flags: RXON_FLG_* |
@@ -383,8 +383,8 @@ enum scan_framework_client { | |||
383 | * @quiet_plcp_th: quiet channel num of packets threshold | 383 | * @quiet_plcp_th: quiet channel num of packets threshold |
384 | * @good_CRC_th: passive to active promotion threshold | 384 | * @good_CRC_th: passive to active promotion threshold |
385 | * @rx_chain: RXON rx chain. | 385 | * @rx_chain: RXON rx chain. |
386 | * @max_out_time: max uSec to be out of assoceated channel | 386 | * @max_out_time: max TUs to be out of assoceated channel |
387 | * @suspend_time: pause scan this long when returning to service channel | 387 | * @suspend_time: pause scan this TUs when returning to service channel |
388 | * @flags: RXON flags | 388 | * @flags: RXON flags |
389 | * @filter_flags: RXONfilter | 389 | * @filter_flags: RXONfilter |
390 | * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz. | 390 | * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz. |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index f0cebf12c7b8..b41dc84e9431 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, | |||
1007 | memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); | 1007 | memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); |
1008 | len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); | 1008 | len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); |
1009 | 1009 | ||
1010 | ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd); | 1010 | ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd); |
1011 | if (ret) | 1011 | if (ret) |
1012 | IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); | 1012 | IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); |
1013 | } | 1013 | } |
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) | |||
1023 | if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) | 1023 | if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) |
1024 | return; | 1024 | return; |
1025 | 1025 | ||
1026 | ieee80211_iterate_active_interfaces( | 1026 | ieee80211_iterate_active_interfaces_atomic( |
1027 | mvm->hw, IEEE80211_IFACE_ITER_NORMAL, | 1027 | mvm->hw, IEEE80211_IFACE_ITER_NORMAL, |
1028 | iwl_mvm_mc_iface_iterator, &iter_data); | 1028 | iwl_mvm_mc_iface_iterator, &iter_data); |
1029 | } | 1029 | } |
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, | |||
1807 | 1807 | ||
1808 | mutex_lock(&mvm->mutex); | 1808 | mutex_lock(&mvm->mutex); |
1809 | 1809 | ||
1810 | if (!iwl_mvm_is_idle(mvm)) { | ||
1811 | ret = -EBUSY; | ||
1812 | goto out; | ||
1813 | } | ||
1814 | |||
1810 | switch (mvm->scan_status) { | 1815 | switch (mvm->scan_status) { |
1811 | case IWL_MVM_SCAN_OS: | 1816 | case IWL_MVM_SCAN_OS: |
1812 | IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n"); | 1817 | IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n"); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index d564233a65da..f1ec0986c3c9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h | |||
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) | |||
1003 | return mvmvif->low_latency; | 1003 | return mvmvif->low_latency; |
1004 | } | 1004 | } |
1005 | 1005 | ||
1006 | /* Assoc status */ | ||
1007 | bool iwl_mvm_is_idle(struct iwl_mvm *mvm); | ||
1008 | |||
1006 | /* Thermal management and CT-kill */ | 1009 | /* Thermal management and CT-kill */ |
1007 | void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); | 1010 | void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); |
1008 | void iwl_mvm_tt_handler(struct iwl_mvm *mvm); | 1011 | void iwl_mvm_tt_handler(struct iwl_mvm *mvm); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 9f52c5b3f0ec..e1c838899363 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c | |||
@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband, | |||
1010 | return; | 1010 | return; |
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | #ifdef CPTCFG_MAC80211_DEBUGFS | 1013 | #ifdef CONFIG_MAC80211_DEBUGFS |
1014 | /* Disable last tx check if we are debugging with fixed rate */ | 1014 | /* Disable last tx check if we are debugging with fixed rate */ |
1015 | if (lq_sta->dbg_fixed_rate) { | 1015 | if (lq_sta->dbg_fixed_rate) { |
1016 | IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n"); | 1016 | IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n"); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index c91dc8498852..c28de54c75d4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c | |||
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, | |||
277 | IEEE80211_IFACE_ITER_NORMAL, | 277 | IEEE80211_IFACE_ITER_NORMAL, |
278 | iwl_mvm_scan_condition_iterator, | 278 | iwl_mvm_scan_condition_iterator, |
279 | &global_bound); | 279 | &global_bound); |
280 | /* | ||
281 | * Under low latency traffic passive scan is fragmented meaning | ||
282 | * that dwell on a particular channel will be fragmented. Each fragment | ||
283 | * dwell time is 20ms and fragments period is 105ms. Skipping to next | ||
284 | * channel will be delayed by the same period - 105ms. So suspend_time | ||
285 | * parameter describing both fragments and channels skipping periods is | ||
286 | * set to 105ms. This value is chosen so that overall passive scan | ||
287 | * duration will not be too long. Max_out_time in this case is set to | ||
288 | * 70ms, so for active scanning operating channel will be left for 70ms | ||
289 | * while for passive still for 20ms (fragment dwell). | ||
290 | */ | ||
291 | if (global_bound) { | ||
292 | if (!iwl_mvm_low_latency(mvm)) { | ||
293 | params->suspend_time = ieee80211_tu_to_usec(100); | ||
294 | params->max_out_time = ieee80211_tu_to_usec(600); | ||
295 | } else { | ||
296 | params->suspend_time = ieee80211_tu_to_usec(105); | ||
297 | /* P2P doesn't support fragmented passive scan, so | ||
298 | * configure max_out_time to be at least longest dwell | ||
299 | * time for passive scan. | ||
300 | */ | ||
301 | if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { | ||
302 | params->max_out_time = ieee80211_tu_to_usec(70); | ||
303 | params->passive_fragmented = true; | ||
304 | } else { | ||
305 | u32 passive_dwell; | ||
306 | 280 | ||
307 | /* | 281 | if (!global_bound) |
308 | * Use band G so that passive channel dwell time | 282 | goto not_bound; |
309 | * will be assigned with maximum value. | 283 | |
310 | */ | 284 | params->suspend_time = 100; |
311 | band = IEEE80211_BAND_2GHZ; | 285 | params->max_out_time = 600; |
312 | passive_dwell = iwl_mvm_get_passive_dwell(band); | 286 | |
313 | params->max_out_time = | 287 | if (iwl_mvm_low_latency(mvm)) { |
314 | ieee80211_tu_to_usec(passive_dwell); | 288 | params->suspend_time = 250; |
315 | } | 289 | params->max_out_time = 250; |
316 | } | ||
317 | } | 290 | } |
318 | 291 | ||
292 | not_bound: | ||
293 | |||
319 | for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { | 294 | for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { |
320 | if (params->passive_fragmented) | 295 | params->dwell[band].passive = iwl_mvm_get_passive_dwell(band); |
321 | params->dwell[band].passive = 20; | ||
322 | else | ||
323 | params->dwell[band].passive = | ||
324 | iwl_mvm_get_passive_dwell(band); | ||
325 | params->dwell[band].active = iwl_mvm_get_active_dwell(band, | 296 | params->dwell[band].active = iwl_mvm_get_active_dwell(band, |
326 | n_ssids); | 297 | n_ssids); |
327 | } | 298 | } |
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, | |||
761 | int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; | 732 | int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; |
762 | int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; | 733 | int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; |
763 | int head = 0; | 734 | int head = 0; |
764 | int tail = band_2ghz + band_5ghz; | 735 | int tail = band_2ghz + band_5ghz - 1; |
765 | u32 ssid_bitmap; | 736 | u32 ssid_bitmap; |
766 | int cmd_len; | 737 | int cmd_len; |
767 | int ret; | 738 | int ret; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index d619851745a1..2180902266ae 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c | |||
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm) | |||
644 | 644 | ||
645 | return result; | 645 | return result; |
646 | } | 646 | } |
647 | |||
648 | static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) | ||
649 | { | ||
650 | bool *idle = _data; | ||
651 | |||
652 | if (!vif->bss_conf.idle) | ||
653 | *idle = false; | ||
654 | } | ||
655 | |||
656 | bool iwl_mvm_is_idle(struct iwl_mvm *mvm) | ||
657 | { | ||
658 | bool idle = true; | ||
659 | |||
660 | ieee80211_iterate_active_interfaces_atomic( | ||
661 | mvm->hw, IEEE80211_IFACE_ITER_NORMAL, | ||
662 | iwl_mvm_idle_iter, &idle); | ||
663 | |||
664 | return idle; | ||
665 | } | ||
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index dcfd6d866d09..2365553f1ef7 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
1749 | * PCI Tx retries from interfering with C3 CPU state */ | 1749 | * PCI Tx retries from interfering with C3 CPU state */ |
1750 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); | 1750 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); |
1751 | 1751 | ||
1752 | trans->dev = &pdev->dev; | ||
1753 | trans_pcie->pci_dev = pdev; | ||
1754 | iwl_disable_interrupts(trans); | ||
1755 | |||
1752 | err = pci_enable_msi(pdev); | 1756 | err = pci_enable_msi(pdev); |
1753 | if (err) { | 1757 | if (err) { |
1754 | dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); | 1758 | dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); |
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
1760 | } | 1764 | } |
1761 | } | 1765 | } |
1762 | 1766 | ||
1763 | trans->dev = &pdev->dev; | ||
1764 | trans_pcie->pci_dev = pdev; | ||
1765 | trans->hw_rev = iwl_read32(trans, CSR_HW_REV); | 1767 | trans->hw_rev = iwl_read32(trans, CSR_HW_REV); |
1766 | trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; | 1768 | trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; |
1767 | snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), | 1769 | snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), |
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
1787 | goto out_pci_disable_msi; | 1789 | goto out_pci_disable_msi; |
1788 | } | 1790 | } |
1789 | 1791 | ||
1790 | trans_pcie->inta_mask = CSR_INI_SET_MASK; | ||
1791 | |||
1792 | if (iwl_pcie_alloc_ict(trans)) | 1792 | if (iwl_pcie_alloc_ict(trans)) |
1793 | goto out_free_cmd_pool; | 1793 | goto out_free_cmd_pool; |
1794 | 1794 | ||
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
1800 | goto out_free_ict; | 1800 | goto out_free_ict; |
1801 | } | 1801 | } |
1802 | 1802 | ||
1803 | trans_pcie->inta_mask = CSR_INI_SET_MASK; | ||
1804 | |||
1803 | return trans; | 1805 | return trans; |
1804 | 1806 | ||
1805 | out_free_ict: | 1807 | out_free_ict: |
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 630a3fcf65bc..0d4a285cbd7e 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h | |||
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif, | |||
226 | grant_ref_t rx_ring_ref); | 226 | grant_ref_t rx_ring_ref); |
227 | 227 | ||
228 | /* Check for SKBs from frontend and schedule backend processing */ | 228 | /* Check for SKBs from frontend and schedule backend processing */ |
229 | void xenvif_check_rx_xenvif(struct xenvif *vif); | 229 | void xenvif_napi_schedule_or_enable_events(struct xenvif *vif); |
230 | 230 | ||
231 | /* Prevent the device from generating any further traffic. */ | 231 | /* Prevent the device from generating any further traffic. */ |
232 | void xenvif_carrier_off(struct xenvif *vif); | 232 | void xenvif_carrier_off(struct xenvif *vif); |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index ef05c5c49d41..20e9defa1060 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget) | |||
75 | work_done = xenvif_tx_action(vif, budget); | 75 | work_done = xenvif_tx_action(vif, budget); |
76 | 76 | ||
77 | if (work_done < budget) { | 77 | if (work_done < budget) { |
78 | int more_to_do = 0; | 78 | napi_complete(napi); |
79 | unsigned long flags; | 79 | xenvif_napi_schedule_or_enable_events(vif); |
80 | |||
81 | /* It is necessary to disable IRQ before calling | ||
82 | * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might | ||
83 | * lose event from the frontend. | ||
84 | * | ||
85 | * Consider: | ||
86 | * RING_HAS_UNCONSUMED_REQUESTS | ||
87 | * <frontend generates event to trigger napi_schedule> | ||
88 | * __napi_complete | ||
89 | * | ||
90 | * This handler is still in scheduled state so the | ||
91 | * event has no effect at all. After __napi_complete | ||
92 | * this handler is descheduled and cannot get | ||
93 | * scheduled again. We lose event in this case and the ring | ||
94 | * will be completely stalled. | ||
95 | */ | ||
96 | |||
97 | local_irq_save(flags); | ||
98 | |||
99 | RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); | ||
100 | if (!more_to_do) | ||
101 | __napi_complete(napi); | ||
102 | |||
103 | local_irq_restore(flags); | ||
104 | } | 80 | } |
105 | 81 | ||
106 | return work_done; | 82 | return work_done; |
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif) | |||
194 | enable_irq(vif->tx_irq); | 170 | enable_irq(vif->tx_irq); |
195 | if (vif->tx_irq != vif->rx_irq) | 171 | if (vif->tx_irq != vif->rx_irq) |
196 | enable_irq(vif->rx_irq); | 172 | enable_irq(vif->rx_irq); |
197 | xenvif_check_rx_xenvif(vif); | 173 | xenvif_napi_schedule_or_enable_events(vif); |
198 | } | 174 | } |
199 | 175 | ||
200 | static void xenvif_down(struct xenvif *vif) | 176 | static void xenvif_down(struct xenvif *vif) |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 76665405c5aa..7367208ee8cd 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif, | |||
104 | 104 | ||
105 | /* Find the containing VIF's structure from a pointer in pending_tx_info array | 105 | /* Find the containing VIF's structure from a pointer in pending_tx_info array |
106 | */ | 106 | */ |
107 | static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf) | 107 | static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf) |
108 | { | 108 | { |
109 | u16 pending_idx = ubuf->desc; | 109 | u16 pending_idx = ubuf->desc; |
110 | struct pending_tx_info *temp = | 110 | struct pending_tx_info *temp = |
@@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
323 | } | 323 | } |
324 | 324 | ||
325 | /* | 325 | /* |
326 | * Find the grant ref for a given frag in a chain of struct ubuf_info's | ||
327 | * skb: the skb itself | ||
328 | * i: the frag's number | ||
329 | * ubuf: a pointer to an element in the chain. It should not be NULL | ||
330 | * | ||
331 | * Returns a pointer to the element in the chain where the page were found. If | ||
332 | * not found, returns NULL. | ||
333 | * See the definition of callback_struct in common.h for more details about | ||
334 | * the chain. | ||
335 | */ | ||
336 | static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb, | ||
337 | const int i, | ||
338 | const struct ubuf_info *ubuf) | ||
339 | { | ||
340 | struct xenvif *foreign_vif = ubuf_to_vif(ubuf); | ||
341 | |||
342 | do { | ||
343 | u16 pending_idx = ubuf->desc; | ||
344 | |||
345 | if (skb_shinfo(skb)->frags[i].page.p == | ||
346 | foreign_vif->mmap_pages[pending_idx]) | ||
347 | break; | ||
348 | ubuf = (struct ubuf_info *) ubuf->ctx; | ||
349 | } while (ubuf); | ||
350 | |||
351 | return ubuf; | ||
352 | } | ||
353 | |||
354 | /* | ||
326 | * Prepare an SKB to be transmitted to the frontend. | 355 | * Prepare an SKB to be transmitted to the frontend. |
327 | * | 356 | * |
328 | * This function is responsible for allocating grant operations, meta | 357 | * This function is responsible for allocating grant operations, meta |
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
346 | int head = 1; | 375 | int head = 1; |
347 | int old_meta_prod; | 376 | int old_meta_prod; |
348 | int gso_type; | 377 | int gso_type; |
349 | struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; | 378 | const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; |
350 | grant_ref_t foreign_grefs[MAX_SKB_FRAGS]; | 379 | const struct ubuf_info *const head_ubuf = ubuf; |
351 | struct xenvif *foreign_vif = NULL; | ||
352 | 380 | ||
353 | old_meta_prod = npo->meta_prod; | 381 | old_meta_prod = npo->meta_prod; |
354 | 382 | ||
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
386 | npo->copy_off = 0; | 414 | npo->copy_off = 0; |
387 | npo->copy_gref = req->gref; | 415 | npo->copy_gref = req->gref; |
388 | 416 | ||
389 | if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && | ||
390 | (ubuf->callback == &xenvif_zerocopy_callback)) { | ||
391 | int i = 0; | ||
392 | foreign_vif = ubuf_to_vif(ubuf); | ||
393 | |||
394 | do { | ||
395 | u16 pending_idx = ubuf->desc; | ||
396 | foreign_grefs[i++] = | ||
397 | foreign_vif->pending_tx_info[pending_idx].req.gref; | ||
398 | ubuf = (struct ubuf_info *) ubuf->ctx; | ||
399 | } while (ubuf); | ||
400 | } | ||
401 | |||
402 | data = skb->data; | 417 | data = skb->data; |
403 | while (data < skb_tail_pointer(skb)) { | 418 | while (data < skb_tail_pointer(skb)) { |
404 | unsigned int offset = offset_in_page(data); | 419 | unsigned int offset = offset_in_page(data); |
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
415 | } | 430 | } |
416 | 431 | ||
417 | for (i = 0; i < nr_frags; i++) { | 432 | for (i = 0; i < nr_frags; i++) { |
433 | /* This variable also signals whether foreign_gref has a real | ||
434 | * value or not. | ||
435 | */ | ||
436 | struct xenvif *foreign_vif = NULL; | ||
437 | grant_ref_t foreign_gref; | ||
438 | |||
439 | if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && | ||
440 | (ubuf->callback == &xenvif_zerocopy_callback)) { | ||
441 | const struct ubuf_info *const startpoint = ubuf; | ||
442 | |||
443 | /* Ideally ubuf points to the chain element which | ||
444 | * belongs to this frag. Or if frags were removed from | ||
445 | * the beginning, then shortly before it. | ||
446 | */ | ||
447 | ubuf = xenvif_find_gref(skb, i, ubuf); | ||
448 | |||
449 | /* Try again from the beginning of the list, if we | ||
450 | * haven't tried from there. This only makes sense in | ||
451 | * the unlikely event of reordering the original frags. | ||
452 | * For injected local pages it's an unnecessary second | ||
453 | * run. | ||
454 | */ | ||
455 | if (unlikely(!ubuf) && startpoint != head_ubuf) | ||
456 | ubuf = xenvif_find_gref(skb, i, head_ubuf); | ||
457 | |||
458 | if (likely(ubuf)) { | ||
459 | u16 pending_idx = ubuf->desc; | ||
460 | |||
461 | foreign_vif = ubuf_to_vif(ubuf); | ||
462 | foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref; | ||
463 | /* Just a safety measure. If this was the last | ||
464 | * element on the list, the for loop will | ||
465 | * iterate again if a local page were added to | ||
466 | * the end. Using head_ubuf here prevents the | ||
467 | * second search on the chain. Or the original | ||
468 | * frags changed order, but that's less likely. | ||
469 | * In any way, ubuf shouldn't be NULL. | ||
470 | */ | ||
471 | ubuf = ubuf->ctx ? | ||
472 | (struct ubuf_info *) ubuf->ctx : | ||
473 | head_ubuf; | ||
474 | } else | ||
475 | /* This frag was a local page, added to the | ||
476 | * array after the skb left netback. | ||
477 | */ | ||
478 | ubuf = head_ubuf; | ||
479 | } | ||
418 | xenvif_gop_frag_copy(vif, skb, npo, | 480 | xenvif_gop_frag_copy(vif, skb, npo, |
419 | skb_frag_page(&skb_shinfo(skb)->frags[i]), | 481 | skb_frag_page(&skb_shinfo(skb)->frags[i]), |
420 | skb_frag_size(&skb_shinfo(skb)->frags[i]), | 482 | skb_frag_size(&skb_shinfo(skb)->frags[i]), |
421 | skb_shinfo(skb)->frags[i].page_offset, | 483 | skb_shinfo(skb)->frags[i].page_offset, |
422 | &head, | 484 | &head, |
423 | foreign_vif, | 485 | foreign_vif, |
424 | foreign_grefs[i]); | 486 | foreign_vif ? foreign_gref : UINT_MAX); |
425 | } | 487 | } |
426 | 488 | ||
427 | return npo->meta_prod - old_meta_prod; | 489 | return npo->meta_prod - old_meta_prod; |
@@ -654,7 +716,7 @@ done: | |||
654 | notify_remote_via_irq(vif->rx_irq); | 716 | notify_remote_via_irq(vif->rx_irq); |
655 | } | 717 | } |
656 | 718 | ||
657 | void xenvif_check_rx_xenvif(struct xenvif *vif) | 719 | void xenvif_napi_schedule_or_enable_events(struct xenvif *vif) |
658 | { | 720 | { |
659 | int more_to_do; | 721 | int more_to_do; |
660 | 722 | ||
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data) | |||
688 | { | 750 | { |
689 | struct xenvif *vif = (struct xenvif *)data; | 751 | struct xenvif *vif = (struct xenvif *)data; |
690 | tx_add_credit(vif); | 752 | tx_add_credit(vif); |
691 | xenvif_check_rx_xenvif(vif); | 753 | xenvif_napi_schedule_or_enable_events(vif); |
692 | } | 754 | } |
693 | 755 | ||
694 | static void xenvif_tx_err(struct xenvif *vif, | 756 | static void xenvif_tx_err(struct xenvif *vif, |
diff --git a/drivers/of/base.c b/drivers/of/base.c index 6d4ee22708c9..32e969d95319 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -1831,6 +1831,10 @@ int of_update_property(struct device_node *np, struct property *newprop) | |||
1831 | if (!found) | 1831 | if (!found) |
1832 | return -ENODEV; | 1832 | return -ENODEV; |
1833 | 1833 | ||
1834 | /* At early boot, bail out and defer setup to of_init() */ | ||
1835 | if (!of_kset) | ||
1836 | return found ? 0 : -ENODEV; | ||
1837 | |||
1834 | /* Update the sysfs attribute */ | 1838 | /* Update the sysfs attribute */ |
1835 | sysfs_remove_bin_file(&np->kobj, &oldprop->attr); | 1839 | sysfs_remove_bin_file(&np->kobj, &oldprop->attr); |
1836 | __of_add_property_sysfs(np, newprop); | 1840 | __of_add_property_sysfs(np, newprop); |
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index d3d1cfd51e09..e384e2534594 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c | |||
@@ -293,6 +293,58 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, | |||
293 | return PCIBIOS_SUCCESSFUL; | 293 | return PCIBIOS_SUCCESSFUL; |
294 | } | 294 | } |
295 | 295 | ||
296 | /* | ||
297 | * Remove windows, starting from the largest ones to the smallest | ||
298 | * ones. | ||
299 | */ | ||
300 | static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port, | ||
301 | phys_addr_t base, size_t size) | ||
302 | { | ||
303 | while (size) { | ||
304 | size_t sz = 1 << (fls(size) - 1); | ||
305 | |||
306 | mvebu_mbus_del_window(base, sz); | ||
307 | base += sz; | ||
308 | size -= sz; | ||
309 | } | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * MBus windows can only have a power of two size, but PCI BARs do not | ||
314 | * have this constraint. Therefore, we have to split the PCI BAR into | ||
315 | * areas each having a power of two size. We start from the largest | ||
316 | * one (i.e highest order bit set in the size). | ||
317 | */ | ||
318 | static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, | ||
319 | unsigned int target, unsigned int attribute, | ||
320 | phys_addr_t base, size_t size, | ||
321 | phys_addr_t remap) | ||
322 | { | ||
323 | size_t size_mapped = 0; | ||
324 | |||
325 | while (size) { | ||
326 | size_t sz = 1 << (fls(size) - 1); | ||
327 | int ret; | ||
328 | |||
329 | ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base, | ||
330 | sz, remap); | ||
331 | if (ret) { | ||
332 | dev_err(&port->pcie->pdev->dev, | ||
333 | "Could not create MBus window at 0x%x, size 0x%x: %d\n", | ||
334 | base, sz, ret); | ||
335 | mvebu_pcie_del_windows(port, base - size_mapped, | ||
336 | size_mapped); | ||
337 | return; | ||
338 | } | ||
339 | |||
340 | size -= sz; | ||
341 | size_mapped += sz; | ||
342 | base += sz; | ||
343 | if (remap != MVEBU_MBUS_NO_REMAP) | ||
344 | remap += sz; | ||
345 | } | ||
346 | } | ||
347 | |||
296 | static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) | 348 | static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) |
297 | { | 349 | { |
298 | phys_addr_t iobase; | 350 | phys_addr_t iobase; |
@@ -304,8 +356,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) | |||
304 | 356 | ||
305 | /* If a window was configured, remove it */ | 357 | /* If a window was configured, remove it */ |
306 | if (port->iowin_base) { | 358 | if (port->iowin_base) { |
307 | mvebu_mbus_del_window(port->iowin_base, | 359 | mvebu_pcie_del_windows(port, port->iowin_base, |
308 | port->iowin_size); | 360 | port->iowin_size); |
309 | port->iowin_base = 0; | 361 | port->iowin_base = 0; |
310 | port->iowin_size = 0; | 362 | port->iowin_size = 0; |
311 | } | 363 | } |
@@ -331,11 +383,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) | |||
331 | port->iowin_base = port->pcie->io.start + iobase; | 383 | port->iowin_base = port->pcie->io.start + iobase; |
332 | port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | | 384 | port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | |
333 | (port->bridge.iolimitupper << 16)) - | 385 | (port->bridge.iolimitupper << 16)) - |
334 | iobase); | 386 | iobase) + 1; |
335 | 387 | ||
336 | mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr, | 388 | mvebu_pcie_add_windows(port, port->io_target, port->io_attr, |
337 | port->iowin_base, port->iowin_size, | 389 | port->iowin_base, port->iowin_size, |
338 | iobase); | 390 | iobase); |
339 | } | 391 | } |
340 | 392 | ||
341 | static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) | 393 | static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) |
@@ -346,8 +398,8 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) | |||
346 | 398 | ||
347 | /* If a window was configured, remove it */ | 399 | /* If a window was configured, remove it */ |
348 | if (port->memwin_base) { | 400 | if (port->memwin_base) { |
349 | mvebu_mbus_del_window(port->memwin_base, | 401 | mvebu_pcie_del_windows(port, port->memwin_base, |
350 | port->memwin_size); | 402 | port->memwin_size); |
351 | port->memwin_base = 0; | 403 | port->memwin_base = 0; |
352 | port->memwin_size = 0; | 404 | port->memwin_size = 0; |
353 | } | 405 | } |
@@ -364,10 +416,11 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) | |||
364 | port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16); | 416 | port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16); |
365 | port->memwin_size = | 417 | port->memwin_size = |
366 | (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - | 418 | (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - |
367 | port->memwin_base; | 419 | port->memwin_base + 1; |
368 | 420 | ||
369 | mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr, | 421 | mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr, |
370 | port->memwin_base, port->memwin_size); | 422 | port->memwin_base, port->memwin_size, |
423 | MVEBU_MBUS_NO_REMAP); | ||
371 | } | 424 | } |
372 | 425 | ||
373 | /* | 426 | /* |
@@ -743,14 +796,21 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, | |||
743 | 796 | ||
744 | /* | 797 | /* |
745 | * On the PCI-to-PCI bridge side, the I/O windows must have at | 798 | * On the PCI-to-PCI bridge side, the I/O windows must have at |
746 | * least a 64 KB size and be aligned on their size, and the | 799 | * least a 64 KB size and the memory windows must have at |
747 | * memory windows must have at least a 1 MB size and be | 800 | * least a 1 MB size. Moreover, MBus windows need to have a |
748 | * aligned on their size | 801 | * base address aligned on their size, and their size must be |
802 | * a power of two. This means that if the BAR doesn't have a | ||
803 | * power of two size, several MBus windows will actually be | ||
804 | * created. We need to ensure that the biggest MBus window | ||
805 | * (which will be the first one) is aligned on its size, which | ||
806 | * explains the rounddown_pow_of_two() being done here. | ||
749 | */ | 807 | */ |
750 | if (res->flags & IORESOURCE_IO) | 808 | if (res->flags & IORESOURCE_IO) |
751 | return round_up(start, max_t(resource_size_t, SZ_64K, size)); | 809 | return round_up(start, max_t(resource_size_t, SZ_64K, |
810 | rounddown_pow_of_two(size))); | ||
752 | else if (res->flags & IORESOURCE_MEM) | 811 | else if (res->flags & IORESOURCE_MEM) |
753 | return round_up(start, max_t(resource_size_t, SZ_1M, size)); | 812 | return round_up(start, max_t(resource_size_t, SZ_1M, |
813 | rounddown_pow_of_two(size))); | ||
754 | else | 814 | else |
755 | return start; | 815 | return start; |
756 | } | 816 | } |
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index 58499277903a..6efc2ec5e4db 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c | |||
@@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot) | |||
282 | return WRONG_BUS_FREQUENCY; | 282 | return WRONG_BUS_FREQUENCY; |
283 | } | 283 | } |
284 | 284 | ||
285 | bsp = ctrl->pci_dev->bus->cur_bus_speed; | 285 | bsp = ctrl->pci_dev->subordinate->cur_bus_speed; |
286 | msp = ctrl->pci_dev->bus->max_bus_speed; | 286 | msp = ctrl->pci_dev->subordinate->max_bus_speed; |
287 | 287 | ||
288 | /* Check if there are other slots or devices on the same bus */ | 288 | /* Check if there are other slots or devices on the same bus */ |
289 | if (!list_empty(&ctrl->pci_dev->subordinate->devices)) | 289 | if (!list_empty(&ctrl->pci_dev->subordinate->devices)) |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 7325d43bf030..759475ef6ff3 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -3067,7 +3067,8 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev) | |||
3067 | if (!pci_is_pcie(dev)) | 3067 | if (!pci_is_pcie(dev)) |
3068 | return 1; | 3068 | return 1; |
3069 | 3069 | ||
3070 | return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND); | 3070 | return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA, |
3071 | PCI_EXP_DEVSTA_TRPND); | ||
3071 | } | 3072 | } |
3072 | EXPORT_SYMBOL(pci_wait_for_pending_transaction); | 3073 | EXPORT_SYMBOL(pci_wait_for_pending_transaction); |
3073 | 3074 | ||
@@ -3109,7 +3110,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe) | |||
3109 | return 0; | 3110 | return 0; |
3110 | 3111 | ||
3111 | /* Wait for Transaction Pending bit clean */ | 3112 | /* Wait for Transaction Pending bit clean */ |
3112 | if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP)) | 3113 | if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP)) |
3113 | goto clear; | 3114 | goto clear; |
3114 | 3115 | ||
3115 | dev_err(&dev->dev, "transaction is not cleared; " | 3116 | dev_err(&dev->dev, "transaction is not cleared; " |
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c index 9802b67040cc..2c61281bebd7 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wmt.c +++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c | |||
@@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset) | |||
523 | return GPIOF_DIR_IN; | 523 | return GPIOF_DIR_IN; |
524 | } | 524 | } |
525 | 525 | ||
526 | static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | ||
527 | { | ||
528 | return pinctrl_gpio_direction_input(chip->base + offset); | ||
529 | } | ||
530 | |||
531 | static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset, | ||
532 | int value) | ||
533 | { | ||
534 | return pinctrl_gpio_direction_output(chip->base + offset); | ||
535 | } | ||
536 | |||
537 | static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset) | 526 | static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset) |
538 | { | 527 | { |
539 | struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev); | 528 | struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev); |
@@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset, | |||
568 | wmt_clearbits(data, reg_data_out, BIT(bit)); | 557 | wmt_clearbits(data, reg_data_out, BIT(bit)); |
569 | } | 558 | } |
570 | 559 | ||
560 | static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | ||
561 | { | ||
562 | return pinctrl_gpio_direction_input(chip->base + offset); | ||
563 | } | ||
564 | |||
565 | static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset, | ||
566 | int value) | ||
567 | { | ||
568 | wmt_gpio_set_value(chip, offset, value); | ||
569 | return pinctrl_gpio_direction_output(chip->base + offset); | ||
570 | } | ||
571 | |||
571 | static struct gpio_chip wmt_gpio_chip = { | 572 | static struct gpio_chip wmt_gpio_chip = { |
572 | .label = "gpio-wmt", | 573 | .label = "gpio-wmt", |
573 | .owner = THIS_MODULE, | 574 | .owner = THIS_MODULE, |
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index c91f69b39db4..bbf78b2d6d93 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c | |||
@@ -570,6 +570,14 @@ static const struct dmi_system_id video_vendor_dmi_table[] = { | |||
570 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"), | 570 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"), |
571 | }, | 571 | }, |
572 | }, | 572 | }, |
573 | { | ||
574 | .callback = video_set_backlight_video_vendor, | ||
575 | .ident = "Acer Aspire 5741", | ||
576 | .matches = { | ||
577 | DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), | ||
578 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"), | ||
579 | }, | ||
580 | }, | ||
573 | {} | 581 | {} |
574 | }; | 582 | }; |
575 | 583 | ||
@@ -2228,7 +2236,7 @@ static int __init acer_wmi_init(void) | |||
2228 | pr_info("Brightness must be controlled by acpi video driver\n"); | 2236 | pr_info("Brightness must be controlled by acpi video driver\n"); |
2229 | } else { | 2237 | } else { |
2230 | pr_info("Disabling ACPI video driver\n"); | 2238 | pr_info("Disabling ACPI video driver\n"); |
2231 | acpi_video_unregister(); | 2239 | acpi_video_unregister_backlight(); |
2232 | } | 2240 | } |
2233 | 2241 | ||
2234 | if (wmi_has_guid(WMID_GUID3)) { | 2242 | if (wmi_has_guid(WMID_GUID3)) { |
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index c31aa07b3ba5..b81448b2c75d 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c | |||
@@ -30,26 +30,6 @@ | |||
30 | 30 | ||
31 | static int num; | 31 | static int num; |
32 | 32 | ||
33 | /* We need only to blacklist devices that have already an acpi driver that | ||
34 | * can't use pnp layer. We don't need to blacklist device that are directly | ||
35 | * used by the kernel (PCI root, ...), as it is harmless and there were | ||
36 | * already present in pnpbios. But there is an exception for devices that | ||
37 | * have irqs (PIC, Timer) because we call acpi_register_gsi. | ||
38 | * Finally, only devices that have a CRS method need to be in this list. | ||
39 | */ | ||
40 | static struct acpi_device_id excluded_id_list[] __initdata = { | ||
41 | {"PNP0C09", 0}, /* EC */ | ||
42 | {"PNP0C0F", 0}, /* Link device */ | ||
43 | {"PNP0000", 0}, /* PIC */ | ||
44 | {"PNP0100", 0}, /* Timer */ | ||
45 | {"", 0}, | ||
46 | }; | ||
47 | |||
48 | static inline int __init is_exclusive_device(struct acpi_device *dev) | ||
49 | { | ||
50 | return (!acpi_match_device_ids(dev, excluded_id_list)); | ||
51 | } | ||
52 | |||
53 | /* | 33 | /* |
54 | * Compatible Device IDs | 34 | * Compatible Device IDs |
55 | */ | 35 | */ |
@@ -266,7 +246,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device) | |||
266 | if (!pnpid) | 246 | if (!pnpid) |
267 | return 0; | 247 | return 0; |
268 | 248 | ||
269 | if (is_exclusive_device(device) || !device->status.present) | 249 | if (!device->status.present) |
270 | return 0; | 250 | return 0; |
271 | 251 | ||
272 | dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid); | 252 | dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid); |
@@ -326,10 +306,10 @@ static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle, | |||
326 | { | 306 | { |
327 | struct acpi_device *device; | 307 | struct acpi_device *device; |
328 | 308 | ||
329 | if (!acpi_bus_get_device(handle, &device)) | 309 | if (acpi_bus_get_device(handle, &device)) |
330 | pnpacpi_add_device(device); | ||
331 | else | ||
332 | return AE_CTRL_DEPTH; | 310 | return AE_CTRL_DEPTH; |
311 | if (acpi_is_pnp_device(device)) | ||
312 | pnpacpi_add_device(device); | ||
333 | return AE_OK; | 313 | return AE_OK; |
334 | } | 314 | } |
335 | 315 | ||
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 26606641fe44..5a5a24e7d43c 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c | |||
@@ -537,7 +537,7 @@ static void psy_unregister_cooler(struct power_supply *psy) | |||
537 | } | 537 | } |
538 | #endif | 538 | #endif |
539 | 539 | ||
540 | int power_supply_register(struct device *parent, struct power_supply *psy) | 540 | int __power_supply_register(struct device *parent, struct power_supply *psy, bool ws) |
541 | { | 541 | { |
542 | struct device *dev; | 542 | struct device *dev; |
543 | int rc; | 543 | int rc; |
@@ -568,7 +568,7 @@ int power_supply_register(struct device *parent, struct power_supply *psy) | |||
568 | } | 568 | } |
569 | 569 | ||
570 | spin_lock_init(&psy->changed_lock); | 570 | spin_lock_init(&psy->changed_lock); |
571 | rc = device_init_wakeup(dev, true); | 571 | rc = device_init_wakeup(dev, ws); |
572 | if (rc) | 572 | if (rc) |
573 | goto wakeup_init_failed; | 573 | goto wakeup_init_failed; |
574 | 574 | ||
@@ -606,8 +606,19 @@ dev_set_name_failed: | |||
606 | success: | 606 | success: |
607 | return rc; | 607 | return rc; |
608 | } | 608 | } |
609 | |||
610 | int power_supply_register(struct device *parent, struct power_supply *psy) | ||
611 | { | ||
612 | return __power_supply_register(parent, psy, true); | ||
613 | } | ||
609 | EXPORT_SYMBOL_GPL(power_supply_register); | 614 | EXPORT_SYMBOL_GPL(power_supply_register); |
610 | 615 | ||
616 | int power_supply_register_no_ws(struct device *parent, struct power_supply *psy) | ||
617 | { | ||
618 | return __power_supply_register(parent, psy, false); | ||
619 | } | ||
620 | EXPORT_SYMBOL_GPL(power_supply_register_no_ws); | ||
621 | |||
611 | void power_supply_unregister(struct power_supply *psy) | 622 | void power_supply_unregister(struct power_supply *psy) |
612 | { | 623 | { |
613 | cancel_work_sync(&psy->changed_work); | 624 | cancel_work_sync(&psy->changed_work); |
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index d9a0770b6c73..b1cda6ffdbcc 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c | |||
@@ -951,7 +951,9 @@ static const struct x86_cpu_id rapl_ids[] = { | |||
951 | { X86_VENDOR_INTEL, 6, 0x2d},/* Sandy Bridge EP */ | 951 | { X86_VENDOR_INTEL, 6, 0x2d},/* Sandy Bridge EP */ |
952 | { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */ | 952 | { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */ |
953 | { X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */ | 953 | { X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */ |
954 | { X86_VENDOR_INTEL, 6, 0x45},/* Haswell */ | 954 | { X86_VENDOR_INTEL, 6, 0x3c},/* Haswell */ |
955 | { X86_VENDOR_INTEL, 6, 0x3d},/* Broadwell */ | ||
956 | { X86_VENDOR_INTEL, 6, 0x45},/* Haswell ULT */ | ||
955 | /* TODO: Add more CPU IDs after testing */ | 957 | /* TODO: Add more CPU IDs after testing */ |
956 | {} | 958 | {} |
957 | }; | 959 | }; |
@@ -1124,8 +1126,7 @@ err_cleanup_package: | |||
1124 | static int rapl_check_domain(int cpu, int domain) | 1126 | static int rapl_check_domain(int cpu, int domain) |
1125 | { | 1127 | { |
1126 | unsigned msr; | 1128 | unsigned msr; |
1127 | u64 val1, val2 = 0; | 1129 | u64 val = 0; |
1128 | int retry = 0; | ||
1129 | 1130 | ||
1130 | switch (domain) { | 1131 | switch (domain) { |
1131 | case RAPL_DOMAIN_PACKAGE: | 1132 | case RAPL_DOMAIN_PACKAGE: |
@@ -1144,26 +1145,13 @@ static int rapl_check_domain(int cpu, int domain) | |||
1144 | pr_err("invalid domain id %d\n", domain); | 1145 | pr_err("invalid domain id %d\n", domain); |
1145 | return -EINVAL; | 1146 | return -EINVAL; |
1146 | } | 1147 | } |
1147 | if (rdmsrl_safe_on_cpu(cpu, msr, &val1)) | 1148 | /* make sure domain counters are available and contains non-zero |
1148 | return -ENODEV; | 1149 | * values, otherwise skip it. |
1149 | |||
1150 | /* PP1/uncore/graphics domain may not be active at the time of | ||
1151 | * driver loading. So skip further checks. | ||
1152 | */ | 1150 | */ |
1153 | if (domain == RAPL_DOMAIN_PP1) | 1151 | if (rdmsrl_safe_on_cpu(cpu, msr, &val) || !val) |
1154 | return 0; | 1152 | return -ENODEV; |
1155 | /* energy counters roll slowly on some domains */ | ||
1156 | while (++retry < 10) { | ||
1157 | usleep_range(10000, 15000); | ||
1158 | rdmsrl_safe_on_cpu(cpu, msr, &val2); | ||
1159 | if ((val1 & ENERGY_STATUS_MASK) != (val2 & ENERGY_STATUS_MASK)) | ||
1160 | return 0; | ||
1161 | } | ||
1162 | /* if energy counter does not change, report as bad domain */ | ||
1163 | pr_info("domain %s energy ctr %llu:%llu not working, skip\n", | ||
1164 | rapl_domain_names[domain], val1, val2); | ||
1165 | 1153 | ||
1166 | return -ENODEV; | 1154 | return 0; |
1167 | } | 1155 | } |
1168 | 1156 | ||
1169 | /* Detect active and valid domains for the given CPU, caller must | 1157 | /* Detect active and valid domains for the given CPU, caller must |
@@ -1180,6 +1168,9 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu) | |||
1180 | /* use physical package id to read counters */ | 1168 | /* use physical package id to read counters */ |
1181 | if (!rapl_check_domain(cpu, i)) | 1169 | if (!rapl_check_domain(cpu, i)) |
1182 | rp->domain_map |= 1 << i; | 1170 | rp->domain_map |= 1 << i; |
1171 | else | ||
1172 | pr_warn("RAPL domain %s detection failed\n", | ||
1173 | rapl_domain_names[i]); | ||
1183 | } | 1174 | } |
1184 | rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX); | 1175 | rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX); |
1185 | if (!rp->nr_domains) { | 1176 | if (!rp->nr_domains) { |
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index 6963bdf54175..6aea373547f6 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig | |||
@@ -6,6 +6,7 @@ menu "PTP clock support" | |||
6 | 6 | ||
7 | config PTP_1588_CLOCK | 7 | config PTP_1588_CLOCK |
8 | tristate "PTP clock support" | 8 | tristate "PTP clock support" |
9 | depends on NET | ||
9 | select PPS | 10 | select PPS |
10 | select NET_PTP_CLASSIFY | 11 | select NET_PTP_CLASSIFY |
11 | help | 12 | help |
@@ -74,7 +75,7 @@ config DP83640_PHY | |||
74 | config PTP_1588_CLOCK_PCH | 75 | config PTP_1588_CLOCK_PCH |
75 | tristate "Intel PCH EG20T as PTP clock" | 76 | tristate "Intel PCH EG20T as PTP clock" |
76 | depends on X86 || COMPILE_TEST | 77 | depends on X86 || COMPILE_TEST |
77 | depends on HAS_IOMEM | 78 | depends on HAS_IOMEM && NET |
78 | select PTP_1588_CLOCK | 79 | select PTP_1588_CLOCK |
79 | help | 80 | help |
80 | This driver adds support for using the PCH EG20T as a PTP | 81 | This driver adds support for using the PCH EG20T as a PTP |
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c index bd628a6f981d..e5f13c4310fe 100644 --- a/drivers/rtc/rtc-hym8563.c +++ b/drivers/rtc/rtc-hym8563.c | |||
@@ -569,6 +569,9 @@ static int hym8563_probe(struct i2c_client *client, | |||
569 | if (IS_ERR(hym8563->rtc)) | 569 | if (IS_ERR(hym8563->rtc)) |
570 | return PTR_ERR(hym8563->rtc); | 570 | return PTR_ERR(hym8563->rtc); |
571 | 571 | ||
572 | /* the hym8563 alarm only supports a minute accuracy */ | ||
573 | hym8563->rtc->uie_unsupported = 1; | ||
574 | |||
572 | #ifdef CONFIG_COMMON_CLK | 575 | #ifdef CONFIG_COMMON_CLK |
573 | hym8563_clkout_register_clk(hym8563); | 576 | hym8563_clkout_register_clk(hym8563); |
574 | #endif | 577 | #endif |
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 1b681427dde0..c341f855fadc 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c | |||
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy) | |||
1621 | list_del(&rphy->list); | 1621 | list_del(&rphy->list); |
1622 | mutex_unlock(&sas_host->lock); | 1622 | mutex_unlock(&sas_host->lock); |
1623 | 1623 | ||
1624 | sas_bsg_remove(shost, rphy); | ||
1625 | |||
1626 | transport_destroy_device(dev); | 1624 | transport_destroy_device(dev); |
1627 | 1625 | ||
1628 | put_device(dev); | 1626 | put_device(dev); |
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy) | |||
1681 | } | 1679 | } |
1682 | 1680 | ||
1683 | sas_rphy_unlink(rphy); | 1681 | sas_rphy_unlink(rphy); |
1682 | sas_bsg_remove(NULL, rphy); | ||
1684 | transport_remove_device(dev); | 1683 | transport_remove_device(dev); |
1685 | device_del(dev); | 1684 | device_del(dev); |
1686 | } | 1685 | } |
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile index fc67f564f02c..788ed9b59b4e 100644 --- a/drivers/sh/Makefile +++ b/drivers/sh/Makefile | |||
@@ -1,10 +1,12 @@ | |||
1 | # | 1 | # |
2 | # Makefile for the SuperH specific drivers. | 2 | # Makefile for the SuperH specific drivers. |
3 | # | 3 | # |
4 | obj-y := intc/ | 4 | obj-$(CONFIG_SUPERH) += intc/ |
5 | obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += intc/ | ||
6 | ifneq ($(CONFIG_COMMON_CLK),y) | ||
7 | obj-$(CONFIG_HAVE_CLK) += clk/ | ||
8 | endif | ||
9 | obj-$(CONFIG_MAPLE) += maple/ | ||
10 | obj-$(CONFIG_SUPERHYWAY) += superhyway/ | ||
5 | 11 | ||
6 | obj-$(CONFIG_HAVE_CLK) += clk/ | 12 | obj-y += pm_runtime.o |
7 | obj-$(CONFIG_MAPLE) += maple/ | ||
8 | obj-$(CONFIG_SUPERHYWAY) += superhyway/ | ||
9 | |||
10 | obj-y += pm_runtime.o | ||
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index 8afa5a4589f2..10c65eb51f85 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c | |||
@@ -50,8 +50,25 @@ static struct pm_clk_notifier_block platform_bus_notifier = { | |||
50 | .con_ids = { NULL, }, | 50 | .con_ids = { NULL, }, |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static bool default_pm_on; | ||
54 | |||
53 | static int __init sh_pm_runtime_init(void) | 55 | static int __init sh_pm_runtime_init(void) |
54 | { | 56 | { |
57 | if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { | ||
58 | if (!of_machine_is_compatible("renesas,emev2") && | ||
59 | !of_machine_is_compatible("renesas,r7s72100") && | ||
60 | !of_machine_is_compatible("renesas,r8a73a4") && | ||
61 | !of_machine_is_compatible("renesas,r8a7740") && | ||
62 | !of_machine_is_compatible("renesas,r8a7778") && | ||
63 | !of_machine_is_compatible("renesas,r8a7779") && | ||
64 | !of_machine_is_compatible("renesas,r8a7790") && | ||
65 | !of_machine_is_compatible("renesas,r8a7791") && | ||
66 | !of_machine_is_compatible("renesas,sh7372") && | ||
67 | !of_machine_is_compatible("renesas,sh73a0")) | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | default_pm_on = true; | ||
55 | pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); | 72 | pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); |
56 | return 0; | 73 | return 0; |
57 | } | 74 | } |
@@ -59,7 +76,8 @@ core_initcall(sh_pm_runtime_init); | |||
59 | 76 | ||
60 | static int __init sh_pm_runtime_late_init(void) | 77 | static int __init sh_pm_runtime_late_init(void) |
61 | { | 78 | { |
62 | pm_genpd_poweroff_unused(); | 79 | if (default_pm_on) |
80 | pm_genpd_poweroff_unused(); | ||
63 | return 0; | 81 | return 0; |
64 | } | 82 | } |
65 | late_initcall(sh_pm_runtime_late_init); | 83 | late_initcall(sh_pm_runtime_late_init); |
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index 713af4806f26..f6759dc0153b 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c | |||
@@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data, | |||
29 | struct sg_table *sgt; | 29 | struct sg_table *sgt; |
30 | void *buf, *pbuf; | 30 | void *buf, *pbuf; |
31 | 31 | ||
32 | /* | ||
33 | * Some DMA controllers have problems transferring buffers that are | ||
34 | * not multiple of 4 bytes. So we truncate the transfer so that it | ||
35 | * is suitable for such controllers, and handle the trailing bytes | ||
36 | * manually after the DMA completes. | ||
37 | * | ||
38 | * REVISIT: It would be better if this information could be | ||
39 | * retrieved directly from the DMA device in a similar way than | ||
40 | * ->copy_align etc. is done. | ||
41 | */ | ||
42 | len = ALIGN(drv_data->len, 4); | ||
43 | |||
44 | if (dir == DMA_TO_DEVICE) { | 32 | if (dir == DMA_TO_DEVICE) { |
45 | dmadev = drv_data->tx_chan->device->dev; | 33 | dmadev = drv_data->tx_chan->device->dev; |
46 | sgt = &drv_data->tx_sgt; | 34 | sgt = &drv_data->tx_sgt; |
@@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, | |||
144 | if (!error) { | 132 | if (!error) { |
145 | pxa2xx_spi_unmap_dma_buffers(drv_data); | 133 | pxa2xx_spi_unmap_dma_buffers(drv_data); |
146 | 134 | ||
147 | /* Handle the last bytes of unaligned transfer */ | ||
148 | drv_data->tx += drv_data->tx_map_len; | 135 | drv_data->tx += drv_data->tx_map_len; |
149 | drv_data->write(drv_data); | ||
150 | |||
151 | drv_data->rx += drv_data->rx_map_len; | 136 | drv_data->rx += drv_data->rx_map_len; |
152 | drv_data->read(drv_data); | ||
153 | 137 | ||
154 | msg->actual_length += drv_data->len; | 138 | msg->actual_length += drv_data->len; |
155 | msg->state = pxa2xx_spi_next_transfer(drv_data); | 139 | msg->state = pxa2xx_spi_next_transfer(drv_data); |
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index b032e8885e24..78c66e3c53ed 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c | |||
@@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev) | |||
734 | int ret; | 734 | int ret; |
735 | 735 | ||
736 | ret = pm_runtime_get_sync(&pdev->dev); | 736 | ret = pm_runtime_get_sync(&pdev->dev); |
737 | if (ret) | 737 | if (ret < 0) |
738 | return ret; | 738 | return ret; |
739 | 739 | ||
740 | ret = spi_qup_set_state(controller, QUP_STATE_RESET); | 740 | ret = spi_qup_set_state(controller, QUP_STATE_RESET); |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 4eb9bf02996c..939edf473235 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable) | |||
580 | spi->master->set_cs(spi, !enable); | 580 | spi->master->set_cs(spi, !enable); |
581 | } | 581 | } |
582 | 582 | ||
583 | #ifdef CONFIG_HAS_DMA | ||
583 | static int spi_map_buf(struct spi_master *master, struct device *dev, | 584 | static int spi_map_buf(struct spi_master *master, struct device *dev, |
584 | struct sg_table *sgt, void *buf, size_t len, | 585 | struct sg_table *sgt, void *buf, size_t len, |
585 | enum dma_data_direction dir) | 586 | enum dma_data_direction dir) |
@@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev, | |||
637 | } | 638 | } |
638 | } | 639 | } |
639 | 640 | ||
640 | static int spi_map_msg(struct spi_master *master, struct spi_message *msg) | 641 | static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) |
641 | { | 642 | { |
642 | struct device *tx_dev, *rx_dev; | 643 | struct device *tx_dev, *rx_dev; |
643 | struct spi_transfer *xfer; | 644 | struct spi_transfer *xfer; |
644 | void *tmp; | ||
645 | unsigned int max_tx, max_rx; | ||
646 | int ret; | 645 | int ret; |
647 | 646 | ||
648 | if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { | ||
649 | max_tx = 0; | ||
650 | max_rx = 0; | ||
651 | |||
652 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | ||
653 | if ((master->flags & SPI_MASTER_MUST_TX) && | ||
654 | !xfer->tx_buf) | ||
655 | max_tx = max(xfer->len, max_tx); | ||
656 | if ((master->flags & SPI_MASTER_MUST_RX) && | ||
657 | !xfer->rx_buf) | ||
658 | max_rx = max(xfer->len, max_rx); | ||
659 | } | ||
660 | |||
661 | if (max_tx) { | ||
662 | tmp = krealloc(master->dummy_tx, max_tx, | ||
663 | GFP_KERNEL | GFP_DMA); | ||
664 | if (!tmp) | ||
665 | return -ENOMEM; | ||
666 | master->dummy_tx = tmp; | ||
667 | memset(tmp, 0, max_tx); | ||
668 | } | ||
669 | |||
670 | if (max_rx) { | ||
671 | tmp = krealloc(master->dummy_rx, max_rx, | ||
672 | GFP_KERNEL | GFP_DMA); | ||
673 | if (!tmp) | ||
674 | return -ENOMEM; | ||
675 | master->dummy_rx = tmp; | ||
676 | } | ||
677 | |||
678 | if (max_tx || max_rx) { | ||
679 | list_for_each_entry(xfer, &msg->transfers, | ||
680 | transfer_list) { | ||
681 | if (!xfer->tx_buf) | ||
682 | xfer->tx_buf = master->dummy_tx; | ||
683 | if (!xfer->rx_buf) | ||
684 | xfer->rx_buf = master->dummy_rx; | ||
685 | } | ||
686 | } | ||
687 | } | ||
688 | |||
689 | if (!master->can_dma) | 647 | if (!master->can_dma) |
690 | return 0; | 648 | return 0; |
691 | 649 | ||
@@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) | |||
742 | 700 | ||
743 | return 0; | 701 | return 0; |
744 | } | 702 | } |
703 | #else /* !CONFIG_HAS_DMA */ | ||
704 | static inline int __spi_map_msg(struct spi_master *master, | ||
705 | struct spi_message *msg) | ||
706 | { | ||
707 | return 0; | ||
708 | } | ||
709 | |||
710 | static inline int spi_unmap_msg(struct spi_master *master, | ||
711 | struct spi_message *msg) | ||
712 | { | ||
713 | return 0; | ||
714 | } | ||
715 | #endif /* !CONFIG_HAS_DMA */ | ||
716 | |||
717 | static int spi_map_msg(struct spi_master *master, struct spi_message *msg) | ||
718 | { | ||
719 | struct spi_transfer *xfer; | ||
720 | void *tmp; | ||
721 | unsigned int max_tx, max_rx; | ||
722 | |||
723 | if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { | ||
724 | max_tx = 0; | ||
725 | max_rx = 0; | ||
726 | |||
727 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | ||
728 | if ((master->flags & SPI_MASTER_MUST_TX) && | ||
729 | !xfer->tx_buf) | ||
730 | max_tx = max(xfer->len, max_tx); | ||
731 | if ((master->flags & SPI_MASTER_MUST_RX) && | ||
732 | !xfer->rx_buf) | ||
733 | max_rx = max(xfer->len, max_rx); | ||
734 | } | ||
735 | |||
736 | if (max_tx) { | ||
737 | tmp = krealloc(master->dummy_tx, max_tx, | ||
738 | GFP_KERNEL | GFP_DMA); | ||
739 | if (!tmp) | ||
740 | return -ENOMEM; | ||
741 | master->dummy_tx = tmp; | ||
742 | memset(tmp, 0, max_tx); | ||
743 | } | ||
744 | |||
745 | if (max_rx) { | ||
746 | tmp = krealloc(master->dummy_rx, max_rx, | ||
747 | GFP_KERNEL | GFP_DMA); | ||
748 | if (!tmp) | ||
749 | return -ENOMEM; | ||
750 | master->dummy_rx = tmp; | ||
751 | } | ||
752 | |||
753 | if (max_tx || max_rx) { | ||
754 | list_for_each_entry(xfer, &msg->transfers, | ||
755 | transfer_list) { | ||
756 | if (!xfer->tx_buf) | ||
757 | xfer->tx_buf = master->dummy_tx; | ||
758 | if (!xfer->rx_buf) | ||
759 | xfer->rx_buf = master->dummy_rx; | ||
760 | } | ||
761 | } | ||
762 | } | ||
763 | |||
764 | return __spi_map_msg(master, msg); | ||
765 | } | ||
745 | 766 | ||
746 | /* | 767 | /* |
747 | * spi_transfer_one_message - Default implementation of transfer_one_message() | 768 | * spi_transfer_one_message - Default implementation of transfer_one_message() |
@@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master) | |||
1151 | { | 1172 | { |
1152 | int ret; | 1173 | int ret; |
1153 | 1174 | ||
1154 | master->queued = true; | ||
1155 | master->transfer = spi_queued_transfer; | 1175 | master->transfer = spi_queued_transfer; |
1156 | if (!master->transfer_one_message) | 1176 | if (!master->transfer_one_message) |
1157 | master->transfer_one_message = spi_transfer_one_message; | 1177 | master->transfer_one_message = spi_transfer_one_message; |
@@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master) | |||
1162 | dev_err(&master->dev, "problem initializing queue\n"); | 1182 | dev_err(&master->dev, "problem initializing queue\n"); |
1163 | goto err_init_queue; | 1183 | goto err_init_queue; |
1164 | } | 1184 | } |
1185 | master->queued = true; | ||
1165 | ret = spi_start_queue(master); | 1186 | ret = spi_start_queue(master); |
1166 | if (ret) { | 1187 | if (ret) { |
1167 | dev_err(&master->dev, "problem starting queue\n"); | 1188 | dev_err(&master->dev, "problem starting queue\n"); |
@@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master) | |||
1171 | return 0; | 1192 | return 0; |
1172 | 1193 | ||
1173 | err_start_queue: | 1194 | err_start_queue: |
1174 | err_init_queue: | ||
1175 | spi_destroy_queue(master); | 1195 | spi_destroy_queue(master); |
1196 | err_init_queue: | ||
1176 | return ret; | 1197 | return ret; |
1177 | } | 1198 | } |
1178 | 1199 | ||
@@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master); | |||
1756 | */ | 1777 | */ |
1757 | int spi_setup(struct spi_device *spi) | 1778 | int spi_setup(struct spi_device *spi) |
1758 | { | 1779 | { |
1759 | unsigned bad_bits; | 1780 | unsigned bad_bits, ugly_bits; |
1760 | int status = 0; | 1781 | int status = 0; |
1761 | 1782 | ||
1762 | /* check mode to prevent that DUAL and QUAD set at the same time | 1783 | /* check mode to prevent that DUAL and QUAD set at the same time |
@@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi) | |||
1776 | * that aren't supported with their current master | 1797 | * that aren't supported with their current master |
1777 | */ | 1798 | */ |
1778 | bad_bits = spi->mode & ~spi->master->mode_bits; | 1799 | bad_bits = spi->mode & ~spi->master->mode_bits; |
1800 | ugly_bits = bad_bits & | ||
1801 | (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); | ||
1802 | if (ugly_bits) { | ||
1803 | dev_warn(&spi->dev, | ||
1804 | "setup: ignoring unsupported mode bits %x\n", | ||
1805 | ugly_bits); | ||
1806 | spi->mode &= ~ugly_bits; | ||
1807 | bad_bits &= ~ugly_bits; | ||
1808 | } | ||
1779 | if (bad_bits) { | 1809 | if (bad_bits) { |
1780 | dev_err(&spi->dev, "setup: unsupported mode bits %x\n", | 1810 | dev_err(&spi->dev, "setup: unsupported mode bits %x\n", |
1781 | bad_bits); | 1811 | bad_bits); |
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 4144a75e5f71..c270c9ae6d27 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c | |||
@@ -517,7 +517,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node, | |||
517 | of_node_put(port); | 517 | of_node_put(port); |
518 | if (port == imx_crtc->port) { | 518 | if (port == imx_crtc->port) { |
519 | ret = of_graph_parse_endpoint(ep, &endpoint); | 519 | ret = of_graph_parse_endpoint(ep, &endpoint); |
520 | return ret ? ret : endpoint.id; | 520 | return ret ? ret : endpoint.port; |
521 | } | 521 | } |
522 | } while (ep); | 522 | } while (ep); |
523 | 523 | ||
@@ -675,6 +675,11 @@ static int imx_drm_platform_probe(struct platform_device *pdev) | |||
675 | if (!remote || !of_device_is_available(remote)) { | 675 | if (!remote || !of_device_is_available(remote)) { |
676 | of_node_put(remote); | 676 | of_node_put(remote); |
677 | continue; | 677 | continue; |
678 | } else if (!of_device_is_available(remote->parent)) { | ||
679 | dev_warn(&pdev->dev, "parent device of %s is not available\n", | ||
680 | remote->full_name); | ||
681 | of_node_put(remote); | ||
682 | continue; | ||
678 | } | 683 | } |
679 | 684 | ||
680 | ret = imx_drm_add_component(&pdev->dev, remote); | 685 | ret = imx_drm_add_component(&pdev->dev, remote); |
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c index 575533f4fd64..a23f4f773146 100644 --- a/drivers/staging/imx-drm/imx-tve.c +++ b/drivers/staging/imx-drm/imx-tve.c | |||
@@ -582,7 +582,7 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) | |||
582 | tve->dev = dev; | 582 | tve->dev = dev; |
583 | spin_lock_init(&tve->lock); | 583 | spin_lock_init(&tve->lock); |
584 | 584 | ||
585 | ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0); | 585 | ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0); |
586 | if (ddc_node) { | 586 | if (ddc_node) { |
587 | tve->ddc = of_find_i2c_adapter_by_node(ddc_node); | 587 | tve->ddc = of_find_i2c_adapter_by_node(ddc_node); |
588 | of_node_put(ddc_node); | 588 | of_node_put(ddc_node); |
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c index 8c101cbbee97..acc8184c46cd 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_video.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c | |||
@@ -1247,9 +1247,18 @@ static int vpfe_stop_streaming(struct vb2_queue *vq) | |||
1247 | struct vpfe_fh *fh = vb2_get_drv_priv(vq); | 1247 | struct vpfe_fh *fh = vb2_get_drv_priv(vq); |
1248 | struct vpfe_video_device *video = fh->video; | 1248 | struct vpfe_video_device *video = fh->video; |
1249 | 1249 | ||
1250 | if (!vb2_is_streaming(vq)) | ||
1251 | return 0; | ||
1252 | /* release all active buffers */ | 1250 | /* release all active buffers */ |
1251 | if (video->cur_frm == video->next_frm) { | ||
1252 | vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR); | ||
1253 | } else { | ||
1254 | if (video->cur_frm != NULL) | ||
1255 | vb2_buffer_done(&video->cur_frm->vb, | ||
1256 | VB2_BUF_STATE_ERROR); | ||
1257 | if (video->next_frm != NULL) | ||
1258 | vb2_buffer_done(&video->next_frm->vb, | ||
1259 | VB2_BUF_STATE_ERROR); | ||
1260 | } | ||
1261 | |||
1253 | while (!list_empty(&video->dma_queue)) { | 1262 | while (!list_empty(&video->dma_queue)) { |
1254 | video->next_frm = list_entry(video->dma_queue.next, | 1263 | video->next_frm = list_entry(video->dma_queue.next, |
1255 | struct vpfe_cap_buffer, list); | 1264 | struct vpfe_cap_buffer, list); |
diff --git a/drivers/staging/media/sn9c102/sn9c102_devtable.h b/drivers/staging/media/sn9c102/sn9c102_devtable.h index b3d2cc729657..4ba569258498 100644 --- a/drivers/staging/media/sn9c102/sn9c102_devtable.h +++ b/drivers/staging/media/sn9c102/sn9c102_devtable.h | |||
@@ -48,10 +48,8 @@ static const struct usb_device_id sn9c102_id_table[] = { | |||
48 | { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), }, | 48 | { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), }, |
49 | /* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */ | 49 | /* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */ |
50 | { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), }, | 50 | { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), }, |
51 | #endif | ||
52 | { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), }, | 51 | { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), }, |
53 | { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), }, | 52 | { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), }, |
54 | #if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE | ||
55 | { SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), }, | 53 | { SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), }, |
56 | { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), }, | 54 | { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), }, |
57 | { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), }, | 55 | { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), }, |
diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c index 57eca7a45672..4fe751f7c2bf 100644 --- a/drivers/staging/rtl8723au/os_dep/os_intfs.c +++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c | |||
@@ -953,8 +953,6 @@ static int netdev_close(struct net_device *pnetdev) | |||
953 | #endif /* CONFIG_8723AU_P2P */ | 953 | #endif /* CONFIG_8723AU_P2P */ |
954 | 954 | ||
955 | rtw_scan_abort23a(padapter); | 955 | rtw_scan_abort23a(padapter); |
956 | /* set this at the end */ | ||
957 | padapter->rtw_wdev->iftype = NL80211_IFTYPE_MONITOR; | ||
958 | 956 | ||
959 | RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n")); | 957 | RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n")); |
960 | DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup); | 958 | DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup); |
diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c index c49160e477d8..07e542e5d156 100644 --- a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c +++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c | |||
@@ -26,7 +26,7 @@ unsigned int ffaddr2pipehdl23a(struct dvobj_priv *pdvobj, u32 addr) | |||
26 | if (addr == RECV_BULK_IN_ADDR) { | 26 | if (addr == RECV_BULK_IN_ADDR) { |
27 | pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]); | 27 | pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]); |
28 | } else if (addr == RECV_INT_IN_ADDR) { | 28 | } else if (addr == RECV_INT_IN_ADDR) { |
29 | pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]); | 29 | pipe = usb_rcvintpipe(pusbd, pdvobj->RtInPipe[1]); |
30 | } else if (addr < HW_QUEUE_ENTRY) { | 30 | } else if (addr < HW_QUEUE_ENTRY) { |
31 | ep_num = pdvobj->Queue2Pipe[addr]; | 31 | ep_num = pdvobj->Queue2Pipe[addr]; |
32 | pipe = usb_sndbulkpipe(pusbd, ep_num); | 32 | pipe = usb_sndbulkpipe(pusbd, ep_num); |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 78cab13bbb1b..46588c85d39b 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -1593,7 +1593,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
1593 | * Initiator is expecting a NopIN ping reply.. | 1593 | * Initiator is expecting a NopIN ping reply.. |
1594 | */ | 1594 | */ |
1595 | if (hdr->itt != RESERVED_ITT) { | 1595 | if (hdr->itt != RESERVED_ITT) { |
1596 | BUG_ON(!cmd); | 1596 | if (!cmd) |
1597 | return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, | ||
1598 | (unsigned char *)hdr); | ||
1597 | 1599 | ||
1598 | spin_lock_bh(&conn->cmd_lock); | 1600 | spin_lock_bh(&conn->cmd_lock); |
1599 | list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); | 1601 | list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); |
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 6960f22909ae..302eb3b78715 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h | |||
@@ -775,6 +775,7 @@ struct iscsi_np { | |||
775 | int np_ip_proto; | 775 | int np_ip_proto; |
776 | int np_sock_type; | 776 | int np_sock_type; |
777 | enum np_thread_state_table np_thread_state; | 777 | enum np_thread_state_table np_thread_state; |
778 | bool enabled; | ||
778 | enum iscsi_timer_flags_table np_login_timer_flags; | 779 | enum iscsi_timer_flags_table np_login_timer_flags; |
779 | u32 np_exports; | 780 | u32 np_exports; |
780 | enum np_flags_table np_flags; | 781 | enum np_flags_table np_flags; |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 8739b98f6f93..ca31fa1b8a4b 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
@@ -436,7 +436,7 @@ static int iscsi_login_zero_tsih_s2( | |||
436 | } | 436 | } |
437 | off = mrdsl % PAGE_SIZE; | 437 | off = mrdsl % PAGE_SIZE; |
438 | if (!off) | 438 | if (!off) |
439 | return 0; | 439 | goto check_prot; |
440 | 440 | ||
441 | if (mrdsl < PAGE_SIZE) | 441 | if (mrdsl < PAGE_SIZE) |
442 | mrdsl = PAGE_SIZE; | 442 | mrdsl = PAGE_SIZE; |
@@ -452,6 +452,31 @@ static int iscsi_login_zero_tsih_s2( | |||
452 | ISCSI_LOGIN_STATUS_NO_RESOURCES); | 452 | ISCSI_LOGIN_STATUS_NO_RESOURCES); |
453 | return -1; | 453 | return -1; |
454 | } | 454 | } |
455 | /* | ||
456 | * ISER currently requires that ImmediateData + Unsolicited | ||
457 | * Data be disabled when protection / signature MRs are enabled. | ||
458 | */ | ||
459 | check_prot: | ||
460 | if (sess->se_sess->sup_prot_ops & | ||
461 | (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS | | ||
462 | TARGET_PROT_DOUT_INSERT)) { | ||
463 | |||
464 | sprintf(buf, "ImmediateData=No"); | ||
465 | if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { | ||
466 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | ||
467 | ISCSI_LOGIN_STATUS_NO_RESOURCES); | ||
468 | return -1; | ||
469 | } | ||
470 | |||
471 | sprintf(buf, "InitialR2T=Yes"); | ||
472 | if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { | ||
473 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | ||
474 | ISCSI_LOGIN_STATUS_NO_RESOURCES); | ||
475 | return -1; | ||
476 | } | ||
477 | pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for" | ||
478 | " T10-PI enabled ISER session\n"); | ||
479 | } | ||
455 | } | 480 | } |
456 | 481 | ||
457 | return 0; | 482 | return 0; |
@@ -984,6 +1009,7 @@ int iscsi_target_setup_login_socket( | |||
984 | } | 1009 | } |
985 | 1010 | ||
986 | np->np_transport = t; | 1011 | np->np_transport = t; |
1012 | np->enabled = true; | ||
987 | return 0; | 1013 | return 0; |
988 | } | 1014 | } |
989 | 1015 | ||
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index eb96b20dc09e..ca1811858afd 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c | |||
@@ -184,6 +184,7 @@ static void iscsit_clear_tpg_np_login_thread( | |||
184 | return; | 184 | return; |
185 | } | 185 | } |
186 | 186 | ||
187 | tpg_np->tpg_np->enabled = false; | ||
187 | iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); | 188 | iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); |
188 | } | 189 | } |
189 | 190 | ||
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 65001e133670..26416c15d65c 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -798,10 +798,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | |||
798 | pr_err("emulate_write_cache not supported for pSCSI\n"); | 798 | pr_err("emulate_write_cache not supported for pSCSI\n"); |
799 | return -EINVAL; | 799 | return -EINVAL; |
800 | } | 800 | } |
801 | if (dev->transport->get_write_cache) { | 801 | if (flag && |
802 | pr_warn("emulate_write_cache cannot be changed when underlying" | 802 | dev->transport->get_write_cache) { |
803 | " HW reports WriteCacheEnabled, ignoring request\n"); | 803 | pr_err("emulate_write_cache not supported for this device\n"); |
804 | return 0; | 804 | return -EINVAL; |
805 | } | 805 | } |
806 | 806 | ||
807 | dev->dev_attrib.emulate_write_cache = flag; | 807 | dev->dev_attrib.emulate_write_cache = flag; |
@@ -936,6 +936,10 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag) | |||
936 | return 0; | 936 | return 0; |
937 | } | 937 | } |
938 | if (!dev->transport->init_prot || !dev->transport->free_prot) { | 938 | if (!dev->transport->init_prot || !dev->transport->free_prot) { |
939 | /* 0 is only allowed value for non-supporting backends */ | ||
940 | if (flag == 0) | ||
941 | return 0; | ||
942 | |||
939 | pr_err("DIF protection not supported by backend: %s\n", | 943 | pr_err("DIF protection not supported by backend: %s\n", |
940 | dev->transport->name); | 944 | dev->transport->name); |
941 | return -ENOSYS; | 945 | return -ENOSYS; |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index d4b98690a736..789aa9eb0a1e 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -1113,6 +1113,7 @@ void transport_init_se_cmd( | |||
1113 | init_completion(&cmd->cmd_wait_comp); | 1113 | init_completion(&cmd->cmd_wait_comp); |
1114 | init_completion(&cmd->task_stop_comp); | 1114 | init_completion(&cmd->task_stop_comp); |
1115 | spin_lock_init(&cmd->t_state_lock); | 1115 | spin_lock_init(&cmd->t_state_lock); |
1116 | kref_init(&cmd->cmd_kref); | ||
1116 | cmd->transport_state = CMD_T_DEV_ACTIVE; | 1117 | cmd->transport_state = CMD_T_DEV_ACTIVE; |
1117 | 1118 | ||
1118 | cmd->se_tfo = tfo; | 1119 | cmd->se_tfo = tfo; |
@@ -2357,7 +2358,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, | |||
2357 | unsigned long flags; | 2358 | unsigned long flags; |
2358 | int ret = 0; | 2359 | int ret = 0; |
2359 | 2360 | ||
2360 | kref_init(&se_cmd->cmd_kref); | ||
2361 | /* | 2361 | /* |
2362 | * Add a second kref if the fabric caller is expecting to handle | 2362 | * Add a second kref if the fabric caller is expecting to handle |
2363 | * fabric acknowledgement that requires two target_put_sess_cmd() | 2363 | * fabric acknowledgement that requires two target_put_sess_cmd() |
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 01cf37f212c3..f5fd515b2bee 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c | |||
@@ -90,18 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd) | |||
90 | { | 90 | { |
91 | struct fc_frame *fp; | 91 | struct fc_frame *fp; |
92 | struct fc_lport *lport; | 92 | struct fc_lport *lport; |
93 | struct se_session *se_sess; | 93 | struct ft_sess *sess; |
94 | 94 | ||
95 | if (!cmd) | 95 | if (!cmd) |
96 | return; | 96 | return; |
97 | se_sess = cmd->sess->se_sess; | 97 | sess = cmd->sess; |
98 | fp = cmd->req_frame; | 98 | fp = cmd->req_frame; |
99 | lport = fr_dev(fp); | 99 | lport = fr_dev(fp); |
100 | if (fr_seq(fp)) | 100 | if (fr_seq(fp)) |
101 | lport->tt.seq_release(fr_seq(fp)); | 101 | lport->tt.seq_release(fr_seq(fp)); |
102 | fc_frame_free(fp); | 102 | fc_frame_free(fp); |
103 | percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag); | 103 | percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); |
104 | ft_sess_put(cmd->sess); /* undo get from lookup at recv */ | 104 | ft_sess_put(sess); /* undo get from lookup at recv */ |
105 | } | 105 | } |
106 | 106 | ||
107 | void ft_release_cmd(struct se_cmd *se_cmd) | 107 | void ft_release_cmd(struct se_cmd *se_cmd) |
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index bd2172c2d650..428089009cd5 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c | |||
@@ -23,6 +23,7 @@ | |||
23 | 23 | ||
24 | static struct list_head backlight_dev_list; | 24 | static struct list_head backlight_dev_list; |
25 | static struct mutex backlight_dev_list_mutex; | 25 | static struct mutex backlight_dev_list_mutex; |
26 | static struct blocking_notifier_head backlight_notifier; | ||
26 | 27 | ||
27 | static const char *const backlight_types[] = { | 28 | static const char *const backlight_types[] = { |
28 | [BACKLIGHT_RAW] = "raw", | 29 | [BACKLIGHT_RAW] = "raw", |
@@ -370,6 +371,9 @@ struct backlight_device *backlight_device_register(const char *name, | |||
370 | list_add(&new_bd->entry, &backlight_dev_list); | 371 | list_add(&new_bd->entry, &backlight_dev_list); |
371 | mutex_unlock(&backlight_dev_list_mutex); | 372 | mutex_unlock(&backlight_dev_list_mutex); |
372 | 373 | ||
374 | blocking_notifier_call_chain(&backlight_notifier, | ||
375 | BACKLIGHT_REGISTERED, new_bd); | ||
376 | |||
373 | return new_bd; | 377 | return new_bd; |
374 | } | 378 | } |
375 | EXPORT_SYMBOL(backlight_device_register); | 379 | EXPORT_SYMBOL(backlight_device_register); |
@@ -413,6 +417,10 @@ void backlight_device_unregister(struct backlight_device *bd) | |||
413 | pmac_backlight = NULL; | 417 | pmac_backlight = NULL; |
414 | mutex_unlock(&pmac_backlight_mutex); | 418 | mutex_unlock(&pmac_backlight_mutex); |
415 | #endif | 419 | #endif |
420 | |||
421 | blocking_notifier_call_chain(&backlight_notifier, | ||
422 | BACKLIGHT_UNREGISTERED, bd); | ||
423 | |||
416 | mutex_lock(&bd->ops_lock); | 424 | mutex_lock(&bd->ops_lock); |
417 | bd->ops = NULL; | 425 | bd->ops = NULL; |
418 | mutex_unlock(&bd->ops_lock); | 426 | mutex_unlock(&bd->ops_lock); |
@@ -438,6 +446,36 @@ static int devm_backlight_device_match(struct device *dev, void *res, | |||
438 | } | 446 | } |
439 | 447 | ||
440 | /** | 448 | /** |
449 | * backlight_register_notifier - get notified of backlight (un)registration | ||
450 | * @nb: notifier block with the notifier to call on backlight (un)registration | ||
451 | * | ||
452 | * @return 0 on success, otherwise a negative error code | ||
453 | * | ||
454 | * Register a notifier to get notified when backlight devices get registered | ||
455 | * or unregistered. | ||
456 | */ | ||
457 | int backlight_register_notifier(struct notifier_block *nb) | ||
458 | { | ||
459 | return blocking_notifier_chain_register(&backlight_notifier, nb); | ||
460 | } | ||
461 | EXPORT_SYMBOL(backlight_register_notifier); | ||
462 | |||
463 | /** | ||
464 | * backlight_unregister_notifier - unregister a backlight notifier | ||
465 | * @nb: notifier block to unregister | ||
466 | * | ||
467 | * @return 0 on success, otherwise a negative error code | ||
468 | * | ||
469 | * Register a notifier to get notified when backlight devices get registered | ||
470 | * or unregistered. | ||
471 | */ | ||
472 | int backlight_unregister_notifier(struct notifier_block *nb) | ||
473 | { | ||
474 | return blocking_notifier_chain_unregister(&backlight_notifier, nb); | ||
475 | } | ||
476 | EXPORT_SYMBOL(backlight_unregister_notifier); | ||
477 | |||
478 | /** | ||
441 | * devm_backlight_device_register - resource managed backlight_device_register() | 479 | * devm_backlight_device_register - resource managed backlight_device_register() |
442 | * @dev: the device to register | 480 | * @dev: the device to register |
443 | * @name: the name of the device | 481 | * @name: the name of the device |
@@ -544,6 +582,8 @@ static int __init backlight_class_init(void) | |||
544 | backlight_class->pm = &backlight_class_dev_pm_ops; | 582 | backlight_class->pm = &backlight_class_dev_pm_ops; |
545 | INIT_LIST_HEAD(&backlight_dev_list); | 583 | INIT_LIST_HEAD(&backlight_dev_list); |
546 | mutex_init(&backlight_dev_list_mutex); | 584 | mutex_init(&backlight_dev_list_mutex); |
585 | BLOCKING_INIT_NOTIFIER_HEAD(&backlight_notifier); | ||
586 | |||
547 | return 0; | 587 | return 0; |
548 | } | 588 | } |
549 | 589 | ||
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index 96109a9972b6..84b4bfb84344 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c | |||
@@ -66,7 +66,22 @@ static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue); | |||
66 | static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly; | 66 | static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly; |
67 | static unsigned event_array_pages __read_mostly; | 67 | static unsigned event_array_pages __read_mostly; |
68 | 68 | ||
69 | /* | ||
70 | * sync_set_bit() and friends must be unsigned long aligned on non-x86 | ||
71 | * platforms. | ||
72 | */ | ||
73 | #if !defined(CONFIG_X86) && BITS_PER_LONG > 32 | ||
74 | |||
75 | #define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL) | ||
76 | #define EVTCHN_FIFO_BIT(b, w) \ | ||
77 | (((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b) | ||
78 | |||
79 | #else | ||
80 | |||
69 | #define BM(w) ((unsigned long *)(w)) | 81 | #define BM(w) ((unsigned long *)(w)) |
82 | #define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b | ||
83 | |||
84 | #endif | ||
70 | 85 | ||
71 | static inline event_word_t *event_word_from_port(unsigned port) | 86 | static inline event_word_t *event_word_from_port(unsigned port) |
72 | { | 87 | { |
@@ -161,33 +176,38 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu) | |||
161 | static void evtchn_fifo_clear_pending(unsigned port) | 176 | static void evtchn_fifo_clear_pending(unsigned port) |
162 | { | 177 | { |
163 | event_word_t *word = event_word_from_port(port); | 178 | event_word_t *word = event_word_from_port(port); |
164 | sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word)); | 179 | sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); |
165 | } | 180 | } |
166 | 181 | ||
167 | static void evtchn_fifo_set_pending(unsigned port) | 182 | static void evtchn_fifo_set_pending(unsigned port) |
168 | { | 183 | { |
169 | event_word_t *word = event_word_from_port(port); | 184 | event_word_t *word = event_word_from_port(port); |
170 | sync_set_bit(EVTCHN_FIFO_PENDING, BM(word)); | 185 | sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); |
171 | } | 186 | } |
172 | 187 | ||
173 | static bool evtchn_fifo_is_pending(unsigned port) | 188 | static bool evtchn_fifo_is_pending(unsigned port) |
174 | { | 189 | { |
175 | event_word_t *word = event_word_from_port(port); | 190 | event_word_t *word = event_word_from_port(port); |
176 | return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word)); | 191 | return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); |
177 | } | 192 | } |
178 | 193 | ||
179 | static bool evtchn_fifo_test_and_set_mask(unsigned port) | 194 | static bool evtchn_fifo_test_and_set_mask(unsigned port) |
180 | { | 195 | { |
181 | event_word_t *word = event_word_from_port(port); | 196 | event_word_t *word = event_word_from_port(port); |
182 | return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word)); | 197 | return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); |
183 | } | 198 | } |
184 | 199 | ||
185 | static void evtchn_fifo_mask(unsigned port) | 200 | static void evtchn_fifo_mask(unsigned port) |
186 | { | 201 | { |
187 | event_word_t *word = event_word_from_port(port); | 202 | event_word_t *word = event_word_from_port(port); |
188 | sync_set_bit(EVTCHN_FIFO_MASKED, BM(word)); | 203 | sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); |
189 | } | 204 | } |
190 | 205 | ||
206 | static bool evtchn_fifo_is_masked(unsigned port) | ||
207 | { | ||
208 | event_word_t *word = event_word_from_port(port); | ||
209 | return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); | ||
210 | } | ||
191 | /* | 211 | /* |
192 | * Clear MASKED, spinning if BUSY is set. | 212 | * Clear MASKED, spinning if BUSY is set. |
193 | */ | 213 | */ |
@@ -211,7 +231,7 @@ static void evtchn_fifo_unmask(unsigned port) | |||
211 | BUG_ON(!irqs_disabled()); | 231 | BUG_ON(!irqs_disabled()); |
212 | 232 | ||
213 | clear_masked(word); | 233 | clear_masked(word); |
214 | if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) { | 234 | if (evtchn_fifo_is_pending(port)) { |
215 | struct evtchn_unmask unmask = { .port = port }; | 235 | struct evtchn_unmask unmask = { .port = port }; |
216 | (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); | 236 | (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); |
217 | } | 237 | } |
@@ -243,7 +263,7 @@ static void handle_irq_for_port(unsigned port) | |||
243 | 263 | ||
244 | static void consume_one_event(unsigned cpu, | 264 | static void consume_one_event(unsigned cpu, |
245 | struct evtchn_fifo_control_block *control_block, | 265 | struct evtchn_fifo_control_block *control_block, |
246 | unsigned priority, uint32_t *ready) | 266 | unsigned priority, unsigned long *ready) |
247 | { | 267 | { |
248 | struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); | 268 | struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); |
249 | uint32_t head; | 269 | uint32_t head; |
@@ -273,10 +293,9 @@ static void consume_one_event(unsigned cpu, | |||
273 | * copy of the ready word. | 293 | * copy of the ready word. |
274 | */ | 294 | */ |
275 | if (head == 0) | 295 | if (head == 0) |
276 | clear_bit(priority, BM(ready)); | 296 | clear_bit(priority, ready); |
277 | 297 | ||
278 | if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word)) | 298 | if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) |
279 | && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word))) | ||
280 | handle_irq_for_port(port); | 299 | handle_irq_for_port(port); |
281 | 300 | ||
282 | q->head[priority] = head; | 301 | q->head[priority] = head; |
@@ -285,7 +304,7 @@ static void consume_one_event(unsigned cpu, | |||
285 | static void evtchn_fifo_handle_events(unsigned cpu) | 304 | static void evtchn_fifo_handle_events(unsigned cpu) |
286 | { | 305 | { |
287 | struct evtchn_fifo_control_block *control_block; | 306 | struct evtchn_fifo_control_block *control_block; |
288 | uint32_t ready; | 307 | unsigned long ready; |
289 | unsigned q; | 308 | unsigned q; |
290 | 309 | ||
291 | control_block = per_cpu(cpu_control_block, cpu); | 310 | control_block = per_cpu(cpu_control_block, cpu); |